]> git.saurik.com Git - apple/libpthread.git/blob - src/pthread.c
libpthread-105.10.1.tar.gz
[apple/libpthread.git] / src / pthread.c
1 /*
2 * Copyright (c) 2000-2013 Apple Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. Please obtain a copy of the License at
10 * http://www.opensource.apple.com/apsl/ and read it before using this
11 * file.
12 *
13 * The Original Code and all software distributed under the License are
14 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
15 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
16 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
18 * Please see the License for the specific language governing rights and
19 * limitations under the License.
20 *
21 * @APPLE_LICENSE_HEADER_END@
22 */
23 /*
24 * Copyright 1996 1995 by Open Software Foundation, Inc. 1997 1996 1995 1994 1993 1992 1991
25 * All Rights Reserved
26 *
27 * Permission to use, copy, modify, and distribute this software and
28 * its documentation for any purpose and without fee is hereby granted,
29 * provided that the above copyright notice appears in all copies and
30 * that both the copyright notice and this permission notice appear in
31 * supporting documentation.
32 *
33 * OSF DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE
34 * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
35 * FOR A PARTICULAR PURPOSE.
36 *
37 * IN NO EVENT SHALL OSF BE LIABLE FOR ANY SPECIAL, INDIRECT, OR
38 * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
39 * LOSS OF USE, DATA OR PROFITS, WHETHER IN ACTION OF CONTRACT,
40 * NEGLIGENCE, OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
41 * WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
42 *
43 */
44 /*
45 * MkLinux
46 */
47
48 /*
49 * POSIX Pthread Library
50 */
51
52 #include "internal.h"
53 #include "private.h"
54 #include "workqueue_private.h"
55 #include "introspection_private.h"
56 #include "qos_private.h"
57
58 #include <stdlib.h>
59 #include <errno.h>
60 #include <signal.h>
61 #include <unistd.h>
62 #include <mach/mach_init.h>
63 #include <mach/mach_vm.h>
64 #include <sys/time.h>
65 #include <sys/resource.h>
66 #include <sys/sysctl.h>
67 #include <sys/queue.h>
68 #include <sys/mman.h>
69 #include <machine/vmparam.h>
70 #define __APPLE_API_PRIVATE
71 #include <machine/cpu_capabilities.h>
72 #include <libkern/OSAtomic.h>
73
74 #include <_simple.h>
75 #include <platform/string.h>
76 #include <platform/compat.h>
77
78 extern int __sysctl(int *name, u_int namelen, void *oldp, size_t *oldlenp,
79 void *newp, size_t newlen);
80 extern void __exit(int) __attribute__((noreturn));
81
82 static void (*exitf)(int) = __exit;
83 __private_extern__ void* (*_pthread_malloc)(size_t) = NULL;
84 __private_extern__ void (*_pthread_free)(void *) = NULL;
85
86 //
87 // Global variables
88 //
89
90 // This global should be used (carefully) by anyone needing to know if a
91 // pthread (other than the main thread) has been created.
92 int __is_threaded = 0;
93
94 int __unix_conforming = 0;
95
96 // _pthread_list_lock protects _pthread_count, access to the __pthread_head
97 // list, and the parentcheck, childrun and childexit flags of the pthread
98 // structure. Externally imported by pthread_cancelable.c.
99 __private_extern__ pthread_lock_t _pthread_list_lock = LOCK_INITIALIZER;
100 __private_extern__ struct __pthread_list __pthread_head = TAILQ_HEAD_INITIALIZER(__pthread_head);
101 static int _pthread_count = 1;
102
#if PTHREAD_LAYOUT_SPI

// Exported table describing the in-memory layout of struct _pthread for
// introspection clients; plo_version lets consumers detect layout changes.
const struct pthread_layout_offsets_s pthread_layout_offsets = {
	.plo_version = 1,
	.plo_pthread_tsd_base_offset = offsetof(struct _pthread, tsd),
	.plo_pthread_tsd_base_address_offset = 0,
	.plo_pthread_tsd_entry_size = sizeof(((struct _pthread *)NULL)->tsd[0]),
};

#endif // PTHREAD_LAYOUT_SPI
113
114 //
115 // Static variables
116 //
117
// Mach message notification that a thread needs to be recycled.
typedef struct _pthread_reap_msg_t {
	mach_msg_header_t header;   // standard Mach message header
	pthread_t thread;           // thread to be reaped
	mach_msg_trailer_t trailer; // kernel-appended trailer
} pthread_reap_msg_t;
124
125 #define pthreadsize ((size_t)mach_vm_round_page(sizeof(struct _pthread)))
126 static pthread_attr_t _pthread_attr_default = {0};
127 static struct _pthread _thread = {0};
128
129 static int default_priority;
130 static int max_priority;
131 static int min_priority;
132 static int pthread_concurrency;
133
134 // work queue support data
135 static void (*__libdispatch_workerfunction)(pthread_priority_t) = NULL;
136 static int __libdispatch_offset;
137
138 // supported feature set
139 int __pthread_supported_features;
140
141 //
142 // Function prototypes
143 //
144
145 // pthread primitives
146 static int _pthread_allocate(pthread_t *thread, const pthread_attr_t *attrs, void **stack);
147 static int _pthread_deallocate(pthread_t t);
148
149 static void _pthread_terminate(pthread_t t);
150
151 static void _pthread_struct_init(pthread_t t,
152 const pthread_attr_t *attrs,
153 void *stack,
154 size_t stacksize,
155 int kernalloc);
156
157 extern void _pthread_set_self(pthread_t);
158
159 static void _pthread_dealloc_reply_port(pthread_t t);
160
161 static inline void __pthread_add_thread(pthread_t t, bool parent);
162 static inline int __pthread_remove_thread(pthread_t t, bool child, bool *should_exit);
163
164 static int _pthread_find_thread(pthread_t thread);
165
166 static void _pthread_exit(pthread_t self, void *value_ptr) __dead2;
167 static void _pthread_setcancelstate_exit(pthread_t self, void *value_ptr, int conforming);
168
169 static inline void _pthread_introspection_thread_create(pthread_t t, bool destroy);
170 static inline void _pthread_introspection_thread_start(pthread_t t);
171 static inline void _pthread_introspection_thread_terminate(pthread_t t, void *freeaddr, size_t freesize, bool destroy);
172 static inline void _pthread_introspection_thread_destroy(pthread_t t);
173
174 extern void start_wqthread(pthread_t self, mach_port_t kport, void *stackaddr, void *unused, int reuse);
175 extern void thread_start(pthread_t self, mach_port_t kport, void *(*fun)(void *), void * funarg, size_t stacksize, unsigned int flags);
176
177 void pthread_workqueue_atfork_child(void);
178
179 static bool __workq_newapi;
180
/* Compatibility: previous pthread API used WORKQUEUE_OVERCOMMIT to request overcommit threads from
 * the kernel. This definition is kept here, in userspace only, to perform the compatibility shim
 * from old API requests to the new kext conventions.
 */
185 #define WORKQUEUE_OVERCOMMIT 0x10000
186
/*
 * Flags field passed to bsdthread_create and back in pthread_start
 * 31 <---------------------------------> 0
 * _________________________________________
 * | flags(8) | policy(8) | importance(16) |
 * -----------------------------------------
 */
194
195 #define PTHREAD_START_CUSTOM 0x01000000
196 #define PTHREAD_START_SETSCHED 0x02000000
197 #define PTHREAD_START_DETACHED 0x04000000
198 #define PTHREAD_START_QOSCLASS 0x08000000
199 #define PTHREAD_START_QOSCLASS_MASK 0xffffff
200 #define PTHREAD_START_POLICY_BITSHIFT 16
201 #define PTHREAD_START_POLICY_MASK 0xff
202 #define PTHREAD_START_IMPORTANCE_MASK 0xffff
203
204 static int pthread_setschedparam_internal(pthread_t, mach_port_t, int, const struct sched_param *);
205 extern pthread_t __bsdthread_create(void *(*func)(void *), void * func_arg, void * stack, pthread_t thread, unsigned int flags);
206 extern int __bsdthread_register(void (*)(pthread_t, mach_port_t, void *(*)(void *), void *, size_t, unsigned int), void (*)(pthread_t, mach_port_t, void *, void *, int), int,void (*)(pthread_t, mach_port_t, void *(*)(void *), void *, size_t, unsigned int), int32_t *,__uint64_t);
207 extern int __bsdthread_terminate(void * freeaddr, size_t freesize, mach_port_t kport, mach_port_t joinsem);
208 extern __uint64_t __thread_selfid( void );
209 extern int __pthread_canceled(int);
210 extern int __pthread_kill(mach_port_t, int);
211
212 extern int __workq_open(void);
213 extern int __workq_kernreturn(int, void *, int, int);
214
215 #if defined(__i386__) || defined(__x86_64__)
216 static const mach_vm_address_t PTHREAD_STACK_HINT = 0xB0000000;
217 #else
218 #error no PTHREAD_STACK_HINT for this architecture
219 #endif
220
221 #ifdef __i386__
222 // Check for regression of <rdar://problem/13249323>
223 struct rdar_13249323_regression_static_assert { unsigned a[offsetof(struct _pthread, err_no) == 68 ? 1 : -1]; };
224 #endif
225
// Allocate a thread structure, stack and guard page.
//
// The thread structure may optionally be placed in the same allocation as the
// stack, residing above the top of the stack. This cannot be done if a
// custom stack address is provided.
//
// Similarly the guard page cannot be allocated if a custom stack address is
// provided.
//
// The allocated thread structure is initialized with values that indicate how
// it should be freed.
//
// Returns 0 on success (with *thread and *stack set), EAGAIN if the VM
// allocation fails.
static int
_pthread_allocate(pthread_t *thread, const pthread_attr_t *attrs, void **stack)
{
	int res;
	kern_return_t kr;
	pthread_t t = NULL;
	mach_vm_address_t allocaddr = PTHREAD_STACK_HINT;
	size_t allocsize = 0;
	size_t guardsize = 0;
	size_t stacksize = 0;

	PTHREAD_ASSERT(attrs->stacksize >= PTHREAD_STACK_MIN);

	*thread = NULL;
	*stack = NULL;

	// Allocate a pthread structure if necessary

	if (attrs->stackaddr != NULL) {
		// Caller supplied the stack: allocate only the pthread structure,
		// no guard page (guardsize/stacksize stay 0 below).
		PTHREAD_ASSERT(((uintptr_t)attrs->stackaddr % vm_page_size) == 0);
		*stack = attrs->stackaddr;
		allocsize = pthreadsize;
	} else {
		// One region holds guard page + stack + pthread structure.
		guardsize = attrs->guardsize;
		stacksize = attrs->stacksize;
		allocsize = stacksize + guardsize + pthreadsize;
	}

	// First try near the hint address; the fourth argument is the
	// page-alignment mask.
	kr = mach_vm_map(mach_task_self(),
			 &allocaddr,
			 allocsize,
			 vm_page_size - 1,
			 VM_MAKE_TAG(VM_MEMORY_STACK)| VM_FLAGS_ANYWHERE,
			 MEMORY_OBJECT_NULL,
			 0,
			 FALSE,
			 VM_PROT_DEFAULT,
			 VM_PROT_ALL,
			 VM_INHERIT_DEFAULT);

	if (kr != KERN_SUCCESS) {
		// Fall back to letting the kernel pick any address.
		kr = mach_vm_allocate(mach_task_self(),
				      &allocaddr,
				      allocsize,
				      VM_MAKE_TAG(VM_MEMORY_STACK)| VM_FLAGS_ANYWHERE);
	}

	if (kr == KERN_SUCCESS) {
		// The stack grows down.
		// Set the guard page at the lowest address of the
		// newly allocated stack. Return the highest address
		// of the stack.
		if (guardsize) {
			(void)mach_vm_protect(mach_task_self(), allocaddr, guardsize, FALSE, VM_PROT_NONE);
		}

		// Thread structure resides at the top of the stack.
		// (With a custom stack, stacksize and guardsize are 0, so the
		// structure sits at the start of its own small allocation.)
		t = (void *)(allocaddr + stacksize + guardsize);
		if (stacksize) {
			// Returns the top of the stack.
			*stack = t;
		}
	}

	if (t != NULL) {
		// Record how to free this allocation later (see _pthread_deallocate).
		_pthread_struct_init(t, attrs, *stack, 0, 0);
		t->freeaddr = (void *)allocaddr;
		t->freesize = allocsize;
		*thread = t;
		res = 0;
	} else {
		res = EAGAIN;
	}
	return res;
}
313
314 static int
315 _pthread_deallocate(pthread_t t)
316 {
317 // Don't free the main thread.
318 if (t != &_thread) {
319 (void)mach_vm_deallocate(mach_task_self(), t->freeaddr, t->freesize);
320 }
321 return 0;
322 }
323
// Terminates the thread if called from the currently running thread.
// Hands the region to free, the kernel port and the join semaphore to the
// kernel, which tears the thread down; this function does not return.
PTHREAD_NORETURN
static void
_pthread_terminate(pthread_t t)
{
	PTHREAD_ASSERT(t == pthread_self());

	uintptr_t freeaddr = (uintptr_t)t->freeaddr;
	size_t freesize = t->freesize - pthreadsize;

	mach_port_t kport = _pthread_kernel_thread(t);
	semaphore_t joinsem = t->joiner_notify;

	_pthread_dealloc_reply_port(t);

	// Shrink the pthread_t so that it does not include the stack
	// so that we're always responsible for deallocating the stack.
	t->freeaddr += freesize;
	t->freesize = pthreadsize;

	// After the call to __pthread_remove_thread, it is only safe to
	// dereference the pthread_t structure if EBUSY has been returned.

	bool destroy, should_exit;
	destroy = (__pthread_remove_thread(t, true, &should_exit) != EBUSY);

	if (t == &_thread) {
		// Don't free the main thread.
		freesize = 0;
	} else if (destroy) {
		// We were told not to keep the pthread_t structure around, so
		// instead of just deallocating the stack, we should deallocate
		// the entire structure.
		freesize += pthreadsize;
	}
	if (freesize == 0) {
		// Nothing to free; pass a NULL address to the kernel.
		freeaddr = 0;
	}
	_pthread_introspection_thread_terminate(t, freeaddr, freesize, destroy);
	if (should_exit) {
		// This was the last running thread: exit the process.
		exitf(0);
	}

	__bsdthread_terminate((void *)freeaddr, freesize, kport, joinsem);
	PTHREAD_ABORT("thread %p didn't terminate", t);
}
370
371 int
372 pthread_attr_destroy(pthread_attr_t *attr)
373 {
374 int ret = EINVAL;
375 if (attr->sig == _PTHREAD_ATTR_SIG) {
376 attr->sig = 0;
377 ret = 0;
378 }
379 return ret;
380 }
381
382 int
383 pthread_attr_getdetachstate(const pthread_attr_t *attr, int *detachstate)
384 {
385 int ret = EINVAL;
386 if (attr->sig == _PTHREAD_ATTR_SIG) {
387 *detachstate = attr->detached;
388 ret = 0;
389 }
390 return ret;
391 }
392
393 int
394 pthread_attr_getinheritsched(const pthread_attr_t *attr, int *inheritsched)
395 {
396 int ret = EINVAL;
397 if (attr->sig == _PTHREAD_ATTR_SIG) {
398 *inheritsched = attr->inherit;
399 ret = 0;
400 }
401 return ret;
402 }
403
404 int
405 pthread_attr_getschedparam(const pthread_attr_t *attr, struct sched_param *param)
406 {
407 int ret = EINVAL;
408 if (attr->sig == _PTHREAD_ATTR_SIG) {
409 *param = attr->param;
410 ret = 0;
411 }
412 return ret;
413 }
414
415 int
416 pthread_attr_getschedpolicy(const pthread_attr_t *attr, int *policy)
417 {
418 int ret = EINVAL;
419 if (attr->sig == _PTHREAD_ATTR_SIG) {
420 *policy = attr->policy;
421 ret = 0;
422 }
423 return ret;
424 }
425
426 // Default stack size is 512KB; independent of the main thread's stack size.
427 static const size_t DEFAULT_STACK_SIZE = 512 * 1024;
428
429 int
430 pthread_attr_init(pthread_attr_t *attr)
431 {
432 attr->stacksize = DEFAULT_STACK_SIZE;
433 attr->stackaddr = NULL;
434 attr->sig = _PTHREAD_ATTR_SIG;
435 attr->param.sched_priority = default_priority;
436 attr->param.quantum = 10; /* quantum isn't public yet */
437 attr->detached = PTHREAD_CREATE_JOINABLE;
438 attr->inherit = _PTHREAD_DEFAULT_INHERITSCHED;
439 attr->policy = _PTHREAD_DEFAULT_POLICY;
440 attr->fastpath = 1;
441 attr->schedset = 0;
442 attr->guardsize = vm_page_size;
443 attr->qosclass = _pthread_priority_make_newest(QOS_CLASS_DEFAULT, 0, 0);
444 return 0;
445 }
446
447 int
448 pthread_attr_setdetachstate(pthread_attr_t *attr, int detachstate)
449 {
450 int ret = EINVAL;
451 if (attr->sig == _PTHREAD_ATTR_SIG &&
452 (detachstate == PTHREAD_CREATE_JOINABLE ||
453 detachstate == PTHREAD_CREATE_DETACHED)) {
454 attr->detached = detachstate;
455 ret = 0;
456 }
457 return ret;
458 }
459
460 int
461 pthread_attr_setinheritsched(pthread_attr_t *attr, int inheritsched)
462 {
463 int ret = EINVAL;
464 if (attr->sig == _PTHREAD_ATTR_SIG &&
465 (inheritsched == PTHREAD_INHERIT_SCHED ||
466 inheritsched == PTHREAD_EXPLICIT_SCHED)) {
467 attr->inherit = inheritsched;
468 ret = 0;
469 }
470 return ret;
471 }
472
473 int
474 pthread_attr_setschedparam(pthread_attr_t *attr, const struct sched_param *param)
475 {
476 int ret = EINVAL;
477 if (attr->sig == _PTHREAD_ATTR_SIG) {
478 /* TODO: Validate sched_param fields */
479 attr->param = *param;
480 attr->schedset = 1;
481 ret = 0;
482 }
483 return ret;
484 }
485
486 int
487 pthread_attr_setschedpolicy(pthread_attr_t *attr, int policy)
488 {
489 int ret = EINVAL;
490 if (attr->sig == _PTHREAD_ATTR_SIG &&
491 (policy == SCHED_OTHER ||
492 policy == SCHED_RR ||
493 policy == SCHED_FIFO)) {
494 attr->policy = policy;
495 attr->schedset = 1;
496 ret = 0;
497 }
498 return ret;
499 }
500
501 int
502 pthread_attr_setscope(pthread_attr_t *attr, int scope)
503 {
504 int ret = EINVAL;
505 if (attr->sig == _PTHREAD_ATTR_SIG) {
506 if (scope == PTHREAD_SCOPE_SYSTEM) {
507 // No attribute yet for the scope.
508 ret = 0;
509 } else if (scope == PTHREAD_SCOPE_PROCESS) {
510 ret = ENOTSUP;
511 }
512 }
513 return ret;
514 }
515
516 int
517 pthread_attr_getscope(const pthread_attr_t *attr, int *scope)
518 {
519 int ret = EINVAL;
520 if (attr->sig == _PTHREAD_ATTR_SIG) {
521 *scope = PTHREAD_SCOPE_SYSTEM;
522 ret = 0;
523 }
524 return ret;
525 }
526
527 int
528 pthread_attr_getstackaddr(const pthread_attr_t *attr, void **stackaddr)
529 {
530 int ret = EINVAL;
531 if (attr->sig == _PTHREAD_ATTR_SIG) {
532 *stackaddr = attr->stackaddr;
533 ret = 0;
534 }
535 return ret;
536 }
537
538 int
539 pthread_attr_setstackaddr(pthread_attr_t *attr, void *stackaddr)
540 {
541 int ret = EINVAL;
542 if (attr->sig == _PTHREAD_ATTR_SIG &&
543 ((uintptr_t)stackaddr % vm_page_size) == 0) {
544 attr->stackaddr = stackaddr;
545 attr->fastpath = 0;
546 attr->guardsize = 0;
547 ret = 0;
548 }
549 return ret;
550 }
551
552 int
553 pthread_attr_getstacksize(const pthread_attr_t *attr, size_t *stacksize)
554 {
555 int ret = EINVAL;
556 if (attr->sig == _PTHREAD_ATTR_SIG) {
557 *stacksize = attr->stacksize;
558 ret = 0;
559 }
560 return ret;
561 }
562
563 int
564 pthread_attr_setstacksize(pthread_attr_t *attr, size_t stacksize)
565 {
566 int ret = EINVAL;
567 if (attr->sig == _PTHREAD_ATTR_SIG &&
568 (stacksize % vm_page_size) == 0 &&
569 stacksize >= PTHREAD_STACK_MIN) {
570 attr->stacksize = stacksize;
571 ret = 0;
572 }
573 return ret;
574 }
575
576 int
577 pthread_attr_getstack(const pthread_attr_t *attr, void **stackaddr, size_t * stacksize)
578 {
579 int ret = EINVAL;
580 if (attr->sig == _PTHREAD_ATTR_SIG) {
581 *stackaddr = (void *)((uintptr_t)attr->stackaddr - attr->stacksize);
582 *stacksize = attr->stacksize;
583 ret = 0;
584 }
585 return ret;
586 }
587
588 // Per SUSv3, the stackaddr is the base address, the lowest addressable byte
589 // address. This is not the same as in pthread_attr_setstackaddr.
590 int
591 pthread_attr_setstack(pthread_attr_t *attr, void *stackaddr, size_t stacksize)
592 {
593 int ret = EINVAL;
594 if (attr->sig == _PTHREAD_ATTR_SIG &&
595 ((uintptr_t)stackaddr % vm_page_size) == 0 &&
596 (stacksize % vm_page_size) == 0 &&
597 stacksize >= PTHREAD_STACK_MIN) {
598 attr->stackaddr = (void *)((uintptr_t)stackaddr + stacksize);
599 attr->stacksize = stacksize;
600 attr->fastpath = 0;
601 ret = 0;
602 }
603 return ret;
604 }
605
606 int
607 pthread_attr_setguardsize(pthread_attr_t *attr, size_t guardsize)
608 {
609 int ret = EINVAL;
610 if (attr->sig == _PTHREAD_ATTR_SIG) {
611 /* Guardsize of 0 is valid, ot means no guard */
612 if ((guardsize % vm_page_size) == 0) {
613 attr->guardsize = guardsize;
614 attr->fastpath = 0;
615 ret = 0;
616 }
617 }
618 return ret;
619 }
620
621 int
622 pthread_attr_getguardsize(const pthread_attr_t *attr, size_t *guardsize)
623 {
624 int ret = EINVAL;
625 if (attr->sig == _PTHREAD_ATTR_SIG) {
626 *guardsize = attr->guardsize;
627 ret = 0;
628 }
629 return ret;
630 }
631
632
/*
 * Create and start execution of a new thread.
 */

// Runs on the new thread's own stack: publish the thread (child side of the
// parent/child registration race), run the user function, then exit the
// thread with its return value.
static void
_pthread_body(pthread_t self)
{
	_pthread_set_self(self);
	__pthread_add_thread(self, false);
	_pthread_exit(self, (self->fun)(self->arg));
}
644
// Entry point invoked by the kernel for threads made with __bsdthread_create.
// Finishes userspace initialization of the pthread, then never returns
// (runs fun(arg) via _pthread_body).
void
_pthread_start(pthread_t self, mach_port_t kport, void *(*fun)(void *), void *arg, size_t stacksize, unsigned int pflags)
{
	if ((pflags & PTHREAD_START_CUSTOM) == 0) {
		// Kernel allocated the structure at the top of the stack, so the
		// pthread_t address doubles as the stack address (kernalloc = 1).
		void *stackaddr = self;
		_pthread_struct_init(self, &_pthread_attr_default, stackaddr, stacksize, 1);

		if (pflags & PTHREAD_START_SETSCHED) {
			// Unpack policy and importance from the packed flags word
			// (see the flags-layout diagram above).
			self->policy = ((pflags >> PTHREAD_START_POLICY_BITSHIFT) & PTHREAD_START_POLICY_MASK);
			self->param.sched_priority = (pflags & PTHREAD_START_IMPORTANCE_MASK);
		}

		if ((pflags & PTHREAD_START_DETACHED) == PTHREAD_START_DETACHED) {
			self->detached &= ~PTHREAD_CREATE_JOINABLE;
			self->detached |= PTHREAD_CREATE_DETACHED;
		}
	}

	if ((pflags & PTHREAD_START_QOSCLASS) != 0) {
		/* The QoS class is cached in the TSD of the pthread, so to reflect the
		 * class that the kernel brought us up at, the TSD must be primed from the
		 * flags parameter.
		 */
		self->tsd[_PTHREAD_TSD_SLOT_PTHREAD_QOS_CLASS] = (pflags & PTHREAD_START_QOSCLASS_MASK);
	} else {
		/* Give the thread a default QoS tier, of zero. */
		self->tsd[_PTHREAD_TSD_SLOT_PTHREAD_QOS_CLASS] = _pthread_priority_make_newest(QOS_CLASS_UNSPECIFIED, 0, 0);
	}

	_pthread_set_kernel_thread(self, kport);
	self->fun = fun;
	self->arg = arg;

	_pthread_body(self);
}
680
// Initialize the fields of a pthread_t from an attribute set.
//
// kernalloc != 0 means the kernel placed the structure at the top of the
// stack; stack/stacksize from the attributes are ignored and the free region
// is derived from the structure's own address. Otherwise the caller
// (_pthread_allocate) fills in freeaddr/freesize itself afterwards.
static void
_pthread_struct_init(pthread_t t,
	const pthread_attr_t *attrs,
	void *stack,
	size_t stacksize,
	int kernalloc)
{
	t->sig = _PTHREAD_SIG;
	// Self pointer and default (unspecified) QoS go straight into the TSD.
	t->tsd[_PTHREAD_TSD_SLOT_PTHREAD_SELF] = t;
	t->tsd[_PTHREAD_TSD_SLOT_PTHREAD_QOS_CLASS] = _pthread_priority_make_newest(QOS_CLASS_UNSPECIFIED, 0, 0);
	LOCK_INIT(t->lock);
	t->kernalloc = kernalloc;
	if (kernalloc != 0) {
		uintptr_t stackaddr = (uintptr_t)t;
		t->stacksize = stacksize;
		t->stackaddr = (void *)stackaddr;
		// Free region spans guard page + stack + structure below/at t.
		t->freeaddr = (void *)(uintptr_t)(stackaddr - stacksize - vm_page_size);
		t->freesize = pthreadsize + stacksize + vm_page_size;
	} else {
		t->stacksize = attrs->stacksize;
		t->stackaddr = (void *)stack;
	}
	t->guardsize = attrs->guardsize;
	t->detached = attrs->detached;
	t->inherit = attrs->inherit;
	t->policy = attrs->policy;
	t->schedset = attrs->schedset;
	t->param = attrs->param;
	t->cancel_state = PTHREAD_CANCEL_ENABLE | PTHREAD_CANCEL_DEFERRED;
}
711
/* Need to deprecate this in future */
// Returns non-zero once any pthread beyond the main thread has been created.
int
_pthread_is_threaded(void)
{
	return __is_threaded;
}
718
/* Non-portable public API reporting whether this process has (or has had) at
 * least one thread apart from the main thread. Racy with concurrent thread
 * creation, and does not indicate how many threads exist right now.
 */
int
pthread_is_threaded_np(void)
{
	return __is_threaded;
}
729
730 mach_port_t
731 pthread_mach_thread_np(pthread_t t)
732 {
733 mach_port_t kport = MACH_PORT_NULL;
734
735 if (t == pthread_self()) {
736 /*
737 * If the call is on self, return the kernel port. We cannot
738 * add this bypass for main thread as it might have exited,
739 * and we should not return stale port info.
740 */
741 kport = _pthread_kernel_thread(t);
742 } else {
743 (void)_pthread_lookup_thread(t, &kport, 0);
744 }
745
746 return kport;
747 }
748
// Reverse-map a Mach thread port to its pthread_t by scanning the global
// thread list. Returns NULL when no registered pthread owns that port.
pthread_t
pthread_from_mach_thread_np(mach_port_t kernel_thread)
{
	struct _pthread *p = NULL;

	/* No need to wait as mach port is already known */
	LOCK(_pthread_list_lock);

	TAILQ_FOREACH(p, &__pthread_head, plist) {
		if (_pthread_kernel_thread(p) == kernel_thread) {
			break;
		}
	}

	UNLOCK(_pthread_list_lock);

	// p is NULL here if the loop walked off the end of the list.
	return p;
}
767
// Return the stack size of a thread. Note the long-standing quirk (marked
// "XXX bug?" below): on failure an errno value is returned in place of a
// size, which callers cannot distinguish from a tiny stack.
size_t
pthread_get_stacksize_np(pthread_t t)
{
	int ret;
	size_t size = 0;

	if (t == NULL) {
		return ESRCH; // XXX bug?
	}

	// since the main thread will not get de-allocated from underneath us
	if (t == pthread_self() || t == &_thread) {
		return t->stacksize;
	}

	LOCK(_pthread_list_lock);

	// Other threads must be validated against the list while locked.
	ret = _pthread_find_thread(t);
	if (ret == 0) {
		size = t->stacksize;
	} else {
		size = ret; // XXX bug?
	}

	UNLOCK(_pthread_list_lock);

	return size;
}
796
// Return the stack address (top of stack) of a thread. Same quirk as
// pthread_get_stacksize_np: on failure an errno value is smuggled back as a
// pointer (marked "XXX bug?" below).
void *
pthread_get_stackaddr_np(pthread_t t)
{
	int ret;
	void *addr = NULL;

	if (t == NULL) {
		return (void *)(uintptr_t)ESRCH; // XXX bug?
	}

	// since the main thread will not get de-allocated from underneath us
	if (t == pthread_self() || t == &_thread) {
		return t->stackaddr;
	}

	LOCK(_pthread_list_lock);

	// Other threads must be validated against the list while locked.
	ret = _pthread_find_thread(t);
	if (ret == 0) {
		addr = t->stackaddr;
	} else {
		addr = (void *)(uintptr_t)ret; // XXX bug?
	}

	UNLOCK(_pthread_list_lock);

	return addr;
}
825
826 static mach_port_t
827 _pthread_reply_port(pthread_t t)
828 {
829 void *p;
830 if (t == NULL) {
831 p = _pthread_getspecific_direct(_PTHREAD_TSD_SLOT_MIG_REPLY);
832 } else {
833 p = t->tsd[_PTHREAD_TSD_SLOT_MIG_REPLY];
834 }
835 return (mach_port_t)(uintptr_t)p;
836 }
837
838 static void
839 _pthread_set_reply_port(pthread_t t, mach_port_t reply_port)
840 {
841 void *p = (void *)(uintptr_t)reply_port;
842 if (t == NULL) {
843 _pthread_setspecific_direct(_PTHREAD_TSD_SLOT_MIG_REPLY, p);
844 } else {
845 t->tsd[_PTHREAD_TSD_SLOT_MIG_REPLY] = p;
846 }
847 }
848
// Release a thread's cached MIG reply port, if one was ever allocated.
// Called during thread teardown (see _pthread_terminate).
static void
_pthread_dealloc_reply_port(pthread_t t)
{
	mach_port_t reply_port = _pthread_reply_port(t);
	if (reply_port != MACH_PORT_NULL) {
		mig_dealloc_reply_port(reply_port);
	}
}
857
// Return the statically allocated pthread_t of the process's main thread.
pthread_t
pthread_main_thread_np(void)
{
	return &_thread;
}
863
/* returns non-zero if the current thread is the main thread */
// Decided by the _PTHREAD_CREATE_PARENT flag in the detached field, which
// only the main thread carries.
int
pthread_main_np(void)
{
	pthread_t self = pthread_self();

	return ((self->detached & _PTHREAD_CREATE_PARENT) == _PTHREAD_CREATE_PARENT);
}
872
873
874 /* if we are passed in a pthread_t that is NULL, then we return
875 the current thread's thread_id. So folks don't have to call
876 pthread_self, in addition to us doing it, if they just want
877 their thread_id.
878 */
879 int
880 pthread_threadid_np(pthread_t thread, uint64_t *thread_id)
881 {
882 int res = 0;
883 pthread_t self = pthread_self();
884
885 if (thread_id == NULL) {
886 return EINVAL;
887 }
888
889 if (thread == NULL || thread == self) {
890 *thread_id = self->thread_id;
891 } else {
892 LOCK(_pthread_list_lock);
893 res = _pthread_find_thread(thread);
894 if (res == 0) {
895 *thread_id = thread->thread_id;
896 }
897 UNLOCK(_pthread_list_lock);
898 }
899 return res;
900 }
901
902 int
903 pthread_getname_np(pthread_t thread, char *threadname, size_t len)
904 {
905 int res;
906
907 if (thread == NULL) {
908 return ESRCH;
909 }
910
911 LOCK(_pthread_list_lock);
912 res = _pthread_find_thread(thread);
913 if (res == 0) {
914 strlcpy(threadname, thread->pthread_name, len);
915 }
916 UNLOCK(_pthread_list_lock);
917 return res;
918 }
919
920 int
921 pthread_setname_np(const char *name)
922 {
923 int res;
924 pthread_t self = pthread_self();
925
926 size_t len = 0;
927 if (name != NULL) {
928 len = strlen(name);
929 }
930
931 /* protytype is in pthread_internals.h */
932 res = __proc_info(5, getpid(), 2, (uint64_t)0, (void*)name, (int)len);
933 if (res == 0) {
934 if (len > 0) {
935 strlcpy(self->pthread_name, name, MAXTHREADNAMESIZE);
936 } else {
937 bzero(self->pthread_name, MAXTHREADNAMESIZE);
938 }
939 }
940 return res;
941
942 }
943
// Register a newly created thread on the global thread list. Called once by
// the parent (parent == true) and once by the child (parent == false); the
// flag protocol below ensures exactly one of them performs the insertion.
PTHREAD_ALWAYS_INLINE
static inline void
__pthread_add_thread(pthread_t t, bool parent)
{
	bool should_deallocate = false;
	bool should_add = true;

	LOCK(_pthread_list_lock);

	// The parent and child threads race to add the thread to the list.
	// When called by the parent:
	// - set parentcheck to true
	// - back off if childrun is true
	// When called by the child:
	// - set childrun to true
	// - back off if parentcheck is true
	if (parent) {
		t->parentcheck = 1;
		if (t->childrun) {
			// child got here first, don't add.
			should_add = false;
		}

		// If the child exits before we check in then it has to keep
		// the thread structure memory alive so our dereferences above
		// are valid. If it's a detached thread, then no joiner will
		// deallocate the thread structure itself. So we do it here.
		if (t->childexit) {
			should_add = false;
			should_deallocate = ((t->detached & PTHREAD_CREATE_DETACHED) == PTHREAD_CREATE_DETACHED);
		}
	} else {
		t->childrun = 1;
		if (t->parentcheck) {
			// Parent got here first, don't add.
			should_add = false;
		}
		if (t->wqthread) {
			// Work queue threads have no parent. Simulate.
			t->parentcheck = 1;
		}
	}

	if (should_add) {
		TAILQ_INSERT_TAIL(&__pthread_head, t, plist);
		_pthread_count++;
	}

	UNLOCK(_pthread_list_lock);

	// Introspection callouts happen outside the list lock.
	if (parent) {
		_pthread_introspection_thread_create(t, should_deallocate);
		if (should_deallocate) {
			_pthread_deallocate(t);
		}
	} else {
		_pthread_introspection_thread_start(t);
	}
}
1003
// <rdar://problem/12544957> must always inline this function to avoid epilogues
// Returns EBUSY if the thread structure should be kept alive (is joinable).
// Returns ESRCH if the thread structure is no longer valid (was detached).
// child == true means the thread is removing itself on exit; *should_exit is
// then set when this was the last running thread (process should exit).
PTHREAD_ALWAYS_INLINE
static inline int
__pthread_remove_thread(pthread_t t, bool child, bool *should_exit)
{
	int ret = 0;

	bool should_remove = true;

	LOCK(_pthread_list_lock);

	// When a thread removes itself:
	// - Set the childexit flag indicating that the thread has exited.
	// - Return false if parentcheck is zero (must keep structure)
	// - If the thread is joinable, keep it on the list so that
	//   the join operation succeeds. Still decrement the running
	//   thread count so that we exit if no threads are running.
	// - Update the running thread count.
	// When another thread removes a joinable thread:
	// - CAREFUL not to dereference the thread before verifying that the
	//   reference is still valid using _pthread_find_thread().
	// - Remove the thread from the list.

	if (child) {
		t->childexit = 1;
		if (t->parentcheck == 0) {
			// Parent hasn't checked in yet; it must still be able to
			// dereference the structure (see __pthread_add_thread).
			ret = EBUSY;
		}
		if ((t->detached & PTHREAD_CREATE_JOINABLE) != 0) {
			// Keep a joinable thread on the list for pthread_join.
			ret = EBUSY;
			should_remove = false;
		}
		*should_exit = (--_pthread_count <= 0);
	} else {
		ret = _pthread_find_thread(t);
		if (ret == 0) {
			// If we found a thread but it's not joinable, bail.
			if ((t->detached & PTHREAD_CREATE_JOINABLE) == 0) {
				should_remove = false;
				ret = ESRCH;
			}
		}
	}
	if (should_remove) {
		TAILQ_REMOVE(&__pthread_head, t, plist);
	}

	UNLOCK(_pthread_list_lock);

	return ret;
}
1057
// Create and start a new thread running start_routine(arg).
// Takes the kernel fast path (kernel allocates thread + stack) when the
// attributes allow it; otherwise allocates here and passes
// PTHREAD_START_CUSTOM. Returns 0, EINVAL (bad attrs) or EAGAIN.
int
pthread_create(pthread_t *thread,
	const pthread_attr_t *attr,
	void *(*start_routine)(void *),
	void *arg)
{
	pthread_t t = NULL;
	unsigned int flags = 0;

	pthread_attr_t *attrs = (pthread_attr_t *)attr;
	if (attrs == NULL) {
		attrs = &_pthread_attr_default;
	} else if (attrs->sig != _PTHREAD_ATTR_SIG) {
		return EINVAL;
	}

	if (attrs->detached == PTHREAD_CREATE_DETACHED) {
		flags |= PTHREAD_START_DETACHED;
	}

	// Pack scheduling (or QoS) information into the flags word; see the
	// flags-layout diagram near PTHREAD_START_CUSTOM.
	if (attrs->schedset != 0) {
		flags |= PTHREAD_START_SETSCHED;
		flags |= ((attrs->policy & PTHREAD_START_POLICY_MASK) << PTHREAD_START_POLICY_BITSHIFT);
		flags |= (attrs->param.sched_priority & PTHREAD_START_IMPORTANCE_MASK);
	} else if (attrs->qosclass != 0) {
		flags |= PTHREAD_START_QOSCLASS;
		flags |= (attrs->qosclass & PTHREAD_START_QOSCLASS_MASK);
	}

	__is_threaded = 1;

	void *stack;

	if (attrs->fastpath) {
		// kernel will allocate thread and stack, pass stacksize.
		stack = (void *)attrs->stacksize;
	} else {
		// allocate the thread and its stack
		flags |= PTHREAD_START_CUSTOM;

		int res;
		res = _pthread_allocate(&t, attrs, &stack);
		if (res) {
			return res;
		}

		t->arg = arg;
		t->fun = start_routine;
	}

	pthread_t t2;
	t2 = __bsdthread_create(start_routine, arg, stack, t, flags);
	if (t2 == (pthread_t)-1) {
		if (flags & PTHREAD_START_CUSTOM) {
			// free the thread and stack if we allocated it
			_pthread_deallocate(t);
		}
		return EAGAIN;
	}
	if (t == NULL) {
		// Fast path: the kernel allocated the pthread_t for us.
		t = t2;
	}

	// Parent side of the parent/child registration race.
	__pthread_add_thread(t, true);

	// XXX if a thread is created detached and exits, t will be invalid
	*thread = t;
	return 0;
}
1127
1128 int
1129 pthread_create_suspended_np(pthread_t *thread,
1130 const pthread_attr_t *attr,
1131 void *(*start_routine)(void *),
1132 void *arg)
1133 {
1134 int res;
1135 void *stack;
1136 mach_port_t kernel_thread = MACH_PORT_NULL;
1137
1138 const pthread_attr_t *attrs = attr;
1139 if (attrs == NULL) {
1140 attrs = &_pthread_attr_default;
1141 } else if (attrs->sig != _PTHREAD_ATTR_SIG) {
1142 return EINVAL;
1143 }
1144
1145 pthread_t t;
1146 res = _pthread_allocate(&t, attrs, &stack);
1147 if (res) {
1148 return res;
1149 }
1150
1151 *thread = t;
1152
1153 kern_return_t kr;
1154 kr = thread_create(mach_task_self(), &kernel_thread);
1155 if (kr != KERN_SUCCESS) {
1156 //PTHREAD_ABORT("thread_create() failed: %d", kern_res);
1157 return EINVAL; /* Need better error here? */
1158 }
1159
1160 _pthread_set_kernel_thread(t, kernel_thread);
1161 (void)pthread_setschedparam_internal(t, kernel_thread, t->policy, &t->param);
1162
1163 __is_threaded = 1;
1164
1165 t->arg = arg;
1166 t->fun = start_routine;
1167
1168 __pthread_add_thread(t, true);
1169
1170 // Set up a suspended thread.
1171 _pthread_setup(t, _pthread_body, stack, 1, 0);
1172 return res;
1173 }
1174
1175 int
1176 pthread_detach(pthread_t thread)
1177 {
1178 int res;
1179 bool join = false;
1180 semaphore_t sema = SEMAPHORE_NULL;
1181
1182 res = _pthread_lookup_thread(thread, NULL, 1);
1183 if (res) {
1184 return res; // Not a valid thread to detach.
1185 }
1186
1187 LOCK(thread->lock);
1188 if (thread->detached & PTHREAD_CREATE_JOINABLE) {
1189 if (thread->detached & _PTHREAD_EXITED) {
1190 // Join the thread if it's already exited.
1191 join = true;
1192 } else {
1193 thread->detached &= ~PTHREAD_CREATE_JOINABLE;
1194 thread->detached |= PTHREAD_CREATE_DETACHED;
1195 sema = thread->joiner_notify;
1196 }
1197 } else {
1198 res = EINVAL;
1199 }
1200 UNLOCK(thread->lock);
1201
1202 if (join) {
1203 pthread_join(thread, NULL);
1204 } else if (sema) {
1205 semaphore_signal(sema);
1206 }
1207
1208 return res;
1209 }
1210
1211 int
1212 pthread_kill(pthread_t th, int sig)
1213 {
1214 if (sig < 0 || sig > NSIG) {
1215 return EINVAL;
1216 }
1217
1218 mach_port_t kport = MACH_PORT_NULL;
1219 if (_pthread_lookup_thread(th, &kport, 0) != 0) {
1220 return ESRCH; // Not a valid thread.
1221 }
1222
1223 // Don't signal workqueue threads.
1224 if (th->wqthread != 0 && th->wqkillset == 0) {
1225 return ENOTSUP;
1226 }
1227
1228 int ret = __pthread_kill(kport, sig);
1229
1230 if (ret == -1) {
1231 ret = errno;
1232 }
1233 return ret;
1234 }
1235
1236 int
1237 __pthread_workqueue_setkill(int enable)
1238 {
1239 pthread_t self = pthread_self();
1240
1241 LOCK(self->lock);
1242 self->wqkillset = enable ? 1 : 0;
1243 UNLOCK(self->lock);
1244
1245 return 0;
1246 }
1247
1248 static void *
1249 __pthread_get_exit_value(pthread_t t, int conforming)
1250 {
1251 const int flags = (PTHREAD_CANCEL_ENABLE|_PTHREAD_CANCEL_PENDING);
1252 void *value = t->exit_value;
1253 if (conforming) {
1254 if ((t->cancel_state & flags) == flags) {
1255 value = PTHREAD_CANCELED;
1256 }
1257 }
1258 return value;
1259 }
1260
1261 /* For compatibility... */
1262
1263 pthread_t
1264 _pthread_self(void) {
1265 return pthread_self();
1266 }
1267
1268 /*
1269 * Terminate a thread.
1270 */
1271 int __disable_threadsignal(int);
1272
/*
 * Common thread-exit path: run cleanup handlers, destroy TSD, record the
 * exit value, arrange for a joiner to be woken, then enter the kernel to
 * terminate.  Never returns.
 */
PTHREAD_NORETURN
static void
_pthread_exit(pthread_t self, void *value_ptr)
{
	struct __darwin_pthread_handler_rec *handler;

	// Disable signal delivery while we clean up
	__disable_threadsignal(1);

	// Set cancel state to disable and type to deferred
	_pthread_setcancelstate_exit(self, value_ptr, __unix_conforming);

	// Pop and run pthread_cleanup_push() handlers, newest first.
	while ((handler = self->__cleanup_stack) != 0) {
		(handler->__routine)(handler->__arg);
		self->__cleanup_stack = handler->__next;
	}
	// Run thread-specific-data destructors.
	_pthread_tsd_cleanup(self);

	LOCK(self->lock);
	self->detached |= _PTHREAD_EXITED;
	self->exit_value = value_ptr;

	// A joinable thread needs a semaphore for pthread_join() to wait on;
	// take one from the process-wide cache if none is set yet.
	if ((self->detached & PTHREAD_CREATE_JOINABLE) &&
	    self->joiner_notify == SEMAPHORE_NULL) {
		self->joiner_notify = (semaphore_t)os_get_cached_semaphore();
	}
	UNLOCK(self->lock);

	// Clear per-thread semaphore cache
	os_put_cached_semaphore(SEMAPHORE_NULL);

	_pthread_terminate(self);
}
1306
1307 void
1308 pthread_exit(void *value_ptr)
1309 {
1310 pthread_t self = pthread_self();
1311 if (self->wqthread == 0) {
1312 _pthread_exit(self, value_ptr);
1313 } else {
1314 PTHREAD_ABORT("pthread_exit() may only be called against threads created via pthread_create()");
1315 }
1316 }
1317
1318 int
1319 pthread_getschedparam(pthread_t thread,
1320 int *policy,
1321 struct sched_param *param)
1322 {
1323 int ret;
1324
1325 if (thread == NULL) {
1326 return ESRCH;
1327 }
1328
1329 LOCK(_pthread_list_lock);
1330
1331 ret = _pthread_find_thread(thread);
1332 if (ret == 0) {
1333 if (policy) {
1334 *policy = thread->policy;
1335 }
1336 if (param) {
1337 *param = thread->param;
1338 }
1339 }
1340
1341 UNLOCK(_pthread_list_lock);
1342
1343 return ret;
1344 }
1345
1346 static int
1347 pthread_setschedparam_internal(pthread_t thread,
1348 mach_port_t kport,
1349 int policy,
1350 const struct sched_param *param)
1351 {
1352 policy_base_data_t bases;
1353 policy_base_t base;
1354 mach_msg_type_number_t count;
1355 kern_return_t ret;
1356
1357 switch (policy) {
1358 case SCHED_OTHER:
1359 bases.ts.base_priority = param->sched_priority;
1360 base = (policy_base_t)&bases.ts;
1361 count = POLICY_TIMESHARE_BASE_COUNT;
1362 break;
1363 case SCHED_FIFO:
1364 bases.fifo.base_priority = param->sched_priority;
1365 base = (policy_base_t)&bases.fifo;
1366 count = POLICY_FIFO_BASE_COUNT;
1367 break;
1368 case SCHED_RR:
1369 bases.rr.base_priority = param->sched_priority;
1370 /* quantum isn't public yet */
1371 bases.rr.quantum = param->quantum;
1372 base = (policy_base_t)&bases.rr;
1373 count = POLICY_RR_BASE_COUNT;
1374 break;
1375 default:
1376 return EINVAL;
1377 }
1378 ret = thread_policy(kport, policy, base, count, TRUE);
1379 return (ret != KERN_SUCCESS) ? EINVAL : 0;
1380 }
1381
/*
 * Set scheduling policy/parameters for 't'.  For self or the main thread
 * the kernel port is read directly (bypass); for other threads the port
 * is looked up and the struct update is revalidated under the list lock.
 */
int
pthread_setschedparam(pthread_t t, int policy, const struct sched_param *param)
{
	mach_port_t kport = MACH_PORT_NULL;
	int res;
	int bypass = 1;

	// since the main thread will not get de-allocated from underneath us
	if (t == pthread_self() || t == &_thread) {
		kport = _pthread_kernel_thread(t);
	} else {
		bypass = 0;
		// NOTE(review): a lookup failure is deliberately ignored; kport
		// stays MACH_PORT_NULL and thread_policy() below fails -> EINVAL.
		(void)_pthread_lookup_thread(t, &kport, 0);
	}

	res = pthread_setschedparam_internal(t, kport, policy, param);
	if (res == 0) {
		if (bypass == 0) {
			// Ensure the thread is still valid.
			LOCK(_pthread_list_lock);
			res = _pthread_find_thread(t);
			if (res == 0) {
				t->policy = policy;
				t->param = *param;
			}
			UNLOCK(_pthread_list_lock);
		} else {
			t->policy = policy;
			t->param = *param;
		}
	}
	return res;
}
1415
// POSIX sched_get_priority_min(): lowest priority for 'policy'.
// The policy argument is ignored; the range is centred on the host's
// default priority obtained in __pthread_init().
int
sched_get_priority_min(int policy)
{
	return default_priority - 16;
}
1421
// POSIX sched_get_priority_max(): highest priority for 'policy'.
// Mirrors sched_get_priority_min(); the policy argument is ignored.
int
sched_get_priority_max(int policy)
{
	return default_priority + 16;
}
1427
// POSIX pthread_equal(): nonzero iff t1 and t2 name the same thread.
int
pthread_equal(pthread_t t1, pthread_t t2)
{
	return (t1 == t2) ? 1 : 0;
}
1433
// Force LLVM not to optimise this to a call to __pthread_set_self, if it does
// then _pthread_set_self won't be bound when secondary threads try and start up.
PTHREAD_NOINLINE
void
_pthread_set_self(pthread_t p)
{
	extern void __pthread_set_self(void *);

	// NULL means "the main thread": use its static storage.
	if (p == NULL) {
		p = &_thread;
	}

	uint64_t tid = __thread_selfid();
	if (tid == -1ull) {
		PTHREAD_ABORT("failed to set thread_id");
	}

	// Seed the TSD slots the library relies on (self pointer, errno
	// location) before handing the TSD base to __pthread_set_self().
	p->tsd[_PTHREAD_TSD_SLOT_PTHREAD_SELF] = p;
	p->tsd[_PTHREAD_TSD_SLOT_ERRNO] = &p->err_no;
	p->thread_id = tid;
	__pthread_set_self(&p->tsd[0]);
}
1456
// Bundles pthread_once()'s two arguments so they can travel through
// os_once()'s single void *context parameter.
struct _pthread_once_context {
	pthread_once_t *pthread_once;  // the caller's once-control block
	void (*routine)(void);         // the user initializer to run once
};
1461
// os_once() trampoline for pthread_once(): runs the user initializer with
// a cleanup handler installed so that, if the initializer is terminated
// early (e.g. cancellation), the once gate is reset and another caller
// may retry.  The completion signature is written only on normal return.
static void
__pthread_once_handler(void *context)
{
	struct _pthread_once_context *ctx = context;
	pthread_cleanup_push((void*)__os_once_reset, &ctx->pthread_once->once);
	ctx->routine();
	pthread_cleanup_pop(0);
	ctx->pthread_once->sig = _PTHREAD_ONCE_SIG;
}
1471
/*
 * Run init_routine exactly once across all threads, synchronizing via
 * os_once().  The loop re-enters os_once() if sig still holds the init
 * value after the call, i.e. the initializer did not complete and the
 * gate was reset by __pthread_once_handler's cleanup path.
 */
int
pthread_once(pthread_once_t *once_control, void (*init_routine)(void))
{
	struct _pthread_once_context ctx = { once_control, init_routine };
	do {
		os_once(&once_control->once, &ctx, __pthread_once_handler);
	} while (once_control->sig == _PTHREAD_ONCE_SIG_init);
	return 0;
}
1481
1482 void
1483 _pthread_testcancel(pthread_t thread, int isconforming)
1484 {
1485 const int flags = (PTHREAD_CANCEL_ENABLE|_PTHREAD_CANCEL_PENDING);
1486
1487 LOCK(thread->lock);
1488 bool canceled = ((thread->cancel_state & flags) == flags);
1489 UNLOCK(thread->lock);
1490
1491 if (canceled) {
1492 pthread_exit(isconforming ? PTHREAD_CANCELED : 0);
1493 }
1494 }
1495
1496 void
1497 _pthread_exit_if_canceled(int error)
1498 {
1499 if (__unix_conforming && ((error & 0xff) == EINTR) && (__pthread_canceled(0) == 0)) {
1500 pthread_t self = pthread_self();
1501 if (self != NULL) {
1502 self->cancel_error = error;
1503 }
1504 pthread_exit(PTHREAD_CANCELED);
1505 }
1506 }
1507
// POSIX pthread_getconcurrency(): return the level last set via
// pthread_setconcurrency() (a hint only; not used for scheduling here).
int
pthread_getconcurrency(void)
{
	return pthread_concurrency;
}
1513
// POSIX pthread_setconcurrency(): store the requested concurrency hint.
// Negative levels are rejected with EINVAL; the value is otherwise unused.
int
pthread_setconcurrency(int new_level)
{
	if (new_level < 0) {
		return EINVAL;
	}
	pthread_concurrency = new_level;
	return 0;
}
1523
// Intentionally a no-op: the preemption-free-zone address is no longer
// used by this library, but the entry point is kept for ABI compatibility.
void
_pthread_set_pfz(uintptr_t address)
{
}
1528
1529 #if !defined(PTHREAD_TARGET_EOS) && !defined(VARIANT_DYLD)
1530 void *
1531 malloc(size_t sz)
1532 {
1533 if (_pthread_malloc) {
1534 return _pthread_malloc(sz);
1535 } else {
1536 return NULL;
1537 }
1538 }
1539
1540 void
1541 free(void *p)
1542 {
1543 if (_pthread_free) {
1544 _pthread_free(p);
1545 }
1546 }
1547 #endif
1548
1549 /*
1550 * Perform package initialization - called automatically when application starts
1551 */
1552 struct ProgramVars; /* forward reference */
1553
/*
 * Library initializer, run once at process start: capture pushed-down
 * malloc/free/exit, read host priority bands, build the main thread's
 * pthread structure from the kernel-reported stack, then finish via the
 * fork-child path (which also registers kernel entry points).
 */
int
__pthread_init(const struct _libpthread_functions *pthread_funcs, const char *envp[] __unused,
	const char *apple[] __unused, const struct ProgramVars *vars __unused)
{
	// Save our provided pushed-down functions
	if (pthread_funcs) {
		exitf = pthread_funcs->exit;

		if (pthread_funcs->version >= 2) {
			_pthread_malloc = pthread_funcs->malloc;
			_pthread_free = pthread_funcs->free;
		}
	}

	//
	// Get host information
	//

	kern_return_t kr;
	host_flavor_t flavor = HOST_PRIORITY_INFO;
	mach_msg_type_number_t count = HOST_PRIORITY_INFO_COUNT;
	host_priority_info_data_t priority_info;
	host_t host = mach_host_self();
	kr = host_info(host, flavor, (host_info_t)&priority_info, &count);
	if (kr != KERN_SUCCESS) {
		PTHREAD_ABORT("host_info(mach_host_self(), ...) failed: %s", mach_error_string(kr));
	} else {
		// These feed sched_get_priority_min()/max().
		default_priority = priority_info.user_priority;
		min_priority = priority_info.minimum_priority;
		max_priority = priority_info.maximum_priority;
	}
	mach_port_deallocate(mach_task_self(), host);

	//
	// Set up the main thread structure
	//

	// Ask the kernel for the main stack's top; fall back to the static
	// USRSTACK constant if the sysctl fails.
	void *stackaddr;
	size_t stacksize = DFLSSIZ;
	size_t len = sizeof(stackaddr);
	int mib[] = { CTL_KERN, KERN_USRSTACK };
	if (__sysctl(mib, 2, &stackaddr, &len, NULL, 0) != 0) {
		stackaddr = (void *)USRSTACK;
	}

	pthread_t thread = &_thread;
	pthread_attr_init(&_pthread_attr_default);
	_pthread_struct_init(thread, &_pthread_attr_default, stackaddr, stacksize, 0);
	thread->detached = PTHREAD_CREATE_JOINABLE;

	// Finish initialization with common code that is reinvoked on the
	// child side of a fork.

	// Finishes initialization of main thread attributes.
	// Initializes the thread list and add the main thread.
	// Calls _pthread_set_self() to prepare the main thread for execution.
	__pthread_fork_child_internal(thread);

	// Set up kernel entry points with __bsdthread_register.
	pthread_workqueue_atfork_child();

	return 0;
}
1617
// POSIX sched_yield(): give up the processor via the Mach swtch_pri trap.
int
sched_yield(void)
{
	swtch_pri(0);
	return 0;
}
1624
/*
 * (Re)initialize library state for a single-threaded process image:
 * called at startup (from __pthread_init) and on the child side of
 * fork(), where only thread 'p' survives.
 */
PTHREAD_NOEXPORT void
__pthread_fork_child_internal(pthread_t p)
{
	// Reset the global thread list; any other threads are gone post-fork.
	TAILQ_INIT(&__pthread_head);
	LOCK_INIT(_pthread_list_lock);

	// Re-use the main thread's static storage if no thread was provided.
	if (p == NULL) {
		if (_thread.tsd[0] != 0) {
			bzero(&_thread, sizeof(struct _pthread));
		}
		p = &_thread;
	}

	// Refresh kernel handles: the Mach thread/reply ports differ in the
	// child process.
	LOCK_INIT(p->lock);
	_pthread_set_kernel_thread(p, mach_thread_self());
	_pthread_set_reply_port(p, mach_reply_port());
	p->__cleanup_stack = NULL;
	p->joiner_notify = SEMAPHORE_NULL;
	p->joiner = MACH_PORT_NULL;
	p->detached |= _PTHREAD_CREATE_PARENT;
	p->tsd[__TSD_SEMAPHORE_CACHE] = SEMAPHORE_NULL;

	// Initialize the list of threads with the new main thread.
	TAILQ_INSERT_HEAD(&__pthread_head, p, plist);
	_pthread_count = 1;

	_pthread_set_self(p);
	_pthread_introspection_thread_start(p);
}
1655
1656 /*
1657 * Query/update the cancelability 'state' of a thread
1658 */
/*
 * Set the calling thread's cancel state to ENABLE or DISABLE, returning
 * the previous state through 'oldstate' if non-NULL.  In conforming mode
 * the kernel is informed first via __pthread_canceled(); in legacy mode
 * a pending cancel is acted on immediately after the update.
 */
PTHREAD_NOEXPORT int
_pthread_setcancelstate_internal(int state, int *oldstate, int conforming)
{
	pthread_t self;

	switch (state) {
	case PTHREAD_CANCEL_ENABLE:
		if (conforming) {
			__pthread_canceled(1);
		}
		break;
	case PTHREAD_CANCEL_DISABLE:
		if (conforming) {
			__pthread_canceled(2);
		}
		break;
	default:
		return EINVAL;
	}

	// Swap the state bits under the thread lock.
	self = pthread_self();
	LOCK(self->lock);
	if (oldstate) {
		*oldstate = self->cancel_state & _PTHREAD_CANCEL_STATE_MASK;
	}
	self->cancel_state &= ~_PTHREAD_CANCEL_STATE_MASK;
	self->cancel_state |= state;
	UNLOCK(self->lock);
	if (!conforming) {
		_pthread_testcancel(self, 0); /* See if we need to 'die' now... */
	}
	return 0;
}
1692
1693 /* When a thread exits set the cancellation state to DISABLE and DEFERRED */
1694 static void
1695 _pthread_setcancelstate_exit(pthread_t self, void * value_ptr, int conforming)
1696 {
1697 LOCK(self->lock);
1698 self->cancel_state &= ~(_PTHREAD_CANCEL_STATE_MASK | _PTHREAD_CANCEL_TYPE_MASK);
1699 self->cancel_state |= (PTHREAD_CANCEL_DISABLE | PTHREAD_CANCEL_DEFERRED);
1700 if (value_ptr == PTHREAD_CANCELED) {
1701 // 4597450: begin
1702 self->detached |= _PTHREAD_WASCANCEL;
1703 // 4597450: end
1704 }
1705 UNLOCK(self->lock);
1706 }
1707
/*
 * Finish a successful join: unlink the thread from the global list,
 * report the exit value, notify introspection, and free the structure.
 */
int
_pthread_join_cleanup(pthread_t thread, void ** value_ptr, int conforming)
{
	// Returns ESRCH if the thread was not created joinable.
	int ret = __pthread_remove_thread(thread, false, NULL);
	if (ret != 0) {
		return ret;
	}

	if (value_ptr) {
		// May substitute PTHREAD_CANCELED in conforming mode.
		*value_ptr = __pthread_get_exit_value(thread, conforming);
	}
	_pthread_introspection_thread_destroy(thread);
	_pthread_deallocate(thread);
	return 0;
}
1724
/* ALWAYS called with list lock and return with list lock */
/*
 * Return 0 if 'thread' is on the global thread list, ESRCH otherwise.
 * If the thread is listed but its kernel port is not yet set (creation
 * still in flight), the list lock is dropped so the creator can finish,
 * then the scan restarts from the top.
 */
int
_pthread_find_thread(pthread_t thread)
{
	if (thread != NULL) {
		pthread_t p;
loop:
		TAILQ_FOREACH(p, &__pthread_head, plist) {
			if (p == thread) {
				if (_pthread_kernel_thread(thread) == MACH_PORT_NULL) {
					// Creation race: yield and rescan.
					UNLOCK(_pthread_list_lock);
					sched_yield();
					LOCK(_pthread_list_lock);
					goto loop;
				}
				return 0;
			}
		}
	}
	return ESRCH;
}
1746
1747 int
1748 _pthread_lookup_thread(pthread_t thread, mach_port_t *portp, int only_joinable)
1749 {
1750 mach_port_t kport = MACH_PORT_NULL;
1751 int ret;
1752
1753 if (thread == NULL) {
1754 return ESRCH;
1755 }
1756
1757 LOCK(_pthread_list_lock);
1758
1759 ret = _pthread_find_thread(thread);
1760 if (ret == 0) {
1761 // Fail if we only want joinable threads and the thread found is
1762 // not in the detached state.
1763 if (only_joinable != 0 && (thread->detached & PTHREAD_CREATE_DETACHED) != 0) {
1764 ret = EINVAL;
1765 } else {
1766 kport = _pthread_kernel_thread(thread);
1767 }
1768 }
1769
1770 UNLOCK(_pthread_list_lock);
1771
1772 if (portp != NULL) {
1773 *portp = kport;
1774 }
1775
1776 return ret;
1777 }
1778
/*
 * Reset the QoS-class TSD slot of the thread identified by 'thread_port'
 * to unspecified.  The calling thread updates its own slot directly;
 * other threads are found by walking the global list under the lock.
 */
void
_pthread_clear_qos_tsd(mach_port_t thread_port)
{
	if (thread_port == MACH_PORT_NULL || (uintptr_t)_pthread_getspecific_direct(_PTHREAD_TSD_SLOT_MACH_THREAD_SELF) == thread_port) {
		/* Clear the current thread's TSD, that can be done inline. */
		_pthread_setspecific_direct(_PTHREAD_TSD_SLOT_PTHREAD_QOS_CLASS, _pthread_priority_make_newest(QOS_CLASS_UNSPECIFIED, 0, 0));
	} else {
		pthread_t p;

		LOCK(_pthread_list_lock);

		TAILQ_FOREACH(p, &__pthread_head, plist) {
			mach_port_t kp;
			// Wait out threads whose kernel port is not yet set
			// (creation in flight), re-taking the lock each time.
			while ((kp = _pthread_kernel_thread(p)) == MACH_PORT_NULL) {
				UNLOCK(_pthread_list_lock);
				sched_yield();
				LOCK(_pthread_list_lock);
			}
			if (thread_port == kp) {
				p->tsd[_PTHREAD_TSD_SLOT_PTHREAD_QOS_CLASS] = _pthread_priority_make_newest(QOS_CLASS_UNSPECIFIED, 0, 0);
				break;
			}
		}

		UNLOCK(_pthread_list_lock);
	}
}
1806
1807 /***** pthread workqueue support routines *****/
1808
/*
 * Register this process's thread entry points and workqueue support with
 * the kernel via __bsdthread_register.  Run at init and after fork in
 * the child.  On success the kernel reports the supported feature bits
 * and (optionally) the main thread's QoS class.
 */
PTHREAD_NOEXPORT void
pthread_workqueue_atfork_child(void)
{
	struct _pthread_registration_data data = {
		.dispatch_queue_offset = __PTK_LIBDISPATCH_KEY0 * sizeof(void *),
	};

	int rv = __bsdthread_register(thread_start,
			start_wqthread,
			(int)pthreadsize,
			(void*)&data,
			(uintptr_t)sizeof(data),
			data.dispatch_queue_offset);

	// A positive return is the feature bitmask; <= 0 leaves defaults.
	if (rv > 0) {
		__pthread_supported_features = rv;
	}

	// Propagate the kernel-assigned main-thread QoS into our TSD.
	if (_pthread_priority_get_qos_newest(data.main_qos) != QOS_CLASS_UNSPECIFIED) {
		_pthread_set_main_qos(data.main_qos);
		_thread.tsd[_PTHREAD_TSD_SLOT_PTHREAD_QOS_CLASS] = data.main_qos;
	}

	if (__libdispatch_workerfunction != NULL) {
		// prepare the kernel for workq action
		(void)__workq_open();
	}
}
1837
/*
 * Kernel entry point for workqueue threads (registered via
 * __bsdthread_register).  Initializes the pthread structure on first
 * entry, then dispatches into libdispatch's worker function using
 * whichever priority convention the kernel/libdispatch combination
 * supports, and finally returns the thread to the kernel pool.
 */
void
_pthread_wqthread(pthread_t self, mach_port_t kport, void *stackaddr, void *unused, int flags)
{
	PTHREAD_ASSERT(flags & WQ_FLAG_THREAD_NEWSPI);

	int thread_reuse = flags & WQ_FLAG_THREAD_REUSE;
	int thread_class = flags & WQ_FLAG_THREAD_PRIOMASK;
	int overcommit = (flags & WQ_FLAG_THREAD_OVERCOMMIT) != 0;

	pthread_priority_t priority;

	// Encode the priority in the newest format the kernel understands.
	if ((__pthread_supported_features & PTHREAD_FEATURE_QOS_MAINTENANCE) == 0) {
		priority = _pthread_priority_make_version2(thread_class, 0, (overcommit ? _PTHREAD_PRIORITY_OVERCOMMIT_FLAG : 0));
	} else {
		priority = _pthread_priority_make_newest(thread_class, 0, (overcommit ? _PTHREAD_PRIORITY_OVERCOMMIT_FLAG : 0));
	}

	if (thread_reuse == 0) {
		// New thread created by kernel, needs initialization.
		_pthread_struct_init(self, &_pthread_attr_default, stackaddr, DEFAULT_STACK_SIZE, 1);
		_pthread_set_kernel_thread(self, kport);
		self->wqthread = 1;
		self->wqkillset = 0;

		// Not a joinable thread.
		self->detached &= ~PTHREAD_CREATE_JOINABLE;
		self->detached |= PTHREAD_CREATE_DETACHED;

		// Update the running thread count and set childrun bit.
		// XXX this should be consolidated with pthread_body().
		_pthread_set_self(self);
		_pthread_introspection_thread_create(self, false);
		__pthread_add_thread(self, false);

		// If we're running with fine-grained priority, we also need to
		// set this thread to have the QoS class provided to use by the kernel
		if (__pthread_supported_features & PTHREAD_FEATURE_FINEPRIO) {
			_pthread_setspecific_direct(_PTHREAD_TSD_SLOT_PTHREAD_QOS_CLASS, _pthread_priority_make_newest(thread_class, 0, 0));
		}
	}

#if WQ_DEBUG
	PTHREAD_ASSERT(self);
	PTHREAD_ASSERT(self == pthread_self());
#endif // WQ_DEBUG

	self->fun = (void *(*)(void *))__libdispatch_workerfunction;
	self->arg = (void *)(uintptr_t)thread_class;

	if (__pthread_supported_features & PTHREAD_FEATURE_FINEPRIO) {
		if (!__workq_newapi) {
			/* Old thread priorities are inverted from where we have them in
			 * the new flexible priority scheme. The highest priority is zero,
			 * up to 2, with background at 3.
			 */
			pthread_workqueue_function_t func = (pthread_workqueue_function_t)__libdispatch_workerfunction;

			int opts = overcommit ? WORKQ_ADDTHREADS_OPTION_OVERCOMMIT : 0;

			if ((__pthread_supported_features & PTHREAD_FEATURE_QOS_DEFAULT) == 0) {
				/* Dirty hack to support kernels that don't have QOS_CLASS_DEFAULT. */
				switch (thread_class) {
				case QOS_CLASS_USER_INTERACTIVE:
					thread_class = QOS_CLASS_USER_INITIATED;
					break;
				case QOS_CLASS_USER_INITIATED:
					thread_class = QOS_CLASS_DEFAULT;
					break;
				default:
					break;
				}
			}

			// Map each QoS class onto the legacy workq priority bands.
			switch (thread_class) {
			/* QOS_CLASS_USER_INTERACTIVE is not currently requested by for old dispatch priority compatibility */
			case QOS_CLASS_USER_INITIATED:
				(*func)(WORKQ_HIGH_PRIOQUEUE, opts, NULL);
				break;

			case QOS_CLASS_DEFAULT:
				/* B&I builders can't pass a QOS_CLASS_DEFAULT thread to dispatch, for fear of the QoS being
				 * picked up by NSThread (et al) and transported around the system. So change the TSD to
				 * make this thread look like QOS_CLASS_USER_INITIATED even though it will still run as legacy.
				 */
				_pthread_setspecific_direct(_PTHREAD_TSD_SLOT_PTHREAD_QOS_CLASS, _pthread_priority_make_newest(QOS_CLASS_USER_INITIATED, 0, 0));
				(*func)(WORKQ_DEFAULT_PRIOQUEUE, opts, NULL);
				break;

			case QOS_CLASS_UTILITY:
				(*func)(WORKQ_LOW_PRIOQUEUE, opts, NULL);
				break;

			case QOS_CLASS_BACKGROUND:
				(*func)(WORKQ_BG_PRIOQUEUE, opts, NULL);
				break;

			/* Legacy dispatch does not use QOS_CLASS_MAINTENANCE, so no need to handle it here */
			}

		} else {
			/* "New" API, where dispatch is expecting to be given the thread priority */
			(*__libdispatch_workerfunction)(priority);
		}
	} else {
		/* We're the new library running on an old kext, so thread_class is really the workq priority. */
		pthread_workqueue_function_t func = (pthread_workqueue_function_t)__libdispatch_workerfunction;
		int options = overcommit ? WORKQ_ADDTHREADS_OPTION_OVERCOMMIT : 0;
		(*func)(thread_class, options, NULL);
	}

	// Worker returned: hand the thread back to the kernel pool, then
	// run the normal exit path (does not return).
	__workq_kernreturn(WQOPS_THREAD_RETURN, NULL, 0, 0);
	_pthread_exit(self, NULL);
}
1951
1952 /***** pthread workqueue API for libdispatch *****/
1953
1954 int
1955 _pthread_workqueue_init(pthread_workqueue_function2_t func, int offset, int flags)
1956 {
1957 if (flags != 0) {
1958 return ENOTSUP;
1959 }
1960
1961 __workq_newapi = true;
1962 __libdispatch_offset = offset;
1963
1964 int rv = pthread_workqueue_setdispatch_np((pthread_workqueue_function_t)func);
1965 return rv;
1966 }
1967
// Record where libdispatch keeps its per-thread queue pointer so the
// kernel handshake in pthread_workqueue_setdispatch_np() can pass it on.
void
pthread_workqueue_setdispatchoffset_np(int offset)
{
	__libdispatch_offset = offset;
}
1973
1974 int
1975 pthread_workqueue_setdispatch_np(pthread_workqueue_function_t worker_func)
1976 {
1977 int res = EBUSY;
1978 if (__libdispatch_workerfunction == NULL) {
1979 // Check whether the kernel supports new SPIs
1980 res = __workq_kernreturn(WQOPS_QUEUE_NEWSPISUPP, NULL, __libdispatch_offset, 0);
1981 if (res == -1){
1982 res = ENOTSUP;
1983 } else {
1984 __libdispatch_workerfunction = (pthread_workqueue_function2_t)worker_func;
1985
1986 // Prepare the kernel for workq action
1987 (void)__workq_open();
1988 if (__is_threaded == 0) {
1989 __is_threaded = 1;
1990 }
1991 }
1992 }
1993 return res;
1994 }
1995
// Return the feature bitmask the kernel reported at __bsdthread_register
// time (see pthread_workqueue_atfork_child).
int
_pthread_workqueue_supported(void)
{
	return __pthread_supported_features;
}
2001
/*
 * Legacy libdispatch entry point: ask the kernel for 'numthreads' more
 * workqueue threads at 'queue_priority'.  On fine-grained-priority
 * kernels the legacy priority is re-encoded as a QoS bracket first.
 * Returns 0, EPERM (no worker function registered), or errno from the
 * syscall.
 */
int
pthread_workqueue_addthreads_np(int queue_priority, int options, int numthreads)
{
	int res = 0;

	// Cannot add threads without a worker function registered.
	if (__libdispatch_workerfunction == NULL) {
		return EPERM;
	}

	pthread_priority_t kp = 0;

	if (__pthread_supported_features & PTHREAD_FEATURE_FINEPRIO) {
		/* The new kernel API takes the new QoS class + relative priority style of
		 * priority. This entry point is here for compatibility with old libdispatch
		 * versions (ie. the simulator). We request the corresponding new bracket
		 * from the kernel, then on the way out run all dispatch queues that were
		 * requested.
		 */

		int compat_priority = queue_priority & WQ_FLAG_THREAD_PRIOMASK;
		int flags = 0;

		/* To make sure the library does not issue more threads to dispatch than
		 * were requested, the total number of active requests is recorded in
		 * __workq_requests.
		 */
		if (options & WORKQ_ADDTHREADS_OPTION_OVERCOMMIT) {
			flags = _PTHREAD_PRIORITY_OVERCOMMIT_FLAG;
		}

		kp = _pthread_qos_class_encode_workqueue(compat_priority, flags);

	} else {
		/* Running on the old kernel, queue_priority is what we pass directly to
		 * the syscall.
		 */
		kp = queue_priority & WQ_FLAG_THREAD_PRIOMASK;

		if (options & WORKQ_ADDTHREADS_OPTION_OVERCOMMIT) {
			kp |= WORKQUEUE_OVERCOMMIT;
		}
	}

	res = __workq_kernreturn(WQOPS_QUEUE_REQTHREADS, NULL, numthreads, (int)kp);
	if (res == -1) {
		res = errno;
	}
	return res;
}
2052
2053 int
2054 _pthread_workqueue_addthreads(int numthreads, pthread_priority_t priority)
2055 {
2056 int res = 0;
2057
2058 if (__libdispatch_workerfunction == NULL) {
2059 return EPERM;
2060 }
2061
2062 if ((__pthread_supported_features & PTHREAD_FEATURE_FINEPRIO) == 0) {
2063 return ENOTSUP;
2064 }
2065
2066 res = __workq_kernreturn(WQOPS_QUEUE_REQTHREADS, NULL, numthreads, (int)priority);
2067 if (res == -1) {
2068 res = errno;
2069 }
2070 return res;
2071 }
2072
2073 /*
2074 * Introspection SPI for libpthread.
2075 */
2076
// The single process-wide introspection hook, set atomically via
// pthread_introspection_hook_install(); NULL means introspection is off.
static pthread_introspection_hook_t _pthread_introspection_hook;
2078
2079 pthread_introspection_hook_t
2080 pthread_introspection_hook_install(pthread_introspection_hook_t hook)
2081 {
2082 if (os_slowpath(!hook)) {
2083 PTHREAD_ABORT("pthread_introspection_hook_install was passed NULL");
2084 }
2085 pthread_introspection_hook_t prev;
2086 prev = __sync_swap(&_pthread_introspection_hook, hook);
2087 return prev;
2088 }
2089
// Out-of-line hook callout for THREAD_CREATE; with 'destroy' set, the
// destroy event is reported immediately after the create event.
PTHREAD_NOINLINE
static void
_pthread_introspection_hook_callout_thread_create(pthread_t t, bool destroy)
{
	_pthread_introspection_hook(PTHREAD_INTROSPECTION_THREAD_CREATE, t, t,
			pthreadsize);
	if (!destroy) return;
	_pthread_introspection_thread_destroy(t);
}
2099
// Fast-path guard: skip the out-of-line callout when no hook is installed.
static inline void
_pthread_introspection_thread_create(pthread_t t, bool destroy)
{
	if (os_fastpath(!_pthread_introspection_hook)) return;
	_pthread_introspection_hook_callout_thread_create(t, destroy);
}
2106
// Out-of-line hook callout for THREAD_START.  The freeable region is
// computed differently for the main thread (static struct, stack below
// stackaddr) than for library-allocated threads (freeaddr/freesize minus
// the embedded pthread struct).
PTHREAD_NOINLINE
static void
_pthread_introspection_hook_callout_thread_start(pthread_t t)
{
	size_t freesize;
	void *freeaddr;
	if (t == &_thread) {
		freesize = t->stacksize + t->guardsize;
		freeaddr = t->stackaddr - freesize;
	} else {
		freesize = t->freesize - pthreadsize;
		freeaddr = t->freeaddr;
	}
	_pthread_introspection_hook(PTHREAD_INTROSPECTION_THREAD_START, t,
			freeaddr, freesize);
}
2123
// Fast-path guard: skip the out-of-line callout when no hook is installed.
static inline void
_pthread_introspection_thread_start(pthread_t t)
{
	if (os_fastpath(!_pthread_introspection_hook)) return;
	_pthread_introspection_hook_callout_thread_start(t);
}
2130
// Out-of-line hook callout for THREAD_TERMINATE; when the thread is also
// being destroyed, the pthread struct itself is excluded from the
// reported freeable size, and the destroy event follows immediately.
PTHREAD_NOINLINE
static void
_pthread_introspection_hook_callout_thread_terminate(pthread_t t,
		void *freeaddr, size_t freesize, bool destroy)
{
	if (destroy && freesize) {
		freesize -= pthreadsize;
	}
	_pthread_introspection_hook(PTHREAD_INTROSPECTION_THREAD_TERMINATE, t,
			freeaddr, freesize);
	if (!destroy) return;
	_pthread_introspection_thread_destroy(t);
}
2144
// Fast-path guard: skip the out-of-line callout when no hook is installed.
static inline void
_pthread_introspection_thread_terminate(pthread_t t, void *freeaddr,
		size_t freesize, bool destroy)
{
	if (os_fastpath(!_pthread_introspection_hook)) return;
	_pthread_introspection_hook_callout_thread_terminate(t, freeaddr, freesize,
			destroy);
}
2153
// Out-of-line hook callout for THREAD_DESTROY.  The main thread's static
// structure is never destroyed, so it is filtered out here.
PTHREAD_NOINLINE
static void
_pthread_introspection_hook_callout_thread_destroy(pthread_t t)
{
	if (t == &_thread) return;
	_pthread_introspection_hook(PTHREAD_INTROSPECTION_THREAD_DESTROY, t, t,
			pthreadsize);
}
2162
// Fast-path guard: skip the out-of-line callout when no hook is installed.
static inline void
_pthread_introspection_thread_destroy(pthread_t t)
{
	if (os_fastpath(!_pthread_introspection_hook)) return;
	_pthread_introspection_hook_callout_thread_destroy(t);
}
2169