]> git.saurik.com Git - apple/libpthread.git/blame - src/pthread.c
libpthread-330.220.2.tar.gz
[apple/libpthread.git] / src / pthread.c
CommitLineData
f1a1da6c
A
1/*
2 * Copyright (c) 2000-2013 Apple Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
a0619f9c 5 *
f1a1da6c
A
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. Please obtain a copy of the License at
10 * http://www.opensource.apple.com/apsl/ and read it before using this
11 * file.
a0619f9c 12 *
f1a1da6c
A
13 * The Original Code and all software distributed under the License are
14 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
15 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
16 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
18 * Please see the License for the specific language governing rights and
19 * limitations under the License.
a0619f9c 20 *
f1a1da6c
A
21 * @APPLE_LICENSE_HEADER_END@
22 */
23/*
a0619f9c
A
24 * Copyright 1996 1995 by Open Software Foundation, Inc. 1997 1996 1995 1994 1993 1992 1991
25 * All Rights Reserved
26 *
27 * Permission to use, copy, modify, and distribute this software and
28 * its documentation for any purpose and without fee is hereby granted,
29 * provided that the above copyright notice appears in all copies and
30 * that both the copyright notice and this permission notice appear in
31 * supporting documentation.
32 *
33 * OSF DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE
34 * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
35 * FOR A PARTICULAR PURPOSE.
36 *
f1a1da6c 37 * IN NO EVENT SHALL OSF BE LIABLE FOR ANY SPECIAL, INDIRECT, OR
a0619f9c
A
38 * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
39 * LOSS OF USE, DATA OR PROFITS, WHETHER IN ACTION OF CONTRACT,
40 * NEGLIGENCE, OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
41 * WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
42 *
f1a1da6c
A
43 */
44/*
45 * MkLinux
46 */
47
48/*
49 * POSIX Pthread Library
50 */
51
a0619f9c 52#include "resolver.h"
f1a1da6c
A
53#include "internal.h"
54#include "private.h"
55#include "workqueue_private.h"
56#include "introspection_private.h"
57#include "qos_private.h"
2546420a 58#include "tsd_private.h"
214d78a2
A
59#include "pthread/stack_np.h"
60#include "offsets.h" // included to validate the offsets at build time
f1a1da6c
A
61
62#include <stdlib.h>
63#include <errno.h>
64#include <signal.h>
65#include <unistd.h>
66#include <mach/mach_init.h>
67#include <mach/mach_vm.h>
214d78a2 68#include <mach/mach_sync_ipc.h>
f1a1da6c
A
69#include <sys/time.h>
70#include <sys/resource.h>
71#include <sys/sysctl.h>
72#include <sys/queue.h>
214d78a2 73#include <sys/ulock.h>
f1a1da6c
A
74#include <sys/mman.h>
75#include <machine/vmparam.h>
76#define __APPLE_API_PRIVATE
77#include <machine/cpu_capabilities.h>
f1a1da6c
A
78
79#include <_simple.h>
80#include <platform/string.h>
81#include <platform/compat.h>
82
83extern int __sysctl(int *name, u_int namelen, void *oldp, size_t *oldlenp,
214d78a2 84 void *newp, size_t newlen);
f1a1da6c 85extern void __exit(int) __attribute__((noreturn));
a0619f9c
A
86extern int __pthread_kill(mach_port_t, int);
87
214d78a2 88extern void _pthread_joiner_wake(pthread_t thread);
f1a1da6c 89
214d78a2
A
90#if !VARIANT_DYLD
91PTHREAD_NOEXPORT extern struct _pthread *_main_thread_ptr;
92#define main_thread() (_main_thread_ptr)
93#endif // VARIANT_DYLD
f1a1da6c 94
214d78a2
A
95// Default stack size is 512KB; independent of the main thread's stack size.
96#define DEFAULT_STACK_SIZE (size_t)(512 * 1024)
f1a1da6c 97
a0619f9c 98
214d78a2
A
99//
100// Global constants
101//
f1a1da6c 102
214d78a2
A
103/*
104 * The pthread may be offset into a page. In that event, by contract
105 * with the kernel, the allocation will extend PTHREAD_SIZE from the
106 * start of the next page. There's also one page worth of allocation
107 * below stacksize for the guard page. <rdar://problem/19941744>
108 */
109#define PTHREAD_SIZE ((size_t)mach_vm_round_page(sizeof(struct _pthread)))
110#define PTHREAD_ALLOCADDR(stackaddr, stacksize) ((stackaddr - stacksize) - vm_page_size)
111#define PTHREAD_ALLOCSIZE(stackaddr, stacksize) ((round_page((uintptr_t)stackaddr) + PTHREAD_SIZE) - (uintptr_t)PTHREAD_ALLOCADDR(stackaddr, stacksize))
f1a1da6c 112
214d78a2
A
113static const pthread_attr_t _pthread_attr_default = {
114 .sig = _PTHREAD_ATTR_SIG,
115 .stacksize = 0,
116 .detached = PTHREAD_CREATE_JOINABLE,
117 .inherit = _PTHREAD_DEFAULT_INHERITSCHED,
118 .policy = _PTHREAD_DEFAULT_POLICY,
119 .defaultguardpage = true,
120 // compile time constant for _pthread_default_priority(0)
121 .qosclass = (1U << (THREAD_QOS_LEGACY - 1 + _PTHREAD_PRIORITY_QOS_CLASS_SHIFT)) |
122 ((uint8_t)-1 & _PTHREAD_PRIORITY_PRIORITY_MASK),
123};
f1a1da6c
A
124
125#if PTHREAD_LAYOUT_SPI
126
127const struct pthread_layout_offsets_s pthread_layout_offsets = {
128 .plo_version = 1,
129 .plo_pthread_tsd_base_offset = offsetof(struct _pthread, tsd),
130 .plo_pthread_tsd_base_address_offset = 0,
131 .plo_pthread_tsd_entry_size = sizeof(((struct _pthread *)NULL)->tsd[0]),
132};
133
134#endif // PTHREAD_LAYOUT_SPI
135
136//
214d78a2 137// Global exported variables
f1a1da6c
A
138//
139
214d78a2
A
140// This global should be used (carefully) by anyone needing to know if a
141// pthread (other than the main thread) has been created.
142int __is_threaded = 0;
143int __unix_conforming = 0;
f1a1da6c 144
214d78a2
A
145//
146// Global internal variables
147//
2546420a 148
214d78a2
A
149// _pthread_list_lock protects _pthread_count, access to the __pthread_head
150// list. Externally imported by pthread_cancelable.c.
151struct __pthread_list __pthread_head = TAILQ_HEAD_INITIALIZER(__pthread_head);
152_pthread_lock _pthread_list_lock = _PTHREAD_LOCK_INITIALIZER;
153
154uint32_t _main_qos;
964d3577 155
214d78a2 156#if VARIANT_DYLD
964d3577 157// The main thread's pthread_t
214d78a2
A
158struct _pthread _main_thread __attribute__((aligned(64))) = { };
159#define main_thread() (&_main_thread)
160#else // VARIANT_DYLD
161struct _pthread *_main_thread_ptr;
162#endif // VARIANT_DYLD
f1a1da6c 163
214d78a2
A
164#if PTHREAD_DEBUG_LOG
165#include <fcntl.h>
166int _pthread_debuglog;
167uint64_t _pthread_debugstart;
168#endif
169
170//
171// Global static variables
172//
173static bool __workq_newapi;
174static uint8_t default_priority;
175#if !VARIANT_DYLD
176static uint8_t max_priority;
177static uint8_t min_priority;
178#endif // !VARIANT_DYLD
179static int _pthread_count = 1;
f1a1da6c 180static int pthread_concurrency;
214d78a2
A
181static uintptr_t _pthread_ptr_munge_token;
182
183static void (*exitf)(int) = __exit;
184#if !VARIANT_DYLD
185static void *(*_pthread_malloc)(size_t) = NULL;
186static void (*_pthread_free)(void *) = NULL;
187#endif // !VARIANT_DYLD
f1a1da6c
A
188
189// work queue support data
214d78a2
A
190PTHREAD_NORETURN
191static void
192__pthread_invalid_keventfunction(void **events, int *nevents)
193{
194 PTHREAD_CLIENT_CRASH(0, "Invalid kqworkq setup");
195}
196
197PTHREAD_NORETURN
198static void
199__pthread_invalid_workloopfunction(uint64_t *workloop_id, void **events, int *nevents)
200{
201 PTHREAD_CLIENT_CRASH(0, "Invalid kqwl setup");
202}
203static pthread_workqueue_function2_t __libdispatch_workerfunction;
204static pthread_workqueue_function_kevent_t __libdispatch_keventfunction = &__pthread_invalid_keventfunction;
205static pthread_workqueue_function_workloop_t __libdispatch_workloopfunction = &__pthread_invalid_workloopfunction;
f1a1da6c 206static int __libdispatch_offset;
214d78a2 207static int __pthread_supported_features; // supported feature set
f1a1da6c 208
214d78a2
A
209#if defined(__i386__) || defined(__x86_64__)
210static mach_vm_address_t __pthread_stack_hint = 0xB0000000;
211#else
212#error no __pthread_stack_hint for this architecture
213#endif
f1a1da6c
A
214
215//
216// Function prototypes
217//
218
219// pthread primitives
214d78a2
A
220static inline void _pthread_struct_init(pthread_t t, const pthread_attr_t *attrs,
221 void *stack, size_t stacksize, void *freeaddr, size_t freesize);
f1a1da6c 222
214d78a2
A
223#if VARIANT_DYLD
224static void _pthread_set_self_dyld(void);
225#endif // VARIANT_DYLD
a0619f9c 226static inline void _pthread_set_self_internal(pthread_t, bool needs_tsd_base_set);
f1a1da6c
A
227
228static void _pthread_dealloc_reply_port(pthread_t t);
a0619f9c 229static void _pthread_dealloc_special_reply_port(pthread_t t);
f1a1da6c 230
214d78a2 231static inline void __pthread_started_thread(pthread_t t);
f1a1da6c 232
f1a1da6c 233static void _pthread_exit(pthread_t self, void *value_ptr) __dead2;
f1a1da6c 234
214d78a2 235static inline void _pthread_introspection_thread_create(pthread_t t);
f1a1da6c 236static inline void _pthread_introspection_thread_start(pthread_t t);
214d78a2 237static inline void _pthread_introspection_thread_terminate(pthread_t t);
f1a1da6c
A
238static inline void _pthread_introspection_thread_destroy(pthread_t t);
239
a0619f9c 240extern void _pthread_set_self(pthread_t);
964d3577
A
241extern void start_wqthread(pthread_t self, mach_port_t kport, void *stackaddr, void *unused, int reuse); // trampoline into _pthread_wqthread
242extern void thread_start(pthread_t self, mach_port_t kport, void *(*fun)(void *), void * funarg, size_t stacksize, unsigned int flags); // trampoline into _pthread_start
f1a1da6c 243
f1a1da6c 244/*
a0619f9c 245 * Flags filed passed to bsdthread_create and back in pthread_start
214d78a2
A
246 * 31 <---------------------------------> 0
247 * _________________________________________
248 * | flags(8) | policy(8) | importance(16) |
249 * -----------------------------------------
250 */
251#define PTHREAD_START_CUSTOM 0x01000000 // <rdar://problem/34501401>
2546420a 252#define PTHREAD_START_SETSCHED 0x02000000
214d78a2 253// was PTHREAD_START_DETACHED 0x04000000
2546420a
A
254#define PTHREAD_START_QOSCLASS 0x08000000
255#define PTHREAD_START_TSD_BASE_SET 0x10000000
214d78a2 256#define PTHREAD_START_SUSPENDED 0x20000000
2546420a 257#define PTHREAD_START_QOSCLASS_MASK 0x00ffffff
f1a1da6c
A
258#define PTHREAD_START_POLICY_BITSHIFT 16
259#define PTHREAD_START_POLICY_MASK 0xff
260#define PTHREAD_START_IMPORTANCE_MASK 0xffff
261
214d78a2
A
262#if (!defined(__OPEN_SOURCE__) && TARGET_OS_OSX) || OS_VARIANT_RESOLVED // 40703288
263static int pthread_setschedparam_internal(pthread_t, mach_port_t, int,
264 const struct sched_param *);
265#endif
266
f1a1da6c
A
267extern pthread_t __bsdthread_create(void *(*func)(void *), void * func_arg, void * stack, pthread_t thread, unsigned int flags);
268extern int __bsdthread_register(void (*)(pthread_t, mach_port_t, void *(*)(void *), void *, size_t, unsigned int), void (*)(pthread_t, mach_port_t, void *, void *, int), int,void (*)(pthread_t, mach_port_t, void *(*)(void *), void *, size_t, unsigned int), int32_t *,__uint64_t);
269extern int __bsdthread_terminate(void * freeaddr, size_t freesize, mach_port_t kport, mach_port_t joinsem);
270extern __uint64_t __thread_selfid( void );
f1a1da6c 271
214d78a2
A
272#if __LP64__
273_Static_assert(offsetof(struct _pthread, tsd) == 224, "TSD LP64 offset");
f1a1da6c 274#else
214d78a2 275_Static_assert(offsetof(struct _pthread, tsd) == 176, "TSD ILP32 offset");
f1a1da6c 276#endif
2546420a
A
277_Static_assert(offsetof(struct _pthread, tsd) + _PTHREAD_STRUCT_DIRECT_THREADID_OFFSET
278 == offsetof(struct _pthread, thread_id),
279 "_PTHREAD_STRUCT_DIRECT_THREADID_OFFSET is correct");
f1a1da6c 280
214d78a2 281#pragma mark pthread attrs
f1a1da6c 282
214d78a2
A
283_Static_assert(sizeof(struct _pthread_attr_t) == sizeof(__darwin_pthread_attr_t),
284 "internal pthread_attr_t == external pthread_attr_t");
a0619f9c
A
285
286int
f1a1da6c
A
287pthread_attr_destroy(pthread_attr_t *attr)
288{
289 int ret = EINVAL;
290 if (attr->sig == _PTHREAD_ATTR_SIG) {
291 attr->sig = 0;
292 ret = 0;
293 }
294 return ret;
295}
296
a0619f9c 297int
f1a1da6c
A
298pthread_attr_getdetachstate(const pthread_attr_t *attr, int *detachstate)
299{
300 int ret = EINVAL;
301 if (attr->sig == _PTHREAD_ATTR_SIG) {
302 *detachstate = attr->detached;
303 ret = 0;
304 }
305 return ret;
306}
307
a0619f9c 308int
f1a1da6c
A
309pthread_attr_getinheritsched(const pthread_attr_t *attr, int *inheritsched)
310{
311 int ret = EINVAL;
312 if (attr->sig == _PTHREAD_ATTR_SIG) {
313 *inheritsched = attr->inherit;
314 ret = 0;
315 }
316 return ret;
317}
318
214d78a2
A
319static PTHREAD_ALWAYS_INLINE void
320_pthread_attr_get_schedparam(const pthread_attr_t *attr,
321 struct sched_param *param)
322{
323 if (attr->schedset) {
324 *param = attr->param;
325 } else {
326 param->sched_priority = default_priority;
327 param->quantum = 10; /* quantum isn't public yet */
328 }
329}
330
a0619f9c 331int
f1a1da6c
A
332pthread_attr_getschedparam(const pthread_attr_t *attr, struct sched_param *param)
333{
334 int ret = EINVAL;
335 if (attr->sig == _PTHREAD_ATTR_SIG) {
214d78a2 336 _pthread_attr_get_schedparam(attr, param);
f1a1da6c
A
337 ret = 0;
338 }
339 return ret;
340}
341
a0619f9c 342int
f1a1da6c
A
343pthread_attr_getschedpolicy(const pthread_attr_t *attr, int *policy)
344{
345 int ret = EINVAL;
346 if (attr->sig == _PTHREAD_ATTR_SIG) {
347 *policy = attr->policy;
348 ret = 0;
349 }
350 return ret;
351}
352
f1a1da6c
A
353int
354pthread_attr_init(pthread_attr_t *attr)
355{
214d78a2 356 *attr = _pthread_attr_default;
f1a1da6c
A
357 return 0;
358}
359
a0619f9c 360int
f1a1da6c
A
361pthread_attr_setdetachstate(pthread_attr_t *attr, int detachstate)
362{
363 int ret = EINVAL;
364 if (attr->sig == _PTHREAD_ATTR_SIG &&
214d78a2
A
365 (detachstate == PTHREAD_CREATE_JOINABLE ||
366 detachstate == PTHREAD_CREATE_DETACHED)) {
f1a1da6c
A
367 attr->detached = detachstate;
368 ret = 0;
369 }
370 return ret;
371}
372
a0619f9c 373int
f1a1da6c
A
374pthread_attr_setinheritsched(pthread_attr_t *attr, int inheritsched)
375{
376 int ret = EINVAL;
377 if (attr->sig == _PTHREAD_ATTR_SIG &&
214d78a2
A
378 (inheritsched == PTHREAD_INHERIT_SCHED ||
379 inheritsched == PTHREAD_EXPLICIT_SCHED)) {
f1a1da6c
A
380 attr->inherit = inheritsched;
381 ret = 0;
382 }
383 return ret;
384}
385
a0619f9c 386int
f1a1da6c
A
387pthread_attr_setschedparam(pthread_attr_t *attr, const struct sched_param *param)
388{
389 int ret = EINVAL;
390 if (attr->sig == _PTHREAD_ATTR_SIG) {
391 /* TODO: Validate sched_param fields */
392 attr->param = *param;
393 attr->schedset = 1;
394 ret = 0;
395 }
396 return ret;
397}
398
a0619f9c 399int
f1a1da6c
A
400pthread_attr_setschedpolicy(pthread_attr_t *attr, int policy)
401{
402 int ret = EINVAL;
214d78a2
A
403 if (attr->sig == _PTHREAD_ATTR_SIG && (policy == SCHED_OTHER ||
404 policy == SCHED_RR || policy == SCHED_FIFO)) {
405 if (!_PTHREAD_POLICY_IS_FIXEDPRI(policy)) {
406 /* non-fixedpri policy should remove cpupercent */
407 attr->cpupercentset = 0;
408 }
f1a1da6c 409 attr->policy = policy;
214d78a2 410 attr->policyset = 1;
f1a1da6c
A
411 ret = 0;
412 }
413 return ret;
414}
415
416int
417pthread_attr_setscope(pthread_attr_t *attr, int scope)
418{
419 int ret = EINVAL;
420 if (attr->sig == _PTHREAD_ATTR_SIG) {
421 if (scope == PTHREAD_SCOPE_SYSTEM) {
422 // No attribute yet for the scope.
423 ret = 0;
424 } else if (scope == PTHREAD_SCOPE_PROCESS) {
425 ret = ENOTSUP;
426 }
427 }
428 return ret;
429}
430
431int
432pthread_attr_getscope(const pthread_attr_t *attr, int *scope)
433{
434 int ret = EINVAL;
435 if (attr->sig == _PTHREAD_ATTR_SIG) {
436 *scope = PTHREAD_SCOPE_SYSTEM;
437 ret = 0;
438 }
439 return ret;
440}
441
442int
443pthread_attr_getstackaddr(const pthread_attr_t *attr, void **stackaddr)
444{
445 int ret = EINVAL;
446 if (attr->sig == _PTHREAD_ATTR_SIG) {
447 *stackaddr = attr->stackaddr;
448 ret = 0;
449 }
450 return ret;
451}
452
453int
454pthread_attr_setstackaddr(pthread_attr_t *attr, void *stackaddr)
455{
456 int ret = EINVAL;
457 if (attr->sig == _PTHREAD_ATTR_SIG &&
214d78a2 458 ((uintptr_t)stackaddr % vm_page_size) == 0) {
f1a1da6c 459 attr->stackaddr = stackaddr;
214d78a2 460 attr->defaultguardpage = false;
f1a1da6c
A
461 attr->guardsize = 0;
462 ret = 0;
463 }
464 return ret;
465}
466
214d78a2
A
467static inline size_t
468_pthread_attr_stacksize(const pthread_attr_t *attr)
469{
470 return attr->stacksize ? attr->stacksize : DEFAULT_STACK_SIZE;
471}
472
f1a1da6c
A
473int
474pthread_attr_getstacksize(const pthread_attr_t *attr, size_t *stacksize)
475{
476 int ret = EINVAL;
477 if (attr->sig == _PTHREAD_ATTR_SIG) {
214d78a2 478 *stacksize = _pthread_attr_stacksize(attr);
f1a1da6c
A
479 ret = 0;
480 }
481 return ret;
482}
483
484int
485pthread_attr_setstacksize(pthread_attr_t *attr, size_t stacksize)
486{
487 int ret = EINVAL;
488 if (attr->sig == _PTHREAD_ATTR_SIG &&
214d78a2
A
489 (stacksize % vm_page_size) == 0 &&
490 stacksize >= PTHREAD_STACK_MIN) {
491 attr->stacksize = stacksize;
492 ret = 0;
493 }
494 return ret;
495}
496
497int
498pthread_attr_getstack(const pthread_attr_t *attr, void **stackaddr, size_t * stacksize)
499{
500 int ret = EINVAL;
501 if (attr->sig == _PTHREAD_ATTR_SIG) {
502 *stackaddr = (void *)((uintptr_t)attr->stackaddr - attr->stacksize);
503 *stacksize = _pthread_attr_stacksize(attr);
504 ret = 0;
505 }
506 return ret;
507}
508
509// Per SUSv3, the stackaddr is the base address, the lowest addressable byte
510// address. This is not the same as in pthread_attr_setstackaddr.
511int
512pthread_attr_setstack(pthread_attr_t *attr, void *stackaddr, size_t stacksize)
513{
514 int ret = EINVAL;
515 if (attr->sig == _PTHREAD_ATTR_SIG &&
516 ((uintptr_t)stackaddr % vm_page_size) == 0 &&
517 (stacksize % vm_page_size) == 0 &&
518 stacksize >= PTHREAD_STACK_MIN) {
519 attr->stackaddr = (void *)((uintptr_t)stackaddr + stacksize);
f1a1da6c
A
520 attr->stacksize = stacksize;
521 ret = 0;
522 }
214d78a2
A
523 return ret;
524}
525
526int
527pthread_attr_setguardsize(pthread_attr_t *attr, size_t guardsize)
528{
529 int ret = EINVAL;
530 if (attr->sig == _PTHREAD_ATTR_SIG && (guardsize % vm_page_size) == 0) {
531 /* Guardsize of 0 is valid, means no guard */
532 attr->defaultguardpage = false;
533 attr->guardsize = guardsize;
534 ret = 0;
535 }
536 return ret;
537}
538
539static inline size_t
540_pthread_attr_guardsize(const pthread_attr_t *attr)
541{
542 return attr->defaultguardpage ? vm_page_size : attr->guardsize;
543}
544
545int
546pthread_attr_getguardsize(const pthread_attr_t *attr, size_t *guardsize)
547{
548 int ret = EINVAL;
549 if (attr->sig == _PTHREAD_ATTR_SIG) {
550 *guardsize = _pthread_attr_guardsize(attr);
551 ret = 0;
552 }
553 return ret;
554}
555
556int
557pthread_attr_setcpupercent_np(pthread_attr_t *attr, int percent,
558 unsigned long refillms)
559{
560 int ret = EINVAL;
561 if (attr->sig == _PTHREAD_ATTR_SIG && percent < UINT8_MAX &&
562 refillms < _PTHREAD_ATTR_REFILLMS_MAX && attr->policyset &&
563 _PTHREAD_POLICY_IS_FIXEDPRI(attr->policy)) {
564 attr->cpupercent = percent;
565 attr->refillms = (uint32_t)(refillms & 0x00ffffff);
566 attr->cpupercentset = 1;
567 ret = 0;
568 }
569 return ret;
570}
571
572#pragma mark pthread lifetime
573
574// Allocate a thread structure, stack and guard page.
575//
576// The thread structure may optionally be placed in the same allocation as the
577// stack, residing above the top of the stack. This cannot be done if a
578// custom stack address is provided.
579//
580// Similarly the guard page cannot be allocated if a custom stack address is
581// provided.
582//
583// The allocated thread structure is initialized with values that indicate how
584// it should be freed.
585
586static pthread_t
587_pthread_allocate(const pthread_attr_t *attrs, void **stack)
588{
589 mach_vm_address_t allocaddr = __pthread_stack_hint;
97e5da41 590 size_t allocsize, guardsize, stacksize, pthreadoff;
214d78a2
A
591 kern_return_t kr;
592 pthread_t t;
593
594 PTHREAD_ASSERT(attrs->stacksize == 0 ||
595 attrs->stacksize >= PTHREAD_STACK_MIN);
596
597 // Allocate a pthread structure if necessary
598
599 if (attrs->stackaddr != NULL) {
600 PTHREAD_ASSERT(((uintptr_t)attrs->stackaddr % vm_page_size) == 0);
601 allocsize = PTHREAD_SIZE;
602 guardsize = 0;
97e5da41 603 pthreadoff = 0;
214d78a2
A
604 // <rdar://problem/42588315> if the attrs struct specifies a custom
605 // stack address but not a custom size, using ->stacksize here instead
606 // of _pthread_attr_stacksize stores stacksize as zero, indicating
607 // that the stack size is unknown.
608 stacksize = attrs->stacksize;
609 } else {
610 guardsize = _pthread_attr_guardsize(attrs);
611 stacksize = _pthread_attr_stacksize(attrs) + PTHREAD_T_OFFSET;
97e5da41
A
612 pthreadoff = stacksize + guardsize;
613 allocsize = pthreadoff + PTHREAD_SIZE;
214d78a2
A
614 allocsize = mach_vm_round_page(allocsize);
615 }
616
617 kr = mach_vm_map(mach_task_self(), &allocaddr, allocsize, vm_page_size - 1,
618 VM_MAKE_TAG(VM_MEMORY_STACK)| VM_FLAGS_ANYWHERE, MEMORY_OBJECT_NULL,
619 0, FALSE, VM_PROT_DEFAULT, VM_PROT_ALL, VM_INHERIT_DEFAULT);
620
621 if (kr != KERN_SUCCESS) {
622 kr = mach_vm_allocate(mach_task_self(), &allocaddr, allocsize,
623 VM_MAKE_TAG(VM_MEMORY_STACK)| VM_FLAGS_ANYWHERE);
624 }
625 if (kr != KERN_SUCCESS) {
626 *stack = NULL;
627 return NULL;
628 }
629
630 // The stack grows down.
631 // Set the guard page at the lowest address of the
632 // newly allocated stack. Return the highest address
633 // of the stack.
634 if (guardsize) {
635 (void)mach_vm_protect(mach_task_self(), allocaddr, guardsize,
636 FALSE, VM_PROT_NONE);
637 }
638
639 // Thread structure resides at the top of the stack (when using a
640 // custom stack, allocsize == PTHREAD_SIZE, so places the pthread_t
641 // at allocaddr).
97e5da41 642 t = (pthread_t)(allocaddr + pthreadoff);
214d78a2
A
643 if (attrs->stackaddr) {
644 *stack = attrs->stackaddr;
645 } else {
646 *stack = t;
647 }
648
649 _pthread_struct_init(t, attrs, *stack, stacksize, allocaddr, allocsize);
650 return t;
651}
652
653PTHREAD_NOINLINE
654void
655_pthread_deallocate(pthread_t t, bool from_mach_thread)
656{
657 kern_return_t ret;
658
659 // Don't free the main thread.
660 if (t != main_thread()) {
661 if (!from_mach_thread) { // see __pthread_add_thread
662 _pthread_introspection_thread_destroy(t);
663 }
664 ret = mach_vm_deallocate(mach_task_self(), t->freeaddr, t->freesize);
665 PTHREAD_ASSERT(ret == KERN_SUCCESS);
666 }
f1a1da6c
A
667}
668
214d78a2
A
669#pragma clang diagnostic push
670#pragma clang diagnostic ignored "-Wreturn-stack-address"
671
672PTHREAD_NOINLINE
673static void*
674_pthread_current_stack_address(void)
f1a1da6c 675{
214d78a2
A
676 int a;
677 return &a;
f1a1da6c
A
678}
679
214d78a2
A
680#pragma clang diagnostic pop
681
682void
683_pthread_joiner_wake(pthread_t thread)
f1a1da6c 684{
214d78a2
A
685 uint32_t *exit_gate = &thread->tl_exit_gate;
686
687 for (;;) {
688 int ret = __ulock_wake(UL_UNFAIR_LOCK | ULF_NO_ERRNO, exit_gate, 0);
689 if (ret == 0 || ret == -ENOENT) {
690 return;
691 }
692 if (ret != -EINTR) {
693 PTHREAD_INTERNAL_CRASH(-ret, "pthread_join() wake failure");
694 }
f1a1da6c 695 }
f1a1da6c
A
696}
697
214d78a2
A
698// Terminates the thread if called from the currently running thread.
699PTHREAD_NORETURN PTHREAD_NOINLINE PTHREAD_NOT_TAIL_CALLED
700static void
701_pthread_terminate(pthread_t t, void *exit_value)
f1a1da6c 702{
214d78a2
A
703 PTHREAD_ASSERT(t == pthread_self());
704
705 _pthread_introspection_thread_terminate(t);
706
707 uintptr_t freeaddr = (uintptr_t)t->freeaddr;
708 size_t freesize = t->freesize;
709 bool should_exit;
710
711 // the size of just the stack
712 size_t freesize_stack = t->freesize;
713
714 // We usually pass our structure+stack to bsdthread_terminate to free, but
715 // if we get told to keep the pthread_t structure around then we need to
716 // adjust the free size and addr in the pthread_t to just refer to the
717 // structure and not the stack. If we do end up deallocating the
718 // structure, this is useless work since no one can read the result, but we
719 // can't do it after the call to pthread_remove_thread because it isn't
720 // safe to dereference t after that.
721 if ((void*)t > t->freeaddr && (void*)t < t->freeaddr + t->freesize){
722 // Check to ensure the pthread structure itself is part of the
723 // allocation described by freeaddr/freesize, in which case we split and
724 // only deallocate the area below the pthread structure. In the event of a
725 // custom stack, the freeaddr/size will be the pthread structure itself, in
726 // which case we shouldn't free anything (the final else case).
727 freesize_stack = trunc_page((uintptr_t)t - (uintptr_t)freeaddr);
728
729 // describe just the remainder for deallocation when the pthread_t goes away
730 t->freeaddr += freesize_stack;
731 t->freesize -= freesize_stack;
732 } else if (t == main_thread()) {
733 freeaddr = t->stackaddr - pthread_get_stacksize_np(t);
734 uintptr_t stackborder = trunc_page((uintptr_t)_pthread_current_stack_address());
735 freesize_stack = stackborder - freeaddr;
736 } else {
737 freesize_stack = 0;
738 }
739
740 mach_port_t kport = _pthread_kernel_thread(t);
741 bool keep_thread_struct = false, needs_wake = false;
742 semaphore_t custom_stack_sema = MACH_PORT_NULL;
743
744 _pthread_dealloc_special_reply_port(t);
745 _pthread_dealloc_reply_port(t);
746
747 _PTHREAD_LOCK(_pthread_list_lock);
748
749 // This piece of code interacts with pthread_join. It will always:
750 // - set tl_exit_gate to MACH_PORT_DEAD (thread exited)
751 // - set tl_exit_value to the value passed to pthread_exit()
752 // - decrement _pthread_count, so that we can exit the process when all
753 // threads exited even if not all of them were joined.
754 t->tl_exit_gate = MACH_PORT_DEAD;
755 t->tl_exit_value = exit_value;
756 should_exit = (--_pthread_count <= 0);
757
758 // If we see a joiner, we prepost that the join has to succeed,
759 // and the joiner is committed to finish (even if it was canceled)
760 if (t->tl_join_ctx) {
761 custom_stack_sema = _pthread_joiner_prepost_wake(t); // unsets tl_joinable
762 needs_wake = true;
763 }
764
765 // Joinable threads that have no joiner yet are kept on the thread list
766 // so that pthread_join() can later discover the thread when it is joined,
767 // and will have to do the pthread_t cleanup.
768 if (t->tl_joinable) {
769 t->tl_joiner_cleans_up = keep_thread_struct = true;
770 } else {
771 TAILQ_REMOVE(&__pthread_head, t, tl_plist);
772 }
773
774 _PTHREAD_UNLOCK(_pthread_list_lock);
775
776 if (needs_wake) {
777 // When we found a waiter, we want to drop the very contended list lock
778 // before we do the syscall in _pthread_joiner_wake(). Then, we decide
779 // who gets to cleanup the pthread_t between the joiner and the exiting
780 // thread:
781 // - the joiner tries to set tl_join_ctx to NULL
782 // - the exiting thread tries to set tl_joiner_cleans_up to true
783 // Whoever does it first commits the other guy to cleanup the pthread_t
784 _pthread_joiner_wake(t);
785 _PTHREAD_LOCK(_pthread_list_lock);
786 if (t->tl_join_ctx) {
787 t->tl_joiner_cleans_up = true;
788 keep_thread_struct = true;
f1a1da6c 789 }
214d78a2 790 _PTHREAD_UNLOCK(_pthread_list_lock);
f1a1da6c 791 }
f1a1da6c 792
214d78a2
A
793 //
794 // /!\ dereferencing `t` past this point is not safe /!\
795 //
796
797 if (keep_thread_struct || t == main_thread()) {
798 // Use the adjusted freesize of just the stack that we computed above.
799 freesize = freesize_stack;
800 } else {
801 _pthread_introspection_thread_destroy(t);
f1a1da6c 802 }
214d78a2
A
803
804 // Check if there is nothing to free because the thread has a custom
805 // stack allocation and is joinable.
806 if (freesize == 0) {
807 freeaddr = 0;
808 }
809 if (should_exit) {
810 exitf(0);
811 }
812 __bsdthread_terminate((void *)freeaddr, freesize, kport, custom_stack_sema);
813 PTHREAD_INTERNAL_CRASH(t, "thread didn't terminate");
814}
815
816PTHREAD_NORETURN
817static void
818_pthread_terminate_invoke(pthread_t t, void *exit_value)
819{
820#if PTHREAD_T_OFFSET
821 void *p = NULL;
822 // <rdar://problem/25688492> During pthread termination there is a race
823 // between pthread_join and pthread_terminate; if the joiner is responsible
824 // for cleaning up the pthread_t struct, then it may destroy some part of the
825 // stack with it on 16k OSes. So that this doesn't cause _pthread_terminate()
826 // to crash because its stack has been removed from under its feet, just make
827 // sure termination happens in a part of the stack that is not on the same
828 // page as the pthread_t.
829 if (trunc_page((uintptr_t)__builtin_frame_address(0)) ==
830 trunc_page((uintptr_t)t)) {
831 p = alloca(PTHREAD_T_OFFSET);
832 }
833 // And this __asm__ volatile is needed to stop the compiler from optimising
834 // away the alloca() completely.
835 __asm__ volatile ("" : : "r"(p) );
836#endif
837 _pthread_terminate(t, exit_value);
f1a1da6c
A
838}
839
214d78a2 840#pragma mark pthread start / body
f1a1da6c
A
841
842/*
843 * Create and start execution of a new thread.
844 */
a0619f9c 845PTHREAD_NOINLINE PTHREAD_NORETURN
f1a1da6c 846static void
2546420a 847_pthread_body(pthread_t self, bool needs_tsd_base_set)
f1a1da6c 848{
2546420a 849 _pthread_set_self_internal(self, needs_tsd_base_set);
214d78a2
A
850 __pthread_started_thread(self);
851 _pthread_exit(self, (self->fun)(self->arg));
f1a1da6c
A
852}
853
a0619f9c 854PTHREAD_NORETURN
f1a1da6c 855void
214d78a2
A
856_pthread_start(pthread_t self, mach_port_t kport,
857 __unused void *(*fun)(void *), __unused void *arg,
858 __unused size_t stacksize, unsigned int pflags)
859{
2546420a
A
860 bool thread_tsd_bsd_set = (bool)(pflags & PTHREAD_START_TSD_BASE_SET);
861
214d78a2
A
862 if (os_unlikely(pflags & PTHREAD_START_SUSPENDED)) {
863 PTHREAD_INTERNAL_CRASH(0,
864 "kernel without PTHREAD_START_SUSPENDED support");
865 }
a0619f9c
A
866#if DEBUG
867 PTHREAD_ASSERT(MACH_PORT_VALID(kport));
868 PTHREAD_ASSERT(_pthread_kernel_thread(self) == kport);
869#endif
870 // will mark the thread initialized
871 _pthread_markcancel_if_canceled(self, kport);
872
2546420a 873 _pthread_body(self, !thread_tsd_bsd_set);
f1a1da6c
A
874}
875
a0619f9c
A
876PTHREAD_ALWAYS_INLINE
877static inline void
214d78a2
A
878_pthread_struct_init(pthread_t t, const pthread_attr_t *attrs,
879 void *stackaddr, size_t stacksize, void *freeaddr, size_t freesize)
f1a1da6c 880{
2546420a
A
881#if DEBUG
882 PTHREAD_ASSERT(t->sig != _PTHREAD_SIG);
883#endif
884
f1a1da6c
A
885 t->sig = _PTHREAD_SIG;
886 t->tsd[_PTHREAD_TSD_SLOT_PTHREAD_SELF] = t;
214d78a2
A
887 t->tsd[_PTHREAD_TSD_SLOT_ERRNO] = &t->err_no;
888 if (attrs->schedset == 0) {
889 t->tsd[_PTHREAD_TSD_SLOT_PTHREAD_QOS_CLASS] = attrs->qosclass;
890 } else {
891 t->tsd[_PTHREAD_TSD_SLOT_PTHREAD_QOS_CLASS] =
892 _pthread_unspecified_priority();
893 }
894 t->tsd[_PTHREAD_TSD_SLOT_PTR_MUNGE] = _pthread_ptr_munge_token;
895 t->tl_has_custom_stack = (attrs->stackaddr != NULL);
896
2546420a 897 _PTHREAD_LOCK_INIT(t->lock);
964d3577 898
964d3577 899 t->stackaddr = stackaddr;
214d78a2 900 t->stackbottom = stackaddr - stacksize;
2546420a
A
901 t->freeaddr = freeaddr;
902 t->freesize = freesize;
964d3577 903
214d78a2
A
904 t->guardsize = _pthread_attr_guardsize(attrs);
905 t->tl_joinable = (attrs->detached == PTHREAD_CREATE_JOINABLE);
f1a1da6c 906 t->inherit = attrs->inherit;
214d78a2 907 t->tl_policy = attrs->policy;
f1a1da6c 908 t->schedset = attrs->schedset;
214d78a2 909 _pthread_attr_get_schedparam(attrs, &t->tl_param);
f1a1da6c
A
910 t->cancel_state = PTHREAD_CANCEL_ENABLE | PTHREAD_CANCEL_DEFERRED;
911}
912
214d78a2
A
913#pragma mark pthread public interface
914
f1a1da6c
A
/* Need to deprecate this in future */
int
_pthread_is_threaded(void)
{
	// Legacy internal spelling of pthread_is_threaded_np() below.
	return __is_threaded;
}

/* Non-portable public API to learn whether this process has (or had) at least
 * one thread apart from the main thread. There could be a race if there is a
 * thread in the process of creation at the time of the call. It does not tell
 * whether there is more than one thread at this point in time.
 */
int
pthread_is_threaded_np(void)
{
	return __is_threaded;
}
932
a0619f9c
A
933
PTHREAD_NOEXPORT_VARIANT
mach_port_t
pthread_mach_thread_np(pthread_t t)
{
	// Return the Mach thread port backing t, or MACH_PORT_NULL when t is
	// not a currently valid thread (the validity result itself is ignored;
	// kport stays MACH_PORT_NULL on failure).
	mach_port_t kport = MACH_PORT_NULL;
	(void)_pthread_is_valid(t, &kport);
	return kport;
}
942
PTHREAD_NOEXPORT_VARIANT
pthread_t
pthread_from_mach_thread_np(mach_port_t kernel_thread)
{
	// Reverse lookup: map a Mach thread port back to its pthread_t by
	// scanning the global thread list. Returns NULL when no match exists.
	struct _pthread *p = NULL;

	/* No need to wait as mach port is already known */
	_PTHREAD_LOCK(_pthread_list_lock);

	TAILQ_FOREACH(p, &__pthread_head, tl_plist) {
		if (_pthread_kernel_thread(p) == kernel_thread) {
			break;
		}
	}

	_PTHREAD_UNLOCK(_pthread_list_lock);

	// TAILQ_FOREACH leaves p == NULL when the list was exhausted.
	return p;
}
962
a0619f9c 963PTHREAD_NOEXPORT_VARIANT
f1a1da6c
A
964size_t
965pthread_get_stacksize_np(pthread_t t)
966{
f1a1da6c 967 size_t size = 0;
214d78a2 968 size_t stacksize = t->stackaddr - t->stackbottom;
f1a1da6c
A
969
970 if (t == NULL) {
971 return ESRCH; // XXX bug?
972 }
2546420a
A
973
974#if !defined(__arm__) && !defined(__arm64__)
975 // The default rlimit based allocations will be provided with a stacksize
976 // of the current limit and a freesize of the max. However, custom
977 // allocations will just have the guard page to free. If we aren't in the
978 // latter case, call into rlimit to determine the current stack size. In
979 // the event that the current limit == max limit then we'll fall down the
980 // fast path, but since it's unlikely that the limit is going to be lowered
981 // after it's been change to the max, we should be fine.
982 //
983 // Of course, on arm rlim_cur == rlim_max and there's only the one guard
984 // page. So, we can skip all this there.
214d78a2 985 if (t == main_thread() && stacksize + vm_page_size != t->freesize) {
2546420a
A
986 // We want to call getrlimit() just once, as it's relatively expensive
987 static size_t rlimit_stack;
a0619f9c 988
2546420a
A
989 if (rlimit_stack == 0) {
990 struct rlimit limit;
991 int ret = getrlimit(RLIMIT_STACK, &limit);
a0619f9c 992
2546420a
A
993 if (ret == 0) {
994 rlimit_stack = (size_t) limit.rlim_cur;
995 }
996 }
a0619f9c 997
2546420a 998 if (rlimit_stack == 0 || rlimit_stack > t->freesize) {
214d78a2 999 return stacksize;
2546420a
A
1000 } else {
1001 return rlimit_stack;
1002 }
1003 }
1004#endif /* !defined(__arm__) && !defined(__arm64__) */
1005
214d78a2
A
1006 if (t == pthread_self() || t == main_thread()) {
1007 size = stacksize;
1008 goto out;
f1a1da6c
A
1009 }
1010
214d78a2
A
1011 if (_pthread_validate_thread_and_list_lock(t)) {
1012 size = stacksize;
1013 _PTHREAD_UNLOCK(_pthread_list_lock);
f1a1da6c 1014 } else {
a0619f9c 1015 size = ESRCH; // XXX bug?
f1a1da6c
A
1016 }
1017
214d78a2
A
1018out:
1019 // <rdar://problem/42588315> binary compatibility issues force us to return
1020 // DEFAULT_STACK_SIZE here when we do not know the size of the stack
1021 return size ? size : DEFAULT_STACK_SIZE;
f1a1da6c
A
1022}
1023
PTHREAD_NOEXPORT_VARIANT
void *
pthread_get_stackaddr_np(pthread_t t)
{
	// Return the base (top) address of t's stack. An ESRCH value smuggled
	// through the pointer return is preserved for binary compatibility.
	// since the main thread will not get de-allocated from underneath us
	if (t == pthread_self() || t == main_thread()) {
		return t->stackaddr;
	}

	if (!_pthread_validate_thread_and_list_lock(t)) {
		return (void *)(uintptr_t)ESRCH; // XXX bug?
	}

	// Copy while holding the list lock so t cannot be torn down under us.
	void *addr = t->stackaddr;
	_PTHREAD_UNLOCK(_pthread_list_lock);
	return addr;
}
1041
a0619f9c 1042
f1a1da6c
A
1043static mach_port_t
1044_pthread_reply_port(pthread_t t)
1045{
1046 void *p;
1047 if (t == NULL) {
1048 p = _pthread_getspecific_direct(_PTHREAD_TSD_SLOT_MIG_REPLY);
1049 } else {
1050 p = t->tsd[_PTHREAD_TSD_SLOT_MIG_REPLY];
1051 }
1052 return (mach_port_t)(uintptr_t)p;
1053}
1054
1055static void
1056_pthread_set_reply_port(pthread_t t, mach_port_t reply_port)
1057{
1058 void *p = (void *)(uintptr_t)reply_port;
1059 if (t == NULL) {
1060 _pthread_setspecific_direct(_PTHREAD_TSD_SLOT_MIG_REPLY, p);
1061 } else {
1062 t->tsd[_PTHREAD_TSD_SLOT_MIG_REPLY] = p;
1063 }
1064}
1065
static void
_pthread_dealloc_reply_port(pthread_t t)
{
	// Release t's cached MIG reply port, if one was ever allocated.
	mach_port_t reply_port = _pthread_reply_port(t);
	if (reply_port != MACH_PORT_NULL) {
		mig_dealloc_reply_port(reply_port);
	}
}
1074
a0619f9c
A
static mach_port_t
_pthread_special_reply_port(pthread_t t)
{
	// Read the thread's special (IPC-priority-inheritance) reply port from
	// TSD; NULL selects the calling thread's fast-path slot.
	void *p;
	if (t == NULL) {
		p = _pthread_getspecific_direct(_PTHREAD_TSD_SLOT_MACH_SPECIAL_REPLY);
	} else {
		p = t->tsd[_PTHREAD_TSD_SLOT_MACH_SPECIAL_REPLY];
	}
	return (mach_port_t)(uintptr_t)p;
}

static void
_pthread_dealloc_special_reply_port(pthread_t t)
{
	// Destroy the special reply port (if any) when the thread dies.
	mach_port_t special_reply_port = _pthread_special_reply_port(t);
	if (special_reply_port != MACH_PORT_NULL) {
		thread_destruct_special_reply_port(special_reply_port,
				THREAD_SPECIAL_REPLY_PORT_ALL);
	}
}
1096
f1a1da6c
A
pthread_t
pthread_main_thread_np(void)
{
	// Handle of the process's main thread (never deallocated).
	return main_thread();
}

/* returns non-zero if the current thread is the main thread */
int
pthread_main_np(void)
{
	return pthread_self() == main_thread();
}
1109
1110
214d78a2
A
/*
 * if we are passed in a pthread_t that is NULL, then we return the current
 * thread's thread_id. So folks don't have to call pthread_self, in addition to
 * us doing it, if they just want their thread_id.
 */
PTHREAD_NOEXPORT_VARIANT
int
pthread_threadid_np(pthread_t thread, uint64_t *thread_id)
{
	int res = 0;
	pthread_t self = pthread_self();

	if (thread_id == NULL) {
		return EINVAL;
	}

	if (thread == NULL || thread == self) {
		// Fast path: our own id is always valid, no locking needed.
		*thread_id = self->thread_id;
	} else if (!_pthread_validate_thread_and_list_lock(thread)) {
		res = ESRCH;
	} else {
		// NOTE(review): thread_id appears to be populated when the thread
		// starts running (see _pthread_set_self_internal); 0 presumably
		// means it has not run yet — confirm.
		if (thread->thread_id == 0) {
			res = EINVAL;
		} else {
			*thread_id = thread->thread_id;
		}
		// Validation succeeded above with the list lock held; release it.
		_PTHREAD_UNLOCK(_pthread_list_lock);
	}
	return res;
}
1141
PTHREAD_NOEXPORT_VARIANT
int
pthread_getname_np(pthread_t thread, char *threadname, size_t len)
{
	// Copy thread's name into threadname (at most len bytes, always
	// NUL-terminated by strlcpy). The calling thread needs no locking.
	if (thread == pthread_self()) {
		strlcpy(threadname, thread->pthread_name, len);
		return 0;
	}

	if (!_pthread_validate_thread_and_list_lock(thread)) {
		return ESRCH;
	}

	// Copy under the list lock so the target can't be torn down mid-read.
	strlcpy(threadname, thread->pthread_name, len);
	_PTHREAD_UNLOCK(_pthread_list_lock);
	return 0;
}
1159
a0619f9c 1160
f1a1da6c
A
int
pthread_setname_np(const char *name)
{
	// Set the calling thread's name. The kernel is told first via
	// __proc_info; only on success is the local cached copy updated.
	int res;
	pthread_t self = pthread_self();

	size_t len = 0;
	if (name != NULL) {
		len = strlen(name);
	}

	/* prototype is in pthread_internals.h */
	// NOTE(review): 5 and 2 are presumably PROC_INFO_CALL_SETCONTROL and
	// PROC_SELFSET_THREADNAME — confirm against sys/proc_info.h.
	res = __proc_info(5, getpid(), 2, (uint64_t)0, (void*)name, (int)len);
	if (res == 0) {
		if (len > 0) {
			strlcpy(self->pthread_name, name, MAXTHREADNAMESIZE);
		} else {
			// NULL/empty name clears the cached name entirely.
			bzero(self->pthread_name, MAXTHREADNAMESIZE);
		}
	}
	return res;

}
1184
PTHREAD_ALWAYS_INLINE
static inline void
__pthread_add_thread(pthread_t t, bool from_mach_thread)
{
	// Link t into the global thread list. Callers on a bare Mach thread
	// must use the *_FROM_MACH_THREAD lock variants (no pthread TSD yet).
	if (from_mach_thread) {
		_PTHREAD_LOCK_FROM_MACH_THREAD(_pthread_list_lock);
	} else {
		_PTHREAD_LOCK(_pthread_list_lock);
	}

	TAILQ_INSERT_TAIL(&__pthread_head, t, tl_plist);
	_pthread_count++;

	if (from_mach_thread) {
		_PTHREAD_UNLOCK_FROM_MACH_THREAD(_pthread_list_lock);
	} else {
		_PTHREAD_UNLOCK(_pthread_list_lock);
	}

	if (!from_mach_thread) {
		// PR-26275485: Mach threads will likely crash trying to run
		// introspection code. Since the fall out from the introspection
		// code not seeing the injected thread is likely less than crashing
		// in the introspection code, just don't make the call.
		_pthread_introspection_thread_create(t);
	}
}
f1a1da6c 1212
214d78a2
A
PTHREAD_ALWAYS_INLINE
static inline void
__pthread_undo_add_thread(pthread_t t, bool from_mach_thread)
{
	// Roll back __pthread_add_thread() when thread creation fails after
	// the list insertion (e.g. __bsdthread_create error). Lock variant
	// must mirror the one used on insertion.
	if (from_mach_thread) {
		_PTHREAD_LOCK_FROM_MACH_THREAD(_pthread_list_lock);
	} else {
		_PTHREAD_LOCK(_pthread_list_lock);
	}

	TAILQ_REMOVE(&__pthread_head, t, tl_plist);
	_pthread_count--;

	if (from_mach_thread) {
		_PTHREAD_UNLOCK_FROM_MACH_THREAD(_pthread_list_lock);
	} else {
		_PTHREAD_UNLOCK(_pthread_list_lock);
	}
}
1232
PTHREAD_ALWAYS_INLINE
static inline void
__pthread_started_thread(pthread_t t)
{
	// Sanity-check the kernel thread port on first run, then notify the
	// introspection hooks that the thread has started executing.
	mach_port_t kport = _pthread_kernel_thread(t);
	if (os_slowpath(!MACH_PORT_VALID(kport))) {
		PTHREAD_CLIENT_CRASH(kport,
				"Unable to allocate thread port, possible port leak");
	}
	_pthread_introspection_thread_start(t);
}
1244
214d78a2
A
// Creation-mode flags for _pthread_create() (not the kernel flags below).
#define _PTHREAD_CREATE_NONE 0x0
#define _PTHREAD_CREATE_FROM_MACH_THREAD 0x1
#define _PTHREAD_CREATE_SUSPENDED 0x2

// Common implementation behind pthread_create(), pthread_create_suspended_np()
// and pthread_create_from_mach_thread(). Allocates the thread + stack, links
// it into the thread list, then asks the kernel to start it via
// __bsdthread_create(). Returns 0, EINVAL (bad attrs) or EAGAIN.
static int
_pthread_create(pthread_t *thread, const pthread_attr_t *attrs,
		void *(*start_routine)(void *), void *arg, unsigned int create_flags)
{
	pthread_t t = NULL;
	void *stack = NULL;
	bool from_mach_thread = (create_flags & _PTHREAD_CREATE_FROM_MACH_THREAD);

	if (attrs == NULL) {
		attrs = &_pthread_attr_default;
	} else if (attrs->sig != _PTHREAD_ATTR_SIG) {
		return EINVAL;
	}

	// Translate attribute state into kernel PTHREAD_START_* flags: either
	// an explicit sched policy/priority, or a QoS class, never both.
	unsigned int flags = PTHREAD_START_CUSTOM;
	if (attrs->schedset != 0) {
		struct sched_param p;
		_pthread_attr_get_schedparam(attrs, &p);
		flags |= PTHREAD_START_SETSCHED;
		flags |= ((attrs->policy & PTHREAD_START_POLICY_MASK) << PTHREAD_START_POLICY_BITSHIFT);
		flags |= (p.sched_priority & PTHREAD_START_IMPORTANCE_MASK);
	} else if (attrs->qosclass != 0) {
		flags |= PTHREAD_START_QOSCLASS;
		flags |= (attrs->qosclass & PTHREAD_START_QOSCLASS_MASK);
	}
	if (create_flags & _PTHREAD_CREATE_SUSPENDED) {
		flags |= PTHREAD_START_SUSPENDED;
	}

	__is_threaded = 1;

	t =_pthread_allocate(attrs, &stack);
	if (t == NULL) {
		return EAGAIN;
	}

	// Must be visible on the list before the kernel starts the thread.
	t->arg = arg;
	t->fun = start_routine;
	__pthread_add_thread(t, from_mach_thread);

	if (__bsdthread_create(start_routine, arg, stack, t, flags) ==
			(pthread_t)-1) {
		if (errno == EMFILE) {
			PTHREAD_CLIENT_CRASH(0,
					"Unable to allocate thread port, possible port leak");
		}
		// Creation failed: unlink and free everything we set up above.
		__pthread_undo_add_thread(t, from_mach_thread);
		_pthread_deallocate(t, from_mach_thread);
		return EAGAIN;
	}

	if (create_flags & _PTHREAD_CREATE_SUSPENDED) {
		// A suspended thread can't mark itself; do it on its behalf.
		_pthread_markcancel_if_canceled(t, _pthread_kernel_thread(t));
	}

	// n.b. if a thread is created detached and exits, t will be invalid
	*thread = t;
	return 0;
}
1308
2546420a 1309int
214d78a2
A
1310pthread_create(pthread_t *thread, const pthread_attr_t *attr,
1311 void *(*start_routine)(void *), void *arg)
2546420a 1312{
214d78a2
A
1313 unsigned int flags = _PTHREAD_CREATE_NONE;
1314 return _pthread_create(thread, attr, start_routine, arg, flags);
2546420a
A
1315}
1316
1317int
214d78a2
A
1318pthread_create_from_mach_thread(pthread_t *thread, const pthread_attr_t *attr,
1319 void *(*start_routine)(void *), void *arg)
2546420a 1320{
214d78a2
A
1321 unsigned int flags = _PTHREAD_CREATE_FROM_MACH_THREAD;
1322 return _pthread_create(thread, attr, start_routine, arg, flags);
2546420a
A
1323}
1324
214d78a2
A
#if !defined(__OPEN_SOURCE__) && TARGET_OS_OSX // 40703288
/* Functions defined in machine-dependent files. */
PTHREAD_NOEXPORT void _pthread_setup_suspended(pthread_t th, void (*f)(pthread_t), void *sp);

// Entry point for threads created by _pthread_create_suspended_np(): finish
// self-initialization, announce the start, run the user routine, then exit.
PTHREAD_NORETURN
static void
_pthread_suspended_body(pthread_t self)
{
	_pthread_set_self(self);
	__pthread_started_thread(self);
	_pthread_exit(self, (self->fun)(self->arg));
}
1337
214d78a2
A
// Legacy suspended-creation path using thread_create() directly instead of
// __bsdthread_create(); only used for chrooted XBS builds (see caller).
// The new thread is left suspended until the caller resumes its Mach port.
static int
_pthread_create_suspended_np(pthread_t *thread, const pthread_attr_t *attrs,
		void *(*start_routine)(void *), void *arg)
{
	pthread_t t;
	void *stack;
	mach_port_t kernel_thread = MACH_PORT_NULL;

	if (attrs == NULL) {
		attrs = &_pthread_attr_default;
	} else if (attrs->sig != _PTHREAD_ATTR_SIG) {
		return EINVAL;
	}

	t = _pthread_allocate(attrs, &stack);
	if (t == NULL) {
		return EAGAIN;
	}

	// Create the kernel thread ourselves (suspended by default).
	if (thread_create(mach_task_self(), &kernel_thread) != KERN_SUCCESS) {
		_pthread_deallocate(t, false);
		return EAGAIN;
	}

	_pthread_set_kernel_thread(t, kernel_thread);
	// Best-effort: apply the scheduling parameters picked up from attrs.
	(void)pthread_setschedparam_internal(t, kernel_thread,
			t->tl_policy, &t->tl_param);

	__is_threaded = 1;

	t->arg = arg;
	t->fun = start_routine;
	t->cancel_state |= _PTHREAD_CANCEL_INITIALIZED;
	__pthread_add_thread(t, false);

	// Set up a suspended thread.
	_pthread_setup_suspended(t, _pthread_suspended_body, stack);
	*thread = t;
	return 0;
}
1378#endif // !defined(__OPEN_SOURCE__) && TARGET_OS_OSX
1379
int
pthread_create_suspended_np(pthread_t *thread, const pthread_attr_t *attr,
		void *(*start_routine)(void *), void *arg)
{
	// Create a thread that stays suspended until resumed by the caller.
#if !defined(__OPEN_SOURCE__) && TARGET_OS_OSX // 40703288
	// Chrooted XBS builds use the legacy thread_create()-based path.
	if (_os_xbs_chrooted) {
		return _pthread_create_suspended_np(thread, attr, start_routine, arg);
	}
#endif
	unsigned int flags = _PTHREAD_CREATE_SUSPENDED;
	return _pthread_create(thread, attr, start_routine, arg, flags);
}
1392
a0619f9c
A
1393
PTHREAD_NOEXPORT_VARIANT
int
pthread_detach(pthread_t thread)
{
	// Mark thread so its resources are reclaimed on exit without a join.
	// Decide the follow-up action under the list lock, perform it after.
	int res = 0;
	bool join = false, wake = false;

	if (!_pthread_validate_thread_and_list_lock(thread)) {
		return ESRCH;
	}

	if (!thread->tl_joinable) {
		// Already detached (or never joinable).
		res = EINVAL;
	} else if (thread->tl_exit_gate == MACH_PORT_DEAD) {
		// Join the thread if it's already exited.
		join = true;
	} else {
		thread->tl_joinable = false; // _pthread_joiner_prepost_wake uses this
		if (thread->tl_join_ctx) {
			// Someone is blocked in pthread_join(); hand them the result
			// and wake them outside the lock.
			(void)_pthread_joiner_prepost_wake(thread);
			wake = true;
		}
	}
	_PTHREAD_UNLOCK(_pthread_list_lock);

	if (join) {
		pthread_join(thread, NULL);
	} else if (wake) {
		_pthread_joiner_wake(thread);
	}
	return res;
}
1426
a0619f9c
A
PTHREAD_NOEXPORT_VARIANT
int
pthread_kill(pthread_t th, int sig)
{
	// Deliver signal sig to thread th via its kernel port.
	if (sig < 0 || sig > NSIG) {
		return EINVAL;
	}

	mach_port_t kport = MACH_PORT_NULL;
	if (!_pthread_is_valid(th, &kport)) {
		return ESRCH; // Not a valid thread.
	}

	// Don't signal workqueue threads.
	if (th->wqthread != 0 && th->wqkillset == 0) {
		return ENOTSUP;
	}

	int ret = __pthread_kill(kport, sig);

	// Syscall convention: -1 with errno set; convert to a returned errno.
	if (ret == -1) {
		ret = errno;
	}
	return ret;
}
1452
a0619f9c
A
PTHREAD_NOEXPORT_VARIANT
int
__pthread_workqueue_setkill(int enable)
{
	// Opt the calling workqueue thread in or out of pthread_kill()
	// delivery (see the wqkillset check in pthread_kill above).
	pthread_t self = pthread_self();

	_PTHREAD_LOCK(self->lock);
	self->wqkillset = enable ? 1 : 0;
	_PTHREAD_UNLOCK(self->lock);

	return 0;
}
1465
f1a1da6c
A
1466
/* For compatibility... */

pthread_t
_pthread_self(void)
{
	// Legacy alias for pthread_self(), kept for old callers.
	return pthread_self();
}
1474
/*
 * Terminate a thread.
 */
extern int __disable_threadsignal(int);

// Common thread-termination path: runs cleanup handlers and TSD destructors,
// then hands the thread to the kernel for teardown. Never returns.
PTHREAD_NORETURN
static void
_pthread_exit(pthread_t self, void *exit_value)
{
	struct __darwin_pthread_handler_rec *handler;

	// Disable signal delivery while we clean up
	__disable_threadsignal(1);

	// Set cancel state to disable and type to deferred
	_pthread_setcancelstate_exit(self, exit_value);

	// Pop and run pthread_cleanup_push() handlers, newest first.
	while ((handler = self->__cleanup_stack) != 0) {
		(handler->__routine)(handler->__arg);
		self->__cleanup_stack = handler->__next;
	}
	_pthread_tsd_cleanup(self);

	// Clear per-thread semaphore cache
	os_put_cached_semaphore(SEMAPHORE_NULL);

	_pthread_terminate_invoke(self, exit_value);
}
1503
void
pthread_exit(void *exit_value)
{
	pthread_t self = pthread_self();
	// Workqueue (GCD) threads are not owned by their work items; exiting
	// one is a client bug and aborts deliberately.
	if (os_unlikely(self->wqthread)) {
		PTHREAD_CLIENT_CRASH(0, "pthread_exit() called from a thread "
				"not created by pthread_create()");
	}
	_pthread_exit(self, exit_value);
}
1514
a0619f9c
A
1515
PTHREAD_NOEXPORT_VARIANT
int
pthread_getschedparam(pthread_t thread, int *policy, struct sched_param *param)
{
	// Report the cached scheduling policy/parameters for thread; either
	// out-pointer may be NULL to skip that value.
	if (!_pthread_validate_thread_and_list_lock(thread)) {
		return ESRCH;
	}

	// Validation succeeded with the list lock held; copy and release.
	if (policy) *policy = thread->tl_policy;
	if (param) *param = thread->tl_param;
	_PTHREAD_UNLOCK(_pthread_list_lock);
	return 0;
}
1529
a0619f9c 1530
214d78a2 1531
a0619f9c
A
PTHREAD_ALWAYS_INLINE
static inline int
pthread_setschedparam_internal(pthread_t thread, mach_port_t kport, int policy,
		const struct sched_param *param)
{
	// Translate a POSIX scheduling policy + params into the matching Mach
	// thread_policy() base structure and apply it to kport.
	policy_base_data_t bases;
	policy_base_t base;
	mach_msg_type_number_t count;
	kern_return_t ret;

	switch (policy) {
	case SCHED_OTHER:
		bases.ts.base_priority = param->sched_priority;
		base = (policy_base_t)&bases.ts;
		count = POLICY_TIMESHARE_BASE_COUNT;
		break;
	case SCHED_FIFO:
		bases.fifo.base_priority = param->sched_priority;
		base = (policy_base_t)&bases.fifo;
		count = POLICY_FIFO_BASE_COUNT;
		break;
	case SCHED_RR:
		bases.rr.base_priority = param->sched_priority;
		/* quantum isn't public yet */
		bases.rr.quantum = param->quantum;
		base = (policy_base_t)&bases.rr;
		count = POLICY_RR_BASE_COUNT;
		break;
	default:
		return EINVAL;
	}
	// TRUE: set the policy limit as well as the base.
	ret = thread_policy(kport, policy, base, count, TRUE);
	return (ret != KERN_SUCCESS) ? EINVAL : 0;
}
1566
a0619f9c
A
PTHREAD_NOEXPORT_VARIANT
int
pthread_setschedparam(pthread_t t, int policy, const struct sched_param *param)
{
	// Apply scheduling policy/params to t's kernel thread, then cache the
	// new values in the pthread structure under the list lock.
	mach_port_t kport = MACH_PORT_NULL;
	int bypass = 1;

	// since the main thread will not get de-allocated from underneath us
	if (t == pthread_self() || t == main_thread()) {
		kport = _pthread_kernel_thread(t);
	} else {
		bypass = 0;
		if (!_pthread_is_valid(t, &kport)) {
			return ESRCH;
		}
	}

	int res = pthread_setschedparam_internal(t, kport, policy, param);
	if (res) return res;

	if (bypass) {
		// self/main can't disappear; take the lock directly.
		_PTHREAD_LOCK(_pthread_list_lock);
	} else if (!_pthread_validate_thread_and_list_lock(t)) {
		// Ensure the thread is still valid.
		return ESRCH;
	}

	t->tl_policy = policy;
	t->tl_param = *param;
	_PTHREAD_UNLOCK(_pthread_list_lock);
	return 0;
}
1599
a0619f9c 1600
f1a1da6c
A
int
sched_get_priority_min(int policy)
{
	// Policy is ignored; the range is centered on the host's default
	// timeshare priority obtained from host_info() in __pthread_init().
	return default_priority - 16;
}

int
sched_get_priority_max(int policy)
{
	// See sched_get_priority_min() above; policy is ignored here too.
	return default_priority + 16;
}
1612
int
pthread_equal(pthread_t t1, pthread_t t2)
{
	// Thread handles compare by identity; no structure contents are read.
	return (t1 == t2) ? 1 : 0;
}
1618
/*
 * Force LLVM not to optimise this to a call to __pthread_set_self, if it does
 * then _pthread_set_self won't be bound when secondary threads try and start up.
 */
PTHREAD_NOINLINE
void
_pthread_set_self(pthread_t p)
{
	// Bind pthread p to the calling kernel thread (thread id + TSD base).
#if VARIANT_DYLD
	// Inside dyld, a NULL argument means "the main thread"; use the
	// minimal bootstrap path below.
	if (os_likely(!p)) {
		return _pthread_set_self_dyld();
	}
#endif // VARIANT_DYLD
	_pthread_set_self_internal(p, true);
}
f1a1da6c 1634
214d78a2
A
#if VARIANT_DYLD
// _pthread_set_self_dyld is noinline+noexport to allow the option for
// static libsyscall to adopt this as the entry point from mach_init if
// desired
PTHREAD_NOINLINE PTHREAD_NOEXPORT
void
_pthread_set_self_dyld(void)
{
	pthread_t p = main_thread();
	p->thread_id = __thread_selfid();

	if (os_unlikely(p->thread_id == -1ull)) {
		PTHREAD_INTERNAL_CRASH(0, "failed to set thread_id");
	}

	// <rdar://problem/40930651> pthread self and the errno address are the
	// bare minimium TSD setup that dyld needs to actually function. Without
	// this, TSD access will fail and crash if it uses bits of Libc prior to
	// library initialization. __pthread_init will finish the initialization
	// during library init.
	p->tsd[_PTHREAD_TSD_SLOT_PTHREAD_SELF] = p;
	p->tsd[_PTHREAD_TSD_SLOT_ERRNO] = &p->err_no;
	_thread_set_tsd_base(&p->tsd[0]);
}
#endif // VARIANT_DYLD
1660
PTHREAD_ALWAYS_INLINE
static inline void
_pthread_set_self_internal(pthread_t p, bool needs_tsd_base_set)
{
	// Record the kernel's unique id for this thread in p.
	p->thread_id = __thread_selfid();

	if (os_unlikely(p->thread_id == -1ull)) {
		PTHREAD_INTERNAL_CRASH(0, "failed to set thread_id");
	}

	// Point the hardware TSD base at p's tsd array unless the caller
	// already did (e.g. threads started with thread_tsd_bsd_set).
	if (needs_tsd_base_set) {
		_thread_set_tsd_base(&p->tsd[0]);
	}
}
1675
a0619f9c
A
1676
// <rdar://problem/28984807> pthread_once should have an acquire barrier
PTHREAD_ALWAYS_INLINE
static inline void
_os_once_acquire(os_once_t *predicate, void *context, os_function_t function)
{
	// Fast path: an acquire load of the predicate lets completed-once
	// callers skip _os_once() entirely while still ordering against the
	// initializer's writes. ~0l is the "done" sentinel.
	if (OS_EXPECT(os_atomic_load(predicate, acquire), ~0l) != ~0l) {
		_os_once(predicate, context, function);
		OS_COMPILER_CAN_ASSUME(*predicate == ~0l);
	}
}
1687
f1a1da6c
A
// Context passed from pthread_once() to its os_once callback.
struct _pthread_once_context {
	pthread_once_t *pthread_once;	// the user's once control
	void (*routine)(void);		// the user's init routine
};

static void
__pthread_once_handler(void *context)
{
	struct _pthread_once_context *ctx = context;
	// If routine() is cancelled (or throws through), reset the once state
	// so a later caller can retry — required by POSIX pthread_once.
	pthread_cleanup_push((void*)__os_once_reset, &ctx->pthread_once->once);
	ctx->routine();
	pthread_cleanup_pop(0);
	// Mark completion; pthread_once() loops until it sees this signature.
	ctx->pthread_once->sig = _PTHREAD_ONCE_SIG;
}
1702
a0619f9c
A
PTHREAD_NOEXPORT_VARIANT
int
pthread_once(pthread_once_t *once_control, void (*init_routine)(void))
{
	struct _pthread_once_context ctx = { once_control, init_routine };
	// Retry while sig is still the init value: covers the case where the
	// initializer was cancelled and __os_once_reset rearmed the once.
	do {
		_os_once_acquire(&once_control->once, &ctx, __pthread_once_handler);
	} while (once_control->sig == _PTHREAD_ONCE_SIG_init);
	return 0;
}
1713
f1a1da6c
A
1714
int
pthread_getconcurrency(void)
{
	// Concurrency level is advisory only; it is stored but has no effect
	// on scheduling in this implementation.
	return pthread_concurrency;
}

int
pthread_setconcurrency(int new_level)
{
	if (new_level < 0) {
		return EINVAL;
	}
	pthread_concurrency = new_level;
	return 0;
}
1730
214d78a2
A
#if !defined(VARIANT_STATIC)
// libpthread's local malloc/free forward to the implementations pushed down
// by libSystem in __pthread_init() (pthread_funcs version >= 2). Before that
// handoff, malloc returns NULL and free is a no-op.
void *
malloc(size_t sz)
{
	if (_pthread_malloc) {
		return _pthread_malloc(sz);
	} else {
		return NULL;
	}
}

void
free(void *p)
{
	if (_pthread_free) {
		_pthread_free(p);
	}
}
#endif // VARIANT_STATIC
1750
1751/*
1752 * Perform package initialization - called automatically when application starts
1753 */
1754struct ProgramVars; /* forward reference */
1755
1756#if !VARIANT_DYLD
2546420a
A
// Minimal hex-string parser used during early bootstrap (no libc strtoul
// available yet). Only the "0x..." form is accepted when base is 16 or 0;
// anything else yields 0. *endptr always receives the first unconsumed
// character. No overflow detection is performed.
static unsigned long
_pthread_strtoul(const char *p, const char **endptr, int base)
{
	uintptr_t val = 0;

	// Expect hex string starting with "0x"
	if ((base == 16 || base == 0) && p && p[0] == '0' && p[1] == 'x') {
		for (p += 2;; ++p) {
			char c = *p;
			int digit;
			if (c >= '0' && c <= '9') {
				digit = c - '0';
			} else if (c >= 'a' && c <= 'f') {
				digit = c - 'a' + 10;
			} else if (c >= 'A' && c <= 'F') {
				digit = c - 'A' + 10;
			} else {
				break;	// first non-hex character ends the number
			}
			val = (val << 4) + digit;
		}
	}

	*endptr = (char *)p;
	return val;
}
1783
// Parse the kernel-provided "main_stack=<addr>,<size>,<allocaddr>,<allocsize>"
// entry from the apple[] bootstrap strings. Returns 1 on a fully parsed entry,
// 0 otherwise. The entry is scrubbed (zeroed) afterwards either way so the
// addresses don't linger in the environment.
static int
parse_main_stack_params(const char *apple[],
			void **stackaddr,
			size_t *stacksize,
			void **allocaddr,
			size_t *allocsize)
{
	const char *p = _simple_getenv(apple, "main_stack");
	if (!p) return 0;

	int ret = 0;
	const char *s = p;

	// BUGFIX: cast the parsed integers before storing through the void **
	// out-parameters — the original implicitly converted unsigned long to
	// void *, which is a constraint violation in C.
	*stackaddr = (void *)_pthread_strtoul(s, &s, 16);
	if (*s != ',') goto out;

	*stacksize = _pthread_strtoul(s + 1, &s, 16);
	if (*s != ',') goto out;

	*allocaddr = (void *)_pthread_strtoul(s + 1, &s, 16);
	if (*s != ',') goto out;

	*allocsize = _pthread_strtoul(s + 1, &s, 16);
	if (*s != ',' && *s != 0) goto out;

	ret = 1;
out:
	// Scrub the bootstrap string regardless of parse success.
	bzero((char *)p, strlen(p));
	return ret;
}
1814
214d78a2
A
static void
parse_ptr_munge_params(const char *envp[], const char *apple[])
{
	// Seed _pthread_ptr_munge_token from the kernel-provided apple[]
	// bootstrap entry; the entry is scrubbed after reading.
	const char *p, *s;
	p = _simple_getenv(apple, "ptr_munge");
	if (p) {
		_pthread_ptr_munge_token = _pthread_strtoul(p, &s, 16);
		bzero((char *)p, strlen(p));
	}
#if !DEBUG
	// Release builds only honor the environment override when the kernel
	// did not supply a token; DEBUG builds always consult it.
	if (_pthread_ptr_munge_token) return;
#endif
	// NOTE(review): unlike the apple[] entry, the environment string is
	// not scrubbed here — presumably acceptable since it is user-supplied.
	p = _simple_getenv(envp, "PTHREAD_PTR_MUNGE_TOKEN");
	if (p) {
		uintptr_t t = _pthread_strtoul(p, &s, 16);
		if (t) _pthread_ptr_munge_token = t;
	}
}
f1a1da6c
A
1833
// Library initializer, called from libSystem during process startup. Sets up
// host priority info, the main thread structure, the pointer-munge token, the
// kernel thread entry points and the key/mutex subsystems. Returns 0.
int
__pthread_init(const struct _libpthread_functions *pthread_funcs,
		const char *envp[], const char *apple[],
		const struct ProgramVars *vars __unused)
{
	// Save our provided pushed-down functions
	if (pthread_funcs) {
		exitf = pthread_funcs->exit;

		if (pthread_funcs->version >= 2) {
			_pthread_malloc = pthread_funcs->malloc;
			_pthread_free = pthread_funcs->free;
		}
	}

	//
	// Get host information
	//

	kern_return_t kr;
	host_flavor_t flavor = HOST_PRIORITY_INFO;
	mach_msg_type_number_t count = HOST_PRIORITY_INFO_COUNT;
	host_priority_info_data_t priority_info;
	host_t host = mach_host_self();
	kr = host_info(host, flavor, (host_info_t)&priority_info, &count);
	if (kr != KERN_SUCCESS) {
		PTHREAD_INTERNAL_CRASH(kr, "host_info() failed");
	} else {
		// These feed sched_get_priority_min/max and QoS defaults.
		default_priority = (uint8_t)priority_info.user_priority;
		min_priority = (uint8_t)priority_info.minimum_priority;
		max_priority = (uint8_t)priority_info.maximum_priority;
	}
	mach_port_deallocate(mach_task_self(), host);

	//
	// Set up the main thread structure
	//

	// Get the address and size of the main thread's stack from the kernel.
	void *stackaddr = 0;
	size_t stacksize = 0;
	void *allocaddr = 0;
	size_t allocsize = 0;
	if (!parse_main_stack_params(apple, &stackaddr, &stacksize, &allocaddr, &allocsize) ||
		stackaddr == NULL || stacksize == 0) {
		// Fall back to previous behavior.
		size_t len = sizeof(stackaddr);
		int mib[] = { CTL_KERN, KERN_USRSTACK };
		if (__sysctl(mib, 2, &stackaddr, &len, NULL, 0) != 0) {
#if defined(__LP64__)
			stackaddr = (void *)USRSTACK64;
#else
			stackaddr = (void *)USRSTACK;
#endif
		}
		stacksize = DFLSSIZ;
		allocaddr = 0;
		allocsize = 0;
	}

	// Initialize random ptr_munge token from the kernel.
	parse_ptr_munge_params(envp, apple);

	// libpthread.a in dyld "owns" the main thread structure itself and sets
	// up the tsd to point to it. So take the pthread_self() from there
	// and make it our main thread point.
	pthread_t thread = (pthread_t)_pthread_getspecific_direct(
			_PTHREAD_TSD_SLOT_PTHREAD_SELF);
	PTHREAD_ASSERT(thread);
	_main_thread_ptr = thread;

	PTHREAD_ASSERT(_pthread_attr_default.qosclass ==
			_pthread_default_priority(0));
	_pthread_struct_init(thread, &_pthread_attr_default,
			stackaddr, stacksize, allocaddr, allocsize);
	thread->tl_joinable = true;

	// Finish initialization with common code that is reinvoked on the
	// child side of a fork.

	// Finishes initialization of main thread attributes.
	// Initializes the thread list and add the main thread.
	// Calls _pthread_set_self() to prepare the main thread for execution.
	_pthread_main_thread_init(thread);

	struct _pthread_registration_data registration_data;
	// Set up kernel entry points with __bsdthread_register.
	_pthread_bsdthread_init(&registration_data);

	// Have pthread_key and pthread_mutex do their init envvar checks.
	_pthread_key_global_init(envp);
	_pthread_mutex_global_init(envp, &registration_data);

#if PTHREAD_DEBUG_LOG
	_SIMPLE_STRING path = _simple_salloc();
	_simple_sprintf(path, "/var/tmp/libpthread.%d.log", getpid());
	_pthread_debuglog = open(_simple_string(path),
			O_WRONLY | O_APPEND | O_CREAT | O_NOFOLLOW | O_CLOEXEC, 0666);
	_simple_sfree(path);
	_pthread_debugstart = mach_absolute_time();
#endif

	return 0;
}
214d78a2 1938#endif // !VARIANT_DYLD
f1a1da6c
A
1939
// Finish setting up the main thread p and (re)initialize the global thread
// list around it. Called from __pthread_init() and again on the child side
// of fork (per the caller's comment in __pthread_init).
PTHREAD_NOEXPORT void
_pthread_main_thread_init(pthread_t p)
{
	TAILQ_INIT(&__pthread_head);
	_PTHREAD_LOCK_INIT(_pthread_list_lock);
	_PTHREAD_LOCK_INIT(p->lock);
	_pthread_set_kernel_thread(p, mach_thread_self());
	_pthread_set_reply_port(p, mach_reply_port());
	p->__cleanup_stack = NULL;
	p->tl_join_ctx = NULL;
	p->tl_exit_gate = MACH_PORT_NULL;
	p->tsd[__TSD_SEMAPHORE_CACHE] = (void*)SEMAPHORE_NULL;
	p->tsd[__TSD_MACH_SPECIAL_REPLY] = 0;
	p->cancel_state |= _PTHREAD_CANCEL_INITIALIZED;

	// Initialize the list of threads with the new main thread.
	TAILQ_INSERT_HEAD(&__pthread_head, p, tl_plist);
	_pthread_count = 1;

	_pthread_introspection_thread_start(p);
}
1961
int
sched_yield(void)
{
	// POSIX sched_yield(): offer the CPU to any other runnable thread via
	// the Mach swtch_pri trap (0 = depress to lowest priority briefly).
	// Always succeeds per POSIX.
	swtch_pri(0);
	return 0;
}
1968
a0619f9c
A
// XXX remove
// Legacy cthreads compatibility shim; kept only for old binaries.
void
cthread_yield(void)
{
	sched_yield();
}
f1a1da6c 1975
a0619f9c
A
// Non-portable alias for sched_yield(), matching other platforms'
// pthread_yield().
void
pthread_yield_np(void)
{
	sched_yield();
}
1981
a0619f9c
A
1982
1983
214d78a2 1984// Libsystem knows about this symbol and exports it to libsyscall
a0619f9c 1985PTHREAD_NOEXPORT_VARIANT
f1a1da6c
A
1986void
1987_pthread_clear_qos_tsd(mach_port_t thread_port)
1988{
1989 if (thread_port == MACH_PORT_NULL || (uintptr_t)_pthread_getspecific_direct(_PTHREAD_TSD_SLOT_MACH_THREAD_SELF) == thread_port) {
1990 /* Clear the current thread's TSD, that can be done inline. */
214d78a2
A
1991 _pthread_setspecific_direct(_PTHREAD_TSD_SLOT_PTHREAD_QOS_CLASS,
1992 _pthread_unspecified_priority());
f1a1da6c
A
1993 } else {
1994 pthread_t p;
1995
2546420a 1996 _PTHREAD_LOCK(_pthread_list_lock);
f1a1da6c 1997
214d78a2 1998 TAILQ_FOREACH(p, &__pthread_head, tl_plist) {
964d3577 1999 mach_port_t kp = _pthread_kernel_thread(p);
f1a1da6c 2000 if (thread_port == kp) {
214d78a2
A
2001 p->tsd[_PTHREAD_TSD_SLOT_PTHREAD_QOS_CLASS] =
2002 _pthread_unspecified_priority();
f1a1da6c
A
2003 break;
2004 }
2005 }
2006
2546420a 2007 _PTHREAD_UNLOCK(_pthread_list_lock);
f1a1da6c
A
2008 }
2009}
2010
a0619f9c 2011
214d78a2
A
2012#pragma mark pthread/stack_np.h public interface
2013
2014
#if defined(__i386__) || defined(__x86_64__) || defined(__arm__) || defined(__arm64__)
typedef uintptr_t frame_data_addr_t;

struct frame_data {
	frame_data_addr_t frame_addr_next;
	frame_data_addr_t ret_addr;
};
#else
#error ********** Unimplemented architecture
#endif

// Decode one stack frame: given a frame pointer, optionally report the
// saved return address and hand back the caller's frame pointer.
uintptr_t
pthread_stack_frame_decode_np(uintptr_t frame_addr, uintptr_t *return_addr)
{
	const struct frame_data *fp = (const struct frame_data *)frame_addr;

	if (return_addr != NULL) {
		*return_addr = (uintptr_t)fp->ret_addr;
	}
	return (uintptr_t)fp->frame_addr_next;
}
2037
2038
2039#pragma mark pthread workqueue support routines
2040
f1a1da6c
A
2041
PTHREAD_NOEXPORT void
_pthread_bsdthread_init(struct _pthread_registration_data *data)
{
	// Register libpthread with the kernel: describe where our TSD and
	// dispatch-queue slots live inside struct _pthread so bsdthread traps
	// can service them directly, and learn which features the running
	// kernel supports.
	bzero(data, sizeof(*data));
	data->version = sizeof(struct _pthread_registration_data);
	data->dispatch_queue_offset = __PTK_LIBDISPATCH_KEY0 * sizeof(void *);
	data->return_to_kernel_offset = __TSD_RETURN_TO_KERNEL * sizeof(void *);
	data->tsd_offset = offsetof(struct _pthread, tsd);
	data->mach_thread_self_offset = __TSD_MACH_THREAD_SELF * sizeof(void *);

	// Registers thread_start/start_wqthread as kernel entry points; a
	// positive return value is a feature bitmask (data is also filled in
	// by the kernel on the way out).
	int rv = __bsdthread_register(thread_start, start_wqthread, (int)PTHREAD_SIZE,
			(void*)data, (uintptr_t)sizeof(*data), data->dispatch_queue_offset);

	if (rv > 0) {
		// Refuse to run on kernels lacking features we depend on.
		int required_features =
				PTHREAD_FEATURE_FINEPRIO |
				PTHREAD_FEATURE_BSDTHREADCTL |
				PTHREAD_FEATURE_SETSELF |
				PTHREAD_FEATURE_QOS_MAINTENANCE |
				PTHREAD_FEATURE_QOS_DEFAULT;
		if ((rv & required_features) != required_features) {
			PTHREAD_INTERNAL_CRASH(rv, "Missing required kernel support");
		}
		__pthread_supported_features = rv;
	}

	/*
	 * TODO: differentiate between (-1, EINVAL) after fork (which has the side
	 * effect of resetting the child's stack_addr_hint before bailing out) and
	 * (-1, EINVAL) because of invalid arguments. We'd probably like to treat
	 * the latter as fatal.
	 *
	 * <rdar://problem/36451838>
	 */

	// Adopt the main-thread QoS the kernel reported, if it set one.
	pthread_priority_t main_qos = (pthread_priority_t)data->main_qos;

	if (_pthread_priority_thread_qos(main_qos) != THREAD_QOS_UNSPECIFIED) {
		_pthread_set_main_qos(main_qos);
		main_thread()->tsd[_PTHREAD_TSD_SLOT_PTHREAD_QOS_CLASS] = main_qos;
	}

	// Kernel-suggested base address for future thread stacks, if any.
	if (data->stack_addr_hint) {
		__pthread_stack_hint = data->stack_addr_hint;
	}

	if (__libdispatch_workerfunction != NULL) {
		// prepare the kernel for workq action
		(void)__workq_open();
	}
}
2093
214d78a2
A
2094PTHREAD_NOINLINE
2095static void
2096_pthread_wqthread_legacy_worker_wrap(pthread_priority_t pp)
2097{
2098 /* Old thread priorities are inverted from where we have them in
2099 * the new flexible priority scheme. The highest priority is zero,
2100 * up to 2, with background at 3.
2101 */
2102 pthread_workqueue_function_t func = (pthread_workqueue_function_t)__libdispatch_workerfunction;
2103 bool overcommit = (pp & _PTHREAD_PRIORITY_OVERCOMMIT_FLAG);
2104 int opts = overcommit ? WORKQ_ADDTHREADS_OPTION_OVERCOMMIT : 0;
2105
2106 switch (_pthread_priority_thread_qos(pp)) {
2107 case THREAD_QOS_USER_INITIATED:
2108 return (*func)(WORKQ_HIGH_PRIOQUEUE, opts, NULL);
2109 case THREAD_QOS_LEGACY:
2110 /* B&I builders can't pass a QOS_CLASS_DEFAULT thread to dispatch, for fear of the QoS being
2111 * picked up by NSThread (et al) and transported around the system. So change the TSD to
2112 * make this thread look like QOS_CLASS_USER_INITIATED even though it will still run as legacy.
2113 */
2114 _pthread_setspecific_direct(_PTHREAD_TSD_SLOT_PTHREAD_QOS_CLASS,
2115 _pthread_priority_make_from_thread_qos(THREAD_QOS_USER_INITIATED, 0, 0));
2116 return (*func)(WORKQ_DEFAULT_PRIOQUEUE, opts, NULL);
2117 case THREAD_QOS_UTILITY:
2118 return (*func)(WORKQ_LOW_PRIOQUEUE, opts, NULL);
2119 case THREAD_QOS_BACKGROUND:
2120 return (*func)(WORKQ_BG_PRIOQUEUE, opts, NULL);
2121 }
2122 PTHREAD_INTERNAL_CRASH(pp, "Invalid pthread priority for the legacy interface");
2123}
2124
2125PTHREAD_ALWAYS_INLINE
2126static inline pthread_priority_t
2127_pthread_wqthread_priority(int flags)
f1a1da6c 2128{
214d78a2
A
2129 pthread_priority_t pp = 0;
2130 thread_qos_t qos;
2131
2132 if (flags & WQ_FLAG_THREAD_KEVENT) {
2133 pp |= _PTHREAD_PRIORITY_NEEDS_UNBIND_FLAG;
2134 }
2135 if (flags & WQ_FLAG_THREAD_EVENT_MANAGER) {
2136 return pp | _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG;
2137 }
f1a1da6c 2138
214d78a2
A
2139 if (flags & WQ_FLAG_THREAD_OVERCOMMIT) {
2140 pp |= _PTHREAD_PRIORITY_OVERCOMMIT_FLAG;
2141 }
2142 if (flags & WQ_FLAG_THREAD_PRIO_QOS) {
2143 qos = (thread_qos_t)(flags & WQ_FLAG_THREAD_PRIO_MASK);
2144 pp = _pthread_priority_make_from_thread_qos(qos, 0, pp);
2145 } else if (flags & WQ_FLAG_THREAD_PRIO_SCHED) {
2146 pp |= _PTHREAD_PRIORITY_SCHED_PRI_MASK;
2147 pp |= (flags & WQ_FLAG_THREAD_PRIO_MASK);
2148 } else {
2149 PTHREAD_INTERNAL_CRASH(flags, "Missing priority");
2150 }
2151 return pp;
2152}
964d3577 2153
214d78a2
A
PTHREAD_NOINLINE
static void
_pthread_wqthread_setup(pthread_t self, mach_port_t kport, void *stacklowaddr,
		int flags)
{
	// First-time setup of a kernel-created workqueue thread. The
	// pthread_t itself sits at the top of the stack the kernel handed us,
	// so the usable stack runs from stacklowaddr up to self.
	void *stackaddr = self;
	size_t stacksize = (uintptr_t)self - (uintptr_t)stacklowaddr;

	_pthread_struct_init(self, &_pthread_attr_default, stackaddr, stacksize,
			PTHREAD_ALLOCADDR(stackaddr, stacksize),
			PTHREAD_ALLOCSIZE(stackaddr, stacksize));

	_pthread_set_kernel_thread(self, kport);
	self->wqthread = 1;
	self->wqkillset = 0;
	self->tl_joinable = false;	// workqueue threads can never be joined
	self->cancel_state |= _PTHREAD_CANCEL_INITIALIZED;

	// Update the running thread count and set childrun bit.
	// Skip rewriting the TSD base if the kernel already set it for us.
	bool thread_tsd_base_set = (bool)(flags & WQ_FLAG_THREAD_TSD_BASE_SET);
	_pthread_set_self_internal(self, !thread_tsd_base_set);
	__pthread_add_thread(self, false);
	__pthread_started_thread(self);
}
2546420a 2178
214d78a2
A
2179PTHREAD_NORETURN PTHREAD_NOINLINE
2180static void
2181_pthread_wqthread_exit(pthread_t self)
2182{
2183 pthread_priority_t pp;
2184 thread_qos_t qos;
964d3577 2185
214d78a2
A
2186 pp = (pthread_priority_t)self->tsd[_PTHREAD_TSD_SLOT_PTHREAD_QOS_CLASS];
2187 qos = _pthread_priority_thread_qos(pp);
2188 if (qos == THREAD_QOS_UNSPECIFIED || qos > WORKQ_THREAD_QOS_CLEANUP) {
2189 // Reset QoS to something low for the cleanup process
2190 pp = _pthread_priority_make_from_thread_qos(WORKQ_THREAD_QOS_CLEANUP, 0, 0);
2191 self->tsd[_PTHREAD_TSD_SLOT_PTHREAD_QOS_CLASS] = (void *)pp;
2192 }
f1a1da6c 2193
214d78a2
A
2194 _pthread_exit(self, NULL);
2195}
f1a1da6c 2196
214d78a2
A
// workqueue entry point from kernel
//
// Dispatches one unit of workqueue work: sets up the thread on first use,
// derives its priority from the kernel-provided flags, invokes the
// libdispatch callback appropriate to the request kind (workloop, kevent,
// or plain worker), then parks or exits via __workq_kernreturn.
void
_pthread_wqthread(pthread_t self, mach_port_t kport, void *stacklowaddr,
		void *keventlist, int flags, int nkevents)
{
	// WQ_FLAG_THREAD_REUSE means this thread has been through setup on a
	// previous dispatch; only initialize it the first time.
	if ((flags & WQ_FLAG_THREAD_REUSE) == 0) {
		_pthread_wqthread_setup(self, kport, stacklowaddr, flags);
	}

	pthread_priority_t pp;
	if (flags & WQ_FLAG_THREAD_OUTSIDEQOS) {
		// Thread is not participating in QoS; run with a legacy fallback
		// priority.
		self->wqoutsideqos = 1;
		pp = _pthread_priority_make_from_thread_qos(THREAD_QOS_LEGACY, 0,
				_PTHREAD_PRIORITY_FALLBACK_FLAG);
	} else {
		self->wqoutsideqos = 0;
		pp = _pthread_wqthread_priority(flags);
	}

	self->tsd[_PTHREAD_TSD_SLOT_PTHREAD_QOS_CLASS] = (void *)pp;

	// avoid spills on the stack hard to keep used stack space minimal
	if (nkevents == WORKQ_EXIT_THREAD_NKEVENT) {
		// Kernel asked this thread to die rather than do work.
		goto exit;
	} else if (flags & WQ_FLAG_THREAD_WORKLOOP) {
		// Workloop request: kqueue id is stashed just below the kevent list.
		self->fun = (void *(*)(void*))__libdispatch_workloopfunction;
		self->wq_retop = WQOPS_THREAD_WORKLOOP_RETURN;
		self->wq_kqid_ptr = ((kqueue_id_t *)keventlist - 1);
		self->arg = keventlist;
		self->wq_nevents = nkevents;
	} else if (flags & WQ_FLAG_THREAD_KEVENT) {
		// Plain kevent delivery.
		self->fun = (void *(*)(void*))__libdispatch_keventfunction;
		self->wq_retop = WQOPS_THREAD_KEVENT_RETURN;
		self->wq_kqid_ptr = NULL;
		self->arg = keventlist;
		self->wq_nevents = nkevents;
	} else {
		// Ordinary worker thread: call the worker function directly and
		// return to the kernel when it comes back.
		self->fun = (void *(*)(void*))__libdispatch_workerfunction;
		self->wq_retop = WQOPS_THREAD_RETURN;
		self->wq_kqid_ptr = NULL;
		self->arg = (void *)(uintptr_t)pp;
		self->wq_nevents = 0;
		if (os_likely(__workq_newapi)) {
			(*__libdispatch_workerfunction)(pp);
		} else {
			_pthread_wqthread_legacy_worker_wrap(pp);
		}
		goto just_return;
	}

	if (nkevents > 0) {
kevent_errors_retry:
		// Deliver events; a positive kernreturn result means more events
		// were handed back and must be processed again.
		if (self->wq_retop == WQOPS_THREAD_WORKLOOP_RETURN) {
			((pthread_workqueue_function_workloop_t)self->fun)
					(self->wq_kqid_ptr, &self->arg, &self->wq_nevents);
		} else {
			((pthread_workqueue_function_kevent_t)self->fun)
					(&self->arg, &self->wq_nevents);
		}
		int rc = __workq_kernreturn(self->wq_retop, self->arg, self->wq_nevents, 0);
		if (os_unlikely(rc > 0)) {
			self->wq_nevents = rc;
			goto kevent_errors_retry;
		}
		if (os_unlikely(rc < 0)) {
			PTHREAD_INTERNAL_CRASH(self->err_no, "kevent (workloop) failed");
		}
	} else {
just_return:
		__workq_kernreturn(self->wq_retop, NULL, 0, 0);
	}

exit:
	_pthread_wqthread_exit(self);
}
2272
214d78a2
A
2273
2274#pragma mark pthread workqueue API for libdispatch
2275
f1a1da6c 2276
a0619f9c
A
2277_Static_assert(WORKQ_KEVENT_EVENT_BUFFER_LEN == WQ_KEVENT_LIST_LEN,
2278 "Kernel and userland should agree on the event list size");
2279
f1a1da6c
A
void
pthread_workqueue_setdispatchoffset_np(int offset)
{
	// Records the TSD offset at which libdispatch keeps its per-thread
	// queue pointer; forwarded to the kernel on workqueue registration.
	__libdispatch_offset = offset;
}
2285
a0619f9c
A
2286static int
2287pthread_workqueue_setdispatch_with_workloop_np(pthread_workqueue_function2_t queue_func,
2288 pthread_workqueue_function_kevent_t kevent_func,
2289 pthread_workqueue_function_workloop_t workloop_func)
f1a1da6c
A
2290{
2291 int res = EBUSY;
2292 if (__libdispatch_workerfunction == NULL) {
2293 // Check whether the kernel supports new SPIs
964d3577 2294 res = __workq_kernreturn(WQOPS_QUEUE_NEWSPISUPP, NULL, __libdispatch_offset, kevent_func != NULL ? 0x01 : 0x00);
f1a1da6c
A
2295 if (res == -1){
2296 res = ENOTSUP;
2297 } else {
964d3577
A
2298 __libdispatch_workerfunction = queue_func;
2299 __libdispatch_keventfunction = kevent_func;
a0619f9c 2300 __libdispatch_workloopfunction = workloop_func;
f1a1da6c
A
2301
2302 // Prepare the kernel for workq action
2303 (void)__workq_open();
2304 if (__is_threaded == 0) {
2305 __is_threaded = 1;
2306 }
2307 }
2308 }
2309 return res;
2310}
2311
964d3577 2312int
a0619f9c
A
2313_pthread_workqueue_init_with_workloop(pthread_workqueue_function2_t queue_func,
2314 pthread_workqueue_function_kevent_t kevent_func,
2315 pthread_workqueue_function_workloop_t workloop_func,
2316 int offset, int flags)
964d3577
A
2317{
2318 if (flags != 0) {
2319 return ENOTSUP;
2320 }
a0619f9c 2321
964d3577
A
2322 __workq_newapi = true;
2323 __libdispatch_offset = offset;
a0619f9c
A
2324
2325 int rv = pthread_workqueue_setdispatch_with_workloop_np(queue_func, kevent_func, workloop_func);
964d3577
A
2326 return rv;
2327}
2328
a0619f9c
A
// Convenience initializer: kevent support but no workloop callback.
int
_pthread_workqueue_init_with_kevent(pthread_workqueue_function2_t queue_func,
		pthread_workqueue_function_kevent_t kevent_func,
		int offset, int flags)
{
	return _pthread_workqueue_init_with_workloop(queue_func, kevent_func, NULL, offset, flags);
}
2336
964d3577
A
// Convenience initializer: plain worker function only (no kevent or
// workloop support).
int
_pthread_workqueue_init(pthread_workqueue_function2_t func, int offset, int flags)
{
	return _pthread_workqueue_init_with_kevent(func, NULL, offset, flags);
}
2342
// Legacy registration SPI: installs only a worker function. The cast to
// pthread_workqueue_function2_t relies on the two signatures being
// call-compatible for the legacy dispatch path.
int
pthread_workqueue_setdispatch_np(pthread_workqueue_function_t worker_func)
{
	return pthread_workqueue_setdispatch_with_workloop_np((pthread_workqueue_function2_t)worker_func, NULL, NULL);
}
2348
f1a1da6c
A
2349int
2350_pthread_workqueue_supported(void)
2351{
a0619f9c
A
2352 if (os_unlikely(!__pthread_supported_features)) {
2353 PTHREAD_INTERNAL_CRASH(0, "libpthread has not been initialized");
2354 }
2355
f1a1da6c
A
2356 return __pthread_supported_features;
2357}
2358
2359int
2360pthread_workqueue_addthreads_np(int queue_priority, int options, int numthreads)
2361{
2362 int res = 0;
2363
2364 // Cannot add threads without a worker function registered.
2365 if (__libdispatch_workerfunction == NULL) {
2366 return EPERM;
2367 }
2368
2369 pthread_priority_t kp = 0;
214d78a2
A
2370 int compat_priority = queue_priority & WQ_FLAG_THREAD_PRIO_MASK;
2371 int flags = 0;
f1a1da6c 2372
214d78a2
A
2373 if (options & WORKQ_ADDTHREADS_OPTION_OVERCOMMIT) {
2374 flags = _PTHREAD_PRIORITY_OVERCOMMIT_FLAG;
2375 }
f1a1da6c 2376
a0619f9c
A
2377#pragma clang diagnostic push
2378#pragma clang diagnostic ignored "-Wdeprecated-declarations"
214d78a2 2379 kp = _pthread_qos_class_encode_workqueue(compat_priority, flags);
a0619f9c 2380#pragma clang diagnostic pop
f1a1da6c 2381
f1a1da6c
A
2382 res = __workq_kernreturn(WQOPS_QUEUE_REQTHREADS, NULL, numthreads, (int)kp);
2383 if (res == -1) {
2384 res = errno;
2385 }
2386 return res;
2387}
2388
a0619f9c
A
2389bool
2390_pthread_workqueue_should_narrow(pthread_priority_t pri)
2391{
2392 int res = __workq_kernreturn(WQOPS_SHOULD_NARROW, NULL, (int)pri, 0);
2393 if (res == -1) {
2394 return false;
2395 }
2396 return res;
2397}
2398
f1a1da6c
A
2399int
2400_pthread_workqueue_addthreads(int numthreads, pthread_priority_t priority)
2401{
2402 int res = 0;
2403
2404 if (__libdispatch_workerfunction == NULL) {
2405 return EPERM;
2406 }
2407
214d78a2
A
2408#if TARGET_OS_OSX
2409 // <rdar://problem/37687655> Legacy simulators fail to boot
2410 //
2411 // Older sims set the deprecated _PTHREAD_PRIORITY_ROOTQUEUE_FLAG wrongly,
2412 // which is aliased to _PTHREAD_PRIORITY_SCHED_PRI_FLAG and that XNU
2413 // validates and rejects.
2414 //
2415 // As a workaround, forcefully unset this bit that cannot be set here
2416 // anyway.
2417 priority &= ~_PTHREAD_PRIORITY_SCHED_PRI_FLAG;
2418#endif
f1a1da6c
A
2419
2420 res = __workq_kernreturn(WQOPS_QUEUE_REQTHREADS, NULL, numthreads, (int)priority);
2421 if (res == -1) {
2422 res = errno;
2423 }
2424 return res;
2425}
2426
964d3577
A
2427int
2428_pthread_workqueue_set_event_manager_priority(pthread_priority_t priority)
2429{
2430 int res = __workq_kernreturn(WQOPS_SET_EVENT_MANAGER_PRIORITY, NULL, (int)priority, 0);
2431 if (res == -1) {
2432 res = errno;
2433 }
2434 return res;
2435}
2436
214d78a2
A
2437int
2438_pthread_workloop_create(uint64_t workloop_id, uint64_t options, pthread_attr_t *attr)
2439{
2440 struct kqueue_workloop_params params = {
2441 .kqwlp_version = sizeof(struct kqueue_workloop_params),
2442 .kqwlp_id = workloop_id,
2443 .kqwlp_flags = 0,
2444 };
2445
2446 if (!attr) {
2447 return EINVAL;
2448 }
2449
2450 if (attr->schedset) {
2451 params.kqwlp_flags |= KQ_WORKLOOP_CREATE_SCHED_PRI;
2452 params.kqwlp_sched_pri = attr->param.sched_priority;
2453 }
2454
2455 if (attr->policyset) {
2456 params.kqwlp_flags |= KQ_WORKLOOP_CREATE_SCHED_POL;
2457 params.kqwlp_sched_pol = attr->policy;
2458 }
2459
2460 if (attr->cpupercentset) {
2461 params.kqwlp_flags |= KQ_WORKLOOP_CREATE_CPU_PERCENT;
2462 params.kqwlp_cpu_percent = attr->cpupercent;
2463 params.kqwlp_cpu_refillms = attr->refillms;
2464 }
2465
2466 int res = __kqueue_workloop_ctl(KQ_WORKLOOP_CREATE, 0, &params,
2467 sizeof(params));
2468 if (res == -1) {
2469 res = errno;
2470 }
2471 return res;
2472}
2473
2474int
2475_pthread_workloop_destroy(uint64_t workloop_id)
2476{
2477 struct kqueue_workloop_params params = {
2478 .kqwlp_version = sizeof(struct kqueue_workloop_params),
2479 .kqwlp_id = workloop_id,
2480 };
2481
2482 int res = __kqueue_workloop_ctl(KQ_WORKLOOP_DESTROY, 0, &params,
2483 sizeof(params));
2484 if (res == -1) {
2485 res = errno;
2486 }
2487 return res;
2488}
2489
2490
2491#pragma mark Introspection SPI for libpthread.
2492
f1a1da6c
A
2493
2494static pthread_introspection_hook_t _pthread_introspection_hook;
2495
2496pthread_introspection_hook_t
2497pthread_introspection_hook_install(pthread_introspection_hook_t hook)
2498{
f1a1da6c 2499 pthread_introspection_hook_t prev;
a0619f9c 2500 prev = _pthread_atomic_xchg_ptr((void**)&_pthread_introspection_hook, hook);
f1a1da6c
A
2501 return prev;
2502}
2503
PTHREAD_NOINLINE
static void
_pthread_introspection_hook_callout_thread_create(pthread_t t)
{
	// THREAD_CREATE reports the pthread_t itself and its size; the stack
	// is reported separately at THREAD_START.
	_pthread_introspection_hook(PTHREAD_INTROSPECTION_THREAD_CREATE, t, t,
			PTHREAD_SIZE);
}
2511
static inline void
_pthread_introspection_thread_create(pthread_t t)
{
	// Fast path: no hook installed, do nothing. The callout is kept
	// out-of-line (PTHREAD_NOINLINE) to keep this check cheap.
	if (os_fastpath(!_pthread_introspection_hook)) return;
	_pthread_introspection_hook_callout_thread_create(t);
}
2518
PTHREAD_NOINLINE
static void
_pthread_introspection_hook_callout_thread_start(pthread_t t)
{
	// THREAD_START reports the freeable region backing the thread:
	// for the main thread that is stack plus guard area; for others the
	// allocation minus the trailing pthread_t.
	size_t freesize;
	void *freeaddr;
	if (t == main_thread()) {
		size_t stacksize = t->stackaddr - t->stackbottom;
		freesize = stacksize + t->guardsize;
		freeaddr = t->stackaddr - freesize;
	} else {
		freesize = t->freesize - PTHREAD_SIZE;
		freeaddr = t->freeaddr;
	}
	_pthread_introspection_hook(PTHREAD_INTROSPECTION_THREAD_START, t,
			freeaddr, freesize);
}
2536
static inline void
_pthread_introspection_thread_start(pthread_t t)
{
	// Fast path: skip the out-of-line callout when no hook is installed.
	if (os_fastpath(!_pthread_introspection_hook)) return;
	_pthread_introspection_hook_callout_thread_start(t);
}
2543
PTHREAD_NOINLINE
static void
_pthread_introspection_hook_callout_thread_terminate(pthread_t t)
{
	// THREAD_TERMINATE reports the same freeable region computed at
	// THREAD_START (the two computations are intentionally identical).
	size_t freesize;
	void *freeaddr;
	if (t == main_thread()) {
		size_t stacksize = t->stackaddr - t->stackbottom;
		freesize = stacksize + t->guardsize;
		freeaddr = t->stackaddr - freesize;
	} else {
		freesize = t->freesize - PTHREAD_SIZE;
		freeaddr = t->freeaddr;
	}
	_pthread_introspection_hook(PTHREAD_INTROSPECTION_THREAD_TERMINATE, t,
			freeaddr, freesize);
}
2561
static inline void
_pthread_introspection_thread_terminate(pthread_t t)
{
	// Fast path: skip the out-of-line callout when no hook is installed.
	if (os_fastpath(!_pthread_introspection_hook)) return;
	_pthread_introspection_hook_callout_thread_terminate(t);
}
2568
PTHREAD_NOINLINE
static void
_pthread_introspection_hook_callout_thread_destroy(pthread_t t)
{
	// THREAD_DESTROY mirrors THREAD_CREATE: reports the pthread_t and its
	// size just before the structure goes away.
	_pthread_introspection_hook(PTHREAD_INTROSPECTION_THREAD_DESTROY, t, t,
			PTHREAD_SIZE);
}
2576
static inline void
_pthread_introspection_thread_destroy(pthread_t t)
{
	// Fast path: skip the out-of-line callout when no hook is installed.
	if (os_fastpath(!_pthread_introspection_hook)) return;
	_pthread_introspection_hook_callout_thread_destroy(t);
}
2583
214d78a2
A
2584#pragma mark libplatform shims
2585
2586#include <platform/string.h>
2587
2588// pthread_setup initializes large structures to 0,
2589// which the compiler turns into a library call to memset.
2590//
2591// To avoid linking against Libc, provide a simple wrapper
2592// that calls through to the libplatform primitives
2593
#undef memset
// Shim so compiler-generated memset calls (from large zero-initializers)
// resolve inside libpthread without linking against Libc.
PTHREAD_NOEXPORT
void *
memset(void *b, int c, size_t len)
{
	return _platform_memset(b, c, len);
}
2601
#undef bzero
// Shim forwarding bzero to the libplatform primitive, avoiding a Libc
// dependency.
PTHREAD_NOEXPORT
void
bzero(void *s, size_t n)
{
	_platform_bzero(s, n);
}
2609
#undef memcpy
// Shim forwarding memcpy to libplatform. Deliberately implemented with
// memmove, which is safe even for overlapping regions (a superset of the
// memcpy contract).
PTHREAD_NOEXPORT
void *
memcpy(void* a, const void* b, unsigned long s)
{
	return _platform_memmove(a, b, s);
}
2617