]>
Commit | Line | Data |
---|---|---|
f1a1da6c A |
1 | /* |
2 | * Copyright (c) 2000-2013 Apple Inc. All rights reserved. | |
3 | * | |
4 | * @APPLE_LICENSE_HEADER_START@ | |
a0619f9c | 5 | * |
f1a1da6c A |
6 | * This file contains Original Code and/or Modifications of Original Code |
7 | * as defined in and that are subject to the Apple Public Source License | |
8 | * Version 2.0 (the 'License'). You may not use this file except in | |
9 | * compliance with the License. Please obtain a copy of the License at | |
10 | * http://www.opensource.apple.com/apsl/ and read it before using this | |
11 | * file. | |
a0619f9c | 12 | * |
f1a1da6c A |
13 | * The Original Code and all software distributed under the License are |
14 | * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER | |
15 | * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, | |
16 | * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, | |
17 | * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. | |
18 | * Please see the License for the specific language governing rights and | |
19 | * limitations under the License. | |
a0619f9c | 20 | * |
f1a1da6c A |
21 | * @APPLE_LICENSE_HEADER_END@ |
22 | */ | |
23 | /* | |
a0619f9c A |
24 | * Copyright 1996 1995 by Open Software Foundation, Inc. 1997 1996 1995 1994 1993 1992 1991 |
25 | * All Rights Reserved | |
26 | * | |
27 | * Permission to use, copy, modify, and distribute this software and | |
28 | * its documentation for any purpose and without fee is hereby granted, | |
29 | * provided that the above copyright notice appears in all copies and | |
30 | * that both the copyright notice and this permission notice appear in | |
31 | * supporting documentation. | |
32 | * | |
33 | * OSF DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE | |
34 | * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS | |
35 | * FOR A PARTICULAR PURPOSE. | |
36 | * | |
f1a1da6c | 37 | * IN NO EVENT SHALL OSF BE LIABLE FOR ANY SPECIAL, INDIRECT, OR |
a0619f9c A |
38 | * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM |
39 | * LOSS OF USE, DATA OR PROFITS, WHETHER IN ACTION OF CONTRACT, | |
40 | * NEGLIGENCE, OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION | |
41 | * WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. | |
42 | * | |
f1a1da6c A |
43 | */ |
44 | /* | |
45 | * MkLinux | |
46 | */ | |
47 | ||
48 | /* | |
49 | * POSIX Pthread Library | |
50 | */ | |
51 | ||
52 | #include "internal.h" | |
f1a1da6c A |
53 | |
54 | #include <stdlib.h> | |
55 | #include <errno.h> | |
56 | #include <signal.h> | |
57 | #include <unistd.h> | |
58 | #include <mach/mach_init.h> | |
59 | #include <mach/mach_vm.h> | |
214d78a2 | 60 | #include <mach/mach_sync_ipc.h> |
f1a1da6c A |
61 | #include <sys/time.h> |
62 | #include <sys/resource.h> | |
63 | #include <sys/sysctl.h> | |
64 | #include <sys/queue.h> | |
214d78a2 | 65 | #include <sys/ulock.h> |
f1a1da6c A |
66 | #include <sys/mman.h> |
67 | #include <machine/vmparam.h> | |
68 | #define __APPLE_API_PRIVATE | |
69 | #include <machine/cpu_capabilities.h> | |
c6e5f90c A |
70 | #if __has_include(<ptrauth.h>) |
71 | #include <ptrauth.h> | |
72 | #endif // __has_include(<ptrauth.h>) | |
c1f56ec9 A |
73 | #include <os/thread_self_restrict.h> |
74 | #include <os/tsd.h> | |
f1a1da6c | 75 | |
214d78a2 A |
76 | // Default stack size is 512KB; independent of the main thread's stack size. |
77 | #define DEFAULT_STACK_SIZE (size_t)(512 * 1024) | |
f1a1da6c | 78 | |
214d78a2 A |
79 | // |
80 | // Global constants | |
81 | // | |
f1a1da6c | 82 | |
214d78a2 A |
83 | /* |
84 | * The pthread may be offset into a page. In that event, by contract | |
85 | * with the kernel, the allocation will extend PTHREAD_SIZE from the | |
86 | * start of the next page. There's also one page worth of allocation | |
87 | * below stacksize for the guard page. <rdar://problem/19941744> | |
88 | */ | |
c1f56ec9 | 89 | #define PTHREAD_SIZE ((size_t)mach_vm_round_page(sizeof(struct pthread_s))) |
214d78a2 A |
90 | #define PTHREAD_ALLOCADDR(stackaddr, stacksize) ((stackaddr - stacksize) - vm_page_size) |
91 | #define PTHREAD_ALLOCSIZE(stackaddr, stacksize) ((round_page((uintptr_t)stackaddr) + PTHREAD_SIZE) - (uintptr_t)PTHREAD_ALLOCADDR(stackaddr, stacksize)) | |
f1a1da6c | 92 | |
214d78a2 A |
93 | static const pthread_attr_t _pthread_attr_default = { |
94 | .sig = _PTHREAD_ATTR_SIG, | |
95 | .stacksize = 0, | |
96 | .detached = PTHREAD_CREATE_JOINABLE, | |
97 | .inherit = _PTHREAD_DEFAULT_INHERITSCHED, | |
98 | .policy = _PTHREAD_DEFAULT_POLICY, | |
99 | .defaultguardpage = true, | |
100 | // compile time constant for _pthread_default_priority(0) | |
101 | .qosclass = (1U << (THREAD_QOS_LEGACY - 1 + _PTHREAD_PRIORITY_QOS_CLASS_SHIFT)) | | |
102 | ((uint8_t)-1 & _PTHREAD_PRIORITY_PRIORITY_MASK), | |
103 | }; | |
f1a1da6c A |
104 | |
105 | #if PTHREAD_LAYOUT_SPI | |
106 | ||
107 | const struct pthread_layout_offsets_s pthread_layout_offsets = { | |
108 | .plo_version = 1, | |
c1f56ec9 | 109 | .plo_pthread_tsd_base_offset = offsetof(struct pthread_s, tsd), |
f1a1da6c | 110 | .plo_pthread_tsd_base_address_offset = 0, |
c1f56ec9 | 111 | .plo_pthread_tsd_entry_size = sizeof(((struct pthread_s *)NULL)->tsd[0]), |
f1a1da6c A |
112 | }; |
113 | ||
114 | #endif // PTHREAD_LAYOUT_SPI | |
115 | ||
116 | // | |
214d78a2 | 117 | // Global exported variables |
f1a1da6c A |
118 | // |
119 | ||
214d78a2 A |
120 | // This global should be used (carefully) by anyone needing to know if a |
121 | // pthread (other than the main thread) has been created. | |
122 | int __is_threaded = 0; | |
c1f56ec9 | 123 | const int __unix_conforming = 1; // we're always conformant, but it's exported |
f1a1da6c | 124 | |
214d78a2 A |
125 | // |
126 | // Global internal variables | |
127 | // | |
2546420a | 128 | |
214d78a2 A |
129 | // _pthread_list_lock protects _pthread_count, access to the __pthread_head |
130 | // list. Externally imported by pthread_cancelable.c. | |
131 | struct __pthread_list __pthread_head = TAILQ_HEAD_INITIALIZER(__pthread_head); | |
132 | _pthread_lock _pthread_list_lock = _PTHREAD_LOCK_INITIALIZER; | |
133 | ||
134 | uint32_t _main_qos; | |
964d3577 | 135 | |
214d78a2 | 136 | #if VARIANT_DYLD |
964d3577 | 137 | // The main thread's pthread_t |
c1f56ec9 | 138 | struct pthread_s _main_thread OS_ALIGNED(64); |
214d78a2 | 139 | #else // VARIANT_DYLD |
c1f56ec9 A |
140 | pthread_t _main_thread_ptr; |
141 | void *(*_pthread_malloc)(size_t); | |
142 | void (*_pthread_free)(void *); | |
214d78a2 | 143 | #endif // VARIANT_DYLD |
f1a1da6c | 144 | |
214d78a2 A |
145 | #if PTHREAD_DEBUG_LOG |
146 | #include <fcntl.h> | |
147 | int _pthread_debuglog; | |
148 | uint64_t _pthread_debugstart; | |
149 | #endif | |
150 | ||
151 | // | |
152 | // Global static variables | |
153 | // | |
154 | static bool __workq_newapi; | |
155 | static uint8_t default_priority; | |
156 | #if !VARIANT_DYLD | |
157 | static uint8_t max_priority; | |
158 | static uint8_t min_priority; | |
159 | #endif // !VARIANT_DYLD | |
160 | static int _pthread_count = 1; | |
f1a1da6c | 161 | static int pthread_concurrency; |
e3ecba16 | 162 | uintptr_t _pthread_ptr_munge_token; |
214d78a2 A |
163 | |
164 | static void (*exitf)(int) = __exit; | |
f1a1da6c A |
165 | |
166 | // work queue support data | |
c1f56ec9 | 167 | OS_NORETURN OS_COLD |
214d78a2 A |
168 | static void |
169 | __pthread_invalid_keventfunction(void **events, int *nevents) | |
170 | { | |
171 | PTHREAD_CLIENT_CRASH(0, "Invalid kqworkq setup"); | |
172 | } | |
173 | ||
c1f56ec9 | 174 | OS_NORETURN OS_COLD |
214d78a2 A |
175 | static void |
176 | __pthread_invalid_workloopfunction(uint64_t *workloop_id, void **events, int *nevents) | |
177 | { | |
178 | PTHREAD_CLIENT_CRASH(0, "Invalid kqwl setup"); | |
179 | } | |
180 | static pthread_workqueue_function2_t __libdispatch_workerfunction; | |
181 | static pthread_workqueue_function_kevent_t __libdispatch_keventfunction = &__pthread_invalid_keventfunction; | |
182 | static pthread_workqueue_function_workloop_t __libdispatch_workloopfunction = &__pthread_invalid_workloopfunction; | |
214d78a2 | 183 | static int __pthread_supported_features; // supported feature set |
f1a1da6c | 184 | |
214d78a2 A |
185 | #if defined(__i386__) || defined(__x86_64__) |
186 | static mach_vm_address_t __pthread_stack_hint = 0xB0000000; | |
c6e5f90c A |
187 | #elif defined(__arm__) || defined(__arm64__) |
188 | static mach_vm_address_t __pthread_stack_hint = 0x30000000; | |
214d78a2 A |
189 | #else |
190 | #error no __pthread_stack_hint for this architecture | |
191 | #endif | |
f1a1da6c A |
192 | |
193 | // | |
194 | // Function prototypes | |
195 | // | |
196 | ||
197 | // pthread primitives | |
214d78a2 A |
198 | static inline void _pthread_struct_init(pthread_t t, const pthread_attr_t *attrs, |
199 | void *stack, size_t stacksize, void *freeaddr, size_t freesize); | |
f1a1da6c | 200 | |
214d78a2 A |
201 | #if VARIANT_DYLD |
202 | static void _pthread_set_self_dyld(void); | |
203 | #endif // VARIANT_DYLD | |
c6e5f90c | 204 | static inline void _pthread_set_self_internal(pthread_t); |
f1a1da6c | 205 | |
214d78a2 | 206 | static inline void __pthread_started_thread(pthread_t t); |
f1a1da6c | 207 | |
f1a1da6c | 208 | static void _pthread_exit(pthread_t self, void *value_ptr) __dead2; |
f1a1da6c | 209 | |
214d78a2 | 210 | static inline void _pthread_introspection_thread_create(pthread_t t); |
f1a1da6c | 211 | static inline void _pthread_introspection_thread_start(pthread_t t); |
214d78a2 | 212 | static inline void _pthread_introspection_thread_terminate(pthread_t t); |
f1a1da6c A |
213 | static inline void _pthread_introspection_thread_destroy(pthread_t t); |
214 | ||
f1a1da6c | 215 | /* |
a0619f9c | 216 | * Flags filed passed to bsdthread_create and back in pthread_start |
214d78a2 A |
217 | * 31 <---------------------------------> 0 |
218 | * _________________________________________ | |
219 | * | flags(8) | policy(8) | importance(16) | | |
220 | * ----------------------------------------- | |
221 | */ | |
222 | #define PTHREAD_START_CUSTOM 0x01000000 // <rdar://problem/34501401> | |
2546420a | 223 | #define PTHREAD_START_SETSCHED 0x02000000 |
214d78a2 | 224 | // was PTHREAD_START_DETACHED 0x04000000 |
2546420a A |
225 | #define PTHREAD_START_QOSCLASS 0x08000000 |
226 | #define PTHREAD_START_TSD_BASE_SET 0x10000000 | |
214d78a2 | 227 | #define PTHREAD_START_SUSPENDED 0x20000000 |
2546420a | 228 | #define PTHREAD_START_QOSCLASS_MASK 0x00ffffff |
f1a1da6c A |
229 | #define PTHREAD_START_POLICY_BITSHIFT 16 |
230 | #define PTHREAD_START_POLICY_MASK 0xff | |
231 | #define PTHREAD_START_IMPORTANCE_MASK 0xffff | |
232 | ||
214d78a2 | 233 | #pragma mark pthread attrs |
f1a1da6c | 234 | |
a0619f9c | 235 | int |
f1a1da6c A |
236 | pthread_attr_destroy(pthread_attr_t *attr) |
237 | { | |
238 | int ret = EINVAL; | |
239 | if (attr->sig == _PTHREAD_ATTR_SIG) { | |
240 | attr->sig = 0; | |
241 | ret = 0; | |
242 | } | |
243 | return ret; | |
244 | } | |
245 | ||
a0619f9c | 246 | int |
f1a1da6c A |
247 | pthread_attr_getdetachstate(const pthread_attr_t *attr, int *detachstate) |
248 | { | |
249 | int ret = EINVAL; | |
250 | if (attr->sig == _PTHREAD_ATTR_SIG) { | |
251 | *detachstate = attr->detached; | |
252 | ret = 0; | |
253 | } | |
254 | return ret; | |
255 | } | |
256 | ||
a0619f9c | 257 | int |
f1a1da6c A |
258 | pthread_attr_getinheritsched(const pthread_attr_t *attr, int *inheritsched) |
259 | { | |
260 | int ret = EINVAL; | |
261 | if (attr->sig == _PTHREAD_ATTR_SIG) { | |
262 | *inheritsched = attr->inherit; | |
263 | ret = 0; | |
264 | } | |
265 | return ret; | |
266 | } | |
267 | ||
c1f56ec9 | 268 | static OS_ALWAYS_INLINE void |
214d78a2 A |
269 | _pthread_attr_get_schedparam(const pthread_attr_t *attr, |
270 | struct sched_param *param) | |
271 | { | |
272 | if (attr->schedset) { | |
273 | *param = attr->param; | |
274 | } else { | |
275 | param->sched_priority = default_priority; | |
276 | param->quantum = 10; /* quantum isn't public yet */ | |
277 | } | |
278 | } | |
279 | ||
a0619f9c | 280 | int |
f1a1da6c A |
281 | pthread_attr_getschedparam(const pthread_attr_t *attr, struct sched_param *param) |
282 | { | |
283 | int ret = EINVAL; | |
284 | if (attr->sig == _PTHREAD_ATTR_SIG) { | |
214d78a2 | 285 | _pthread_attr_get_schedparam(attr, param); |
f1a1da6c A |
286 | ret = 0; |
287 | } | |
288 | return ret; | |
289 | } | |
290 | ||
a0619f9c | 291 | int |
f1a1da6c A |
292 | pthread_attr_getschedpolicy(const pthread_attr_t *attr, int *policy) |
293 | { | |
294 | int ret = EINVAL; | |
295 | if (attr->sig == _PTHREAD_ATTR_SIG) { | |
296 | *policy = attr->policy; | |
297 | ret = 0; | |
298 | } | |
299 | return ret; | |
300 | } | |
301 | ||
f1a1da6c A |
/*
 * Initializes attr to the library-wide defaults (joinable, default
 * inherit/policy, default guard page, default QoS). Always succeeds.
 */
int
pthread_attr_init(pthread_attr_t *attr)
{
	// Whole-struct copy of the compile-time default template.
	*attr = _pthread_attr_default;
	return 0;
}
308 | ||
a0619f9c | 309 | int |
f1a1da6c A |
310 | pthread_attr_setdetachstate(pthread_attr_t *attr, int detachstate) |
311 | { | |
312 | int ret = EINVAL; | |
313 | if (attr->sig == _PTHREAD_ATTR_SIG && | |
214d78a2 A |
314 | (detachstate == PTHREAD_CREATE_JOINABLE || |
315 | detachstate == PTHREAD_CREATE_DETACHED)) { | |
f1a1da6c A |
316 | attr->detached = detachstate; |
317 | ret = 0; | |
318 | } | |
319 | return ret; | |
320 | } | |
321 | ||
a0619f9c | 322 | int |
f1a1da6c A |
323 | pthread_attr_setinheritsched(pthread_attr_t *attr, int inheritsched) |
324 | { | |
325 | int ret = EINVAL; | |
326 | if (attr->sig == _PTHREAD_ATTR_SIG && | |
214d78a2 A |
327 | (inheritsched == PTHREAD_INHERIT_SCHED || |
328 | inheritsched == PTHREAD_EXPLICIT_SCHED)) { | |
f1a1da6c A |
329 | attr->inherit = inheritsched; |
330 | ret = 0; | |
331 | } | |
332 | return ret; | |
333 | } | |
334 | ||
a0619f9c | 335 | int |
f1a1da6c A |
336 | pthread_attr_setschedparam(pthread_attr_t *attr, const struct sched_param *param) |
337 | { | |
338 | int ret = EINVAL; | |
339 | if (attr->sig == _PTHREAD_ATTR_SIG) { | |
340 | /* TODO: Validate sched_param fields */ | |
341 | attr->param = *param; | |
342 | attr->schedset = 1; | |
343 | ret = 0; | |
344 | } | |
345 | return ret; | |
346 | } | |
347 | ||
c1f56ec9 A |
348 | #define _PTHREAD_POLICY_IS_FIXEDPRI(x) ((x) == SCHED_RR || (x) == SCHED_FIFO) |
349 | ||
a0619f9c | 350 | int |
f1a1da6c A |
351 | pthread_attr_setschedpolicy(pthread_attr_t *attr, int policy) |
352 | { | |
353 | int ret = EINVAL; | |
214d78a2 A |
354 | if (attr->sig == _PTHREAD_ATTR_SIG && (policy == SCHED_OTHER || |
355 | policy == SCHED_RR || policy == SCHED_FIFO)) { | |
356 | if (!_PTHREAD_POLICY_IS_FIXEDPRI(policy)) { | |
357 | /* non-fixedpri policy should remove cpupercent */ | |
358 | attr->cpupercentset = 0; | |
359 | } | |
f1a1da6c | 360 | attr->policy = policy; |
214d78a2 | 361 | attr->policyset = 1; |
f1a1da6c A |
362 | ret = 0; |
363 | } | |
364 | return ret; | |
365 | } | |
366 | ||
367 | int | |
368 | pthread_attr_setscope(pthread_attr_t *attr, int scope) | |
369 | { | |
370 | int ret = EINVAL; | |
371 | if (attr->sig == _PTHREAD_ATTR_SIG) { | |
372 | if (scope == PTHREAD_SCOPE_SYSTEM) { | |
373 | // No attribute yet for the scope. | |
374 | ret = 0; | |
375 | } else if (scope == PTHREAD_SCOPE_PROCESS) { | |
376 | ret = ENOTSUP; | |
377 | } | |
378 | } | |
379 | return ret; | |
380 | } | |
381 | ||
382 | int | |
383 | pthread_attr_getscope(const pthread_attr_t *attr, int *scope) | |
384 | { | |
385 | int ret = EINVAL; | |
386 | if (attr->sig == _PTHREAD_ATTR_SIG) { | |
387 | *scope = PTHREAD_SCOPE_SYSTEM; | |
388 | ret = 0; | |
389 | } | |
390 | return ret; | |
391 | } | |
392 | ||
393 | int | |
394 | pthread_attr_getstackaddr(const pthread_attr_t *attr, void **stackaddr) | |
395 | { | |
396 | int ret = EINVAL; | |
397 | if (attr->sig == _PTHREAD_ATTR_SIG) { | |
398 | *stackaddr = attr->stackaddr; | |
399 | ret = 0; | |
400 | } | |
401 | return ret; | |
402 | } | |
403 | ||
404 | int | |
405 | pthread_attr_setstackaddr(pthread_attr_t *attr, void *stackaddr) | |
406 | { | |
407 | int ret = EINVAL; | |
408 | if (attr->sig == _PTHREAD_ATTR_SIG && | |
c1f56ec9 | 409 | ((mach_vm_address_t)stackaddr & vm_page_mask) == 0) { |
f1a1da6c | 410 | attr->stackaddr = stackaddr; |
214d78a2 | 411 | attr->defaultguardpage = false; |
f1a1da6c A |
412 | attr->guardsize = 0; |
413 | ret = 0; | |
414 | } | |
415 | return ret; | |
416 | } | |
417 | ||
214d78a2 A |
418 | static inline size_t |
419 | _pthread_attr_stacksize(const pthread_attr_t *attr) | |
420 | { | |
421 | return attr->stacksize ? attr->stacksize : DEFAULT_STACK_SIZE; | |
422 | } | |
423 | ||
f1a1da6c A |
424 | int |
425 | pthread_attr_getstacksize(const pthread_attr_t *attr, size_t *stacksize) | |
426 | { | |
427 | int ret = EINVAL; | |
428 | if (attr->sig == _PTHREAD_ATTR_SIG) { | |
214d78a2 | 429 | *stacksize = _pthread_attr_stacksize(attr); |
f1a1da6c A |
430 | ret = 0; |
431 | } | |
432 | return ret; | |
433 | } | |
434 | ||
435 | int | |
436 | pthread_attr_setstacksize(pthread_attr_t *attr, size_t stacksize) | |
437 | { | |
c1f56ec9 A |
438 | #if TARGET_OS_OSX |
439 | // If the caller is doing something reasonable, help them out. | |
440 | if (stacksize % 0x1000 == 0) { | |
441 | stacksize = round_page(stacksize); | |
442 | } | |
443 | #endif // TARGET_OS_OSX | |
444 | ||
f1a1da6c A |
445 | int ret = EINVAL; |
446 | if (attr->sig == _PTHREAD_ATTR_SIG && | |
c1f56ec9 | 447 | ((stacksize & vm_page_mask) == 0) && |
214d78a2 A |
448 | stacksize >= PTHREAD_STACK_MIN) { |
449 | attr->stacksize = stacksize; | |
450 | ret = 0; | |
451 | } | |
452 | return ret; | |
453 | } | |
454 | ||
455 | int | |
456 | pthread_attr_getstack(const pthread_attr_t *attr, void **stackaddr, size_t * stacksize) | |
457 | { | |
458 | int ret = EINVAL; | |
459 | if (attr->sig == _PTHREAD_ATTR_SIG) { | |
460 | *stackaddr = (void *)((uintptr_t)attr->stackaddr - attr->stacksize); | |
461 | *stacksize = _pthread_attr_stacksize(attr); | |
462 | ret = 0; | |
463 | } | |
464 | return ret; | |
465 | } | |
466 | ||
467 | // Per SUSv3, the stackaddr is the base address, the lowest addressable byte | |
468 | // address. This is not the same as in pthread_attr_setstackaddr. | |
469 | int | |
470 | pthread_attr_setstack(pthread_attr_t *attr, void *stackaddr, size_t stacksize) | |
471 | { | |
472 | int ret = EINVAL; | |
473 | if (attr->sig == _PTHREAD_ATTR_SIG && | |
c1f56ec9 A |
474 | (((mach_vm_address_t)stackaddr & vm_page_mask) == 0) && |
475 | ((stacksize & vm_page_mask) == 0) && | |
214d78a2 A |
476 | stacksize >= PTHREAD_STACK_MIN) { |
477 | attr->stackaddr = (void *)((uintptr_t)stackaddr + stacksize); | |
f1a1da6c A |
478 | attr->stacksize = stacksize; |
479 | ret = 0; | |
480 | } | |
214d78a2 A |
481 | return ret; |
482 | } | |
483 | ||
484 | int | |
485 | pthread_attr_setguardsize(pthread_attr_t *attr, size_t guardsize) | |
486 | { | |
c1f56ec9 A |
487 | #if TARGET_OS_OSX |
488 | // If the caller is doing something reasonable, help them out. | |
489 | if (guardsize % 0x1000 == 0) { | |
490 | guardsize = round_page(guardsize); | |
491 | } | |
492 | #endif // TARGET_OS_OSX | |
493 | ||
214d78a2 | 494 | int ret = EINVAL; |
c1f56ec9 A |
495 | if (attr->sig == _PTHREAD_ATTR_SIG && |
496 | (guardsize & vm_page_mask) == 0) { | |
214d78a2 A |
497 | /* Guardsize of 0 is valid, means no guard */ |
498 | attr->defaultguardpage = false; | |
499 | attr->guardsize = guardsize; | |
500 | ret = 0; | |
501 | } | |
502 | return ret; | |
503 | } | |
504 | ||
505 | static inline size_t | |
506 | _pthread_attr_guardsize(const pthread_attr_t *attr) | |
507 | { | |
508 | return attr->defaultguardpage ? vm_page_size : attr->guardsize; | |
509 | } | |
510 | ||
511 | int | |
512 | pthread_attr_getguardsize(const pthread_attr_t *attr, size_t *guardsize) | |
513 | { | |
514 | int ret = EINVAL; | |
515 | if (attr->sig == _PTHREAD_ATTR_SIG) { | |
516 | *guardsize = _pthread_attr_guardsize(attr); | |
517 | ret = 0; | |
518 | } | |
519 | return ret; | |
520 | } | |
521 | ||
522 | int | |
523 | pthread_attr_setcpupercent_np(pthread_attr_t *attr, int percent, | |
524 | unsigned long refillms) | |
525 | { | |
526 | int ret = EINVAL; | |
527 | if (attr->sig == _PTHREAD_ATTR_SIG && percent < UINT8_MAX && | |
528 | refillms < _PTHREAD_ATTR_REFILLMS_MAX && attr->policyset && | |
529 | _PTHREAD_POLICY_IS_FIXEDPRI(attr->policy)) { | |
530 | attr->cpupercent = percent; | |
531 | attr->refillms = (uint32_t)(refillms & 0x00ffffff); | |
532 | attr->cpupercentset = 1; | |
533 | ret = 0; | |
534 | } | |
535 | return ret; | |
536 | } | |
537 | ||
538 | #pragma mark pthread lifetime | |
539 | ||
// Allocate a thread structure, stack and guard page.
//
// The thread structure may optionally be placed in the same allocation as the
// stack, residing above the top of the stack. This cannot be done if a
// custom stack address is provided.
//
// Similarly the guard page cannot be allocated if a custom stack address is
// provided.
//
// The allocated thread structure is initialized with values that indicate how
// it should be freed.
//
// Returns the new pthread_t and stores the usable top-of-stack address in
// *stack; returns NULL (and *stack = NULL) if the VM allocation fails.
static pthread_t
_pthread_allocate(const pthread_attr_t *attrs, void **stack,
		bool from_mach_thread)
{
	mach_vm_address_t allocaddr = __pthread_stack_hint;
	size_t allocsize, guardsize, stacksize, pthreadoff;
	kern_return_t kr;
	pthread_t t;

	// A nonzero stack size below the minimum is a caller bug; crash rather
	// than create a thread with an unusable stack.
	if (os_unlikely(attrs->stacksize != 0 &&
			attrs->stacksize < PTHREAD_STACK_MIN)) {
		PTHREAD_CLIENT_CRASH(attrs->stacksize, "Stack size in attrs is too small");
	}

	if (os_unlikely((mach_vm_address_t)attrs->stackaddr & vm_page_mask)) {
		PTHREAD_CLIENT_CRASH(attrs->stackaddr, "Unaligned stack addr in attrs");
	}

	// Allocate a pthread structure if necessary

	if (attrs->stackaddr != NULL) {
		// Custom stack: we only allocate room for the pthread_t itself,
		// no guard page, and the structure sits at the allocation base.
		allocsize = PTHREAD_SIZE;
		guardsize = 0;
		pthreadoff = 0;
		// <rdar://problem/42588315> if the attrs struct specifies a custom
		// stack address but not a custom size, using ->stacksize here instead
		// of _pthread_attr_stacksize stores stacksize as zero, indicating
		// that the stack size is unknown.
		stacksize = attrs->stacksize;
	} else {
		// Layout (low to high): guard page | stack | pthread_t structure.
		guardsize = _pthread_attr_guardsize(attrs);
		stacksize = _pthread_attr_stacksize(attrs) + PTHREAD_T_OFFSET;
		pthreadoff = stacksize + guardsize;
		allocsize = pthreadoff + PTHREAD_SIZE;
		allocsize = mach_vm_round_page(allocsize);
	}

	kr = mach_vm_map(mach_task_self(), &allocaddr, allocsize, vm_page_size - 1,
			VM_MAKE_TAG(VM_MEMORY_STACK)| VM_FLAGS_ANYWHERE, MEMORY_OBJECT_NULL,
			0, FALSE, VM_PROT_DEFAULT, VM_PROT_ALL, VM_INHERIT_DEFAULT);

	if (kr != KERN_SUCCESS) {
		// Fall back to an unhinted allocation anywhere in the address space.
		kr = mach_vm_allocate(mach_task_self(), &allocaddr, allocsize,
				VM_MAKE_TAG(VM_MEMORY_STACK)| VM_FLAGS_ANYWHERE);
	} else if (__syscall_logger && !from_mach_thread) {
		// libsyscall will not output malloc stack logging events when
		// VM_MEMORY_STACK is passed in to facilitate mach thread promotion.
		// To avoid losing the stack traces for normal p-thread create
		// operations, libpthread must pretend to be the vm syscall and log
		// the allocations. <rdar://36418708>
		int eventTypeFlags = stack_logging_type_vm_allocate |
				stack_logging_type_mapped_file_or_shared_mem;
		__syscall_logger(eventTypeFlags | VM_MAKE_TAG(VM_MEMORY_STACK),
				(uintptr_t)mach_task_self(), (uintptr_t)allocsize, 0,
				(uintptr_t)allocaddr, 0);
	}

	if (kr != KERN_SUCCESS) {
		*stack = NULL;
		return NULL;
	} else if (__syscall_logger && !from_mach_thread) {
		// libsyscall will not output malloc stack logging events when
		// VM_MEMORY_STACK is passed in to facilitate mach thread promotion.
		// To avoid losing the stack traces for normal p-thread create
		// operations, libpthread must pretend to be the vm syscall and log
		// the allocations. <rdar://36418708>
		int eventTypeFlags = stack_logging_type_vm_allocate;
		__syscall_logger(eventTypeFlags | VM_MAKE_TAG(VM_MEMORY_STACK),
				(uintptr_t)mach_task_self(), (uintptr_t)allocsize, 0,
				(uintptr_t)allocaddr, 0);
	}

	// The stack grows down.
	// Set the guard page at the lowest address of the
	// newly allocated stack. Return the highest address
	// of the stack.
	if (guardsize) {
		(void)mach_vm_protect(mach_task_self(), allocaddr, guardsize,
				FALSE, VM_PROT_NONE);
	}

	// Thread structure resides at the top of the stack (when using a
	// custom stack, allocsize == PTHREAD_SIZE, so places the pthread_t
	// at allocaddr).
	t = (pthread_t)(allocaddr + pthreadoff);
	if (attrs->stackaddr) {
		*stack = attrs->stackaddr;
	} else {
		// Top of stack == address of the pthread_t structure itself.
		*stack = t;
	}

	_pthread_struct_init(t, attrs, *stack, stacksize, allocaddr, allocsize);
	return t;
}
646 | ||
// Frees the VM allocation (structure and/or stack remainder, per the
// freeaddr/freesize recorded in the pthread_t) for a dead thread.
// The main thread's storage is never freed. A failed deallocation is a
// fatal internal error.
OS_NOINLINE
void
_pthread_deallocate(pthread_t t, bool from_mach_thread)
{
	kern_return_t ret;

	// Don't free the main thread.
	if (t != main_thread()) {
		if (!from_mach_thread) { // see __pthread_add_thread
			_pthread_introspection_thread_destroy(t);
		}
		ret = mach_vm_deallocate(mach_task_self(), t->freeaddr, t->freesize);
		if (ret != KERN_SUCCESS) {
			PTHREAD_INTERNAL_CRASH(ret, "Unable to deallocate stack");
		}
	}
}
664 | ||
214d78a2 A |
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wreturn-stack-address"

// Returns an address inside the caller's current stack frame, used to find
// the live edge of the stack (e.g. when trimming the main thread's stack at
// exit). Deliberately returns the address of a local — hence the suppressed
// -Wreturn-stack-address warning and the OS_NOINLINE, which keeps the frame
// distinct from the caller's.
OS_NOINLINE
static void*
_pthread_current_stack_address(void)
{
	int a;
	return &a;
}

#pragma clang diagnostic pop
677 | ||
c1f56ec9 | 678 | static void |
214d78a2 | 679 | _pthread_joiner_wake(pthread_t thread) |
f1a1da6c | 680 | { |
214d78a2 A |
681 | uint32_t *exit_gate = &thread->tl_exit_gate; |
682 | ||
683 | for (;;) { | |
684 | int ret = __ulock_wake(UL_UNFAIR_LOCK | ULF_NO_ERRNO, exit_gate, 0); | |
685 | if (ret == 0 || ret == -ENOENT) { | |
686 | return; | |
687 | } | |
688 | if (ret != -EINTR) { | |
689 | PTHREAD_INTERNAL_CRASH(-ret, "pthread_join() wake failure"); | |
690 | } | |
f1a1da6c | 691 | } |
f1a1da6c A |
692 | } |
693 | ||
c1f56ec9 A |
694 | static void |
695 | _pthread_dealloc_reply_port(pthread_t self) | |
696 | { | |
697 | mach_port_t port = _pthread_tsd_slot(self, MIG_REPLY); | |
698 | if (port != MACH_PORT_NULL) { | |
699 | // this will also set the TSD to MACH_PORT_NULL | |
700 | mig_dealloc_reply_port(port); | |
701 | } | |
702 | } | |
703 | ||
704 | static void | |
705 | _pthread_dealloc_special_reply_port(pthread_t self) | |
706 | { | |
707 | mach_port_t port = _pthread_tsd_slot(self, MACH_SPECIAL_REPLY); | |
708 | if (port != MACH_PORT_NULL) { | |
709 | _pthread_tsd_slot(self, MACH_SPECIAL_REPLY) = MACH_PORT_NULL; | |
710 | thread_destruct_special_reply_port(port, THREAD_SPECIAL_REPLY_PORT_ALL); | |
711 | } | |
712 | } | |
713 | ||
// Terminates the thread if called from the currently running thread.
// Tears down per-thread Mach ports, publishes the exit value for
// pthread_join(), decides (racing with a concurrent joiner) who frees the
// pthread_t, and finally asks the kernel to reclaim the stack and thread
// port in one shot via __bsdthread_terminate(). Never returns.
OS_NORETURN OS_NOINLINE OS_NOT_TAIL_CALLED
static void
_pthread_terminate(pthread_t t, void *exit_value)
{
	_pthread_introspection_thread_terminate(t);

	uintptr_t freeaddr = (uintptr_t)t->freeaddr;
	size_t freesize = t->freesize;
	bool should_exit;

	// the size of just the stack
	size_t freesize_stack = t->freesize;

	// We usually pass our structure+stack to bsdthread_terminate to free, but
	// if we get told to keep the pthread_t structure around then we need to
	// adjust the free size and addr in the pthread_t to just refer to the
	// structure and not the stack. If we do end up deallocating the
	// structure, this is useless work since no one can read the result, but we
	// can't do it after the call to pthread_remove_thread because it isn't
	// safe to dereference t after that.
	if ((void*)t > t->freeaddr && (void*)t < t->freeaddr + t->freesize){
		// Check to ensure the pthread structure itself is part of the
		// allocation described by freeaddr/freesize, in which case we split and
		// only deallocate the area below the pthread structure. In the event of a
		// custom stack, the freeaddr/size will be the pthread structure itself, in
		// which case we shouldn't free anything (the final else case).
		freesize_stack = trunc_page((uintptr_t)t - (uintptr_t)freeaddr);

		// describe just the remainder for deallocation when the pthread_t goes away
		t->freeaddr += freesize_stack;
		t->freesize -= freesize_stack;
	} else if (t == main_thread()) {
		// Main thread: free the stack below the currently live frame only.
		freeaddr = t->stackaddr - pthread_get_stacksize_np(t);
		uintptr_t stackborder = trunc_page((uintptr_t)_pthread_current_stack_address());
		freesize_stack = stackborder - freeaddr;
	} else {
		freesize_stack = 0;
	}

	mach_port_t kport = _pthread_tsd_slot(t, MACH_THREAD_SELF);
	bool keep_thread_struct = false, needs_wake = false;
	semaphore_t custom_stack_sema = MACH_PORT_NULL;

	_pthread_dealloc_special_reply_port(t);
	_pthread_dealloc_reply_port(t);

	_pthread_lock_lock(&_pthread_list_lock);

	// This piece of code interacts with pthread_join. It will always:
	// - set tl_exit_gate to MACH_PORT_DEAD (thread exited)
	// - set tl_exit_value to the value passed to pthread_exit()
	// - decrement _pthread_count, so that we can exit the process when all
	//   threads exited even if not all of them were joined.
	t->tl_exit_gate = MACH_PORT_DEAD;
	t->tl_exit_value = exit_value;
	should_exit = (--_pthread_count <= 0);

	// If we see a joiner, we prepost that the join has to succeed,
	// and the joiner is committed to finish (even if it was canceled)
	if (t->tl_join_ctx) {
		custom_stack_sema = _pthread_joiner_prepost_wake(t); // unsets tl_joinable
		needs_wake = true;
	}

	// Joinable threads that have no joiner yet are kept on the thread list
	// so that pthread_join() can later discover the thread when it is joined,
	// and will have to do the pthread_t cleanup.
	if (t->tl_joinable) {
		t->tl_joiner_cleans_up = keep_thread_struct = true;
	} else {
		TAILQ_REMOVE(&__pthread_head, t, tl_plist);
	}

	_pthread_lock_unlock(&_pthread_list_lock);

	if (needs_wake) {
		// When we found a waiter, we want to drop the very contended list lock
		// before we do the syscall in _pthread_joiner_wake(). Then, we decide
		// who gets to cleanup the pthread_t between the joiner and the exiting
		// thread:
		// - the joiner tries to set tl_join_ctx to NULL
		// - the exiting thread tries to set tl_joiner_cleans_up to true
		// Whoever does it first commits the other guy to cleanup the pthread_t
		_pthread_joiner_wake(t);
		_pthread_lock_lock(&_pthread_list_lock);
		if (t->tl_join_ctx) {
			t->tl_joiner_cleans_up = true;
			keep_thread_struct = true;
		}
		_pthread_lock_unlock(&_pthread_list_lock);
	}

	//
	// /!\ dereferencing `t` past this point is not safe /!\
	//

	if (keep_thread_struct || t == main_thread()) {
		// Use the adjusted freesize of just the stack that we computed above.
		freesize = freesize_stack;
	} else {
		_pthread_introspection_thread_destroy(t);
	}

	// Check if there is nothing to free because the thread has a custom
	// stack allocation and is joinable.
	if (freesize == 0) {
		freeaddr = 0;
	}
	if (should_exit) {
		// Last thread standing: exit the whole process instead.
		exitf(0);
	}
	__bsdthread_terminate((void *)freeaddr, freesize, kport, custom_stack_sema);
	PTHREAD_INTERNAL_CRASH(t, "thread didn't terminate");
}
829 | ||
// Shim in front of _pthread_terminate() that makes sure the final teardown
// does not execute on the same page as the pthread_t itself (which a racing
// joiner may deallocate). Never returns.
OS_NORETURN
static void
_pthread_terminate_invoke(pthread_t t, void *exit_value)
{
#if PTHREAD_T_OFFSET
	void *p = NULL;
	// <rdar://problem/25688492> During pthread termination there is a race
	// between pthread_join and pthread_terminate; if the joiner is responsible
	// for cleaning up the pthread_t struct, then it may destroy some part of the
	// stack with it on 16k OSes. So that this doesn't cause _pthread_terminate()
	// to crash because its stack has been removed from under its feet, just make
	// sure termination happens in a part of the stack that is not on the same
	// page as the pthread_t.
	if (trunc_page((uintptr_t)__builtin_frame_address(0)) ==
			trunc_page((uintptr_t)t)) {
		p = alloca(PTHREAD_T_OFFSET);
	}
	// And this __asm__ volatile is needed to stop the compiler from optimising
	// away the alloca() completely.
	__asm__ volatile ("" : : "r"(p) );
#endif
	_pthread_terminate(t, exit_value);
}
853 | ||
214d78a2 | 854 | #pragma mark pthread start / body |
f1a1da6c | 855 | |
// First function run on a newly created thread, entered from the kernel.
// Validates the kernel-provided state, then runs the user start routine and
// funnels its return value into _pthread_exit(). Does not return.
void
_pthread_start(pthread_t self, mach_port_t kport,
		__unused void *(*fun)(void *), __unused void *arg,
		__unused size_t stacksize, unsigned int pflags)
{
	// PTHREAD_START_SUSPENDED must have been consumed by the kernel already.
	if (os_unlikely(pflags & PTHREAD_START_SUSPENDED)) {
		PTHREAD_INTERNAL_CRASH(pflags,
				"kernel without PTHREAD_START_SUSPENDED support");
	}
	if (os_unlikely((pflags & PTHREAD_START_TSD_BASE_SET) == 0)) {
		PTHREAD_INTERNAL_CRASH(pflags,
				"thread_set_tsd_base() wasn't called by the kernel");
	}
	PTHREAD_DEBUG_ASSERT(MACH_PORT_VALID(kport));
	PTHREAD_DEBUG_ASSERT(_pthread_tsd_slot(self, MACH_THREAD_SELF) == kport);
	_pthread_validate_signature(self);
	_pthread_markcancel_if_canceled(self, kport);

	_pthread_set_self_internal(self);
	__pthread_started_thread(self);
	// fun/arg parameters are unused: the canonical copies live in self.
	_pthread_exit(self, (self->fun)(self->arg));
}
878 | ||
// Initialize a freshly allocated pthread_t from its attributes and the
// stack/allocation geometry chosen by the caller. `stackaddr` is the stack
// top; `freeaddr`/`freesize` describe the region to vm_deallocate at exit.
OS_ALWAYS_INLINE
static inline void
_pthread_struct_init(pthread_t t, const pthread_attr_t *attrs,
		void *stackaddr, size_t stacksize, void *freeaddr, size_t freesize)
{
	_pthread_init_signature(t);
	_pthread_tsd_slot(t, PTHREAD_SELF) = t;
	_pthread_tsd_slot(t, ERRNO) = &t->err_no;
	// An explicit sched policy (schedset) overrides any QoS class.
	if (attrs->schedset == 0) {
		_pthread_tsd_slot(t, PTHREAD_QOS_CLASS) = attrs->qosclass;
	} else {
		_pthread_tsd_slot(t, PTHREAD_QOS_CLASS) =
				_pthread_unspecified_priority();
	}
	_pthread_tsd_slot(t, PTR_MUNGE) = _pthread_ptr_munge_token;
	t->tl_has_custom_stack = (attrs->stackaddr != NULL);

	_pthread_lock_init(&t->lock);

	// Stack grows down: bottom = top - size.
	t->stackaddr = stackaddr;
	t->stackbottom = stackaddr - stacksize;
	t->freeaddr = freeaddr;
	t->freesize = freesize;

	t->guardsize = _pthread_attr_guardsize(attrs);
	t->tl_joinable = (attrs->detached == PTHREAD_CREATE_JOINABLE);
	t->inherit = attrs->inherit;
	t->tl_policy = attrs->policy;
	t->schedset = attrs->schedset;
	_pthread_attr_get_schedparam(attrs, &t->tl_param);
	t->cancel_state = PTHREAD_CANCEL_ENABLE | PTHREAD_CANCEL_DEFERRED;
}
911 | ||
214d78a2 A |
912 | #pragma mark pthread public interface |
913 | ||
/* Non-portable public API to know whether this process has (or had) at least
 * one thread apart from the main thread. There could be a race if a thread is
 * in the process of being created at the time of the call. It does not tell
 * whether there is more than one thread at this point in time.
 */
int
pthread_is_threaded_np(void)
{
	return __is_threaded;
}
924 | ||
925 | mach_port_t | |
926 | pthread_mach_thread_np(pthread_t t) | |
927 | { | |
928 | mach_port_t kport = MACH_PORT_NULL; | |
214d78a2 | 929 | (void)_pthread_is_valid(t, &kport); |
f1a1da6c A |
930 | return kport; |
931 | } | |
932 | ||
// Reverse lookup: walk the global thread list under the list lock and return
// the pthread_t whose Mach port matches `kernel_thread`, or NULL if none.
pthread_t
pthread_from_mach_thread_np(mach_port_t kernel_thread)
{
	pthread_t p = NULL;

	/* No need to wait as mach port is already known */
	_pthread_lock_lock(&_pthread_list_lock);

	TAILQ_FOREACH(p, &__pthread_head, tl_plist) {
		if (_pthread_tsd_slot(p, MACH_THREAD_SELF) == kernel_thread) {
			break;
		}
	}

	_pthread_lock_unlock(&_pthread_list_lock);

	// NULL here when the loop ran off the end of the list.
	return p;
}
951 | ||
952 | size_t | |
953 | pthread_get_stacksize_np(pthread_t t) | |
954 | { | |
f1a1da6c A |
955 | size_t size = 0; |
956 | ||
957 | if (t == NULL) { | |
958 | return ESRCH; // XXX bug? | |
959 | } | |
2546420a | 960 | |
c6e5f90c | 961 | #if TARGET_OS_OSX |
2546420a A |
962 | // The default rlimit based allocations will be provided with a stacksize |
963 | // of the current limit and a freesize of the max. However, custom | |
964 | // allocations will just have the guard page to free. If we aren't in the | |
965 | // latter case, call into rlimit to determine the current stack size. In | |
966 | // the event that the current limit == max limit then we'll fall down the | |
967 | // fast path, but since it's unlikely that the limit is going to be lowered | |
968 | // after it's been change to the max, we should be fine. | |
969 | // | |
970 | // Of course, on arm rlim_cur == rlim_max and there's only the one guard | |
971 | // page. So, we can skip all this there. | |
c6e5f90c A |
972 | if (t == main_thread()) { |
973 | size_t stacksize = t->stackaddr - t->stackbottom; | |
974 | ||
975 | if (stacksize + vm_page_size != t->freesize) { | |
976 | // We want to call getrlimit() just once, as it's relatively | |
977 | // expensive | |
978 | static size_t rlimit_stack; | |
a0619f9c | 979 | |
c6e5f90c A |
980 | if (rlimit_stack == 0) { |
981 | struct rlimit limit; | |
982 | int ret = getrlimit(RLIMIT_STACK, &limit); | |
a0619f9c | 983 | |
c6e5f90c A |
984 | if (ret == 0) { |
985 | rlimit_stack = (size_t) limit.rlim_cur; | |
986 | } | |
2546420a | 987 | } |
a0619f9c | 988 | |
c6e5f90c A |
989 | if (rlimit_stack == 0 || rlimit_stack > t->freesize) { |
990 | return stacksize; | |
991 | } else { | |
992 | return round_page(rlimit_stack); | |
993 | } | |
2546420a A |
994 | } |
995 | } | |
c6e5f90c | 996 | #endif /* TARGET_OS_OSX */ |
2546420a | 997 | |
214d78a2 | 998 | if (t == pthread_self() || t == main_thread()) { |
c6e5f90c | 999 | size = t->stackaddr - t->stackbottom;; |
214d78a2 | 1000 | goto out; |
f1a1da6c A |
1001 | } |
1002 | ||
214d78a2 | 1003 | if (_pthread_validate_thread_and_list_lock(t)) { |
c6e5f90c | 1004 | size = t->stackaddr - t->stackbottom;; |
c1f56ec9 | 1005 | _pthread_lock_unlock(&_pthread_list_lock); |
f1a1da6c A |
1006 | } |
1007 | ||
214d78a2 A |
1008 | out: |
1009 | // <rdar://problem/42588315> binary compatibility issues force us to return | |
1010 | // DEFAULT_STACK_SIZE here when we do not know the size of the stack | |
1011 | return size ? size : DEFAULT_STACK_SIZE; | |
f1a1da6c A |
1012 | } |
1013 | ||
// Return the (top) stack address of `t`. For an invalid thread this returns
// ESRCH cast to a pointer — a historical quirk preserved for compatibility.
void *
pthread_get_stackaddr_np(pthread_t t)
{
	// since the main thread will not get de-allocated from underneath us
	if (t == pthread_self() || t == main_thread()) {
		return t->stackaddr;
	}

	if (!_pthread_validate_thread_and_list_lock(t)) {
		return (void *)(uintptr_t)ESRCH; // XXX bug?
	}

	// Read under the list lock so `t` cannot be torn down mid-access.
	void *addr = t->stackaddr;
	_pthread_lock_unlock(&_pthread_list_lock);
	return addr;
}
1030 | ||
f1a1da6c A |
1031 | pthread_t |
1032 | pthread_main_thread_np(void) | |
1033 | { | |
214d78a2 | 1034 | return main_thread(); |
f1a1da6c A |
1035 | } |
1036 | ||
/* returns non-zero if the current thread is the main thread */
int
pthread_main_np(void)
{
	return (pthread_self() == main_thread()) ? 1 : 0;
}
1043 | ||
c6e5f90c A |
// Slow path for pthread_threadid_np(): ask the kernel for the thread's
// unique ID via thread_info() and cache it back into the pthread_t.
// Returns 0 on success, EINVAL if the kernel call fails or reports TID 0.
static int
_pthread_threadid_slow(pthread_t thread, uint64_t *thread_id)
{
	unsigned int info_count = THREAD_IDENTIFIER_INFO_COUNT;
	mach_port_t thport = _pthread_tsd_slot(thread, MACH_THREAD_SELF);
	struct thread_identifier_info info;
	kern_return_t kr;

	kr = thread_info(thport, THREAD_IDENTIFIER_INFO,
			(thread_info_t)&info, &info_count);
	if (kr == KERN_SUCCESS && info.thread_id) {
		*thread_id = info.thread_id;
		// Cache so future lookups take the fast path; wide store needed on
		// 32-bit where a 64-bit store is not a single instruction.
#if __LP64__
		os_atomic_store(&thread->thread_id, info.thread_id, relaxed);
#else
		os_atomic_store_wide(&thread->thread_id, info.thread_id, relaxed);
#endif
		return 0;
	}
	return EINVAL;
}
1065 | ||
214d78a2 A |
/*
 * if we are passed in a pthread_t that is NULL, then we return the current
 * thread's thread_id. So folks don't have to call pthread_self, in addition to
 * us doing it, if they just want their thread_id.
 */
int
pthread_threadid_np(pthread_t thread, uint64_t *thread_id)
{
	int res = 0;
	pthread_t self = pthread_self();

	if (thread_id == NULL) {
		return EINVAL;
	}

	if (thread == NULL || thread == self) {
		// Own TID is stable once set; no lock needed.
		*thread_id = self->thread_id;
	} else if (!_pthread_validate_thread_and_list_lock(thread)) {
		res = ESRCH;
	} else {
#if __LP64__
		*thread_id = os_atomic_load(&thread->thread_id, relaxed);
#else
		*thread_id = os_atomic_load_wide(&thread->thread_id, relaxed);
#endif
		if (os_unlikely(*thread_id == 0)) {
			// there is a race at init because the thread sets its own TID.
			// correct this by asking mach
			res = _pthread_threadid_slow(thread, thread_id);
		}
		_pthread_lock_unlock(&_pthread_list_lock);
	}
	return res;
}
1100 | ||
c1f56ec9 A |
// Report the CPU number the calling thread is currently running on.
// Returns 0 on success; EINVAL (also stored in errno) for a NULL out-pointer.
int
pthread_cpu_number_np(size_t *cpu_id)
{
	// Validate the out-parameter before touching it.
	if (!cpu_id) {
		errno = EINVAL;
		return EINVAL;
	}
	*cpu_id = _os_cpu_number();
	return 0;
}
1112 | ||
f1a1da6c A |
// Copy `thread`'s name into `threadname` (truncated to `len` bytes,
// always NUL-terminated by strlcpy). Returns 0 on success, ESRCH if the
// thread is not valid.
int
pthread_getname_np(pthread_t thread, char *threadname, size_t len)
{
	// Fast path: our own name cannot disappear, no lock needed.
	if (thread == pthread_self()) {
		strlcpy(threadname, thread->pthread_name, len);
		return 0;
	}

	if (!_pthread_validate_thread_and_list_lock(thread)) {
		return ESRCH;
	}

	// Copy under the list lock so the thread cannot exit mid-read.
	strlcpy(threadname, thread->pthread_name, len);
	_pthread_lock_unlock(&_pthread_list_lock);
	return 0;
}
1129 | ||
// Set the calling thread's name. The kernel is informed first via
// __proc_info; only on success is the local cached copy updated.
// NOTE(review): the literal arguments 5 and 2 appear to select the
// setcontrol/name operation of __proc_info — confirm against the
// proc_info call numbers before changing.
int
pthread_setname_np(const char *name)
{
	int res;
	pthread_t self = pthread_self();

	size_t len = 0;
	if (name != NULL) {
		len = strlen(name);
	}

	_pthread_validate_signature(self);

	res = __proc_info(5, getpid(), 2, (uint64_t)0, (void*)name, (int)len);
	if (res == 0) {
		if (len > 0) {
			// strlcpy truncates to MAXTHREADNAMESIZE and NUL-terminates.
			strlcpy(self->pthread_name, name, MAXTHREADNAMESIZE);
		} else {
			// NULL/empty name clears the cached copy.
			bzero(self->pthread_name, MAXTHREADNAMESIZE);
		}
	}
	return res;

}
1154 | ||
6dae708f A |
1155 | #if TARGET_OS_OSX |
1156 | ||
c1f56ec9 A |
// Toggle the calling thread's JIT region between executable (enable != 0,
// RX) and writable (enable == 0, RW) mappings.
void
pthread_jit_write_protect_np(int enable)
{
	// Silently a no-op on hardware/kernels without per-thread RWX restriction.
	if (!os_thread_self_restrict_rwx_is_supported()) {
		return;
	}

	enable ? os_thread_self_restrict_rwx_to_rx()
			: os_thread_self_restrict_rwx_to_rw();
}
1170 | ||
// Report whether per-thread JIT write protection is available.
// Fix: declare with (void) — an empty parameter list in C is an old-style
// (unprototyped) declaration, not "no arguments".
int
pthread_jit_write_protect_supported_np(void)
{
	return os_thread_self_restrict_rwx_is_supported();
}
f1a1da6c | 1175 | |
6dae708f A |
1176 | #endif // TARGET_OS_OSX |
1177 | ||
c1f56ec9 A |
1178 | OS_ALWAYS_INLINE |
1179 | static inline void | |
1180 | __pthread_add_thread(pthread_t t, mach_port_t self, bool from_mach_thread) | |
1181 | { | |
1182 | _pthread_lock_lock(&_pthread_list_lock, self); | |
214d78a2 A |
1183 | TAILQ_INSERT_TAIL(&__pthread_head, t, tl_plist); |
1184 | _pthread_count++; | |
c1f56ec9 | 1185 | _pthread_lock_unlock(&_pthread_list_lock, self); |
f1a1da6c | 1186 | |
214d78a2 A |
1187 | if (!from_mach_thread) { |
1188 | // PR-26275485: Mach threads will likely crash trying to run | |
1189 | // introspection code. Since the fall out from the introspection | |
1190 | // code not seeing the injected thread is likely less than crashing | |
1191 | // in the introspection code, just don't make the call. | |
1192 | _pthread_introspection_thread_create(t); | |
f1a1da6c | 1193 | } |
214d78a2 | 1194 | } |
f1a1da6c | 1195 | |
// Roll back __pthread_add_thread() when thread creation fails: unlink the
// thread and restore the live-thread count, under the list lock.
OS_ALWAYS_INLINE
static inline void
__pthread_undo_add_thread(pthread_t t, mach_port_t self)
{
	_pthread_lock_lock(&_pthread_list_lock, self);
	TAILQ_REMOVE(&__pthread_head, t, tl_plist);
	_pthread_count--;
	_pthread_lock_unlock(&_pthread_list_lock, self);
}
1205 | ||
// Called once a new thread is actually running: sanity-check its Mach port
// and notify the introspection hooks.
OS_ALWAYS_INLINE
static inline void
__pthread_started_thread(pthread_t t)
{
	mach_port_t kport = _pthread_tsd_slot(t, MACH_THREAD_SELF);
	if (os_unlikely(!MACH_PORT_VALID(kport))) {
		// An invalid port at this point means port allocation failed,
		// typically due to a port leak elsewhere in the process.
		PTHREAD_CLIENT_CRASH(kport,
				"Unable to allocate thread port, possible port leak");
	}
	_pthread_introspection_thread_start(t);
}
1217 | ||
214d78a2 A |
// Flag bits for _pthread_create()'s create_flags parameter.
#define _PTHREAD_CREATE_NONE 0x0
#define _PTHREAD_CREATE_FROM_MACH_THREAD 0x1
#define _PTHREAD_CREATE_SUSPENDED 0x2
1221 | ||
// Common implementation behind pthread_create() and its variants.
// Allocates the pthread_t + stack, links it into the thread list, then asks
// the kernel to start it with __bsdthread_create(). On kernel failure the
// bookkeeping is rolled back and EAGAIN is returned.
static int
_pthread_create(pthread_t *thread, const pthread_attr_t *attrs,
		void *(*start_routine)(void *), void *arg, unsigned int create_flags)
{
	pthread_t t = NULL;
	void *stack = NULL;
	bool from_mach_thread = (create_flags & _PTHREAD_CREATE_FROM_MACH_THREAD);
	mach_port_t self_kport;
	int rc = 0;

	if (attrs == NULL) {
		attrs = &_pthread_attr_default;
	} else if (attrs->sig != _PTHREAD_ATTR_SIG) {
		return EINVAL;
	}

	// Translate pthread attributes into kernel start flags.
	unsigned int flags = PTHREAD_START_CUSTOM;
	if (attrs->schedset != 0) {
		struct sched_param p;
		_pthread_attr_get_schedparam(attrs, &p);
		flags |= PTHREAD_START_SETSCHED;
		flags |= ((attrs->policy & PTHREAD_START_POLICY_MASK) << PTHREAD_START_POLICY_BITSHIFT);
		flags |= (p.sched_priority & PTHREAD_START_IMPORTANCE_MASK);
	} else if (attrs->qosclass != 0) {
		flags |= PTHREAD_START_QOSCLASS;
		flags |= (attrs->qosclass & PTHREAD_START_QOSCLASS_MASK);
	}
	if (create_flags & _PTHREAD_CREATE_SUSPENDED) {
		flags |= PTHREAD_START_SUSPENDED;
	}

	__is_threaded = 1;

	t = _pthread_allocate(attrs, &stack, from_mach_thread);
	if (t == NULL) {
		return EAGAIN;
	}

	// A bare Mach thread cannot use the cached self port; fetch it (and
	// deallocate the extra reference below).
	if (os_unlikely(from_mach_thread)) {
		self_kport = mach_thread_self();
	} else {
		self_kport = _pthread_mach_thread_self_direct();
	}

	t->arg = arg;
	t->fun = start_routine;
	__pthread_add_thread(t, self_kport, from_mach_thread);

	if (__bsdthread_create(start_routine, arg, stack, t, flags) ==
			(pthread_t)-1) {
		if (errno == EMFILE) {
			PTHREAD_CLIENT_CRASH(0,
					"Unable to allocate thread port, possible port leak");
		}
		// Undo list insertion and free the pthread_t/stack we allocated.
		__pthread_undo_add_thread(t, self_kport);
		_pthread_deallocate(t, from_mach_thread);
		t = NULL;
		rc = EAGAIN;
	}
	if (from_mach_thread) {
		mach_port_deallocate(mach_task_self(), self_kport);
	}

	// n.b. if a thread is created detached and exits, t will be invalid
	*thread = t;
	return rc;
}
1289 | ||
2546420a | 1290 | int |
214d78a2 A |
1291 | pthread_create(pthread_t *thread, const pthread_attr_t *attr, |
1292 | void *(*start_routine)(void *), void *arg) | |
2546420a | 1293 | { |
214d78a2 A |
1294 | unsigned int flags = _PTHREAD_CREATE_NONE; |
1295 | return _pthread_create(thread, attr, start_routine, arg, flags); | |
2546420a A |
1296 | } |
1297 | ||
1298 | int | |
214d78a2 A |
1299 | pthread_create_from_mach_thread(pthread_t *thread, const pthread_attr_t *attr, |
1300 | void *(*start_routine)(void *), void *arg) | |
2546420a | 1301 | { |
214d78a2 A |
1302 | unsigned int flags = _PTHREAD_CREATE_FROM_MACH_THREAD; |
1303 | return _pthread_create(thread, attr, start_routine, arg, flags); | |
2546420a A |
1304 | } |
1305 | ||
214d78a2 A |
1306 | int |
1307 | pthread_create_suspended_np(pthread_t *thread, const pthread_attr_t *attr, | |
1308 | void *(*start_routine)(void *), void *arg) | |
1309 | { | |
214d78a2 A |
1310 | unsigned int flags = _PTHREAD_CREATE_SUSPENDED; |
1311 | return _pthread_create(thread, attr, start_routine, arg, flags); | |
f1a1da6c A |
1312 | } |
1313 | ||
// Detach `thread`. If it has already exited (tl_exit_gate == MACH_PORT_DEAD)
// we must join it ourselves so its resources are reclaimed; if someone is
// blocked in pthread_join() on it, commit and wake that joiner instead.
int
pthread_detach(pthread_t thread)
{
	int res = 0;
	bool join = false, wake = false;

	if (!_pthread_validate_thread_and_list_lock(thread)) {
		return ESRCH;
	}

	if (!thread->tl_joinable) {
		// Already detached (or being joined): detaching twice is an error.
		res = EINVAL;
	} else if (thread->tl_exit_gate == MACH_PORT_DEAD) {
		// Join the thread if it's already exited.
		join = true;
	} else {
		thread->tl_joinable = false; // _pthread_joiner_prepost_wake uses this
		if (thread->tl_join_ctx) {
			(void)_pthread_joiner_prepost_wake(thread);
			wake = true;
		}
	}
	_pthread_lock_unlock(&_pthread_list_lock);

	// Do the blocking/syscall work only after dropping the list lock.
	if (join) {
		pthread_join(thread, NULL);
	} else if (wake) {
		_pthread_joiner_wake(thread);
	}
	return res;
}
1345 | ||
a0619f9c | 1346 | int |
f1a1da6c | 1347 | pthread_kill(pthread_t th, int sig) |
a0619f9c | 1348 | { |
f1a1da6c A |
1349 | if (sig < 0 || sig > NSIG) { |
1350 | return EINVAL; | |
1351 | } | |
1352 | ||
1353 | mach_port_t kport = MACH_PORT_NULL; | |
c6e5f90c A |
1354 | { |
1355 | if (!_pthread_is_valid(th, &kport)) { | |
1356 | return ESRCH; | |
1357 | } | |
f1a1da6c A |
1358 | } |
1359 | ||
1360 | int ret = __pthread_kill(kport, sig); | |
1361 | ||
1362 | if (ret == -1) { | |
1363 | ret = errno; | |
1364 | } | |
1365 | return ret; | |
1366 | } | |
1367 | ||
a0619f9c | 1368 | int |
f1a1da6c A |
1369 | __pthread_workqueue_setkill(int enable) |
1370 | { | |
c6e5f90c A |
1371 | { |
1372 | return __bsdthread_ctl(BSDTHREAD_CTL_WORKQ_ALLOW_KILL, enable, 0, 0); | |
1373 | } | |
f1a1da6c A |
1374 | } |
1375 | ||
f1a1da6c A |
/*
 * Terminate a thread.
 */

// Runs the exiting thread's cleanup handlers and TSD destructors, then hands
// off to _pthread_terminate_invoke(). Never returns.
OS_NORETURN
static void
_pthread_exit(pthread_t self, void *exit_value)
{
	struct __darwin_pthread_handler_rec *handler;

	// Disable signal delivery while we clean up
	__disable_threadsignal(1);

	// Set cancel state to disable and type to deferred
	_pthread_setcancelstate_exit(self, exit_value);

	// Pop and run pthread_cleanup_push() handlers, most recent first.
	while ((handler = self->__cleanup_stack) != 0) {
		(handler->__routine)(handler->__arg);
		self->__cleanup_stack = handler->__next;
	}
	_pthread_tsd_cleanup(self);

	// Clear per-thread semaphore cache
	os_put_cached_semaphore(SEMAPHORE_NULL);

	_pthread_terminate_invoke(self, exit_value);
}
1403 | ||
// Public pthread_exit(). Workqueue (dispatch) worker threads must not exit
// through this path — crash loudly instead of corrupting the pool.
void
pthread_exit(void *exit_value)
{
	pthread_t self = pthread_self();
	if (os_unlikely(self->wqthread)) {
		PTHREAD_CLIENT_CRASH(0, "pthread_exit() called from a thread "
				"not created by pthread_create()");
	}
	_pthread_validate_signature(self);
	_pthread_exit(self, exit_value);
}
1415 | ||
c1f56ec9 A |
1416 | int |
1417 | pthread_self_is_exiting_np(void) | |
1418 | { | |
1419 | return (os_atomic_load(&pthread_self()->cancel_state, relaxed) & | |
1420 | _PTHREAD_CANCEL_EXITING) != 0; | |
1421 | } | |
a0619f9c | 1422 | |
a0619f9c | 1423 | int |
214d78a2 | 1424 | pthread_getschedparam(pthread_t thread, int *policy, struct sched_param *param) |
f1a1da6c | 1425 | { |
214d78a2 | 1426 | if (!_pthread_validate_thread_and_list_lock(thread)) { |
f1a1da6c A |
1427 | return ESRCH; |
1428 | } | |
a0619f9c | 1429 | |
214d78a2 A |
1430 | if (policy) *policy = thread->tl_policy; |
1431 | if (param) *param = thread->tl_param; | |
c1f56ec9 | 1432 | _pthread_lock_unlock(&_pthread_list_lock); |
214d78a2 | 1433 | return 0; |
f1a1da6c A |
1434 | } |
1435 | ||
// Translate a POSIX policy/sched_param pair into the corresponding Mach
// thread_policy() call on `kport`. Workqueue threads are managed by the
// kernel and cannot be re-scheduled here (ENOTSUP).
OS_ALWAYS_INLINE
static inline int
pthread_setschedparam_internal(pthread_t thread, mach_port_t kport, int policy,
		const struct sched_param *param)
{
	policy_base_data_t bases;
	policy_base_t base;
	mach_msg_type_number_t count;
	kern_return_t ret;

	if (os_unlikely(thread->wqthread)) {
		return ENOTSUP;
	}

	switch (policy) {
	case SCHED_OTHER:
		bases.ts.base_priority = param->sched_priority;
		base = (policy_base_t)&bases.ts;
		count = POLICY_TIMESHARE_BASE_COUNT;
		break;
	case SCHED_FIFO:
		bases.fifo.base_priority = param->sched_priority;
		base = (policy_base_t)&bases.fifo;
		count = POLICY_FIFO_BASE_COUNT;
		break;
	case SCHED_RR:
		bases.rr.base_priority = param->sched_priority;
		/* quantum isn't public yet */
		bases.rr.quantum = param->quantum;
		base = (policy_base_t)&bases.rr;
		count = POLICY_RR_BASE_COUNT;
		break;
	default:
		return EINVAL;
	}
	// TRUE: set the new policy as the thread's limit as well.
	ret = thread_policy(kport, policy, base, count, TRUE);
	return (ret != KERN_SUCCESS) ? EINVAL : 0;
}
1474 | ||
// Public pthread_setschedparam(): apply the Mach policy change first, then
// record the new policy/params in the pthread_t under the list lock.
int
pthread_setschedparam(pthread_t t, int policy, const struct sched_param *param)
{
	mach_port_t kport = MACH_PORT_NULL;
	int bypass = 1;

	// since the main thread will not get de-allocated from underneath us
	if (t == pthread_self() || t == main_thread()) {
		_pthread_validate_signature(t);
		kport = _pthread_tsd_slot(t, MACH_THREAD_SELF);
	} else {
		// Other threads must be validated (and may have died already).
		bypass = 0;
		if (!_pthread_is_valid(t, &kport)) {
			return ESRCH;
		}
	}

	int res = pthread_setschedparam_internal(t, kport, policy, param);
	if (res) return res;

	if (bypass) {
		_pthread_lock_lock(&_pthread_list_lock);
	} else if (!_pthread_validate_thread_and_list_lock(t)) {
		// Ensure the thread is still valid.
		return ESRCH;
	}

	t->tl_policy = policy;
	t->tl_param = *param;
	_pthread_lock_unlock(&_pthread_list_lock);
	return 0;
}
1507 | ||
1508 | int | |
1509 | sched_get_priority_min(int policy) | |
1510 | { | |
1511 | return default_priority - 16; | |
1512 | } | |
1513 | ||
1514 | int | |
1515 | sched_get_priority_max(int policy) | |
1516 | { | |
1517 | return default_priority + 16; | |
1518 | } | |
1519 | ||
// Two pthread_t handles name the same thread exactly when they are the same
// pointer value.
int
pthread_equal(pthread_t t1, pthread_t t2)
{
	return (t1 == t2) ? 1 : 0;
}
1525 | ||
// Bind pthread_t `p` to the calling kernel thread: record the TID and point
// the thread's TSD base at p->tsd. In dyld, a NULL argument selects the
// minimal bootstrap path.
OS_NOINLINE
void
_pthread_set_self(pthread_t p)
{
#if VARIANT_DYLD
	if (os_likely(!p)) {
		return _pthread_set_self_dyld();
	}
#endif // VARIANT_DYLD
	_pthread_set_self_internal(p);
	_thread_set_tsd_base(&p->tsd[0]);
}
f1a1da6c | 1538 | |
214d78a2 A |
#if VARIANT_DYLD
// _pthread_set_self_dyld is noinline+noexport to allow the option for
// static libsyscall to adopt this as the entry point from mach_init if
// desired
OS_NOINLINE
static void
_pthread_set_self_dyld(void)
{
	pthread_t p = main_thread();
	p->thread_id = __thread_selfid();

	if (os_unlikely(p->thread_id == -1ull)) {
		PTHREAD_INTERNAL_CRASH(0, "failed to set thread_id");
	}

	// <rdar://problem/40930651> pthread self and the errno address are the
	// bare minimum TSD setup that dyld needs to actually function. Without
	// this, TSD access will fail and crash if it uses bits of Libc prior to
	// library initialization. __pthread_init will finish the initialization
	// during library init.
	_pthread_tsd_slot(p, PTHREAD_SELF) = p;
	_pthread_tsd_slot(p, ERRNO) = &p->err_no;
	_thread_set_tsd_base(&p->tsd[0]);
}
#endif // VARIANT_DYLD
1564 | ||
// Store the kernel-assigned thread ID into `p`. The wide store variant is
// needed on 32-bit targets where a 64-bit store is not a single instruction.
OS_ALWAYS_INLINE
static inline void
_pthread_set_self_internal(pthread_t p)
{
#if __LP64__
	os_atomic_store(&p->thread_id, __thread_selfid(), relaxed);
#else
	os_atomic_store_wide(&p->thread_id, __thread_selfid(), relaxed);
#endif

	if (os_unlikely(p->thread_id == -1ull)) {
		// -1 is the syscall failure sentinel, never a valid TID.
		PTHREAD_INTERNAL_CRASH(0, "failed to set thread_id");
	}
}
1579 | ||
// <rdar://problem/28984807> pthread_once should have an acquire barrier
// Wrapper over _os_once() whose fast path loads the predicate with acquire
// ordering, so initialization done by another thread is visible here.
OS_ALWAYS_INLINE
static inline void
_os_once_acquire(os_once_t *predicate, void *context, os_function_t function)
{
	// ~0l is the "done" value of an os_once_t predicate.
	if (OS_EXPECT(os_atomic_load(predicate, acquire), ~0l) != ~0l) {
		_os_once(predicate, context, function);
		OS_COMPILER_CAN_ASSUME(*predicate == ~0l);
	}
}
1590 | ||
f1a1da6c A |
// Bundles the pthread_once_t and its init routine so a single context
// pointer can be handed to __pthread_once_handler().
struct _pthread_once_context {
	pthread_once_t *pthread_once;
	void (*routine)(void);
};
1595 | ||
// Trampoline run by _os_once(): invokes the user routine with a cleanup
// handler that resets the once predicate if the routine is cancelled, then
// marks the pthread_once_t as completed.
static void
__pthread_once_handler(void *context)
{
	struct _pthread_once_context *ctx = context;
	pthread_cleanup_push((void*)__os_once_reset, &ctx->pthread_once->once);
	ctx->routine();
	pthread_cleanup_pop(0);
	ctx->pthread_once->sig = _PTHREAD_ONCE_SIG;
}
1605 | ||
// Run init_routine exactly once for this once_control. The loop re-arms when
// the routine was cancelled mid-run (sig still _PTHREAD_ONCE_SIG_init after
// the predicate was reset by the cleanup handler).
int
pthread_once(pthread_once_t *once_control, void (*init_routine)(void))
{
	struct _pthread_once_context ctx = { once_control, init_routine };
	do {
		_os_once_acquire(&once_control->once, &ctx, __pthread_once_handler);
	} while (once_control->sig == _PTHREAD_ONCE_SIG_init);
	return 0;
}
1615 | ||
f1a1da6c A |
// Returns the concurrency level last stored by pthread_setconcurrency().
// The value is only a hint; nothing in this file acts on it.
int
pthread_getconcurrency(void)
{
	return pthread_concurrency;
}
1621 | ||
1622 | int | |
1623 | pthread_setconcurrency(int new_level) | |
1624 | { | |
1625 | if (new_level < 0) { | |
1626 | return EINVAL; | |
1627 | } | |
1628 | pthread_concurrency = new_level; | |
1629 | return 0; | |
1630 | } | |
1631 | ||
214d78a2 A |
1632 | /* |
1633 | * Perform package initialization - called automatically when application starts | |
1634 | */ | |
214d78a2 A |
1635 | |
1636 | #if !VARIANT_DYLD | |
2546420a A |
1637 | static unsigned long |
1638 | _pthread_strtoul(const char *p, const char **endptr, int base) | |
f1a1da6c | 1639 | { |
2546420a | 1640 | uintptr_t val = 0; |
a0619f9c | 1641 | |
2546420a A |
1642 | // Expect hex string starting with "0x" |
1643 | if ((base == 16 || base == 0) && p && p[0] == '0' && p[1] == 'x') { | |
1644 | p += 2; | |
1645 | while (1) { | |
1646 | char c = *p; | |
1647 | if ('0' <= c && c <= '9') { | |
1648 | val = (val << 4) + (c - '0'); | |
1649 | } else if ('a' <= c && c <= 'f') { | |
1650 | val = (val << 4) + (c - 'a' + 10); | |
1651 | } else if ('A' <= c && c <= 'F') { | |
1652 | val = (val << 4) + (c - 'A' + 10); | |
1653 | } else { | |
1654 | break; | |
1655 | } | |
1656 | ++p; | |
1657 | } | |
1658 | } | |
1659 | ||
1660 | *endptr = (char *)p; | |
1661 | return val; | |
1662 | } | |
1663 | ||
/*
 * Parses the "main_stack=<stackaddr>,<stacksize>,<allocaddr>,<allocsize>"
 * entry (four hex fields) from the apple[] vector handed in by the kernel.
 *
 * Returns 1 when all four fields parsed cleanly, 0 otherwise; on failure
 * the out-parameters may be only partially written. The apple[] entry is
 * bzero'd before returning, presumably so the stack layout doesn't linger
 * in readable memory.
 *
 * Fix: the pointer out-parameters are now assigned with an explicit
 * (void *) cast — _pthread_strtoul returns an integer type, and implicit
 * integer-to-pointer conversion is a C constraint violation.
 */
static int
parse_main_stack_params(const char *apple[],
			void **stackaddr,
			size_t *stacksize,
			void **allocaddr,
			size_t *allocsize)
{
	const char *p = _simple_getenv(apple, "main_stack");
	if (!p) return 0;

	int ret = 0;
	const char *s = p;

	*stackaddr = (void *)_pthread_strtoul(s, &s, 16);
	if (*s != ',') goto out;

	*stacksize = _pthread_strtoul(s + 1, &s, 16);
	if (*s != ',') goto out;

	*allocaddr = (void *)_pthread_strtoul(s + 1, &s, 16);
	if (*s != ',') goto out;

	*allocsize = _pthread_strtoul(s + 1, &s, 16);
	if (*s != ',' && *s != 0) goto out;

	ret = 1;
out:
	// Scrub the kernel-provided string regardless of parse outcome.
	bzero((char *)p, strlen(p));
	return ret;
}
1694 | ||
214d78a2 A |
/*
 * Initializes _pthread_ptr_munge_token from the kernel-provided apple[]
 * "ptr_munge" entry, optionally overridden by the PTHREAD_PTR_MUNGE_TOKEN
 * environment variable (fully, including 0, only in DEBUG builds).
 * The apple[] string is scrubbed after reading.
 */
static void
parse_ptr_munge_params(const char *envp[], const char *apple[])
{
	const char *p, *s;
	uintptr_t token = 0;
	p = _simple_getenv(apple, "ptr_munge");
	if (p) {
		token = _pthread_strtoul(p, &s, 16);
		bzero((char *)p, strlen(p));
	}
	/*
	 * In DEBUG we allow the environment variable to override the kernel
	 * security setting, including setting it to 0 which is helpful during
	 * debugging sessions.
	 *
	 * For other cases, the token must be set by the kernel or the environment
	 * variable to a non 0 value.
	 */
#if !DEBUG
	if (!token) {
#endif
		p = _simple_getenv(envp, "PTHREAD_PTR_MUNGE_TOKEN");
		if (p) {
			uintptr_t t = _pthread_strtoul(p, &s, 16);
			// Non-DEBUG builds only accept a non-zero override.
			if (t) token = t;
		}
#if !DEBUG
	}

	if (!token) {
		PTHREAD_INTERNAL_CRASH(token, "Token from the kernel is 0");
	}
#endif // !DEBUG

	_pthread_ptr_munge_token = token;
	// we need to refresh the main thread signature now that we changed
	// the munge token. We need to do it while TSAN will not look at it
	_pthread_init_signature(_main_thread_ptr);
}
f1a1da6c | 1734 | |
f5f12756 A |
1735 | static void |
1736 | parse_main_thread_port(const char *apple[], mach_port_name_t *main_th) | |
1737 | { | |
1738 | const char *p, *s; | |
1739 | p = _simple_getenv(apple, "th_port"); | |
1740 | if (p) { | |
1741 | *main_th = (mach_port_name_t)_pthread_strtoul(p, &s, 16); | |
1742 | bzero((char *)p, strlen(p)); | |
1743 | } | |
1744 | } | |
1745 | ||
/*
 * Library bootstrap, called by libSystem at process start (not built into
 * the dyld variant — see the surrounding !VARIANT_DYLD guard).
 *
 * Ordering here is deliberate: the main-thread pointer and its signature
 * must exist before anything that might call pthread_self(); the ptr_munge
 * token must be set before _pthread_struct_init() re-signs the thread.
 */
int
__pthread_init(const struct _libpthread_functions *pthread_funcs,
		const char *envp[], const char *apple[],
		const struct ProgramVars *vars __unused)
{
	// Save our provided pushed-down functions
	if (pthread_funcs) {
		exitf = pthread_funcs->exit;

		if (pthread_funcs->version >= 2) {
			_pthread_malloc = pthread_funcs->malloc;
			_pthread_free = pthread_funcs->free;
		}
	}

	// libpthread.a in dyld "owns" the main thread structure itself and sets
	// up the tsd to point to it. So take the pthread_self() from there
	// and make it our main thread point.
	pthread_t thread = _pthread_self_direct();
	if (os_unlikely(thread == NULL)) {
		PTHREAD_INTERNAL_CRASH(0, "PTHREAD_SELF TSD not initialized");
	}
	_main_thread_ptr = thread;
	// this needs to be done early so that pthread_self() works in TSAN
	_pthread_init_signature(thread);

	//
	// Get host information
	//

	kern_return_t kr;
	host_flavor_t flavor = HOST_PRIORITY_INFO;
	mach_msg_type_number_t count = HOST_PRIORITY_INFO_COUNT;
	host_priority_info_data_t priority_info;
	host_t host = mach_host_self();
	kr = host_info(host, flavor, (host_info_t)&priority_info, &count);
	if (kr != KERN_SUCCESS) {
		PTHREAD_INTERNAL_CRASH(kr, "host_info() failed");
	} else {
		default_priority = (uint8_t)priority_info.user_priority;
		min_priority = (uint8_t)priority_info.minimum_priority;
		max_priority = (uint8_t)priority_info.maximum_priority;
	}
	mach_port_deallocate(mach_task_self(), host);

	//
	// Set up the main thread structure
	//

	// Get the address and size of the main thread's stack from the kernel.
	void *stackaddr = 0;
	size_t stacksize = 0;
	void *allocaddr = 0;
	size_t allocsize = 0;
	if (!parse_main_stack_params(apple, &stackaddr, &stacksize, &allocaddr, &allocsize) ||
			stackaddr == NULL || stacksize == 0) {
		// Fall back to previous behavior: ask sysctl, then use the
		// compile-time default stack base as a last resort.
		size_t len = sizeof(stackaddr);
		int mib[] = { CTL_KERN, KERN_USRSTACK };
		if (__sysctl(mib, 2, &stackaddr, &len, NULL, 0) != 0) {
#if defined(__LP64__)
			stackaddr = (void *)USRSTACK64;
#else
			stackaddr = (void *)USRSTACK;
#endif
		}
		stacksize = DFLSSIZ;
		allocaddr = 0;
		allocsize = 0;
	}

	// Initialize random ptr_munge token from the kernel.
	parse_ptr_munge_params(envp, apple);

	PTHREAD_DEBUG_ASSERT(_pthread_attr_default.qosclass ==
			_pthread_default_priority(0));
	_pthread_struct_init(thread, &_pthread_attr_default,
			stackaddr, stacksize, allocaddr, allocsize);
	thread->tl_joinable = true;

	// Get main thread port name from the kernel.
	mach_port_name_t main_th_port = MACH_PORT_NULL;
	parse_main_thread_port(apple, &main_th_port);

	// Finish initialization with common code that is reinvoked on the
	// child side of a fork.

	// Finishes initialization of main thread attributes.
	// Initializes the thread list and add the main thread.
	// Calls _pthread_set_self() to prepare the main thread for execution.
	_pthread_main_thread_init(thread, main_th_port);

	struct _pthread_registration_data registration_data;
	// Set up kernel entry points with __bsdthread_register.
	_pthread_bsdthread_init(&registration_data);

	// Have pthread_key and pthread_mutex do their init envvar checks.
	_pthread_key_global_init(envp);
	_pthread_mutex_global_init(envp, &registration_data);

#if PTHREAD_DEBUG_LOG
	_SIMPLE_STRING path = _simple_salloc();
	_simple_sprintf(path, "/var/tmp/libpthread.%d.log", getpid());
	_pthread_debuglog = open(_simple_string(path),
			O_WRONLY | O_APPEND | O_CREAT | O_NOFOLLOW | O_CLOEXEC, 0666);
	_simple_sfree(path);
	_pthread_debugstart = mach_absolute_time();
#endif

	return 0;
}
214d78a2 | 1857 | #endif // !VARIANT_DYLD |
f1a1da6c | 1858 | |
/*
 * Finishes initialization of the main thread structure. Called from
 * __pthread_init() at startup and (with MACH_PORT_NULL) from
 * _pthread_main_thread_postfork_init() on the child side of fork.
 */
void
_pthread_main_thread_init(pthread_t p, mach_port_name_t main_thread_port)
{
	TAILQ_INIT(&__pthread_head);
	_pthread_lock_init(&_pthread_list_lock);
	_pthread_lock_init(&p->lock);
	p->__cleanup_stack = NULL;
	p->tl_join_ctx = NULL;
	p->tl_exit_gate = MACH_PORT_NULL;

	if (main_thread_port != MACH_PORT_NULL) {
		_pthread_tsd_slot(p, MACH_THREAD_SELF) = main_thread_port;
	} else {
		// Can't get thread port from kernel or we are forking, fallback to mach_thread_self
		_pthread_tsd_slot(p, MACH_THREAD_SELF) = mach_thread_self();
	}
	_pthread_tsd_slot(p, MIG_REPLY) = mach_reply_port();
	_pthread_tsd_slot(p, MACH_SPECIAL_REPLY) = MACH_PORT_NULL;
	_pthread_tsd_slot(p, SEMAPHORE_CACHE) = SEMAPHORE_NULL;

	// Initialize the list of threads with the new main thread.
	TAILQ_INSERT_HEAD(&__pthread_head, p, tl_plist);
	_pthread_count = 1;

	_pthread_introspection_thread_start(p);
}
1885 | ||
c28b7a9d A |
// Re-initializes the main thread structure in the child after fork().
// Passes MACH_PORT_NULL so _pthread_main_thread_init falls back to
// mach_thread_self(), then re-publishes the child's kernel thread id.
void
_pthread_main_thread_postfork_init(pthread_t p)
{
	_pthread_main_thread_init(p, MACH_PORT_NULL);
	_pthread_set_self_internal(p);
}
1892 | ||
/*
 * sched_yield: give up the remainder of this quantum.
 * Implemented via the Mach swtch_pri trap with a depressed priority of 0.
 * Always reports success.
 */
int
sched_yield(void)
{
	(void)swtch_pri(0);
	return 0;
}
1899 | ||
c6e5f90c A |
1900 | // Libsystem knows about this symbol and exports it to libsyscall |
1901 | int | |
1902 | pthread_current_stack_contains_np(const void *addr, size_t length) | |
1903 | { | |
1904 | uintptr_t begin = (uintptr_t) addr, end; | |
1905 | uintptr_t stack_base = (uintptr_t) _pthread_self_direct()->stackbottom; | |
1906 | uintptr_t stack_top = (uintptr_t) _pthread_self_direct()->stackaddr; | |
1907 | ||
1908 | if (stack_base == stack_top) { | |
1909 | return -ENOTSUP; | |
1910 | } | |
1911 | ||
1912 | if (__builtin_add_overflow(begin, length, &end)) { | |
1913 | return -EINVAL; | |
1914 | } | |
1915 | ||
1916 | return stack_base <= begin && end <= stack_top; | |
1917 | } | |
1918 | ||
214d78a2 | 1919 | // Libsystem knows about this symbol and exports it to libsyscall |
c1f56ec9 | 1920 | |
/*
 * Resets the QoS-class TSD slot of the thread identified by `port` to the
 * unspecified priority. MACH_PORT_NULL (or our own port) means the current
 * thread, which is handled inline; any other port requires walking the
 * global thread list under _pthread_list_lock.
 */
void
_pthread_clear_qos_tsd(mach_port_t port)
{
	pthread_priority_t pp = _pthread_unspecified_priority();

	if (port == MACH_PORT_NULL || _pthread_mach_thread_self_direct() == port) {
		/* Clear the current thread's TSD, that can be done inline. */
		_pthread_setspecific_direct(_PTHREAD_TSD_SLOT_PTHREAD_QOS_CLASS, pp);
	} else {
		pthread_t p;

		_pthread_lock_lock(&_pthread_list_lock);

		TAILQ_FOREACH(p, &__pthread_head, tl_plist) {
			mach_port_t kp = _pthread_tsd_slot(p, MACH_THREAD_SELF);
			if (port == kp) {
				_pthread_tsd_slot(p, PTHREAD_QOS_CLASS) = pp;
				break;
			}
		}

		_pthread_lock_unlock(&_pthread_list_lock);
	}
}
1945 | ||
214d78a2 A |
1946 | #pragma mark pthread/stack_np.h public interface |
1947 | ||
#if defined(__i386__) || defined(__x86_64__) || defined(__arm__) || defined(__arm64__)
#if __ARM64_ARCH_8_32__
/*
 * arm64_32 uses 64-bit sizes for the frame pointer and
 * return address of a stack frame.
 */
typedef uint64_t frame_data_addr_t;
#else
typedef uintptr_t frame_data_addr_t;
#endif

// Layout of a saved stack frame as walked by
// pthread_stack_frame_decode_np() below.
struct frame_data {
	frame_data_addr_t frame_addr_next;  // saved frame pointer (caller's frame)
	frame_data_addr_t ret_addr;         // saved return address
};
#else
#error ********** Unimplemented architecture
#endif
1966 | ||
/*
 * Decodes one stack frame: optionally stores the frame's return address
 * through *return_addr and returns the next (caller's) frame address.
 * On arm64e, pointer-authentication signatures are stripped before the
 * values are handed back.
 */
uintptr_t
pthread_stack_frame_decode_np(uintptr_t frame_addr, uintptr_t *return_addr)
{
	struct frame_data *frame = (struct frame_data *)frame_addr;

	if (return_addr) {
#if __has_feature(ptrauth_calls)
		*return_addr = (uintptr_t)ptrauth_strip((void *)frame->ret_addr,
				ptrauth_key_return_address);
#else
		*return_addr = (uintptr_t)frame->ret_addr;
#endif /* __has_feature(ptrauth_calls) */
	}

#if __has_feature(ptrauth_calls)
	// With ptrauth this return is taken; the one below is unreachable.
	return (uintptr_t)ptrauth_strip((void *)frame->frame_addr_next,
			ptrauth_key_frame_pointer);
#endif /* __has_feature(ptrauth_calls) */
	return (uintptr_t)frame->frame_addr_next;
}
1987 | ||
214d78a2 A |
1988 | #pragma mark pthread workqueue support routines |
1989 | ||
/*
 * Registers libpthread's thread entry points and structure offsets with
 * the kernel via __bsdthread_register, and records the feature bits the
 * kernel reports back. Also applies the kernel-provided main-thread QoS
 * and stack address hint.
 */
void
_pthread_bsdthread_init(struct _pthread_registration_data *data)
{
	bzero(data, sizeof(*data));
	data->version = sizeof(struct _pthread_registration_data);
	data->dispatch_queue_offset = __PTK_LIBDISPATCH_KEY0 * sizeof(void *);
	data->return_to_kernel_offset = __TSD_RETURN_TO_KERNEL * sizeof(void *);
	data->tsd_offset = offsetof(struct pthread_s, tsd);
	data->mach_thread_self_offset = __TSD_MACH_THREAD_SELF * sizeof(void *);
	data->joinable_offset_bits = CHAR_BIT * (offsetof(struct pthread_s, tl_policy) + 1);

	int rv = __bsdthread_register(thread_start, start_wqthread, (int)PTHREAD_SIZE,
			(void*)data, (uintptr_t)sizeof(*data), data->dispatch_queue_offset);

	// A positive return is a bitmask of supported kernel features.
	if (rv > 0) {
		int required_features =
				PTHREAD_FEATURE_FINEPRIO |
				PTHREAD_FEATURE_BSDTHREADCTL |
				PTHREAD_FEATURE_SETSELF |
				PTHREAD_FEATURE_QOS_MAINTENANCE |
				PTHREAD_FEATURE_QOS_DEFAULT;
		if ((rv & required_features) != required_features) {
			PTHREAD_INTERNAL_CRASH(rv, "Missing required kernel support");
		}
		__pthread_supported_features = rv;
	}

	/*
	 * TODO: differentiate between (-1, EINVAL) after fork (which has the side
	 * effect of resetting the child's stack_addr_hint before bailing out) and
	 * (-1, EINVAL) because of invalid arguments. We'd probably like to treat
	 * the latter as fatal.
	 *
	 * <rdar://problem/36451838>
	 */

	pthread_priority_t main_qos = (pthread_priority_t)data->main_qos;

	if (_pthread_priority_thread_qos(main_qos) != THREAD_QOS_UNSPECIFIED) {
		_pthread_set_main_qos(main_qos);
		_pthread_tsd_slot(main_thread(), PTHREAD_QOS_CLASS) = main_qos;
	}

	if (data->stack_addr_hint) {
		__pthread_stack_hint = data->stack_addr_hint;
	}

	if (__libdispatch_workerfunction != NULL) {
		// prepare the kernel for workq action
		(void)__workq_open();
	}
}
2042 | ||
/*
 * Adapts a new-style pthread_priority_t to the legacy worker callback,
 * which takes a fixed WORKQ_*_PRIOQUEUE index plus overcommit option.
 * Crashes on any QoS the legacy interface cannot express.
 */
OS_NOINLINE
static void
_pthread_wqthread_legacy_worker_wrap(pthread_priority_t pp)
{
	/* Old thread priorities are inverted from where we have them in
	 * the new flexible priority scheme. The highest priority is zero,
	 * up to 2, with background at 3.
	 */
	pthread_workqueue_function_t func = (pthread_workqueue_function_t)__libdispatch_workerfunction;
	bool overcommit = (pp & _PTHREAD_PRIORITY_OVERCOMMIT_FLAG);
	int opts = overcommit ? WORKQ_ADDTHREADS_OPTION_OVERCOMMIT : 0;

	switch (_pthread_priority_thread_qos(pp)) {
	case THREAD_QOS_USER_INITIATED:
		return (*func)(WORKQ_HIGH_PRIOQUEUE, opts, NULL);
	case THREAD_QOS_LEGACY:
		/* B&I builders can't pass a QOS_CLASS_DEFAULT thread to dispatch, for fear of the QoS being
		 * picked up by NSThread (et al) and transported around the system. So change the TSD to
		 * make this thread look like QOS_CLASS_USER_INITIATED even though it will still run as legacy.
		 */
		_pthread_setspecific_direct(_PTHREAD_TSD_SLOT_PTHREAD_QOS_CLASS,
				_pthread_priority_make_from_thread_qos(THREAD_QOS_USER_INITIATED, 0, 0));
		return (*func)(WORKQ_DEFAULT_PRIOQUEUE, opts, NULL);
	case THREAD_QOS_UTILITY:
		return (*func)(WORKQ_LOW_PRIOQUEUE, opts, NULL);
	case THREAD_QOS_BACKGROUND:
		return (*func)(WORKQ_BG_PRIOQUEUE, opts, NULL);
	}
	PTHREAD_INTERNAL_CRASH(pp, "Invalid pthread priority for the legacy interface");
}
2073 | ||
/*
 * Translates the WQ_FLAG_* bits passed by the kernel into a
 * pthread_priority_t for a newly-parked workqueue thread.
 * Event-manager threads short-circuit; otherwise exactly one of the
 * QOS or SCHED priority encodings must be present.
 */
OS_ALWAYS_INLINE
static inline pthread_priority_t
_pthread_wqthread_priority(int flags)
{
	pthread_priority_t pp = 0;
	thread_qos_t qos;

	if (flags & WQ_FLAG_THREAD_KEVENT) {
		pp |= _PTHREAD_PRIORITY_NEEDS_UNBIND_FLAG;
	}
	if (flags & WQ_FLAG_THREAD_EVENT_MANAGER) {
		return pp | _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG;
	}

	if (flags & WQ_FLAG_THREAD_OVERCOMMIT) {
		pp |= _PTHREAD_PRIORITY_OVERCOMMIT_FLAG;
	}
	if (flags & WQ_FLAG_THREAD_PRIO_QOS) {
		// Kernel encoded a QoS class in the low priority bits.
		qos = (thread_qos_t)(flags & WQ_FLAG_THREAD_PRIO_MASK);
		pp = _pthread_priority_make_from_thread_qos(qos, 0, pp);
	} else if (flags & WQ_FLAG_THREAD_PRIO_SCHED) {
		// Kernel encoded a raw scheduler priority instead.
		pp |= _PTHREAD_PRIORITY_SCHED_PRI_MASK;
		pp |= (flags & WQ_FLAG_THREAD_PRIO_MASK);
	} else {
		PTHREAD_INTERNAL_CRASH(flags, "Missing priority");
	}
	return pp;
}
964d3577 | 2102 | |
/*
 * First-time setup for a workqueue thread: the pthread_t lives at the top
 * of the thread's stack (stackaddr == self), with the usable stack running
 * down to stacklowaddr. Requires the kernel to have already set the TSD
 * base (WQ_FLAG_THREAD_TSD_BASE_SET).
 */
OS_NOINLINE
static void
_pthread_wqthread_setup(pthread_t self, mach_port_t kport, void *stacklowaddr,
		int flags)
{
	void *stackaddr = self;
	size_t stacksize = (uintptr_t)self - (uintptr_t)stacklowaddr;

	_pthread_struct_init(self, &_pthread_attr_default, stackaddr, stacksize,
			PTHREAD_ALLOCADDR(stackaddr, stacksize),
			PTHREAD_ALLOCSIZE(stackaddr, stacksize));

	_pthread_tsd_slot(self, MACH_THREAD_SELF) = kport;
	self->wqthread = 1;
	self->wqkillset = 0;
	self->tl_joinable = false;

	// Update the running thread count and set childrun bit.
	if (os_unlikely((flags & WQ_FLAG_THREAD_TSD_BASE_SET) == 0)) {
		PTHREAD_INTERNAL_CRASH(flags,
				"thread_set_tsd_base() wasn't called by the kernel");
	}
	_pthread_set_self_internal(self);
	__pthread_add_thread(self, kport, false);
	__pthread_started_thread(self);
}
2546420a | 2129 | |
/*
 * Terminates a workqueue thread. If its QoS is unspecified or above the
 * cleanup level, it is first lowered so the teardown work doesn't run at
 * an elevated priority. Does not return.
 */
OS_NORETURN OS_NOINLINE
static void
_pthread_wqthread_exit(pthread_t self)
{
	const thread_qos_t WORKQ_THREAD_QOS_CLEANUP = THREAD_QOS_LEGACY;
	pthread_priority_t pp = _pthread_tsd_slot(self, PTHREAD_QOS_CLASS);
	thread_qos_t qos;

	qos = _pthread_priority_thread_qos(pp);
	if (qos == THREAD_QOS_UNSPECIFIED || qos > WORKQ_THREAD_QOS_CLEANUP) {
		// Reset QoS to something low for the cleanup process
		pp = _pthread_priority_make_from_thread_qos(WORKQ_THREAD_QOS_CLEANUP, 0, 0);
		_pthread_tsd_slot(self, PTHREAD_QOS_CLASS) = pp;
	}

	_pthread_exit(self, NULL);
}
f1a1da6c | 2147 | |
214d78a2 A |
// workqueue entry point from kernel
//
// Dispatches a (new or reused) workqueue thread to the registered
// libdispatch callback: exit request, workloop, kevent, or plain worker.
// Each branch reports back to the kernel with the matching
// __workq_kernreturn operation; that call is expected never to return.
void
_pthread_wqthread(pthread_t self, mach_port_t kport, void *stacklowaddr,
		void *keventlist, int flags, int nkevents)
{
	if ((flags & WQ_FLAG_THREAD_REUSE) == 0) {
		// Fresh thread: initialize its pthread structure and TSD.
		_pthread_wqthread_setup(self, kport, stacklowaddr, flags);
	}

	pthread_priority_t pp;

	if (flags & WQ_FLAG_THREAD_OUTSIDEQOS) {
		self->wq_outsideqos = 1;
		pp = _pthread_priority_make_from_thread_qos(THREAD_QOS_LEGACY, 0,
				_PTHREAD_PRIORITY_FALLBACK_FLAG);
	} else {
		self->wq_outsideqos = 0;
		pp = _pthread_wqthread_priority(flags);
	}

	self->tsd[_PTHREAD_TSD_SLOT_PTHREAD_QOS_CLASS] = (void *)pp;

	// avoid spills on the stack hard to keep used stack space minimal
	if (os_unlikely(nkevents == WORKQ_EXIT_THREAD_NKEVENT)) {
		_pthread_wqthread_exit(self);
	} else if (flags & WQ_FLAG_THREAD_WORKLOOP) {
		// The workloop id is stashed immediately before the kevent list.
		kqueue_id_t *kqidptr = (kqueue_id_t *)keventlist - 1;
		self->fun = (void *(*)(void*))__libdispatch_workloopfunction;
		self->arg = keventlist;
		self->wq_nevents = nkevents;
		(*__libdispatch_workloopfunction)(kqidptr, &self->arg, &self->wq_nevents);
		__workq_kernreturn(WQOPS_THREAD_WORKLOOP_RETURN, self->arg, self->wq_nevents, 0);
	} else if (flags & WQ_FLAG_THREAD_KEVENT) {
		self->fun = (void *(*)(void*))__libdispatch_keventfunction;
		self->arg = keventlist;
		self->wq_nevents = nkevents;
		(*__libdispatch_keventfunction)(&self->arg, &self->wq_nevents);
		__workq_kernreturn(WQOPS_THREAD_KEVENT_RETURN, self->arg, self->wq_nevents, 0);
	} else {
		self->fun = (void *(*)(void*))__libdispatch_workerfunction;
		self->arg = (void *)(uintptr_t)pp;
		self->wq_nevents = 0;
		if (os_likely(__workq_newapi)) {
			(*__libdispatch_workerfunction)(pp);
		} else {
			_pthread_wqthread_legacy_worker_wrap(pp);
		}
		__workq_kernreturn(WQOPS_THREAD_RETURN, NULL, 0, 0);
	}

	_os_set_crash_log_cause_and_message(self->err_no,
			"BUG IN LIBPTHREAD: __workq_kernreturn returned");
	/*
	 * 52858993: we should never return but the compiler insists on outlining,
	 * so the __builtin_trap() is in _start_wqthread in pthread_asm.s
	 */
}
2205 | ||
214d78a2 A |
2206 | #pragma mark pthread workqueue API for libdispatch |
2207 | ||
a0619f9c A |
// Compile-time check that the kernel's per-thread kevent buffer length and
// the userspace list length used by workqueue threads stay in sync.
_Static_assert(WORKQ_KEVENT_EVENT_BUFFER_LEN == WQ_KEVENT_LIST_LEN,
		"Kernel and userland should agree on the event list size");
2210 | ||
f1a1da6c A |
// Forwards libdispatch's queue offset to the kernel via the
// WQOPS_QUEUE_NEWSPISUPP operation; return value is deliberately ignored.
void
pthread_workqueue_setdispatchoffset_np(int offset)
{
	__workq_kernreturn(WQOPS_QUEUE_NEWSPISUPP, NULL, offset, 0x00);
}
2216 | ||
c6e5f90c A |
2217 | int |
2218 | pthread_workqueue_setup(struct pthread_workqueue_config *cfg, size_t cfg_size) | |
f1a1da6c | 2219 | { |
c6e5f90c A |
2220 | int rv = EBUSY; |
2221 | struct workq_dispatch_config wdc_cfg; | |
2222 | size_t min_size = 0; | |
2223 | ||
2224 | if (cfg_size < sizeof(uint32_t)) { | |
2225 | return EINVAL; | |
2226 | } | |
2227 | ||
2228 | switch (cfg->version) { | |
c1f56ec9 A |
2229 | case 1: |
2230 | min_size = offsetof(struct pthread_workqueue_config, queue_label_offs); | |
2231 | break; | |
2232 | case 2: | |
2233 | min_size = sizeof(struct pthread_workqueue_config); | |
2234 | break; | |
2235 | default: | |
2236 | return EINVAL; | |
2237 | } | |
c6e5f90c A |
2238 | |
2239 | if (!cfg || cfg_size < min_size) { | |
2240 | return EINVAL; | |
2241 | } | |
2242 | ||
2243 | if (cfg->flags & ~PTHREAD_WORKQUEUE_CONFIG_SUPPORTED_FLAGS || | |
2244 | cfg->version < PTHREAD_WORKQUEUE_CONFIG_MIN_SUPPORTED_VERSION) { | |
2245 | return ENOTSUP; | |
2246 | } | |
2247 | ||
f1a1da6c | 2248 | if (__libdispatch_workerfunction == NULL) { |
c6e5f90c A |
2249 | __workq_newapi = true; |
2250 | ||
2251 | wdc_cfg.wdc_version = WORKQ_DISPATCH_CONFIG_VERSION; | |
2252 | wdc_cfg.wdc_flags = 0; | |
2253 | wdc_cfg.wdc_queue_serialno_offs = cfg->queue_serialno_offs; | |
2254 | #if WORKQ_DISPATCH_CONFIG_VERSION >= 2 | |
2255 | wdc_cfg.wdc_queue_label_offs = cfg->queue_label_offs; | |
2256 | #endif | |
2257 | ||
2258 | // Tell the kernel about dispatch internals | |
2259 | rv = (int) __workq_kernreturn(WQOPS_SETUP_DISPATCH, &wdc_cfg, sizeof(wdc_cfg), 0); | |
2260 | if (rv == -1) { | |
2261 | return errno; | |
f1a1da6c | 2262 | } else { |
c6e5f90c A |
2263 | __libdispatch_keventfunction = cfg->kevent_cb; |
2264 | __libdispatch_workloopfunction = cfg->workloop_cb; | |
2265 | __libdispatch_workerfunction = cfg->workq_cb; | |
f1a1da6c A |
2266 | |
2267 | // Prepare the kernel for workq action | |
2268 | (void)__workq_open(); | |
2269 | if (__is_threaded == 0) { | |
2270 | __is_threaded = 1; | |
2271 | } | |
c6e5f90c A |
2272 | |
2273 | return 0; | |
f1a1da6c A |
2274 | } |
2275 | } | |
c6e5f90c A |
2276 | |
2277 | return rv; | |
f1a1da6c A |
2278 | } |
2279 | ||
964d3577 | 2280 | int |
a0619f9c A |
2281 | _pthread_workqueue_init_with_workloop(pthread_workqueue_function2_t queue_func, |
2282 | pthread_workqueue_function_kevent_t kevent_func, | |
2283 | pthread_workqueue_function_workloop_t workloop_func, | |
2284 | int offset, int flags) | |
964d3577 | 2285 | { |
c6e5f90c A |
2286 | struct pthread_workqueue_config cfg = { |
2287 | .version = PTHREAD_WORKQUEUE_CONFIG_VERSION, | |
2288 | .flags = 0, | |
2289 | .workq_cb = queue_func, | |
2290 | .kevent_cb = kevent_func, | |
2291 | .workloop_cb = workloop_func, | |
2292 | .queue_serialno_offs = offset, | |
2293 | .queue_label_offs = 0, | |
2294 | }; | |
a0619f9c | 2295 | |
c6e5f90c | 2296 | return pthread_workqueue_setup(&cfg, sizeof(cfg)); |
964d3577 A |
2297 | } |
2298 | ||
a0619f9c A |
// SPI: like _pthread_workqueue_init_with_workloop but with no workloop
// callback registered.
int
_pthread_workqueue_init_with_kevent(pthread_workqueue_function2_t queue_func,
		pthread_workqueue_function_kevent_t kevent_func,
		int offset, int flags)
{
	return _pthread_workqueue_init_with_workloop(queue_func, kevent_func, NULL, offset, flags);
}
2306 | ||
964d3577 A |
// SPI: registers only the plain worker callback (no kevent or workloop).
int
_pthread_workqueue_init(pthread_workqueue_function2_t func, int offset, int flags)
{
	return _pthread_workqueue_init_with_kevent(func, NULL, offset, flags);
}
2312 | ||
2313 | int | |
2314 | pthread_workqueue_setdispatch_np(pthread_workqueue_function_t worker_func) | |
2315 | { | |
c6e5f90c A |
2316 | struct pthread_workqueue_config cfg = { |
2317 | .version = PTHREAD_WORKQUEUE_CONFIG_VERSION, | |
2318 | .flags = 0, | |
2319 | .workq_cb = (uint64_t)(pthread_workqueue_function2_t)worker_func, | |
2320 | .kevent_cb = 0, | |
2321 | .workloop_cb = 0, | |
2322 | .queue_serialno_offs = 0, | |
2323 | .queue_label_offs = 0, | |
2324 | }; | |
2325 | ||
2326 | return pthread_workqueue_setup(&cfg, sizeof(cfg)); | |
964d3577 A |
2327 | } |
2328 | ||
f1a1da6c A |
2329 | int |
2330 | _pthread_workqueue_supported(void) | |
2331 | { | |
a0619f9c A |
2332 | if (os_unlikely(!__pthread_supported_features)) { |
2333 | PTHREAD_INTERNAL_CRASH(0, "libpthread has not been initialized"); | |
2334 | } | |
2335 | ||
f1a1da6c A |
2336 | return __pthread_supported_features; |
2337 | } | |
2338 | ||
/*
 * Legacy SPI: asks the kernel for numthreads additional workqueue threads
 * at the given legacy queue priority. Returns 0 on success, EPERM if no
 * worker function is registered, or errno from the kernel call.
 */
int
pthread_workqueue_addthreads_np(int queue_priority, int options, int numthreads)
{
	int res = 0;

	// Cannot add threads without a worker function registered.
	if (__libdispatch_workerfunction == NULL) {
		return EPERM;
	}

	pthread_priority_t kp = 0;
	int compat_priority = queue_priority & WQ_FLAG_THREAD_PRIO_MASK;
	int flags = 0;

	if (options & WORKQ_ADDTHREADS_OPTION_OVERCOMMIT) {
		flags = _PTHREAD_PRIORITY_OVERCOMMIT_FLAG;
	}

	// The encoder is deprecated but still the right tool for the legacy
	// priority scheme; silence the deprecation warning locally.
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wdeprecated-declarations"
	kp = _pthread_qos_class_encode_workqueue(compat_priority, flags);
#pragma clang diagnostic pop

	res = __workq_kernreturn(WQOPS_QUEUE_REQTHREADS, NULL, numthreads, (int)kp);
	if (res == -1) {
		res = errno;
	}
	return res;
}
2368 | ||
a0619f9c A |
2369 | bool |
2370 | _pthread_workqueue_should_narrow(pthread_priority_t pri) | |
2371 | { | |
2372 | int res = __workq_kernreturn(WQOPS_SHOULD_NARROW, NULL, (int)pri, 0); | |
2373 | if (res == -1) { | |
2374 | return false; | |
2375 | } | |
2376 | return res; | |
2377 | } | |
2378 | ||
f1a1da6c A |
/*
 * SPI: asks the kernel for numthreads additional workqueue threads at the
 * given pthread_priority_t. Returns 0 on success, EPERM if no worker
 * function is registered, or errno from the kernel call.
 */
int
_pthread_workqueue_addthreads(int numthreads, pthread_priority_t priority)
{
	int res = 0;

	if (__libdispatch_workerfunction == NULL) {
		return EPERM;
	}

#if TARGET_OS_OSX
	// <rdar://problem/37687655> Legacy simulators fail to boot
	//
	// Older sims set the deprecated _PTHREAD_PRIORITY_ROOTQUEUE_FLAG wrongly,
	// which is aliased to _PTHREAD_PRIORITY_SCHED_PRI_FLAG and that XNU
	// validates and rejects.
	//
	// As a workaround, forcefully unset this bit that cannot be set here
	// anyway.
	priority &= ~_PTHREAD_PRIORITY_SCHED_PRI_FLAG;
#endif

	res = __workq_kernreturn(WQOPS_QUEUE_REQTHREADS, NULL, numthreads, (int)priority);
	if (res == -1) {
		res = errno;
	}
	return res;
}
2406 | ||
964d3577 A |
2407 | int |
2408 | _pthread_workqueue_set_event_manager_priority(pthread_priority_t priority) | |
2409 | { | |
2410 | int res = __workq_kernreturn(WQOPS_SET_EVENT_MANAGER_PRIORITY, NULL, (int)priority, 0); | |
2411 | if (res == -1) { | |
2412 | res = errno; | |
2413 | } | |
2414 | return res; | |
2415 | } | |
2416 | ||
214d78a2 A |
2417 | int |
2418 | _pthread_workloop_create(uint64_t workloop_id, uint64_t options, pthread_attr_t *attr) | |
2419 | { | |
2420 | struct kqueue_workloop_params params = { | |
2421 | .kqwlp_version = sizeof(struct kqueue_workloop_params), | |
2422 | .kqwlp_id = workloop_id, | |
2423 | .kqwlp_flags = 0, | |
2424 | }; | |
2425 | ||
2426 | if (!attr) { | |
2427 | return EINVAL; | |
2428 | } | |
2429 | ||
2430 | if (attr->schedset) { | |
2431 | params.kqwlp_flags |= KQ_WORKLOOP_CREATE_SCHED_PRI; | |
2432 | params.kqwlp_sched_pri = attr->param.sched_priority; | |
2433 | } | |
2434 | ||
2435 | if (attr->policyset) { | |
2436 | params.kqwlp_flags |= KQ_WORKLOOP_CREATE_SCHED_POL; | |
2437 | params.kqwlp_sched_pol = attr->policy; | |
2438 | } | |
2439 | ||
2440 | if (attr->cpupercentset) { | |
2441 | params.kqwlp_flags |= KQ_WORKLOOP_CREATE_CPU_PERCENT; | |
2442 | params.kqwlp_cpu_percent = attr->cpupercent; | |
2443 | params.kqwlp_cpu_refillms = attr->refillms; | |
2444 | } | |
2445 | ||
2446 | int res = __kqueue_workloop_ctl(KQ_WORKLOOP_CREATE, 0, ¶ms, | |
2447 | sizeof(params)); | |
2448 | if (res == -1) { | |
2449 | res = errno; | |
2450 | } | |
2451 | return res; | |
2452 | } | |
2453 | ||
2454 | int | |
2455 | _pthread_workloop_destroy(uint64_t workloop_id) | |
2456 | { | |
2457 | struct kqueue_workloop_params params = { | |
2458 | .kqwlp_version = sizeof(struct kqueue_workloop_params), | |
2459 | .kqwlp_id = workloop_id, | |
2460 | }; | |
2461 | ||
2462 | int res = __kqueue_workloop_ctl(KQ_WORKLOOP_DESTROY, 0, ¶ms, | |
2463 | sizeof(params)); | |
2464 | if (res == -1) { | |
2465 | res = errno; | |
2466 | } | |
2467 | return res; | |
2468 | } | |
2469 | ||
214d78a2 A |
2470 | #pragma mark Introspection SPI for libpthread. |
2471 | ||
f1a1da6c A |
2472 | static pthread_introspection_hook_t _pthread_introspection_hook; |
2473 | ||
2474 | pthread_introspection_hook_t | |
2475 | pthread_introspection_hook_install(pthread_introspection_hook_t hook) | |
2476 | { | |
f1a1da6c | 2477 | pthread_introspection_hook_t prev; |
a0619f9c | 2478 | prev = _pthread_atomic_xchg_ptr((void**)&_pthread_introspection_hook, hook); |
f1a1da6c A |
2479 | return prev; |
2480 | } | |
2481 | ||
c1f56ec9 A |
2482 | static inline void |
2483 | _pthread_introspection_call_hook(unsigned int event, | |
2484 | pthread_t thread, void *addr, size_t size) | |
2485 | { | |
2486 | pthread_t self = pthread_self(); | |
2487 | uint16_t old = self->introspection; | |
2488 | self->introspection = (uint16_t)event; | |
2489 | _pthread_introspection_hook(event, thread, addr, size); | |
2490 | self->introspection = old; | |
2491 | } | |
2492 | ||
2493 | OS_NOINLINE | |
f1a1da6c | 2494 | static void |
214d78a2 | 2495 | _pthread_introspection_hook_callout_thread_create(pthread_t t) |
f1a1da6c | 2496 | { |
c1f56ec9 | 2497 | _pthread_introspection_call_hook(PTHREAD_INTROSPECTION_THREAD_CREATE, t, t, |
2546420a | 2498 | PTHREAD_SIZE); |
f1a1da6c A |
2499 | } |
2500 | ||
2501 | static inline void | |
214d78a2 | 2502 | _pthread_introspection_thread_create(pthread_t t) |
f1a1da6c A |
2503 | { |
2504 | if (os_fastpath(!_pthread_introspection_hook)) return; | |
214d78a2 | 2505 | _pthread_introspection_hook_callout_thread_create(t); |
f1a1da6c A |
2506 | } |
2507 | ||
c1f56ec9 | 2508 | OS_NOINLINE |
f1a1da6c A |
2509 | static void |
2510 | _pthread_introspection_hook_callout_thread_start(pthread_t t) | |
2511 | { | |
2512 | size_t freesize; | |
2513 | void *freeaddr; | |
214d78a2 A |
2514 | if (t == main_thread()) { |
2515 | size_t stacksize = t->stackaddr - t->stackbottom; | |
2516 | freesize = stacksize + t->guardsize; | |
f1a1da6c A |
2517 | freeaddr = t->stackaddr - freesize; |
2518 | } else { | |
2546420a | 2519 | freesize = t->freesize - PTHREAD_SIZE; |
f1a1da6c A |
2520 | freeaddr = t->freeaddr; |
2521 | } | |
c1f56ec9 | 2522 | _pthread_introspection_call_hook(PTHREAD_INTROSPECTION_THREAD_START, t, |
f1a1da6c A |
2523 | freeaddr, freesize); |
2524 | } | |
2525 | ||
2526 | static inline void | |
2527 | _pthread_introspection_thread_start(pthread_t t) | |
2528 | { | |
2529 | if (os_fastpath(!_pthread_introspection_hook)) return; | |
2530 | _pthread_introspection_hook_callout_thread_start(t); | |
2531 | } | |
2532 | ||
c1f56ec9 | 2533 | OS_NOINLINE |
f1a1da6c | 2534 | static void |
214d78a2 | 2535 | _pthread_introspection_hook_callout_thread_terminate(pthread_t t) |
f1a1da6c | 2536 | { |
214d78a2 A |
2537 | size_t freesize; |
2538 | void *freeaddr; | |
2539 | if (t == main_thread()) { | |
2540 | size_t stacksize = t->stackaddr - t->stackbottom; | |
2541 | freesize = stacksize + t->guardsize; | |
2542 | freeaddr = t->stackaddr - freesize; | |
2543 | } else { | |
2544 | freesize = t->freesize - PTHREAD_SIZE; | |
2545 | freeaddr = t->freeaddr; | |
f1a1da6c | 2546 | } |
c1f56ec9 | 2547 | _pthread_introspection_call_hook(PTHREAD_INTROSPECTION_THREAD_TERMINATE, t, |
f1a1da6c | 2548 | freeaddr, freesize); |
f1a1da6c A |
2549 | } |
2550 | ||
2551 | static inline void | |
214d78a2 | 2552 | _pthread_introspection_thread_terminate(pthread_t t) |
f1a1da6c A |
2553 | { |
2554 | if (os_fastpath(!_pthread_introspection_hook)) return; | |
214d78a2 | 2555 | _pthread_introspection_hook_callout_thread_terminate(t); |
f1a1da6c A |
2556 | } |
2557 | ||
c1f56ec9 | 2558 | OS_NOINLINE |
f1a1da6c A |
2559 | static void |
2560 | _pthread_introspection_hook_callout_thread_destroy(pthread_t t) | |
2561 | { | |
c1f56ec9 | 2562 | _pthread_introspection_call_hook(PTHREAD_INTROSPECTION_THREAD_DESTROY, t, t, |
2546420a | 2563 | PTHREAD_SIZE); |
f1a1da6c A |
2564 | } |
2565 | ||
2566 | static inline void | |
2567 | _pthread_introspection_thread_destroy(pthread_t t) | |
2568 | { | |
2569 | if (os_fastpath(!_pthread_introspection_hook)) return; | |
2570 | _pthread_introspection_hook_callout_thread_destroy(t); | |
2571 | } | |
2572 | ||
214d78a2 | 2573 | #pragma mark libplatform shims |
c1f56ec9 | 2574 | #if !VARIANT_DYLD |
214d78a2 A |
2575 | |
2576 | #include <platform/string.h> | |
2577 | ||
2578 | // pthread_setup initializes large structures to 0, | |
2579 | // which the compiler turns into a library call to memset. | |
2580 | // | |
2581 | // To avoid linking against Libc, provide a simple wrapper | |
2582 | // that calls through to the libplatform primitives | |
2583 | ||
2584 | #undef memset | |
2585 | PTHREAD_NOEXPORT | |
2586 | void * | |
2587 | memset(void *b, int c, size_t len) | |
2588 | { | |
2589 | return _platform_memset(b, c, len); | |
2590 | } | |
2591 | ||
2592 | #undef bzero | |
2593 | PTHREAD_NOEXPORT | |
2594 | void | |
2595 | bzero(void *s, size_t n) | |
2596 | { | |
2597 | _platform_bzero(s, n); | |
2598 | } | |
2599 | ||
2600 | #undef memcpy | |
2601 | PTHREAD_NOEXPORT | |
2602 | void * | |
2603 | memcpy(void* a, const void* b, unsigned long s) | |
2604 | { | |
2605 | return _platform_memmove(a, b, s); | |
2606 | } | |
2607 | ||
c6e5f90c | 2608 | #endif // !VARIANT_DYLD |