/*
 * Copyright (c) 2013 Apple Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */

#include "internal.h"

#include <_simple.h>
#include <mach/mach_vm.h>
#include <unistd.h>
#include <spawn.h>
#include <spawn_private.h>
#include <sys/spawn_internal.h>
#include <sys/ulock.h>

// TODO: remove me when internal.h can include *_private.h itself
#include "workqueue_private.h"
#include "qos_private.h"

static pthread_priority_t _main_qos = QOS_CLASS_UNSPECIFIED;

#define PTHREAD_OVERRIDE_SIGNATURE	(0x6f766572)
#define PTHREAD_OVERRIDE_SIG_DEAD	(0x7265766f)

struct pthread_override_s
{
	uint32_t sig;
	mach_port_t kthread;
	pthread_t pthread;
	pthread_priority_t priority;
	bool malloced;
};

void
_pthread_set_main_qos(pthread_priority_t qos)
{
	_main_qos = qos;
}

int
pthread_attr_set_qos_class_np(pthread_attr_t *__attr,
		qos_class_t __qos_class,
		int __relative_priority)
{
	if (!(__pthread_supported_features & PTHREAD_FEATURE_BSDTHREADCTL)) {
		return ENOTSUP;
	}

	if (__relative_priority > 0 || __relative_priority < QOS_MIN_RELATIVE_PRIORITY) {
		return EINVAL;
	}

	int ret = EINVAL;
	if (__attr->sig == _PTHREAD_ATTR_SIG) {
		if (!__attr->schedset) {
			__attr->qosclass = _pthread_priority_make_newest(__qos_class, __relative_priority, 0);
			__attr->qosset = 1;
			ret = 0;
		}
	}

	return ret;
}

int
pthread_attr_get_qos_class_np(pthread_attr_t * __restrict __attr,
		qos_class_t * __restrict __qos_class,
		int * __restrict __relative_priority)
{
	if (!(__pthread_supported_features & PTHREAD_FEATURE_BSDTHREADCTL)) {
		return ENOTSUP;
	}

	int ret = EINVAL;
	if (__attr->sig == _PTHREAD_ATTR_SIG) {
		if (__attr->qosset) {
			qos_class_t qos; int relpri;
			_pthread_priority_split_newest(__attr->qosclass, qos, relpri);

			if (__qos_class) { *__qos_class = qos; }
			if (__relative_priority) { *__relative_priority = relpri; }
		} else {
			if (__qos_class) { *__qos_class = 0; }
			if (__relative_priority) { *__relative_priority = 0; }
		}
		ret = 0;
	}

	return ret;
}
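
/*
 * Illustrative usage sketch (kept out of the build with #if 0): how a client
 * would typically pair the two attr routines above, setting a QoS class on a
 * pthread_attr_t before pthread_create() and reading it back. The worker
 * function and the QOS_CLASS_UTILITY / -5 values are assumptions made for the
 * example, not part of this file.
 */
#if 0
static void *
_example_worker(void *arg)
{
	return arg;
}

static int
_example_create_utility_thread(pthread_t *thread)
{
	pthread_attr_t attr;
	qos_class_t qos;
	int relpri;
	int rc;

	pthread_attr_init(&attr);

	/* The relative priority must lie in [QOS_MIN_RELATIVE_PRIORITY, 0]. */
	rc = pthread_attr_set_qos_class_np(&attr, QOS_CLASS_UTILITY, -5);
	if (rc == 0) {
		/* Reads back QOS_CLASS_UTILITY / -5 as set above. */
		pthread_attr_get_qos_class_np(&attr, &qos, &relpri);
		rc = pthread_create(thread, &attr, _example_worker, NULL);
	}

	pthread_attr_destroy(&attr);
	return rc;
}
#endif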

int
pthread_set_qos_class_self_np(qos_class_t __qos_class,
		int __relative_priority)
{
	if (!(__pthread_supported_features & PTHREAD_FEATURE_BSDTHREADCTL)) {
		return ENOTSUP;
	}

	if (__relative_priority > 0 || __relative_priority < QOS_MIN_RELATIVE_PRIORITY) {
		return EINVAL;
	}

	pthread_priority_t priority = _pthread_priority_make_newest(__qos_class, __relative_priority, 0);

	if (__pthread_supported_features & PTHREAD_FEATURE_SETSELF) {
		return _pthread_set_properties_self(_PTHREAD_SET_SELF_QOS_FLAG, priority, 0);
	} else {
		/* We set the thread QoS class in the TSD and then call into the kernel to
		 * read the value out of it and set the QoS class.
		 */
		_pthread_setspecific_direct(_PTHREAD_TSD_SLOT_PTHREAD_QOS_CLASS, priority);
		mach_port_t kport = _pthread_kernel_thread(pthread_self());
		int res = __bsdthread_ctl(BSDTHREAD_CTL_SET_QOS, kport, &pthread_self()->tsd[_PTHREAD_TSD_SLOT_PTHREAD_QOS_CLASS], 0);

		if (res == -1) {
			res = errno;
		}

		return res;
	}
}

int
pthread_set_qos_class_np(pthread_t __pthread,
		qos_class_t __qos_class,
		int __relative_priority)
{
	if (__pthread != pthread_self()) {
		/* The kext now enforces this anyway; checking here as well lets us call
		 * _pthread_set_properties_self later if we can.
		 */
		return EPERM;
	}

	return pthread_set_qos_class_self_np(__qos_class, __relative_priority);
}

int
pthread_get_qos_class_np(pthread_t __pthread,
		qos_class_t * __restrict __qos_class,
		int * __restrict __relative_priority)
{
	if (!(__pthread_supported_features & PTHREAD_FEATURE_BSDTHREADCTL)) {
		return ENOTSUP;
	}

	pthread_priority_t priority;

	if (__pthread == pthread_self()) {
		priority = _pthread_getspecific_direct(_PTHREAD_TSD_SLOT_PTHREAD_QOS_CLASS);
	} else {
		priority = __pthread->tsd[_PTHREAD_TSD_SLOT_PTHREAD_QOS_CLASS];
	}

	qos_class_t qos; int relpri;
	_pthread_priority_split_newest(priority, qos, relpri);

	if (__qos_class) { *__qos_class = qos; }
	if (__relative_priority) { *__relative_priority = relpri; }

	return 0;
}
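
/*
 * Illustrative usage sketch (kept out of the build with #if 0): a thread
 * lowering its own QoS with pthread_set_qos_class_self_np() and confirming the
 * result via pthread_get_qos_class_np() on pthread_self(). The choice of
 * QOS_CLASS_BACKGROUND with relative priority 0 is an assumption for the
 * example.
 */
#if 0
static void
_example_drop_self_to_background(void)
{
	qos_class_t qos = QOS_CLASS_UNSPECIFIED;
	int relpri = 0;

	if (pthread_set_qos_class_self_np(QOS_CLASS_BACKGROUND, 0) == 0) {
		/* Reads the value just stored in the QoS TSD slot. */
		pthread_get_qos_class_np(pthread_self(), &qos, &relpri);
	}
}
#endif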

qos_class_t
qos_class_self(void)
{
	if (!(__pthread_supported_features & PTHREAD_FEATURE_BSDTHREADCTL)) {
		return QOS_CLASS_UNSPECIFIED;
	}

	pthread_priority_t p = _pthread_getspecific_direct(_PTHREAD_TSD_SLOT_PTHREAD_QOS_CLASS);
	qos_class_t c = _pthread_priority_get_qos_newest(p);

	return c;
}

qos_class_t
qos_class_main(void)
{
	return _pthread_priority_get_qos_newest(_main_qos);
}

pthread_priority_t
_pthread_qos_class_encode(qos_class_t qos_class, int relative_priority, unsigned long flags)
{
	return _pthread_priority_make_newest(qos_class, relative_priority, flags);
}

qos_class_t
_pthread_qos_class_decode(pthread_priority_t priority, int *relative_priority, unsigned long *flags)
{
	qos_class_t qos; int relpri;

	_pthread_priority_split_newest(priority, qos, relpri);

	if (relative_priority) { *relative_priority = relpri; }
	if (flags) { *flags = _pthread_priority_get_flags(priority); }
	return qos;
}
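
/*
 * Illustrative sketch (kept out of the build with #if 0): the encode/decode
 * pair above is a simple round trip through pthread_priority_t, as consumed by
 * libdispatch. The QOS_CLASS_USER_INITIATED / -2 values below are arbitrary
 * assumptions for the example.
 */
#if 0
static void
_example_encode_decode_roundtrip(void)
{
	int relpri;
	unsigned long flags;

	pthread_priority_t pp = _pthread_qos_class_encode(QOS_CLASS_USER_INITIATED, -2, 0);

	/* qos == QOS_CLASS_USER_INITIATED, relpri == -2, flags == 0 */
	qos_class_t qos = _pthread_qos_class_decode(pp, &relpri, &flags);
	(void)qos;
}
#endif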

// Encode a legacy workqueue API priority into a pthread_priority_t. This API
// is deprecated and can be removed when the simulator no longer uses it.
pthread_priority_t
_pthread_qos_class_encode_workqueue(int queue_priority, unsigned long flags)
{
	switch (queue_priority) {
	case WORKQ_HIGH_PRIOQUEUE:
		return _pthread_priority_make_newest(QOS_CLASS_USER_INITIATED, 0, flags);
	case WORKQ_DEFAULT_PRIOQUEUE:
		return _pthread_priority_make_newest(QOS_CLASS_DEFAULT, 0, flags);
	case WORKQ_LOW_PRIOQUEUE:
	case WORKQ_NON_INTERACTIVE_PRIOQUEUE:
		return _pthread_priority_make_newest(QOS_CLASS_UTILITY, 0, flags);
	case WORKQ_BG_PRIOQUEUE:
		return _pthread_priority_make_newest(QOS_CLASS_BACKGROUND, 0, flags);
	/* Legacy dispatch does not use QOS_CLASS_MAINTENANCE, so no need to handle it here */
	default:
		__pthread_abort();
	}
}

int
_pthread_set_properties_self(_pthread_set_flags_t flags, pthread_priority_t priority, mach_port_t voucher)
{
	if (!(__pthread_supported_features & PTHREAD_FEATURE_SETSELF)) {
		return ENOTSUP;
	}

	int rv = __bsdthread_ctl(BSDTHREAD_CTL_SET_SELF, priority, voucher, flags);

	/* Set QoS TSD if we succeeded or only failed the voucher half. */
	if ((flags & _PTHREAD_SET_SELF_QOS_FLAG) != 0) {
		if (rv == 0 || errno == ENOENT) {
			_pthread_setspecific_direct(_PTHREAD_TSD_SLOT_PTHREAD_QOS_CLASS, priority);
		}
	}

	if (rv) {
		rv = errno;
	}
	return rv;
}

int
pthread_set_fixedpriority_self(void)
{
	if (!(__pthread_supported_features & PTHREAD_FEATURE_BSDTHREADCTL)) {
		return ENOTSUP;
	}

	if (__pthread_supported_features & PTHREAD_FEATURE_SETSELF) {
		return _pthread_set_properties_self(_PTHREAD_SET_SELF_FIXEDPRIORITY_FLAG, 0, 0);
	} else {
		return ENOTSUP;
	}
}

int
pthread_set_timeshare_self(void)
{
	if (!(__pthread_supported_features & PTHREAD_FEATURE_BSDTHREADCTL)) {
		return ENOTSUP;
	}

	if (__pthread_supported_features & PTHREAD_FEATURE_SETSELF) {
		return _pthread_set_properties_self(_PTHREAD_SET_SELF_TIMESHARE_FLAG, 0, 0);
	} else {
		return ENOTSUP;
	}
}

pthread_override_t
pthread_override_qos_class_start_np(pthread_t __pthread, qos_class_t __qos_class, int __relative_priority)
{
	pthread_override_t rv;
	kern_return_t kr;
	int res = 0;

	/* Try malloc first; if it fails or is not yet available, fall back to
	 * vm_allocate, which means the tiny struct is going to use an entire page.
	 */
	bool did_malloc = true;

	mach_vm_address_t vm_addr = malloc(sizeof(struct pthread_override_s));
	if (!vm_addr) {
		vm_addr = vm_page_size;
		did_malloc = false;

		kr = mach_vm_allocate(mach_task_self(), &vm_addr, round_page(sizeof(struct pthread_override_s)), VM_MAKE_TAG(VM_MEMORY_LIBDISPATCH) | VM_FLAGS_ANYWHERE);
		if (kr != KERN_SUCCESS) {
			errno = ENOMEM;
			return (_Nonnull pthread_override_t) NULL;
		}
	}

	rv = (pthread_override_t)vm_addr;
	rv->sig = PTHREAD_OVERRIDE_SIGNATURE;
	rv->pthread = __pthread;
	rv->kthread = pthread_mach_thread_np(__pthread);
	rv->priority = _pthread_priority_make_newest(__qos_class, __relative_priority, 0);
	rv->malloced = did_malloc;

	/* To ensure that the kernel port that we keep stays valid, we retain it here. */
	kr = mach_port_mod_refs(mach_task_self(), rv->kthread, MACH_PORT_RIGHT_SEND, 1);
	if (kr != KERN_SUCCESS) {
		res = EINVAL;
	}

	if (res == 0) {
		res = __bsdthread_ctl(BSDTHREAD_CTL_QOS_OVERRIDE_START, rv->kthread, rv->priority, (uintptr_t)rv);

		if (res != 0) {
			mach_port_mod_refs(mach_task_self(), rv->kthread, MACH_PORT_RIGHT_SEND, -1);
		}
	}

	if (res != 0) {
		if (did_malloc) {
			free(rv);
		} else {
			mach_vm_deallocate(mach_task_self(), vm_addr, round_page(sizeof(struct pthread_override_s)));
		}
		rv = NULL;
	}
	return (_Nonnull pthread_override_t) rv;
}

int
pthread_override_qos_class_end_np(pthread_override_t override)
{
	kern_return_t kr;
	int res = 0;

	/* Double-free is a fault. Swap the signature and check the old one. */
	if (_pthread_atomic_xchg_uint32_relaxed(&override->sig, PTHREAD_OVERRIDE_SIG_DEAD) != PTHREAD_OVERRIDE_SIGNATURE) {
		__builtin_trap();
	}

	/* Always consumes (and deallocates) the pthread_override_t object given. */
	res = __bsdthread_ctl(BSDTHREAD_CTL_QOS_OVERRIDE_END, override->kthread, (uintptr_t)override, 0);
	if (res == -1) { res = errno; }

	/* EFAULT from the syscall means we underflowed the override count. */
	if (res == EFAULT) {
		// <rdar://problem/17645082> Disable the trap-on-underflow, it doesn't co-exist
		// with dispatch resetting override counts on threads.
		//__builtin_trap();
		res = 0;
	}

	kr = mach_port_mod_refs(mach_task_self(), override->kthread, MACH_PORT_RIGHT_SEND, -1);
	if (kr != KERN_SUCCESS) {
		res = EINVAL;
	}

	if (override->malloced) {
		free(override);
	} else {
		kr = mach_vm_deallocate(mach_task_self(), (mach_vm_address_t)override, round_page(sizeof(struct pthread_override_s)));
		if (kr != KERN_SUCCESS) {
			res = EINVAL;
		}
	}

	return res;
}
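
/*
 * Illustrative usage sketch (kept out of the build with #if 0): the start/end
 * pair above brackets a QoS override on another thread, e.g. to boost a thread
 * that currently holds a resource the caller is about to wait on. The target
 * thread and QOS_CLASS_USER_INTERACTIVE are assumptions for the example.
 */
#if 0
static void
_example_boost_then_unboost(pthread_t target)
{
	pthread_override_t override;

	override = pthread_override_qos_class_start_np(target, QOS_CLASS_USER_INTERACTIVE, 0);
	if (override != NULL) {
		/* ... wait for the target thread to release the contended resource ... */
		pthread_override_qos_class_end_np(override);
	}
}
#endif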

int
_pthread_qos_override_start_direct(mach_port_t thread, pthread_priority_t priority, void *resource)
{
	int res = __bsdthread_ctl(BSDTHREAD_CTL_QOS_OVERRIDE_START, thread, priority, (uintptr_t)resource);
	if (res == -1) { res = errno; }
	return res;
}

int
_pthread_qos_override_end_direct(mach_port_t thread, void *resource)
{
	int res = __bsdthread_ctl(BSDTHREAD_CTL_QOS_OVERRIDE_END, thread, (uintptr_t)resource, 0);
	if (res == -1) { res = errno; }
	return res;
}

int
_pthread_override_qos_class_start_direct(mach_port_t thread, pthread_priority_t priority)
{
	// Use pthread_self() as the default per-thread resource pointer for tracking the override in the kernel.
	return _pthread_qos_override_start_direct(thread, priority, pthread_self());
}

int
_pthread_override_qos_class_end_direct(mach_port_t thread)
{
	// Use pthread_self() as the default per-thread resource pointer for tracking the override in the kernel.
	return _pthread_qos_override_end_direct(thread, pthread_self());
}

int
_pthread_workqueue_override_start_direct(mach_port_t thread, pthread_priority_t priority)
{
	int res = __bsdthread_ctl(BSDTHREAD_CTL_QOS_OVERRIDE_DISPATCH, thread, priority, 0);
	if (res == -1) { res = errno; }
	return res;
}

int
_pthread_workqueue_override_start_direct_check_owner(mach_port_t thread, pthread_priority_t priority, mach_port_t *ulock_addr)
{
#if !TARGET_OS_IPHONE
	static boolean_t kernel_supports_owner_check = TRUE;
	if (!kernel_supports_owner_check) {
		ulock_addr = NULL;
	}
#endif

	for (;;) {
		int res = __bsdthread_ctl(BSDTHREAD_CTL_QOS_OVERRIDE_DISPATCH, thread, priority, ulock_addr);
		if (res == -1) { res = errno; }
#if !TARGET_OS_IPHONE
		if (ulock_addr && res == EINVAL) {
			if ((uintptr_t)ulock_addr % _Alignof(_Atomic uint32_t)) {
				// do not mute errors caused by bad ulock addresses
				return EINVAL;
			}
			// Backward compatibility for the XBS chroot:
			// BSDTHREAD_CTL_QOS_OVERRIDE_DISPATCH used to return EINVAL if
			// arg3 was non-NULL.
			kernel_supports_owner_check = FALSE;
			ulock_addr = NULL;
			continue;
		}
#endif
		if (ulock_addr && res == EFAULT) {
			// The kernel wants us to redrive the call, so while we refault the
			// memory, also revalidate the owner.
			uint32_t uval = *(uint32_t volatile *)ulock_addr;
			if (ulock_owner_value_to_port_name(uval) != thread) {
				return ESTALE;
			}
			continue;
		}

		return res;
	}
}

int
_pthread_workqueue_override_reset(void)
{
	int res = __bsdthread_ctl(BSDTHREAD_CTL_QOS_OVERRIDE_RESET, 0, 0, 0);
	if (res == -1) { res = errno; }
	return res;
}

int
_pthread_workqueue_asynchronous_override_add(mach_port_t thread, pthread_priority_t priority, void *resource)
{
	int res = __bsdthread_ctl(BSDTHREAD_CTL_QOS_DISPATCH_ASYNCHRONOUS_OVERRIDE_ADD, thread, priority, (uintptr_t)resource);
	if (res == -1) { res = errno; }
	return res;
}

int
_pthread_workqueue_asynchronous_override_reset_self(void *resource)
{
	int res = __bsdthread_ctl(BSDTHREAD_CTL_QOS_DISPATCH_ASYNCHRONOUS_OVERRIDE_RESET,
			0 /* !reset_all */,
			(uintptr_t)resource,
			0);
	if (res == -1) { res = errno; }
	return res;
}

int
_pthread_workqueue_asynchronous_override_reset_all_self(void)
{
	int res = __bsdthread_ctl(BSDTHREAD_CTL_QOS_DISPATCH_ASYNCHRONOUS_OVERRIDE_RESET,
			1 /* reset_all */,
			0,
			0);
	if (res == -1) { res = errno; }
	return res;
}

static inline uint16_t
_pthread_workqueue_parallelism_for_priority(int qos, unsigned long flags)
{
	int rc = __bsdthread_ctl(BSDTHREAD_CTL_QOS_MAX_PARALLELISM, qos, flags, 0);
	if (os_unlikely(rc == -1)) {
		rc = errno;
		if (rc != EINVAL) {
			PTHREAD_INTERNAL_CRASH(rc, "qos_max_parallelism failed");
		}
		if (flags & _PTHREAD_QOS_PARALLELISM_COUNT_LOGICAL) {
			return *(uint8_t *)_COMM_PAGE_LOGICAL_CPUS;
		} else {
			return *(uint8_t *)_COMM_PAGE_PHYSICAL_CPUS;
		}
	}
	return (uint16_t)rc;
}

int
pthread_qos_max_parallelism(qos_class_t qos, unsigned long flags)
{
	int thread_qos = _pthread_qos_class_to_thread_qos(qos);
	if (thread_qos == THREAD_QOS_UNSPECIFIED) {
		errno = EINVAL;
		return -1;
	}

	unsigned long syscall_flags = _PTHREAD_QOS_PARALLELISM_COUNT_LOGICAL;
	uint16_t *ptr = &_pthread_globals()->qmp_logical[thread_qos];

	if (flags & PTHREAD_MAX_PARALLELISM_PHYSICAL) {
		syscall_flags = 0;
		ptr = &_pthread_globals()->qmp_physical[thread_qos];
	}
	if (*ptr == 0) {
		*ptr = _pthread_workqueue_parallelism_for_priority(thread_qos, syscall_flags);
	}
	return *ptr;
}
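
/*
 * Illustrative sketch (kept out of the build with #if 0): sizing a worker pool
 * from pthread_qos_max_parallelism(). PTHREAD_MAX_PARALLELISM_PHYSICAL selects
 * physical rather than logical CPUs; falling back to a width of 1 on error is
 * an assumption for the example.
 */
#if 0
static int
_example_pool_width_for_utility(void)
{
	int width = pthread_qos_max_parallelism(QOS_CLASS_UTILITY,
			PTHREAD_MAX_PARALLELISM_PHYSICAL);
	return width > 0 ? width : 1;
}
#endif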

int
pthread_time_constraint_max_parallelism(unsigned long flags)
{
	unsigned long syscall_flags = _PTHREAD_QOS_PARALLELISM_COUNT_LOGICAL;
	uint16_t *ptr = &_pthread_globals()->qmp_logical[0];

	if (flags & PTHREAD_MAX_PARALLELISM_PHYSICAL) {
		syscall_flags = 0;
		ptr = &_pthread_globals()->qmp_physical[0];
	}
	if (*ptr == 0) {
		*ptr = _pthread_workqueue_parallelism_for_priority(0,
				syscall_flags | _PTHREAD_QOS_PARALLELISM_REALTIME);
	}
	return *ptr;
}

int
posix_spawnattr_set_qos_class_np(posix_spawnattr_t * __restrict __attr, qos_class_t __qos_class)
{
	switch (__qos_class) {
	case QOS_CLASS_UTILITY:
		return posix_spawnattr_set_qos_clamp_np(__attr, POSIX_SPAWN_PROC_CLAMP_UTILITY);
	case QOS_CLASS_BACKGROUND:
		return posix_spawnattr_set_qos_clamp_np(__attr, POSIX_SPAWN_PROC_CLAMP_BACKGROUND);
	case QOS_CLASS_MAINTENANCE:
		return posix_spawnattr_set_qos_clamp_np(__attr, POSIX_SPAWN_PROC_CLAMP_MAINTENANCE);
	default:
		return EINVAL;
	}
}

int
posix_spawnattr_get_qos_class_np(const posix_spawnattr_t *__restrict __attr, qos_class_t * __restrict __qos_class)
{
	uint64_t clamp;

	if (!__qos_class) {
		return EINVAL;
	}

	int rv = posix_spawnattr_get_qos_clamp_np(__attr, &clamp);
	if (rv != 0) {
		return rv;
	}

	switch (clamp) {
	case POSIX_SPAWN_PROC_CLAMP_UTILITY:
		*__qos_class = QOS_CLASS_UTILITY;
		break;
	case POSIX_SPAWN_PROC_CLAMP_BACKGROUND:
		*__qos_class = QOS_CLASS_BACKGROUND;
		break;
	case POSIX_SPAWN_PROC_CLAMP_MAINTENANCE:
		*__qos_class = QOS_CLASS_MAINTENANCE;
		break;
	default:
		*__qos_class = QOS_CLASS_UNSPECIFIED;
		break;
	}

	return 0;
}
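
/*
 * Illustrative usage sketch (kept out of the build with #if 0): clamping a
 * spawned process to QOS_CLASS_UTILITY with the spawnattr wrapper above. The
 * argv/envp plumbing is an assumption for the example.
 */
#if 0
static int
_example_spawn_clamped(pid_t *pid, char *const argv[], char *const envp[])
{
	posix_spawnattr_t attr;
	int rc;

	rc = posix_spawnattr_init(&attr);
	if (rc != 0) {
		return rc;
	}

	rc = posix_spawnattr_set_qos_class_np(&attr, QOS_CLASS_UTILITY);
	if (rc == 0) {
		rc = posix_spawn(pid, argv[0], NULL, &attr, argv, envp);
	}

	posix_spawnattr_destroy(&attr);
	return rc;
}
#endif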