/*
 * Copyright (c) 2013 Apple Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */

#include "internal.h"

#include <mach/mach_vm.h>
#include <unistd.h>
#include <spawn.h>
#include <spawn_private.h>
#include <pthread/spawn.h>
#include <sys/spawn_internal.h>
#include <sys/ulock.h>

#define PTHREAD_OVERRIDE_SIGNATURE (0x6f766572)
#define PTHREAD_OVERRIDE_SIG_DEAD (0x7265766f)

#if !defined(VARIANT_STATIC)
// internally redirected upcalls in case qos overrides are used
// before __pthread_init has run
PTHREAD_NOEXPORT void *
malloc(size_t sz)
{
	if (os_likely(_pthread_malloc)) {
		return _pthread_malloc(sz);
	} else {
		return NULL;
	}
}

PTHREAD_NOEXPORT void
free(void *p)
{
	if (os_likely(_pthread_free)) {
		_pthread_free(p);
	}
}
#endif // VARIANT_STATIC

struct pthread_override_s
{
	uint32_t sig;
	mach_port_t kthread;
	pthread_t pthread;
	pthread_priority_t priority;
	bool malloced;
};

thread_qos_t
_pthread_qos_class_to_thread_qos(qos_class_t qos)
{
	switch (qos) {
	case QOS_CLASS_USER_INTERACTIVE: return THREAD_QOS_USER_INTERACTIVE;
	case QOS_CLASS_USER_INITIATED: return THREAD_QOS_USER_INITIATED;
	case QOS_CLASS_DEFAULT: return THREAD_QOS_LEGACY;
	case QOS_CLASS_UTILITY: return THREAD_QOS_UTILITY;
	case QOS_CLASS_BACKGROUND: return THREAD_QOS_BACKGROUND;
	case QOS_CLASS_MAINTENANCE: return THREAD_QOS_MAINTENANCE;
	default: return THREAD_QOS_UNSPECIFIED;
	}
}

static inline qos_class_t
_pthread_qos_class_from_thread_qos(thread_qos_t tqos)
{
	static const qos_class_t thread_qos_to_qos_class[THREAD_QOS_LAST] = {
		[THREAD_QOS_UNSPECIFIED] = QOS_CLASS_UNSPECIFIED,
		[THREAD_QOS_MAINTENANCE] = QOS_CLASS_MAINTENANCE,
		[THREAD_QOS_BACKGROUND] = QOS_CLASS_BACKGROUND,
		[THREAD_QOS_UTILITY] = QOS_CLASS_UTILITY,
		[THREAD_QOS_LEGACY] = QOS_CLASS_DEFAULT,
		[THREAD_QOS_USER_INITIATED] = QOS_CLASS_USER_INITIATED,
		[THREAD_QOS_USER_INTERACTIVE] = QOS_CLASS_USER_INTERACTIVE,
	};
	if (os_unlikely(tqos >= THREAD_QOS_LAST)) return QOS_CLASS_UNSPECIFIED;
	return thread_qos_to_qos_class[tqos];
}

static inline thread_qos_t
_pthread_validate_qos_class_and_relpri(qos_class_t qc, int relpri)
{
	if (relpri > 0 || relpri < QOS_MIN_RELATIVE_PRIORITY) {
		return THREAD_QOS_UNSPECIFIED;
	}
	return _pthread_qos_class_to_thread_qos(qc);
}

static inline void
_pthread_priority_split(pthread_priority_t pp, qos_class_t *qc, int *relpri)
{
	thread_qos_t qos = _pthread_priority_thread_qos(pp);
	if (qc) *qc = _pthread_qos_class_from_thread_qos(qos);
	if (relpri) *relpri = _pthread_priority_relpri(pp);
}

void
_pthread_set_main_qos(pthread_priority_t qos)
{
	_main_qos = (uint32_t)qos;
}

int
pthread_attr_set_qos_class_np(pthread_attr_t *attr, qos_class_t qc, int relpri)
{
	thread_qos_t qos = _pthread_validate_qos_class_and_relpri(qc, relpri);
	if (attr->sig != _PTHREAD_ATTR_SIG || attr->schedset) {
		return EINVAL;
	}

	attr->qosclass = _pthread_priority_make_from_thread_qos(qos, relpri, 0);
	attr->qosset = 1;
	attr->schedset = 0;
	return 0;
}

int
pthread_attr_get_qos_class_np(pthread_attr_t *attr, qos_class_t *qc, int *relpri)
{
	if (attr->sig != _PTHREAD_ATTR_SIG) {
		return EINVAL;
	}

	_pthread_priority_split(attr->qosset ? attr->qosclass : 0, qc, relpri);
	return 0;
}
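
/*
 * Illustrative sketch, not part of the library source: tagging a new thread
 * with a QoS class through the attribute API above (assumes the public
 * <pthread/qos.h> declarations). The relative priority must lie in
 * [QOS_MIN_RELATIVE_PRIORITY, 0], and the setter returns EINVAL if an
 * explicit scheduling policy was already configured on the attribute.
 *
 *	#include <pthread.h>
 *	#include <pthread/qos.h>
 *
 *	static void *worker(void *arg) { return arg; }
 *
 *	static void spawn_utility_worker(void)
 *	{
 *		pthread_attr_t attr;
 *		pthread_t t;
 *		qos_class_t qc;
 *		int relpri;
 *
 *		pthread_attr_init(&attr);
 *		pthread_attr_set_qos_class_np(&attr, QOS_CLASS_UTILITY, -5);
 *		pthread_attr_get_qos_class_np(&attr, &qc, &relpri); // QOS_CLASS_UTILITY, -5
 *		pthread_create(&t, &attr, worker, NULL);
 *		pthread_attr_destroy(&attr);
 *		pthread_join(t, NULL);
 *	}
 */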

int
pthread_set_qos_class_self_np(qos_class_t qc, int relpri)
{
	thread_qos_t qos = _pthread_validate_qos_class_and_relpri(qc, relpri);
	if (!qos) {
		return EINVAL;
	}

	pthread_priority_t pp = _pthread_priority_make_from_thread_qos(qos, relpri, 0);
	return _pthread_set_properties_self(_PTHREAD_SET_SELF_QOS_FLAG, pp, 0);
}

int
pthread_set_qos_class_np(pthread_t thread, qos_class_t qc, int relpri)
{
	if (thread != pthread_self()) {
		/* The kext enforces this restriction anyway; checking here as well
		 * lets us fall through to _pthread_set_properties_self below when
		 * the target really is the calling thread.
		 */
		return EPERM;
	}
	_pthread_validate_signature(thread);
	return pthread_set_qos_class_self_np(qc, relpri);
}

int
pthread_get_qos_class_np(pthread_t thread, qos_class_t *qc, int *relpri)
{
	pthread_priority_t pp = _pthread_tsd_slot(thread, PTHREAD_QOS_CLASS);
	_pthread_priority_split(pp, qc, relpri);
	return 0;
}

qos_class_t
qos_class_self(void)
{
	pthread_priority_t pp;
	pp = _pthread_getspecific_direct(_PTHREAD_TSD_SLOT_PTHREAD_QOS_CLASS);
	return _pthread_qos_class_from_thread_qos(_pthread_priority_thread_qos(pp));
}

qos_class_t
qos_class_main(void)
{
	pthread_priority_t pp = _main_qos;
	return _pthread_qos_class_from_thread_qos(_pthread_priority_thread_qos(pp));
}
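
/*
 * Illustrative sketch, not part of the library source: lowering the calling
 * thread's QoS and reading it back through the accessors above. Note that
 * pthread_set_qos_class_np() only accepts pthread_self(), so the _self_np
 * variant is the usual entry point.
 *
 *	qos_class_t qc;
 *	int relpri;
 *
 *	pthread_set_qos_class_self_np(QOS_CLASS_BACKGROUND, 0);
 *	qc = qos_class_self();                                  // QOS_CLASS_BACKGROUND
 *	pthread_get_qos_class_np(pthread_self(), &qc, &relpri); // same, with relpri == 0
 */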

pthread_priority_t
_pthread_qos_class_encode(qos_class_t qc, int relpri, unsigned long flags)
{
	thread_qos_t qos = _pthread_qos_class_to_thread_qos(qc);
	return _pthread_priority_make_from_thread_qos(qos, relpri, flags);
}

qos_class_t
_pthread_qos_class_decode(pthread_priority_t pp, int *relpri, unsigned long *flags)
{
	qos_class_t qc;
	_pthread_priority_split(pp, &qc, relpri);
	if (flags) *flags = (pp & _PTHREAD_PRIORITY_FLAGS_MASK);
	return qc;
}
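
/*
 * Illustrative sketch of this SPI (normally only used by libdispatch): a
 * pthread_priority_t round-trips the QoS class, the relative priority and
 * the flag bits.
 *
 *	int relpri;
 *	unsigned long flags;
 *	pthread_priority_t pp = _pthread_qos_class_encode(QOS_CLASS_UTILITY, -4, 0);
 *	qos_class_t qc = _pthread_qos_class_decode(pp, &relpri, &flags);
 *	// qc == QOS_CLASS_UTILITY, relpri == -4, flags == 0
 */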

// Encode a legacy workqueue API priority into a pthread_priority_t. This API
// is deprecated and can be removed when the simulator no longer uses it.
pthread_priority_t
_pthread_qos_class_encode_workqueue(int queue_priority, unsigned long flags)
{
	thread_qos_t qos;
	switch (queue_priority) {
	case WORKQ_HIGH_PRIOQUEUE: qos = THREAD_QOS_USER_INTERACTIVE; break;
	case WORKQ_DEFAULT_PRIOQUEUE: qos = THREAD_QOS_LEGACY; break;
	case WORKQ_NON_INTERACTIVE_PRIOQUEUE:
	case WORKQ_LOW_PRIOQUEUE: qos = THREAD_QOS_UTILITY; break;
	case WORKQ_BG_PRIOQUEUE: qos = THREAD_QOS_BACKGROUND; break;
	default:
		PTHREAD_CLIENT_CRASH(queue_priority, "Invalid priority");
	}
	return _pthread_priority_make_from_thread_qos(qos, 0, flags);
}

#define _PTHREAD_SET_SELF_OUTSIDE_QOS_SKIP \
		(_PTHREAD_SET_SELF_QOS_FLAG | _PTHREAD_SET_SELF_FIXEDPRIORITY_FLAG | \
		_PTHREAD_SET_SELF_TIMESHARE_FLAG | \
		_PTHREAD_SET_SELF_ALTERNATE_AMX)

int
_pthread_set_properties_self(_pthread_set_flags_t flags,
		pthread_priority_t priority, mach_port_t voucher)
{
	pthread_t self = pthread_self();
	_pthread_set_flags_t kflags = flags;
	int rv = 0;

	_pthread_validate_signature(self);

	if (self->wq_outsideqos && (flags & _PTHREAD_SET_SELF_OUTSIDE_QOS_SKIP)) {
		// A number of properties cannot be altered if we are a workloop
		// thread that has outside of QoS properties applied to it.
		kflags &= ~_PTHREAD_SET_SELF_OUTSIDE_QOS_SKIP;
		if (kflags == 0) goto skip;
	}

	rv = __bsdthread_ctl(BSDTHREAD_CTL_SET_SELF, priority, voucher, kflags);

skip:
	// Set QoS TSD if we succeeded, or only failed the voucher portion of the
	// call. Additionally, if we skipped setting QoS because of outside-of-QoS
	// attributes then we still want to set the TSD in userspace.
	if ((flags & _PTHREAD_SET_SELF_QOS_FLAG) != 0) {
		if (rv == 0 || errno == ENOENT) {
			_pthread_setspecific_direct(_PTHREAD_TSD_SLOT_PTHREAD_QOS_CLASS,
					priority);
		}
	}

	if (rv) {
		rv = errno;
	}
	return rv;
}

int
pthread_set_fixedpriority_self(void)
{
	return _pthread_set_properties_self(_PTHREAD_SET_SELF_FIXEDPRIORITY_FLAG, 0, 0);
}

int
pthread_set_timeshare_self(void)
{
	return _pthread_set_properties_self(_PTHREAD_SET_SELF_TIMESHARE_FLAG, 0, 0);
}

int
pthread_prefer_alternate_amx_self(void)
{
	return _pthread_set_properties_self(_PTHREAD_SET_SELF_ALTERNATE_AMX, 0, 0);
}


pthread_override_t
pthread_override_qos_class_start_np(pthread_t thread, qos_class_t qc, int relpri)
{
	pthread_override_t rv;
	kern_return_t kr;
	thread_qos_t qos;
	int res = 0;

	/* If malloc is not available yet (e.g. QoS overrides used before
	 * __pthread_init has run), fall back to vm_allocate, which means this
	 * tiny struct is going to use an entire page.
	 */
	bool did_malloc = true;

	qos = _pthread_validate_qos_class_and_relpri(qc, relpri);
	if (qos == THREAD_QOS_UNSPECIFIED) {
		return (_Nonnull pthread_override_t)NULL;
	}

	mach_vm_address_t vm_addr = malloc(sizeof(struct pthread_override_s));
	if (!vm_addr) {
		vm_addr = vm_page_size;
		did_malloc = false;

		kr = mach_vm_allocate(mach_task_self(), &vm_addr,
				round_page(sizeof(struct pthread_override_s)),
				VM_MAKE_TAG(VM_MEMORY_LIBDISPATCH) | VM_FLAGS_ANYWHERE);
		if (kr != KERN_SUCCESS) {
			errno = ENOMEM;
			return (_Nonnull pthread_override_t)NULL;
		}
	}

	rv = (pthread_override_t)vm_addr;
	rv->sig = PTHREAD_OVERRIDE_SIGNATURE;
	rv->pthread = thread;
	rv->kthread = pthread_mach_thread_np(thread);
	rv->priority = _pthread_priority_make_from_thread_qos(qos, relpri, 0);
	rv->malloced = did_malloc;

	/* To ensure that the kernel port that we keep stays valid, we retain it here. */
	kr = mach_port_mod_refs(mach_task_self(), rv->kthread, MACH_PORT_RIGHT_SEND, 1);
	if (kr != KERN_SUCCESS) {
		res = EINVAL;
	}

	if (res == 0) {
		res = __bsdthread_ctl(BSDTHREAD_CTL_QOS_OVERRIDE_START, rv->kthread, rv->priority, (uintptr_t)rv);

		if (res != 0) {
			mach_port_mod_refs(mach_task_self(), rv->kthread, MACH_PORT_RIGHT_SEND, -1);
		}
	}

	if (res != 0) {
		if (did_malloc) {
			free(rv);
		} else {
			mach_vm_deallocate(mach_task_self(), vm_addr, round_page(sizeof(struct pthread_override_s)));
		}
		rv = NULL;
	}
	return (_Nonnull pthread_override_t)rv;
}

int
pthread_override_qos_class_end_np(pthread_override_t override)
{
	kern_return_t kr;
	int res = 0;

	/* Double-free is a fault. Swap the signature and check the old one. */
	if (_pthread_atomic_xchg_uint32_relaxed(&override->sig, PTHREAD_OVERRIDE_SIG_DEAD) != PTHREAD_OVERRIDE_SIGNATURE) {
		__builtin_trap();
	}

	/* Always consumes (and deallocates) the pthread_override_t object given. */
	res = __bsdthread_ctl(BSDTHREAD_CTL_QOS_OVERRIDE_END, override->kthread, (uintptr_t)override, 0);
	if (res == -1) { res = errno; }

	/* EFAULT from the syscall means we underflowed. Crash here. */
	if (res == EFAULT) {
		// <rdar://problem/17645082> Disable the trap-on-underflow, it doesn't co-exist
		// with dispatch resetting override counts on threads.
		//__builtin_trap();
		res = 0;
	}

	kr = mach_port_mod_refs(mach_task_self(), override->kthread, MACH_PORT_RIGHT_SEND, -1);
	if (kr != KERN_SUCCESS) {
		res = EINVAL;
	}

	if (override->malloced) {
		free(override);
	} else {
		kr = mach_vm_deallocate(mach_task_self(), (mach_vm_address_t)override, round_page(sizeof(struct pthread_override_s)));
		if (kr != KERN_SUCCESS) {
			res = EINVAL;
		}
	}

	return res;
}
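
/*
 * Illustrative sketch, not part of the library source: boosting another
 * thread with the override API above (`target` is a pthread_t owned by the
 * caller). The object returned by _start_np must be passed to _end_np exactly
 * once; ending it a second time trips the signature check and traps.
 *
 *	pthread_override_t o;
 *
 *	o = pthread_override_qos_class_start_np(target, QOS_CLASS_USER_INITIATED, 0);
 *	if (o) {
 *		// ... wait for the boosted thread to finish the critical work ...
 *		pthread_override_qos_class_end_np(o);
 *	}
 */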

int
_pthread_qos_override_start_direct(mach_port_t thread, pthread_priority_t priority, void *resource)
{
	int res = __bsdthread_ctl(BSDTHREAD_CTL_QOS_OVERRIDE_START, thread, priority, (uintptr_t)resource);
	if (res == -1) { res = errno; }
	return res;
}

int
_pthread_qos_override_end_direct(mach_port_t thread, void *resource)
{
	int res = __bsdthread_ctl(BSDTHREAD_CTL_QOS_OVERRIDE_END, thread, (uintptr_t)resource, 0);
	if (res == -1) { res = errno; }
	return res;
}

int
_pthread_override_qos_class_start_direct(mach_port_t thread, pthread_priority_t priority)
{
	// use pthread_self as the default per-thread resource token the kernel
	// uses to track this override
	return _pthread_qos_override_start_direct(thread, priority, pthread_self());
}

int
_pthread_override_qos_class_end_direct(mach_port_t thread)
{
	// use pthread_self as the default per-thread resource token the kernel
	// uses to track this override
	return _pthread_qos_override_end_direct(thread, pthread_self());
}
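
/*
 * Illustrative sketch of this SPI: the resource pointer is an opaque token
 * that must match between start and end so the kernel can pair them up; the
 * wrappers above simply use pthread_self() as that token. `target` and
 * `request` are hypothetical caller-owned objects.
 *
 *	mach_port_t kt = pthread_mach_thread_np(target);
 *	pthread_priority_t pp = _pthread_qos_class_encode(QOS_CLASS_USER_INITIATED, 0, 0);
 *
 *	_pthread_qos_override_start_direct(kt, pp, request);
 *	// ... the override stays in effect until it is explicitly ended ...
 *	_pthread_qos_override_end_direct(kt, request);
 */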

int
_pthread_workqueue_override_start_direct(mach_port_t thread, pthread_priority_t priority)
{
	int res = __bsdthread_ctl(BSDTHREAD_CTL_QOS_OVERRIDE_DISPATCH, thread, priority, 0);
	if (res == -1) { res = errno; }
	return res;
}

int
_pthread_workqueue_override_start_direct_check_owner(mach_port_t thread, pthread_priority_t priority, mach_port_t *ulock_addr)
{
#if !TARGET_OS_IPHONE
	static boolean_t kernel_supports_owner_check = TRUE;
	if (!kernel_supports_owner_check) {
		ulock_addr = NULL;
	}
#endif

	for (;;) {
		int res = __bsdthread_ctl(BSDTHREAD_CTL_QOS_OVERRIDE_DISPATCH, thread, priority, ulock_addr);
		if (res == -1) { res = errno; }
#if !TARGET_OS_IPHONE
		if (ulock_addr && res == EINVAL) {
			if ((uintptr_t)ulock_addr % _Alignof(_Atomic uint32_t)) {
				// do not mask errors caused by bad ulock addresses
				return EINVAL;
			}
			// backward compatibility for the XBS chroot:
			// BSDTHREAD_CTL_QOS_OVERRIDE_DISPATCH used to return EINVAL if
			// arg3 was non-NULL.
			kernel_supports_owner_check = FALSE;
			ulock_addr = NULL;
			continue;
		}
#endif
		if (ulock_addr && res == EFAULT) {
			// kernel wants us to redrive the call, so while we refault the
			// memory, also revalidate the owner
			uint32_t uval = *(uint32_t volatile *)ulock_addr;
			if (ulock_owner_value_to_port_name(uval) != thread) {
				return ESTALE;
			}
			continue;
		}

		return res;
	}
}

int
_pthread_workqueue_override_reset(void)
{
	int res = __bsdthread_ctl(BSDTHREAD_CTL_QOS_OVERRIDE_RESET, 0, 0, 0);
	if (res == -1) { res = errno; }
	return res;
}

int
_pthread_workqueue_asynchronous_override_add(mach_port_t thread, pthread_priority_t priority, void *resource)
{
	int res = __bsdthread_ctl(BSDTHREAD_CTL_QOS_DISPATCH_ASYNCHRONOUS_OVERRIDE_ADD, thread, priority, (uintptr_t)resource);
	if (res == -1) { res = errno; }
	return res;
}

int
_pthread_workqueue_asynchronous_override_reset_self(void *resource)
{
	int res = __bsdthread_ctl(BSDTHREAD_CTL_QOS_DISPATCH_ASYNCHRONOUS_OVERRIDE_RESET,
			0 /* !reset_all */,
			(uintptr_t)resource,
			0);
	if (res == -1) { res = errno; }
	return res;
}

int
_pthread_workqueue_asynchronous_override_reset_all_self(void)
{
	int res = __bsdthread_ctl(BSDTHREAD_CTL_QOS_DISPATCH_ASYNCHRONOUS_OVERRIDE_RESET,
			1 /* reset_all */,
			0,
			0);
	if (res == -1) { res = errno; }
	return res;
}

static inline uint16_t
_pthread_workqueue_parallelism_for_priority(int qos, unsigned long flags)
{
	int rc = __bsdthread_ctl(BSDTHREAD_CTL_QOS_MAX_PARALLELISM, qos, flags, 0);
	if (os_unlikely(rc == -1)) {
		rc = errno;
		if (rc != EINVAL) {
			PTHREAD_INTERNAL_CRASH(rc, "qos_max_parallelism failed");
		}
		if (flags & _PTHREAD_QOS_PARALLELISM_COUNT_LOGICAL) {
			return *(uint8_t *)_COMM_PAGE_LOGICAL_CPUS;
		} else {
			return *(uint8_t *)_COMM_PAGE_PHYSICAL_CPUS;
		}
	}
	return (uint16_t)rc;
}

int
pthread_qos_max_parallelism(qos_class_t qos, unsigned long flags)
{
	thread_qos_t thread_qos;
	if (qos == QOS_CLASS_UNSPECIFIED) {
		qos = QOS_CLASS_DEFAULT; // <rdar://problem/35080198>
	}
	thread_qos = _pthread_qos_class_to_thread_qos(qos);
	if (thread_qos == THREAD_QOS_UNSPECIFIED) {
		errno = EINVAL;
		return -1;
	}

	unsigned long syscall_flags = _PTHREAD_QOS_PARALLELISM_COUNT_LOGICAL;
	uint16_t *ptr = &_pthread_globals()->qmp_logical[thread_qos];

	if (flags & PTHREAD_MAX_PARALLELISM_PHYSICAL) {
		syscall_flags = 0;
		ptr = &_pthread_globals()->qmp_physical[thread_qos];
	}
	if (*ptr == 0) {
		*ptr = _pthread_workqueue_parallelism_for_priority(thread_qos, syscall_flags);
	}
	return *ptr;
}

int
pthread_time_constraint_max_parallelism(unsigned long flags)
{
	unsigned long syscall_flags = _PTHREAD_QOS_PARALLELISM_COUNT_LOGICAL;
	uint16_t *ptr = &_pthread_globals()->qmp_logical[0];

	if (flags & PTHREAD_MAX_PARALLELISM_PHYSICAL) {
		syscall_flags = 0;
		ptr = &_pthread_globals()->qmp_physical[0];
	}
	if (*ptr == 0) {
		*ptr = _pthread_workqueue_parallelism_for_priority(0,
				syscall_flags | _PTHREAD_QOS_PARALLELISM_REALTIME);
	}
	return *ptr;
}
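
/*
 * Illustrative sketch, not part of the library source: sizing a worker pool
 * from the parallelism queries above. The results are cached per process in
 * _pthread_globals(), so repeated calls are cheap.
 *
 *	int logical  = pthread_qos_max_parallelism(QOS_CLASS_UTILITY, 0);
 *	int physical = pthread_qos_max_parallelism(QOS_CLASS_UTILITY,
 *			PTHREAD_MAX_PARALLELISM_PHYSICAL);
 *	int realtime = pthread_time_constraint_max_parallelism(0);
 *	// e.g. create at most `logical` worker threads for a utility-QoS job
 */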

int
posix_spawnattr_set_qos_class_np(posix_spawnattr_t * __restrict __attr, qos_class_t __qos_class)
{
	switch (__qos_class) {
	case QOS_CLASS_UTILITY:
		return posix_spawnattr_set_qos_clamp_np(__attr, POSIX_SPAWN_PROC_CLAMP_UTILITY);
	case QOS_CLASS_BACKGROUND:
		return posix_spawnattr_set_qos_clamp_np(__attr, POSIX_SPAWN_PROC_CLAMP_BACKGROUND);
	case QOS_CLASS_MAINTENANCE:
		return posix_spawnattr_set_qos_clamp_np(__attr, POSIX_SPAWN_PROC_CLAMP_MAINTENANCE);
	default:
		return EINVAL;
	}
}

int
posix_spawnattr_get_qos_class_np(const posix_spawnattr_t *__restrict __attr, qos_class_t * __restrict __qos_class)
{
	uint64_t clamp;

	if (!__qos_class) {
		return EINVAL;
	}

	int rv = posix_spawnattr_get_qos_clamp_np(__attr, &clamp);
	if (rv != 0) {
		return rv;
	}

	switch (clamp) {
	case POSIX_SPAWN_PROC_CLAMP_UTILITY:
		*__qos_class = QOS_CLASS_UTILITY;
		break;
	case POSIX_SPAWN_PROC_CLAMP_BACKGROUND:
		*__qos_class = QOS_CLASS_BACKGROUND;
		break;
	case POSIX_SPAWN_PROC_CLAMP_MAINTENANCE:
		*__qos_class = QOS_CLASS_MAINTENANCE;
		break;
	default:
		*__qos_class = QOS_CLASS_UNSPECIFIED;
		break;
	}

	return 0;
}
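
/*
 * Illustrative sketch, not part of the library source: clamping a spawned
 * child to background QoS (assumes the declarations from <pthread/spawn.h>,
 * which this file includes). Only UTILITY, BACKGROUND and MAINTENANCE are
 * valid clamps; the getter reports QOS_CLASS_UNSPECIFIED when no clamp is set.
 *
 *	#include <spawn.h>
 *	#include <pthread/spawn.h>
 *
 *	static int spawn_background(const char *path, char *const argv[], pid_t *pid)
 *	{
 *		posix_spawnattr_t sattr;
 *		qos_class_t clamp;
 *		int rc;
 *
 *		posix_spawnattr_init(&sattr);
 *		posix_spawnattr_set_qos_class_np(&sattr, QOS_CLASS_BACKGROUND);
 *		posix_spawnattr_get_qos_class_np(&sattr, &clamp); // QOS_CLASS_BACKGROUND
 *		rc = posix_spawn(pid, path, NULL, &sattr, argv, NULL);
 *		posix_spawnattr_destroy(&sattr);
 *		return rc;
 *	}
 */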