[apple/xnu.git] osfmk/kern/ipc_tt.c (xnu-7195.101.1)
1/*
2 * Copyright (c) 2000-2010 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28/*
29 * @OSF_COPYRIGHT@
30 */
31/*
32 * Mach Operating System
33 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
34 * All Rights Reserved.
35 *
36 * Permission to use, copy, modify and distribute this software and its
37 * documentation is hereby granted, provided that both the copyright
38 * notice and this permission notice appear in all copies of the
39 * software, derivative works or modified versions, and any portions
40 * thereof, and that both notices appear in supporting documentation.
41 *
42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45 *
46 * Carnegie Mellon requests users of this software to return to
47 *
48 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
49 * School of Computer Science
50 * Carnegie Mellon University
51 * Pittsburgh PA 15213-3890
52 *
53 * any improvements or extensions that they make and grant Carnegie Mellon
54 * the rights to redistribute these changes.
55 */
56/*
57 * NOTICE: This file was modified by McAfee Research in 2004 to introduce
58 * support for mandatory and extensible security protections. This notice
59 * is included in support of clause 2.2 (b) of the Apple Public License,
60 * Version 2.0.
61 */
62/*
63 */
64
65/*
66 * File: ipc_tt.c
67 * Purpose:
68 * Task and thread related IPC functions.
69 */
70
71#include <mach/mach_types.h>
72#include <mach/boolean.h>
73#include <mach/kern_return.h>
74#include <mach/mach_param.h>
75#include <mach/task_special_ports.h>
76#include <mach/thread_special_ports.h>
77#include <mach/thread_status.h>
78#include <mach/exception_types.h>
79#include <mach/memory_object_types.h>
80#include <mach/mach_traps.h>
81#include <mach/task_server.h>
82#include <mach/thread_act_server.h>
83#include <mach/mach_host_server.h>
84#include <mach/host_priv_server.h>
85#include <mach/vm_map_server.h>
86
87#include <kern/kern_types.h>
88#include <kern/host.h>
89#include <kern/ipc_kobject.h>
90#include <kern/ipc_tt.h>
91#include <kern/kalloc.h>
92#include <kern/thread.h>
93#include <kern/misc_protos.h>
94#include <kdp/kdp_dyld.h>
95
96#include <vm/vm_map.h>
97#include <vm/vm_pageout.h>
98#include <vm/vm_protos.h>
99
100#include <security/mac_mach_internal.h>
101
102#if CONFIG_CSR
103#include <sys/csr.h>
104#endif
105
106#if !defined(XNU_TARGET_OS_OSX) && !SECURE_KERNEL
107extern int cs_relax_platform_task_ports;
108#endif
109
110extern boolean_t IOTaskHasEntitlement(task_t, const char *);
111
112/* forward declarations */
113static kern_return_t port_allowed_with_task_flavor(int which, mach_task_flavor_t flavor);
114static kern_return_t port_allowed_with_thread_flavor(int which, mach_thread_flavor_t flavor);
115static void ipc_port_bind_special_reply_port_locked(ipc_port_t port);
116static kern_return_t ipc_port_unbind_special_reply_port(thread_t thread, boolean_t unbind_active_port);
117kern_return_t task_conversion_eval(task_t caller, task_t victim);
118static ipc_space_t convert_port_to_space_no_eval(ipc_port_t port);
119static thread_t convert_port_to_thread_no_eval(ipc_port_t port);
120static ipc_port_t convert_task_to_port_with_flavor(task_t task, mach_task_flavor_t flavor);
121static ipc_port_t convert_thread_to_port_with_flavor(thread_t thread, mach_thread_flavor_t flavor);
122static task_read_t convert_port_to_task_read_no_eval(ipc_port_t port);
123static thread_read_t convert_port_to_thread_read_no_eval(ipc_port_t port);
124static ipc_space_read_t convert_port_to_space_read_no_eval(ipc_port_t port);
125
126/*
127 * Routine: ipc_task_init
128 * Purpose:
129 * Initialize a task's IPC state.
130 *
131 * If non-null, some state will be inherited from the parent.
132 * The parent must be appropriately initialized.
133 * Conditions:
134 * Nothing locked.
135 */
136
137void
138ipc_task_init(
139 task_t task,
140 task_t parent)
141{
142 ipc_space_t space;
143 ipc_port_t kport;
144 ipc_port_t nport;
145 ipc_port_t pport;
146 kern_return_t kr;
147 int i;
148
149
150 kr = ipc_space_create(&ipc_table_entries[0], IPC_LABEL_NONE, &space);
151 if (kr != KERN_SUCCESS) {
152 panic("ipc_task_init");
153 }
154
155 space->is_task = task;
156
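/*
 * With immovable control ports enabled, the task gets two control ports:
 * pport, an immovable (and possibly pinned) kobject port later stored in
 * itk_self, and kport, a movable labeled port (IPC_LABEL_SUBST_TASK) whose
 * ip_alt_port points back at pport; kport is what goes into
 * itk_task_ports[TASK_FLAVOR_CONTROL]. With the feature disabled a single
 * port plays both roles (pport == kport).
 */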
157 if (immovable_control_port_enabled) {
158 ipc_kobject_alloc_options_t options = IPC_KOBJECT_ALLOC_IMMOVABLE_SEND;
159 if (pinned_control_port_enabled) {
160 options |= IPC_KOBJECT_ALLOC_PINNED;
161 }
162 pport = ipc_kobject_alloc_port(IKO_NULL, IKOT_NONE, options);
163
164 kport = ipc_kobject_alloc_labeled_port(IKO_NULL, IKOT_TASK_CONTROL,
165 IPC_LABEL_SUBST_TASK, IPC_KOBJECT_ALLOC_NONE);
166 kport->ip_alt_port = pport;
167 } else {
168 kport = ipc_kobject_alloc_port(IKO_NULL, IKOT_TASK_CONTROL,
169 IPC_KOBJECT_ALLOC_NONE);
170
171 pport = kport;
172 }
173
174 nport = ipc_port_alloc_kernel();
175 if (nport == IP_NULL) {
176 panic("ipc_task_init");
177 }
178
179 if (pport == IP_NULL) {
180 panic("ipc_task_init");
181 }
182
183 itk_lock_init(task);
184 task->itk_task_ports[TASK_FLAVOR_CONTROL] = kport;
185 task->itk_task_ports[TASK_FLAVOR_NAME] = nport;
186
187 /* Lazily allocated on-demand */
188 task->itk_task_ports[TASK_FLAVOR_INSPECT] = IP_NULL;
189 task->itk_task_ports[TASK_FLAVOR_READ] = IP_NULL;
190 task->itk_dyld_notify = NULL;
191
192 task->itk_self = pport;
193 task->itk_resume = IP_NULL; /* Lazily allocated on-demand */
194 if (task_is_a_corpse_fork(task)) {
195 /*
196 * A no-senders notification for a corpse would not
197 * work with a naked send right in kernel.
198 */
199 task->itk_settable_self = IP_NULL;
200 } else {
201 task->itk_settable_self = ipc_port_make_send(kport);
202 }
203 task->itk_debug_control = IP_NULL;
204 task->itk_space = space;
205
206#if CONFIG_MACF
207 task->exc_actions[0].label = NULL;
208 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
209 mac_exc_associate_action_label(&task->exc_actions[i], mac_exc_create_label());
210 }
211#endif
212
213 /* always zero-out the first (unused) array element */
214 bzero(&task->exc_actions[0], sizeof(task->exc_actions[0]));
215
216 if (parent == TASK_NULL) {
217 ipc_port_t port;
218 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
219 task->exc_actions[i].port = IP_NULL;
220 task->exc_actions[i].flavor = 0;
221 task->exc_actions[i].behavior = 0;
222 task->exc_actions[i].privileged = FALSE;
223 }/* for */
224
225 kr = host_get_host_port(host_priv_self(), &port);
226 assert(kr == KERN_SUCCESS);
227 task->itk_host = port;
228
229 task->itk_bootstrap = IP_NULL;
230 task->itk_seatbelt = IP_NULL;
231 task->itk_gssd = IP_NULL;
232 task->itk_task_access = IP_NULL;
233
234 for (i = 0; i < TASK_PORT_REGISTER_MAX; i++) {
235 task->itk_registered[i] = IP_NULL;
236 }
237 } else {
238 itk_lock(parent);
239 assert(parent->itk_task_ports[TASK_FLAVOR_CONTROL] != IP_NULL);
240
241 /* inherit registered ports */
242
243 for (i = 0; i < TASK_PORT_REGISTER_MAX; i++) {
244 task->itk_registered[i] =
245 ipc_port_copy_send(parent->itk_registered[i]);
246 }
247
248 /* inherit exception and bootstrap ports */
249
250 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
251 task->exc_actions[i].port =
252 ipc_port_copy_send(parent->exc_actions[i].port);
253 task->exc_actions[i].flavor =
254 parent->exc_actions[i].flavor;
255 task->exc_actions[i].behavior =
256 parent->exc_actions[i].behavior;
257 task->exc_actions[i].privileged =
258 parent->exc_actions[i].privileged;
259#if CONFIG_MACF
260 mac_exc_inherit_action_label(parent->exc_actions + i, task->exc_actions + i);
261#endif
262 }/* for */
263 task->itk_host =
264 ipc_port_copy_send(parent->itk_host);
265
266 task->itk_bootstrap =
267 ipc_port_copy_send(parent->itk_bootstrap);
268
269 task->itk_seatbelt =
270 ipc_port_copy_send(parent->itk_seatbelt);
271
272 task->itk_gssd =
273 ipc_port_copy_send(parent->itk_gssd);
274
275 task->itk_task_access =
276 ipc_port_copy_send(parent->itk_task_access);
277
278 itk_unlock(parent);
279 }
280}
281
282/*
283 * Routine: ipc_task_enable
284 * Purpose:
285 * Enable a task for IPC access.
286 * Conditions:
287 * Nothing locked.
288 */
289
290void
291ipc_task_enable(
292 task_t task)
293{
294 ipc_port_t kport;
295 ipc_port_t nport;
296 ipc_port_t iport;
297 ipc_port_t rdport;
298 ipc_port_t pport;
299
300 itk_lock(task);
301
302 assert(!task->ipc_active || task_is_a_corpse(task));
303 task->ipc_active = true;
304
305 kport = task->itk_task_ports[TASK_FLAVOR_CONTROL];
306 if (kport != IP_NULL) {
307 ipc_kobject_set(kport, (ipc_kobject_t) task, IKOT_TASK_CONTROL);
308 }
309 nport = task->itk_task_ports[TASK_FLAVOR_NAME];
310 if (nport != IP_NULL) {
311 ipc_kobject_set(nport, (ipc_kobject_t) task, IKOT_TASK_NAME);
312 }
313 iport = task->itk_task_ports[TASK_FLAVOR_INSPECT];
314 if (iport != IP_NULL) {
315 ipc_kobject_set(iport, (ipc_kobject_t) task, IKOT_TASK_INSPECT);
316 }
317 rdport = task->itk_task_ports[TASK_FLAVOR_READ];
318 if (rdport != IP_NULL) {
319 ipc_kobject_set(rdport, (ipc_kobject_t) task, IKOT_TASK_READ);
320 }
321 pport = task->itk_self;
322 if (immovable_control_port_enabled && pport != IP_NULL) {
323 ipc_kobject_set(pport, (ipc_kobject_t) task, IKOT_TASK_CONTROL);
324 }
325
326 itk_unlock(task);
327}
328
329/*
330 * Routine: ipc_task_disable
331 * Purpose:
332 * Disable IPC access to a task.
333 * Conditions:
334 * Nothing locked.
335 */
336
337void
338ipc_task_disable(
339 task_t task)
340{
341 ipc_port_t kport;
342 ipc_port_t nport;
343 ipc_port_t iport;
344 ipc_port_t rdport;
345 ipc_port_t rport;
346 ipc_port_t pport;
347
348 itk_lock(task);
349
350 /*
351 * This innocuous looking line is load bearing.
352 *
353 * It is used to disable the creation of lazily made ports.
354 * We must do so before we drop the last reference on the task,
355 * as task ports do not own a reference on the task, and
356 * convert_port_to_task* will crash trying to resurrect a task.
357 */
358 task->ipc_active = false;
359
360 kport = task->itk_task_ports[TASK_FLAVOR_CONTROL];
361 if (kport != IP_NULL) {
362 ip_lock(kport);
363 kport->ip_alt_port = IP_NULL;
364 ipc_kobject_set_atomically(kport, IKO_NULL, IKOT_NONE);
365 ip_unlock(kport);
366 }
367 nport = task->itk_task_ports[TASK_FLAVOR_NAME];
368 if (nport != IP_NULL) {
369 ipc_kobject_set(nport, IKO_NULL, IKOT_NONE);
370 }
371 iport = task->itk_task_ports[TASK_FLAVOR_INSPECT];
372 if (iport != IP_NULL) {
373 ipc_kobject_set(iport, IKO_NULL, IKOT_NONE);
374 }
375 rdport = task->itk_task_ports[TASK_FLAVOR_READ];
376 if (rdport != IP_NULL) {
377 ipc_kobject_set(rdport, IKO_NULL, IKOT_NONE);
378 }
379 pport = task->itk_self;
380 if (pport != kport && pport != IP_NULL) {
381 assert(immovable_control_port_enabled);
382 assert(pport->ip_immovable_send);
383 ipc_kobject_set(pport, IKO_NULL, IKOT_NONE);
384 }
385
386 rport = task->itk_resume;
387 if (rport != IP_NULL) {
388 /*
389 * From this point onwards this task is no longer accepting
390 * resumptions.
391 *
392 * There are still outstanding suspensions on this task,
393 * even as it is being torn down. Disconnect the task
394 * from the rport, thereby "orphaning" the rport. The rport
395 * itself will go away only when the last suspension holder
396 * destroys his SO right to it -- when he either
397 * exits, or tries to actually use that last SO right to
398 * resume this (now non-existent) task.
399 */
400 ipc_kobject_set(rport, IKO_NULL, IKOT_NONE);
401 }
402 itk_unlock(task);
403}
404
405/*
406 * Routine: ipc_task_terminate
407 * Purpose:
408 * Clean up and destroy a task's IPC state.
409 * Conditions:
410 * Nothing locked. The task must be suspended.
411 * (Or the current thread must be in the task.)
412 */
413
414void
415ipc_task_terminate(
416 task_t task)
417{
418 ipc_port_t kport;
419 ipc_port_t nport;
420 ipc_port_t iport;
421 ipc_port_t rdport;
422 ipc_port_t rport;
423 ipc_port_t pport;
424 ipc_port_t sself;
425 ipc_port_t *notifiers_ptr = NULL;
426
427 itk_lock(task);
428
429 /*
430 * If we ever failed to clear ipc_active before the last reference
431 * was dropped, lazy ports might be made and used after the last
432 * reference is dropped and cause use after free (see comment in
433 * ipc_task_disable()).
434 */
435 assert(!task->ipc_active);
436
437 kport = task->itk_task_ports[TASK_FLAVOR_CONTROL];
438 sself = task->itk_settable_self;
439
440 if (kport == IP_NULL) {
441 /* the task is already terminated (can this happen?) */
442 itk_unlock(task);
443 return;
444 }
445 task->itk_task_ports[TASK_FLAVOR_CONTROL] = IP_NULL;
446
447 rdport = task->itk_task_ports[TASK_FLAVOR_READ];
448 task->itk_task_ports[TASK_FLAVOR_READ] = IP_NULL;
449
450 iport = task->itk_task_ports[TASK_FLAVOR_INSPECT];
451 task->itk_task_ports[TASK_FLAVOR_INSPECT] = IP_NULL;
452
453 nport = task->itk_task_ports[TASK_FLAVOR_NAME];
454 assert(nport != IP_NULL);
455 task->itk_task_ports[TASK_FLAVOR_NAME] = IP_NULL;
456
457 if (task->itk_dyld_notify) {
458 notifiers_ptr = task->itk_dyld_notify;
459 task->itk_dyld_notify = NULL;
460 }
461
462 if (immovable_control_port_enabled) {
463 pport = task->itk_self;
464 assert(pport != IP_NULL);
465 }
466
467 task->itk_self = IP_NULL;
468
469 rport = task->itk_resume;
470 task->itk_resume = IP_NULL;
471
472 itk_unlock(task);
473
474 /* release the naked send rights */
475 if (IP_VALID(sself)) {
476 ipc_port_release_send(sself);
477 }
478
479 if (notifiers_ptr) {
480 for (int i = 0; i < DYLD_MAX_PROCESS_INFO_NOTIFY_COUNT; i++) {
481 if (IP_VALID(notifiers_ptr[i])) {
482 ipc_port_release_send(notifiers_ptr[i]);
483 }
484 }
485 kfree(notifiers_ptr, DYLD_MAX_PROCESS_INFO_NOTIFY_COUNT * sizeof(ipc_port_t));
486 }
487
488 for (int i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
489 if (IP_VALID(task->exc_actions[i].port)) {
490 ipc_port_release_send(task->exc_actions[i].port);
491 }
492#if CONFIG_MACF
493 mac_exc_free_action_label(task->exc_actions + i);
494#endif
495 }
496
497 if (IP_VALID(task->itk_host)) {
498 ipc_port_release_send(task->itk_host);
499 }
500
501 if (IP_VALID(task->itk_bootstrap)) {
502 ipc_port_release_send(task->itk_bootstrap);
503 }
504
505 if (IP_VALID(task->itk_seatbelt)) {
506 ipc_port_release_send(task->itk_seatbelt);
507 }
508
509 if (IP_VALID(task->itk_gssd)) {
510 ipc_port_release_send(task->itk_gssd);
511 }
512
513 if (IP_VALID(task->itk_task_access)) {
514 ipc_port_release_send(task->itk_task_access);
515 }
516
517 if (IP_VALID(task->itk_debug_control)) {
518 ipc_port_release_send(task->itk_debug_control);
519 }
520
521 for (int i = 0; i < TASK_PORT_REGISTER_MAX; i++) {
522 if (IP_VALID(task->itk_registered[i])) {
523 ipc_port_release_send(task->itk_registered[i]);
524 }
525 }
526
527 /* destroy the kernel ports */
528 if (immovable_control_port_enabled) {
529 ip_lock(kport);
530 kport->ip_alt_port = IP_NULL;
531 ipc_kobject_set_atomically(kport, IKO_NULL, IKOT_NONE);
532 ip_unlock(kport);
533
534 /* pport == kport if immovability is off */
535 ipc_port_dealloc_kernel(pport);
536 }
537 ipc_port_dealloc_kernel(kport);
538 ipc_port_dealloc_kernel(nport);
539 if (iport != IP_NULL) {
540 ipc_port_dealloc_kernel(iport);
541 }
542 if (rdport != IP_NULL) {
543 ipc_port_dealloc_kernel(rdport);
544 }
545 if (rport != IP_NULL) {
546 ipc_port_dealloc_kernel(rport);
547 }
548
549 itk_lock_destroy(task);
550}
551
552/*
553 * Routine: ipc_task_reset
554 * Purpose:
555 * Reset a task's IPC state to protect it when
556 * it enters an elevated security context. The
557 * task name port can remain the same - since it
558 * represents no specific privilege.
559 * Conditions:
560 * Nothing locked. The task must be suspended.
561 * (Or the current thread must be in the task.)
562 */
563
564void
565ipc_task_reset(
566 task_t task)
567{
568 ipc_port_t old_kport, old_pport, new_kport, new_pport;
569 ipc_port_t old_sself;
570 ipc_port_t old_rdport;
571 ipc_port_t old_iport;
572 ipc_port_t old_exc_actions[EXC_TYPES_COUNT];
573 ipc_port_t *notifiers_ptr = NULL;
574
575#if CONFIG_MACF
576 /* Fresh label to unset credentials in existing labels. */
577 struct label *unset_label = mac_exc_create_label();
578#endif
579
580 if (immovable_control_port_enabled) {
581 ipc_kobject_alloc_options_t options = IPC_KOBJECT_ALLOC_IMMOVABLE_SEND;
582 if (pinned_control_port_enabled) {
583 options |= IPC_KOBJECT_ALLOC_PINNED;
584 }
585
586 new_pport = ipc_kobject_alloc_port((ipc_kobject_t)task,
587 IKOT_TASK_CONTROL, options);
588
589 new_kport = ipc_kobject_alloc_labeled_port((ipc_kobject_t)task,
590 IKOT_TASK_CONTROL, IPC_LABEL_SUBST_TASK,
591 IPC_KOBJECT_ALLOC_NONE);
592 new_kport->ip_alt_port = new_pport;
593 } else {
594 new_kport = ipc_kobject_alloc_port((ipc_kobject_t)task,
595 IKOT_TASK_CONTROL, IPC_KOBJECT_ALLOC_NONE);
596
597 new_pport = new_kport;
598 }
599
600 itk_lock(task);
601
602 old_kport = task->itk_task_ports[TASK_FLAVOR_CONTROL];
603 old_rdport = task->itk_task_ports[TASK_FLAVOR_READ];
604 old_iport = task->itk_task_ports[TASK_FLAVOR_INSPECT];
605
606 old_pport = task->itk_self;
607
608 if (old_pport == IP_NULL) {
609 /* the task is already terminated (can this happen?) */
610 itk_unlock(task);
611 ipc_port_dealloc_kernel(new_kport);
612 if (immovable_control_port_enabled) {
613 ipc_port_dealloc_kernel(new_pport);
614 }
615#if CONFIG_MACF
616 mac_exc_free_label(unset_label);
617#endif
618 return;
619 }
620
621 old_sself = task->itk_settable_self;
622 task->itk_task_ports[TASK_FLAVOR_CONTROL] = new_kport;
623 task->itk_self = new_pport;
624
625 task->itk_settable_self = ipc_port_make_send(new_kport);
626
627 /* Set the old kport to IKOT_NONE and update the exec token while under the port lock */
628 ip_lock(old_kport);
629 old_kport->ip_alt_port = IP_NULL;
630 ipc_kobject_set_atomically(old_kport, IKO_NULL, IKOT_NONE);
631 task->exec_token += 1;
632 ip_unlock(old_kport);
633
634 /* Reset the read and inspect flavors of task port */
635 task->itk_task_ports[TASK_FLAVOR_READ] = IP_NULL;
636 task->itk_task_ports[TASK_FLAVOR_INSPECT] = IP_NULL;
637
638 if (immovable_control_port_enabled) {
639 ip_lock(old_pport);
640 ipc_kobject_set_atomically(old_pport, IKO_NULL, IKOT_NONE);
641 task->exec_token += 1;
642 ip_unlock(old_pport);
643 }
644
645 for (int i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
646 old_exc_actions[i] = IP_NULL;
647
648 if (i == EXC_CORPSE_NOTIFY && task_corpse_pending_report(task)) {
649 continue;
650 }
651
652 if (!task->exc_actions[i].privileged) {
653#if CONFIG_MACF
654 mac_exc_update_action_label(task->exc_actions + i, unset_label);
655#endif
656 old_exc_actions[i] = task->exc_actions[i].port;
657 task->exc_actions[i].port = IP_NULL;
658 }
659 }/* for */
660
661 if (IP_VALID(task->itk_debug_control)) {
662 ipc_port_release_send(task->itk_debug_control);
663 }
664 task->itk_debug_control = IP_NULL;
665
666 if (task->itk_dyld_notify) {
667 notifiers_ptr = task->itk_dyld_notify;
668 task->itk_dyld_notify = NULL;
669 }
670
671 itk_unlock(task);
672
673#if CONFIG_MACF
674 mac_exc_free_label(unset_label);
675#endif
676
677 /* release the naked send rights */
678
679 if (IP_VALID(old_sself)) {
680 ipc_port_release_send(old_sself);
681 }
682
683 if (notifiers_ptr) {
684 for (int i = 0; i < DYLD_MAX_PROCESS_INFO_NOTIFY_COUNT; i++) {
685 if (IP_VALID(notifiers_ptr[i])) {
686 ipc_port_release_send(notifiers_ptr[i]);
687 }
688 }
689 kfree(notifiers_ptr, DYLD_MAX_PROCESS_INFO_NOTIFY_COUNT * sizeof(ipc_port_t));
690 }
691
692 for (int i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
693 if (IP_VALID(old_exc_actions[i])) {
694 ipc_port_release_send(old_exc_actions[i]);
695 }
696 }/* for */
697
698 /* destroy all task port flavors */
699 ipc_port_dealloc_kernel(old_kport);
700 if (immovable_control_port_enabled) {
701 ipc_port_dealloc_kernel(old_pport);
702 }
703 if (old_rdport != IP_NULL) {
704 ipc_port_dealloc_kernel(old_rdport);
705 }
706 if (old_iport != IP_NULL) {
707 ipc_port_dealloc_kernel(old_iport);
708 }
709}
710
711/*
712 * Routine: ipc_thread_init
713 * Purpose:
714 * Initialize a thread's IPC state.
715 * Conditions:
716 * Nothing locked.
717 */
718
719void
720ipc_thread_init(
721 thread_t thread,
722 ipc_thread_init_options_t options)
723{
724 ipc_port_t kport;
725 ipc_port_t pport;
726 ipc_kobject_alloc_options_t alloc_options = IPC_KOBJECT_ALLOC_NONE;
727
728 /*
729 * Having the immovable_control_port_enabled boot-arg set does not guarantee
730 * that the thread control port should be made immovable/pinned; also check options.
731 *
732 * Raw mach threads created via thread_create() have neither INIT_PINNED
733 * nor INIT_IMMOVABLE set.
734 */
735 if (immovable_control_port_enabled && (options & IPC_THREAD_INIT_IMMOVABLE)) {
736 alloc_options |= IPC_KOBJECT_ALLOC_IMMOVABLE_SEND;
737
738 if (pinned_control_port_enabled && (options & IPC_THREAD_INIT_PINNED)) {
739 alloc_options |= IPC_KOBJECT_ALLOC_PINNED;
740 }
741
742 pport = ipc_kobject_alloc_port((ipc_kobject_t)thread,
743 IKOT_THREAD_CONTROL, alloc_options);
744
745 kport = ipc_kobject_alloc_labeled_port((ipc_kobject_t)thread,
746 IKOT_THREAD_CONTROL, IPC_LABEL_SUBST_THREAD, IPC_KOBJECT_ALLOC_NONE);
747 kport->ip_alt_port = pport;
748 } else {
749 kport = ipc_kobject_alloc_port((ipc_kobject_t)thread,
750 IKOT_THREAD_CONTROL, IPC_KOBJECT_ALLOC_NONE);
751
752 pport = kport;
753 }
754
755 thread->ith_thread_ports[THREAD_FLAVOR_CONTROL] = kport;
756
757 thread->ith_settable_self = ipc_port_make_send(kport);
758
759 thread->ith_self = pport;
760
761 thread->ith_special_reply_port = NULL;
762 thread->exc_actions = NULL;
763
764#if IMPORTANCE_INHERITANCE
765 thread->ith_assertions = 0;
766#endif
767
768 thread->ipc_active = true;
769 ipc_kmsg_queue_init(&thread->ith_messages);
770
771 thread->ith_rpc_reply = IP_NULL;
772}
773
774void
775ipc_thread_init_exc_actions(
776 thread_t thread)
777{
778 assert(thread->exc_actions == NULL);
779
780 thread->exc_actions = kalloc(sizeof(struct exception_action) * EXC_TYPES_COUNT);
781 bzero(thread->exc_actions, sizeof(struct exception_action) * EXC_TYPES_COUNT);
782
783#if CONFIG_MACF
784 for (size_t i = 0; i < EXC_TYPES_COUNT; ++i) {
785 mac_exc_associate_action_label(thread->exc_actions + i, mac_exc_create_label());
786 }
787#endif
788}
789
790void
791ipc_thread_destroy_exc_actions(
792 thread_t thread)
793{
794 if (thread->exc_actions != NULL) {
795#if CONFIG_MACF
796 for (size_t i = 0; i < EXC_TYPES_COUNT; ++i) {
797 mac_exc_free_action_label(thread->exc_actions + i);
798 }
799#endif
800
801 kfree(thread->exc_actions,
802 sizeof(struct exception_action) * EXC_TYPES_COUNT);
803 thread->exc_actions = NULL;
804 }
805}
806
807/*
808 * Routine: ipc_thread_disable
809 * Purpose:
810 * Disable IPC access to a thread.
811 * Conditions:
812 * Thread locked.
813 */
814void
815ipc_thread_disable(
816 thread_t thread)
817{
818 ipc_port_t kport = thread->ith_thread_ports[THREAD_FLAVOR_CONTROL];
819 ipc_port_t iport = thread->ith_thread_ports[THREAD_FLAVOR_INSPECT];
820 ipc_port_t rdport = thread->ith_thread_ports[THREAD_FLAVOR_READ];
821 ipc_port_t pport = thread->ith_self;
822
823 /*
824 * This innocuous looking line is load bearing.
825 *
826 * It is used to disable the creation of lazily made ports.
827 * We must do so before we drop the last reference on the thread,
828 * as thread ports do not own a reference on the thread, and
829 * convert_port_to_thread* will crash trying to resurrect a thread.
830 */
831 thread->ipc_active = false;
832
833 if (kport != IP_NULL) {
834 ip_lock(kport);
835 kport->ip_alt_port = IP_NULL;
836 ipc_kobject_set_atomically(kport, IKO_NULL, IKOT_NONE);
837 ip_unlock(kport);
838 }
839
840 if (iport != IP_NULL) {
841 ipc_kobject_set(iport, IKO_NULL, IKOT_NONE);
842 }
843
844 if (rdport != IP_NULL) {
845 ipc_kobject_set(rdport, IKO_NULL, IKOT_NONE);
846 }
847
848 if (pport != kport && pport != IP_NULL) {
849 assert(immovable_control_port_enabled);
850 assert(pport->ip_immovable_send);
851 ipc_kobject_set(pport, IKO_NULL, IKOT_NONE);
852 }
853
854 /* unbind the thread special reply port */
855 if (IP_VALID(thread->ith_special_reply_port)) {
856 ipc_port_unbind_special_reply_port(thread, TRUE);
857 }
858}
859
860/*
861 * Routine: ipc_thread_terminate
862 * Purpose:
863 * Clean up and destroy a thread's IPC state.
864 * Conditions:
865 * Nothing locked.
866 */
867
868void
869ipc_thread_terminate(
870 thread_t thread)
871{
872 ipc_port_t kport = IP_NULL;
873 ipc_port_t iport = IP_NULL;
874 ipc_port_t rdport = IP_NULL;
875 ipc_port_t ith_rpc_reply = IP_NULL;
876 ipc_port_t pport = IP_NULL;
877
878 thread_mtx_lock(thread);
879
880 /*
881 * If we ever failed to clear ipc_active before the last reference
882 * was dropped, lazy ports might be made and used after the last
883 * reference is dropped and cause use after free (see comment in
884 * ipc_thread_disable()).
885 */
886 assert(!thread->ipc_active);
887
888 kport = thread->ith_thread_ports[THREAD_FLAVOR_CONTROL];
889 iport = thread->ith_thread_ports[THREAD_FLAVOR_INSPECT];
890 rdport = thread->ith_thread_ports[THREAD_FLAVOR_READ];
891 pport = thread->ith_self;
892
893 if (kport != IP_NULL) {
894 if (IP_VALID(thread->ith_settable_self)) {
895 ipc_port_release_send(thread->ith_settable_self);
896 }
897
898 thread->ith_thread_ports[THREAD_FLAVOR_CONTROL] = IP_NULL;
899 thread->ith_thread_ports[THREAD_FLAVOR_READ] = IP_NULL;
900 thread->ith_thread_ports[THREAD_FLAVOR_INSPECT] = IP_NULL;
901 thread->ith_settable_self = IP_NULL;
902 thread->ith_self = IP_NULL;
903
904 if (thread->exc_actions != NULL) {
905 for (int i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
906 if (IP_VALID(thread->exc_actions[i].port)) {
907 ipc_port_release_send(thread->exc_actions[i].port);
908 }
909 }
910 ipc_thread_destroy_exc_actions(thread);
911 }
912 }
913
914#if IMPORTANCE_INHERITANCE
915 assert(thread->ith_assertions == 0);
916#endif
917
918 assert(ipc_kmsg_queue_empty(&thread->ith_messages));
919 ith_rpc_reply = thread->ith_rpc_reply;
920 thread->ith_rpc_reply = IP_NULL;
921
922 thread_mtx_unlock(thread);
923
924 if (pport != kport && pport != IP_NULL) {
925 /* this thread has an immovable control port */
926 ip_lock(kport);
927 kport->ip_alt_port = IP_NULL;
928 ipc_kobject_set_atomically(kport, IKO_NULL, IKOT_NONE);
929 ip_unlock(kport);
930 ipc_port_dealloc_kernel(pport);
931 }
932 if (kport != IP_NULL) {
933 ipc_port_dealloc_kernel(kport);
934 }
935 if (iport != IP_NULL) {
936 ipc_port_dealloc_kernel(iport);
937 }
938 if (rdport != IP_NULL) {
939 ipc_port_dealloc_kernel(rdport);
940 }
941 if (ith_rpc_reply != IP_NULL) {
942 ipc_port_dealloc_reply(ith_rpc_reply);
943 }
944}
945
946/*
947 * Routine: ipc_thread_reset
948 * Purpose:
949 * Reset the IPC state for a given Mach thread when
950 * its task enters an elevated security context.
951 * All flavors of thread port and its exception ports have
952 * to be reset. Its RPC reply port cannot have any
953 * rights outstanding, so it should be fine. The thread
954 * inspect and read ports are set to NULL.
955 * Conditions:
956 * Nothing locked.
957 */
958
959void
960ipc_thread_reset(
961 thread_t thread)
962{
963 ipc_port_t old_kport, new_kport, old_pport, new_pport;
964 ipc_port_t old_sself;
965 ipc_port_t old_rdport;
966 ipc_port_t old_iport;
967 ipc_port_t old_exc_actions[EXC_TYPES_COUNT];
968 boolean_t has_old_exc_actions = FALSE;
969 boolean_t thread_is_immovable, thread_is_pinned;
970 int i;
971
972#if CONFIG_MACF
973 struct label *new_label = mac_exc_create_label();
974#endif
975
976 thread_is_immovable = thread->ith_self->ip_immovable_send;
977 thread_is_pinned = thread->ith_self->ip_pinned;
978
979 if (thread_is_immovable) {
980 ipc_kobject_alloc_options_t alloc_options = IPC_KOBJECT_ALLOC_NONE;
981
982 if (thread_is_pinned) {
983 assert(pinned_control_port_enabled);
984 alloc_options |= IPC_KOBJECT_ALLOC_PINNED;
985 }
986 if (thread_is_immovable) {
987 alloc_options |= IPC_KOBJECT_ALLOC_IMMOVABLE_SEND;
988 }
989 new_pport = ipc_kobject_alloc_port((ipc_kobject_t)thread,
990 IKOT_THREAD_CONTROL, alloc_options);
991
992 new_kport = ipc_kobject_alloc_labeled_port((ipc_kobject_t)thread,
993 IKOT_THREAD_CONTROL, IPC_LABEL_SUBST_THREAD,
994 IPC_KOBJECT_ALLOC_NONE);
995 new_kport->ip_alt_port = new_pport;
996 } else {
997 new_kport = ipc_kobject_alloc_port((ipc_kobject_t)thread,
998 IKOT_THREAD_CONTROL, IPC_KOBJECT_ALLOC_NONE);
999
1000 new_pport = new_kport;
1001 }
1002
1003 thread_mtx_lock(thread);
1004
1005 old_kport = thread->ith_thread_ports[THREAD_FLAVOR_CONTROL];
1006 old_rdport = thread->ith_thread_ports[THREAD_FLAVOR_READ];
1007 old_iport = thread->ith_thread_ports[THREAD_FLAVOR_INSPECT];
1008
1009 old_sself = thread->ith_settable_self;
1010 old_pport = thread->ith_self;
1011
1012 if (old_kport == IP_NULL && thread->inspection == FALSE) {
1013 /* thread is already terminated (can this happen?) */
1014 thread_mtx_unlock(thread);
1015 ipc_port_dealloc_kernel(new_kport);
1016 if (thread_is_immovable) {
1017 ipc_port_dealloc_kernel(new_pport);
1018 }
1019#if CONFIG_MACF
1020 mac_exc_free_label(new_label);
1021#endif
1022 return;
1023 }
1024
1025 thread->ipc_active = true;
1026 thread->ith_thread_ports[THREAD_FLAVOR_CONTROL] = new_kport;
1027 thread->ith_self = new_pport;
1028 thread->ith_settable_self = ipc_port_make_send(new_kport);
1029 thread->ith_thread_ports[THREAD_FLAVOR_INSPECT] = IP_NULL;
1030 thread->ith_thread_ports[THREAD_FLAVOR_READ] = IP_NULL;
1031
1032 if (old_kport != IP_NULL) {
1033 ip_lock(old_kport);
1034 old_kport->ip_alt_port = IP_NULL;
1035 ipc_kobject_set_atomically(old_kport, IKO_NULL, IKOT_NONE);
1036 ip_unlock(old_kport);
1037 }
1038 if (old_rdport != IP_NULL) {
1039 ipc_kobject_set(old_rdport, IKO_NULL, IKOT_NONE);
1040 }
1041 if (old_iport != IP_NULL) {
1042 ipc_kobject_set(old_iport, IKO_NULL, IKOT_NONE);
1043 }
1044 if (thread_is_immovable && old_pport != IP_NULL) {
1045 ipc_kobject_set(old_pport, IKO_NULL, IKOT_NONE);
1046 }
1047
1048 /*
1049 * Only ports that were set by root-owned processes
1050 * (privileged ports) should survive
1051 */
1052 if (thread->exc_actions != NULL) {
1053 has_old_exc_actions = TRUE;
1054 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
1055 if (thread->exc_actions[i].privileged) {
1056 old_exc_actions[i] = IP_NULL;
1057 } else {
1058#if CONFIG_MACF
1059 mac_exc_update_action_label(thread->exc_actions + i, new_label);
1060#endif
1061 old_exc_actions[i] = thread->exc_actions[i].port;
1062 thread->exc_actions[i].port = IP_NULL;
1063 }
1064 }
1065 }
1066
1067 thread_mtx_unlock(thread);
1068
1069#if CONFIG_MACF
1070 mac_exc_free_label(new_label);
1071#endif
1072
1073 /* release the naked send rights */
1074
1075 if (IP_VALID(old_sself)) {
1076 ipc_port_release_send(old_sself);
1077 }
1078
1079 if (has_old_exc_actions) {
1080 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
1081 ipc_port_release_send(old_exc_actions[i]);
1082 }
1083 }
1084
1085 /* destroy the kernel port */
1086 if (old_kport != IP_NULL) {
1087 ipc_port_dealloc_kernel(old_kport);
1088 }
1089 if (old_rdport != IP_NULL) {
1090 ipc_port_dealloc_kernel(old_rdport);
1091 }
1092 if (old_iport != IP_NULL) {
1093 ipc_port_dealloc_kernel(old_iport);
1094 }
1095
1096 if (thread_is_immovable && old_pport != IP_NULL) {
1097 ipc_port_dealloc_kernel(old_pport);
1098 }
1099
1100 /* unbind the thread special reply port */
1101 if (IP_VALID(thread->ith_special_reply_port)) {
1102 ipc_port_unbind_special_reply_port(thread, TRUE);
1103 }
1104}
1105
1106/*
1107 * Routine: retrieve_task_self_fast
1108 * Purpose:
1109 * Optimized version of retrieve_task_self
1110 * that only works for the current task.
1111 *
1112 * Return a send right (possibly null/dead)
1113 * for the task's user-visible self port.
1114 * Conditions:
1115 * Nothing locked.
1116 */
1117
1118ipc_port_t
1119retrieve_task_self_fast(
1120 task_t task)
1121{
1122 ipc_port_t port = IP_NULL;
1123
1124 assert(task == current_task());
1125
1126 itk_lock(task);
1127 assert(task->itk_self != IP_NULL);
1128
1129 if (task->itk_settable_self == task->itk_task_ports[TASK_FLAVOR_CONTROL]) {
1130 /* no interposing, return the IMMOVABLE port */
1131 port = ipc_port_make_send(task->itk_self);
1132 if (immovable_control_port_enabled) {
1133 assert(port->ip_immovable_send == 1);
1134 if (pinned_control_port_enabled) {
1135 /* pinned port is also immovable */
1136 assert(port->ip_pinned == 1);
1137 }
1138 }
1139 } else {
1140 port = ipc_port_copy_send(task->itk_settable_self);
1141 }
1142 itk_unlock(task);
1143
1144 return port;
1145}
1146
1147/*
1148 * Routine: mach_task_is_self
1149 * Purpose:
1150 * [MIG call] Checks if the task (control/read/inspect/name/movable)
1151 * port is pointing to current_task.
1152 */
1153kern_return_t
1154mach_task_is_self(
1155 task_t task,
1156 boolean_t *is_self)
1157{
1158 if (task == TASK_NULL) {
1159 return KERN_INVALID_ARGUMENT;
1160 }
1161
1162 *is_self = (task == current_task());
1163
1164 return KERN_SUCCESS;
1165}
1166
1167/*
1168 * Routine: retrieve_thread_self_fast
1169 * Purpose:
1170 * Return a send right (possibly null/dead)
1171 * for the thread's user-visible self port.
1172 *
1173 * Only works for the current thread.
1174 *
1175 * Conditions:
1176 * Nothing locked.
1177 */
1178
1179ipc_port_t
1180retrieve_thread_self_fast(
1181 thread_t thread)
1182{
1183 ipc_port_t port = IP_NULL;
1184
1185 assert(thread == current_thread());
1186
1187 thread_mtx_lock(thread);
1188
1189 assert(thread->ith_self != IP_NULL);
1190
1191 if (thread->ith_settable_self == thread->ith_thread_ports[THREAD_FLAVOR_CONTROL]) {
1192 /* no interposing, return IMMOVABLE_PORT */
1193 port = ipc_port_make_send(thread->ith_self);
1194 } else {
1195 port = ipc_port_copy_send(thread->ith_settable_self);
1196 }
1197
1198 thread_mtx_unlock(thread);
1199
1200 return port;
1201}
1202
1203/*
1204 * Routine: task_self_trap [mach trap]
1205 * Purpose:
1206 * Give the caller send rights for his own task port.
1207 * Conditions:
1208 * Nothing locked.
1209 * Returns:
1210 * MACH_PORT_NULL if there are any resource failures
1211 * or other errors.
1212 */
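/*
 * Illustrative user-space counterpart (a sketch; assumes the standard
 * libsyscall wrapper, not part of this file):
 *
 *     mach_port_t self = mach_task_self();
 *
 * The returned name denotes a send right for the caller's own task
 * control port (or the settable self port, if one was interposed).
 */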
1213
1214mach_port_name_t
1215task_self_trap(
1216 __unused struct task_self_trap_args *args)
1217{
1218 task_t task = current_task();
1219 ipc_port_t sright;
1220 mach_port_name_t name;
1221
1222 sright = retrieve_task_self_fast(task);
1223 name = ipc_port_copyout_send(sright, task->itk_space);
1224 return name;
1225}
1226
1227/*
1228 * Routine: thread_self_trap [mach trap]
1229 * Purpose:
1230 * Give the caller send rights for his own thread port.
1231 * Conditions:
1232 * Nothing locked.
1233 * Returns:
1234 * MACH_PORT_NULL if there are any resource failures
1235 * or other errors.
1236 */
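/*
 * Illustrative user-space counterpart (a sketch; assumes the standard
 * libsyscall wrapper, not part of this file):
 *
 *     mach_port_t th = mach_thread_self();
 *     ...
 *     mach_port_deallocate(mach_task_self(), th);
 *
 * Unlike mach_task_self(), the right returned by mach_thread_self() is
 * typically deallocated by the caller when no longer needed.
 */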
1237
1238mach_port_name_t
1239thread_self_trap(
1240 __unused struct thread_self_trap_args *args)
1241{
1242 thread_t thread = current_thread();
1243 task_t task = thread->task;
1244 ipc_port_t sright;
1245 mach_port_name_t name;
1246
1247 sright = retrieve_thread_self_fast(thread);
1248 name = ipc_port_copyout_send(sright, task->itk_space);
1249 return name;
1250}
1251
1252/*
1253 * Routine: mach_reply_port [mach trap]
1254 * Purpose:
1255 * Allocate a port for the caller.
1256 * Conditions:
1257 * Nothing locked.
1258 * Returns:
1259 * MACH_PORT_NULL if there are any resource failures
1260 * or other errors.
1261 */
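/*
 * Illustrative user-space counterpart (a sketch; assumes the standard
 * libsyscall wrapper, not part of this file):
 *
 *     mach_port_t reply = mach_reply_port();
 *
 * The returned name denotes a fresh receive right in the caller's IPC
 * space, commonly used as the reply port of a subsequent RPC.
 */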
1262
1263mach_port_name_t
1264mach_reply_port(
1265 __unused struct mach_reply_port_args *args)
1266{
1267 ipc_port_t port;
1268 mach_port_name_t name;
1269 kern_return_t kr;
1270
1271 kr = ipc_port_alloc(current_task()->itk_space, IPC_PORT_INIT_MESSAGE_QUEUE,
1272 &name, &port);
1273 if (kr == KERN_SUCCESS) {
1274 ip_unlock(port);
1275 } else {
1276 name = MACH_PORT_NULL;
1277 }
1278 return name;
1279}
1280
1281/*
1282 * Routine: thread_get_special_reply_port [mach trap]
1283 * Purpose:
1284 * Allocate a special reply port for the calling thread.
1285 * Conditions:
1286 * Nothing locked.
1287 * Returns:
1288 * mach_port_name_t: send right & receive right for special reply port.
1289 * MACH_PORT_NULL if there are any resource failures
1290 * or other errors.
1291 */
1292
1293mach_port_name_t
1294thread_get_special_reply_port(
1295 __unused struct thread_get_special_reply_port_args *args)
1296{
1297 ipc_port_t port;
1298 mach_port_name_t name;
1299 kern_return_t kr;
1300 thread_t thread = current_thread();
1301 ipc_port_init_flags_t flags = IPC_PORT_INIT_MESSAGE_QUEUE |
1302 IPC_PORT_INIT_MAKE_SEND_RIGHT | IPC_PORT_INIT_SPECIAL_REPLY;
1303
1304 /* unbind the thread special reply port */
1305 if (IP_VALID(thread->ith_special_reply_port)) {
1306 kr = ipc_port_unbind_special_reply_port(thread, TRUE);
1307 if (kr != KERN_SUCCESS) {
1308 return MACH_PORT_NULL;
1309 }
1310 }
1311
1312 kr = ipc_port_alloc(current_task()->itk_space, flags, &name, &port);
1313 if (kr == KERN_SUCCESS) {
1314 ipc_port_bind_special_reply_port_locked(port);
1315 ip_unlock(port);
1316 } else {
1317 name = MACH_PORT_NULL;
1318 }
1319 return name;
1320}
1321
1322/*
1323 * Routine: ipc_port_bind_special_reply_port_locked
1324 * Purpose:
1325 * Bind the given port to current thread as a special reply port.
1326 * Conditions:
1327 * Port locked.
1328 * Returns:
1329 * None.
1330 */
1331
1332static void
1333ipc_port_bind_special_reply_port_locked(
1334 ipc_port_t port)
1335{
1336 thread_t thread = current_thread();
1337 assert(thread->ith_special_reply_port == NULL);
1338 assert(port->ip_specialreply);
1339 assert(port->ip_sync_link_state == PORT_SYNC_LINK_ANY);
1340
1341 ip_reference(port);
1342 thread->ith_special_reply_port = port;
1343 port->ip_messages.imq_srp_owner_thread = thread;
1344
1345 ipc_special_reply_port_bits_reset(port);
1346}
1347
1348/*
1349 * Routine: ipc_port_unbind_special_reply_port
1350 * Purpose:
1351 * Unbind the thread's special reply port.
1352 * If the special port has threads waiting on turnstile,
1353 * update its inheritor.
1354 * Condition:
1355 * Nothing locked.
1356 * Returns:
1357 * None.
1358 */
1359static kern_return_t
1360ipc_port_unbind_special_reply_port(
1361 thread_t thread,
1362 boolean_t unbind_active_port)
1363{
1364 ipc_port_t special_reply_port = thread->ith_special_reply_port;
1365
1366 ip_lock(special_reply_port);
1367
1368 /* Return error if port active and unbind_active_port set to FALSE */
1369 if (unbind_active_port == FALSE && ip_active(special_reply_port)) {
1370 ip_unlock(special_reply_port);
1371 return KERN_FAILURE;
1372 }
1373
1374 thread->ith_special_reply_port = NULL;
1375 ipc_port_adjust_special_reply_port_locked(special_reply_port, NULL,
1376 IPC_PORT_ADJUST_UNLINK_THREAD, FALSE);
1377 /* port unlocked */
1378
1379 ip_release(special_reply_port);
1380 return KERN_SUCCESS;
1381}
1382
1383/*
1384 * Routine: thread_get_special_port [kernel call]
1385 * Purpose:
1386 * Clones a send right for one of the thread's
1387 * special ports.
1388 * Conditions:
1389 * Nothing locked.
1390 * Returns:
1391 * KERN_SUCCESS Extracted a send right.
1392 * KERN_INVALID_ARGUMENT The thread is null.
1393 * KERN_FAILURE The thread is dead.
1394 * KERN_INVALID_ARGUMENT Invalid special port.
1395 */
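/*
 * Illustrative caller (a sketch; assumes the user-space MIG wrapper of
 * the same name):
 *
 *     mach_port_t kport;
 *     kern_return_t kr = thread_get_special_port(mach_thread_self(),
 *         THREAD_KERNEL_PORT, &kport);
 */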
1396
1397kern_return_t
1398thread_get_special_port(
1399 thread_inspect_t thread,
1400 int which,
1401 ipc_port_t *portp);
1402
1403static kern_return_t
1404thread_get_special_port_internal(
1405 thread_inspect_t thread,
1406 int which,
1407 ipc_port_t *portp,
1408 mach_thread_flavor_t flavor)
1409{
1410 kern_return_t kr;
1411 ipc_port_t port;
1412
1413 if (thread == THREAD_NULL) {
1414 return KERN_INVALID_ARGUMENT;
1415 }
1416
1417 if ((kr = port_allowed_with_thread_flavor(which, flavor)) != KERN_SUCCESS) {
1418 return kr;
1419 }
1420
1421 thread_mtx_lock(thread);
1422 if (!thread->active) {
1423 thread_mtx_unlock(thread);
1424 return KERN_FAILURE;
1425 }
1426
1427 switch (which) {
1428 case THREAD_KERNEL_PORT:
1429 port = ipc_port_copy_send(thread->ith_settable_self);
1430 thread_mtx_unlock(thread);
1431 break;
1432
1433 case THREAD_READ_PORT:
1434 case THREAD_INSPECT_PORT:
1435 thread_mtx_unlock(thread);
1436 mach_thread_flavor_t current_flavor = (which == THREAD_READ_PORT) ?
1437 THREAD_FLAVOR_READ : THREAD_FLAVOR_INSPECT;
1438 /* convert_thread_to_port_with_flavor consumes a thread reference */
1439 thread_reference(thread);
1440 port = convert_thread_to_port_with_flavor(thread, current_flavor);
1441 break;
1442
1443 default:
1444 thread_mtx_unlock(thread);
1445 return KERN_INVALID_ARGUMENT;
1446 }
1447
1448 *portp = port;
1449 return KERN_SUCCESS;
1450}
1451
1452kern_return_t
1453thread_get_special_port(
1454 thread_inspect_t thread,
1455 int which,
1456 ipc_port_t *portp)
1457{
1458 return thread_get_special_port_internal(thread, which, portp, THREAD_FLAVOR_CONTROL);
1459}
1460
1461static ipc_port_t
1462thread_get_non_substituted_self(thread_t thread)
1463{
1464 ipc_port_t port = IP_NULL;
1465
1466 thread_mtx_lock(thread);
1467 port = thread->ith_settable_self;
1468 if (IP_VALID(port)) {
1469 ip_reference(port);
1470 }
1471 thread_mtx_unlock(thread);
1472
1473 if (IP_VALID(port)) {
1474 /* consumes the port reference */
1475 return ipc_kobject_alloc_subst_once(port);
1476 }
1477
1478 return port;
1479}
1480
1481kern_return_t
1482thread_get_special_port_from_user(
1483 mach_port_t port,
1484 int which,
1485 ipc_port_t *portp)
1486{
1487 ipc_kobject_type_t kotype;
1488 mach_thread_flavor_t flavor;
1489 kern_return_t kr = KERN_SUCCESS;
1490
1491 thread_t thread = convert_port_to_thread_check_type(port, &kotype,
1492 THREAD_FLAVOR_INSPECT, FALSE);
1493
1494 if (thread == THREAD_NULL) {
1495 return KERN_INVALID_ARGUMENT;
1496 }
1497
1498 if (which == THREAD_KERNEL_PORT && thread->task == current_task()) {
1499#if CONFIG_MACF
1500 /*
1501 * only check for threads belonging to current_task,
1502 * because foreign thread ports are always movable
1503 */
1504 if (mac_task_check_get_movable_control_port()) {
1505 kr = KERN_DENIED;
1506 goto out;
1507 }
1508#endif
1509 if (kotype == IKOT_THREAD_CONTROL) {
1510 *portp = thread_get_non_substituted_self(thread);
1511 goto out;
1512 }
1513 }
1514
1515 switch (kotype) {
1516 case IKOT_THREAD_CONTROL:
1517 flavor = THREAD_FLAVOR_CONTROL;
1518 break;
1519 case IKOT_THREAD_READ:
1520 flavor = THREAD_FLAVOR_READ;
1521 break;
1522 case IKOT_THREAD_INSPECT:
1523 flavor = THREAD_FLAVOR_INSPECT;
1524 break;
1525 default:
1526 panic("strange kobject type");
1527 }
1528
1529 kr = thread_get_special_port_internal(thread, which, portp, flavor);
1530out:
1531 thread_deallocate(thread);
1532 return kr;
1533}
1534
1535static kern_return_t
1536port_allowed_with_thread_flavor(
1537 int which,
1538 mach_thread_flavor_t flavor)
1539{
1540 switch (flavor) {
1541 case THREAD_FLAVOR_CONTROL:
1542 return KERN_SUCCESS;
1543
1544 case THREAD_FLAVOR_READ:
1545
1546 switch (which) {
1547 case THREAD_READ_PORT:
1548 case THREAD_INSPECT_PORT:
1549 return KERN_SUCCESS;
1550 default:
1551 return KERN_INVALID_CAPABILITY;
1552 }
1553
1554 case THREAD_FLAVOR_INSPECT:
1555
1556 switch (which) {
1557 case THREAD_INSPECT_PORT:
1558 return KERN_SUCCESS;
1559 default:
1560 return KERN_INVALID_CAPABILITY;
1561 }
1562
1563 default:
1564 return KERN_INVALID_CAPABILITY;
1565 }
1566}
1567
1568/*
1569 * Routine: thread_set_special_port [kernel call]
1570 * Purpose:
1571 * Changes one of the thread's special ports,
1572 * setting it to the supplied send right.
1573 * Conditions:
1574 * Nothing locked. If successful, consumes
1575 * the supplied send right.
1576 * Returns:
1577 * KERN_SUCCESS Changed the special port.
1578 * KERN_INVALID_ARGUMENT The thread is null.
1579 * KERN_INVALID_RIGHT Port is marked as immovable.
1580 * KERN_FAILURE The thread is dead.
1581 * KERN_INVALID_ARGUMENT Invalid special port.
1582 * KERN_NO_ACCESS Restricted access to set port.
1583 */
1584
1585kern_return_t
1586thread_set_special_port(
1587 thread_t thread,
1588 int which,
1589 ipc_port_t port)
1590{
1591 kern_return_t result = KERN_SUCCESS;
1592 ipc_port_t *whichp, old = IP_NULL;
1593
1594 if (thread == THREAD_NULL) {
1595 return KERN_INVALID_ARGUMENT;
1596 }
1597
1598 if (IP_VALID(port) && (port->ip_immovable_receive || port->ip_immovable_send)) {
1599 return KERN_INVALID_RIGHT;
1600 }
1601
1602 switch (which) {
1603 case THREAD_KERNEL_PORT:
1604#if CONFIG_CSR
1605 if (csr_check(CSR_ALLOW_KERNEL_DEBUGGER) != 0) {
1606 /*
1607 * Only allow setting of thread-self
1608 * special port from user-space when SIP is
1609 * disabled (for Mach-on-Mach emulation).
1610 */
1611 return KERN_NO_ACCESS;
1612 }
1613#endif
1614 whichp = &thread->ith_settable_self;
1615 break;
1616
1617 default:
1618 return KERN_INVALID_ARGUMENT;
1619 }
1620
1621 thread_mtx_lock(thread);
1622
1623 if (thread->active) {
1624 old = *whichp;
1625 *whichp = port;
1626 } else {
1627 result = KERN_FAILURE;
1628 }
1629
1630 thread_mtx_unlock(thread);
1631
1632 if (IP_VALID(old)) {
1633 ipc_port_release_send(old);
1634 }
1635
1636 return result;
1637}
1638
1639/*
1640 * Routine: task_get_special_port [kernel call]
1641 * Purpose:
1642 * Clones a send right for one of the task's
1643 * special ports.
1644 * Conditions:
1645 * Nothing locked.
1646 * Returns:
1647 * KERN_SUCCESS Extracted a send right.
1648 * KERN_INVALID_ARGUMENT The task is null.
1649 * KERN_FAILURE The task/space is dead.
1650 * KERN_INVALID_ARGUMENT Invalid special port.
1651 */
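/*
 * Illustrative caller (a sketch; assumes the user-space MIG wrapper of
 * the same name):
 *
 *     mach_port_t bootstrap;
 *     kern_return_t kr = task_get_special_port(mach_task_self(),
 *         TASK_BOOTSTRAP_PORT, &bootstrap);
 */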
1652
1653kern_return_t
1654task_get_special_port(
1655 task_t task,
1656 int which,
1657 ipc_port_t *portp);
1658
1659static kern_return_t
1660task_get_special_port_internal(
1661 task_t task,
1662 int which,
1663 ipc_port_t *portp,
1664 mach_task_flavor_t flavor)
1665{
1666 kern_return_t kr;
1667 ipc_port_t port;
1668
1669 if (task == TASK_NULL) {
1670 return KERN_INVALID_ARGUMENT;
1671 }
1672
1673 if ((kr = port_allowed_with_task_flavor(which, flavor)) != KERN_SUCCESS) {
1674 return kr;
1675 }
1676
1677 itk_lock(task);
1678 if (!task->ipc_active) {
1679 itk_unlock(task);
1680 return KERN_FAILURE;
1681 }
1682
1683 switch (which) {
1684 case TASK_KERNEL_PORT:
1685 port = ipc_port_copy_send(task->itk_settable_self);
1686 itk_unlock(task);
1687 break;
1688
1689 case TASK_READ_PORT:
1690 case TASK_INSPECT_PORT:
1691 itk_unlock(task);
1692 mach_task_flavor_t current_flavor = (which == TASK_READ_PORT) ?
1693 TASK_FLAVOR_READ : TASK_FLAVOR_INSPECT;
1694 /* convert_task_to_port_with_flavor consumes a task reference */
1695 task_reference(task);
1696 port = convert_task_to_port_with_flavor(task, current_flavor);
1697 break;
1698
1699 case TASK_NAME_PORT:
1700 port = ipc_port_make_send(task->itk_task_ports[TASK_FLAVOR_NAME]);
1701 itk_unlock(task);
1702 break;
1703
1704 case TASK_HOST_PORT:
1705 port = ipc_port_copy_send(task->itk_host);
1706 itk_unlock(task);
1707 break;
1708
1709 case TASK_BOOTSTRAP_PORT:
1710 port = ipc_port_copy_send(task->itk_bootstrap);
1711 itk_unlock(task);
1712 break;
1713
1714 case TASK_SEATBELT_PORT:
1715 port = ipc_port_copy_send(task->itk_seatbelt);
1716 itk_unlock(task);
1717 break;
1718
1719 case TASK_ACCESS_PORT:
1720 port = ipc_port_copy_send(task->itk_task_access);
1721 itk_unlock(task);
1722 break;
1723
1724 case TASK_DEBUG_CONTROL_PORT:
1725 port = ipc_port_copy_send(task->itk_debug_control);
1726 itk_unlock(task);
1727 break;
1728
1729 default:
1730 itk_unlock(task);
1731 return KERN_INVALID_ARGUMENT;
1732 }
1733
1734 *portp = port;
1735 return KERN_SUCCESS;
1736}
1737
1738kern_return_t
1739task_get_special_port(
1740 task_t task,
1741 int which,
1742 ipc_port_t *portp)
1743{
1744 return task_get_special_port_internal(task, which, portp, TASK_FLAVOR_CONTROL);
1745}
1746
1747static ipc_port_t
1748task_get_non_substituted_self(task_t task)
1749{
1750 ipc_port_t port = IP_NULL;
1751
1752 itk_lock(task);
1753 port = task->itk_settable_self;
1754 if (IP_VALID(port)) {
1755 ip_reference(port);
1756 }
1757 itk_unlock(task);
1758
1759 if (IP_VALID(port)) {
1760 /* consumes the port reference */
1761 return ipc_kobject_alloc_subst_once(port);
1762 }
1763
1764 return port;
1765}
1766kern_return_t
1767task_get_special_port_from_user(
1768 mach_port_t port,
1769 int which,
1770 ipc_port_t *portp)
1771{
1772 ipc_kobject_type_t kotype;
1773 mach_task_flavor_t flavor;
1774 kern_return_t kr = KERN_SUCCESS;
1775
1776 task_t task = convert_port_to_task_check_type(port, &kotype,
1777 TASK_FLAVOR_INSPECT, FALSE);
1778
1779 if (task == TASK_NULL) {
1780 return KERN_INVALID_ARGUMENT;
1781 }
1782
1783 if (which == TASK_KERNEL_PORT && task == current_task()) {
1784#if CONFIG_MACF
1785 /*
1786 * only check for current_task,
1787 * because foreign task ports are always movable
1788 */
1789 if (mac_task_check_get_movable_control_port()) {
1790 kr = KERN_DENIED;
1791 goto out;
1792 }
1793#endif
1794 if (kotype == IKOT_TASK_CONTROL) {
1795 *portp = task_get_non_substituted_self(task);
1796 goto out;
1797 }
1798 }
1799
1800 switch (kotype) {
1801 case IKOT_TASK_CONTROL:
1802 flavor = TASK_FLAVOR_CONTROL;
1803 break;
1804 case IKOT_TASK_READ:
1805 flavor = TASK_FLAVOR_READ;
1806 break;
1807 case IKOT_TASK_INSPECT:
1808 flavor = TASK_FLAVOR_INSPECT;
1809 break;
1810 default:
1811 panic("strange kobject type");
1812 }
1813
1814 kr = task_get_special_port_internal(task, which, portp, flavor);
1815out:
1816 task_deallocate(task);
1817 return kr;
1818}
1819
1820static kern_return_t
1821port_allowed_with_task_flavor(
1822 int which,
1823 mach_task_flavor_t flavor)
1824{
1825 switch (flavor) {
1826 case TASK_FLAVOR_CONTROL:
1827 return KERN_SUCCESS;
1828
1829 case TASK_FLAVOR_READ:
1830
1831 switch (which) {
1832 case TASK_READ_PORT:
1833 case TASK_INSPECT_PORT:
1834 case TASK_NAME_PORT:
1835 return KERN_SUCCESS;
1836 default:
1837 return KERN_INVALID_CAPABILITY;
1838 }
1839
1840 case TASK_FLAVOR_INSPECT:
1841
1842 switch (which) {
1843 case TASK_INSPECT_PORT:
1844 case TASK_NAME_PORT:
1845 return KERN_SUCCESS;
1846 default:
1847 return KERN_INVALID_CAPABILITY;
1848 }
1849
1850 default:
1851 return KERN_INVALID_CAPABILITY;
1852 }
1853}
1854
1855/*
1856 * Routine: task_set_special_port [kernel call]
1857 * Purpose:
1858 * Changes one of the task's special ports,
1859 * setting it to the supplied send right.
1860 * Conditions:
1861 * Nothing locked. If successful, consumes
1862 * the supplied send right.
1863 * Returns:
1864 * KERN_SUCCESS Changed the special port.
1865 * KERN_INVALID_ARGUMENT The task is null.
1866 * KERN_INVALID_RIGHT Port is marked as immovable.
1867 * KERN_FAILURE The task/space is dead.
1868 * KERN_INVALID_ARGUMENT Invalid special port.
1869 * KERN_NO_ACCESS Restricted access to set port.
1870 */
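/*
 * Illustrative caller (a sketch; assumes the user-space MIG wrapper of
 * the same name; new_bootstrap is a hypothetical send right owned by the
 * caller and consumed on success):
 *
 *     kern_return_t kr = task_set_special_port(mach_task_self(),
 *         TASK_BOOTSTRAP_PORT, new_bootstrap);
 */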
1871
1872kern_return_t
1873task_set_special_port(
1874 task_t task,
1875 int which,
1876 ipc_port_t port)
1877{
1878 if (task == TASK_NULL) {
1879 return KERN_INVALID_ARGUMENT;
1880 }
1881
1882 if (task_is_driver(current_task())) {
1883 return KERN_NO_ACCESS;
1884 }
1885
1886 if (IP_VALID(port) && (port->ip_immovable_receive || port->ip_immovable_send)) {
1887 return KERN_INVALID_RIGHT;
1888 }
1889
1890 switch (which) {
1891 case TASK_KERNEL_PORT:
1892 case TASK_HOST_PORT:
1893#if CONFIG_CSR
1894 if (csr_check(CSR_ALLOW_KERNEL_DEBUGGER) == 0) {
1895 /*
1896 * Only allow setting of task-self / task-host
1897 * special ports from user-space when SIP is
1898 * disabled (for Mach-on-Mach emulation).
1899 */
1900 break;
1901 }
1902#endif
1903 return KERN_NO_ACCESS;
1904 default:
1905 break;
1906 }
1907
1908 return task_set_special_port_internal(task, which, port);
1909}
1910
1911/*
1912 * Routine: task_set_special_port_internal
1913 * Purpose:
1914 * Changes one of the task's special ports,
1915 * setting it to the supplied send right.
1916 * Conditions:
1917 * Nothing locked. If successful, consumes
1918 * the supplied send right.
1919 * Returns:
1920 * KERN_SUCCESS Changed the special port.
1921 * KERN_INVALID_ARGUMENT The task is null.
1922 * KERN_FAILURE The task/space is dead.
1923 * KERN_INVALID_ARGUMENT Invalid special port.
1924 * KERN_NO_ACCESS Restricted access to overwrite port.
1925 */
1926
1927kern_return_t
1928task_set_special_port_internal(
1929 task_t task,
1930 int which,
1931 ipc_port_t port)
1932{
1933 ipc_port_t old = IP_NULL;
1934 kern_return_t rc = KERN_INVALID_ARGUMENT;
1935
1936 if (task == TASK_NULL) {
1937 goto out;
1938 }
1939
1940 itk_lock(task);
1941 if (!task->ipc_active) {
1942 rc = KERN_FAILURE;
1943 goto out_unlock;
1944 }
1945
1946 switch (which) {
1947 case TASK_KERNEL_PORT:
1948 old = task->itk_settable_self;
1949 task->itk_settable_self = port;
1950 break;
1951
1952 case TASK_HOST_PORT:
1953 old = task->itk_host;
1954 task->itk_host = port;
1955 break;
1956
1957 case TASK_BOOTSTRAP_PORT:
1958 old = task->itk_bootstrap;
1959 task->itk_bootstrap = port;
1960 break;
1961
1962 /* Never allow overwrite of seatbelt port */
1963 case TASK_SEATBELT_PORT:
1964 if (IP_VALID(task->itk_seatbelt)) {
1965 rc = KERN_NO_ACCESS;
1966 goto out_unlock;
1967 }
1968 task->itk_seatbelt = port;
1969 break;
1970
1971 /* Never allow overwrite of the task access port */
1972 case TASK_ACCESS_PORT:
1973 if (IP_VALID(task->itk_task_access)) {
1974 rc = KERN_NO_ACCESS;
1975 goto out_unlock;
1976 }
1977 task->itk_task_access = port;
1978 break;
1979
1980 case TASK_DEBUG_CONTROL_PORT:
1981 old = task->itk_debug_control;
1982 task->itk_debug_control = port;
1983 break;
1984
1985 default:
1986 rc = KERN_INVALID_ARGUMENT;
1987 goto out_unlock;
1988 }/* switch */
1989
1990 rc = KERN_SUCCESS;
1991
1992out_unlock:
1993 itk_unlock(task);
1994
1995 if (IP_VALID(old)) {
1996 ipc_port_release_send(old);
1997 }
1998out:
1999 return rc;
2000}
2001/*
2002 * Routine: mach_ports_register [kernel call]
2003 * Purpose:
2004 * Stash a handful of port send rights in the task.
2005 * Child tasks will inherit these rights, but they
2006 * must use mach_ports_lookup to acquire them.
2007 *
2008 * The rights are supplied in a (wired) kalloc'd segment.
2009 * Rights which aren't supplied are assumed to be null.
2010 * Conditions:
2011 * Nothing locked. If successful, consumes
2012 * the supplied rights and memory.
2013 * Returns:
2014 * KERN_SUCCESS Stashed the port rights.
2015 * KERN_INVALID_RIGHT Port in array is marked immovable.
2016 * KERN_INVALID_ARGUMENT The task is null.
2017 * KERN_INVALID_ARGUMENT The task is dead.
2018 * KERN_INVALID_ARGUMENT The memory param is null.
2019 * KERN_INVALID_ARGUMENT Too many port rights supplied.
2020 */
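/*
 * Illustrative caller (a sketch; assumes the user-space MIG wrapper of
 * the same name; stash_port is a hypothetical send right):
 *
 *     mach_port_t set[1] = { stash_port };
 *     kern_return_t kr = mach_ports_register(mach_task_self(), set, 1);
 */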
2021
2022kern_return_t
2023mach_ports_register(
2024 task_t task,
2025 mach_port_array_t memory,
2026 mach_msg_type_number_t portsCnt)
2027{
2028 ipc_port_t ports[TASK_PORT_REGISTER_MAX];
2029 unsigned int i;
2030
2031 if ((task == TASK_NULL) ||
2032 (portsCnt > TASK_PORT_REGISTER_MAX) ||
2033 (portsCnt && memory == NULL)) {
2034 return KERN_INVALID_ARGUMENT;
2035 }
2036
2037 /*
2038 * Pad the port rights with nulls.
2039 */
2040
2041 for (i = 0; i < portsCnt; i++) {
2042 ports[i] = memory[i];
2043 if (IP_VALID(ports[i]) && (ports[i]->ip_immovable_receive || ports[i]->ip_immovable_send)) {
2044 return KERN_INVALID_RIGHT;
2045 }
2046 }
2047 for (; i < TASK_PORT_REGISTER_MAX; i++) {
2048 ports[i] = IP_NULL;
2049 }
2050
2051 itk_lock(task);
2052 if (!task->ipc_active) {
2053 itk_unlock(task);
2054 return KERN_INVALID_ARGUMENT;
2055 }
2056
2057 /*
2058 * Replace the old send rights with the new.
2059 * Release the old rights after unlocking.
2060 */
2061
2062 for (i = 0; i < TASK_PORT_REGISTER_MAX; i++) {
2063 ipc_port_t old;
2064
2065 old = task->itk_registered[i];
2066 task->itk_registered[i] = ports[i];
2067 ports[i] = old;
2068 }
2069
2070 itk_unlock(task);
2071
2072 for (i = 0; i < TASK_PORT_REGISTER_MAX; i++) {
2073 if (IP_VALID(ports[i])) {
2074 ipc_port_release_send(ports[i]);
2075 }
2076 }
2077
2078 /*
2079 * Now that the operation is known to be successful,
2080 * we can free the memory.
2081 */
2082
2083 if (portsCnt != 0) {
2084 kfree(memory,
2085 (vm_size_t) (portsCnt * sizeof(mach_port_t)));
2086 }
2087
2088 return KERN_SUCCESS;
2089}
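
/*
 * Usage sketch (user space, illustrative): registering a single send right so
 * it can later be recovered with mach_ports_lookup(). `svc_port` is an
 * assumed, previously created send right; at most TASK_PORT_REGISTER_MAX
 * rights can be registered at once.
 *
 *	#include <mach/mach.h>
 *
 *	kern_return_t
 *	register_service_port(mach_port_t svc_port)
 *	{
 *		mach_port_t ports[1] = { svc_port };
 *
 *		return mach_ports_register(mach_task_self(), ports, 1);
 *	}
 */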
2090
2091/*
2092 * Routine: mach_ports_lookup [kernel call]
2093 * Purpose:
2094 * Retrieves (clones) the stashed port send rights.
2095 * Conditions:
2096 * Nothing locked. If successful, the caller gets
2097 * rights and memory.
2098 * Returns:
2099 * KERN_SUCCESS Retrieved the send rights.
2100 * KERN_INVALID_ARGUMENT The task is null.
2101 * KERN_INVALID_ARGUMENT The task is dead.
2102 * KERN_RESOURCE_SHORTAGE Couldn't allocate memory.
2103 */
2104
2105kern_return_t
2106mach_ports_lookup(
2107 task_t task,
2108 mach_port_array_t *portsp,
2109 mach_msg_type_number_t *portsCnt)
2110{
2111 void *memory;
2112 vm_size_t size;
2113 ipc_port_t *ports;
2114 int i;
2115
2116 if (task == TASK_NULL) {
2117 return KERN_INVALID_ARGUMENT;
2118 }
2119
2120 size = (vm_size_t) (TASK_PORT_REGISTER_MAX * sizeof(ipc_port_t));
2121
2122 memory = kalloc(size);
2123 if (memory == 0) {
2124 return KERN_RESOURCE_SHORTAGE;
2125 }
2126
2127 itk_lock(task);
2128 if (!task->ipc_active) {
2129 itk_unlock(task);
2130
2131 kfree(memory, size);
2132 return KERN_INVALID_ARGUMENT;
2133 }
2134
2135 ports = (ipc_port_t *) memory;
2136
2137 /*
2138 * Clone port rights. Because kalloc'd memory
2139 * is wired, we won't fault while holding the task lock.
2140 */
2141
2142 for (i = 0; i < TASK_PORT_REGISTER_MAX; i++) {
2143 ports[i] = ipc_port_copy_send(task->itk_registered[i]);
2144 }
2145
2146 itk_unlock(task);
2147
2148 *portsp = (mach_port_array_t) ports;
2149 *portsCnt = TASK_PORT_REGISTER_MAX;
2150 return KERN_SUCCESS;
2151}
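
/*
 * Usage sketch (user space, illustrative): retrieving the registered rights.
 * The caller owns the returned array; in typical MIG usage it arrives
 * out-of-line, so each right and the array itself are released when done.
 *
 *	#include <mach/mach.h>
 *
 *	void
 *	drain_registered_ports(void)
 *	{
 *		mach_port_array_t ports = NULL;
 *		mach_msg_type_number_t count = 0;
 *
 *		if (mach_ports_lookup(mach_task_self(), &ports, &count) != KERN_SUCCESS) {
 *			return;
 *		}
 *		for (mach_msg_type_number_t i = 0; i < count; i++) {
 *			if (MACH_PORT_VALID(ports[i])) {
 *				mach_port_deallocate(mach_task_self(), ports[i]);
 *			}
 *		}
 *		vm_deallocate(mach_task_self(), (vm_address_t)ports,
 *		    count * sizeof(ports[0]));
 *	}
 */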
2152
2153kern_return_t
2154task_conversion_eval(task_t caller, task_t victim)
2155{
2156 /*
2157 * Tasks are allowed to resolve their own task ports, and the kernel is
2158 * allowed to resolve anyone's task port.
2159 */
2160 if (caller == kernel_task) {
2161 return KERN_SUCCESS;
2162 }
2163
2164 if (caller == victim) {
2165 return KERN_SUCCESS;
2166 }
2167
2168 /*
2169 * Only the kernel can resolve the kernel's task port. We've established
2170 * by this point that the caller is not kernel_task.
2171 */
2172 if (victim == TASK_NULL || victim == kernel_task) {
2173 return KERN_INVALID_SECURITY;
2174 }
2175
2176 task_require(victim);
2177
2178#if !defined(XNU_TARGET_OS_OSX)
2179 /*
2180 * On platforms other than macOS, only a platform binary can resolve the task port
2181 * of another platform binary.
2182 */
2183 if ((victim->t_flags & TF_PLATFORM) && !(caller->t_flags & TF_PLATFORM)) {
2184#if SECURE_KERNEL
2185 return KERN_INVALID_SECURITY;
2186#else
2187 if (cs_relax_platform_task_ports) {
2188 return KERN_SUCCESS;
2189 } else {
2190 return KERN_INVALID_SECURITY;
2191 }
2192#endif /* SECURE_KERNEL */
2193 }
2194#endif /* !defined(XNU_TARGET_OS_OSX) */
2195
2196 return KERN_SUCCESS;
2197}
2198
2199/*
2200 * Routine: convert_port_to_locked_task
2201 * Purpose:
2202 * Internal helper routine to convert from a port to a locked
2203 * task. Used by several routines that try to convert from a
2204 * task port to a reference on some task related object.
2205 * Conditions:
2206 * Nothing locked, blocking OK.
2207 */
2208static task_t
2209convert_port_to_locked_task(ipc_port_t port, boolean_t eval)
2210{
2211 int try_failed_count = 0;
2212
2213 while (IP_VALID(port)) {
2214 task_t ct = current_task();
2215 task_t task;
2216
2217 ip_lock(port);
2218 if (!ip_active(port) || (ip_kotype(port) != IKOT_TASK_CONTROL)) {
2219 ip_unlock(port);
2220 return TASK_NULL;
2221 }
2222 task = (task_t) ip_get_kobject(port);
2223 assert(task != TASK_NULL);
2224
2225 if (eval && task_conversion_eval(ct, task)) {
2226 ip_unlock(port);
2227 return TASK_NULL;
2228 }
2229
2230 /*
2231 * Normal lock ordering puts task_lock() before ip_lock().
2232 * Attempt out-of-order locking here.
2233 */
2234 if (task_lock_try(task)) {
2235 ip_unlock(port);
2236 return task;
2237 }
2238 try_failed_count++;
2239
2240 ip_unlock(port);
2241 mutex_pause(try_failed_count);
2242 }
2243 return TASK_NULL;
2244}
2245
2246/*
2247 * Routine: convert_port_to_locked_task_inspect
2248 * Purpose:
2249 * Internal helper routine to convert from a port to a locked
2250 * task inspect right. Used by internal routines that try to convert from a
2251 * task inspect port to a reference on some task related object.
2252 * Conditions:
2253 * Nothing locked, blocking OK.
2254 */
2255static task_inspect_t
2256convert_port_to_locked_task_inspect(ipc_port_t port)
2257{
2258 int try_failed_count = 0;
2259
2260 while (IP_VALID(port)) {
2261 task_inspect_t task;
2262
2263 ip_lock(port);
2264 if (!ip_active(port) || (ip_kotype(port) != IKOT_TASK_CONTROL &&
2265 ip_kotype(port) != IKOT_TASK_READ &&
2266 ip_kotype(port) != IKOT_TASK_INSPECT)) {
2267 ip_unlock(port);
2268 return TASK_INSPECT_NULL;
2269 }
2270 task = (task_inspect_t) ip_get_kobject(port);
2271 assert(task != TASK_INSPECT_NULL);
2272 /*
2273 * Normal lock ordering puts task_lock() before ip_lock().
2274 * Attempt out-of-order locking here.
2275 */
2276 if (task_lock_try((task_t)task)) {
2277 ip_unlock(port);
2278 return task;
2279 }
2280 try_failed_count++;
2281
2282 ip_unlock(port);
2283 mutex_pause(try_failed_count);
2284 }
2285 return TASK_INSPECT_NULL;
2286}
2287
2288/*
2289 * Routine: convert_port_to_locked_task_read
2290 * Purpose:
2291 * Internal helper routine to convert from a port to a locked
2292 * task read right. Used by internal routines that try to convert from a
2293 * task read port to a reference on some task related object.
2294 * Conditions:
2295 * Nothing locked, blocking OK.
2296 */
2297static task_read_t
2298convert_port_to_locked_task_read(
2299 ipc_port_t port,
2300 boolean_t eval)
2301{
2302 int try_failed_count = 0;
2303
2304 while (IP_VALID(port)) {
2305 task_t ct = current_task();
2306 task_read_t task;
2307
2308 ip_lock(port);
2309 if (!ip_active(port) || (ip_kotype(port) != IKOT_TASK_CONTROL &&
2310 ip_kotype(port) != IKOT_TASK_READ)) {
2311 ip_unlock(port);
2312 return TASK_READ_NULL;
2313 }
2314 task = (task_read_t)ipc_kobject_get(port);
2315 assert(task != TASK_READ_NULL);
2316
2317 if (eval && task_conversion_eval(ct, task)) {
2318 ip_unlock(port);
2319 return TASK_READ_NULL;
2320 }
2321
2322 /*
2323 * Normal lock ordering puts task_lock() before ip_lock().
2324 * Attempt out-of-order locking here.
2325 */
2326 if (task_lock_try((task_t)task)) {
2327 ip_unlock(port);
2328 return task;
2329 }
2330 try_failed_count++;
2331
2332 ip_unlock(port);
2333 mutex_pause(try_failed_count);
2334 }
2335 return TASK_READ_NULL;
2336}
2337
2338static task_t
2339convert_port_to_task_locked(
2340 ipc_port_t port,
2341 uint32_t *exec_token,
2342 boolean_t eval)
2343{
2344 task_t task = TASK_NULL;
2345
2346 ip_lock_held(port);
2347 require_ip_active(port);
2348
2349 if (ip_kotype(port) == IKOT_TASK_CONTROL) {
2350 task = (task_t) ip_get_kobject(port);
2351 assert(task != TASK_NULL);
2352
2353 if (eval && task_conversion_eval(current_task(), task)) {
2354 return TASK_NULL;
2355 }
2356
2357 if (exec_token) {
2358 *exec_token = task->exec_token;
2359 }
2360
2361 task_reference_internal(task);
2362 }
2363
2364 return task;
2365}
2366
2367/*
2368 * Routine: convert_port_to_task_with_exec_token
2369 * Purpose:
2370 * Convert from a port to a task and return
2371 * the exec token stored in the task.
2372 * Doesn't consume the port ref; produces a task ref,
2373 * which may be null.
2374 * Conditions:
2375 * Nothing locked.
2376 */
2377task_t
2378convert_port_to_task_with_exec_token(
2379 ipc_port_t port,
2380 uint32_t *exec_token,
2381 boolean_t eval)
2382{
2383 task_t task = TASK_NULL;
2384
2385 if (IP_VALID(port)) {
2386 ip_lock(port);
2387 if (ip_active(port)) {
2388 task = convert_port_to_task_locked(port, exec_token, eval);
2389 }
2390 ip_unlock(port);
2391 }
2392
2393 return task;
2394}
2395
2396/*
2397 * Routine: convert_port_to_task
2398 * Purpose:
2399 * Convert from a port to a task.
2400 * Doesn't consume the port ref; produces a task ref,
2401 * which may be null.
2402 * Conditions:
2403 * Nothing locked.
2404 */
2405task_t
2406convert_port_to_task(
2407 ipc_port_t port)
2408{
2409 return convert_port_to_task_with_exec_token(port, NULL, TRUE);
2410}
2411
2412/*
2413 * Routine: convert_port_to_task_no_eval
2414 * Purpose:
2415 * Convert from a port to a task, skips task_conversion_eval.
2416 * Doesn't consume the port ref; produces a task ref,
2417 * which may be null.
2418 * Conditions:
2419 * Nothing locked.
2420 */
2421static task_t
2422convert_port_to_task_no_eval(
2423 ipc_port_t port)
2424{
2425 return convert_port_to_task_with_exec_token(port, NULL, FALSE);
2426}
2427
2428/*
2429 * Routine: convert_port_to_task_name
2430 * Purpose:
2431 * Convert from a port to a task name.
2432 * Doesn't consume the port ref; produces a task name ref,
2433 * which may be null.
2434 * Conditions:
2435 * Nothing locked.
2436 */
2437
2438static task_name_t
2439convert_port_to_task_name_locked(
2440 ipc_port_t port)
2441{
2442 task_name_t task = TASK_NAME_NULL;
2443
2444 ip_lock_held(port);
2445 require_ip_active(port);
2446
2447 if (ip_kotype(port) == IKOT_TASK_CONTROL ||
2448 ip_kotype(port) == IKOT_TASK_READ ||
2449 ip_kotype(port) == IKOT_TASK_INSPECT ||
2450 ip_kotype(port) == IKOT_TASK_NAME) {
2451 task = (task_name_t) ip_get_kobject(port);
2452 assert(task != TASK_NAME_NULL);
2453
2454 task_reference_internal(task);
2455 }
2456
2457 return task;
2458}
2459
2460task_name_t
2461convert_port_to_task_name(
2462 ipc_port_t port)
2463{
2464 task_name_t task = TASK_NULL;
2465
2466 if (IP_VALID(port)) {
2467 ip_lock(port);
2468 if (ip_active(port)) {
2469 task = convert_port_to_task_name_locked(port);
2470 }
2471 ip_unlock(port);
2472 }
2473
2474 return task;
2475}
2476
2477/*
2478 * Routine: convert_port_to_task_policy
2479 * Purpose:
2480 * Convert from a port to a task.
2481 * Doesn't consume the port ref; produces a task ref,
2482 * which may be null.
2483 * If the port is being used with task_policy_set(), any task port
2484 * type other than TASK_CONTROL requires an entitlement. If the
2485 * port is being used with task_policy_get(), TASK_NAME requires an
2486 * entitlement.
2487 * Conditions:
2488 * Nothing locked.
2489 */
2490static task_t
2491convert_port_to_task_policy(ipc_port_t port, boolean_t set)
2492{
2493 task_t task = TASK_NULL;
2494 task_t ctask = current_task();
2495
2496 if (!IP_VALID(port)) {
2497 return TASK_NULL;
2498 }
2499
2500 task = set ?
2501 convert_port_to_task(port) :
2502 convert_port_to_task_inspect(port);
2503
2504 if (task == TASK_NULL &&
2505 IOTaskHasEntitlement(ctask, "com.apple.private.task_policy")) {
2506 task = convert_port_to_task_name(port);
2507 }
2508
2509 if (task_conversion_eval(ctask, task) != KERN_SUCCESS) {
2510 task_deallocate(task);
2511 return TASK_NULL;
2512 }
2513
2514 return task;
2515}
2516
2517task_policy_set_t
2518convert_port_to_task_policy_set(ipc_port_t port)
2519{
2520 return convert_port_to_task_policy(port, true);
2521}
2522
2523task_policy_get_t
2524convert_port_to_task_policy_get(ipc_port_t port)
2525{
2526 return convert_port_to_task_policy(port, false);
2527}
2528
2529static task_inspect_t
2530convert_port_to_task_inspect_locked(
2531 ipc_port_t port)
2532{
2533 task_inspect_t task = TASK_INSPECT_NULL;
2534
2535 ip_lock_held(port);
2536 require_ip_active(port);
2537
2538 if (ip_kotype(port) == IKOT_TASK_CONTROL ||
2539 ip_kotype(port) == IKOT_TASK_READ ||
2540 ip_kotype(port) == IKOT_TASK_INSPECT) {
2541 task = (task_inspect_t) ip_get_kobject(port);
2542 assert(task != TASK_INSPECT_NULL);
2543
2544 task_reference_internal(task);
2545 }
2546
2547 return task;
2548}
2549
2550static task_read_t
2551convert_port_to_task_read_locked(
2552 ipc_port_t port,
2553 boolean_t eval)
2554{
2555 task_read_t task = TASK_READ_NULL;
2556
2557 ip_lock_held(port);
2558 require_ip_active(port);
2559
2560 if (ip_kotype(port) == IKOT_TASK_CONTROL ||
2561 ip_kotype(port) == IKOT_TASK_READ) {
2562 task_t ct = current_task();
2563 task = (task_read_t)ipc_kobject_get(port);
2564
2565 assert(task != TASK_READ_NULL);
2566
2567 if (eval && task_conversion_eval(ct, task)) {
2568 return TASK_READ_NULL;
2569 }
2570
2571 task_reference_internal(task);
2572 }
2573
2574 return task;
2575}
2576
2577/*
2578 * Routine: convert_port_to_task_check_type
2579 * Purpose:
2580 * Convert from a port to a task based on port's type.
2581 * Doesn't consume the port ref; produces a task ref,
2582 * which may be null.
2583 * Arguments:
2584 * port: The port to convert
2585 * kotype: Returns the IKOT_TYPE of the port, if translation succeeded
2586 * at_most: The lowest capability flavor allowed. In mach_task_flavor_t,
2587 * the higher the flavor number, the lesser the capability, hence the name.
2588 * eval_check: Whether to run task_conversion_eval check during the conversion.
2589 * For backward compatibility, some interfaces do not run conversion
2590 * eval on IKOT_TASK_CONTROL.
2591 * Conditions:
2592 * Nothing locked.
2593 * Returns:
2594 * task_t and port's type, if translation succeeded;
2595 * TASK_NULL and IKOT_NONE, if translation failed
2596 */
2597task_t
2598convert_port_to_task_check_type(
2599 ipc_port_t port,
2600 ipc_kobject_type_t *kotype,
2601 mach_task_flavor_t at_most,
2602 boolean_t eval_check)
2603{
2604 task_t task = TASK_NULL;
2605 ipc_kobject_type_t type = IKOT_NONE;
2606
2607 if (!IP_VALID(port) || !ip_active(port)) {
2608 goto out;
2609 }
2610
2611 switch (ip_kotype(port)) {
2612 case IKOT_TASK_CONTROL:
2613 task = eval_check ? convert_port_to_task(port) : convert_port_to_task_no_eval(port);
2614 if (task != TASK_NULL) {
2615 type = IKOT_TASK_CONTROL;
2616 }
2617 break;
2618 case IKOT_TASK_READ:
2619 if (at_most >= TASK_FLAVOR_READ) {
2620 task = eval_check ? convert_port_to_task_read(port) : convert_port_to_task_read_no_eval(port);
2621 if (task != TASK_READ_NULL) {
2622 type = IKOT_TASK_READ;
2623 }
2624 }
2625 break;
2626 case IKOT_TASK_INSPECT:
2627 if (at_most >= TASK_FLAVOR_INSPECT) {
2628 task = convert_port_to_task_inspect(port);
2629 if (task != TASK_INSPECT_NULL) {
2630 type = IKOT_TASK_INSPECT;
2631 }
2632 }
2633 break;
2634 case IKOT_TASK_NAME:
2635 if (at_most >= TASK_FLAVOR_NAME) {
2636 task = convert_port_to_task_name(port);
2637 if (task != TASK_NAME_NULL) {
2638 type = IKOT_TASK_NAME;
2639 }
2640 }
2641 break;
2642 default:
2643 break;
2644 }
2645
2646out:
2647 if (kotype) {
2648 *kotype = type;
2649 }
2650 return task;
2651}
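
/*
 * Caller sketch (illustrative, hypothetical in-kernel use): accept either a
 * control or a read port, run the normal conversion-eval policy, and learn
 * which flavor was actually presented. `port` is an assumed ipc_port_t.
 *
 *	ipc_kobject_type_t kotype;
 *	task_t task = convert_port_to_task_check_type(port, &kotype,
 *	    TASK_FLAVOR_READ, TRUE);
 *	if (task != TASK_NULL) {
 *		... kotype is IKOT_TASK_CONTROL or IKOT_TASK_READ ...
 *		task_deallocate(task);
 *	}
 */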
2652
2653/*
2654 * Routine: convert_port_to_thread_check_type
2655 * Purpose:
2656 * Convert from a port to a thread based on port's type.
2657 * Doesn't consume the port ref; produces a thread ref,
2658 * which may be null.
2659 * This conversion routine is _ONLY_ supposed to be used
2660 * by thread_get_special_port.
2661 * Arguments:
2662 * port: The port to convert
2663 * kotype: Returns the IKOT_TYPE of the port, if translation succeeded
2664 * at_most: The lowest capability flavor allowed. In mach_thread_flavor_t,
2665 * the higher the flavor number, the lesser the capability, hence the name.
2666 * eval_check: Whether to run task_conversion_eval check during the conversion.
2667 * For backward compatibility, some interfaces do not run
2668 * conversion eval on IKOT_THREAD_CONTROL.
2669 * Conditions:
2670 * Nothing locked.
2671 * Returns:
2672 * thread_t and port's type, if translation succeeded;
2673 * THREAD_NULL and IKOT_NONE, if translation failed
2674 */
2675thread_t
2676convert_port_to_thread_check_type(
2677 ipc_port_t port,
2678 ipc_kobject_type_t *kotype,
2679 mach_thread_flavor_t at_most,
2680 boolean_t eval_check)
2681{
2682 thread_t thread = THREAD_NULL;
2683 ipc_kobject_type_t type = IKOT_NONE;
2684
2685 if (!IP_VALID(port) || !ip_active(port)) {
2686 goto out;
2687 }
2688
2689 switch (ip_kotype(port)) {
2690 case IKOT_THREAD_CONTROL:
2691 thread = eval_check ? convert_port_to_thread(port) : convert_port_to_thread_no_eval(port);
2692 if (thread != THREAD_NULL) {
2693 type = IKOT_THREAD_CONTROL;
2694 }
2695 break;
2696 case IKOT_THREAD_READ:
2697 if (at_most >= THREAD_FLAVOR_READ) {
2698 thread = eval_check ? convert_port_to_thread_read(port) : convert_port_to_thread_read_no_eval(port);
2699 if (thread != THREAD_READ_NULL) {
2700 type = IKOT_THREAD_READ;
2701 }
2702 }
2703 break;
2704 case IKOT_THREAD_INSPECT:
2705 if (at_most >= THREAD_FLAVOR_INSPECT) {
2706 thread = convert_port_to_thread_inspect(port);
2707 if (thread != THREAD_INSPECT_NULL) {
2708 type = IKOT_THREAD_INSPECT;
2709 }
2710 }
2711 break;
2712 default:
2713 break;
2714 }
2715
2716out:
2717 if (kotype) {
2718 *kotype = type;
2719 }
2720 return thread;
2721}
2722
2723/*
2724 * Routine: convert_port_to_space_check_type
2725 * Purpose:
2726 * Convert from a port to a space based on port's type.
2727 * Doesn't consume the port ref; produces a space ref,
2728 * which may be null.
2729 * Arguments:
2730 * port: The port to convert
2731 * kotype: Returns the IKOT_TYPE of the port, if translation succeeded
2732 * at_most: The lowest capability flavor allowed. In mach_task_flavor_t,
2733 * the higher the flavor number, the lesser the capability, hence the name.
2734 * eval_check: Whether to run task_conversion_eval check during the conversion.
2735 * For backward compatibility, some interfaces do not run
2736 * conversion eval on IKOT_TASK_CONTROL.
2737 * Conditions:
2738 * Nothing locked.
2739 * Returns:
2740 * ipc_space_t and port's type, if translation succeeded;
2741 * IPC_SPACE_NULL and IKOT_NONE, if translation failed
2742 */
2743ipc_space_t
2744convert_port_to_space_check_type(
2745 ipc_port_t port,
2746 ipc_kobject_type_t *kotype,
2747 mach_task_flavor_t at_most,
2748 boolean_t eval_check)
2749{
2750 ipc_space_t space = IPC_SPACE_NULL;
2751 ipc_kobject_type_t type = IKOT_NONE;
2752
2753 if (!IP_VALID(port) || !ip_active(port)) {
2754 goto out;
2755 }
2756
2757 switch (ip_kotype(port)) {
2758 case IKOT_TASK_CONTROL:
2759 space = eval_check ? convert_port_to_space(port) : convert_port_to_space_no_eval(port);
2760 if (space != IPC_SPACE_NULL) {
2761 type = IKOT_TASK_CONTROL;
2762 }
2763 break;
2764 case IKOT_TASK_READ:
2765 if (at_most >= TASK_FLAVOR_READ) {
2766 space = eval_check ? convert_port_to_space_read(port) : convert_port_to_space_read_no_eval(port);
2767 if (space != IPC_SPACE_READ_NULL) {
2768 type = IKOT_TASK_READ;
2769 }
2770 }
2771 break;
2772 case IKOT_TASK_INSPECT:
2773 if (at_most >= TASK_FLAVOR_INSPECT) {
2774 space = convert_port_to_space_inspect(port);
2775 if (space != IPC_SPACE_INSPECT_NULL) {
2776 type = IKOT_TASK_INSPECT;
2777 }
2778 }
2779 break;
2780 default:
2781 break;
2782 }
2783
2784out:
2785 if (kotype) {
2786 *kotype = type;
2787 }
2788 return space;
2789}
2790
2791/*
2792 * Routine: convert_port_to_task_inspect
2793 * Purpose:
2794 * Convert from a port to a task inspection right.
2795 * Doesn't consume the port ref; produces a task ref,
2796 * which may be null.
2797 * Conditions:
2798 * Nothing locked.
2799 */
2800task_inspect_t
2801convert_port_to_task_inspect(
2802 ipc_port_t port)
2803{
2804 task_inspect_t task = TASK_INSPECT_NULL;
2805
2806 if (IP_VALID(port)) {
2807 ip_lock(port);
2808 if (ip_active(port)) {
2809 task = convert_port_to_task_inspect_locked(port);
2810 }
2811 ip_unlock(port);
2812 }
2813
2814 return task;
2815}
2816
2817/*
2818 * Routine: convert_port_to_task_read
2819 * Purpose:
2820 * Convert from a port to a task read right.
2821 * Doesn't consume the port ref; produces a task ref,
2822 * which may be null.
2823 * Conditions:
2824 * Nothing locked.
2825 */
2826task_read_t
2827convert_port_to_task_read(
2828 ipc_port_t port)
2829{
2830 task_read_t task = TASK_READ_NULL;
2831
2832 if (IP_VALID(port)) {
2833 ip_lock(port);
2834 if (ip_active(port)) {
2835 task = convert_port_to_task_read_locked(port, TRUE);
2836 }
2837 ip_unlock(port);
2838 }
2839
2840 return task;
2841}
2842
2843static task_read_t
2844convert_port_to_task_read_no_eval(
2845 ipc_port_t port)
2846{
2847 task_read_t task = TASK_READ_NULL;
2848
2849 if (IP_VALID(port)) {
2850 ip_lock(port);
2851 if (ip_active(port)) {
2852 task = convert_port_to_task_read_locked(port, FALSE);
2853 }
2854 ip_unlock(port);
2855 }
2856
2857 return task;
2858}
2859
2860/*
2861 * Routine: convert_port_to_task_suspension_token
2862 * Purpose:
2863 * Convert from a port to a task suspension token.
2864 * Doesn't consume the port ref; produces a suspension token ref,
2865 * which may be null.
2866 * Conditions:
2867 * Nothing locked.
2868 */
2869task_suspension_token_t
2870convert_port_to_task_suspension_token(
2871 ipc_port_t port)
2872{
2873 task_suspension_token_t task = TASK_NULL;
2874
2875 if (IP_VALID(port)) {
2876 ip_lock(port);
2877
2878 if (ip_active(port) &&
2879 ip_kotype(port) == IKOT_TASK_RESUME) {
2880 task = (task_suspension_token_t) ip_get_kobject(port);
2881 assert(task != TASK_NULL);
2882
2883 task_reference_internal(task);
2884 }
2885
2886 ip_unlock(port);
2887 }
2888
2889 return task;
2890}
2891
2892/*
2893 * Routine: convert_port_to_space_with_flavor
2894 * Purpose:
2895 * Convert from a port to a space.
2896 * Doesn't consume the port ref; produces a space ref,
2897 * which may be null.
2898 * Conditions:
2899 * Nothing locked.
2900 */
2901static ipc_space_t
2902convert_port_to_space_with_flavor(
2903 ipc_port_t port,
2904 mach_task_flavor_t flavor,
2905 boolean_t eval)
2906{
2907 ipc_space_t space;
2908 task_t task;
2909
2910 switch (flavor) {
2911 case TASK_FLAVOR_CONTROL:
2912 task = convert_port_to_locked_task(port, eval);
2913 break;
2914 case TASK_FLAVOR_READ:
2915 task = convert_port_to_locked_task_read(port, eval);
2916 break;
2917 case TASK_FLAVOR_INSPECT:
2918 task = convert_port_to_locked_task_inspect(port);
2919 break;
2920 default:
2921 task = TASK_NULL;
2922 break;
2923 }
2924
2925 if (task == TASK_NULL) {
2926 return IPC_SPACE_NULL;
2927 }
2928
2929 if (!task->active) {
2930 task_unlock(task);
2931 return IPC_SPACE_NULL;
2932 }
2933
2934 space = task->itk_space;
2935 is_reference(space);
2936 task_unlock(task);
2937 return space;
2938}
2939
2940ipc_space_t
2941convert_port_to_space(
2942 ipc_port_t port)
2943{
2944 return convert_port_to_space_with_flavor(port, TASK_FLAVOR_CONTROL, TRUE);
2945}
2946
2947static ipc_space_t
2948convert_port_to_space_no_eval(
2949 ipc_port_t port)
2950{
2951 return convert_port_to_space_with_flavor(port, TASK_FLAVOR_CONTROL, FALSE);
2952}
2953
2954ipc_space_read_t
2955convert_port_to_space_read(
2956 ipc_port_t port)
2957{
2958 return convert_port_to_space_with_flavor(port, TASK_FLAVOR_READ, TRUE);
2959}
2960
2961static ipc_space_read_t
2962convert_port_to_space_read_no_eval(
2963 ipc_port_t port)
2964{
2965 return convert_port_to_space_with_flavor(port, TASK_FLAVOR_READ, FALSE);
2966}
2967
2968ipc_space_inspect_t
2969convert_port_to_space_inspect(
2970 ipc_port_t port)
2971{
2972 return convert_port_to_space_with_flavor(port, TASK_FLAVOR_INSPECT, TRUE);
2973}
2974
2975/*
2976 * Routine: convert_port_to_map_with_flavor
2977 * Purpose:
2978 * Convert from a port to a map.
2979 * Doesn't consume the port ref; produces a map ref,
2980 * which may be null.
2981 * Conditions:
2982 * Nothing locked.
2983 */
2984
2985static vm_map_t
2986convert_port_to_map_with_flavor(
2987 ipc_port_t port,
2988 mach_task_flavor_t flavor)
2989{
2990 task_t task;
2991 vm_map_t map;
2992
2993 switch (flavor) {
2994 case TASK_FLAVOR_CONTROL:
2995 task = convert_port_to_locked_task(port, TRUE); /* always eval */
2996 break;
2997 case TASK_FLAVOR_READ:
2998 task = convert_port_to_locked_task_read(port, TRUE); /* always eval */
2999 break;
3000 case TASK_FLAVOR_INSPECT:
3001 task = convert_port_to_locked_task_inspect(port); /* always no eval */
3002 break;
3003 default:
3004 task = TASK_NULL;
3005 break;
3006 }
3007
3008 if (task == TASK_NULL) {
3009 return VM_MAP_NULL;
3010 }
3011
3012 if (!task->active) {
3013 task_unlock(task);
3014 return VM_MAP_NULL;
3015 }
3016
3017 map = task->map;
3018 if (map->pmap == kernel_pmap) {
3019 if (flavor == TASK_FLAVOR_CONTROL) {
3020 panic("userspace has control access to a "
3021 "kernel map %p through task %p", map, task);
3022 }
3023 if (task != kernel_task) {
3024 panic("userspace has access to a "
3025 "kernel map %p through task %p", map, task);
3026 }
3027 } else {
3028 pmap_require(map->pmap);
3029 }
3030
3031 vm_map_reference(map);
3032 task_unlock(task);
3033 return map;
3034}
3035
3036vm_map_read_t
3037convert_port_to_map(
3038 ipc_port_t port)
3039{
3040 return convert_port_to_map_with_flavor(port, TASK_FLAVOR_CONTROL);
3041}
3042
3043vm_map_read_t
3044convert_port_to_map_read(
3045 ipc_port_t port)
3046{
3047 return convert_port_to_map_with_flavor(port, TASK_FLAVOR_READ);
3048}
3049
3050vm_map_inspect_t
3051convert_port_to_map_inspect(
3052 ipc_port_t port)
3053{
3054 return convert_port_to_map_with_flavor(port, TASK_FLAVOR_INSPECT);
3055}
3056
3057
3058/*
3059 * Routine: convert_port_to_thread
3060 * Purpose:
3061 * Convert from a port to a thread.
3062 * Doesn't consume the port ref; produces a thread ref,
3063 * which may be null.
3064 * Conditions:
3065 * Nothing locked.
3066 */
3067
3068static thread_t
3069convert_port_to_thread_locked(
3070 ipc_port_t port,
3071 port_to_thread_options_t options,
3072 boolean_t eval)
3073{
3074 thread_t thread = THREAD_NULL;
3075
3076 ip_lock_held(port);
3077 require_ip_active(port);
3078
3079 if (ip_kotype(port) == IKOT_THREAD_CONTROL) {
3080 thread = (thread_t) ip_get_kobject(port);
3081 assert(thread != THREAD_NULL);
3082
3083 if (options & PORT_TO_THREAD_NOT_CURRENT_THREAD) {
3084 if (thread == current_thread()) {
3085 return THREAD_NULL;
3086 }
3087 }
3088
3089 if (options & PORT_TO_THREAD_IN_CURRENT_TASK) {
3090 if (thread->task != current_task()) {
3091 return THREAD_NULL;
3092 }
3093 } else {
3094 /* Use task conversion rules for thread control conversions */
3095 if (eval && task_conversion_eval(current_task(), thread->task) != KERN_SUCCESS) {
3096 return THREAD_NULL;
3097 }
3098 }
3099
3100 thread_reference_internal(thread);
3101 }
3102
3103 return thread;
3104}
3105
3106thread_t
3107convert_port_to_thread(
3108 ipc_port_t port)
3109{
3110 thread_t thread = THREAD_NULL;
3111
3112 if (IP_VALID(port)) {
3113 ip_lock(port);
3114 if (ip_active(port)) {
3115 thread = convert_port_to_thread_locked(port, PORT_TO_THREAD_NONE, TRUE);
3116 }
3117 ip_unlock(port);
3118 }
3119
3120 return thread;
3121}
3122
3123static thread_t
3124convert_port_to_thread_no_eval(
3125 ipc_port_t port)
3126{
3127 thread_t thread = THREAD_NULL;
3128
3129 if (IP_VALID(port)) {
3130 ip_lock(port);
3131 if (ip_active(port)) {
3132 thread = convert_port_to_thread_locked(port, PORT_TO_THREAD_NONE, FALSE);
3133 }
3134 ip_unlock(port);
3135 }
3136
3137 return thread;
3138}
3139
3140/*
3141 * Routine: convert_port_to_thread_inspect
3142 * Purpose:
3143 * Convert from a port to a thread inspect right.
3144 * Doesn't consume the port ref; produces a thread ref,
3145 * which may be null.
3146 * Conditions:
3147 * Nothing locked.
3148 */
3149static thread_inspect_t
3150convert_port_to_thread_inspect_locked(
3151 ipc_port_t port)
3152{
3153 thread_inspect_t thread = THREAD_INSPECT_NULL;
3154
3155 ip_lock_held(port);
3156 require_ip_active(port);
3157
3158 if (ip_kotype(port) == IKOT_THREAD_CONTROL ||
3159 ip_kotype(port) == IKOT_THREAD_READ ||
3160 ip_kotype(port) == IKOT_THREAD_INSPECT) {
3161 thread = (thread_inspect_t)ipc_kobject_get(port);
3162 assert(thread != THREAD_INSPECT_NULL);
3163 thread_reference_internal((thread_t)thread);
3164 }
3165
3166 return thread;
3167}
3168
3169thread_inspect_t
3170convert_port_to_thread_inspect(
3171 ipc_port_t port)
3172{
3173 thread_inspect_t thread = THREAD_INSPECT_NULL;
3174
3175 if (IP_VALID(port)) {
3176 ip_lock(port);
3177 if (ip_active(port)) {
3178 thread = convert_port_to_thread_inspect_locked(port);
3179 }
3180 ip_unlock(port);
3181 }
3182
3183 return thread;
3184}
3185
3186/*
3187 * Routine: convert_port_to_thread_read
3188 * Purpose:
3189 * Convert from a port to a thread read right.
3190 * Doesn't consume the port ref; produces a thread ref,
3191 * which may be null.
3192 * Conditions:
3193 * Nothing locked.
3194 */
3195static thread_read_t
3196convert_port_to_thread_read_locked(
3197 ipc_port_t port,
3198 boolean_t eval)
3199{
3200 thread_read_t thread = THREAD_READ_NULL;
3201
3202 ip_lock_held(port);
3203 require_ip_active(port);
3204
3205 if (ip_kotype(port) == IKOT_THREAD_CONTROL ||
3206 ip_kotype(port) == IKOT_THREAD_READ) {
3207 thread = (thread_read_t) ip_get_kobject(port);
3208 assert(thread != THREAD_READ_NULL);
3209
3210 /* Use task conversion rules for thread control conversions */
3211 if (eval && task_conversion_eval(current_task(), thread->task) != KERN_SUCCESS) {
3212 return THREAD_READ_NULL;
3213 }
3214
3215 thread_reference_internal((thread_t)thread);
3216 }
3217
3218 return thread;
3219}
3220
3221thread_read_t
3222convert_port_to_thread_read(
3223 ipc_port_t port)
3224{
3225 thread_read_t thread = THREAD_READ_NULL;
3226
3227 if (IP_VALID(port)) {
3228 ip_lock(port);
3229 if (ip_active(port)) {
3230 thread = convert_port_to_thread_read_locked(port, TRUE);
3231 }
3232 ip_unlock(port);
3233 }
3234
3235 return thread;
3236}
3237
3238static thread_read_t
3239convert_port_to_thread_read_no_eval(
3240 ipc_port_t port)
3241{
3242 thread_read_t thread = THREAD_READ_NULL;
3243
3244 if (IP_VALID(port)) {
3245 ip_lock(port);
3246 if (ip_active(port)) {
3247 thread = convert_port_to_thread_read_locked(port, FALSE);
3248 }
3249 ip_unlock(port);
3250 }
3251
3252 return thread;
3253}
3254
3255
3256/*
3257 * Routine: convert_thread_to_port_with_flavor
3258 * Purpose:
3259 * Convert from a thread to a port of given flavor.
3260 * Consumes a thread ref; produces a naked send right
3261 * which may be invalid.
3262 * Conditions:
3263 * Nothing locked.
3264 */
3265static ipc_port_t
3266convert_thread_to_port_with_flavor(
3267 thread_t thread,
3268 mach_thread_flavor_t flavor)
3269{
3270 ipc_port_t port = IP_NULL;
3271
3272 thread_mtx_lock(thread);
3273
3274 if (!thread->ipc_active) {
3275 goto exit;
3276 }
3277
3278 if (flavor == THREAD_FLAVOR_CONTROL) {
3279 port = ipc_port_make_send(thread->ith_thread_ports[flavor]);
3280 } else {
3281 ipc_kobject_type_t kotype = (flavor == THREAD_FLAVOR_READ) ? IKOT_THREAD_READ : IKOT_THREAD_INSPECT;
3282 /*
3283 * Claim a send right on the thread read/inspect port, and request a no-senders
3284 * notification on that port (if none is outstanding). A thread reference is not
3285 * donated here, even though the ports are created lazily, because the port does
3286 * not own the kobject it points to. Thread lifetime is managed explicitly, and
3287 * the paths that terminate the task/thread must synchronize with the no-senders
3288 * notification firing; that synchronization is done under the thread mutex
3289 * rather than with atomics.
3290 */
3291 (void)ipc_kobject_make_send_lazy_alloc_port(&thread->ith_thread_ports[flavor], (ipc_kobject_t)thread,
3292 kotype, IPC_KOBJECT_ALLOC_IMMOVABLE_SEND, false, 0);
3293 port = thread->ith_thread_ports[flavor];
3294 }
3295
3296exit:
3297 thread_mtx_unlock(thread);
3298 thread_deallocate(thread);
3299 return port;
3300}
3301
3302ipc_port_t
3303convert_thread_to_port(
3304 thread_t thread)
3305{
3306 return convert_thread_to_port_with_flavor(thread, THREAD_FLAVOR_CONTROL);
3307}
3308
3309ipc_port_t
3310convert_thread_read_to_port(thread_read_t thread)
3311{
3312 return convert_thread_to_port_with_flavor(thread, THREAD_FLAVOR_READ);
3313}
3314
3315ipc_port_t
3316convert_thread_inspect_to_port(thread_inspect_t thread)
3317{
3318 return convert_thread_to_port_with_flavor(thread, THREAD_FLAVOR_INSPECT);
3319}
3320
3321
3322/*
3323 * Routine: port_name_to_thread
3324 * Purpose:
3325 * Convert from a port name to a thread reference.
3326 * A name of MACH_PORT_NULL is valid for the null thread.
3327 * Conditions:
3328 * Nothing locked.
3329 */
3330thread_t
3331port_name_to_thread(
3332 mach_port_name_t name,
3333 port_to_thread_options_t options)
3334{
3335 thread_t thread = THREAD_NULL;
3336 ipc_port_t kport;
3337 kern_return_t kr;
3338
3339 if (MACH_PORT_VALID(name)) {
3340 kr = ipc_port_translate_send(current_space(), name, &kport);
3341 if (kr == KERN_SUCCESS) {
3342 thread = convert_port_to_thread_locked(kport, options, TRUE);
3343 ip_unlock(kport);
3344 }
3345 }
3346
3347 return thread;
3348}
3349
3350/*
3351 * Routine: port_name_to_task
3352 * Purpose:
3353 * Convert from a port name to a task reference.
3354 * A name of MACH_PORT_NULL is valid for the null task.
3355 * Conditions:
3356 * Nothing locked.
3357 */
3358task_t
3359port_name_to_task(
3360 mach_port_name_t name)
3361{
3362 ipc_port_t kport;
3363 kern_return_t kr;
3364 task_t task = TASK_NULL;
3365
3366 if (MACH_PORT_VALID(name)) {
3367 kr = ipc_port_translate_send(current_space(), name, &kport);
3368 if (kr == KERN_SUCCESS) {
3369 task = convert_port_to_task_locked(kport, NULL, TRUE);
3370 ip_unlock(kport);
3371 }
3372 }
3373 return task;
3374}
3375
3376/*
3377 * Routine: port_name_to_task_read
3378 * Purpose:
3379 * Convert from a port name to a task reference.
3380 * A name of MACH_PORT_NULL is valid for the null task.
3381 * Conditions:
3382 * Nothing locked.
3383 */
3384task_read_t
3385port_name_to_task_read(
3386 mach_port_name_t name)
3387{
3388 ipc_port_t kport;
3389 kern_return_t kr;
3390 task_read_t tr = TASK_READ_NULL;
3391
3392 if (MACH_PORT_VALID(name)) {
3393 kr = ipc_port_translate_send(current_space(), name, &kport);
3394 if (kr == KERN_SUCCESS) {
3395 tr = convert_port_to_task_read_locked(kport, TRUE);
3396 ip_unlock(kport);
3397 }
3398 }
3399 return tr;
3400}
3401
3402/*
3403 * Routine: port_name_to_task_read_no_eval
3404 * Purpose:
3405 * Convert from a port name to a task reference.
3406 * A name of MACH_PORT_NULL is valid for the null task.
3407 * Skips task_conversion_eval() during conversion.
3408 * Conditions:
3409 * Nothing locked.
3410 */
3411task_read_t
3412port_name_to_task_read_no_eval(
3413 mach_port_name_t name)
3414{
3415 ipc_port_t kport;
3416 kern_return_t kr;
3417 task_read_t tr = TASK_READ_NULL;
3418
3419 if (MACH_PORT_VALID(name)) {
3420 kr = ipc_port_translate_send(current_space(), name, &kport);
3421 if (kr == KERN_SUCCESS) {
3422 tr = convert_port_to_task_read_locked(kport, FALSE);
3423 ip_unlock(kport);
3424 }
3425 }
3426 return tr;
3427}
3428
3429/*
3430 * Routine: port_name_to_task_name
3431 * Purpose:
3432 * Convert from a port name to a task reference.
3433 * A name of MACH_PORT_NULL is valid for the null task.
3434 * Conditions:
3435 * Nothing locked.
3436 */
3437task_name_t
3438port_name_to_task_name(
3439 mach_port_name_t name)
3440{
3441 ipc_port_t kport;
3442 kern_return_t kr;
3443 task_name_t tn = TASK_NAME_NULL;
3444
3445 if (MACH_PORT_VALID(name)) {
3446 kr = ipc_port_translate_send(current_space(), name, &kport);
3447 if (kr == KERN_SUCCESS) {
3448 tn = convert_port_to_task_name_locked(kport);
3449 ip_unlock(kport);
3450 }
3451 }
3452 return tn;
3453}
3454
3455/*
3456 * Routine: port_name_to_host
3457 * Purpose:
3458 * Convert from a port name to a host pointer.
3459 * NOTE: This does _not_ return a +1 reference to the host_t
3460 * Conditions:
3461 * Nothing locked.
3462 */
3463host_t
3464port_name_to_host(
3465 mach_port_name_t name)
3466{
3467 host_t host = HOST_NULL;
3468 kern_return_t kr;
3469 ipc_port_t port;
3470
3471 if (MACH_PORT_VALID(name)) {
3472 kr = ipc_port_translate_send(current_space(), name, &port);
3473 if (kr == KERN_SUCCESS) {
3474 host = convert_port_to_host(port);
3475 ip_unlock(port);
3476 }
3477 }
3478 return host;
3479}
3480
3481/*
3482 * Routine: convert_task_to_port_with_flavor
3483 * Purpose:
3484 * Convert from a task to a port of given flavor.
3485 * Consumes a task ref; produces a naked send right
3486 * which may be invalid.
3487 * Conditions:
3488 * Nothing locked.
3489 */
3490static ipc_port_t
3491convert_task_to_port_with_flavor(
3492 task_t task,
3493 mach_task_flavor_t flavor)
3494{
3495 ipc_port_t port = IP_NULL;
3496 ipc_kobject_type_t kotype = IKOT_NONE;
3497
3498 itk_lock(task);
3499
3500 if (!task->ipc_active) {
3501 goto exit;
3502 }
3503
3504 switch (flavor) {
3505 case TASK_FLAVOR_CONTROL:
3506 case TASK_FLAVOR_NAME:
3507 port = ipc_port_make_send(task->itk_task_ports[flavor]);
3508 break;
3509 /*
3510 * Claim a send right on the task read/inspect port, and request a no-senders
3511 * notification on that port (if none outstanding). A task reference is
3512 * deliberately not donated here because ipc_kobject_make_send_lazy_alloc_port
3513 * is used only for convenience and these ports don't control the lifecycle of
3514 * the task kobject. Instead, the task's itk_lock is used to synchronize the
3515 * handling of the no-senders notification with the task termination.
3516 */
3517 case TASK_FLAVOR_READ:
3518 case TASK_FLAVOR_INSPECT:
3519 kotype = (flavor == TASK_FLAVOR_READ) ? IKOT_TASK_READ : IKOT_TASK_INSPECT;
3520 (void)ipc_kobject_make_send_lazy_alloc_port((ipc_port_t *) &task->itk_task_ports[flavor],
3521 (ipc_kobject_t)task, kotype, IPC_KOBJECT_ALLOC_IMMOVABLE_SEND, true,
3522 OS_PTRAUTH_DISCRIMINATOR("task.itk_task_ports"));
3523 port = task->itk_task_ports[flavor];
3524
3525 break;
3526 }
3527
3528exit:
3529 itk_unlock(task);
3530 task_deallocate(task);
3531 return port;
3532}
3533
3534ipc_port_t
3535convert_task_to_port(
3536 task_t task)
3537{
3538 return convert_task_to_port_with_flavor(task, TASK_FLAVOR_CONTROL);
3539}
3540
3541ipc_port_t
3542convert_task_read_to_port(
3543 task_read_t task)
3544{
3545 return convert_task_to_port_with_flavor(task, TASK_FLAVOR_READ);
3546}
3547
3548ipc_port_t
3549convert_task_inspect_to_port(
3550 task_inspect_t task)
3551{
3552 return convert_task_to_port_with_flavor(task, TASK_FLAVOR_INSPECT);
3553}
3554
3555ipc_port_t
3556convert_task_name_to_port(
3557 task_name_t task)
3558{
3559 return convert_task_to_port_with_flavor(task, TASK_FLAVOR_NAME);
3560}
3561
3562ipc_port_t
3563convert_task_to_port_pinned(
3564 task_t task)
3565{
3566 ipc_port_t port = IP_NULL;
3567
3568 itk_lock(task);
3569
3570 if (task->ipc_active && task->itk_self != IP_NULL) {
3571 port = ipc_port_make_send(task->itk_self);
3572 }
3573
3574 itk_unlock(task);
3575 task_deallocate(task);
3576 return port;
3577}
3578/*
3579 * Routine: convert_task_suspend_token_to_port
3580 * Purpose:
3581 * Convert from a task suspension token to a port.
3582 * Consumes a task suspension token ref; produces a naked send-once right
3583 * which may be invalid.
3584 * Conditions:
3585 * Nothing locked.
3586 */
3587ipc_port_t
3588convert_task_suspension_token_to_port(
3589 task_suspension_token_t task)
3590{
3591 ipc_port_t port;
3592
3593 task_lock(task);
3594 if (task->active) {
3595 if (task->itk_resume == IP_NULL) {
3596 task->itk_resume = ipc_kobject_alloc_port((ipc_kobject_t) task,
3597 IKOT_TASK_RESUME, IPC_KOBJECT_ALLOC_NONE);
3598 }
3599
3600 /*
3601 * Create a send-once right for each instance of a direct user-called
3602 * task_suspend2 call. Each time one of these send-once rights is abandoned,
3603 * the notification handler will resume the target task.
3604 */
3605 port = ipc_port_make_sonce(task->itk_resume);
3606 assert(IP_VALID(port));
3607 } else {
3608 port = IP_NULL;
3609 }
3610
3611 task_unlock(task);
3612 task_suspension_token_deallocate(task);
3613
3614 return port;
3615}
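
/*
 * Usage sketch (user space, illustrative): the suspension-token port produced
 * above backs the task_suspend2()/task_resume2() interface. If the send-once
 * right is destroyed instead of being passed to task_resume2(), the resulting
 * notification resumes the task, so a crashed caller cannot leave the target
 * suspended forever.
 *
 *	#include <mach/mach.h>
 *
 *	kern_return_t
 *	with_task_suspended(task_t target)
 *	{
 *		task_suspension_token_t token = MACH_PORT_NULL;
 *		kern_return_t kr = task_suspend2(target, &token);
 *
 *		if (kr != KERN_SUCCESS) {
 *			return kr;
 *		}
 *		... inspect the suspended task here ...
 *		return task_resume2(token);
 *	}
 */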
3616
3617ipc_port_t
3618convert_thread_to_port_pinned(
3619 thread_t thread)
3620{
3621 ipc_port_t port = IP_NULL;
3622
3623 thread_mtx_lock(thread);
3624
3625 if (thread->ipc_active && thread->ith_self != IP_NULL) {
3626 port = ipc_port_make_send(thread->ith_self);
3627 }
3628
3629 thread_mtx_unlock(thread);
3630 thread_deallocate(thread);
3631 return port;
3632}
3633/*
3634 * Routine: space_deallocate
3635 * Purpose:
3636 * Deallocate a space ref produced by convert_port_to_space.
3637 * Conditions:
3638 * Nothing locked.
3639 */
3640
3641void
3642space_deallocate(
3643 ipc_space_t space)
3644{
3645 if (space != IS_NULL) {
3646 is_release(space);
3647 }
3648}
3649
3650/*
3651 * Routine: space_read_deallocate
3652 * Purpose:
3653 * Deallocate a space read ref produced by convert_port_to_space_read.
3654 * Conditions:
3655 * Nothing locked.
3656 */
3657
3658void
3659space_read_deallocate(
3660 ipc_space_read_t space)
3661{
3662 if (space != IS_INSPECT_NULL) {
3663 is_release((ipc_space_t)space);
3664 }
3665}
3666
3667/*
3668 * Routine: space_inspect_deallocate
3669 * Purpose:
3670 * Deallocate a space inspect ref produced by convert_port_to_space_inspect.
3671 * Conditions:
3672 * Nothing locked.
3673 */
3674
3675void
3676space_inspect_deallocate(
3677 ipc_space_inspect_t space)
3678{
3679 if (space != IS_INSPECT_NULL) {
3680 is_release((ipc_space_t)space);
3681 }
3682}
3683
3684
3685/*
3686 * Routine: thread/task_set_exception_ports [kernel call]
3687 * Purpose:
3688 * Sets the thread/task exception port, flavor and
3689 * behavior for the exception types specified by the mask.
3690 * There will be one send right per exception per valid
3691 * port.
3692 * Conditions:
3693 * Nothing locked. If successful, consumes
3694 * the supplied send right.
3695 * Returns:
3696 * KERN_SUCCESS Changed the special port.
3697 * KERN_INVALID_ARGUMENT The thread is null,
3698 * Illegal mask bit set.
3699 * Illegal exception behavior
3700 * KERN_FAILURE The thread is dead.
3701 */
3702
3703kern_return_t
3704thread_set_exception_ports(
3705 thread_t thread,
3706 exception_mask_t exception_mask,
3707 ipc_port_t new_port,
3708 exception_behavior_t new_behavior,
3709 thread_state_flavor_t new_flavor)
3710{
3711 ipc_port_t old_port[EXC_TYPES_COUNT];
3712 boolean_t privileged = current_task()->sec_token.val[0] == 0;
3713 register int i;
3714
3715#if CONFIG_MACF
3716 struct label *new_label;
3717#endif
3718
3719 if (thread == THREAD_NULL) {
3720 return KERN_INVALID_ARGUMENT;
3721 }
3722
3723 if (exception_mask & ~EXC_MASK_VALID) {
3724 return KERN_INVALID_ARGUMENT;
3725 }
3726
3727 if (IP_VALID(new_port)) {
3728 switch (new_behavior & ~MACH_EXCEPTION_MASK) {
3729 case EXCEPTION_DEFAULT:
3730 case EXCEPTION_STATE:
3731 case EXCEPTION_STATE_IDENTITY:
3732 break;
3733
3734 default:
3735 return KERN_INVALID_ARGUMENT;
3736 }
3737 }
3738
3739
3740 /*
3741 * Check the validity of the thread_state_flavor by calling the
3742 * VALID_THREAD_STATE_FLAVOR architecture dependent macro defined in
3743 * osfmk/mach/ARCHITECTURE/thread_status.h
3744 */
3745 if (new_flavor != 0 && !VALID_THREAD_STATE_FLAVOR(new_flavor)) {
3746 return KERN_INVALID_ARGUMENT;
3747 }
3748
3749#if CONFIG_MACF
3750 new_label = mac_exc_create_label_for_current_proc();
3751#endif
3752
3753 thread_mtx_lock(thread);
3754
3755 if (!thread->active) {
3756 thread_mtx_unlock(thread);
3757
3758 return KERN_FAILURE;
3759 }
3760
3761 if (thread->exc_actions == NULL) {
3762 ipc_thread_init_exc_actions(thread);
3763 }
3764 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
3765 if ((exception_mask & (1 << i))
3766#if CONFIG_MACF
3767 && mac_exc_update_action_label(&thread->exc_actions[i], new_label) == 0
3768#endif
3769 ) {
3770 old_port[i] = thread->exc_actions[i].port;
3771 thread->exc_actions[i].port = ipc_port_copy_send(new_port);
3772 thread->exc_actions[i].behavior = new_behavior;
3773 thread->exc_actions[i].flavor = new_flavor;
3774 thread->exc_actions[i].privileged = privileged;
3775 } else {
3776 old_port[i] = IP_NULL;
3777 }
3778 }
3779
3780 thread_mtx_unlock(thread);
3781
3782#if CONFIG_MACF
3783 mac_exc_free_label(new_label);
3784#endif
3785
3786 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
3787 if (IP_VALID(old_port[i])) {
3788 ipc_port_release_send(old_port[i]);
3789 }
3790 }
3791
3792 if (IP_VALID(new_port)) { /* consume send right */
3793 ipc_port_release_send(new_port);
3794 }
3795
3796 return KERN_SUCCESS;
3797}
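
/*
 * Usage sketch (user space, illustrative): installing a per-thread handler
 * for EXC_BAD_ACCESS. `handler_port` is an assumed send right to a receive
 * port that the caller services with an exception-message loop.
 *
 *	#include <mach/mach.h>
 *
 *	kern_return_t
 *	install_thread_handler(mach_port_t handler_port)
 *	{
 *		mach_port_t self = mach_thread_self();
 *		kern_return_t kr = thread_set_exception_ports(self,
 *		    EXC_MASK_BAD_ACCESS,
 *		    handler_port,
 *		    EXCEPTION_DEFAULT | MACH_EXCEPTION_CODES,
 *		    THREAD_STATE_NONE);
 *
 *		mach_port_deallocate(mach_task_self(), self);
 *		return kr;
 *	}
 */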
3798
3799kern_return_t
3800task_set_exception_ports(
3801 task_t task,
3802 exception_mask_t exception_mask,
3803 ipc_port_t new_port,
3804 exception_behavior_t new_behavior,
3805 thread_state_flavor_t new_flavor)
3806{
3807 ipc_port_t old_port[EXC_TYPES_COUNT];
3808 boolean_t privileged = current_task()->sec_token.val[0] == 0;
3809 register int i;
3810
3811#if CONFIG_MACF
3812 struct label *new_label;
3813#endif
3814
3815 if (task == TASK_NULL) {
3816 return KERN_INVALID_ARGUMENT;
3817 }
3818
3819 if (exception_mask & ~EXC_MASK_VALID) {
3820 return KERN_INVALID_ARGUMENT;
3821 }
3822
3823 if (IP_VALID(new_port)) {
3824 switch (new_behavior & ~MACH_EXCEPTION_MASK) {
3825 case EXCEPTION_DEFAULT:
3826 case EXCEPTION_STATE:
3827 case EXCEPTION_STATE_IDENTITY:
3828 break;
3829
3830 default:
3831 return KERN_INVALID_ARGUMENT;
3832 }
3833 }
3834
3835
3836 /*
3837 * Check the validity of the thread_state_flavor by calling the
3838 * VALID_THREAD_STATE_FLAVOR architecture dependent macro defined in
3839 * osfmk/mach/ARCHITECTURE/thread_status.h
3840 */
3841 if (new_flavor != 0 && !VALID_THREAD_STATE_FLAVOR(new_flavor)) {
3842 return KERN_INVALID_ARGUMENT;
3843 }
3844
3845#if CONFIG_MACF
3846 new_label = mac_exc_create_label_for_current_proc();
3847#endif
3848
3849 itk_lock(task);
3850
3851 if (!task->ipc_active) {
3852 itk_unlock(task);
3853 return KERN_FAILURE;
3854 }
3855
3856 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
3857 if ((exception_mask & (1 << i))
3858#if CONFIG_MACF
3859 && mac_exc_update_action_label(&task->exc_actions[i], new_label) == 0
3860#endif
3861 ) {
3862 old_port[i] = task->exc_actions[i].port;
3863 task->exc_actions[i].port =
3864 ipc_port_copy_send(new_port);
3865 task->exc_actions[i].behavior = new_behavior;
3866 task->exc_actions[i].flavor = new_flavor;
3867 task->exc_actions[i].privileged = privileged;
3868 } else {
3869 old_port[i] = IP_NULL;
3870 }
3871 }
3872
3873 itk_unlock(task);
3874
3875#if CONFIG_MACF
3876 mac_exc_free_label(new_label);
3877#endif
3878
3879 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
3880 if (IP_VALID(old_port[i])) {
3881 ipc_port_release_send(old_port[i]);
3882 }
3883 }
3884
3885 if (IP_VALID(new_port)) { /* consume send right */
3886 ipc_port_release_send(new_port);
3887 }
3888
3889 return KERN_SUCCESS;
3890}
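
/*
 * Usage sketch (user space, illustrative): a task-wide handler that wants the
 * faulting thread's identity and state delivered with the message, using
 * 64-bit (mach_exception_data_t) codes. `handler_port` is an assumed send right.
 *
 *	#include <mach/mach.h>
 *
 *	kern_return_t
 *	install_task_crash_handler(mach_port_t handler_port)
 *	{
 *		return task_set_exception_ports(mach_task_self(),
 *		    EXC_MASK_CRASH,
 *		    handler_port,
 *		    EXCEPTION_STATE_IDENTITY | MACH_EXCEPTION_CODES,
 *		    MACHINE_THREAD_STATE);
 *	}
 */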
3891
3892/*
3893 * Routine: thread/task_swap_exception_ports [kernel call]
3894 * Purpose:
3895 * Sets the thread/task exception port, flavor and
3896 * behavior for the exception types specified by the
3897 * mask.
3898 *
3899 * The old ports, behaviors and flavors are returned.
3900 * Count specifies the array sizes on input and
3901 * the number of returned ports etc. on output. The
3902 * arrays must be large enough to hold all the returned
3903 * data; MIG returns an error otherwise. The masks
3904 * array specifies the corresponding exception type(s).
3905 *
3906 * Conditions:
3907 * Nothing locked. If successful, consumes
3908 * the supplied send right.
3909 *
3910 * Returns up to [in] CountCnt elements.
3911 * Returns:
3912 * KERN_SUCCESS Changed the special port.
3913 * KERN_INVALID_ARGUMENT The thread is null,
3914 * Illegal mask bit set.
3915 * Illegal exception behavior
3916 * KERN_FAILURE The thread is dead.
3917 */
3918
3919kern_return_t
3920thread_swap_exception_ports(
3921 thread_t thread,
3922 exception_mask_t exception_mask,
3923 ipc_port_t new_port,
3924 exception_behavior_t new_behavior,
3925 thread_state_flavor_t new_flavor,
3926 exception_mask_array_t masks,
3927 mach_msg_type_number_t *CountCnt,
3928 exception_port_array_t ports,
3929 exception_behavior_array_t behaviors,
3930 thread_state_flavor_array_t flavors)
3931{
3932 ipc_port_t old_port[EXC_TYPES_COUNT];
3933 boolean_t privileged = current_task()->sec_token.val[0] == 0;
3934 unsigned int i, j, count;
3935
3936#if CONFIG_MACF
3937 struct label *new_label;
3938#endif
3939
3940 if (thread == THREAD_NULL) {
3941 return KERN_INVALID_ARGUMENT;
3942 }
3943
3944 if (exception_mask & ~EXC_MASK_VALID) {
3945 return KERN_INVALID_ARGUMENT;
3946 }
3947
3948 if (IP_VALID(new_port)) {
3949 switch (new_behavior & ~MACH_EXCEPTION_MASK) {
3950 case EXCEPTION_DEFAULT:
3951 case EXCEPTION_STATE:
3952 case EXCEPTION_STATE_IDENTITY:
3953 break;
3954
3955 default:
3956 return KERN_INVALID_ARGUMENT;
3957 }
3958 }
3959
3960
3961 if (new_flavor != 0 && !VALID_THREAD_STATE_FLAVOR(new_flavor)) {
3962 return KERN_INVALID_ARGUMENT;
3963 }
3964
3965#if CONFIG_MACF
3966 new_label = mac_exc_create_label_for_current_proc();
3967#endif
3968
3969 thread_mtx_lock(thread);
3970
3971 if (!thread->active) {
3972 thread_mtx_unlock(thread);
3973#if CONFIG_MACF
3974 mac_exc_free_label(new_label);
3975#endif
3976 return KERN_FAILURE;
3977 }
3978
3979 if (thread->exc_actions == NULL) {
3980 ipc_thread_init_exc_actions(thread);
3981 }
3982
3983 assert(EXC_TYPES_COUNT > FIRST_EXCEPTION);
3984 for (count = 0, i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT && count < *CountCnt; ++i) {
3985 if ((exception_mask & (1 << i))
3986#if CONFIG_MACF
3987 && mac_exc_update_action_label(&thread->exc_actions[i], new_label) == 0
3988#endif
3989 ) {
3990 for (j = 0; j < count; ++j) {
3991 /*
3992 * search for an identical entry, if found
3993 * set corresponding mask for this exception.
3994 */
3995 if (thread->exc_actions[i].port == ports[j] &&
3996 thread->exc_actions[i].behavior == behaviors[j] &&
3997 thread->exc_actions[i].flavor == flavors[j]) {
3998 masks[j] |= (1 << i);
3999 break;
4000 }
4001 }
4002
4003 if (j == count) {
4004 masks[j] = (1 << i);
4005 ports[j] = ipc_port_copy_send(thread->exc_actions[i].port);
4006
4007 behaviors[j] = thread->exc_actions[i].behavior;
4008 flavors[j] = thread->exc_actions[i].flavor;
4009 ++count;
4010 }
4011
4012 old_port[i] = thread->exc_actions[i].port;
4013 thread->exc_actions[i].port = ipc_port_copy_send(new_port);
4014 thread->exc_actions[i].behavior = new_behavior;
4015 thread->exc_actions[i].flavor = new_flavor;
4016 thread->exc_actions[i].privileged = privileged;
4017 } else {
4018 old_port[i] = IP_NULL;
4019 }
4020 }
4021
4022 thread_mtx_unlock(thread);
4023
4024#if CONFIG_MACF
4025 mac_exc_free_label(new_label);
4026#endif
4027
4028 while (--i >= FIRST_EXCEPTION) {
4029 if (IP_VALID(old_port[i])) {
4030 ipc_port_release_send(old_port[i]);
4031 }
4032 }
4033
4034 if (IP_VALID(new_port)) { /* consume send right */
4035 ipc_port_release_send(new_port);
4036 }
4037
4038 *CountCnt = count;
4039
4040 return KERN_SUCCESS;
4041}
4042
4043kern_return_t
4044task_swap_exception_ports(
4045 task_t task,
4046 exception_mask_t exception_mask,
4047 ipc_port_t new_port,
4048 exception_behavior_t new_behavior,
4049 thread_state_flavor_t new_flavor,
4050 exception_mask_array_t masks,
4051 mach_msg_type_number_t *CountCnt,
4052 exception_port_array_t ports,
4053 exception_behavior_array_t behaviors,
4054 thread_state_flavor_array_t flavors)
4055{
4056 ipc_port_t old_port[EXC_TYPES_COUNT];
4057 boolean_t privileged = current_task()->sec_token.val[0] == 0;
4058 unsigned int i, j, count;
4059
4060#if CONFIG_MACF
4061 struct label *new_label;
4062#endif
4063
4064 if (task == TASK_NULL) {
4065 return KERN_INVALID_ARGUMENT;
4066 }
4067
4068 if (exception_mask & ~EXC_MASK_VALID) {
4069 return KERN_INVALID_ARGUMENT;
4070 }
4071
4072 if (IP_VALID(new_port)) {
4073 switch (new_behavior & ~MACH_EXCEPTION_MASK) {
4074 case EXCEPTION_DEFAULT:
4075 case EXCEPTION_STATE:
4076 case EXCEPTION_STATE_IDENTITY:
4077 break;
4078
4079 default:
4080 return KERN_INVALID_ARGUMENT;
4081 }
4082 }
4083
4084
4085 if (new_flavor != 0 && !VALID_THREAD_STATE_FLAVOR(new_flavor)) {
4086 return KERN_INVALID_ARGUMENT;
4087 }
4088
4089#if CONFIG_MACF
4090 new_label = mac_exc_create_label_for_current_proc();
4091#endif
4092
4093 itk_lock(task);
4094
4095 if (!task->ipc_active) {
4096 itk_unlock(task);
4097#if CONFIG_MACF
4098 mac_exc_free_label(new_label);
4099#endif
4100 return KERN_FAILURE;
4101 }
4102
4103 assert(EXC_TYPES_COUNT > FIRST_EXCEPTION);
4104 for (count = 0, i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT && count < *CountCnt; ++i) {
4105 if ((exception_mask & (1 << i))
4106#if CONFIG_MACF
4107 && mac_exc_update_action_label(&task->exc_actions[i], new_label) == 0
4108#endif
4109 ) {
4110 for (j = 0; j < count; j++) {
4111 /*
4112 * search for an identical entry, if found
4113 * set corresponding mask for this exception.
4114 */
4115 if (task->exc_actions[i].port == ports[j] &&
4116 task->exc_actions[i].behavior == behaviors[j] &&
4117 task->exc_actions[i].flavor == flavors[j]) {
4118 masks[j] |= (1 << i);
4119 break;
4120 }
4121 }
4122
4123 if (j == count) {
4124 masks[j] = (1 << i);
4125 ports[j] = ipc_port_copy_send(task->exc_actions[i].port);
4126 behaviors[j] = task->exc_actions[i].behavior;
4127 flavors[j] = task->exc_actions[i].flavor;
4128 ++count;
4129 }
4130
4131 old_port[i] = task->exc_actions[i].port;
4132
4133 task->exc_actions[i].port = ipc_port_copy_send(new_port);
4134 task->exc_actions[i].behavior = new_behavior;
4135 task->exc_actions[i].flavor = new_flavor;
4136 task->exc_actions[i].privileged = privileged;
4137 } else {
4138 old_port[i] = IP_NULL;
4139 }
4140 }
4141
4142 itk_unlock(task);
4143
4144#if CONFIG_MACF
4145 mac_exc_free_label(new_label);
4146#endif
4147
4148 while (--i >= FIRST_EXCEPTION) {
4149 if (IP_VALID(old_port[i])) {
4150 ipc_port_release_send(old_port[i]);
4151 }
4152 }
4153
4154 if (IP_VALID(new_port)) { /* consume send right */
4155 ipc_port_release_send(new_port);
4156 }
4157
4158 *CountCnt = count;
4159
4160 return KERN_SUCCESS;
4161}
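
/*
 * Usage sketch (user space, illustrative): atomically install a new handler
 * while capturing whatever was previously registered, so it can be restored
 * later. `new_port` is an assumed send right.
 *
 *	#include <mach/mach.h>
 *
 *	exception_mask_t masks[EXC_TYPES_COUNT];
 *	mach_msg_type_number_t count = EXC_TYPES_COUNT;
 *	mach_port_t old_ports[EXC_TYPES_COUNT];
 *	exception_behavior_t old_behaviors[EXC_TYPES_COUNT];
 *	thread_state_flavor_t old_flavors[EXC_TYPES_COUNT];
 *
 *	kern_return_t kr = task_swap_exception_ports(mach_task_self(),
 *	    EXC_MASK_BAD_INSTRUCTION, new_port,
 *	    EXCEPTION_DEFAULT | MACH_EXCEPTION_CODES, THREAD_STATE_NONE,
 *	    masks, &count, old_ports, old_behaviors, old_flavors);
 */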
4162
4163/*
4164 * Routine: thread/task_get_exception_ports [kernel call]
4165 * Purpose:
4166 * Clones a send right for each of the thread/task's exception
4167 * ports specified in the mask and returns the behavior
4168 * and flavor of each port.
4169 *
4170 * Returns up to [in] CountCnt elements.
4171 *
4172 * Conditions:
4173 * Nothing locked.
4174 * Returns:
4175 * KERN_SUCCESS Extracted a send right.
4176 * KERN_INVALID_ARGUMENT The thread is null,
4177 * Invalid special port,
4178 * Illegal mask bit set.
4179 * KERN_FAILURE The thread is dead.
4180 */
4181static kern_return_t
4182thread_get_exception_ports_internal(
4183 thread_t thread,
4184 exception_mask_t exception_mask,
4185 exception_mask_array_t masks,
4186 mach_msg_type_number_t *CountCnt,
4187 exception_port_info_array_t ports_info,
4188 exception_port_array_t ports,
4189 exception_behavior_array_t behaviors,
4190 thread_state_flavor_array_t flavors)
4191{
4192 unsigned int count;
4193 boolean_t info_only = (ports_info != NULL);
4194 boolean_t dbg_ok = TRUE;
4195 ipc_port_t port_ptrs[EXC_TYPES_COUNT]; /* pointers only, does not hold right */
4196
4197 if (thread == THREAD_NULL) {
4198 return KERN_INVALID_ARGUMENT;
4199 }
4200
4201 if (exception_mask & ~EXC_MASK_VALID) {
4202 return KERN_INVALID_ARGUMENT;
4203 }
4204
4205 if (!info_only && !ports) {
4206 return KERN_INVALID_ARGUMENT;
4207 }
4208
4209#if !(DEVELOPMENT || DEBUG) && CONFIG_MACF
4210 if (info_only && mac_task_check_expose_task(kernel_task, TASK_FLAVOR_CONTROL) == 0) {
4211 dbg_ok = TRUE;
4212 } else {
4213 dbg_ok = FALSE;
4214 }
4215#endif
4216
4217 thread_mtx_lock(thread);
4218
4219 if (!thread->active) {
4220 thread_mtx_unlock(thread);
4221
4222 return KERN_FAILURE;
4223 }
4224
4225 count = 0;
4226
4227 if (thread->exc_actions == NULL) {
4228 goto done;
4229 }
4230
4231 for (int i = FIRST_EXCEPTION, j = 0; i < EXC_TYPES_COUNT; ++i) {
4232 if (exception_mask & (1 << i)) {
4233 ipc_port_t exc_port = thread->exc_actions[i].port;
4234 exception_behavior_t exc_behavior = thread->exc_actions[i].behavior;
4235 thread_state_flavor_t exc_flavor = thread->exc_actions[i].flavor;
4236
4237 for (j = 0; j < count; ++j) {
4238 /*
4239 * search for an identical entry, if found
4240 * set corresponding mask for this exception.
4241 */
4242 if (exc_port == port_ptrs[j] &&
4243 exc_behavior == behaviors[j] &&
4244 exc_flavor == flavors[j]) {
4245 masks[j] |= (1 << i);
4246 break;
4247 }
4248 }
4249
4250 if (j == count && count < *CountCnt) {
4251 masks[j] = (1 << i);
4252 port_ptrs[j] = exc_port;
4253
4254 if (info_only) {
4255 if (!dbg_ok || !IP_VALID(exc_port)) {
4256 /* avoid taking port lock if !dbg_ok */
4257 ports_info[j] = (ipc_info_port_t){ .iip_port_object = 0, .iip_receiver_object = 0 };
4258 } else {
4259 uintptr_t receiver;
4260 (void)ipc_port_get_receiver_task(exc_port, &receiver);
4261 ports_info[j].iip_port_object = (natural_t)VM_KERNEL_ADDRPERM(exc_port);
4262 ports_info[j].iip_receiver_object = receiver ? (natural_t)VM_KERNEL_ADDRPERM(receiver) : 0;
4263 }
4264 } else {
4265 ports[j] = ipc_port_copy_send(exc_port);
4266 }
4267 behaviors[j] = exc_behavior;
4268 flavors[j] = exc_flavor;
4269 ++count;
4270 }
4271 }
4272 }
4273
4274done:
4275 thread_mtx_unlock(thread);
4276
4277 *CountCnt = count;
4278
4279 return KERN_SUCCESS;
4280}
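/*
 * Note on the wrappers that follow (a descriptive summary, not normative):
 * thread_get_exception_ports() returns real send rights to the MIG caller,
 * while thread_get_exception_ports_info() reports only
 * VM_KERNEL_ADDRPERM-obfuscated port/receiver pointers for inspection
 * tools. On RELEASE kernels with CONFIG_MACF those pointers are exposed
 * only when mac_task_check_expose_task(kernel_task, TASK_FLAVOR_CONTROL)
 * succeeds; otherwise the info entries are zero-filled. Both paths funnel
 * through thread_get_exception_ports_internal() above.
 */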
4281
4282static kern_return_t
4283thread_get_exception_ports(
4284 thread_t thread,
4285 exception_mask_t exception_mask,
4286 exception_mask_array_t masks,
4287 mach_msg_type_number_t *CountCnt,
4288 exception_port_array_t ports,
4289 exception_behavior_array_t behaviors,
4290 thread_state_flavor_array_t flavors)
4291{
4292 return thread_get_exception_ports_internal(thread, exception_mask, masks, CountCnt,
4293 NULL, ports, behaviors, flavors);
4294}
4295
4296kern_return_t
4297thread_get_exception_ports_info(
4298 mach_port_t port,
4299 exception_mask_t exception_mask,
4300 exception_mask_array_t masks,
4301 mach_msg_type_number_t *CountCnt,
4302 exception_port_info_array_t ports_info,
4303 exception_behavior_array_t behaviors,
4304 thread_state_flavor_array_t flavors)
4305{
4306 kern_return_t kr;
4307
4308 thread_t thread = convert_port_to_thread_read_no_eval(port);
4309
4310 if (thread == THREAD_NULL) {
4311 return KERN_INVALID_ARGUMENT;
4312 }
4313
4314 kr = thread_get_exception_ports_internal(thread, exception_mask, masks, CountCnt,
4315 ports_info, NULL, behaviors, flavors);
4316
4317 thread_deallocate(thread);
4318 return kr;
4319}
4320
4321kern_return_t
4322thread_get_exception_ports_from_user(
4323 mach_port_t port,
4324 exception_mask_t exception_mask,
4325 exception_mask_array_t masks,
4326 mach_msg_type_number_t *CountCnt,
4327 exception_port_array_t ports,
4328 exception_behavior_array_t behaviors,
4329 thread_state_flavor_array_t flavors)
4330{
4331 kern_return_t kr;
4332
4333 thread_t thread = convert_port_to_thread_no_eval(port);
4334
4335 if (thread == THREAD_NULL) {
4336 return KERN_INVALID_ARGUMENT;
4337 }
4338
4339 kr = thread_get_exception_ports(thread, exception_mask, masks, CountCnt, ports, behaviors, flavors);
4340
4341 thread_deallocate(thread);
4342 return kr;
4343}
4344
4345static kern_return_t
4346task_get_exception_ports_internal(
4347 task_t task,
4348 exception_mask_t exception_mask,
4349 exception_mask_array_t masks,
4350 mach_msg_type_number_t *CountCnt,
4351 exception_port_info_array_t ports_info,
4352 exception_port_array_t ports,
4353 exception_behavior_array_t behaviors,
4354 thread_state_flavor_array_t flavors)
4355{
4356 unsigned int count;
4357 boolean_t info_only = (ports_info != NULL);
4358 boolean_t dbg_ok = TRUE;
4359 ipc_port_t port_ptrs[EXC_TYPES_COUNT]; /* pointers only, do not hold send rights */
4360
4361 if (task == TASK_NULL) {
4362 return KERN_INVALID_ARGUMENT;
4363 }
4364
4365 if (exception_mask & ~EXC_MASK_VALID) {
4366 return KERN_INVALID_ARGUMENT;
4367 }
4368
4369 if (!info_only && !ports) {
4370 return KERN_INVALID_ARGUMENT;
4371 }
4372
4373#if !(DEVELOPMENT || DEBUG) && CONFIG_MACF
4374 if (info_only && mac_task_check_expose_task(kernel_task, TASK_FLAVOR_CONTROL) == 0) {
4375 dbg_ok = TRUE;
4376 } else {
4377 dbg_ok = FALSE;
4378 }
4379#endif
4380
4381 itk_lock(task);
4382
4383 if (!task->ipc_active) {
4384 itk_unlock(task);
4385 return KERN_FAILURE;
4386 }
4387
4388 count = 0;
4389
4390 for (int i = FIRST_EXCEPTION, j = 0; i < EXC_TYPES_COUNT; ++i) {
4391 if (exception_mask & (1 << i)) {
4392 ipc_port_t exc_port = task->exc_actions[i].port;
4393 exception_behavior_t exc_behavior = task->exc_actions[i].behavior;
4394 thread_state_flavor_t exc_flavor = task->exc_actions[i].flavor;
4395
4396 for (j = 0; j < count; ++j) {
4397 /*
4398 * Search for an identical entry; if one is found,
4399 * set the corresponding mask bit for this exception.
4400 */
4401 if (exc_port == port_ptrs[j] &&
4402 exc_behavior == behaviors[j] &&
4403 exc_flavor == flavors[j]) {
4404 masks[j] |= (1 << i);
4405 break;
4406 }
4407 }
4408
4409 if (j == count && count < *CountCnt) {
4410 masks[j] = (1 << i);
4411 port_ptrs[j] = exc_port;
4412
4413 if (info_only) {
4414 if (!dbg_ok || !IP_VALID(exc_port)) {
4415 /* avoid taking port lock if !dbg_ok */
4416 ports_info[j] = (ipc_info_port_t){ .iip_port_object = 0, .iip_receiver_object = 0 };
4417 } else {
4418 uintptr_t receiver;
4419 (void)ipc_port_get_receiver_task(exc_port, &receiver);
4420 ports_info[j].iip_port_object = (natural_t)VM_KERNEL_ADDRPERM(exc_port);
4421 ports_info[j].iip_receiver_object = receiver ? (natural_t)VM_KERNEL_ADDRPERM(receiver) : 0;
4422 }
4423 } else {
4424 ports[j] = ipc_port_copy_send(exc_port);
4425 }
4426 behaviors[j] = exc_behavior;
4427 flavors[j] = exc_flavor;
4428 ++count;
4429 }
4430 }
4431 }
4432
4433 itk_unlock(task);
4434
4435 *CountCnt = count;
4436
4437 return KERN_SUCCESS;
4438}
4439
4440static kern_return_t
4441task_get_exception_ports(
4442 task_t task,
4443 exception_mask_t exception_mask,
4444 exception_mask_array_t masks,
4445 mach_msg_type_number_t *CountCnt,
4446 exception_port_array_t ports,
4447 exception_behavior_array_t behaviors,
4448 thread_state_flavor_array_t flavors)
4449{
4450 return task_get_exception_ports_internal(task, exception_mask, masks, CountCnt,
4451 NULL, ports, behaviors, flavors);
4452}
4453
4454kern_return_t
4455task_get_exception_ports_info(
4456 mach_port_t port,
4457 exception_mask_t exception_mask,
4458 exception_mask_array_t masks,
4459 mach_msg_type_number_t *CountCnt,
4460 exception_port_info_array_t ports_info,
4461 exception_behavior_array_t behaviors,
4462 thread_state_flavor_array_t flavors)
4463{
4464 kern_return_t kr;
4465
4466 task_t task = convert_port_to_task_read_no_eval(port);
4467
4468 if (task == TASK_NULL) {
4469 return KERN_INVALID_ARGUMENT;
4470 }
4471
4472 kr = task_get_exception_ports_internal(task, exception_mask, masks, CountCnt,
4473 ports_info, NULL, behaviors, flavors);
4474
4475 task_deallocate(task);
4476 return kr;
4477}
4478
4479kern_return_t
4480task_get_exception_ports_from_user(
4481 mach_port_t port,
4482 exception_mask_t exception_mask,
4483 exception_mask_array_t masks,
4484 mach_msg_type_number_t *CountCnt,
4485 exception_port_array_t ports,
4486 exception_behavior_array_t behaviors,
4487 thread_state_flavor_array_t flavors)
4488{
4489 kern_return_t kr;
4490
4491 task_t task = convert_port_to_task_no_eval(port);
4492
4493 if (task == TASK_NULL) {
4494 return KERN_INVALID_ARGUMENT;
4495 }
4496
4497 kr = task_get_exception_ports(task, exception_mask, masks, CountCnt, ports, behaviors, flavors);
4498
4499 task_deallocate(task);
4500 return kr;
4501}
4502
4503/*
4504 * Routine: ipc_thread_port_unpin
4505 * Purpose:
4506 * Called on the thread port when the thread is
4507 * terminating so that the last ref can be deallocated
4508 * without a guard exception.
4509 * Conditions:
4510 * Thread mutex lock is held.
4511 * check_bit should be set to true only when the port is expected
4512 * to have the ip_pinned bit set.
4513 */
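/*
 * Hypothetical call-site sketch (illustration only; the real callers
 * live on the thread-termination path and are not shown in this file):
 *
 *	thread_mtx_lock(thread);
 *	// "port" here stands for the thread's pinned control port
 *	ipc_thread_port_unpin(port, true);
 *	thread_mtx_unlock(thread);
 */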
4514void
4515ipc_thread_port_unpin(
4516 ipc_port_t port,
4517 __unused bool check_bit)
4518{
4519 if (port == IP_NULL) {
4520 return;
4521 }
4522 ip_lock(port);
4523 imq_lock(&port->ip_messages);
4524#if DEVELOPMENT || DEBUG
4525 if (pinned_control_port_enabled && check_bit) {
4526  assert(ip_is_control(port)); /* remove once we get rid of the boot-arg */
4527 assert(port->ip_pinned == 1);
4528 }
4529#endif
4530 port->ip_pinned = 0;
4531 imq_unlock(&port->ip_messages);
4532 ip_unlock(port);
4533}