apple/xnu xnu-3248.40.184: osfmk/kern/ipc_tt.c
1 /*
2 * Copyright (c) 2000-2010 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * @OSF_COPYRIGHT@
30 */
31 /*
32 * Mach Operating System
33 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
34 * All Rights Reserved.
35 *
36 * Permission to use, copy, modify and distribute this software and its
37 * documentation is hereby granted, provided that both the copyright
38 * notice and this permission notice appear in all copies of the
39 * software, derivative works or modified versions, and any portions
40 * thereof, and that both notices appear in supporting documentation.
41 *
42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45 *
46 * Carnegie Mellon requests users of this software to return to
47 *
48 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
49 * School of Computer Science
50 * Carnegie Mellon University
51 * Pittsburgh PA 15213-3890
52 *
53 * any improvements or extensions that they make and grant Carnegie Mellon
54 * the rights to redistribute these changes.
55 */
56 /*
57 * NOTICE: This file was modified by McAfee Research in 2004 to introduce
58 * support for mandatory and extensible security protections. This notice
59 * is included in support of clause 2.2 (b) of the Apple Public License,
60 * Version 2.0.
61 */
62 /*
63 */
64
65 /*
66 * File: ipc_tt.c
67 * Purpose:
68 * Task and thread related IPC functions.
69 */
70
71 #include <mach/mach_types.h>
72 #include <mach/boolean.h>
73 #include <mach/kern_return.h>
74 #include <mach/mach_param.h>
75 #include <mach/task_special_ports.h>
76 #include <mach/thread_special_ports.h>
77 #include <mach/thread_status.h>
78 #include <mach/exception_types.h>
79 #include <mach/memory_object_types.h>
80 #include <mach/mach_traps.h>
81 #include <mach/task_server.h>
82 #include <mach/thread_act_server.h>
83 #include <mach/mach_host_server.h>
84 #include <mach/host_priv_server.h>
85 #include <mach/vm_map_server.h>
86
87 #include <kern/kern_types.h>
88 #include <kern/host.h>
89 #include <kern/ipc_kobject.h>
90 #include <kern/ipc_tt.h>
91 #include <kern/kalloc.h>
92 #include <kern/thread.h>
93 #include <kern/misc_protos.h>
94
95 #include <vm/vm_map.h>
96 #include <vm/vm_pageout.h>
97 #include <vm/vm_protos.h>
98
99 #include <security/mac_mach_internal.h>
100
101 /* forward declarations */
102 task_t convert_port_to_locked_task(ipc_port_t port);
103
104
105 /*
106 * Routine: ipc_task_init
107 * Purpose:
108 * Initialize a task's IPC state.
109 *
110 * If non-null, some state will be inherited from the parent.
111 * The parent must be appropriately initialized.
112 * Conditions:
113 * Nothing locked.
114 */
115
116 void
117 ipc_task_init(
118 task_t task,
119 task_t parent)
120 {
121 ipc_space_t space;
122 ipc_port_t kport;
123 ipc_port_t nport;
124 kern_return_t kr;
125 int i;
126
127
128 kr = ipc_space_create(&ipc_table_entries[0], &space);
129 if (kr != KERN_SUCCESS)
130 panic("ipc_task_init");
131
132 space->is_task = task;
133
134 kport = ipc_port_alloc_kernel();
135 if (kport == IP_NULL)
136 panic("ipc_task_init");
137
138 nport = ipc_port_alloc_kernel();
139 if (nport == IP_NULL)
140 panic("ipc_task_init");
141
142 itk_lock_init(task);
143 task->itk_self = kport;
144 task->itk_nself = nport;
145 task->itk_resume = IP_NULL; /* Lazily allocated on-demand */
146 task->itk_sself = ipc_port_make_send(kport);
147 task->itk_debug_control = IP_NULL;
148 task->itk_space = space;
149
150 if (parent == TASK_NULL) {
151 ipc_port_t port;
152
153 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
154 task->exc_actions[i].port = IP_NULL;
155 }/* for */
156
157 kr = host_get_host_port(host_priv_self(), &port);
158 assert(kr == KERN_SUCCESS);
159 task->itk_host = port;
160
161 task->itk_bootstrap = IP_NULL;
162 task->itk_seatbelt = IP_NULL;
163 task->itk_gssd = IP_NULL;
164 task->itk_task_access = IP_NULL;
165
166 for (i = 0; i < TASK_PORT_REGISTER_MAX; i++)
167 task->itk_registered[i] = IP_NULL;
168 } else {
169 itk_lock(parent);
170 assert(parent->itk_self != IP_NULL);
171
172 /* inherit registered ports */
173
174 for (i = 0; i < TASK_PORT_REGISTER_MAX; i++)
175 task->itk_registered[i] =
176 ipc_port_copy_send(parent->itk_registered[i]);
177
178 /* inherit exception and bootstrap ports */
179
180 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
181 task->exc_actions[i].port =
182 ipc_port_copy_send(parent->exc_actions[i].port);
183 task->exc_actions[i].flavor =
184 parent->exc_actions[i].flavor;
185 task->exc_actions[i].behavior =
186 parent->exc_actions[i].behavior;
187 task->exc_actions[i].privileged =
188 parent->exc_actions[i].privileged;
189 }/* for */
190 task->itk_host =
191 ipc_port_copy_send(parent->itk_host);
192
193 task->itk_bootstrap =
194 ipc_port_copy_send(parent->itk_bootstrap);
195
196 task->itk_seatbelt =
197 ipc_port_copy_send(parent->itk_seatbelt);
198
199 task->itk_gssd =
200 ipc_port_copy_send(parent->itk_gssd);
201
202 task->itk_task_access =
203 ipc_port_copy_send(parent->itk_task_access);
204
205 itk_unlock(parent);
206 }
207 }
208
209 /*
210 * Routine: ipc_task_enable
211 * Purpose:
212 * Enable a task for IPC access.
213 * Conditions:
214 * Nothing locked.
215 */
216
217 void
218 ipc_task_enable(
219 task_t task)
220 {
221 ipc_port_t kport;
222 ipc_port_t nport;
223
224 itk_lock(task);
225 kport = task->itk_self;
226 if (kport != IP_NULL)
227 ipc_kobject_set(kport, (ipc_kobject_t) task, IKOT_TASK);
228 nport = task->itk_nself;
229 if (nport != IP_NULL)
230 ipc_kobject_set(nport, (ipc_kobject_t) task, IKOT_TASK_NAME);
231 itk_unlock(task);
232 }
233
234 /*
235 * Routine: ipc_task_disable
236 * Purpose:
237 * Disable IPC access to a task.
238 * Conditions:
239 * Nothing locked.
240 */
241
242 void
243 ipc_task_disable(
244 task_t task)
245 {
246 ipc_port_t kport;
247 ipc_port_t nport;
248 ipc_port_t rport;
249
250 itk_lock(task);
251 kport = task->itk_self;
252 if (kport != IP_NULL)
253 ipc_kobject_set(kport, IKO_NULL, IKOT_NONE);
254 nport = task->itk_nself;
255 if (nport != IP_NULL)
256 ipc_kobject_set(nport, IKO_NULL, IKOT_NONE);
257
258 rport = task->itk_resume;
259 if (rport != IP_NULL) {
260 /*
261 * From this point onwards this task is no longer accepting
262 * resumptions.
263 *
264 * There are still outstanding suspensions on this task,
265 * even as it is being torn down. Disconnect the task
266 * from the rport, thereby "orphaning" the rport. The rport
267 * itself will go away only when the last suspension holder
268 * destroys his SO right to it -- when he either
269 * exits, or tries to actually use that last SO right to
270 * resume this (now non-existent) task.
271 */
272 ipc_kobject_set(rport, IKO_NULL, IKOT_NONE);
273 }
274 itk_unlock(task);
275 }
276
277 /*
278 * Routine: ipc_task_terminate
279 * Purpose:
280 * Clean up and destroy a task's IPC state.
281 * Conditions:
282 * Nothing locked. The task must be suspended.
283 * (Or the current thread must be in the task.)
284 */
285
286 void
287 ipc_task_terminate(
288 task_t task)
289 {
290 ipc_port_t kport;
291 ipc_port_t nport;
292 ipc_port_t rport;
293 int i;
294
295 itk_lock(task);
296 kport = task->itk_self;
297
298 if (kport == IP_NULL) {
299 /* the task is already terminated (can this happen?) */
300 itk_unlock(task);
301 return;
302 }
303 task->itk_self = IP_NULL;
304
305 nport = task->itk_nself;
306 assert(nport != IP_NULL);
307 task->itk_nself = IP_NULL;
308
309 rport = task->itk_resume;
310 task->itk_resume = IP_NULL;
311
312 itk_unlock(task);
313
314 /* release the naked send rights */
315
316 if (IP_VALID(task->itk_sself))
317 ipc_port_release_send(task->itk_sself);
318
319 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
320 if (IP_VALID(task->exc_actions[i].port)) {
321 ipc_port_release_send(task->exc_actions[i].port);
322 }
323 }
324
325 if (IP_VALID(task->itk_host))
326 ipc_port_release_send(task->itk_host);
327
328 if (IP_VALID(task->itk_bootstrap))
329 ipc_port_release_send(task->itk_bootstrap);
330
331 if (IP_VALID(task->itk_seatbelt))
332 ipc_port_release_send(task->itk_seatbelt);
333
334 if (IP_VALID(task->itk_gssd))
335 ipc_port_release_send(task->itk_gssd);
336
337 if (IP_VALID(task->itk_task_access))
338 ipc_port_release_send(task->itk_task_access);
339
340 if (IP_VALID(task->itk_debug_control))
341 ipc_port_release_send(task->itk_debug_control);
342
343 for (i = 0; i < TASK_PORT_REGISTER_MAX; i++)
344 if (IP_VALID(task->itk_registered[i]))
345 ipc_port_release_send(task->itk_registered[i]);
346
347 /* destroy the kernel ports */
348 ipc_port_dealloc_kernel(kport);
349 ipc_port_dealloc_kernel(nport);
350 if (rport != IP_NULL)
351 ipc_port_dealloc_kernel(rport);
352
353 itk_lock_destroy(task);
354 }
355
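/*
 * Illustrative sketch (editor's addition, not part of the original
 * source): the typical ordering in which the task-management code is
 * expected to drive the lifecycle hooks defined above.  Caller names
 * are deliberately omitted; only the ordering is asserted here.
 *
 *	// creation:
 *	ipc_task_init(new_task, parent_task);	// space, kernel ports, inherited rights
 *	// ... remainder of task setup ...
 *	ipc_task_enable(new_task);		// attach task to its ports; IPC goes live
 *
 *	// teardown:
 *	ipc_task_disable(dying_task);		// detach ports, orphan any resume port
 *	ipc_task_terminate(dying_task);		// release send rights, destroy kernel ports
 */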
356 /*
357 * Routine: ipc_task_reset
358 * Purpose:
359 * Reset a task's IPC state to protect it when
360 * it enters an elevated security context. The
361 * task name port can remain the same - since
362 * it represents no specific privilege.
363 * Conditions:
364 * Nothing locked. The task must be suspended.
365 * (Or the current thread must be in the task.)
366 */
367
368 void
369 ipc_task_reset(
370 task_t task)
371 {
372 ipc_port_t old_kport, new_kport;
373 ipc_port_t old_sself;
374 ipc_port_t old_exc_actions[EXC_TYPES_COUNT];
375 int i;
376
377 new_kport = ipc_port_alloc_kernel();
378 if (new_kport == IP_NULL)
379 panic("ipc_task_reset");
380
381 itk_lock(task);
382
383 old_kport = task->itk_self;
384
385 if (old_kport == IP_NULL) {
386 /* the task is already terminated (can this happen?) */
387 itk_unlock(task);
388 ipc_port_dealloc_kernel(new_kport);
389 return;
390 }
391
392 task->itk_self = new_kport;
393 old_sself = task->itk_sself;
394 task->itk_sself = ipc_port_make_send(new_kport);
395 ipc_kobject_set(old_kport, IKO_NULL, IKOT_NONE);
396 ipc_kobject_set(new_kport, (ipc_kobject_t) task, IKOT_TASK);
397
398 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
399 old_exc_actions[i] = IP_NULL;
400
401 if (i == EXC_CORPSE_NOTIFY && task_corpse_pending_report(task)) {
402 continue;
403 }
404
405 if (!task->exc_actions[i].privileged) {
406 old_exc_actions[i] = task->exc_actions[i].port;
407 task->exc_actions[i].port = IP_NULL;
408 }
409 }/* for */
410
411 if (IP_VALID(task->itk_debug_control)) {
412 ipc_port_release_send(task->itk_debug_control);
413 }
414 task->itk_debug_control = IP_NULL;
415
416 itk_unlock(task);
417
418 /* release the naked send rights */
419
420 if (IP_VALID(old_sself))
421 ipc_port_release_send(old_sself);
422
423 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
424 if (IP_VALID(old_exc_actions[i])) {
425 ipc_port_release_send(old_exc_actions[i]);
426 }
427 }/* for */
428
429 /* destroy the kernel port */
430 ipc_port_dealloc_kernel(old_kport);
431 }
432
433 /*
434 * Routine: ipc_thread_init
435 * Purpose:
436 * Initialize a thread's IPC state.
437 * Conditions:
438 * Nothing locked.
439 */
440
441 void
442 ipc_thread_init(
443 thread_t thread)
444 {
445 ipc_port_t kport;
446
447 kport = ipc_port_alloc_kernel();
448 if (kport == IP_NULL)
449 panic("ipc_thread_init");
450
451 thread->ith_self = kport;
452 thread->ith_sself = ipc_port_make_send(kport);
453 thread->exc_actions = NULL;
454
455 ipc_kobject_set(kport, (ipc_kobject_t)thread, IKOT_THREAD);
456
457 #if IMPORTANCE_INHERITANCE
458 thread->ith_assertions = 0;
459 #endif
460
461 ipc_kmsg_queue_init(&thread->ith_messages);
462
463 thread->ith_rpc_reply = IP_NULL;
464 }
465
466 void
467 ipc_thread_init_exc_actions(
468 thread_t thread)
469 {
470 assert(thread->exc_actions == NULL);
471
472 thread->exc_actions = kalloc(sizeof(struct exception_action) * EXC_TYPES_COUNT);
473 bzero(thread->exc_actions, sizeof(struct exception_action) * EXC_TYPES_COUNT);
474 }
475
476 void
477 ipc_thread_destroy_exc_actions(
478 thread_t thread)
479 {
480 if (thread->exc_actions != NULL) {
481 kfree(thread->exc_actions,
482 sizeof(struct exception_action) * EXC_TYPES_COUNT);
483 thread->exc_actions = NULL;
484 }
485 }
486
487 void
488 ipc_thread_disable(
489 thread_t thread)
490 {
491 ipc_port_t kport = thread->ith_self;
492
493 if (kport != IP_NULL)
494 ipc_kobject_set(kport, IKO_NULL, IKOT_NONE);
495 }
496
497 /*
498 * Routine: ipc_thread_terminate
499 * Purpose:
500 * Clean up and destroy a thread's IPC state.
501 * Conditions:
502 * Nothing locked.
503 */
504
505 void
506 ipc_thread_terminate(
507 thread_t thread)
508 {
509 ipc_port_t kport = thread->ith_self;
510
511 if (kport != IP_NULL) {
512 int i;
513
514 if (IP_VALID(thread->ith_sself))
515 ipc_port_release_send(thread->ith_sself);
516
517 thread->ith_sself = thread->ith_self = IP_NULL;
518
519 if (thread->exc_actions != NULL) {
520 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
521 if (IP_VALID(thread->exc_actions[i].port))
522 ipc_port_release_send(thread->exc_actions[i].port);
523 }
524 ipc_thread_destroy_exc_actions(thread);
525 }
526
527 ipc_port_dealloc_kernel(kport);
528 }
529
530 #if IMPORTANCE_INHERITANCE
531 assert(thread->ith_assertions == 0);
532 #endif
533
534 assert(ipc_kmsg_queue_empty(&thread->ith_messages));
535
536 if (thread->ith_rpc_reply != IP_NULL)
537 ipc_port_dealloc_reply(thread->ith_rpc_reply);
538
539 thread->ith_rpc_reply = IP_NULL;
540 }
541
542 /*
543 * Routine: ipc_thread_reset
544 * Purpose:
545 * Reset the IPC state for a given Mach thread when
546 * its task enters an elevated security context.
547 * Both the thread port and its exception ports have
548 * to be reset. Its RPC reply port cannot have any
549 * rights outstanding, so it should be fine.
550 * Conditions:
551 * Nothing locked.
552 */
553
554 void
555 ipc_thread_reset(
556 thread_t thread)
557 {
558 ipc_port_t old_kport, new_kport;
559 ipc_port_t old_sself;
560 ipc_port_t old_exc_actions[EXC_TYPES_COUNT];
561 boolean_t has_old_exc_actions = FALSE;
562 int i;
563
564 new_kport = ipc_port_alloc_kernel();
565 if (new_kport == IP_NULL)
566 panic("ipc_thread_reset");
567
568 thread_mtx_lock(thread);
569
570 old_kport = thread->ith_self;
571
572 if (old_kport == IP_NULL && thread->inspection == FALSE) {
573 /* the thread is already terminated (can this happen?) */
574 thread_mtx_unlock(thread);
575 ipc_port_dealloc_kernel(new_kport);
576 return;
577 }
578
579 thread->ith_self = new_kport;
580 old_sself = thread->ith_sself;
581 thread->ith_sself = ipc_port_make_send(new_kport);
582 if (old_kport != IP_NULL) {
583 ipc_kobject_set(old_kport, IKO_NULL, IKOT_NONE);
584 }
585 ipc_kobject_set(new_kport, (ipc_kobject_t) thread, IKOT_THREAD);
586
587 /*
588 * Only ports that were set by root-owned processes
589 * (privileged ports) should survive
590 */
591 if (thread->exc_actions != NULL) {
592 has_old_exc_actions = TRUE;
593 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
594 if (thread->exc_actions[i].privileged) {
595 old_exc_actions[i] = IP_NULL;
596 } else {
597 old_exc_actions[i] = thread->exc_actions[i].port;
598 thread->exc_actions[i].port = IP_NULL;
599 }
600 }
601 }
602
603 thread_mtx_unlock(thread);
604
605 /* release the naked send rights */
606
607 if (IP_VALID(old_sself))
608 ipc_port_release_send(old_sself);
609
610 if (has_old_exc_actions) {
611 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
612 ipc_port_release_send(old_exc_actions[i]);
613 }
614 }
615
616 /* destroy the kernel port */
617 if (old_kport != IP_NULL) {
618 ipc_port_dealloc_kernel(old_kport);
619 }
620 }
621
622 /*
623 * Routine: retrieve_task_self_fast
624 * Purpose:
625 * Optimized version of retrieve_task_self,
626 * that only works for the current task.
627 *
628 * Return a send right (possibly null/dead)
629 * for the task's user-visible self port.
630 * Conditions:
631 * Nothing locked.
632 */
633
634 ipc_port_t
635 retrieve_task_self_fast(
636 register task_t task)
637 {
638 register ipc_port_t port;
639
640 assert(task == current_task());
641
642 itk_lock(task);
643 assert(task->itk_self != IP_NULL);
644
645 if ((port = task->itk_sself) == task->itk_self) {
646 /* no interposing */
647
648 ip_lock(port);
649 assert(ip_active(port));
650 ip_reference(port);
651 port->ip_srights++;
652 ip_unlock(port);
653 } else
654 port = ipc_port_copy_send(port);
655 itk_unlock(task);
656
657 return port;
658 }
659
660 /*
661 * Routine: retrieve_thread_self_fast
662 * Purpose:
663 * Return a send right (possibly null/dead)
664 * for the thread's user-visible self port.
665 *
666 * Only works for the current thread.
667 *
668 * Conditions:
669 * Nothing locked.
670 */
671
672 ipc_port_t
673 retrieve_thread_self_fast(
674 thread_t thread)
675 {
676 register ipc_port_t port;
677
678 assert(thread == current_thread());
679
680 thread_mtx_lock(thread);
681
682 assert(thread->ith_self != IP_NULL);
683
684 if ((port = thread->ith_sself) == thread->ith_self) {
685 /* no interposing */
686
687 ip_lock(port);
688 assert(ip_active(port));
689 ip_reference(port);
690 port->ip_srights++;
691 ip_unlock(port);
692 }
693 else
694 port = ipc_port_copy_send(port);
695
696 thread_mtx_unlock(thread);
697
698 return port;
699 }
700
701 /*
702 * Routine: task_self_trap [mach trap]
703 * Purpose:
704 * Give the caller send rights for his own task port.
705 * Conditions:
706 * Nothing locked.
707 * Returns:
708 * MACH_PORT_NULL if there are any resource failures
709 * or other errors.
710 */
711
712 mach_port_name_t
713 task_self_trap(
714 __unused struct task_self_trap_args *args)
715 {
716 task_t task = current_task();
717 ipc_port_t sright;
718 mach_port_name_t name;
719
720 sright = retrieve_task_self_fast(task);
721 name = ipc_port_copyout_send(sright, task->itk_space);
722 return name;
723 }
724
725 /*
726 * Routine: thread_self_trap [mach trap]
727 * Purpose:
728 * Give the caller send rights for his own thread port.
729 * Conditions:
730 * Nothing locked.
731 * Returns:
732 * MACH_PORT_NULL if there are any resource failures
733 * or other errors.
734 */
735
736 mach_port_name_t
737 thread_self_trap(
738 __unused struct thread_self_trap_args *args)
739 {
740 thread_t thread = current_thread();
741 task_t task = thread->task;
742 ipc_port_t sright;
743 mach_port_name_t name;
744
745 sright = retrieve_thread_self_fast(thread);
746 name = ipc_port_copyout_send(sright, task->itk_space);
747 return name;
748
749 }
750
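/*
 * Example (editor's addition, not part of the original source): from
 * user space these traps are normally reached through mach_task_self()
 * and mach_thread_self().  mach_task_self() returns a cached name, while
 * mach_thread_self() hands back a new send right that the caller should
 * deallocate.
 *
 *	#include <mach/mach.h>
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		mach_port_t task = mach_task_self();		// cached; do not deallocate
 *		mach_port_t thread = mach_thread_self();	// fresh send right
 *
 *		printf("task port %u, thread port %u\n",
 *		    (unsigned)task, (unsigned)thread);
 *
 *		mach_port_deallocate(mach_task_self(), thread);
 *		return 0;
 *	}
 */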
751 /*
752 * Routine: mach_reply_port [mach trap]
753 * Purpose:
754 * Allocate a port for the caller.
755 * Conditions:
756 * Nothing locked.
757 * Returns:
758 * MACH_PORT_NULL if there are any resource failures
759 * or other errors.
760 */
761
762 mach_port_name_t
763 mach_reply_port(
764 __unused struct mach_reply_port_args *args)
765 {
766 ipc_port_t port;
767 mach_port_name_t name;
768 kern_return_t kr;
769
770 kr = ipc_port_alloc(current_task()->itk_space, &name, &port);
771 if (kr == KERN_SUCCESS)
772 ip_unlock(port);
773 else
774 name = MACH_PORT_NULL;
775 return name;
776 }
777
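/*
 * Example (editor's addition, not part of the original source): a reply
 * port is simply a receive right in the caller's space; MIG-generated
 * stubs normally obtain one through this trap.  The sketch below shows
 * the equivalent effect done explicitly with mach_port_allocate().
 *
 *	#include <mach/mach.h>
 *
 *	mach_port_t reply = MACH_PORT_NULL;
 *	kern_return_t kr;
 *
 *	kr = mach_port_allocate(mach_task_self(),
 *	    MACH_PORT_RIGHT_RECEIVE, &reply);
 *	if (kr != KERN_SUCCESS)
 *		reply = MACH_PORT_NULL;		// mirror the trap's failure result
 */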
778 /*
779 * Routine: thread_get_special_port [kernel call]
780 * Purpose:
781 * Clones a send right for one of the thread's
782 * special ports.
783 * Conditions:
784 * Nothing locked.
785 * Returns:
786 * KERN_SUCCESS Extracted a send right.
787 * KERN_INVALID_ARGUMENT The thread is null.
788 * KERN_FAILURE The thread is dead.
789 * KERN_INVALID_ARGUMENT Invalid special port.
790 */
791
792 kern_return_t
793 thread_get_special_port(
794 thread_t thread,
795 int which,
796 ipc_port_t *portp)
797 {
798 kern_return_t result = KERN_SUCCESS;
799 ipc_port_t *whichp;
800
801 if (thread == THREAD_NULL)
802 return (KERN_INVALID_ARGUMENT);
803
804 switch (which) {
805
806 case THREAD_KERNEL_PORT:
807 whichp = &thread->ith_sself;
808 break;
809
810 default:
811 return (KERN_INVALID_ARGUMENT);
812 }
813
814 thread_mtx_lock(thread);
815
816 if (thread->active)
817 *portp = ipc_port_copy_send(*whichp);
818 else
819 result = KERN_FAILURE;
820
821 thread_mtx_unlock(thread);
822
823 return (result);
824 }
825
826 /*
827 * Routine: thread_set_special_port [kernel call]
828 * Purpose:
829 * Changes one of the thread's special ports,
830 * setting it to the supplied send right.
831 * Conditions:
832 * Nothing locked. If successful, consumes
833 * the supplied send right.
834 * Returns:
835 * KERN_SUCCESS Changed the special port.
836 * KERN_INVALID_ARGUMENT The thread is null.
837 * KERN_FAILURE The thread is dead.
838 * KERN_INVALID_ARGUMENT Invalid special port.
839 */
840
841 kern_return_t
842 thread_set_special_port(
843 thread_t thread,
844 int which,
845 ipc_port_t port)
846 {
847 kern_return_t result = KERN_SUCCESS;
848 ipc_port_t *whichp, old = IP_NULL;
849
850 if (thread == THREAD_NULL)
851 return (KERN_INVALID_ARGUMENT);
852
853 switch (which) {
854
855 case THREAD_KERNEL_PORT:
856 whichp = &thread->ith_sself;
857 break;
858
859 default:
860 return (KERN_INVALID_ARGUMENT);
861 }
862
863 thread_mtx_lock(thread);
864
865 if (thread->active) {
866 old = *whichp;
867 *whichp = port;
868 }
869 else
870 result = KERN_FAILURE;
871
872 thread_mtx_unlock(thread);
873
874 if (IP_VALID(old))
875 ipc_port_release_send(old);
876
877 return (result);
878 }
879
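/*
 * Example (editor's addition, not part of the original source): querying
 * the one thread special port handled above.  The returned value is a
 * copied send right that the caller owns and should deallocate.
 *
 *	#include <mach/mach.h>
 *
 *	thread_t self = mach_thread_self();
 *	mach_port_t kernel_port = MACH_PORT_NULL;
 *	kern_return_t kr;
 *
 *	kr = thread_get_special_port(self, THREAD_KERNEL_PORT, &kernel_port);
 *	if (kr == KERN_SUCCESS) {
 *		// ... use kernel_port ...
 *		mach_port_deallocate(mach_task_self(), kernel_port);
 *	}
 *	mach_port_deallocate(mach_task_self(), self);
 */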
880 /*
881 * Routine: task_get_special_port [kernel call]
882 * Purpose:
883 * Clones a send right for one of the task's
884 * special ports.
885 * Conditions:
886 * Nothing locked.
887 * Returns:
888 * KERN_SUCCESS Extracted a send right.
889 * KERN_INVALID_ARGUMENT The task is null.
890 * KERN_FAILURE The task/space is dead.
891 * KERN_INVALID_ARGUMENT Invalid special port.
892 */
893
894 kern_return_t
895 task_get_special_port(
896 task_t task,
897 int which,
898 ipc_port_t *portp)
899 {
900 ipc_port_t port;
901
902 if (task == TASK_NULL)
903 return KERN_INVALID_ARGUMENT;
904
905 itk_lock(task);
906 if (task->itk_self == IP_NULL) {
907 itk_unlock(task);
908 return KERN_FAILURE;
909 }
910
911 switch (which) {
912 case TASK_KERNEL_PORT:
913 port = ipc_port_copy_send(task->itk_sself);
914 break;
915
916 case TASK_NAME_PORT:
917 port = ipc_port_make_send(task->itk_nself);
918 break;
919
920 case TASK_HOST_PORT:
921 port = ipc_port_copy_send(task->itk_host);
922 break;
923
924 case TASK_BOOTSTRAP_PORT:
925 port = ipc_port_copy_send(task->itk_bootstrap);
926 break;
927
928 case TASK_SEATBELT_PORT:
929 port = ipc_port_copy_send(task->itk_seatbelt);
930 break;
931
932 case TASK_ACCESS_PORT:
933 port = ipc_port_copy_send(task->itk_task_access);
934 break;
935
936 case TASK_DEBUG_CONTROL_PORT:
937 port = ipc_port_copy_send(task->itk_debug_control);
938 break;
939
940 default:
941 itk_unlock(task);
942 return KERN_INVALID_ARGUMENT;
943 }
944 itk_unlock(task);
945
946 *portp = port;
947 return KERN_SUCCESS;
948 }
949
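/*
 * Example (editor's addition, not part of the original source): cloning
 * the bootstrap special port of the calling task, which is essentially
 * what the task_get_bootstrap_port() convenience wrapper does.
 *
 *	#include <mach/mach.h>
 *
 *	mach_port_t bootstrap = MACH_PORT_NULL;
 *	kern_return_t kr;
 *
 *	kr = task_get_special_port(mach_task_self(),
 *	    TASK_BOOTSTRAP_PORT, &bootstrap);
 *	if (kr == KERN_SUCCESS && MACH_PORT_VALID(bootstrap)) {
 *		// ... talk to the bootstrap server ...
 *		mach_port_deallocate(mach_task_self(), bootstrap);
 *	}
 */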
950 /*
951 * Routine: task_set_special_port [kernel call]
952 * Purpose:
953 * Changes one of the task's special ports,
954 * setting it to the supplied send right.
955 * Conditions:
956 * Nothing locked. If successful, consumes
957 * the supplied send right.
958 * Returns:
959 * KERN_SUCCESS Changed the special port.
960 * KERN_INVALID_ARGUMENT The task is null.
961 * KERN_FAILURE The task/space is dead.
962 * KERN_INVALID_ARGUMENT Invalid special port.
963 * KERN_NO_ACCESS Attempted overwrite of seatbelt port.
964 */
965
966 kern_return_t
967 task_set_special_port(
968 task_t task,
969 int which,
970 ipc_port_t port)
971 {
972 ipc_port_t *whichp;
973 ipc_port_t old;
974
975 if (task == TASK_NULL)
976 return KERN_INVALID_ARGUMENT;
977
978 switch (which) {
979 case TASK_KERNEL_PORT:
980 whichp = &task->itk_sself;
981 break;
982
983 case TASK_HOST_PORT:
984 whichp = &task->itk_host;
985 break;
986
987 case TASK_BOOTSTRAP_PORT:
988 whichp = &task->itk_bootstrap;
989 break;
990
991 case TASK_SEATBELT_PORT:
992 whichp = &task->itk_seatbelt;
993 break;
994
995 case TASK_ACCESS_PORT:
996 whichp = &task->itk_task_access;
997 break;
998
999 case TASK_DEBUG_CONTROL_PORT:
1000 whichp = &task->itk_debug_control;
1001 break;
1002
1003
1004 default:
1005 return KERN_INVALID_ARGUMENT;
1006 }/* switch */
1007
1008 itk_lock(task);
1009 if (task->itk_self == IP_NULL) {
1010 itk_unlock(task);
1011 return KERN_FAILURE;
1012 }
1013
1014 /* do not allow overwrite of seatbelt or task access ports */
1015 if ((TASK_SEATBELT_PORT == which || TASK_ACCESS_PORT == which)
1016 && IP_VALID(*whichp)) {
1017 itk_unlock(task);
1018 return KERN_NO_ACCESS;
1019 }
1020
1021 old = *whichp;
1022 *whichp = port;
1023 itk_unlock(task);
1024
1025 if (IP_VALID(old))
1026 ipc_port_release_send(old);
1027 return KERN_SUCCESS;
1028 }
1029
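/*
 * Example (editor's addition, not part of the original source): pointing
 * the calling task at a different bootstrap port.  Note the rule
 * enforced above: once a seatbelt or task access port is set, attempts
 * to replace it fail with KERN_NO_ACCESS.
 *
 *	#include <mach/mach.h>
 *
 *	// 'new_bootstrap' is assumed to be a send right obtained elsewhere
 *	kern_return_t
 *	switch_bootstrap(mach_port_t new_bootstrap)
 *	{
 *		return task_set_special_port(mach_task_self(),
 *		    TASK_BOOTSTRAP_PORT, new_bootstrap);
 *	}
 */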
1030
1031 /*
1032 * Routine: mach_ports_register [kernel call]
1033 * Purpose:
1034 * Stash a handful of port send rights in the task.
1035 * Child tasks will inherit these rights, but they
1036 * must use mach_ports_lookup to acquire them.
1037 *
1038 * The rights are supplied in a (wired) kalloc'd segment.
1039 * Rights which aren't supplied are assumed to be null.
1040 * Conditions:
1041 * Nothing locked. If successful, consumes
1042 * the supplied rights and memory.
1043 * Returns:
1044 * KERN_SUCCESS Stashed the port rights.
1045 * KERN_INVALID_ARGUMENT The task is null.
1046 * KERN_INVALID_ARGUMENT The task is dead.
1047 * KERN_INVALID_ARGUMENT The memory param is null.
1048 * KERN_INVALID_ARGUMENT Too many port rights supplied.
1049 */
1050
1051 kern_return_t
1052 mach_ports_register(
1053 task_t task,
1054 mach_port_array_t memory,
1055 mach_msg_type_number_t portsCnt)
1056 {
1057 ipc_port_t ports[TASK_PORT_REGISTER_MAX];
1058 unsigned int i;
1059
1060 if ((task == TASK_NULL) ||
1061 (portsCnt > TASK_PORT_REGISTER_MAX) ||
1062 (portsCnt && memory == NULL))
1063 return KERN_INVALID_ARGUMENT;
1064
1065 /*
1066 * Pad the port rights with nulls.
1067 */
1068
1069 for (i = 0; i < portsCnt; i++)
1070 ports[i] = memory[i];
1071 for (; i < TASK_PORT_REGISTER_MAX; i++)
1072 ports[i] = IP_NULL;
1073
1074 itk_lock(task);
1075 if (task->itk_self == IP_NULL) {
1076 itk_unlock(task);
1077 return KERN_INVALID_ARGUMENT;
1078 }
1079
1080 /*
1081 * Replace the old send rights with the new.
1082 * Release the old rights after unlocking.
1083 */
1084
1085 for (i = 0; i < TASK_PORT_REGISTER_MAX; i++) {
1086 ipc_port_t old;
1087
1088 old = task->itk_registered[i];
1089 task->itk_registered[i] = ports[i];
1090 ports[i] = old;
1091 }
1092
1093 itk_unlock(task);
1094
1095 for (i = 0; i < TASK_PORT_REGISTER_MAX; i++)
1096 if (IP_VALID(ports[i]))
1097 ipc_port_release_send(ports[i]);
1098
1099 /*
1100 * Now that the operation is known to be successful,
1101 * we can free the memory.
1102 */
1103
1104 if (portsCnt != 0)
1105 kfree(memory,
1106 (vm_size_t) (portsCnt * sizeof(mach_port_t)));
1107
1108 return KERN_SUCCESS;
1109 }
1110
1111 /*
1112 * Routine: mach_ports_lookup [kernel call]
1113 * Purpose:
1114 * Retrieves (clones) the stashed port send rights.
1115 * Conditions:
1116 * Nothing locked. If successful, the caller gets
1117 * rights and memory.
1118 * Returns:
1119 * KERN_SUCCESS Retrieved the send rights.
1120 * KERN_INVALID_ARGUMENT The task is null.
1121 * KERN_INVALID_ARGUMENT The task is dead.
1122 * KERN_RESOURCE_SHORTAGE Couldn't allocate memory.
1123 */
1124
1125 kern_return_t
1126 mach_ports_lookup(
1127 task_t task,
1128 mach_port_array_t *portsp,
1129 mach_msg_type_number_t *portsCnt)
1130 {
1131 void *memory;
1132 vm_size_t size;
1133 ipc_port_t *ports;
1134 int i;
1135
1136 if (task == TASK_NULL)
1137 return KERN_INVALID_ARGUMENT;
1138
1139 size = (vm_size_t) (TASK_PORT_REGISTER_MAX * sizeof(ipc_port_t));
1140
1141 memory = kalloc(size);
1142 if (memory == 0)
1143 return KERN_RESOURCE_SHORTAGE;
1144
1145 itk_lock(task);
1146 if (task->itk_self == IP_NULL) {
1147 itk_unlock(task);
1148
1149 kfree(memory, size);
1150 return KERN_INVALID_ARGUMENT;
1151 }
1152
1153 ports = (ipc_port_t *) memory;
1154
1155 /*
1156 * Clone port rights. Because kalloc'd memory
1157 * is wired, we won't fault while holding the task lock.
1158 */
1159
1160 for (i = 0; i < TASK_PORT_REGISTER_MAX; i++)
1161 ports[i] = ipc_port_copy_send(task->itk_registered[i]);
1162
1163 itk_unlock(task);
1164
1165 *portsp = (mach_port_array_t) ports;
1166 *portsCnt = TASK_PORT_REGISTER_MAX;
1167 return KERN_SUCCESS;
1168 }
1169
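/*
 * Example (editor's addition, not part of the original source): reading
 * back the registered ports from user space.  The array comes back as
 * out-of-line memory in the caller's address space, so both the port
 * rights and the memory should eventually be released.
 *
 *	#include <mach/mach.h>
 *
 *	mach_port_array_t ports = NULL;
 *	mach_msg_type_number_t count = 0;
 *	unsigned int i;
 *	kern_return_t kr;
 *
 *	kr = mach_ports_lookup(mach_task_self(), &ports, &count);
 *	if (kr == KERN_SUCCESS) {
 *		for (i = 0; i < count; i++) {
 *			if (MACH_PORT_VALID(ports[i]))
 *				mach_port_deallocate(mach_task_self(), ports[i]);
 *		}
 *		vm_deallocate(mach_task_self(), (vm_address_t)ports,
 *		    count * sizeof(ports[0]));
 *	}
 */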
1170 /*
1171 * Routine: convert_port_to_locked_task
1172 * Purpose:
1173 * Internal helper routine to convert from a port to a locked
1174 * task. Used by several routines that try to convert from a
1175 * task port to a reference on some task related object.
1176 * Conditions:
1177 * Nothing locked, blocking OK.
1178 */
1179 task_t
1180 convert_port_to_locked_task(ipc_port_t port)
1181 {
1182 int try_failed_count = 0;
1183
1184 while (IP_VALID(port)) {
1185 task_t task;
1186
1187 ip_lock(port);
1188 if (!ip_active(port) || (ip_kotype(port) != IKOT_TASK)) {
1189 ip_unlock(port);
1190 return TASK_NULL;
1191 }
1192 task = (task_t) port->ip_kobject;
1193 assert(task != TASK_NULL);
1194
1195 /*
1196 * Normal lock ordering puts task_lock() before ip_lock().
1197 * Attempt out-of-order locking here.
1198 */
1199 if (task_lock_try(task)) {
1200 ip_unlock(port);
1201 return(task);
1202 }
1203 try_failed_count++;
1204
1205 ip_unlock(port);
1206 mutex_pause(try_failed_count);
1207 }
1208 return TASK_NULL;
1209 }
1210
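/*
 * Illustrative example (editor's addition, not part of the original
 * source): the same "try the out-of-order lock, back off and retry"
 * pattern used above, expressed with POSIX threads so it can be read in
 * isolation.  Here B plays the role of task_lock() and A of ip_lock().
 *
 *	#include <pthread.h>
 *	#include <unistd.h>
 *
 *	pthread_mutex_t A = PTHREAD_MUTEX_INITIALIZER;
 *	pthread_mutex_t B = PTHREAD_MUTEX_INITIALIZER;
 *
 *	// Documented order is B before A, but we already need A first.
 *	void lock_B_while_holding_A(void)
 *	{
 *		unsigned backoff_us = 1;
 *
 *		for (;;) {
 *			pthread_mutex_lock(&A);
 *			if (pthread_mutex_trylock(&B) == 0) {
 *				pthread_mutex_unlock(&A);
 *				return;		// caller now holds B
 *			}
 *			pthread_mutex_unlock(&A);
 *			usleep(backoff_us);	// crude stand-in for mutex_pause()
 *			if (backoff_us < 1024)
 *				backoff_us <<= 1;
 *		}
 *	}
 */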
1211 /*
1212 * Routine: convert_port_to_task
1213 * Purpose:
1214 * Convert from a port to a task.
1215 * Doesn't consume the port ref; produces a task ref,
1216 * which may be null.
1217 * Conditions:
1218 * Nothing locked.
1219 */
1220 task_t
1221 convert_port_to_task(
1222 ipc_port_t port)
1223 {
1224 task_t task = TASK_NULL;
1225
1226 if (IP_VALID(port)) {
1227 ip_lock(port);
1228
1229 if ( ip_active(port) &&
1230 ip_kotype(port) == IKOT_TASK ) {
1231 task = (task_t)port->ip_kobject;
1232 assert(task != TASK_NULL);
1233
1234 task_reference_internal(task);
1235 }
1236
1237 ip_unlock(port);
1238 }
1239
1240 return (task);
1241 }
1242
1243 /*
1244 * Routine: convert_port_to_task_name
1245 * Purpose:
1246 * Convert from a port to a task name.
1247 * Doesn't consume the port ref; produces a task name ref,
1248 * which may be null.
1249 * Conditions:
1250 * Nothing locked.
1251 */
1252 task_name_t
1253 convert_port_to_task_name(
1254 ipc_port_t port)
1255 {
1256 task_name_t task = TASK_NULL;
1257
1258 if (IP_VALID(port)) {
1259 ip_lock(port);
1260
1261 if ( ip_active(port) &&
1262 (ip_kotype(port) == IKOT_TASK ||
1263 ip_kotype(port) == IKOT_TASK_NAME)) {
1264 task = (task_name_t)port->ip_kobject;
1265 assert(task != TASK_NAME_NULL);
1266
1267 task_reference_internal(task);
1268 }
1269
1270 ip_unlock(port);
1271 }
1272
1273 return (task);
1274 }
1275
1276 /*
1277 * Routine: convert_port_to_task_suspension_token
1278 * Purpose:
1279 * Convert from a port to a task suspension token.
1280 * Doesn't consume the port ref; produces a suspension token ref,
1281 * which may be null.
1282 * Conditions:
1283 * Nothing locked.
1284 */
1285 task_suspension_token_t
1286 convert_port_to_task_suspension_token(
1287 ipc_port_t port)
1288 {
1289 task_suspension_token_t task = TASK_NULL;
1290
1291 if (IP_VALID(port)) {
1292 ip_lock(port);
1293
1294 if ( ip_active(port) &&
1295 ip_kotype(port) == IKOT_TASK_RESUME) {
1296 task = (task_suspension_token_t)port->ip_kobject;
1297 assert(task != TASK_NULL);
1298
1299 task_reference_internal(task);
1300 }
1301
1302 ip_unlock(port);
1303 }
1304
1305 return (task);
1306 }
1307
1308 /*
1309 * Routine: convert_port_to_space
1310 * Purpose:
1311 * Convert from a port to a space.
1312 * Doesn't consume the port ref; produces a space ref,
1313 * which may be null.
1314 * Conditions:
1315 * Nothing locked.
1316 */
1317 ipc_space_t
1318 convert_port_to_space(
1319 ipc_port_t port)
1320 {
1321 ipc_space_t space;
1322 task_t task;
1323
1324 task = convert_port_to_locked_task(port);
1325
1326 if (task == TASK_NULL)
1327 return IPC_SPACE_NULL;
1328
1329 if (!task->active) {
1330 task_unlock(task);
1331 return IPC_SPACE_NULL;
1332 }
1333
1334 space = task->itk_space;
1335 is_reference(space);
1336 task_unlock(task);
1337 return (space);
1338 }
1339
1340 /*
1341 * Routine: convert_port_to_map
1342 * Purpose:
1343 * Convert from a port to a map.
1344 * Doesn't consume the port ref; produces a map ref,
1345 * which may be null.
1346 * Conditions:
1347 * Nothing locked.
1348 */
1349
1350 vm_map_t
1351 convert_port_to_map(
1352 ipc_port_t port)
1353 {
1354 task_t task;
1355 vm_map_t map;
1356
1357 task = convert_port_to_locked_task(port);
1358
1359 if (task == TASK_NULL)
1360 return VM_MAP_NULL;
1361
1362 if (!task->active) {
1363 task_unlock(task);
1364 return VM_MAP_NULL;
1365 }
1366
1367 map = task->map;
1368 vm_map_reference_swap(map);
1369 task_unlock(task);
1370 return map;
1371 }
1372
1373
1374 /*
1375 * Routine: convert_port_to_thread
1376 * Purpose:
1377 * Convert from a port to a thread.
1378 * Doesn't consume the port ref; produces a thread ref,
1379 * which may be null.
1380 * Conditions:
1381 * Nothing locked.
1382 */
1383
1384 thread_t
1385 convert_port_to_thread(
1386 ipc_port_t port)
1387 {
1388 thread_t thread = THREAD_NULL;
1389
1390 if (IP_VALID(port)) {
1391 ip_lock(port);
1392
1393 if ( ip_active(port) &&
1394 ip_kotype(port) == IKOT_THREAD ) {
1395 thread = (thread_t)port->ip_kobject;
1396 assert(thread != THREAD_NULL);
1397
1398 thread_reference_internal(thread);
1399 }
1400
1401 ip_unlock(port);
1402 }
1403
1404 return (thread);
1405 }
1406
1407 /*
1408 * Routine: port_name_to_thread
1409 * Purpose:
1410 * Convert from a port name to an thread reference
1411 * A name of MACH_PORT_NULL is valid for the null thread.
1412 * Conditions:
1413 * Nothing locked.
1414 *
1415 * TODO: Could this be faster if it were ipc_port_translate_send based, like thread_switch?
1416 * We could avoid extra lock/unlock and extra ref operations on the port.
1417 */
1418 thread_t
1419 port_name_to_thread(
1420 mach_port_name_t name)
1421 {
1422 thread_t thread = THREAD_NULL;
1423 ipc_port_t kport;
1424
1425 if (MACH_PORT_VALID(name)) {
1426 if (ipc_object_copyin(current_space(), name,
1427 MACH_MSG_TYPE_COPY_SEND,
1428 (ipc_object_t *)&kport) != KERN_SUCCESS)
1429 return (THREAD_NULL);
1430
1431 thread = convert_port_to_thread(kport);
1432
1433 if (IP_VALID(kport))
1434 ipc_port_release_send(kport);
1435 }
1436
1437 return (thread);
1438 }
1439
1440 task_t
1441 port_name_to_task(
1442 mach_port_name_t name)
1443 {
1444 ipc_port_t kern_port;
1445 kern_return_t kr;
1446 task_t task = TASK_NULL;
1447
1448 if (MACH_PORT_VALID(name)) {
1449 kr = ipc_object_copyin(current_space(), name,
1450 MACH_MSG_TYPE_COPY_SEND,
1451 (ipc_object_t *) &kern_port);
1452 if (kr != KERN_SUCCESS)
1453 return TASK_NULL;
1454
1455 task = convert_port_to_task(kern_port);
1456
1457 if (IP_VALID(kern_port))
1458 ipc_port_release_send(kern_port);
1459 }
1460 return task;
1461 }
1462
1463 /*
1464 * Routine: convert_task_to_port
1465 * Purpose:
1466 * Convert from a task to a port.
1467 * Consumes a task ref; produces a naked send right
1468 * which may be invalid.
1469 * Conditions:
1470 * Nothing locked.
1471 */
1472
1473 ipc_port_t
1474 convert_task_to_port(
1475 task_t task)
1476 {
1477 ipc_port_t port;
1478
1479 itk_lock(task);
1480 if (task->itk_self != IP_NULL)
1481 port = ipc_port_make_send(task->itk_self);
1482 else
1483 port = IP_NULL;
1484 itk_unlock(task);
1485
1486 task_deallocate(task);
1487 return port;
1488 }
1489
1490 /*
1491 * Routine: convert_task_suspend_token_to_port
1492 * Purpose:
1493 * Convert from a task suspension token to a port.
1494 * Consumes a task suspension token ref; produces a naked send-once right
1495 * which may be invalid.
1496 * Conditions:
1497 * Nothing locked.
1498 */
1499 ipc_port_t
1500 convert_task_suspension_token_to_port(
1501 task_suspension_token_t task)
1502 {
1503 ipc_port_t port;
1504
1505 task_lock(task);
1506 if (task->active) {
1507 if (task->itk_resume == IP_NULL) {
1508 task->itk_resume = ipc_port_alloc_kernel();
1509 if (!IP_VALID(task->itk_resume)) {
1510 panic("failed to create resume port");
1511 }
1512
1513 ipc_kobject_set(task->itk_resume, (ipc_kobject_t) task, IKOT_TASK_RESUME);
1514 }
1515
1516 /*
1517 * Create a send-once right for each instance of a direct user-called
1518 * task_suspend2 call. Each time one of these send-once rights is abandoned,
1519 * the notification handler will resume the target task.
1520 */
1521 port = ipc_port_make_sonce(task->itk_resume);
1522 assert(IP_VALID(port));
1523 } else {
1524 port = IP_NULL;
1525 }
1526
1527 task_unlock(task);
1528 task_suspension_token_deallocate(task);
1529
1530 return port;
1531 }
1532
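/*
 * Example (editor's addition, not part of the original source): the
 * send-once rights produced above are what task_suspend2() returns to
 * user space as a suspension token; task_resume2() consumes the token,
 * and abandoning it (e.g. by exiting) also resumes the target, per the
 * comment above.
 *
 *	#include <mach/mach.h>
 *
 *	kern_return_t
 *	with_task_suspended(task_t target)
 *	{
 *		task_suspension_token_t token = MACH_PORT_NULL;
 *		kern_return_t kr;
 *
 *		kr = task_suspend2(target, &token);
 *		if (kr != KERN_SUCCESS)
 *			return kr;
 *
 *		// ... inspect the suspended task ...
 *
 *		return task_resume2(token);	// consumes the token
 *	}
 */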
1533
1534 /*
1535 * Routine: convert_task_name_to_port
1536 * Purpose:
1537 * Convert from a task name ref to a port.
1538 * Consumes a task name ref; produces a naked send right
1539 * which may be invalid.
1540 * Conditions:
1541 * Nothing locked.
1542 */
1543
1544 ipc_port_t
1545 convert_task_name_to_port(
1546 task_name_t task_name)
1547 {
1548 ipc_port_t port;
1549
1550 itk_lock(task_name);
1551 if (task_name->itk_nself != IP_NULL)
1552 port = ipc_port_make_send(task_name->itk_nself);
1553 else
1554 port = IP_NULL;
1555 itk_unlock(task_name);
1556
1557 task_name_deallocate(task_name);
1558 return port;
1559 }
1560
1561 /*
1562 * Routine: convert_thread_to_port
1563 * Purpose:
1564 * Convert from a thread to a port.
1565 * Consumes a thread ref; produces a naked send right
1566 * which may be invalid.
1567 * Conditions:
1568 * Nothing locked.
1569 */
1570
1571 ipc_port_t
1572 convert_thread_to_port(
1573 thread_t thread)
1574 {
1575 ipc_port_t port;
1576
1577 thread_mtx_lock(thread);
1578
1579 if (thread->ith_self != IP_NULL)
1580 port = ipc_port_make_send(thread->ith_self);
1581 else
1582 port = IP_NULL;
1583
1584 thread_mtx_unlock(thread);
1585
1586 thread_deallocate(thread);
1587
1588 return (port);
1589 }
1590
1591 /*
1592 * Routine: space_deallocate
1593 * Purpose:
1594 * Deallocate a space ref produced by convert_port_to_space.
1595 * Conditions:
1596 * Nothing locked.
1597 */
1598
1599 void
1600 space_deallocate(
1601 ipc_space_t space)
1602 {
1603 if (space != IS_NULL)
1604 is_release(space);
1605 }
1606
1607 /*
1608 * Routine: thread/task_set_exception_ports [kernel call]
1609 * Purpose:
1610 * Sets the thread/task exception port, flavor and
1611 * behavior for the exception types specified by the mask.
1612 * There will be one send right per exception per valid
1613 * port.
1614 * Conditions:
1615 * Nothing locked. If successful, consumes
1616 * the supplied send right.
1617 * Returns:
1618 * KERN_SUCCESS Changed the special port.
1619 * KERN_INVALID_ARGUMENT The thread is null,
1620 * Illegal mask bit set.
1621 * Illegal exception behavior
1622 * KERN_FAILURE The thread is dead.
1623 */
1624
1625 kern_return_t
1626 thread_set_exception_ports(
1627 thread_t thread,
1628 exception_mask_t exception_mask,
1629 ipc_port_t new_port,
1630 exception_behavior_t new_behavior,
1631 thread_state_flavor_t new_flavor)
1632 {
1633 ipc_port_t old_port[EXC_TYPES_COUNT];
1634 boolean_t privileged = current_task()->sec_token.val[0] == 0;
1635 register int i;
1636
1637 if (thread == THREAD_NULL)
1638 return (KERN_INVALID_ARGUMENT);
1639
1640 if (exception_mask & ~EXC_MASK_VALID)
1641 return (KERN_INVALID_ARGUMENT);
1642
1643 if (IP_VALID(new_port)) {
1644 switch (new_behavior & ~MACH_EXCEPTION_CODES) {
1645
1646 case EXCEPTION_DEFAULT:
1647 case EXCEPTION_STATE:
1648 case EXCEPTION_STATE_IDENTITY:
1649 break;
1650
1651 default:
1652 return (KERN_INVALID_ARGUMENT);
1653 }
1654 }
1655
1656 /*
1657 * Check the validity of the thread_state_flavor by calling the
1658 * VALID_THREAD_STATE_FLAVOR architecture dependent macro defined in
1659 * osfmk/mach/ARCHITECTURE/thread_status.h
1660 */
1661 if (new_flavor != 0 && !VALID_THREAD_STATE_FLAVOR(new_flavor))
1662 return (KERN_INVALID_ARGUMENT);
1663
1664 thread_mtx_lock(thread);
1665
1666 if (!thread->active) {
1667 thread_mtx_unlock(thread);
1668
1669 return (KERN_FAILURE);
1670 }
1671
1672 if (thread->exc_actions == NULL) {
1673 ipc_thread_init_exc_actions(thread);
1674 }
1675 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
1676 if (exception_mask & (1 << i)) {
1677 old_port[i] = thread->exc_actions[i].port;
1678 thread->exc_actions[i].port = ipc_port_copy_send(new_port);
1679 thread->exc_actions[i].behavior = new_behavior;
1680 thread->exc_actions[i].flavor = new_flavor;
1681 thread->exc_actions[i].privileged = privileged;
1682 }
1683 else
1684 old_port[i] = IP_NULL;
1685 }
1686
1687 thread_mtx_unlock(thread);
1688
1689 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i)
1690 if (IP_VALID(old_port[i]))
1691 ipc_port_release_send(old_port[i]);
1692
1693 if (IP_VALID(new_port)) /* consume send right */
1694 ipc_port_release_send(new_port);
1695
1696 return (KERN_SUCCESS);
1697 }
1698
1699 kern_return_t
1700 task_set_exception_ports(
1701 task_t task,
1702 exception_mask_t exception_mask,
1703 ipc_port_t new_port,
1704 exception_behavior_t new_behavior,
1705 thread_state_flavor_t new_flavor)
1706 {
1707 ipc_port_t old_port[EXC_TYPES_COUNT];
1708 boolean_t privileged = current_task()->sec_token.val[0] == 0;
1709 register int i;
1710
1711 if (task == TASK_NULL)
1712 return (KERN_INVALID_ARGUMENT);
1713
1714 if (exception_mask & ~EXC_MASK_VALID)
1715 return (KERN_INVALID_ARGUMENT);
1716
1717 if (IP_VALID(new_port)) {
1718 switch (new_behavior & ~MACH_EXCEPTION_CODES) {
1719
1720 case EXCEPTION_DEFAULT:
1721 case EXCEPTION_STATE:
1722 case EXCEPTION_STATE_IDENTITY:
1723 break;
1724
1725 default:
1726 return (KERN_INVALID_ARGUMENT);
1727 }
1728 }
1729
1730 /*
1731 * Check the validity of the thread_state_flavor by calling the
1732 * VALID_THREAD_STATE_FLAVOR architecture dependent macro defined in
1733 * osfmk/mach/ARCHITECTURE/thread_status.h
1734 */
1735 if (new_flavor != 0 && !VALID_THREAD_STATE_FLAVOR(new_flavor))
1736 return (KERN_INVALID_ARGUMENT);
1737
1738 itk_lock(task);
1739
1740 if (task->itk_self == IP_NULL) {
1741 itk_unlock(task);
1742
1743 return (KERN_FAILURE);
1744 }
1745
1746 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
1747 if (exception_mask & (1 << i)) {
1748 old_port[i] = task->exc_actions[i].port;
1749 task->exc_actions[i].port =
1750 ipc_port_copy_send(new_port);
1751 task->exc_actions[i].behavior = new_behavior;
1752 task->exc_actions[i].flavor = new_flavor;
1753 task->exc_actions[i].privileged = privileged;
1754 }
1755 else
1756 old_port[i] = IP_NULL;
1757 }
1758
1759 itk_unlock(task);
1760
1761 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i)
1762 if (IP_VALID(old_port[i]))
1763 ipc_port_release_send(old_port[i]);
1764
1765 if (IP_VALID(new_port)) /* consume send right */
1766 ipc_port_release_send(new_port);
1767
1768 return (KERN_SUCCESS);
1769 }
1770
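/*
 * Example (editor's addition, not part of the original source):
 * installing a task-wide exception handler from user space.  The handler
 * port needs a receive right plus a send right for the kernel to use;
 * EXCEPTION_DEFAULT with a zero flavor passes the checks above
 * (THREAD_STATE_NONE is the conventional alternative).
 *
 *	#include <mach/mach.h>
 *
 *	kern_return_t
 *	install_task_exc_handler(mach_port_t *out_port)
 *	{
 *		mach_port_t exc_port = MACH_PORT_NULL;
 *		kern_return_t kr;
 *
 *		kr = mach_port_allocate(mach_task_self(),
 *		    MACH_PORT_RIGHT_RECEIVE, &exc_port);
 *		if (kr != KERN_SUCCESS)
 *			return kr;
 *
 *		kr = mach_port_insert_right(mach_task_self(), exc_port,
 *		    exc_port, MACH_MSG_TYPE_MAKE_SEND);
 *		if (kr != KERN_SUCCESS)
 *			return kr;
 *
 *		kr = task_set_exception_ports(mach_task_self(),
 *		    EXC_MASK_BAD_ACCESS | EXC_MASK_BAD_INSTRUCTION,
 *		    exc_port, EXCEPTION_DEFAULT, 0);
 *		if (kr == KERN_SUCCESS)
 *			*out_port = exc_port;
 *		return kr;
 *	}
 */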
1771 /*
1772 * Routine: thread/task_swap_exception_ports [kernel call]
1773 * Purpose:
1774 * Sets the thread/task exception port, flavor and
1775 * behavior for the exception types specified by the
1776 * mask.
1777 *
1778 * The old ports, behaviors and flavors are returned.
1779 * Count specifies the array sizes on input and
1780 * the number of returned ports etc. on output. The
1781 * arrays must be large enough to hold all the returned
1782 * data; MIG returns an error otherwise. The masks
1783 * array specifies the corresponding exception type(s).
1784 *
1785 * Conditions:
1786 * Nothing locked. If successful, consumes
1787 * the supplied send right.
1788 *
1789 * Returns up to [in] CountCnt elements.
1790 * Returns:
1791 * KERN_SUCCESS Changed the special port.
1792 * KERN_INVALID_ARGUMENT The thread is null,
1793 * Illegal mask bit set.
1794 * Illegal exception behavior
1795 * KERN_FAILURE The thread is dead.
1796 */
1797
1798 kern_return_t
1799 thread_swap_exception_ports(
1800 thread_t thread,
1801 exception_mask_t exception_mask,
1802 ipc_port_t new_port,
1803 exception_behavior_t new_behavior,
1804 thread_state_flavor_t new_flavor,
1805 exception_mask_array_t masks,
1806 mach_msg_type_number_t *CountCnt,
1807 exception_port_array_t ports,
1808 exception_behavior_array_t behaviors,
1809 thread_state_flavor_array_t flavors)
1810 {
1811 ipc_port_t old_port[EXC_TYPES_COUNT];
1812 boolean_t privileged = current_task()->sec_token.val[0] == 0;
1813 unsigned int i, j, count;
1814
1815 if (thread == THREAD_NULL)
1816 return (KERN_INVALID_ARGUMENT);
1817
1818 if (exception_mask & ~EXC_MASK_VALID)
1819 return (KERN_INVALID_ARGUMENT);
1820
1821 if (IP_VALID(new_port)) {
1822 switch (new_behavior & ~MACH_EXCEPTION_CODES) {
1823
1824 case EXCEPTION_DEFAULT:
1825 case EXCEPTION_STATE:
1826 case EXCEPTION_STATE_IDENTITY:
1827 break;
1828
1829 default:
1830 return (KERN_INVALID_ARGUMENT);
1831 }
1832 }
1833
1834 if (new_flavor != 0 && !VALID_THREAD_STATE_FLAVOR(new_flavor))
1835 return (KERN_INVALID_ARGUMENT);
1836
1837 thread_mtx_lock(thread);
1838
1839 if (!thread->active) {
1840 thread_mtx_unlock(thread);
1841
1842 return (KERN_FAILURE);
1843 }
1844
1845 if (thread->exc_actions == NULL) {
1846 ipc_thread_init_exc_actions(thread);
1847 }
1848
1849 assert(EXC_TYPES_COUNT > FIRST_EXCEPTION);
1850 for (count = 0, i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT && count < *CountCnt; ++i) {
1851 if (exception_mask & (1 << i)) {
1852 for (j = 0; j < count; ++j) {
1853 /*
1854 * search for an identical entry, if found
1855 * set corresponding mask for this exception.
1856 */
1857 if ( thread->exc_actions[i].port == ports[j] &&
1858 thread->exc_actions[i].behavior == behaviors[j] &&
1859 thread->exc_actions[i].flavor == flavors[j] ) {
1860 masks[j] |= (1 << i);
1861 break;
1862 }
1863 }
1864
1865 if (j == count) {
1866 masks[j] = (1 << i);
1867 ports[j] = ipc_port_copy_send(thread->exc_actions[i].port);
1868
1869 behaviors[j] = thread->exc_actions[i].behavior;
1870 flavors[j] = thread->exc_actions[i].flavor;
1871 ++count;
1872 }
1873
1874 old_port[i] = thread->exc_actions[i].port;
1875 thread->exc_actions[i].port = ipc_port_copy_send(new_port);
1876 thread->exc_actions[i].behavior = new_behavior;
1877 thread->exc_actions[i].flavor = new_flavor;
1878 thread->exc_actions[i].privileged = privileged;
1879 }
1880 else
1881 old_port[i] = IP_NULL;
1882 }
1883
1884 thread_mtx_unlock(thread);
1885
1886 while (--i >= FIRST_EXCEPTION) {
1887 if (IP_VALID(old_port[i]))
1888 ipc_port_release_send(old_port[i]);
1889 }
1890
1891 if (IP_VALID(new_port)) /* consume send right */
1892 ipc_port_release_send(new_port);
1893
1894 *CountCnt = count;
1895
1896 return (KERN_SUCCESS);
1897 }
1898
1899 kern_return_t
1900 task_swap_exception_ports(
1901 task_t task,
1902 exception_mask_t exception_mask,
1903 ipc_port_t new_port,
1904 exception_behavior_t new_behavior,
1905 thread_state_flavor_t new_flavor,
1906 exception_mask_array_t masks,
1907 mach_msg_type_number_t *CountCnt,
1908 exception_port_array_t ports,
1909 exception_behavior_array_t behaviors,
1910 thread_state_flavor_array_t flavors)
1911 {
1912 ipc_port_t old_port[EXC_TYPES_COUNT];
1913 boolean_t privileged = current_task()->sec_token.val[0] == 0;
1914 unsigned int i, j, count;
1915
1916 if (task == TASK_NULL)
1917 return (KERN_INVALID_ARGUMENT);
1918
1919 if (exception_mask & ~EXC_MASK_VALID)
1920 return (KERN_INVALID_ARGUMENT);
1921
1922 if (IP_VALID(new_port)) {
1923 switch (new_behavior & ~MACH_EXCEPTION_CODES) {
1924
1925 case EXCEPTION_DEFAULT:
1926 case EXCEPTION_STATE:
1927 case EXCEPTION_STATE_IDENTITY:
1928 break;
1929
1930 default:
1931 return (KERN_INVALID_ARGUMENT);
1932 }
1933 }
1934
1935 if (new_flavor != 0 && !VALID_THREAD_STATE_FLAVOR(new_flavor))
1936 return (KERN_INVALID_ARGUMENT);
1937
1938 itk_lock(task);
1939
1940 if (task->itk_self == IP_NULL) {
1941 itk_unlock(task);
1942
1943 return (KERN_FAILURE);
1944 }
1945
1946 assert(EXC_TYPES_COUNT > FIRST_EXCEPTION);
1947 for (count = 0, i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT && count < *CountCnt; ++i) {
1948 if (exception_mask & (1 << i)) {
1949 for (j = 0; j < count; j++) {
1950 /*
1951 * search for an identical entry, if found
1952 * set corresponding mask for this exception.
1953 */
1954 if ( task->exc_actions[i].port == ports[j] &&
1955 task->exc_actions[i].behavior == behaviors[j] &&
1956 task->exc_actions[i].flavor == flavors[j] ) {
1957 masks[j] |= (1 << i);
1958 break;
1959 }
1960 }
1961
1962 if (j == count) {
1963 masks[j] = (1 << i);
1964 ports[j] = ipc_port_copy_send(task->exc_actions[i].port);
1965 behaviors[j] = task->exc_actions[i].behavior;
1966 flavors[j] = task->exc_actions[i].flavor;
1967 ++count;
1968 }
1969
1970 old_port[i] = task->exc_actions[i].port;
1971
1972 task->exc_actions[i].port = ipc_port_copy_send(new_port);
1973 task->exc_actions[i].behavior = new_behavior;
1974 task->exc_actions[i].flavor = new_flavor;
1975 task->exc_actions[i].privileged = privileged;
1976 }
1977 else
1978 old_port[i] = IP_NULL;
1979 }
1980
1981 itk_unlock(task);
1982
1983 while (--i >= FIRST_EXCEPTION) {
1984 if (IP_VALID(old_port[i]))
1985 ipc_port_release_send(old_port[i]);
1986 }
1987
1988 if (IP_VALID(new_port)) /* consume send right */
1989 ipc_port_release_send(new_port);
1990
1991 *CountCnt = count;
1992
1993 return (KERN_SUCCESS);
1994 }
1995
1996 /*
1997 * Routine: thread/task_get_exception_ports [kernel call]
1998 * Purpose:
1999 * Clones a send right for each of the thread/task's exception
2000 * ports specified in the mask and returns the behavior
2001 * and flavor of said port.
2002 *
2003 * Returns up to [in] CountCnt elements.
2004 *
2005 * Conditions:
2006 * Nothing locked.
2007 * Returns:
2008 * KERN_SUCCESS Extracted a send right.
2009 * KERN_INVALID_ARGUMENT The thread is null,
2010 * Invalid special port,
2011 * Illegal mask bit set.
2012 * KERN_FAILURE The thread is dead.
2013 */
2014
2015 kern_return_t
2016 thread_get_exception_ports(
2017 thread_t thread,
2018 exception_mask_t exception_mask,
2019 exception_mask_array_t masks,
2020 mach_msg_type_number_t *CountCnt,
2021 exception_port_array_t ports,
2022 exception_behavior_array_t behaviors,
2023 thread_state_flavor_array_t flavors)
2024 {
2025 unsigned int i, j, count;
2026
2027 if (thread == THREAD_NULL)
2028 return (KERN_INVALID_ARGUMENT);
2029
2030 if (exception_mask & ~EXC_MASK_VALID)
2031 return (KERN_INVALID_ARGUMENT);
2032
2033 thread_mtx_lock(thread);
2034
2035 if (!thread->active) {
2036 thread_mtx_unlock(thread);
2037
2038 return (KERN_FAILURE);
2039 }
2040
2041 count = 0;
2042
2043 if (thread->exc_actions == NULL) {
2044 goto done;
2045 }
2046
2047 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
2048 if (exception_mask & (1 << i)) {
2049 for (j = 0; j < count; ++j) {
2050 /*
2051 * search for an identical entry, if found
2052 * set corresponding mask for this exception.
2053 */
2054 if ( thread->exc_actions[i].port == ports[j] &&
2055 thread->exc_actions[i].behavior == behaviors[j] &&
2056 thread->exc_actions[i].flavor == flavors[j] ) {
2057 masks[j] |= (1 << i);
2058 break;
2059 }
2060 }
2061
2062 if (j == count) {
2063 masks[j] = (1 << i);
2064 ports[j] = ipc_port_copy_send(thread->exc_actions[i].port);
2065 behaviors[j] = thread->exc_actions[i].behavior;
2066 flavors[j] = thread->exc_actions[i].flavor;
2067 ++count;
2068 if (count >= *CountCnt)
2069 break;
2070 }
2071 }
2072 }
2073
2074 done:
2075 thread_mtx_unlock(thread);
2076
2077 *CountCnt = count;
2078
2079 return (KERN_SUCCESS);
2080 }
2081
2082 kern_return_t
2083 task_get_exception_ports(
2084 task_t task,
2085 exception_mask_t exception_mask,
2086 exception_mask_array_t masks,
2087 mach_msg_type_number_t *CountCnt,
2088 exception_port_array_t ports,
2089 exception_behavior_array_t behaviors,
2090 thread_state_flavor_array_t flavors)
2091 {
2092 unsigned int i, j, count;
2093
2094 if (task == TASK_NULL)
2095 return (KERN_INVALID_ARGUMENT);
2096
2097 if (exception_mask & ~EXC_MASK_VALID)
2098 return (KERN_INVALID_ARGUMENT);
2099
2100 itk_lock(task);
2101
2102 if (task->itk_self == IP_NULL) {
2103 itk_unlock(task);
2104
2105 return (KERN_FAILURE);
2106 }
2107
2108 count = 0;
2109
2110 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
2111 if (exception_mask & (1 << i)) {
2112 for (j = 0; j < count; ++j) {
2113 /*
2114 * search for an identical entry, if found
2115 * set corresponding mask for this exception.
2116 */
2117 if ( task->exc_actions[i].port == ports[j] &&
2118 task->exc_actions[i].behavior == behaviors[j] &&
2119 task->exc_actions[i].flavor == flavors[j] ) {
2120 masks[j] |= (1 << i);
2121 break;
2122 }
2123 }
2124
2125 if (j == count) {
2126 masks[j] = (1 << i);
2127 ports[j] = ipc_port_copy_send(task->exc_actions[i].port);
2128 behaviors[j] = task->exc_actions[i].behavior;
2129 flavors[j] = task->exc_actions[i].flavor;
2130 ++count;
2131 if (count >= *CountCnt)
2132 break;
2133 }
2134 }
2135 }
2136
2137 itk_unlock(task);
2138
2139 *CountCnt = count;
2140
2141 return (KERN_SUCCESS);
2142 }
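/*
 * Example (editor's addition, not part of the original source):
 * snapshotting the currently installed task exception handlers, as a
 * crash reporter typically does before installing its own (often
 * followed by task_set_exception_ports(), or done atomically with
 * task_swap_exception_ports() above).
 *
 *	#include <mach/mach.h>
 *
 *	exception_mask_t masks[EXC_TYPES_COUNT];
 *	mach_port_t old_ports[EXC_TYPES_COUNT];
 *	exception_behavior_t behaviors[EXC_TYPES_COUNT];
 *	thread_state_flavor_t flavors[EXC_TYPES_COUNT];
 *	mach_msg_type_number_t count = EXC_TYPES_COUNT;	// in: capacity, out: entries used
 *	kern_return_t kr;
 *
 *	kr = task_get_exception_ports(mach_task_self(), EXC_MASK_ALL,
 *	    masks, &count, old_ports, behaviors, flavors);
 *	if (kr == KERN_SUCCESS) {
 *		// masks[i] tells which exception types route to old_ports[i]
 *		// with behaviors[i]/flavors[i]; each valid old_ports[i] is a
 *		// send right that now belongs to this task.
 *	}
 */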