Commit | Line | Data |
---|---|---|
1 | /* | |
2 | * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved. | |
3 | * | |
4 | * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ | |
5 | * | |
6 | * This file contains Original Code and/or Modifications of Original Code | |
7 | * as defined in and that are subject to the Apple Public Source License | |
8 | * Version 2.0 (the 'License'). You may not use this file except in | |
9 | * compliance with the License. The rights granted to you under the License | |
10 | * may not be used to create, or enable the creation or redistribution of, | |
11 | * unlawful or unlicensed copies of an Apple operating system, or to | |
12 | * circumvent, violate, or enable the circumvention or violation of, any | |
13 | * terms of an Apple operating system software license agreement. | |
14 | * | |
15 | * Please obtain a copy of the License at | |
16 | * http://www.opensource.apple.com/apsl/ and read it before using this file. | |
17 | * | |
18 | * The Original Code and all software distributed under the License are | |
19 | * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER | |
20 | * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, | |
21 | * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, | |
22 | * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. | |
23 | * Please see the License for the specific language governing rights and | |
24 | * limitations under the License. | |
25 | * | |
26 | * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ | |
27 | */ | |
28 | /* | |
29 | * @OSF_COPYRIGHT@ | |
30 | */ | |
31 | /* | |
32 | * Mach Operating System | |
33 | * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University | |
34 | * All Rights Reserved. | |
35 | * | |
36 | * Permission to use, copy, modify and distribute this software and its | |
37 | * documentation is hereby granted, provided that both the copyright | |
38 | * notice and this permission notice appear in all copies of the | |
39 | * software, derivative works or modified versions, and any portions | |
40 | * thereof, and that both notices appear in supporting documentation. | |
41 | * | |
42 | * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" | |
43 | * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR | |
44 | * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. | |
45 | * | |
46 | * Carnegie Mellon requests users of this software to return to | |
47 | * | |
48 | * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU | |
49 | * School of Computer Science | |
50 | * Carnegie Mellon University | |
51 | * Pittsburgh PA 15213-3890 | |
52 | * | |
53 | * any improvements or extensions that they make and grant Carnegie Mellon | |
54 | * the rights to redistribute these changes. | |
55 | */ | |
56 | /* | |
57 | */ | |
58 | ||
59 | /* | |
60 | * File: ipc_tt.c | |
61 | * Purpose: | |
62 | * Task and thread related IPC functions. | |
63 | */ | |
64 | ||
65 | #include <mach/mach_types.h> | |
66 | #include <mach/boolean.h> | |
67 | #include <mach/kern_return.h> | |
68 | #include <mach/mach_param.h> | |
69 | #include <mach/task_special_ports.h> | |
70 | #include <mach/thread_special_ports.h> | |
71 | #include <mach/thread_status.h> | |
72 | #include <mach/exception_types.h> | |
73 | #include <mach/memory_object_types.h> | |
74 | #include <mach/mach_traps.h> | |
75 | #include <mach/task_server.h> | |
76 | #include <mach/thread_act_server.h> | |
77 | #include <mach/mach_host_server.h> | |
78 | #include <mach/host_priv_server.h> | |
79 | #include <mach/vm_map_server.h> | |
80 | ||
81 | #include <kern/kern_types.h> | |
82 | #include <kern/host.h> | |
83 | #include <kern/ipc_kobject.h> | |
84 | #include <kern/ipc_tt.h> | |
85 | #include <kern/kalloc.h> | |
86 | #include <kern/thread.h> | |
87 | #include <kern/misc_protos.h> | |
88 | ||
89 | #include <vm/vm_map.h> | |
90 | #include <vm/vm_pageout.h> | |
91 | #include <vm/vm_shared_memory_server.h> | |
92 | #include <vm/vm_protos.h> | |
93 | ||
94 | /* forward declarations */ | |
95 | task_t convert_port_to_locked_task(ipc_port_t port); | |
96 | ||
97 | ||
98 | /* | |
99 | * Routine: ipc_task_init | |
100 | * Purpose: | |
101 | * Initialize a task's IPC state. | |
102 | * | |
103 | * If non-null, some state will be inherited from the parent. | |
104 | * The parent must be appropriately initialized. | |
105 | * Conditions: | |
106 | * Nothing locked. | |
107 | */ | |
108 | ||
109 | void | |
110 | ipc_task_init( | |
111 | task_t task, | |
112 | task_t parent) | |
113 | { | |
114 | ipc_space_t space; | |
115 | ipc_port_t kport; | |
116 | kern_return_t kr; | |
117 | int i; | |
118 | ||
119 | ||
120 | kr = ipc_space_create(&ipc_table_entries[0], &space); | |
121 | if (kr != KERN_SUCCESS) | |
122 | panic("ipc_task_init"); | |
123 | ||
124 | ||
125 | kport = ipc_port_alloc_kernel(); | |
126 | if (kport == IP_NULL) | |
127 | panic("ipc_task_init"); | |
128 | ||
129 | itk_lock_init(task); | |
130 | task->itk_self = kport; | |
131 | task->itk_sself = ipc_port_make_send(kport); | |
132 | task->itk_space = space; | |
133 | space->is_fast = FALSE; | |
134 | ||
135 | if (parent == TASK_NULL) { | |
136 | ipc_port_t port; | |
137 | ||
138 | for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) { | |
139 | task->exc_actions[i].port = IP_NULL; | |
140 | }/* for */ | |
141 | ||
142 | kr = host_get_host_port(host_priv_self(), &port); | |
143 | assert(kr == KERN_SUCCESS); | |
144 | task->itk_host = port; | |
145 | ||
146 | task->itk_bootstrap = IP_NULL; | |
147 | ||
148 | for (i = 0; i < TASK_PORT_REGISTER_MAX; i++) | |
149 | task->itk_registered[i] = IP_NULL; | |
150 | } else { | |
151 | itk_lock(parent); | |
152 | assert(parent->itk_self != IP_NULL); | |
153 | ||
154 | /* inherit registered ports */ | |
155 | ||
156 | for (i = 0; i < TASK_PORT_REGISTER_MAX; i++) | |
157 | task->itk_registered[i] = | |
158 | ipc_port_copy_send(parent->itk_registered[i]); | |
159 | ||
160 | /* inherit exception and bootstrap ports */ | |
161 | ||
162 | for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) { | |
163 | task->exc_actions[i].port = | |
164 | ipc_port_copy_send(parent->exc_actions[i].port); | |
165 | task->exc_actions[i].flavor = | |
166 | parent->exc_actions[i].flavor; | |
167 | task->exc_actions[i].behavior = | |
168 | parent->exc_actions[i].behavior; | |
169 | task->exc_actions[i].privileged = | |
170 | parent->exc_actions[i].privileged; | |
171 | }/* for */ | |
172 | task->itk_host = | |
173 | ipc_port_copy_send(parent->itk_host); | |
174 | ||
175 | task->itk_bootstrap = | |
176 | ipc_port_copy_send(parent->itk_bootstrap); | |
177 | ||
178 | itk_unlock(parent); | |
179 | } | |
180 | } | |
181 | ||
182 | /* | |
183 | * Routine: ipc_task_enable | |
184 | * Purpose: | |
185 | * Enable a task for IPC access. | |
186 | * Conditions: | |
187 | * Nothing locked. | |
188 | */ | |
189 | ||
190 | void | |
191 | ipc_task_enable( | |
192 | task_t task) | |
193 | { | |
194 | ipc_port_t kport; | |
195 | ||
196 | itk_lock(task); | |
197 | kport = task->itk_self; | |
198 | if (kport != IP_NULL) | |
199 | ipc_kobject_set(kport, (ipc_kobject_t) task, IKOT_TASK); | |
200 | itk_unlock(task); | |
201 | } | |
202 | ||
203 | /* | |
204 | * Routine: ipc_task_disable | |
205 | * Purpose: | |
206 | * Disable IPC access to a task. | |
207 | * Conditions: | |
208 | * Nothing locked. | |
209 | */ | |
210 | ||
211 | void | |
212 | ipc_task_disable( | |
213 | task_t task) | |
214 | { | |
215 | ipc_port_t kport; | |
216 | ||
217 | itk_lock(task); | |
218 | kport = task->itk_self; | |
219 | if (kport != IP_NULL) | |
220 | ipc_kobject_set(kport, IKO_NULL, IKOT_NONE); | |
221 | itk_unlock(task); | |
222 | } | |
223 | ||
224 | /* | |
225 | * Routine: ipc_task_terminate | |
226 | * Purpose: | |
227 | * Clean up and destroy a task's IPC state. | |
228 | * Conditions: | |
229 | * Nothing locked. The task must be suspended. | |
230 | * (Or the current thread must be in the task.) | |
231 | */ | |
232 | ||
233 | void | |
234 | ipc_task_terminate( | |
235 | task_t task) | |
236 | { | |
237 | ipc_port_t kport; | |
238 | int i; | |
239 | ||
240 | itk_lock(task); | |
241 | kport = task->itk_self; | |
242 | ||
243 | if (kport == IP_NULL) { | |
244 | /* the task is already terminated (can this happen?) */ | |
245 | itk_unlock(task); | |
246 | return; | |
247 | } | |
248 | ||
249 | task->itk_self = IP_NULL; | |
250 | itk_unlock(task); | |
251 | ||
252 | /* release the naked send rights */ | |
253 | ||
254 | if (IP_VALID(task->itk_sself)) | |
255 | ipc_port_release_send(task->itk_sself); | |
256 | ||
257 | for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) { | |
258 | if (IP_VALID(task->exc_actions[i].port)) { | |
259 | ipc_port_release_send(task->exc_actions[i].port); | |
260 | } | |
261 | } | |
262 | ||
263 | if (IP_VALID(task->itk_host)) | |
264 | ipc_port_release_send(task->itk_host); | |
265 | ||
266 | if (IP_VALID(task->itk_bootstrap)) | |
267 | ipc_port_release_send(task->itk_bootstrap); | |
268 | ||
269 | for (i = 0; i < TASK_PORT_REGISTER_MAX; i++) | |
270 | if (IP_VALID(task->itk_registered[i])) | |
271 | ipc_port_release_send(task->itk_registered[i]); | |
272 | ||
273 | ipc_port_release_send(task->wired_ledger_port); | |
274 | ipc_port_release_send(task->paged_ledger_port); | |
275 | ||
276 | /* destroy the kernel port */ | |
277 | ipc_port_dealloc_kernel(kport); | |
278 | } | |
279 | ||
280 | /* | |
281 | * Routine: ipc_task_reset | |
282 | * Purpose: | |
283 | * Reset a task's IPC state to protect it when | |
284 | * it enters an elevated security context. | |
285 | * Conditions: | |
286 | * Nothing locked. The task must be suspended. | |
287 | * (Or the current thread must be in the task.) | |
288 | */ | |
289 | ||
290 | void | |
291 | ipc_task_reset( | |
292 | task_t task) | |
293 | { | |
294 | ipc_port_t old_kport, new_kport; | |
295 | ipc_port_t old_sself; | |
296 | ipc_port_t old_exc_actions[EXC_TYPES_COUNT]; | |
297 | int i; | |
298 | ||
299 | new_kport = ipc_port_alloc_kernel(); | |
300 | if (new_kport == IP_NULL) | |
301 | panic("ipc_task_reset"); | |
302 | ||
303 | itk_lock(task); | |
304 | ||
305 | old_kport = task->itk_self; | |
306 | ||
307 | if (old_kport == IP_NULL) { | |
308 | /* the task is already terminated (can this happen?) */ | |
309 | itk_unlock(task); | |
310 | ipc_port_dealloc_kernel(new_kport); | |
311 | return; | |
312 | } | |
313 | ||
314 | task->itk_self = new_kport; | |
315 | old_sself = task->itk_sself; | |
316 | task->itk_sself = ipc_port_make_send(new_kport); | |
317 | ipc_kobject_set(old_kport, IKO_NULL, IKOT_NONE); | |
318 | ipc_kobject_set(new_kport, (ipc_kobject_t) task, IKOT_TASK); | |
319 | ||
320 | for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) { | |
321 | if (!task->exc_actions[i].privileged) { | |
322 | old_exc_actions[i] = task->exc_actions[i].port; | |
323 | task->exc_actions[i].port = IP_NULL; | |
324 | } else { | |
325 | old_exc_actions[i] = IP_NULL; | |
326 | } | |
327 | }/* for */ | |
328 | ||
329 | itk_unlock(task); | |
330 | ||
331 | /* release the naked send rights */ | |
332 | ||
333 | if (IP_VALID(old_sself)) | |
334 | ipc_port_release_send(old_sself); | |
335 | ||
336 | for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) { | |
337 | if (IP_VALID(old_exc_actions[i])) { | |
338 | ipc_port_release_send(old_exc_actions[i]); | |
339 | } | |
340 | }/* for */ | |
341 | ||
342 | /* destroy the kernel port */ | |
343 | ipc_port_dealloc_kernel(old_kport); | |
344 | } | |
345 | ||
346 | /* | |
347 | * Routine: ipc_thread_init | |
348 | * Purpose: | |
349 | * Initialize a thread's IPC state. | |
350 | * Conditions: | |
351 | * Nothing locked. | |
352 | */ | |
353 | ||
354 | void | |
355 | ipc_thread_init( | |
356 | thread_t thread) | |
357 | { | |
358 | ipc_port_t kport; | |
359 | int i; | |
360 | ||
361 | kport = ipc_port_alloc_kernel(); | |
362 | if (kport == IP_NULL) | |
363 | panic("ipc_thread_init"); | |
364 | ||
365 | thread->ith_self = kport; | |
366 | thread->ith_sself = ipc_port_make_send(kport); | |
367 | ||
368 | for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) | |
369 | thread->exc_actions[i].port = IP_NULL; | |
370 | ||
371 | ipc_kobject_set(kport, (ipc_kobject_t)thread, IKOT_THREAD); | |
372 | ||
373 | ipc_kmsg_queue_init(&thread->ith_messages); | |
374 | ||
375 | thread->ith_rpc_reply = IP_NULL; | |
376 | } | |
377 | ||
378 | void | |
379 | ipc_thread_disable( | |
380 | thread_t thread) | |
381 | { | |
382 | ipc_port_t kport = thread->ith_self; | |
383 | ||
384 | if (kport != IP_NULL) | |
385 | ipc_kobject_set(kport, IKO_NULL, IKOT_NONE); | |
386 | } | |
387 | ||
388 | /* | |
389 | * Routine: ipc_thread_terminate | |
390 | * Purpose: | |
391 | * Clean up and destroy a thread's IPC state. | |
392 | * Conditions: | |
393 | * Nothing locked. | |
394 | */ | |
395 | ||
396 | void | |
397 | ipc_thread_terminate( | |
398 | thread_t thread) | |
399 | { | |
400 | ipc_port_t kport = thread->ith_self; | |
401 | ||
402 | if (kport != IP_NULL) { | |
403 | int i; | |
404 | ||
405 | if (IP_VALID(thread->ith_sself)) | |
406 | ipc_port_release_send(thread->ith_sself); | |
407 | ||
408 | thread->ith_sself = thread->ith_self = IP_NULL; | |
409 | ||
410 | for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) { | |
411 | if (IP_VALID(thread->exc_actions[i].port)) | |
412 | ipc_port_release_send(thread->exc_actions[i].port); | |
413 | } | |
414 | ||
415 | ipc_port_dealloc_kernel(kport); | |
416 | } | |
417 | ||
418 | assert(ipc_kmsg_queue_empty(&thread->ith_messages)); | |
419 | ||
420 | if (thread->ith_rpc_reply != IP_NULL) | |
421 | ipc_port_dealloc_reply(thread->ith_rpc_reply); | |
422 | ||
423 | thread->ith_rpc_reply = IP_NULL; | |
424 | } | |
425 | ||
426 | /* | |
427 | * Routine: retrieve_task_self_fast | |
428 | * Purpose: | |
429 | * Optimized version of retrieve_task_self | |
430 | * that only works for the current task. | |
431 | * | |
432 | * Return a send right (possibly null/dead) | |
433 | * for the task's user-visible self port. | |
434 | * Conditions: | |
435 | * Nothing locked. | |
436 | */ | |
437 | ||
438 | ipc_port_t | |
439 | retrieve_task_self_fast( | |
440 | register task_t task) | |
441 | { | |
442 | register ipc_port_t port; | |
443 | ||
444 | assert(task == current_task()); | |
445 | ||
446 | itk_lock(task); | |
447 | assert(task->itk_self != IP_NULL); | |
448 | ||
449 | if ((port = task->itk_sself) == task->itk_self) { | |
450 | /* no interposing */ | |
451 | ||
452 | ip_lock(port); | |
453 | assert(ip_active(port)); | |
454 | ip_reference(port); | |
455 | port->ip_srights++; | |
456 | ip_unlock(port); | |
457 | } else | |
458 | port = ipc_port_copy_send(port); | |
459 | itk_unlock(task); | |
460 | ||
461 | return port; | |
462 | } | |
463 | ||
464 | /* | |
465 | * Routine: retrieve_thread_self_fast | |
466 | * Purpose: | |
467 | * Return a send right (possibly null/dead) | |
468 | * for the thread's user-visible self port. | |
469 | * | |
470 | * Only works for the current thread. | |
471 | * | |
472 | * Conditions: | |
473 | * Nothing locked. | |
474 | */ | |
475 | ||
476 | ipc_port_t | |
477 | retrieve_thread_self_fast( | |
478 | thread_t thread) | |
479 | { | |
480 | register ipc_port_t port; | |
481 | ||
482 | assert(thread == current_thread()); | |
483 | ||
484 | thread_mtx_lock(thread); | |
485 | ||
486 | assert(thread->ith_self != IP_NULL); | |
487 | ||
488 | if ((port = thread->ith_sself) == thread->ith_self) { | |
489 | /* no interposing */ | |
490 | ||
491 | ip_lock(port); | |
492 | assert(ip_active(port)); | |
493 | ip_reference(port); | |
494 | port->ip_srights++; | |
495 | ip_unlock(port); | |
496 | } | |
497 | else | |
498 | port = ipc_port_copy_send(port); | |
499 | ||
500 | thread_mtx_unlock(thread); | |
501 | ||
502 | return port; | |
503 | } | |
504 | ||
505 | /* | |
506 | * Routine: task_self_trap [mach trap] | |
507 | * Purpose: | |
508 | * Give the caller send rights for his own task port. | |
509 | * Conditions: | |
510 | * Nothing locked. | |
511 | * Returns: | |
512 | * MACH_PORT_NULL if there are any resource failures | |
513 | * or other errors. | |
514 | */ | |
515 | ||
516 | mach_port_name_t | |
517 | task_self_trap( | |
518 | __unused struct task_self_trap_args *args) | |
519 | { | |
520 | task_t task = current_task(); | |
521 | ipc_port_t sright; | |
522 | mach_port_name_t name; | |
523 | ||
524 | sright = retrieve_task_self_fast(task); | |
525 | name = ipc_port_copyout_send(sright, task->itk_space); | |
526 | return name; | |
527 | } | |
528 | ||
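Not part of the original file: a minimal user-space sketch of how task_self_trap() is normally reached, via the mach_task_self() wrapper from <mach/mach.h>. The returned name denotes the send right copied out into the caller's IPC space by the trap above.

```c
#include <mach/mach.h>
#include <stdio.h>

int main(void)
{
    /* mach_task_self() is the user-space wrapper over task_self_trap();
     * the name denotes a send right for the task's user-visible self
     * port (itk_sself), copied out by ipc_port_copyout_send(). */
    mach_port_t task = mach_task_self();

    printf("task self port name: 0x%x\n", task);
    return 0;
}
```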
529 | /* | |
530 | * Routine: thread_self_trap [mach trap] | |
531 | * Purpose: | |
532 | * Give the caller send rights for his own thread port. | |
533 | * Conditions: | |
534 | * Nothing locked. | |
535 | * Returns: | |
536 | * MACH_PORT_NULL if there are any resource failures | |
537 | * or other errors. | |
538 | */ | |
539 | ||
540 | mach_port_name_t | |
541 | thread_self_trap( | |
542 | __unused struct thread_self_trap_args *args) | |
543 | { | |
544 | thread_t thread = current_thread(); | |
545 | task_t task = thread->task; | |
546 | ipc_port_t sright; | |
547 | mach_port_name_t name; | |
548 | ||
549 | sright = retrieve_thread_self_fast(thread); | |
550 | name = ipc_port_copyout_send(sright, task->itk_space); | |
551 | return name; | |
552 | ||
553 | } | |
554 | ||
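A companion sketch (not from this file) for thread_self_trap(), reached through mach_thread_self(); unlike the cached task self name, the right returned here is normally released by the caller when no longer needed.

```c
#include <mach/mach.h>

void with_thread_port(void)
{
    /* thread_self_trap() copies a send right for the current thread's
     * self port into this task's IPC space and returns its name. */
    mach_port_t thread = mach_thread_self();

    /* ... use the port (thread_info(), thread_get_state(), ...) ... */

    /* Drop the user reference when done; mach_thread_self() is not
     * cached the way mach_task_self() is. */
    mach_port_deallocate(mach_task_self(), thread);
}
```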
555 | /* | |
556 | * Routine: mach_reply_port [mach trap] | |
557 | * Purpose: | |
558 | * Allocate a port for the caller. | |
559 | * Conditions: | |
560 | * Nothing locked. | |
561 | * Returns: | |
562 | * MACH_PORT_NULL if there are any resource failures | |
563 | * or other errors. | |
564 | */ | |
565 | ||
566 | mach_port_name_t | |
567 | mach_reply_port( | |
568 | __unused struct mach_reply_port_args *args) | |
569 | { | |
570 | ipc_port_t port; | |
571 | mach_port_name_t name; | |
572 | kern_return_t kr; | |
573 | ||
574 | kr = ipc_port_alloc(current_task()->itk_space, &name, &port); | |
575 | if (kr == KERN_SUCCESS) | |
576 | ip_unlock(port); | |
577 | else | |
578 | name = MACH_PORT_NULL; | |
579 | return name; | |
580 | } | |
581 | ||
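An illustrative user-space sketch, not part of the original source: mach_reply_port() called directly, with the general mach_port_allocate() path shown as a rough fallback for comparison.

```c
#include <mach/mach.h>

mach_port_t make_reply_port(void)
{
    /* The trap above allocates a fresh receive right in the caller's
     * space and returns its name, or MACH_PORT_NULL on failure. */
    mach_port_t reply = mach_reply_port();

    if (reply == MACH_PORT_NULL) {
        /* Roughly equivalent, via the general port interface. */
        (void) mach_port_allocate(mach_task_self(),
                                  MACH_PORT_RIGHT_RECEIVE, &reply);
    }
    return reply;
}
```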
582 | /* | |
583 | * Routine: thread_get_special_port [kernel call] | |
584 | * Purpose: | |
585 | * Clones a send right for one of the thread's | |
586 | * special ports. | |
587 | * Conditions: | |
588 | * Nothing locked. | |
589 | * Returns: | |
590 | * KERN_SUCCESS Extracted a send right. | |
591 | * KERN_INVALID_ARGUMENT The thread is null. | |
592 | * KERN_FAILURE The thread is dead. | |
593 | * KERN_INVALID_ARGUMENT Invalid special port. | |
594 | */ | |
595 | ||
596 | kern_return_t | |
597 | thread_get_special_port( | |
598 | thread_t thread, | |
599 | int which, | |
600 | ipc_port_t *portp) | |
601 | { | |
602 | kern_return_t result = KERN_SUCCESS; | |
603 | ipc_port_t *whichp; | |
604 | ||
605 | if (thread == THREAD_NULL) | |
606 | return (KERN_INVALID_ARGUMENT); | |
607 | ||
608 | switch (which) { | |
609 | ||
610 | case THREAD_KERNEL_PORT: | |
611 | whichp = &thread->ith_sself; | |
612 | break; | |
613 | ||
614 | default: | |
615 | return (KERN_INVALID_ARGUMENT); | |
616 | } | |
617 | ||
618 | thread_mtx_lock(thread); | |
619 | ||
620 | if (thread->active) | |
621 | *portp = ipc_port_copy_send(*whichp); | |
622 | else | |
623 | result = KERN_FAILURE; | |
624 | ||
625 | thread_mtx_unlock(thread); | |
626 | ||
627 | return (result); | |
628 | } | |
629 | ||
630 | /* | |
631 | * Routine: thread_set_special_port [kernel call] | |
632 | * Purpose: | |
633 | * Changes one of the thread's special ports, | |
634 | * setting it to the supplied send right. | |
635 | * Conditions: | |
636 | * Nothing locked. If successful, consumes | |
637 | * the supplied send right. | |
638 | * Returns: | |
639 | * KERN_SUCCESS Changed the special port. | |
640 | * KERN_INVALID_ARGUMENT The thread is null. | |
641 | * KERN_FAILURE The thread is dead. | |
642 | * KERN_INVALID_ARGUMENT Invalid special port. | |
643 | */ | |
644 | ||
645 | kern_return_t | |
646 | thread_set_special_port( | |
647 | thread_t thread, | |
648 | int which, | |
649 | ipc_port_t port) | |
650 | { | |
651 | kern_return_t result = KERN_SUCCESS; | |
652 | ipc_port_t *whichp, old = IP_NULL; | |
653 | ||
654 | if (thread == THREAD_NULL) | |
655 | return (KERN_INVALID_ARGUMENT); | |
656 | ||
657 | switch (which) { | |
658 | ||
659 | case THREAD_KERNEL_PORT: | |
660 | whichp = &thread->ith_sself; | |
661 | break; | |
662 | ||
663 | default: | |
664 | return (KERN_INVALID_ARGUMENT); | |
665 | } | |
666 | ||
667 | thread_mtx_lock(thread); | |
668 | ||
669 | if (thread->active) { | |
670 | old = *whichp; | |
671 | *whichp = port; | |
672 | } | |
673 | else | |
674 | result = KERN_FAILURE; | |
675 | ||
676 | thread_mtx_unlock(thread); | |
677 | ||
678 | if (IP_VALID(old)) | |
679 | ipc_port_release_send(old); | |
680 | ||
681 | return (result); | |
682 | } | |
683 | ||
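A hedged user-space sketch (not in the original file) of the two routines above, using the MIG-generated thread_get_special_port()/thread_set_special_port() calls with THREAD_KERNEL_PORT.

```c
#include <mach/mach.h>
#include <stdio.h>

void show_thread_kernel_port(void)
{
    mach_port_t thread = mach_thread_self();
    mach_port_t kport = MACH_PORT_NULL;

    /* Clones a send right for the thread's user-visible self port
     * (ith_sself in the kernel structure manipulated above). */
    if (thread_get_special_port(thread, THREAD_KERNEL_PORT,
                                &kport) == KERN_SUCCESS) {
        printf("THREAD_KERNEL_PORT: 0x%x\n", kport);
        mach_port_deallocate(mach_task_self(), kport);
    }

    /* thread_set_special_port(thread, THREAD_KERNEL_PORT, some_port)
     * would install a replacement right, which is how self-port
     * interposing is done; the kernel releases the old right after
     * the swap, as shown above. */

    mach_port_deallocate(mach_task_self(), thread);
}
```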
684 | /* | |
685 | * Routine: task_get_special_port [kernel call] | |
686 | * Purpose: | |
687 | * Clones a send right for one of the task's | |
688 | * special ports. | |
689 | * Conditions: | |
690 | * Nothing locked. | |
691 | * Returns: | |
692 | * KERN_SUCCESS Extracted a send right. | |
693 | * KERN_INVALID_ARGUMENT The task is null. | |
694 | * KERN_FAILURE The task/space is dead. | |
695 | * KERN_INVALID_ARGUMENT Invalid special port. | |
696 | */ | |
697 | ||
698 | kern_return_t | |
699 | task_get_special_port( | |
700 | task_t task, | |
701 | int which, | |
702 | ipc_port_t *portp) | |
703 | { | |
704 | ipc_port_t *whichp; | |
705 | ipc_port_t port; | |
706 | ||
707 | if (task == TASK_NULL) | |
708 | return KERN_INVALID_ARGUMENT; | |
709 | ||
710 | switch (which) { | |
711 | case TASK_KERNEL_PORT: | |
712 | whichp = &task->itk_sself; | |
713 | break; | |
714 | ||
715 | case TASK_HOST_PORT: | |
716 | whichp = &task->itk_host; | |
717 | break; | |
718 | ||
719 | case TASK_BOOTSTRAP_PORT: | |
720 | whichp = &task->itk_bootstrap; | |
721 | break; | |
722 | ||
723 | case TASK_WIRED_LEDGER_PORT: | |
724 | whichp = &task->wired_ledger_port; | |
725 | break; | |
726 | ||
727 | case TASK_PAGED_LEDGER_PORT: | |
728 | whichp = &task->paged_ledger_port; | |
729 | break; | |
730 | ||
731 | default: | |
732 | return KERN_INVALID_ARGUMENT; | |
733 | } | |
734 | ||
735 | itk_lock(task); | |
736 | if (task->itk_self == IP_NULL) { | |
737 | itk_unlock(task); | |
738 | return KERN_FAILURE; | |
739 | } | |
740 | ||
741 | port = ipc_port_copy_send(*whichp); | |
742 | itk_unlock(task); | |
743 | ||
744 | *portp = port; | |
745 | return KERN_SUCCESS; | |
746 | } | |
747 | ||
748 | /* | |
749 | * Routine: task_set_special_port [kernel call] | |
750 | * Purpose: | |
751 | * Changes one of the task's special ports, | |
752 | * setting it to the supplied send right. | |
753 | * Conditions: | |
754 | * Nothing locked. If successful, consumes | |
755 | * the supplied send right. | |
756 | * Returns: | |
757 | * KERN_SUCCESS Changed the special port. | |
758 | * KERN_INVALID_ARGUMENT The task is null. | |
759 | * KERN_FAILURE The task/space is dead. | |
760 | * KERN_INVALID_ARGUMENT Invalid special port. | |
761 | */ | |
762 | ||
763 | kern_return_t | |
764 | task_set_special_port( | |
765 | task_t task, | |
766 | int which, | |
767 | ipc_port_t port) | |
768 | { | |
769 | ipc_port_t *whichp; | |
770 | ipc_port_t old; | |
771 | ||
772 | if (task == TASK_NULL) | |
773 | return KERN_INVALID_ARGUMENT; | |
774 | ||
775 | switch (which) { | |
776 | case TASK_KERNEL_PORT: | |
777 | whichp = &task->itk_sself; | |
778 | break; | |
779 | ||
780 | case TASK_HOST_PORT: | |
781 | whichp = &task->itk_host; | |
782 | break; | |
783 | ||
784 | case TASK_BOOTSTRAP_PORT: | |
785 | whichp = &task->itk_bootstrap; | |
786 | break; | |
787 | ||
788 | case TASK_WIRED_LEDGER_PORT: | |
789 | whichp = &task->wired_ledger_port; | |
790 | break; | |
791 | ||
792 | case TASK_PAGED_LEDGER_PORT: | |
793 | whichp = &task->paged_ledger_port; | |
794 | break; | |
795 | ||
796 | default: | |
797 | return KERN_INVALID_ARGUMENT; | |
798 | }/* switch */ | |
799 | ||
800 | itk_lock(task); | |
801 | if (task->itk_self == IP_NULL) { | |
802 | itk_unlock(task); | |
803 | return KERN_FAILURE; | |
804 | } | |
805 | ||
806 | old = *whichp; | |
807 | *whichp = port; | |
808 | itk_unlock(task); | |
809 | ||
810 | if (IP_VALID(old)) | |
811 | ipc_port_release_send(old); | |
812 | return KERN_SUCCESS; | |
813 | } | |
814 | ||
815 | ||
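Not from the original file: a small sketch of task_get_special_port()/task_set_special_port() from user space, fetching the bootstrap and host ports of the current task.

```c
#include <mach/mach.h>
#include <stdio.h>

void show_task_special_ports(void)
{
    mach_port_t bootstrap = MACH_PORT_NULL;
    mach_port_t host = MACH_PORT_NULL;

    /* Each call clones a send right from the task structure
     * (itk_bootstrap / itk_host in the kernel code above). */
    if (task_get_special_port(mach_task_self(), TASK_BOOTSTRAP_PORT,
                              &bootstrap) == KERN_SUCCESS)
        printf("bootstrap port: 0x%x\n", bootstrap);

    if (task_get_special_port(mach_task_self(), TASK_HOST_PORT,
                              &host) == KERN_SUCCESS)
        printf("host port: 0x%x\n", host);

    /* task_set_special_port(mach_task_self(), TASK_BOOTSTRAP_PORT, p)
     * installs a new right; as in the kernel routine above, the
     * previously stored right is released after the swap. */
}
```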
816 | /* | |
817 | * Routine: mach_ports_register [kernel call] | |
818 | * Purpose: | |
819 | * Stash a handful of port send rights in the task. | |
820 | * Child tasks will inherit these rights, but they | |
821 | * must use mach_ports_lookup to acquire them. | |
822 | * | |
823 | * The rights are supplied in a (wired) kalloc'd segment. | |
824 | * Rights which aren't supplied are assumed to be null. | |
825 | * Conditions: | |
826 | * Nothing locked. If successful, consumes | |
827 | * the supplied rights and memory. | |
828 | * Returns: | |
829 | * KERN_SUCCESS Stashed the port rights. | |
830 | * KERN_INVALID_ARGUMENT The task is null. | |
831 | * KERN_INVALID_ARGUMENT The task is dead. | |
832 | * KERN_INVALID_ARGUMENT Too many port rights supplied. | |
833 | */ | |
834 | ||
835 | kern_return_t | |
836 | mach_ports_register( | |
837 | task_t task, | |
838 | mach_port_array_t memory, | |
839 | mach_msg_type_number_t portsCnt) | |
840 | { | |
841 | ipc_port_t ports[TASK_PORT_REGISTER_MAX]; | |
842 | unsigned int i; | |
843 | ||
844 | if ((task == TASK_NULL) || | |
845 | (portsCnt > TASK_PORT_REGISTER_MAX)) | |
846 | return KERN_INVALID_ARGUMENT; | |
847 | ||
848 | /* | |
849 | * Pad the port rights with nulls. | |
850 | */ | |
851 | ||
852 | for (i = 0; i < portsCnt; i++) | |
853 | ports[i] = memory[i]; | |
854 | for (; i < TASK_PORT_REGISTER_MAX; i++) | |
855 | ports[i] = IP_NULL; | |
856 | ||
857 | itk_lock(task); | |
858 | if (task->itk_self == IP_NULL) { | |
859 | itk_unlock(task); | |
860 | return KERN_INVALID_ARGUMENT; | |
861 | } | |
862 | ||
863 | /* | |
864 | * Replace the old send rights with the new. | |
865 | * Release the old rights after unlocking. | |
866 | */ | |
867 | ||
868 | for (i = 0; i < TASK_PORT_REGISTER_MAX; i++) { | |
869 | ipc_port_t old; | |
870 | ||
871 | old = task->itk_registered[i]; | |
872 | task->itk_registered[i] = ports[i]; | |
873 | ports[i] = old; | |
874 | } | |
875 | ||
876 | itk_unlock(task); | |
877 | ||
878 | for (i = 0; i < TASK_PORT_REGISTER_MAX; i++) | |
879 | if (IP_VALID(ports[i])) | |
880 | ipc_port_release_send(ports[i]); | |
881 | ||
882 | /* | |
883 | * Now that the operation is known to be successful, | |
884 | * we can free the memory. | |
885 | */ | |
886 | ||
887 | if (portsCnt != 0) | |
888 | kfree(memory, | |
889 | (vm_size_t) (portsCnt * sizeof(mach_port_t))); | |
890 | ||
891 | return KERN_SUCCESS; | |
892 | } | |
893 | ||
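An illustrative caller, not part of this file; service_port is a hypothetical send right obtained elsewhere. At most TASK_PORT_REGISTER_MAX rights can be stashed, and slots that are not supplied behave as IP_NULL in the kernel routine above.

```c
#include <mach/mach.h>

/* "service_port" is a hypothetical send right obtained elsewhere. */
kern_return_t stash_port(mach_port_t service_port)
{
    mach_port_t set[1];

    set[0] = service_port;

    /* Registers the rights with the task so that child tasks inherit
     * them and mach_ports_lookup() can retrieve them later. */
    return mach_ports_register(mach_task_self(), set, 1);
}
```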
894 | /* | |
895 | * Routine: mach_ports_lookup [kernel call] | |
896 | * Purpose: | |
897 | * Retrieves (clones) the stashed port send rights. | |
898 | * Conditions: | |
899 | * Nothing locked. If successful, the caller gets | |
900 | * rights and memory. | |
901 | * Returns: | |
902 | * KERN_SUCCESS Retrieved the send rights. | |
903 | * KERN_INVALID_ARGUMENT The task is null. | |
904 | * KERN_INVALID_ARGUMENT The task is dead. | |
905 | * KERN_RESOURCE_SHORTAGE Couldn't allocate memory. | |
906 | */ | |
907 | ||
908 | kern_return_t | |
909 | mach_ports_lookup( | |
910 | task_t task, | |
911 | mach_port_array_t *portsp, | |
912 | mach_msg_type_number_t *portsCnt) | |
913 | { | |
914 | void *memory; | |
915 | vm_size_t size; | |
916 | ipc_port_t *ports; | |
917 | int i; | |
918 | ||
919 | if (task == TASK_NULL) | |
920 | return KERN_INVALID_ARGUMENT; | |
921 | ||
922 | size = (vm_size_t) (TASK_PORT_REGISTER_MAX * sizeof(ipc_port_t)); | |
923 | ||
924 | memory = kalloc(size); | |
925 | if (memory == 0) | |
926 | return KERN_RESOURCE_SHORTAGE; | |
927 | ||
928 | itk_lock(task); | |
929 | if (task->itk_self == IP_NULL) { | |
930 | itk_unlock(task); | |
931 | ||
932 | kfree(memory, size); | |
933 | return KERN_INVALID_ARGUMENT; | |
934 | } | |
935 | ||
936 | ports = (ipc_port_t *) memory; | |
937 | ||
938 | /* | |
939 | * Clone port rights. Because kalloc'd memory | |
940 | * is wired, we won't fault while holding the task lock. | |
941 | */ | |
942 | ||
943 | for (i = 0; i < TASK_PORT_REGISTER_MAX; i++) | |
944 | ports[i] = ipc_port_copy_send(task->itk_registered[i]); | |
945 | ||
946 | itk_unlock(task); | |
947 | ||
948 | *portsp = (mach_port_array_t) ports; | |
949 | *portsCnt = TASK_PORT_REGISTER_MAX; | |
950 | return KERN_SUCCESS; | |
951 | } | |
952 | ||
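A sketch of the matching lookup from user space (not in the original source). The returned array arrives as MIG out-of-line memory, so the usual pattern is to deallocate each valid right and then the array itself.

```c
#include <mach/mach.h>
#include <stdio.h>

void dump_registered_ports(void)
{
    mach_port_array_t ports = NULL;
    mach_msg_type_number_t count = 0;
    unsigned int i;

    /* Cloned send rights for the registered ports; unused slots come
     * back as MACH_PORT_NULL. */
    if (mach_ports_lookup(mach_task_self(), &ports, &count) != KERN_SUCCESS)
        return;

    for (i = 0; i < count; i++) {
        printf("registered[%u] = 0x%x\n", i, ports[i]);
        if (MACH_PORT_VALID(ports[i]))
            mach_port_deallocate(mach_task_self(), ports[i]);
    }

    vm_deallocate(mach_task_self(), (vm_address_t) ports,
                  count * sizeof(mach_port_t));
}
```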
953 | /* | |
954 | * Routine: convert_port_to_locked_task | |
955 | * Purpose: | |
956 | * Internal helper routine to convert from a port to a locked | |
957 | * task. Used by several routines that try to convert from a | |
958 | * task port to a reference on some task related object. | |
959 | * Conditions: | |
960 | * Nothing locked, blocking OK. | |
961 | */ | |
962 | task_t | |
963 | convert_port_to_locked_task(ipc_port_t port) | |
964 | { | |
965 | while (IP_VALID(port)) { | |
966 | task_t task; | |
967 | ||
968 | ip_lock(port); | |
969 | if (!ip_active(port) || (ip_kotype(port) != IKOT_TASK)) { | |
970 | ip_unlock(port); | |
971 | return TASK_NULL; | |
972 | } | |
973 | task = (task_t) port->ip_kobject; | |
974 | assert(task != TASK_NULL); | |
975 | ||
976 | /* | |
977 | * Normal lock ordering puts task_lock() before ip_lock(). | |
978 | * Attempt out-of-order locking here. | |
979 | */ | |
980 | if (task_lock_try(task)) { | |
981 | ip_unlock(port); | |
982 | return(task); | |
983 | } | |
984 | ||
985 | ip_unlock(port); | |
986 | mutex_pause(); | |
987 | } | |
988 | return TASK_NULL; | |
989 | } | |
990 | ||
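For readers unfamiliar with the trylock-and-pause idiom used above, here is a loose user-space analogy in pthreads, not part of the kernel source; object_lock stands in for ip_lock() and outer_lock for task_lock().

```c
#include <pthread.h>
#include <sched.h>

/* Analogy only: acquire outer_lock while holding object_lock without
 * violating the normal outer-before-inner lock order. */
int lock_out_of_order(pthread_mutex_t *object_lock,
                      pthread_mutex_t *outer_lock)
{
    for (;;) {
        pthread_mutex_lock(object_lock);
        if (pthread_mutex_trylock(outer_lock) == 0) {
            /* Success: return holding outer_lock, just as the kernel
             * routine returns with the task locked. */
            pthread_mutex_unlock(object_lock);
            return 0;
        }
        /* Blocking here could deadlock; drop, back off, and retry.
         * This is the role mutex_pause() plays above. */
        pthread_mutex_unlock(object_lock);
        sched_yield();
    }
}
```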
991 | /* | |
992 | * Routine: convert_port_to_task | |
993 | * Purpose: | |
994 | * Convert from a port to a task. | |
995 | * Doesn't consume the port ref; produces a task ref, | |
996 | * which may be null. | |
997 | * Conditions: | |
998 | * Nothing locked. | |
999 | */ | |
1000 | task_t | |
1001 | convert_port_to_task( | |
1002 | ipc_port_t port) | |
1003 | { | |
1004 | task_t task = TASK_NULL; | |
1005 | ||
1006 | if (IP_VALID(port)) { | |
1007 | ip_lock(port); | |
1008 | ||
1009 | if ( ip_active(port) && | |
1010 | ip_kotype(port) == IKOT_TASK ) { | |
1011 | task = (task_t)port->ip_kobject; | |
1012 | assert(task != TASK_NULL); | |
1013 | ||
1014 | task_reference_internal(task); | |
1015 | } | |
1016 | ||
1017 | ip_unlock(port); | |
1018 | } | |
1019 | ||
1020 | return (task); | |
1021 | } | |
1022 | ||
1023 | /* | |
1024 | * Routine: convert_port_to_space | |
1025 | * Purpose: | |
1026 | * Convert from a port to a space. | |
1027 | * Doesn't consume the port ref; produces a space ref, | |
1028 | * which may be null. | |
1029 | * Conditions: | |
1030 | * Nothing locked. | |
1031 | */ | |
1032 | ipc_space_t | |
1033 | convert_port_to_space( | |
1034 | ipc_port_t port) | |
1035 | { | |
1036 | ipc_space_t space; | |
1037 | task_t task; | |
1038 | ||
1039 | task = convert_port_to_locked_task(port); | |
1040 | ||
1041 | if (task == TASK_NULL) | |
1042 | return IPC_SPACE_NULL; | |
1043 | ||
1044 | if (!task->active) { | |
1045 | task_unlock(task); | |
1046 | return IPC_SPACE_NULL; | |
1047 | } | |
1048 | ||
1049 | space = task->itk_space; | |
1050 | is_reference(space); | |
1051 | task_unlock(task); | |
1052 | return (space); | |
1053 | } | |
1054 | ||
1055 | /* | |
1056 | * Routine: convert_port_to_map | |
1057 | * Purpose: | |
1058 | * Convert from a port to a map. | |
1059 | * Doesn't consume the port ref; produces a map ref, | |
1060 | * which may be null. | |
1061 | * Conditions: | |
1062 | * Nothing locked. | |
1063 | */ | |
1064 | ||
1065 | vm_map_t | |
1066 | convert_port_to_map( | |
1067 | ipc_port_t port) | |
1068 | { | |
1069 | task_t task; | |
1070 | vm_map_t map; | |
1071 | ||
1072 | task = convert_port_to_locked_task(port); | |
1073 | ||
1074 | if (task == TASK_NULL) | |
1075 | return VM_MAP_NULL; | |
1076 | ||
1077 | if (!task->active) { | |
1078 | task_unlock(task); | |
1079 | return VM_MAP_NULL; | |
1080 | } | |
1081 | ||
1082 | map = task->map; | |
1083 | vm_map_reference_swap(map); | |
1084 | task_unlock(task); | |
1085 | return map; | |
1086 | } | |
1087 | ||
1088 | ||
1089 | /* | |
1090 | * Routine: convert_port_to_thread | |
1091 | * Purpose: | |
1092 | * Convert from a port to a thread. | |
1093 | * Doesn't consume the port ref; produces a thread ref, | |
1094 | * which may be null. | |
1095 | * Conditions: | |
1096 | * Nothing locked. | |
1097 | */ | |
1098 | ||
1099 | thread_t | |
1100 | convert_port_to_thread( | |
1101 | ipc_port_t port) | |
1102 | { | |
1103 | thread_t thread = THREAD_NULL; | |
1104 | ||
1105 | if (IP_VALID(port)) { | |
1106 | ip_lock(port); | |
1107 | ||
1108 | if ( ip_active(port) && | |
1109 | ip_kotype(port) == IKOT_THREAD ) { | |
1110 | thread = (thread_t)port->ip_kobject; | |
1111 | assert(thread != THREAD_NULL); | |
1112 | ||
1113 | thread_reference_internal(thread); | |
1114 | } | |
1115 | ||
1116 | ip_unlock(port); | |
1117 | } | |
1118 | ||
1119 | return (thread); | |
1120 | } | |
1121 | ||
1122 | /* | |
1123 | * Routine: port_name_to_thread | |
1124 | * Purpose: | |
1125 | * Convert from a port name to a thread reference | |
1126 | * A name of MACH_PORT_NULL is valid for the null thread. | |
1127 | * Conditions: | |
1128 | * Nothing locked. | |
1129 | */ | |
1130 | thread_t | |
1131 | port_name_to_thread( | |
1132 | mach_port_name_t name) | |
1133 | { | |
1134 | thread_t thread = THREAD_NULL; | |
1135 | ipc_port_t kport; | |
1136 | ||
1137 | if (MACH_PORT_VALID(name)) { | |
1138 | if (ipc_object_copyin(current_space(), name, | |
1139 | MACH_MSG_TYPE_COPY_SEND, | |
1140 | (ipc_object_t *)&kport) != KERN_SUCCESS) | |
1141 | return (THREAD_NULL); | |
1142 | ||
1143 | thread = convert_port_to_thread(kport); | |
1144 | ||
1145 | if (IP_VALID(kport)) | |
1146 | ipc_port_release_send(kport); | |
1147 | } | |
1148 | ||
1149 | return (thread); | |
1150 | } | |
1151 | ||
1152 | task_t | |
1153 | port_name_to_task( | |
1154 | mach_port_name_t name) | |
1155 | { | |
1156 | ipc_port_t kern_port; | |
1157 | kern_return_t kr; | |
1158 | task_t task = TASK_NULL; | |
1159 | ||
1160 | if (MACH_PORT_VALID(name)) { | |
1161 | kr = ipc_object_copyin(current_space(), name, | |
1162 | MACH_MSG_TYPE_COPY_SEND, | |
1163 | (ipc_object_t *) &kern_port); | |
1164 | if (kr != KERN_SUCCESS) | |
1165 | return TASK_NULL; | |
1166 | ||
1167 | task = convert_port_to_task(kern_port); | |
1168 | ||
1169 | if (IP_VALID(kern_port)) | |
1170 | ipc_port_release_send(kern_port); | |
1171 | } | |
1172 | return task; | |
1173 | } | |
1174 | ||
1175 | /* | |
1176 | * Routine: convert_task_to_port | |
1177 | * Purpose: | |
1178 | * Convert from a task to a port. | |
1179 | * Consumes a task ref; produces a naked send right | |
1180 | * which may be invalid. | |
1181 | * Conditions: | |
1182 | * Nothing locked. | |
1183 | */ | |
1184 | ||
1185 | ipc_port_t | |
1186 | convert_task_to_port( | |
1187 | task_t task) | |
1188 | { | |
1189 | ipc_port_t port; | |
1190 | ||
1191 | itk_lock(task); | |
1192 | if (task->itk_self != IP_NULL) | |
1193 | port = ipc_port_make_send(task->itk_self); | |
1194 | else | |
1195 | port = IP_NULL; | |
1196 | itk_unlock(task); | |
1197 | ||
1198 | task_deallocate(task); | |
1199 | return port; | |
1200 | } | |
1201 | ||
1202 | /* | |
1203 | * Routine: convert_thread_to_port | |
1204 | * Purpose: | |
1205 | * Convert from a thread to a port. | |
1206 | * Consumes a thread ref; produces a naked send right | |
1207 | * which may be invalid. | |
1208 | * Conditions: | |
1209 | * Nothing locked. | |
1210 | */ | |
1211 | ||
1212 | ipc_port_t | |
1213 | convert_thread_to_port( | |
1214 | thread_t thread) | |
1215 | { | |
1216 | ipc_port_t port; | |
1217 | ||
1218 | thread_mtx_lock(thread); | |
1219 | ||
1220 | if (thread->ith_self != IP_NULL) | |
1221 | port = ipc_port_make_send(thread->ith_self); | |
1222 | else | |
1223 | port = IP_NULL; | |
1224 | ||
1225 | thread_mtx_unlock(thread); | |
1226 | ||
1227 | thread_deallocate(thread); | |
1228 | ||
1229 | return (port); | |
1230 | } | |
1231 | ||
1232 | /* | |
1233 | * Routine: space_deallocate | |
1234 | * Purpose: | |
1235 | * Deallocate a space ref produced by convert_port_to_space. | |
1236 | * Conditions: | |
1237 | * Nothing locked. | |
1238 | */ | |
1239 | ||
1240 | void | |
1241 | space_deallocate( | |
1242 | ipc_space_t space) | |
1243 | { | |
1244 | if (space != IS_NULL) | |
1245 | is_release(space); | |
1246 | } | |
1247 | ||
1248 | /* | |
1249 | * Routine: thread/task_set_exception_ports [kernel call] | |
1250 | * Purpose: | |
1251 | * Sets the thread/task exception port, flavor and | |
1252 | * behavior for the exception types specified by the mask. | |
1253 | * There will be one send right per exception per valid | |
1254 | * port. | |
1255 | * Conditions: | |
1256 | * Nothing locked. If successful, consumes | |
1257 | * the supplied send right. | |
1258 | * Returns: | |
1259 | * KERN_SUCCESS Changed the special port. | |
1260 | * KERN_INVALID_ARGUMENT The thread is null, | |
1261 | * Illegal mask bit set. | |
1262 | * Illegal exception behavior | |
1263 | * KERN_FAILURE The thread is dead. | |
1264 | */ | |
1265 | ||
1266 | kern_return_t | |
1267 | thread_set_exception_ports( | |
1268 | thread_t thread, | |
1269 | exception_mask_t exception_mask, | |
1270 | ipc_port_t new_port, | |
1271 | exception_behavior_t new_behavior, | |
1272 | thread_state_flavor_t new_flavor) | |
1273 | { | |
1274 | ipc_port_t old_port[EXC_TYPES_COUNT]; | |
1275 | register int i; | |
1276 | ||
1277 | if (thread == THREAD_NULL) | |
1278 | return (KERN_INVALID_ARGUMENT); | |
1279 | ||
1280 | if (exception_mask & ~EXC_MASK_ALL) | |
1281 | return (KERN_INVALID_ARGUMENT); | |
1282 | ||
1283 | if (IP_VALID(new_port)) { | |
1284 | switch (new_behavior) { | |
1285 | ||
1286 | case EXCEPTION_DEFAULT: | |
1287 | case EXCEPTION_STATE: | |
1288 | case EXCEPTION_STATE_IDENTITY: | |
1289 | break; | |
1290 | ||
1291 | default: | |
1292 | return (KERN_INVALID_ARGUMENT); | |
1293 | } | |
1294 | } | |
1295 | ||
1296 | /* | |
1297 | * Check the validity of the thread_state_flavor by calling the | |
1298 | * VALID_THREAD_STATE_FLAVOR architecture dependent macro defined in | |
1299 | * osfmk/mach/ARCHITECTURE/thread_status.h | |
1300 | */ | |
1301 | if (!VALID_THREAD_STATE_FLAVOR(new_flavor)) | |
1302 | return (KERN_INVALID_ARGUMENT); | |
1303 | ||
1304 | thread_mtx_lock(thread); | |
1305 | ||
1306 | if (!thread->active) { | |
1307 | thread_mtx_unlock(thread); | |
1308 | ||
1309 | return (KERN_FAILURE); | |
1310 | } | |
1311 | ||
1312 | for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) { | |
1313 | if (exception_mask & (1 << i)) { | |
1314 | old_port[i] = thread->exc_actions[i].port; | |
1315 | thread->exc_actions[i].port = ipc_port_copy_send(new_port); | |
1316 | thread->exc_actions[i].behavior = new_behavior; | |
1317 | thread->exc_actions[i].flavor = new_flavor; | |
1318 | } | |
1319 | else | |
1320 | old_port[i] = IP_NULL; | |
1321 | } | |
1322 | ||
1323 | thread_mtx_unlock(thread); | |
1324 | ||
1325 | for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) | |
1326 | if (IP_VALID(old_port[i])) | |
1327 | ipc_port_release_send(old_port[i]); | |
1328 | ||
1329 | if (IP_VALID(new_port)) /* consume send right */ | |
1330 | ipc_port_release_send(new_port); | |
1331 | ||
1332 | return (KERN_SUCCESS); | |
1333 | } | |
1334 | ||
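Not part of the original file: a hedged user-space sketch that installs an exception port on the current thread. The flavor constant is architecture dependent; MACHINE_THREAD_STATE is used here as a commonly valid choice.

```c
#include <mach/mach.h>

kern_return_t install_thread_exc_handler(void)
{
    mach_port_t thread = mach_thread_self();
    mach_port_t exc_port = MACH_PORT_NULL;
    kern_return_t kr;

    /* A receive right plus a send right the kernel will use to
     * deliver exception messages. */
    kr = mach_port_allocate(mach_task_self(),
                            MACH_PORT_RIGHT_RECEIVE, &exc_port);
    if (kr == KERN_SUCCESS)
        kr = mach_port_insert_right(mach_task_self(), exc_port, exc_port,
                                    MACH_MSG_TYPE_MAKE_SEND);

    if (kr == KERN_SUCCESS) {
        /* The kernel stores one send right per selected exception
         * type, as the routine above shows. */
        kr = thread_set_exception_ports(thread,
                                        EXC_MASK_BAD_ACCESS | EXC_MASK_ARITHMETIC,
                                        exc_port,
                                        EXCEPTION_DEFAULT,
                                        MACHINE_THREAD_STATE);
    }

    mach_port_deallocate(mach_task_self(), thread);
    return kr;
}
```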
1335 | kern_return_t | |
1336 | task_set_exception_ports( | |
1337 | task_t task, | |
1338 | exception_mask_t exception_mask, | |
1339 | ipc_port_t new_port, | |
1340 | exception_behavior_t new_behavior, | |
1341 | thread_state_flavor_t new_flavor) | |
1342 | { | |
1343 | ipc_port_t old_port[EXC_TYPES_COUNT]; | |
1344 | boolean_t privileged = current_task()->sec_token.val[0] == 0; | |
1345 | register int i; | |
1346 | ||
1347 | if (task == TASK_NULL) | |
1348 | return (KERN_INVALID_ARGUMENT); | |
1349 | ||
1350 | if (exception_mask & ~EXC_MASK_ALL) | |
1351 | return (KERN_INVALID_ARGUMENT); | |
1352 | ||
1353 | if (IP_VALID(new_port)) { | |
1354 | switch (new_behavior) { | |
1355 | ||
1356 | case EXCEPTION_DEFAULT: | |
1357 | case EXCEPTION_STATE: | |
1358 | case EXCEPTION_STATE_IDENTITY: | |
1359 | break; | |
1360 | ||
1361 | default: | |
1362 | return (KERN_INVALID_ARGUMENT); | |
1363 | } | |
1364 | } | |
1365 | ||
1366 | itk_lock(task); | |
1367 | ||
1368 | if (task->itk_self == IP_NULL) { | |
1369 | itk_unlock(task); | |
1370 | ||
1371 | return (KERN_FAILURE); | |
1372 | } | |
1373 | ||
1374 | for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) { | |
1375 | if (exception_mask & (1 << i)) { | |
1376 | old_port[i] = task->exc_actions[i].port; | |
1377 | task->exc_actions[i].port = | |
1378 | ipc_port_copy_send(new_port); | |
1379 | task->exc_actions[i].behavior = new_behavior; | |
1380 | task->exc_actions[i].flavor = new_flavor; | |
1381 | task->exc_actions[i].privileged = privileged; | |
1382 | } | |
1383 | else | |
1384 | old_port[i] = IP_NULL; | |
1385 | } | |
1386 | ||
1387 | itk_unlock(task); | |
1388 | ||
1389 | for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) | |
1390 | if (IP_VALID(old_port[i])) | |
1391 | ipc_port_release_send(old_port[i]); | |
1392 | ||
1393 | if (IP_VALID(new_port)) /* consume send right */ | |
1394 | ipc_port_release_send(new_port); | |
1395 | ||
1396 | return (KERN_SUCCESS); | |
1397 | } | |
1398 | ||
1399 | /* | |
1400 | * Routine: thread/task_swap_exception_ports [kernel call] | |
1401 | * Purpose: | |
1402 | * Sets the thread/task exception port, flavor and | |
1403 | * behavior for the exception types specified by the | |
1404 | * mask. | |
1405 | * | |
1406 | * The old ports, behaviors and flavors are returned. | |
1407 | * Count specifies the array sizes on input and | |
1408 | * the number of returned ports etc. on output. The | |
1409 | * arrays must be large enough to hold all the returned | |
1410 | * data, MIG returns an error otherwise. The masks | |
1411 | * array specifies the corresponding exception type(s). | |
1412 | * | |
1413 | * Conditions: | |
1414 | * Nothing locked. If successful, consumes | |
1415 | * the supplied send right. | |
1416 | * | |
1417 | * Returns up to [in] CountCnt elements. | |
1418 | * Returns: | |
1419 | * KERN_SUCCESS Changed the special port. | |
1420 | * KERN_INVALID_ARGUMENT The thread is null, | |
1421 | * Illegal mask bit set. | |
1422 | * Illegal exception behavior | |
1423 | * KERN_FAILURE The thread is dead. | |
1424 | */ | |
1425 | ||
1426 | kern_return_t | |
1427 | thread_swap_exception_ports( | |
1428 | thread_t thread, | |
1429 | exception_mask_t exception_mask, | |
1430 | ipc_port_t new_port, | |
1431 | exception_behavior_t new_behavior, | |
1432 | thread_state_flavor_t new_flavor, | |
1433 | exception_mask_array_t masks, | |
1434 | mach_msg_type_number_t *CountCnt, | |
1435 | exception_port_array_t ports, | |
1436 | exception_behavior_array_t behaviors, | |
1437 | thread_state_flavor_array_t flavors) | |
1438 | { | |
1439 | ipc_port_t old_port[EXC_TYPES_COUNT]; | |
1440 | unsigned int i, j, count; | |
1441 | ||
1442 | if (thread == THREAD_NULL) | |
1443 | return (KERN_INVALID_ARGUMENT); | |
1444 | ||
1445 | if (exception_mask & ~EXC_MASK_ALL) | |
1446 | return (KERN_INVALID_ARGUMENT); | |
1447 | ||
1448 | if (IP_VALID(new_port)) { | |
1449 | switch (new_behavior) { | |
1450 | ||
1451 | case EXCEPTION_DEFAULT: | |
1452 | case EXCEPTION_STATE: | |
1453 | case EXCEPTION_STATE_IDENTITY: | |
1454 | break; | |
1455 | ||
1456 | default: | |
1457 | return (KERN_INVALID_ARGUMENT); | |
1458 | } | |
1459 | } | |
1460 | ||
1461 | thread_mtx_lock(thread); | |
1462 | ||
1463 | if (!thread->active) { | |
1464 | thread_mtx_unlock(thread); | |
1465 | ||
1466 | return (KERN_FAILURE); | |
1467 | } | |
1468 | ||
1469 | count = 0; | |
1470 | ||
1471 | for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) { | |
1472 | if (exception_mask & (1 << i)) { | |
1473 | for (j = 0; j < count; ++j) { | |
1474 | /* | |
1475 | * search for an identical entry, if found | |
1476 | * set corresponding mask for this exception. | |
1477 | */ | |
1478 | if ( thread->exc_actions[i].port == ports[j] && | |
1479 | thread->exc_actions[i].behavior == behaviors[j] && | |
1480 | thread->exc_actions[i].flavor == flavors[j] ) { | |
1481 | masks[j] |= (1 << i); | |
1482 | break; | |
1483 | } | |
1484 | } | |
1485 | ||
1486 | if (j == count) { | |
1487 | masks[j] = (1 << i); | |
1488 | ports[j] = ipc_port_copy_send(thread->exc_actions[i].port); | |
1489 | ||
1490 | behaviors[j] = thread->exc_actions[i].behavior; | |
1491 | flavors[j] = thread->exc_actions[i].flavor; | |
1492 | ++count; | |
1493 | } | |
1494 | ||
1495 | old_port[i] = thread->exc_actions[i].port; | |
1496 | thread->exc_actions[i].port = ipc_port_copy_send(new_port); | |
1497 | thread->exc_actions[i].behavior = new_behavior; | |
1498 | thread->exc_actions[i].flavor = new_flavor; | |
1499 | if (count > *CountCnt) | |
1500 | break; | |
1501 | } | |
1502 | else | |
1503 | old_port[i] = IP_NULL; | |
1504 | } | |
1505 | ||
1506 | thread_mtx_unlock(thread); | |
1507 | ||
1508 | for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) | |
1509 | if (IP_VALID(old_port[i])) | |
1510 | ipc_port_release_send(old_port[i]); | |
1511 | ||
1512 | if (IP_VALID(new_port)) /* consume send right */ | |
1513 | ipc_port_release_send(new_port); | |
1514 | ||
1515 | *CountCnt = count; | |
1516 | ||
1517 | return (KERN_SUCCESS); | |
1518 | } | |
1519 | ||
1520 | kern_return_t | |
1521 | task_swap_exception_ports( | |
1522 | task_t task, | |
1523 | exception_mask_t exception_mask, | |
1524 | ipc_port_t new_port, | |
1525 | exception_behavior_t new_behavior, | |
1526 | thread_state_flavor_t new_flavor, | |
1527 | exception_mask_array_t masks, | |
1528 | mach_msg_type_number_t *CountCnt, | |
1529 | exception_port_array_t ports, | |
1530 | exception_behavior_array_t behaviors, | |
1531 | thread_state_flavor_array_t flavors) | |
1532 | { | |
1533 | ipc_port_t old_port[EXC_TYPES_COUNT]; | |
1534 | boolean_t privileged = current_task()->sec_token.val[0] == 0; | |
1535 | unsigned int i, j, count; | |
1536 | ||
1537 | if (task == TASK_NULL) | |
1538 | return (KERN_INVALID_ARGUMENT); | |
1539 | ||
1540 | if (exception_mask & ~EXC_MASK_ALL) | |
1541 | return (KERN_INVALID_ARGUMENT); | |
1542 | ||
1543 | if (IP_VALID(new_port)) { | |
1544 | switch (new_behavior) { | |
1545 | ||
1546 | case EXCEPTION_DEFAULT: | |
1547 | case EXCEPTION_STATE: | |
1548 | case EXCEPTION_STATE_IDENTITY: | |
1549 | break; | |
1550 | ||
1551 | default: | |
1552 | return (KERN_INVALID_ARGUMENT); | |
1553 | } | |
1554 | } | |
1555 | ||
1556 | itk_lock(task); | |
1557 | ||
1558 | if (task->itk_self == IP_NULL) { | |
1559 | itk_unlock(task); | |
1560 | ||
1561 | return (KERN_FAILURE); | |
1562 | } | |
1563 | ||
1564 | count = 0; | |
1565 | ||
1566 | for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) { | |
1567 | if (exception_mask & (1 << i)) { | |
1568 | for (j = 0; j < count; j++) { | |
1569 | /* | |
1570 | * search for an identical entry, if found | |
1571 | * set corresponding mask for this exception. | |
1572 | */ | |
1573 | if ( task->exc_actions[i].port == ports[j] && | |
1574 | task->exc_actions[i].behavior == behaviors[j] && | |
1575 | task->exc_actions[i].flavor == flavors[j] ) { | |
1576 | masks[j] |= (1 << i); | |
1577 | break; | |
1578 | } | |
1579 | } | |
1580 | ||
1581 | if (j == count) { | |
1582 | masks[j] = (1 << i); | |
1583 | ports[j] = ipc_port_copy_send(task->exc_actions[i].port); | |
1584 | behaviors[j] = task->exc_actions[i].behavior; | |
1585 | flavors[j] = task->exc_actions[i].flavor; | |
1586 | ++count; | |
1587 | } | |
1588 | ||
1589 | old_port[i] = task->exc_actions[i].port; | |
1590 | task->exc_actions[i].port = ipc_port_copy_send(new_port); | |
1591 | task->exc_actions[i].behavior = new_behavior; | |
1592 | task->exc_actions[i].flavor = new_flavor; | |
1593 | task->exc_actions[i].privileged = privileged; | |
1594 | if (count > *CountCnt) | |
1595 | break; | |
1596 | } | |
1597 | else | |
1598 | old_port[i] = IP_NULL; | |
1599 | } | |
1600 | ||
1601 | itk_unlock(task); | |
1602 | ||
1603 | for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) | |
1604 | if (IP_VALID(old_port[i])) | |
1605 | ipc_port_release_send(old_port[i]); | |
1606 | ||
1607 | if (IP_VALID(new_port)) /* consume send right */ | |
1608 | ipc_port_release_send(new_port); | |
1609 | ||
1610 | *CountCnt = count; | |
1611 | ||
1612 | return (KERN_SUCCESS); | |
1613 | } | |
1614 | ||
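An illustrative caller (not from this file) that swaps in a task-wide handler while capturing the displaced configuration; exc_port is a hypothetical right prepared as in the earlier sketch.

```c
#include <mach/mach.h>

/* "exc_port" is a hypothetical handler right (see the earlier sketch). */
kern_return_t swap_task_handlers(mach_port_t exc_port)
{
    exception_mask_t       masks[EXC_TYPES_COUNT];
    mach_port_t            old_ports[EXC_TYPES_COUNT];
    exception_behavior_t   old_behaviors[EXC_TYPES_COUNT];
    thread_state_flavor_t  old_flavors[EXC_TYPES_COUNT];
    mach_msg_type_number_t count = EXC_TYPES_COUNT;

    /* "count" is the array capacity on input and the number of
     * distinct (port, behavior, flavor) entries on output. */
    return task_swap_exception_ports(mach_task_self(),
                                     EXC_MASK_BAD_ACCESS,
                                     exc_port,
                                     EXCEPTION_DEFAULT,
                                     MACHINE_THREAD_STATE,
                                     masks, &count,
                                     old_ports, old_behaviors,
                                     old_flavors);
}
```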
1615 | /* | |
1616 | * Routine: thread/task_get_exception_ports [kernel call] | |
1617 | * Purpose: | |
1618 | * Clones a send right for each of the thread/task's exception | |
1619 | * ports specified in the mask and returns the behaviour | |
1620 | * and flavor of said port. | |
1621 | * | |
1622 | * Returns up to [in] CountCnt elements. | |
1623 | * | |
1624 | * Conditions: | |
1625 | * Nothing locked. | |
1626 | * Returns: | |
1627 | * KERN_SUCCESS Extracted a send right. | |
1628 | * KERN_INVALID_ARGUMENT The thread is null, | |
1629 | * Invalid special port, | |
1630 | * Illegal mask bit set. | |
1631 | * KERN_FAILURE The thread is dead. | |
1632 | */ | |
1633 | ||
1634 | kern_return_t | |
1635 | thread_get_exception_ports( | |
1636 | thread_t thread, | |
1637 | exception_mask_t exception_mask, | |
1638 | exception_mask_array_t masks, | |
1639 | mach_msg_type_number_t *CountCnt, | |
1640 | exception_port_array_t ports, | |
1641 | exception_behavior_array_t behaviors, | |
1642 | thread_state_flavor_array_t flavors) | |
1643 | { | |
1644 | unsigned int i, j, count; | |
1645 | ||
1646 | if (thread == THREAD_NULL) | |
1647 | return (KERN_INVALID_ARGUMENT); | |
1648 | ||
1649 | if (exception_mask & ~EXC_MASK_ALL) | |
1650 | return (KERN_INVALID_ARGUMENT); | |
1651 | ||
1652 | thread_mtx_lock(thread); | |
1653 | ||
1654 | if (!thread->active) { | |
1655 | thread_mtx_unlock(thread); | |
1656 | ||
1657 | return (KERN_FAILURE); | |
1658 | } | |
1659 | ||
1660 | count = 0; | |
1661 | ||
1662 | for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) { | |
1663 | if (exception_mask & (1 << i)) { | |
1664 | for (j = 0; j < count; ++j) { | |
1665 | /* | |
1666 | * search for an identical entry, if found | |
1667 | * set corresponding mask for this exception. | |
1668 | */ | |
1669 | if ( thread->exc_actions[i].port == ports[j] && | |
1670 | thread->exc_actions[i].behavior == behaviors[j] && | |
1671 | thread->exc_actions[i].flavor == flavors[j] ) { | |
1672 | masks[j] |= (1 << i); | |
1673 | break; | |
1674 | } | |
1675 | } | |
1676 | ||
1677 | if (j == count) { | |
1678 | masks[j] = (1 << i); | |
1679 | ports[j] = ipc_port_copy_send(thread->exc_actions[i].port); | |
1680 | behaviors[j] = thread->exc_actions[i].behavior; | |
1681 | flavors[j] = thread->exc_actions[i].flavor; | |
1682 | ++count; | |
1683 | if (count >= *CountCnt) | |
1684 | break; | |
1685 | } | |
1686 | } | |
1687 | } | |
1688 | ||
1689 | thread_mtx_unlock(thread); | |
1690 | ||
1691 | *CountCnt = count; | |
1692 | ||
1693 | return (KERN_SUCCESS); | |
1694 | } | |
1695 | ||
1696 | kern_return_t | |
1697 | task_get_exception_ports( | |
1698 | task_t task, | |
1699 | exception_mask_t exception_mask, | |
1700 | exception_mask_array_t masks, | |
1701 | mach_msg_type_number_t *CountCnt, | |
1702 | exception_port_array_t ports, | |
1703 | exception_behavior_array_t behaviors, | |
1704 | thread_state_flavor_array_t flavors) | |
1705 | { | |
1706 | unsigned int i, j, count; | |
1707 | ||
1708 | if (task == TASK_NULL) | |
1709 | return (KERN_INVALID_ARGUMENT); | |
1710 | ||
1711 | if (exception_mask & ~EXC_MASK_ALL) | |
1712 | return (KERN_INVALID_ARGUMENT); | |
1713 | ||
1714 | itk_lock(task); | |
1715 | ||
1716 | if (task->itk_self == IP_NULL) { | |
1717 | itk_unlock(task); | |
1718 | ||
1719 | return (KERN_FAILURE); | |
1720 | } | |
1721 | ||
1722 | count = 0; | |
1723 | ||
1724 | for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) { | |
1725 | if (exception_mask & (1 << i)) { | |
1726 | for (j = 0; j < count; ++j) { | |
1727 | /* | |
1728 | * search for an identical entry, if found | |
1729 | * set corresponding mask for this exception. | |
1730 | */ | |
1731 | if ( task->exc_actions[i].port == ports[j] && | |
1732 | task->exc_actions[i].behavior == behaviors[j] && | |
1733 | task->exc_actions[i].flavor == flavors[j] ) { | |
1734 | masks[j] |= (1 << i); | |
1735 | break; | |
1736 | } | |
1737 | } | |
1738 | ||
1739 | if (j == count) { | |
1740 | masks[j] = (1 << i); | |
1741 | ports[j] = ipc_port_copy_send(task->exc_actions[i].port); | |
1742 | behaviors[j] = task->exc_actions[i].behavior; | |
1743 | flavors[j] = task->exc_actions[i].flavor; | |
1744 | ++count; | |
1745 | if (count > *CountCnt) | |
1746 | break; | |
1747 | } | |
1748 | } | |
1749 | } | |
1750 | ||
1751 | itk_unlock(task); | |
1752 | ||
1753 | *CountCnt = count; | |
1754 | ||
1755 | return (KERN_SUCCESS); | |
1756 | } |
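Finally, a read-only sketch (not in the original source) that enumerates the current task's exception ports with task_get_exception_ports(); identical entries are coalesced by the kernel, so the returned count is normally small.

```c
#include <mach/mach.h>
#include <stdio.h>

void dump_task_exception_ports(void)
{
    exception_mask_t       masks[EXC_TYPES_COUNT];
    mach_port_t            ports[EXC_TYPES_COUNT];
    exception_behavior_t   behaviors[EXC_TYPES_COUNT];
    thread_state_flavor_t  flavors[EXC_TYPES_COUNT];
    mach_msg_type_number_t count = EXC_TYPES_COUNT;
    unsigned int i;

    if (task_get_exception_ports(mach_task_self(), EXC_MASK_ALL,
                                 masks, &count, ports,
                                 behaviors, flavors) != KERN_SUCCESS)
        return;

    for (i = 0; i < count; i++)
        printf("mask 0x%x -> port 0x%x, behavior %d, flavor %d\n",
               masks[i], ports[i], behaviors[i], flavors[i]);
}
```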