/*
 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */

#include <mach/mach_types.h>
#include <mach/boolean.h>
#include <mach/kern_return.h>
#include <mach/message.h>
#include <mach/port.h>
#include <mach/mig_errors.h>
#include <mach/task.h>
#include <mach/thread_status.h>
#include <mach/exception_types.h>
#include <mach/exc.h>
#include <mach/mach_exc.h>
#include <ipc/port.h>
#include <ipc/ipc_entry.h>
#include <ipc/ipc_object.h>
#include <ipc/ipc_notify.h>
#include <ipc/ipc_space.h>
#include <ipc/ipc_pset.h>
#include <ipc/ipc_machdep.h>
#include <kern/counters.h>
#include <kern/ipc_tt.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/processor.h>
#include <kern/sched.h>
#include <kern/sched_prim.h>
#include <kern/host.h>
#include <kern/misc_protos.h>
#include <security/mac_mach_internal.h>
#include <string.h>
#include <pexpert/pexpert.h>

extern int panic_on_exception_triage;

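/*
 * Exception upcall counters, one per (thread/task, behavior) pair.
 * Only the c_thr_* counters are bumped in this file (in exception_deliver);
 * the c_tsk_* counters are declared but not updated here.
 */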
unsigned long c_thr_exc_raise = 0;
unsigned long c_thr_exc_raise_state = 0;
unsigned long c_thr_exc_raise_state_id = 0;
unsigned long c_tsk_exc_raise = 0;
unsigned long c_tsk_exc_raise_state = 0;
unsigned long c_tsk_exc_raise_state_id = 0;

/* forward declarations */
kern_return_t exception_deliver(
	thread_t		thread,
	exception_type_t	exception,
	mach_exception_data_t	code,
	mach_msg_type_number_t	codeCnt,
	struct exception_action *excp,
	lck_mtx_t		*mutex);

static kern_return_t
check_exc_receiver_dependency(
	exception_type_t	exception,
	struct exception_action	*excp,
	lck_mtx_t		*mutex);

#ifdef MACH_BSD
kern_return_t bsd_exception(
	exception_type_t	exception,
	mach_exception_data_t	code,
	mach_msg_type_number_t	codeCnt);
#endif /* MACH_BSD */

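/*
 * Illustrative sketch (not part of this file, names are placeholders): the
 * exception servers targeted by the upcalls below come into existence when
 * user space registers a port for a set of exception types, for example:
 *
 *	mach_port_t exc_port;	// receive right owned by the handler
 *	mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_RECEIVE, &exc_port);
 *	mach_port_insert_right(mach_task_self(), exc_port, exc_port,
 *	    MACH_MSG_TYPE_MAKE_SEND);
 *	task_set_exception_ports(mach_task_self(), EXC_MASK_BAD_ACCESS,
 *	    exc_port, EXCEPTION_DEFAULT | MACH_EXCEPTION_CODES,
 *	    THREAD_STATE_NONE);
 */
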
/*
 *	Routine:	exception_deliver
 *	Purpose:
 *		Make an upcall to the exception server provided.
 *	Conditions:
 *		Nothing locked and no resources held.
 *		Called from an exception context, so
 *		thread_exception_return and thread_kdb_return
 *		are possible.
 *	Returns:
 *		KERN_SUCCESS if the exception was handled
 */
kern_return_t
exception_deliver(
	thread_t		thread,
	exception_type_t	exception,
	mach_exception_data_t	code,
	mach_msg_type_number_t	codeCnt,
	struct exception_action *excp,
	lck_mtx_t		*mutex)
{
	ipc_port_t		exc_port = IPC_PORT_NULL;
	exception_data_type_t	small_code[EXCEPTION_CODE_MAX];
	int			code64;
	int			behavior;
	int			flavor;
	kern_return_t		kr;
	task_t			task;
	ipc_port_t		thread_port = IPC_PORT_NULL, task_port = IPC_PORT_NULL;

	/*
	 *  Save work if we are terminating.
	 *  Just go back to our AST handler.
	 */
	if (!thread->active && !thread->inspection)
		return KERN_SUCCESS;

	/*
	 * If there are no exception actions defined for this entity,
	 * we can't deliver here.
	 */
	if (excp == NULL)
		return KERN_FAILURE;

	assert(exception < EXC_TYPES_COUNT);
	if (exception >= EXC_TYPES_COUNT)
		return KERN_FAILURE;

	excp = &excp[exception];

	/*
	 * Snapshot the exception action data under lock for consistency.
	 * Hold a reference to the port over the exception_raise_* calls
	 * so it can't be destroyed.  This seems like overkill, but keeps
	 * the port from disappearing between now and when
	 * ipc_object_copyin_from_kernel is finally called.
	 */
	lck_mtx_lock(mutex);
	exc_port = excp->port;
	if (!IP_VALID(exc_port)) {
		lck_mtx_unlock(mutex);
		return KERN_FAILURE;
	}
	ip_lock(exc_port);
	if (!ip_active(exc_port)) {
		ip_unlock(exc_port);
		lck_mtx_unlock(mutex);
		return KERN_FAILURE;
	}
	ip_reference(exc_port);
	exc_port->ip_srights++;
	ip_unlock(exc_port);

	flavor = excp->flavor;
	behavior = excp->behavior;
	lck_mtx_unlock(mutex);

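	/*
	 * MACH_EXCEPTION_CODES is OR'd into the registered behavior when the
	 * handler wants 64-bit exception codes; strip it so 'behavior' holds
	 * only the base EXCEPTION_* value tested in the switch below.
	 */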
	code64 = (behavior & MACH_EXCEPTION_CODES);
	behavior &= ~MACH_EXCEPTION_CODES;

	if (!code64) {
		small_code[0] = CAST_DOWN_EXPLICIT(exception_data_type_t, code[0]);
		small_code[1] = CAST_DOWN_EXPLICIT(exception_data_type_t, code[1]);
	}

	task = thread->task;

#if CONFIG_MACF
	/*
	 * Now is a reasonably good time to check whether the exception
	 * action is permitted for this process, because past this point
	 * we will almost certainly send the message.  As with other
	 * failures, exception_triage_thread will move on to the next
	 * level of handler.
	 */
	if (mac_exc_action_check_exception_send(task, excp) != 0) {
		kr = KERN_FAILURE;
		goto out_release_right;
	}
#endif

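	/*
	 * The identity-carrying behaviors need send rights for the task and
	 * thread ports.  When raising on behalf of another thread (or for a
	 * corpse notification) take explicit references and convert them to
	 * ports; otherwise the fast self-port lookups suffice.
	 */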
	if (behavior != EXCEPTION_STATE) {
		if (thread != current_thread() || exception == EXC_CORPSE_NOTIFY) {

			task_reference(task);
			task_port = convert_task_to_port(task);
			/* task ref consumed */
			thread_reference(thread);
			thread_port = convert_thread_to_port(thread);
			/* thread ref consumed */
		}
		else {
			task_port = retrieve_task_self_fast(thread->task);
			thread_port = retrieve_thread_self_fast(thread);
		}
	}

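	/*
	 * Dispatch on the registered behavior.  Each arm calls the matching
	 * MIG routine: *_raise_state sends only thread state, *_raise sends
	 * the thread/task identity ports, and *_raise_state_identity sends
	 * both.  The mach_* variants carry 64-bit codes.
	 */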
	switch (behavior) {
	case EXCEPTION_STATE: {
		mach_msg_type_number_t state_cnt;
		thread_state_data_t state;

		c_thr_exc_raise_state++;
		state_cnt = _MachineStateCount[flavor];
		kr = thread_getstatus(thread, flavor,
				      (thread_state_t)state,
				      &state_cnt);
		if (kr == KERN_SUCCESS) {
			if (code64) {
				kr = mach_exception_raise_state(exc_port,
						exception,
						code,
						codeCnt,
						&flavor,
						state, state_cnt,
						state, &state_cnt);
			} else {
				kr = exception_raise_state(exc_port, exception,
						small_code,
						codeCnt,
						&flavor,
						state, state_cnt,
						state, &state_cnt);
			}
			if (kr == KERN_SUCCESS) {
				if (exception != EXC_CORPSE_NOTIFY)
					kr = thread_setstatus(thread, flavor,
							(thread_state_t)state,
							state_cnt);
				goto out_release_right;
			}

		}

		goto out_release_right;
	}

	case EXCEPTION_DEFAULT:
		c_thr_exc_raise++;
		if (code64) {
			kr = mach_exception_raise(exc_port,
					thread_port,
					task_port,
					exception,
					code,
					codeCnt);
		} else {
			kr = exception_raise(exc_port,
					thread_port,
					task_port,
					exception,
					small_code,
					codeCnt);
		}

		goto out_release_right;

	case EXCEPTION_STATE_IDENTITY: {
		mach_msg_type_number_t state_cnt;
		thread_state_data_t state;

		c_thr_exc_raise_state_id++;
		state_cnt = _MachineStateCount[flavor];
		kr = thread_getstatus(thread, flavor,
				      (thread_state_t)state,
				      &state_cnt);
		if (kr == KERN_SUCCESS) {
			if (code64) {
				kr = mach_exception_raise_state_identity(
						exc_port,
						thread_port,
						task_port,
						exception,
						code,
						codeCnt,
						&flavor,
						state, state_cnt,
						state, &state_cnt);
			} else {
				kr = exception_raise_state_identity(exc_port,
						thread_port,
						task_port,
						exception,
						small_code,
						codeCnt,
						&flavor,
						state, state_cnt,
						state, &state_cnt);
			}

			if (kr == KERN_SUCCESS) {
				if (exception != EXC_CORPSE_NOTIFY)
					kr = thread_setstatus(thread, flavor,
							(thread_state_t)state,
							state_cnt);
				goto out_release_right;
			}

		}

		goto out_release_right;
	}

	default:
		panic("bad exception behavior!");
		return KERN_FAILURE;
	} /* switch */

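	/*
	 * Common exit: drop the send rights taken above (the explicit
	 * exc_port right and, for the identity behaviors, the task and
	 * thread ports) and return the MIG result to the caller.
	 */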
out_release_right:

	if (task_port) {
		ipc_port_release_send(task_port);
	}

	if (thread_port) {
		ipc_port_release_send(thread_port);
	}

	if (exc_port) {
		ipc_port_release_send(exc_port);
	}

	return kr;
}

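/*
 * Sketch of the user-space counterpart (not part of this file; names are
 * illustrative): a handler registered with EXCEPTION_DEFAULT |
 * MACH_EXCEPTION_CODES services the upcall above by implementing the
 * MIG routine below and running a mach_msg_server() loop on its port:
 *
 *	kern_return_t
 *	catch_mach_exception_raise(mach_port_t exception_port,
 *	    mach_port_t thread, mach_port_t task,
 *	    exception_type_t exception,
 *	    mach_exception_data_t code, mach_msg_type_number_t codeCnt);
 */
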
/*
 *	Routine:	check_exc_receiver_dependency
 *	Purpose:
 *		Verify that the port destined for receiving this exception is not
 *		on the current task.  That situation would hang the kernel,
 *		primarily for EXC_CRASH.  Note: if the port is transferred
 *		between the check and the delivery, a deadlock may still happen.
 *
 *	Conditions:
 *		Nothing locked and no resources held.
 *		Called from an exception context.
 *	Returns:
 *		KERN_SUCCESS if it is OK to send the exception message.
 */
kern_return_t
check_exc_receiver_dependency(
	exception_type_t	exception,
	struct exception_action	*excp,
	lck_mtx_t		*mutex)
{
	kern_return_t retval = KERN_SUCCESS;

	if (excp == NULL || exception != EXC_CRASH)
		return retval;

	task_t task = current_task();
	lck_mtx_lock(mutex);
	ipc_port_t xport = excp[exception].port;
	if (IP_VALID(xport)
	    && ip_active(xport)
	    && task->itk_space == xport->ip_receiver)
		retval = KERN_FAILURE;
	lck_mtx_unlock(mutex);
	return retval;
}


/*
 *	Routine:	exception_triage_thread
 *	Purpose:
 *		The thread caught an exception.
 *		We make an up-call to the thread's exception server.
 *	Conditions:
 *		Nothing locked and no resources held.
 *		Called from an exception context, so
 *		thread_exception_return and thread_kdb_return
 *		are possible.
 *	Returns:
 *		KERN_SUCCESS if the exception is handled by any of the handlers.
 */
kern_return_t
exception_triage_thread(
	exception_type_t	exception,
	mach_exception_data_t	code,
	mach_msg_type_number_t	codeCnt,
	thread_t		thread)
{
	task_t			task;
	host_priv_t		host_priv;
	lck_mtx_t		*mutex;
	kern_return_t		kr = KERN_FAILURE;

	assert(exception != EXC_RPC_ALERT);

	/*
	 * If this behavior has been requested by the kernel
	 * (due to the boot environment), we should panic if we
	 * enter this function.  This is intended as a debugging
	 * aid; it should allow us to debug why we caught an
	 * exception in environments where debugging is especially
	 * difficult.
	 */
	if (panic_on_exception_triage) {
		panic("called exception_triage when it was forbidden by the boot environment");
	}

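	/*
	 * Try each level of handler in turn: thread, then task, then host.
	 * Delivery stops at the first level that handles the exception
	 * (KERN_SUCCESS) or whose handler port has died (MACH_RCV_PORT_DIED).
	 */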
	/*
	 * Try to raise the exception at the activation level.
	 */
	mutex = &thread->mutex;
	if (KERN_SUCCESS == check_exc_receiver_dependency(exception, thread->exc_actions, mutex))
	{
		kr = exception_deliver(thread, exception, code, codeCnt, thread->exc_actions, mutex);
		if (kr == KERN_SUCCESS || kr == MACH_RCV_PORT_DIED)
			goto out;
	}

	/*
	 * Maybe the task level will handle it.
	 */
	task = thread->task;
	mutex = &task->itk_lock_data;
	if (KERN_SUCCESS == check_exc_receiver_dependency(exception, task->exc_actions, mutex))
	{
		kr = exception_deliver(thread, exception, code, codeCnt, task->exc_actions, mutex);
		if (kr == KERN_SUCCESS || kr == MACH_RCV_PORT_DIED)
			goto out;
	}

	/*
	 * How about at the host level?
	 */
	host_priv = host_priv_self();
	mutex = &host_priv->lock;

	if (KERN_SUCCESS == check_exc_receiver_dependency(exception, host_priv->exc_actions, mutex))
	{
		kr = exception_deliver(thread, exception, code, codeCnt, host_priv->exc_actions, mutex);
		if (kr == KERN_SUCCESS || kr == MACH_RCV_PORT_DIED)
			goto out;
	}

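	/*
	 * For ordinary exceptions the caller does not regain control here:
	 * thread_exception_return() resumes the thread in user space.  The
	 * EXC_CRASH/EXC_RESOURCE/EXC_GUARD/EXC_CORPSE_NOTIFY cases fall
	 * through so the caller can inspect kr.
	 */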
out:
	if ((exception != EXC_CRASH) && (exception != EXC_RESOURCE) &&
	    (exception != EXC_GUARD) && (exception != EXC_CORPSE_NOTIFY))
		thread_exception_return();
	return kr;
}

/*
 *	Routine:	exception_triage
 *	Purpose:
 *		The current thread caught an exception.
 *		We make an up-call to the thread's exception server.
 *	Conditions:
 *		Nothing locked and no resources held.
 *		Called from an exception context, so
 *		thread_exception_return and thread_kdb_return
 *		are possible.
 *	Returns:
 *		KERN_SUCCESS if the exception is handled by any of the handlers.
 */
kern_return_t
exception_triage(
	exception_type_t	exception,
	mach_exception_data_t	code,
	mach_msg_type_number_t	codeCnt)
{
	thread_t thread = current_thread();
	return exception_triage_thread(exception, code, codeCnt, thread);
}

kern_return_t
bsd_exception(
	exception_type_t	exception,
	mach_exception_data_t	code,
	mach_msg_type_number_t	codeCnt)
{
	task_t			task;
	lck_mtx_t		*mutex;
	thread_t		self = current_thread();
	kern_return_t		kr;

	/*
	 * Maybe the task level will handle it.
	 */
	task = current_task();
	mutex = &task->itk_lock_data;

	kr = exception_deliver(self, exception, code, codeCnt, task->exc_actions, mutex);

	if (kr == KERN_SUCCESS || kr == MACH_RCV_PORT_DIED)
		return(KERN_SUCCESS);
	return(KERN_FAILURE);
}


/*
 * Raise an exception on a task.
 * This should tell launchd to launch Crash Reporter for this task.
 */
kern_return_t task_exception_notify(exception_type_t exception,
	mach_exception_data_type_t exccode, mach_exception_data_type_t excsubcode)
{
	mach_exception_data_type_t	code[EXCEPTION_CODE_MAX];
	wait_interrupt_t		wsave;
	kern_return_t			kr = KERN_SUCCESS;

	code[0] = exccode;
	code[1] = excsubcode;

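	/*
	 * Run the triage with interruption disabled (THREAD_UNINT) so the
	 * wait for the handler's reply is not interruptible; the previous
	 * interrupt level is restored afterwards.
	 */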
	wsave = thread_interrupt_level(THREAD_UNINT);
	kr = exception_triage(exception, code, EXCEPTION_CODE_MAX);
	(void) thread_interrupt_level(wsave);
	return kr;
}


/*
 *	Handle interface for special performance monitoring.
 *	This is a special case of the host exception handler.
 */
kern_return_t sys_perf_notify(thread_t thread, int pid)
{
	host_priv_t		hostp;
	ipc_port_t		xport;
	wait_interrupt_t	wsave;
	kern_return_t		ret;

	hostp = host_priv_self();	/* Get the host privileged ports */
	mach_exception_data_type_t code[EXCEPTION_CODE_MAX];
	code[0] = 0xFF000001;		/* Set terminate code */
	code[1] = pid;			/* Pass out the pid */

	struct task *task = thread->task;
	xport = hostp->exc_actions[EXC_RPC_ALERT].port;

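	/*
	 * EXC_RPC_ALERT is reserved for this path: exception_triage_thread
	 * asserts it never sees that exception type, so the notification is
	 * delivered only through the host-level handler registered here.
	 */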
	/* Make sure we're not catching our own exception */
	if (!IP_VALID(xport) ||
	    !ip_active(xport) ||
	    task->itk_space == xport->data.receiver) {

		return(KERN_FAILURE);
	}

	wsave = thread_interrupt_level(THREAD_UNINT);
	ret = exception_deliver(
		thread,
		EXC_RPC_ALERT,
		code,
		2,
		hostp->exc_actions,
		&hostp->lock);
	(void)thread_interrupt_level(wsave);

	return(ret);
}