X-Git-Url: https://git.saurik.com/apple/xnu.git/blobdiff_plain/6601e61aa18bf4f09af135ff61fc7f4771d23b06..d9a64523371fa019c4575bb400cbbc3a50ac9903:/osfmk/kern/exception.c

diff --git a/osfmk/kern/exception.c b/osfmk/kern/exception.c
index 67abd9fbf..9a67b727b 100644
--- a/osfmk/kern/exception.c
+++ b/osfmk/kern/exception.c
@@ -1,23 +1,29 @@
 /*
  * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
  *
- * @APPLE_LICENSE_HEADER_START@
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
  *
- * The contents of this file constitute Original Code as defined in and
- * are subject to the Apple Public Source License Version 1.1 (the
- * "License").  You may not use this file except in compliance with the
- * License.  Please obtain a copy of the License at
- * http://www.apple.com/publicsource and read it before using this file.
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. The rights granted to you under the License
+ * may not be used to create, or enable the creation or redistribution of,
+ * unlawful or unlicensed copies of an Apple operating system, or to
+ * circumvent, violate, or enable the circumvention or violation of, any
+ * terms of an Apple operating system software license agreement.
  *
- * This Original Code and all software distributed under the License are
- * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this file.
+ *
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
- * License for the specific language governing rights and limitations
- * under the License.
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
  *
- * @APPLE_LICENSE_HEADER_END@
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
  */
 /*
  * @OSF_COPYRIGHT@
@@ -50,8 +56,6 @@
 /*
  */
 
-#include 
-
 #include 
 #include 
 #include 
@@ -61,6 +65,9 @@
 #include 
 #include 
 #include 
+#include 
+#include 
+
 #include 
 #include 
 #include 
@@ -68,6 +75,7 @@
 #include 
 #include 
 #include 
+
 #include 
 #include 
 #include 
@@ -77,24 +85,14 @@
 #include 
 #include 
 #include 
-#include 
-#include 
-
-#if     MACH_KDB
-#include 
-#endif  /* MACH_KDB */
-
-#if     MACH_KDB
+#include 
 
-#include 
+#include 
+#include 
 
-#if iPSC386 || iPSC860
-boolean_t debug_user_with_kdb = TRUE;
-#else
-boolean_t debug_user_with_kdb = FALSE;
-#endif
+#include 
 
-#endif  /* MACH_KDB */
+extern int panic_on_exception_triage;
 
 unsigned long c_thr_exc_raise = 0;
 unsigned long c_thr_exc_raise_state = 0;
@@ -104,17 +102,24 @@ unsigned long c_tsk_exc_raise_state = 0;
 unsigned long c_tsk_exc_raise_state_id = 0;
 
 /* forward declarations */
-void exception_deliver(
+kern_return_t exception_deliver(
+    thread_t        thread,
     exception_type_t    exception,
-    exception_data_t    code,
+    mach_exception_data_t    code,
     mach_msg_type_number_t  codeCnt,
     struct exception_action *excp,
-    mutex_t            *mutex);
+    lck_mtx_t        *mutex);
+
+static kern_return_t
+check_exc_receiver_dependency(
+    exception_type_t    exception,
+    struct exception_action *excp,
+    lck_mtx_t        *mutex);
 
 #ifdef MACH_BSD
 kern_return_t bsd_exception(
     exception_type_t    exception,
-    exception_data_t    code,
+    mach_exception_data_t    code,
     mach_msg_type_number_t  codeCnt);
 #endif /* MACH_BSD */
 
@@ -128,28 +133,45 @@ kern_return_t bsd_exception(
  *        thread_exception_return and thread_kdb_return
  *        are possible.
  *    Returns:
- *        If the exception was not handled by this handler
+ *        KERN_SUCCESS if the exception was handled
  */
-void
+kern_return_t
 exception_deliver(
+    thread_t        thread,
     exception_type_t    exception,
-    exception_data_t    code,
+    mach_exception_data_t    code,
     mach_msg_type_number_t  codeCnt,
     struct exception_action *excp,
-    mutex_t            *mutex)
+    lck_mtx_t        *mutex)
 {
-    thread_t        self = current_thread();
-    ipc_port_t        exc_port;
+    ipc_port_t        exc_port = IPC_PORT_NULL;
+    exception_data_type_t    small_code[EXCEPTION_CODE_MAX];
+    int            code64;
     int            behavior;
     int            flavor;
     kern_return_t        kr;
+    task_t            task;
+    ipc_port_t        thread_port = IPC_PORT_NULL, task_port = IPC_PORT_NULL;
 
     /*
      *  Save work if we are terminating.
      *  Just go back to our AST handler.
      */
-    if (!self->active)
-        thread_exception_return();
+    if (!thread->active && !thread->inspection)
+        return KERN_SUCCESS;
+
+    /*
+     * If there are no exception actions defined for this entity,
+     * we can't deliver here.
+     */
+    if (excp == NULL)
+        return KERN_FAILURE;
+
+    assert(exception < EXC_TYPES_COUNT);
+    if (exception >= EXC_TYPES_COUNT)
+        return KERN_FAILURE;
+
+    excp = &excp[exception];
 
     /*
      * Snapshot the exception action data under lock for consistency.
@@ -158,17 +180,17 @@ exception_deliver(
      * the port from disappearing between now and when
     * ipc_object_copyin_from_kernel is finally called.
     */
-    mutex_lock(mutex);
+    lck_mtx_lock(mutex);
    exc_port = excp->port;
    if (!IP_VALID(exc_port)) {
-        mutex_unlock(mutex);
-        return;
+        lck_mtx_unlock(mutex);
+        return KERN_FAILURE;
    }
    ip_lock(exc_port);
    if (!ip_active(exc_port)) {
        ip_unlock(exc_port);
-        mutex_unlock(mutex);
-        return;
+        lck_mtx_unlock(mutex);
+        return KERN_FAILURE;
    }
    ip_reference(exc_port);
    exc_port->ip_srights++;
@@ -176,7 +198,49 @@
     flavor = excp->flavor;
    behavior = excp->behavior;
-    mutex_unlock(mutex);
+    lck_mtx_unlock(mutex);
+
+    code64 = (behavior & MACH_EXCEPTION_CODES);
+    behavior &= ~MACH_EXCEPTION_CODES;
+
+    if (!code64) {
+        small_code[0] = CAST_DOWN_EXPLICIT(exception_data_type_t, code[0]);
+        small_code[1] = CAST_DOWN_EXPLICIT(exception_data_type_t, code[1]);
+    }
+
+    task = thread->task;
+
+#if CONFIG_MACF
+    /* Now is a reasonably good time to check if the exception action is
+     * permitted for this process, because after this point we will send
+     * the message out almost certainly.
+     * As with other failures, exception_triage_thread will go on
+     * to the next level.
+     */
+
+    /* The global exception-to-signal translation port is safe to be an exception handler. */
+    if (is_ux_handler_port(exc_port) == FALSE &&
+        mac_exc_action_check_exception_send(task, excp) != 0) {
+        kr = KERN_FAILURE;
+        goto out_release_right;
+    }
+#endif
+
+    if (behavior != EXCEPTION_STATE) {
+        if (thread != current_thread() || exception == EXC_CORPSE_NOTIFY) {
+
+            task_reference(task);
+            task_port = convert_task_to_port(task);
+            /* task ref consumed */
+            thread_reference(thread);
+            thread_port = convert_thread_to_port(thread);
+            /* thread ref consumed */
+        }
+        else {
+            task_port = retrieve_task_self_fast(thread->task);
+            thread_port = retrieve_thread_self_fast(thread);
+        }
+    }
 
    switch (behavior) {
 
    case EXCEPTION_STATE: {
@@ -185,39 +249,58 @@
         c_thr_exc_raise_state++;
        state_cnt = _MachineStateCount[flavor];
-        kr = thread_getstatus(self, flavor,
+        kr = thread_getstatus_to_user(thread, flavor,
                      (thread_state_t)state,
                      &state_cnt);
        if (kr == KERN_SUCCESS) {
-            kr = exception_raise_state(exc_port, exception,
-                        code, codeCnt,
-                        &flavor,
-                        state, state_cnt,
-                        state, &state_cnt);
-            if (kr == MACH_MSG_SUCCESS)
-                kr = thread_setstatus(self, flavor,
-                            (thread_state_t)state,
-                            state_cnt);
+            if (code64) {
+                kr = mach_exception_raise_state(exc_port,
+                        exception,
+                        code,
+                        codeCnt,
+                        &flavor,
+                        state, state_cnt,
+                        state, &state_cnt);
+            } else {
+                kr = exception_raise_state(exc_port, exception,
+                        small_code,
+                        codeCnt,
+                        &flavor,
+                        state, state_cnt,
+                        state, &state_cnt);
+            }
+            if (kr == KERN_SUCCESS) {
+                if (exception != EXC_CORPSE_NOTIFY)
+                    kr = thread_setstatus_from_user(thread, flavor,
+                            (thread_state_t)state,
+                            state_cnt);
+                goto out_release_right;
+            }
+
        }
 
-        if (kr == KERN_SUCCESS || kr == MACH_RCV_PORT_DIED)
-            thread_exception_return();
-        /*NOTREACHED*/
-        return;
+        goto out_release_right;
    }
 
    case EXCEPTION_DEFAULT:
        c_thr_exc_raise++;
-        kr = exception_raise(exc_port,
-                retrieve_thread_self_fast(self),
-                retrieve_task_self_fast(self->task),
-                exception,
-                code, codeCnt);
+        if (code64) {
+            kr = mach_exception_raise(exc_port,
+                    thread_port,
+                    task_port,
+                    exception,
+                    code,
+                    codeCnt);
+        } else {
+            kr = exception_raise(exc_port,
+                    thread_port,
+                    task_port,
+                    exception,
+                    small_code,
+                    codeCnt);
+        }
 
-        if (kr == KERN_SUCCESS || kr == MACH_RCV_PORT_DIED)
-            thread_exception_return();
-        /*NOTREACHED*/
-        return;
+        goto out_release_right;
 
    case EXCEPTION_STATE_IDENTITY: {
        mach_msg_type_number_t state_cnt;
@@ -225,39 +308,109 @@ exception_deliver(
         c_thr_exc_raise_state_id++;
        state_cnt = _MachineStateCount[flavor];
-        kr = thread_getstatus(self, flavor,
+        kr = thread_getstatus_to_user(thread, flavor,
                      (thread_state_t)state,
                      &state_cnt);
        if (kr == KERN_SUCCESS) {
-            kr = exception_raise_state_identity(exc_port,
-                    retrieve_thread_self_fast(self),
-                    retrieve_task_self_fast(self->task),
-                    exception,
-                    code, codeCnt,
-                    &flavor,
-                    state, state_cnt,
-                    state, &state_cnt);
-            if (kr == MACH_MSG_SUCCESS)
-                kr = thread_setstatus(self, flavor,
-                            (thread_state_t)state,
-                            state_cnt);
+            if (code64) {
+                kr = mach_exception_raise_state_identity(
+                        exc_port,
+                        thread_port,
+                        task_port,
+                        exception,
+                        code,
+                        codeCnt,
+                        &flavor,
+                        state, state_cnt,
+                        state, &state_cnt);
+            } else {
+                kr = exception_raise_state_identity(exc_port,
+                        thread_port,
+                        task_port,
+                        exception,
+                        small_code,
+                        codeCnt,
+                        &flavor,
+                        state, state_cnt,
+                        state, &state_cnt);
+            }
+
+            if (kr == KERN_SUCCESS) {
+                if (exception != EXC_CORPSE_NOTIFY)
+                    kr = thread_setstatus_from_user(thread, flavor,
+                            (thread_state_t)state,
+                            state_cnt);
+                goto out_release_right;
+            }
+
        }
 
-        if (kr == KERN_SUCCESS || kr == MACH_RCV_PORT_DIED)
-            thread_exception_return();
-        /*NOTREACHED*/
-        return;
+        goto out_release_right;
    }
-
+
    default:
-       panic ("bad exception behavior!");
+        panic ("bad exception behavior!");
+        return KERN_FAILURE;
    }/* switch */
+
+out_release_right:
+
+    if (task_port) {
+        ipc_port_release_send(task_port);
+    }
+
+    if (thread_port) {
+        ipc_port_release_send(thread_port);
+    }
+
+    if (exc_port) {
+        ipc_port_release_send(exc_port);
+    }
+
+    return kr;
 }
 
 /*
- *    Routine:    exception
+ *    Routine:    check_exc_receiver_dependency
+ *    Purpose:
+ *        Verify that the port destined for receiving this exception is not
+ *        on the current task. This would cause hang in kernel for
+ *        EXC_CRASH primarily. Note: If port is transferred
+ *        between check and delivery then deadlock may happen.
+ *
+ *    Conditions:
+ *        Nothing locked and no resources held.
+ *        Called from an exception context.
+ *    Returns:
+ *        KERN_SUCCESS if its ok to send exception message.
+ */
+kern_return_t
+check_exc_receiver_dependency(
+    exception_type_t    exception,
+    struct exception_action *excp,
+    lck_mtx_t        *mutex)
+{
+    kern_return_t retval = KERN_SUCCESS;
+
+    if (excp == NULL || exception != EXC_CRASH)
+        return retval;
+
+    task_t task = current_task();
+    lck_mtx_lock(mutex);
+    ipc_port_t xport = excp[exception].port;
+    if ( IP_VALID(xport)
+        && ip_active(xport)
+        && task->itk_space == xport->ip_receiver)
+        retval = KERN_FAILURE;
+    lck_mtx_unlock(mutex);
+    return retval;
+}
+
+
+/*
+ *    Routine:    exception_triage_thread
  *    Purpose:
- *        The current thread caught an exception.
+ *        The thread caught an exception.
  *        We make an up-call to the thread's exception server.
  *    Conditions:
  *        Nothing locked and no resources held.
@@ -265,260 +418,183 @@ exception_deliver(
  *        thread_exception_return and thread_kdb_return
  *        are possible.
  *    Returns:
- *        Doesn't return.
+ *        KERN_SUCCESS if exception is handled by any of the handlers.
  */
-void
-exception_triage(
+kern_return_t
+exception_triage_thread(
     exception_type_t    exception,
-    exception_data_t    code,
-    mach_msg_type_number_t  codeCnt)
+    mach_exception_data_t    code,
+    mach_msg_type_number_t  codeCnt,
+    thread_t        thread)
 {
-    thread_t        thread;
     task_t            task;
     host_priv_t        host_priv;
-    struct exception_action *excp;
-    mutex_t            *mutex;
+    lck_mtx_t        *mutex;
+    kern_return_t        kr = KERN_FAILURE;
 
     assert(exception != EXC_RPC_ALERT);
 
-    if (exception == KERN_SUCCESS)
-        panic("exception");
+    /*
+     * If this behavior has been requested by the kernel
+     * (due to the boot environment), we should panic if we
+     * enter this function.  This is intended as a debugging
+     * aid; it should allow us to debug why we caught an
+     * exception in environments where debugging is especially
+     * difficult.
+     */
+    if (panic_on_exception_triage) {
+        panic("called exception_triage when it was forbidden by the boot environment");
+    }
 
     /*
      * Try to raise the exception at the activation level.
      */
-    thread = current_thread();
-    mutex = mutex_addr(thread->mutex);
-    excp = &thread->exc_actions[exception];
-    exception_deliver(exception, code, codeCnt, excp, mutex);
+    mutex = &thread->mutex;
+    if (KERN_SUCCESS == check_exc_receiver_dependency(exception, thread->exc_actions, mutex))
+    {
+        kr = exception_deliver(thread, exception, code, codeCnt, thread->exc_actions, mutex);
+        if (kr == KERN_SUCCESS || kr == MACH_RCV_PORT_DIED)
+            goto out;
+    }
 
     /*
      * Maybe the task level will handle it.
      */
-    task = current_task();
-    mutex = mutex_addr(task->lock);
-    excp = &task->exc_actions[exception];
-    exception_deliver(exception, code, codeCnt, excp, mutex);
+    task = thread->task;
+    mutex = &task->itk_lock_data;
+    if (KERN_SUCCESS == check_exc_receiver_dependency(exception, task->exc_actions, mutex))
+    {
+        kr = exception_deliver(thread, exception, code, codeCnt, task->exc_actions, mutex);
+        if (kr == KERN_SUCCESS || kr == MACH_RCV_PORT_DIED)
+            goto out;
+    }
 
     /*
      * How about at the host level?
      */
     host_priv = host_priv_self();
-    mutex = mutex_addr(host_priv->lock);
-    excp = &host_priv->exc_actions[exception];
-    exception_deliver(exception, code, codeCnt, excp, mutex);
-
-    /*
-     * Nobody handled it, terminate the task.
-     */
+    mutex = &host_priv->lock;
 
-#if    MACH_KDB
-    if (debug_user_with_kdb) {
-        /*
-         * Debug the exception with kdb.
-         * If kdb handles the exception,
-         * then thread_kdb_return won't return.
-         */
-        db_printf("No exception server, calling kdb...\n");
-        thread_kdb_return();
+    if (KERN_SUCCESS == check_exc_receiver_dependency(exception, host_priv->exc_actions, mutex))
+    {
+        kr = exception_deliver(thread, exception, code, codeCnt, host_priv->exc_actions, mutex);
+        if (kr == KERN_SUCCESS || kr == MACH_RCV_PORT_DIED)
+            goto out;
     }
-#endif    /* MACH_KDB */
 
-    (void) task_terminate(task);
-    thread_exception_return();
-    /*NOTREACHED*/
+out:
+    if ((exception != EXC_CRASH) && (exception != EXC_RESOURCE) &&
+        (exception != EXC_GUARD) && (exception != EXC_CORPSE_NOTIFY))
+        thread_exception_return();
+    return kr;
+}
+
+/*
+ *    Routine:    exception_triage
+ *    Purpose:
+ *        The current thread caught an exception.
+ *        We make an up-call to the thread's exception server.
+ *    Conditions:
+ *        Nothing locked and no resources held.
+ *        Called from an exception context, so
+ *        thread_exception_return and thread_kdb_return
+ *        are possible.
+ *    Returns:
+ *        KERN_SUCCESS if exception is handled by any of the handlers.
+ */
+kern_return_t
+exception_triage(
+    exception_type_t    exception,
+    mach_exception_data_t    code,
+    mach_msg_type_number_t  codeCnt)
+{
+    thread_t thread = current_thread();
+    return exception_triage_thread(exception, code, codeCnt, thread);
 }
 
 kern_return_t bsd_exception(
     exception_type_t    exception,
-    exception_data_t    code,
+    mach_exception_data_t    code,
     mach_msg_type_number_t  codeCnt)
 {
     task_t            task;
-    struct exception_action *excp;
-    mutex_t            *mutex;
+    lck_mtx_t        *mutex;
     thread_t        self = current_thread();
-    ipc_port_t        exc_port;
-    int            behavior;
-    int            flavor;
     kern_return_t        kr;
 
     /*
      * Maybe the task level will handle it.
      */
     task = current_task();
-    mutex = mutex_addr(task->lock);
-    excp = &task->exc_actions[exception];
-
-    /*
-     *  Save work if we are terminating.
-     *  Just go back to our AST handler.
-     */
-    if (!self->active) {
-        return(KERN_FAILURE);
-    }
-
-    /*
-     * Snapshot the exception action data under lock for consistency.
-     * Hold a reference to the port over the exception_raise_* calls
-     * so it can't be destroyed.  This seems like overkill, but keeps
-     * the port from disappearing between now and when
-     * ipc_object_copyin_from_kernel is finally called.
-     */
-    mutex_lock(mutex);
-    exc_port = excp->port;
-    if (!IP_VALID(exc_port)) {
-        mutex_unlock(mutex);
-        return(KERN_FAILURE);
-    }
-    ip_lock(exc_port);
-    if (!ip_active(exc_port)) {
-        ip_unlock(exc_port);
-        mutex_unlock(mutex);
-        return(KERN_FAILURE);
-    }
-    ip_reference(exc_port);
-    exc_port->ip_srights++;
-    ip_unlock(exc_port);
+    mutex = &task->itk_lock_data;
 
-    flavor = excp->flavor;
-    behavior = excp->behavior;
-    mutex_unlock(mutex);
+    kr = exception_deliver(self, exception, code, codeCnt, task->exc_actions, mutex);
 
-    switch (behavior) {
-    case EXCEPTION_STATE: {
-        mach_msg_type_number_t state_cnt;
-        thread_state_data_t state;
-
-        c_thr_exc_raise_state++;
-        state_cnt = _MachineStateCount[flavor];
-        kr = thread_getstatus(self, flavor,
-                      (thread_state_t)state,
-                      &state_cnt);
-        if (kr == KERN_SUCCESS) {
-            kr = exception_raise_state(exc_port, exception,
-                        code, codeCnt,
-                        &flavor,
-                        state, state_cnt,
-                        state, &state_cnt);
-            if (kr == MACH_MSG_SUCCESS)
-                kr = thread_setstatus(self, flavor,
-                            (thread_state_t)state,
-                            state_cnt);
-        }
-
-        if (kr == KERN_SUCCESS || kr == MACH_RCV_PORT_DIED)
-            return(KERN_SUCCESS);
-
-        return(KERN_FAILURE);
-    }
-
-    case EXCEPTION_DEFAULT:
-        c_thr_exc_raise++;
-        kr = exception_raise(exc_port,
-                retrieve_thread_self_fast(self),
-                retrieve_task_self_fast(self->task),
-                exception,
-                code, codeCnt);
+    if (kr == KERN_SUCCESS || kr == MACH_RCV_PORT_DIED)
+        return(KERN_SUCCESS);
+    return(KERN_FAILURE);
+}
 
-        if (kr == KERN_SUCCESS || kr == MACH_RCV_PORT_DIED)
-            return(KERN_SUCCESS);
-        return(KERN_FAILURE);
 
-    case EXCEPTION_STATE_IDENTITY: {
-        mach_msg_type_number_t state_cnt;
-        thread_state_data_t state;
 
+/*
+ * Raise an exception on a task.
+ * This should tell launchd to launch Crash Reporter for this task.
+ */
+kern_return_t task_exception_notify(exception_type_t exception,
+        mach_exception_data_type_t exccode, mach_exception_data_type_t excsubcode)
+{
+    mach_exception_data_type_t    code[EXCEPTION_CODE_MAX];
+    wait_interrupt_t        wsave;
+    kern_return_t kr = KERN_SUCCESS;
 
-        c_thr_exc_raise_state_id++;
-        state_cnt = _MachineStateCount[flavor];
-        kr = thread_getstatus(self, flavor,
-                      (thread_state_t)state,
-                      &state_cnt);
-        if (kr == KERN_SUCCESS) {
-            kr = exception_raise_state_identity(exc_port,
-                    retrieve_thread_self_fast(self),
-                    retrieve_task_self_fast(self->task),
-                    exception,
-                    code, codeCnt,
-                    &flavor,
-                    state, state_cnt,
-                    state, &state_cnt);
-            if (kr == MACH_MSG_SUCCESS)
-                kr = thread_setstatus(self, flavor,
-                            (thread_state_t)state,
-                            state_cnt);
-        }
+    code[0] = exccode;
+    code[1] = excsubcode;
 
-        if (kr == KERN_SUCCESS || kr == MACH_RCV_PORT_DIED)
-            return(KERN_SUCCESS);
-        return(KERN_FAILURE);
-    }
-
-    default:
-
-        return(KERN_FAILURE);
-    }/* switch */
-    return(KERN_FAILURE);
+    wsave = thread_interrupt_level(THREAD_UNINT);
+    kr = exception_triage(exception, code, EXCEPTION_CODE_MAX);
+    (void) thread_interrupt_level(wsave);
+    return kr;
 }
-
-
 /*
- * Handle interface for special perfomance monitoring
+ * Handle interface for special performance monitoring
  * This is a special case of the host exception handler
  */
-
-kern_return_t sys_perf_notify(struct task *task,
-    exception_data_t code,
-    mach_msg_type_number_t codeCnt)
+kern_return_t sys_perf_notify(thread_t thread, int pid)
 {
     host_priv_t        hostp;
-    struct exception_action *excp;
-    thread_t        thread = current_thread();
     ipc_port_t        xport;
-    kern_return_t        ret;
     wait_interrupt_t    wsave;
+    kern_return_t        ret;
 
-    hostp = host_priv_self();            /* Get the host privileged ports */
-    excp = &hostp->exc_actions[EXC_RPC_ALERT];    /* Point to the RPC_ALERT action */
+    hostp = host_priv_self();        /* Get the host privileged ports */
+    mach_exception_data_type_t code[EXCEPTION_CODE_MAX];
+    code[0] = 0xFF000001;            /* Set terminate code */
+    code[1] = pid;                /* Pass out the pid */
 
-    mutex_lock(&hostp->lock);            /* Lock the priv port */
-    xport = excp->port;                /* Get the port for this exception */
-    if (!IP_VALID(xport)) {                /* Is it valid? */
-        mutex_unlock(&hostp->lock);        /* Unlock */
-        return(KERN_FAILURE);            /* Go away... */
-    }
+    struct task *task = thread->task;
+    xport = hostp->exc_actions[EXC_RPC_ALERT].port;
 
-    ip_lock(xport);                    /* Lock the exception port */
-    if (!ip_active(xport)) {            /* and is it active? */
-        ip_unlock(xport);            /* Nope, fail */
-        mutex_unlock(&hostp->lock);        /* Unlock */
-        return(KERN_FAILURE);            /* Go away... */
-    }
+    /* Make sure we're not catching our own exception */
+    if (!IP_VALID(xport) ||
+        !ip_active(xport) ||
+        task->itk_space == xport->data.receiver) {
 
-    if (task->itk_space == xport->data.receiver) {    /* Are we trying to send to ourselves? */
-        ip_unlock(xport);            /* Yes, fail */
-        mutex_unlock(&hostp->lock);        /* Unlock */
-        return(KERN_FAILURE);            /* Go away... */
+        return(KERN_FAILURE);
     }
-
-    ip_reference(xport);                /* Bump reference so it doesn't go away */
-    xport->ip_srights++;                /* Bump send rights */
-    ip_unlock(xport);                /* We can unlock it now */
-
-    mutex_unlock(&hostp->lock);            /* All done with the lock */
-
-    wsave = thread_interrupt_level(THREAD_UNINT);    /* Make sure we aren't aborted here */
-
-    ret = exception_raise(xport,            /* Send the exception to the perf handler */
-        retrieve_thread_self_fast(thread),    /* Not always the dying guy */
-        retrieve_task_self_fast(thread->task),    /* Not always the dying guy */
-        EXC_RPC_ALERT,                /* Unused exception type until now */
-        code, codeCnt);
-
-    (void)thread_interrupt_level(wsave);        /* Restore interrupt level */
-
-    return(ret);            /* Tell caller how it went */
+
+    wsave = thread_interrupt_level(THREAD_UNINT);
+    ret = exception_deliver(
+            thread,
+            EXC_RPC_ALERT,
+            code,
+            2,
+            hostp->exc_actions,
+            &hostp->lock);
+    (void)thread_interrupt_level(wsave);
+
+    return(ret);
 }
+
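
For context: the code64 path this diff adds to exception_deliver() is taken only when the registered handler asked for 64-bit codes, i.e. when the exception action's behavior carries the MACH_EXCEPTION_CODES bit. A minimal user-space sketch of such a registration (assuming the standard Mach user APIs on macOS; the port name exc_port and the EXC_BAD_ACCESS mask are just illustrative choices) might look like this:

#include <mach/mach.h>
#include <stdio.h>

int main(void)
{
	mach_port_t exc_port = MACH_PORT_NULL;
	kern_return_t kr;

	/* Create a receive right to act as the task's exception port. */
	kr = mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_RECEIVE, &exc_port);
	if (kr != KERN_SUCCESS)
		return 1;

	/* Give the port a send right so the kernel can deliver exception messages to it. */
	kr = mach_port_insert_right(mach_task_self(), exc_port, exc_port,
	    MACH_MSG_TYPE_MAKE_SEND);
	if (kr != KERN_SUCCESS)
		return 1;

	/*
	 * Register for EXC_BAD_ACCESS with EXCEPTION_DEFAULT behavior.
	 * OR-ing in MACH_EXCEPTION_CODES requests 64-bit exception codes,
	 * which is what selects the mach_exception_raise() (code64) branch
	 * in the kernel's exception_deliver() above.
	 */
	kr = task_set_exception_ports(mach_task_self(),
	    EXC_MASK_BAD_ACCESS,
	    exc_port,
	    EXCEPTION_DEFAULT | MACH_EXCEPTION_CODES,
	    THREAD_STATE_NONE);
	if (kr != KERN_SUCCESS)
		return 1;

	printf("exception port registered\n");
	/* A real handler would now run a mach_msg() receive loop on exc_port. */
	return 0;
}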