X-Git-Url: https://git.saurik.com/apple/xnu.git/blobdiff_plain/378393581903b274cb7a4d18e0d978071a6b592d..3903760236c30e3b5ace7a4eefac3a269d68957c:/osfmk/kern/exception.c

diff --git a/osfmk/kern/exception.c b/osfmk/kern/exception.c
index 67abd9fbf..a93f38ca5 100644
--- a/osfmk/kern/exception.c
+++ b/osfmk/kern/exception.c
@@ -1,23 +1,29 @@
 /*
  * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
  *
- * @APPLE_LICENSE_HEADER_START@
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
  * 
- * The contents of this file constitute Original Code as defined in and
- * are subject to the Apple Public Source License Version 1.1 (the
- * "License").  You may not use this file except in compliance with the
- * License.  Please obtain a copy of the License at
- * http://www.apple.com/publicsource and read it before using this file.
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. The rights granted to you under the License
+ * may not be used to create, or enable the creation or redistribution of,
+ * unlawful or unlicensed copies of an Apple operating system, or to
+ * circumvent, violate, or enable the circumvention or violation of, any
+ * terms of an Apple operating system software license agreement.
  * 
- * This Original Code and all software distributed under the License are
- * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this file.
+ * 
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
- * License for the specific language governing rights and limitations
- * under the License.
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
  * 
- * @APPLE_LICENSE_HEADER_END@
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
  */
 /*
  * @OSF_COPYRIGHT@
@@ -50,8 +56,6 @@
 /*
  */
 
-#include <mach_kdb.h>
-
 #include <mach/mach_types.h>
 #include <mach/boolean.h>
 #include <mach/kern_return.h>
@@ -61,6 +65,8 @@
 #include <mach/task.h>
 #include <mach/thread_status.h>
 #include <mach/exception_types.h>
+#include <mach/exc.h>
+#include <mach/mach_exc.h>
 #include <ipc/port.h>
 #include <ipc/ipc_entry.h>
 #include <ipc/ipc_object.h>
@@ -77,24 +83,11 @@
 #include <kern/sched_prim.h>
 #include <kern/host.h>
 #include <kern/misc_protos.h>
+#include <security/mac_mach_internal.h>
 #include <string.h>
-#include <mach/exc.h>
-
-#if	MACH_KDB
-#include <ddb/db_trap.h>
-#endif	/* MACH_KDB */
+#include <pexpert/pexpert.h>
 
-#if	MACH_KDB
-
-#include <ddb/db_output.h>
-
-#if iPSC386 || iPSC860
-boolean_t debug_user_with_kdb = TRUE;
-#else
-boolean_t debug_user_with_kdb = FALSE;
-#endif
-
-#endif	/* MACH_KDB */
+extern int panic_on_exception_triage;
 
 unsigned long c_thr_exc_raise = 0;
 unsigned long c_thr_exc_raise_state = 0;
@@ -104,17 +97,24 @@ unsigned long c_tsk_exc_raise_state = 0;
 unsigned long c_tsk_exc_raise_state_id = 0;
 
 /* forward declarations */
-void exception_deliver(
+kern_return_t exception_deliver(
+	thread_t 		thread,
 	exception_type_t	exception,
-	exception_data_t	code,
+	mach_exception_data_t	code,
 	mach_msg_type_number_t  codeCnt,
 	struct exception_action *excp,
-	mutex_t			*mutex);
+	lck_mtx_t			*mutex);
+
+static kern_return_t
+check_exc_receiver_dependency(
+	exception_type_t exception, 
+	struct exception_action *excp, 
+	lck_mtx_t *mutex);
 
 #ifdef MACH_BSD
 kern_return_t bsd_exception(
 	exception_type_t	exception,
-	exception_data_t	code,
+	mach_exception_data_t	code,
 	mach_msg_type_number_t  codeCnt);
 #endif /* MACH_BSD */
 
@@ -128,28 +128,46 @@ kern_return_t bsd_exception(
  *		thread_exception_return and thread_kdb_return
  *		are possible.
  *	Returns:
- *		If the exception was not handled by this handler
+ *		KERN_SUCCESS if the exception was handled
  */
-void
+kern_return_t 
 exception_deliver(
+	thread_t		thread,
 	exception_type_t	exception,
-	exception_data_t	code,
+	mach_exception_data_t	code,
 	mach_msg_type_number_t  codeCnt,
 	struct exception_action *excp,
-	mutex_t			*mutex)
+	lck_mtx_t			*mutex)
 {
-	thread_t		self = current_thread();
 	ipc_port_t		exc_port;
+	exception_data_type_t	small_code[EXCEPTION_CODE_MAX];
+	int			code64;
 	int			behavior;
 	int			flavor;
 	kern_return_t		kr;
+	int use_fast_retrieve = TRUE;
+	task_t task;
+	ipc_port_t thread_port = NULL, task_port = NULL;
 
 	/*
 	 *  Save work if we are terminating.
 	 *  Just go back to our AST handler.
 	 */
-	if (!self->active)
-		thread_exception_return();
+	if (!thread->active && !thread->inspection)
+		return KERN_SUCCESS;
+
+	/*
+	 * If there are no exception actions defined for this entity,
+	 * we can't deliver here.
+	 */
+	if (excp == NULL)
+		return KERN_FAILURE;
+
+	assert(exception < EXC_TYPES_COUNT);
+	if (exception >= EXC_TYPES_COUNT)
+		return KERN_FAILURE;
+
+	excp = &excp[exception];
 
 	/*
 	 * Snapshot the exception action data under lock for consistency.
@@ -158,17 +176,17 @@ exception_deliver(
 	 * the port from disappearing between now and when
 	 * ipc_object_copyin_from_kernel is finally called.
 	 */
-	mutex_lock(mutex);
+	lck_mtx_lock(mutex);
 	exc_port = excp->port;
 	if (!IP_VALID(exc_port)) {
-		mutex_unlock(mutex);
-		return;
+		lck_mtx_unlock(mutex);
+		return KERN_FAILURE;
 	}
 	ip_lock(exc_port);
 	if (!ip_active(exc_port)) {
 		ip_unlock(exc_port);
-		mutex_unlock(mutex);
-		return;
+		lck_mtx_unlock(mutex);
+		return KERN_FAILURE;
 	}
 	ip_reference(exc_port);	
 	exc_port->ip_srights++;
@@ -176,7 +194,42 @@ exception_deliver(
 
 	flavor = excp->flavor;
 	behavior = excp->behavior;
-	mutex_unlock(mutex);
+	lck_mtx_unlock(mutex);
+
+	code64 = (behavior & MACH_EXCEPTION_CODES);
+	behavior &= ~MACH_EXCEPTION_CODES;
+
+	if (!code64) {
+		small_code[0] = CAST_DOWN_EXPLICIT(exception_data_type_t, code[0]);
+		small_code[1] = CAST_DOWN_EXPLICIT(exception_data_type_t, code[1]);
+	}
+
+	task = thread->task;
+
+#if CONFIG_MACF
+	/* Now is a reasonably good time to check if the exception action is
+	 * permitted for this process, because after this point we will
+	 * almost certainly send the message out.
+	 * As with other failures, exception_triage_thread will go on
+	 * to the next level.
+	 */
+	if (mac_exc_action_check_exception_send(task, excp) != 0) {
+		return KERN_FAILURE;
+	}
+#endif
+
+	if ((thread != current_thread() || exception == EXC_CORPSE_NOTIFY)
+			&& behavior != EXCEPTION_STATE) {
+		use_fast_retrieve = FALSE;
+
+		task_reference(task);
+		task_port = convert_task_to_port(task);
+		/* task ref consumed */
+		thread_reference(thread);
+		thread_port = convert_thread_to_port(thread);
+		/* thread ref consumed */
+
+	}
 
 	switch (behavior) {
 	case EXCEPTION_STATE: {
@@ -185,39 +238,58 @@ exception_deliver(
 
 		c_thr_exc_raise_state++;
 		state_cnt = _MachineStateCount[flavor];
-		kr = thread_getstatus(self, flavor, 
+		kr = thread_getstatus(thread, flavor, 
 				      (thread_state_t)state,
 				      &state_cnt);
 		if (kr == KERN_SUCCESS) {
-			kr = exception_raise_state(exc_port, exception,
-						   code, codeCnt,
-						   &flavor,
-						   state, state_cnt,
-						   state, &state_cnt);
-			if (kr == MACH_MSG_SUCCESS)
-				kr = thread_setstatus(self, flavor, 
-						      (thread_state_t)state,
-						      state_cnt);
+			if (code64) {
+				kr = mach_exception_raise_state(exc_port, 
+						exception,
+						code, 
+						codeCnt,
+						&flavor,
+						state, state_cnt,
+						state, &state_cnt);
+			} else {
+				kr = exception_raise_state(exc_port, exception,
+						small_code, 
+						codeCnt,
+						&flavor,
+						state, state_cnt,
+						state, &state_cnt);
+			}
+			if (kr == MACH_MSG_SUCCESS && exception != EXC_CORPSE_NOTIFY)
+				kr = thread_setstatus(thread, flavor, 
+						(thread_state_t)state,
+						state_cnt);
 		}
 
-		if (kr == KERN_SUCCESS || kr == MACH_RCV_PORT_DIED)
-			thread_exception_return();
-			/*NOTREACHED*/
-		return;
+		return kr;
 	}
 
 	case EXCEPTION_DEFAULT:
 		c_thr_exc_raise++;
-		kr = exception_raise(exc_port,
-				retrieve_thread_self_fast(self),
-				retrieve_task_self_fast(self->task),
-				exception,
-				code, codeCnt);
+		if (code64) {
+			kr = mach_exception_raise(exc_port,
+					use_fast_retrieve ? retrieve_thread_self_fast(thread) :
+						thread_port,
+					use_fast_retrieve ? retrieve_task_self_fast(thread->task) :
+						task_port,
+					exception,
+					code, 
+					codeCnt);
+		} else {
+			kr = exception_raise(exc_port,
+					use_fast_retrieve ? retrieve_thread_self_fast(thread) :
+						thread_port,
+					use_fast_retrieve ? retrieve_task_self_fast(thread->task) :
+						task_port,
+					exception,
+					small_code, 
+					codeCnt);
+		}
 
-		if (kr == KERN_SUCCESS || kr == MACH_RCV_PORT_DIED)
-			thread_exception_return();
-			/*NOTREACHED*/
-		return;
+		return kr;
 
 	case EXCEPTION_STATE_IDENTITY: {
 		mach_msg_type_number_t state_cnt;
@@ -225,39 +297,92 @@ exception_deliver(
 
 		c_thr_exc_raise_state_id++;
 		state_cnt = _MachineStateCount[flavor];
-		kr = thread_getstatus(self, flavor,
+		kr = thread_getstatus(thread, flavor,
 				      (thread_state_t)state,
 				      &state_cnt);
 		if (kr == KERN_SUCCESS) {
-		    kr = exception_raise_state_identity(exc_port,
-				retrieve_thread_self_fast(self),
-				retrieve_task_self_fast(self->task),
-				exception,
-				code, codeCnt,
-				&flavor,
-				state, state_cnt,
-				state, &state_cnt);
-		    if (kr == MACH_MSG_SUCCESS)
-			kr = thread_setstatus(self, flavor,
-					      (thread_state_t)state,
-					      state_cnt);
+			if (code64) {
+				kr = mach_exception_raise_state_identity(
+						exc_port,
+						use_fast_retrieve ? retrieve_thread_self_fast(thread) :
+							thread_port,
+						use_fast_retrieve ? retrieve_task_self_fast(thread->task) :
+							task_port,
+						exception,
+						code, 
+						codeCnt,
+						&flavor,
+						state, state_cnt,
+						state, &state_cnt);
+			} else {
+				kr = exception_raise_state_identity(exc_port,
+						use_fast_retrieve ? retrieve_thread_self_fast(thread) :
+							thread_port,
+						use_fast_retrieve ? retrieve_task_self_fast(thread->task) :
+							task_port,
+						exception,
+						small_code, 
+						codeCnt,
+						&flavor,
+						state, state_cnt,
+						state, &state_cnt);
+			}
+			if (kr == MACH_MSG_SUCCESS && exception != EXC_CORPSE_NOTIFY)
+				kr = thread_setstatus(thread, flavor,
+						(thread_state_t)state,
+						state_cnt);
 		}
 
-		if (kr == KERN_SUCCESS || kr == MACH_RCV_PORT_DIED)
-			thread_exception_return();
-			/*NOTREACHED*/
-		return;
+		return kr;
 	}
-	
+
 	default:
-		panic ("bad exception behavior!");
+		panic("bad exception behavior!");
+		return KERN_FAILURE;
 	}/* switch */
 }
 
 /*
- *	Routine:	exception
+ * Routine: check_exc_receiver_dependency
+ * Purpose:
+ *      Verify that the port destined to receive this exception is not
+ *      owned by the current task, which would hang the kernel,
+ *      primarily for EXC_CRASH. Note: if the port is transferred between
+ *      the check and delivery, a deadlock may still happen.
+ *
+ * Conditions:
+ *		Nothing locked and no resources held.
+ *		Called from an exception context.
+ * Returns:
+ *      KERN_SUCCESS if it is OK to send the exception message.
+ */
+kern_return_t
+check_exc_receiver_dependency(
+	exception_type_t exception,
+	struct exception_action *excp,
+	lck_mtx_t *mutex)
+{
+	kern_return_t retval = KERN_SUCCESS;
+
+	if (excp == NULL || exception != EXC_CRASH)
+		return retval;
+
+	task_t task = current_task();
+	lck_mtx_lock(mutex);
+	ipc_port_t xport = excp[exception].port;
+	if (IP_VALID(xport)
+	    && ip_active(xport)
+	    && task->itk_space == xport->ip_receiver)
+		retval = KERN_FAILURE;
+	lck_mtx_unlock(mutex);
+	return retval;
+}
+
+
+/*
+ *	Routine:	exception_triage_thread
  *	Purpose:
- *		The current thread caught an exception.
+ *		The thread caught an exception.
  *		We make an up-call to the thread's exception server.
  *	Conditions:
  *		Nothing locked and no resources held.
@@ -265,260 +390,183 @@ exception_deliver(
  *		thread_exception_return and thread_kdb_return
  *		are possible.
  *	Returns:
- *		Doesn't return.
+ *		KERN_SUCCESS if the exception is handled by any of the handlers.
  */
-void
-exception_triage(
+kern_return_t
+exception_triage_thread(
 	exception_type_t	exception,
-	exception_data_t	code,
-	mach_msg_type_number_t  codeCnt)
+	mach_exception_data_t	code,
+	mach_msg_type_number_t  codeCnt,
+	thread_t 		thread)
 {
-	thread_t		thread;
 	task_t			task;
 	host_priv_t		host_priv;
-	struct exception_action *excp;
-	mutex_t			*mutex;
+	lck_mtx_t		*mutex;
+	kern_return_t	kr = KERN_FAILURE;
 
 	assert(exception != EXC_RPC_ALERT);
 
-	if (exception == KERN_SUCCESS)
-		panic("exception");
+	/*
+	 * If this behavior has been requested by the kernel
+	 * (due to the boot environment), we should panic if we
+	 * enter this function.  This is intended as a debugging
+	 * aid; it should allow us to debug why we caught an
+	 * exception in environments where debugging is especially
+	 * difficult.
+	 */
+	if (panic_on_exception_triage) {
+		panic("called exception_triage when it was forbidden by the boot environment");
+	}
 
 	/*
 	 * Try to raise the exception at the activation level.
 	 */
-	thread = current_thread();
-	mutex = mutex_addr(thread->mutex);
-	excp = &thread->exc_actions[exception];
-	exception_deliver(exception, code, codeCnt, excp, mutex);
+	mutex = &thread->mutex;
+	if (KERN_SUCCESS == check_exc_receiver_dependency(exception, thread->exc_actions, mutex))
+	{
+		kr = exception_deliver(thread, exception, code, codeCnt, thread->exc_actions, mutex);
+		if (kr == KERN_SUCCESS || kr == MACH_RCV_PORT_DIED)
+			goto out;
+	}
 
 	/*
 	 * Maybe the task level will handle it.
 	 */
-	task = current_task();
-	mutex = mutex_addr(task->lock);
-	excp = &task->exc_actions[exception];
-	exception_deliver(exception, code, codeCnt, excp, mutex);
+	task = thread->task;
+	mutex = &task->itk_lock_data;
+	if (KERN_SUCCESS == check_exc_receiver_dependency(exception, task->exc_actions, mutex))
+	{
+		kr = exception_deliver(thread, exception, code, codeCnt, task->exc_actions, mutex);
+		if (kr == KERN_SUCCESS || kr == MACH_RCV_PORT_DIED)
+			goto out;
+	}
 
 	/*
 	 * How about at the host level?
 	 */
 	host_priv = host_priv_self();
-	mutex = mutex_addr(host_priv->lock);
-	excp = &host_priv->exc_actions[exception];
-	exception_deliver(exception, code, codeCnt, excp, mutex);
-
-	/*
-	 * Nobody handled it, terminate the task.
-	 */
-
-#if	MACH_KDB
-	if (debug_user_with_kdb) {
-		/*
-		 *	Debug the exception with kdb.
-		 *	If kdb handles the exception,
-		 *	then thread_kdb_return won't return.
-		 */
-		db_printf("No exception server, calling kdb...\n");
-		thread_kdb_return();
+	mutex = &host_priv->lock;
+	
+	if (KERN_SUCCESS == check_exc_receiver_dependency(exception, host_priv->exc_actions, mutex))
+	{
+		kr = exception_deliver(thread, exception, code, codeCnt, host_priv->exc_actions, mutex);
+		if (kr == KERN_SUCCESS || kr == MACH_RCV_PORT_DIED)
+			goto out;
 	}
-#endif	/* MACH_KDB */
 
-	(void) task_terminate(task);
-	thread_exception_return();
-	/*NOTREACHED*/
+out:
+	if ((exception != EXC_CRASH) && (exception != EXC_RESOURCE) &&
+	    (exception != EXC_GUARD) && (exception != EXC_CORPSE_NOTIFY))
+		thread_exception_return();
+	return kr;
+}
+
+/*
+ *	Routine:	exception_triage
+ *	Purpose:
+ *		The current thread caught an exception.
+ *		We make an up-call to the thread's exception server.
+ *	Conditions:
+ *		Nothing locked and no resources held.
+ *		Called from an exception context, so
+ *		thread_exception_return and thread_kdb_return
+ *		are possible.
+ *	Returns:
+ *		KERN_SUCCESS if the exception is handled by any of the handlers.
+ */
+kern_return_t
+exception_triage(
+	exception_type_t	exception,
+	mach_exception_data_t	code,
+	mach_msg_type_number_t  codeCnt)
+{
+	thread_t thread = current_thread();
+	return exception_triage_thread(exception, code, codeCnt, thread);
 }
 
 kern_return_t
 bsd_exception(
 	exception_type_t	exception,
-	exception_data_t	code,
+	mach_exception_data_t	code,
 	mach_msg_type_number_t  codeCnt)
 {
 	task_t			task;
-	struct exception_action *excp;
-	mutex_t			*mutex;
+	lck_mtx_t		*mutex;
 	thread_t		self = current_thread();
-	ipc_port_t		exc_port;
-	int			behavior;
-	int			flavor;
 	kern_return_t		kr;
 
 	/*
 	 * Maybe the task level will handle it.
 	 */
 	task = current_task();
-	mutex = mutex_addr(task->lock);
-	excp = &task->exc_actions[exception];
-
-	/*
-	 *  Save work if we are terminating.
-	 *  Just go back to our AST handler.
-	 */
-	if (!self->active) {
-		return(KERN_FAILURE);
-	}
-
-	/*
-	 * Snapshot the exception action data under lock for consistency.
-	 * Hold a reference to the port over the exception_raise_* calls
-	 * so it can't be destroyed.  This seems like overkill, but keeps
-	 * the port from disappearing between now and when
-	 * ipc_object_copyin_from_kernel is finally called.
-	 */
-	mutex_lock(mutex);
-	exc_port = excp->port;
-	if (!IP_VALID(exc_port)) {
-		mutex_unlock(mutex);
-		return(KERN_FAILURE);
-	}
-	ip_lock(exc_port);
-	if (!ip_active(exc_port)) {
-		ip_unlock(exc_port);
-		mutex_unlock(mutex);
-		return(KERN_FAILURE);
-	}
-	ip_reference(exc_port);	
-	exc_port->ip_srights++;
-	ip_unlock(exc_port);
-
-	flavor = excp->flavor;
-	behavior = excp->behavior;
-	mutex_unlock(mutex);
-
-	switch (behavior) {
-	case EXCEPTION_STATE: {
-		mach_msg_type_number_t state_cnt;
-		thread_state_data_t state;
-
-		c_thr_exc_raise_state++;
-		state_cnt = _MachineStateCount[flavor];
-		kr = thread_getstatus(self, flavor, 
-				      (thread_state_t)state,
-				      &state_cnt);
-		if (kr == KERN_SUCCESS) {
-			kr = exception_raise_state(exc_port, exception,
-						   code, codeCnt,
-						   &flavor,
-						   state, state_cnt,
-						   state, &state_cnt);
-			if (kr == MACH_MSG_SUCCESS)
-				kr = thread_setstatus(self, flavor, 
-						      (thread_state_t)state,
-						      state_cnt);
-		}
+	mutex = &task->itk_lock_data;
 
-		if (kr == KERN_SUCCESS || kr == MACH_RCV_PORT_DIED)
-			return(KERN_SUCCESS);
-
-		return(KERN_FAILURE);
-	}
+	kr = exception_deliver(self, exception, code, codeCnt, task->exc_actions, mutex);
 
-	case EXCEPTION_DEFAULT:
-		c_thr_exc_raise++;
-		kr = exception_raise(exc_port,
-				retrieve_thread_self_fast(self),
-				retrieve_task_self_fast(self->task),
-				exception,
-				code, codeCnt);
+	if (kr == KERN_SUCCESS || kr == MACH_RCV_PORT_DIED)
+		return(KERN_SUCCESS);
+	return(KERN_FAILURE);
+}
 
-		if (kr == KERN_SUCCESS || kr == MACH_RCV_PORT_DIED)
-			return(KERN_SUCCESS);
-		return(KERN_FAILURE);
 
-	case EXCEPTION_STATE_IDENTITY: {
-		mach_msg_type_number_t state_cnt;
-		thread_state_data_t state;
+/*
+ * Raise an exception on a task.
+ * This should tell launchd to launch Crash Reporter for this task.
+ */
+kern_return_t task_exception_notify(exception_type_t exception,
+	mach_exception_data_type_t exccode, mach_exception_data_type_t excsubcode)
+{
+	mach_exception_data_type_t	code[EXCEPTION_CODE_MAX];
+	wait_interrupt_t		wsave;
+	kern_return_t kr = KERN_SUCCESS;
 
-		c_thr_exc_raise_state_id++;
-		state_cnt = _MachineStateCount[flavor];
-		kr = thread_getstatus(self, flavor,
-				      (thread_state_t)state,
-				      &state_cnt);
-		if (kr == KERN_SUCCESS) {
-		    kr = exception_raise_state_identity(exc_port,
-				retrieve_thread_self_fast(self),
-				retrieve_task_self_fast(self->task),
-				exception,
-				code, codeCnt,
-				&flavor,
-				state, state_cnt,
-				state, &state_cnt);
-		    if (kr == MACH_MSG_SUCCESS)
-			kr = thread_setstatus(self, flavor,
-					      (thread_state_t)state,
-					      state_cnt);
-		}
+	code[0] = exccode;
+	code[1] = excsubcode;
 
-		if (kr == KERN_SUCCESS || kr == MACH_RCV_PORT_DIED)
-			return(KERN_SUCCESS);
-		return(KERN_FAILURE);
-	}
-	
-	default:
-		
-		return(KERN_FAILURE);
-	}/* switch */
-	return(KERN_FAILURE);
+	wsave = thread_interrupt_level(THREAD_UNINT);
+	kr = exception_triage(exception, code, EXCEPTION_CODE_MAX);
+	(void) thread_interrupt_level(wsave);
+	return kr;
 }
 
 
-
-
 /*
- *	Handle interface for special perfomance monitoring
+ *	Handle interface for special performance monitoring
  *	This is a special case of the host exception handler
  */
-
-kern_return_t sys_perf_notify(struct task *task,
-	exception_data_t code,
-	mach_msg_type_number_t codeCnt)
+kern_return_t sys_perf_notify(thread_t thread, int pid) 
 {
 	host_priv_t		hostp;
-	struct exception_action *excp;
-	thread_t		thread = current_thread();
 	ipc_port_t		xport;
-	kern_return_t	ret;
 	wait_interrupt_t	wsave;
+	kern_return_t		ret;
 
-	hostp = host_priv_self();				/* Get the host privileged ports */
-	excp = &hostp->exc_actions[EXC_RPC_ALERT];	/* Point to the RPC_ALERT action */
+	hostp = host_priv_self();	/* Get the host privileged ports */
+	mach_exception_data_type_t	code[EXCEPTION_CODE_MAX];
+	code[0] = 0xFF000001;		/* Set terminate code */
+	code[1] = pid;		/* Pass out the pid */
 
-	mutex_lock(&hostp->lock);				/* Lock the priv port */
-	xport = excp->port;						/* Get the port for this exception */
-	if (!IP_VALID(xport)) {					/* Is it valid? */
-		mutex_unlock(&hostp->lock);			/* Unlock */
-		return(KERN_FAILURE);				/* Go away... */
-	}
+	struct task *task = thread->task;
+	xport = hostp->exc_actions[EXC_RPC_ALERT].port;	
 
-	ip_lock(xport);							/* Lock the exception port */
-	if (!ip_active(xport)) {				/* and is it active? */
-		ip_unlock(xport);					/* Nope, fail */
-		mutex_unlock(&hostp->lock);			/* Unlock */
-		return(KERN_FAILURE);				/* Go away... */
-	}
+	/* Make sure we're not catching our own exception */
+	if (!IP_VALID(xport) ||
+			!ip_active(xport) ||
+			task->itk_space == xport->data.receiver) {
 
-	if (task->itk_space == xport->data.receiver) {	/* Are we trying to send to ourselves? */
-		ip_unlock(xport);					/* Yes, fail */
-		mutex_unlock(&hostp->lock);			/* Unlock */
-		return(KERN_FAILURE);				/* Go away... */
+		return(KERN_FAILURE);	
 	}
-	
-	ip_reference(xport);					/* Bump reference so it doesn't go away */
-	xport->ip_srights++;					/* Bump send rights */
-	ip_unlock(xport);						/* We can unlock it now */
-
-	mutex_unlock(&hostp->lock);				/* All done with the lock */
 
-	wsave = thread_interrupt_level(THREAD_UNINT);	/* Make sure we aren't aborted here */
-	
-	ret = exception_raise(xport,			/* Send the exception to the perf handler */
-		retrieve_thread_self_fast(thread),		/* Not always the dying guy */
-		retrieve_task_self_fast(thread->task),	/* Not always the dying guy */
-		EXC_RPC_ALERT,						/* Unused exception type until now */
-		code, codeCnt);	
-		
-	(void)thread_interrupt_level(wsave);	/* Restore interrupt level */			
-
-	return(ret);							/* Tell caller how it went */
+	wsave = thread_interrupt_level(THREAD_UNINT);	
+	ret = exception_deliver(
+			thread,
+			EXC_RPC_ALERT, 
+			code, 
+			2, 
+			hostp->exc_actions,
+			&hostp->lock);
+	(void)thread_interrupt_level(wsave);
+
+	return(ret);
 }
+
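Context for the MACH_EXCEPTION_CODES handling added above: exception_deliver() now picks between the 64-bit mach_exception_raise*() MIG routines and the legacy 32-bit exception_raise*() routines depending on whether the registered behavior carries the MACH_EXCEPTION_CODES flag. The sketch below is not part of this change; it is a minimal, illustrative user-space harness (the program structure and error handling are assumptions, only public Mach APIs are used) that registers a task exception port with that flag, which is what drives the code64 path in the kernel.

/*
 * Minimal sketch (assumed test harness, not part of this diff): register an
 * exception port on the current task so EXC_BAD_ACCESS is delivered with
 * 64-bit codes, i.e. the kernel takes the mach_exception_raise() (code64)
 * branch in exception_deliver() above.
 */
#include <mach/mach.h>
#include <mach/mach_error.h>
#include <stdio.h>

int main(void)
{
	mach_port_t exc_port;
	kern_return_t kr;

	/* Allocate a receive right to act as the exception port. */
	kr = mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_RECEIVE, &exc_port);
	if (kr != KERN_SUCCESS) {
		fprintf(stderr, "mach_port_allocate: %s\n", mach_error_string(kr));
		return 1;
	}

	/* Add a send right under the same name so the kernel can send to it. */
	kr = mach_port_insert_right(mach_task_self(), exc_port, exc_port,
	    MACH_MSG_TYPE_MAKE_SEND);
	if (kr != KERN_SUCCESS) {
		fprintf(stderr, "mach_port_insert_right: %s\n", mach_error_string(kr));
		return 1;
	}

	/*
	 * EXCEPTION_DEFAULT | MACH_EXCEPTION_CODES asks for 64-bit exception
	 * codes; without MACH_EXCEPTION_CODES the kernel falls back to the
	 * 32-bit exception_raise() path (the small_code[] truncation above).
	 */
	kr = task_set_exception_ports(mach_task_self(), EXC_MASK_BAD_ACCESS,
	    exc_port,
	    (exception_behavior_t)(EXCEPTION_DEFAULT | MACH_EXCEPTION_CODES),
	    THREAD_STATE_NONE);
	if (kr != KERN_SUCCESS) {
		fprintf(stderr, "task_set_exception_ports: %s\n", mach_error_string(kr));
		return 1;
	}

	/* A real handler would now run a mach_msg() receive loop on exc_port. */
	printf("exception port registered with MACH_EXCEPTION_CODES\n");
	return 0;
}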