apple/xnu (xnu-517.9.4) - osfmk/kern/exception.c
/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */

#include <mach_kdb.h>

#include <mach/mach_types.h>
#include <mach/boolean.h>
#include <mach/kern_return.h>
#include <mach/message.h>
#include <mach/port.h>
#include <mach/mig_errors.h>
#include <mach/thread_status.h>
#include <mach/exception_types.h>
#include <ipc/port.h>
#include <ipc/ipc_entry.h>
#include <ipc/ipc_object.h>
#include <ipc/ipc_notify.h>
#include <ipc/ipc_space.h>
#include <ipc/ipc_pset.h>
#include <ipc/ipc_machdep.h>
#include <kern/etap_macros.h>
#include <kern/counters.h>
#include <kern/ipc_tt.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/thread_swap.h>
#include <kern/processor.h>
#include <kern/sched.h>
#include <kern/sched_prim.h>
#include <kern/host.h>
#include <kern/misc_protos.h>
#include <string.h>
#include <mach/exc.h>

#if	MACH_KDB
#include <ddb/db_trap.h>
#endif	/* MACH_KDB */

#if	MACH_KDB

#include <ddb/db_output.h>

#if iPSC386 || iPSC860
boolean_t debug_user_with_kdb = TRUE;
#else
boolean_t debug_user_with_kdb = FALSE;
#endif

#endif	/* MACH_KDB */

unsigned long c_thr_exc_raise = 0;
unsigned long c_thr_exc_raise_state = 0;
unsigned long c_thr_exc_raise_state_id = 0;
unsigned long c_tsk_exc_raise = 0;
unsigned long c_tsk_exc_raise_state = 0;
unsigned long c_tsk_exc_raise_state_id = 0;


/*
 *	Routine:	exception_deliver
 *	Purpose:
 *		Make an upcall to the exception server provided.
 *	Conditions:
 *		Nothing locked and no resources held.
 *		Called from an exception context, so
 *		thread_exception_return and thread_kdb_return
 *		are possible.
 *	Returns:
 *		If the exception was not handled by this handler,
 *		returns to the caller so the next handler in line can
 *		be tried; otherwise does not return.
 */
void
exception_deliver(
	exception_type_t	exception,
	exception_data_t	code,
	mach_msg_type_number_t	codeCnt,
	struct exception_action *excp,
	mutex_t			*mutex)
{
	thread_act_t		a_self = current_act();
	ipc_port_t		exc_port;
	int			behavior;
	int			flavor;
	kern_return_t		kr;

	/*
	 *  Save work if we are terminating.
	 *  Just go back to our AST handler.
	 */
	if (!a_self->active)
		thread_exception_return();

	/*
	 * Snapshot the exception action data under lock for consistency.
	 * Hold a reference to the port over the exception_raise_* calls
	 * so it can't be destroyed.  This seems like overkill, but keeps
	 * the port from disappearing between now and when
	 * ipc_object_copyin_from_kernel is finally called.
	 */
	mutex_lock(mutex);
	exc_port = excp->port;
	if (!IP_VALID(exc_port)) {
		mutex_unlock(mutex);
		return;
	}
	ip_lock(exc_port);
	if (!ip_active(exc_port)) {
		ip_unlock(exc_port);
		mutex_unlock(mutex);
		return;
	}
	ip_reference(exc_port);
	exc_port->ip_srights++;
	ip_unlock(exc_port);

	flavor = excp->flavor;
	behavior = excp->behavior;
	mutex_unlock(mutex);

	switch (behavior) {
	case EXCEPTION_STATE: {
		mach_msg_type_number_t state_cnt;
		thread_state_data_t state;

		c_thr_exc_raise_state++;
		state_cnt = state_count[flavor];
		kr = thread_getstatus(a_self, flavor,
				      (thread_state_t)state,
				      &state_cnt);
		if (kr == KERN_SUCCESS) {
			kr = exception_raise_state(exc_port, exception,
						   code, codeCnt,
						   &flavor,
						   state, state_cnt,
						   state, &state_cnt);
			if (kr == MACH_MSG_SUCCESS)
				kr = thread_setstatus(a_self, flavor,
						      (thread_state_t)state,
						      state_cnt);
		}

		if (kr == KERN_SUCCESS || kr == MACH_RCV_PORT_DIED)
			thread_exception_return();
		/*NOTREACHED*/
		return;
	}

	case EXCEPTION_DEFAULT:
		c_thr_exc_raise++;
		kr = exception_raise(exc_port,
				     retrieve_act_self_fast(a_self),
				     retrieve_task_self_fast(a_self->task),
				     exception,
				     code, codeCnt);

		if (kr == KERN_SUCCESS || kr == MACH_RCV_PORT_DIED)
			thread_exception_return();
		/*NOTREACHED*/
		return;

	case EXCEPTION_STATE_IDENTITY: {
		mach_msg_type_number_t state_cnt;
		thread_state_data_t state;

		c_thr_exc_raise_state_id++;
		state_cnt = state_count[flavor];
		kr = thread_getstatus(a_self, flavor,
				      (thread_state_t)state,
				      &state_cnt);
		if (kr == KERN_SUCCESS) {
			kr = exception_raise_state_identity(exc_port,
					retrieve_act_self_fast(a_self),
					retrieve_task_self_fast(a_self->task),
					exception,
					code, codeCnt,
					&flavor,
					state, state_cnt,
					state, &state_cnt);
			if (kr == MACH_MSG_SUCCESS)
				kr = thread_setstatus(a_self, flavor,
						      (thread_state_t)state,
						      state_cnt);
		}

		if (kr == KERN_SUCCESS || kr == MACH_RCV_PORT_DIED)
			thread_exception_return();
		/*NOTREACHED*/
		return;
	}

	default:
		panic ("bad exception behavior!");
	}/* switch */
}
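
/*
 * Note on the behaviors above: each case corresponds to one of the MIG
 * upcalls generated from exc.defs (declared in <mach/exc.h>).
 * EXCEPTION_DEFAULT sends the faulting thread and task ports along with
 * the exception code; EXCEPTION_STATE sends and receives the requested
 * thread state instead of identities; EXCEPTION_STATE_IDENTITY sends
 * both.  A user-space exception server answers these messages by
 * implementing the matching catch_* routines.  As a minimal sketch
 * (user code, not part of this file), the EXCEPTION_DEFAULT server
 * routine looks like:
 *
 *	kern_return_t
 *	catch_exception_raise(
 *		mach_port_t		exception_port,
 *		mach_port_t		thread,
 *		mach_port_t		task,
 *		exception_type_t	exception,
 *		exception_data_t	code,
 *		mach_msg_type_number_t	codeCnt)
 *	{
 *		return KERN_SUCCESS;
 *	}
 *
 * Returning KERN_SUCCESS from the server is what lets the
 * exception_raise() call above return KERN_SUCCESS, so the thread
 * resumes via thread_exception_return(); any other reply lets the
 * caller try the next handler level.
 */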

/*
 *	Routine:	exception
 *	Purpose:
 *		The current thread caught an exception.
 *		We make an up-call to the thread's exception server;
 *		if it does not handle the exception, the task-level and
 *		host-level servers are tried in turn, and the task is
 *		terminated if nobody handles it.
 *	Conditions:
 *		Nothing locked and no resources held.
 *		Called from an exception context, so
 *		thread_exception_return and thread_kdb_return
 *		are possible.
 *	Returns:
 *		Doesn't return.
 */
void
exception(
	exception_type_t	exception,
	exception_data_t	code,
	mach_msg_type_number_t	codeCnt)
{
	thread_act_t		thr_act;
	task_t			task;
	host_priv_t		host_priv;
	struct exception_action *excp;
	mutex_t			*mutex;

	assert(exception != EXC_RPC_ALERT);

	if (exception == KERN_SUCCESS)
		panic("exception");

	/*
	 * Try to raise the exception at the activation level.
	 */
	thr_act = current_act();
	mutex = mutex_addr(thr_act->lock);
	excp = &thr_act->exc_actions[exception];
	exception_deliver(exception, code, codeCnt, excp, mutex);

	/*
	 * Maybe the task level will handle it.
	 */
	task = current_task();
	mutex = mutex_addr(task->lock);
	excp = &task->exc_actions[exception];
	exception_deliver(exception, code, codeCnt, excp, mutex);

	/*
	 * How about at the host level?
	 */
	host_priv = host_priv_self();
	mutex = mutex_addr(host_priv->lock);
	excp = &host_priv->exc_actions[exception];
	exception_deliver(exception, code, codeCnt, excp, mutex);

	/*
	 * Nobody handled it, terminate the task.
	 */

#if	MACH_KDB
	if (debug_user_with_kdb) {
		/*
		 *	Debug the exception with kdb.
		 *	If kdb handles the exception,
		 *	then thread_kdb_return won't return.
		 */
		db_printf("No exception server, calling kdb...\n");
		thread_kdb_return();
	}
#endif	/* MACH_KDB */

	(void) task_terminate(task);
	thread_exception_return();
	/*NOTREACHED*/
}
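
/*
 * For illustration: the chain above (activation, then task, then host)
 * is what user code plugs into when it registers an exception port.
 * A minimal user-space sketch of registering a task-level
 * EXCEPTION_DEFAULT handler follows; it is not part of this file, and
 * the exception mask chosen (EXC_MASK_BAD_ACCESS) is only an example.
 *
 *	#include <mach/mach.h>
 *
 *	mach_port_t exc_port;
 *
 *	mach_port_allocate(mach_task_self(),
 *			   MACH_PORT_RIGHT_RECEIVE, &exc_port);
 *	mach_port_insert_right(mach_task_self(), exc_port, exc_port,
 *			       MACH_MSG_TYPE_MAKE_SEND);
 *	task_set_exception_ports(mach_task_self(),
 *				 EXC_MASK_BAD_ACCESS,
 *				 exc_port,
 *				 EXCEPTION_DEFAULT,
 *				 THREAD_STATE_NONE);
 *
 * Once registered, an exception in that mask reaches exception() above,
 * which finds the port in task->exc_actions[] and hands it to
 * exception_deliver().
 */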
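
/*
 *	Routine:	bsd_exception
 *	Purpose:
 *		Raise an exception for the current thread at the task
 *		level only, reporting the result to the caller instead
 *		of terminating the task.
 *	Conditions:
 *		Nothing locked and no resources held.
 *	Returns:
 *		KERN_SUCCESS if a task-level handler accepted the
 *		exception, KERN_FAILURE otherwise.
 */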
kern_return_t
bsd_exception(
	exception_type_t	exception,
	exception_data_t	code,
	mach_msg_type_number_t	codeCnt)
{
	task_t			task;
	host_priv_t		host_priv;
	struct exception_action *excp;
	mutex_t			*mutex;
	thread_act_t		a_self = current_act();
	ipc_port_t		exc_port;
	int			behavior;
	int			flavor;
	kern_return_t		kr;

	/*
	 * Maybe the task level will handle it.
	 */
	task = current_task();
	mutex = mutex_addr(task->lock);
	excp = &task->exc_actions[exception];

	/*
	 *  Save work if we are terminating.
	 *  Just go back to our AST handler.
	 */
	if (!a_self->active) {
		return(KERN_FAILURE);
	}

	/*
	 * Snapshot the exception action data under lock for consistency.
	 * Hold a reference to the port over the exception_raise_* calls
	 * so it can't be destroyed.  This seems like overkill, but keeps
	 * the port from disappearing between now and when
	 * ipc_object_copyin_from_kernel is finally called.
	 */
	mutex_lock(mutex);
	exc_port = excp->port;
	if (!IP_VALID(exc_port)) {
		mutex_unlock(mutex);
		return(KERN_FAILURE);
	}
	ip_lock(exc_port);
	if (!ip_active(exc_port)) {
		ip_unlock(exc_port);
		mutex_unlock(mutex);
		return(KERN_FAILURE);
	}
	ip_reference(exc_port);
	exc_port->ip_srights++;
	ip_unlock(exc_port);

	flavor = excp->flavor;
	behavior = excp->behavior;
	mutex_unlock(mutex);

	switch (behavior) {
	case EXCEPTION_STATE: {
		mach_msg_type_number_t state_cnt;
		thread_state_data_t state;

		c_thr_exc_raise_state++;
		state_cnt = state_count[flavor];
		kr = thread_getstatus(a_self, flavor,
				      (thread_state_t)state,
				      &state_cnt);
		if (kr == KERN_SUCCESS) {
			kr = exception_raise_state(exc_port, exception,
						   code, codeCnt,
						   &flavor,
						   state, state_cnt,
						   state, &state_cnt);
			if (kr == MACH_MSG_SUCCESS)
				kr = thread_setstatus(a_self, flavor,
						      (thread_state_t)state,
						      state_cnt);
		}

		if (kr == KERN_SUCCESS || kr == MACH_RCV_PORT_DIED)
			return(KERN_SUCCESS);

		return(KERN_FAILURE);
	}

	case EXCEPTION_DEFAULT:
		c_thr_exc_raise++;
		kr = exception_raise(exc_port,
				     retrieve_act_self_fast(a_self),
				     retrieve_task_self_fast(a_self->task),
				     exception,
				     code, codeCnt);

		if (kr == KERN_SUCCESS || kr == MACH_RCV_PORT_DIED)
			return(KERN_SUCCESS);
		return(KERN_FAILURE);

	case EXCEPTION_STATE_IDENTITY: {
		mach_msg_type_number_t state_cnt;
		thread_state_data_t state;

		c_thr_exc_raise_state_id++;
		state_cnt = state_count[flavor];
		kr = thread_getstatus(a_self, flavor,
				      (thread_state_t)state,
				      &state_cnt);
		if (kr == KERN_SUCCESS) {
			kr = exception_raise_state_identity(exc_port,
					retrieve_act_self_fast(a_self),
					retrieve_task_self_fast(a_self->task),
					exception,
					code, codeCnt,
					&flavor,
					state, state_cnt,
					state, &state_cnt);
			if (kr == MACH_MSG_SUCCESS)
				kr = thread_setstatus(a_self, flavor,
						      (thread_state_t)state,
						      state_cnt);
		}

		if (kr == KERN_SUCCESS || kr == MACH_RCV_PORT_DIED)
			return(KERN_SUCCESS);
		return(KERN_FAILURE);
	}

	default:

		return(KERN_FAILURE);
	}/* switch */
	return(KERN_FAILURE);
}
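
/*
 * Hypothetical caller sketch (the EXC_SOFTWARE/EXC_SOFT_SIGNAL pairing,
 * the two-word code layout, and the signum variable are illustrative
 * assumptions, not taken from this file): a caller, for instance on the
 * BSD side of the kernel as the name suggests, reports a condition to
 * any task-level Mach handler by building a small code array and
 * checking the result:
 *
 *	exception_data_type_t	codes[2];
 *
 *	codes[0] = EXC_SOFT_SIGNAL;
 *	codes[1] = signum;
 *	if (bsd_exception(EXC_SOFTWARE, codes, 2) != KERN_SUCCESS) {
 *		... fall back to the caller's own handling ...
 *	}
 *
 * A KERN_FAILURE return means no task-level handler claimed the
 * exception, so the caller must handle it itself.
 */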



/*
 * Handler interface for special performance monitoring.
 * This is a special case of the host exception handler: the
 * notification is raised as EXC_RPC_ALERT on the host-level
 * exception port.
 */

kern_return_t sys_perf_notify(struct task *task,
	exception_data_t code,
	mach_msg_type_number_t codeCnt)
{
	host_priv_t		hostp;
	struct exception_action *excp;
	thread_act_t		act = current_act();
	thread_t		thr = current_thread();
	ipc_port_t		xport;
	kern_return_t		ret;
	int			abrt;
	spl_t			ints;
	wait_interrupt_t	wsave;

	hostp = host_priv_self();			/* Get the host privileged ports */
	excp = &hostp->exc_actions[EXC_RPC_ALERT];	/* Point to the RPC_ALERT action */

	mutex_lock(&hostp->lock);			/* Lock the priv port */
	xport = excp->port;				/* Get the port for this exception */
	if (!IP_VALID(xport)) {				/* Is it valid? */
		mutex_unlock(&hostp->lock);		/* Unlock */
		return(KERN_FAILURE);			/* Go away... */
	}

	ip_lock(xport);					/* Lock the exception port */
	if (!ip_active(xport)) {			/* and is it active? */
		ip_unlock(xport);			/* Nope, fail */
		mutex_unlock(&hostp->lock);		/* Unlock */
		return(KERN_FAILURE);			/* Go away... */
	}

	if (task->itk_space == xport->data.receiver) {	/* Are we trying to send to ourselves? */
		ip_unlock(xport);			/* Yes, fail */
		mutex_unlock(&hostp->lock);		/* Unlock */
		return(KERN_FAILURE);			/* Go away... */
	}

	ip_reference(xport);				/* Bump reference so it doesn't go away */
	xport->ip_srights++;				/* Bump send rights */
	ip_unlock(xport);				/* We can unlock it now */

	mutex_unlock(&hostp->lock);			/* All done with the lock */

	wsave = thread_interrupt_level(THREAD_UNINT);	/* Make sure we aren't aborted here */

	ret = exception_raise(xport,			/* Send the exception to the perf handler */
		retrieve_act_self_fast(act),		/* Not always the dying guy */
		retrieve_task_self_fast(act->task),	/* Not always the dying guy */
		EXC_RPC_ALERT,				/* Unused exception type until now */
		code, codeCnt);

	(void)thread_interrupt_level(wsave);		/* Restore interrupt level */

	return(ret);					/* Tell caller how it went */
}
