/* apple/xnu (xnu-6153.121.1) - osfmk/i386/bsd_i386.c */
/*
 * Copyright (c) 2000-2019 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#ifdef MACH_BSD
#include <mach_debug.h>
#include <mach_ldebug.h>

#include <mach/kern_return.h>
#include <mach/mach_traps.h>
#include <mach/thread_status.h>
#include <mach/vm_param.h>

#include <kern/counters.h>
#include <kern/cpu_data.h>
#include <kern/mach_param.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/sched_prim.h>
#include <kern/misc_protos.h>
#include <kern/assert.h>
#include <kern/debug.h>
#include <kern/spl.h>
#include <kern/syscall_sw.h>
#include <ipc/ipc_port.h>
#include <vm/vm_kern.h>
#include <vm/pmap.h>

#include <i386/cpu_number.h>
#include <i386/eflags.h>
#include <i386/proc_reg.h>
#include <i386/tss.h>
#include <i386/user_ldt.h>
#include <i386/fpu.h>
#include <i386/machdep_call.h>
#include <i386/vmparam.h>
#include <i386/mp_desc.h>
#include <i386/misc_protos.h>
#include <i386/thread.h>
#include <i386/trap.h>
#include <i386/seg.h>
#include <mach/i386/syscall_sw.h>
#include <sys/syscall.h>
#include <sys/kdebug.h>
#include <sys/errno.h>
#include <../bsd/sys/sysent.h>

#ifdef MACH_BSD
extern void mach_kauth_cred_uthread_update(void);
extern void throttle_lowpri_io(int);
#endif

void * find_user_regs(thread_t);

unsigned int get_msr_exportmask(void);

unsigned int get_msr_nbits(void);

unsigned int get_msr_rbits(void);

/*
 * thread_userstack:
 *
 * Return the user stack pointer from the machine
 * dependent thread state info.
 */
kern_return_t
thread_userstack(
	__unused thread_t thread,
	int flavor,
	thread_state_t tstate,
	__unused unsigned int count,
	mach_vm_offset_t *user_stack,
	int *customstack,
	__unused boolean_t is64bit
	)
{
	if (customstack) {
		*customstack = 0;
	}

	switch (flavor) {
	case x86_THREAD_STATE32:
	{
		x86_thread_state32_t *state25;

		state25 = (x86_thread_state32_t *) tstate;

		if (state25->esp) {
			*user_stack = state25->esp;
			if (customstack) {
				*customstack = 1;
			}
		} else {
			*user_stack = VM_USRSTACK32;
			if (customstack) {
				*customstack = 0;
			}
		}
		break;
	}

	case x86_THREAD_FULL_STATE64:
	/* FALL THROUGH */
	case x86_THREAD_STATE64:
	{
		x86_thread_state64_t *state25;

		state25 = (x86_thread_state64_t *) tstate;

		if (state25->rsp) {
			*user_stack = state25->rsp;
			if (customstack) {
				*customstack = 1;
			}
		} else {
			*user_stack = VM_USRSTACK64;
			if (customstack) {
				*customstack = 0;
			}
		}
		break;
	}

	default:
		return KERN_INVALID_ARGUMENT;
	}

	return KERN_SUCCESS;
}

/*
 * thread_userstackdefault:
 *
 * Return the default stack location for the
 * thread, if otherwise unknown.
 */
kern_return_t
thread_userstackdefault(
	mach_vm_offset_t *default_user_stack,
	boolean_t is64bit)
{
	if (is64bit) {
		*default_user_stack = VM_USRSTACK64;
	} else {
		*default_user_stack = VM_USRSTACK32;
	}
	return KERN_SUCCESS;
}

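/*
 * thread_entrypoint:
 *
 * Return the user entry point (initial PC) from the machine-dependent
 * thread state, falling back to VM_MIN_ADDRESS when none is supplied.
 */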
kern_return_t
thread_entrypoint(
	__unused thread_t thread,
	int flavor,
	thread_state_t tstate,
	__unused unsigned int count,
	mach_vm_offset_t *entry_point
	)
{
	/*
	 * Set a default.
	 */
	if (*entry_point == 0) {
		*entry_point = VM_MIN_ADDRESS;
	}

	switch (flavor) {
	case x86_THREAD_STATE32:
	{
		x86_thread_state32_t *state25;

		state25 = (x86_thread_state32_t *) tstate;
		*entry_point = state25->eip ? state25->eip : VM_MIN_ADDRESS;
		break;
	}

	case x86_THREAD_STATE64:
	{
		x86_thread_state64_t *state25;

		state25 = (x86_thread_state64_t *) tstate;
		*entry_point = state25->rip ? state25->rip : VM_MIN_ADDRESS64;
		break;
	}
	}
	return KERN_SUCCESS;
}

/*
 * FIXME - thread_set_child
 */

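/*
 * Set up the register state the forked child thread returns to user space
 * with: the pid argument goes in %eax/%rax, %edx/%rdx is set to 1 so the
 * fork stub can tell it is running on the child side, and the carry flag
 * is cleared to report success.
 */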
void thread_set_child(thread_t child, int pid);
void
thread_set_child(thread_t child, int pid)
{
	pal_register_cache_state(child, DIRTY);

	if (thread_is_64bit_addr(child)) {
		x86_saved_state64_t *iss64;

		iss64 = USER_REGS64(child);

		iss64->rax = pid;
		iss64->rdx = 1;
		iss64->isf.rflags &= ~EFL_CF;
	} else {
		x86_saved_state32_t *iss32;

		iss32 = USER_REGS32(child);

		iss32->eax = pid;
		iss32->edx = 1;
		iss32->efl &= ~EFL_CF;
	}
}


/*
 * System Call handling code
 */

extern long fuword(vm_offset_t);

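/*
 * machdep_syscall:
 *
 * Dispatch a machine-dependent system call from a 32-bit user thread.
 * The call number arrives in %eax and any arguments are copied in from
 * the user stack.  Control returns to user space via
 * thread_exception_return(), so this function never returns to its caller.
 */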
__attribute__((noreturn))
void
machdep_syscall(x86_saved_state_t *state)
{
	int args[machdep_call_count];
	int trapno;
	int nargs;
	const machdep_call_t *entry;
	x86_saved_state32_t *regs;

	assert(is_saved_state32(state));
	regs = saved_state32(state);

	trapno = regs->eax;
#if DEBUG_TRACE
	kprintf("machdep_syscall(0x%08x) code=%d\n", regs, trapno);
#endif

	DEBUG_KPRINT_SYSCALL_MDEP(
		"machdep_syscall: trapno=%d\n", trapno);

	if (trapno < 0 || trapno >= machdep_call_count) {
		regs->eax = (unsigned int)kern_invalid(NULL);

		thread_exception_return();
		/* NOTREACHED */
	}
	entry = &machdep_call_table[trapno];
	nargs = entry->nargs;

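	/*
	 * Arguments live on the 32-bit user stack, just above the return
	 * address pushed by the call into the syscall stub; hence the
	 * sizeof(int) offset from the saved %esp below.
	 */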
	if (nargs != 0) {
		if (copyin((user_addr_t) regs->uesp + sizeof(int),
		    (char *) args, (nargs * sizeof(int)))) {
			regs->eax = KERN_INVALID_ADDRESS;

			thread_exception_return();
			/* NOTREACHED */
		}
	}
	switch (nargs) {
	case 0:
		regs->eax = (*entry->routine.args_0)();
		break;
	case 1:
		regs->eax = (*entry->routine.args_1)(args[0]);
		break;
	case 2:
		regs->eax = (*entry->routine.args_2)(args[0], args[1]);
		break;
	case 3:
		if (!entry->bsd_style) {
			regs->eax = (*entry->routine.args_3)(args[0], args[1], args[2]);
		} else {
			int error;
			uint32_t rval;

			error = (*entry->routine.args_bsd_3)(&rval, args[0], args[1], args[2]);
			if (error) {
				regs->eax = error;
				regs->efl |= EFL_CF;    /* carry bit */
			} else {
				regs->eax = rval;
				regs->efl &= ~EFL_CF;
			}
		}
		break;
	case 4:
		regs->eax = (*entry->routine.args_4)(args[0], args[1], args[2], args[3]);
		break;

	default:
		panic("machdep_syscall: too many args");
	}

	DEBUG_KPRINT_SYSCALL_MDEP("machdep_syscall: retval=%u\n", regs->eax);

#if DEBUG || DEVELOPMENT
	kern_allocation_name_t
	prior __assert_only = thread_get_kernel_state(current_thread())->allocation_name;
	assertf(prior == NULL, "thread_set_allocation_name(\"%s\") not cleared", kern_allocation_get_name(prior));
#endif /* DEBUG || DEVELOPMENT */

	throttle_lowpri_io(1);

	thread_exception_return();
	/* NOTREACHED */
}

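/*
 * machdep_syscall64:
 *
 * 64-bit counterpart of machdep_syscall().  The call number arrives in
 * %rax (masked with SYSCALL_NUMBER_MASK) and up to three arguments are
 * taken directly from %rdi, %rsi and %rdx rather than from the user stack.
 * Exits via thread_exception_return().
 */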
__attribute__((noreturn))
void
machdep_syscall64(x86_saved_state_t *state)
{
	int trapno;
	const machdep_call_t *entry;
	x86_saved_state64_t *regs;

	assert(is_saved_state64(state));
	regs = saved_state64(state);

	trapno = (int)(regs->rax & SYSCALL_NUMBER_MASK);

	DEBUG_KPRINT_SYSCALL_MDEP(
		"machdep_syscall64: trapno=%d\n", trapno);

	if (trapno < 0 || trapno >= machdep_call_count) {
		regs->rax = (unsigned int)kern_invalid(NULL);

		thread_exception_return();
		/* NOTREACHED */
	}
	entry = &machdep_call_table64[trapno];

	switch (entry->nargs) {
	case 0:
		regs->rax = (*entry->routine.args_0)();
		break;
	case 1:
		regs->rax = (*entry->routine.args64_1)(regs->rdi);
		break;
	case 2:
		regs->rax = (*entry->routine.args64_2)(regs->rdi, regs->rsi);
		break;
	case 3:
		if (!entry->bsd_style) {
			regs->rax = (*entry->routine.args64_3)(regs->rdi, regs->rsi, regs->rdx);
		} else {
			int error;
			uint32_t rval;

			error = (*entry->routine.args64_bsd_3)(&rval, regs->rdi, regs->rsi, regs->rdx);
			if (error) {
				regs->rax = (uint64_t)error;
				regs->isf.rflags |= EFL_CF;     /* carry bit */
			} else {
				regs->rax = rval;
				regs->isf.rflags &= ~(uint64_t)EFL_CF;
			}
		}
		break;
	default:
		panic("machdep_syscall64: too many args");
	}

	DEBUG_KPRINT_SYSCALL_MDEP("machdep_syscall64: retval=%llu\n", regs->rax);

#if DEBUG || DEVELOPMENT
	kern_allocation_name_t
	prior __assert_only = thread_get_kernel_state(current_thread())->allocation_name;
	assertf(prior == NULL, "thread_set_allocation_name(\"%s\") not cleared", kern_allocation_get_name(prior));
#endif /* DEBUG || DEVELOPMENT */

	throttle_lowpri_io(1);

	thread_exception_return();
	/* NOTREACHED */
}

#endif  /* MACH_BSD */


typedef kern_return_t (*mach_call_t)(void *);

struct mach_call_args {
	syscall_arg_t arg1;
	syscall_arg_t arg2;
	syscall_arg_t arg3;
	syscall_arg_t arg4;
	syscall_arg_t arg5;
	syscall_arg_t arg6;
	syscall_arg_t arg7;
	syscall_arg_t arg8;
	syscall_arg_t arg9;
};

static kern_return_t
mach_call_arg_munger32(uint32_t sp, struct mach_call_args *args, const mach_trap_t *trapp);

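/*
 * mach_call_arg_munger32:
 *
 * Copy a 32-bit user thread's Mach trap arguments in from its stack
 * (skipping the return address) and munge them into the 64-bit
 * syscall_arg_t slots of struct mach_call_args expected by the trap
 * handlers.
 */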
static kern_return_t
mach_call_arg_munger32(uint32_t sp, struct mach_call_args *args, const mach_trap_t *trapp)
{
	if (copyin((user_addr_t)(sp + sizeof(int)), (char *)args, trapp->mach_trap_u32_words * sizeof(int))) {
		return KERN_INVALID_ARGUMENT;
	}
#if CONFIG_REQUIRES_U32_MUNGING
	trapp->mach_trap_arg_munge32(args);
#else
#error U32 mach traps on x86_64 kernel requires munging
#endif
	return KERN_SUCCESS;
}


__private_extern__ void mach_call_munger(x86_saved_state_t *state);

extern const char *mach_syscall_name_table[];

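/*
 * mach_call_munger:
 *
 * Common entry point for Mach traps issued by 32-bit user threads.
 * Validates the trap number, copies in and munges any arguments, calls
 * the trap function, stores the result in %eax, and returns to user
 * space via thread_exception_return().
 */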
__attribute__((noreturn))
void
mach_call_munger(x86_saved_state_t *state)
{
	int argc;
	int call_number;
	mach_call_t mach_call;
	kern_return_t retval;
	struct mach_call_args args = {
		.arg1 = 0,
		.arg2 = 0,
		.arg3 = 0,
		.arg4 = 0,
		.arg5 = 0,
		.arg6 = 0,
		.arg7 = 0,
		.arg8 = 0,
		.arg9 = 0
	};
	x86_saved_state32_t *regs;

	struct uthread *ut = get_bsdthread_info(current_thread());
	uthread_reset_proc_refcount(ut);

	assert(is_saved_state32(state));
	regs = saved_state32(state);

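	/*
	 * Mach trap numbers are negative in the user-visible syscall
	 * numbering, so the value in %eax is negated to index the trap table.
	 */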
	call_number = -(regs->eax);

	DEBUG_KPRINT_SYSCALL_MACH(
		"mach_call_munger: code=%d(%s)\n",
		call_number, mach_syscall_name_table[call_number]);
#if DEBUG_TRACE
	kprintf("mach_call_munger(0x%08x) code=%d\n", regs, call_number);
#endif

	if (call_number < 0 || call_number >= mach_trap_count) {
		i386_exception(EXC_SYSCALL, call_number, 1);
		/* NOTREACHED */
	}
	mach_call = (mach_call_t)mach_trap_table[call_number].mach_trap_function;

	if (mach_call == (mach_call_t)kern_invalid) {
		DEBUG_KPRINT_SYSCALL_MACH(
			"mach_call_munger: kern_invalid 0x%x\n", regs->eax);
		i386_exception(EXC_SYSCALL, call_number, 1);
		/* NOTREACHED */
	}

	argc = mach_trap_table[call_number].mach_trap_arg_count;
	if (argc) {
		retval = mach_call_arg_munger32(regs->uesp, &args, &mach_trap_table[call_number]);
		if (retval != KERN_SUCCESS) {
			regs->eax = retval;

			DEBUG_KPRINT_SYSCALL_MACH(
				"mach_call_munger: retval=0x%x\n", retval);

			thread_exception_return();
			/* NOTREACHED */
		}
	}

#ifdef MACH_BSD
	mach_kauth_cred_uthread_update();
#endif

	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
	    MACHDBG_CODE(DBG_MACH_EXCP_SC, (call_number)) | DBG_FUNC_START,
	    args.arg1, args.arg2, args.arg3, args.arg4, 0);

	retval = mach_call(&args);

	DEBUG_KPRINT_SYSCALL_MACH("mach_call_munger: retval=0x%x\n", retval);

	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
	    MACHDBG_CODE(DBG_MACH_EXCP_SC, (call_number)) | DBG_FUNC_END,
	    retval, 0, 0, 0, 0);

	regs->eax = retval;

#if DEBUG || DEVELOPMENT
	kern_allocation_name_t
	prior __assert_only = thread_get_kernel_state(current_thread())->allocation_name;
	assertf(prior == NULL, "thread_set_allocation_name(\"%s\") not cleared", kern_allocation_get_name(prior));
#endif /* DEBUG || DEVELOPMENT */

	throttle_lowpri_io(1);

#if PROC_REF_DEBUG
	if (__improbable(uthread_get_proc_refcount(ut) != 0)) {
		panic("system call returned with uu_proc_refcount != 0");
	}
#endif

	thread_exception_return();
	/* NOTREACHED */
}


__private_extern__ void mach_call_munger64(x86_saved_state_t *regs);

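/*
 * mach_call_munger64:
 *
 * Common entry point for Mach traps issued by 64-bit user threads.
 * The trap number arrives in the low bits of %rax (extracted with
 * SYSCALL_NUMBER_MASK); the first six arguments are read from registers
 * and any remainder is copied in from the user stack.  The result is
 * returned in %rax via thread_exception_return().
 */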
__attribute__((noreturn))
void
mach_call_munger64(x86_saved_state_t *state)
{
	int call_number;
	int argc;
	mach_call_t mach_call;
	struct mach_call_args args = {
		.arg1 = 0,
		.arg2 = 0,
		.arg3 = 0,
		.arg4 = 0,
		.arg5 = 0,
		.arg6 = 0,
		.arg7 = 0,
		.arg8 = 0,
		.arg9 = 0
	};
	x86_saved_state64_t *regs;

	struct uthread *ut = get_bsdthread_info(current_thread());
	uthread_reset_proc_refcount(ut);

	assert(is_saved_state64(state));
	regs = saved_state64(state);

	call_number = (int)(regs->rax & SYSCALL_NUMBER_MASK);

	DEBUG_KPRINT_SYSCALL_MACH(
		"mach_call_munger64: code=%d(%s)\n",
		call_number, mach_syscall_name_table[call_number]);

	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
	    MACHDBG_CODE(DBG_MACH_EXCP_SC, (call_number)) | DBG_FUNC_START,
	    regs->rdi, regs->rsi, regs->rdx, regs->r10, 0);

	if (call_number < 0 || call_number >= mach_trap_count) {
		i386_exception(EXC_SYSCALL, regs->rax, 1);
		/* NOTREACHED */
	}
	mach_call = (mach_call_t)mach_trap_table[call_number].mach_trap_function;

	if (mach_call == (mach_call_t)kern_invalid) {
		i386_exception(EXC_SYSCALL, regs->rax, 1);
		/* NOTREACHED */
	}
	argc = mach_trap_table[call_number].mach_trap_arg_count;
	if (argc) {
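		/*
		 * Up to six arguments arrive in %rdi, %rsi, %rdx, %r10, %r8
		 * and %r9, which sit contiguously at the start of
		 * x86_saved_state64_t, so they can be copied with a single
		 * memcpy.  Any further arguments were pushed on the user
		 * stack above the return address and are copied in below.
		 */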
		int args_in_regs = MIN(6, argc);
		__nochk_memcpy(&args.arg1, &regs->rdi, args_in_regs * sizeof(syscall_arg_t));

		if (argc > 6) {
			int copyin_count;

			assert(argc <= 9);
			copyin_count = (argc - 6) * (int)sizeof(syscall_arg_t);

			if (copyin((user_addr_t)(regs->isf.rsp + sizeof(user_addr_t)), (char *)&args.arg7, copyin_count)) {
				regs->rax = KERN_INVALID_ARGUMENT;

				thread_exception_return();
				/* NOTREACHED */
			}
		}
	}

#ifdef MACH_BSD
	mach_kauth_cred_uthread_update();
#endif

	regs->rax = (uint64_t)mach_call((void *)&args);

	DEBUG_KPRINT_SYSCALL_MACH("mach_call_munger64: retval=0x%llx\n", regs->rax);

	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
	    MACHDBG_CODE(DBG_MACH_EXCP_SC, (call_number)) | DBG_FUNC_END,
	    regs->rax, 0, 0, 0, 0);

#if DEBUG || DEVELOPMENT
	kern_allocation_name_t
	prior __assert_only = thread_get_kernel_state(current_thread())->allocation_name;
	assertf(prior == NULL, "thread_set_allocation_name(\"%s\") not cleared", kern_allocation_get_name(prior));
#endif /* DEBUG || DEVELOPMENT */

	throttle_lowpri_io(1);

#if PROC_REF_DEBUG
	if (__improbable(uthread_get_proc_refcount(ut) != 0)) {
		panic("system call returned with uu_proc_refcount != 0");
	}
#endif

	thread_exception_return();
	/* NOTREACHED */
}


/*
 * thread_setuserstack:
 *
 * Sets the user stack pointer into the machine
 * dependent thread state info.
 */
void
thread_setuserstack(
	thread_t thread,
	mach_vm_address_t user_stack)
{
	pal_register_cache_state(thread, DIRTY);
	if (thread_is_64bit_addr(thread)) {
		x86_saved_state64_t *iss64;

		iss64 = USER_REGS64(thread);

		iss64->isf.rsp = (uint64_t)user_stack;
	} else {
		x86_saved_state32_t *iss32;

		iss32 = USER_REGS32(thread);

		iss32->uesp = CAST_DOWN_EXPLICIT(unsigned int, user_stack);
	}
}

/*
 * thread_adjuserstack:
 *
 * Returns the adjusted user stack pointer from the machine
 * dependent thread state info. Used for small (<2G) deltas.
 */
uint64_t
thread_adjuserstack(
	thread_t thread,
	int adjust)
{
	pal_register_cache_state(thread, DIRTY);
	if (thread_is_64bit_addr(thread)) {
		x86_saved_state64_t *iss64;

		iss64 = USER_REGS64(thread);

		iss64->isf.rsp += adjust;

		return iss64->isf.rsp;
	} else {
		x86_saved_state32_t *iss32;

		iss32 = USER_REGS32(thread);

		iss32->uesp += adjust;

		return CAST_USER_ADDR_T(iss32->uesp);
	}
}

/*
 * thread_setentrypoint:
 *
 * Sets the user PC into the machine
 * dependent thread state info.
 */
void
thread_setentrypoint(thread_t thread, mach_vm_address_t entry)
{
	pal_register_cache_state(thread, DIRTY);
	if (thread_is_64bit_addr(thread)) {
		x86_saved_state64_t *iss64;

		iss64 = USER_REGS64(thread);

		iss64->isf.rip = (uint64_t)entry;
	} else {
		x86_saved_state32_t *iss32;

		iss32 = USER_REGS32(thread);

		iss32->eip = CAST_DOWN_EXPLICIT(unsigned int, entry);
	}
}


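/*
 * thread_setsinglestep:
 *
 * Enable or disable single-stepping for a thread by setting or clearing
 * the trace flag (EFL_TF) in its saved user EFLAGS/RFLAGS.  For 32-bit
 * threads that entered the kernel via sysenter, the code segment is
 * switched to SYSENTER_TF_CS so the return to user space takes the iret
 * path and the trace flag actually takes effect (see "Ensure IRET" below).
 */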
kern_return_t
thread_setsinglestep(thread_t thread, int on)
{
	pal_register_cache_state(thread, DIRTY);
	if (thread_is_64bit_addr(thread)) {
		x86_saved_state64_t *iss64;

		iss64 = USER_REGS64(thread);

		if (on) {
			iss64->isf.rflags |= EFL_TF;
		} else {
			iss64->isf.rflags &= ~EFL_TF;
		}
	} else {
		x86_saved_state32_t *iss32;

		iss32 = USER_REGS32(thread);

		if (on) {
			iss32->efl |= EFL_TF;
			/* Ensure IRET */
			if (iss32->cs == SYSENTER_CS) {
				iss32->cs = SYSENTER_TF_CS;
			}
		} else {
			iss32->efl &= ~EFL_TF;
		}
	}

	return KERN_SUCCESS;
}

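/*
 * get_user_regs / find_user_regs:
 *
 * Return a pointer to the thread's saved user register state, marking
 * the PAL register cache dirty first.  find_user_regs() is simply a
 * wrapper around get_user_regs().
 */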
void *
get_user_regs(thread_t th)
{
	pal_register_cache_state(th, DIRTY);
	return USER_STATE(th);
}

void *
find_user_regs(thread_t thread)
{
	return get_user_regs(thread);
}

#if CONFIG_DTRACE
/*
 * DTrace would like to have a peek at the kernel interrupt state, if available.
 */
x86_saved_state_t *find_kern_regs(thread_t);

x86_saved_state_t *
find_kern_regs(thread_t thread)
{
	if (thread == current_thread() &&
	    NULL != current_cpu_datap()->cpu_int_state &&
	    !(USER_STATE(thread) == current_cpu_datap()->cpu_int_state &&
	    current_cpu_datap()->cpu_interrupt_level == 1)) {
		return current_cpu_datap()->cpu_int_state;
	} else {
		return NULL;
	}
}

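/*
 * Return the top of the current CPU's interrupt stack, for DTrace's use.
 */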
vm_offset_t dtrace_get_cpu_int_stack_top(void);

vm_offset_t
dtrace_get_cpu_int_stack_top(void)
{
	return current_cpu_datap()->cpu_int_stack_top;
}
#endif