1 /*
2 * Copyright (c) 2000-2019 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 #ifdef MACH_BSD
29 #include <mach_debug.h>
30 #include <mach_ldebug.h>
31
32 #include <mach/kern_return.h>
33 #include <mach/mach_traps.h>
34 #include <mach/thread_status.h>
35 #include <mach/vm_param.h>
36
37 #include <kern/counters.h>
38 #include <kern/cpu_data.h>
39 #include <kern/mach_param.h>
40 #include <kern/task.h>
41 #include <kern/thread.h>
42 #include <kern/sched_prim.h>
43 #include <kern/misc_protos.h>
44 #include <kern/assert.h>
45 #include <kern/debug.h>
46 #include <kern/spl.h>
47 #include <kern/syscall_sw.h>
48 #include <ipc/ipc_port.h>
49 #include <vm/vm_kern.h>
50 #include <vm/pmap.h>
51
52 #include <i386/cpu_number.h>
53 #include <i386/eflags.h>
54 #include <i386/proc_reg.h>
55 #include <i386/tss.h>
56 #include <i386/user_ldt.h>
57 #include <i386/fpu.h>
58 #include <i386/machdep_call.h>
59 #include <i386/vmparam.h>
60 #include <i386/mp_desc.h>
61 #include <i386/misc_protos.h>
62 #include <i386/thread.h>
63 #include <i386/trap.h>
64 #include <i386/seg.h>
65 #include <mach/i386/syscall_sw.h>
66 #include <sys/syscall.h>
67 #include <sys/kdebug.h>
68 #include <sys/errno.h>
69 #include <../bsd/sys/sysent.h>
70
71 #ifdef MACH_BSD
72 extern void mach_kauth_cred_uthread_update(void);
73 extern void throttle_lowpri_io(int);
74 #endif
75
76 #if CONFIG_MACF
77 #include <security/mac_mach_internal.h>
78 #endif
79
80 void * find_user_regs(thread_t);
81
82 unsigned int get_msr_exportmask(void);
83
84 unsigned int get_msr_nbits(void);
85
86 unsigned int get_msr_rbits(void);
87
88 /*
89 * thread_userstack:
90 *
91 * Return the user stack pointer from the machine
92 * dependent thread state info.
93 */
94 kern_return_t
95 thread_userstack(
96 __unused thread_t thread,
97 int flavor,
98 thread_state_t tstate,
99 unsigned int count,
100 mach_vm_offset_t *user_stack,
101 int *customstack,
102 __unused boolean_t is64bit
103 )
104 {
105 if (customstack) {
106 *customstack = 0;
107 }
108
109 switch (flavor) {
110 case x86_THREAD_STATE32:
111 {
112 x86_thread_state32_t *state25;
113
114 if (__improbable(count != x86_THREAD_STATE32_COUNT)) {
115 return KERN_INVALID_ARGUMENT;
116 }
117
118 state25 = (x86_thread_state32_t *) tstate;
119
120 if (state25->esp) {
121 *user_stack = state25->esp;
122 if (customstack) {
123 *customstack = 1;
124 }
125 } else {
126 *user_stack = VM_USRSTACK32;
127 if (customstack) {
128 *customstack = 0;
129 }
130 }
131 break;
132 }
133
134 case x86_THREAD_FULL_STATE64:
135 {
136 x86_thread_full_state64_t *state25;
137
138 if (__improbable(count != x86_THREAD_FULL_STATE64_COUNT)) {
139 return KERN_INVALID_ARGUMENT;
140 }
141
142 state25 = (x86_thread_full_state64_t *) tstate;
143
144 if (state25->ss64.rsp) {
145 *user_stack = state25->ss64.rsp;
146 if (customstack) {
147 *customstack = 1;
148 }
149 } else {
150 *user_stack = VM_USRSTACK64;
151 if (customstack) {
152 *customstack = 0;
153 }
154 }
155 break;
156 }
157
158 case x86_THREAD_STATE64:
159 {
160 x86_thread_state64_t *state25;
161
162 if (__improbable(count != x86_THREAD_STATE64_COUNT)) {
163 return KERN_INVALID_ARGUMENT;
164 }
165
166 state25 = (x86_thread_state64_t *) tstate;
167
168 if (state25->rsp) {
169 *user_stack = state25->rsp;
170 if (customstack) {
171 *customstack = 1;
172 }
173 } else {
174 *user_stack = VM_USRSTACK64;
175 if (customstack) {
176 *customstack = 0;
177 }
178 }
179 break;
180 }
181
182 default:
183 return KERN_INVALID_ARGUMENT;
184 }
185
186 return KERN_SUCCESS;
187 }
188
189 /*
190 * thread_userstackdefault:
191 *
192 * Return the default stack location for the
193 * thread, if otherwise unknown.
194 */
195 kern_return_t
196 thread_userstackdefault(
197 mach_vm_offset_t *default_user_stack,
198 boolean_t is64bit)
199 {
200 if (is64bit) {
201 *default_user_stack = VM_USRSTACK64;
202 } else {
203 *default_user_stack = VM_USRSTACK32;
204 }
205 return KERN_SUCCESS;
206 }
207
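/*
 * thread_entrypoint:
 *
 * Return the user instruction pointer from the machine
 * dependent thread state info, falling back to VM_MIN_ADDRESS
 * (or VM_MIN_ADDRESS64) when the supplied state has none.
 */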
208 kern_return_t
209 thread_entrypoint(
210 __unused thread_t thread,
211 int flavor,
212 thread_state_t tstate,
213 unsigned int count,
214 mach_vm_offset_t *entry_point
215 )
216 {
217 /*
218 * Set a default.
219 */
220 if (*entry_point == 0) {
221 *entry_point = VM_MIN_ADDRESS;
222 }
223
224 switch (flavor) {
225 case x86_THREAD_STATE32:
226 {
227 x86_thread_state32_t *state25;
228
229 if (count != x86_THREAD_STATE32_COUNT) {
230 return KERN_INVALID_ARGUMENT;
231 }
232
233         state25 = (x86_thread_state32_t *) tstate;
234 *entry_point = state25->eip ? state25->eip : VM_MIN_ADDRESS;
235 break;
236 }
237
238 case x86_THREAD_STATE64:
239 {
240 x86_thread_state64_t *state25;
241
242 if (count != x86_THREAD_STATE64_COUNT) {
243 return KERN_INVALID_ARGUMENT;
244 }
245
246 state25 = (x86_thread_state64_t *) tstate;
247 *entry_point = state25->rip ? state25->rip : VM_MIN_ADDRESS64;
248 break;
249 }
250 }
251 return KERN_SUCCESS;
252 }
253
254 /*
255 * FIXME - thread_set_child
256 */
257
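/*
 * thread_set_child:
 *
 * Set up the child's return state after a fork-style call:
 * eax/rax holds the pid, edx/rdx is set to 1 to mark the child
 * side, and the carry flag is cleared to indicate success.
 */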
258 void thread_set_child(thread_t child, int pid);
259 void
260 thread_set_child(thread_t child, int pid)
261 {
262 pal_register_cache_state(child, DIRTY);
263
264 if (thread_is_64bit_addr(child)) {
265 x86_saved_state64_t *iss64;
266
267 iss64 = USER_REGS64(child);
268
269 iss64->rax = pid;
270 iss64->rdx = 1;
271 iss64->isf.rflags &= ~EFL_CF;
272 } else {
273 x86_saved_state32_t *iss32;
274
275 iss32 = USER_REGS32(child);
276
277 iss32->eax = pid;
278 iss32->edx = 1;
279 iss32->efl &= ~EFL_CF;
280 }
281 }
282
283
284
285 /*
286 * System Call handling code
287 */
288
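/*
 * Convention used below: results are returned to user space in
 * eax/rax; BSD-style entries additionally report errors by setting
 * the carry flag (EFL_CF) in the saved user efl/rflags.
 */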
289 extern long fuword(vm_offset_t);
290
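/*
 * machdep_syscall:
 *
 * Dispatch a machine-dependent system call from 32-bit user space.
 * The call number arrives in eax; up to four int-sized arguments are
 * copied in from the user stack (above the return address) and the
 * matching entry in machdep_call_table is invoked.  Control returns
 * to user mode via thread_exception_return().
 */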
291 __attribute__((noreturn))
292 void
293 machdep_syscall(x86_saved_state_t *state)
294 {
295 int args[machdep_call_count];
296 int trapno;
297 int nargs;
298 const machdep_call_t *entry;
299 x86_saved_state32_t *regs;
300
301 assert(is_saved_state32(state));
302 regs = saved_state32(state);
303
304 trapno = regs->eax;
305 #if DEBUG_TRACE
306     kprintf("machdep_syscall(%p) code=%d\n", (void *)regs, trapno);
307 #endif
308
309 DEBUG_KPRINT_SYSCALL_MDEP(
310 "machdep_syscall: trapno=%d\n", trapno);
311
312 if (trapno < 0 || trapno >= machdep_call_count) {
313 regs->eax = (unsigned int)kern_invalid(NULL);
314
315 thread_exception_return();
316 /* NOTREACHED */
317 }
318 entry = &machdep_call_table[trapno];
319 nargs = entry->nargs;
320
321 if (nargs != 0) {
322 if (copyin((user_addr_t) regs->uesp + sizeof(int),
323 (char *) args, (nargs * sizeof(int)))) {
324 regs->eax = KERN_INVALID_ADDRESS;
325
326 thread_exception_return();
327 /* NOTREACHED */
328 }
329 }
330 switch (nargs) {
331 case 0:
332 regs->eax = (*entry->routine.args_0)();
333 break;
334 case 1:
335 regs->eax = (*entry->routine.args_1)(args[0]);
336 break;
337 case 2:
338 regs->eax = (*entry->routine.args_2)(args[0], args[1]);
339 break;
340 case 3:
341 if (!entry->bsd_style) {
342 regs->eax = (*entry->routine.args_3)(args[0], args[1], args[2]);
343 } else {
344 int error;
345 uint32_t rval;
346
347 error = (*entry->routine.args_bsd_3)(&rval, args[0], args[1], args[2]);
348 if (error) {
349 regs->eax = error;
350 regs->efl |= EFL_CF; /* carry bit */
351 } else {
352 regs->eax = rval;
353 regs->efl &= ~EFL_CF;
354 }
355 }
356 break;
357 case 4:
358 regs->eax = (*entry->routine.args_4)(args[0], args[1], args[2], args[3]);
359 break;
360
361 default:
362 panic("machdep_syscall: too many args");
363 }
364
365 DEBUG_KPRINT_SYSCALL_MDEP("machdep_syscall: retval=%u\n", regs->eax);
366
367 #if DEBUG || DEVELOPMENT
368 kern_allocation_name_t
369 prior __assert_only = thread_get_kernel_state(current_thread())->allocation_name;
370 assertf(prior == NULL, "thread_set_allocation_name(\"%s\") not cleared", kern_allocation_get_name(prior));
371 #endif /* DEBUG || DEVELOPMENT */
372
373 throttle_lowpri_io(1);
374
375 thread_exception_return();
376 /* NOTREACHED */
377 }
378
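/*
 * machdep_syscall64:
 *
 * 64-bit variant of machdep_syscall.  The call number is taken from
 * rax (masked with SYSCALL_NUMBER_MASK) and up to three arguments are
 * passed in rdi, rsi and rdx, so no copyin from the user stack is
 * required.
 */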
379 __attribute__((noreturn))
380 void
381 machdep_syscall64(x86_saved_state_t *state)
382 {
383 int trapno;
384 const machdep_call_t *entry;
385 x86_saved_state64_t *regs;
386
387 assert(is_saved_state64(state));
388 regs = saved_state64(state);
389
390 trapno = (int)(regs->rax & SYSCALL_NUMBER_MASK);
391
392 DEBUG_KPRINT_SYSCALL_MDEP(
393 "machdep_syscall64: trapno=%d\n", trapno);
394
395 if (trapno < 0 || trapno >= machdep_call_count) {
396 regs->rax = (unsigned int)kern_invalid(NULL);
397
398 thread_exception_return();
399 /* NOTREACHED */
400 }
401 entry = &machdep_call_table64[trapno];
402
403 switch (entry->nargs) {
404 case 0:
405 regs->rax = (*entry->routine.args_0)();
406 break;
407 case 1:
408 regs->rax = (*entry->routine.args64_1)(regs->rdi);
409 break;
410 case 2:
411 regs->rax = (*entry->routine.args64_2)(regs->rdi, regs->rsi);
412 break;
413 case 3:
414 if (!entry->bsd_style) {
415 regs->rax = (*entry->routine.args64_3)(regs->rdi, regs->rsi, regs->rdx);
416 } else {
417 int error;
418 uint32_t rval;
419
420 error = (*entry->routine.args64_bsd_3)(&rval, regs->rdi, regs->rsi, regs->rdx);
421 if (error) {
422 regs->rax = (uint64_t)error;
423 regs->isf.rflags |= EFL_CF; /* carry bit */
424 } else {
425 regs->rax = rval;
426 regs->isf.rflags &= ~(uint64_t)EFL_CF;
427 }
428 }
429 break;
430 default:
431 panic("machdep_syscall64: too many args");
432 }
433
434     DEBUG_KPRINT_SYSCALL_MDEP("machdep_syscall64: retval=%llu\n", regs->rax);
435
436 #if DEBUG || DEVELOPMENT
437 kern_allocation_name_t
438 prior __assert_only = thread_get_kernel_state(current_thread())->allocation_name;
439 assertf(prior == NULL, "thread_set_allocation_name(\"%s\") not cleared", kern_allocation_get_name(prior));
440 #endif /* DEBUG || DEVELOPMENT */
441
442 throttle_lowpri_io(1);
443
444 thread_exception_return();
445 /* NOTREACHED */
446 }
447
448 #endif /* MACH_BSD */
449
450
451 typedef kern_return_t (*mach_call_t)(void *);
452
453 struct mach_call_args {
454 syscall_arg_t arg1;
455 syscall_arg_t arg2;
456 syscall_arg_t arg3;
457 syscall_arg_t arg4;
458 syscall_arg_t arg5;
459 syscall_arg_t arg6;
460 syscall_arg_t arg7;
461 syscall_arg_t arg8;
462 syscall_arg_t arg9;
463 };
464
465 static kern_return_t
466 mach_call_arg_munger32(uint32_t sp, struct mach_call_args *args, const mach_trap_t *trapp);
467
468
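/*
 * mach_call_arg_munger32:
 *
 * Copy a 32-bit Mach trap's arguments in from the user stack and
 * munge them into the 64-bit syscall_arg_t layout expected by the
 * trap handler.
 */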
469 static kern_return_t
470 mach_call_arg_munger32(uint32_t sp, struct mach_call_args *args, const mach_trap_t *trapp)
471 {
472 if (copyin((user_addr_t)(sp + sizeof(int)), (char *)args, trapp->mach_trap_u32_words * sizeof(int))) {
473 return KERN_INVALID_ARGUMENT;
474 }
475 #if CONFIG_REQUIRES_U32_MUNGING
476 trapp->mach_trap_arg_munge32(args);
477 #else
478 #error U32 mach traps on x86_64 kernel require munging
479 #endif
480 return KERN_SUCCESS;
481 }
482
483
484 __private_extern__ void mach_call_munger(x86_saved_state_t *state);
485
486 extern const char *mach_syscall_name_table[];
487
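/*
 * mach_call_munger:
 *
 * Mach trap dispatch for 32-bit user space.  Mach traps are encoded
 * as negative numbers in eax (e.g. trap number 26 is invoked with
 * eax = -26); the value is negated, bounds-checked against
 * mach_trap_table, optionally filtered by MACF, and the result is
 * returned in eax.
 */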
488 __attribute__((noreturn))
489 void
490 mach_call_munger(x86_saved_state_t *state)
491 {
492 int argc;
493 int call_number;
494 mach_call_t mach_call;
495 kern_return_t retval;
496 struct mach_call_args args = {
497 .arg1 = 0,
498 .arg2 = 0,
499 .arg3 = 0,
500 .arg4 = 0,
501 .arg5 = 0,
502 .arg6 = 0,
503 .arg7 = 0,
504 .arg8 = 0,
505 .arg9 = 0
506 };
507 x86_saved_state32_t *regs;
508
509 struct uthread *ut = get_bsdthread_info(current_thread());
510 uthread_reset_proc_refcount(ut);
511
512 assert(is_saved_state32(state));
513 regs = saved_state32(state);
514
515 call_number = -(regs->eax);
516
517 DEBUG_KPRINT_SYSCALL_MACH(
518 "mach_call_munger: code=%d(%s)\n",
519 call_number, mach_syscall_name_table[call_number]);
520 #if DEBUG_TRACE
521     kprintf("mach_call_munger(%p) code=%d\n", (void *)regs, call_number);
522 #endif
523
524 if (call_number < 0 || call_number >= mach_trap_count) {
525 i386_exception(EXC_SYSCALL, call_number, 1);
526 /* NOTREACHED */
527 }
528 mach_call = (mach_call_t)mach_trap_table[call_number].mach_trap_function;
529
530 if (mach_call == (mach_call_t)kern_invalid) {
531 DEBUG_KPRINT_SYSCALL_MACH(
532 "mach_call_munger: kern_invalid 0x%x\n", regs->eax);
533 i386_exception(EXC_SYSCALL, call_number, 1);
534 /* NOTREACHED */
535 }
536
537 argc = mach_trap_table[call_number].mach_trap_arg_count;
538 if (argc) {
539 retval = mach_call_arg_munger32(regs->uesp, &args, &mach_trap_table[call_number]);
540 if (retval != KERN_SUCCESS) {
541 regs->eax = retval;
542
543 DEBUG_KPRINT_SYSCALL_MACH(
544 "mach_call_munger: retval=0x%x\n", retval);
545
546 thread_exception_return();
547 /* NOTREACHED */
548 }
549 }
550
551 #ifdef MACH_BSD
552 mach_kauth_cred_uthread_update();
553 #endif
554
555 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
556 MACHDBG_CODE(DBG_MACH_EXCP_SC, (call_number)) | DBG_FUNC_START,
557 args.arg1, args.arg2, args.arg3, args.arg4, 0);
558
559 #if CONFIG_MACF
560 /* Check mach trap filter mask, if exists. */
561 task_t task = current_task();
562 uint8_t *filter_mask = task->mach_trap_filter_mask;
563
564 if (__improbable(filter_mask != NULL &&
565 !bitstr_test(filter_mask, call_number))) {
566 /* Not in filter mask, evaluate policy. */
567 if (mac_task_mach_trap_evaluate != NULL) {
568 retval = mac_task_mach_trap_evaluate(get_bsdtask_info(task),
569 call_number);
570 if (retval) {
571 goto skip_machcall;
572 }
573 }
574 }
575 #endif /* CONFIG_MACF */
576
577 retval = mach_call(&args);
578
579 #if CONFIG_MACF
580 skip_machcall:
581 #endif
582
583 DEBUG_KPRINT_SYSCALL_MACH("mach_call_munger: retval=0x%x\n", retval);
584
585 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
586 MACHDBG_CODE(DBG_MACH_EXCP_SC, (call_number)) | DBG_FUNC_END,
587 retval, 0, 0, 0, 0);
588
589 regs->eax = retval;
590
591 #if DEBUG || DEVELOPMENT
592 kern_allocation_name_t
593 prior __assert_only = thread_get_kernel_state(current_thread())->allocation_name;
594 assertf(prior == NULL, "thread_set_allocation_name(\"%s\") not cleared", kern_allocation_get_name(prior));
595 #endif /* DEBUG || DEVELOPMENT */
596
597 throttle_lowpri_io(1);
598
599 #if PROC_REF_DEBUG
600 if (__improbable(uthread_get_proc_refcount(ut) != 0)) {
601 panic("system call returned with uu_proc_refcount != 0");
602 }
603 #endif
604
605 thread_exception_return();
606 /* NOTREACHED */
607 }
608
609
610 __private_extern__ void mach_call_munger64(x86_saved_state_t *regs);
611
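/*
 * mach_call_munger64:
 *
 * Mach trap dispatch for 64-bit user space.  Up to six arguments are
 * copied from the saved argument registers starting at rdi; any
 * remaining arguments (nine at most) are copied in from the user
 * stack.  The result is returned in rax.
 */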
612 __attribute__((noreturn))
613 void
614 mach_call_munger64(x86_saved_state_t *state)
615 {
616 int call_number;
617 int argc;
618 mach_call_t mach_call;
619 struct mach_call_args args = {
620 .arg1 = 0,
621 .arg2 = 0,
622 .arg3 = 0,
623 .arg4 = 0,
624 .arg5 = 0,
625 .arg6 = 0,
626 .arg7 = 0,
627 .arg8 = 0,
628 .arg9 = 0
629 };
630 x86_saved_state64_t *regs;
631
632 struct uthread *ut = get_bsdthread_info(current_thread());
633 uthread_reset_proc_refcount(ut);
634
635 assert(is_saved_state64(state));
636 regs = saved_state64(state);
637
638 call_number = (int)(regs->rax & SYSCALL_NUMBER_MASK);
639
640 DEBUG_KPRINT_SYSCALL_MACH(
641 "mach_call_munger64: code=%d(%s)\n",
642 call_number, mach_syscall_name_table[call_number]);
643
644 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
645 MACHDBG_CODE(DBG_MACH_EXCP_SC, (call_number)) | DBG_FUNC_START,
646 regs->rdi, regs->rsi, regs->rdx, regs->r10, 0);
647
648 if (call_number < 0 || call_number >= mach_trap_count) {
649 i386_exception(EXC_SYSCALL, regs->rax, 1);
650 /* NOTREACHED */
651 }
652 mach_call = (mach_call_t)mach_trap_table[call_number].mach_trap_function;
653
654 if (mach_call == (mach_call_t)kern_invalid) {
655 i386_exception(EXC_SYSCALL, regs->rax, 1);
656 /* NOTREACHED */
657 }
658 argc = mach_trap_table[call_number].mach_trap_arg_count;
659 if (argc) {
660 int args_in_regs = MIN(6, argc);
661 __nochk_memcpy(&args.arg1, &regs->rdi, args_in_regs * sizeof(syscall_arg_t));
662
663 if (argc > 6) {
664 int copyin_count;
665
666 assert(argc <= 9);
667 copyin_count = (argc - 6) * (int)sizeof(syscall_arg_t);
668
669 if (copyin((user_addr_t)(regs->isf.rsp + sizeof(user_addr_t)), (char *)&args.arg7, copyin_count)) {
670 regs->rax = KERN_INVALID_ARGUMENT;
671
672 thread_exception_return();
673 /* NOTREACHED */
674 }
675 }
676 }
677
678 #ifdef MACH_BSD
679 mach_kauth_cred_uthread_update();
680 #endif
681
682 #if CONFIG_MACF
683     /* Check mach trap filter mask, if exists. */
684 task_t task = current_task();
685 uint8_t *filter_mask = task->mach_trap_filter_mask;
686
687 if (__improbable(filter_mask != NULL &&
688 !bitstr_test(filter_mask, call_number))) {
689 /* Not in filter mask, evaluate policy. */
690 if (mac_task_mach_trap_evaluate != NULL) {
691 regs->rax = mac_task_mach_trap_evaluate(get_bsdtask_info(task),
692 call_number);
693 if (regs->rax) {
694 goto skip_machcall;
695 }
696 }
697 }
698 #endif /* CONFIG_MACF */
699
700 regs->rax = (uint64_t)mach_call((void *)&args);
701
702 #if CONFIG_MACF
703 skip_machcall:
704 #endif
705
706 DEBUG_KPRINT_SYSCALL_MACH( "mach_call_munger64: retval=0x%llx\n", regs->rax);
707
708 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
709 MACHDBG_CODE(DBG_MACH_EXCP_SC, (call_number)) | DBG_FUNC_END,
710 regs->rax, 0, 0, 0, 0);
711
712 #if DEBUG || DEVELOPMENT
713 kern_allocation_name_t
714 prior __assert_only = thread_get_kernel_state(current_thread())->allocation_name;
715 assertf(prior == NULL, "thread_set_allocation_name(\"%s\") not cleared", kern_allocation_get_name(prior));
716 #endif /* DEBUG || DEVELOPMENT */
717
718 throttle_lowpri_io(1);
719
720 #if PROC_REF_DEBUG
721 if (__improbable(uthread_get_proc_refcount(ut) != 0)) {
722 panic("system call returned with uu_proc_refcount != 0");
723 }
724 #endif
725
726 thread_exception_return();
727 /* NOTREACHED */
728 }
729
730
731 /*
732 * thread_setuserstack:
733 *
734 * Sets the user stack pointer into the machine
735 * dependent thread state info.
736 */
737 void
738 thread_setuserstack(
739 thread_t thread,
740 mach_vm_address_t user_stack)
741 {
742 pal_register_cache_state(thread, DIRTY);
743 if (thread_is_64bit_addr(thread)) {
744 x86_saved_state64_t *iss64;
745
746 iss64 = USER_REGS64(thread);
747
748 iss64->isf.rsp = (uint64_t)user_stack;
749 } else {
750 x86_saved_state32_t *iss32;
751
752 iss32 = USER_REGS32(thread);
753
754 iss32->uesp = CAST_DOWN_EXPLICIT(unsigned int, user_stack);
755 }
756 }
757
758 /*
759 * thread_adjuserstack:
760 *
761 * Returns the adjusted user stack pointer from the machine
762 * dependent thread state info. Used for small (<2G) deltas.
763 */
764 user_addr_t
765 thread_adjuserstack(
766 thread_t thread,
767 int adjust)
768 {
769 pal_register_cache_state(thread, DIRTY);
770 if (thread_is_64bit_addr(thread)) {
771 x86_saved_state64_t *iss64;
772
773 iss64 = USER_REGS64(thread);
774
775 iss64->isf.rsp += adjust;
776
777 return iss64->isf.rsp;
778 } else {
779 x86_saved_state32_t *iss32;
780
781 iss32 = USER_REGS32(thread);
782
783 iss32->uesp += adjust;
784
785 return CAST_USER_ADDR_T(iss32->uesp);
786 }
787 }
788
789 /*
790 * thread_setentrypoint:
791 *
792 * Sets the user PC into the machine
793 * dependent thread state info.
794 */
795 void
796 thread_setentrypoint(thread_t thread, mach_vm_address_t entry)
797 {
798 pal_register_cache_state(thread, DIRTY);
799 if (thread_is_64bit_addr(thread)) {
800 x86_saved_state64_t *iss64;
801
802 iss64 = USER_REGS64(thread);
803
804 iss64->isf.rip = (uint64_t)entry;
805 } else {
806 x86_saved_state32_t *iss32;
807
808 iss32 = USER_REGS32(thread);
809
810 iss32->eip = CAST_DOWN_EXPLICIT(unsigned int, entry);
811 }
812 }
813
814
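/*
 * thread_setsinglestep:
 *
 * Enable or disable single-stepping by setting or clearing EFL_TF in
 * the saved user flags.  For 32-bit threads that entered via sysenter,
 * the code segment selector is switched to SYSENTER_TF_CS so the
 * return to user space goes through IRET and the trace flag takes
 * effect.
 */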
815 kern_return_t
816 thread_setsinglestep(thread_t thread, int on)
817 {
818 pal_register_cache_state(thread, DIRTY);
819 if (thread_is_64bit_addr(thread)) {
820 x86_saved_state64_t *iss64;
821
822 iss64 = USER_REGS64(thread);
823
824 if (on) {
825 iss64->isf.rflags |= EFL_TF;
826 } else {
827 iss64->isf.rflags &= ~EFL_TF;
828 }
829 } else {
830 x86_saved_state32_t *iss32;
831
832 iss32 = USER_REGS32(thread);
833
834 if (on) {
835 iss32->efl |= EFL_TF;
836 /* Ensure IRET */
837 if (iss32->cs == SYSENTER_CS) {
838 iss32->cs = SYSENTER_TF_CS;
839 }
840 } else {
841 iss32->efl &= ~EFL_TF;
842 }
843 }
844
845 return KERN_SUCCESS;
846 }
847
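/*
 * get_user_regs / find_user_regs:
 *
 * Return a pointer to the thread's saved user register state, after
 * flagging the PAL register cache state as DIRTY so the saved state
 * is treated as current.
 */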
848 void *
849 get_user_regs(thread_t th)
850 {
851 pal_register_cache_state(th, DIRTY);
852 return USER_STATE(th);
853 }
854
855 void *
856 find_user_regs(thread_t thread)
857 {
858 return get_user_regs(thread);
859 }
860
861 #if CONFIG_DTRACE
862 /*
863 * DTrace would like to have a peek at the kernel interrupt state, if available.
864 */
865 x86_saved_state_t *find_kern_regs(thread_t);
866
867 x86_saved_state_t *
868 find_kern_regs(thread_t thread)
869 {
870 if (thread == current_thread() &&
871 NULL != current_cpu_datap()->cpu_int_state &&
872 !(USER_STATE(thread) == current_cpu_datap()->cpu_int_state &&
873 current_cpu_datap()->cpu_interrupt_level == 1)) {
874 return current_cpu_datap()->cpu_int_state;
875 } else {
876 return NULL;
877 }
878 }
879
880 vm_offset_t dtrace_get_cpu_int_stack_top(void);
881
882 vm_offset_t
883 dtrace_get_cpu_int_stack_top(void)
884 {
885 return current_cpu_datap()->cpu_int_stack_top;
886 }
887 #endif