/* osfmk/i386/bsd_i386.c (xnu-3789.70.16) */
/*
 * Copyright (c) 2000-2016 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#ifdef MACH_BSD
#include <mach_rt.h>
#include <mach_debug.h>
#include <mach_ldebug.h>

#include <mach/kern_return.h>
#include <mach/mach_traps.h>
#include <mach/thread_status.h>
#include <mach/vm_param.h>

#include <kern/counters.h>
#include <kern/cpu_data.h>
#include <kern/mach_param.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/sched_prim.h>
#include <kern/misc_protos.h>
#include <kern/assert.h>
#include <kern/debug.h>
#include <kern/spl.h>
#include <kern/syscall_sw.h>
#include <ipc/ipc_port.h>
#include <vm/vm_kern.h>
#include <vm/pmap.h>

#include <i386/cpu_number.h>
#include <i386/eflags.h>
#include <i386/proc_reg.h>
#include <i386/tss.h>
#include <i386/user_ldt.h>
#include <i386/fpu.h>
#include <i386/machdep_call.h>
#include <i386/vmparam.h>
#include <i386/mp_desc.h>
#include <i386/misc_protos.h>
#include <i386/thread.h>
#include <i386/trap.h>
#include <i386/seg.h>
#include <mach/i386/syscall_sw.h>
#include <sys/syscall.h>
#include <sys/kdebug.h>
#include <sys/errno.h>
#include <../bsd/sys/sysent.h>

#ifdef MACH_BSD
extern void mach_kauth_cred_uthread_update(void);
extern void throttle_lowpri_io(int);
#endif

void * find_user_regs(thread_t);

unsigned int get_msr_exportmask(void);

unsigned int get_msr_nbits(void);

unsigned int get_msr_rbits(void);

/*
 * thread_userstack:
 *
 * Return the user stack pointer from the machine
 * dependent thread state info.
 */
kern_return_t
thread_userstack(
    __unused thread_t thread,
    int flavor,
    thread_state_t tstate,
    __unused unsigned int count,
    mach_vm_offset_t *user_stack,
    int *customstack,
    __unused boolean_t is64bit
)
{
    if (customstack)
        *customstack = 0;

    switch (flavor) {
    case x86_THREAD_STATE32:
    {
        x86_thread_state32_t *state25;

        state25 = (x86_thread_state32_t *) tstate;

        if (state25->esp) {
            *user_stack = state25->esp;
            if (customstack)
                *customstack = 1;
        } else {
            *user_stack = VM_USRSTACK32;
            if (customstack)
                *customstack = 0;
        }
        break;
    }

    case x86_THREAD_STATE64:
    {
        x86_thread_state64_t *state25;

        state25 = (x86_thread_state64_t *) tstate;

        if (state25->rsp) {
            *user_stack = state25->rsp;
            if (customstack)
                *customstack = 1;
        } else {
            *user_stack = VM_USRSTACK64;
            if (customstack)
                *customstack = 0;
        }
        break;
    }

    default:
        return (KERN_INVALID_ARGUMENT);
    }

    return (KERN_SUCCESS);
}

/*
 * thread_userstackdefault:
 *
 * Return the default stack location for the
 * thread, if otherwise unknown.
 */
kern_return_t
thread_userstackdefault(
    mach_vm_offset_t *default_user_stack,
    boolean_t is64bit)
{
    if (is64bit) {
        *default_user_stack = VM_USRSTACK64;
    } else {
        *default_user_stack = VM_USRSTACK32;
    }
    return (KERN_SUCCESS);
}

kern_return_t
thread_entrypoint(
    __unused thread_t thread,
    int flavor,
    thread_state_t tstate,
    __unused unsigned int count,
    mach_vm_offset_t *entry_point
)
{
    /*
     * Set a default.
     */
    if (*entry_point == 0)
        *entry_point = VM_MIN_ADDRESS;

    switch (flavor) {
    case x86_THREAD_STATE32:
    {
        x86_thread_state32_t *state25;

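        /*
         * Note: i386_thread_state_t appears to be the backward-compatibility
         * alias for x86_thread_state32_t (mach/i386/thread_status.h), so the
         * cast below matches the declared type of state25.
         */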
        state25 = (i386_thread_state_t *) tstate;
        *entry_point = state25->eip ? state25->eip : VM_MIN_ADDRESS;
        break;
    }

    case x86_THREAD_STATE64:
    {
        x86_thread_state64_t *state25;

        state25 = (x86_thread_state64_t *) tstate;
        *entry_point = state25->rip ? state25->rip : VM_MIN_ADDRESS64;
        break;
    }
    }
    return (KERN_SUCCESS);
}

/*
 * FIXME - thread_set_child
 */

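/*
 * thread_set_child() fills in the register state the forked child will see
 * on its first return to user space: the supplied pid in eax/rax, edx/rdx
 * set to 1 (which the user-space fork()/vfork() return stubs use to tell
 * the child apart from the parent), and the carry flag cleared to signal
 * success.
 */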
void thread_set_child(thread_t child, int pid);
void
thread_set_child(thread_t child, int pid)
{
    pal_register_cache_state(child, DIRTY);

    if (thread_is_64bit(child)) {
        x86_saved_state64_t *iss64;

        iss64 = USER_REGS64(child);

        iss64->rax = pid;
        iss64->rdx = 1;
        iss64->isf.rflags &= ~EFL_CF;
    } else {
        x86_saved_state32_t *iss32;

        iss32 = USER_REGS32(child);

        iss32->eax = pid;
        iss32->edx = 1;
        iss32->efl &= ~EFL_CF;
    }
}

/*
 * System Call handling code
 */

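/*
 * The handlers below are the C entry points for system calls on x86.
 * machdep_syscall()/machdep_syscall64() dispatch machine-dependent calls
 * through machdep_call_table[]/machdep_call_table64[], while
 * mach_call_munger()/mach_call_munger64() dispatch Mach traps through
 * mach_trap_table[].  None of them return to their caller; every path ends
 * in thread_exception_return(), which resumes user mode with the (possibly
 * modified) saved register state.
 */
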
extern long fuword(vm_offset_t);

__attribute__((noreturn))
void
machdep_syscall(x86_saved_state_t *state)
{
    int args[machdep_call_count];
    int trapno;
    int nargs;
    const machdep_call_t *entry;
    x86_saved_state32_t *regs;

    assert(is_saved_state32(state));
    regs = saved_state32(state);

    trapno = regs->eax;
#if DEBUG_TRACE
    kprintf("machdep_syscall(0x%08x) code=%d\n", regs, trapno);
#endif

    DEBUG_KPRINT_SYSCALL_MDEP(
        "machdep_syscall: trapno=%d\n", trapno);

    if (trapno < 0 || trapno >= machdep_call_count) {
        regs->eax = (unsigned int)kern_invalid(NULL);

        thread_exception_return();
        /* NOTREACHED */
    }
    entry = &machdep_call_table[trapno];
    nargs = entry->nargs;

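    /*
     * 32-bit machdep arguments arrive on the user stack; the word at uesp is
     * skipped by the copyin below (it holds the return address pushed by the
     * user-mode syscall stub), and the arguments start one int above it.
     */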
    if (nargs != 0) {
        if (copyin((user_addr_t) regs->uesp + sizeof (int),
            (char *) args, (nargs * sizeof (int)))) {
            regs->eax = KERN_INVALID_ADDRESS;

            thread_exception_return();
            /* NOTREACHED */
        }
    }
    switch (nargs) {
    case 0:
        regs->eax = (*entry->routine.args_0)();
        break;
    case 1:
        regs->eax = (*entry->routine.args_1)(args[0]);
        break;
    case 2:
        regs->eax = (*entry->routine.args_2)(args[0], args[1]);
        break;
    case 3:
        if (!entry->bsd_style)
            regs->eax = (*entry->routine.args_3)(args[0], args[1], args[2]);
        else {
            int error;
            uint32_t rval;

            error = (*entry->routine.args_bsd_3)(&rval, args[0], args[1], args[2]);
            if (error) {
                regs->eax = error;
                regs->efl |= EFL_CF;    /* carry bit */
            } else {
                regs->eax = rval;
                regs->efl &= ~EFL_CF;
            }
        }
        break;
    case 4:
        regs->eax = (*entry->routine.args_4)(args[0], args[1], args[2], args[3]);
        break;

    default:
        panic("machdep_syscall: too many args");
    }

    DEBUG_KPRINT_SYSCALL_MDEP("machdep_syscall: retval=%u\n", regs->eax);

    throttle_lowpri_io(1);

    thread_exception_return();
    /* NOTREACHED */
}

__attribute__((noreturn))
void
machdep_syscall64(x86_saved_state_t *state)
{
    int trapno;
    const machdep_call_t *entry;
    x86_saved_state64_t *regs;

    assert(is_saved_state64(state));
    regs = saved_state64(state);

    trapno = (int)(regs->rax & SYSCALL_NUMBER_MASK);

    DEBUG_KPRINT_SYSCALL_MDEP(
        "machdep_syscall64: trapno=%d\n", trapno);

    if (trapno < 0 || trapno >= machdep_call_count) {
        regs->rax = (unsigned int)kern_invalid(NULL);

        thread_exception_return();
        /* NOTREACHED */
    }
    entry = &machdep_call_table64[trapno];

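    /*
     * 64-bit machdep calls take at most two arguments and receive them in
     * %rdi/%rsi; unlike the 32-bit path there is no copyin from the user
     * stack here.
     */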
    switch (entry->nargs) {
    case 0:
        regs->rax = (*entry->routine.args_0)();
        break;
    case 1:
        regs->rax = (*entry->routine.args64_1)(regs->rdi);
        break;
    case 2:
        regs->rax = (*entry->routine.args64_2)(regs->rdi, regs->rsi);
        break;
    default:
        panic("machdep_syscall64: too many args");
    }

    DEBUG_KPRINT_SYSCALL_MDEP("machdep_syscall64: retval=%llu\n", regs->rax);

    throttle_lowpri_io(1);

    thread_exception_return();
    /* NOTREACHED */
}

#endif /* MACH_BSD */


typedef kern_return_t (*mach_call_t)(void *);

struct mach_call_args {
    syscall_arg_t arg1;
    syscall_arg_t arg2;
    syscall_arg_t arg3;
    syscall_arg_t arg4;
    syscall_arg_t arg5;
    syscall_arg_t arg6;
    syscall_arg_t arg7;
    syscall_arg_t arg8;
    syscall_arg_t arg9;
};

static kern_return_t
mach_call_arg_munger32(uint32_t sp, struct mach_call_args *args, const mach_trap_t *trapp);

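/*
 * mach_call_arg_munger32() copies a 32-bit caller's trap arguments from its
 * user stack (skipping the return address at sp) and, via the per-trap
 * munger routine, widens them into the 64-bit syscall_arg_t slots of
 * struct mach_call_args so the common mach_call_t handlers see one layout.
 */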
static kern_return_t
mach_call_arg_munger32(uint32_t sp, struct mach_call_args *args, const mach_trap_t *trapp)
{
    if (copyin((user_addr_t)(sp + sizeof(int)), (char *)args, trapp->mach_trap_u32_words * sizeof (int)))
        return KERN_INVALID_ARGUMENT;
#if CONFIG_REQUIRES_U32_MUNGING
    trapp->mach_trap_arg_munge32(args);
#else
#error U32 mach traps on x86_64 kernel requires munging
#endif
    return KERN_SUCCESS;
}


__private_extern__ void mach_call_munger(x86_saved_state_t *state);

extern const char *mach_syscall_name_table[];

__attribute__((noreturn))
void
mach_call_munger(x86_saved_state_t *state)
{
    int argc;
    int call_number;
    mach_call_t mach_call;
    kern_return_t retval;
    struct mach_call_args args = { 0, 0, 0, 0, 0, 0, 0, 0, 0 };
    x86_saved_state32_t *regs;

    struct uthread *ut = get_bsdthread_info(current_thread());
    uthread_reset_proc_refcount(ut);

    assert(is_saved_state32(state));
    regs = saved_state32(state);

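    /*
     * Mach traps are invoked with negative numbers in %eax (BSD syscalls use
     * positive ones), so negate the value to index mach_trap_table[].
     */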
    call_number = -(regs->eax);

    DEBUG_KPRINT_SYSCALL_MACH(
        "mach_call_munger: code=%d(%s)\n",
        call_number, mach_syscall_name_table[call_number]);
#if DEBUG_TRACE
    kprintf("mach_call_munger(0x%08x) code=%d\n", regs, call_number);
#endif

    if (call_number < 0 || call_number >= mach_trap_count) {
        i386_exception(EXC_SYSCALL, call_number, 1);
        /* NOTREACHED */
    }
    mach_call = (mach_call_t)mach_trap_table[call_number].mach_trap_function;

    if (mach_call == (mach_call_t)kern_invalid) {
        DEBUG_KPRINT_SYSCALL_MACH(
            "mach_call_munger: kern_invalid 0x%x\n", regs->eax);
        i386_exception(EXC_SYSCALL, call_number, 1);
        /* NOTREACHED */
    }

    argc = mach_trap_table[call_number].mach_trap_arg_count;
    if (argc) {
        retval = mach_call_arg_munger32(regs->uesp, &args, &mach_trap_table[call_number]);
        if (retval != KERN_SUCCESS) {
            regs->eax = retval;

            DEBUG_KPRINT_SYSCALL_MACH(
                "mach_call_munger: retval=0x%x\n", retval);

            thread_exception_return();
            /* NOTREACHED */
        }
    }

#ifdef MACH_BSD
    mach_kauth_cred_uthread_update();
#endif

    KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
        MACHDBG_CODE(DBG_MACH_EXCP_SC, (call_number)) | DBG_FUNC_START,
        args.arg1, args.arg2, args.arg3, args.arg4, 0);

    retval = mach_call(&args);

    DEBUG_KPRINT_SYSCALL_MACH("mach_call_munger: retval=0x%x\n", retval);

    KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
        MACHDBG_CODE(DBG_MACH_EXCP_SC, (call_number)) | DBG_FUNC_END,
        retval, 0, 0, 0, 0);

    regs->eax = retval;

    throttle_lowpri_io(1);

#if PROC_REF_DEBUG
    if (__improbable(uthread_get_proc_refcount(ut) != 0)) {
        panic("system call returned with uu_proc_refcount != 0");
    }
#endif

    thread_exception_return();
    /* NOTREACHED */
}


__private_extern__ void mach_call_munger64(x86_saved_state_t *regs);

__attribute__((noreturn))
void
mach_call_munger64(x86_saved_state_t *state)
{
    int call_number;
    int argc;
    mach_call_t mach_call;
    struct mach_call_args args = { 0, 0, 0, 0, 0, 0, 0, 0, 0 };
    x86_saved_state64_t *regs;

    struct uthread *ut = get_bsdthread_info(current_thread());
    uthread_reset_proc_refcount(ut);

    assert(is_saved_state64(state));
    regs = saved_state64(state);

    call_number = (int)(regs->rax & SYSCALL_NUMBER_MASK);

    DEBUG_KPRINT_SYSCALL_MACH(
        "mach_call_munger64: code=%d(%s)\n",
        call_number, mach_syscall_name_table[call_number]);

    KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
        MACHDBG_CODE(DBG_MACH_EXCP_SC, (call_number)) | DBG_FUNC_START,
        regs->rdi, regs->rsi, regs->rdx, regs->r10, 0);

    if (call_number < 0 || call_number >= mach_trap_count) {
        i386_exception(EXC_SYSCALL, regs->rax, 1);
        /* NOTREACHED */
    }
    mach_call = (mach_call_t)mach_trap_table[call_number].mach_trap_function;

    if (mach_call == (mach_call_t)kern_invalid) {
        i386_exception(EXC_SYSCALL, regs->rax, 1);
        /* NOTREACHED */
    }
    argc = mach_trap_table[call_number].mach_trap_arg_count;
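    /*
     * Up to six arguments arrive in registers; the saved-state layout appears
     * to place %rdi, %rsi, %rdx, %r10, %r8 and %r9 contiguously starting at
     * rdi, which is what the memcpy() below relies on.  Traps with seven to
     * nine arguments spill the remainder onto the user stack just above the
     * return address, retrieved by the copyin() further down.
     */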
    if (argc) {
        int args_in_regs = MIN(6, argc);

        memcpy(&args.arg1, &regs->rdi, args_in_regs * sizeof(syscall_arg_t));

        if (argc > 6) {
            int copyin_count;

            assert(argc <= 9);
            copyin_count = (argc - 6) * (int)sizeof(syscall_arg_t);

            if (copyin((user_addr_t)(regs->isf.rsp + sizeof(user_addr_t)), (char *)&args.arg7, copyin_count)) {
                regs->rax = KERN_INVALID_ARGUMENT;

                thread_exception_return();
                /* NOTREACHED */
            }
        }
    }

#ifdef MACH_BSD
    mach_kauth_cred_uthread_update();
#endif

    regs->rax = (uint64_t)mach_call((void *)&args);

    DEBUG_KPRINT_SYSCALL_MACH("mach_call_munger64: retval=0x%llx\n", regs->rax);

    KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
        MACHDBG_CODE(DBG_MACH_EXCP_SC, (call_number)) | DBG_FUNC_END,
        regs->rax, 0, 0, 0, 0);

    throttle_lowpri_io(1);

#if PROC_REF_DEBUG
    if (__improbable(uthread_get_proc_refcount(ut) != 0)) {
        panic("system call returned with uu_proc_refcount != 0");
    }
#endif

    thread_exception_return();
    /* NOTREACHED */
}


/*
 * thread_setuserstack:
 *
 * Sets the user stack pointer into the machine
 * dependent thread state info.
 */
void
thread_setuserstack(
    thread_t thread,
    mach_vm_address_t user_stack)
{
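    /*
     * pal_register_cache_state(..., DIRTY) appears to tell the platform
     * abstraction layer that the saved user register state is about to be
     * modified, so any cached copy must be treated as stale; the same call
     * precedes every register update in this file.
     */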
    pal_register_cache_state(thread, DIRTY);
    if (thread_is_64bit(thread)) {
        x86_saved_state64_t *iss64;

        iss64 = USER_REGS64(thread);

        iss64->isf.rsp = (uint64_t)user_stack;
    } else {
        x86_saved_state32_t *iss32;

        iss32 = USER_REGS32(thread);

        iss32->uesp = CAST_DOWN_EXPLICIT(unsigned int, user_stack);
    }
}

/*
 * thread_adjuserstack:
 *
 * Returns the adjusted user stack pointer from the machine
 * dependent thread state info.  Used for small (<2G) deltas.
 */
uint64_t
thread_adjuserstack(
    thread_t thread,
    int adjust)
{
    pal_register_cache_state(thread, DIRTY);
    if (thread_is_64bit(thread)) {
        x86_saved_state64_t *iss64;

        iss64 = USER_REGS64(thread);

        iss64->isf.rsp += adjust;

        return iss64->isf.rsp;
    } else {
        x86_saved_state32_t *iss32;

        iss32 = USER_REGS32(thread);

        iss32->uesp += adjust;

        return CAST_USER_ADDR_T(iss32->uesp);
    }
}

/*
 * thread_setentrypoint:
 *
 * Sets the user PC into the machine
 * dependent thread state info.
 */
void
thread_setentrypoint(thread_t thread, mach_vm_address_t entry)
{
    pal_register_cache_state(thread, DIRTY);
    if (thread_is_64bit(thread)) {
        x86_saved_state64_t *iss64;

        iss64 = USER_REGS64(thread);

        iss64->isf.rip = (uint64_t)entry;
    } else {
        x86_saved_state32_t *iss32;

        iss32 = USER_REGS32(thread);

        iss32->eip = CAST_DOWN_EXPLICIT(unsigned int, entry);
    }
}

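/*
 * thread_setsinglestep() toggles hardware single-stepping by setting or
 * clearing the trap flag (EFL_TF) in the thread's saved user flags; with TF
 * set the CPU raises a debug exception after each user instruction.  The
 * SYSENTER_TF_CS substitution below forces the 32-bit return to go through
 * IRET, presumably because the sysexit fast path would not carry the flag
 * change back to user mode.
 */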
kern_return_t
thread_setsinglestep(thread_t thread, int on)
{
    pal_register_cache_state(thread, DIRTY);
    if (thread_is_64bit(thread)) {
        x86_saved_state64_t *iss64;

        iss64 = USER_REGS64(thread);

        if (on)
            iss64->isf.rflags |= EFL_TF;
        else
            iss64->isf.rflags &= ~EFL_TF;
    } else {
        x86_saved_state32_t *iss32;

        iss32 = USER_REGS32(thread);

        if (on) {
            iss32->efl |= EFL_TF;
            /* Ensure IRET */
            if (iss32->cs == SYSENTER_CS)
                iss32->cs = SYSENTER_TF_CS;
        } else
            iss32->efl &= ~EFL_TF;
    }

    return (KERN_SUCCESS);
}

void *
get_user_regs(thread_t th)
{
    pal_register_cache_state(th, DIRTY);
    return (USER_STATE(th));
}

void *
find_user_regs(thread_t thread)
{
    return get_user_regs(thread);
}

#if CONFIG_DTRACE
/*
 * DTrace would like to have a peek at the kernel interrupt state, if available.
 * Based on osfmk/chud/i386/chud_thread_i386.c:chudxnu_thread_get_state(), which see.
 */
x86_saved_state_t *find_kern_regs(thread_t);

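/*
 * Return the CPU's saved interrupt state for the current thread, unless the
 * only saved state is the user-mode frame of a level-1 interrupt (i.e. the
 * interrupt came straight from user space), in which case there is no
 * separate kernel interrupt state to report and NULL is returned.
 */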
x86_saved_state_t *
find_kern_regs(thread_t thread)
{
    if (thread == current_thread() &&
        NULL != current_cpu_datap()->cpu_int_state &&
        !(USER_STATE(thread) == current_cpu_datap()->cpu_int_state &&
          current_cpu_datap()->cpu_interrupt_level == 1)) {

        return current_cpu_datap()->cpu_int_state;
    } else {
        return NULL;
    }
}

vm_offset_t dtrace_get_cpu_int_stack_top(void);

vm_offset_t
dtrace_get_cpu_int_stack_top(void)
{
    return current_cpu_datap()->cpu_int_stack_top;
}
#endif /* CONFIG_DTRACE */