]> git.saurik.com Git - apple/xnu.git/blame - osfmk/i386/bsd_i386.c
xnu-4903.241.1.tar.gz
[apple/xnu.git] / osfmk / i386 / bsd_i386.c
CommitLineData
/*
 * Copyright (c) 2000-2016 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
28#ifdef MACH_BSD
1c79356b
A
29#include <mach_debug.h>
30#include <mach_ldebug.h>
31
32#include <mach/kern_return.h>
91447636 33#include <mach/mach_traps.h>
1c79356b
A
34#include <mach/thread_status.h>
35#include <mach/vm_param.h>
1c79356b
A
36
37#include <kern/counters.h>
38#include <kern/cpu_data.h>
39#include <kern/mach_param.h>
40#include <kern/task.h>
41#include <kern/thread.h>
1c79356b
A
42#include <kern/sched_prim.h>
43#include <kern/misc_protos.h>
44#include <kern/assert.h>
b0d623f7 45#include <kern/debug.h>
1c79356b 46#include <kern/spl.h>
55e303ae 47#include <kern/syscall_sw.h>
1c79356b
A
48#include <ipc/ipc_port.h>
49#include <vm/vm_kern.h>
50#include <vm/pmap.h>
51
91447636 52#include <i386/cpu_number.h>
1c79356b
A
53#include <i386/eflags.h>
54#include <i386/proc_reg.h>
1c79356b
A
55#include <i386/tss.h>
56#include <i386/user_ldt.h>
57#include <i386/fpu.h>
1c79356b 58#include <i386/machdep_call.h>
91447636 59#include <i386/vmparam.h>
b0d623f7
A
60#include <i386/mp_desc.h>
61#include <i386/misc_protos.h>
62#include <i386/thread.h>
0c530ab8 63#include <i386/trap.h>
b0d623f7 64#include <i386/seg.h>
0c530ab8 65#include <mach/i386/syscall_sw.h>
9bccf70c 66#include <sys/syscall.h>
91447636 67#include <sys/kdebug.h>
0c530ab8 68#include <sys/errno.h>
91447636
A
69#include <../bsd/sys/sysent.h>
70
b0d623f7
A
71#ifdef MACH_BSD
72extern void mach_kauth_cred_uthread_update(void);
39236c6e 73extern void throttle_lowpri_io(int);
b0d623f7
A
74#endif
75
0c530ab8
A
76void * find_user_regs(thread_t);
77
1c79356b
A
78unsigned int get_msr_exportmask(void);
79
80unsigned int get_msr_nbits(void);
81
82unsigned int get_msr_rbits(void);
83
84/*
85 * thread_userstack:
86 *
87 * Return the user stack pointer from the machine
88 * dependent thread state info.
89 */
90kern_return_t
91thread_userstack(
2d21ac55
A
92 __unused thread_t thread,
93 int flavor,
94 thread_state_t tstate,
95 __unused unsigned int count,
6d2010ae 96 mach_vm_offset_t *user_stack,
39037602
A
97 int *customstack,
98 __unused boolean_t is64bit
2d21ac55 99)
1c79356b 100{
2d21ac55
A
101 if (customstack)
102 *customstack = 0;
4452a7af 103
2d21ac55
A
104 switch (flavor) {
105 case x86_THREAD_STATE32:
106 {
107 x86_thread_state32_t *state25;
108
109 state25 = (x86_thread_state32_t *) tstate;
110
6d2010ae 111 if (state25->esp) {
2d21ac55 112 *user_stack = state25->esp;
6d2010ae
A
113 if (customstack)
114 *customstack = 1;
115 } else {
2d21ac55 116 *user_stack = VM_USRSTACK32;
6d2010ae
A
117 if (customstack)
118 *customstack = 0;
119 }
2d21ac55
A
120 break;
121 }
0c530ab8 122
2d21ac55
A
123 case x86_THREAD_STATE64:
124 {
125 x86_thread_state64_t *state25;
126
127 state25 = (x86_thread_state64_t *) tstate;
128
6d2010ae 129 if (state25->rsp) {
2d21ac55 130 *user_stack = state25->rsp;
6d2010ae
A
131 if (customstack)
132 *customstack = 1;
133 } else {
2d21ac55 134 *user_stack = VM_USRSTACK64;
6d2010ae
A
135 if (customstack)
136 *customstack = 0;
137 }
2d21ac55
A
138 break;
139 }
1c79356b 140
2d21ac55
A
141 default:
142 return (KERN_INVALID_ARGUMENT);
143 }
0c530ab8 144
2d21ac55
A
145 return (KERN_SUCCESS);
146}
1c79356b 147
316670eb
A
148/*
149 * thread_userstackdefault:
150 *
151 * Return the default stack location for the
152 * thread, if otherwise unknown.
153 */
154kern_return_t
155thread_userstackdefault(
39037602
A
156 mach_vm_offset_t *default_user_stack,
157 boolean_t is64bit)
316670eb 158{
39037602 159 if (is64bit) {
316670eb
A
160 *default_user_stack = VM_USRSTACK64;
161 } else {
162 *default_user_stack = VM_USRSTACK32;
163 }
164 return (KERN_SUCCESS);
165}
0c530ab8 166
1c79356b
A
167kern_return_t
168thread_entrypoint(
2d21ac55
A
169 __unused thread_t thread,
170 int flavor,
171 thread_state_t tstate,
172 __unused unsigned int count,
173 mach_vm_offset_t *entry_point
174)
1c79356b 175{
2d21ac55 176 /*
0c530ab8
A
177 * Set a default.
178 */
2d21ac55
A
179 if (*entry_point == 0)
180 *entry_point = VM_MIN_ADDRESS;
181
0c530ab8 182 switch (flavor) {
2d21ac55
A
183 case x86_THREAD_STATE32:
184 {
185 x86_thread_state32_t *state25;
0c530ab8 186
2d21ac55
A
187 state25 = (i386_thread_state_t *) tstate;
188 *entry_point = state25->eip ? state25->eip: VM_MIN_ADDRESS;
189 break;
190 }
6601e61a 191
2d21ac55
A
192 case x86_THREAD_STATE64:
193 {
194 x86_thread_state64_t *state25;
0c530ab8 195
2d21ac55
A
196 state25 = (x86_thread_state64_t *) tstate;
197 *entry_point = state25->rip ? state25->rip: VM_MIN_ADDRESS64;
198 break;
199 }
200 }
201 return (KERN_SUCCESS);
202}
1c79356b 203
1c79356b
A
204/*
205 * FIXME - thread_set_child
206 */
207
91447636 208void thread_set_child(thread_t child, int pid);
1c79356b 209void
91447636 210thread_set_child(thread_t child, int pid)
1c79356b 211{
6d2010ae 212 pal_register_cache_state(child, DIRTY);
b0d623f7 213
d9a64523 214 if (thread_is_64bit_addr(child)) {
2d21ac55 215 x86_saved_state64_t *iss64;
0c530ab8 216
0c530ab8
A
217 iss64 = USER_REGS64(child);
218
219 iss64->rax = pid;
220 iss64->rdx = 1;
221 iss64->isf.rflags &= ~EFL_CF;
222 } else {
2d21ac55
A
223 x86_saved_state32_t *iss32;
224
0c530ab8
A
225 iss32 = USER_REGS32(child);
226
227 iss32->eax = pid;
228 iss32->edx = 1;
229 iss32->efl &= ~EFL_CF;
230 }
1c79356b 231}
0c530ab8
A
232
233
1c79356b 234
1c79356b
A
235/*
236 * System Call handling code
237 */
238
91447636
A
239extern long fuword(vm_offset_t);
240
39037602 241__attribute__((noreturn))
1c79356b 242void
0c530ab8 243machdep_syscall(x86_saved_state_t *state)
8f6c56a5 244{
0c530ab8 245 int args[machdep_call_count];
2d21ac55 246 int trapno;
0c530ab8 247 int nargs;
316670eb 248 const machdep_call_t *entry;
0c530ab8
A
249 x86_saved_state32_t *regs;
250
251 assert(is_saved_state32(state));
252 regs = saved_state32(state);
1c79356b 253
0c530ab8
A
254 trapno = regs->eax;
255#if DEBUG_TRACE
256 kprintf("machdep_syscall(0x%08x) code=%d\n", regs, trapno);
257#endif
1c79356b 258
b0d623f7
A
259 DEBUG_KPRINT_SYSCALL_MDEP(
260 "machdep_syscall: trapno=%d\n", trapno);
261
0c530ab8 262 if (trapno < 0 || trapno >= machdep_call_count) {
2d21ac55 263 regs->eax = (unsigned int)kern_invalid(NULL);
1c79356b 264
91447636
A
265 thread_exception_return();
266 /* NOTREACHED */
267 }
0c530ab8
A
268 entry = &machdep_call_table[trapno];
269 nargs = entry->nargs;
91447636 270
0c530ab8 271 if (nargs != 0) {
2d21ac55
A
272 if (copyin((user_addr_t) regs->uesp + sizeof (int),
273 (char *) args, (nargs * sizeof (int)))) {
274 regs->eax = KERN_INVALID_ADDRESS;
6601e61a 275
0c530ab8
A
276 thread_exception_return();
277 /* NOTREACHED */
278 }
1c79356b 279 }
0c530ab8 280 switch (nargs) {
2d21ac55
A
281 case 0:
282 regs->eax = (*entry->routine.args_0)();
0c530ab8 283 break;
2d21ac55 284 case 1:
0c530ab8
A
285 regs->eax = (*entry->routine.args_1)(args[0]);
286 break;
2d21ac55
A
287 case 2:
288 regs->eax = (*entry->routine.args_2)(args[0],args[1]);
0c530ab8 289 break;
2d21ac55
A
290 case 3:
291 if (!entry->bsd_style)
292 regs->eax = (*entry->routine.args_3)(args[0],args[1],args[2]);
0c530ab8 293 else {
2d21ac55
A
294 int error;
295 uint32_t rval;
0c530ab8 296
2d21ac55 297 error = (*entry->routine.args_bsd_3)(&rval, args[0], args[1], args[2]);
0c530ab8 298 if (error) {
2d21ac55 299 regs->eax = error;
0c530ab8
A
300 regs->efl |= EFL_CF; /* carry bit */
301 } else {
2d21ac55
A
302 regs->eax = rval;
303 regs->efl &= ~EFL_CF;
0c530ab8
A
304 }
305 }
306 break;
2d21ac55 307 case 4:
0c530ab8
A
308 regs->eax = (*entry->routine.args_4)(args[0], args[1], args[2], args[3]);
309 break;
1c79356b 310
2d21ac55
A
311 default:
312 panic("machdep_syscall: too many args");
6601e61a 313 }
6601e61a 314
b0d623f7
A
315 DEBUG_KPRINT_SYSCALL_MDEP("machdep_syscall: retval=%u\n", regs->eax);
316
5ba3f43e
A
317#if DEBUG || DEVELOPMENT
318 kern_allocation_name_t
319 prior __assert_only = thread_get_kernel_state(current_thread())->allocation_name;
320 assertf(prior == NULL, "thread_set_allocation_name(\"%s\") not cleared", kern_allocation_get_name(prior));
321#endif /* DEBUG || DEVELOPMENT */
322
39236c6e 323 throttle_lowpri_io(1);
593a1d5f 324
0c530ab8
A
325 thread_exception_return();
326 /* NOTREACHED */
1c79356b
A
327}
328
39037602 329__attribute__((noreturn))
1c79356b 330void
0c530ab8 331machdep_syscall64(x86_saved_state_t *state)
1c79356b 332{
2d21ac55 333 int trapno;
316670eb 334 const machdep_call_t *entry;
0c530ab8 335 x86_saved_state64_t *regs;
1c79356b 336
0c530ab8
A
337 assert(is_saved_state64(state));
338 regs = saved_state64(state);
1c79356b 339
b0d623f7
A
340 trapno = (int)(regs->rax & SYSCALL_NUMBER_MASK);
341
342 DEBUG_KPRINT_SYSCALL_MDEP(
343 "machdep_syscall64: trapno=%d\n", trapno);
1c79356b 344
0c530ab8 345 if (trapno < 0 || trapno >= machdep_call_count) {
2d21ac55 346 regs->rax = (unsigned int)kern_invalid(NULL);
1c79356b 347
0c530ab8
A
348 thread_exception_return();
349 /* NOTREACHED */
1c79356b 350 }
0c530ab8 351 entry = &machdep_call_table64[trapno];
1c79356b 352
0c530ab8 353 switch (entry->nargs) {
2d21ac55
A
354 case 0:
355 regs->rax = (*entry->routine.args_0)();
6601e61a 356 break;
2d21ac55 357 case 1:
0c530ab8 358 regs->rax = (*entry->routine.args64_1)(regs->rdi);
55e303ae 359 break;
fe8ab488
A
360 case 2:
361 regs->rax = (*entry->routine.args64_2)(regs->rdi, regs->rsi);
362 break;
2d21ac55
A
363 default:
364 panic("machdep_syscall64: too many args");
55e303ae 365 }
6601e61a 366
b0d623f7
A
367 DEBUG_KPRINT_SYSCALL_MDEP("machdep_syscall: retval=%llu\n", regs->rax);
368
5ba3f43e
A
369#if DEBUG || DEVELOPMENT
370 kern_allocation_name_t
371 prior __assert_only = thread_get_kernel_state(current_thread())->allocation_name;
372 assertf(prior == NULL, "thread_set_allocation_name(\"%s\") not cleared", kern_allocation_get_name(prior));
373#endif /* DEBUG || DEVELOPMENT */
374
39236c6e 375 throttle_lowpri_io(1);
593a1d5f 376
0c530ab8
A
377 thread_exception_return();
378 /* NOTREACHED */
1c79356b
A
379}
380
0c530ab8 381#endif /* MACH_BSD */
4452a7af 382
21362eb3 383
/* Signature of a mach trap handler: takes a pointer to the argument block. */
typedef kern_return_t (*mach_call_t)(void *);

/*
 * Uniform argument block for dispatching mach traps: up to nine
 * syscall_arg_t slots, populated either directly from registers
 * (64-bit user threads) or via mach_call_arg_munger32 (32-bit
 * user threads).
 */
struct mach_call_args {
	syscall_arg_t arg1;
	syscall_arg_t arg2;
	syscall_arg_t arg3;
	syscall_arg_t arg4;
	syscall_arg_t arg5;
	syscall_arg_t arg6;
	syscall_arg_t arg7;
	syscall_arg_t arg8;
	syscall_arg_t arg9;
};
4452a7af 397
0c530ab8 398static kern_return_t
39236c6e 399mach_call_arg_munger32(uint32_t sp, struct mach_call_args *args, const mach_trap_t *trapp);
4452a7af 400
6601e61a 401
0c530ab8 402static kern_return_t
39236c6e 403mach_call_arg_munger32(uint32_t sp, struct mach_call_args *args, const mach_trap_t *trapp)
0c530ab8 404{
39236c6e 405 if (copyin((user_addr_t)(sp + sizeof(int)), (char *)args, trapp->mach_trap_u32_words * sizeof (int)))
2d21ac55 406 return KERN_INVALID_ARGUMENT;
fe8ab488
A
407#if CONFIG_REQUIRES_U32_MUNGING
408 trapp->mach_trap_arg_munge32(args);
409#else
410#error U32 mach traps on x86_64 kernel requires munging
411#endif
0c530ab8 412 return KERN_SUCCESS;
6601e61a 413}
4452a7af
A
414
415
2d21ac55 416__private_extern__ void mach_call_munger(x86_saved_state_t *state);
0c530ab8 417
b0d623f7
A
418extern const char *mach_syscall_name_table[];
419
39037602 420__attribute__((noreturn))
0c530ab8
A
421void
422mach_call_munger(x86_saved_state_t *state)
4452a7af 423{
4452a7af 424 int argc;
0c530ab8 425 int call_number;
4452a7af 426 mach_call_t mach_call;
6601e61a
A
427 kern_return_t retval;
428 struct mach_call_args args = { 0, 0, 0, 0, 0, 0, 0, 0, 0 };
0c530ab8
A
429 x86_saved_state32_t *regs;
430
3e170ce0 431 struct uthread *ut = get_bsdthread_info(current_thread());
3e170ce0 432 uthread_reset_proc_refcount(ut);
3e170ce0 433
0c530ab8
A
434 assert(is_saved_state32(state));
435 regs = saved_state32(state);
436
437 call_number = -(regs->eax);
b0d623f7
A
438
439 DEBUG_KPRINT_SYSCALL_MACH(
440 "mach_call_munger: code=%d(%s)\n",
441 call_number, mach_syscall_name_table[call_number]);
0c530ab8
A
442#if DEBUG_TRACE
443 kprintf("mach_call_munger(0x%08x) code=%d\n", regs, call_number);
444#endif
445
446 if (call_number < 0 || call_number >= mach_trap_count) {
2d21ac55 447 i386_exception(EXC_SYSCALL, call_number, 1);
0c530ab8
A
448 /* NOTREACHED */
449 }
450 mach_call = (mach_call_t)mach_trap_table[call_number].mach_trap_function;
2d21ac55 451
0c530ab8 452 if (mach_call == (mach_call_t)kern_invalid) {
b0d623f7
A
453 DEBUG_KPRINT_SYSCALL_MACH(
454 "mach_call_munger: kern_invalid 0x%x\n", regs->eax);
2d21ac55 455 i386_exception(EXC_SYSCALL, call_number, 1);
0c530ab8
A
456 /* NOTREACHED */
457 }
0c530ab8 458
2d21ac55 459 argc = mach_trap_table[call_number].mach_trap_arg_count;
0c530ab8 460 if (argc) {
39236c6e 461 retval = mach_call_arg_munger32(regs->uesp, &args, &mach_trap_table[call_number]);
0c530ab8 462 if (retval != KERN_SUCCESS) {
2d21ac55
A
463 regs->eax = retval;
464
b0d623f7
A
465 DEBUG_KPRINT_SYSCALL_MACH(
466 "mach_call_munger: retval=0x%x\n", retval);
467
0c530ab8
A
468 thread_exception_return();
469 /* NOTREACHED */
470 }
4452a7af 471 }
b0d623f7
A
472
473#ifdef MACH_BSD
474 mach_kauth_cred_uthread_update();
475#endif
316670eb
A
476
477 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
478 MACHDBG_CODE(DBG_MACH_EXCP_SC, (call_number)) | DBG_FUNC_START,
479 args.arg1, args.arg2, args.arg3, args.arg4, 0);
2d21ac55 480
0c530ab8
A
481 retval = mach_call(&args);
482
b0d623f7
A
483 DEBUG_KPRINT_SYSCALL_MACH("mach_call_munger: retval=0x%x\n", retval);
484
316670eb
A
485 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
486 MACHDBG_CODE(DBG_MACH_EXCP_SC,(call_number)) | DBG_FUNC_END,
487 retval, 0, 0, 0, 0);
488
0c530ab8 489 regs->eax = retval;
2d21ac55 490
5ba3f43e
A
491#if DEBUG || DEVELOPMENT
492 kern_allocation_name_t
493 prior __assert_only = thread_get_kernel_state(current_thread())->allocation_name;
494 assertf(prior == NULL, "thread_set_allocation_name(\"%s\") not cleared", kern_allocation_get_name(prior));
495#endif /* DEBUG || DEVELOPMENT */
496
39236c6e 497 throttle_lowpri_io(1);
593a1d5f 498
3e170ce0
A
499#if PROC_REF_DEBUG
500 if (__improbable(uthread_get_proc_refcount(ut) != 0)) {
501 panic("system call returned with uu_proc_refcount != 0");
502 }
503#endif
504
0c530ab8
A
505 thread_exception_return();
506 /* NOTREACHED */
507}
508
509
2d21ac55 510__private_extern__ void mach_call_munger64(x86_saved_state_t *regs);
0c530ab8 511
39037602 512__attribute__((noreturn))
0c530ab8
A
513void
514mach_call_munger64(x86_saved_state_t *state)
515{
516 int call_number;
517 int argc;
518 mach_call_t mach_call;
fe8ab488 519 struct mach_call_args args = { 0, 0, 0, 0, 0, 0, 0, 0, 0 };
0c530ab8
A
520 x86_saved_state64_t *regs;
521
3e170ce0 522 struct uthread *ut = get_bsdthread_info(current_thread());
3e170ce0 523 uthread_reset_proc_refcount(ut);
3e170ce0 524
0c530ab8
A
525 assert(is_saved_state64(state));
526 regs = saved_state64(state);
527
b0d623f7
A
528 call_number = (int)(regs->rax & SYSCALL_NUMBER_MASK);
529
530 DEBUG_KPRINT_SYSCALL_MACH(
531 "mach_call_munger64: code=%d(%s)\n",
532 call_number, mach_syscall_name_table[call_number]);
0c530ab8 533
316670eb
A
534 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
535 MACHDBG_CODE(DBG_MACH_EXCP_SC,(call_number)) | DBG_FUNC_START,
536 regs->rdi, regs->rsi, regs->rdx, regs->r10, 0);
6601e61a 537
0c530ab8
A
538 if (call_number < 0 || call_number >= mach_trap_count) {
539 i386_exception(EXC_SYSCALL, regs->rax, 1);
540 /* NOTREACHED */
541 }
6601e61a 542 mach_call = (mach_call_t)mach_trap_table[call_number].mach_trap_function;
6601e61a 543
0c530ab8
A
544 if (mach_call == (mach_call_t)kern_invalid) {
545 i386_exception(EXC_SYSCALL, regs->rax, 1);
546 /* NOTREACHED */
547 }
548 argc = mach_trap_table[call_number].mach_trap_arg_count;
fe8ab488
A
549 if (argc) {
550 int args_in_regs = MIN(6, argc);
0c530ab8 551
fe8ab488
A
552 memcpy(&args.arg1, &regs->rdi, args_in_regs * sizeof(syscall_arg_t));
553
554 if (argc > 6) {
0c530ab8
A
555 int copyin_count;
556
fe8ab488
A
557 assert(argc <= 9);
558 copyin_count = (argc - 6) * (int)sizeof(syscall_arg_t);
0c530ab8 559
fe8ab488 560 if (copyin((user_addr_t)(regs->isf.rsp + sizeof(user_addr_t)), (char *)&args.arg7, copyin_count)) {
0c530ab8
A
561 regs->rax = KERN_INVALID_ARGUMENT;
562
fe8ab488
A
563 thread_exception_return();
564 /* NOTREACHED */
565 }
0c530ab8
A
566 }
567 }
b0d623f7
A
568
569#ifdef MACH_BSD
570 mach_kauth_cred_uthread_update();
571#endif
572
fe8ab488 573 regs->rax = (uint64_t)mach_call((void *)&args);
0c530ab8 574
b0d623f7
A
575 DEBUG_KPRINT_SYSCALL_MACH( "mach_call_munger64: retval=0x%llx\n", regs->rax);
576
316670eb
A
577 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
578 MACHDBG_CODE(DBG_MACH_EXCP_SC,(call_number)) | DBG_FUNC_END,
579 regs->rax, 0, 0, 0, 0);
91447636 580
5ba3f43e
A
581#if DEBUG || DEVELOPMENT
582 kern_allocation_name_t
583 prior __assert_only = thread_get_kernel_state(current_thread())->allocation_name;
584 assertf(prior == NULL, "thread_set_allocation_name(\"%s\") not cleared", kern_allocation_get_name(prior));
585#endif /* DEBUG || DEVELOPMENT */
586
39236c6e 587 throttle_lowpri_io(1);
593a1d5f 588
3e170ce0
A
589#if PROC_REF_DEBUG
590 if (__improbable(uthread_get_proc_refcount(ut) != 0)) {
591 panic("system call returned with uu_proc_refcount != 0");
592 }
593#endif
594
0c530ab8
A
595 thread_exception_return();
596 /* NOTREACHED */
91447636
A
597}
598
0c530ab8 599
91447636
A
600/*
601 * thread_setuserstack:
602 *
603 * Sets the user stack pointer into the machine
604 * dependent thread state info.
605 */
606void
607thread_setuserstack(
608 thread_t thread,
609 mach_vm_address_t user_stack)
610{
6d2010ae 611 pal_register_cache_state(thread, DIRTY);
d9a64523 612 if (thread_is_64bit_addr(thread)) {
2d21ac55 613 x86_saved_state64_t *iss64;
0c530ab8
A
614
615 iss64 = USER_REGS64(thread);
5d5c5d0d 616
0c530ab8
A
617 iss64->isf.rsp = (uint64_t)user_stack;
618 } else {
2d21ac55
A
619 x86_saved_state32_t *iss32;
620
0c530ab8
A
621 iss32 = USER_REGS32(thread);
622
b0d623f7 623 iss32->uesp = CAST_DOWN_EXPLICIT(unsigned int, user_stack);
0c530ab8 624 }
91447636
A
625}
626
627/*
628 * thread_adjuserstack:
629 *
630 * Returns the adjusted user stack pointer from the machine
631 * dependent thread state info. Used for small (<2G) deltas.
632 */
633uint64_t
634thread_adjuserstack(
635 thread_t thread,
636 int adjust)
637{
6d2010ae 638 pal_register_cache_state(thread, DIRTY);
d9a64523 639 if (thread_is_64bit_addr(thread)) {
2d21ac55 640 x86_saved_state64_t *iss64;
5d5c5d0d 641
0c530ab8
A
642 iss64 = USER_REGS64(thread);
643
644 iss64->isf.rsp += adjust;
645
646 return iss64->isf.rsp;
647 } else {
2d21ac55
A
648 x86_saved_state32_t *iss32;
649
0c530ab8
A
650 iss32 = USER_REGS32(thread);
651
652 iss32->uesp += adjust;
653
654 return CAST_USER_ADDR_T(iss32->uesp);
655 }
91447636
A
656}
657
658/*
659 * thread_setentrypoint:
660 *
661 * Sets the user PC into the machine
662 * dependent thread state info.
663 */
664void
0c530ab8 665thread_setentrypoint(thread_t thread, mach_vm_address_t entry)
4452a7af 666{
6d2010ae 667 pal_register_cache_state(thread, DIRTY);
d9a64523 668 if (thread_is_64bit_addr(thread)) {
2d21ac55 669 x86_saved_state64_t *iss64;
4452a7af 670
0c530ab8
A
671 iss64 = USER_REGS64(thread);
672
673 iss64->isf.rip = (uint64_t)entry;
674 } else {
2d21ac55
A
675 x86_saved_state32_t *iss32;
676
0c530ab8
A
677 iss32 = USER_REGS32(thread);
678
b0d623f7 679 iss32->eip = CAST_DOWN_EXPLICIT(unsigned int, entry);
0c530ab8
A
680 }
681}
682
683
2d21ac55 684kern_return_t
0c530ab8
A
685thread_setsinglestep(thread_t thread, int on)
686{
6d2010ae 687 pal_register_cache_state(thread, DIRTY);
d9a64523 688 if (thread_is_64bit_addr(thread)) {
2d21ac55 689 x86_saved_state64_t *iss64;
0c530ab8
A
690
691 iss64 = USER_REGS64(thread);
692
693 if (on)
2d21ac55 694 iss64->isf.rflags |= EFL_TF;
0c530ab8 695 else
2d21ac55 696 iss64->isf.rflags &= ~EFL_TF;
0c530ab8 697 } else {
2d21ac55
A
698 x86_saved_state32_t *iss32;
699
0c530ab8
A
700 iss32 = USER_REGS32(thread);
701
b0d623f7 702 if (on) {
2d21ac55 703 iss32->efl |= EFL_TF;
b0d623f7
A
704 /* Ensure IRET */
705 if (iss32->cs == SYSENTER_CS)
706 iss32->cs = SYSENTER_TF_CS;
707 }
0c530ab8 708 else
2d21ac55 709 iss32->efl &= ~EFL_TF;
0c530ab8 710 }
2d21ac55
A
711
712 return (KERN_SUCCESS);
0c530ab8
A
713}
714
0c530ab8 715void *
39037602 716get_user_regs(thread_t th)
0c530ab8 717{
39037602
A
718 pal_register_cache_state(th, DIRTY);
719 return(USER_STATE(th));
0c530ab8 720}
91447636 721
2d21ac55 722void *
39037602 723find_user_regs(thread_t thread)
2d21ac55 724{
39037602 725 return get_user_regs(thread);
2d21ac55
A
726}
727
728#if CONFIG_DTRACE
729/*
730 * DTrace would like to have a peek at the kernel interrupt state, if available.
2d21ac55 731 */
b0d623f7 732x86_saved_state_t *find_kern_regs(thread_t);
2d21ac55 733
b0d623f7 734x86_saved_state_t *
2d21ac55
A
735find_kern_regs(thread_t thread)
736{
737 if (thread == current_thread() &&
738 NULL != current_cpu_datap()->cpu_int_state &&
739 !(USER_STATE(thread) == current_cpu_datap()->cpu_int_state &&
740 current_cpu_datap()->cpu_interrupt_level == 1)) {
741
b0d623f7 742 return current_cpu_datap()->cpu_int_state;
2d21ac55
A
743 } else {
744 return NULL;
745 }
746}
747
748vm_offset_t dtrace_get_cpu_int_stack_top(void);
749
750vm_offset_t
751dtrace_get_cpu_int_stack_top(void)
752{
753 return current_cpu_datap()->cpu_int_stack_top;
754}
755#endif