]> git.saurik.com Git - apple/xnu.git/blame - osfmk/i386/bsd_i386.c
xnu-6153.141.1.tar.gz
[apple/xnu.git] / osfmk / i386 / bsd_i386.c
CommitLineData
1c79356b 1/*
cb323159 2 * Copyright (c) 2000-2019 Apple Inc. All rights reserved.
1c79356b 3 *
2d21ac55 4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
0a7de745 5 *
2d21ac55
A
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
0a7de745 14 *
2d21ac55
A
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
0a7de745 17 *
2d21ac55
A
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
8f6c56a5
A
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
2d21ac55
A
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
0a7de745 25 *
2d21ac55 26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
1c79356b 27 */
0a7de745 28#ifdef MACH_BSD
1c79356b
A
29#include <mach_debug.h>
30#include <mach_ldebug.h>
31
32#include <mach/kern_return.h>
91447636 33#include <mach/mach_traps.h>
1c79356b
A
34#include <mach/thread_status.h>
35#include <mach/vm_param.h>
1c79356b
A
36
37#include <kern/counters.h>
38#include <kern/cpu_data.h>
39#include <kern/mach_param.h>
40#include <kern/task.h>
41#include <kern/thread.h>
1c79356b
A
42#include <kern/sched_prim.h>
43#include <kern/misc_protos.h>
44#include <kern/assert.h>
b0d623f7 45#include <kern/debug.h>
1c79356b 46#include <kern/spl.h>
55e303ae 47#include <kern/syscall_sw.h>
1c79356b
A
48#include <ipc/ipc_port.h>
49#include <vm/vm_kern.h>
50#include <vm/pmap.h>
51
91447636 52#include <i386/cpu_number.h>
1c79356b
A
53#include <i386/eflags.h>
54#include <i386/proc_reg.h>
1c79356b
A
55#include <i386/tss.h>
56#include <i386/user_ldt.h>
57#include <i386/fpu.h>
1c79356b 58#include <i386/machdep_call.h>
91447636 59#include <i386/vmparam.h>
b0d623f7
A
60#include <i386/mp_desc.h>
61#include <i386/misc_protos.h>
62#include <i386/thread.h>
0c530ab8 63#include <i386/trap.h>
b0d623f7 64#include <i386/seg.h>
0c530ab8 65#include <mach/i386/syscall_sw.h>
9bccf70c 66#include <sys/syscall.h>
91447636 67#include <sys/kdebug.h>
0c530ab8 68#include <sys/errno.h>
91447636
A
69#include <../bsd/sys/sysent.h>
70
b0d623f7 71#ifdef MACH_BSD
0a7de745 72extern void mach_kauth_cred_uthread_update(void);
39236c6e 73extern void throttle_lowpri_io(int);
b0d623f7
A
74#endif
75
0c530ab8
A
76void * find_user_regs(thread_t);
77
1c79356b
A
78unsigned int get_msr_exportmask(void);
79
80unsigned int get_msr_nbits(void);
81
82unsigned int get_msr_rbits(void);
83
84/*
85 * thread_userstack:
86 *
87 * Return the user stack pointer from the machine
88 * dependent thread state info.
89 */
90kern_return_t
91thread_userstack(
0a7de745
A
92 __unused thread_t thread,
93 int flavor,
94 thread_state_t tstate,
eb6b6ca3 95 unsigned int count,
0a7de745
A
96 mach_vm_offset_t *user_stack,
97 int *customstack,
98 __unused boolean_t is64bit
99 )
1c79356b 100{
0a7de745 101 if (customstack) {
2d21ac55 102 *customstack = 0;
0a7de745 103 }
4452a7af 104
2d21ac55
A
105 switch (flavor) {
106 case x86_THREAD_STATE32:
0a7de745
A
107 {
108 x86_thread_state32_t *state25;
2d21ac55 109
eb6b6ca3
A
110 if (__improbable(count != x86_THREAD_STATE32_COUNT)) {
111 return KERN_INVALID_ARGUMENT;
112 }
113
0a7de745 114 state25 = (x86_thread_state32_t *) tstate;
2d21ac55 115
0a7de745
A
116 if (state25->esp) {
117 *user_stack = state25->esp;
118 if (customstack) {
119 *customstack = 1;
120 }
121 } else {
122 *user_stack = VM_USRSTACK32;
123 if (customstack) {
124 *customstack = 0;
6d2010ae 125 }
2d21ac55 126 }
0a7de745
A
127 break;
128 }
0c530ab8 129
0a7de745 130 case x86_THREAD_FULL_STATE64:
eb6b6ca3
A
131 {
132 x86_thread_full_state64_t *state25;
133
134 if (__improbable(count != x86_THREAD_FULL_STATE64_COUNT)) {
135 return KERN_INVALID_ARGUMENT;
136 }
137
138 state25 = (x86_thread_full_state64_t *) tstate;
139
140 if (state25->ss64.rsp) {
141 *user_stack = state25->ss64.rsp;
142 if (customstack) {
143 *customstack = 1;
144 }
145 } else {
146 *user_stack = VM_USRSTACK64;
147 if (customstack) {
148 *customstack = 0;
149 }
150 }
151 break;
152 }
153
2d21ac55 154 case x86_THREAD_STATE64:
0a7de745
A
155 {
156 x86_thread_state64_t *state25;
2d21ac55 157
eb6b6ca3
A
158 if (__improbable(count != x86_THREAD_STATE64_COUNT)) {
159 return KERN_INVALID_ARGUMENT;
160 }
161
0a7de745 162 state25 = (x86_thread_state64_t *) tstate;
2d21ac55 163
0a7de745
A
164 if (state25->rsp) {
165 *user_stack = state25->rsp;
166 if (customstack) {
167 *customstack = 1;
168 }
169 } else {
170 *user_stack = VM_USRSTACK64;
171 if (customstack) {
172 *customstack = 0;
6d2010ae 173 }
2d21ac55 174 }
0a7de745
A
175 break;
176 }
1c79356b 177
2d21ac55 178 default:
0a7de745 179 return KERN_INVALID_ARGUMENT;
2d21ac55 180 }
0c530ab8 181
0a7de745 182 return KERN_SUCCESS;
2d21ac55 183}
1c79356b 184
316670eb
A
185/*
186 * thread_userstackdefault:
187 *
188 * Return the default stack location for the
189 * thread, if otherwise unknown.
190 */
191kern_return_t
192thread_userstackdefault(
39037602
A
193 mach_vm_offset_t *default_user_stack,
194 boolean_t is64bit)
316670eb 195{
39037602 196 if (is64bit) {
316670eb
A
197 *default_user_stack = VM_USRSTACK64;
198 } else {
199 *default_user_stack = VM_USRSTACK32;
200 }
0a7de745 201 return KERN_SUCCESS;
316670eb 202}
0c530ab8 203
1c79356b
A
204kern_return_t
205thread_entrypoint(
0a7de745
A
206 __unused thread_t thread,
207 int flavor,
208 thread_state_t tstate,
eb6b6ca3 209 unsigned int count,
0a7de745
A
210 mach_vm_offset_t *entry_point
211 )
212{
2d21ac55 213 /*
0c530ab8
A
214 * Set a default.
215 */
0a7de745 216 if (*entry_point == 0) {
2d21ac55 217 *entry_point = VM_MIN_ADDRESS;
0a7de745 218 }
2d21ac55 219
0c530ab8 220 switch (flavor) {
2d21ac55 221 case x86_THREAD_STATE32:
0a7de745
A
222 {
223 x86_thread_state32_t *state25;
0c530ab8 224
eb6b6ca3
A
225 if (count != x86_THREAD_STATE32_COUNT) {
226 return KERN_INVALID_ARGUMENT;
227 }
228
0a7de745
A
229 state25 = (i386_thread_state_t *) tstate;
230 *entry_point = state25->eip ? state25->eip : VM_MIN_ADDRESS;
231 break;
232 }
6601e61a 233
2d21ac55 234 case x86_THREAD_STATE64:
0a7de745
A
235 {
236 x86_thread_state64_t *state25;
0c530ab8 237
eb6b6ca3
A
238 if (count != x86_THREAD_STATE64_COUNT) {
239 return KERN_INVALID_ARGUMENT;
240 }
241
0a7de745
A
242 state25 = (x86_thread_state64_t *) tstate;
243 *entry_point = state25->rip ? state25->rip : VM_MIN_ADDRESS64;
244 break;
245 }
2d21ac55 246 }
0a7de745 247 return KERN_SUCCESS;
2d21ac55 248}
1c79356b 249
0a7de745 250/*
1c79356b
A
251 * FIXME - thread_set_child
252 */
253
91447636 254void thread_set_child(thread_t child, int pid);
1c79356b 255void
91447636 256thread_set_child(thread_t child, int pid)
1c79356b 257{
6d2010ae 258 pal_register_cache_state(child, DIRTY);
b0d623f7 259
d9a64523 260 if (thread_is_64bit_addr(child)) {
0a7de745 261 x86_saved_state64_t *iss64;
0c530ab8 262
0c530ab8
A
263 iss64 = USER_REGS64(child);
264
265 iss64->rax = pid;
266 iss64->rdx = 1;
267 iss64->isf.rflags &= ~EFL_CF;
268 } else {
0a7de745 269 x86_saved_state32_t *iss32;
2d21ac55 270
0c530ab8
A
271 iss32 = USER_REGS32(child);
272
273 iss32->eax = pid;
274 iss32->edx = 1;
275 iss32->efl &= ~EFL_CF;
276 }
1c79356b 277}
0c530ab8
A
278
279
1c79356b 280
1c79356b
A
281/*
282 * System Call handling code
283 */
284
91447636
A
285extern long fuword(vm_offset_t);
286
/*
 * machdep_syscall:
 *
 * Dispatch a 32-bit machine-dependent system call.  The trap number
 * arrives in eax; up to four int-sized arguments are copied in from
 * the user stack.  Never returns to the caller: every path exits via
 * thread_exception_return() back to user mode, with the result (or
 * error) placed in the saved eax.
 */
__attribute__((noreturn))
void
machdep_syscall(x86_saved_state_t *state)
{
	int args[machdep_call_count];
	int trapno;
	int nargs;
	const machdep_call_t *entry;
	x86_saved_state32_t *regs;

	assert(is_saved_state32(state));
	regs = saved_state32(state);

	trapno = regs->eax;
#if DEBUG_TRACE
	kprintf("machdep_syscall(0x%08x) code=%d\n", regs, trapno);
#endif

	DEBUG_KPRINT_SYSCALL_MDEP(
		"machdep_syscall: trapno=%d\n", trapno);

	/* Out-of-range trap number: report failure and return to user. */
	if (trapno < 0 || trapno >= machdep_call_count) {
		regs->eax = (unsigned int)kern_invalid(NULL);

		thread_exception_return();
		/* NOTREACHED */
	}
	entry = &machdep_call_table[trapno];
	nargs = entry->nargs;

	if (nargs != 0) {
		/* Arguments sit just above the return address on the user stack. */
		if (copyin((user_addr_t) regs->uesp + sizeof(int),
		    (char *) args, (nargs * sizeof(int)))) {
			regs->eax = KERN_INVALID_ADDRESS;

			thread_exception_return();
			/* NOTREACHED */
		}
	}
	/* Dispatch through the arity-specific function pointer. */
	switch (nargs) {
	case 0:
		regs->eax = (*entry->routine.args_0)();
		break;
	case 1:
		regs->eax = (*entry->routine.args_1)(args[0]);
		break;
	case 2:
		regs->eax = (*entry->routine.args_2)(args[0], args[1]);
		break;
	case 3:
		if (!entry->bsd_style) {
			regs->eax = (*entry->routine.args_3)(args[0], args[1], args[2]);
		} else {
			/*
			 * BSD-style: errno-like error in eax plus carry flag,
			 * result value through an out parameter.
			 */
			int error;
			uint32_t rval;

			error = (*entry->routine.args_bsd_3)(&rval, args[0], args[1], args[2]);
			if (error) {
				regs->eax = error;
				regs->efl |= EFL_CF;    /* carry bit */
			} else {
				regs->eax = rval;
				regs->efl &= ~EFL_CF;
			}
		}
		break;
	case 4:
		regs->eax = (*entry->routine.args_4)(args[0], args[1], args[2], args[3]);
		break;

	default:
		panic("machdep_syscall: too many args");
	}

	DEBUG_KPRINT_SYSCALL_MDEP("machdep_syscall: retval=%u\n", regs->eax);

#if DEBUG || DEVELOPMENT
	/* Verify no allocation-name tag leaked out of the call. */
	kern_allocation_name_t
	    prior __assert_only = thread_get_kernel_state(current_thread())->allocation_name;
	assertf(prior == NULL, "thread_set_allocation_name(\"%s\") not cleared", kern_allocation_get_name(prior));
#endif /* DEBUG || DEVELOPMENT */

	throttle_lowpri_io(1);

	thread_exception_return();
	/* NOTREACHED */
}
374
/*
 * machdep_syscall64:
 *
 * Dispatch a 64-bit machine-dependent system call.  The trap number
 * arrives in rax (masked by SYSCALL_NUMBER_MASK); up to three
 * arguments are taken directly from registers (rdi, rsi, rdx) per
 * the System V AMD64 convention.  Never returns: all paths exit via
 * thread_exception_return() with the result in the saved rax.
 */
__attribute__((noreturn))
void
machdep_syscall64(x86_saved_state_t *state)
{
	int trapno;
	const machdep_call_t *entry;
	x86_saved_state64_t *regs;

	assert(is_saved_state64(state));
	regs = saved_state64(state);

	trapno = (int)(regs->rax & SYSCALL_NUMBER_MASK);

	DEBUG_KPRINT_SYSCALL_MDEP(
		"machdep_syscall64: trapno=%d\n", trapno);

	/* Out-of-range trap number: report failure and return to user. */
	if (trapno < 0 || trapno >= machdep_call_count) {
		regs->rax = (unsigned int)kern_invalid(NULL);

		thread_exception_return();
		/* NOTREACHED */
	}
	entry = &machdep_call_table64[trapno];

	/* Dispatch through the arity-specific function pointer. */
	switch (entry->nargs) {
	case 0:
		regs->rax = (*entry->routine.args_0)();
		break;
	case 1:
		regs->rax = (*entry->routine.args64_1)(regs->rdi);
		break;
	case 2:
		regs->rax = (*entry->routine.args64_2)(regs->rdi, regs->rsi);
		break;
	case 3:
		if (!entry->bsd_style) {
			regs->rax = (*entry->routine.args64_3)(regs->rdi, regs->rsi, regs->rdx);
		} else {
			/*
			 * BSD-style: errno-like error in rax plus carry flag,
			 * result value through an out parameter.
			 */
			int error;
			uint32_t rval;

			error = (*entry->routine.args64_bsd_3)(&rval, regs->rdi, regs->rsi, regs->rdx);
			if (error) {
				regs->rax = (uint64_t)error;
				regs->isf.rflags |= EFL_CF; /* carry bit */
			} else {
				regs->rax = rval;
				regs->isf.rflags &= ~(uint64_t)EFL_CF;
			}
		}
		break;
	default:
		panic("machdep_syscall64: too many args");
	}

	DEBUG_KPRINT_SYSCALL_MDEP("machdep_syscall: retval=%llu\n", regs->rax);

#if DEBUG || DEVELOPMENT
	/* Verify no allocation-name tag leaked out of the call. */
	kern_allocation_name_t
	    prior __assert_only = thread_get_kernel_state(current_thread())->allocation_name;
	assertf(prior == NULL, "thread_set_allocation_name(\"%s\") not cleared", kern_allocation_get_name(prior));
#endif /* DEBUG || DEVELOPMENT */

	throttle_lowpri_io(1);

	thread_exception_return();
	/* NOTREACHED */
}
443
0a7de745 444#endif /* MACH_BSD */
4452a7af 445
21362eb3 446
0c530ab8 447typedef kern_return_t (*mach_call_t)(void *);
4452a7af 448
0c530ab8
A
449struct mach_call_args {
450 syscall_arg_t arg1;
451 syscall_arg_t arg2;
452 syscall_arg_t arg3;
453 syscall_arg_t arg4;
454 syscall_arg_t arg5;
455 syscall_arg_t arg6;
456 syscall_arg_t arg7;
457 syscall_arg_t arg8;
458 syscall_arg_t arg9;
459};
4452a7af 460
0c530ab8 461static kern_return_t
39236c6e 462mach_call_arg_munger32(uint32_t sp, struct mach_call_args *args, const mach_trap_t *trapp);
4452a7af 463
6601e61a 464
0c530ab8 465static kern_return_t
39236c6e 466mach_call_arg_munger32(uint32_t sp, struct mach_call_args *args, const mach_trap_t *trapp)
0c530ab8 467{
0a7de745 468 if (copyin((user_addr_t)(sp + sizeof(int)), (char *)args, trapp->mach_trap_u32_words * sizeof(int))) {
2d21ac55 469 return KERN_INVALID_ARGUMENT;
0a7de745 470 }
fe8ab488
A
471#if CONFIG_REQUIRES_U32_MUNGING
472 trapp->mach_trap_arg_munge32(args);
473#else
474#error U32 mach traps on x86_64 kernel requires munging
475#endif
0c530ab8 476 return KERN_SUCCESS;
6601e61a 477}
4452a7af
A
478
479
__private_extern__ void mach_call_munger(x86_saved_state_t *state);

extern const char *mach_syscall_name_table[];

/*
 * mach_call_munger:
 *
 * Dispatch a Mach trap from a 32-bit user thread.  The (negated)
 * trap number arrives in eax; arguments are copied in from the user
 * stack and munged to 64-bit slots.  Never returns to the caller:
 * every path exits via thread_exception_return() (or raises an
 * EXC_SYSCALL exception for invalid trap numbers).
 */
__attribute__((noreturn))
void
mach_call_munger(x86_saved_state_t *state)
{
	int argc;
	int call_number;
	mach_call_t mach_call;
	kern_return_t retval;
	struct mach_call_args args = {
		.arg1 = 0,
		.arg2 = 0,
		.arg3 = 0,
		.arg4 = 0,
		.arg5 = 0,
		.arg6 = 0,
		.arg7 = 0,
		.arg8 = 0,
		.arg9 = 0
	};
	x86_saved_state32_t *regs;

	/* Track proc refcount balance across the trap (see check below). */
	struct uthread *ut = get_bsdthread_info(current_thread());
	uthread_reset_proc_refcount(ut);

	assert(is_saved_state32(state));
	regs = saved_state32(state);

	/* Mach traps are encoded as negative numbers in eax. */
	call_number = -(regs->eax);

	DEBUG_KPRINT_SYSCALL_MACH(
		"mach_call_munger: code=%d(%s)\n",
		call_number, mach_syscall_name_table[call_number]);
#if DEBUG_TRACE
	kprintf("mach_call_munger(0x%08x) code=%d\n", regs, call_number);
#endif

	/* Invalid trap number: deliver an exception instead of returning. */
	if (call_number < 0 || call_number >= mach_trap_count) {
		i386_exception(EXC_SYSCALL, call_number, 1);
		/* NOTREACHED */
	}
	mach_call = (mach_call_t)mach_trap_table[call_number].mach_trap_function;

	if (mach_call == (mach_call_t)kern_invalid) {
		DEBUG_KPRINT_SYSCALL_MACH(
			"mach_call_munger: kern_invalid 0x%x\n", regs->eax);
		i386_exception(EXC_SYSCALL, call_number, 1);
		/* NOTREACHED */
	}

	argc = mach_trap_table[call_number].mach_trap_arg_count;
	if (argc) {
		/* Copy in and munge the user-stack arguments. */
		retval = mach_call_arg_munger32(regs->uesp, &args, &mach_trap_table[call_number]);
		if (retval != KERN_SUCCESS) {
			regs->eax = retval;

			DEBUG_KPRINT_SYSCALL_MACH(
				"mach_call_munger: retval=0x%x\n", retval);

			thread_exception_return();
			/* NOTREACHED */
		}
	}

#ifdef MACH_BSD
	mach_kauth_cred_uthread_update();
#endif

	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
	    MACHDBG_CODE(DBG_MACH_EXCP_SC, (call_number)) | DBG_FUNC_START,
	    args.arg1, args.arg2, args.arg3, args.arg4, 0);

	/* Invoke the trap handler itself. */
	retval = mach_call(&args);

	DEBUG_KPRINT_SYSCALL_MACH("mach_call_munger: retval=0x%x\n", retval);

	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
	    MACHDBG_CODE(DBG_MACH_EXCP_SC, (call_number)) | DBG_FUNC_END,
	    retval, 0, 0, 0, 0);

	regs->eax = retval;

#if DEBUG || DEVELOPMENT
	/* Verify no allocation-name tag leaked out of the call. */
	kern_allocation_name_t
	    prior __assert_only = thread_get_kernel_state(current_thread())->allocation_name;
	assertf(prior == NULL, "thread_set_allocation_name(\"%s\") not cleared", kern_allocation_get_name(prior));
#endif /* DEBUG || DEVELOPMENT */

	throttle_lowpri_io(1);

#if PROC_REF_DEBUG
	/* The trap must not return while still holding proc references. */
	if (__improbable(uthread_get_proc_refcount(ut) != 0)) {
		panic("system call returned with uu_proc_refcount != 0");
	}
#endif

	thread_exception_return();
	/* NOTREACHED */
}
582
583
__private_extern__ void mach_call_munger64(x86_saved_state_t *regs);

/*
 * mach_call_munger64:
 *
 * Dispatch a Mach trap from a 64-bit user thread.  The trap number
 * arrives in rax (masked by SYSCALL_NUMBER_MASK); the first six
 * arguments come from registers (rdi..r9), any remainder from the
 * user stack.  Never returns to the caller: every path exits via
 * thread_exception_return() (or raises EXC_SYSCALL for invalid
 * trap numbers).
 */
__attribute__((noreturn))
void
mach_call_munger64(x86_saved_state_t *state)
{
	int call_number;
	int argc;
	mach_call_t mach_call;
	struct mach_call_args args = {
		.arg1 = 0,
		.arg2 = 0,
		.arg3 = 0,
		.arg4 = 0,
		.arg5 = 0,
		.arg6 = 0,
		.arg7 = 0,
		.arg8 = 0,
		.arg9 = 0
	};
	x86_saved_state64_t *regs;

	/* Track proc refcount balance across the trap (see check below). */
	struct uthread *ut = get_bsdthread_info(current_thread());
	uthread_reset_proc_refcount(ut);

	assert(is_saved_state64(state));
	regs = saved_state64(state);

	call_number = (int)(regs->rax & SYSCALL_NUMBER_MASK);

	DEBUG_KPRINT_SYSCALL_MACH(
		"mach_call_munger64: code=%d(%s)\n",
		call_number, mach_syscall_name_table[call_number]);

	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
	    MACHDBG_CODE(DBG_MACH_EXCP_SC, (call_number)) | DBG_FUNC_START,
	    regs->rdi, regs->rsi, regs->rdx, regs->r10, 0);

	/* Invalid trap number: deliver an exception instead of returning. */
	if (call_number < 0 || call_number >= mach_trap_count) {
		i386_exception(EXC_SYSCALL, regs->rax, 1);
		/* NOTREACHED */
	}
	mach_call = (mach_call_t)mach_trap_table[call_number].mach_trap_function;

	if (mach_call == (mach_call_t)kern_invalid) {
		i386_exception(EXC_SYSCALL, regs->rax, 1);
		/* NOTREACHED */
	}
	argc = mach_trap_table[call_number].mach_trap_arg_count;
	if (argc) {
		/* First six args live in contiguous saved registers. */
		int args_in_regs = MIN(6, argc);
		__nochk_memcpy(&args.arg1, &regs->rdi, args_in_regs * sizeof(syscall_arg_t));

		if (argc > 6) {
			int copyin_count;

			assert(argc <= 9);
			copyin_count = (argc - 6) * (int)sizeof(syscall_arg_t);

			/* Remaining args sit just above the return address on the user stack. */
			if (copyin((user_addr_t)(regs->isf.rsp + sizeof(user_addr_t)), (char *)&args.arg7, copyin_count)) {
				regs->rax = KERN_INVALID_ARGUMENT;

				thread_exception_return();
				/* NOTREACHED */
			}
		}
	}

#ifdef MACH_BSD
	mach_kauth_cred_uthread_update();
#endif

	/* Invoke the trap handler itself. */
	regs->rax = (uint64_t)mach_call((void *)&args);

	DEBUG_KPRINT_SYSCALL_MACH( "mach_call_munger64: retval=0x%llx\n", regs->rax);

	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
	    MACHDBG_CODE(DBG_MACH_EXCP_SC, (call_number)) | DBG_FUNC_END,
	    regs->rax, 0, 0, 0, 0);

#if DEBUG || DEVELOPMENT
	/* Verify no allocation-name tag leaked out of the call. */
	kern_allocation_name_t
	    prior __assert_only = thread_get_kernel_state(current_thread())->allocation_name;
	assertf(prior == NULL, "thread_set_allocation_name(\"%s\") not cleared", kern_allocation_get_name(prior));
#endif /* DEBUG || DEVELOPMENT */

	throttle_lowpri_io(1);

#if PROC_REF_DEBUG
	/* The trap must not return while still holding proc references. */
	if (__improbable(uthread_get_proc_refcount(ut) != 0)) {
		panic("system call returned with uu_proc_refcount != 0");
	}
#endif

	thread_exception_return();
	/* NOTREACHED */
}
681
0c530ab8 682
91447636
A
683/*
684 * thread_setuserstack:
685 *
686 * Sets the user stack pointer into the machine
687 * dependent thread state info.
688 */
689void
690thread_setuserstack(
0a7de745
A
691 thread_t thread,
692 mach_vm_address_t user_stack)
91447636 693{
6d2010ae 694 pal_register_cache_state(thread, DIRTY);
d9a64523 695 if (thread_is_64bit_addr(thread)) {
0a7de745 696 x86_saved_state64_t *iss64;
0c530ab8
A
697
698 iss64 = USER_REGS64(thread);
5d5c5d0d 699
0c530ab8
A
700 iss64->isf.rsp = (uint64_t)user_stack;
701 } else {
0a7de745 702 x86_saved_state32_t *iss32;
2d21ac55 703
0c530ab8
A
704 iss32 = USER_REGS32(thread);
705
b0d623f7 706 iss32->uesp = CAST_DOWN_EXPLICIT(unsigned int, user_stack);
0c530ab8 707 }
91447636
A
708}
709
710/*
711 * thread_adjuserstack:
712 *
713 * Returns the adjusted user stack pointer from the machine
714 * dependent thread state info. Used for small (<2G) deltas.
715 */
716uint64_t
717thread_adjuserstack(
0a7de745
A
718 thread_t thread,
719 int adjust)
91447636 720{
6d2010ae 721 pal_register_cache_state(thread, DIRTY);
d9a64523 722 if (thread_is_64bit_addr(thread)) {
0a7de745 723 x86_saved_state64_t *iss64;
5d5c5d0d 724
0c530ab8
A
725 iss64 = USER_REGS64(thread);
726
727 iss64->isf.rsp += adjust;
728
729 return iss64->isf.rsp;
730 } else {
0a7de745 731 x86_saved_state32_t *iss32;
2d21ac55 732
0c530ab8
A
733 iss32 = USER_REGS32(thread);
734
735 iss32->uesp += adjust;
736
737 return CAST_USER_ADDR_T(iss32->uesp);
738 }
91447636
A
739}
740
741/*
742 * thread_setentrypoint:
743 *
744 * Sets the user PC into the machine
745 * dependent thread state info.
746 */
747void
0c530ab8 748thread_setentrypoint(thread_t thread, mach_vm_address_t entry)
4452a7af 749{
6d2010ae 750 pal_register_cache_state(thread, DIRTY);
d9a64523 751 if (thread_is_64bit_addr(thread)) {
0a7de745 752 x86_saved_state64_t *iss64;
4452a7af 753
0c530ab8
A
754 iss64 = USER_REGS64(thread);
755
756 iss64->isf.rip = (uint64_t)entry;
757 } else {
0a7de745 758 x86_saved_state32_t *iss32;
2d21ac55 759
0c530ab8
A
760 iss32 = USER_REGS32(thread);
761
b0d623f7 762 iss32->eip = CAST_DOWN_EXPLICIT(unsigned int, entry);
0c530ab8
A
763 }
764}
765
766
2d21ac55 767kern_return_t
0c530ab8
A
768thread_setsinglestep(thread_t thread, int on)
769{
6d2010ae 770 pal_register_cache_state(thread, DIRTY);
d9a64523 771 if (thread_is_64bit_addr(thread)) {
0a7de745 772 x86_saved_state64_t *iss64;
0c530ab8
A
773
774 iss64 = USER_REGS64(thread);
775
0a7de745 776 if (on) {
2d21ac55 777 iss64->isf.rflags |= EFL_TF;
0a7de745 778 } else {
2d21ac55 779 iss64->isf.rflags &= ~EFL_TF;
0a7de745 780 }
0c530ab8 781 } else {
0a7de745 782 x86_saved_state32_t *iss32;
2d21ac55 783
0c530ab8
A
784 iss32 = USER_REGS32(thread);
785
b0d623f7 786 if (on) {
2d21ac55 787 iss32->efl |= EFL_TF;
b0d623f7 788 /* Ensure IRET */
0a7de745 789 if (iss32->cs == SYSENTER_CS) {
b0d623f7 790 iss32->cs = SYSENTER_TF_CS;
0a7de745
A
791 }
792 } else {
2d21ac55 793 iss32->efl &= ~EFL_TF;
0a7de745 794 }
0c530ab8 795 }
0a7de745
A
796
797 return KERN_SUCCESS;
0c530ab8
A
798}
799
0c530ab8 800void *
39037602 801get_user_regs(thread_t th)
0c530ab8 802{
39037602 803 pal_register_cache_state(th, DIRTY);
0a7de745 804 return USER_STATE(th);
0c530ab8 805}
91447636 806
2d21ac55 807void *
39037602 808find_user_regs(thread_t thread)
2d21ac55 809{
39037602 810 return get_user_regs(thread);
2d21ac55
A
811}
812
813#if CONFIG_DTRACE
814/*
815 * DTrace would like to have a peek at the kernel interrupt state, if available.
2d21ac55 816 */
b0d623f7 817x86_saved_state_t *find_kern_regs(thread_t);
2d21ac55 818
b0d623f7 819x86_saved_state_t *
2d21ac55
A
820find_kern_regs(thread_t thread)
821{
0a7de745
A
822 if (thread == current_thread() &&
823 NULL != current_cpu_datap()->cpu_int_state &&
824 !(USER_STATE(thread) == current_cpu_datap()->cpu_int_state &&
825 current_cpu_datap()->cpu_interrupt_level == 1)) {
b0d623f7 826 return current_cpu_datap()->cpu_int_state;
2d21ac55
A
827 } else {
828 return NULL;
829 }
830}
831
832vm_offset_t dtrace_get_cpu_int_stack_top(void);
833
834vm_offset_t
835dtrace_get_cpu_int_stack_top(void)
836{
837 return current_cpu_datap()->cpu_int_stack_top;
838}
839#endif