]> git.saurik.com Git - apple/xnu.git/blame - osfmk/i386/bsd_i386.c
xnu-6153.11.26.tar.gz
[apple/xnu.git] / osfmk / i386 / bsd_i386.c
CommitLineData
1c79356b 1/*
cb323159 2 * Copyright (c) 2000-2019 Apple Inc. All rights reserved.
1c79356b 3 *
2d21ac55 4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
0a7de745 5 *
2d21ac55
A
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
0a7de745 14 *
2d21ac55
A
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
0a7de745 17 *
2d21ac55
A
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
8f6c56a5
A
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
2d21ac55
A
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
0a7de745 25 *
2d21ac55 26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
1c79356b 27 */
0a7de745 28#ifdef MACH_BSD
1c79356b
A
29#include <mach_debug.h>
30#include <mach_ldebug.h>
31
32#include <mach/kern_return.h>
91447636 33#include <mach/mach_traps.h>
1c79356b
A
34#include <mach/thread_status.h>
35#include <mach/vm_param.h>
1c79356b
A
36
37#include <kern/counters.h>
38#include <kern/cpu_data.h>
39#include <kern/mach_param.h>
40#include <kern/task.h>
41#include <kern/thread.h>
1c79356b
A
42#include <kern/sched_prim.h>
43#include <kern/misc_protos.h>
44#include <kern/assert.h>
b0d623f7 45#include <kern/debug.h>
1c79356b 46#include <kern/spl.h>
55e303ae 47#include <kern/syscall_sw.h>
1c79356b
A
48#include <ipc/ipc_port.h>
49#include <vm/vm_kern.h>
50#include <vm/pmap.h>
51
91447636 52#include <i386/cpu_number.h>
1c79356b
A
53#include <i386/eflags.h>
54#include <i386/proc_reg.h>
1c79356b
A
55#include <i386/tss.h>
56#include <i386/user_ldt.h>
57#include <i386/fpu.h>
1c79356b 58#include <i386/machdep_call.h>
91447636 59#include <i386/vmparam.h>
b0d623f7
A
60#include <i386/mp_desc.h>
61#include <i386/misc_protos.h>
62#include <i386/thread.h>
0c530ab8 63#include <i386/trap.h>
b0d623f7 64#include <i386/seg.h>
0c530ab8 65#include <mach/i386/syscall_sw.h>
9bccf70c 66#include <sys/syscall.h>
91447636 67#include <sys/kdebug.h>
0c530ab8 68#include <sys/errno.h>
91447636
A
69#include <../bsd/sys/sysent.h>
70
b0d623f7 71#ifdef MACH_BSD
0a7de745 72extern void mach_kauth_cred_uthread_update(void);
39236c6e 73extern void throttle_lowpri_io(int);
b0d623f7
A
74#endif
75
0c530ab8
A
76void * find_user_regs(thread_t);
77
1c79356b
A
78unsigned int get_msr_exportmask(void);
79
80unsigned int get_msr_nbits(void);
81
82unsigned int get_msr_rbits(void);
83
84/*
85 * thread_userstack:
86 *
87 * Return the user stack pointer from the machine
88 * dependent thread state info.
89 */
90kern_return_t
91thread_userstack(
0a7de745
A
92 __unused thread_t thread,
93 int flavor,
94 thread_state_t tstate,
95 __unused unsigned int count,
96 mach_vm_offset_t *user_stack,
97 int *customstack,
98 __unused boolean_t is64bit
99 )
1c79356b 100{
0a7de745 101 if (customstack) {
2d21ac55 102 *customstack = 0;
0a7de745 103 }
4452a7af 104
2d21ac55
A
105 switch (flavor) {
106 case x86_THREAD_STATE32:
0a7de745
A
107 {
108 x86_thread_state32_t *state25;
2d21ac55 109
0a7de745 110 state25 = (x86_thread_state32_t *) tstate;
2d21ac55 111
0a7de745
A
112 if (state25->esp) {
113 *user_stack = state25->esp;
114 if (customstack) {
115 *customstack = 1;
116 }
117 } else {
118 *user_stack = VM_USRSTACK32;
119 if (customstack) {
120 *customstack = 0;
6d2010ae 121 }
2d21ac55 122 }
0a7de745
A
123 break;
124 }
0c530ab8 125
0a7de745
A
126 case x86_THREAD_FULL_STATE64:
127 /* FALL THROUGH */
2d21ac55 128 case x86_THREAD_STATE64:
0a7de745
A
129 {
130 x86_thread_state64_t *state25;
2d21ac55 131
0a7de745 132 state25 = (x86_thread_state64_t *) tstate;
2d21ac55 133
0a7de745
A
134 if (state25->rsp) {
135 *user_stack = state25->rsp;
136 if (customstack) {
137 *customstack = 1;
138 }
139 } else {
140 *user_stack = VM_USRSTACK64;
141 if (customstack) {
142 *customstack = 0;
6d2010ae 143 }
2d21ac55 144 }
0a7de745
A
145 break;
146 }
1c79356b 147
2d21ac55 148 default:
0a7de745 149 return KERN_INVALID_ARGUMENT;
2d21ac55 150 }
0c530ab8 151
0a7de745 152 return KERN_SUCCESS;
2d21ac55 153}
1c79356b 154
316670eb
A
155/*
156 * thread_userstackdefault:
157 *
158 * Return the default stack location for the
159 * thread, if otherwise unknown.
160 */
161kern_return_t
162thread_userstackdefault(
39037602
A
163 mach_vm_offset_t *default_user_stack,
164 boolean_t is64bit)
316670eb 165{
39037602 166 if (is64bit) {
316670eb
A
167 *default_user_stack = VM_USRSTACK64;
168 } else {
169 *default_user_stack = VM_USRSTACK32;
170 }
0a7de745 171 return KERN_SUCCESS;
316670eb 172}
0c530ab8 173
1c79356b
A
174kern_return_t
175thread_entrypoint(
0a7de745
A
176 __unused thread_t thread,
177 int flavor,
178 thread_state_t tstate,
179 __unused unsigned int count,
180 mach_vm_offset_t *entry_point
181 )
182{
2d21ac55 183 /*
0c530ab8
A
184 * Set a default.
185 */
0a7de745 186 if (*entry_point == 0) {
2d21ac55 187 *entry_point = VM_MIN_ADDRESS;
0a7de745 188 }
2d21ac55 189
0c530ab8 190 switch (flavor) {
2d21ac55 191 case x86_THREAD_STATE32:
0a7de745
A
192 {
193 x86_thread_state32_t *state25;
0c530ab8 194
0a7de745
A
195 state25 = (i386_thread_state_t *) tstate;
196 *entry_point = state25->eip ? state25->eip : VM_MIN_ADDRESS;
197 break;
198 }
6601e61a 199
2d21ac55 200 case x86_THREAD_STATE64:
0a7de745
A
201 {
202 x86_thread_state64_t *state25;
0c530ab8 203
0a7de745
A
204 state25 = (x86_thread_state64_t *) tstate;
205 *entry_point = state25->rip ? state25->rip : VM_MIN_ADDRESS64;
206 break;
207 }
2d21ac55 208 }
0a7de745 209 return KERN_SUCCESS;
2d21ac55 210}
1c79356b 211
0a7de745 212/*
1c79356b
A
213 * FIXME - thread_set_child
214 */
215
91447636 216void thread_set_child(thread_t child, int pid);
1c79356b 217void
91447636 218thread_set_child(thread_t child, int pid)
1c79356b 219{
6d2010ae 220 pal_register_cache_state(child, DIRTY);
b0d623f7 221
d9a64523 222 if (thread_is_64bit_addr(child)) {
0a7de745 223 x86_saved_state64_t *iss64;
0c530ab8 224
0c530ab8
A
225 iss64 = USER_REGS64(child);
226
227 iss64->rax = pid;
228 iss64->rdx = 1;
229 iss64->isf.rflags &= ~EFL_CF;
230 } else {
0a7de745 231 x86_saved_state32_t *iss32;
2d21ac55 232
0c530ab8
A
233 iss32 = USER_REGS32(child);
234
235 iss32->eax = pid;
236 iss32->edx = 1;
237 iss32->efl &= ~EFL_CF;
238 }
1c79356b 239}
0c530ab8
A
240
241
1c79356b 242
1c79356b
A
243/*
244 * System Call handling code
245 */
246
91447636
A
247extern long fuword(vm_offset_t);
248
/*
 * machdep_syscall:
 *
 * Dispatch a 32-bit machine-dependent system call. The trap number is in
 * eax; up to machdep_call_count entries exist in machdep_call_table.
 * Arguments are copied in from the user stack. Never returns normally:
 * always exits via thread_exception_return().
 */
__attribute__((noreturn))
void
machdep_syscall(x86_saved_state_t *state)
{
	int args[machdep_call_count];
	int trapno;
	int nargs;
	const machdep_call_t *entry;
	x86_saved_state32_t *regs;

	assert(is_saved_state32(state));
	regs = saved_state32(state);

	trapno = regs->eax;
#if DEBUG_TRACE
	kprintf("machdep_syscall(0x%08x) code=%d\n", regs, trapno);
#endif

	DEBUG_KPRINT_SYSCALL_MDEP(
		"machdep_syscall: trapno=%d\n", trapno);

	/* Out-of-range trap number: report invalid and bail to user mode. */
	if (trapno < 0 || trapno >= machdep_call_count) {
		regs->eax = (unsigned int)kern_invalid(NULL);

		thread_exception_return();
		/* NOTREACHED */
	}
	entry = &machdep_call_table[trapno];
	nargs = entry->nargs;

	if (nargs != 0) {
		/* Args live just above the return address on the user stack,
		 * hence the +sizeof(int) offset from uesp. */
		if (copyin((user_addr_t) regs->uesp + sizeof(int),
		    (char *) args, (nargs * sizeof(int)))) {
			regs->eax = KERN_INVALID_ADDRESS;

			thread_exception_return();
			/* NOTREACHED */
		}
	}
	switch (nargs) {
	case 0:
		regs->eax = (*entry->routine.args_0)();
		break;
	case 1:
		regs->eax = (*entry->routine.args_1)(args[0]);
		break;
	case 2:
		regs->eax = (*entry->routine.args_2)(args[0], args[1]);
		break;
	case 3:
		if (!entry->bsd_style) {
			regs->eax = (*entry->routine.args_3)(args[0], args[1], args[2]);
		} else {
			/* BSD-style calls return errno separately and signal
			 * failure through the carry flag. */
			int error;
			uint32_t rval;

			error = (*entry->routine.args_bsd_3)(&rval, args[0], args[1], args[2]);
			if (error) {
				regs->eax = error;
				regs->efl |= EFL_CF; /* carry bit */
			} else {
				regs->eax = rval;
				regs->efl &= ~EFL_CF;
			}
		}
		break;
	case 4:
		regs->eax = (*entry->routine.args_4)(args[0], args[1], args[2], args[3]);
		break;

	default:
		panic("machdep_syscall: too many args");
	}

	DEBUG_KPRINT_SYSCALL_MDEP("machdep_syscall: retval=%u\n", regs->eax);

#if DEBUG || DEVELOPMENT
	/* Verify no leaked allocation-name tag survives the syscall. */
	kern_allocation_name_t
	    prior __assert_only = thread_get_kernel_state(current_thread())->allocation_name;
	assertf(prior == NULL, "thread_set_allocation_name(\"%s\") not cleared", kern_allocation_get_name(prior));
#endif /* DEBUG || DEVELOPMENT */

	throttle_lowpri_io(1);

	thread_exception_return();
	/* NOTREACHED */
}
336
39037602 337__attribute__((noreturn))
1c79356b 338void
0c530ab8 339machdep_syscall64(x86_saved_state_t *state)
1c79356b 340{
0a7de745
A
341 int trapno;
342 const machdep_call_t *entry;
343 x86_saved_state64_t *regs;
1c79356b 344
0c530ab8
A
345 assert(is_saved_state64(state));
346 regs = saved_state64(state);
0a7de745 347
b0d623f7
A
348 trapno = (int)(regs->rax & SYSCALL_NUMBER_MASK);
349
350 DEBUG_KPRINT_SYSCALL_MDEP(
351 "machdep_syscall64: trapno=%d\n", trapno);
1c79356b 352
0c530ab8 353 if (trapno < 0 || trapno >= machdep_call_count) {
2d21ac55 354 regs->rax = (unsigned int)kern_invalid(NULL);
1c79356b 355
0c530ab8
A
356 thread_exception_return();
357 /* NOTREACHED */
1c79356b 358 }
0c530ab8 359 entry = &machdep_call_table64[trapno];
1c79356b 360
0c530ab8 361 switch (entry->nargs) {
2d21ac55
A
362 case 0:
363 regs->rax = (*entry->routine.args_0)();
6601e61a 364 break;
2d21ac55 365 case 1:
0c530ab8 366 regs->rax = (*entry->routine.args64_1)(regs->rdi);
55e303ae 367 break;
fe8ab488
A
368 case 2:
369 regs->rax = (*entry->routine.args64_2)(regs->rdi, regs->rsi);
370 break;
0a7de745
A
371 case 3:
372 if (!entry->bsd_style) {
373 regs->rax = (*entry->routine.args64_3)(regs->rdi, regs->rsi, regs->rdx);
374 } else {
375 int error;
376 uint32_t rval;
377
378 error = (*entry->routine.args64_bsd_3)(&rval, regs->rdi, regs->rsi, regs->rdx);
379 if (error) {
380 regs->rax = (uint64_t)error;
381 regs->isf.rflags |= EFL_CF; /* carry bit */
382 } else {
383 regs->rax = rval;
384 regs->isf.rflags &= ~(uint64_t)EFL_CF;
385 }
386 }
387 break;
2d21ac55
A
388 default:
389 panic("machdep_syscall64: too many args");
55e303ae 390 }
6601e61a 391
b0d623f7
A
392 DEBUG_KPRINT_SYSCALL_MDEP("machdep_syscall: retval=%llu\n", regs->rax);
393
5ba3f43e
A
394#if DEBUG || DEVELOPMENT
395 kern_allocation_name_t
396 prior __assert_only = thread_get_kernel_state(current_thread())->allocation_name;
397 assertf(prior == NULL, "thread_set_allocation_name(\"%s\") not cleared", kern_allocation_get_name(prior));
398#endif /* DEBUG || DEVELOPMENT */
399
39236c6e 400 throttle_lowpri_io(1);
593a1d5f 401
0c530ab8
A
402 thread_exception_return();
403 /* NOTREACHED */
1c79356b
A
404}
405
0a7de745 406#endif /* MACH_BSD */
4452a7af 407
21362eb3 408
/* Generic signature for a Mach trap handler entry. */
typedef kern_return_t (*mach_call_t)(void *);

/*
 * Argument block passed to Mach trap handlers. Mach traps take at most
 * nine arguments; the first six arrive in registers on x86_64 and the
 * remainder are copied in from the user stack.
 */
struct mach_call_args {
	syscall_arg_t arg1;
	syscall_arg_t arg2;
	syscall_arg_t arg3;
	syscall_arg_t arg4;
	syscall_arg_t arg5;
	syscall_arg_t arg6;
	syscall_arg_t arg7;
	syscall_arg_t arg8;
	syscall_arg_t arg9;
};
4452a7af 422
static kern_return_t
mach_call_arg_munger32(uint32_t sp, struct mach_call_args *args, const mach_trap_t *trapp);

/*
 * mach_call_arg_munger32:
 *
 * Copy a 32-bit caller's trap arguments from its user stack (skipping
 * the return address at sp) and widen them in place to the 64-bit
 * syscall_arg_t layout expected by the trap handler.
 */
static kern_return_t
mach_call_arg_munger32(uint32_t sp, struct mach_call_args *args, const mach_trap_t *trapp)
{
	/* Arguments begin one int above sp (past the return address). */
	if (copyin((user_addr_t)(sp + sizeof(int)), (char *)args, trapp->mach_trap_u32_words * sizeof(int))) {
		return KERN_INVALID_ARGUMENT;
	}
#if CONFIG_REQUIRES_U32_MUNGING
	/* Expand packed 32-bit words to 64-bit argument slots. */
	trapp->mach_trap_arg_munge32(args);
#else
#error U32 mach traps on x86_64 kernel requires munging
#endif
	return KERN_SUCCESS;
}
4452a7af
A
440
441
__private_extern__ void mach_call_munger(x86_saved_state_t *state);

extern const char *mach_syscall_name_table[];

/*
 * mach_call_munger:
 *
 * Entry point for Mach traps from 32-bit user space. Mach trap numbers
 * are negative in eax; arguments are munged in from the user stack.
 * Never returns normally: exits via thread_exception_return() or raises
 * EXC_SYSCALL for invalid trap numbers.
 */
__attribute__((noreturn))
void
mach_call_munger(x86_saved_state_t *state)
{
	int argc;
	int call_number;
	mach_call_t mach_call;
	kern_return_t retval;
	struct mach_call_args args = {
		.arg1 = 0,
		.arg2 = 0,
		.arg3 = 0,
		.arg4 = 0,
		.arg5 = 0,
		.arg6 = 0,
		.arg7 = 0,
		.arg8 = 0,
		.arg9 = 0
	};
	x86_saved_state32_t *regs;

	/* Reset proc-refcount debugging state for this syscall boundary. */
	struct uthread *ut = get_bsdthread_info(current_thread());
	uthread_reset_proc_refcount(ut);

	assert(is_saved_state32(state));
	regs = saved_state32(state);

	/* Mach traps are encoded as negative numbers in eax. */
	call_number = -(regs->eax);

	DEBUG_KPRINT_SYSCALL_MACH(
		"mach_call_munger: code=%d(%s)\n",
		call_number, mach_syscall_name_table[call_number]);
#if DEBUG_TRACE
	kprintf("mach_call_munger(0x%08x) code=%d\n", regs, call_number);
#endif

	if (call_number < 0 || call_number >= mach_trap_count) {
		i386_exception(EXC_SYSCALL, call_number, 1);
		/* NOTREACHED */
	}
	mach_call = (mach_call_t)mach_trap_table[call_number].mach_trap_function;

	if (mach_call == (mach_call_t)kern_invalid) {
		DEBUG_KPRINT_SYSCALL_MACH(
			"mach_call_munger: kern_invalid 0x%x\n", regs->eax);
		i386_exception(EXC_SYSCALL, call_number, 1);
		/* NOTREACHED */
	}

	argc = mach_trap_table[call_number].mach_trap_arg_count;
	if (argc) {
		/* Pull and widen the 32-bit arguments from the user stack. */
		retval = mach_call_arg_munger32(regs->uesp, &args, &mach_trap_table[call_number]);
		if (retval != KERN_SUCCESS) {
			regs->eax = retval;

			DEBUG_KPRINT_SYSCALL_MACH(
				"mach_call_munger: retval=0x%x\n", retval);

			thread_exception_return();
			/* NOTREACHED */
		}
	}

#ifdef MACH_BSD
	/* Keep the uthread's kauth credential cache current. */
	mach_kauth_cred_uthread_update();
#endif

	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
	    MACHDBG_CODE(DBG_MACH_EXCP_SC, (call_number)) | DBG_FUNC_START,
	    args.arg1, args.arg2, args.arg3, args.arg4, 0);

	retval = mach_call(&args);

	DEBUG_KPRINT_SYSCALL_MACH("mach_call_munger: retval=0x%x\n", retval);

	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
	    MACHDBG_CODE(DBG_MACH_EXCP_SC, (call_number)) | DBG_FUNC_END,
	    retval, 0, 0, 0, 0);

	regs->eax = retval;

#if DEBUG || DEVELOPMENT
	/* Verify no leaked allocation-name tag survives the syscall. */
	kern_allocation_name_t
	    prior __assert_only = thread_get_kernel_state(current_thread())->allocation_name;
	assertf(prior == NULL, "thread_set_allocation_name(\"%s\") not cleared", kern_allocation_get_name(prior));
#endif /* DEBUG || DEVELOPMENT */

	throttle_lowpri_io(1);

#if PROC_REF_DEBUG
	/* A trap must not exit still holding proc references. */
	if (__improbable(uthread_get_proc_refcount(ut) != 0)) {
		panic("system call returned with uu_proc_refcount != 0");
	}
#endif

	thread_exception_return();
	/* NOTREACHED */
}
544
545
__private_extern__ void mach_call_munger64(x86_saved_state_t *regs);

/*
 * mach_call_munger64:
 *
 * Entry point for Mach traps from 64-bit user space. The trap number is
 * taken from rax; the first six arguments arrive in registers per the
 * syscall ABI, with any remainder (up to nine total) copied in from the
 * user stack. Never returns normally: exits via thread_exception_return()
 * or raises EXC_SYSCALL for invalid trap numbers.
 */
__attribute__((noreturn))
void
mach_call_munger64(x86_saved_state_t *state)
{
	int call_number;
	int argc;
	mach_call_t mach_call;
	struct mach_call_args args = {
		.arg1 = 0,
		.arg2 = 0,
		.arg3 = 0,
		.arg4 = 0,
		.arg5 = 0,
		.arg6 = 0,
		.arg7 = 0,
		.arg8 = 0,
		.arg9 = 0
	};
	x86_saved_state64_t *regs;

	/* Reset proc-refcount debugging state for this syscall boundary. */
	struct uthread *ut = get_bsdthread_info(current_thread());
	uthread_reset_proc_refcount(ut);

	assert(is_saved_state64(state));
	regs = saved_state64(state);

	call_number = (int)(regs->rax & SYSCALL_NUMBER_MASK);

	DEBUG_KPRINT_SYSCALL_MACH(
		"mach_call_munger64: code=%d(%s)\n",
		call_number, mach_syscall_name_table[call_number]);

	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
	    MACHDBG_CODE(DBG_MACH_EXCP_SC, (call_number)) | DBG_FUNC_START,
	    regs->rdi, regs->rsi, regs->rdx, regs->r10, 0);

	if (call_number < 0 || call_number >= mach_trap_count) {
		i386_exception(EXC_SYSCALL, regs->rax, 1);
		/* NOTREACHED */
	}
	mach_call = (mach_call_t)mach_trap_table[call_number].mach_trap_function;

	if (mach_call == (mach_call_t)kern_invalid) {
		i386_exception(EXC_SYSCALL, regs->rax, 1);
		/* NOTREACHED */
	}
	argc = mach_trap_table[call_number].mach_trap_arg_count;
	if (argc) {
		/* First six args come straight from rdi..r9 (contiguous in
		 * the saved-state layout starting at rdi). */
		int args_in_regs = MIN(6, argc);
		__nochk_memcpy(&args.arg1, &regs->rdi, args_in_regs * sizeof(syscall_arg_t));

		if (argc > 6) {
			int copyin_count;

			assert(argc <= 9);
			copyin_count = (argc - 6) * (int)sizeof(syscall_arg_t);

			/* Remaining args sit above the return address on the
			 * user stack. */
			if (copyin((user_addr_t)(regs->isf.rsp + sizeof(user_addr_t)), (char *)&args.arg7, copyin_count)) {
				regs->rax = KERN_INVALID_ARGUMENT;

				thread_exception_return();
				/* NOTREACHED */
			}
		}
	}

#ifdef MACH_BSD
	/* Keep the uthread's kauth credential cache current. */
	mach_kauth_cred_uthread_update();
#endif

	regs->rax = (uint64_t)mach_call((void *)&args);

	DEBUG_KPRINT_SYSCALL_MACH( "mach_call_munger64: retval=0x%llx\n", regs->rax);

	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
	    MACHDBG_CODE(DBG_MACH_EXCP_SC, (call_number)) | DBG_FUNC_END,
	    regs->rax, 0, 0, 0, 0);

#if DEBUG || DEVELOPMENT
	/* Verify no leaked allocation-name tag survives the syscall. */
	kern_allocation_name_t
	    prior __assert_only = thread_get_kernel_state(current_thread())->allocation_name;
	assertf(prior == NULL, "thread_set_allocation_name(\"%s\") not cleared", kern_allocation_get_name(prior));
#endif /* DEBUG || DEVELOPMENT */

	throttle_lowpri_io(1);

#if PROC_REF_DEBUG
	/* A trap must not exit still holding proc references. */
	if (__improbable(uthread_get_proc_refcount(ut) != 0)) {
		panic("system call returned with uu_proc_refcount != 0");
	}
#endif

	thread_exception_return();
	/* NOTREACHED */
}
643
0c530ab8 644
91447636
A
645/*
646 * thread_setuserstack:
647 *
648 * Sets the user stack pointer into the machine
649 * dependent thread state info.
650 */
651void
652thread_setuserstack(
0a7de745
A
653 thread_t thread,
654 mach_vm_address_t user_stack)
91447636 655{
6d2010ae 656 pal_register_cache_state(thread, DIRTY);
d9a64523 657 if (thread_is_64bit_addr(thread)) {
0a7de745 658 x86_saved_state64_t *iss64;
0c530ab8
A
659
660 iss64 = USER_REGS64(thread);
5d5c5d0d 661
0c530ab8
A
662 iss64->isf.rsp = (uint64_t)user_stack;
663 } else {
0a7de745 664 x86_saved_state32_t *iss32;
2d21ac55 665
0c530ab8
A
666 iss32 = USER_REGS32(thread);
667
b0d623f7 668 iss32->uesp = CAST_DOWN_EXPLICIT(unsigned int, user_stack);
0c530ab8 669 }
91447636
A
670}
671
672/*
673 * thread_adjuserstack:
674 *
675 * Returns the adjusted user stack pointer from the machine
676 * dependent thread state info. Used for small (<2G) deltas.
677 */
678uint64_t
679thread_adjuserstack(
0a7de745
A
680 thread_t thread,
681 int adjust)
91447636 682{
6d2010ae 683 pal_register_cache_state(thread, DIRTY);
d9a64523 684 if (thread_is_64bit_addr(thread)) {
0a7de745 685 x86_saved_state64_t *iss64;
5d5c5d0d 686
0c530ab8
A
687 iss64 = USER_REGS64(thread);
688
689 iss64->isf.rsp += adjust;
690
691 return iss64->isf.rsp;
692 } else {
0a7de745 693 x86_saved_state32_t *iss32;
2d21ac55 694
0c530ab8
A
695 iss32 = USER_REGS32(thread);
696
697 iss32->uesp += adjust;
698
699 return CAST_USER_ADDR_T(iss32->uesp);
700 }
91447636
A
701}
702
703/*
704 * thread_setentrypoint:
705 *
706 * Sets the user PC into the machine
707 * dependent thread state info.
708 */
709void
0c530ab8 710thread_setentrypoint(thread_t thread, mach_vm_address_t entry)
4452a7af 711{
6d2010ae 712 pal_register_cache_state(thread, DIRTY);
d9a64523 713 if (thread_is_64bit_addr(thread)) {
0a7de745 714 x86_saved_state64_t *iss64;
4452a7af 715
0c530ab8
A
716 iss64 = USER_REGS64(thread);
717
718 iss64->isf.rip = (uint64_t)entry;
719 } else {
0a7de745 720 x86_saved_state32_t *iss32;
2d21ac55 721
0c530ab8
A
722 iss32 = USER_REGS32(thread);
723
b0d623f7 724 iss32->eip = CAST_DOWN_EXPLICIT(unsigned int, entry);
0c530ab8
A
725 }
726}
727
728
2d21ac55 729kern_return_t
0c530ab8
A
730thread_setsinglestep(thread_t thread, int on)
731{
6d2010ae 732 pal_register_cache_state(thread, DIRTY);
d9a64523 733 if (thread_is_64bit_addr(thread)) {
0a7de745 734 x86_saved_state64_t *iss64;
0c530ab8
A
735
736 iss64 = USER_REGS64(thread);
737
0a7de745 738 if (on) {
2d21ac55 739 iss64->isf.rflags |= EFL_TF;
0a7de745 740 } else {
2d21ac55 741 iss64->isf.rflags &= ~EFL_TF;
0a7de745 742 }
0c530ab8 743 } else {
0a7de745 744 x86_saved_state32_t *iss32;
2d21ac55 745
0c530ab8
A
746 iss32 = USER_REGS32(thread);
747
b0d623f7 748 if (on) {
2d21ac55 749 iss32->efl |= EFL_TF;
b0d623f7 750 /* Ensure IRET */
0a7de745 751 if (iss32->cs == SYSENTER_CS) {
b0d623f7 752 iss32->cs = SYSENTER_TF_CS;
0a7de745
A
753 }
754 } else {
2d21ac55 755 iss32->efl &= ~EFL_TF;
0a7de745 756 }
0c530ab8 757 }
0a7de745
A
758
759 return KERN_SUCCESS;
0c530ab8
A
760}
761
0c530ab8 762void *
39037602 763get_user_regs(thread_t th)
0c530ab8 764{
39037602 765 pal_register_cache_state(th, DIRTY);
0a7de745 766 return USER_STATE(th);
0c530ab8 767}
91447636 768
2d21ac55 769void *
39037602 770find_user_regs(thread_t thread)
2d21ac55 771{
39037602 772 return get_user_regs(thread);
2d21ac55
A
773}
774
775#if CONFIG_DTRACE
776/*
777 * DTrace would like to have a peek at the kernel interrupt state, if available.
2d21ac55 778 */
b0d623f7 779x86_saved_state_t *find_kern_regs(thread_t);
2d21ac55 780
b0d623f7 781x86_saved_state_t *
2d21ac55
A
782find_kern_regs(thread_t thread)
783{
0a7de745
A
784 if (thread == current_thread() &&
785 NULL != current_cpu_datap()->cpu_int_state &&
786 !(USER_STATE(thread) == current_cpu_datap()->cpu_int_state &&
787 current_cpu_datap()->cpu_interrupt_level == 1)) {
b0d623f7 788 return current_cpu_datap()->cpu_int_state;
2d21ac55
A
789 } else {
790 return NULL;
791 }
792}
793
vm_offset_t dtrace_get_cpu_int_stack_top(void);

/* Return the top of the current CPU's interrupt stack (for DTrace). */
vm_offset_t
dtrace_get_cpu_int_stack_top(void)
{
	return current_cpu_datap()->cpu_int_stack_top;
}
801#endif