/* osfmk/i386/bsd_i386.c (xnu-1504.15.3) */
/*
 * Copyright (c) 2000-2008 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#ifdef MACH_BSD
#include <mach_rt.h>
#include <mach_debug.h>
#include <mach_ldebug.h>

#include <mach/kern_return.h>
#include <mach/mach_traps.h>
#include <mach/thread_status.h>
#include <mach/vm_param.h>

#include <kern/counters.h>
#include <kern/cpu_data.h>
#include <kern/mach_param.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/sched_prim.h>
#include <kern/misc_protos.h>
#include <kern/assert.h>
#include <kern/debug.h>
#include <kern/spl.h>
#include <kern/syscall_sw.h>
#include <ipc/ipc_port.h>
#include <vm/vm_kern.h>
#include <vm/pmap.h>

#include <i386/cpu_number.h>
#include <i386/eflags.h>
#include <i386/proc_reg.h>
#include <i386/tss.h>
#include <i386/user_ldt.h>
#include <i386/fpu.h>
#include <i386/machdep_call.h>
#include <i386/vmparam.h>
#include <i386/mp_desc.h>
#include <i386/misc_protos.h>
#include <i386/thread.h>
#include <i386/trap.h>
#include <i386/seg.h>
#include <mach/i386/syscall_sw.h>
#include <sys/syscall.h>
#include <sys/kdebug.h>
#include <sys/errno.h>
#include <../bsd/sys/sysent.h>

#ifdef MACH_BSD
extern void mach_kauth_cred_uthread_update(void);
#endif

kern_return_t
thread_userstack(
	thread_t,
	int,
	thread_state_t,
	unsigned int,
	mach_vm_offset_t *,
	int *
);

kern_return_t
thread_entrypoint(
	thread_t,
	int,
	thread_state_t,
	unsigned int,
	mach_vm_offset_t *
);

void * find_user_regs(thread_t);

unsigned int get_msr_exportmask(void);

unsigned int get_msr_nbits(void);

unsigned int get_msr_rbits(void);

extern void throttle_lowpri_io(boolean_t);


/*
 * thread_userstack:
 *
 * Return the user stack pointer from the machine
 * dependent thread state info.
 */
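/*
 * A non-zero stack pointer in the supplied thread state is taken to mean
 * the caller provided its own (custom) stack; *customstack reports that
 * back so the caller can skip setting up the default stack.
 */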
kern_return_t
thread_userstack(
	__unused thread_t	thread,
	int			flavor,
	thread_state_t		tstate,
	__unused unsigned int	count,
	user_addr_t		*user_stack,
	int			*customstack
)
{
	if (customstack)
		*customstack = 0;

	switch (flavor) {
	case x86_THREAD_STATE32:
		{
			x86_thread_state32_t *state25;

			state25 = (x86_thread_state32_t *) tstate;

			if (state25->esp)
				*user_stack = state25->esp;
			else
				*user_stack = VM_USRSTACK32;
			if (customstack && state25->esp)
				*customstack = 1;
			else
				*customstack = 0;
			break;
		}

	case x86_THREAD_STATE64:
		{
			x86_thread_state64_t *state25;

			state25 = (x86_thread_state64_t *) tstate;

			if (state25->rsp)
				*user_stack = state25->rsp;
			else
				*user_stack = VM_USRSTACK64;
			if (customstack && state25->rsp)
				*customstack = 1;
			else
				*customstack = 0;
			break;
		}

	default:
		return (KERN_INVALID_ARGUMENT);
	}

	return (KERN_SUCCESS);
}


kern_return_t
thread_entrypoint(
	__unused thread_t	thread,
	int			flavor,
	thread_state_t		tstate,
	__unused unsigned int	count,
	mach_vm_offset_t	*entry_point
)
{
	/*
	 * Set a default.
	 */
	if (*entry_point == 0)
		*entry_point = VM_MIN_ADDRESS;

	switch (flavor) {
	case x86_THREAD_STATE32:
		{
			x86_thread_state32_t *state25;

			state25 = (i386_thread_state_t *) tstate;
			*entry_point = state25->eip ? state25->eip: VM_MIN_ADDRESS;
			break;
		}

	case x86_THREAD_STATE64:
		{
			x86_thread_state64_t *state25;

			state25 = (x86_thread_state64_t *) tstate;
			*entry_point = state25->rip ? state25->rip: VM_MIN_ADDRESS64;
			break;
		}
	}
	return (KERN_SUCCESS);
}

/*
 * Duplicate parent state in child
 * for U**X fork.
 */
kern_return_t
machine_thread_dup(
	thread_t	parent,
	thread_t	child
)
{
	pcb_t		parent_pcb;
	pcb_t		child_pcb;

	if ((child_pcb = child->machine.pcb) == NULL ||
	    (parent_pcb = parent->machine.pcb) == NULL)
		return (KERN_FAILURE);
	/*
	 * Copy over the x86_saved_state registers
	 */
	if (cpu_mode_is64bit()) {
		if (thread_is_64bit(parent))
			bcopy(USER_REGS64(parent), USER_REGS64(child), sizeof(x86_saved_state64_t));
		else
			bcopy(USER_REGS32(parent), USER_REGS32(child), sizeof(x86_saved_state_compat32_t));
	} else
		bcopy(USER_REGS32(parent), USER_REGS32(child), sizeof(x86_saved_state32_t));

	/*
	 * Check to see if parent is using floating point
	 * and if so, copy the registers to the child
	 */
	fpu_dup_fxstate(parent, child);

#ifdef MACH_BSD
	/*
	 * Copy the parent's cthread id and USER_CTHREAD descriptor, if 32-bit.
	 */
	child_pcb->cthread_self = parent_pcb->cthread_self;
	if (!thread_is_64bit(parent))
		child_pcb->cthread_desc = parent_pcb->cthread_desc;

	/*
	 * FIXME - should a user specified LDT, TSS and V86 info
	 * be duplicated as well?? - probably not.
	 */
	// Duplicate any user LDT entry that was set; this seems appropriate.
	if (parent_pcb->uldt_selector != 0) {
		child_pcb->uldt_selector = parent_pcb->uldt_selector;
		child_pcb->uldt_desc = parent_pcb->uldt_desc;
	}
#endif

	return (KERN_SUCCESS);
}

/*
 * FIXME - thread_set_child
 */

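/*
 * fork() returns two values to user space: %eax/%rax carries the pid and
 * %edx/%rdx distinguishes the child (1) from the parent (0); clearing the
 * carry flag marks the syscall as successful.
 */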
void thread_set_child(thread_t child, int pid);
void
thread_set_child(thread_t child, int pid)
{
	if (thread_is_64bit(child)) {
		x86_saved_state64_t	*iss64;

		iss64 = USER_REGS64(child);

		iss64->rax = pid;
		iss64->rdx = 1;
		iss64->isf.rflags &= ~EFL_CF;
	} else {
		x86_saved_state32_t	*iss32;

		iss32 = USER_REGS32(child);

		iss32->eax = pid;
		iss32->edx = 1;
		iss32->efl &= ~EFL_CF;
	}
}


void thread_set_parent(thread_t parent, int pid);

void
thread_set_parent(thread_t parent, int pid)
{
	if (thread_is_64bit(parent)) {
		x86_saved_state64_t	*iss64;

		iss64 = USER_REGS64(parent);

		iss64->rax = pid;
		iss64->rdx = 0;
		iss64->isf.rflags &= ~EFL_CF;
	} else {
		x86_saved_state32_t	*iss32;

		iss32 = USER_REGS32(parent);

		iss32->eax = pid;
		iss32->edx = 0;
		iss32->efl &= ~EFL_CF;
	}
}


/*
 * System Call handling code
 */

extern long fuword(vm_offset_t);


void
machdep_syscall(x86_saved_state_t *state)
{
	int			args[machdep_call_count];
	int			trapno;
	int			nargs;
	machdep_call_t		*entry;
	x86_saved_state32_t	*regs;

	assert(is_saved_state32(state));
	regs = saved_state32(state);

	trapno = regs->eax;
#if DEBUG_TRACE
	kprintf("machdep_syscall(0x%08x) code=%d\n", regs, trapno);
#endif

	DEBUG_KPRINT_SYSCALL_MDEP(
		"machdep_syscall: trapno=%d\n", trapno);

	if (trapno < 0 || trapno >= machdep_call_count) {
		regs->eax = (unsigned int)kern_invalid(NULL);

		thread_exception_return();
		/* NOTREACHED */
	}
	entry = &machdep_call_table[trapno];
	nargs = entry->nargs;

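	/*
	 * The word at regs->uesp is the user return address; the trap's
	 * arguments start one int above it, hence the copyin from
	 * uesp + sizeof (int).
	 */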
	if (nargs != 0) {
		if (copyin((user_addr_t) regs->uesp + sizeof (int),
			(char *) args, (nargs * sizeof (int)))) {
			regs->eax = KERN_INVALID_ADDRESS;

			thread_exception_return();
			/* NOTREACHED */
		}
	}
	switch (nargs) {
	case 0:
		regs->eax = (*entry->routine.args_0)();
		break;
	case 1:
		regs->eax = (*entry->routine.args_1)(args[0]);
		break;
	case 2:
		regs->eax = (*entry->routine.args_2)(args[0],args[1]);
		break;
	case 3:
		if (!entry->bsd_style)
			regs->eax = (*entry->routine.args_3)(args[0],args[1],args[2]);
		else {
			int		error;
			uint32_t	rval;

			error = (*entry->routine.args_bsd_3)(&rval, args[0], args[1], args[2]);
			if (error) {
				regs->eax = error;
				regs->efl |= EFL_CF;	/* carry bit */
			} else {
				regs->eax = rval;
				regs->efl &= ~EFL_CF;
			}
		}
		break;
	case 4:
		regs->eax = (*entry->routine.args_4)(args[0], args[1], args[2], args[3]);
		break;

	default:
		panic("machdep_syscall: too many args");
	}
	if (current_thread()->funnel_lock)
		(void) thread_funnel_set(current_thread()->funnel_lock, FALSE);

	DEBUG_KPRINT_SYSCALL_MDEP("machdep_syscall: retval=%u\n", regs->eax);

	throttle_lowpri_io(TRUE);

	thread_exception_return();
	/* NOTREACHED */
}


void
machdep_syscall64(x86_saved_state_t *state)
{
	int			trapno;
	machdep_call_t		*entry;
	x86_saved_state64_t	*regs;

	assert(is_saved_state64(state));
	regs = saved_state64(state);

	trapno = (int)(regs->rax & SYSCALL_NUMBER_MASK);

	DEBUG_KPRINT_SYSCALL_MDEP(
		"machdep_syscall64: trapno=%d\n", trapno);

	if (trapno < 0 || trapno >= machdep_call_count) {
		regs->rax = (unsigned int)kern_invalid(NULL);

		thread_exception_return();
		/* NOTREACHED */
	}
	entry = &machdep_call_table64[trapno];

	switch (entry->nargs) {
	case 0:
		regs->rax = (*entry->routine.args_0)();
		break;
	case 1:
		regs->rax = (*entry->routine.args64_1)(regs->rdi);
		break;
	default:
		panic("machdep_syscall64: too many args");
	}
	if (current_thread()->funnel_lock)
		(void) thread_funnel_set(current_thread()->funnel_lock, FALSE);

	DEBUG_KPRINT_SYSCALL_MDEP("machdep_syscall64: retval=%llu\n", regs->rax);

	throttle_lowpri_io(TRUE);

	thread_exception_return();
	/* NOTREACHED */
}

/*
 * thread_fast_set_cthread_self: Sets the machine kernel thread ID of the
 * current thread to the given thread ID; fast version for 32-bit processes
 *
 * Parameters:    self                    Thread ID to set
 *
 * Returns:        0                      Success
 *                !0                      Not success
 */
kern_return_t
thread_fast_set_cthread_self(uint32_t self)
{
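	/*
	 * Build a small user-mode data segment whose base is 'self' and
	 * install it in the USER_CTHREAD LDT slot; 32-bit user code then
	 * reaches its cthread/TSD area through the %gs selector set below.
	 */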
	thread_t thread = current_thread();
	pcb_t pcb = thread->machine.pcb;
	struct real_descriptor desc = {
		.limit_low = 1,
		.limit_high = 0,
		.base_low = self & 0xffff,
		.base_med = (self >> 16) & 0xff,
		.base_high = (self >> 24) & 0xff,
		.access = ACC_P|ACC_PL_U|ACC_DATA_W,
		.granularity = SZ_32|SZ_G,
	};

	current_thread()->machine.pcb->cthread_self = (uint64_t) self;	/* preserve old func too */

	/* assign descriptor */
	mp_disable_preemption();
	pcb->cthread_desc = desc;
	*ldt_desc_p(USER_CTHREAD) = desc;
	saved_state32(pcb->iss)->gs = USER_CTHREAD;
	mp_enable_preemption();

	return (USER_CTHREAD);
}

/*
 * thread_fast_set_cthread_self64: Sets the machine kernel thread ID of the
 * current thread to the given thread ID; fast version for 64-bit processes
 *
 * Parameters:    self                    Thread ID
 *
 * Returns:        0                      Success
 *                !0                      Not success
 */
kern_return_t
thread_fast_set_cthread_self64(uint64_t self)
{
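	/*
	 * While in the kernel the user %gs base is held in the
	 * KERNEL_GS_BASE MSR (swapgs exchanges it on entry and exit), so
	 * record the new value in the pcb, in the per-cpu cache, and in the
	 * MSR itself when the cached copies disagree.
	 */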
	pcb_t pcb = current_thread()->machine.pcb;
	cpu_data_t *cdp;

	/* check for canonical address, set 0 otherwise */
	if (!IS_USERADDR64_CANONICAL(self))
		self = 0ULL;

	pcb->cthread_self = self;
	mp_disable_preemption();
	cdp = current_cpu_datap();
#if defined(__x86_64__)
	if ((cdp->cpu_uber.cu_user_gs_base != pcb->cthread_self) ||
	    (pcb->cthread_self != rdmsr64(MSR_IA32_KERNEL_GS_BASE)))
		wrmsr64(MSR_IA32_KERNEL_GS_BASE, self);
#endif
	cdp->cpu_uber.cu_user_gs_base = self;
	mp_enable_preemption();
	return (USER_CTHREAD);
}

/*
 * thread_set_user_ldt routine is the interface for the user level
 * settable ldt entry feature. Allowing a user to create arbitrary
 * ldt entries seems to be too large of a security hole, so instead
 * this mechanism is in place to allow user level processes to have
 * an ldt entry that can be used in conjunction with the FS register.
 *
 * Swapping occurs inside the pcb.c file along with initialization
 * when a thread is created. The basic functioning theory is that the
 * pcb->uldt_selector variable will contain either 0, meaning the
 * process has not set up any entry, or the selector to be used in
 * the FS register. pcb->uldt_desc contains the actual descriptor the
 * user has set up, stored in machine usable ldt format.
 *
 * Currently one entry is shared by all threads (USER_SETTABLE), but
 * this could be changed in the future by changing how this routine
 * allocates the selector. There seems to be no real reason at this
 * time to have this added feature, but in the future it might be
 * needed.
 *
 * address is the linear address of the start of the data area; size
 * is the size in bytes of the area; flags should always be set to 0
 * for now. In the future it could be used to set R/W permissions or
 * other functions. Currently the segment is created as a data segment
 * up to 1 megabyte in size with full read/write permissions only.
 *
 * This call returns the segment selector or -1 if any error occurs.
 */
kern_return_t
thread_set_user_ldt(uint32_t address, uint32_t size, uint32_t flags)
{
	pcb_t		pcb;
	struct fake_descriptor temp;
	int		mycpu;

	if (flags != 0)
		return -1;		// flags not supported
	if (size > 0xFFFFF)
		return -1;		// size too big, 1 meg is the limit

	mp_disable_preemption();
	mycpu = cpu_number();

	// create a "fake" descriptor so we can use fix_desc()
	// to build a real one...
	//   32 bit default operation size
	//   standard read/write perms for a data segment
	pcb = (pcb_t)current_thread()->machine.pcb;
	temp.offset = address;
	temp.lim_or_seg = size;
	temp.size_or_wdct = SZ_32;
	temp.access = ACC_P|ACC_PL_U|ACC_DATA_W;

	// turn this into a real descriptor
	fix_desc(&temp,1);

	// set up our data in the pcb
	pcb->uldt_desc = *(struct real_descriptor*)&temp;
	pcb->uldt_selector = USER_SETTABLE;		// set the selector value

	// now set it up in the current table...
	*ldt_desc_p(USER_SETTABLE) = *(struct real_descriptor*)&temp;

	mp_enable_preemption();

	return USER_SETTABLE;
}

#endif	/* MACH_BSD */


typedef kern_return_t (*mach_call_t)(void *);

struct mach_call_args {
	syscall_arg_t arg1;
	syscall_arg_t arg2;
	syscall_arg_t arg3;
	syscall_arg_t arg4;
	syscall_arg_t arg5;
	syscall_arg_t arg6;
	syscall_arg_t arg7;
	syscall_arg_t arg8;
	syscall_arg_t arg9;
};

static kern_return_t
mach_call_arg_munger32(uint32_t sp, int nargs, int call_number, struct mach_call_args *args);


static kern_return_t
mach_call_arg_munger32(uint32_t sp, int nargs, int call_number, struct mach_call_args *args)
{
	unsigned int args32[9];

	if (copyin((user_addr_t)(sp + sizeof(int)), (char *)args32, nargs * sizeof (int)))
		return KERN_INVALID_ARGUMENT;

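	/*
	 * The cases below fall through deliberately: an N-argument trap
	 * copies arguments N down through 1 into the args structure.
	 */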
	switch (nargs) {
	case 9: args->arg9 = args32[8];
	case 8: args->arg8 = args32[7];
	case 7: args->arg7 = args32[6];
	case 6: args->arg6 = args32[5];
	case 5: args->arg5 = args32[4];
	case 4: args->arg4 = args32[3];
	case 3: args->arg3 = args32[2];
	case 2: args->arg2 = args32[1];
	case 1: args->arg1 = args32[0];
	}
	if (call_number == 90) {
		/* munge_l for mach_wait_until_trap() */
		args->arg1 = (((uint64_t)(args32[0])) | ((((uint64_t)(args32[1]))<<32)));
	}
	if (call_number == 93) {
		/* munge_wl for mk_timer_arm_trap() */
		args->arg2 = (((uint64_t)(args32[1])) | ((((uint64_t)(args32[2]))<<32)));
	}

	return KERN_SUCCESS;
}


__private_extern__ void mach_call_munger(x86_saved_state_t *state);

extern const char *mach_syscall_name_table[];

void
mach_call_munger(x86_saved_state_t *state)
{
	int argc;
	int call_number;
	mach_call_t mach_call;
	kern_return_t retval;
	struct mach_call_args args = { 0, 0, 0, 0, 0, 0, 0, 0, 0 };
	x86_saved_state32_t	*regs;

	assert(is_saved_state32(state));
	regs = saved_state32(state);

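	/*
	 * Mach traps are dispatched with negative numbers in %eax (positive
	 * numbers are BSD syscalls), so negate to index mach_trap_table.
	 */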
	call_number = -(regs->eax);

	DEBUG_KPRINT_SYSCALL_MACH(
		"mach_call_munger: code=%d(%s)\n",
		call_number, mach_syscall_name_table[call_number]);
#if DEBUG_TRACE
	kprintf("mach_call_munger(0x%08x) code=%d\n", regs, call_number);
#endif

	if (call_number < 0 || call_number >= mach_trap_count) {
		i386_exception(EXC_SYSCALL, call_number, 1);
		/* NOTREACHED */
	}
	mach_call = (mach_call_t)mach_trap_table[call_number].mach_trap_function;

	if (mach_call == (mach_call_t)kern_invalid) {
		DEBUG_KPRINT_SYSCALL_MACH(
			"mach_call_munger: kern_invalid 0x%x\n", regs->eax);
		i386_exception(EXC_SYSCALL, call_number, 1);
		/* NOTREACHED */
	}

	argc = mach_trap_table[call_number].mach_trap_arg_count;
	if (argc) {
		retval = mach_call_arg_munger32(regs->uesp, argc, call_number, &args);
		if (retval != KERN_SUCCESS) {
			regs->eax = retval;

			DEBUG_KPRINT_SYSCALL_MACH(
				"mach_call_munger: retval=0x%x\n", retval);

			thread_exception_return();
			/* NOTREACHED */
		}
	}

#ifdef MACH_BSD
	mach_kauth_cred_uthread_update();
#endif
	KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_EXCP_SC, (call_number)) | DBG_FUNC_START,
			args.arg1, args.arg2, args.arg3, args.arg4, 0);

	retval = mach_call(&args);

	DEBUG_KPRINT_SYSCALL_MACH("mach_call_munger: retval=0x%x\n", retval);

	KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_EXCP_SC,(call_number)) | DBG_FUNC_END,
			retval, 0, 0, 0, 0);
	regs->eax = retval;

	throttle_lowpri_io(TRUE);

	thread_exception_return();
	/* NOTREACHED */
}


__private_extern__ void mach_call_munger64(x86_saved_state_t *regs);

void
mach_call_munger64(x86_saved_state_t *state)
{
	int call_number;
	int argc;
	mach_call_t mach_call;
	x86_saved_state64_t	*regs;

	assert(is_saved_state64(state));
	regs = saved_state64(state);

	call_number = (int)(regs->rax & SYSCALL_NUMBER_MASK);

	DEBUG_KPRINT_SYSCALL_MACH(
		"mach_call_munger64: code=%d(%s)\n",
		call_number, mach_syscall_name_table[call_number]);

	KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_EXCP_SC,
			(call_number)) | DBG_FUNC_START,
			regs->rdi, regs->rsi,
			regs->rdx, regs->r10, 0);

	if (call_number < 0 || call_number >= mach_trap_count) {
		i386_exception(EXC_SYSCALL, regs->rax, 1);
		/* NOTREACHED */
	}
	mach_call = (mach_call_t)mach_trap_table[call_number].mach_trap_function;

	if (mach_call == (mach_call_t)kern_invalid) {
		i386_exception(EXC_SYSCALL, regs->rax, 1);
		/* NOTREACHED */
	}
	argc = mach_trap_table[call_number].mach_trap_arg_count;

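	/*
	 * The first six arguments arrive in registers (rdi, rsi, rdx, r10,
	 * r8, r9); anything beyond that sits on the user stack just above
	 * the return address.  Extra arguments are copied into v_arg6 and
	 * onward, which this saved-state layout places directly after the
	 * register arguments so the trap handler can take &regs->rdi as one
	 * contiguous argument block.
	 */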
	if (argc > 6) {
		int copyin_count;

		copyin_count = (argc - 6) * (int)sizeof(uint64_t);

		if (copyin((user_addr_t)(regs->isf.rsp + sizeof(user_addr_t)), (char *)&regs->v_arg6, copyin_count)) {
			regs->rax = KERN_INVALID_ARGUMENT;

			thread_exception_return();
			/* NOTREACHED */
		}
	}

#ifdef MACH_BSD
	mach_kauth_cred_uthread_update();
#endif

	regs->rax = (uint64_t)mach_call((void *)(&regs->rdi));

	DEBUG_KPRINT_SYSCALL_MACH( "mach_call_munger64: retval=0x%llx\n", regs->rax);

	KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_EXCP_SC,
			(call_number)) | DBG_FUNC_END,
			regs->rax, 0, 0, 0, 0);

	throttle_lowpri_io(TRUE);

	thread_exception_return();
	/* NOTREACHED */
}


/*
 * thread_setuserstack:
 *
 * Sets the user stack pointer into the machine
 * dependent thread state info.
 */
void
thread_setuserstack(
	thread_t		thread,
	mach_vm_address_t	user_stack)
{
	if (thread_is_64bit(thread)) {
		x86_saved_state64_t	*iss64;

		iss64 = USER_REGS64(thread);

		iss64->isf.rsp = (uint64_t)user_stack;
	} else {
		x86_saved_state32_t	*iss32;

		iss32 = USER_REGS32(thread);

		iss32->uesp = CAST_DOWN_EXPLICIT(unsigned int, user_stack);
	}
}

/*
 * thread_adjuserstack:
 *
 * Returns the adjusted user stack pointer from the machine
 * dependent thread state info.  Used for small (<2G) deltas.
 */
uint64_t
thread_adjuserstack(
	thread_t	thread,
	int		adjust)
{
	if (thread_is_64bit(thread)) {
		x86_saved_state64_t	*iss64;

		iss64 = USER_REGS64(thread);

		iss64->isf.rsp += adjust;

		return iss64->isf.rsp;
	} else {
		x86_saved_state32_t	*iss32;

		iss32 = USER_REGS32(thread);

		iss32->uesp += adjust;

		return CAST_USER_ADDR_T(iss32->uesp);
	}
}

/*
 * thread_setentrypoint:
 *
 * Sets the user PC into the machine
 * dependent thread state info.
 */
void
thread_setentrypoint(thread_t thread, mach_vm_address_t entry)
{
	if (thread_is_64bit(thread)) {
		x86_saved_state64_t	*iss64;

		iss64 = USER_REGS64(thread);

		iss64->isf.rip = (uint64_t)entry;
	} else {
		x86_saved_state32_t	*iss32;

		iss32 = USER_REGS32(thread);

		iss32->eip = CAST_DOWN_EXPLICIT(unsigned int, entry);
	}
}


kern_return_t
thread_setsinglestep(thread_t thread, int on)
{
	if (thread_is_64bit(thread)) {
		x86_saved_state64_t	*iss64;

		iss64 = USER_REGS64(thread);

		if (on)
			iss64->isf.rflags |= EFL_TF;
		else
			iss64->isf.rflags &= ~EFL_TF;
	} else {
		x86_saved_state32_t	*iss32;

		iss32 = USER_REGS32(thread);

		if (on) {
			iss32->efl |= EFL_TF;
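			/*
			 * A thread that entered via sysenter would normally
			 * return through sysexit; switch to SYSENTER_TF_CS so
			 * the return takes the IRET path and the trap flag is
			 * honored.
			 */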
			/* Ensure IRET */
			if (iss32->cs == SYSENTER_CS)
				iss32->cs = SYSENTER_TF_CS;
		}
		else
			iss32->efl &= ~EFL_TF;
	}

	return (KERN_SUCCESS);
}



/* XXX this should be a struct savearea so that CHUD will work better on x86 */
void *
find_user_regs(thread_t thread)
{
	return USER_STATE(thread);
}

void *
get_user_regs(thread_t th)
{
	if (th->machine.pcb)
		return(USER_STATE(th));
	else {
		printf("[get_user_regs: thread does not have pcb]");
		return NULL;
	}
}

#if CONFIG_DTRACE
/*
 * DTrace would like to have a peek at the kernel interrupt state, if available.
 * Based on osfmk/chud/i386/chud_thread_i386.c:chudxnu_thread_get_state(), which see.
 */
x86_saved_state_t *find_kern_regs(thread_t);

x86_saved_state_t *
find_kern_regs(thread_t thread)
{
	if (thread == current_thread() &&
	    NULL != current_cpu_datap()->cpu_int_state &&
	    !(USER_STATE(thread) == current_cpu_datap()->cpu_int_state &&
	      current_cpu_datap()->cpu_interrupt_level == 1)) {

		return current_cpu_datap()->cpu_int_state;
	} else {
		return NULL;
	}
}

vm_offset_t dtrace_get_cpu_int_stack_top(void);

vm_offset_t
dtrace_get_cpu_int_stack_top(void)
{
	return current_cpu_datap()->cpu_int_stack_top;
}
#endif