/*
 * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#ifdef MACH_BSD
#include <mach_rt.h>
#include <mach_debug.h>
#include <mach_ldebug.h>

#include <mach/kern_return.h>
#include <mach/mach_traps.h>
#include <mach/thread_status.h>
#include <mach/vm_param.h>

#include <kern/counters.h>
#include <kern/cpu_data.h>
#include <kern/mach_param.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/sched_prim.h>
#include <kern/misc_protos.h>
#include <kern/assert.h>
#include <kern/spl.h>
#include <kern/syscall_sw.h>
#include <ipc/ipc_port.h>
#include <vm/vm_kern.h>
#include <vm/pmap.h>

#include <i386/cpu_data.h>
#include <i386/cpu_number.h>
#include <i386/thread.h>
#include <i386/eflags.h>
#include <i386/proc_reg.h>
#include <i386/seg.h>
#include <i386/tss.h>
#include <i386/user_ldt.h>
#include <i386/fpu.h>
#include <i386/machdep_call.h>
#include <i386/misc_protos.h>
#include <i386/cpu_data.h>
#include <i386/cpu_number.h>
#include <i386/mp_desc.h>
#include <i386/vmparam.h>
#include <i386/trap.h>
#include <mach/i386/syscall_sw.h>
#include <sys/syscall.h>
#include <sys/kdebug.h>
#include <sys/errno.h>

#include <../bsd/sys/sysent.h>

kern_return_t
thread_userstack(
    thread_t,
    int,
    thread_state_t,
    unsigned int,
    mach_vm_offset_t *,
    int *
);

kern_return_t
thread_entrypoint(
    thread_t,
    int,
    thread_state_t,
    unsigned int,
    mach_vm_offset_t *
);

void * find_user_regs(thread_t);

unsigned int get_msr_exportmask(void);

unsigned int get_msr_nbits(void);

unsigned int get_msr_rbits(void);

kern_return_t
thread_compose_cthread_desc(unsigned int addr, pcb_t pcb);

void IOSleep(int);

void thread_set_cthreadself(thread_t thread, uint64_t pself, int isLP64);

/*
 * thread_userstack:
 *
 *	Return the user stack pointer from the machine
 *	dependent thread state info.
 */
kern_return_t
thread_userstack(
    __unused thread_t       thread,
    int                     flavor,
    thread_state_t          tstate,
    __unused unsigned int   count,
    user_addr_t             *user_stack,
    int                     *customstack
)
{
    if (customstack)
        *customstack = 0;

    switch (flavor) {
    case x86_THREAD_STATE32:
    {
        x86_thread_state32_t *state25;

        state25 = (x86_thread_state32_t *) tstate;

        if (state25->esp)
            *user_stack = state25->esp;
        else
            *user_stack = VM_USRSTACK32;
        if (customstack && state25->esp)
            *customstack = 1;
        else
            *customstack = 0;
        break;
    }

    case x86_THREAD_STATE64:
    {
        x86_thread_state64_t *state25;

        state25 = (x86_thread_state64_t *) tstate;

        if (state25->rsp)
            *user_stack = state25->rsp;
        else
            *user_stack = VM_USRSTACK64;
        if (customstack && state25->rsp)
            *customstack = 1;
        else
            *customstack = 0;
        break;
    }

    default:
        return (KERN_INVALID_ARGUMENT);
    }

    return (KERN_SUCCESS);
}
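
/*
 * Illustrative example (not part of the original source; "thread" here is a
 * hypothetical thread_t): asking for the initial stack of a 32-bit flavor
 * whose esp was left at zero.
 *
 *	x86_thread_state32_t ts32 = { 0 };
 *	user_addr_t stack = 0;
 *	int custom = 0;
 *
 *	thread_userstack(thread, x86_THREAD_STATE32, (thread_state_t)&ts32,
 *	    x86_THREAD_STATE32_COUNT, &stack, &custom);
 *
 * returns KERN_SUCCESS and leaves stack == VM_USRSTACK32 with custom == 0;
 * a non-zero esp would be passed through and flagged as a custom stack.
 */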


kern_return_t
thread_entrypoint(
    __unused thread_t       thread,
    int                     flavor,
    thread_state_t          tstate,
    __unused unsigned int   count,
    mach_vm_offset_t        *entry_point
)
{
    /*
     * Set a default.
     */
    if (*entry_point == 0)
        *entry_point = VM_MIN_ADDRESS;

    switch (flavor) {
    case x86_THREAD_STATE32:
    {
        x86_thread_state32_t *state25;

        state25 = (i386_thread_state_t *) tstate;
        *entry_point = state25->eip ? state25->eip : VM_MIN_ADDRESS;
        break;
    }

    case x86_THREAD_STATE64:
    {
        x86_thread_state64_t *state25;

        state25 = (x86_thread_state64_t *) tstate;
        *entry_point = state25->rip ? state25->rip : VM_MIN_ADDRESS64;
        break;
    }
    }
    return (KERN_SUCCESS);
}


/*
 * Duplicate parent state in child
 * for U**X fork.
 */
kern_return_t
machine_thread_dup(
    thread_t    parent,
    thread_t    child
)
{

    pcb_t       parent_pcb;
    pcb_t       child_pcb;

    if ((child_pcb = child->machine.pcb) == NULL ||
        (parent_pcb = parent->machine.pcb) == NULL)
        return (KERN_FAILURE);
    /*
     * Copy over the x86_saved_state registers
     */
    if (cpu_mode_is64bit()) {
        if (thread_is_64bit(parent))
            bcopy(USER_REGS64(parent), USER_REGS64(child), sizeof(x86_saved_state64_t));
        else
            bcopy(USER_REGS32(parent), USER_REGS32(child), sizeof(x86_saved_state_compat32_t));
    } else
        bcopy(USER_REGS32(parent), USER_REGS32(child), sizeof(x86_saved_state32_t));

    /*
     * Check to see if parent is using floating point
     * and if so, copy the registers to the child
     */
    fpu_dup_fxstate(parent, child);

#ifdef MACH_BSD
    /*
     * Copy the parent's cthread id and USER_CTHREAD descriptor, if 32-bit.
     */
    child_pcb->cthread_self = parent_pcb->cthread_self;
    if (!thread_is_64bit(parent))
        child_pcb->cthread_desc = parent_pcb->cthread_desc;

    /*
     * FIXME - should a user specified LDT, TSS and V86 info
     * be duplicated as well?? - probably not.
     */
    // Duplicate any user LDT entry that was set; this seems appropriate.
    if (parent_pcb->uldt_selector != 0) {
        child_pcb->uldt_selector = parent_pcb->uldt_selector;
        child_pcb->uldt_desc = parent_pcb->uldt_desc;
    }
#endif

    return (KERN_SUCCESS);
}

/*
 * FIXME - thread_set_child
 */

void thread_set_child(thread_t child, int pid);
void
thread_set_child(thread_t child, int pid)
{
    if (thread_is_64bit(child)) {
        x86_saved_state64_t *iss64;

        iss64 = USER_REGS64(child);

        iss64->rax = pid;
        iss64->rdx = 1;
        iss64->isf.rflags &= ~EFL_CF;
    } else {
        x86_saved_state32_t *iss32;

        iss32 = USER_REGS32(child);

        iss32->eax = pid;
        iss32->edx = 1;
        iss32->efl &= ~EFL_CF;
    }
}


void thread_set_parent(thread_t parent, int pid);

void
thread_set_parent(thread_t parent, int pid)
{
    if (thread_is_64bit(parent)) {
        x86_saved_state64_t *iss64;

        iss64 = USER_REGS64(parent);

        iss64->rax = pid;
        iss64->rdx = 0;
        iss64->isf.rflags &= ~EFL_CF;
    } else {
        x86_saved_state32_t *iss32;

        iss32 = USER_REGS32(parent);

        iss32->eax = pid;
        iss32->edx = 0;
        iss32->efl &= ~EFL_CF;
    }
}


/*
 * System Call handling code
 */

extern long fuword(vm_offset_t);



void
machdep_syscall(x86_saved_state_t *state)
{
    int                     args[machdep_call_count];
    int                     trapno;
    int                     nargs;
    machdep_call_t          *entry;
    x86_saved_state32_t     *regs;

    assert(is_saved_state32(state));
    regs = saved_state32(state);

    trapno = regs->eax;
#if DEBUG_TRACE
    kprintf("machdep_syscall(0x%08x) code=%d\n", regs, trapno);
#endif

    if (trapno < 0 || trapno >= machdep_call_count) {
        regs->eax = (unsigned int)kern_invalid(NULL);

        thread_exception_return();
        /* NOTREACHED */
    }
    entry = &machdep_call_table[trapno];
    nargs = entry->nargs;

    if (nargs != 0) {
        if (copyin((user_addr_t) regs->uesp + sizeof (int),
                (char *) args, (nargs * sizeof (int)))) {
            regs->eax = KERN_INVALID_ADDRESS;

            thread_exception_return();
            /* NOTREACHED */
        }
    }
    switch (nargs) {
    case 0:
        regs->eax = (*entry->routine.args_0)();
        break;
    case 1:
        regs->eax = (*entry->routine.args_1)(args[0]);
        break;
    case 2:
        regs->eax = (*entry->routine.args_2)(args[0], args[1]);
        break;
    case 3:
        if (!entry->bsd_style)
            regs->eax = (*entry->routine.args_3)(args[0], args[1], args[2]);
        else {
            int         error;
            uint32_t    rval;

            error = (*entry->routine.args_bsd_3)(&rval, args[0], args[1], args[2]);
            if (error) {
                regs->eax = error;
                regs->efl |= EFL_CF;    /* carry bit */
            } else {
                regs->eax = rval;
                regs->efl &= ~EFL_CF;
            }
        }
        break;
    case 4:
        regs->eax = (*entry->routine.args_4)(args[0], args[1], args[2], args[3]);
        break;

    default:
        panic("machdep_syscall: too many args");
    }
    if (current_thread()->funnel_lock)
        (void) thread_funnel_set(current_thread()->funnel_lock, FALSE);

    thread_exception_return();
    /* NOTREACHED */
}
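
/*
 * Note on the 32-bit argument fetch above (an assumption about the user-side
 * calling convention, not something stated in this file): at trap time the
 * user stack is expected to look like
 *
 *	regs->uesp + 0		return address (skipped)
 *	regs->uesp + 4		args[0]
 *	regs->uesp + 8		args[1]
 *	...
 *
 * which is why the copyin starts at uesp + sizeof (int) and copies
 * nargs * sizeof (int) bytes, with the call number itself arriving in eax.
 */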


void
machdep_syscall64(x86_saved_state_t *state)
{
    int                     trapno;
    machdep_call_t          *entry;
    x86_saved_state64_t     *regs;

    assert(is_saved_state64(state));
    regs = saved_state64(state);

    trapno = regs->rax & SYSCALL_NUMBER_MASK;

    if (trapno < 0 || trapno >= machdep_call_count) {
        regs->rax = (unsigned int)kern_invalid(NULL);

        thread_exception_return();
        /* NOTREACHED */
    }
    entry = &machdep_call_table64[trapno];

    switch (entry->nargs) {
    case 0:
        regs->rax = (*entry->routine.args_0)();
        break;
    case 1:
        regs->rax = (*entry->routine.args64_1)(regs->rdi);
        break;
    default:
        panic("machdep_syscall64: too many args");
    }
    if (current_thread()->funnel_lock)
        (void) thread_funnel_set(current_thread()->funnel_lock, FALSE);

    thread_exception_return();
    /* NOTREACHED */
}


kern_return_t
thread_compose_cthread_desc(unsigned int addr, pcb_t pcb)
{
    struct real_descriptor desc;

    mp_disable_preemption();

    desc.limit_low = 1;
    desc.limit_high = 0;
    desc.base_low = addr & 0xffff;
    desc.base_med = (addr >> 16) & 0xff;
    desc.base_high = (addr >> 24) & 0xff;
    desc.access = ACC_P|ACC_PL_U|ACC_DATA_W;
    desc.granularity = SZ_32|SZ_G;
    pcb->cthread_desc = desc;
    *ldt_desc_p(USER_CTHREAD) = desc;

    mp_enable_preemption();

    return(KERN_SUCCESS);
}

kern_return_t
thread_set_cthread_self(uint32_t self)
{
    current_thread()->machine.pcb->cthread_self = (uint64_t) self;

    return (KERN_SUCCESS);
}

kern_return_t
thread_get_cthread_self(void)
{
    return ((kern_return_t)current_thread()->machine.pcb->cthread_self);
}

kern_return_t
thread_fast_set_cthread_self(uint32_t self)
{
    pcb_t                   pcb;
    x86_saved_state32_t     *iss;

    pcb = (pcb_t)current_thread()->machine.pcb;
    thread_compose_cthread_desc(self, pcb);
    pcb->cthread_self = (uint64_t) self; /* preserve old func too */
    iss = saved_state32(pcb->iss);
    iss->gs = USER_CTHREAD;

    return (USER_CTHREAD);
}
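
/*
 * Illustrative sketch (an assumption about user-level usage, not from the
 * original source): once the machdep trap that lands in
 * thread_fast_set_cthread_self() returns, %gs holds USER_CTHREAD and the
 * descriptor built by thread_compose_cthread_desc() has its base at "self",
 * so 32-bit user code can reach its thread-local block with %gs-relative
 * addressing, e.g.
 *
 *	movl	%gs:0x0, %eax
 *
 * loads the first word of the structure at "self".
 */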

void
thread_set_cthreadself(thread_t thread, uint64_t pself, int isLP64)
{
    if (isLP64 == 0) {
        pcb_t                   pcb;
        x86_saved_state32_t     *iss;

        pcb = (pcb_t)thread->machine.pcb;
        thread_compose_cthread_desc(pself, pcb);
        pcb->cthread_self = (uint64_t) pself; /* preserve old func too */
        iss = saved_state32(pcb->iss);
        iss->gs = USER_CTHREAD;
    } else {
        pcb_t                   pcb;
        x86_saved_state64_t     *iss;

        pcb = thread->machine.pcb;

        /* check for canonical address, set 0 otherwise */
        if (!IS_USERADDR64_CANONICAL(pself))
            pself = 0ULL;
        pcb->cthread_self = pself;

        /* XXX for 64-in-32 */
        iss = saved_state64(pcb->iss);
        iss->gs = USER_CTHREAD;
        thread_compose_cthread_desc((uint32_t) pself, pcb);
    }
}


kern_return_t
thread_fast_set_cthread_self64(uint64_t self)
{
    pcb_t                   pcb;
    x86_saved_state64_t     *iss;

    pcb = current_thread()->machine.pcb;

    /* check for canonical address, set 0 otherwise */
    if (!IS_USERADDR64_CANONICAL(self))
        self = 0ULL;
    pcb->cthread_self = self;
    current_cpu_datap()->cpu_uber.cu_user_gs_base = self;

    /* XXX for 64-in-32 */
    iss = saved_state64(pcb->iss);
    iss->gs = USER_CTHREAD;
    thread_compose_cthread_desc((uint32_t) self, pcb);

    return (USER_CTHREAD);
}

/*
 * thread_set_user_ldt is the interface for the user level
 * settable ldt entry feature.  Allowing a user to create arbitrary
 * ldt entries seems to be too large of a security hole, so instead
 * this mechanism is in place to allow user level processes to have
 * an ldt entry that can be used in conjunction with the FS register.
 *
 * Swapping occurs inside the pcb.c file along with initialization
 * when a thread is created.  The basic functioning theory is that the
 * pcb->uldt_selector variable will contain either 0, meaning the
 * process has not set up any entry, or the selector to be used in
 * the FS register.  pcb->uldt_desc contains the actual descriptor the
 * user has set up, stored in machine usable ldt format.
 *
 * Currently one entry is shared by all threads (USER_SETTABLE), but
 * this could be changed in the future by changing how this routine
 * allocates the selector.  There seems to be no real reason at this
 * time to have this added feature, but in the future it might be
 * needed.
 *
 * address is the linear address of the start of the data area; size
 * is the size in bytes of the area; flags should always be set to 0
 * for now.  In the future flags could be used to set R/W permissions or
 * other functions.  Currently the segment is created as a data segment
 * up to 1 megabyte in size with full read/write permissions only.
 *
 * This call returns the segment selector or -1 if any error occurs.
 */
kern_return_t
thread_set_user_ldt(uint32_t address, uint32_t size, uint32_t flags)
{
    pcb_t                   pcb;
    struct fake_descriptor  temp;
    int                     mycpu;

    if (flags != 0)
        return -1;              // flags not supported
    if (size > 0xFFFFF)
        return -1;              // size too big, 1 meg is the limit

    mp_disable_preemption();
    mycpu = cpu_number();

    // create a "fake" descriptor so we can use fix_desc()
    // to build a real one...
    // 32 bit default operation size
    // standard read/write perms for a data segment
    pcb = (pcb_t)current_thread()->machine.pcb;
    temp.offset = address;
    temp.lim_or_seg = size;
    temp.size_or_wdct = SZ_32;
    temp.access = ACC_P|ACC_PL_U|ACC_DATA_W;

    // turn this into a real descriptor
    fix_desc(&temp,1);

    // set up our data in the pcb
    pcb->uldt_desc = *(struct real_descriptor*)&temp;
    pcb->uldt_selector = USER_SETTABLE;         // set the selector value

    // now set it up in the current table...
    *ldt_desc_p(USER_SETTABLE) = *(struct real_descriptor*)&temp;

    mp_enable_preemption();

    return USER_SETTABLE;
}
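
/*
 * Worked example (illustrative only, not from the original source): a call
 * such as
 *
 *	thread_set_user_ldt(0x10000, 0x1000, 0);
 *
 * returns USER_SETTABLE and installs a 32-bit read/write data segment whose
 * base is 0x10000 and whose byte-granular limit is 0x1000, so the thread can
 * then load USER_SETTABLE into %fs and address that window %fs-relative.
 * Sizes above 0xFFFFF or any non-zero flags are rejected with -1, per the
 * checks above.
 */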

#endif /* MACH_BSD */


typedef kern_return_t (*mach_call_t)(void *);

struct mach_call_args {
    syscall_arg_t arg1;
    syscall_arg_t arg2;
    syscall_arg_t arg3;
    syscall_arg_t arg4;
    syscall_arg_t arg5;
    syscall_arg_t arg6;
    syscall_arg_t arg7;
    syscall_arg_t arg8;
    syscall_arg_t arg9;
};

static kern_return_t
mach_call_arg_munger32(uint32_t sp, int nargs, int call_number, struct mach_call_args *args);


static kern_return_t
mach_call_arg_munger32(uint32_t sp, int nargs, int call_number, struct mach_call_args *args)
{
    unsigned int args32[9];

    if (copyin((user_addr_t)(sp + sizeof(int)), (char *)args32, nargs * sizeof (int)))
        return KERN_INVALID_ARGUMENT;

    switch (nargs) {
    case 9: args->arg9 = args32[8];
    case 8: args->arg8 = args32[7];
    case 7: args->arg7 = args32[6];
    case 6: args->arg6 = args32[5];
    case 5: args->arg5 = args32[4];
    case 4: args->arg4 = args32[3];
    case 3: args->arg3 = args32[2];
    case 2: args->arg2 = args32[1];
    case 1: args->arg1 = args32[0];
    }
    if (call_number == 90) {
        /* munge_l for mach_wait_until_trap() */
        args->arg1 = (((uint64_t)(args32[0])) | ((((uint64_t)(args32[1]))<<32)));
    }
    if (call_number == 93) {
        /* munge_wl for mk_timer_arm_trap() */
        args->arg2 = (((uint64_t)(args32[1])) | ((((uint64_t)(args32[2]))<<32)));
    }

    return KERN_SUCCESS;
}
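
/*
 * Worked example (illustrative): mach_wait_until_trap() is Mach trap 90 and
 * takes a single 64-bit deadline, which a 32-bit caller pushes as two
 * 32-bit words, low word first.  With
 *
 *	args32[0] = 0x89abcdef	(low half)
 *	args32[1] = 0x01234567	(high half)
 *
 * the special case above rebuilds args->arg1 = 0x0123456789abcdef, mirroring
 * what munge_l does for the 64-bit register ABI.
 */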


__private_extern__ void mach_call_munger(x86_saved_state_t *state);

void
mach_call_munger(x86_saved_state_t *state)
{
    int                     argc;
    int                     call_number;
    mach_call_t             mach_call;
    kern_return_t           retval;
    struct mach_call_args   args = { 0, 0, 0, 0, 0, 0, 0, 0, 0 };
    x86_saved_state32_t     *regs;

    assert(is_saved_state32(state));
    regs = saved_state32(state);

    call_number = -(regs->eax);
#if DEBUG_TRACE
    kprintf("mach_call_munger(0x%08x) code=%d\n", regs, call_number);
#endif

    if (call_number < 0 || call_number >= mach_trap_count) {
        i386_exception(EXC_SYSCALL, call_number, 1);
        /* NOTREACHED */
    }
    mach_call = (mach_call_t)mach_trap_table[call_number].mach_trap_function;

    if (mach_call == (mach_call_t)kern_invalid) {
        i386_exception(EXC_SYSCALL, call_number, 1);
        /* NOTREACHED */
    }

    argc = mach_trap_table[call_number].mach_trap_arg_count;
    if (argc) {
        retval = mach_call_arg_munger32(regs->uesp, argc, call_number, &args);
        if (retval != KERN_SUCCESS) {
            regs->eax = retval;

            thread_exception_return();
            /* NOTREACHED */
        }
    }
    KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_EXCP_SC, (call_number)) | DBG_FUNC_START,
            (int) args.arg1, (int) args.arg2, (int) args.arg3, (int) args.arg4, 0);

    retval = mach_call(&args);

    KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_EXCP_SC,(call_number)) | DBG_FUNC_END,
            retval, 0, 0, 0, 0);
    regs->eax = retval;

    thread_exception_return();
    /* NOTREACHED */
}


__private_extern__ void mach_call_munger64(x86_saved_state_t *regs);

void
mach_call_munger64(x86_saved_state_t *state)
{
    int                     call_number;
    int                     argc;
    mach_call_t             mach_call;
    x86_saved_state64_t     *regs;

    assert(is_saved_state64(state));
    regs = saved_state64(state);

    call_number = regs->rax & SYSCALL_NUMBER_MASK;

    KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_EXCP_SC,
            (call_number)) | DBG_FUNC_START,
            (int) regs->rdi, (int) regs->rsi,
            (int) regs->rdx, (int) regs->r10, 0);

    if (call_number < 0 || call_number >= mach_trap_count) {
        i386_exception(EXC_SYSCALL, regs->rax, 1);
        /* NOTREACHED */
    }
    mach_call = (mach_call_t)mach_trap_table[call_number].mach_trap_function;

    if (mach_call == (mach_call_t)kern_invalid) {
        i386_exception(EXC_SYSCALL, regs->rax, 1);
        /* NOTREACHED */
    }
    argc = mach_trap_table[call_number].mach_trap_arg_count;

    if (argc > 6) {
        int copyin_count;

        copyin_count = (argc - 6) * sizeof(uint64_t);

        if (copyin((user_addr_t)(regs->isf.rsp + sizeof(user_addr_t)), (char *)&regs->v_arg6, copyin_count)) {
            regs->rax = KERN_INVALID_ARGUMENT;

            thread_exception_return();
            /* NOTREACHED */
        }
    }
    regs->rax = (uint64_t)mach_call((void *)(&regs->rdi));

    KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_EXCP_SC,
            (call_number)) | DBG_FUNC_END,
            (int)regs->rax, 0, 0, 0, 0);

    thread_exception_return();
    /* NOTREACHED */
}
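
/*
 * Note (an assumption about the 64-bit Mach trap ABI, not stated in this
 * file): the leading arguments arrive in registers starting at rdi (the
 * layout mach_call() consumes via &regs->rdi), and only arguments beyond the
 * sixth live on the user stack, which is why the copyin above targets
 * &regs->v_arg6 and skips one user_addr_t (presumably the stub's return
 * address) at the user stack pointer.
 */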


/*
 * thread_setuserstack:
 *
 * Sets the user stack pointer into the machine
 * dependent thread state info.
 */
void
thread_setuserstack(
    thread_t            thread,
    mach_vm_address_t   user_stack)
{
    if (thread_is_64bit(thread)) {
        x86_saved_state64_t *iss64;

        iss64 = USER_REGS64(thread);

        iss64->isf.rsp = (uint64_t)user_stack;
    } else {
        x86_saved_state32_t *iss32;

        iss32 = USER_REGS32(thread);

        iss32->uesp = CAST_DOWN(unsigned int, user_stack);
    }
}

/*
 * thread_adjuserstack:
 *
 * Returns the adjusted user stack pointer from the machine
 * dependent thread state info.  Used for small (<2G) deltas.
 */
uint64_t
thread_adjuserstack(
    thread_t    thread,
    int         adjust)
{
    if (thread_is_64bit(thread)) {
        x86_saved_state64_t *iss64;

        iss64 = USER_REGS64(thread);

        iss64->isf.rsp += adjust;

        return iss64->isf.rsp;
    } else {
        x86_saved_state32_t *iss32;

        iss32 = USER_REGS32(thread);

        iss32->uesp += adjust;

        return CAST_USER_ADDR_T(iss32->uesp);
    }
}
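
/*
 * Illustrative sketch (an assumption about typical use, not from the
 * original source): a caller that wants to push "len" bytes onto the user
 * stack can reserve the space and then copy out to the returned address:
 *
 *	user_addr_t sp = thread_adjuserstack(thread, -((int)len));
 *	(void) copyout(buf, sp, len);
 *
 * "thread", "buf" and "len" are hypothetical; the adjustment must stay
 * within the small (<2G) delta noted above.
 */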

/*
 * thread_setentrypoint:
 *
 * Sets the user PC into the machine
 * dependent thread state info.
 */
void
thread_setentrypoint(thread_t thread, mach_vm_address_t entry)
{
    if (thread_is_64bit(thread)) {
        x86_saved_state64_t *iss64;

        iss64 = USER_REGS64(thread);

        iss64->isf.rip = (uint64_t)entry;
    } else {
        x86_saved_state32_t *iss32;

        iss32 = USER_REGS32(thread);

        iss32->eip = CAST_DOWN(unsigned int, entry);
    }
}


kern_return_t
thread_setsinglestep(thread_t thread, int on)
{
    if (thread_is_64bit(thread)) {
        x86_saved_state64_t *iss64;

        iss64 = USER_REGS64(thread);

        if (on)
            iss64->isf.rflags |= EFL_TF;
        else
            iss64->isf.rflags &= ~EFL_TF;
    } else {
        x86_saved_state32_t *iss32;

        iss32 = USER_REGS32(thread);

        if (on)
            iss32->efl |= EFL_TF;
        else
            iss32->efl &= ~EFL_TF;
    }

    return (KERN_SUCCESS);
}



/* XXX this should be a struct savearea so that CHUD will work better on x86 */
void *
find_user_regs(thread_t thread)
{
    return USER_STATE(thread);
}

void *
get_user_regs(thread_t th)
{
    if (th->machine.pcb)
        return(USER_STATE(th));
    else {
        printf("[get_user_regs: thread does not have pcb]");
        return NULL;
    }
}

#if CONFIG_DTRACE
/*
 * DTrace would like to have a peek at the kernel interrupt state, if available.
 * Based on osfmk/chud/i386/chud_thread_i386.c:chudxnu_thread_get_state(), which see.
 */
x86_saved_state32_t *find_kern_regs(thread_t);

x86_saved_state32_t *
find_kern_regs(thread_t thread)
{
    if (thread == current_thread() &&
        NULL != current_cpu_datap()->cpu_int_state &&
        !(USER_STATE(thread) == current_cpu_datap()->cpu_int_state &&
          current_cpu_datap()->cpu_interrupt_level == 1)) {

        return saved_state32(current_cpu_datap()->cpu_int_state);
    } else {
        return NULL;
    }
}

vm_offset_t dtrace_get_cpu_int_stack_top(void);

vm_offset_t
dtrace_get_cpu_int_stack_top(void)
{
    return current_cpu_datap()->cpu_int_stack_top;
}
#endif