/*
 * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#ifdef MACH_BSD
#include <mach_rt.h>
#include <mach_debug.h>
#include <mach_ldebug.h>

#include <mach/kern_return.h>
#include <mach/mach_traps.h>
#include <mach/thread_status.h>
#include <mach/vm_param.h>

#include <kern/counters.h>
#include <kern/cpu_data.h>
#include <kern/mach_param.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/sched_prim.h>
#include <kern/misc_protos.h>
#include <kern/assert.h>
#include <kern/spl.h>
#include <kern/syscall_sw.h>
#include <ipc/ipc_port.h>
#include <vm/vm_kern.h>
#include <vm/pmap.h>

#include <i386/cpu_data.h>
#include <i386/cpu_number.h>
#include <i386/thread.h>
#include <i386/eflags.h>
#include <i386/proc_reg.h>
#include <i386/seg.h>
#include <i386/tss.h>
#include <i386/user_ldt.h>
#include <i386/fpu.h>
#include <i386/machdep_call.h>
#include <i386/misc_protos.h>
#include <i386/cpu_data.h>
#include <i386/cpu_number.h>
#include <i386/mp_desc.h>
#include <i386/vmparam.h>
#include <i386/trap.h>
#include <mach/i386/syscall_sw.h>
#include <sys/syscall.h>
#include <sys/kdebug.h>
#include <sys/errno.h>
#include <../bsd/sys/sysent.h>

kern_return_t
thread_userstack(
	thread_t,
	int,
	thread_state_t,
	unsigned int,
	mach_vm_offset_t *,
	int *
);

kern_return_t
thread_entrypoint(
	thread_t,
	int,
	thread_state_t,
	unsigned int,
	mach_vm_offset_t *
);

void * find_user_regs(thread_t);

unsigned int get_msr_exportmask(void);

unsigned int get_msr_nbits(void);

unsigned int get_msr_rbits(void);

kern_return_t
thread_compose_cthread_desc(unsigned int addr, pcb_t pcb);

void IOSleep(int);
extern void throttle_lowpri_io(boolean_t);

void thread_set_cthreadself(thread_t thread, uint64_t pself, int isLP64);

/*
 * thread_userstack:
 *
 * Return the user stack pointer from the machine
 * dependent thread state info.
 */
kern_return_t
thread_userstack(
	__unused thread_t thread,
	int flavor,
	thread_state_t tstate,
	__unused unsigned int count,
	user_addr_t *user_stack,
	int *customstack
)
{
	if (customstack)
		*customstack = 0;

	switch (flavor) {
	case x86_THREAD_STATE32:
	{
		x86_thread_state32_t *state25;

		state25 = (x86_thread_state32_t *) tstate;

		if (state25->esp)
			*user_stack = state25->esp;
		else
			*user_stack = VM_USRSTACK32;
		if (customstack && state25->esp)
			*customstack = 1;
		else
			*customstack = 0;
		break;
	}

	case x86_THREAD_STATE64:
	{
		x86_thread_state64_t *state25;

		state25 = (x86_thread_state64_t *) tstate;

		if (state25->rsp)
			*user_stack = state25->rsp;
		else
			*user_stack = VM_USRSTACK64;
		if (customstack && state25->rsp)
			*customstack = 1;
		else
			*customstack = 0;
		break;
	}

	default:
		return (KERN_INVALID_ARGUMENT);
	}

	return (KERN_SUCCESS);
}

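/*
 * thread_entrypoint:
 *
 * Return the user entry point (initial PC) from the machine
 * dependent thread state info.
 */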
kern_return_t
thread_entrypoint(
	__unused thread_t thread,
	int flavor,
	thread_state_t tstate,
	__unused unsigned int count,
	mach_vm_offset_t *entry_point
)
{
	/*
	 * Set a default.
	 */
	if (*entry_point == 0)
		*entry_point = VM_MIN_ADDRESS;

	switch (flavor) {
	case x86_THREAD_STATE32:
	{
		x86_thread_state32_t *state25;

		state25 = (x86_thread_state32_t *) tstate;
		*entry_point = state25->eip ? state25->eip : VM_MIN_ADDRESS;
		break;
	}

	case x86_THREAD_STATE64:
	{
		x86_thread_state64_t *state25;

		state25 = (x86_thread_state64_t *) tstate;
		*entry_point = state25->rip ? state25->rip : VM_MIN_ADDRESS64;
		break;
	}
	}
	return (KERN_SUCCESS);
}

/*
 * Duplicate parent state in child
 * for U**X fork.
 */
kern_return_t
machine_thread_dup(
	thread_t parent,
	thread_t child
)
{
	pcb_t parent_pcb;
	pcb_t child_pcb;

	if ((child_pcb = child->machine.pcb) == NULL ||
	    (parent_pcb = parent->machine.pcb) == NULL)
		return (KERN_FAILURE);
	/*
	 * Copy over the x86_saved_state registers
	 */
	if (cpu_mode_is64bit()) {
		if (thread_is_64bit(parent))
			bcopy(USER_REGS64(parent), USER_REGS64(child), sizeof(x86_saved_state64_t));
		else
			bcopy(USER_REGS32(parent), USER_REGS32(child), sizeof(x86_saved_state_compat32_t));
	} else
		bcopy(USER_REGS32(parent), USER_REGS32(child), sizeof(x86_saved_state32_t));

	/*
	 * Check to see if parent is using floating point
	 * and if so, copy the registers to the child
	 */
	fpu_dup_fxstate(parent, child);

#ifdef MACH_BSD
	/*
	 * Copy the parent's cthread id and USER_CTHREAD descriptor, if 32-bit.
	 */
	child_pcb->cthread_self = parent_pcb->cthread_self;
	if (!thread_is_64bit(parent))
		child_pcb->cthread_desc = parent_pcb->cthread_desc;

	/*
	 * FIXME - should user-specified LDT, TSS and V86 info
	 * be duplicated as well? Probably not.
	 */
	// Duplicate any user LDT entry that was set; this seems appropriate.
	if (parent_pcb->uldt_selector != 0) {
		child_pcb->uldt_selector = parent_pcb->uldt_selector;
		child_pcb->uldt_desc = parent_pcb->uldt_desc;
	}
#endif

	return (KERN_SUCCESS);
}

/*
 * FIXME - thread_set_child
 */

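/*
 * thread_set_child / thread_set_parent:
 *
 * Set the fork() return state in the saved user registers of the
 * given thread: the pid is returned in eax/rax, edx/rdx is set to 1
 * in the child and 0 in the parent, and the carry flag is cleared
 * to indicate success.
 */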
void thread_set_child(thread_t child, int pid);
void
thread_set_child(thread_t child, int pid)
{
	if (thread_is_64bit(child)) {
		x86_saved_state64_t *iss64;

		iss64 = USER_REGS64(child);

		iss64->rax = pid;
		iss64->rdx = 1;
		iss64->isf.rflags &= ~EFL_CF;
	} else {
		x86_saved_state32_t *iss32;

		iss32 = USER_REGS32(child);

		iss32->eax = pid;
		iss32->edx = 1;
		iss32->efl &= ~EFL_CF;
	}
}


void thread_set_parent(thread_t parent, int pid);

void
thread_set_parent(thread_t parent, int pid)
{
	if (thread_is_64bit(parent)) {
		x86_saved_state64_t *iss64;

		iss64 = USER_REGS64(parent);

		iss64->rax = pid;
		iss64->rdx = 0;
		iss64->isf.rflags &= ~EFL_CF;
	} else {
		x86_saved_state32_t *iss32;

		iss32 = USER_REGS32(parent);

		iss32->eax = pid;
		iss32->edx = 0;
		iss32->efl &= ~EFL_CF;
	}
}

/*
 * System Call handling code
 */

extern long fuword(vm_offset_t);

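/*
 * machdep_syscall:
 *
 * Handler for 32-bit machine-dependent system calls.  The call number
 * arrives in eax and is used to index machdep_call_table; any arguments
 * are copied in from the user stack.  On completion the handler returns
 * to user space via thread_exception_return() rather than to its caller.
 */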
void
machdep_syscall(x86_saved_state_t *state)
{
	int args[machdep_call_count];
	int trapno;
	int nargs;
	machdep_call_t *entry;
	x86_saved_state32_t *regs;

	assert(is_saved_state32(state));
	regs = saved_state32(state);

	trapno = regs->eax;
#if DEBUG_TRACE
	kprintf("machdep_syscall(0x%08x) code=%d\n", regs, trapno);
#endif

	if (trapno < 0 || trapno >= machdep_call_count) {
		regs->eax = (unsigned int)kern_invalid(NULL);

		thread_exception_return();
		/* NOTREACHED */
	}
	entry = &machdep_call_table[trapno];
	nargs = entry->nargs;

	if (nargs != 0) {
		if (copyin((user_addr_t) regs->uesp + sizeof (int),
			   (char *) args, (nargs * sizeof (int)))) {
			regs->eax = KERN_INVALID_ADDRESS;

			thread_exception_return();
			/* NOTREACHED */
		}
	}
	switch (nargs) {
	case 0:
		regs->eax = (*entry->routine.args_0)();
		break;
	case 1:
		regs->eax = (*entry->routine.args_1)(args[0]);
		break;
	case 2:
		regs->eax = (*entry->routine.args_2)(args[0], args[1]);
		break;
	case 3:
		if (!entry->bsd_style)
			regs->eax = (*entry->routine.args_3)(args[0], args[1], args[2]);
		else {
			int error;
			uint32_t rval;

			error = (*entry->routine.args_bsd_3)(&rval, args[0], args[1], args[2]);
			if (error) {
				regs->eax = error;
				regs->efl |= EFL_CF;	/* carry bit */
			} else {
				regs->eax = rval;
				regs->efl &= ~EFL_CF;
			}
		}
		break;
	case 4:
		regs->eax = (*entry->routine.args_4)(args[0], args[1], args[2], args[3]);
		break;

	default:
		panic("machdep_syscall: too many args");
	}
	if (current_thread()->funnel_lock)
		(void) thread_funnel_set(current_thread()->funnel_lock, FALSE);

	throttle_lowpri_io(TRUE);

	thread_exception_return();
	/* NOTREACHED */
}

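/*
 * machdep_syscall64:
 *
 * 64-bit variant of the above.  Arguments arrive in registers rather
 * than on the user stack, so only zero- and one-argument calls are
 * dispatched here (the single argument is taken from rdi).
 */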
void
machdep_syscall64(x86_saved_state_t *state)
{
	int trapno;
	machdep_call_t *entry;
	x86_saved_state64_t *regs;

	assert(is_saved_state64(state));
	regs = saved_state64(state);

	trapno = regs->rax & SYSCALL_NUMBER_MASK;

	if (trapno < 0 || trapno >= machdep_call_count) {
		regs->rax = (unsigned int)kern_invalid(NULL);

		thread_exception_return();
		/* NOTREACHED */
	}
	entry = &machdep_call_table64[trapno];

	switch (entry->nargs) {
	case 0:
		regs->rax = (*entry->routine.args_0)();
		break;
	case 1:
		regs->rax = (*entry->routine.args64_1)(regs->rdi);
		break;
	default:
		panic("machdep_syscall64: too many args");
	}
	if (current_thread()->funnel_lock)
		(void) thread_funnel_set(current_thread()->funnel_lock, FALSE);

	throttle_lowpri_io(TRUE);

	thread_exception_return();
	/* NOTREACHED */
}

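/*
 * thread_compose_cthread_desc:
 *
 * Build a user-mode data segment descriptor based at 'addr', store it
 * in the pcb, and install it in the LDT slot reserved for the cthread
 * (USER_CTHREAD), so user code can reach its per-thread data through
 * that selector.
 */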
kern_return_t
thread_compose_cthread_desc(unsigned int addr, pcb_t pcb)
{
	struct real_descriptor desc;

	mp_disable_preemption();

	desc.limit_low = 1;
	desc.limit_high = 0;
	desc.base_low = addr & 0xffff;
	desc.base_med = (addr >> 16) & 0xff;
	desc.base_high = (addr >> 24) & 0xff;
	desc.access = ACC_P|ACC_PL_U|ACC_DATA_W;
	desc.granularity = SZ_32|SZ_G;
	pcb->cthread_desc = desc;
	*ldt_desc_p(USER_CTHREAD) = desc;

	mp_enable_preemption();

	return (KERN_SUCCESS);
}

kern_return_t
thread_set_cthread_self(uint32_t self)
{
	current_thread()->machine.pcb->cthread_self = (uint64_t) self;

	return (KERN_SUCCESS);
}

kern_return_t
thread_get_cthread_self(void)
{
	return ((kern_return_t)current_thread()->machine.pcb->cthread_self);
}

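/*
 * thread_fast_set_cthread_self:
 *
 * Record the user's cthread (per-thread data) pointer, rebuild the
 * USER_CTHREAD descriptor to be based at it, and point the saved %gs
 * at that selector.  The selector is returned so it can be handed
 * back to user space.
 */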
kern_return_t
thread_fast_set_cthread_self(uint32_t self)
{
	pcb_t pcb;
	x86_saved_state32_t *iss;

	pcb = (pcb_t)current_thread()->machine.pcb;
	thread_compose_cthread_desc(self, pcb);
	pcb->cthread_self = (uint64_t) self;	/* preserve old func too */
	iss = saved_state32(pcb->iss);
	iss->gs = USER_CTHREAD;

	return (USER_CTHREAD);
}

void
thread_set_cthreadself(thread_t thread, uint64_t pself, int isLP64)
{
	if (isLP64 == 0) {
		pcb_t pcb;
		x86_saved_state32_t *iss;

		pcb = (pcb_t)thread->machine.pcb;
		thread_compose_cthread_desc(pself, pcb);
		pcb->cthread_self = (uint64_t) pself;	/* preserve old func too */
		iss = saved_state32(pcb->iss);
		iss->gs = USER_CTHREAD;
	} else {
		pcb_t pcb;
		x86_saved_state64_t *iss;

		pcb = thread->machine.pcb;

		/* check for canonical address, set 0 otherwise */
		if (!IS_USERADDR64_CANONICAL(pself))
			pself = 0ULL;
		pcb->cthread_self = pself;

		/* XXX for 64-in-32 */
		iss = saved_state64(pcb->iss);
		iss->gs = USER_CTHREAD;
		thread_compose_cthread_desc((uint32_t) pself, pcb);
	}
}

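/*
 * thread_fast_set_cthread_self64:
 *
 * 64-bit counterpart of thread_fast_set_cthread_self.  The TLS base is
 * recorded both in the pcb and in the per-cpu cu_user_gs_base, which is
 * used when establishing the user's %gs base on return to user mode.
 */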
kern_return_t
thread_fast_set_cthread_self64(uint64_t self)
{
	pcb_t pcb;
	x86_saved_state64_t *iss;

	pcb = current_thread()->machine.pcb;

	/* check for canonical address, set 0 otherwise */
	if (!IS_USERADDR64_CANONICAL(self))
		self = 0ULL;
	pcb->cthread_self = self;
	current_cpu_datap()->cpu_uber.cu_user_gs_base = self;

	/* XXX for 64-in-32 */
	iss = saved_state64(pcb->iss);
	iss->gs = USER_CTHREAD;
	thread_compose_cthread_desc((uint32_t) self, pcb);

	return (USER_CTHREAD);
}

/*
 * thread_set_user_ldt is the interface for the user-level settable
 * ldt entry feature.  Allowing a user to create arbitrary ldt entries
 * seems to be too large of a security hole, so instead this mechanism
 * is in place to allow user-level processes to have an ldt entry that
 * can be used in conjunction with the FS register.
 *
 * Swapping occurs inside the pcb.c file along with initialization
 * when a thread is created.  The basic functioning theory is that the
 * pcb->uldt_selector variable will contain either 0, meaning the
 * process has not set up any entry, or the selector to be used in
 * the FS register.  pcb->uldt_desc contains the actual descriptor the
 * user has set up, stored in machine-usable ldt format.
 *
 * Currently one entry is shared by all threads (USER_SETTABLE), but
 * this could be changed in the future by changing how this routine
 * allocates the selector.  There seems to be no real reason at this
 * time to have this added feature, but in the future it might be
 * needed.
 *
 * 'address' is the linear address of the start of the data area,
 * 'size' is the size in bytes of the area, and 'flags' should always
 * be set to 0 for now; in the future it could be used to set R/W
 * permissions or other functions.  Currently the segment is created
 * as a data segment up to 1 megabyte in size with full read/write
 * permissions only.
 *
 * This call returns the segment selector or -1 if any error occurs.
 */
kern_return_t
thread_set_user_ldt(uint32_t address, uint32_t size, uint32_t flags)
{
	pcb_t pcb;
	struct fake_descriptor temp;
	int mycpu;

	if (flags != 0)
		return -1;		// flags not supported
	if (size > 0xFFFFF)
		return -1;		// size too big, 1 meg is the limit

	mp_disable_preemption();
	mycpu = cpu_number();

	// create a "fake" descriptor so we can use fix_desc()
	// to build a real one...
	// 32 bit default operation size
	// standard read/write perms for a data segment
	pcb = (pcb_t)current_thread()->machine.pcb;
	temp.offset = address;
	temp.lim_or_seg = size;
	temp.size_or_wdct = SZ_32;
	temp.access = ACC_P|ACC_PL_U|ACC_DATA_W;

	// turn this into a real descriptor
	fix_desc(&temp, 1);

	// set up our data in the pcb
	pcb->uldt_desc = *(struct real_descriptor *)&temp;
	pcb->uldt_selector = USER_SETTABLE;	// set the selector value

	// now set it up in the current table...
	*ldt_desc_p(USER_SETTABLE) = *(struct real_descriptor *)&temp;

	mp_enable_preemption();

	return USER_SETTABLE;
}
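/*
 * Illustrative sketch only (not part of this file's interface): once
 * user code has obtained the selector returned above, it could load it
 * into %fs and address the area with a segment override, e.g.
 *
 *	unsigned short sel = ...;	// selector returned to user space
 *	int first_word;
 *	__asm__ volatile("mov %0, %%fs" : : "r" (sel));
 *	__asm__ volatile("movl %%fs:0, %0" : "=r" (first_word));
 *
 * How the selector actually reaches user space (the machdep system
 * call path) is outside the scope of this routine.
 */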
#endif	/* MACH_BSD */


typedef kern_return_t (*mach_call_t)(void *);

struct mach_call_args {
	syscall_arg_t arg1;
	syscall_arg_t arg2;
	syscall_arg_t arg3;
	syscall_arg_t arg4;
	syscall_arg_t arg5;
	syscall_arg_t arg6;
	syscall_arg_t arg7;
	syscall_arg_t arg8;
	syscall_arg_t arg9;
};

static kern_return_t
mach_call_arg_munger32(uint32_t sp, int nargs, int call_number, struct mach_call_args *args);

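/*
 * mach_call_arg_munger32:
 *
 * Copy a 32-bit Mach trap's arguments from the user stack into a
 * mach_call_args structure.  Traps 90 (mach_wait_until) and 93
 * (mk_timer_arm) take 64-bit arguments that arrive as pairs of 32-bit
 * words and are reassembled here.
 */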
static kern_return_t
mach_call_arg_munger32(uint32_t sp, int nargs, int call_number, struct mach_call_args *args)
{
	unsigned int args32[9];

	if (copyin((user_addr_t)(sp + sizeof(int)), (char *)args32, nargs * sizeof (int)))
		return KERN_INVALID_ARGUMENT;

	switch (nargs) {
	case 9: args->arg9 = args32[8];
	case 8: args->arg8 = args32[7];
	case 7: args->arg7 = args32[6];
	case 6: args->arg6 = args32[5];
	case 5: args->arg5 = args32[4];
	case 4: args->arg4 = args32[3];
	case 3: args->arg3 = args32[2];
	case 2: args->arg2 = args32[1];
	case 1: args->arg1 = args32[0];
	}
	if (call_number == 90) {
		/* munge_l for mach_wait_until_trap() */
		args->arg1 = (((uint64_t)(args32[0])) | ((((uint64_t)(args32[1])) << 32)));
	}
	if (call_number == 93) {
		/* munge_wl for mk_timer_arm_trap() */
		args->arg2 = (((uint64_t)(args32[1])) | ((((uint64_t)(args32[2])) << 32)));
	}

	return KERN_SUCCESS;
}


__private_extern__ void mach_call_munger(x86_saved_state_t *state);

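/*
 * mach_call_munger:
 *
 * Common entry point for 32-bit Mach traps.  Mach trap numbers are
 * passed as negative values in eax; the handler validates the number,
 * munges the arguments from the user stack, invokes the trap function,
 * and exits via thread_exception_return() rather than returning to its
 * caller.
 */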
void
mach_call_munger(x86_saved_state_t *state)
{
	int argc;
	int call_number;
	mach_call_t mach_call;
	kern_return_t retval;
	struct mach_call_args args = { 0, 0, 0, 0, 0, 0, 0, 0, 0 };
	x86_saved_state32_t *regs;

	assert(is_saved_state32(state));
	regs = saved_state32(state);

	call_number = -(regs->eax);
#if DEBUG_TRACE
	kprintf("mach_call_munger(0x%08x) code=%d\n", regs, call_number);
#endif

	if (call_number < 0 || call_number >= mach_trap_count) {
		i386_exception(EXC_SYSCALL, call_number, 1);
		/* NOTREACHED */
	}
	mach_call = (mach_call_t)mach_trap_table[call_number].mach_trap_function;

	if (mach_call == (mach_call_t)kern_invalid) {
		i386_exception(EXC_SYSCALL, call_number, 1);
		/* NOTREACHED */
	}

	argc = mach_trap_table[call_number].mach_trap_arg_count;
	if (argc) {
		retval = mach_call_arg_munger32(regs->uesp, argc, call_number, &args);
		if (retval != KERN_SUCCESS) {
			regs->eax = retval;

			thread_exception_return();
			/* NOTREACHED */
		}
	}
	KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_EXCP_SC, (call_number)) | DBG_FUNC_START,
			(int) args.arg1, (int) args.arg2, (int) args.arg3, (int) args.arg4, 0);

	retval = mach_call(&args);

	KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_EXCP_SC, (call_number)) | DBG_FUNC_END,
			retval, 0, 0, 0, 0);
	regs->eax = retval;

	throttle_lowpri_io(TRUE);

	thread_exception_return();
	/* NOTREACHED */
}

__private_extern__ void mach_call_munger64(x86_saved_state_t *state);

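/*
 * mach_call_munger64:
 *
 * Entry point for 64-bit Mach traps.  The first six arguments arrive
 * in registers (rdi, rsi, rdx, r10, r8, r9); any additional arguments
 * are copied in from the user stack just above the return address.
 */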
void
mach_call_munger64(x86_saved_state_t *state)
{
	int call_number;
	int argc;
	mach_call_t mach_call;
	x86_saved_state64_t *regs;

	assert(is_saved_state64(state));
	regs = saved_state64(state);

	call_number = regs->rax & SYSCALL_NUMBER_MASK;

	KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_EXCP_SC,
			(call_number)) | DBG_FUNC_START,
			(int) regs->rdi, (int) regs->rsi,
			(int) regs->rdx, (int) regs->r10, 0);

	if (call_number < 0 || call_number >= mach_trap_count) {
		i386_exception(EXC_SYSCALL, regs->rax, 1);
		/* NOTREACHED */
	}
	mach_call = (mach_call_t)mach_trap_table[call_number].mach_trap_function;

	if (mach_call == (mach_call_t)kern_invalid) {
		i386_exception(EXC_SYSCALL, regs->rax, 1);
		/* NOTREACHED */
	}
	argc = mach_trap_table[call_number].mach_trap_arg_count;

	if (argc > 6) {
		int copyin_count;

		copyin_count = (argc - 6) * sizeof(uint64_t);

		if (copyin((user_addr_t)(regs->isf.rsp + sizeof(user_addr_t)), (char *)&regs->v_arg6, copyin_count)) {
			regs->rax = KERN_INVALID_ARGUMENT;

			thread_exception_return();
			/* NOTREACHED */
		}
	}
	regs->rax = (uint64_t)mach_call((void *)(&regs->rdi));

	KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_EXCP_SC,
			(call_number)) | DBG_FUNC_END,
			(int)regs->rax, 0, 0, 0, 0);

	throttle_lowpri_io(TRUE);

	thread_exception_return();
	/* NOTREACHED */
}

/*
 * thread_setuserstack:
 *
 * Sets the user stack pointer into the machine
 * dependent thread state info.
 */
void
thread_setuserstack(
	thread_t thread,
	mach_vm_address_t user_stack)
{
	if (thread_is_64bit(thread)) {
		x86_saved_state64_t *iss64;

		iss64 = USER_REGS64(thread);

		iss64->isf.rsp = (uint64_t)user_stack;
	} else {
		x86_saved_state32_t *iss32;

		iss32 = USER_REGS32(thread);

		iss32->uesp = CAST_DOWN(unsigned int, user_stack);
	}
}

/*
 * thread_adjuserstack:
 *
 * Returns the adjusted user stack pointer from the machine
 * dependent thread state info.  Used for small (<2G) deltas.
 */
uint64_t
thread_adjuserstack(
	thread_t thread,
	int adjust)
{
	if (thread_is_64bit(thread)) {
		x86_saved_state64_t *iss64;

		iss64 = USER_REGS64(thread);

		iss64->isf.rsp += adjust;

		return iss64->isf.rsp;
	} else {
		x86_saved_state32_t *iss32;

		iss32 = USER_REGS32(thread);

		iss32->uesp += adjust;

		return CAST_USER_ADDR_T(iss32->uesp);
	}
}

/*
 * thread_setentrypoint:
 *
 * Sets the user PC into the machine
 * dependent thread state info.
 */
void
thread_setentrypoint(thread_t thread, mach_vm_address_t entry)
{
	if (thread_is_64bit(thread)) {
		x86_saved_state64_t *iss64;

		iss64 = USER_REGS64(thread);

		iss64->isf.rip = (uint64_t)entry;
	} else {
		x86_saved_state32_t *iss32;

		iss32 = USER_REGS32(thread);

		iss32->eip = CAST_DOWN(unsigned int, entry);
	}
}

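/*
 * thread_setsinglestep:
 *
 * Enable or disable single-stepping for the thread by setting or
 * clearing the trap flag (EFL_TF) in its saved user flags.
 */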
kern_return_t
thread_setsinglestep(thread_t thread, int on)
{
	if (thread_is_64bit(thread)) {
		x86_saved_state64_t *iss64;

		iss64 = USER_REGS64(thread);

		if (on)
			iss64->isf.rflags |= EFL_TF;
		else
			iss64->isf.rflags &= ~EFL_TF;
	} else {
		x86_saved_state32_t *iss32;

		iss32 = USER_REGS32(thread);

		if (on)
			iss32->efl |= EFL_TF;
		else
			iss32->efl &= ~EFL_TF;
	}

	return (KERN_SUCCESS);
}


/* XXX this should be a struct savearea so that CHUD will work better on x86 */
void *
find_user_regs(thread_t thread)
{
	return USER_STATE(thread);
}

void *
get_user_regs(thread_t th)
{
	if (th->machine.pcb)
		return (USER_STATE(th));
	else {
		printf("[get_user_regs: thread does not have pcb]");
		return NULL;
	}
}

#if CONFIG_DTRACE
/*
 * DTrace would like to have a peek at the kernel interrupt state, if available.
 * Based on osfmk/chud/i386/chud_thread_i386.c:chudxnu_thread_get_state(), which see.
 */
x86_saved_state32_t *find_kern_regs(thread_t);

x86_saved_state32_t *
find_kern_regs(thread_t thread)
{
	if (thread == current_thread() &&
	    NULL != current_cpu_datap()->cpu_int_state &&
	    !(USER_STATE(thread) == current_cpu_datap()->cpu_int_state &&
	      current_cpu_datap()->cpu_interrupt_level == 1)) {

		return saved_state32(current_cpu_datap()->cpu_int_state);
	} else {
		return NULL;
	}
}

vm_offset_t dtrace_get_cpu_int_stack_top(void);

vm_offset_t
dtrace_get_cpu_int_stack_top(void)
{
	return current_cpu_datap()->cpu_int_stack_top;
}
#endif