[apple/xnu.git] / osfmk / i386 / bsd_i386.c (xnu-1228.0.2)
/*
 * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#ifdef MACH_BSD
#include <mach_rt.h>
#include <mach_debug.h>
#include <mach_ldebug.h>

#include <mach/kern_return.h>
#include <mach/mach_traps.h>
#include <mach/thread_status.h>
#include <mach/vm_param.h>

#include <kern/counters.h>
#include <kern/cpu_data.h>
#include <kern/mach_param.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/sched_prim.h>
#include <kern/misc_protos.h>
#include <kern/assert.h>
#include <kern/spl.h>
#include <kern/syscall_sw.h>
#include <ipc/ipc_port.h>
#include <vm/vm_kern.h>
#include <vm/pmap.h>

#include <i386/cpu_data.h>
#include <i386/cpu_number.h>
#include <i386/thread.h>
#include <i386/eflags.h>
#include <i386/proc_reg.h>
#include <i386/seg.h>
#include <i386/tss.h>
#include <i386/user_ldt.h>
#include <i386/fpu.h>
#include <i386/machdep_call.h>
#include <i386/misc_protos.h>
#include <i386/mp_desc.h>
#include <i386/vmparam.h>
#include <i386/trap.h>
#include <mach/i386/syscall_sw.h>
#include <sys/syscall.h>
#include <sys/kdebug.h>
#include <sys/errno.h>
#include <../bsd/sys/sysent.h>

kern_return_t
thread_userstack(
    thread_t,
    int,
    thread_state_t,
    unsigned int,
    mach_vm_offset_t *,
    int *
);

kern_return_t
thread_entrypoint(
    thread_t,
    int,
    thread_state_t,
    unsigned int,
    mach_vm_offset_t *
);

void *find_user_regs(thread_t);

unsigned int get_msr_exportmask(void);

unsigned int get_msr_nbits(void);

unsigned int get_msr_rbits(void);

kern_return_t
thread_compose_cthread_desc(unsigned int addr, pcb_t pcb);

void IOSleep(int);

void thread_set_cthreadself(thread_t thread, uint64_t pself, int isLP64);
/*
 * thread_userstack:
 *
 * Return the user stack pointer from the machine
 * dependent thread state info.
 */
kern_return_t
thread_userstack(
    __unused thread_t thread,
    int flavor,
    thread_state_t tstate,
    __unused unsigned int count,
    user_addr_t *user_stack,
    int *customstack
)
{
    if (customstack)
        *customstack = 0;

    switch (flavor) {
    case x86_THREAD_STATE32:
        {
            x86_thread_state32_t *state25;

            state25 = (x86_thread_state32_t *) tstate;

            if (state25->esp)
                *user_stack = state25->esp;
            else
                *user_stack = VM_USRSTACK32;
            if (customstack)
                *customstack = (state25->esp != 0);
            break;
        }

    case x86_THREAD_STATE64:
        {
            x86_thread_state64_t *state25;

            state25 = (x86_thread_state64_t *) tstate;

            if (state25->rsp)
                *user_stack = state25->rsp;
            else
                *user_stack = VM_USRSTACK64;
            if (customstack)
                *customstack = (state25->rsp != 0);
            break;
        }

    default:
        return (KERN_INVALID_ARGUMENT);
    }

    return (KERN_SUCCESS);
}

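/*
 * thread_entrypoint:
 *
 * Return the user entry point (initial PC) from the machine
 * dependent thread state info, falling back to the lowest
 * user address when the state supplies none.
 */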
kern_return_t
thread_entrypoint(
    __unused thread_t thread,
    int flavor,
    thread_state_t tstate,
    __unused unsigned int count,
    mach_vm_offset_t *entry_point
)
{
    /*
     * Set a default.
     */
    if (*entry_point == 0)
        *entry_point = VM_MIN_ADDRESS;

    switch (flavor) {
    case x86_THREAD_STATE32:
        {
            x86_thread_state32_t *state25;

            state25 = (x86_thread_state32_t *) tstate;
            *entry_point = state25->eip ? state25->eip : VM_MIN_ADDRESS;
            break;
        }

    case x86_THREAD_STATE64:
        {
            x86_thread_state64_t *state25;

            state25 = (x86_thread_state64_t *) tstate;
            *entry_point = state25->rip ? state25->rip : VM_MIN_ADDRESS64;
            break;
        }
    }
    return (KERN_SUCCESS);
}


/*
 * Duplicate parent state in child
 * for U**X fork.
 */
kern_return_t
machine_thread_dup(
    thread_t parent,
    thread_t child
)
{
    pcb_t parent_pcb;
    pcb_t child_pcb;

    if ((child_pcb = child->machine.pcb) == NULL ||
        (parent_pcb = parent->machine.pcb) == NULL)
        return (KERN_FAILURE);
    /*
     * Copy over the x86_saved_state registers
     */
    if (cpu_mode_is64bit()) {
        if (thread_is_64bit(parent))
            bcopy(USER_REGS64(parent), USER_REGS64(child), sizeof(x86_saved_state64_t));
        else
            bcopy(USER_REGS32(parent), USER_REGS32(child), sizeof(x86_saved_state_compat32_t));
    } else
        bcopy(USER_REGS32(parent), USER_REGS32(child), sizeof(x86_saved_state32_t));

    /*
     * Check to see if parent is using floating point
     * and if so, copy the registers to the child
     */
    fpu_dup_fxstate(parent, child);

#ifdef MACH_BSD
    /*
     * Copy the parent's cthread id and USER_CTHREAD descriptor, if 32-bit.
     */
    child_pcb->cthread_self = parent_pcb->cthread_self;
    if (!thread_is_64bit(parent))
        child_pcb->cthread_desc = parent_pcb->cthread_desc;

    /*
     * FIXME - should a user specified LDT, TSS and V86 info
     * be duplicated as well?? - probably not.
     */
    // Duplicate any user LDT entry that was set; this seems appropriate.
    if (parent_pcb->uldt_selector != 0) {
        child_pcb->uldt_selector = parent_pcb->uldt_selector;
        child_pcb->uldt_desc = parent_pcb->uldt_desc;
    }
#endif

    return (KERN_SUCCESS);
}

/*
 * FIXME - thread_set_child
 */

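/*
 * Set up the fork() return values in the saved user state: eax/rax
 * holds the pid, edx/rdx distinguishes the child (1) from the
 * parent (0), and the carry flag is cleared to indicate success.
 */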
void thread_set_child(thread_t child, int pid);
void
thread_set_child(thread_t child, int pid)
{
    if (thread_is_64bit(child)) {
        x86_saved_state64_t *iss64;

        iss64 = USER_REGS64(child);

        iss64->rax = pid;
        iss64->rdx = 1;
        iss64->isf.rflags &= ~EFL_CF;
    } else {
        x86_saved_state32_t *iss32;

        iss32 = USER_REGS32(child);

        iss32->eax = pid;
        iss32->edx = 1;
        iss32->efl &= ~EFL_CF;
    }
}


void thread_set_parent(thread_t parent, int pid);

void
thread_set_parent(thread_t parent, int pid)
{
    if (thread_is_64bit(parent)) {
        x86_saved_state64_t *iss64;

        iss64 = USER_REGS64(parent);

        iss64->rax = pid;
        iss64->rdx = 0;
        iss64->isf.rflags &= ~EFL_CF;
    } else {
        x86_saved_state32_t *iss32;

        iss32 = USER_REGS32(parent);

        iss32->eax = pid;
        iss32->edx = 0;
        iss32->efl &= ~EFL_CF;
    }
}


/*
 * System Call handling code
 */

extern long fuword(vm_offset_t);

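/*
 * machdep_syscall:
 *
 * Dispatch a machine-dependent system call from a 32-bit user
 * thread.  The call number arrives in eax; up to four int-sized
 * arguments are copied in from the user stack, just above the
 * return address slot.
 */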
void
machdep_syscall(x86_saved_state_t *state)
{
    int args[machdep_call_count];
    int trapno;
    int nargs;
    machdep_call_t *entry;
    x86_saved_state32_t *regs;

    assert(is_saved_state32(state));
    regs = saved_state32(state);

    trapno = regs->eax;
#if DEBUG_TRACE
    kprintf("machdep_syscall(0x%08x) code=%d\n", regs, trapno);
#endif

    if (trapno < 0 || trapno >= machdep_call_count) {
        regs->eax = (unsigned int)kern_invalid(NULL);

        thread_exception_return();
        /* NOTREACHED */
    }
    entry = &machdep_call_table[trapno];
    nargs = entry->nargs;

    if (nargs != 0) {
        if (copyin((user_addr_t) regs->uesp + sizeof (int),
            (char *) args, (nargs * sizeof (int)))) {
            regs->eax = KERN_INVALID_ADDRESS;

            thread_exception_return();
            /* NOTREACHED */
        }
    }
    switch (nargs) {
    case 0:
        regs->eax = (*entry->routine.args_0)();
        break;
    case 1:
        regs->eax = (*entry->routine.args_1)(args[0]);
        break;
    case 2:
        regs->eax = (*entry->routine.args_2)(args[0], args[1]);
        break;
    case 3:
        if (!entry->bsd_style)
            regs->eax = (*entry->routine.args_3)(args[0], args[1], args[2]);
        else {
            int error;
            uint32_t rval;

            error = (*entry->routine.args_bsd_3)(&rval, args[0], args[1], args[2]);
            if (error) {
                regs->eax = error;
                regs->efl |= EFL_CF;    /* carry bit */
            } else {
                regs->eax = rval;
                regs->efl &= ~EFL_CF;
            }
        }
        break;
    case 4:
        regs->eax = (*entry->routine.args_4)(args[0], args[1], args[2], args[3]);
        break;

    default:
        panic("machdep_syscall: too many args");
    }
    if (current_thread()->funnel_lock)
        (void) thread_funnel_set(current_thread()->funnel_lock, FALSE);

    thread_exception_return();
    /* NOTREACHED */
}

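/*
 * machdep_syscall64:
 *
 * 64-bit variant of machdep_syscall.  The call number arrives in
 * rax (masked with SYSCALL_NUMBER_MASK); at most one argument is
 * supported, passed in rdi per the 64-bit syscall convention.
 */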
void
machdep_syscall64(x86_saved_state_t *state)
{
    int trapno;
    machdep_call_t *entry;
    x86_saved_state64_t *regs;

    assert(is_saved_state64(state));
    regs = saved_state64(state);

    trapno = regs->rax & SYSCALL_NUMBER_MASK;

    if (trapno < 0 || trapno >= machdep_call_count) {
        regs->rax = (unsigned int)kern_invalid(NULL);

        thread_exception_return();
        /* NOTREACHED */
    }
    entry = &machdep_call_table64[trapno];

    switch (entry->nargs) {
    case 0:
        regs->rax = (*entry->routine.args_0)();
        break;
    case 1:
        regs->rax = (*entry->routine.args64_1)(regs->rdi);
        break;
    default:
        panic("machdep_syscall64: too many args");
    }
    if (current_thread()->funnel_lock)
        (void) thread_funnel_set(current_thread()->funnel_lock, FALSE);

    thread_exception_return();
    /* NOTREACHED */
}

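/*
 * thread_compose_cthread_desc:
 *
 * Build a small user-mode read/write data segment descriptor based
 * at 'addr' (an 8KB limit, given the page-granular limit of 1),
 * store it in the pcb, and install it in the descriptor table slot
 * addressed by USER_CTHREAD.
 */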
kern_return_t
thread_compose_cthread_desc(unsigned int addr, pcb_t pcb)
{
    struct real_descriptor desc;

    mp_disable_preemption();

    desc.limit_low = 1;
    desc.limit_high = 0;
    desc.base_low = addr & 0xffff;
    desc.base_med = (addr >> 16) & 0xff;
    desc.base_high = (addr >> 24) & 0xff;
    desc.access = ACC_P|ACC_PL_U|ACC_DATA_W;
    desc.granularity = SZ_32|SZ_G;
    pcb->cthread_desc = desc;
    *ldt_desc_p(USER_CTHREAD) = desc;

    mp_enable_preemption();

    return (KERN_SUCCESS);
}

kern_return_t
thread_set_cthread_self(uint32_t self)
{
    current_thread()->machine.pcb->cthread_self = (uint64_t) self;

    return (KERN_SUCCESS);
}

kern_return_t
thread_get_cthread_self(void)
{
    return ((kern_return_t)current_thread()->machine.pcb->cthread_self);
}

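/*
 * thread_fast_set_cthread_self:
 *
 * Set the user-level thread-local storage base for a 32-bit thread:
 * build a descriptor based at 'self' and point the saved gs at
 * USER_CTHREAD so it is loaded on return to user space.  Returns
 * the selector for user code to reload.
 */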
kern_return_t
thread_fast_set_cthread_self(uint32_t self)
{
    pcb_t pcb;
    x86_saved_state32_t *iss;

    pcb = (pcb_t)current_thread()->machine.pcb;
    thread_compose_cthread_desc(self, pcb);
    pcb->cthread_self = (uint64_t) self;    /* preserve old func too */
    iss = saved_state32(pcb->iss);
    iss->gs = USER_CTHREAD;

    return (USER_CTHREAD);
}

void
thread_set_cthreadself(thread_t thread, uint64_t pself, int isLP64)
{
    if (isLP64 == 0) {
        pcb_t pcb;
        x86_saved_state32_t *iss;

        pcb = (pcb_t)thread->machine.pcb;
        thread_compose_cthread_desc((uint32_t) pself, pcb);
        pcb->cthread_self = (uint64_t) pself;    /* preserve old func too */
        iss = saved_state32(pcb->iss);
        iss->gs = USER_CTHREAD;
    } else {
        pcb_t pcb;
        x86_saved_state64_t *iss;

        pcb = thread->machine.pcb;

        /* check for canonical address, set 0 otherwise */
        if (!IS_USERADDR64_CANONICAL(pself))
            pself = 0ULL;
        pcb->cthread_self = pself;

        /* XXX for 64-in-32 */
        iss = saved_state64(pcb->iss);
        iss->gs = USER_CTHREAD;
        thread_compose_cthread_desc((uint32_t) pself, pcb);
    }
}

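/*
 * thread_fast_set_cthread_self64:
 *
 * 64-bit variant: the TLS base must be a canonical user address
 * (otherwise it is forced to 0) and is mirrored into the per-cpu
 * cu_user_gs_base so it can be restored as the user GS base.
 */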
kern_return_t
thread_fast_set_cthread_self64(uint64_t self)
{
    pcb_t pcb;
    x86_saved_state64_t *iss;

    pcb = current_thread()->machine.pcb;

    /* check for canonical address, set 0 otherwise */
    if (!IS_USERADDR64_CANONICAL(self))
        self = 0ULL;
    pcb->cthread_self = self;
    current_cpu_datap()->cpu_uber.cu_user_gs_base = self;

    /* XXX for 64-in-32 */
    iss = saved_state64(pcb->iss);
    iss->gs = USER_CTHREAD;
    thread_compose_cthread_desc((uint32_t) self, pcb);

    return (USER_CTHREAD);
}

/*
 * thread_set_user_ldt is the interface for the user-level settable
 * ldt entry feature.  Allowing a user to create arbitrary ldt
 * entries seems to be too large a security hole, so instead this
 * mechanism is in place to allow user-level processes to have an
 * ldt entry that can be used in conjunction with the FS register.
 *
 * Swapping occurs inside the pcb.c file along with initialization
 * when a thread is created.  The basic theory of operation is that
 * the pcb->uldt_selector variable will contain either 0, meaning
 * the process has not set up any entry, or the selector to be used
 * in the FS register.  pcb->uldt_desc contains the actual descriptor
 * the user has set up, stored in machine-usable ldt format.
 *
 * Currently one entry is shared by all threads (USER_SETTABLE), but
 * this could be changed in the future by changing how this routine
 * allocates the selector.  There seems to be no real reason at this
 * time to have this added feature, but in the future it might be
 * needed.
 *
 * address is the linear address of the start of the data area.
 * size is the size of the area in bytes.  flags should always be
 * set to 0 for now; in the future it could be used to set R/W
 * permissions or other functions.  Currently the segment is created
 * as a data segment, up to 1 megabyte in size, with full read/write
 * permissions only.
 *
 * This call returns the segment selector or -1 if any error occurs.
 */
kern_return_t
thread_set_user_ldt(uint32_t address, uint32_t size, uint32_t flags)
{
    pcb_t pcb;
    struct fake_descriptor temp;
    int mycpu;

    if (flags != 0)
        return -1;    // flags not supported
    if (size > 0xFFFFF)
        return -1;    // size too big, 1 meg is the limit

    mp_disable_preemption();
    mycpu = cpu_number();

    // create a "fake" descriptor so we can use fix_desc()
    // to build a real one...
    // 32 bit default operation size
    // standard read/write perms for a data segment
    pcb = (pcb_t)current_thread()->machine.pcb;
    temp.offset = address;
    temp.lim_or_seg = size;
    temp.size_or_wdct = SZ_32;
    temp.access = ACC_P|ACC_PL_U|ACC_DATA_W;

    // turn this into a real descriptor
    fix_desc(&temp, 1);

    // set up our data in the pcb
    pcb->uldt_desc = *(struct real_descriptor *)&temp;
    pcb->uldt_selector = USER_SETTABLE;    // set the selector value

    // now set it up in the current table...
    *ldt_desc_p(USER_SETTABLE) = *(struct real_descriptor *)&temp;

    mp_enable_preemption();

    return USER_SETTABLE;
}

#endif /* MACH_BSD */


typedef kern_return_t (*mach_call_t)(void *);

struct mach_call_args {
    syscall_arg_t arg1;
    syscall_arg_t arg2;
    syscall_arg_t arg3;
    syscall_arg_t arg4;
    syscall_arg_t arg5;
    syscall_arg_t arg6;
    syscall_arg_t arg7;
    syscall_arg_t arg8;
    syscall_arg_t arg9;
};

static kern_return_t
mach_call_arg_munger32(uint32_t sp, int nargs, int call_number, struct mach_call_args *args);

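/*
 * mach_call_arg_munger32:
 *
 * Copy 'nargs' 32-bit argument words from the 32-bit user stack and
 * widen them into the 64-bit slots of 'args'.  Two traps take a
 * 64-bit argument split across two words; those are reassembled
 * here by call number.
 */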
static kern_return_t
mach_call_arg_munger32(uint32_t sp, int nargs, int call_number, struct mach_call_args *args)
{
    unsigned int args32[9];

    if (copyin((user_addr_t)(sp + sizeof(int)), (char *)args32, nargs * sizeof (int)))
        return KERN_INVALID_ARGUMENT;

    switch (nargs) {
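    /* Deliberate fall-through: entering at case N copies arguments N..1. */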
    case 9: args->arg9 = args32[8];
    case 8: args->arg8 = args32[7];
    case 7: args->arg7 = args32[6];
    case 6: args->arg6 = args32[5];
    case 5: args->arg5 = args32[4];
    case 4: args->arg4 = args32[3];
    case 3: args->arg3 = args32[2];
    case 2: args->arg2 = args32[1];
    case 1: args->arg1 = args32[0];
    }
    if (call_number == 90) {
        /* munge_l for mach_wait_until_trap() */
        args->arg1 = (((uint64_t)(args32[0])) | ((((uint64_t)(args32[1])) << 32)));
    }
    if (call_number == 93) {
        /* munge_wl for mk_timer_arm_trap() */
        args->arg2 = (((uint64_t)(args32[1])) | ((((uint64_t)(args32[2])) << 32)));
    }

    return KERN_SUCCESS;
}

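/*
 * mach_call_munger:
 *
 * Dispatch a Mach trap from a 32-bit user thread.  Mach traps are
 * invoked with negative numbers, so the call number is recovered by
 * negating eax; arguments are munged in from the user stack and the
 * result is returned in eax.
 */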
__private_extern__ void mach_call_munger(x86_saved_state_t *state);

void
mach_call_munger(x86_saved_state_t *state)
{
    int argc;
    int call_number;
    mach_call_t mach_call;
    kern_return_t retval;
    struct mach_call_args args = { 0, 0, 0, 0, 0, 0, 0, 0, 0 };
    x86_saved_state32_t *regs;

    assert(is_saved_state32(state));
    regs = saved_state32(state);

    call_number = -(regs->eax);
#if DEBUG_TRACE
    kprintf("mach_call_munger(0x%08x) code=%d\n", regs, call_number);
#endif

    if (call_number < 0 || call_number >= mach_trap_count) {
        i386_exception(EXC_SYSCALL, call_number, 1);
        /* NOTREACHED */
    }
    mach_call = (mach_call_t)mach_trap_table[call_number].mach_trap_function;

    if (mach_call == (mach_call_t)kern_invalid) {
        i386_exception(EXC_SYSCALL, call_number, 1);
        /* NOTREACHED */
    }

    argc = mach_trap_table[call_number].mach_trap_arg_count;
    if (argc) {
        retval = mach_call_arg_munger32(regs->uesp, argc, call_number, &args);
        if (retval != KERN_SUCCESS) {
            regs->eax = retval;

            thread_exception_return();
            /* NOTREACHED */
        }
    }
    KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_EXCP_SC, (call_number)) | DBG_FUNC_START,
        (int) args.arg1, (int) args.arg2, (int) args.arg3, (int) args.arg4, 0);

    retval = mach_call(&args);

    KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_EXCP_SC, (call_number)) | DBG_FUNC_END,
        retval, 0, 0, 0, 0);
    regs->eax = retval;

    thread_exception_return();
    /* NOTREACHED */
}

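/*
 * mach_call_munger64:
 *
 * Dispatch a Mach trap from a 64-bit user thread.  The first six
 * arguments arrive in registers (rdi..r9) and are passed to the
 * handler as a block starting at &regs->rdi; any further arguments
 * are copied in from the user stack above the return address.
 */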
__private_extern__ void mach_call_munger64(x86_saved_state_t *regs);

void
mach_call_munger64(x86_saved_state_t *state)
{
    int call_number;
    int argc;
    mach_call_t mach_call;
    x86_saved_state64_t *regs;

    assert(is_saved_state64(state));
    regs = saved_state64(state);

    call_number = regs->rax & SYSCALL_NUMBER_MASK;

    KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_EXCP_SC,
        (call_number)) | DBG_FUNC_START,
        (int) regs->rdi, (int) regs->rsi,
        (int) regs->rdx, (int) regs->r10, 0);

    if (call_number < 0 || call_number >= mach_trap_count) {
        i386_exception(EXC_SYSCALL, regs->rax, 1);
        /* NOTREACHED */
    }
    mach_call = (mach_call_t)mach_trap_table[call_number].mach_trap_function;

    if (mach_call == (mach_call_t)kern_invalid) {
        i386_exception(EXC_SYSCALL, regs->rax, 1);
        /* NOTREACHED */
    }
    argc = mach_trap_table[call_number].mach_trap_arg_count;

    if (argc > 6) {
        int copyin_count;

        copyin_count = (argc - 6) * sizeof(uint64_t);

        if (copyin((user_addr_t)(regs->isf.rsp + sizeof(user_addr_t)), (char *)&regs->v_arg6, copyin_count)) {
            regs->rax = KERN_INVALID_ARGUMENT;

            thread_exception_return();
            /* NOTREACHED */
        }
    }
    regs->rax = (uint64_t)mach_call((void *)(&regs->rdi));

    KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_EXCP_SC,
        (call_number)) | DBG_FUNC_END,
        (int)regs->rax, 0, 0, 0, 0);

    thread_exception_return();
    /* NOTREACHED */
}


/*
 * thread_setuserstack:
 *
 * Sets the user stack pointer into the machine
 * dependent thread state info.
 */
void
thread_setuserstack(
    thread_t thread,
    mach_vm_address_t user_stack)
{
    if (thread_is_64bit(thread)) {
        x86_saved_state64_t *iss64;

        iss64 = USER_REGS64(thread);

        iss64->isf.rsp = (uint64_t)user_stack;
    } else {
        x86_saved_state32_t *iss32;

        iss32 = USER_REGS32(thread);

        iss32->uesp = CAST_DOWN(unsigned int, user_stack);
    }
}

/*
 * thread_adjuserstack:
 *
 * Returns the adjusted user stack pointer from the machine
 * dependent thread state info.  Used for small (<2G) deltas.
 */
uint64_t
thread_adjuserstack(
    thread_t thread,
    int adjust)
{
    if (thread_is_64bit(thread)) {
        x86_saved_state64_t *iss64;

        iss64 = USER_REGS64(thread);

        iss64->isf.rsp += adjust;

        return iss64->isf.rsp;
    } else {
        x86_saved_state32_t *iss32;

        iss32 = USER_REGS32(thread);

        iss32->uesp += adjust;

        return CAST_USER_ADDR_T(iss32->uesp);
    }
}

/*
 * thread_setentrypoint:
 *
 * Sets the user PC into the machine
 * dependent thread state info.
 */
void
thread_setentrypoint(thread_t thread, mach_vm_address_t entry)
{
    if (thread_is_64bit(thread)) {
        x86_saved_state64_t *iss64;

        iss64 = USER_REGS64(thread);

        iss64->isf.rip = (uint64_t)entry;
    } else {
        x86_saved_state32_t *iss32;

        iss32 = USER_REGS32(thread);

        iss32->eip = CAST_DOWN(unsigned int, entry);
    }
}

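/*
 * thread_setsinglestep:
 *
 * Enable or disable hardware single-stepping for the thread by
 * setting or clearing the trace flag (EFL_TF) in its saved user
 * flags.
 */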
kern_return_t
thread_setsinglestep(thread_t thread, int on)
{
    if (thread_is_64bit(thread)) {
        x86_saved_state64_t *iss64;

        iss64 = USER_REGS64(thread);

        if (on)
            iss64->isf.rflags |= EFL_TF;
        else
            iss64->isf.rflags &= ~EFL_TF;
    } else {
        x86_saved_state32_t *iss32;

        iss32 = USER_REGS32(thread);

        if (on)
            iss32->efl |= EFL_TF;
        else
            iss32->efl &= ~EFL_TF;
    }

    return (KERN_SUCCESS);
}


/* XXX this should be a struct savearea so that CHUD will work better on x86 */
void *
find_user_regs(thread_t thread)
{
    return USER_STATE(thread);
}

void *
get_user_regs(thread_t th)
{
    if (th->machine.pcb)
        return (USER_STATE(th));
    else {
        printf("[get_user_regs: thread does not have pcb]");
        return NULL;
    }
}

#if CONFIG_DTRACE
/*
 * DTrace would like to have a peek at the kernel interrupt state, if available.
 * Based on osfmk/chud/i386/chud_thread_i386.c:chudxnu_thread_get_state(), which see.
 */
x86_saved_state32_t *find_kern_regs(thread_t);

x86_saved_state32_t *
find_kern_regs(thread_t thread)
{
    if (thread == current_thread() &&
        NULL != current_cpu_datap()->cpu_int_state &&
        !(USER_STATE(thread) == current_cpu_datap()->cpu_int_state &&
        current_cpu_datap()->cpu_interrupt_level == 1)) {

        return saved_state32(current_cpu_datap()->cpu_int_state);
    } else {
        return NULL;
    }
}

vm_offset_t dtrace_get_cpu_int_stack_top(void);

vm_offset_t
dtrace_get_cpu_int_stack_top(void)
{
    return current_cpu_datap()->cpu_int_stack_top;
}
#endif