/*
 * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#ifdef MACH_BSD
#include <mach_rt.h>
#include <mach_debug.h>
#include <mach_ldebug.h>

#include <mach/kern_return.h>
#include <mach/mach_traps.h>
#include <mach/thread_status.h>
#include <mach/vm_param.h>

#include <kern/counters.h>
#include <kern/cpu_data.h>
#include <kern/mach_param.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/sched_prim.h>
#include <kern/misc_protos.h>
#include <kern/assert.h>
#include <kern/spl.h>
#include <kern/syscall_sw.h>
#include <ipc/ipc_port.h>
#include <vm/vm_kern.h>
#include <vm/pmap.h>

#include <i386/cpu_data.h>
#include <i386/cpu_number.h>
#include <i386/thread.h>
#include <i386/eflags.h>
#include <i386/proc_reg.h>
#include <i386/seg.h>
#include <i386/tss.h>
#include <i386/user_ldt.h>
#include <i386/fpu.h>
#include <i386/iopb_entries.h>
#include <i386/machdep_call.h>
#include <i386/misc_protos.h>
#include <i386/mp_desc.h>
#include <i386/vmparam.h>
#include <i386/trap.h>
#include <mach/i386/syscall_sw.h>
#include <sys/syscall.h>
#include <sys/kdebug.h>
#include <sys/ktrace.h>
#include <sys/errno.h>
#include <../bsd/sys/sysent.h>

extern struct proc *current_proc(void);
extern struct proc *kernproc;

kern_return_t
thread_userstack(
	thread_t,
	int,
	thread_state_t,
	unsigned int,
	mach_vm_offset_t *,
	int *
);

kern_return_t
thread_entrypoint(
	thread_t,
	int,
	thread_state_t,
	unsigned int,
	mach_vm_offset_t *
);

void *find_user_regs(thread_t);

unsigned int get_msr_exportmask(void);

unsigned int get_msr_nbits(void);

unsigned int get_msr_rbits(void);

kern_return_t
thread_compose_cthread_desc(unsigned int addr, pcb_t pcb);

void IOSleep(int);

/*
 * thread_userstack:
 *
 * Return the user stack pointer from the machine
 * dependent thread state info.
 */
kern_return_t
thread_userstack(
	__unused thread_t	thread,
	int			flavor,
	thread_state_t		tstate,
	__unused unsigned int	count,
	user_addr_t		*user_stack,
	int			*customstack
)
{
	if (customstack)
		*customstack = 0;

	switch (flavor) {
	case OLD_i386_THREAD_STATE:
	case x86_THREAD_STATE32:
	{
		x86_thread_state32_t *state25;

		state25 = (x86_thread_state32_t *) tstate;

		if (state25->esp)
			*user_stack = state25->esp;
		else
			*user_stack = VM_USRSTACK32;
		if (customstack)
			*customstack = state25->esp ? 1 : 0;
		break;
	}

	case x86_THREAD_STATE64:
	{
		x86_thread_state64_t *state25;

		state25 = (x86_thread_state64_t *) tstate;

		if (state25->rsp)
			*user_stack = state25->rsp;
		else
			*user_stack = VM_USRSTACK64;
		if (customstack)
			*customstack = state25->rsp ? 1 : 0;
		break;
	}

	default:
		return (KERN_INVALID_ARGUMENT);
	}

	return (KERN_SUCCESS);
}
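
/*
 * Usage sketch (illustrative, not part of the original source):
 * thread_userstack() consumes a caller-supplied thread state, so a
 * nonzero stack pointer in that state selects a custom stack and zero
 * selects the platform default.  `stack_top' below is hypothetical.
 *
 *	x86_thread_state32_t ts = { 0 };
 *	ts.esp = 0;		// => *user_stack = VM_USRSTACK32, *customstack = 0
 *	ts.esp = stack_top;	// => *user_stack = stack_top,    *customstack = 1
 */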

kern_return_t
thread_entrypoint(
	__unused thread_t	thread,
	int			flavor,
	thread_state_t		tstate,
	__unused unsigned int	count,
	mach_vm_offset_t	*entry_point
)
{
	/*
	 * Set a default.
	 */
	if (*entry_point == 0)
		*entry_point = VM_MIN_ADDRESS;

	switch (flavor) {
	case OLD_i386_THREAD_STATE:
	case x86_THREAD_STATE32:
	{
		x86_thread_state32_t *state25;

		state25 = (x86_thread_state32_t *) tstate;
		*entry_point = state25->eip ? state25->eip : VM_MIN_ADDRESS;
		break;
	}

	case x86_THREAD_STATE64:
	{
		x86_thread_state64_t *state25;

		state25 = (x86_thread_state64_t *) tstate;
		*entry_point = state25->rip ? state25->rip : VM_MIN_ADDRESS64;
		break;
	}
	}
	return (KERN_SUCCESS);
}


/*
 * Duplicate parent state in child
 * for U**X fork.
 */
kern_return_t
machine_thread_dup(
	thread_t	parent,
	thread_t	child
)
{
	pcb_t	parent_pcb;
	pcb_t	child_pcb;

	if ((child_pcb = child->machine.pcb) == NULL ||
	    (parent_pcb = parent->machine.pcb) == NULL)
		return (KERN_FAILURE);
	/*
	 * Copy over the i386_saved_state registers
	 */
	if (cpu_mode_is64bit()) {
		if (thread_is_64bit(parent))
			bcopy(USER_REGS64(parent), USER_REGS64(child), sizeof(x86_saved_state64_t));
		else
			bcopy(USER_REGS32(parent), USER_REGS32(child), sizeof(x86_saved_state_compat32_t));
	} else
		bcopy(USER_REGS32(parent), USER_REGS32(child), sizeof(x86_saved_state32_t));

	/*
	 * Check to see if parent is using floating point
	 * and if so, copy the registers to the child
	 */
	fpu_dup_fxstate(parent, child);

#ifdef MACH_BSD
	/*
	 * Copy the parent's cthread id and USER_CTHREAD descriptor, if 32-bit.
	 */
	child_pcb->cthread_self = parent_pcb->cthread_self;
	if (!thread_is_64bit(parent))
		child_pcb->cthread_desc = parent_pcb->cthread_desc;

	/*
	 * FIXME - should a user specified LDT, TSS and V86 info
	 * be duplicated as well?? - probably not.
	 */
	/* Duplicate any user LDT entry that was set; this seems appropriate. */
	if (parent_pcb->uldt_selector != 0) {
		child_pcb->uldt_selector = parent_pcb->uldt_selector;
		child_pcb->uldt_desc = parent_pcb->uldt_desc;
	}
#endif

	return (KERN_SUCCESS);
}

/*
 * FIXME - thread_set_child
 */

void thread_set_child(thread_t child, int pid);
void
thread_set_child(thread_t child, int pid)
{
	if (thread_is_64bit(child)) {
		x86_saved_state64_t	*iss64;

		iss64 = USER_REGS64(child);

		iss64->rax = pid;
		iss64->rdx = 1;
		iss64->isf.rflags &= ~EFL_CF;
	} else {
		x86_saved_state32_t	*iss32;

		iss32 = USER_REGS32(child);

		iss32->eax = pid;
		iss32->edx = 1;
		iss32->efl &= ~EFL_CF;
	}
}


void thread_set_parent(thread_t parent, int pid);
void
thread_set_parent(thread_t parent, int pid)
{
	if (thread_is_64bit(parent)) {
		x86_saved_state64_t	*iss64;

		iss64 = USER_REGS64(parent);

		iss64->rax = pid;
		iss64->rdx = 0;
		iss64->isf.rflags &= ~EFL_CF;
	} else {
		x86_saved_state32_t	*iss32;

		iss32 = USER_REGS32(parent);

		iss32->eax = pid;
		iss32->edx = 0;
		iss32->efl &= ~EFL_CF;
	}
}
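
/*
 * Convention sketch: the two routines above implement the classic
 * two-sided fork return.  Both threads resume with eax = pid and carry
 * clear; edx is 1 in the child and 0 in the parent.  A user-level stub
 * (a hypothetical reconstruction, not Apple's actual libc source)
 * would use edx to pick the return value:
 *
 *	fork_stub:
 *		// ... trap into the kernel; on return eax = pid,
 *		// edx = 1 (child) or 0 (parent), CF set on error ...
 *		orl	%edx, %edx
 *		jz	1f		// parent: return the child's pid
 *		xorl	%eax, %eax	// child: return 0
 *	1:	ret
 */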


/*
 * System Call handling code
 */

extern struct proc *i386_current_proc(void);

extern long fuword(vm_offset_t);


/* following implemented in bsd/dev/i386/unix_signal.c */
int __pthread_cset(struct sysent *);

void __pthread_creset(struct sysent *);


void
machdep_syscall(x86_saved_state_t *state)
{
	int			args[machdep_call_count];
	int			trapno;
	int			nargs;
	machdep_call_t		*entry;
	x86_saved_state32_t	*regs;

	assert(is_saved_state32(state));
	regs = saved_state32(state);

	trapno = regs->eax;
#if DEBUG_TRACE
	kprintf("machdep_syscall(0x%08x) code=%d\n", regs, trapno);
#endif

	if (trapno < 0 || trapno >= machdep_call_count) {
		regs->eax = (unsigned int)kern_invalid(NULL);

		thread_exception_return();
		/* NOTREACHED */
	}
	entry = &machdep_call_table[trapno];
	nargs = entry->nargs;

	if (nargs != 0) {
		if (copyin((user_addr_t) regs->uesp + sizeof (int),
			   (char *) args, (nargs * sizeof (int)))) {
			regs->eax = KERN_INVALID_ADDRESS;

			thread_exception_return();
			/* NOTREACHED */
		}
	}
	switch (nargs) {
	case 0:
		regs->eax = (*entry->routine.args_0)();
		break;
	case 1:
		regs->eax = (*entry->routine.args_1)(args[0]);
		break;
	case 2:
		regs->eax = (*entry->routine.args_2)(args[0], args[1]);
		break;
	case 3:
		if (!entry->bsd_style)
			regs->eax = (*entry->routine.args_3)(args[0], args[1], args[2]);
		else {
			int	error;
			int	rval;

			error = (*entry->routine.args_bsd_3)(&rval, args[0], args[1], args[2]);
			if (error) {
				regs->eax = error;
				regs->efl |= EFL_CF;	/* carry bit */
			} else {
				regs->eax = rval;
				regs->efl &= ~EFL_CF;
			}
		}
		break;
	case 4:
		regs->eax = (*entry->routine.args_4)(args[0], args[1], args[2], args[3]);
		break;

	default:
		panic("machdep_syscall: too many args");
	}
	if (current_thread()->funnel_lock)
		(void) thread_funnel_set(current_thread()->funnel_lock, FALSE);

	thread_exception_return();
	/* NOTREACHED */
}
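
/*
 * Invocation sketch (illustrative): a machdep call is entered with the
 * call number in eax and its arguments on the user stack one int above
 * uesp, matching the copyin above.  Assuming the i386 Darwin machdep
 * trap gate is int $0x82 (an assumption about the vector assignment,
 * not stated in this file), user code would look like:
 *
 *	pushl	<arg>			// arguments, if any
 *	movl	$<call number>, %eax	// index into machdep_call_table
 *	int	$0x82			// machdep trap gate (assumed)
 */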

void
machdep_syscall64(x86_saved_state_t *state)
{
	int			trapno;
	machdep_call_t		*entry;
	x86_saved_state64_t	*regs;

	assert(is_saved_state64(state));
	regs = saved_state64(state);

	trapno = regs->rax & SYSCALL_NUMBER_MASK;

	if (trapno < 0 || trapno >= machdep_call_count) {
		regs->rax = (unsigned int)kern_invalid(NULL);

		thread_exception_return();
		/* NOTREACHED */
	}
	entry = &machdep_call_table64[trapno];

	switch (entry->nargs) {
	case 0:
		regs->rax = (*entry->routine.args_0)();
		break;
	case 1:
		regs->rax = (*entry->routine.args64_1)(regs->rdi);
		break;
	default:
		panic("machdep_syscall64: too many args");
	}
	if (current_thread()->funnel_lock)
		(void) thread_funnel_set(current_thread()->funnel_lock, FALSE);

	thread_exception_return();
	/* NOTREACHED */
}


kern_return_t
thread_compose_cthread_desc(unsigned int addr, pcb_t pcb)
{
	struct real_descriptor desc;

	mp_disable_preemption();

	/*
	 * Build a small user-privilege, read/write data segment based at
	 * addr (limit value 1 with page granularity, i.e. an 8KB window).
	 */
	desc.limit_low = 1;
	desc.limit_high = 0;
	desc.base_low = addr & 0xffff;
	desc.base_med = (addr >> 16) & 0xff;
	desc.base_high = (addr >> 24) & 0xff;
	desc.access = ACC_P|ACC_PL_U|ACC_DATA_W;
	desc.granularity = SZ_32|SZ_G;
	pcb->cthread_desc = desc;
	*ldt_desc_p(USER_CTHREAD) = desc;

	mp_enable_preemption();

	return (KERN_SUCCESS);
}

kern_return_t
thread_set_cthread_self(uint32_t self)
{
	current_thread()->machine.pcb->cthread_self = (uint64_t) self;

	return (KERN_SUCCESS);
}

kern_return_t
thread_get_cthread_self(void)
{
	return ((kern_return_t)current_thread()->machine.pcb->cthread_self);
}

kern_return_t
thread_fast_set_cthread_self(uint32_t self)
{
	pcb_t			pcb;
	x86_saved_state32_t	*iss;

	pcb = (pcb_t)current_thread()->machine.pcb;
	thread_compose_cthread_desc(self, pcb);
	pcb->cthread_self = (uint64_t) self;	/* preserve old func too */
	iss = saved_state32(pcb->iss);
	iss->gs = USER_CTHREAD;

	return (USER_CTHREAD);
}
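
/*
 * Effect sketch (illustrative): after thread_fast_set_cthread_self(),
 * %gs holds USER_CTHREAD, whose descriptor is based at `self', so user
 * code can address its thread block %gs-relative.  Assuming the
 * threading runtime keeps a self pointer at offset 0 (an assumption
 * about the runtime's layout, not something this file guarantees):
 *
 *	movl	%gs:0, %eax	// load the per-thread self pointer
 */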

kern_return_t
thread_fast_set_cthread_self64(uint64_t self)
{
	pcb_t			pcb;
	x86_saved_state64_t	*iss;

	pcb = current_thread()->machine.pcb;

	/* check for canonical address, set 0 otherwise */
	if (!IS_USERADDR64_CANONICAL(self))
		self = 0ULL;
	pcb->cthread_self = self;
	current_cpu_datap()->cpu_uber.cu_user_gs_base = self;

	/* XXX for 64-in-32 */
	iss = saved_state64(pcb->iss);
	iss->gs = USER_CTHREAD;
	thread_compose_cthread_desc((uint32_t) self, pcb);

	return (USER_CTHREAD);
}

/*
 * thread_set_user_ldt routine is the interface for the user level
 * settable ldt entry feature.  Allowing a user to create arbitrary
 * ldt entries seems to be too large of a security hole, so instead
 * this mechanism is in place to allow user level processes to have
 * an ldt entry that can be used in conjunction with the FS register.
 *
 * Swapping occurs inside the pcb.c file along with initialization
 * when a thread is created.  The basic functioning theory is that the
 * pcb->uldt_selector variable will contain either 0, meaning the
 * process has not set up any entry, or the selector to be used in
 * the FS register.  pcb->uldt_desc contains the actual descriptor the
 * user has set up, stored in machine usable ldt format.
 *
 * Currently one entry is shared by all threads (USER_SETTABLE), but
 * this could be changed in the future by changing how this routine
 * allocates the selector.  There seems to be no real reason at this
 * time to have this added feature, but in the future it might be
 * needed.
 *
 * address is the linear address of the start of the data area.
 * size is the size in bytes of the area.  flags should always be
 * set to 0 for now; in the future it could be used to set R/W
 * permissions or other functions.  Currently the segment is created
 * as a data segment up to 1 megabyte in size with full read/write
 * permissions only.
 *
 * This call returns the segment selector or -1 if any error occurs.
 */
kern_return_t
thread_set_user_ldt(uint32_t address, uint32_t size, uint32_t flags)
{
	pcb_t			pcb;
	struct fake_descriptor	temp;
	int			mycpu;

	if (flags != 0)
		return -1;		// flags not supported
	if (size > 0xFFFFF)
		return -1;		// size too big, 1 meg is the limit

	mp_disable_preemption();
	mycpu = cpu_number();

	// create a "fake" descriptor so we can use fix_desc()
	// to build a real one...
	// 32 bit default operation size
	// standard read/write perms for a data segment
	pcb = (pcb_t)current_thread()->machine.pcb;
	temp.offset = address;
	temp.lim_or_seg = size;
	temp.size_or_wdct = SZ_32;
	temp.access = ACC_P|ACC_PL_U|ACC_DATA_W;

	// turn this into a real descriptor
	fix_desc(&temp, 1);

	// set up our data in the pcb
	pcb->uldt_desc = *(struct real_descriptor *)&temp;
	pcb->uldt_selector = USER_SETTABLE;	// set the selector value

	// now set it up in the current table...
	*ldt_desc_p(USER_SETTABLE) = *(struct real_descriptor *)&temp;

	mp_enable_preemption();

	return USER_SETTABLE;
}
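
/*
 * Usage sketch (illustrative): a process that obtained the selector
 * returned above would load it into %fs and then address the
 * registered data area %fs-relative.  Hypothetical user-side code:
 *
 *	movw	$<returned selector>, %ax
 *	movw	%ax, %fs
 *	movl	%fs:0, %ebx	// first word of the data area
 */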

#endif	/* MACH_BSD */


typedef kern_return_t (*mach_call_t)(void *);

struct mach_call_args {
	syscall_arg_t arg1;
	syscall_arg_t arg2;
	syscall_arg_t arg3;
	syscall_arg_t arg4;
	syscall_arg_t arg5;
	syscall_arg_t arg6;
	syscall_arg_t arg7;
	syscall_arg_t arg8;
	syscall_arg_t arg9;
};


static kern_return_t
mach_call_arg_munger32(uint32_t sp, int nargs, int call_number, struct mach_call_args *args);


static kern_return_t
mach_call_arg_munger32(uint32_t sp, int nargs, int call_number, struct mach_call_args *args)
{
	unsigned int args32[9];

	if (copyin((user_addr_t)(sp + sizeof(int)), (char *)args32, nargs * sizeof (int)))
		return KERN_INVALID_ARGUMENT;

	switch (nargs) {
	case 9: args->arg9 = args32[8];
	case 8: args->arg8 = args32[7];
	case 7: args->arg7 = args32[6];
	case 6: args->arg6 = args32[5];
	case 5: args->arg5 = args32[4];
	case 4: args->arg4 = args32[3];
	case 3: args->arg3 = args32[2];
	case 2: args->arg2 = args32[1];
	case 1: args->arg1 = args32[0];
	}
	if (call_number == 90) {
		/* munge_l for mach_wait_until_trap() */
		args->arg1 = (((uint64_t)(args32[0])) | ((((uint64_t)(args32[1])) << 32)));
	}
	if (call_number == 93) {
		/* munge_wl for mk_timer_arm_trap() */
		args->arg2 = (((uint64_t)(args32[1])) | ((((uint64_t)(args32[2])) << 32)));
	}

	return KERN_SUCCESS;
}
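
/*
 * Note on the munger above: the switch falls through deliberately so
 * that exactly nargs arguments are copied.  Worked example for
 * mach_wait_until_trap() (call 90), whose single 64-bit deadline a
 * 32-bit caller pushes as two 32-bit words, low word first:
 *
 *	deadline  = 0x0000000100000002
 *	args32[0] = 0x00000002 (low half), args32[1] = 0x00000001 (high half)
 *	arg1      = args32[0] | ((uint64_t)args32[1] << 32)
 *	          = 0x0000000100000002
 */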


__private_extern__ void
mach_call_munger(x86_saved_state_t *state);


__private_extern__
void
mach_call_munger(x86_saved_state_t *state)
{
	int			argc;
	int			call_number;
	mach_call_t		mach_call;
	kern_return_t		retval;
	struct mach_call_args	args = { 0, 0, 0, 0, 0, 0, 0, 0, 0 };
	x86_saved_state32_t	*regs;

	assert(is_saved_state32(state));
	regs = saved_state32(state);

	call_number = -(regs->eax);
#if DEBUG_TRACE
	kprintf("mach_call_munger(0x%08x) code=%d\n", regs, call_number);
#endif

	if (call_number < 0 || call_number >= mach_trap_count) {
		i386_exception(EXC_SYSCALL, call_number, 1);
		/* NOTREACHED */
	}
	mach_call = (mach_call_t)mach_trap_table[call_number].mach_trap_function;

	if (mach_call == (mach_call_t)kern_invalid) {
		i386_exception(EXC_SYSCALL, call_number, 1);
		/* NOTREACHED */
	}
	argc = mach_trap_table[call_number].mach_trap_arg_count;

	if (argc) {
		retval = mach_call_arg_munger32(regs->uesp, argc, call_number, &args);

		if (retval != KERN_SUCCESS) {
			regs->eax = retval;

			thread_exception_return();
			/* NOTREACHED */
		}
	}
	KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_EXCP_SC, (call_number)) | DBG_FUNC_START,
			      (int) args.arg1, (int) args.arg2, (int) args.arg3, (int) args.arg4, 0);

	retval = mach_call(&args);

	KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_EXCP_SC, (call_number)) | DBG_FUNC_END,
			      retval, 0, 0, 0, 0);
	regs->eax = retval;

	thread_exception_return();
	/* NOTREACHED */
}


__private_extern__ void
mach_call_munger64(x86_saved_state_t *state);


__private_extern__
void
mach_call_munger64(x86_saved_state_t *state)
{
	int			call_number;
	int			argc;
	mach_call_t		mach_call;
	x86_saved_state64_t	*regs;

	assert(is_saved_state64(state));
	regs = saved_state64(state);

	call_number = regs->rax & SYSCALL_NUMBER_MASK;

	KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_EXCP_SC, (call_number)) | DBG_FUNC_START,
			      (int) regs->rdi, (int) regs->rsi, (int) regs->rdx, (int) regs->r10, 0);

	if (call_number < 0 || call_number >= mach_trap_count) {
		i386_exception(EXC_SYSCALL, regs->rax, 1);
		/* NOTREACHED */
	}
	mach_call = (mach_call_t)mach_trap_table[call_number].mach_trap_function;

	if (mach_call == (mach_call_t)kern_invalid) {
		i386_exception(EXC_SYSCALL, regs->rax, 1);
		/* NOTREACHED */
	}
	argc = mach_trap_table[call_number].mach_trap_arg_count;

	if (argc > 6) {
		int	copyin_count;

		copyin_count = (argc - 6) * sizeof(uint64_t);

		if (copyin((user_addr_t)(regs->isf.rsp + sizeof(user_addr_t)), (char *)&regs->v_arg6, copyin_count)) {
			regs->rax = KERN_INVALID_ARGUMENT;

			thread_exception_return();
			/* NOTREACHED */
		}
	}
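	/*
	 * The first six arguments arrive in rdi, rsi, rdx, r10, r8 and r9;
	 * the copyin above appends any remaining ones at v_arg6.  Passing
	 * &regs->rdi as the argument block below relies on those fields
	 * being laid out contiguously, in that order, in x86_saved_state64_t.
	 */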
	regs->rax = (uint64_t)mach_call((void *)(&regs->rdi));

	KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_EXCP_SC, (call_number)) | DBG_FUNC_END,
			      (int)regs->rax, 0, 0, 0, 0);

	thread_exception_return();
	/* NOTREACHED */
}


/*
 * thread_setuserstack:
 *
 * Sets the user stack pointer into the machine
 * dependent thread state info.
 */
void
thread_setuserstack(
	thread_t		thread,
	mach_vm_address_t	user_stack)
{
	if (thread_is_64bit(thread)) {
		x86_saved_state64_t	*iss64;

		iss64 = USER_REGS64(thread);

		iss64->isf.rsp = (uint64_t)user_stack;
	} else {
		x86_saved_state32_t	*iss32;

		iss32 = USER_REGS32(thread);

		iss32->uesp = CAST_DOWN(unsigned int, user_stack);
	}
}

/*
 * thread_adjuserstack:
 *
 * Returns the adjusted user stack pointer from the machine
 * dependent thread state info.  Used for small (<2G) deltas.
 */
uint64_t
thread_adjuserstack(
	thread_t	thread,
	int		adjust)
{
	if (thread_is_64bit(thread)) {
		x86_saved_state64_t	*iss64;

		iss64 = USER_REGS64(thread);

		iss64->isf.rsp += adjust;

		return iss64->isf.rsp;
	} else {
		x86_saved_state32_t	*iss32;

		iss32 = USER_REGS32(thread);

		iss32->uesp += adjust;

		return CAST_USER_ADDR_T(iss32->uesp);
	}
}

/*
 * thread_setentrypoint:
 *
 * Sets the user PC into the machine
 * dependent thread state info.
 */
void
thread_setentrypoint(thread_t thread, mach_vm_address_t entry)
{
	if (thread_is_64bit(thread)) {
		x86_saved_state64_t	*iss64;

		iss64 = USER_REGS64(thread);

		iss64->isf.rip = (uint64_t)entry;
	} else {
		x86_saved_state32_t	*iss32;

		iss32 = USER_REGS32(thread);

		iss32->eip = CAST_DOWN(unsigned int, entry);
	}
}


void
thread_setsinglestep(thread_t thread, int on)
{
	if (thread_is_64bit(thread)) {
		x86_saved_state64_t	*iss64;

		iss64 = USER_REGS64(thread);

		if (on)
			iss64->isf.rflags |= EFL_TF;
		else
			iss64->isf.rflags &= ~EFL_TF;
	} else {
		x86_saved_state32_t	*iss32;

		iss32 = USER_REGS32(thread);

		if (on)
			iss32->efl |= EFL_TF;
		else
			iss32->efl &= ~EFL_TF;
	}
}



/* XXX this should be a struct savearea so that CHUD will work better on x86 */
void *
find_user_regs(
	thread_t thread)
{
	return USER_STATE(thread);
}