/* osfmk/i386/bsd_i386.c — from the xnu-792.12.6 source distribution */
1 /*
2 * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_OSREFERENCE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the
10 * License may not be used to create, or enable the creation or
11 * redistribution of, unlawful or unlicensed copies of an Apple operating
12 * system, or to circumvent, violate, or enable the circumvention or
13 * violation of, any terms of an Apple operating system software license
14 * agreement.
15 *
16 * Please obtain a copy of the License at
17 * http://www.opensource.apple.com/apsl/ and read it before using this
18 * file.
19 *
20 * The Original Code and all software distributed under the License are
21 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
22 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
23 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
24 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
25 * Please see the License for the specific language governing rights and
26 * limitations under the License.
27 *
28 * @APPLE_LICENSE_OSREFERENCE_HEADER_END@
29 */
30 #ifdef MACH_BSD
31 #include <mach_rt.h>
32 #include <mach_debug.h>
33 #include <mach_ldebug.h>
34
35 #include <mach/kern_return.h>
36 #include <mach/mach_traps.h>
37 #include <mach/thread_status.h>
38 #include <mach/vm_param.h>
39
40 #include <kern/counters.h>
41 #include <kern/cpu_data.h>
42 #include <kern/mach_param.h>
43 #include <kern/task.h>
44 #include <kern/thread.h>
45 #include <kern/sched_prim.h>
46 #include <kern/misc_protos.h>
47 #include <kern/assert.h>
48 #include <kern/spl.h>
49 #include <kern/syscall_sw.h>
50 #include <ipc/ipc_port.h>
51 #include <vm/vm_kern.h>
52 #include <vm/pmap.h>
53
54 #include <i386/cpu_data.h>
55 #include <i386/cpu_number.h>
56 #include <i386/thread.h>
57 #include <i386/eflags.h>
58 #include <i386/proc_reg.h>
59 #include <i386/seg.h>
60 #include <i386/tss.h>
61 #include <i386/user_ldt.h>
62 #include <i386/fpu.h>
63 #include <i386/iopb_entries.h>
64 #include <i386/machdep_call.h>
65 #include <i386/misc_protos.h>
66 #include <i386/cpu_data.h>
67 #include <i386/cpu_number.h>
68 #include <i386/mp_desc.h>
69 #include <i386/vmparam.h>
70 #include <sys/syscall.h>
71 #include <sys/kdebug.h>
72 #include <sys/ktrace.h>
73 #include <../bsd/sys/sysent.h>
74
75 extern struct proc *current_proc(void);
76
77 kern_return_t
78 thread_userstack(
79 thread_t,
80 int,
81 thread_state_t,
82 unsigned int,
83 mach_vm_offset_t *,
84 int *
85 );
86
87 kern_return_t
88 thread_entrypoint(
89 thread_t,
90 int,
91 thread_state_t,
92 unsigned int,
93 mach_vm_offset_t *
94 );
95
96 unsigned int get_msr_exportmask(void);
97
98 unsigned int get_msr_nbits(void);
99
100 unsigned int get_msr_rbits(void);
101
102 kern_return_t
103 thread_compose_cthread_desc(unsigned int addr, pcb_t pcb);
104
105 void IOSleep(int);
106
107 /*
108 * thread_userstack:
109 *
110 * Return the user stack pointer from the machine
111 * dependent thread state info.
112 */
113 kern_return_t
114 thread_userstack(
115 __unused thread_t thread,
116 int flavor,
117 thread_state_t tstate,
118 unsigned int count,
119 user_addr_t *user_stack,
120 int *customstack
121 )
122 {
123 struct i386_saved_state *state;
124 i386_thread_state_t *state25;
125 vm_offset_t uesp;
126
127 if (customstack)
128 *customstack = 0;
129
130 switch (flavor) {
131 case i386_THREAD_STATE: /* FIXME */
132 state25 = (i386_thread_state_t *) tstate;
133 if (state25->esp)
134 *user_stack = state25->esp;
135 else
136 *user_stack = USRSTACK;
137 if (customstack && state25->esp)
138 *customstack = 1;
139 else
140 *customstack = 0;
141 break;
142
143 case i386_NEW_THREAD_STATE:
144 if (count < i386_NEW_THREAD_STATE_COUNT)
145 return (KERN_INVALID_ARGUMENT);
146 else {
147 state = (struct i386_saved_state *) tstate;
148 uesp = state->uesp;
149 }
150
151 /* If a valid user stack is specified, use it. */
152 if (uesp)
153 *user_stack = uesp;
154 else
155 *user_stack = USRSTACK;
156 if (customstack && uesp)
157 *customstack = 1;
158 else
159 *customstack = 0;
160 break;
161 default :
162 return (KERN_INVALID_ARGUMENT);
163 }
164
165 return (KERN_SUCCESS);
166 }
167
168 kern_return_t
169 thread_entrypoint(
170 __unused thread_t thread,
171 int flavor,
172 thread_state_t tstate,
173 unsigned int count,
174 mach_vm_offset_t *entry_point
175 )
176 {
177 struct i386_saved_state *state;
178 i386_thread_state_t *state25;
179
180 /*
181 * Set a default.
182 */
183 if (*entry_point == 0)
184 *entry_point = VM_MIN_ADDRESS;
185
186 switch (flavor) {
187 case i386_THREAD_STATE:
188 state25 = (i386_thread_state_t *) tstate;
189 *entry_point = state25->eip ? state25->eip: VM_MIN_ADDRESS;
190 break;
191
192 case i386_NEW_THREAD_STATE:
193 if (count < i386_THREAD_STATE_COUNT)
194 return (KERN_INVALID_ARGUMENT);
195 else {
196 state = (struct i386_saved_state *) tstate;
197
198 /*
199 * If a valid entry point is specified, use it.
200 */
201 *entry_point = state->eip ? state->eip: VM_MIN_ADDRESS;
202 }
203 break;
204 }
205
206 return (KERN_SUCCESS);
207 }
208
209 struct i386_saved_state *
210 get_user_regs(thread_t th)
211 {
212 if (th->machine.pcb)
213 return(USER_REGS(th));
214 else {
215 printf("[get_user_regs: thread does not have pcb]");
216 return NULL;
217 }
218 }
219
/*
 * machine_thread_dup:
 *
 * Duplicate parent machine state in the child for U**X fork():
 * copies the saved user registers, the floating point state (when
 * the parent has an FP save area), and any user-settable LDT entry.
 *
 * Returns KERN_FAILURE when either thread lacks a pcb.
 */
kern_return_t
machine_thread_dup(
    thread_t		parent,
    thread_t		child
)
{
	struct i386_float_state		floatregs;

#ifdef	XXX
	/* Save the FPU state (disabled: fpu_get_state below flushes live state) */
	if ((pcb_t)(per_proc_info[cpu_number()].fpu_pcb) == parent->machine.pcb) {
		fp_state_save(parent);
	}
#endif

	if (child->machine.pcb == NULL || parent->machine.pcb == NULL)
		return (KERN_FAILURE);

	/* Copy over the i386_saved_state registers */
	child->machine.pcb->iss = parent->machine.pcb->iss;

	/* Check to see if parent is using floating point
	 * and if so, copy the registers to the child
	 * FIXME - make sure this works.
	 */

	if (parent->machine.pcb->ims.ifps) {
		/* best effort: a failed fpu_get_state leaves the child without FP state */
		if (fpu_get_state(parent, &floatregs) == KERN_SUCCESS)
			fpu_set_state(child, &floatregs);
	}

	/* FIXME - should a user specified LDT, TSS and V86 info
	 * be duplicated as well?? - probably not.
	 */
	// duplicate any user LDT entry that was set (uldt_selector == 0 means unset)
#ifdef	MACH_BSD
	if (parent->machine.pcb->uldt_selector!= 0) {
		child->machine.pcb->uldt_selector = parent->machine.pcb->uldt_selector;
		child->machine.pcb->uldt_desc = parent->machine.pcb->uldt_desc;
	}
#endif


	return (KERN_SUCCESS);
}
269
270 /*
271 * FIXME - thread_set_child
272 */
273
274 void thread_set_child(thread_t child, int pid);
275 void
276 thread_set_child(thread_t child, int pid)
277 {
278 child->machine.pcb->iss.eax = pid;
279 child->machine.pcb->iss.edx = 1;
280 child->machine.pcb->iss.efl &= ~EFL_CF;
281 }
282 void thread_set_parent(thread_t parent, int pid);
283 void
284 thread_set_parent(thread_t parent, int pid)
285 {
286 parent->machine.pcb->iss.eax = pid;
287 parent->machine.pcb->iss.edx = 0;
288 parent->machine.pcb->iss.efl &= ~EFL_CF;
289 }
290
291
292
293 /*
294 * System Call handling code
295 */
296
297 #define ERESTART -1 /* restart syscall */
298 #define EJUSTRETURN -2 /* don't modify regs, just return */
299
300
301 #define NO_FUNNEL 0
302 #define KERNEL_FUNNEL 1
303
304 extern funnel_t * kernel_flock;
305
306 extern int set_bsduthreadargs (thread_t, struct i386_saved_state *, void *);
307 extern void * get_bsduthreadarg(thread_t);
308 extern int * get_bsduthreadrval(thread_t th);
309 extern int * get_bsduthreadlowpridelay(thread_t th);
310
311 extern long fuword(vm_offset_t);
312
313 extern void unix_syscall(struct i386_saved_state *);
314 extern void unix_syscall_return(int);
315
316 /* following implemented in bsd/dev/i386/unix_signal.c */
317 int __pthread_cset(struct sysent *);
318
319 void __pthread_creset(struct sysent *);
320
321
/*
 * unix_syscall_return:
 *
 * Completion path for a BSD system call that returned asynchronously
 * (e.g. after a continuation).  Translates the error/rval pair into
 * the user register state, emits ktrace/kdebug records, releases the
 * funnel if held, honors any low-priority I/O throttle delay, and
 * returns to user mode.  Does not return to the caller.
 */
void
unix_syscall_return(int error)
{
	thread_t		thread;
	volatile int		*rval;
	struct i386_saved_state	*regs;
	struct proc		*p;
	unsigned short		code;
	vm_offset_t		params;
	struct sysent		*callp;
	volatile int		*lowpri_delay;

	thread = current_thread();
	rval = get_bsduthreadrval(thread);
	lowpri_delay = get_bsduthreadlowpridelay(thread);
	p = current_proc();

	regs = USER_REGS(thread);

	/* reconstruct code for tracing before blasting eax */
	code = regs->eax;
	/* user arguments live one word above the saved user stack pointer */
	params = (vm_offset_t) ((caddr_t)regs->uesp + sizeof (int));
	/* out-of-range codes are clamped to the sysent[63] slot */
	callp = (code >= nsysent) ? &sysent[63] : &sysent[code];
	if (callp == sysent) {
		/* indirect syscall (slot 0): the real code is the first user arg */
		code = fuword(params);
	}

	if (error == ERESTART) {
		/* back eip up 7 bytes so the syscall instruction is re-executed */
		regs->eip -= 7;
	}
	else if (error != EJUSTRETURN) {
		if (error) {
			regs->eax = error;
			regs->efl |= EFL_CF;	/* carry bit */
		} else { /* (not error) */
			regs->eax = rval[0];
			regs->edx = rval[1];
			regs->efl &= ~EFL_CF;
		}
	}

	ktrsysret(p, code, error, rval[0], (callp->sy_funnel & FUNNEL_MASK));

	__pthread_creset(callp);

	/* release the funnel acquired on entry, if this call used one */
	if ((callp->sy_funnel & FUNNEL_MASK) != NO_FUNNEL)
		(void) thread_funnel_set(current_thread()->funnel_lock, FALSE);

	if (*lowpri_delay) {
		/*
		 * task is marked as a low priority I/O type
		 * and the I/O we issued while in this system call
		 * collided with normal I/O operations... we'll
		 * delay in order to mitigate the impact of this
		 * task on the normal operation of the system
		 */
		IOSleep(*lowpri_delay);
		*lowpri_delay = 0;
	}
	KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_EXCP_SC, code) | DBG_FUNC_END,
		error, rval[0], rval[1], 0, 0);

	thread_exception_return();
	/* NOTREACHED */
}
387
388
/*
 * unix_syscall:
 *
 * Entry point for BSD system calls on i386.  Decodes the syscall
 * number from eax (handling the indirect-syscall slot), copies in the
 * user arguments, handles pthread cancellation, acquires the kernel
 * funnel if the call requires it, dispatches through sysent, and
 * writes the result back into the saved user registers before
 * returning to user mode.  Does not return to the caller.
 */
void
unix_syscall(struct i386_saved_state *regs)
{
	thread_t		thread;
	void	*vt;
	unsigned short	code;
	struct sysent		*callp;
	int	nargs;
	int	error;
	int *rval;
	int funnel_type;
	vm_offset_t		params;
	struct proc *p;
	volatile int *lowpri_delay;

	thread = current_thread();
	p = current_proc();
	rval = get_bsduthreadrval(thread);
	lowpri_delay = get_bsduthreadlowpridelay(thread);

	thread->task->syscalls_unix++;		/* MP-safety ignored */

	//printf("[scall : eax %x]", regs->eax);
	code = regs->eax;
	/* user arguments start one word above the saved user stack pointer */
	params = (vm_offset_t) ((caddr_t)regs->uesp + sizeof (int));
	/* out-of-range codes are clamped to the sysent[63] slot */
	callp = (code >= nsysent) ? &sysent[63] : &sysent[code];
	if (callp == sysent) {
		/* indirect syscall (slot 0): real code is the first user arg */
		code = fuword(params);
		params += sizeof (int);
		callp = (code >= nsysent) ? &sysent[63] : &sysent[code];
	}

	vt = get_bsduthreadarg(thread);

	/* copy in the declared number of argument words; fail the call on fault */
	if ((nargs = (callp->sy_narg * sizeof (int))) &&
			(error = copyin((user_addr_t) params, (char *) vt, nargs)) != 0) {
		regs->eax = error;
		regs->efl |= EFL_CF;
		thread_exception_return();
		/* NOTREACHED */
	}

	rval[0] = 0;
	rval[1] = regs->edx;

	if ((error = __pthread_cset(callp))) {
		/* cancelled system call; let it return with EINTR for handling */
		regs->eax = error;
		regs->efl |= EFL_CF;
		thread_exception_return();
		/* NOTREACHED */
	}

	funnel_type = (callp->sy_funnel & FUNNEL_MASK);
	if(funnel_type == KERNEL_FUNNEL)
		(void) thread_funnel_set(kernel_flock, TRUE);

	(void) set_bsduthreadargs(thread, regs, NULL);

	if (callp->sy_narg > 8)
		panic("unix_syscall max arg count exceeded (%d)", callp->sy_narg);

	ktrsyscall(p, code, callp->sy_narg, vt, funnel_type);

	{
	  int *ip = (int *)vt;
	  KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_EXCP_SC, code) | DBG_FUNC_START,
	      *ip, *(ip+1), *(ip+2), *(ip+3), 0);
	}

	/* dispatch to the actual syscall implementation */
	error = (*(callp->sy_call))((void *) p, (void *) vt, &rval[0]);

#if 0
	/* May be needed with vfork changes */
	regs = USER_REGS(thread);
#endif
	if (error == ERESTART) {
		/* back eip up 7 bytes so the syscall instruction is re-executed */
		regs->eip -= 7;
	}
	else if (error != EJUSTRETURN) {
		if (error) {
			regs->eax = error;
			regs->efl |= EFL_CF;	/* carry bit */
		} else { /* (not error) */
			regs->eax = rval[0];
			regs->edx = rval[1];
			regs->efl &= ~EFL_CF;
		}
	}

	ktrsysret(p, code, error, rval[0], funnel_type);

	__pthread_creset(callp);

	if(funnel_type != NO_FUNNEL)
		(void) thread_funnel_set(current_thread()->funnel_lock, FALSE);

	if (*lowpri_delay) {
		/*
		 * task is marked as a low priority I/O type
		 * and the I/O we issued while in this system call
		 * collided with normal I/O operations... we'll
		 * delay in order to mitigate the impact of this
		 * task on the normal operation of the system
		 */
		IOSleep(*lowpri_delay);
		*lowpri_delay = 0;
	}
	KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_EXCP_SC, code) | DBG_FUNC_END,
		error, rval[0], rval[1], 0, 0);

	thread_exception_return();
	/* NOTREACHED */
}
503
504
/*
 * machdep_syscall:
 *
 * Dispatch a machine-dependent system call.  The trap number arrives
 * in regs->eax; arguments (if any) are copied in from the user stack
 * just above the return address.  The result is placed in regs->eax
 * and control returns to user mode.  Does not return to the caller.
 */
void
machdep_syscall( struct i386_saved_state *regs)
{
	int trapno, nargs;
	machdep_call_t *entry;

	trapno = regs->eax;
	if (trapno < 0 || trapno >= machdep_call_count) {
		regs->eax = (unsigned int)kern_invalid(NULL);

		thread_exception_return();
		/* NOTREACHED */
	}

	entry = &machdep_call_table[trapno];
	nargs = entry->nargs;

	if (nargs > 0) {
		/* VLA sized by the table's declared argument count */
		int args[nargs];

		/* user args live one word above the saved user stack pointer */
		if (copyin((user_addr_t) regs->uesp + sizeof (int),
			    (char *) args,
			    nargs * sizeof (int))) {

			regs->eax = KERN_INVALID_ADDRESS;

			thread_exception_return();
			/* NOTREACHED */
		}

		/* dispatch through the arity-specific function pointer */
		switch (nargs) {
		    case 1:
			regs->eax = (*entry->routine.args_1)(args[0]);
			break;
		    case 2:
			regs->eax = (*entry->routine.args_2)(args[0],args[1]);
			break;
		    case 3:
			regs->eax = (*entry->routine.args_3)(args[0],args[1],args[2]);
			break;
		    case 4:
			regs->eax = (*entry->routine.args_4)(args[0],args[1],args[2],args[3]);
			break;
		    default:
			panic("machdep_syscall(): too many args");
		}
	}
	else
		regs->eax = (*entry->routine.args_0)();

	/* release the funnel if this thread still holds one */
	if (current_thread()->funnel_lock)
		(void) thread_funnel_set(current_thread()->funnel_lock, FALSE);

	thread_exception_return();
	/* NOTREACHED */
}
561
562
563 kern_return_t
564 thread_compose_cthread_desc(unsigned int addr, pcb_t pcb)
565 {
566 struct real_descriptor desc;
567
568 mp_disable_preemption();
569
570 desc.limit_low = 1;
571 desc.limit_high = 0;
572 desc.base_low = addr & 0xffff;
573 desc.base_med = (addr >> 16) & 0xff;
574 desc.base_high = (addr >> 24) & 0xff;
575 desc.access = ACC_P|ACC_PL_U|ACC_DATA_W;
576 desc.granularity = SZ_32|SZ_G;
577 pcb->cthread_desc = desc;
578 *ldt_desc_p(USER_CTHREAD) = desc;
579
580 mp_enable_preemption();
581
582 return(KERN_SUCCESS);
583 }
584
585 kern_return_t
586 thread_set_cthread_self(uint32_t self)
587 {
588 current_thread()->machine.pcb->cthread_self = self;
589
590 return (KERN_SUCCESS);
591 }
592
593 kern_return_t
594 thread_get_cthread_self(void)
595 {
596 return ((kern_return_t)current_thread()->machine.pcb->cthread_self);
597 }
598
599 kern_return_t
600 thread_fast_set_cthread_self(uint32_t self)
601 {
602 pcb_t pcb;
603 pcb = (pcb_t)current_thread()->machine.pcb;
604 thread_compose_cthread_desc(self, pcb);
605 pcb->cthread_self = self; /* preserve old func too */
606 return (USER_CTHREAD);
607 }
608
609 /*
610 * thread_set_user_ldt routine is the interface for the user level
611 * settable ldt entry feature. allowing a user to create arbitrary
612 * ldt entries seems to be too large of a security hole, so instead
613 * this mechanism is in place to allow user level processes to have
614 * an ldt entry that can be used in conjunction with the FS register.
615 *
616 * Swapping occurs inside the pcb.c file along with initialization
617 * when a thread is created. The basic functioning theory is that the
618 * pcb->uldt_selector variable will contain either 0 meaning the
619 * process has not set up any entry, or the selector to be used in
620 * the FS register. pcb->uldt_desc contains the actual descriptor the
621 * user has set up stored in machine usable ldt format.
622 *
623 * Currently one entry is shared by all threads (USER_SETTABLE), but
624 * this could be changed in the future by changing how this routine
625 * allocates the selector. There seems to be no real reason at this
626 * time to have this added feature, but in the future it might be
627 * needed.
628 *
629 * address is the linear address of the start of the data area size
630 * is the size in bytes of the area flags should always be set to 0
631 * for now. in the future it could be used to set R/W permisions or
632 * other functions. Currently the segment is created as a data segment
633 * up to 1 megabyte in size with full read/write permisions only.
634 *
635 * this call returns the segment selector or -1 if any error occurs
636 */
637 kern_return_t
638 thread_set_user_ldt(uint32_t address, uint32_t size, uint32_t flags)
639 {
640 pcb_t pcb;
641 struct fake_descriptor temp;
642 int mycpu;
643
644 if (flags != 0)
645 return -1; // flags not supported
646 if (size > 0xFFFFF)
647 return -1; // size too big, 1 meg is the limit
648
649 mp_disable_preemption();
650 mycpu = cpu_number();
651
652 // create a "fake" descriptor so we can use fix_desc()
653 // to build a real one...
654 // 32 bit default operation size
655 // standard read/write perms for a data segment
656 pcb = (pcb_t)current_thread()->machine.pcb;
657 temp.offset = address;
658 temp.lim_or_seg = size;
659 temp.size_or_wdct = SZ_32;
660 temp.access = ACC_P|ACC_PL_U|ACC_DATA_W;
661
662 // turn this into a real descriptor
663 fix_desc(&temp,1);
664
665 // set up our data in the pcb
666 pcb->uldt_desc = *(struct real_descriptor*)&temp;
667 pcb->uldt_selector = USER_SETTABLE; // set the selector value
668
669 // now set it up in the current table...
670 *ldt_desc_p(USER_SETTABLE) = *(struct real_descriptor*)&temp;
671
672 mp_enable_preemption();
673
674 return USER_SETTABLE;
675 }
676 void
677 mach25_syscall(struct i386_saved_state *regs)
678 {
679 printf("*** Atttempt to execute a Mach 2.5 system call at EIP=%x EAX=%x(%d)\n",
680 regs->eip, regs->eax, -regs->eax);
681 panic("FIXME!");
682 }
683 #endif /* MACH_BSD */
684
685
686 /* This routine is called from assembly before each and every mach trap.
687 */
688
689 extern unsigned int mach_call_start(unsigned int, unsigned int *);
690
691 __private_extern__
692 unsigned int
693 mach_call_start(unsigned int call_number, unsigned int *args)
694 {
695 int i, argc;
696 unsigned int kdarg[3];
697
698 current_thread()->task->syscalls_mach++; /* MP-safety ignored */
699
700 /* Always prepare to trace mach system calls */
701
702 kdarg[0]=0;
703 kdarg[1]=0;
704 kdarg[2]=0;
705
706 argc = mach_trap_table[call_number>>4].mach_trap_arg_count;
707
708 if (argc > 3)
709 argc = 3;
710
711 for (i=0; i < argc; i++)
712 kdarg[i] = (int)*(args + i);
713
714 KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_EXCP_SC, (call_number>>4)) | DBG_FUNC_START,
715 kdarg[0], kdarg[1], kdarg[2], 0, 0);
716
717 return call_number; /* pass this back thru */
718 }
719
720 /* This routine is called from assembly after each mach system call
721 */
722
723 extern unsigned int mach_call_end(unsigned int, unsigned int);
724
/*
 * mach_call_end:
 *
 * Called from assembly after each mach system call; emits the
 * DBG_FUNC_END tracepoint with the trap's return value and passes
 * retval back unchanged to the assembly caller.
 */
__private_extern__
unsigned int
mach_call_end(unsigned int call_number, unsigned int retval)
{
	KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_EXCP_SC,(call_number>>4)) | DBG_FUNC_END,
		retval, 0, 0, 0, 0);
	return retval;  /* pass this back thru */
}
733
/* A mach trap entry point, invoked with a pointer to its argument block. */
typedef kern_return_t (*mach_call_t)(void *);

/* regparm(1): the first argument (call_number) is passed in a register
 * by the assembly trampoline; the rest arrive on the stack. */
extern __attribute__((regparm(1))) kern_return_t
mach_call_munger(unsigned int call_number,
				 unsigned int arg1,
				 unsigned int arg2,
				 unsigned int arg3,
				 unsigned int arg4,
				 unsigned int arg5,
				 unsigned int arg6,
				 unsigned int arg7,
				 unsigned int arg8,
				 unsigned int arg9
);

/* Up to nine word-sized user arguments for a mach trap. */
struct mach_call_args {
	unsigned int arg1;
	unsigned int arg2;
	unsigned int arg3;
	unsigned int arg4;
	unsigned int arg5;
	unsigned int arg6;
	unsigned int arg7;
	unsigned int arg8;
	unsigned int arg9;
};
/*
 * mach_call_munger:
 *
 * C-level dispatcher for mach traps: packs the register/stack
 * arguments into a mach_call_args block (copying only as many as the
 * trap declares), traces entry/exit, and calls the trap function from
 * mach_trap_table.  Returns the trap's result to the assembly caller.
 */
__private_extern__
__attribute__((regparm(1))) kern_return_t
mach_call_munger(unsigned int call_number,
				 unsigned int arg1,
				 unsigned int arg2,
				 unsigned int arg3,
				 unsigned int arg4,
				 unsigned int arg5,
				 unsigned int arg6,
				 unsigned int arg7,
				 unsigned int arg8,
				 unsigned int arg9
)
{
	int argc;
	mach_call_t mach_call;
	kern_return_t retval;
	struct mach_call_args args = { 0, 0, 0, 0, 0, 0, 0, 0, 0 };

	current_thread()->task->syscalls_mach++;	/* MP-safety ignored */
	/* trap numbers are encoded shifted left by 4; recover the table index */
	call_number >>= 4;

	argc = mach_trap_table[call_number].mach_trap_arg_count;
	/* copy only the declared argument count; every case falls through */
	switch (argc) {
		case 9: args.arg9 = arg9;	/* fallthrough */
		case 8: args.arg8 = arg8;	/* fallthrough */
		case 7: args.arg7 = arg7;	/* fallthrough */
		case 6: args.arg6 = arg6;	/* fallthrough */
		case 5: args.arg5 = arg5;	/* fallthrough */
		case 4: args.arg4 = arg4;	/* fallthrough */
		case 3: args.arg3 = arg3;	/* fallthrough */
		case 2: args.arg2 = arg2;	/* fallthrough */
		case 1: args.arg1 = arg1;
	}

	KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_EXCP_SC, (call_number)) | DBG_FUNC_START,
			args.arg1, args.arg2, args.arg3, 0, 0);

	mach_call = (mach_call_t)mach_trap_table[call_number].mach_trap_function;
	retval = mach_call(&args);

	KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_EXCP_SC,(call_number)) | DBG_FUNC_END,
			retval, 0, 0, 0, 0);

	return retval;
}
806
807 /*
808 * thread_setuserstack:
809 *
810 * Sets the user stack pointer into the machine
811 * dependent thread state info.
812 */
813 void
814 thread_setuserstack(
815 thread_t thread,
816 mach_vm_address_t user_stack)
817 {
818 struct i386_saved_state *ss = get_user_regs(thread);
819
820 ss->uesp = CAST_DOWN(unsigned int,user_stack);
821 }
822
823 /*
824 * thread_adjuserstack:
825 *
826 * Returns the adjusted user stack pointer from the machine
827 * dependent thread state info. Used for small (<2G) deltas.
828 */
829 uint64_t
830 thread_adjuserstack(
831 thread_t thread,
832 int adjust)
833 {
834 struct i386_saved_state *ss = get_user_regs(thread);
835
836 ss->uesp += adjust;
837 return CAST_USER_ADDR_T(ss->uesp);
838 }
839
840 /*
841 * thread_setentrypoint:
842 *
843 * Sets the user PC into the machine
844 * dependent thread state info.
845 */
846 void
847 thread_setentrypoint(
848 thread_t thread,
849 mach_vm_address_t entry)
850 {
851 struct i386_saved_state *ss = get_user_regs(thread);
852
853 ss->eip = CAST_DOWN(unsigned int,entry);
854 }
855