/* apple/xnu (xnu-792.24.17): osfmk/i386/bsd_i386.c */
/*
 * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
#ifdef MACH_BSD
#include <mach_rt.h>
#include <mach_debug.h>
#include <mach_ldebug.h>

#include <mach/kern_return.h>
#include <mach/mach_traps.h>
#include <mach/thread_status.h>
#include <mach/vm_param.h>

#include <kern/counters.h>
#include <kern/cpu_data.h>
#include <kern/mach_param.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/sched_prim.h>
#include <kern/misc_protos.h>
#include <kern/assert.h>
#include <kern/spl.h>
#include <kern/syscall_sw.h>
#include <ipc/ipc_port.h>
#include <vm/vm_kern.h>
#include <vm/pmap.h>

#include <i386/cpu_data.h>
#include <i386/cpu_number.h>
#include <i386/thread.h>
#include <i386/eflags.h>
#include <i386/proc_reg.h>
#include <i386/seg.h>
#include <i386/tss.h>
#include <i386/user_ldt.h>
#include <i386/fpu.h>
#include <i386/iopb_entries.h>
#include <i386/machdep_call.h>
#include <i386/misc_protos.h>
#include <i386/mp_desc.h>
#include <i386/vmparam.h>
#include <sys/syscall.h>
#include <sys/kdebug.h>
#include <sys/ktrace.h>
#include <../bsd/sys/sysent.h>

extern struct proc *current_proc(void);

kern_return_t
thread_userstack(
    thread_t,
    int,
    thread_state_t,
    unsigned int,
    mach_vm_offset_t *,
    int *
);

kern_return_t
thread_entrypoint(
    thread_t,
    int,
    thread_state_t,
    unsigned int,
    mach_vm_offset_t *
);

unsigned int get_msr_exportmask(void);

unsigned int get_msr_nbits(void);

unsigned int get_msr_rbits(void);

kern_return_t
thread_compose_cthread_desc(unsigned int addr, pcb_t pcb);

void IOSleep(int);

/*
 * thread_userstack:
 *
 * Return the user stack pointer from the machine
 * dependent thread state info.
 */
kern_return_t
thread_userstack(
    __unused thread_t thread,
    int flavor,
    thread_state_t tstate,
    unsigned int count,
    user_addr_t *user_stack,
    int *customstack
)
{
    struct i386_saved_state *state;
    i386_thread_state_t *state25;
    vm_offset_t uesp;

    if (customstack)
        *customstack = 0;

    switch (flavor) {
    case i386_THREAD_STATE:     /* FIXME */
        state25 = (i386_thread_state_t *) tstate;
        if (state25->esp)
            *user_stack = state25->esp;
        else
            *user_stack = USRSTACK;
        if (customstack && state25->esp)
            *customstack = 1;
        else
            *customstack = 0;
        break;

    case i386_NEW_THREAD_STATE:
        if (count < i386_NEW_THREAD_STATE_COUNT)
            return (KERN_INVALID_ARGUMENT);
        else {
            state = (struct i386_saved_state *) tstate;
            uesp = state->uesp;
        }

        /* If a valid user stack is specified, use it. */
        if (uesp)
            *user_stack = uesp;
        else
            *user_stack = USRSTACK;
        if (customstack && uesp)
            *customstack = 1;
        else
            *customstack = 0;
        break;
    default:
        return (KERN_INVALID_ARGUMENT);
    }

    return (KERN_SUCCESS);
}

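/*
 * thread_entrypoint:
 *
 * Return the user entry point (instruction pointer) from the machine
 * dependent thread state info, defaulting to VM_MIN_ADDRESS when no
 * entry point is supplied.
 */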
kern_return_t
thread_entrypoint(
    __unused thread_t thread,
    int flavor,
    thread_state_t tstate,
    unsigned int count,
    mach_vm_offset_t *entry_point
)
{
    struct i386_saved_state *state;
    i386_thread_state_t *state25;

    /*
     * Set a default.
     */
    if (*entry_point == 0)
        *entry_point = VM_MIN_ADDRESS;

    switch (flavor) {
    case i386_THREAD_STATE:
        state25 = (i386_thread_state_t *) tstate;
        *entry_point = state25->eip ? state25->eip : VM_MIN_ADDRESS;
        break;

    case i386_NEW_THREAD_STATE:
        if (count < i386_THREAD_STATE_COUNT)
            return (KERN_INVALID_ARGUMENT);
        else {
            state = (struct i386_saved_state *) tstate;

            /*
             * If a valid entry point is specified, use it.
             */
            *entry_point = state->eip ? state->eip : VM_MIN_ADDRESS;
        }
        break;
    }

    return (KERN_SUCCESS);
}

struct i386_saved_state *
get_user_regs(thread_t th)
{
    if (th->machine.pcb)
        return (USER_REGS(th));
    else {
        printf("[get_user_regs: thread does not have pcb]");
        return NULL;
    }
}

/*
 * Duplicate parent state in child
 * for U**X fork.
 */
kern_return_t
machine_thread_dup(
    thread_t parent,
    thread_t child
)
{
    struct i386_float_state floatregs;

#ifdef XXX
    /* Save the FPU state */
    if ((pcb_t)(per_proc_info[cpu_number()].fpu_pcb) == parent->machine.pcb) {
        fp_state_save(parent);
    }
#endif

    if (child->machine.pcb == NULL || parent->machine.pcb == NULL)
        return (KERN_FAILURE);

    /* Copy over the i386_saved_state registers */
    child->machine.pcb->iss = parent->machine.pcb->iss;

    /*
     * Check to see if the parent is using floating point
     * and if so, copy the registers to the child.
     * FIXME - make sure this works.
     */
    if (parent->machine.pcb->ims.ifps) {
        if (fpu_get_state(parent, &floatregs) == KERN_SUCCESS)
            fpu_set_state(child, &floatregs);
    }

    /*
     * FIXME - should user specified LDT, TSS and V86 info
     * be duplicated as well?? - probably not.
     */
    /* Duplicate any user LDT entry that was set; this seems appropriate. */
#ifdef MACH_BSD
    if (parent->machine.pcb->uldt_selector != 0) {
        child->machine.pcb->uldt_selector = parent->machine.pcb->uldt_selector;
        child->machine.pcb->uldt_desc = parent->machine.pcb->uldt_desc;
    }
#endif

    return (KERN_SUCCESS);
}

/*
 * FIXME - thread_set_child
 */

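/*
 * The two helpers below patch the saved user state for the BSD fork
 * path: eax receives the pid value passed in, edx distinguishes the
 * two sides (1 in the child, 0 in the parent), and clearing the carry
 * flag (EFL_CF) marks the call as successful.  The user-level fork
 * stub presumably uses edx to turn this into the usual "0 in the
 * child, pid in the parent" result.
 */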
void thread_set_child(thread_t child, int pid);
void
thread_set_child(thread_t child, int pid)
{
    child->machine.pcb->iss.eax = pid;
    child->machine.pcb->iss.edx = 1;
    child->machine.pcb->iss.efl &= ~EFL_CF;
}

void thread_set_parent(thread_t parent, int pid);
void
thread_set_parent(thread_t parent, int pid)
{
    parent->machine.pcb->iss.eax = pid;
    parent->machine.pcb->iss.edx = 0;
    parent->machine.pcb->iss.efl &= ~EFL_CF;
}


/*
 * System Call handling code
 */

#define ERESTART    -1      /* restart syscall */
#define EJUSTRETURN -2      /* don't modify regs, just return */

#define NO_FUNNEL     0
#define KERNEL_FUNNEL 1

extern funnel_t *kernel_flock;

extern int set_bsduthreadargs(thread_t, struct i386_saved_state *, void *);
extern void *get_bsduthreadarg(thread_t);
extern int *get_bsduthreadrval(thread_t th);
extern int *get_bsduthreadlowpridelay(thread_t th);

extern long fuword(vm_offset_t);

extern void unix_syscall(struct i386_saved_state *);
extern void unix_syscall_return(int);

/* following implemented in bsd/dev/i386/unix_signal.c */
int __pthread_cset(struct sysent *);

void __pthread_creset(struct sysent *);

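/*
 * unix_syscall_return() is the completion path for BSD system calls
 * that do not simply return into unix_syscall() (typically calls that
 * block and finish on a continuation).  It reads the syscall number
 * back out of the saved eax before that register is overwritten with
 * the return value, applies the same error/return-value register
 * protocol as unix_syscall(), and exits to user space through
 * thread_exception_return().
 */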
void
unix_syscall_return(int error)
{
    thread_t thread;
    volatile int *rval;
    struct i386_saved_state *regs;
    struct proc *p;
    unsigned short code;
    vm_offset_t params;
    struct sysent *callp;
    volatile int *lowpri_delay;

    thread = current_thread();
    rval = get_bsduthreadrval(thread);
    lowpri_delay = get_bsduthreadlowpridelay(thread);
    p = current_proc();

    regs = USER_REGS(thread);

    /* reconstruct code for tracing before blasting eax */
    code = regs->eax;
    params = (vm_offset_t) ((caddr_t)regs->uesp + sizeof (int));
    callp = (code >= nsysent) ? &sysent[63] : &sysent[code];
    if (callp == sysent) {
        code = fuword(params);
    }

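    /*
     * ERESTART backs eip up over the user-mode system call
     * instruction sequence (7 bytes, which matches the far-call
     * syscall stub) so the call is re-issued when the thread returns
     * to user space; EJUSTRETURN leaves the registers untouched.
     */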
    if (error == ERESTART) {
        regs->eip -= 7;
    }
    else if (error != EJUSTRETURN) {
        if (error) {
            regs->eax = error;
            regs->efl |= EFL_CF;    /* carry bit */
        } else { /* (not error) */
            regs->eax = rval[0];
            regs->edx = rval[1];
            regs->efl &= ~EFL_CF;
        }
    }

    ktrsysret(p, code, error, rval[0], (callp->sy_funnel & FUNNEL_MASK));

    __pthread_creset(callp);

    if ((callp->sy_funnel & FUNNEL_MASK) != NO_FUNNEL)
        (void) thread_funnel_set(current_thread()->funnel_lock, FALSE);

    if (*lowpri_delay) {
        /*
         * task is marked as a low priority I/O type
         * and the I/O we issued while in this system call
         * collided with normal I/O operations... we'll
         * delay in order to mitigate the impact of this
         * task on the normal operation of the system
         */
        IOSleep(*lowpri_delay);
        *lowpri_delay = 0;
    }
    KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_EXCP_SC, code) | DBG_FUNC_END,
        error, rval[0], rval[1], 0, 0);

    thread_exception_return();
    /* NOTREACHED */
}


void
unix_syscall(struct i386_saved_state *regs)
{
    thread_t thread;
    void *vt;
    unsigned short code;
    struct sysent *callp;
    int nargs;
    int error;
    int *rval;
    int funnel_type;
    vm_offset_t params;
    struct proc *p;
    volatile int *lowpri_delay;

    thread = current_thread();
    p = current_proc();
    rval = get_bsduthreadrval(thread);
    lowpri_delay = get_bsduthreadlowpridelay(thread);

    thread->task->syscalls_unix++;      /* MP-safety ignored */

    //printf("[scall : eax %x]", regs->eax);
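    /*
     * The syscall number arrives in eax.  Syscall 0 is the indirect
     * syscall (callp == sysent), in which case the real number is the
     * first word at the user stack and the argument pointer is bumped
     * past it.  Out-of-range numbers are routed to sysent[63], which
     * this code uses as the invalid-syscall entry.
     */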
    code = regs->eax;
    params = (vm_offset_t) ((caddr_t)regs->uesp + sizeof (int));
    callp = (code >= nsysent) ? &sysent[63] : &sysent[code];
    if (callp == sysent) {
        code = fuword(params);
        params += sizeof (int);
        callp = (code >= nsysent) ? &sysent[63] : &sysent[code];
    }

    vt = get_bsduthreadarg(thread);

    if ((nargs = (callp->sy_narg * sizeof (int))) &&
        (error = copyin((user_addr_t) params, (char *) vt, nargs)) != 0) {
        regs->eax = error;
        regs->efl |= EFL_CF;
        thread_exception_return();
        /* NOTREACHED */
    }

    rval[0] = 0;
    rval[1] = regs->edx;

    if ((error = __pthread_cset(callp))) {
        /* cancelled system call; let it return with EINTR for handling */
        regs->eax = error;
        regs->efl |= EFL_CF;
        thread_exception_return();
        /* NOTREACHED */
    }

    funnel_type = (callp->sy_funnel & FUNNEL_MASK);
    if (funnel_type == KERNEL_FUNNEL)
        (void) thread_funnel_set(kernel_flock, TRUE);

    (void) set_bsduthreadargs(thread, regs, NULL);

    if (callp->sy_narg > 8)
        panic("unix_syscall max arg count exceeded (%d)", callp->sy_narg);

    ktrsyscall(p, code, callp->sy_narg, vt, funnel_type);

    {
        int *ip = (int *)vt;

        KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_EXCP_SC, code) | DBG_FUNC_START,
            *ip, *(ip+1), *(ip+2), *(ip+3), 0);
    }

    error = (*(callp->sy_call))((void *) p, (void *) vt, &rval[0]);

#if 0
    /* May be needed with vfork changes */
    regs = USER_REGS(thread);
#endif
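    /*
     * ERESTART re-arms the syscall by backing eip up over the
     * user-mode call sequence; see the note in unix_syscall_return().
     */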
    if (error == ERESTART) {
        regs->eip -= 7;
    }
    else if (error != EJUSTRETURN) {
        if (error) {
            regs->eax = error;
            regs->efl |= EFL_CF;    /* carry bit */
        } else { /* (not error) */
            regs->eax = rval[0];
            regs->edx = rval[1];
            regs->efl &= ~EFL_CF;
        }
    }

    ktrsysret(p, code, error, rval[0], funnel_type);

    __pthread_creset(callp);

    if (funnel_type != NO_FUNNEL)
        (void) thread_funnel_set(current_thread()->funnel_lock, FALSE);

    if (*lowpri_delay) {
        /*
         * task is marked as a low priority I/O type
         * and the I/O we issued while in this system call
         * collided with normal I/O operations... we'll
         * delay in order to mitigate the impact of this
         * task on the normal operation of the system
         */
        IOSleep(*lowpri_delay);
        *lowpri_delay = 0;
    }
    KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_EXCP_SC, code) | DBG_FUNC_END,
        error, rval[0], rval[1], 0, 0);

    thread_exception_return();
    /* NOTREACHED */
}

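/*
 * machdep_syscall dispatches the machine-dependent call table: the
 * call number arrives in eax, up to four int-sized arguments are
 * copied in from just above the saved user stack pointer, and the
 * routine's result is placed back in eax before returning to user
 * space.
 */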
void
machdep_syscall(struct i386_saved_state *regs)
{
    int trapno, nargs;
    machdep_call_t *entry;

    trapno = regs->eax;
    if (trapno < 0 || trapno >= machdep_call_count) {
        regs->eax = (unsigned int)kern_invalid(NULL);

        thread_exception_return();
        /* NOTREACHED */
    }

    entry = &machdep_call_table[trapno];
    nargs = entry->nargs;

    if (nargs > 0) {
        int args[nargs];

        if (copyin((user_addr_t) regs->uesp + sizeof (int),
                   (char *) args,
                   nargs * sizeof (int))) {

            regs->eax = KERN_INVALID_ADDRESS;

            thread_exception_return();
            /* NOTREACHED */
        }

        switch (nargs) {
        case 1:
            regs->eax = (*entry->routine.args_1)(args[0]);
            break;
        case 2:
            regs->eax = (*entry->routine.args_2)(args[0], args[1]);
            break;
        case 3:
            regs->eax = (*entry->routine.args_3)(args[0], args[1], args[2]);
            break;
        case 4:
            regs->eax = (*entry->routine.args_4)(args[0], args[1], args[2], args[3]);
            break;
        default:
            panic("machdep_syscall(): too many args");
        }
    }
    else
        regs->eax = (*entry->routine.args_0)();

    if (current_thread()->funnel_lock)
        (void) thread_funnel_set(current_thread()->funnel_lock, FALSE);

    thread_exception_return();
    /* NOTREACHED */
}

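/*
 * Build a user-mode, read/write data descriptor based at 'addr' and
 * install it both in the pcb and in the USER_CTHREAD LDT slot; user
 * thread libraries point a segment register at this slot to reach
 * their per-thread "cthread self" data.
 */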
kern_return_t
thread_compose_cthread_desc(unsigned int addr, pcb_t pcb)
{
    struct real_descriptor desc;

    mp_disable_preemption();

    desc.limit_low = 1;
    desc.limit_high = 0;
    desc.base_low = addr & 0xffff;
    desc.base_med = (addr >> 16) & 0xff;
    desc.base_high = (addr >> 24) & 0xff;
    desc.access = ACC_P|ACC_PL_U|ACC_DATA_W;
    desc.granularity = SZ_32|SZ_G;
    pcb->cthread_desc = desc;
    *ldt_desc_p(USER_CTHREAD) = desc;

    mp_enable_preemption();

    return (KERN_SUCCESS);
}

kern_return_t
thread_set_cthread_self(uint32_t self)
{
    current_thread()->machine.pcb->cthread_self = self;

    return (KERN_SUCCESS);
}

kern_return_t
thread_get_cthread_self(void)
{
    return ((kern_return_t)current_thread()->machine.pcb->cthread_self);
}

kern_return_t
thread_fast_set_cthread_self(uint32_t self)
{
    pcb_t pcb;

    pcb = (pcb_t)current_thread()->machine.pcb;
    thread_compose_cthread_desc(self, pcb);
    pcb->cthread_self = self;   /* preserve old func too */
    return (USER_CTHREAD);
}

/*
 * thread_set_user_ldt is the interface for the user-level settable LDT
 * entry feature.  Allowing a user to create arbitrary LDT entries
 * seems to be too large of a security hole, so instead this mechanism
 * lets user-level processes have a single LDT entry that can be used
 * in conjunction with the FS register.
 *
 * Swapping occurs inside pcb.c, along with initialization when a
 * thread is created.  The basic theory of operation is that
 * pcb->uldt_selector contains either 0, meaning the process has not
 * set up any entry, or the selector to be used in the FS register.
 * pcb->uldt_desc contains the actual descriptor the user has set up,
 * stored in machine-usable LDT format.
 *
 * Currently one entry is shared by all threads (USER_SETTABLE), but
 * this could be changed in the future by changing how this routine
 * allocates the selector.  There seems to be no real reason at this
 * time to have this added feature, but in the future it might be
 * needed.
 *
 * 'address' is the linear address of the start of the data area;
 * 'size' is the size of the area in bytes; 'flags' should always be 0
 * for now (in the future it could be used to set R/W permissions or
 * other attributes).  Currently the segment is created as a data
 * segment up to 1 megabyte in size with full read/write permissions
 * only.
 *
 * This call returns the segment selector, or -1 if any error occurs.
 */
kern_return_t
thread_set_user_ldt(uint32_t address, uint32_t size, uint32_t flags)
{
    pcb_t pcb;
    struct fake_descriptor temp;
    int mycpu;

    if (flags != 0)
        return -1;      // flags not supported
    if (size > 0xFFFFF)
        return -1;      // size too big, 1 meg is the limit

    mp_disable_preemption();
    mycpu = cpu_number();

    // create a "fake" descriptor so we can use fix_desc()
    // to build a real one...
    // 32 bit default operation size
    // standard read/write perms for a data segment
    pcb = (pcb_t)current_thread()->machine.pcb;
    temp.offset = address;
    temp.lim_or_seg = size;
    temp.size_or_wdct = SZ_32;
    temp.access = ACC_P|ACC_PL_U|ACC_DATA_W;

    // turn this into a real descriptor
    fix_desc(&temp, 1);

    // set up our data in the pcb
    pcb->uldt_desc = *(struct real_descriptor *)&temp;
    pcb->uldt_selector = USER_SETTABLE;     // set the selector value

    // now set it up in the current table...
    *ldt_desc_p(USER_SETTABLE) = *(struct real_descriptor *)&temp;

    mp_enable_preemption();

    return USER_SETTABLE;
}
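
/*
 * Hypothetical user-side sketch (not part of this file): once a
 * process has obtained the selector that thread_set_user_ldt() returns
 * (through the machdep call interface), it would point %fs at the new
 * segment roughly like this:
 *
 *	unsigned short sel = USER_SETTABLE;	// value returned above
 *	__asm__ volatile ("movw %w0, %%fs" : : "r" (sel));
 *
 * after which "%fs:offset" addressing reaches the data area described
 * by 'address' and 'size'.
 */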

void
mach25_syscall(struct i386_saved_state *regs)
{
    printf("*** Attempt to execute a Mach 2.5 system call at EIP=%x EAX=%x(%d)\n",
           regs->eip, regs->eax, -regs->eax);
    panic("FIXME!");
}
#endif  /* MACH_BSD */

/*
 * This routine is called from assembly before each and every mach trap.
 */
extern unsigned int mach_call_start(unsigned int, unsigned int *);

__private_extern__
unsigned int
mach_call_start(unsigned int call_number, unsigned int *args)
{
    int i, argc;
    unsigned int kdarg[3];

    current_thread()->task->syscalls_mach++;    /* MP-safety ignored */

    /* Always prepare to trace mach system calls */

    kdarg[0] = 0;
    kdarg[1] = 0;
    kdarg[2] = 0;

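    /*
     * call_number arrives from the trampoline shifted left by 4; it is
     * shifted back down here (and in mach_call_munger below) before
     * being used to index mach_trap_table.  Only the first three
     * arguments are captured for the trace record.
     */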
    argc = mach_trap_table[call_number >> 4].mach_trap_arg_count;

    if (argc > 3)
        argc = 3;

    for (i = 0; i < argc; i++)
        kdarg[i] = (int)*(args + i);

    KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_EXCP_SC, (call_number >> 4)) | DBG_FUNC_START,
        kdarg[0], kdarg[1], kdarg[2], 0, 0);

    return call_number;     /* pass this back thru */
}

/*
 * This routine is called from assembly after each mach system call.
 */
extern unsigned int mach_call_end(unsigned int, unsigned int);

__private_extern__
unsigned int
mach_call_end(unsigned int call_number, unsigned int retval)
{
    KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_EXCP_SC, (call_number >> 4)) | DBG_FUNC_END,
        retval, 0, 0, 0, 0);
    return retval;          /* pass this back thru */
}

typedef kern_return_t (*mach_call_t)(void *);

extern __attribute__((regparm(1))) kern_return_t
mach_call_munger(unsigned int call_number,
                 unsigned int arg1,
                 unsigned int arg2,
                 unsigned int arg3,
                 unsigned int arg4,
                 unsigned int arg5,
                 unsigned int arg6,
                 unsigned int arg7,
                 unsigned int arg8,
                 unsigned int arg9
);

struct mach_call_args {
    unsigned int arg1;
    unsigned int arg2;
    unsigned int arg3;
    unsigned int arg4;
    unsigned int arg5;
    unsigned int arg6;
    unsigned int arg7;
    unsigned int arg8;
    unsigned int arg9;
};

__private_extern__
__attribute__((regparm(1))) kern_return_t
mach_call_munger(unsigned int call_number,
                 unsigned int arg1,
                 unsigned int arg2,
                 unsigned int arg3,
                 unsigned int arg4,
                 unsigned int arg5,
                 unsigned int arg6,
                 unsigned int arg7,
                 unsigned int arg8,
                 unsigned int arg9
)
{
    int argc;
    mach_call_t mach_call;
    kern_return_t retval;
    struct mach_call_args args = { 0, 0, 0, 0, 0, 0, 0, 0, 0 };

    current_thread()->task->syscalls_mach++;    /* MP-safety ignored */
    call_number >>= 4;

    argc = mach_trap_table[call_number].mach_trap_arg_count;
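    /*
     * Intentional fall-through: each case below copies its argument
     * and drops into the next, so exactly 'argc' arguments are filled
     * in before the trap function is called.
     */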
    switch (argc) {
    case 9: args.arg9 = arg9;
    case 8: args.arg8 = arg8;
    case 7: args.arg7 = arg7;
    case 6: args.arg6 = arg6;
    case 5: args.arg5 = arg5;
    case 4: args.arg4 = arg4;
    case 3: args.arg3 = arg3;
    case 2: args.arg2 = arg2;
    case 1: args.arg1 = arg1;
    }

    KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_EXCP_SC, (call_number)) | DBG_FUNC_START,
        args.arg1, args.arg2, args.arg3, 0, 0);

    mach_call = (mach_call_t)mach_trap_table[call_number].mach_trap_function;
    retval = mach_call(&args);

    KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_EXCP_SC, (call_number)) | DBG_FUNC_END,
        retval, 0, 0, 0, 0);

    return retval;
}

/*
 * thread_setuserstack:
 *
 * Sets the user stack pointer into the machine
 * dependent thread state info.
 */
void
thread_setuserstack(
    thread_t thread,
    mach_vm_address_t user_stack)
{
    struct i386_saved_state *ss = get_user_regs(thread);

    ss->uesp = CAST_DOWN(unsigned int, user_stack);
}

/*
 * thread_adjuserstack:
 *
 * Returns the adjusted user stack pointer from the machine
 * dependent thread state info.  Used for small (<2G) deltas.
 */
uint64_t
thread_adjuserstack(
    thread_t thread,
    int adjust)
{
    struct i386_saved_state *ss = get_user_regs(thread);

    ss->uesp += adjust;
    return CAST_USER_ADDR_T(ss->uesp);
}

/*
 * thread_setentrypoint:
 *
 * Sets the user PC into the machine
 * dependent thread state info.
 */
void
thread_setentrypoint(
    thread_t thread,
    mach_vm_address_t entry)
{
    struct i386_saved_state *ss = get_user_regs(thread);

    ss->eip = CAST_DOWN(unsigned int, entry);
}