/*
 * Copyright (c) 2000-2002 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * Copyright (c) 1999-2003 Apple Computer, Inc.  All Rights Reserved.
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
#include <mach_debug.h>
#include <mach_ldebug.h>

#include <mach/kern_return.h>
#include <mach/thread_status.h>
#include <mach/vm_param.h>

#include <kern/counters.h>
#include <kern/cpu_data.h>
#include <kern/mach_param.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/thread_swap.h>
#include <kern/sched_prim.h>
#include <kern/misc_protos.h>
#include <kern/assert.h>
#include <kern/syscall_sw.h>
#include <ipc/ipc_port.h>
#include <vm/vm_kern.h>

#include <i386/thread.h>
#include <i386/eflags.h>
#include <i386/proc_reg.h>
#include <i386/user_ldt.h>
#include <i386/iopb_entries.h>
#include <i386/machdep_call.h>

#include <sys/syscall.h>
#include <sys/ktrace.h>
#include <sys/kdebug.h>
struct i386_saved_state *
get_user_regs(thread_act_t);

unsigned int get_msr_exportmask(void);

unsigned int get_msr_nbits(void);

unsigned int get_msr_rbits(void);

kern_return_t
thread_compose_cthread_desc(unsigned int addr, pcb_t pcb);
/*
 * thread_userstack:
 *
 * Return the user stack pointer from the machine
 * dependent thread state info.
 */
kern_return_t
thread_userstack(
    thread_t        thread,
    int             flavor,
    thread_state_t  tstate,
    unsigned int    count,
    vm_offset_t     *user_stack,
    int             *customstack
)
{
    struct i386_saved_state *state;
    i386_thread_state_t *state25;
    vm_offset_t uesp;

    if (customstack)
        *customstack = 0;

    switch (flavor) {
    case i386_THREAD_STATE:    /* FIXME */
        state25 = (i386_thread_state_t *) tstate;
        *user_stack = state25->esp;
        if (customstack && state25->esp)
            *customstack = 1;
        else
            *customstack = 0;
        break;

    case i386_NEW_THREAD_STATE:
        if (count < i386_NEW_THREAD_STATE_COUNT)
            return (KERN_INVALID_ARGUMENT);
        else {
            state = (struct i386_saved_state *) tstate;
            uesp = state->uesp;
        }

        /* If a valid user stack is specified, use it. */
        if (uesp)
            *user_stack = uesp;
        if (customstack && uesp)
            *customstack = 1;
        else
            *customstack = 0;
        break;

    default:
        return (KERN_INVALID_ARGUMENT);
    }

    return (KERN_SUCCESS);
}
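
/*
 * thread_entrypoint:
 *
 * Return the entry point (initial eip) from the machine dependent
 * thread state info, defaulting to VM_MIN_ADDRESS when the state
 * supplies none.
 */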
kern_return_t
thread_entrypoint(
    thread_t        thread,
    int             flavor,
    thread_state_t  tstate,
    unsigned int    count,
    vm_offset_t     *entry_point
)
{
    struct i386_saved_state *state;
    i386_thread_state_t *state25;

    /*
     * Set a default.
     */
    if (*entry_point == 0)
        *entry_point = VM_MIN_ADDRESS;

    switch (flavor) {
    case i386_THREAD_STATE:
        state25 = (i386_thread_state_t *) tstate;
        *entry_point = state25->eip ? state25->eip : VM_MIN_ADDRESS;
        break;

    case i386_NEW_THREAD_STATE:
        if (count < i386_THREAD_STATE_COUNT)
            return (KERN_INVALID_ARGUMENT);
        else {
            state = (struct i386_saved_state *) tstate;

            /*
             * If a valid entry point is specified, use it.
             */
            *entry_point = state->eip ? state->eip : VM_MIN_ADDRESS;
        }
        break;
    }

    return (KERN_SUCCESS);
}
struct i386_saved_state *
get_user_regs(thread_act_t th)
{
    if (th->mact.pcb)
        return (USER_REGS(th));
    else {
        printf("[get_user_regs: thread does not have pcb]");
        return (NULL);
    }
}
/*
 * Duplicate parent state in child
 */
kern_return_t
machine_thread_dup(
    thread_act_t    parent,
    thread_act_t    child
)
{
    struct i386_saved_state *parent_state, *child_state;
    struct i386_machine_state *ims;
    struct i386_float_state floatregs;

    /* Save the FPU state */
    if ((pcb_t)(per_proc_info[cpu_number()].fpu_pcb) == parent->mact.pcb) {
        fp_state_save(parent);
    }

    if (child->mact.pcb == NULL || parent->mact.pcb == NULL)
        return (KERN_FAILURE);

    /* Copy over the i386_saved_state registers */
    child->mact.pcb->iss = parent->mact.pcb->iss;

    /* Check to see if parent is using floating point
     * and if so, copy the registers to the child
     * FIXME - make sure this works.
     */
    if (parent->mact.pcb->ims.ifps) {
        if (fpu_get_state(parent, &floatregs) == KERN_SUCCESS)
            fpu_set_state(child, &floatregs);
    }

    /* FIXME - should a user specified LDT, TSS and V86 info
     * be duplicated as well?? - probably not.
     */
    return (KERN_SUCCESS);
}
/*
 * FIXME - thread_set_child
 */

void thread_set_child(thread_act_t child, int pid);
void
thread_set_child(thread_act_t child, int pid)
{
    child->mact.pcb->iss.eax = pid;
    child->mact.pcb->iss.edx = 1;
    child->mact.pcb->iss.efl &= ~EFL_CF;
}

void thread_set_parent(thread_act_t parent, int pid);
void
thread_set_parent(thread_act_t parent, int pid)
{
    parent->mact.pcb->iss.eax = pid;
    parent->mact.pcb->iss.edx = 0;
    parent->mact.pcb->iss.efl &= ~EFL_CF;
}
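
/*
 * The two setters above encode the i386 fork() return protocol: eax
 * carries the value the user-mode stub will return, edx distinguishes
 * the child return (1) from the parent return (0), and the carry flag
 * is cleared so the stub does not treat eax as an errno value.
 */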
/*
 * Move pages from one kernel virtual address to another.
 * Both addresses are assumed to reside in the Sysmap,
 * and size must be a multiple of the page size.
 */
void
pagemove(
    register caddr_t from,
    register caddr_t to,
    int size)
{
    pmap_movepage((unsigned long)from, (unsigned long)to, (vm_size_t)size);
}
/*
 * System Call handling code
 */

#define ERESTART    -1    /* restart syscall */
#define EJUSTRETURN -2    /* don't modify regs, just return */

struct sysent {        /* system call table */
    unsigned short  sy_narg;        /* number of args */
    char            sy_parallel;    /* can execute in parallel */
    char            sy_funnel;      /* funnel type */
    unsigned long   (*sy_call)(void *, void *, int *);    /* implementing function */
};

#define NO_FUNNEL       0
#define KERNEL_FUNNEL   1
#define NETWORK_FUNNEL  2

extern funnel_t *kernel_flock;
extern funnel_t *network_flock;

extern struct sysent sysent[];
extern int nsysent;

int set_bsduthreadargs(thread_act_t, struct i386_saved_state *, void *);

void *get_bsduthreadarg(thread_act_t);

void unix_syscall(struct i386_saved_state *);
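
/*
 * Dispatch notes for the handlers below: the BSD system call number
 * arrives in eax and its arguments sit on the user stack just above the
 * return address (uesp + sizeof(int)).  Numbers outside the table map to
 * sysent[63]; number 0 is the indirect syscall() gate, in which case the
 * real number is re-fetched from the first user argument with fuword().
 */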
void
unix_syscall_return(int error)
{
    thread_act_t thread;
    volatile int *rval;
    struct i386_saved_state *regs;
    struct proc *p;
    struct proc *current_proc();
    unsigned short code;
    vm_offset_t params;
    struct sysent *callp;

    thread = current_act();
    rval = (int *)get_bsduthreadrval(thread);
    p = current_proc();

    regs = USER_REGS(thread);

    /* reconstruct code for tracing before blasting eax */
    code = regs->eax;
    params = (vm_offset_t) ((caddr_t)regs->uesp + sizeof (int));
    callp = (code >= nsysent) ? &sysent[63] : &sysent[code];
    if (callp == sysent) {
        code = fuword(params);
    }

    if (error == ERESTART) {
        regs->eip -= 7;
    }
    else if (error != EJUSTRETURN) {
        if (error) {
            regs->eax = error;
            regs->efl |= EFL_CF;    /* carry bit */
        } else { /* (not error) */
            regs->eax = rval[0];
            regs->edx = rval[1];
            regs->efl &= ~EFL_CF;
        }
    }

    if (KTRPOINT(p, KTR_SYSRET))
        ktrsysret(p, code, error, rval[0], callp->sy_funnel);

    KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_EXCP_SC, code) | DBG_FUNC_END,
        error, rval[0], rval[1], 0, 0);

    if (callp->sy_funnel != NO_FUNNEL)
        (void) thread_funnel_set(current_thread()->funnel_lock, FALSE);

    thread_exception_return();
    /* NOTREACHED */
}
void
unix_syscall(struct i386_saved_state *regs)
{
    thread_act_t thread;
    void *vt;
    unsigned short code;
    struct sysent *callp;
    int nargs, error;
    volatile int *rval;
    int funnel_type;
    vm_offset_t params;
    struct proc *p;
    struct proc *current_proc();

    thread = current_act();
    p = current_proc();
    rval = (int *)get_bsduthreadrval(thread);

    //printf("[scall : eax %x]", regs->eax);
    code = regs->eax;
    params = (vm_offset_t) ((caddr_t)regs->uesp + sizeof (int));
    callp = (code >= nsysent) ? &sysent[63] : &sysent[code];
    if (callp == sysent) {
        code = fuword(params);
        params += sizeof (int);
        callp = (code >= nsysent) ? &sysent[63] : &sysent[code];
    }

    vt = get_bsduthreadarg(thread);

    if ((nargs = (callp->sy_narg * sizeof (int))) &&
        (error = copyin((char *) params, (char *)vt, nargs)) != 0) {
        regs->eax = error;
        regs->efl |= EFL_CF;

        thread_exception_return();
        /* NOTREACHED */
    }

    rval[0] = 0;
    rval[1] = regs->edx;

    funnel_type = callp->sy_funnel;
    if (funnel_type == KERNEL_FUNNEL)
        (void) thread_funnel_set(kernel_flock, TRUE);
    else if (funnel_type == NETWORK_FUNNEL)
        (void) thread_funnel_set(network_flock, TRUE);

    set_bsduthreadargs(thread, regs, NULL);

    if (callp->sy_narg > 8)
        panic("unix_syscall max arg count exceeded (%d)", callp->sy_narg);

    if (KTRPOINT(p, KTR_SYSCALL))
        ktrsyscall(p, code, callp->sy_narg, vt, funnel_type);

    {
        int *ip = (int *)vt;
        KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_EXCP_SC, code) | DBG_FUNC_START,
            *ip, *(ip+1), *(ip+2), *(ip+3), 0);
    }

    error = (*(callp->sy_call))(p, (void *) vt, (int *) &rval[0]);

    /* May be needed with vfork changes */
    regs = USER_REGS(thread);

    if (error == ERESTART) {
        regs->eip -= 7;
    }
    else if (error != EJUSTRETURN) {
        if (error) {
            regs->eax = error;
            regs->efl |= EFL_CF;    /* carry bit */
        } else { /* (not error) */
            regs->eax = rval[0];
            regs->edx = rval[1];
            regs->efl &= ~EFL_CF;
        }
    }

    if (KTRPOINT(p, KTR_SYSRET))
        ktrsysret(p, code, error, rval[0], funnel_type);

    KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_EXCP_SC, code) | DBG_FUNC_END,
        error, rval[0], rval[1], 0, 0);

    if (funnel_type != NO_FUNNEL)
        (void) thread_funnel_set(current_thread()->funnel_lock, FALSE);

    thread_exception_return();
    /* NOTREACHED */
}
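
/*
 * Error return protocol used above: on failure eax holds the errno value
 * and the carry flag is set; on success the carry flag is cleared and
 * eax/edx carry rval[0]/rval[1].  ERESTART backs eip up over the trap
 * instruction so the call is reissued, and EJUSTRETURN leaves the saved
 * user registers untouched.
 */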
void
machdep_syscall(struct i386_saved_state *regs)
{
    int trapno, nargs;
    machdep_call_t *entry;
    struct proc *current_proc();

    trapno = regs->eax;
    if (trapno < 0 || trapno >= machdep_call_count) {
        regs->eax = (unsigned int)kern_invalid();

        thread_exception_return();
        /* NOTREACHED */
    }

    entry = &machdep_call_table[trapno];
    nargs = entry->nargs;

    if (nargs > 0) {
        int args[nargs];

        if (copyin((char *) regs->uesp + sizeof (int),
                   (char *) args,
                   nargs * sizeof (int))) {

            regs->eax = KERN_INVALID_ADDRESS;

            thread_exception_return();
            /* NOTREACHED */
        }

        switch (nargs) {
        case 1:
            regs->eax = (*entry->routine)(args[0]);
            break;
        case 2:
            regs->eax = (*entry->routine)(args[0], args[1]);
            break;
        case 3:
            regs->eax = (*entry->routine)(args[0], args[1], args[2]);
            break;
        case 4:
            regs->eax = (*entry->routine)(args[0], args[1], args[2], args[3]);
            break;
        default:
            panic("machdep_syscall(): too many args");
        }
    }
    else
        regs->eax = (*entry->routine)();

    if (current_thread()->funnel_lock)
        (void) thread_funnel_set(current_thread()->funnel_lock, FALSE);

    thread_exception_return();
    /* NOTREACHED */
}
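
/*
 * machdep_syscall dispatches machine dependent traps through
 * machdep_call_table: the trap number selects the entry, up to four
 * integer arguments are copied in from the user stack and passed
 * positionally, and the result is handed back to user space in eax.
 */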
kern_return_t
thread_compose_cthread_desc(unsigned int addr, pcb_t pcb)
{
    struct real_descriptor desc;
    extern struct fake_descriptor *mp_ldt[];
    struct real_descriptor *ldtp;
    int mycpu = cpu_number();

    ldtp = (struct real_descriptor *)mp_ldt[mycpu];
    desc.limit_low = 1;
    desc.limit_high = 0;
    desc.base_low = addr & 0xffff;
    desc.base_med = (addr >> 16) & 0xff;
    desc.base_high = (addr >> 24) & 0xff;
    desc.access = ACC_P|ACC_PL_U|ACC_DATA_W;
    desc.granularity = SZ_32|SZ_G;
    pcb->cthread_desc = desc;
    ldtp[sel_idx(USER_CTHREAD)] = desc;
    return (KERN_SUCCESS);
}
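
/*
 * The descriptor composed above is a user-privilege, writable 32-bit
 * data segment whose base is the cthread "self" address.  It is saved
 * in the pcb and installed in the current CPU's LDT at the USER_CTHREAD
 * slot, presumably so the user-level threading library can reach
 * per-thread data through a segment register without trapping on every
 * access.
 */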
kern_return_t
thread_set_cthread_self(int self)
{
    current_act()->mact.pcb->cthread_self = (unsigned int)self;

    return (KERN_SUCCESS);
}

kern_return_t
thread_get_cthread_self(void)
{
    return ((kern_return_t)current_act()->mact.pcb->cthread_self);
}

kern_return_t
thread_fast_set_cthread_self(int self)
{
    pcb_t pcb;

    pcb = (pcb_t)current_act()->mact.pcb;
    thread_compose_cthread_desc((unsigned int)self, pcb);
    pcb->cthread_self = (unsigned int)self;    /* preserve old func too */
    return (USER_CTHREAD);
}
void
mach25_syscall(struct i386_saved_state *regs)
{
    printf("*** Attempt to execute a Mach 2.5 system call at EIP=%x EAX=%x(%d)\n",
           regs->eip, regs->eax, -regs->eax);
    panic("FIXME!");
}

#endif    /* MACH_BSD */
/* This routine is called from assembly before each and every mach trap.
 */

extern unsigned int mach_call_start(unsigned int, unsigned int *);

unsigned int
mach_call_start(unsigned int call_number, unsigned int *args)
{
    int i, argc;
    unsigned int kdarg[3];

    /* Always prepare to trace mach system calls */

    kdarg[0] = 0;
    kdarg[1] = 0;
    kdarg[2] = 0;

    argc = mach_trap_table[call_number>>4].mach_trap_arg_count;

    if (argc > 3)
        argc = 3;

    for (i = 0; i < argc; i++)
        kdarg[i] = (int)*(args + i);

    KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_EXCP_SC, (call_number>>4)) | DBG_FUNC_START,
        kdarg[0], kdarg[1], kdarg[2], 0, 0);

    return call_number;    /* pass this back thru */
}
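
/*
 * The >>4 above treats call_number as a scaled offset into
 * mach_trap_table (apparently the form in which the assembly stub passes
 * it), and the same value is returned unchanged so the stub can continue
 * its own dispatch with it.
 */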
/* This routine is called from assembly after each mach system call
 */

extern unsigned int mach_call_end(unsigned int, unsigned int);

unsigned int
mach_call_end(unsigned int call_number, unsigned int retval)
{
    KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_EXCP_SC, (call_number>>4)) | DBG_FUNC_END,
        retval, 0, 0, 0, 0);

    return retval;    /* pass this back thru */
}