/* bsd/dev/i386/systemcalls.c (apple/xnu) */
/*
 * Copyright (c) 2000-2008 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/assert.h>
#include <kern/clock.h>
#include <kern/locks.h>
#include <kern/sched_prim.h>
#include <kern/debug.h>
#include <mach/machine/thread_status.h>
#include <mach/thread_act.h>
#include <mach/branch_predicates.h>

#include <sys/kernel.h>
#include <sys/proc_internal.h>
#include <sys/syscall.h>
#include <sys/systm.h>
#include <sys/errno.h>
#include <sys/kdebug.h>
#include <sys/sysent.h>
#include <sys/sysproto.h>
#include <sys/kauth.h>
#include <sys/systm.h>

#include <security/audit/audit.h>

#include <i386/machine_routines.h>
#include <mach/i386/syscall_sw.h>

#include <machine/pal_routines.h>
#if CONFIG_DTRACE
extern int32_t dtrace_systrace_syscall(struct proc *, void *, int *);
extern void dtrace_systrace_syscall_return(unsigned short, int, int *);
#endif

extern void unix_syscall(x86_saved_state_t *);
extern void unix_syscall64(x86_saved_state_t *);
extern void *find_user_regs(thread_t);

/* dynamically generated at build time based on syscalls.master */
extern const char *syscallnames[];
/*
 * This needs to be a single switch so that it's "all on" or "all off",
 * rather than being turned on for some code paths and not others, as this
 * has a tendency to introduce "blame the next guy" bugs.
 */
#define FUNNEL_DEBUG 1    /* Check for funnel held on exit */
/*
 * Function:  unix_syscall
 *
 * Inputs:    regs - pointer to i386 save area
 */
void
unix_syscall(x86_saved_state_t *state)
{
    thread_t thread;
    void *vt;
    unsigned int code;
    struct sysent *callp;

    int error;
    vm_offset_t params;
    struct proc *p;
    struct uthread *uthread;
    x86_saved_state32_t *regs;
    boolean_t is_vfork;

    assert(is_saved_state32(state));
    regs = saved_state32(state);

    if (regs->eax == 0x800)
        thread_exception_return();

    thread = current_thread();
    uthread = get_bsdthread_info(thread);
    /* Get the appropriate proc; may be different from task's for vfork() */
    is_vfork = uthread->uu_flag & UT_VFORK;
    if (__improbable(is_vfork != 0))
        p = current_proc();
    else
        p = (struct proc *)get_bsdtask_info(current_task());

    /* Verify that we are not being called from a task without a proc */
    if (__improbable(p == NULL)) {
        regs->eax = EPERM;
        regs->efl |= EFL_CF;
        task_terminate_internal(current_task());
        thread_exception_return();
        /* NOTREACHED */
    }
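    /*
     * Decode the system call number from eax; out-of-range numbers are
     * clamped to sysent[63], a reserved/unimplemented entry, so a bogus
     * number fails cleanly instead of indexing past the table.
     */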
    code = regs->eax & I386_SYSCALL_NUMBER_MASK;
    DEBUG_KPRINT_SYSCALL_UNIX("unix_syscall: code=%d(%s) eip=%u\n",
        code, syscallnames[code >= NUM_SYSENT ? 63 : code], (uint32_t)regs->eip);
    params = (vm_offset_t) (regs->uesp + sizeof (int));

    regs->efl &= ~(EFL_CF);

    callp = (code >= NUM_SYSENT) ? &sysent[63] : &sysent[code];
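    /*
     * callp == sysent means the number was 0 (the indirect syscall);
     * the real system call number is the first word on the user stack,
     * so fetch it and redo the table lookup.
     */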
    if (__improbable(callp == sysent)) {
        code = fuword(params);
        params += sizeof(int);
        callp = (code >= NUM_SYSENT) ? &sysent[63] : &sysent[code];
    }

    vt = (void *)uthread->uu_arg;
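    /*
     * For the 32-bit ABI all arguments live on the user stack; they are
     * copied below as raw 32-bit words into the uthread's uu_arg staging
     * buffer, which is what vt points at.
     */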
    if (callp->sy_arg_bytes != 0) {
        sy_munge_t *mungerp;
        int nargs;

        assert((unsigned) callp->sy_arg_bytes <= sizeof (uthread->uu_arg));
        nargs = callp->sy_arg_bytes;
        error = copyin((user_addr_t) params, (char *) vt, nargs);
        if (error) {
            regs->eax = error;
            regs->efl |= EFL_CF;
            thread_exception_return();
            /* NOTREACHED */
        }

        if (__probable(code != 180)) {
            int *ip = (int *)vt;

            KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
                BSDDBG_CODE(DBG_BSD_EXCP_SC, code) | DBG_FUNC_START,
                *ip, *(ip+1), *(ip+2), *(ip+3), 0);
        }
        mungerp = callp->sy_arg_munge32;

        /*
         * If non-NULL, then call the syscall argument munger to
         * copy in arguments (see xnu/bsd/dev/{i386|x86_64}/munge.s); the
         * first argument is NULL because we are munging in place
         * after a copyin because the ABI currently doesn't use
         * registers to pass system call arguments.
         */
        if (mungerp != NULL)
            (*mungerp)(NULL, vt);
    } else
        KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
            BSDDBG_CODE(DBG_BSD_EXCP_SC, code) | DBG_FUNC_START,
            0, 0, 0, 0, 0);
    /*
     * Delayed binding of thread credential to process credential, if we
     * are not running with an explicitly set thread credential.
     */
    kauth_cred_uthread_update(uthread, p);

    uthread->uu_rval[0] = 0;
    uthread->uu_rval[1] = regs->edx;
    uthread->uu_flag |= UT_NOTCANCELPT;

#ifdef JOE_DEBUG
    uthread->uu_iocount = 0;
    uthread->uu_vpindex = 0;
#endif
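    /*
     * Dispatch through the sysent entry: the handler deposits its results
     * in uu_rval[] and returns 0, an errno value, or ERESTART/EJUSTRETURN.
     */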
    AUDIT_SYSCALL_ENTER(code, p, uthread);
    error = (*(callp->sy_call))((void *) p, (void *) vt, &(uthread->uu_rval[0]));
    AUDIT_SYSCALL_EXIT(code, p, uthread, error);

#ifdef JOE_DEBUG
    if (uthread->uu_iocount)
        printf("system call returned with uu_iocount != 0\n");
#endif

#if CONFIG_DTRACE
    uthread->t_dtrace_errno = error;
#endif /* CONFIG_DTRACE */
    if (__improbable(error == ERESTART)) {
        /*
         * Move the user's pc back to repeat the syscall:
         * 5 bytes for a sysenter, or 2 for an int 8x.
         * The SYSENTER_TF_CS covers single-stepping over a sysenter
         * - see debug trap handler in idt.s/idt64.s
         */
        pal_syscall_restart(thread, state);
    }
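    /*
     * Error reporting follows the BSD convention: on failure the carry
     * flag is set and eax holds the errno; the user-mode syscall stub
     * uses CF to distinguish an errno from a normal return value.
     */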
    else if (error != EJUSTRETURN) {
        if (__improbable(error)) {
            regs->eax = error;
            regs->efl |= EFL_CF;    /* carry bit */
        } else { /* (not error) */
            regs->eax = uthread->uu_rval[0];
            regs->edx = uthread->uu_rval[1];
        }
    }

    DEBUG_KPRINT_SYSCALL_UNIX(
        "unix_syscall: error=%d retval=(%u,%u)\n",
        error, regs->eax, regs->edx);

    uthread->uu_flag &= ~UT_NOTCANCELPT;

#if FUNNEL_DEBUG
    /*
     * if we're holding the funnel panic
     */
    syscall_exit_funnelcheck();
#endif /* FUNNEL_DEBUG */
    if (__improbable(uthread->uu_lowpri_window)) {
        /*
         * task is marked as a low priority I/O type
         * and the I/O we issued while in this system call
         * collided with normal I/O operations... we'll
         * delay in order to mitigate the impact of this
         * task on the normal operation of the system
         */
        throttle_lowpri_io(1);
    }
    if (__probable(code != 180))
        KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
            BSDDBG_CODE(DBG_BSD_EXCP_SC, code) | DBG_FUNC_END,
            error, uthread->uu_rval[0], uthread->uu_rval[1], p->p_pid, 0);

    if (__improbable(!is_vfork && callp->sy_call == (sy_call_t *)execve && !error)) {
        pal_execve_return(thread);
    }

    thread_exception_return();
    /* NOTREACHED */
}
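/*
 * unix_syscall64: 64-bit entry point.  The system call number arrives in
 * rax and the arguments arrive in registers, so no stack copyin is needed
 * unless a call takes more arguments than fit in the register set.
 */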
void
unix_syscall64(x86_saved_state_t *state)
{
    thread_t thread;
    unsigned int code;
    struct sysent *callp;
    void *uargp;
    int args_in_regs;
    int error;
    struct proc *p;
    struct uthread *uthread;
    x86_saved_state64_t *regs;

    assert(is_saved_state64(state));
    regs = saved_state64(state);

    if (regs->rax == 0x2000800)
        thread_exception_return();

    thread = current_thread();
    uthread = get_bsdthread_info(thread);
    /* Get the appropriate proc; may be different from task's for vfork() */
    if (__probable(!(uthread->uu_flag & UT_VFORK)))
        p = (struct proc *)get_bsdtask_info(current_task());
    else
        p = current_proc();

    /* Verify that we are not being called from a task without a proc */
    if (__improbable(p == NULL)) {
        regs->rax = EPERM;
        regs->isf.rflags |= EFL_CF;
        task_terminate_internal(current_task());
        thread_exception_return();
        /* NOTREACHED */
    }
    args_in_regs = 6;
    code = regs->rax & SYSCALL_NUMBER_MASK;
    DEBUG_KPRINT_SYSCALL_UNIX(
        "unix_syscall64: code=%d(%s) rip=%llx\n",
        code, syscallnames[code >= NUM_SYSENT ? 63 : code], regs->isf.rip);
    callp = (code >= NUM_SYSENT) ? &sysent[63] : &sysent[code];
    uargp = (void *)(&regs->rdi);
    if (__improbable(callp == sysent)) {
        /*
         * indirect system call... system call number
         * passed as 'arg0'
         */
        code = regs->rdi;
        callp = (code >= NUM_SYSENT) ? &sysent[63] : &sysent[code];
        uargp = (void *)(&regs->rsi);
        args_in_regs = 5;
    }

    uthread->uu_ap = uargp;
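    /*
     * uargp points directly at the saved rdi: the save area lays out the
     * argument registers and the v_arg6/v_arg7 spill slots contiguously,
     * so the handler sees one flat array of 64-bit arguments and any
     * extras only need to be copied in from the user stack below.
     */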
    if (callp->sy_narg != 0) {
        uint64_t *ip = (uint64_t *)uargp;

        KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
            BSDDBG_CODE(DBG_BSD_EXCP_SC, code) | DBG_FUNC_START,
            (int)(*ip), (int)(*(ip+1)), (int)(*(ip+2)), (int)(*(ip+3)), 0);

        assert(callp->sy_narg <= 8);

        if (__improbable(callp->sy_narg > args_in_regs)) {
            int copyin_count;

            copyin_count = (callp->sy_narg - args_in_regs) * sizeof(uint64_t);

            error = copyin((user_addr_t)(regs->isf.rsp + sizeof(user_addr_t)), (char *)&regs->v_arg6, copyin_count);
            if (error) {
                regs->rax = error;
                regs->isf.rflags |= EFL_CF;
                thread_exception_return();
                /* NOTREACHED */
            }
        }
    } else
        KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
            BSDDBG_CODE(DBG_BSD_EXCP_SC, code) | DBG_FUNC_START,
            0, 0, 0, 0, 0);
    /*
     * Delayed binding of thread credential to process credential, if we
     * are not running with an explicitly set thread credential.
     */
    kauth_cred_uthread_update(uthread, p);

    uthread->uu_rval[0] = 0;
    uthread->uu_rval[1] = 0;

    uthread->uu_flag |= UT_NOTCANCELPT;

#ifdef JOE_DEBUG
    uthread->uu_iocount = 0;
    uthread->uu_vpindex = 0;
#endif
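    /*
     * Unlike the 32-bit path there is no argument munging here: the user
     * arguments are already 64-bit values in the saved register block
     * that uargp points at, so they are handed to the syscall as-is.
     */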
    AUDIT_SYSCALL_ENTER(code, p, uthread);
    error = (*(callp->sy_call))((void *) p, uargp, &(uthread->uu_rval[0]));
    AUDIT_SYSCALL_EXIT(code, p, uthread, error);

#ifdef JOE_DEBUG
    if (uthread->uu_iocount)
        printf("system call returned with uu_iocount != 0\n");
#endif

#if CONFIG_DTRACE
    uthread->t_dtrace_errno = error;
#endif /* CONFIG_DTRACE */
    if (__improbable(error == ERESTART)) {
        /*
         * all system calls come through via the syscall instruction
         * in 64 bit mode... its 2 bytes in length
         * move the user's pc back to repeat the syscall:
         */
        pal_syscall_restart( thread, state );
    }
    else if (error != EJUSTRETURN) {
        if (__improbable(error)) {
            regs->rax = error;
            regs->isf.rflags |= EFL_CF;    /* carry bit */
        } else { /* (not error) */
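            /*
             * uu_rval[] holds two 32-bit words; the switch below widens
             * or reassembles them according to the declared return type
             * (the 64-bit cases read both words back as one uint64_t).
             */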
            switch (callp->sy_return_type) {
            case _SYSCALL_RET_INT_T:
                regs->rax = uthread->uu_rval[0];
                regs->rdx = uthread->uu_rval[1];
                break;
            case _SYSCALL_RET_UINT_T:
                regs->rax = ((u_int)uthread->uu_rval[0]);
                regs->rdx = ((u_int)uthread->uu_rval[1]);
                break;
            case _SYSCALL_RET_OFF_T:
            case _SYSCALL_RET_ADDR_T:
            case _SYSCALL_RET_SIZE_T:
            case _SYSCALL_RET_SSIZE_T:
            case _SYSCALL_RET_UINT64_T:
                regs->rax = *((uint64_t *)(&uthread->uu_rval[0]));
                regs->rdx = 0;
                break;
            case _SYSCALL_RET_NONE:
                break;
            default:
                panic("unix_syscall: unknown return type");
                break;
            }
            regs->isf.rflags &= ~EFL_CF;
        }
    }
    DEBUG_KPRINT_SYSCALL_UNIX(
        "unix_syscall64: error=%d retval=(%llu,%llu)\n",
        error, regs->rax, regs->rdx);

    uthread->uu_flag &= ~UT_NOTCANCELPT;

#if FUNNEL_DEBUG
    /*
     * if we're holding the funnel panic
     */
    syscall_exit_funnelcheck();
#endif /* FUNNEL_DEBUG */
    if (__improbable(uthread->uu_lowpri_window)) {
        /*
         * task is marked as a low priority I/O type
         * and the I/O we issued while in this system call
         * collided with normal I/O operations... we'll
         * delay in order to mitigate the impact of this
         * task on the normal operation of the system
         */
        throttle_lowpri_io(1);
    }
    if (__probable(code != 180))
        KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
            BSDDBG_CODE(DBG_BSD_EXCP_SC, code) | DBG_FUNC_END,
            error, uthread->uu_rval[0], uthread->uu_rval[1], p->p_pid, 0);

    thread_exception_return();
    /* NOTREACHED */
}
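/*
 * unix_syscall_return() finishes a system call that completed off the
 * direct dispatch path (for example after blocking and resuming via a
 * continuation): it re-derives the saved user state with find_user_regs()
 * and applies the same error and return-value conventions as above.
 */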
void
unix_syscall_return(int error)
{
    thread_t thread;
    struct uthread *uthread;
    struct proc *p;
    unsigned int code;
    vm_offset_t params;
    struct sysent *callp;

    thread = current_thread();
    uthread = get_bsdthread_info(thread);

    pal_register_cache_state(thread, DIRTY);

    p = current_proc();

    if (proc_is64bit(p)) {
        x86_saved_state64_t *regs;

        regs = saved_state64(find_user_regs(thread));

        /* reconstruct code for tracing before blasting rax */
        code = regs->rax & SYSCALL_NUMBER_MASK;
        callp = (code >= NUM_SYSENT) ? &sysent[63] : &sysent[code];

        if (callp == sysent)
            /*
             * indirect system call... system call number
             * passed as 'arg0'
             */
            code = regs->rdi;

#if CONFIG_DTRACE
        if (callp->sy_call == dtrace_systrace_syscall)
            dtrace_systrace_syscall_return( code, error, uthread->uu_rval );
#endif /* CONFIG_DTRACE */
        AUDIT_SYSCALL_EXIT(code, p, uthread, error);

        if (error == ERESTART) {
            pal_syscall_restart( thread, find_user_regs(thread) );
        }
        else if (error != EJUSTRETURN) {
            if (error) {
                regs->rax = error;
                regs->isf.rflags |= EFL_CF;    /* carry bit */
            } else { /* (not error) */

                switch (callp->sy_return_type) {
                case _SYSCALL_RET_INT_T:
                    regs->rax = uthread->uu_rval[0];
                    regs->rdx = uthread->uu_rval[1];
                    break;
                case _SYSCALL_RET_UINT_T:
                    regs->rax = ((u_int)uthread->uu_rval[0]);
                    regs->rdx = ((u_int)uthread->uu_rval[1]);
                    break;
                case _SYSCALL_RET_OFF_T:
                case _SYSCALL_RET_ADDR_T:
                case _SYSCALL_RET_SIZE_T:
                case _SYSCALL_RET_SSIZE_T:
                case _SYSCALL_RET_UINT64_T:
                    regs->rax = *((uint64_t *)(&uthread->uu_rval[0]));
                    regs->rdx = 0;
                    break;
                case _SYSCALL_RET_NONE:
                    break;
                default:
                    panic("unix_syscall: unknown return type");
                    break;
                }
                regs->isf.rflags &= ~EFL_CF;
            }
        }
        DEBUG_KPRINT_SYSCALL_UNIX(
            "unix_syscall_return: error=%d retval=(%llu,%llu)\n",
            error, regs->rax, regs->rdx);
    } else {
        x86_saved_state32_t *regs;

        regs = saved_state32(find_user_regs(thread));

        regs->efl &= ~(EFL_CF);
        /* reconstruct code for tracing before blasting eax */
        code = regs->eax & I386_SYSCALL_NUMBER_MASK;
        callp = (code >= NUM_SYSENT) ? &sysent[63] : &sysent[code];

#if CONFIG_DTRACE
        if (callp->sy_call == dtrace_systrace_syscall)
            dtrace_systrace_syscall_return( code, error, uthread->uu_rval );
#endif /* CONFIG_DTRACE */
        AUDIT_SYSCALL_EXIT(code, p, uthread, error);
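        /*
         * For the indirect syscall, re-read the real system call number
         * from the user stack so the trace point below reports it rather
         * than 0.
         */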
        if (callp == sysent) {
            params = (vm_offset_t) (regs->uesp + sizeof (int));
            code = fuword(params);
        }
        if (error == ERESTART) {
            pal_syscall_restart( thread, find_user_regs(thread) );
        }
        else if (error != EJUSTRETURN) {
            if (error) {
                regs->eax = error;
                regs->efl |= EFL_CF;    /* carry bit */
            } else { /* (not error) */
                regs->eax = uthread->uu_rval[0];
                regs->edx = uthread->uu_rval[1];
            }
        }
        DEBUG_KPRINT_SYSCALL_UNIX(
            "unix_syscall_return: error=%d retval=(%u,%u)\n",
            error, regs->eax, regs->edx);
    }
    uthread->uu_flag &= ~UT_NOTCANCELPT;

#if FUNNEL_DEBUG
    /*
     * if we're holding the funnel panic
     */
    syscall_exit_funnelcheck();
#endif /* FUNNEL_DEBUG */

    if (uthread->uu_lowpri_window) {
        /*
         * task is marked as a low priority I/O type
         * and the I/O we issued while in this system call
         * collided with normal I/O operations... we'll
         * delay in order to mitigate the impact of this
         * task on the normal operation of the system
         */
        throttle_lowpri_io(1);
    }
    KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
        BSDDBG_CODE(DBG_BSD_EXCP_SC, code) | DBG_FUNC_END,
        error, uthread->uu_rval[0], uthread->uu_rval[1], p->p_pid, 0);

    thread_exception_return();
    /* NOTREACHED */
}