2 * Copyright (c) 2000-2008 Apple Inc. All rights reserved.
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
28 #include <kern/task.h>
29 #include <kern/thread.h>
30 #include <kern/assert.h>
31 #include <kern/clock.h>
32 #include <kern/locks.h>
33 #include <kern/sched_prim.h>
34 #include <kern/debug.h>
35 #include <mach/machine/thread_status.h>
36 #include <mach/thread_act.h>
37 #include <mach/branch_predicates.h>
39 #include <sys/kernel.h>
41 #include <sys/proc_internal.h>
42 #include <sys/syscall.h>
43 #include <sys/systm.h>
45 #include <sys/errno.h>
46 #include <sys/kdebug.h>
47 #include <sys/sysent.h>
48 #include <sys/sysproto.h>
49 #include <sys/kauth.h>
50 #include <sys/systm.h>
52 #include <security/audit/audit.h>
55 #include <i386/machine_routines.h>
56 #include <mach/i386/syscall_sw.h>
58 #include <machine/pal_routines.h>
61 extern int32_t dtrace_systrace_syscall(struct proc
*, void *, int *);
62 extern void dtrace_systrace_syscall_return(unsigned short, int, int *);
65 extern void unix_syscall(x86_saved_state_t
*);
66 extern void unix_syscall64(x86_saved_state_t
*);
67 extern void *find_user_regs(thread_t
);
69 extern void x86_toggle_sysenter_arg_store(thread_t thread
, boolean_t valid
);
70 extern boolean_t
x86_sysenter_arg_store_isvalid(thread_t thread
);
72 /* dynamically generated at build time based on syscalls.master */
73 extern const char *syscallnames
[];
/*
 * This needs to be a single switch so that it's "all on" or "all off",
 * rather than being turned on for some code paths and not others, as this
 * has a tendency to introduce "blame the next guy" bugs.
 */
81 #define FUNNEL_DEBUG 1 /* Check for funnel held on exit */
/*
 * Function: unix_syscall
 *
 * Inputs:	regs - pointer to i386 save area
 *
 * Entry point for 32-bit BSD system calls.  Decodes the syscall number
 * from eax, copies user arguments in from the user stack (or the
 * per-thread sysenter stash), dispatches through the sysent table, and
 * writes results back into the saved register state.  Never returns to
 * its caller: exits via thread_exception_return().
 *
 * NOTE(review): extraction has dropped lines from this function — the
 * return type / opening brace and several local declarations (thread,
 * p, is_vfork, code, params, error, vt, nargs, mungerp, ip) are not
 * visible, and some braces / else-branches are missing; gaps are
 * flagged inline below and must be restored from the pristine source.
 */
unix_syscall(x86_saved_state_t *state)
	struct uthread		*uthread;
	x86_saved_state32_t	*regs;
	boolean_t		args_in_uthread;

	/* This entry point handles only 32-bit saved state */
	assert(is_saved_state32(state));
	regs = saved_state32(state);

	/* eax == 0x800 is not a dispatchable BSD syscall number — go
	 * straight back to user space (presumably a machdep/absorbed trap
	 * value; confirm against syscall_sw.h / idt.s) */
	if (regs->eax == 0x800)
		thread_exception_return();

	thread = current_thread();
	uthread = get_bsdthread_info(thread);

	/* Get the approriate proc; may be different from task's for vfork() */
	is_vfork = uthread->uu_flag & UT_VFORK;
	if (__improbable(is_vfork != 0))
		/* NOTE(review): vfork branch body and the else keyword were
		 * lost in extraction — presumably p = current_proc(); with an
		 * else guarding the assignment below */
	p = (struct proc *)get_bsdtask_info(current_task());

	/* Verify that we are not being called from a task without a proc */
	if (__improbable(p == NULL)) {
		task_terminate_internal(current_task());
		thread_exception_return();
		/* NOTE(review): closing brace / unreachable marker lost */

	/* Low bits of eax carry the syscall number */
	code = regs->eax & I386_SYSCALL_NUMBER_MASK;
	DEBUG_KPRINT_SYSCALL_UNIX("unix_syscall: code=%d(%s) eip=%u\n",
		code, syscallnames[code >= NUM_SYSENT ? 63 : code], (uint32_t)regs->eip);

	/* The sysenter trampoline may have stashed the args in the uthread
	 * already; only trust that copy if it is marked valid */
	args_in_uthread = ((regs->eax & I386_SYSCALL_ARG_BYTES_MASK) != 0) && x86_sysenter_arg_store_isvalid(thread);
	/* User args start just above the return address on the user stack */
	params = (vm_offset_t) (regs->uesp + sizeof (int));

	regs->efl &= ~(EFL_CF);		/* clear carry: assume success */

	/* Out-of-range numbers route to sysent slot 63 (the
	 * invalid-syscall entry — presumably enosys; confirm) */
	callp = (code >= NUM_SYSENT) ? &sysent[63] : &sysent[code];

	if (__improbable(callp == sysent)) {
		/* indirect syscall (slot 0): real number is the first user arg */
		code = fuword(params);
		params += sizeof(int);
		callp = (code >= NUM_SYSENT) ? &sysent[63] : &sysent[code];
		/* NOTE(review): closing brace lost */

	vt = (void *)uthread->uu_arg;	/* per-thread argument scratch buffer */

	if (callp->sy_arg_bytes != 0) {
		/* Arguments must fit the uthread scratch buffer */
		assert((unsigned) callp->sy_arg_bytes <= sizeof (uthread->uu_arg));
		if (!args_in_uthread)
		/* NOTE(review): braces around this copyin sequence lost */
		nargs = callp->sy_arg_bytes;
		error = copyin((user_addr_t) params, (char *) vt, nargs);
		/* NOTE(review): the copyin error check was lost in extraction;
		 * this return is presumably its failure path */
		thread_exception_return();

		/* code 180 is excluded from kdebug tracing — presumably
		 * kdebug_trace itself, to avoid self-tracing; confirm */
		if (__probable(code != 180)) {
			KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
				BSDDBG_CODE(DBG_BSD_EXCP_SC, code) | DBG_FUNC_START,
				*ip, *(ip+1), *(ip+2), *(ip+3), 0);

		mungerp = callp->sy_arg_munge32;
		/*
		 * If non-NULL, then call the syscall argument munger to
		 * copy in arguments (see xnu/bsd/dev/{i386|x86_64}/munge.s); the
		 * first argument is NULL because we are munging in place
		 * after a copyin because the ABI currently doesn't use
		 * registers to pass system call arguments.
		 */
		(*mungerp)(NULL, vt);
	/* NOTE(review): the else branch emitting a zero-argument trace
	 * event was partially lost; this START event is its remnant */
	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
		BSDDBG_CODE(DBG_BSD_EXCP_SC, code) | DBG_FUNC_START,

	/*
	 * Delayed binding of thread credential to process credential, if we
	 * are not running with an explicitly set thread credential.
	 */
	kauth_cred_uthread_update(uthread, p);

	uthread->uu_rval[0] = 0;
	uthread->uu_rval[1] = regs->edx;	/* preserved unless the call sets it */
	uthread->uu_flag |= UT_NOTCANCELPT;	/* not a pthread cancellation point */

	uthread->uu_iocount = 0;	/* leak-detection counter, checked below */
	uthread->uu_vpindex = 0;

	AUDIT_SYSCALL_ENTER(code, p, uthread);
	/* Dispatch the actual system call handler */
	error = (*(callp->sy_call))((void *) p, (void *) vt, &(uthread->uu_rval[0]));
	AUDIT_SYSCALL_EXIT(code, p, uthread, error);

	if (uthread->uu_iocount)
		printf("system call returned with uu_iocount != 0\n");

	/* NOTE(review): the #if CONFIG_DTRACE opener for this line is lost */
	uthread->t_dtrace_errno = error;
#endif /* CONFIG_DTRACE */

	if (__improbable(error == ERESTART)) {
		/*
		 * Move the user's pc back to repeat the syscall:
		 * 5 bytes for a sysenter, or 2 for an int 8x.
		 * The SYSENTER_TF_CS covers single-stepping over a sysenter
		 * - see debug trap handler in idt.s/idt64.s
		 */
		pal_syscall_restart(thread, state);
	/* NOTE(review): closing brace lost */
	else if (error != EJUSTRETURN) {
		if (__improbable(error)) {
			/* NOTE(review): the store of error into regs->eax was
			 * presumably here; lost in extraction */
			regs->efl |= EFL_CF;	/* carry bit signals failure to libc */
		} else { /* (not error) */
			regs->eax = uthread->uu_rval[0];
			regs->edx = uthread->uu_rval[1];
		/* NOTE(review): closing braces lost */

	DEBUG_KPRINT_SYSCALL_UNIX(
		"unix_syscall: error=%d retval=(%u,%u)\n",
		error, regs->eax, regs->edx);

	uthread->uu_flag &= ~UT_NOTCANCELPT;	/* cancellable again */

	/*
	 * if we're holding the funnel panic
	 */
	syscall_exit_funnelcheck();
#endif /* FUNNEL_DEBUG */

	if (__improbable(uthread->uu_lowpri_window)) {
		/*
		 * task is marked as a low priority I/O type
		 * and the I/O we issued while in this system call
		 * collided with normal I/O operations... we'll
		 * delay in order to mitigate the impact of this
		 * task on the normal operation of the system
		 */
		throttle_lowpri_io(TRUE);
	/* NOTE(review): closing brace lost */

	if (__probable(code != 180))
		KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
			BSDDBG_CODE(DBG_BSD_EXCP_SC, code) | DBG_FUNC_END,
			error, uthread->uu_rval[0], uthread->uu_rval[1], p->p_pid, 0);

	/* A successful non-vfork execve needs PAL fixup before returning */
	if (__improbable(!is_vfork && callp->sy_call == (sy_call_t *)execve && !error)) {
		pal_execve_return(thread);
	/* NOTE(review): closing brace lost */

	thread_exception_return();	/* back to user space; never returns */
/*
 * Entry point for 64-bit BSD system calls (syscall instruction).
 * Arguments arrive in registers starting at rdi; any args beyond the
 * register set are copied in from the user stack.  Never returns to
 * its caller: exits via thread_exception_return().
 *
 * NOTE(review): extraction dropped lines here — the return type /
 * opening brace and several locals (thread, p, code, uargp,
 * args_in_regs, copyin_count, error), the vfork else-branch, several
 * braces, and the switch's break statements are not visible; gaps are
 * flagged inline and must be restored from the pristine source.
 */
unix_syscall64(x86_saved_state_t *state)
	struct sysent		*callp;
	struct uthread		*uthread;
	x86_saved_state64_t	*regs;

	/* This entry point handles only 64-bit saved state */
	assert(is_saved_state64(state));
	regs = saved_state64(state);

	/* rax == 0x2000800 is not a dispatchable syscall — straight back
	 * to user space (presumably class-prefixed machdep value; confirm
	 * against mach/i386/syscall_sw.h) */
	if (regs->rax == 0x2000800)
		thread_exception_return();

	thread = current_thread();
	uthread = get_bsdthread_info(thread);

	/* Get the approriate proc; may be different from task's for vfork() */
	if (__probable(!(uthread->uu_flag & UT_VFORK)))
		p = (struct proc *)get_bsdtask_info(current_task());
	/* NOTE(review): else branch (vfork proc lookup) lost in extraction */

	/* Verify that we are not being called from a task without a proc */
	if (__improbable(p == NULL)) {
		regs->isf.rflags |= EFL_CF;	/* report failure via carry */
		task_terminate_internal(current_task());
		thread_exception_return();
	/* NOTE(review): closing brace lost */

	code = regs->rax & SYSCALL_NUMBER_MASK;
	DEBUG_KPRINT_SYSCALL_UNIX(
		"unix_syscall64: code=%d(%s) rip=%llx\n",
		code, syscallnames[code >= NUM_SYSENT ? 63 : code], regs->isf.rip);

	/* Out-of-range numbers route to sysent slot 63 (invalid-syscall entry) */
	callp = (code >= NUM_SYSENT) ? &sysent[63] : &sysent[code];
	uargp = (void *)(&regs->rdi);	/* args start at rdi in the save area */

	if (__improbable(callp == sysent)) {
		/*
		 * indirect system call... system call number
		 * NOTE(review): rest of this comment and the reload of
		 * `code` (presumably from rdi) lost in extraction
		 */
		callp = (code >= NUM_SYSENT) ? &sysent[63] : &sysent[code];
		uargp = (void *)(&regs->rsi);	/* args shift down one register */
	/* NOTE(review): closing brace lost */

	if (callp->sy_narg != 0) {
		/* NOTE(review): the args_in_regs computation was presumably
		 * here; lost in extraction */
		uint64_t *ip = (uint64_t *)uargp;

		KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
			BSDDBG_CODE(DBG_BSD_EXCP_SC, code) | DBG_FUNC_START,
			(int)(*ip), (int)(*(ip+1)), (int)(*(ip+2)), (int)(*(ip+3)), 0);

		assert(callp->sy_narg <= 8);

		if (__improbable(callp->sy_narg > args_in_regs)) {
			/* Args beyond the register set spill to the user
			 * stack, just above the return address */
			copyin_count = (callp->sy_narg - args_in_regs) * sizeof(uint64_t);

			error = copyin((user_addr_t)(regs->isf.rsp + sizeof(user_addr_t)), (char *)&regs->v_arg6, copyin_count);
			/* NOTE(review): the copyin error check was lost; the
			 * lines below are presumably its failure path */
			regs->isf.rflags |= EFL_CF;
			thread_exception_return();

	/*
	 * XXX Turn 64 bit unsafe calls into nosys()
	 */
	if (__improbable(callp->sy_flags & UNSAFE_64BIT)) {
	/* NOTE(review): body lost in extraction (presumably routes callp
	 * to the invalid-syscall slot) */

	/* NOTE(review): the else branch emitting a zero-argument trace
	 * event was partially lost; this START event is its remnant */
	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
		BSDDBG_CODE(DBG_BSD_EXCP_SC, code) | DBG_FUNC_START,

	/*
	 * Delayed binding of thread credential to process credential, if we
	 * are not running with an explicitly set thread credential.
	 */
	kauth_cred_uthread_update(uthread, p);

	uthread->uu_rval[0] = 0;
	uthread->uu_rval[1] = 0;
	uthread->uu_flag |= UT_NOTCANCELPT;	/* not a pthread cancellation point */

	uthread->uu_iocount = 0;	/* leak-detection counter, checked below */
	uthread->uu_vpindex = 0;

	AUDIT_SYSCALL_ENTER(code, p, uthread);
	/* Dispatch: 64-bit args are passed in place from the save area */
	error = (*(callp->sy_call))((void *) p, uargp, &(uthread->uu_rval[0]));
	AUDIT_SYSCALL_EXIT(code, p, uthread, error);

	if (uthread->uu_iocount)
		printf("system call returned with uu_iocount != 0\n");

	/* NOTE(review): the #if CONFIG_DTRACE opener for this line is lost */
	uthread->t_dtrace_errno = error;
#endif /* CONFIG_DTRACE */

	if (__improbable(error == ERESTART)) {
		/*
		 * all system calls come through via the syscall instruction
		 * in 64 bit mode... its 2 bytes in length
		 * move the user's pc back to repeat the syscall:
		 */
		pal_syscall_restart( thread, state );
	/* NOTE(review): closing brace lost */
	else if (error != EJUSTRETURN) {
		if (__improbable(error)) {
			/* NOTE(review): the store of error into regs->rax was
			 * presumably here; lost in extraction */
			regs->isf.rflags |= EFL_CF;	/* carry bit signals failure */
		} else { /* (not error) */
			/* Publish the result per the syscall's declared return type */
			switch (callp->sy_return_type) {
			case _SYSCALL_RET_INT_T:
				regs->rax = uthread->uu_rval[0];
				regs->rdx = uthread->uu_rval[1];
				/* NOTE(review): break lost in extraction — also
				 * after each case group below */
			case _SYSCALL_RET_UINT_T:
				regs->rax = ((u_int)uthread->uu_rval[0]);
				regs->rdx = ((u_int)uthread->uu_rval[1]);
			case _SYSCALL_RET_OFF_T:
			case _SYSCALL_RET_ADDR_T:
			case _SYSCALL_RET_SIZE_T:
			case _SYSCALL_RET_SSIZE_T:
			case _SYSCALL_RET_UINT64_T:
				/* 64-bit result spans both halves of uu_rval */
				regs->rax = *((uint64_t *)(&uthread->uu_rval[0]));
			case _SYSCALL_RET_NONE:
			/* NOTE(review): default: label lost */
				panic("unix_syscall: unknown return type");
			/* NOTE(review): closing brace of switch lost */
			regs->isf.rflags &= ~EFL_CF;	/* success: clear carry */

	DEBUG_KPRINT_SYSCALL_UNIX(
		"unix_syscall64: error=%d retval=(%llu,%llu)\n",
		error, regs->rax, regs->rdx);

	uthread->uu_flag &= ~UT_NOTCANCELPT;	/* cancellable again */

	/*
	 * if we're holding the funnel panic
	 */
	syscall_exit_funnelcheck();
#endif /* FUNNEL_DEBUG */

	if (__improbable(uthread->uu_lowpri_window)) {
		/*
		 * task is marked as a low priority I/O type
		 * and the I/O we issued while in this system call
		 * collided with normal I/O operations... we'll
		 * delay in order to mitigate the impact of this
		 * task on the normal operation of the system
		 */
		throttle_lowpri_io(TRUE);
	/* NOTE(review): closing brace lost */

	if (__probable(code != 180))
		KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
			BSDDBG_CODE(DBG_BSD_EXCP_SC, code) | DBG_FUNC_END,
			error, uthread->uu_rval[0], uthread->uu_rval[1], p->p_pid, 0);

	thread_exception_return();	/* back to user space; never returns */
/*
 * unix_syscall_return
 *
 * Completion path for system calls that finish asynchronously (e.g.
 * resumed from a continuation).  Re-derives the syscall number from the
 * saved user registers (the dispatch context is gone), publishes the
 * result for a 64- or 32-bit process, and exits to user space via
 * thread_exception_return().
 *
 * NOTE(review): extraction dropped lines — the return type / opening
 * brace, locals (thread, p, code, params), the proc lookup, the
 * #if CONFIG_DTRACE openers, the 64/32-bit else split, several braces,
 * and the switch's break statements are not visible; gaps are flagged
 * inline and must be restored from the pristine source.
 */
unix_syscall_return(int error)
	struct uthread		*uthread;
	struct sysent		*callp;

	thread = current_thread();
	uthread = get_bsdthread_info(thread);

	pal_register_cache_state(thread, DIRTY);	/* we will rewrite user regs */

	if (proc_is64bit(p)) {
		x86_saved_state64_t *regs;

		regs = saved_state64(find_user_regs(thread));

		/* reconstruct code for tracing before blasting rax */
		code = regs->rax & SYSCALL_NUMBER_MASK;
		callp = (code >= NUM_SYSENT) ? &sysent[63] : &sysent[code];
		/*
		 * indirect system call... system call number
		 * NOTE(review): remainder of this comment/handling lost
		 */
		if (callp->sy_call == dtrace_systrace_syscall)
			dtrace_systrace_syscall_return( code, error, uthread->uu_rval );
#endif /* CONFIG_DTRACE */
		AUDIT_SYSCALL_EXIT(code, p, uthread, error);

		if (error == ERESTART) {
			pal_syscall_restart( thread, find_user_regs(thread) );
		/* NOTE(review): closing brace lost */
		else if (error != EJUSTRETURN) {
			/* NOTE(review): the if (error) test and the store of
			 * error into rax were presumably here; lost */
			regs->isf.rflags |= EFL_CF;	/* carry bit */
		} else { /* (not error) */
			/* Publish the result per the syscall's declared return type */
			switch (callp->sy_return_type) {
			case _SYSCALL_RET_INT_T:
				regs->rax = uthread->uu_rval[0];
				regs->rdx = uthread->uu_rval[1];
				/* NOTE(review): break lost in extraction — also
				 * after each case group below */
			case _SYSCALL_RET_UINT_T:
				regs->rax = ((u_int)uthread->uu_rval[0]);
				regs->rdx = ((u_int)uthread->uu_rval[1]);
			case _SYSCALL_RET_OFF_T:
			case _SYSCALL_RET_ADDR_T:
			case _SYSCALL_RET_SIZE_T:
			case _SYSCALL_RET_SSIZE_T:
			case _SYSCALL_RET_UINT64_T:
				/* 64-bit result spans both halves of uu_rval */
				regs->rax = *((uint64_t *)(&uthread->uu_rval[0]));
			case _SYSCALL_RET_NONE:
			/* NOTE(review): default: label lost */
				panic("unix_syscall: unknown return type");
			/* NOTE(review): closing brace of switch lost */
			regs->isf.rflags &= ~EFL_CF;	/* success: clear carry */

		DEBUG_KPRINT_SYSCALL_UNIX(
			"unix_syscall_return: error=%d retval=(%llu,%llu)\n",
			error, regs->rax, regs->rdx);
	/* NOTE(review): the `} else {` opening the 32-bit process path is
	 * lost; everything below handles a 32-bit saved state */
	x86_saved_state32_t *regs;

	regs = saved_state32(find_user_regs(thread));

	regs->efl &= ~(EFL_CF);	/* clear carry: assume success */
	/* reconstruct code for tracing before blasting eax */
	code = regs->eax & I386_SYSCALL_NUMBER_MASK;
	callp = (code >= NUM_SYSENT) ? &sysent[63] : &sysent[code];

	if (callp->sy_call == dtrace_systrace_syscall)
		dtrace_systrace_syscall_return( code, error, uthread->uu_rval );
#endif /* CONFIG_DTRACE */
	AUDIT_SYSCALL_EXIT(code, p, uthread, error);

	if (callp == sysent) {
		/* indirect syscall: real number is the first user arg */
		params = (vm_offset_t) (regs->uesp + sizeof (int));
		code = fuword(params);
	/* NOTE(review): closing brace lost */
	if (error == ERESTART) {
		pal_syscall_restart( thread, find_user_regs(thread) );
	/* NOTE(review): closing brace lost */
	else if (error != EJUSTRETURN) {
		/* NOTE(review): the if (error) test and the store of error
		 * into eax were presumably here; lost */
		regs->efl |= EFL_CF;	/* carry bit */
	} else { /* (not error) */
		regs->eax = uthread->uu_rval[0];
		regs->edx = uthread->uu_rval[1];
	/* NOTE(review): closing braces lost */

	DEBUG_KPRINT_SYSCALL_UNIX(
		"unix_syscall_return: error=%d retval=(%u,%u)\n",
		error, regs->eax, regs->edx);

	uthread->uu_flag &= ~UT_NOTCANCELPT;	/* cancellable again */

	/*
	 * if we're holding the funnel panic
	 */
	syscall_exit_funnelcheck();
#endif /* FUNNEL_DEBUG */

	if (uthread->uu_lowpri_window) {
		/*
		 * task is marked as a low priority I/O type
		 * and the I/O we issued while in this system call
		 * collided with normal I/O operations... we'll
		 * delay in order to mitigate the impact of this
		 * task on the normal operation of the system
		 */
		throttle_lowpri_io(TRUE);
	/* NOTE(review): closing brace lost */

	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
		BSDDBG_CODE(DBG_BSD_EXCP_SC, code) | DBG_FUNC_END,
		error, uthread->uu_rval[0], uthread->uu_rval[1], p->p_pid, 0);

	thread_exception_return();	/* back to user space; never returns */
/*
 * In-place argument munger for the pattern w w w l w w (w = 32-bit
 * word, l = 64-bit long): widens the packed 32-bit user arguments in
 * out64 into 64-bit slots.  Stores proceed from the highest slot down,
 * so each read happens before its source words are overwritten.
 *
 * NOTE(review): the return type and function name were lost in
 * extraction (presumably munge_wwwlww, per the pattern comments), as
 * were the second parameter (void *out64), the arg32/arg64 local
 * declarations, and the braces.
 */
__unused const void *in32,

	/* we convert in place in out64 */
	arg32 = (uint32_t *) out64;
	arg64 = (uint64_t *) out64;

	arg64[5] = arg32[6];	/* wwwlwW */
	arg64[4] = arg32[5];	/* wwwlWw */
	arg32[7] = arg32[4];	/* wwwLww (hi) */
	arg32[6] = arg32[3];	/* wwwLww (lo) */
	arg64[2] = arg32[2];	/* wwWlww */
	arg64[1] = arg32[1];	/* wWwlww */
	arg64[0] = arg32[0];	/* Wwwlww */
/*
 * In-place argument munger for the pattern w w l w w w (w = 32-bit
 * word, l = 64-bit long): widens the packed 32-bit user arguments in
 * out64 into 64-bit slots.  Stores proceed from the highest slot down,
 * so each read happens before its source words are overwritten.
 *
 * NOTE(review): the return type and function name were lost in
 * extraction (presumably munge_wwlwww, per the pattern comments), as
 * were the second parameter (void *out64), the arg32/arg64 local
 * declarations, and the braces.
 */
__unused const void *in32,

	/* we convert in place in out64 */
	arg32 = (uint32_t *) out64;
	arg64 = (uint64_t *) out64;

	arg64[5] = arg32[6];	/* wwlwwW */
	arg64[4] = arg32[5];	/* wwlwWw */
	arg64[3] = arg32[4];	/* wwlWww */
	arg32[5] = arg32[3];	/* wwLwww (hi) */
	arg32[4] = arg32[2];	/* wwLwww (lo) */
	arg64[1] = arg32[1];	/* wWlwww */
	arg64[0] = arg32[0];	/* Wwlwww */