/* apple/xnu: bsd/dev/ppc/systemcalls.c (blob 6c38ac6a18cab8ca0e81fd5a18219d009decc69c) */
/*
 * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/assert.h>
#include <kern/clock.h>
#include <kern/locks.h>
#include <kern/sched_prim.h>
#include <mach/machine/thread_status.h>
#include <ppc/savearea.h>

#include <sys/kernel.h>
#include <sys/proc_internal.h>
#include <sys/syscall.h>
#include <sys/systm.h>
#include <sys/errno.h>
#include <sys/ktrace.h>
#include <sys/kdebug.h>
#include <sys/sysent.h>
#include <sys/sysproto.h>
#include <sys/kauth.h>

#include <bsm/audit_kernel.h>
extern void
unix_syscall(struct savearea *regs);
void
unix_syscall_return(int error);

extern struct savearea *
find_user_regs(thread_t act);

extern void enter_funnel_section(funnel_t *funnel_lock);
extern void exit_funnel_section(void);
/*
 * Function:	unix_syscall
 *
 * Inputs:	regs	- pointer to Process Control Block
 */
void
unix_syscall(struct savearea *regs)
{
    thread_t        thread_act;
    struct uthread  *uthread;
    struct proc     *proc;
    struct sysent   *callp;
    int             error;
    unsigned short  code;
    boolean_t       flavor;
    int             funnel_type;
    unsigned int    cancel_enable;

    flavor = (((unsigned int)regs->save_r0) == 0)? 1: 0;

    if (flavor)
        code = regs->save_r3;
    else
        code = regs->save_r0;
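
    /*
     * r0 == 0 marks the indirect syscall(2) flavor: the real syscall number
     * is passed in r3 and the user arguments start at r4, so one argument
     * slot is consumed by the number itself (see the munging code below).
     */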

    if (kdebug_enable && (code != 180)) {   /* 180 == SYS_kdebug_trace */
        if (flavor)
            KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_EXCP_SC, code) | DBG_FUNC_START,
                regs->save_r4, regs->save_r5, regs->save_r6, regs->save_r7, 0);
        else
            KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_EXCP_SC, code) | DBG_FUNC_START,
                regs->save_r3, regs->save_r4, regs->save_r5, regs->save_r6, 0);
    }
    thread_act = current_thread();
    uthread = get_bsdthread_info(thread_act);

    if (!(uthread->uu_flag & UT_VFORK))
        proc = (struct proc *)get_bsdtask_info(current_task());
    else
        proc = current_proc();

    /* Make sure there is a process associated with this task */
    if (proc == NULL) {
        regs->save_r3 = (long long)EPERM;
        /* set the "pc" to execute cerror routine */
        regs->save_srr0 -= 4;
        task_terminate_internal(current_task());
        thread_exception_return();
        /* NOTREACHED */
    }

    /*
     * Delayed binding of thread credential to process credential, if we
     * are not running with an explicitly set thread credential.
     */
    if (uthread->uu_ucred != proc->p_ucred &&
        (uthread->uu_flag & UT_SETUID) == 0) {
        kauth_cred_t old = uthread->uu_ucred;

        proc_lock(proc);
        uthread->uu_ucred = proc->p_ucred;
        kauth_cred_ref(uthread->uu_ucred);
        proc_unlock(proc);

        if (old != NOCRED)
            kauth_cred_rele(old);
    }

    callp = (code >= nsysent) ? &sysent[63] : &sysent[code];
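
    /*
     * Out-of-range syscall numbers are redirected to sysent[63], the table's
     * catch-all "invalid call" entry; the same slot is reused below for
     * 64-bit-unsafe calls and for indirect calls with too many arguments.
     */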

    if (callp->sy_narg != 0) {
        void        *regsp;
        sy_munge_t  *mungerp;

        if (IS_64BIT_PROCESS(proc)) {
            /* XXX Turn 64 bit unsafe calls into nosys() */
            if (callp->sy_funnel & UNSAFE_64BIT) {
                callp = &sysent[63];
                goto unsafe;
            }
            mungerp = callp->sy_arg_munge64;
        } else {
            mungerp = callp->sy_arg_munge32;
        }
        if (!flavor) {
            regsp = (void *) &regs->save_r3;
        } else {
            /* indirect system call consumes an argument so only 7 are supported */
            if (callp->sy_narg > 7) {
                callp = &sysent[63];
                goto unsafe;
            }
            regsp = (void *) &regs->save_r4;
        }
        /* call syscall argument munger to copy in arguments (see xnu/bsd/dev/ppc/munge.s) */
        (*mungerp)(regsp, (void *) &uthread->uu_arg[0]);
    }
unsafe:
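
    /*
     * The mungers in munge.s repack the saved register image into the
     * kernel's fixed-width uu_arg[] slots. As a purely illustrative sketch
     * (a hypothetical C equivalent, not code from this file), a munger for
     * a call taking four word-sized arguments behaves roughly like:
     *
     *     void munge_wwww(const uint64_t *reg_image, uint64_t *uu_arg)
     *     {
     *         int i;
     *
     *         // keep the low 32 bits of each saved register, zero-extended
     *         for (i = 0; i < 4; i++)
     *             uu_arg[i] = (uint32_t)reg_image[i];
     *     }
     *
     * 64-bit processes already pass full-width registers, so the
     * sy_arg_munge64 routines are mostly straight copies.
     */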

    cancel_enable = callp->sy_cancel;

    if (cancel_enable == _SYSCALL_CANCEL_NONE) {
        uthread->uu_flag |= UT_NOTCANCELPT;
    } else {
        if ((uthread->uu_flag & (UT_CANCELDISABLE | UT_CANCEL | UT_CANCELED)) == UT_CANCEL) {
            if (cancel_enable == _SYSCALL_CANCEL_PRE) {
                /* system call cancelled; return to handle cancellation */
                regs->save_r3 = (long long)EINTR;
                thread_exception_return();
                /* NOTREACHED */
            } else {
                thread_abort_safely(thread_act);
            }
        }
    }

    funnel_type = (int)(callp->sy_funnel & FUNNEL_MASK);
    if (funnel_type == KERNEL_FUNNEL)
        enter_funnel_section(kernel_flock);
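
    /*
     * KERNEL_FUNNEL marks calls that still rely on the single BSD kernel
     * funnel for serialization; funnel-free (fine-grained locked) calls
     * skip enter_funnel_section() entirely.
     */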

    uthread->uu_rval[0] = 0;

    /*
     * r4 is volatile; if we set it to regs->save_r4 here the child
     * will have the parent's r4 after execve
     */
    uthread->uu_rval[1] = 0;

    error = 0;

    /*
     * PPC runtime calls cerror after every unix system call, so
     * assume no error and adjust the "pc" to skip this call.
     * It will be set back to the cerror call if an error is detected.
     */
    regs->save_srr0 += 4;
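
    /*
     * The user-side libc stub conceptually looks like
     *
     *     sc                 <- ERESTART rewinds srr0 here (-8) to retry
     *     b   cerror         <- an error rewinds srr0 here (-4)
     *     <success path>     <- the += 4 above lands here when no error
     *
     * so the three srr0 adjustments in this file pick which of the three
     * instructions runs next on return to user space.
     */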

    if (KTRPOINT(proc, KTR_SYSCALL))
        ktrsyscall(proc, code, callp->sy_narg, uthread->uu_arg);

    uthread->uu_iocount = 0;
    uthread->uu_vpindex = 0;

    AUDIT_SYSCALL_ENTER(code, proc, uthread);
    error = (*(callp->sy_call))(proc, (void *)uthread->uu_arg, &(uthread->uu_rval[0]));
    AUDIT_SYSCALL_EXIT(error, proc, uthread);

    if (uthread->uu_iocount)
        joe_debug("system call returned with uu_iocount != 0");

    regs = find_user_regs(thread_act);
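
    /*
     * regs is looked up again rather than reused because the handler may
     * have replaced the thread's user savearea (execve and friends), so the
     * pointer captured on entry is not necessarily valid any more.
     */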

    if (error == ERESTART) {
        regs->save_srr0 -= 8;
    } else if (error != EJUSTRETURN) {
        if (error) {
            regs->save_r3 = (long long)error;
            /* set the "pc" to execute cerror routine */
            regs->save_srr0 -= 4;
        } else { /* (not error) */
            switch (callp->sy_return_type) {
            case _SYSCALL_RET_INT_T:
                regs->save_r3 = uthread->uu_rval[0];
                regs->save_r4 = uthread->uu_rval[1];
                break;
            case _SYSCALL_RET_UINT_T:
                regs->save_r3 = ((u_int)uthread->uu_rval[0]);
                regs->save_r4 = ((u_int)uthread->uu_rval[1]);
                break;
            case _SYSCALL_RET_OFF_T:
                /* off_t returns 64 bits split across two registers for 32 bit */
                /* process and in one register for 64 bit process */
                if (IS_64BIT_PROCESS(proc)) {
                    u_int64_t *retp = (u_int64_t *)&uthread->uu_rval[0];

                    regs->save_r3 = *retp;
                    regs->save_r4 = 0;
                } else {
                    regs->save_r3 = uthread->uu_rval[0];
                    regs->save_r4 = uthread->uu_rval[1];
                }
                break;
            case _SYSCALL_RET_ADDR_T:
            case _SYSCALL_RET_SIZE_T:
            case _SYSCALL_RET_SSIZE_T:
                /* the variable length return types (user_addr_t, user_ssize_t,
                 * and user_size_t) are always the largest possible size in the
                 * kernel (we use uu_rval[0] and [1] as one 64 bit value).
                 */
                {
                    user_addr_t *retp = (user_addr_t *)&uthread->uu_rval[0];

                    regs->save_r3 = *retp;
                    regs->save_r4 = 0;
                }
                break;
            case _SYSCALL_RET_NONE:
                break;
            default:
                panic("unix_syscall: unknown return type");
                break;
            }
        }
    }
    /* else (error == EJUSTRETURN) { nothing } */

    if (KTRPOINT(proc, KTR_SYSRET)) {
        switch (callp->sy_return_type) {
        case _SYSCALL_RET_ADDR_T:
        case _SYSCALL_RET_SIZE_T:
        case _SYSCALL_RET_SSIZE_T:
            /*
             * Trace the value of the least significant bits,
             * until we can revise the ktrace API safely.
             */
            ktrsysret(proc, code, error, uthread->uu_rval[1]);
            break;
        default:
            ktrsysret(proc, code, error, uthread->uu_rval[0]);
            break;
        }
    }

    if (cancel_enable == _SYSCALL_CANCEL_NONE)
        uthread->uu_flag &= ~UT_NOTCANCELPT;

    exit_funnel_section();

    if (uthread->uu_lowpri_delay) {
        /*
         * task is marked as a low priority I/O type
         * and the I/O we issued while in this system call
         * collided with normal I/O operations... we'll
         * delay in order to mitigate the impact of this
         * task on the normal operation of the system
         */
        IOSleep(uthread->uu_lowpri_delay);
        uthread->uu_lowpri_delay = 0;
    }
    if (kdebug_enable && (code != 180)) {
        if (callp->sy_return_type == _SYSCALL_RET_SSIZE_T)
            KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_EXCP_SC, code) | DBG_FUNC_END,
                error, uthread->uu_rval[1], 0, 0, 0);
        else
            KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_EXCP_SC, code) | DBG_FUNC_END,
                error, uthread->uu_rval[0], uthread->uu_rval[1], 0, 0);
    }

    thread_exception_return();
    /* NOTREACHED */
}

void
unix_syscall_return(int error)
{
    thread_t        thread_act;
    struct uthread  *uthread;
    struct proc     *proc;
    struct savearea *regs;
    unsigned short  code;
    struct sysent   *callp;
    unsigned int    cancel_enable;

    thread_act = current_thread();
    proc = current_proc();
    uthread = get_bsdthread_info(thread_act);

    regs = find_user_regs(thread_act);

    if (regs->save_r0 != 0)
        code = regs->save_r0;
    else
        code = regs->save_r3;

    callp = (code >= nsysent) ? &sysent[63] : &sysent[code];
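
    /*
     * unix_syscall_return() is the continuation path: handlers that block
     * and finish later (rather than returning into unix_syscall()) come back
     * through here, so the syscall number and sysent entry have to be
     * rederived from the saved user registers before the result is delivered.
     */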

    /*
     * Get index into sysent table
     */
    if (error == ERESTART) {
        regs->save_srr0 -= 8;
    } else if (error != EJUSTRETURN) {
        if (error) {
            regs->save_r3 = (long long)error;
            /* set the "pc" to execute cerror routine */
            regs->save_srr0 -= 4;
        } else { /* (not error) */
            switch (callp->sy_return_type) {
            case _SYSCALL_RET_INT_T:
                regs->save_r3 = uthread->uu_rval[0];
                regs->save_r4 = uthread->uu_rval[1];
                break;
            case _SYSCALL_RET_UINT_T:
                regs->save_r3 = ((u_int)uthread->uu_rval[0]);
                regs->save_r4 = ((u_int)uthread->uu_rval[1]);
                break;
            case _SYSCALL_RET_OFF_T:
                /* off_t returns 64 bits split across two registers for 32 bit */
                /* process and in one register for 64 bit process */
                if (IS_64BIT_PROCESS(proc)) {
                    u_int64_t *retp = (u_int64_t *)&uthread->uu_rval[0];

                    regs->save_r3 = *retp;
                    regs->save_r4 = 0;
                } else {
                    regs->save_r3 = uthread->uu_rval[0];
                    regs->save_r4 = uthread->uu_rval[1];
                }
                break;
            case _SYSCALL_RET_ADDR_T:
            case _SYSCALL_RET_SIZE_T:
            case _SYSCALL_RET_SSIZE_T:
                /* the variable length return types (user_addr_t, user_ssize_t,
                 * and user_size_t) are always the largest possible size in the
                 * kernel (we use uu_rval[0] and [1] as one 64 bit value).
                 */
                {
                    u_int64_t *retp = (u_int64_t *)&uthread->uu_rval[0];

                    regs->save_r3 = *retp;
                    regs->save_r4 = 0;
                }
                break;
            case _SYSCALL_RET_NONE:
                break;
            default:
                panic("unix_syscall: unknown return type");
                break;
            }
        }
    }
    /* else (error == EJUSTRETURN) { nothing } */

    if (KTRPOINT(proc, KTR_SYSRET)) {
        switch (callp->sy_return_type) {
        case _SYSCALL_RET_ADDR_T:
        case _SYSCALL_RET_SIZE_T:
        case _SYSCALL_RET_SSIZE_T:
            /*
             * Trace the value of the least significant bits,
             * until we can revise the ktrace API safely.
             */
            ktrsysret(proc, code, error, uthread->uu_rval[1]);
            break;
        default:
            ktrsysret(proc, code, error, uthread->uu_rval[0]);
            break;
        }
    }

    cancel_enable = callp->sy_cancel;

    if (cancel_enable == _SYSCALL_CANCEL_NONE)
        uthread->uu_flag &= ~UT_NOTCANCELPT;

    exit_funnel_section();

    if (uthread->uu_lowpri_delay) {
        /*
         * task is marked as a low priority I/O type
         * and the I/O we issued while in this system call
         * collided with normal I/O operations... we'll
         * delay in order to mitigate the impact of this
         * task on the normal operation of the system
         */
        IOSleep(uthread->uu_lowpri_delay);
        uthread->uu_lowpri_delay = 0;
    }
    if (kdebug_enable && (code != 180)) {
        if (callp->sy_return_type == _SYSCALL_RET_SSIZE_T)
            KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_EXCP_SC, code) | DBG_FUNC_END,
                error, uthread->uu_rval[1], 0, 0, 0);
        else
            KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_EXCP_SC, code) | DBG_FUNC_END,
                error, uthread->uu_rval[0], uthread->uu_rval[1], 0, 0);
    }

    thread_exception_return();
    /* NOTREACHED */
}