/*
 * bsd/dev/ppc/systemcalls.c
 * Source: Apple XNU kernel, release xnu-792.10.96.
 */
1/*
2 * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * The contents of this file constitute Original Code as defined in and
7 * are subject to the Apple Public Source License Version 1.1 (the
8 * "License"). You may not use this file except in compliance with the
9 * License. Please obtain a copy of the License at
10 * http://www.apple.com/publicsource and read it before using this file.
11 *
12 * This Original Code and all software distributed under the License are
13 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
14 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
15 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
17 * License for the specific language governing rights and limitations
18 * under the License.
19 *
20 * @APPLE_LICENSE_HEADER_END@
21 */
22
23#include <kern/task.h>
24#include <kern/thread.h>
25#include <kern/assert.h>
26#include <kern/clock.h>
27#include <kern/locks.h>
28#include <kern/sched_prim.h>
29#include <mach/machine/thread_status.h>
30#include <ppc/savearea.h>
31
32#include <sys/kernel.h>
33#include <sys/vm.h>
34#include <sys/proc_internal.h>
35#include <sys/syscall.h>
36#include <sys/systm.h>
37#include <sys/user.h>
38#include <sys/errno.h>
39#include <sys/ktrace.h>
40#include <sys/kdebug.h>
41#include <sys/sysent.h>
42#include <sys/sysproto.h>
43#include <sys/kauth.h>
44
45#include <bsm/audit_kernel.h>
46
47extern void
48unix_syscall(struct savearea *regs);
49void
50unix_syscall_return(int error);
51
52extern struct savearea *
53find_user_regs(
54 thread_t act);
55
56extern void enter_funnel_section(funnel_t *funnel_lock);
57extern void exit_funnel_section(void);
58
/*
 * Function:	unix_syscall
 *
 * Inputs:	regs	- pointer to the saved machine state (Process
 *			  Control Block) of the thread that executed
 *			  the system-call trap
 *
 * Outputs:	none	- never returns to its caller; always exits the
 *			  kernel through thread_exception_return()
 *
 * Notes:	BSD system-call dispatcher for PPC.  Decodes the syscall
 *		number and calling flavor from the saved registers, munges
 *		the user arguments into uu_arg[], invokes the handler from
 *		the sysent table, and writes the result (or errno) back
 *		into the saved user register state.
 */
void
unix_syscall(struct savearea *regs)
{
	thread_t		thread_act;
	struct uthread		*uthread;
	struct proc		*proc;
	struct sysent		*callp;
	int			error;
	unsigned short		code;
	boolean_t		flavor;
	int			funnel_type;
	unsigned int		cancel_enable;

	/*
	 * r0 == 0 marks the indirect flavor, syscall(number, args...):
	 * the syscall number is then in r3 and the user arguments start
	 * at r4.  Otherwise the number is in r0 and arguments start at r3
	 * (see the regsp selection below).
	 */
	flavor = (((unsigned int)regs->save_r0) == 0)? 1: 0;

	if (flavor)
		code = regs->save_r3;
	else
		code = regs->save_r0;

	/* NOTE(review): code 180 is skipped from kdebug logging —
	 * presumably the trace syscall itself, to avoid self-logging;
	 * confirm against the syscall table */
	if (kdebug_enable && (code != 180)) {
		if (flavor)
			KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_EXCP_SC, code) | DBG_FUNC_START,
				regs->save_r4, regs->save_r5, regs->save_r6, regs->save_r7, 0);
		else
			KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_EXCP_SC, code) | DBG_FUNC_START,
				regs->save_r3, regs->save_r4, regs->save_r5, regs->save_r6, 0);
	}
	thread_act = current_thread();
	uthread = get_bsdthread_info(thread_act);

	/*
	 * In a vfork child the task's bsd info does not yet describe the
	 * child; use current_proc() in that case to find the right proc.
	 */
	if (!(uthread->uu_flag & UT_VFORK))
		proc = (struct proc *)get_bsdtask_info(current_task());
	else
		proc = current_proc();

	/* Make sure there is a process associated with this task */
	if (proc == NULL) {
		regs->save_r3 = (long long)EPERM;
		/* set the "pc" to execute cerror routine */
		regs->save_srr0 -= 4;
		task_terminate_internal(current_task());
		thread_exception_return();
		/* NOTREACHED */
	}

	/*
	 * Delayed binding of thread credential to process credential, if we
	 * are not running with an explicitly set thread credential.
	 */
	if (uthread->uu_ucred != proc->p_ucred &&
	    (uthread->uu_flag & UT_SETUID) == 0) {
		kauth_cred_t old = uthread->uu_ucred;
		proc_lock(proc);
		uthread->uu_ucred = proc->p_ucred;
		kauth_cred_ref(uthread->uu_ucred);
		proc_unlock(proc);
		/* drop our reference on the credential we replaced */
		if (old != NOCRED)
			kauth_cred_rele(old);
	}

	/* out-of-range syscall numbers are redirected to sysent[63] —
	 * presumably the nosys() stub; confirm against init_sysent.c */
	callp = (code >= nsysent) ? &sysent[63] : &sysent[code];

	if (callp->sy_narg != 0) {
		void		*regsp;
		sy_munge_t	*mungerp;

		if (IS_64BIT_PROCESS(proc)) {
			/* XXX Turn 64 bit unsafe calls into nosys() */
			if (callp->sy_funnel & UNSAFE_64BIT) {
				callp = &sysent[63];
				goto unsafe;	/* skip munging; nosys takes no args */
			}
			mungerp = callp->sy_arg_munge64;
		}
		else {
			mungerp = callp->sy_arg_munge32;
		}
		if ( !flavor) {
			/* direct call: arguments begin in r3 */
			regsp = (void *) &regs->save_r3;
		} else {
			/* indirect system call consumes an argument so only 7 are supported */
			if (callp->sy_narg > 7) {
				callp = &sysent[63];
				goto unsafe;
			}
			/* indirect call: r3 held the number, arguments begin in r4 */
			regsp = (void *) &regs->save_r4;
		}
		/* call syscall argument munger to copy in arguments (see xnu/bsd/dev/ppc/munge.s) */
		(*mungerp)(regsp, (void *) &uthread->uu_arg[0]);
	}

unsafe:
	cancel_enable = callp->sy_cancel;

	if (cancel_enable == _SYSCALL_CANCEL_NONE) {
		/* mark the thread as inside a non-cancelation-point syscall;
		 * cleared again in the epilogue below */
		uthread->uu_flag |= UT_NOTCANCELPT;
	} else {
		/* a cancel is pending, enabled, and not yet acted upon */
		if((uthread->uu_flag & (UT_CANCELDISABLE | UT_CANCEL | UT_CANCELED)) == UT_CANCEL) {
			if (cancel_enable == _SYSCALL_CANCEL_PRE) {
				/* system call cancelled; return to handle cancellation */
				regs->save_r3 = (long long)EINTR;
				thread_exception_return();
				/* NOTREACHED */
			} else {
				thread_abort_safely(thread_act);
			}
		}
	}

	funnel_type = (int)(callp->sy_funnel & FUNNEL_MASK);
	if (funnel_type == KERNEL_FUNNEL)
		enter_funnel_section(kernel_flock);

	uthread->uu_rval[0] = 0;

	/*
	 * r4 is volatile, if we set it to regs->save_r4 here the child
	 * will have parents r4 after execve
	 */
	uthread->uu_rval[1] = 0;

	error = 0;

	/*
	 * PPC runtime calls cerror after every unix system call, so
	 * assume no error and adjust the "pc" to skip this call.
	 * It will be set back to the cerror call if an error is detected.
	 */
	regs->save_srr0 += 4;

	if (KTRPOINT(proc, KTR_SYSCALL))
		ktrsyscall(proc, code, callp->sy_narg, uthread->uu_arg);

#ifdef JOE_DEBUG
	uthread->uu_iocount = 0;
	uthread->uu_vpindex = 0;
#endif
	AUDIT_SYSCALL_ENTER(code, proc, uthread);
	/* dispatch to the handler; results come back in uu_rval[] */
	error = (*(callp->sy_call))(proc, (void *)uthread->uu_arg, &(uthread->uu_rval[0]));
	AUDIT_SYSCALL_EXIT(error, proc, uthread);

#ifdef JOE_DEBUG
	if (uthread->uu_iocount)
		joe_debug("system call returned with uu_iocount != 0");
#endif
	/* the handler may have blocked; refetch the saved user registers */
	regs = find_user_regs(thread_act);

	if (error == ERESTART) {
		/* srr0 was advanced by 4 above, so -8 nets to -4 from the
		 * original pc: the "sc" instruction re-executes */
		regs->save_srr0 -= 8;
	} else if (error != EJUSTRETURN) {
		if (error) {
			regs->save_r3 = (long long)error;
			/* set the "pc" to execute cerror routine */
			regs->save_srr0 -= 4;
		} else { /* (not error) */
			switch (callp->sy_return_type) {
			case _SYSCALL_RET_INT_T:
				regs->save_r3 = uthread->uu_rval[0];
				regs->save_r4 = uthread->uu_rval[1];
				break;
			case _SYSCALL_RET_UINT_T:
				regs->save_r3 = ((u_int)uthread->uu_rval[0]);
				regs->save_r4 = ((u_int)uthread->uu_rval[1]);
				break;
			case _SYSCALL_RET_OFF_T:
				/* off_t returns 64 bits split across two registers for 32 bit */
				/* process and in one register for 64 bit process */
				if (IS_64BIT_PROCESS(proc)) {
					u_int64_t *retp = (u_int64_t *)&uthread->uu_rval[0];
					regs->save_r3 = *retp;
					regs->save_r4 = 0;
				}
				else {
					regs->save_r3 = uthread->uu_rval[0];
					regs->save_r4 = uthread->uu_rval[1];
				}
				break;
			case _SYSCALL_RET_ADDR_T:
			case _SYSCALL_RET_SIZE_T:
			case _SYSCALL_RET_SSIZE_T:
				/* the variable length return types (user_addr_t, user_ssize_t,
				 * and user_size_t) are always the largest possible size in the
				 * kernel (we use uu_rval[0] and [1] as one 64 bit value).
				 */
				{
					user_addr_t *retp = (user_addr_t *)&uthread->uu_rval[0];
					regs->save_r3 = *retp;
					regs->save_r4 = 0;
				}
				break;
			case _SYSCALL_RET_NONE:
				break;
			default:
				panic("unix_syscall: unknown return type");
				break;
			}
		}
	}
	/* else  (error == EJUSTRETURN) { nothing } */


	if (KTRPOINT(proc, KTR_SYSRET)) {
		switch(callp->sy_return_type) {
		case _SYSCALL_RET_ADDR_T:
		case _SYSCALL_RET_SIZE_T:
		case _SYSCALL_RET_SSIZE_T:
			/*
			 * Trace the value of the least significant bits,
			 * until we can revise the ktrace API safely.
			 */
			ktrsysret(proc, code, error, uthread->uu_rval[1]);
			break;
		default:
			ktrsysret(proc, code, error, uthread->uu_rval[0]);
			break;
		}
	}

	if (cancel_enable == _SYSCALL_CANCEL_NONE)
		uthread->uu_flag &= ~UT_NOTCANCELPT;

	/* NOTE(review): the funnel was only entered for KERNEL_FUNNEL
	 * calls above, but this exit is unconditional — presumably
	 * exit_funnel_section() is a no-op when no funnel is held;
	 * confirm */
	exit_funnel_section();

	if (uthread->uu_lowpri_delay) {
		/*
		 * task is marked as a low priority I/O type
		 * and the I/O we issued while in this system call
		 * collided with normal I/O operations... we'll
		 * delay in order to mitigate the impact of this
		 * task on the normal operation of the system
		 */
		IOSleep(uthread->uu_lowpri_delay);
		uthread->uu_lowpri_delay = 0;
	}
	if (kdebug_enable && (code != 180)) {

		/* SSIZE_T results occupy uu_rval[0..1] as one 64-bit
		 * value; log the low word only */
		if (callp->sy_return_type == _SYSCALL_RET_SSIZE_T)
			KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_EXCP_SC, code) | DBG_FUNC_END,
				error, uthread->uu_rval[1], 0, 0, 0);
		else
			KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_EXCP_SC, code) | DBG_FUNC_END,
				error, uthread->uu_rval[0], uthread->uu_rval[1], 0, 0);
	}

	thread_exception_return();
	/* NOTREACHED */
}
314
/*
 * Function:	unix_syscall_return
 *
 * Inputs:	error	- result code from a system-call handler that is
 *			  completing out of line (rather than returning
 *			  through unix_syscall())
 *
 * Outputs:	none	- exits the kernel through thread_exception_return()
 *
 * Notes:	Re-derives the syscall number and sysent entry from the
 *		saved user registers, stores the result (or errno) back
 *		into them, and performs the same ktrace / cancel-flag /
 *		funnel / kdebug epilogue as unix_syscall().
 */
void
unix_syscall_return(int error)
{
	thread_t		thread_act;
	struct uthread		*uthread;
	struct proc		*proc;
	struct savearea		*regs;
	unsigned short		code;
	struct sysent		*callp;
	unsigned int		cancel_enable;

	thread_act = current_thread();
	proc = current_proc();
	uthread = get_bsdthread_info(thread_act);

	regs = find_user_regs(thread_act);

	/* same flavor decoding as unix_syscall(): r0 == 0 means the
	 * indirect form with the syscall number in r3 */
	if (regs->save_r0 != 0)
		code = regs->save_r0;
	else
		code = regs->save_r3;

	/* out-of-range numbers fall back to sysent[63], as in
	 * unix_syscall() — presumably the nosys() stub; confirm */
	callp = (code >= nsysent) ? &sysent[63] : &sysent[code];

	/*
	 * Get index into sysent table
	 */
	if (error == ERESTART) {
		/* srr0 was advanced by 4 at syscall entry (in
		 * unix_syscall), so -8 backs the pc up onto the "sc"
		 * instruction for a restart — TODO confirm */
		regs->save_srr0 -= 8;
	} else if (error != EJUSTRETURN) {
		if (error) {
			regs->save_r3 = (long long)error;
			/* set the "pc" to execute cerror routine */
			regs->save_srr0 -= 4;
		} else { /* (not error) */
			switch (callp->sy_return_type) {
			case _SYSCALL_RET_INT_T:
				regs->save_r3 = uthread->uu_rval[0];
				regs->save_r4 = uthread->uu_rval[1];
				break;
			case _SYSCALL_RET_UINT_T:
				regs->save_r3 = ((u_int)uthread->uu_rval[0]);
				regs->save_r4 = ((u_int)uthread->uu_rval[1]);
				break;
			case _SYSCALL_RET_OFF_T:
				/* off_t returns 64 bits split across two registers for 32 bit */
				/* process and in one register for 64 bit process */
				if (IS_64BIT_PROCESS(proc)) {
					u_int64_t *retp = (u_int64_t *)&uthread->uu_rval[0];
					regs->save_r3 = *retp;
					/* NOTE(review): unlike unix_syscall(), save_r4
					 * is not zeroed here — confirm intentional */
				}
				else {
					regs->save_r3 = uthread->uu_rval[0];
					regs->save_r4 = uthread->uu_rval[1];
				}
				break;
			case _SYSCALL_RET_ADDR_T:
			case _SYSCALL_RET_SIZE_T:
			case _SYSCALL_RET_SSIZE_T:
				/* the variable length return types (user_addr_t, user_ssize_t,
				 * and user_size_t) are always the largest possible size in the
				 * kernel (we use uu_rval[0] and [1] as one 64 bit value).
				 */
				{
					u_int64_t *retp = (u_int64_t *)&uthread->uu_rval[0];
					regs->save_r3 = *retp;
				}
				break;
			case _SYSCALL_RET_NONE:
				break;
			default:
				panic("unix_syscall: unknown return type");
				break;
			}
		}
	}
	/* else  (error == EJUSTRETURN) { nothing } */

	if (KTRPOINT(proc, KTR_SYSRET)) {
		switch(callp->sy_return_type) {
		case _SYSCALL_RET_ADDR_T:
		case _SYSCALL_RET_SIZE_T:
		case _SYSCALL_RET_SSIZE_T:
			/*
			 * Trace the value of the least significant bits,
			 * until we can revise the ktrace API safely.
			 */
			ktrsysret(proc, code, error, uthread->uu_rval[1]);
			break;
		default:
			ktrsysret(proc, code, error, uthread->uu_rval[0]);
			break;
		}
	}

	cancel_enable = callp->sy_cancel;

	/* clear the flag set at dispatch time by unix_syscall() */
	if (cancel_enable == _SYSCALL_CANCEL_NONE)
		uthread->uu_flag &= ~UT_NOTCANCELPT;

	exit_funnel_section();

	if (uthread->uu_lowpri_delay) {
		/*
		 * task is marked as a low priority I/O type
		 * and the I/O we issued while in this system call
		 * collided with normal I/O operations... we'll
		 * delay in order to mitigate the impact of this
		 * task on the normal operation of the system
		 */
		IOSleep(uthread->uu_lowpri_delay);
		uthread->uu_lowpri_delay = 0;
	}
	if (kdebug_enable && (code != 180)) {
		/* SSIZE_T results occupy uu_rval[0..1] as one 64-bit
		 * value; log the low word only */
		if (callp->sy_return_type == _SYSCALL_RET_SSIZE_T)
			KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_EXCP_SC, code) | DBG_FUNC_END,
				error, uthread->uu_rval[1], 0, 0, 0);
		else
			KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_EXCP_SC, code) | DBG_FUNC_END,
				error, uthread->uu_rval[0], uthread->uu_rval[1], 0, 0);
	}

	thread_exception_return();
	/* NOTREACHED */
}
440
441#ifdef JOE_DEBUG
/*
 * joe_debug: debug-build helper that prints a diagnostic message to the
 * console.
 *
 * The original definition had no return type (implicit int, removed in
 * C99) and fell off the end without returning a value.  Since the call
 * site in unix_syscall() has no prototype in scope, the compiler assumes
 * "int joe_debug()"; give the definition an explicit matching int return
 * type and a defined return value.
 */
int
joe_debug(char *p)
{
	printf("%s\n", p);
	return (0);
}
446#endif