/*
 * Source: bsd/dev/ppc/systemcalls.c from Apple xnu-792.25.20
 * (retrieved via the git.saurik.com apple/xnu.git gitweb mirror)
 */
1 /*
2 * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * The contents of this file constitute Original Code as defined in and
7 * are subject to the Apple Public Source License Version 1.1 (the
8 * "License"). You may not use this file except in compliance with the
9 * License. Please obtain a copy of the License at
10 * http://www.apple.com/publicsource and read it before using this file.
11 *
12 * This Original Code and all software distributed under the License are
13 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
14 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
15 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
17 * License for the specific language governing rights and limitations
18 * under the License.
19 *
20 * @APPLE_LICENSE_HEADER_END@
21 */
22
23 #include <kern/task.h>
24 #include <kern/thread.h>
25 #include <kern/assert.h>
26 #include <kern/clock.h>
27 #include <kern/locks.h>
28 #include <kern/sched_prim.h>
29 #include <mach/machine/thread_status.h>
30 #include <ppc/savearea.h>
31
32 #include <sys/kernel.h>
33 #include <sys/vm.h>
34 #include <sys/proc_internal.h>
35 #include <sys/syscall.h>
36 #include <sys/systm.h>
37 #include <sys/user.h>
38 #include <sys/errno.h>
39 #include <sys/ktrace.h>
40 #include <sys/kdebug.h>
41 #include <sys/sysent.h>
42 #include <sys/sysproto.h>
43 #include <sys/kauth.h>
44
45 #include <bsm/audit_kernel.h>
46
47 extern void
48 unix_syscall(struct savearea *regs);
49 void
50 unix_syscall_return(int error);
51
52 extern struct savearea *
53 find_user_regs(
54 thread_t act);
55
56 extern void enter_funnel_section(funnel_t *funnel_lock);
57 extern void exit_funnel_section(void);
58
59 /*
60 * Function: unix_syscall
61 *
62 * Inputs: regs - pointer to Process Control Block
63 *
64 * Outputs: none
65 */
/*
 * Dispatch a BSD system call trapped from user space on PPC: decode the
 * syscall number (direct or indirect flavor), munge the user register
 * arguments into uthread->uu_arg, apply the call's cancellation and funnel
 * policy, invoke the sysent handler, and marshal error/return values back
 * into the saved user registers before thread_exception_return().
 */
void
unix_syscall(struct savearea *regs)
{
	thread_t		thread_act;
	struct uthread		*uthread;
	struct proc		*proc;
	struct sysent		*callp;
	int			error;
	unsigned short		code;
	boolean_t		flavor;
	int			funnel_type;
	unsigned int		cancel_enable;

	/*
	 * r0 == 0 selects the "indirect" flavor (SYS_syscall): the real
	 * syscall number is in r3 and its arguments start at r4.
	 */
	flavor = (((unsigned int)regs->save_r0) == 0)? 1: 0;

	if (flavor)
		code = regs->save_r3;
	else
		code = regs->save_r0;

	/* code 180 is excluded from kdebug tracing (avoids tracing the tracer) */
	if (kdebug_enable && (code != 180)) {
		if (flavor)
			KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_EXCP_SC, code) | DBG_FUNC_START,
				regs->save_r4, regs->save_r5, regs->save_r6, regs->save_r7, 0);
		else
			KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_EXCP_SC, code) | DBG_FUNC_START,
				regs->save_r3, regs->save_r4, regs->save_r5, regs->save_r6, 0);
	}
	thread_act = current_thread();
	uthread = get_bsdthread_info(thread_act);

	/*
	 * During vfork the child runs on the parent's task, so the task's
	 * bsd info would name the wrong proc; use current_proc() then.
	 */
	if (!(uthread->uu_flag & UT_VFORK))
		proc = (struct proc *)get_bsdtask_info(current_task());
	else
		proc = current_proc();

	/* Make sure there is a process associated with this task */
	if (proc == NULL) {
		regs->save_r3 = (long long)EPERM;
		/* set the "pc" to execute cerror routine */
		regs->save_srr0 -= 4;
		task_terminate_internal(current_task());
		thread_exception_return();
		/* NOTREACHED */
	}

	/*
	 * Delayed binding of thread credential to process credential, if we
	 * are not running with an explicitly set thread credential.
	 */
	if (uthread->uu_ucred != proc->p_ucred &&
	    (uthread->uu_flag & UT_SETUID) == 0) {
		kauth_cred_t old = uthread->uu_ucred;
		uthread->uu_ucred = kauth_cred_proc_ref(proc);
		if (IS_VALID_CRED(old))
			kauth_cred_unref(&old);
	}

	/* out-of-range syscall numbers are redirected to sysent[63] (nosys) */
	callp = (code >= nsysent) ? &sysent[63] : &sysent[code];

	if (callp->sy_narg != 0) {
		void		*regsp;
		sy_munge_t	*mungerp;

		if (IS_64BIT_PROCESS(proc)) {
			/* XXX Turn 64 bit unsafe calls into nosys() */
			if (callp->sy_funnel & UNSAFE_64BIT) {
				callp = &sysent[63];
				goto unsafe;
			}
			mungerp = callp->sy_arg_munge64;
		}
		else {
			mungerp = callp->sy_arg_munge32;
		}
		/* direct calls pass args from r3; indirect ones from r4 */
		if ( !flavor) {
			regsp = (void *) &regs->save_r3;
		} else {
			/* indirect system call consumes an argument so only 7 are supported */
			if (callp->sy_narg > 7) {
				callp = &sysent[63];
				goto unsafe;
			}
			regsp = (void *) &regs->save_r4;
		}
		/* call syscall argument munger to copy in arguments (see xnu/bsd/dev/ppc/munge.s) */
		(*mungerp)(regsp, (void *) &uthread->uu_arg[0]);
	}

unsafe:
	/* apply this call's cancellation policy before invoking the handler */
	cancel_enable = callp->sy_cancel;

	if (cancel_enable == _SYSCALL_CANCEL_NONE) {
		/* this call is never a cancellation point */
		uthread->uu_flag |= UT_NOTCANCELPT;
	} else {
		/* a cancel is pending, enabled, and not yet acted upon */
		if((uthread->uu_flag & (UT_CANCELDISABLE | UT_CANCEL | UT_CANCELED)) == UT_CANCEL) {
			if (cancel_enable == _SYSCALL_CANCEL_PRE) {
				/* system call cancelled; return to handle cancellation */
				regs->save_r3 = (long long)EINTR;
				thread_exception_return();
				/* NOTREACHED */
			} else {
				/* _SYSCALL_CANCEL_POST: let the call start, abort it safely */
				thread_abort_safely(thread_act);
			}
		}
	}

	funnel_type = (int)(callp->sy_funnel & FUNNEL_MASK);
	if (funnel_type == KERNEL_FUNNEL)
		enter_funnel_section(kernel_flock);

	uthread->uu_rval[0] = 0;

	/*
	 * r4 is volatile, if we set it to regs->save_r4 here the child
	 * will have parents r4 after execve
	 */
	uthread->uu_rval[1] = 0;

	error = 0;

	/*
	 * PPC runtime calls cerror after every unix system call, so
	 * assume no error and adjust the "pc" to skip this call.
	 * It will be set back to the cerror call if an error is detected.
	 */
	regs->save_srr0 += 4;

	if (KTRPOINT(proc, KTR_SYSCALL))
		ktrsyscall(proc, code, callp->sy_narg, uthread->uu_arg);

#ifdef JOE_DEBUG
	uthread->uu_iocount = 0;
	uthread->uu_vpindex = 0;
#endif
	AUDIT_SYSCALL_ENTER(code, proc, uthread);
	error = (*(callp->sy_call))(proc, (void *)uthread->uu_arg, &(uthread->uu_rval[0]));
	AUDIT_SYSCALL_EXIT(error, proc, uthread);

#ifdef JOE_DEBUG
	if (uthread->uu_iocount)
		joe_debug("system call returned with uu_iocount != 0");
#endif
	/* the handler may have blocked; refetch the saved user registers */
	regs = find_user_regs(thread_act);

	if (error == ERESTART) {
		/* back up over both the skip adjustment and the sc to re-issue it */
		regs->save_srr0 -= 8;
	} else if (error != EJUSTRETURN) {
		if (error) {
			regs->save_r3 = (long long)error;
			/* set the "pc" to execute cerror routine */
			regs->save_srr0 -= 4;
		} else { /* (not error) */
			switch (callp->sy_return_type) {
			case _SYSCALL_RET_INT_T:
				regs->save_r3 = uthread->uu_rval[0];
				regs->save_r4 = uthread->uu_rval[1];
				break;
			case _SYSCALL_RET_UINT_T:
				regs->save_r3 = ((u_int)uthread->uu_rval[0]);
				regs->save_r4 = ((u_int)uthread->uu_rval[1]);
				break;
			case _SYSCALL_RET_OFF_T:
				/* off_t returns 64 bits split across two registers for 32 bit */
				/* process and in one register for 64 bit process */
				if (IS_64BIT_PROCESS(proc)) {
					u_int64_t *retp = (u_int64_t *)&uthread->uu_rval[0];
					regs->save_r3 = *retp;
					regs->save_r4 = 0;
				}
				else {
					regs->save_r3 = uthread->uu_rval[0];
					regs->save_r4 = uthread->uu_rval[1];
				}
				break;
			case _SYSCALL_RET_ADDR_T:
			case _SYSCALL_RET_SIZE_T:
			case _SYSCALL_RET_SSIZE_T:
				/* the variable length return types (user_addr_t, user_ssize_t,
				 * and user_size_t) are always the largest possible size in the
				 * kernel (we use uu_rval[0] and [1] as one 64 bit value).
				 */
				{
					user_addr_t *retp = (user_addr_t *)&uthread->uu_rval[0];
					regs->save_r3 = *retp;
					regs->save_r4 = 0;
				}
				break;
			case _SYSCALL_RET_NONE:
				break;
			default:
				panic("unix_syscall: unknown return type");
				break;
			}
		}
	}
	/* else  (error == EJUSTRETURN) { nothing } */


	if (KTRPOINT(proc, KTR_SYSRET)) {
		switch(callp->sy_return_type) {
		case _SYSCALL_RET_ADDR_T:
		case _SYSCALL_RET_SIZE_T:
		case _SYSCALL_RET_SSIZE_T:
			/*
			 * Trace the value of the least significant bits,
			 * until we can revise the ktrace API safely.
			 */
			ktrsysret(proc, code, error, uthread->uu_rval[1]);
			break;
		default:
			ktrsysret(proc, code, error, uthread->uu_rval[0]);
			break;
		}
	}

	/* the thread becomes a cancellation point again once the call is done */
	if (cancel_enable == _SYSCALL_CANCEL_NONE)
		uthread->uu_flag &= ~UT_NOTCANCELPT;

	exit_funnel_section();

	if (uthread->uu_lowpri_delay) {
		/*
		 * task is marked as a low priority I/O type
		 * and the I/O we issued while in this system call
		 * collided with normal I/O operations... we'll
		 * delay in order to mitigate the impact of this
		 * task on the normal operation of the system
		 */
		IOSleep(uthread->uu_lowpri_delay);
		uthread->uu_lowpri_delay = 0;
	}
	if (kdebug_enable && (code != 180)) {

		if (callp->sy_return_type == _SYSCALL_RET_SSIZE_T)
			KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_EXCP_SC, code) | DBG_FUNC_END,
				error, uthread->uu_rval[1], 0, 0, 0);
		else
			KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_EXCP_SC, code) | DBG_FUNC_END,
				error, uthread->uu_rval[0], uthread->uu_rval[1], 0, 0);
	}

	thread_exception_return();
	/* NOTREACHED */
}
311
/*
 * Complete a unix system call whose handler returned out of line (e.g.
 * via a continuation after blocking).  Re-derives the syscall number from
 * the saved user registers, marshals error/return values exactly as
 * unix_syscall() does, then performs the ktrace / cancellation / funnel /
 * low-priority-I/O epilogue and returns to user mode.
 *
 * Inputs:	error - BSD error code to deliver, or ERESTART / EJUSTRETURN
 */
void
unix_syscall_return(int error)
{
	thread_t				thread_act;
	struct uthread				*uthread;
	struct proc				*proc;
	struct savearea				*regs;
	unsigned short				code;
	struct sysent				*callp;
	unsigned int				cancel_enable;

	thread_act = current_thread();
	proc = current_proc();
	uthread = get_bsdthread_info(thread_act);

	regs = find_user_regs(thread_act);

	/* r0 == 0 means the indirect flavor: the real code is in r3 */
	if (regs->save_r0 != 0)
		code = regs->save_r0;
	else
		code = regs->save_r3;

	/* out-of-range syscall numbers are redirected to sysent[63] (nosys) */
	callp = (code >= nsysent) ? &sysent[63] : &sysent[code];

	/*
	 * Get index into sysent table
	 */
	if (error == ERESTART) {
		/* back up over both the skip adjustment and the sc to re-issue it */
		regs->save_srr0 -= 8;
	} else if (error != EJUSTRETURN) {
		if (error) {
			regs->save_r3 = (long long)error;
			/* set the "pc" to execute cerror routine */
			regs->save_srr0 -= 4;
		} else { /* (not error) */
			switch (callp->sy_return_type) {
			case _SYSCALL_RET_INT_T:
				regs->save_r3 = uthread->uu_rval[0];
				regs->save_r4 = uthread->uu_rval[1];
				break;
			case _SYSCALL_RET_UINT_T:
				regs->save_r3 = ((u_int)uthread->uu_rval[0]);
				regs->save_r4 = ((u_int)uthread->uu_rval[1]);
				break;
			case _SYSCALL_RET_OFF_T:
				/* off_t returns 64 bits split across two registers for 32 bit */
				/* process and in one register for 64 bit process */
				if (IS_64BIT_PROCESS(proc)) {
					u_int64_t *retp = (u_int64_t *)&uthread->uu_rval[0];
					regs->save_r3 = *retp;
					/* NOTE(review): unlike unix_syscall(), r4 is not
					 * zeroed here for 64-bit processes — presumably
					 * harmless since 64-bit callers read only r3;
					 * confirm before changing. */
				}
				else {
					regs->save_r3 = uthread->uu_rval[0];
					regs->save_r4 = uthread->uu_rval[1];
				}
				break;
			case _SYSCALL_RET_ADDR_T:
			case _SYSCALL_RET_SIZE_T:
			case _SYSCALL_RET_SSIZE_T:
				/* the variable length return types (user_addr_t, user_ssize_t,
				 * and user_size_t) are always the largest possible size in the
				 * kernel (we use uu_rval[0] and [1] as one 64 bit value).
				 */
				{
					u_int64_t *retp = (u_int64_t *)&uthread->uu_rval[0];
					regs->save_r3 = *retp;
				}
				break;
			case _SYSCALL_RET_NONE:
				break;
			default:
				panic("unix_syscall: unknown return type");
				break;
			}
		}
	}
	/* else  (error == EJUSTRETURN) { nothing } */

	if (KTRPOINT(proc, KTR_SYSRET)) {
		switch(callp->sy_return_type) {
		case _SYSCALL_RET_ADDR_T:
		case _SYSCALL_RET_SIZE_T:
		case _SYSCALL_RET_SSIZE_T:
			/*
			 * Trace the value of the least significant bits,
			 * until we can revise the ktrace API safely.
			 */
			ktrsysret(proc, code, error, uthread->uu_rval[1]);
			break;
		default:
			ktrsysret(proc, code, error, uthread->uu_rval[0]);
			break;
		}
	}

	cancel_enable = callp->sy_cancel;

	/* the thread becomes a cancellation point again once the call is done */
	if (cancel_enable == _SYSCALL_CANCEL_NONE)
		uthread->uu_flag &= ~UT_NOTCANCELPT;

	exit_funnel_section();

	if (uthread->uu_lowpri_delay) {
		/*
		 * task is marked as a low priority I/O type
		 * and the I/O we issued while in this system call
		 * collided with normal I/O operations... we'll
		 * delay in order to mitigate the impact of this
		 * task on the normal operation of the system
		 */
		IOSleep(uthread->uu_lowpri_delay);
		uthread->uu_lowpri_delay = 0;
	}
	/* code 180 is excluded from kdebug tracing (avoids tracing the tracer) */
	if (kdebug_enable && (code != 180)) {
		if (callp->sy_return_type == _SYSCALL_RET_SSIZE_T)
			KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_EXCP_SC, code) | DBG_FUNC_END,
				error, uthread->uu_rval[1], 0, 0, 0);
		else
			KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_EXCP_SC, code) | DBG_FUNC_END,
				error, uthread->uu_rval[0], uthread->uu_rval[1], 0, 0);
	}

	thread_exception_return();
	/* NOTREACHED */
}
437
438 #ifdef JOE_DEBUG
/*
 * Debug-only helper (compiled under JOE_DEBUG): print a diagnostic
 * message to the console.
 *
 * Fixes: the original used an implicit `int` return type (removed in
 * C99) and fell off the end without returning a value.  The explicit
 * `int` return matches the implicit declaration seen by the caller in
 * unix_syscall(), which ignores the result.
 */
int
joe_debug(char *p)
{
	printf("%s\n", p);
	return 0;
}
443 #endif