/*
 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */

#include <kern/task.h>
#include <kern/thread.h>
#include <kern/assert.h>
#include <kern/clock.h>
#include <kern/locks.h>
#include <kern/sched_prim.h>
#include <mach/machine/thread_status.h>
#include <ppc/savearea.h>

#include <sys/kernel.h>
#include <sys/vm.h>
#include <sys/proc_internal.h>
#include <sys/syscall.h>
#include <sys/systm.h>
#include <sys/user.h>
#include <sys/errno.h>
#include <sys/ktrace.h>
#include <sys/kdebug.h>
#include <sys/sysent.h>
#include <sys/sysproto.h>
#include <sys/kauth.h>

#include <bsm/audit_kernel.h>

extern void
unix_syscall(struct savearea *regs);
void
unix_syscall_return(int error);

extern struct savearea *
find_user_regs(
	thread_t act);

extern void enter_funnel_section(funnel_t *funnel_lock);
extern void exit_funnel_section(void);

#ifdef JOE_DEBUG
void joe_debug(char *p);	/* prototype so the call below is not an implicit declaration */
#endif

/*
 * Function:	unix_syscall
 *
 * Inputs:	regs - pointer to Process Control Block
 *
 * Outputs:	none
 */
void
unix_syscall(struct savearea *regs)
{
	thread_t	thread_act;
	struct uthread	*uthread;
	struct proc	*proc;
	struct sysent	*callp;
	int		error;
	unsigned short	code;
	boolean_t	flavor;
	int		funnel_type;
	unsigned int	cancel_enable;

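	/*
	 * Two entry flavors: a direct system call leaves its number in r0
	 * with arguments starting at r3; the indirect syscall(2) form
	 * passes 0 in r0, the real number in r3, and shifts the user
	 * arguments up to start at r4.
	 */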
	flavor = (((unsigned int)regs->save_r0) == 0) ? 1 : 0;

	if (flavor)
		code = regs->save_r3;
	else
		code = regs->save_r0;

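	/*
	 * Syscall 180 is kdebug_trace; it is excluded here so the trace
	 * facility does not log its own system calls.
	 */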
	if (kdebug_enable && (code != 180)) {
		if (flavor)
			KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_EXCP_SC, code) | DBG_FUNC_START,
				regs->save_r4, regs->save_r5, regs->save_r6, regs->save_r7, 0);
		else
			KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_EXCP_SC, code) | DBG_FUNC_START,
				regs->save_r3, regs->save_r4, regs->save_r5, regs->save_r6, 0);
	}
	thread_act = current_thread();
	uthread = get_bsdthread_info(thread_act);

	if (!(uthread->uu_flag & UT_VFORK))
		proc = (struct proc *)get_bsdtask_info(current_task());
	else
		proc = current_proc();

	/*
	 * Delayed binding of thread credential to process credential, if we
	 * are not running with an explicitly set thread credential.
	 */
	if (uthread->uu_ucred != proc->p_ucred &&
	    (uthread->uu_flag & UT_SETUID) == 0) {
		kauth_cred_t old = uthread->uu_ucred;
		proc_lock(proc);
		uthread->uu_ucred = proc->p_ucred;
		kauth_cred_ref(uthread->uu_ucred);
		proc_unlock(proc);
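		/* drop the reference on the stale credential outside the proc lock */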
		if (old != NOCRED)
			kauth_cred_rele(old);
	}

	uthread->uu_ar0 = (int *)regs;

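	/* sysent[63] is the nosys() slot, also used for the unsafe fallbacks below */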
	callp = (code >= nsysent) ? &sysent[63] : &sysent[code];

	if (callp->sy_narg != 0) {
		void		*regsp;
		sy_munge_t	*mungerp;

		if (IS_64BIT_PROCESS(proc)) {
			/* XXX Turn 64 bit unsafe calls into nosys() */
			if (callp->sy_funnel & UNSAFE_64BIT) {
				callp = &sysent[63];
				goto unsafe;
			}
			mungerp = callp->sy_arg_munge64;
		}
		else {
			mungerp = callp->sy_arg_munge32;
		}
		if (!flavor) {
			regsp = (void *) &regs->save_r3;
		} else {
			/* indirect system call consumes an argument so only 7 are supported */
			if (callp->sy_narg > 7) {
				callp = &sysent[63];
				goto unsafe;
			}
			regsp = (void *) &regs->save_r4;
		}
		/* call syscall argument munger to copy in arguments (see xnu/bsd/dev/ppc/munge.s) */
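		/*
		 * Each munger copies (and, for a 32-bit process, widens) the
		 * saved register images into the 64-bit uu_arg[] slots the
		 * handlers expect; sysent selects a routine per call signature.
		 */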
		(*mungerp)(regsp, (void *) &uthread->uu_arg[0]);
	}

unsafe:
	cancel_enable = callp->sy_cancel;

	if (cancel_enable == _SYSCALL_CANCEL_NONE) {
		uthread->uu_flag |= UT_NOTCANCELPT;
	} else {
		if ((uthread->uu_flag & (UT_CANCELDISABLE | UT_CANCEL | UT_CANCELED)) == UT_CANCEL) {
			if (cancel_enable == _SYSCALL_CANCEL_PRE) {
				/* system call cancelled; return to handle cancellation */
				regs->save_r3 = (long long)EINTR;
				thread_exception_return();
				/* NOTREACHED */
			} else {
				thread_abort_safely(thread_act);
			}
		}
	}

	funnel_type = (int)(callp->sy_funnel & FUNNEL_MASK);
	if (funnel_type == KERNEL_FUNNEL)
		enter_funnel_section(kernel_flock);

	uthread->uu_rval[0] = 0;

	/*
	 * r4 is volatile; if we set it to regs->save_r4 here, the child
	 * would inherit the parent's r4 after an execve.
	 */
	uthread->uu_rval[1] = 0;

	error = 0;

	/*
	 * The PPC runtime branches to cerror after every unix system call,
	 * so assume success and advance the "pc" past that call.  It is
	 * backed up onto the cerror call if an error is detected.
	 */
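	/*
	 * Sketch of the user-side stub this arithmetic assumes (not
	 * verbatim libc source):
	 *
	 *     sc                  srr0 initially points just past here
	 *     b    cerror         reached only when we back srr0 up by 4
	 *     <success path>      reached via the += 4 below
	 *
	 * ERESTART's -= 8 lands back on the sc instruction itself.
	 */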
	regs->save_srr0 += 4;

	if (KTRPOINT(proc, KTR_SYSCALL))
		ktrsyscall(proc, code, callp->sy_narg, uthread->uu_arg);

#ifdef JOE_DEBUG
	uthread->uu_iocount = 0;
	uthread->uu_vpindex = 0;
#endif
	AUDIT_SYSCALL_ENTER(code, proc, uthread);
	error = (*(callp->sy_call))(proc, (void *)uthread->uu_arg, &(uthread->uu_rval[0]));
	AUDIT_SYSCALL_EXIT(error, proc, uthread);

#ifdef JOE_DEBUG
	if (uthread->uu_iocount)
		joe_debug("system call returned with uu_iocount != 0");
#endif
	regs = find_user_regs(thread_act);

	if (error == ERESTART) {
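		/* back up over the += 4 above and the sc instruction, so it re-executes */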
		regs->save_srr0 -= 8;
	} else if (error != EJUSTRETURN) {
		if (error) {
			regs->save_r3 = (long long)error;
			/* set the "pc" to execute cerror routine */
			regs->save_srr0 -= 4;
		} else { /* (not error) */
			switch (callp->sy_return_type) {
			case _SYSCALL_RET_INT_T:
				regs->save_r3 = uthread->uu_rval[0];
				regs->save_r4 = uthread->uu_rval[1];
				break;
			case _SYSCALL_RET_UINT_T:
				regs->save_r3 = ((u_int)uthread->uu_rval[0]);
				regs->save_r4 = ((u_int)uthread->uu_rval[1]);
				break;
			case _SYSCALL_RET_OFF_T:
				/*
				 * An off_t return is 64 bits: split across two
				 * registers for a 32-bit process, in a single
				 * register for a 64-bit process.
				 */
				if (IS_64BIT_PROCESS(proc)) {
					u_int64_t *retp = (u_int64_t *)&uthread->uu_rval[0];
					regs->save_r3 = *retp;
					regs->save_r4 = 0;
				}
				else {
					regs->save_r3 = uthread->uu_rval[0];
					regs->save_r4 = uthread->uu_rval[1];
				}
				break;
			case _SYSCALL_RET_ADDR_T:
			case _SYSCALL_RET_SIZE_T:
			case _SYSCALL_RET_SSIZE_T:
				/*
				 * The variable-length return types (user_addr_t,
				 * user_ssize_t, and user_size_t) are always the
				 * largest possible size in the kernel (we use
				 * uu_rval[0] and [1] as one 64-bit value).
				 */
				{
					user_addr_t *retp = (user_addr_t *)&uthread->uu_rval[0];
					regs->save_r3 = *retp;
					regs->save_r4 = 0;
				}
				break;
			case _SYSCALL_RET_NONE:
				break;
			default:
				panic("unix_syscall: unknown return type");
				break;
			}
		}
	}
	/* else (error == EJUSTRETURN) { nothing } */

	if (KTRPOINT(proc, KTR_SYSRET)) {
		switch (callp->sy_return_type) {
		case _SYSCALL_RET_ADDR_T:
		case _SYSCALL_RET_SIZE_T:
		case _SYSCALL_RET_SSIZE_T:
			/*
			 * Trace the value of the least significant bits,
			 * until we can revise the ktrace API safely.
			 */
			ktrsysret(proc, code, error, uthread->uu_rval[1]);
			break;
		default:
			ktrsysret(proc, code, error, uthread->uu_rval[0]);
			break;
		}
	}

	if (cancel_enable == _SYSCALL_CANCEL_NONE)
		uthread->uu_flag &= ~UT_NOTCANCELPT;

	exit_funnel_section();

	if (uthread->uu_lowpri_delay) {
		/*
		 * The task is marked as a low-priority I/O type, and the I/O
		 * we issued while in this system call collided with normal I/O
		 * operations; delay to mitigate this task's impact on normal
		 * operation of the system.
		 */
		IOSleep(uthread->uu_lowpri_delay);
		uthread->uu_lowpri_delay = 0;
	}
	if (kdebug_enable && (code != 180)) {
		if (callp->sy_return_type == _SYSCALL_RET_SSIZE_T)
			KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_EXCP_SC, code) | DBG_FUNC_END,
				error, uthread->uu_rval[1], 0, 0, 0);
		else
			KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_EXCP_SC, code) | DBG_FUNC_END,
				error, uthread->uu_rval[0], uthread->uu_rval[1], 0, 0);
	}

	thread_exception_return();
	/* NOTREACHED */
}


void
unix_syscall_return(int error)
{
	thread_t	thread_act;
	struct uthread	*uthread;
	struct proc	*proc;
	struct savearea	*regs;
	unsigned short	code;
	struct sysent	*callp;
	int		funnel_type;
	unsigned int	cancel_enable;

	thread_act = current_thread();
	proc = current_proc();
	uthread = get_bsdthread_info(thread_act);

	regs = find_user_regs(thread_act);

	/*
	 * Get the index into the sysent table, accounting for the
	 * indirect (r0 == 0) flavor just as unix_syscall() does.
	 */
	if (regs->save_r0 != 0)
		code = regs->save_r0;
	else
		code = regs->save_r3;

	callp = (code >= nsysent) ? &sysent[63] : &sysent[code];

	if (error == ERESTART) {
		regs->save_srr0 -= 8;
	} else if (error != EJUSTRETURN) {
		if (error) {
			regs->save_r3 = (long long)error;
			/* set the "pc" to execute cerror routine */
			regs->save_srr0 -= 4;
		} else { /* (not error) */
			switch (callp->sy_return_type) {
			case _SYSCALL_RET_INT_T:
				regs->save_r3 = uthread->uu_rval[0];
				regs->save_r4 = uthread->uu_rval[1];
				break;
			case _SYSCALL_RET_UINT_T:
				regs->save_r3 = ((u_int)uthread->uu_rval[0]);
				regs->save_r4 = ((u_int)uthread->uu_rval[1]);
				break;
			case _SYSCALL_RET_OFF_T:
				/*
				 * An off_t return is 64 bits: split across two
				 * registers for a 32-bit process, in a single
				 * register for a 64-bit process.
				 */
				if (IS_64BIT_PROCESS(proc)) {
					u_int64_t *retp = (u_int64_t *)&uthread->uu_rval[0];
					regs->save_r3 = *retp;
				}
				else {
					regs->save_r3 = uthread->uu_rval[0];
					regs->save_r4 = uthread->uu_rval[1];
				}
				break;
			case _SYSCALL_RET_ADDR_T:
			case _SYSCALL_RET_SIZE_T:
			case _SYSCALL_RET_SSIZE_T:
				/*
				 * The variable-length return types (user_addr_t,
				 * user_ssize_t, and user_size_t) are always the
				 * largest possible size in the kernel (we use
				 * uu_rval[0] and [1] as one 64-bit value).
				 */
				{
					u_int64_t *retp = (u_int64_t *)&uthread->uu_rval[0];
					regs->save_r3 = *retp;
				}
				break;
			case _SYSCALL_RET_NONE:
				break;
			default:
				panic("unix_syscall_return: unknown return type");
				break;
			}
		}
	}
	/* else (error == EJUSTRETURN) { nothing } */

	if (KTRPOINT(proc, KTR_SYSRET)) {
		switch (callp->sy_return_type) {
		case _SYSCALL_RET_ADDR_T:
		case _SYSCALL_RET_SIZE_T:
		case _SYSCALL_RET_SSIZE_T:
			/*
			 * Trace the value of the least significant bits,
			 * until we can revise the ktrace API safely.
			 */
			ktrsysret(proc, code, error, uthread->uu_rval[1]);
			break;
		default:
			ktrsysret(proc, code, error, uthread->uu_rval[0]);
			break;
		}
	}

	cancel_enable = callp->sy_cancel;

	if (cancel_enable == _SYSCALL_CANCEL_NONE)
		uthread->uu_flag &= ~UT_NOTCANCELPT;

	exit_funnel_section();

	if (uthread->uu_lowpri_delay) {
		/*
		 * The task is marked as a low-priority I/O type, and the I/O
		 * we issued while in this system call collided with normal I/O
		 * operations; delay to mitigate this task's impact on normal
		 * operation of the system.
		 */
		IOSleep(uthread->uu_lowpri_delay);
		uthread->uu_lowpri_delay = 0;
	}
	if (kdebug_enable && (code != 180)) {
		if (callp->sy_return_type == _SYSCALL_RET_SSIZE_T)
			KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_EXCP_SC, code) | DBG_FUNC_END,
				error, uthread->uu_rval[1], 0, 0, 0);
		else
			KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_EXCP_SC, code) | DBG_FUNC_END,
				error, uthread->uu_rval[0], uthread->uu_rval[1], 0, 0);
	}

	thread_exception_return();
	/* NOTREACHED */
}


/*
 * Time of day and interval timer support.
 *
 * These routines provide the kernel entry points to get and set
 * the time-of-day and per-process interval timers.  Subroutines
 * here provide support for adding and subtracting timeval structures
 * and decrementing interval timers, optionally reloading the interval
 * timers when they expire.
 */
/*
 * NOTE: this implementation is for PPC architectures only.
 * It is infrequently called, since the commpage intercepts
 * most calls in user mode.
 *
 * XXX Y2038 bug because of assumed return of 32 bit seconds value, and
 * XXX first parameter to clock_gettimeofday()
 */
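/*
 * Assumed register convention (inferred from the savearea handling
 * above, not stated in this file): retval[0] receives the 32-bit
 * seconds and retval[1] the microseconds, which land in the caller's
 * r3/r4 on return.
 */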
int
ppc_gettimeofday(__unused struct proc *p,
	register struct ppc_gettimeofday_args *uap,
	register_t *retval)
{
	int error = 0;
	extern lck_spin_t * tz_slock;

	if (uap->tp)
		clock_gettimeofday(&retval[0], &retval[1]);

	if (uap->tzp) {
		struct timezone ltz;

		lck_spin_lock(tz_slock);
		ltz = tz;
		lck_spin_unlock(tz_slock);
		error = copyout((caddr_t)&ltz, uap->tzp, sizeof(ltz));
	}

	return (error);
}

#ifdef JOE_DEBUG
void
joe_debug(char *p)
{
	printf("%s\n", p);
}
#endif


/*
 * WARNING - this is a temporary workaround for binary compatibility issues
 * with anti-piracy software that relies on patching ptrace (3928003).
 * This KPI will be removed in the system release after Tiger.
 */
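/*
 * Illustrative use only (names are hypothetical, not part of the KPI):
 *
 *     static sy_call_t *orig_ptrace;
 *
 *     orig_ptrace = (sy_call_t *)temp_patch_ptrace((uintptr_t)my_ptrace);
 *     if (orig_ptrace == NULL)
 *         ...                  // someone else already patched ptrace
 *     ...
 *     temp_unpatch_ptrace();   // restore before the patcher unloads
 *
 * my_ptrace() would filter requests and chain to orig_ptrace().
 */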
uintptr_t temp_patch_ptrace(uintptr_t new_ptrace)
{
	struct sysent *	callp;
	sy_call_t *	old_ptrace;

	if (new_ptrace == 0)
		return(0);

	enter_funnel_section(kernel_flock);
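	/* sysent[26] is the SYS_ptrace entry in the system call table */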
	callp = &sysent[26];
	old_ptrace = callp->sy_call;

	/* only allow one patcher of ptrace */
	if (old_ptrace == (sy_call_t *) ptrace) {
		callp->sy_call = (sy_call_t *) new_ptrace;
	}
	else {
		old_ptrace = NULL;
	}
	exit_funnel_section();

	return((uintptr_t)old_ptrace);
}

void temp_unpatch_ptrace(void)
{
	struct sysent *	callp;

	enter_funnel_section(kernel_flock);
	callp = &sysent[26];
	callp->sy_call = (sy_call_t *) ptrace;
	exit_funnel_section();

	return;
}