]> git.saurik.com Git - apple/xnu.git/blob - bsd/dev/ppc/systemcalls.c
xnu-792.12.6.tar.gz
[apple/xnu.git] / bsd / dev / ppc / systemcalls.c
1 /*
2 * Copyright (c) 2006 Apple Computer, Inc. All Rights Reserved.
3 *
4 * @APPLE_LICENSE_OSREFERENCE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the
10 * License may not be used to create, or enable the creation or
11 * redistribution of, unlawful or unlicensed copies of an Apple operating
12 * system, or to circumvent, violate, or enable the circumvention or
13 * violation of, any terms of an Apple operating system software license
14 * agreement.
15 *
16 * Please obtain a copy of the License at
17 * http://www.opensource.apple.com/apsl/ and read it before using this
18 * file.
19 *
20 * The Original Code and all software distributed under the License are
21 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
22 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
23 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
24 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
25 * Please see the License for the specific language governing rights and
26 * limitations under the License.
27 *
28 * @APPLE_LICENSE_OSREFERENCE_HEADER_END@
29 */
30
31 #include <kern/task.h>
32 #include <kern/thread.h>
33 #include <kern/assert.h>
34 #include <kern/clock.h>
35 #include <kern/locks.h>
36 #include <kern/sched_prim.h>
37 #include <mach/machine/thread_status.h>
38 #include <ppc/savearea.h>
39
40 #include <sys/kernel.h>
41 #include <sys/vm.h>
42 #include <sys/proc_internal.h>
43 #include <sys/syscall.h>
44 #include <sys/systm.h>
45 #include <sys/user.h>
46 #include <sys/errno.h>
47 #include <sys/ktrace.h>
48 #include <sys/kdebug.h>
49 #include <sys/sysent.h>
50 #include <sys/sysproto.h>
51 #include <sys/kauth.h>
52
53 #include <bsm/audit_kernel.h>
54
55 extern void
56 unix_syscall(struct savearea *regs);
57 void
58 unix_syscall_return(int error);
59
60 extern struct savearea *
61 find_user_regs(
62 thread_t act);
63
64 extern void enter_funnel_section(funnel_t *funnel_lock);
65 extern void exit_funnel_section(void);
66
/*
 * Function:	unix_syscall
 *
 * Inputs:	regs - pointer to Process Control Block (the PPC register
 *		       save area captured at the system-call trap)
 *
 * Outputs:	none -- never returns to its caller; exits to user space
 *		via thread_exception_return()
 *
 * Dispatches one BSD system call for the current thread: decodes the call
 * number, munges user arguments into uu_arg[], handles cancellation and
 * funnel acquisition, invokes the sysent handler, and writes the result
 * back into the saved user registers per the call's declared return type.
 */
void
unix_syscall(struct savearea *regs)
{
	thread_t	thread_act;
	struct uthread	*uthread;
	struct proc	*proc;
	struct sysent	*callp;
	int		error;
	unsigned short	code;
	boolean_t	flavor;
	int		funnel_type;
	unsigned int	cancel_enable;

	/*
	 * r0 == 0 marks the indirect form (flavor = 1): the call number is
	 * in r3 and user arguments start at r4.  Otherwise the number is in
	 * r0 and arguments start at r3.
	 */
	flavor = (((unsigned int)regs->save_r0) == 0)? 1: 0;

	if (flavor)
		code = regs->save_r3;
	else
		code = regs->save_r0;

	/*
	 * Trace syscall entry.  Code 180 is excluded -- presumably the
	 * kdebug trace syscall itself, to avoid self-tracing (TODO confirm).
	 */
	if (kdebug_enable && (code != 180)) {
		if (flavor)
			KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_EXCP_SC, code) | DBG_FUNC_START,
				regs->save_r4, regs->save_r5, regs->save_r6, regs->save_r7, 0);
		else
			KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_EXCP_SC, code) | DBG_FUNC_START,
				regs->save_r3, regs->save_r4, regs->save_r5, regs->save_r6, 0);
	}
	thread_act = current_thread();
	uthread = get_bsdthread_info(thread_act);

	/* During vfork the thread's proc is not the task's proc, so it must
	 * be fetched through the per-thread path instead of the task. */
	if (!(uthread->uu_flag & UT_VFORK))
		proc = (struct proc *)get_bsdtask_info(current_task());
	else
		proc = current_proc();

	/* Make sure there is a process associated with this task */
	if (proc == NULL) {
		regs->save_r3 = (long long)EPERM;
		/* set the "pc" to execute cerror routine */
		regs->save_srr0 -= 4;
		task_terminate_internal(current_task());
		thread_exception_return();
		/* NOTREACHED */
	}

	/*
	 * Delayed binding of thread credential to process credential, if we
	 * are not running with an explicitly set thread credential.
	 */
	if (uthread->uu_ucred != proc->p_ucred &&
	    (uthread->uu_flag & UT_SETUID) == 0) {
		kauth_cred_t old = uthread->uu_ucred;
		proc_lock(proc);
		uthread->uu_ucred = proc->p_ucred;
		kauth_cred_ref(uthread->uu_ucred);
		proc_unlock(proc);
		/* drop the reference held on the credential we replaced */
		if (old != NOCRED)
			kauth_cred_rele(old);
	}

	uthread->uu_ar0 = (int *)regs;

	/* Out-of-range codes dispatch to sysent[63]; the same slot is used
	 * below to turn 64-bit-unsafe calls into nosys(). */
	callp = (code >= nsysent) ? &sysent[63] : &sysent[code];

	if (callp->sy_narg != 0) {
		void		*regsp;
		sy_munge_t	*mungerp;

		if (IS_64BIT_PROCESS(proc)) {
			/* XXX Turn 64 bit unsafe calls into nosys() */
			if (callp->sy_funnel & UNSAFE_64BIT) {
				callp = &sysent[63];
				goto unsafe;
			}
			mungerp = callp->sy_arg_munge64;
		}
		else {
			mungerp = callp->sy_arg_munge32;
		}
		if ( !flavor) {
			regsp = (void *) &regs->save_r3;
		} else {
			/* indirect system call consumes an argument so only 7 are supported */
			if (callp->sy_narg > 7) {
				callp = &sysent[63];
				goto unsafe;
			}
			regsp = (void *) &regs->save_r4;
		}
		/* call syscall argument munger to copy in arguments (see xnu/bsd/dev/ppc/munge.s) */
		(*mungerp)(regsp, (void *) &uthread->uu_arg[0]);
	}

unsafe:
	cancel_enable = callp->sy_cancel;

	/*
	 * Non-cancellation points are flagged so a pending cancel cannot
	 * fire inside them; otherwise a pending, enabled, not-yet-delivered
	 * cancel is acted on now (PRE) or delivered via an abort (POST).
	 */
	if (cancel_enable == _SYSCALL_CANCEL_NONE) {
		uthread->uu_flag |= UT_NOTCANCELPT;
	} else {
		if((uthread->uu_flag & (UT_CANCELDISABLE | UT_CANCEL | UT_CANCELED)) == UT_CANCEL) {
			if (cancel_enable == _SYSCALL_CANCEL_PRE) {
				/* system call cancelled; return to handle cancellation */
				regs->save_r3 = (long long)EINTR;
				thread_exception_return();
				/* NOTREACHED */
			} else {
				thread_abort_safely(thread_act);
			}
		}
	}

	funnel_type = (int)(callp->sy_funnel & FUNNEL_MASK);
	if (funnel_type == KERNEL_FUNNEL)
		enter_funnel_section(kernel_flock);

	uthread->uu_rval[0] = 0;

	/*
	 * r4 is volatile, if we set it to regs->save_r4 here the child
	 * will have parents r4 after execve
	 */
	uthread->uu_rval[1] = 0;

	error = 0;

	/*
	 * PPC runtime calls cerror after every unix system call, so
	 * assume no error and adjust the "pc" to skip this call.
	 * It will be set back to the cerror call if an error is detected.
	 */
	regs->save_srr0 += 4;

	if (KTRPOINT(proc, KTR_SYSCALL))
		ktrsyscall(proc, code, callp->sy_narg, uthread->uu_arg);

#ifdef JOE_DEBUG
	uthread->uu_iocount = 0;
	uthread->uu_vpindex = 0;
#endif
	AUDIT_SYSCALL_ENTER(code, proc, uthread);
	error = (*(callp->sy_call))(proc, (void *)uthread->uu_arg, &(uthread->uu_rval[0]));
	AUDIT_SYSCALL_EXIT(error, proc, uthread);

#ifdef JOE_DEBUG
	if (uthread->uu_iocount)
		joe_debug("system call returned with uu_iocount != 0");
#endif
	/* the handler may have blocked; refetch the register save area */
	regs = find_user_regs(thread_act);

	if (error == ERESTART) {
		/* net -4 vs. the trap pc (we added 4 above): re-execute the sc */
		regs->save_srr0 -= 8;
	} else if (error != EJUSTRETURN) {
		if (error) {
			regs->save_r3 = (long long)error;
			/* set the "pc" to execute cerror routine */
			regs->save_srr0 -= 4;
		} else { /* (not error) */
			switch (callp->sy_return_type) {
			case _SYSCALL_RET_INT_T:
				regs->save_r3 = uthread->uu_rval[0];
				regs->save_r4 = uthread->uu_rval[1];
				break;
			case _SYSCALL_RET_UINT_T:
				regs->save_r3 = ((u_int)uthread->uu_rval[0]);
				regs->save_r4 = ((u_int)uthread->uu_rval[1]);
				break;
			case _SYSCALL_RET_OFF_T:
				/* off_t returns 64 bits split across two registers for 32 bit */
				/* process and in one register for 64 bit process */
				if (IS_64BIT_PROCESS(proc)) {
					u_int64_t *retp = (u_int64_t *)&uthread->uu_rval[0];
					regs->save_r3 = *retp;
					regs->save_r4 = 0;
				}
				else {
					regs->save_r3 = uthread->uu_rval[0];
					regs->save_r4 = uthread->uu_rval[1];
				}
				break;
			case _SYSCALL_RET_ADDR_T:
			case _SYSCALL_RET_SIZE_T:
			case _SYSCALL_RET_SSIZE_T:
				/* the variable length return types (user_addr_t, user_ssize_t,
				 * and user_size_t) are always the largest possible size in the
				 * kernel (we use uu_rval[0] and [1] as one 64 bit value).
				 */
				{
					user_addr_t *retp = (user_addr_t *)&uthread->uu_rval[0];
					regs->save_r3 = *retp;
					regs->save_r4 = 0;
				}
				break;
			case _SYSCALL_RET_NONE:
				break;
			default:
				panic("unix_syscall: unknown return type");
				break;
			}
		}
	}
	/* else  (error == EJUSTRETURN) { nothing } */

	if (KTRPOINT(proc, KTR_SYSRET)) {
		switch(callp->sy_return_type) {
		case _SYSCALL_RET_ADDR_T:
		case _SYSCALL_RET_SIZE_T:
		case _SYSCALL_RET_SSIZE_T:
			/*
			 * Trace the value of the least significant bits,
			 * until we can revise the ktrace API safely.
			 */
			ktrsysret(proc, code, error, uthread->uu_rval[1]);
			break;
		default:
			ktrsysret(proc, code, error, uthread->uu_rval[0]);
			break;
		}
	}

	if (cancel_enable == _SYSCALL_CANCEL_NONE)
		uthread->uu_flag &= ~UT_NOTCANCELPT;

	/* NOTE(review): called unconditionally even though the funnel was
	 * entered only for KERNEL_FUNNEL calls -- presumably it tolerates
	 * the not-held case; confirm against exit_funnel_section(). */
	exit_funnel_section();

	if (uthread->uu_lowpri_delay) {
		/*
		 * task is marked as a low priority I/O type
		 * and the I/O we issued while in this system call
		 * collided with normal I/O operations... we'll
		 * delay in order to mitigate the impact of this
		 * task on the normal operation of the system
		 */
		IOSleep(uthread->uu_lowpri_delay);
		uthread->uu_lowpri_delay = 0;
	}
	if (kdebug_enable && (code != 180)) {

		if (callp->sy_return_type == _SYSCALL_RET_SSIZE_T)
			KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_EXCP_SC, code) | DBG_FUNC_END,
				error, uthread->uu_rval[1], 0, 0, 0);
		else
			KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_EXCP_SC, code) | DBG_FUNC_END,
				error, uthread->uu_rval[0], uthread->uu_rval[1], 0, 0);
	}

	thread_exception_return();
	/* NOTREACHED */
}
324
325 void
326 unix_syscall_return(int error)
327 {
328 thread_t thread_act;
329 struct uthread *uthread;
330 struct proc *proc;
331 struct savearea *regs;
332 unsigned short code;
333 struct sysent *callp;
334 int funnel_type;
335 unsigned int cancel_enable;
336
337 thread_act = current_thread();
338 proc = current_proc();
339 uthread = get_bsdthread_info(thread_act);
340
341 regs = find_user_regs(thread_act);
342
343 if (regs->save_r0 != 0)
344 code = regs->save_r0;
345 else
346 code = regs->save_r3;
347
348 callp = (code >= nsysent) ? &sysent[63] : &sysent[code];
349
350 /*
351 * Get index into sysent table
352 */
353 if (error == ERESTART) {
354 regs->save_srr0 -= 8;
355 } else if (error != EJUSTRETURN) {
356 if (error) {
357 regs->save_r3 = (long long)error;
358 /* set the "pc" to execute cerror routine */
359 regs->save_srr0 -= 4;
360 } else { /* (not error) */
361 switch (callp->sy_return_type) {
362 case _SYSCALL_RET_INT_T:
363 regs->save_r3 = uthread->uu_rval[0];
364 regs->save_r4 = uthread->uu_rval[1];
365 break;
366 case _SYSCALL_RET_UINT_T:
367 regs->save_r3 = ((u_int)uthread->uu_rval[0]);
368 regs->save_r4 = ((u_int)uthread->uu_rval[1]);
369 break;
370 case _SYSCALL_RET_OFF_T:
371 /* off_t returns 64 bits split across two registers for 32 bit */
372 /* process and in one register for 64 bit process */
373 if (IS_64BIT_PROCESS(proc)) {
374 u_int64_t *retp = (u_int64_t *)&uthread->uu_rval[0];
375 regs->save_r3 = *retp;
376 }
377 else {
378 regs->save_r3 = uthread->uu_rval[0];
379 regs->save_r4 = uthread->uu_rval[1];
380 }
381 break;
382 case _SYSCALL_RET_ADDR_T:
383 case _SYSCALL_RET_SIZE_T:
384 case _SYSCALL_RET_SSIZE_T:
385 /* the variable length return types (user_addr_t, user_ssize_t,
386 * and user_size_t) are always the largest possible size in the
387 * kernel (we use uu_rval[0] and [1] as one 64 bit value).
388 */
389 {
390 u_int64_t *retp = (u_int64_t *)&uthread->uu_rval[0];
391 regs->save_r3 = *retp;
392 }
393 break;
394 case _SYSCALL_RET_NONE:
395 break;
396 default:
397 panic("unix_syscall: unknown return type");
398 break;
399 }
400 }
401 }
402 /* else (error == EJUSTRETURN) { nothing } */
403
404 if (KTRPOINT(proc, KTR_SYSRET)) {
405 switch(callp->sy_return_type) {
406 case _SYSCALL_RET_ADDR_T:
407 case _SYSCALL_RET_SIZE_T:
408 case _SYSCALL_RET_SSIZE_T:
409 /*
410 * Trace the value of the least significant bits,
411 * until we can revise the ktrace API safely.
412 */
413 ktrsysret(proc, code, error, uthread->uu_rval[1]);
414 break;
415 default:
416 ktrsysret(proc, code, error, uthread->uu_rval[0]);
417 break;
418 }
419 }
420
421 cancel_enable = callp->sy_cancel;
422
423 if (cancel_enable == _SYSCALL_CANCEL_NONE)
424 uthread->uu_flag &= ~UT_NOTCANCELPT;
425
426 exit_funnel_section();
427
428 if (uthread->uu_lowpri_delay) {
429 /*
430 * task is marked as a low priority I/O type
431 * and the I/O we issued while in this system call
432 * collided with normal I/O operations... we'll
433 * delay in order to mitigate the impact of this
434 * task on the normal operation of the system
435 */
436 IOSleep(uthread->uu_lowpri_delay);
437 uthread->uu_lowpri_delay = 0;
438 }
439 if (kdebug_enable && (code != 180)) {
440 if (callp->sy_return_type == _SYSCALL_RET_SSIZE_T)
441 KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_EXCP_SC, code) | DBG_FUNC_END,
442 error, uthread->uu_rval[1], 0, 0, 0);
443 else
444 KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_EXCP_SC, code) | DBG_FUNC_END,
445 error, uthread->uu_rval[0], uthread->uu_rval[1], 0, 0);
446 }
447
448 thread_exception_return();
449 /* NOTREACHED */
450 }
451
452 /*
453 * Time of day and interval timer support.
454 *
455 * These routines provide the kernel entry points to get and set
456 * the time-of-day and per-process interval timers. Subroutines
457 * here provide support for adding and subtracting timeval structures
458 * and decrementing interval timers, optionally reloading the interval
459 * timers when they expire.
460 */
461 /* NOTE THIS implementation is for ppc architectures only.
462 * It is infrequently called, since the commpage intercepts
463 * most calls in user mode.
464 *
465 * XXX Y2038 bug because of assumed return of 32 bit seconds value, and
466 * XXX first parameter to clock_gettimeofday()
467 */
468 int
469 ppc_gettimeofday(__unused struct proc *p,
470 register struct ppc_gettimeofday_args *uap,
471 register_t *retval)
472 {
473 int error = 0;
474 extern lck_spin_t * tz_slock;
475
476 if (uap->tp)
477 clock_gettimeofday(&retval[0], &retval[1]);
478
479 if (uap->tzp) {
480 struct timezone ltz;
481
482 lck_spin_lock(tz_slock);
483 ltz = tz;
484 lck_spin_unlock(tz_slock);
485 error = copyout((caddr_t)&ltz, uap->tzp, sizeof (tz));
486 }
487
488 return (error);
489 }
490
#ifdef JOE_DEBUG
/*
 * joe_debug: debug-only helper that prints a diagnostic message.
 *
 * Fixed: the original relied on an implicit-int return type, which is
 * invalid since C99.  The call site in unix_syscall() has no prototype in
 * scope and therefore implicitly declares the function as returning int,
 * so the return type is made explicitly int (returning 0) rather than
 * void, to keep the definition consistent with that implicit declaration.
 */
int
joe_debug(char *p) {

        printf("%s\n", p);
        return 0;
}
#endif
497
498
499 /*
500 * WARNING - this is a temporary workaround for binary compatibility issues
501 * with anti-piracy software that relies on patching ptrace (3928003).
502 * This KPI will be removed in the system release after Tiger.
503 */
504 uintptr_t temp_patch_ptrace(uintptr_t new_ptrace)
505 {
506 struct sysent * callp;
507 sy_call_t * old_ptrace;
508
509 if (new_ptrace == 0)
510 return(0);
511
512 enter_funnel_section(kernel_flock);
513 callp = &sysent[26];
514 old_ptrace = callp->sy_call;
515
516 /* only allow one patcher of ptrace */
517 if (old_ptrace == (sy_call_t *) ptrace) {
518 callp->sy_call = (sy_call_t *) new_ptrace;
519 }
520 else {
521 old_ptrace = NULL;
522 }
523 exit_funnel_section( );
524
525 return((uintptr_t)old_ptrace);
526 }
527
528 void temp_unpatch_ptrace(void)
529 {
530 struct sysent * callp;
531
532 enter_funnel_section(kernel_flock);
533 callp = &sysent[26];
534 callp->sy_call = (sy_call_t *) ptrace;
535 exit_funnel_section( );
536
537 return;
538 }