]> git.saurik.com Git - apple/xnu.git/blob - bsd/dev/ppc/systemcalls.c
xnu-792.17.14.tar.gz
[apple/xnu.git] / bsd / dev / ppc / systemcalls.c
1 /*
2 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 #include <kern/task.h>
30 #include <kern/thread.h>
31 #include <kern/assert.h>
32 #include <kern/clock.h>
33 #include <kern/locks.h>
34 #include <kern/sched_prim.h>
35 #include <mach/machine/thread_status.h>
36 #include <ppc/savearea.h>
37
38 #include <sys/kernel.h>
39 #include <sys/vm.h>
40 #include <sys/proc_internal.h>
41 #include <sys/syscall.h>
42 #include <sys/systm.h>
43 #include <sys/user.h>
44 #include <sys/errno.h>
45 #include <sys/ktrace.h>
46 #include <sys/kdebug.h>
47 #include <sys/sysent.h>
48 #include <sys/sysproto.h>
49 #include <sys/kauth.h>
50
51 #include <bsm/audit_kernel.h>
52
/* BSD system call dispatcher, entered from the low-level trap handler. */
extern void
unix_syscall(struct savearea *regs);

/* Completion path for system calls that finish via a continuation. */
void
unix_syscall_return(int error);

/* Returns the saved user-mode register state for the given thread. */
extern struct savearea *
find_user_regs(
	thread_t act);

/* Funnel (coarse-grained kernel lock) enter/exit primitives. */
extern void enter_funnel_section(funnel_t *funnel_lock);
extern void exit_funnel_section(void);
64
/*
 * Function: unix_syscall
 *
 * Inputs:	regs - pointer to Process Control Block (the savearea holding
 *		       the caller's user-mode registers at the `sc` trap)
 *
 * Outputs:	none - this function never returns to its caller; it exits
 *		       to user mode via thread_exception_return().
 *
 * Dispatches one BSD system call on PowerPC: selects the sysent entry,
 * copies/munges arguments out of the saved registers, handles pthread
 * cancellation and the kernel funnel, invokes the handler, and writes the
 * result back into the savearea using the PPC cerror return convention.
 */
void
unix_syscall(struct savearea *regs)
{
	thread_t thread_act;
	struct uthread *uthread;
	struct proc *proc;
	struct sysent *callp;
	int error;
	unsigned short code;
	boolean_t flavor;
	int funnel_type;
	unsigned int cancel_enable;

	/*
	 * r0 == 0 selects the indirect flavor, syscall(number, args...):
	 * the call number arrives in r3 and the arguments start at r4.
	 * Otherwise the call number is in r0 and arguments start at r3.
	 */
	flavor = (((unsigned int)regs->save_r0) == 0)? 1: 0;

	if (flavor)
		code = regs->save_r3;
	else
		code = regs->save_r4, code = regs->save_r3, code = regs->save_r0;

	/*
	 * Emit a kdebug entry event with the first four arguments.
	 * NOTE(review): code 180 is excluded — presumably the kdebug trace
	 * syscall itself, to avoid self-tracing; confirm against syscall table.
	 */
	if (kdebug_enable && (code != 180)) {
		if (flavor)
			KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_EXCP_SC, code) | DBG_FUNC_START,
				regs->save_r4, regs->save_r5, regs->save_r6, regs->save_r7, 0);
		else
			KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_EXCP_SC, code) | DBG_FUNC_START,
				regs->save_r3, regs->save_r4, regs->save_r5, regs->save_r6, 0);
	}
	thread_act = current_thread();
	uthread = get_bsdthread_info(thread_act);

	/*
	 * NOTE(review): in a vfork child the task's bsd info still refers to
	 * the parent, so current_proc() is used to resolve the proper proc —
	 * verify against the vfork implementation.
	 */
	if (!(uthread->uu_flag & UT_VFORK))
		proc = (struct proc *)get_bsdtask_info(current_task());
	else
		proc = current_proc();

	/* Make sure there is a process associated with this task */
	if (proc == NULL) {
		/* No proc: fail the call with EPERM and tear the task down. */
		regs->save_r3 = (long long)EPERM;
		/* set the "pc" to execute cerror routine */
		regs->save_srr0 -= 4;
		task_terminate_internal(current_task());
		thread_exception_return();
		/* NOTREACHED */
	}

	/*
	 * Delayed binding of thread credential to process credential, if we
	 * are not running with an explicitly set thread credential.
	 */
	if (uthread->uu_ucred != proc->p_ucred &&
		(uthread->uu_flag & UT_SETUID) == 0) {
		kauth_cred_t old = uthread->uu_ucred;
		proc_lock(proc);
		uthread->uu_ucred = proc->p_ucred;
		/* take our own reference before dropping the proc lock */
		kauth_cred_ref(uthread->uu_ucred);
		proc_unlock(proc);
		if (old != NOCRED)
			kauth_cred_rele(old);
	}

	/* Remember where the user registers live for signal delivery etc. */
	uthread->uu_ar0 = (int *)regs;

	/*
	 * Out-of-range codes map to slot 63.
	 * NOTE(review): sysent[63] is presumably the nosys()/enosys entry —
	 * confirm against init_sysent.c.
	 */
	callp = (code >= nsysent) ? &sysent[63] : &sysent[code];

	if (callp->sy_narg != 0) {
		void *regsp;
		sy_munge_t *mungerp;

		if (IS_64BIT_PROCESS(proc)) {
			/* XXX Turn 64 bit unsafe calls into nosys() */
			if (callp->sy_funnel & UNSAFE_64BIT) {
				callp = &sysent[63];
				goto unsafe;
			}
			mungerp = callp->sy_arg_munge64;
		}
		else {
			mungerp = callp->sy_arg_munge32;
		}
		if ( !flavor) {
			regsp = (void *) &regs->save_r3;
		} else {
			/* indirect system call consumes an argument so only 7 are supported */
			if (callp->sy_narg > 7) {
				callp = &sysent[63];
				goto unsafe;
			}
			regsp = (void *) &regs->save_r4;
		}
		/* call syscall argument munger to copy in arguments (see xnu/bsd/dev/ppc/munge.s) */
		(*mungerp)(regsp, (void *) &uthread->uu_arg[0]);
	}

unsafe:
	/* From here on callp may have been redirected to the nosys slot. */
	cancel_enable = callp->sy_cancel;

	if (cancel_enable == _SYSCALL_CANCEL_NONE) {
		/* mark the thread as not at a cancellation point for this call */
		uthread->uu_flag |= UT_NOTCANCELPT;
	} else {
		/* a pending, enabled, not-yet-acted-on cancel request? */
		if((uthread->uu_flag & (UT_CANCELDISABLE | UT_CANCEL | UT_CANCELED)) == UT_CANCEL) {
			if (cancel_enable == _SYSCALL_CANCEL_PRE) {
				/* system call cancelled; return to handle cancellation */
				regs->save_r3 = (long long)EINTR;
				thread_exception_return();
				/* NOTREACHED */
			} else {
				/* cancel during the call: abort the thread safely */
				thread_abort_safely(thread_act);
			}
		}
	}

	funnel_type = (int)(callp->sy_funnel & FUNNEL_MASK);
	if (funnel_type == KERNEL_FUNNEL)
		enter_funnel_section(kernel_flock);

	uthread->uu_rval[0] = 0;

	/*
	 * r4 is volatile, if we set it to regs->save_r4 here the child
	 * will have parents r4 after execve
	 */
	uthread->uu_rval[1] = 0;

	error = 0;

	/*
	 * PPC runtime calls cerror after every unix system call, so
	 * assume no error and adjust the "pc" to skip this call.
	 * It will be set back to the cerror call if an error is detected.
	 */
	regs->save_srr0 += 4;

	if (KTRPOINT(proc, KTR_SYSCALL))
		ktrsyscall(proc, code, callp->sy_narg, uthread->uu_arg);

#ifdef JOE_DEBUG
	uthread->uu_iocount = 0;
	uthread->uu_vpindex = 0;
#endif
	AUDIT_SYSCALL_ENTER(code, proc, uthread);
	/* invoke the actual system call handler */
	error = (*(callp->sy_call))(proc, (void *)uthread->uu_arg, &(uthread->uu_rval[0]));
	AUDIT_SYSCALL_EXIT(error, proc, uthread);

#ifdef JOE_DEBUG
	if (uthread->uu_iocount)
		joe_debug("system call returned with uu_iocount != 0");
#endif
	/* the savearea may have moved (e.g. across exec); re-fetch it */
	regs = find_user_regs(thread_act);

	if (error == ERESTART) {
		/* back up over both the cerror skip and the sc instruction */
		regs->save_srr0 -= 8;
	} else if (error != EJUSTRETURN) {
		if (error) {
			regs->save_r3 = (long long)error;
			/* set the "pc" to execute cerror routine */
			regs->save_srr0 -= 4;
		} else { /* (not error) */
			/* marshal the return value(s) per declared return type */
			switch (callp->sy_return_type) {
			case _SYSCALL_RET_INT_T:
				regs->save_r3 = uthread->uu_rval[0];
				regs->save_r4 = uthread->uu_rval[1];
				break;
			case _SYSCALL_RET_UINT_T:
				regs->save_r3 = ((u_int)uthread->uu_rval[0]);
				regs->save_r4 = ((u_int)uthread->uu_rval[1]);
				break;
			case _SYSCALL_RET_OFF_T:
				/* off_t returns 64 bits split across two registers for 32 bit */
				/* process and in one register for 64 bit process */
				if (IS_64BIT_PROCESS(proc)) {
					u_int64_t *retp = (u_int64_t *)&uthread->uu_rval[0];
					regs->save_r3 = *retp;
					regs->save_r4 = 0;
				}
				else {
					regs->save_r3 = uthread->uu_rval[0];
					regs->save_r4 = uthread->uu_rval[1];
				}
				break;
			case _SYSCALL_RET_ADDR_T:
			case _SYSCALL_RET_SIZE_T:
			case _SYSCALL_RET_SSIZE_T:
				/* the variable length return types (user_addr_t, user_ssize_t,
				 * and user_size_t) are always the largest possible size in the
				 * kernel (we use uu_rval[0] and [1] as one 64 bit value).
				 */
				{
					user_addr_t *retp = (user_addr_t *)&uthread->uu_rval[0];
					regs->save_r3 = *retp;
					regs->save_r4 = 0;
				}
				break;
			case _SYSCALL_RET_NONE:
				break;
			default:
				panic("unix_syscall: unknown return type");
				break;
			}
		}
	}
	/* else (error == EJUSTRETURN) { nothing } */


	if (KTRPOINT(proc, KTR_SYSRET)) {
		switch(callp->sy_return_type) {
		case _SYSCALL_RET_ADDR_T:
		case _SYSCALL_RET_SIZE_T:
		case _SYSCALL_RET_SSIZE_T:
			/*
			 * Trace the value of the least significant bits,
			 * until we can revise the ktrace API safely.
			 */
			ktrsysret(proc, code, error, uthread->uu_rval[1]);
			break;
		default:
			ktrsysret(proc, code, error, uthread->uu_rval[0]);
			break;
		}
	}

	if (cancel_enable == _SYSCALL_CANCEL_NONE)
		uthread->uu_flag &= ~UT_NOTCANCELPT;

	/*
	 * NOTE(review): exit_funnel_section() runs even when the funnel was
	 * never entered (funnel_type != KERNEL_FUNNEL) — presumably a no-op
	 * when no funnel is held; verify its implementation.
	 */
	exit_funnel_section();

	if (uthread->uu_lowpri_delay) {
		/*
		 * task is marked as a low priority I/O type
		 * and the I/O we issued while in this system call
		 * collided with normal I/O operations... we'll
		 * delay in order to mitigate the impact of this
		 * task on the normal operation of the system
		 */
		IOSleep(uthread->uu_lowpri_delay);
		uthread->uu_lowpri_delay = 0;
	}
	if (kdebug_enable && (code != 180)) {

		if (callp->sy_return_type == _SYSCALL_RET_SSIZE_T)
			KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_EXCP_SC, code) | DBG_FUNC_END,
				error, uthread->uu_rval[1], 0, 0, 0);
		else
			KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_EXCP_SC, code) | DBG_FUNC_END,
				error, uthread->uu_rval[0], uthread->uu_rval[1], 0, 0);
	}

	thread_exception_return();
	/* NOTREACHED */
}
322
/*
 * Function: unix_syscall_return
 *
 * Inputs:	error - the error/status code the system call finished with
 *		        (0, an errno value, ERESTART, or EJUSTRETURN)
 *
 * Outputs:	none - exits to user mode via thread_exception_return().
 *
 * Completion path for system calls that return through a continuation
 * rather than falling out of unix_syscall(): re-derives the call number
 * from the saved registers and performs the same result marshaling,
 * ktrace, cancellation-flag and funnel cleanup as unix_syscall().
 */
void
unix_syscall_return(int error)
{
	thread_t thread_act;
	struct uthread *uthread;
	struct proc *proc;
	struct savearea *regs;
	unsigned short code;
	struct sysent *callp;
	int funnel_type;	/* NOTE(review): declared but never used here */
	unsigned int cancel_enable;

	thread_act = current_thread();
	proc = current_proc();
	uthread = get_bsdthread_info(thread_act);

	regs = find_user_regs(thread_act);

	/* r0 == 0 means the indirect flavor: the call number is in r3. */
	if (regs->save_r0 != 0)
		code = regs->save_r0;
	else
		code = regs->save_r3;

	callp = (code >= nsysent) ? &sysent[63] : &sysent[code];

	/*
	 * Get index into sysent table
	 */
	if (error == ERESTART) {
		/* back up over both the cerror skip and the sc instruction */
		regs->save_srr0 -= 8;
	} else if (error != EJUSTRETURN) {
		if (error) {
			regs->save_r3 = (long long)error;
			/* set the "pc" to execute cerror routine */
			regs->save_srr0 -= 4;
		} else { /* (not error) */
			/* marshal the return value(s) per declared return type */
			switch (callp->sy_return_type) {
			case _SYSCALL_RET_INT_T:
				regs->save_r3 = uthread->uu_rval[0];
				regs->save_r4 = uthread->uu_rval[1];
				break;
			case _SYSCALL_RET_UINT_T:
				regs->save_r3 = ((u_int)uthread->uu_rval[0]);
				regs->save_r4 = ((u_int)uthread->uu_rval[1]);
				break;
			case _SYSCALL_RET_OFF_T:
				/* off_t returns 64 bits split across two registers for 32 bit */
				/* process and in one register for 64 bit process */
				if (IS_64BIT_PROCESS(proc)) {
					u_int64_t *retp = (u_int64_t *)&uthread->uu_rval[0];
					regs->save_r3 = *retp;
					/*
					 * NOTE(review): unlike unix_syscall(), save_r4 is
					 * NOT zeroed here (nor in the ADDR/SIZE/SSIZE case
					 * below) — confirm whether that asymmetry is
					 * intentional or an oversight.
					 */
				}
				else {
					regs->save_r3 = uthread->uu_rval[0];
					regs->save_r4 = uthread->uu_rval[1];
				}
				break;
			case _SYSCALL_RET_ADDR_T:
			case _SYSCALL_RET_SIZE_T:
			case _SYSCALL_RET_SSIZE_T:
				/* the variable length return types (user_addr_t, user_ssize_t,
				 * and user_size_t) are always the largest possible size in the
				 * kernel (we use uu_rval[0] and [1] as one 64 bit value).
				 */
				{
					u_int64_t *retp = (u_int64_t *)&uthread->uu_rval[0];
					regs->save_r3 = *retp;
				}
				break;
			case _SYSCALL_RET_NONE:
				break;
			default:
				panic("unix_syscall: unknown return type");
				break;
			}
		}
	}
	/* else (error == EJUSTRETURN) { nothing } */

	if (KTRPOINT(proc, KTR_SYSRET)) {
		switch(callp->sy_return_type) {
		case _SYSCALL_RET_ADDR_T:
		case _SYSCALL_RET_SIZE_T:
		case _SYSCALL_RET_SSIZE_T:
			/*
			 * Trace the value of the least significant bits,
			 * until we can revise the ktrace API safely.
			 */
			ktrsysret(proc, code, error, uthread->uu_rval[1]);
			break;
		default:
			ktrsysret(proc, code, error, uthread->uu_rval[0]);
			break;
		}
	}

	cancel_enable = callp->sy_cancel;

	if (cancel_enable == _SYSCALL_CANCEL_NONE)
		uthread->uu_flag &= ~UT_NOTCANCELPT;

	/*
	 * NOTE(review): called unconditionally; presumably a no-op when no
	 * funnel is held — verify its implementation.
	 */
	exit_funnel_section();

	if (uthread->uu_lowpri_delay) {
		/*
		 * task is marked as a low priority I/O type
		 * and the I/O we issued while in this system call
		 * collided with normal I/O operations... we'll
		 * delay in order to mitigate the impact of this
		 * task on the normal operation of the system
		 */
		IOSleep(uthread->uu_lowpri_delay);
		uthread->uu_lowpri_delay = 0;
	}
	/* kdebug exit event; code 180 (the trace call itself) is excluded */
	if (kdebug_enable && (code != 180)) {
		if (callp->sy_return_type == _SYSCALL_RET_SSIZE_T)
			KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_EXCP_SC, code) | DBG_FUNC_END,
				error, uthread->uu_rval[1], 0, 0, 0);
		else
			KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_EXCP_SC, code) | DBG_FUNC_END,
				error, uthread->uu_rval[0], uthread->uu_rval[1], 0, 0);
	}

	thread_exception_return();
	/* NOTREACHED */
}
449
450 /*
451 * Time of day and interval timer support.
452 *
453 * These routines provide the kernel entry points to get and set
454 * the time-of-day and per-process interval timers. Subroutines
455 * here provide support for adding and subtracting timeval structures
456 * and decrementing interval timers, optionally reloading the interval
457 * timers when they expire.
458 */
459 /* NOTE THIS implementation is for ppc architectures only.
460 * It is infrequently called, since the commpage intercepts
461 * most calls in user mode.
462 *
463 * XXX Y2038 bug because of assumed return of 32 bit seconds value, and
464 * XXX first parameter to clock_gettimeofday()
465 */
466 int
467 ppc_gettimeofday(__unused struct proc *p,
468 register struct ppc_gettimeofday_args *uap,
469 register_t *retval)
470 {
471 int error = 0;
472 extern lck_spin_t * tz_slock;
473
474 if (uap->tp)
475 clock_gettimeofday(&retval[0], &retval[1]);
476
477 if (uap->tzp) {
478 struct timezone ltz;
479
480 lck_spin_lock(tz_slock);
481 ltz = tz;
482 lck_spin_unlock(tz_slock);
483 error = copyout((caddr_t)&ltz, uap->tzp, sizeof (tz));
484 }
485
486 return (error);
487 }
488
489 #ifdef JOE_DEBUG
/*
 * joe_debug - print a one-line debug diagnostic.
 *
 * p: NUL-terminated message to emit via printf.
 *
 * The original definition had no return type (implicit int, removed in
 * C99). Its call site in unix_syscall() precedes this definition with no
 * prototype, so an implicit declaration there assumes an int return;
 * declare the return type explicitly as int (not void) to stay consistent
 * with that implicit declaration, and return 0 so the value is defined.
 */
int
joe_debug(char *p)
{
	printf("%s\n", p);
	return 0;
}
494 #endif
495
496
497 /*
498 * WARNING - this is a temporary workaround for binary compatibility issues
499 * with anti-piracy software that relies on patching ptrace (3928003).
500 * This KPI will be removed in the system release after Tiger.
501 */
502 uintptr_t temp_patch_ptrace(uintptr_t new_ptrace)
503 {
504 struct sysent * callp;
505 sy_call_t * old_ptrace;
506
507 if (new_ptrace == 0)
508 return(0);
509
510 enter_funnel_section(kernel_flock);
511 callp = &sysent[26];
512 old_ptrace = callp->sy_call;
513
514 /* only allow one patcher of ptrace */
515 if (old_ptrace == (sy_call_t *) ptrace) {
516 callp->sy_call = (sy_call_t *) new_ptrace;
517 }
518 else {
519 old_ptrace = NULL;
520 }
521 exit_funnel_section( );
522
523 return((uintptr_t)old_ptrace);
524 }
525
526 void temp_unpatch_ptrace(void)
527 {
528 struct sysent * callp;
529
530 enter_funnel_section(kernel_flock);
531 callp = &sysent[26];
532 callp->sy_call = (sy_call_t *) ptrace;
533 exit_funnel_section( );
534
535 return;
536 }