/*
 * bsd/dev/ppc/systemcalls.c
 * (from the xnu-792.6.76 source release of apple/xnu)
 */
/*
 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
9bccf70c
A
22
23#include <kern/task.h>
24#include <kern/thread.h>
9bccf70c 25#include <kern/assert.h>
55e303ae 26#include <kern/clock.h>
91447636
A
27#include <kern/locks.h>
28#include <kern/sched_prim.h>
9bccf70c
A
29#include <mach/machine/thread_status.h>
30#include <ppc/savearea.h>
31
32#include <sys/kernel.h>
33#include <sys/vm.h>
91447636 34#include <sys/proc_internal.h>
9bccf70c
A
35#include <sys/syscall.h>
36#include <sys/systm.h>
37#include <sys/user.h>
38#include <sys/errno.h>
39#include <sys/ktrace.h>
40#include <sys/kdebug.h>
91447636
A
41#include <sys/sysent.h>
42#include <sys/sysproto.h>
43#include <sys/kauth.h>
e5568f75
A
44
45#include <bsm/audit_kernel.h>
9bccf70c
A
46
47extern void
91447636
A
48unix_syscall(struct savearea *regs);
49void
50unix_syscall_return(int error);
9bccf70c
A
51
52extern struct savearea *
53find_user_regs(
91447636 54 thread_t act);
9bccf70c 55
55e303ae
A
56extern void enter_funnel_section(funnel_t *funnel_lock);
57extern void exit_funnel_section(void);
9bccf70c 58
/*
 * Function:	unix_syscall
 *
 * Inputs:	regs - pointer to Process Control Block (saved user registers)
 *
 * Outputs:	none (exits to user mode via thread_exception_return())
 */
9bccf70c 66void
91447636 67unix_syscall(struct savearea *regs)
9bccf70c 68{
91447636 69 thread_t thread_act;
9bccf70c
A
70 struct uthread *uthread;
71 struct proc *proc;
72 struct sysent *callp;
73 int error;
74 unsigned short code;
75 boolean_t flavor;
76 int funnel_type;
91447636 77 unsigned int cancel_enable;
1c79356b 78
91447636 79 flavor = (((unsigned int)regs->save_r0) == 0)? 1: 0;
55e303ae
A
80
81 if (flavor)
82 code = regs->save_r3;
83 else
84 code = regs->save_r0;
85
86 if (kdebug_enable && (code != 180)) {
87 if (flavor)
88 KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_EXCP_SC, code) | DBG_FUNC_START,
89 regs->save_r4, regs->save_r5, regs->save_r6, regs->save_r7, 0);
90 else
91 KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_EXCP_SC, code) | DBG_FUNC_START,
92 regs->save_r3, regs->save_r4, regs->save_r5, regs->save_r6, 0);
93 }
91447636 94 thread_act = current_thread();
9bccf70c 95 uthread = get_bsdthread_info(thread_act);
1c79356b 96
91447636 97 if (!(uthread->uu_flag & UT_VFORK))
9bccf70c
A
98 proc = (struct proc *)get_bsdtask_info(current_task());
99 else
100 proc = current_proc();
1c79356b 101
cc9f6e38
A
102 /* Make sure there is a process associated with this task */
103 if (proc == NULL) {
104 regs->save_r3 = (long long)EPERM;
105 /* set the "pc" to execute cerror routine */
106 regs->save_srr0 -= 4;
107 task_terminate_internal(current_task());
108 thread_exception_return();
109 /* NOTREACHED */
110 }
111
91447636
A
112 /*
113 * Delayed binding of thread credential to process credential, if we
114 * are not running with an explicitly set thread credential.
115 */
116 if (uthread->uu_ucred != proc->p_ucred &&
117 (uthread->uu_flag & UT_SETUID) == 0) {
118 kauth_cred_t old = uthread->uu_ucred;
119 proc_lock(proc);
120 uthread->uu_ucred = proc->p_ucred;
121 kauth_cred_ref(uthread->uu_ucred);
122 proc_unlock(proc);
123 if (old != NOCRED)
124 kauth_cred_rele(old);
125 }
126
9bccf70c 127 uthread->uu_ar0 = (int *)regs;
1c79356b 128
9bccf70c 129 callp = (code >= nsysent) ? &sysent[63] : &sysent[code];
1c79356b 130
9bccf70c 131 if (callp->sy_narg != 0) {
91447636
A
132 void *regsp;
133 sy_munge_t *mungerp;
134
135 if (IS_64BIT_PROCESS(proc)) {
136 /* XXX Turn 64 bit unsafe calls into nosys() */
137 if (callp->sy_funnel & UNSAFE_64BIT) {
138 callp = &sysent[63];
139 goto unsafe;
140 }
141 mungerp = callp->sy_arg_munge64;
142 }
143 else {
144 mungerp = callp->sy_arg_munge32;
145 }
9bccf70c 146 if ( !flavor) {
91447636 147 regsp = (void *) &regs->save_r3;
9bccf70c 148 } else {
91447636
A
149 /* indirect system call consumes an argument so only 7 are supported */
150 if (callp->sy_narg > 7) {
151 callp = &sysent[63];
152 goto unsafe;
153 }
154 regsp = (void *) &regs->save_r4;
9bccf70c 155 }
91447636
A
156 /* call syscall argument munger to copy in arguments (see xnu/bsd/dev/ppc/munge.s) */
157 (*mungerp)(regsp, (void *) &uthread->uu_arg[0]);
9bccf70c 158 }
1c79356b 159
91447636
A
160unsafe:
161 cancel_enable = callp->sy_cancel;
162
163 if (cancel_enable == _SYSCALL_CANCEL_NONE) {
164 uthread->uu_flag |= UT_NOTCANCELPT;
165 } else {
166 if((uthread->uu_flag & (UT_CANCELDISABLE | UT_CANCEL | UT_CANCELED)) == UT_CANCEL) {
167 if (cancel_enable == _SYSCALL_CANCEL_PRE) {
168 /* system call cancelled; return to handle cancellation */
169 regs->save_r3 = (long long)EINTR;
170 thread_exception_return();
171 /* NOTREACHED */
172 } else {
173 thread_abort_safely(thread_act);
174 }
175 }
176 }
177
178 funnel_type = (int)(callp->sy_funnel & FUNNEL_MASK);
55e303ae 179 if (funnel_type == KERNEL_FUNNEL)
9bccf70c 180 enter_funnel_section(kernel_flock);
9bccf70c 181
9bccf70c 182 uthread->uu_rval[0] = 0;
1c79356b 183
1c79356b 184 /*
9bccf70c
A
185 * r4 is volatile, if we set it to regs->save_r4 here the child
186 * will have parents r4 after execve
187 */
188 uthread->uu_rval[1] = 0;
1c79356b 189
9bccf70c 190 error = 0;
1c79356b 191
9bccf70c
A
192 /*
193 * PPC runtime calls cerror after every unix system call, so
194 * assume no error and adjust the "pc" to skip this call.
195 * It will be set back to the cerror call if an error is detected.
196 */
197 regs->save_srr0 += 4;
1c79356b 198
9bccf70c 199 if (KTRPOINT(proc, KTR_SYSCALL))
91447636 200 ktrsyscall(proc, code, callp->sy_narg, uthread->uu_arg);
1c79356b 201
91447636
A
202#ifdef JOE_DEBUG
203 uthread->uu_iocount = 0;
204 uthread->uu_vpindex = 0;
205#endif
e5568f75 206 AUDIT_SYSCALL_ENTER(code, proc, uthread);
9bccf70c 207 error = (*(callp->sy_call))(proc, (void *)uthread->uu_arg, &(uthread->uu_rval[0]));
e5568f75 208 AUDIT_SYSCALL_EXIT(error, proc, uthread);
9bccf70c 209
91447636
A
210#ifdef JOE_DEBUG
211 if (uthread->uu_iocount)
212 joe_debug("system call returned with uu_iocount != 0");
213#endif
9bccf70c 214 regs = find_user_regs(thread_act);
1c79356b 215
9bccf70c
A
216 if (error == ERESTART) {
217 regs->save_srr0 -= 8;
218 } else if (error != EJUSTRETURN) {
219 if (error) {
55e303ae 220 regs->save_r3 = (long long)error;
9bccf70c
A
221 /* set the "pc" to execute cerror routine */
222 regs->save_srr0 -= 4;
223 } else { /* (not error) */
91447636
A
224 switch (callp->sy_return_type) {
225 case _SYSCALL_RET_INT_T:
226 regs->save_r3 = uthread->uu_rval[0];
227 regs->save_r4 = uthread->uu_rval[1];
228 break;
229 case _SYSCALL_RET_UINT_T:
230 regs->save_r3 = ((u_int)uthread->uu_rval[0]);
231 regs->save_r4 = ((u_int)uthread->uu_rval[1]);
232 break;
233 case _SYSCALL_RET_OFF_T:
234 /* off_t returns 64 bits split across two registers for 32 bit */
235 /* process and in one register for 64 bit process */
236 if (IS_64BIT_PROCESS(proc)) {
237 u_int64_t *retp = (u_int64_t *)&uthread->uu_rval[0];
238 regs->save_r3 = *retp;
239 regs->save_r4 = 0;
240 }
241 else {
242 regs->save_r3 = uthread->uu_rval[0];
243 regs->save_r4 = uthread->uu_rval[1];
244 }
245 break;
246 case _SYSCALL_RET_ADDR_T:
247 case _SYSCALL_RET_SIZE_T:
248 case _SYSCALL_RET_SSIZE_T:
249 /* the variable length return types (user_addr_t, user_ssize_t,
250 * and user_size_t) are always the largest possible size in the
251 * kernel (we use uu_rval[0] and [1] as one 64 bit value).
252 */
253 {
254 user_addr_t *retp = (user_addr_t *)&uthread->uu_rval[0];
255 regs->save_r3 = *retp;
256 regs->save_r4 = 0;
257 }
258 break;
259 case _SYSCALL_RET_NONE:
260 break;
261 default:
262 panic("unix_syscall: unknown return type");
263 break;
264 }
9bccf70c
A
265 }
266 }
267 /* else (error == EJUSTRETURN) { nothing } */
1c79356b 268
1c79356b 269
91447636
A
270 if (KTRPOINT(proc, KTR_SYSRET)) {
271 switch(callp->sy_return_type) {
272 case _SYSCALL_RET_ADDR_T:
273 case _SYSCALL_RET_SIZE_T:
274 case _SYSCALL_RET_SSIZE_T:
275 /*
276 * Trace the value of the least significant bits,
277 * until we can revise the ktrace API safely.
278 */
279 ktrsysret(proc, code, error, uthread->uu_rval[1]);
280 break;
281 default:
282 ktrsysret(proc, code, error, uthread->uu_rval[0]);
283 break;
284 }
285 }
9bccf70c 286
91447636
A
287 if (cancel_enable == _SYSCALL_CANCEL_NONE)
288 uthread->uu_flag &= ~UT_NOTCANCELPT;
289
290 exit_funnel_section();
291
292 if (uthread->uu_lowpri_delay) {
293 /*
294 * task is marked as a low priority I/O type
295 * and the I/O we issued while in this system call
296 * collided with normal I/O operations... we'll
297 * delay in order to mitigate the impact of this
298 * task on the normal operation of the system
299 */
300 IOSleep(uthread->uu_lowpri_delay);
301 uthread->uu_lowpri_delay = 0;
302 }
9bccf70c 303 if (kdebug_enable && (code != 180)) {
91447636
A
304
305 if (callp->sy_return_type == _SYSCALL_RET_SSIZE_T)
306 KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_EXCP_SC, code) | DBG_FUNC_END,
307 error, uthread->uu_rval[1], 0, 0, 0);
308 else
309 KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_EXCP_SC, code) | DBG_FUNC_END,
310 error, uthread->uu_rval[0], uthread->uu_rval[1], 0, 0);
9bccf70c
A
311 }
312
313 thread_exception_return();
314 /* NOTREACHED */
315}
316
91447636
A
/*
 * unix_syscall_return -- completion path for system calls that finish
 * through this entry point instead of returning to unix_syscall()
 * (the handler passes its errno-style result, or the special values
 * ERESTART / EJUSTRETURN, as 'error').  Performs the same return-value
 * marshalling / ktrace / funnel / kdebug epilogue as unix_syscall(),
 * then exits to user mode.  Never returns.
 */
void
unix_syscall_return(int error)
{
	thread_t	thread_act;
	struct uthread	*uthread;
	struct proc	*proc;
	struct savearea	*regs;
	unsigned short	code;
	struct sysent	*callp;
	int		funnel_type;	/* NOTE(review): declared but unused in this path */
	unsigned int	cancel_enable;

	thread_act = current_thread();
	proc = current_proc();
	uthread = get_bsdthread_info(thread_act);

	regs = find_user_regs(thread_act);

	/* recover the syscall number: r0 for direct calls, r3 for indirect (r0 == 0) */
	if (regs->save_r0 != 0)
		code = regs->save_r0;
	else
		code = regs->save_r3;

	/* out-of-range codes map to entry 63 (nosys), as in unix_syscall() */
	callp = (code >= nsysent) ? &sysent[63] : &sysent[code];

	/*
	 * Propagate the result into the saved user registers.
	 */
	if (error == ERESTART) {
		/* back up the pc so the sc instruction is re-executed */
		regs->save_srr0 -= 8;
	} else if (error != EJUSTRETURN) {
		if (error) {
			regs->save_r3 = (long long)error;
			/* set the "pc" to execute cerror routine */
			regs->save_srr0 -= 4;
		} else { /* (not error) */
			/* marshal return values into r3/r4 by declared return type (cf. unix_syscall()) */
			switch (callp->sy_return_type) {
			case _SYSCALL_RET_INT_T:
				regs->save_r3 = uthread->uu_rval[0];
				regs->save_r4 = uthread->uu_rval[1];
				break;
			case _SYSCALL_RET_UINT_T:
				regs->save_r3 = ((u_int)uthread->uu_rval[0]);
				regs->save_r4 = ((u_int)uthread->uu_rval[1]);
				break;
			case _SYSCALL_RET_OFF_T:
				/* off_t returns 64 bits split across two registers for 32 bit */
				/* process and in one register for 64 bit process */
				if (IS_64BIT_PROCESS(proc)) {
					u_int64_t *retp = (u_int64_t *)&uthread->uu_rval[0];
					regs->save_r3 = *retp;
					/* NOTE(review): unlike unix_syscall(), save_r4 is
					 * not zeroed here -- confirm this is intentional. */
				}
				else {
					regs->save_r3 = uthread->uu_rval[0];
					regs->save_r4 = uthread->uu_rval[1];
				}
				break;
			case _SYSCALL_RET_ADDR_T:
			case _SYSCALL_RET_SIZE_T:
			case _SYSCALL_RET_SSIZE_T:
				/* the variable length return types (user_addr_t, user_ssize_t,
				 * and user_size_t) are always the largest possible size in the
				 * kernel (we use uu_rval[0] and [1] as one 64 bit value).
				 */
				{
					u_int64_t *retp = (u_int64_t *)&uthread->uu_rval[0];
					regs->save_r3 = *retp;
				}
				break;
			case _SYSCALL_RET_NONE:
				break;
			default:
				panic("unix_syscall: unknown return type");
				break;
			}
		}
	}
	/* else (error == EJUSTRETURN) { nothing } */

	if (KTRPOINT(proc, KTR_SYSRET)) {
		switch(callp->sy_return_type) {
		case _SYSCALL_RET_ADDR_T:
		case _SYSCALL_RET_SIZE_T:
		case _SYSCALL_RET_SSIZE_T:
			/*
			 * Trace the value of the least significant bits,
			 * until we can revise the ktrace API safely.
			 */
			ktrsysret(proc, code, error, uthread->uu_rval[1]);
			break;
		default:
			ktrsysret(proc, code, error, uthread->uu_rval[0]);
			break;
		}
	}

	cancel_enable = callp->sy_cancel;

	/* undo the cancellation shield set on entry for non-cancellation points */
	if (cancel_enable == _SYSCALL_CANCEL_NONE)
		uthread->uu_flag &= ~UT_NOTCANCELPT;

	exit_funnel_section();

	if (uthread->uu_lowpri_delay) {
		/*
		 * task is marked as a low priority I/O type
		 * and the I/O we issued while in this system call
		 * collided with normal I/O operations... we'll
		 * delay in order to mitigate the impact of this
		 * task on the normal operation of the system
		 */
		IOSleep(uthread->uu_lowpri_delay);
		uthread->uu_lowpri_delay = 0;
	}
	/* syscall-exit tracepoint, matching the entry event from unix_syscall() */
	if (kdebug_enable && (code != 180)) {
		if (callp->sy_return_type == _SYSCALL_RET_SSIZE_T)
			KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_EXCP_SC, code) | DBG_FUNC_END,
			      error, uthread->uu_rval[1], 0, 0, 0);
		else
			KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_EXCP_SC, code) | DBG_FUNC_END,
			      error, uthread->uu_rval[0], uthread->uu_rval[1], 0, 0);
	}

	/* return to user mode; does not come back here */
	thread_exception_return();
	/* NOTREACHED */
}
443
/*
 * Time of day and interval timer support.
 *
 * These routines provide the kernel entry points to get and set
 * the time-of-day and per-process interval timers.  Subroutines
 * here provide support for adding and subtracting timeval structures
 * and decrementing interval timers, optionally reloading the interval
 * timers when they expire.
 */

/* NOTE: THIS implementation is for PPC architectures only.
 * It is infrequently called, since the commpage intercepts
 * most calls in user mode.
 *
 * XXX Y2038 bug because of assumed return of 32 bit seconds value, and
 * XXX first parameter to clock_gettimeofday()
 */
9bccf70c 460int
91447636
A
461ppc_gettimeofday(__unused struct proc *p,
462 register struct ppc_gettimeofday_args *uap,
463 register_t *retval)
9bccf70c 464{
9bccf70c 465 int error = 0;
91447636 466 extern lck_spin_t * tz_slock;
55e303ae
A
467
468 if (uap->tp)
469 clock_gettimeofday(&retval[0], &retval[1]);
1c79356b 470
9bccf70c 471 if (uap->tzp) {
55e303ae 472 struct timezone ltz;
55e303ae 473
91447636 474 lck_spin_lock(tz_slock);
9bccf70c 475 ltz = tz;
91447636
A
476 lck_spin_unlock(tz_slock);
477 error = copyout((caddr_t)&ltz, uap->tzp, sizeof (tz));
9bccf70c
A
478 }
479
55e303ae 480 return (error);
1c79356b
A
481}
482
91447636
A
#ifdef JOE_DEBUG
/*
 * joe_debug -- print a diagnostic string to the kernel log.
 *
 * Fixes: the original used a pre-C99 implicit-int return type and fell
 * off the end without returning a value (undefined behavior if the
 * result were ever read).  The return type is now explicit and a value
 * is returned; callers (which ignore the result) are unaffected.
 */
int
joe_debug(char *p)
{

	printf("%s\n", p);
	return (0);
}
#endif
489
490
/*
 * WARNING - this is a temporary workaround for binary compatibility issues
 * with anti-piracy software that relies on patching ptrace (3928003).
 * This KPI will be removed in the system release after Tiger.
 */
496uintptr_t temp_patch_ptrace(uintptr_t new_ptrace)
497{
498 struct sysent * callp;
499 sy_call_t * old_ptrace;
500
501 if (new_ptrace == 0)
502 return(0);
503
504 enter_funnel_section(kernel_flock);
505 callp = &sysent[26];
506 old_ptrace = callp->sy_call;
507
508 /* only allow one patcher of ptrace */
509 if (old_ptrace == (sy_call_t *) ptrace) {
510 callp->sy_call = (sy_call_t *) new_ptrace;
511 }
512 else {
513 old_ptrace = NULL;
514 }
515 exit_funnel_section( );
516
517 return((uintptr_t)old_ptrace);
518}
519
520void temp_unpatch_ptrace(void)
521{
522 struct sysent * callp;
523
524 enter_funnel_section(kernel_flock);
525 callp = &sysent[26];
526 callp->sy_call = (sy_call_t *) ptrace;
527 exit_funnel_section( );
528
529 return;
530}