/*
 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
23
24 #include <kern/task.h>
25 #include <kern/thread.h>
26 #include <kern/assert.h>
27 #include <kern/clock.h>
28 #include <kern/locks.h>
29 #include <kern/sched_prim.h>
30 #include <mach/machine/thread_status.h>
31 #include <ppc/savearea.h>
32
33 #include <sys/kernel.h>
34 #include <sys/vm.h>
35 #include <sys/proc_internal.h>
36 #include <sys/syscall.h>
37 #include <sys/systm.h>
38 #include <sys/user.h>
39 #include <sys/errno.h>
40 #include <sys/ktrace.h>
41 #include <sys/kdebug.h>
42 #include <sys/sysent.h>
43 #include <sys/sysproto.h>
44 #include <sys/kauth.h>
45
46 #include <bsm/audit_kernel.h>
47
48 extern void
49 unix_syscall(struct savearea *regs);
50 void
51 unix_syscall_return(int error);
52
53 extern struct savearea *
54 find_user_regs(
55 thread_t act);
56
57 extern void enter_funnel_section(funnel_t *funnel_lock);
58 extern void exit_funnel_section(void);
59
/*
 * Function: unix_syscall
 *
 * Inputs:	regs - pointer to Process Control Block (PPC save area holding
 *		       the caller's register state at trap time)
 *
 * Outputs:	none - never returns to its caller; exits to user space via
 *		       thread_exception_return()
 *
 * BSD system call dispatcher for the PowerPC trap path.  Decodes the
 * syscall number (direct in r0, or indirect via syscall(2) with r0 == 0
 * and the number in r3), munges the user register arguments into
 * uthread->uu_arg, dispatches through the sysent table, then writes the
 * result registers / error state back into the save area.
 */
void
unix_syscall(struct savearea *regs)
{
	thread_t		thread_act;
	struct uthread		*uthread;
	struct proc		*proc;
	struct sysent		*callp;
	int			error;
	unsigned short		code;
	boolean_t		flavor;
	int			funnel_type;
	unsigned int		cancel_enable;

	/*
	 * r0 == 0 marks the indirect form (syscall(2)): the real syscall
	 * number is in r3 and the arguments shift up one register.
	 */
	flavor = (((unsigned int)regs->save_r0) == 0)? 1: 0;

	if (flavor)
		code = regs->save_r3;
	else
		code = regs->save_r0;

	/* code 180 is excluded from kdebug tracing to avoid tracing the tracer */
	if (kdebug_enable && (code != 180)) {
		if (flavor)
			KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_EXCP_SC, code) | DBG_FUNC_START,
				regs->save_r4, regs->save_r5, regs->save_r6, regs->save_r7, 0);
		else
			KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_EXCP_SC, code) | DBG_FUNC_START,
				regs->save_r3, regs->save_r4, regs->save_r5, regs->save_r6, 0);
	}
	thread_act = current_thread();
	uthread = get_bsdthread_info(thread_act);

	/*
	 * During vfork the child thread runs on the parent's task, so the
	 * proc must come from the uthread (current_proc()) rather than
	 * from the task.
	 */
	if (!(uthread->uu_flag & UT_VFORK))
		proc = (struct proc *)get_bsdtask_info(current_task());
	else
		proc = current_proc();

	/* Make sure there is a process associated with this task */
	if (proc == NULL) {
		regs->save_r3 = (long long)EPERM;
		/* set the "pc" to execute cerror routine */
		regs->save_srr0 -= 4;
		task_terminate_internal(current_task());
		thread_exception_return();
		/* NOTREACHED */
	}

	/*
	 * Delayed binding of thread credential to process credential, if we
	 * are not running with an explicitly set thread credential.
	 */
	if (uthread->uu_ucred != proc->p_ucred &&
	    (uthread->uu_flag & UT_SETUID) == 0) {
		kauth_cred_t old = uthread->uu_ucred;
		proc_lock(proc);
		uthread->uu_ucred = proc->p_ucred;
		kauth_cred_ref(uthread->uu_ucred);
		proc_unlock(proc);
		/* drop the old thread cred only after the new one is referenced */
		if (old != NOCRED)
			kauth_cred_rele(old);
	}

	/* stash the user register state for signal delivery / debuggers */
	uthread->uu_ar0 = (int *)regs;

	/* out-of-range codes dispatch to sysent[63] (the nosys/enosys slot) */
	callp = (code >= nsysent) ? &sysent[63] : &sysent[code];

	if (callp->sy_narg != 0) {
		void		*regsp;
		sy_munge_t	*mungerp;

		if (IS_64BIT_PROCESS(proc)) {
			/* XXX Turn 64 bit unsafe calls into nosys() */
			if (callp->sy_funnel & UNSAFE_64BIT) {
				callp = &sysent[63];
				goto unsafe;
			}
			mungerp = callp->sy_arg_munge64;
		}
		else {
			mungerp = callp->sy_arg_munge32;
		}
		if ( !flavor) {
			/* direct call: arguments start at r3 */
			regsp = (void *) &regs->save_r3;
		} else {
			/* indirect system call consumes an argument so only 7 are supported */
			if (callp->sy_narg > 7) {
				callp = &sysent[63];
				goto unsafe;
			}
			/* indirect call: r3 held the code, arguments start at r4 */
			regsp = (void *) &regs->save_r4;
		}
		/* call syscall argument munger to copy in arguments (see xnu/bsd/dev/ppc/munge.s) */
		(*mungerp)(regsp, (void *) &uthread->uu_arg[0]);
	}

unsafe:
	cancel_enable = callp->sy_cancel;

	if (cancel_enable == _SYSCALL_CANCEL_NONE) {
		/* mark the thread as at a non-cancellation point for the duration */
		uthread->uu_flag |= UT_NOTCANCELPT;
	} else {
		/* a cancel is pending, cancellation is enabled, and not already canceled */
		if((uthread->uu_flag & (UT_CANCELDISABLE | UT_CANCEL | UT_CANCELED)) == UT_CANCEL) {
			if (cancel_enable == _SYSCALL_CANCEL_PRE) {
				/* system call cancelled; return to handle cancellation */
				regs->save_r3 = (long long)EINTR;
				thread_exception_return();
				/* NOTREACHED */
			} else {
				/* _SYSCALL_CANCEL_POST: let the call start, abort it safely */
				thread_abort_safely(thread_act);
			}
		}
	}

	funnel_type = (int)(callp->sy_funnel & FUNNEL_MASK);
	if (funnel_type == KERNEL_FUNNEL)
		enter_funnel_section(kernel_flock);

	uthread->uu_rval[0] = 0;

	/*
	 * r4 is volatile, if we set it to regs->save_r4 here the child
	 * will have parents r4 after execve
	 */
	uthread->uu_rval[1] = 0;

	error = 0;

	/*
	 * PPC runtime calls cerror after every unix system call, so
	 * assume no error and adjust the "pc" to skip this call.
	 * It will be set back to the cerror call if an error is detected.
	 */
	regs->save_srr0 += 4;

	if (KTRPOINT(proc, KTR_SYSCALL))
		ktrsyscall(proc, code, callp->sy_narg, uthread->uu_arg);

#ifdef JOE_DEBUG
	uthread->uu_iocount = 0;
	uthread->uu_vpindex = 0;
#endif
	AUDIT_SYSCALL_ENTER(code, proc, uthread);
	/* dispatch to the actual system call implementation */
	error = (*(callp->sy_call))(proc, (void *)uthread->uu_arg, &(uthread->uu_rval[0]));
	AUDIT_SYSCALL_EXIT(error, proc, uthread);

#ifdef JOE_DEBUG
	if (uthread->uu_iocount)
		joe_debug("system call returned with uu_iocount != 0");
#endif
	/* re-fetch the save area: it may have moved during the call (e.g. exec) */
	regs = find_user_regs(thread_act);

	if (error == ERESTART) {
		/* back up over the sc instruction plus the cerror skip */
		regs->save_srr0 -= 8;
	} else if (error != EJUSTRETURN) {
		if (error) {
			regs->save_r3 = (long long)error;
			/* set the "pc" to execute cerror routine */
			regs->save_srr0 -= 4;
		} else { /* (not error) */
			switch (callp->sy_return_type) {
			case _SYSCALL_RET_INT_T:
				regs->save_r3 = uthread->uu_rval[0];
				regs->save_r4 = uthread->uu_rval[1];
				break;
			case _SYSCALL_RET_UINT_T:
				regs->save_r3 = ((u_int)uthread->uu_rval[0]);
				regs->save_r4 = ((u_int)uthread->uu_rval[1]);
				break;
			case _SYSCALL_RET_OFF_T:
				/* off_t returns 64 bits split across two registers for 32 bit */
				/* process and in one register for 64 bit process */
				if (IS_64BIT_PROCESS(proc)) {
					u_int64_t *retp = (u_int64_t *)&uthread->uu_rval[0];
					regs->save_r3 = *retp;
					regs->save_r4 = 0;
				}
				else {
					regs->save_r3 = uthread->uu_rval[0];
					regs->save_r4 = uthread->uu_rval[1];
				}
				break;
			case _SYSCALL_RET_ADDR_T:
			case _SYSCALL_RET_SIZE_T:
			case _SYSCALL_RET_SSIZE_T:
				/* the variable length return types (user_addr_t, user_ssize_t,
				 * and user_size_t) are always the largest possible size in the
				 * kernel (we use uu_rval[0] and [1] as one 64 bit value).
				 */
				{
					user_addr_t *retp = (user_addr_t *)&uthread->uu_rval[0];
					regs->save_r3 = *retp;
					regs->save_r4 = 0;
				}
				break;
			case _SYSCALL_RET_NONE:
				break;
			default:
				panic("unix_syscall: unknown return type");
				break;
			}
		}
	}
	/* else  (error == EJUSTRETURN) { nothing } */


	if (KTRPOINT(proc, KTR_SYSRET)) {
		switch(callp->sy_return_type) {
		case _SYSCALL_RET_ADDR_T:
		case _SYSCALL_RET_SIZE_T:
		case _SYSCALL_RET_SSIZE_T:
			/*
			 * Trace the value of the least significant bits,
			 * until we can revise the ktrace API safely.
			 */
			ktrsysret(proc, code, error, uthread->uu_rval[1]);
			break;
		default:
			ktrsysret(proc, code, error, uthread->uu_rval[0]);
			break;
		}
	}

	if (cancel_enable == _SYSCALL_CANCEL_NONE)
		uthread->uu_flag &= ~UT_NOTCANCELPT;

	/*
	 * NOTE(review): called unconditionally even though the funnel is only
	 * entered above when funnel_type == KERNEL_FUNNEL -- presumably
	 * exit_funnel_section() is a no-op when no funnel is held; confirm
	 * against its implementation.
	 */
	exit_funnel_section();

	if (uthread->uu_lowpri_delay) {
		/*
		 * task is marked as a low priority I/O type
		 * and the I/O we issued while in this system call
		 * collided with normal I/O operations... we'll
		 * delay in order to mitigate the impact of this
		 * task on the normal operation of the system
		 */
		IOSleep(uthread->uu_lowpri_delay);
		uthread->uu_lowpri_delay = 0;
	}
	if (kdebug_enable && (code != 180)) {

		if (callp->sy_return_type == _SYSCALL_RET_SSIZE_T)
			KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_EXCP_SC, code) | DBG_FUNC_END,
				error, uthread->uu_rval[1], 0, 0, 0);
		else
			KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_EXCP_SC, code) | DBG_FUNC_END,
				error, uthread->uu_rval[0], uthread->uu_rval[1], 0, 0);
	}

	thread_exception_return();
	/* NOTREACHED */
}
317
/*
 * Function: unix_syscall_return
 *
 * Inputs:	error - the error / ERESTART / EJUSTRETURN disposition for the
 *		        system call that is being completed
 *
 * Outputs:	none - never returns to its caller; exits to user space via
 *		       thread_exception_return()
 *
 * Alternate completion path for a BSD system call: re-derives the syscall
 * code and sysent entry from the saved user registers, then performs the
 * same result-register / cerror-pc fixups, ktrace, cancellation-flag and
 * funnel teardown as the tail of unix_syscall().
 */
void
unix_syscall_return(int error)
{
	thread_t		thread_act;
	struct uthread		*uthread;
	struct proc		*proc;
	struct savearea		*regs;
	unsigned short		code;
	struct sysent		*callp;
	int			funnel_type;
	unsigned int		cancel_enable;

	thread_act = current_thread();
	proc = current_proc();
	uthread = get_bsdthread_info(thread_act);

	regs = find_user_regs(thread_act);

	/* r0 == 0 means the indirect (syscall(2)) form: the code is in r3 */
	if (regs->save_r0 != 0)
		code = regs->save_r0;
	else
		code = regs->save_r3;

	/* out-of-range codes dispatch to sysent[63] (the nosys/enosys slot) */
	callp = (code >= nsysent) ? &sysent[63] : &sysent[code];

	/*
	 * Get index into sysent table
	 */
	if (error == ERESTART) {
		/* back up over the sc instruction plus the cerror skip */
		regs->save_srr0 -= 8;
	} else if (error != EJUSTRETURN) {
		if (error) {
			regs->save_r3 = (long long)error;
			/* set the "pc" to execute cerror routine */
			regs->save_srr0 -= 4;
		} else { /* (not error) */
			switch (callp->sy_return_type) {
			case _SYSCALL_RET_INT_T:
				regs->save_r3 = uthread->uu_rval[0];
				regs->save_r4 = uthread->uu_rval[1];
				break;
			case _SYSCALL_RET_UINT_T:
				regs->save_r3 = ((u_int)uthread->uu_rval[0]);
				regs->save_r4 = ((u_int)uthread->uu_rval[1]);
				break;
			case _SYSCALL_RET_OFF_T:
				/* off_t returns 64 bits split across two registers for 32 bit */
				/* process and in one register for 64 bit process */
				if (IS_64BIT_PROCESS(proc)) {
					u_int64_t *retp = (u_int64_t *)&uthread->uu_rval[0];
					regs->save_r3 = *retp;
					/*
					 * NOTE(review): unlike unix_syscall(), this branch
					 * does not zero regs->save_r4 -- confirm whether the
					 * omission is intentional.
					 */
				}
				else {
					regs->save_r3 = uthread->uu_rval[0];
					regs->save_r4 = uthread->uu_rval[1];
				}
				break;
			case _SYSCALL_RET_ADDR_T:
			case _SYSCALL_RET_SIZE_T:
			case _SYSCALL_RET_SSIZE_T:
				/* the variable length return types (user_addr_t, user_ssize_t,
				 * and user_size_t) are always the largest possible size in the
				 * kernel (we use uu_rval[0] and [1] as one 64 bit value).
				 */
				{
					u_int64_t *retp = (u_int64_t *)&uthread->uu_rval[0];
					regs->save_r3 = *retp;
				}
				break;
			case _SYSCALL_RET_NONE:
				break;
			default:
				panic("unix_syscall: unknown return type");
				break;
			}
		}
	}
	/* else  (error == EJUSTRETURN) { nothing } */

	if (KTRPOINT(proc, KTR_SYSRET)) {
		switch(callp->sy_return_type) {
		case _SYSCALL_RET_ADDR_T:
		case _SYSCALL_RET_SIZE_T:
		case _SYSCALL_RET_SSIZE_T:
			/*
			 * Trace the value of the least significant bits,
			 * until we can revise the ktrace API safely.
			 */
			ktrsysret(proc, code, error, uthread->uu_rval[1]);
			break;
		default:
			ktrsysret(proc, code, error, uthread->uu_rval[0]);
			break;
		}
	}

	cancel_enable = callp->sy_cancel;

	if (cancel_enable == _SYSCALL_CANCEL_NONE)
		uthread->uu_flag &= ~UT_NOTCANCELPT;

	/*
	 * NOTE(review): called unconditionally; presumably a no-op when the
	 * thread holds no funnel -- confirm against exit_funnel_section().
	 */
	exit_funnel_section();

	if (uthread->uu_lowpri_delay) {
		/*
		 * task is marked as a low priority I/O type
		 * and the I/O we issued while in this system call
		 * collided with normal I/O operations... we'll
		 * delay in order to mitigate the impact of this
		 * task on the normal operation of the system
		 */
		IOSleep(uthread->uu_lowpri_delay);
		uthread->uu_lowpri_delay = 0;
	}
	if (kdebug_enable && (code != 180)) {
		if (callp->sy_return_type == _SYSCALL_RET_SSIZE_T)
			KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_EXCP_SC, code) | DBG_FUNC_END,
				error, uthread->uu_rval[1], 0, 0, 0);
		else
			KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_EXCP_SC, code) | DBG_FUNC_END,
				error, uthread->uu_rval[0], uthread->uu_rval[1], 0, 0);
	}

	thread_exception_return();
	/* NOTREACHED */
}
444
445 /*
446 * Time of day and interval timer support.
447 *
448 * These routines provide the kernel entry points to get and set
449 * the time-of-day and per-process interval timers. Subroutines
450 * here provide support for adding and subtracting timeval structures
451 * and decrementing interval timers, optionally reloading the interval
452 * timers when they expire.
453 */
454 /* NOTE THIS implementation is for ppc architectures only.
455 * It is infrequently called, since the commpage intercepts
456 * most calls in user mode.
457 *
458 * XXX Y2038 bug because of assumed return of 32 bit seconds value, and
459 * XXX first parameter to clock_gettimeofday()
460 */
461 int
462 ppc_gettimeofday(__unused struct proc *p,
463 register struct ppc_gettimeofday_args *uap,
464 register_t *retval)
465 {
466 int error = 0;
467 extern lck_spin_t * tz_slock;
468
469 if (uap->tp)
470 clock_gettimeofday(&retval[0], &retval[1]);
471
472 if (uap->tzp) {
473 struct timezone ltz;
474
475 lck_spin_lock(tz_slock);
476 ltz = tz;
477 lck_spin_unlock(tz_slock);
478 error = copyout((caddr_t)&ltz, uap->tzp, sizeof (tz));
479 }
480
481 return (error);
482 }
483
484 #ifdef JOE_DEBUG
/*
 * joe_debug: print a debug diagnostic line to the console.
 *
 * Fix: the original relied on an implicit int return type (removed in
 * C99) and fell off the end without returning a value; declare the
 * return type explicitly and return 0 so callers reading the implicit
 * int result no longer see an indeterminate value.
 */
int
joe_debug(char *p) {

	printf("%s\n", p);
	return (0);
}
489 #endif
490
491
492 /*
493 * WARNING - this is a temporary workaround for binary compatibility issues
494 * with anti-piracy software that relies on patching ptrace (3928003).
495 * This KPI will be removed in the system release after Tiger.
496 */
497 uintptr_t temp_patch_ptrace(uintptr_t new_ptrace)
498 {
499 struct sysent * callp;
500 sy_call_t * old_ptrace;
501
502 if (new_ptrace == 0)
503 return(0);
504
505 enter_funnel_section(kernel_flock);
506 callp = &sysent[26];
507 old_ptrace = callp->sy_call;
508
509 /* only allow one patcher of ptrace */
510 if (old_ptrace == (sy_call_t *) ptrace) {
511 callp->sy_call = (sy_call_t *) new_ptrace;
512 }
513 else {
514 old_ptrace = NULL;
515 }
516 exit_funnel_section( );
517
518 return((uintptr_t)old_ptrace);
519 }
520
521 void temp_unpatch_ptrace(void)
522 {
523 struct sysent * callp;
524
525 enter_funnel_section(kernel_flock);
526 callp = &sysent[26];
527 callp->sy_call = (sy_call_t *) ptrace;
528 exit_funnel_section( );
529
530 return;
531 }