/*
 * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * NOTICE: This file was modified by McAfee Research in 2004 to introduce
 * support for mandatory and extensible security protections. This notice
 * is included in support of clause 2.2 (b) of the Apple Public License,
 * Version 2.0.
 */

#include <kern/task.h>
#include <kern/thread.h>
#include <kern/assert.h>
#include <kern/clock.h>
#include <kern/locks.h>
#include <kern/sched_prim.h>
#include <mach/machine/thread_status.h>
#include <mach/thread_act.h>
#include <ppc/savearea.h>

#include <sys/kernel.h>
#include <sys/vm.h>
#include <sys/proc_internal.h>
#include <sys/syscall.h>
#include <sys/systm.h>
#include <sys/user.h>
#include <sys/errno.h>
#include <sys/kdebug.h>
#include <sys/sysent.h>
#include <sys/sysproto.h>
#include <sys/kauth.h>

#include <bsm/audit_kernel.h>

#if CONFIG_DTRACE
extern int32_t dtrace_systrace_syscall(struct proc *, void *, int *);
extern void dtrace_systrace_syscall_return(unsigned short, int, int *);
#endif

extern void
unix_syscall(struct savearea *regs);

extern struct savearea *
find_user_regs(
	thread_t act);

extern lck_spin_t *tz_slock;
extern void throttle_lowpri_io(int *lowpri_window, mount_t v_mount);

/*
 * Function:	unix_syscall
 *
 * Inputs:	regs	- pointer to the thread's saved user register state
 *			  (the PPC savearea)
 *
 * Outputs:	none
 */
void
unix_syscall(struct savearea *regs)
{
	thread_t	thread_act;
	struct uthread	*uthread;
	struct proc	*proc;
	struct sysent	*callp;
	int		error;
	unsigned int	code;
	boolean_t	flavor;

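	/*
	 * flavor == 1 selects the indirect form (r0 == 0, the syscall(2) trap):
	 * the system call number is then taken from r3 and its arguments begin
	 * at r4.  Otherwise the number is in r0 and the arguments begin at r3.
	 */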
	flavor = (((unsigned int)regs->save_r0) == 0) ? 1 : 0;

	if (flavor)
		code = regs->save_r3;
	else
		code = regs->save_r0;

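	/*
	 * Code 180 is kdebug_trace; it is excluded here so that the trace
	 * system call does not itself generate kdebug entries.
	 */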
	if (kdebug_enable && (code != 180)) {
		if (flavor)
			KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_EXCP_SC, code) | DBG_FUNC_START,
				regs->save_r4, regs->save_r5, regs->save_r6, regs->save_r7, 0);
		else
			KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_EXCP_SC, code) | DBG_FUNC_START,
				regs->save_r3, regs->save_r4, regs->save_r5, regs->save_r6, 0);
	}
	thread_act = current_thread();
	uthread = get_bsdthread_info(thread_act);

	if (!(uthread->uu_flag & UT_VFORK))
		proc = (struct proc *)get_bsdtask_info(current_task());
	else
		proc = current_proc();

	/* Make sure there is a process associated with this task */
	if (proc == NULL) {
		regs->save_r3 = (long long)EPERM;
		/* set the "pc" to execute cerror routine */
		regs->save_srr0 -= 4;
		task_terminate_internal(current_task());
		thread_exception_return();
		/* NOTREACHED */
	}

	/*
	 * Delayed binding of thread credential to process credential, if we
	 * are not running with an explicitly set thread credential.
	 */
	kauth_cred_uthread_update(uthread, proc);

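	/*
	 * Out-of-range system call numbers are redirected to sysent[63],
	 * the nosys() entry, rather than indexing past the table.
	 */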
	callp = (code >= NUM_SYSENT) ? &sysent[63] : &sysent[code];

	if (callp->sy_narg != 0) {
		void		*regsp;
		sy_munge_t	*mungerp;

		if (IS_64BIT_PROCESS(proc)) {
			/* XXX Turn 64 bit unsafe calls into nosys() */
			if (callp->sy_flags & UNSAFE_64BIT) {
				callp = &sysent[63];
				goto unsafe;
			}
			mungerp = callp->sy_arg_munge64;
		}
		else {
			mungerp = callp->sy_arg_munge32;
		}
		if ( !flavor) {
			regsp = (void *) &regs->save_r3;
		} else {
			/* indirect system call consumes an argument so only 7 are supported */
			if (callp->sy_narg > 7) {
				callp = &sysent[63];
				goto unsafe;
			}
			regsp = (void *) &regs->save_r4;
		}
		/* call syscall argument munger to copy in arguments (see xnu/bsd/dev/ppc/munge.s) */
		(*mungerp)(regsp, (void *) &uthread->uu_arg[0]);
	}

unsafe:

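	/*
	 * Mark the thread as not being at a pthread cancelation point while
	 * the system call body runs; the flag is cleared again below before
	 * returning to user space.
	 */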
	uthread->uu_flag |= UT_NOTCANCELPT;

	uthread->uu_rval[0] = 0;

	/*
	 * r4 is volatile; if we seeded this from regs->save_r4 here, the
	 * child would end up with the parent's r4 after an execve.
	 */
	uthread->uu_rval[1] = 0;

	error = 0;

	/*
	 * PPC runtime calls cerror after every unix system call, so
	 * assume no error and adjust the "pc" to skip this call.
	 * It will be set back to the cerror call if an error is detected.
	 */
	regs->save_srr0 += 4;

#ifdef JOE_DEBUG
	uthread->uu_iocount = 0;
	uthread->uu_vpindex = 0;
#endif
	AUDIT_SYSCALL_ENTER(code, proc, uthread);
	error = (*(callp->sy_call))(proc, (void *)uthread->uu_arg, &(uthread->uu_rval[0]));
	AUDIT_SYSCALL_EXIT(code, proc, uthread, error);
#if CONFIG_MACF
	mac_thread_userret(code, error, thread_act);
#endif

#ifdef JOE_DEBUG
	if (uthread->uu_iocount)
		joe_debug("system call returned with uu_iocount != 0");
#endif
#if CONFIG_DTRACE
	uthread->t_dtrace_errno = error;
#endif /* CONFIG_DTRACE */

	regs = find_user_regs(thread_act);

	if (error == ERESTART) {
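		/*
		 * Back srr0 up by 8: undo the +4 cerror-skip adjustment above
		 * and step back over the sc instruction itself, so the system
		 * call is re-issued when the thread returns to user space.
		 */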
		regs->save_srr0 -= 8;
	} else if (error != EJUSTRETURN) {
		if (error) {
			regs->save_r3 = (long long)error;
			/* set the "pc" to execute cerror routine */
			regs->save_srr0 -= 4;
		} else { /* (not error) */
			switch (callp->sy_return_type) {
			case _SYSCALL_RET_INT_T:
				regs->save_r3 = uthread->uu_rval[0];
				regs->save_r4 = uthread->uu_rval[1];
				break;
			case _SYSCALL_RET_UINT_T:
				regs->save_r3 = ((u_int)uthread->uu_rval[0]);
				regs->save_r4 = ((u_int)uthread->uu_rval[1]);
				break;
			case _SYSCALL_RET_OFF_T:
				/* off_t returns 64 bits split across two registers for 32 bit */
				/* process and in one register for 64 bit process */
				if (IS_64BIT_PROCESS(proc)) {
					u_int64_t *retp = (u_int64_t *)&uthread->uu_rval[0];
					regs->save_r3 = *retp;
					regs->save_r4 = 0;
				}
				else {
					regs->save_r3 = uthread->uu_rval[0];
					regs->save_r4 = uthread->uu_rval[1];
				}
				break;
			case _SYSCALL_RET_ADDR_T:
			case _SYSCALL_RET_SIZE_T:
			case _SYSCALL_RET_SSIZE_T:
				/* the variable length return types (user_addr_t, user_ssize_t,
				 * and user_size_t) are always the largest possible size in the
				 * kernel (we use uu_rval[0] and [1] as one 64 bit value).
				 */
				{
					user_addr_t *retp = (user_addr_t *)&uthread->uu_rval[0];
					regs->save_r3 = *retp;
					regs->save_r4 = 0;
				}
				break;
			case _SYSCALL_RET_NONE:
				break;
			default:
				panic("unix_syscall: unknown return type");
				break;
			}
		}
	}
	/* else (error == EJUSTRETURN) { nothing } */

	uthread->uu_flag &= ~UT_NOTCANCELPT;

	/* panic if funnel is held */
	syscall_exit_funnelcheck();

	if (uthread->uu_lowpri_window && uthread->v_mount) {
		/*
		 * task is marked as a low priority I/O type
		 * and the I/O we issued while in this system call
		 * collided with normal I/O operations... we'll
		 * delay in order to mitigate the impact of this
		 * task on the normal operation of the system
		 */
		throttle_lowpri_io(&uthread->uu_lowpri_window, uthread->v_mount);
	}
	if (kdebug_enable && (code != 180)) {

		if (callp->sy_return_type == _SYSCALL_RET_SSIZE_T)
			KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_EXCP_SC, code) | DBG_FUNC_END,
				error, uthread->uu_rval[1], 0, 0, 0);
		else
			KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_EXCP_SC, code) | DBG_FUNC_END,
				error, uthread->uu_rval[0], uthread->uu_rval[1], 0, 0);
	}

	thread_exception_return();
	/* NOTREACHED */
}

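/*
 * unix_syscall_return
 *
 * Alternate completion path for system calls that do not return through
 * unix_syscall() (for example, after blocking and resuming on a
 * continuation); it repeats the same error and return-value handling
 * before returning to user space.
 */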
void
unix_syscall_return(int error)
{
	thread_t	thread_act;
	struct uthread	*uthread;
	struct proc	*proc;
	struct savearea	*regs;
	unsigned int	code;
	struct sysent	*callp;

	thread_act = current_thread();
	proc = current_proc();
	uthread = get_bsdthread_info(thread_act);

	regs = find_user_regs(thread_act);

	if (regs->save_r0 != 0)
		code = regs->save_r0;
	else
		code = regs->save_r3;

	callp = (code >= NUM_SYSENT) ? &sysent[63] : &sysent[code];

#if CONFIG_DTRACE
	if (callp->sy_call == dtrace_systrace_syscall)
		dtrace_systrace_syscall_return( code, error, uthread->uu_rval );
#endif /* CONFIG_DTRACE */

	/*
	 * Handle restart, error, and return values, mirroring the logic
	 * at the end of unix_syscall() above.
	 */
	if (error == ERESTART) {
		regs->save_srr0 -= 8;
	} else if (error != EJUSTRETURN) {
		if (error) {
			regs->save_r3 = (long long)error;
			/* set the "pc" to execute cerror routine */
			regs->save_srr0 -= 4;
		} else { /* (not error) */
			switch (callp->sy_return_type) {
			case _SYSCALL_RET_INT_T:
				regs->save_r3 = uthread->uu_rval[0];
				regs->save_r4 = uthread->uu_rval[1];
				break;
			case _SYSCALL_RET_UINT_T:
				regs->save_r3 = ((u_int)uthread->uu_rval[0]);
				regs->save_r4 = ((u_int)uthread->uu_rval[1]);
				break;
			case _SYSCALL_RET_OFF_T:
				/* off_t returns 64 bits split across two registers for 32 bit */
				/* process and in one register for 64 bit process */
				if (IS_64BIT_PROCESS(proc)) {
					u_int64_t *retp = (u_int64_t *)&uthread->uu_rval[0];
					regs->save_r3 = *retp;
				}
				else {
					regs->save_r3 = uthread->uu_rval[0];
					regs->save_r4 = uthread->uu_rval[1];
				}
				break;
			case _SYSCALL_RET_ADDR_T:
			case _SYSCALL_RET_SIZE_T:
			case _SYSCALL_RET_SSIZE_T:
				/* the variable length return types (user_addr_t, user_ssize_t,
				 * and user_size_t) are always the largest possible size in the
				 * kernel (we use uu_rval[0] and [1] as one 64 bit value).
				 */
				{
					u_int64_t *retp = (u_int64_t *)&uthread->uu_rval[0];
					regs->save_r3 = *retp;
				}
				break;
			case _SYSCALL_RET_NONE:
				break;
			default:
				panic("unix_syscall: unknown return type");
				break;
			}
		}
	}
	/* else (error == EJUSTRETURN) { nothing } */

	uthread->uu_flag &= ~UT_NOTCANCELPT;

	/* panic if funnel is held */
	syscall_exit_funnelcheck();

	if (uthread->uu_lowpri_window && uthread->v_mount) {
		/*
		 * task is marked as a low priority I/O type
		 * and the I/O we issued while in this system call
		 * collided with normal I/O operations... we'll
		 * delay in order to mitigate the impact of this
		 * task on the normal operation of the system
		 */
		throttle_lowpri_io(&uthread->uu_lowpri_window, uthread->v_mount);
	}
	if (kdebug_enable && (code != 180)) {
		if (callp->sy_return_type == _SYSCALL_RET_SSIZE_T)
			KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_EXCP_SC, code) | DBG_FUNC_END,
				error, uthread->uu_rval[1], 0, 0, 0);
		else
			KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_EXCP_SC, code) | DBG_FUNC_END,
				error, uthread->uu_rval[0], uthread->uu_rval[1], 0, 0);
	}

	thread_exception_return();
	/* NOTREACHED */
}

#ifdef JOE_DEBUG
joe_debug(char *p) {

	printf("%s\n", p);
}
#endif