/*
 * Copyright (c) 2000-2001 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/thread_act.h>
#include <kern/assert.h>
#include <mach/machine/thread_status.h>
#include <ppc/savearea.h>

#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/syscall.h>
#include <sys/systm.h>
#include <sys/user.h>
#include <sys/errno.h>
#include <sys/ktrace.h>
#include <sys/kdebug.h>
extern struct savearea *find_user_regs(thread_act_t act);

extern void enter_funnel_section(funnel_t *funnel_lock);
extern void exit_funnel_section(funnel_t *funnel_lock);
/*
 * Function:	unix_syscall
 *
 * Inputs:	regs - pointer to Process Control Block
 */
void
unix_syscall(struct savearea *regs)
{
	thread_act_t		thread_act;
	struct uthread		*uthread;
	struct proc		*proc;
	struct sysent		*callp;
	int			error;
	unsigned short		code;
	int			flavor;
	int			funnel_type;
	thread_act = current_act();
	uthread = get_bsdthread_info(thread_act);

	if (!(uthread->uu_flag & P_VFORK))
		proc = (struct proc *)get_bsdtask_info(current_task());
	else
		proc = current_proc();
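	/*
	 * Two entry flavors reach this handler: in the direct case the
	 * system call number arrives in r0 with the arguments in r3-r10;
	 * when r0 is zero the call came through the indirect syscall()
	 * stub, so the number is in r3 and the arguments start at r4.
	 * "flavor" below records which register layout applies.
	 */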
	flavor = (regs->save_r0 == NULL) ? 1 : 0;

	uthread->uu_ar0 = (int *)regs;

	if (flavor)
		code = regs->save_r3;
	else
		code = regs->save_r0;
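	/*
	 * Out-of-range call numbers are mapped to a fixed sysent slot
	 * instead of indexing past the end of the table.
	 */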
	callp = (code >= nsysent) ? &sysent[63] : &sysent[code];
	if (callp->sy_narg > 8)
		panic("unix_syscall: max arg count exceeded");
	if (callp->sy_narg != 0) {
		if (!flavor) {
			uthread->uu_arg[0] = regs->save_r3;
			uthread->uu_arg[1] = regs->save_r4;
			uthread->uu_arg[2] = regs->save_r5;
			uthread->uu_arg[3] = regs->save_r6;
			uthread->uu_arg[4] = regs->save_r7;
			uthread->uu_arg[5] = regs->save_r8;
			uthread->uu_arg[6] = regs->save_r9;
			uthread->uu_arg[7] = regs->save_r10;
		} else {
			/*
			 * Indirect syscall(): the call number was in r3,
			 * so the arguments are shifted down one register.
			 */
			uthread->uu_arg[0] = regs->save_r4;
			uthread->uu_arg[1] = regs->save_r5;
			uthread->uu_arg[2] = regs->save_r6;
			uthread->uu_arg[3] = regs->save_r7;
			uthread->uu_arg[4] = regs->save_r8;
			uthread->uu_arg[5] = regs->save_r9;
			uthread->uu_arg[6] = regs->save_r10;
		}
	}
	callp = (code >= nsysent) ? &sysent[63] : &sysent[code];
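	/*
	 * Trace system call entry through kdebug, except for call 180
	 * (the kdebug trace call itself), so the trace facility does not
	 * log its own activity.
	 */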
	if (kdebug_enable && (code != 180)) {
		if (flavor)
			KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_EXCP_SC, code) | DBG_FUNC_START,
				regs->save_r4, regs->save_r5, regs->save_r6, regs->save_r7, 0);
		else
			KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_EXCP_SC, code) | DBG_FUNC_START,
				regs->save_r3, regs->save_r4, regs->save_r5, regs->save_r6, 0);
	}
	funnel_type = (int)callp->sy_funnel;
	if (funnel_type == KERNEL_FUNNEL)
		enter_funnel_section(kernel_flock);
	else if (funnel_type == NETWORK_FUNNEL)
		enter_funnel_section(network_flock);
	uthread->uu_rval[0] = 0;

	/*
	 * r4 is volatile; if we set it to regs->save_r4 here the child
	 * would have the parent's r4 after execve.
	 */
	uthread->uu_rval[1] = 0;
	/*
	 * The PPC runtime calls cerror after every unix system call, so
	 * assume no error and adjust the "pc" to skip this call.
	 * It will be set back to the cerror call if an error is detected.
	 */
	regs->save_srr0 += 4;
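	/*
	 * Illustrative sketch of the user-side stub this arithmetic assumes
	 * (the stub itself lives in libc, not in this file):
	 *
	 *	sc			; trap; srr0 points at the next insn
	 *	b	cerror		; reached only if srr0 is moved back 4
	 *	...			; success path after the += 4 above
	 *
	 * ERESTART below backs srr0 up by 8, i.e. to the sc itself, so the
	 * call is reissued.
	 */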
	if (KTRPOINT(proc, KTR_SYSCALL))
		ktrsyscall(proc, code, callp->sy_narg, uthread->uu_arg, funnel_type);
	error = (*(callp->sy_call))(proc, (void *)uthread->uu_arg, &(uthread->uu_rval[0]));
	regs = find_user_regs(thread_act);
	if (error == ERESTART) {
		regs->save_srr0 -= 8;
	} else if (error != EJUSTRETURN) {
		if (error) {
			regs->save_r3 = error;
			/* set the "pc" to execute cerror routine */
			regs->save_srr0 -= 4;
		} else { /* (not error) */
			regs->save_r3 = uthread->uu_rval[0];
			regs->save_r4 = uthread->uu_rval[1];
		}
	}
	/* else  (error == EJUSTRETURN) { nothing } */
	if (KTRPOINT(proc, KTR_SYSRET))
		ktrsysret(proc, code, error, uthread->uu_rval[0], funnel_type);
	if (funnel_type == KERNEL_FUNNEL)
		exit_funnel_section(kernel_flock);
	else if (funnel_type == NETWORK_FUNNEL)
		exit_funnel_section(network_flock);
	if (kdebug_enable && (code != 180)) {
		KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_EXCP_SC, code) | DBG_FUNC_END,
			error, uthread->uu_rval[0], uthread->uu_rval[1], 0, 0);
	}

	thread_exception_return();
	/* NOTREACHED */
}
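/*
 * unix_syscall_return is the exit path for system calls that complete on a
 * code path which does not fall back out through unix_syscall(); it repeats
 * the register fix-up, ktrace/kdebug reporting, and funnel release performed
 * above, then returns to user space via thread_exception_return().
 */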
void
unix_syscall_return(int error)
{
	thread_act_t		thread_act;
	struct uthread		*uthread;
	struct proc		*proc;
	struct savearea		*regs;
	unsigned short		code;
	struct sysent		*callp;
	int			funnel_type;
	thread_act = current_act();
	proc = current_proc();
	uthread = get_bsdthread_info(thread_act);

	regs = find_user_regs(thread_act);
	if (error == ERESTART) {
		regs->save_srr0 -= 8;
	} else if (error != EJUSTRETURN) {
		if (error) {
			regs->save_r3 = error;
			/* set the "pc" to execute cerror routine */
			regs->save_srr0 -= 4;
		} else { /* (not error) */
			regs->save_r3 = uthread->uu_rval[0];
			regs->save_r4 = uthread->uu_rval[1];
		}
	}
	/* else  (error == EJUSTRETURN) { nothing } */
	/*
	 * Get index into sysent table
	 */
	if (regs->save_r0 != NULL)
		code = regs->save_r0;
	else
		code = regs->save_r3;

	callp = (code >= nsysent) ? &sysent[63] : &sysent[code];
	funnel_type = (int)callp->sy_funnel;
	if (KTRPOINT(proc, KTR_SYSRET))
		ktrsysret(proc, code, error, uthread->uu_rval[0], funnel_type);
	if (funnel_type == KERNEL_FUNNEL)
		exit_funnel_section(kernel_flock);
	else if (funnel_type == NETWORK_FUNNEL)
		exit_funnel_section(network_flock);
	if (kdebug_enable && (code != 180)) {
		KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_EXCP_SC, code) | DBG_FUNC_END,
			error, uthread->uu_rval[0], uthread->uu_rval[1], 0, 0);
	}

	thread_exception_return();
	/* NOTREACHED */
}
/*
 * Time of day and interval timer support.
 *
 * These routines provide the kernel entry points to get and set
 * the time-of-day and per-process interval timers.  Subroutines
 * here provide support for adding and subtracting timeval structures
 * and decrementing interval timers, optionally reloading the interval
 * timers when they expire.
 */
struct gettimeofday_args {
	struct timeval *tp;
	struct timezone *tzp;
};

/* NOTE THIS implementation is for ppc architectures only */
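/*
 * On ppc the seconds and microseconds are handed back in retval[0] and
 * retval[1], which unix_syscall()/unix_syscall_return() above copy into
 * r3/r4; only the optional timezone is copied out through uap->tzp.
 */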
int
ppc_gettimeofday(p, uap, retval)
	struct proc *p;
	register struct gettimeofday_args *uap;
	register_t *retval;
{
	struct timeval atv;
	int error = 0;
	//struct savearea *child_state;
	extern simple_lock_data_t tz_slock;

	if (uap->tp) {
		microtime(&atv);
		retval[0] = atv.tv_sec;
		retval[1] = atv.tv_usec;
	}

	if (uap->tzp) {
		struct timezone ltz;

		usimple_lock(&tz_slock);
		ltz = tz;
		usimple_unlock(&tz_slock);
		error = copyout((caddr_t)&ltz, (caddr_t)uap->tzp,