/*
 * Copyright (c) 2003-2007 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <mach/boolean.h>
#include <mach/mach_types.h>

#include <kern/kern_types.h>
#include <kern/processor.h>
#include <kern/thread_call.h>
#include <kern/kalloc.h>
#include <kern/thread.h>

#include <ppc/machine_routines.h>
#include <ppc/cpu_data.h>
#include <ppc/cpu_internal.h>
#include <ppc/exception.h>
#include <ppc/thread.h>

#include <chud/chud_xnu.h>
#include <chud/chud_xnu_private.h>
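
/*
 * Per-CPU callback support for the CHUD (Computer Hardware Understanding
 * Development) tools.  Each entry point below lets a profiling client hook
 * one kernel event source: a per-CPU timer, traps, interrupts, perfmon
 * ASTs, and inter-processor signals.  The matching *_cancel() routines
 * unhook them, and chudxnu_cancel_all_callbacks() tears down every hook
 * at once.
 */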
void chudxnu_cancel_all_callbacks(void)
{
    chudxnu_cpu_timer_callback_cancel_all();
    chudxnu_trap_callback_cancel();
    chudxnu_interrupt_callback_cancel();
    chudxnu_perfmon_ast_callback_cancel();
    chudxnu_cpusig_callback_cancel();
    chudxnu_kdebug_callback_cancel();
    chudxnu_syscall_callback_cancel();
    chudxnu_dtrace_callback_cancel();
}
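
/*
 * The boot processor's per-CPU data is statically allocated, presumably so
 * it can be handed out before kalloc() is usable; every other CPU's data
 * is allocated on demand in chudxnu_per_proc_alloc().
 */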
static chudcpu_data_t chudcpu_boot_cpu;
void *chudxnu_per_proc_alloc(boolean_t boot_processor)
{
    chudcpu_data_t *chud_proc_info;

    if (boot_processor) {
        chud_proc_info = &chudcpu_boot_cpu;
    } else {
        chud_proc_info = (chudcpu_data_t *)kalloc(sizeof(chudcpu_data_t));
        if (chud_proc_info == (chudcpu_data_t *)NULL) {
            return (void *)NULL;
        }
    }
    bzero((char *)chud_proc_info, sizeof(chudcpu_data_t));
    chud_proc_info->t_deadline = 0xFFFFFFFFFFFFFFFFULL;
    return (void *)chud_proc_info;
}
void chudxnu_per_proc_free(void *per_proc_chud)
{
    if (per_proc_chud == (void *)&chudcpu_boot_cpu) {
        return;	/* the boot CPU's data is statically allocated */
    } else {
        kfree(per_proc_chud, sizeof(chudcpu_data_t));
    }
}
#pragma mark **** cpu timer ****
static void
chudxnu_private_cpu_timer_callback(__unused timer_call_param_t param0,
                                   __unused timer_call_param_t param1)
{
    chudcpu_data_t *chud_proc_info;
    boolean_t oldlevel;
    struct ppc_thread_state64 state;
    mach_msg_type_number_t count;
    chudxnu_cpu_timer_callback_func_t fn = NULL;

    oldlevel = ml_set_interrupts_enabled(FALSE);
    chud_proc_info = (chudcpu_data_t *)(getPerProc()->pp_chud);

    count = PPC_THREAD_STATE64_COUNT;
    if (chudxnu_thread_get_state(current_thread(), PPC_THREAD_STATE64,
                                 (thread_state_t)&state, &count,
                                 FALSE) == KERN_SUCCESS) {
        fn = chud_proc_info->cpu_timer_callback_fn;
        if (fn) {
            (fn)(PPC_THREAD_STATE64, (thread_state_t)&state, count);
        }
    }

    ml_set_interrupts_enabled(oldlevel);
}
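
/*
 * chudxnu_cpu_timer_callback_enter() arms a one-shot timer on the calling
 * CPU; the callback is not re-armed automatically.  A minimal usage
 * sketch (sampler_tick() is a hypothetical client routine; its signature
 * follows the call site above):
 *
 *	static kern_return_t
 *	sampler_tick(thread_flavor_t flavor, thread_state_t tstate,
 *	             mach_msg_type_number_t count)
 *	{
 *		// record a sample, then re-arm for roughly 10ms out
 *		return chudxnu_cpu_timer_callback_enter(sampler_tick,
 *		                                        10000, NSEC_PER_USEC);
 *	}
 */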
kern_return_t chudxnu_cpu_timer_callback_enter(chudxnu_cpu_timer_callback_func_t func, uint32_t time, uint32_t units)
{
    chudcpu_data_t *chud_proc_info;
    boolean_t oldlevel;

    oldlevel = ml_set_interrupts_enabled(FALSE);
    chud_proc_info = (chudcpu_data_t *)(getPerProc()->pp_chud);

    timer_call_cancel(&(chud_proc_info->cpu_timer_call)); // cancel any existing callback for this cpu

    chud_proc_info->cpu_timer_callback_fn = func;

    clock_interval_to_deadline(time, units, &(chud_proc_info->t_deadline));
    timer_call_setup(&(chud_proc_info->cpu_timer_call),
                     chudxnu_private_cpu_timer_callback, NULL);
    timer_call_enter(&(chud_proc_info->cpu_timer_call),
                     chud_proc_info->t_deadline);

    ml_set_interrupts_enabled(oldlevel);
    return KERN_SUCCESS;
}
kern_return_t chudxnu_cpu_timer_callback_cancel(void)
{
    chudcpu_data_t *chud_proc_info;
    boolean_t oldlevel;

    oldlevel = ml_set_interrupts_enabled(FALSE);
    chud_proc_info = (chudcpu_data_t *)(getPerProc()->pp_chud);

    timer_call_cancel(&(chud_proc_info->cpu_timer_call));
    chud_proc_info->t_deadline = chud_proc_info->t_deadline |
        ~(chud_proc_info->t_deadline); // set to max value
    chud_proc_info->cpu_timer_callback_fn = NULL;

    ml_set_interrupts_enabled(oldlevel);
    return KERN_SUCCESS;
}
kern_return_t chudxnu_cpu_timer_callback_cancel_all(void)
{
    unsigned int cpu;
    chudcpu_data_t *chud_proc_info;

    for (cpu = 0; cpu < real_ncpus; cpu++) {
        if ((PerProcTable[cpu].ppe_vaddr == 0)
            || (PerProcTable[cpu].ppe_vaddr->pp_chud == 0))
            continue;
        chud_proc_info =
            (chudcpu_data_t *)PerProcTable[cpu].ppe_vaddr->pp_chud;
        timer_call_cancel(&(chud_proc_info->cpu_timer_call));
        chud_proc_info->t_deadline = chud_proc_info->t_deadline |
            ~(chud_proc_info->t_deadline); // set to max value
        chud_proc_info->cpu_timer_callback_fn = NULL;
    }
    return KERN_SUCCESS;
}
#pragma mark **** trap ****
static chudxnu_trap_callback_func_t trap_callback_fn = NULL;
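
/*
 * Map xnu trap codes to the architectural PowerPC exception vector
 * addresses (0x100 = system reset, 0x300 = DSI, 0x400 = ISI, ...), which
 * is the form CHUD clients expect.  Unknown trap codes map to 0x0 and are
 * ignored by the trap callback below.
 */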
#define TRAP_ENTRY_POINT(t) ((t==T_RESET) ? 0x100 : \
                             (t==T_MACHINE_CHECK) ? 0x200 : \
                             (t==T_DATA_ACCESS) ? 0x300 : \
                             (t==T_DATA_SEGMENT) ? 0x380 : \
                             (t==T_INSTRUCTION_ACCESS) ? 0x400 : \
                             (t==T_INSTRUCTION_SEGMENT) ? 0x480 : \
                             (t==T_INTERRUPT) ? 0x500 : \
                             (t==T_ALIGNMENT) ? 0x600 : \
                             (t==T_PROGRAM) ? 0x700 : \
                             (t==T_FP_UNAVAILABLE) ? 0x800 : \
                             (t==T_DECREMENTER) ? 0x900 : \
                             (t==T_IO_ERROR) ? 0xa00 : \
                             (t==T_RESERVED) ? 0xb00 : \
                             (t==T_SYSTEM_CALL) ? 0xc00 : \
                             (t==T_TRACE) ? 0xd00 : \
                             (t==T_FP_ASSIST) ? 0xe00 : \
                             (t==T_PERF_MON) ? 0xf00 : \
                             (t==T_VMX) ? 0xf20 : \
                             (t==T_INVALID_EXCP0) ? 0x1000 : \
                             (t==T_INVALID_EXCP1) ? 0x1100 : \
                             (t==T_INVALID_EXCP2) ? 0x1200 : \
                             (t==T_INSTRUCTION_BKPT) ? 0x1300 : \
                             (t==T_SYSTEM_MANAGEMENT) ? 0x1400 : \
                             (t==T_SOFT_PATCH) ? 0x1500 : \
                             (t==T_ALTIVEC_ASSIST) ? 0x1600 : \
                             (t==T_THERMAL) ? 0x1700 : \
                             (t==T_ARCHDEP0) ? 0x1800 : \
                             (t==T_INSTRUMENTATION) ? 0x2000 : \
                             0x0)
static kern_return_t
chudxnu_private_trap_callback(int trapno, struct savearea *ssp,
                              __unused unsigned int dsisr,
                              __unused addr64_t dar)
{
    boolean_t oldlevel = ml_set_interrupts_enabled(FALSE);
    kern_return_t retval = KERN_FAILURE;
    uint32_t trapentry = TRAP_ENTRY_POINT(trapno);
    chudxnu_trap_callback_func_t fn = trap_callback_fn;

    if (trapentry != 0x0) {
        if (fn) {
            struct ppc_thread_state64 state;
            mach_msg_type_number_t count = PPC_THREAD_STATE64_COUNT;
            chudxnu_copy_savearea_to_threadstate(PPC_THREAD_STATE64,
                (thread_state_t)&state, &count, ssp);
            retval = (fn)(trapentry, PPC_THREAD_STATE64,
                          (thread_state_t)&state, count);
        }
    }

    ml_set_interrupts_enabled(oldlevel);

    return retval;
}
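
/*
 * Registering a callback stores the function pointer and then plants
 * chudxnu_private_trap_callback in the kernel's perfTrapHook.  The
 * eieio/sync pair makes both stores visible to other processors before
 * the hook can fire there; the same pattern is used by every *_enter()
 * and *_cancel() routine below.
 */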
kern_return_t chudxnu_trap_callback_enter(chudxnu_trap_callback_func_t func)
{
    trap_callback_fn = func;
    perfTrapHook = chudxnu_private_trap_callback;
    __asm__ volatile("eieio");	/* force order */
    __asm__ volatile("sync");	/* force to memory */
    return KERN_SUCCESS;
}
kern_return_t chudxnu_trap_callback_cancel(void)
{
    trap_callback_fn = NULL;
    perfTrapHook = NULL;
    __asm__ volatile("eieio");	/* force order */
    __asm__ volatile("sync");	/* force to memory */
    return KERN_SUCCESS;
}
#pragma mark **** ast ****
static chudxnu_perfmon_ast_callback_func_t perfmon_ast_callback_fn = NULL;
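
/*
 * Perfmon ASTs: chudxnu_perfmon_ast_send_urgent() sets AST_CHUD or
 * AST_CHUD_URGENT in the pending-AST word, and the hook below runs when
 * the AST is delivered, clearing the CHUD bits and handing the current
 * thread's state to the registered callback.
 */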
static kern_return_t
chudxnu_private_chud_ast_callback(__unused int trapno,
                                  __unused struct savearea *ssp,
                                  __unused unsigned int dsisr,
                                  __unused addr64_t dar)
{
    boolean_t oldlevel = ml_set_interrupts_enabled(FALSE);
    ast_t *myast = ast_pending();
    kern_return_t retval = KERN_FAILURE;
    chudxnu_perfmon_ast_callback_func_t fn = perfmon_ast_callback_fn;

    if (*myast & AST_CHUD_URGENT) {
        *myast &= ~(AST_CHUD_URGENT | AST_CHUD);
        if ((*myast & AST_PREEMPTION) != AST_PREEMPTION)
            *myast &= ~(AST_URGENT);
        retval = KERN_SUCCESS;
    } else if (*myast & AST_CHUD) {
        *myast &= ~(AST_CHUD);
        retval = KERN_SUCCESS;
    }

    if (fn) {
        struct ppc_thread_state64 state;
        mach_msg_type_number_t count;
        count = PPC_THREAD_STATE64_COUNT;

        if (chudxnu_thread_get_state(current_thread(), PPC_THREAD_STATE64,
                                     (thread_state_t)&state, &count,
                                     FALSE) == KERN_SUCCESS) {
            (fn)(PPC_THREAD_STATE64, (thread_state_t)&state, count);
        }
    }

#if 0	/* disabled: older savearea-based variant of the logic above */
    // ASTs from ihandler go through thandler and are made to look like traps
    // always handle AST_CHUD_URGENT if there's a callback
    // only handle AST_CHUD if it's the only AST pending
    if (perfmon_ast_callback_fn &&
        ((*myast & AST_CHUD_URGENT) ||
         ((*myast & AST_CHUD) && !(*myast & AST_URGENT)))) {
        struct ppc_thread_state64 state;
        mach_msg_type_number_t count = PPC_THREAD_STATE64_COUNT;
        chudxnu_copy_savearea_to_threadstate(PPC_THREAD_STATE64,
            (thread_state_t)&state, &count, ssp);
        if (*myast & AST_CHUD_URGENT) {
            *myast &= ~(AST_CHUD_URGENT | AST_CHUD);
            if ((*myast & AST_PREEMPTION) != AST_PREEMPTION)
                *myast &= ~(AST_URGENT);
            retval = KERN_SUCCESS;
        } else if (*myast & AST_CHUD) {
            *myast &= ~(AST_CHUD);
            retval = KERN_SUCCESS;
        }
        (perfmon_ast_callback_fn)(PPC_THREAD_STATE64,
                                  (thread_state_t)&state, count);
    }
#endif

    ml_set_interrupts_enabled(oldlevel);
    return retval;
}
kern_return_t chudxnu_perfmon_ast_callback_enter(chudxnu_perfmon_ast_callback_func_t func)
{
    perfmon_ast_callback_fn = func;
    perfASTHook = chudxnu_private_chud_ast_callback;
    __asm__ volatile("eieio");	/* force order */
    __asm__ volatile("sync");	/* force to memory */
    return KERN_SUCCESS;
}
kern_return_t chudxnu_perfmon_ast_callback_cancel(void)
{
    perfmon_ast_callback_fn = NULL;
    perfASTHook = NULL;
    __asm__ volatile("eieio");	/* force order */
    __asm__ volatile("sync");	/* force to memory */
    return KERN_SUCCESS;
}
kern_return_t chudxnu_perfmon_ast_send_urgent(boolean_t urgent)
{
    boolean_t oldlevel = ml_set_interrupts_enabled(FALSE);
    ast_t *myast = ast_pending();

    if (urgent) {
        *myast |= (AST_CHUD_URGENT | AST_URGENT);
    } else {
        *myast |= (AST_CHUD);
    }

    ml_set_interrupts_enabled(oldlevel);
    return KERN_SUCCESS;
}
kern_return_t chudxnu_perfmon_ast_send(void)
{
    return chudxnu_perfmon_ast_send_urgent(TRUE);
}
#pragma mark **** interrupt ****
static chudxnu_interrupt_callback_func_t interrupt_callback_fn = NULL;
//extern perfCallback perfIntHook; /* function hook into interrupt() */
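
/*
 * The interrupt hook mirrors the trap hook: the savearea is converted to
 * a PPC_THREAD_STATE64 and passed to the client along with the exception
 * vector address, and the client's return value is handed back to the
 * kernel's interrupt path.
 */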
static kern_return_t
chudxnu_private_interrupt_callback(int trapno, struct savearea *ssp,
                                   __unused unsigned int dsisr,
                                   __unused addr64_t dar)
{
    chudxnu_interrupt_callback_func_t fn = interrupt_callback_fn;

    if (fn) {
        struct ppc_thread_state64 state;
        mach_msg_type_number_t count = PPC_THREAD_STATE64_COUNT;
        chudxnu_copy_savearea_to_threadstate(PPC_THREAD_STATE64,
            (thread_state_t)&state, &count, ssp);
        return (fn)(TRAP_ENTRY_POINT(trapno), PPC_THREAD_STATE64,
                    (thread_state_t)&state, count);
    } else {
        return KERN_FAILURE;
    }
}
kern_return_t chudxnu_interrupt_callback_enter(chudxnu_interrupt_callback_func_t func)
{
    interrupt_callback_fn = func;
    perfIntHook = chudxnu_private_interrupt_callback;
    __asm__ volatile("eieio");	/* force order */
    __asm__ volatile("sync");	/* force to memory */
    return KERN_SUCCESS;
}
kern_return_t chudxnu_interrupt_callback_cancel(void)
{
    interrupt_callback_fn = NULL;
    perfIntHook = NULL;
    __asm__ volatile("eieio");	/* force order */
    __asm__ volatile("sync");	/* force to memory */
    return KERN_SUCCESS;
}
#pragma mark **** cpu signal ****
static chudxnu_cpusig_callback_func_t cpusig_callback_fn = NULL;
extern perfCallback perfCpuSigHook; /* function hook into cpu_signal_handler() */
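
/*
 * CPU-signal callbacks run on the target processor out of
 * cpu_signal_handler() via perfCpuSigHook; the request word sent by
 * chudxnu_cpusig_send() below is handed to the client together with the
 * interrupted thread state.  The handler's return value is ignored.
 */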
static kern_return_t
chudxnu_private_cpu_signal_handler(int request, struct savearea *ssp,
                                   __unused unsigned int arg0,
                                   __unused addr64_t arg1)
{
    chudxnu_cpusig_callback_func_t fn = cpusig_callback_fn;

    if (fn) {
        struct ppc_thread_state64 state;
        mach_msg_type_number_t count = PPC_THREAD_STATE64_COUNT;
        chudxnu_copy_savearea_to_threadstate(PPC_THREAD_STATE64,
            (thread_state_t)&state, &count, ssp);
        (fn)(request, PPC_THREAD_STATE64, (thread_state_t)&state, count);
    }
    return KERN_SUCCESS; // ignored
}
kern_return_t chudxnu_cpusig_callback_enter(chudxnu_cpusig_callback_func_t func)
{
    cpusig_callback_fn = func;
    perfCpuSigHook = chudxnu_private_cpu_signal_handler;
    __asm__ volatile("eieio");	/* force order */
    __asm__ volatile("sync");	/* force to memory */
    return KERN_SUCCESS;
}
kern_return_t chudxnu_cpusig_callback_cancel(void)
{
    cpusig_callback_fn = NULL;
    perfCpuSigHook = NULL;
    __asm__ volatile("eieio");	/* force order */
    __asm__ volatile("sync");	/* force to memory */
    return KERN_SUCCESS;
}
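
/*
 * Send a CHUD request to another processor and wait for it to be picked
 * up.  temp[] lives on this stack: temp[0] holds a sync flag and temp[1]
 * the request word.  The signal is retried up to 16 times; hw_cpu_sync()
 * then waits (bounded by LockTimeOut) for the other processor to
 * acknowledge.  Signaling the current CPU is rejected with
 * KERN_INVALID_ARGUMENT.
 */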
kern_return_t chudxnu_cpusig_send(int otherCPU, uint32_t request)
{
    int thisCPU;
    kern_return_t retval = KERN_FAILURE;
    int retries = 0;
    boolean_t oldlevel;
    uint32_t temp[2];

    oldlevel = ml_set_interrupts_enabled(FALSE);
    thisCPU = cpu_number();

    if (thisCPU != otherCPU) {
        temp[0] = 0xFFFFFFFF;		/* set sync flag */
        temp[1] = request;		/* set request */
        __asm__ volatile("eieio");	/* force order */
        __asm__ volatile("sync");	/* force to memory */

        do {
            retval = cpu_signal(otherCPU, SIGPcpureq, CPRQchud,
                                (uint32_t)&temp);
        } while (retval != KERN_SUCCESS && (retries++) < 16);

        if (retries >= 16) {
            retval = KERN_FAILURE;
        } else {
            retval = hw_cpu_sync(temp, LockTimeOut); /* wait for the other processor */
            if (!retval) {
                retval = KERN_FAILURE;
            } else {
                retval = KERN_SUCCESS;
            }
        }
    } else {
        retval = KERN_INVALID_ARGUMENT;
    }

    ml_set_interrupts_enabled(oldlevel);
    return retval;
}