/*
 * Copyright (c) 2003-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <mach/boolean.h>
#include <mach/mach_types.h>

#include <kern/kern_types.h>
#include <kern/processor.h>
#include <kern/timer_call.h>
#include <kern/thread_call.h>
#include <kern/kalloc.h>
#include <kern/thread.h>

#include <ppc/machine_routines.h>
#include <ppc/cpu_data.h>
#include <ppc/cpu_internal.h>
#include <ppc/exception.h>
#include <ppc/thread.h>

#include <ppc/chud/chud_xnu.h>
#include <ppc/chud/chud_xnu_private.h>
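/*
 * chudxnu_cancel_all_callbacks() tears down every CHUD hook registered
 * through this file: the per-cpu timers, the trap/interrupt/AST/cpu-signal
 * hooks, the kdebug and syscall callbacks, and the deprecated thread
 * timer. It is a convenience for clients shutting down; each _cancel
 * routine below is also safe to call individually.
 */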
void chudxnu_cancel_all_callbacks(void)
{
    chudxnu_cpu_timer_callback_cancel_all();
    chudxnu_trap_callback_cancel();
    chudxnu_interrupt_callback_cancel();
    chudxnu_perfmon_ast_callback_cancel();
    chudxnu_cpusig_callback_cancel();
    chudxnu_kdebug_callback_cancel();
    chudxnu_thread_timer_callback_cancel();
    chudxnu_syscall_callback_cancel();
}
#pragma mark **** cpu timer ****
typedef struct {
    timer_call_data_t                   cpu_timer_call;
    uint64_t                            t_deadline;
    chudxnu_cpu_timer_callback_func_t   cpu_timer_callback_fn;
} chudcpu_data_t;

static chudcpu_data_t chudcpu_boot_cpu;
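/*
 * Each cpu's per_proc carries one chudcpu_data_t (hung off pp_chud): the
 * timer_call plus its deadline, and the client callback to run when it
 * fires. The boot cpu's instance lives in static storage, presumably
 * because chudxnu_per_proc_alloc() runs for the boot processor before
 * kalloc() is usable.
 */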
void *chudxnu_per_proc_alloc(boolean_t boot_processor)
{
    chudcpu_data_t *chud_proc_info;

    if (boot_processor) {
        chud_proc_info = &chudcpu_boot_cpu;
    } else {
        chud_proc_info = (chudcpu_data_t *)kalloc(sizeof(chudcpu_data_t));
        if (chud_proc_info == (chudcpu_data_t *)NULL) {
            return (void *)NULL;
        }
    }
    bzero((char *)chud_proc_info, sizeof(chudcpu_data_t));
    chud_proc_info->t_deadline = 0xFFFFFFFFFFFFFFFFULL;
    return (void *)chud_proc_info;
}
void chudxnu_per_proc_free(void *per_proc_chud)
{
    if (per_proc_chud == (void *)&chudcpu_boot_cpu) {
        return; // the boot cpu's instance is static, not kalloc'd
    } else {
        kfree(per_proc_chud, sizeof(chudcpu_data_t));
    }
}
static void chudxnu_private_cpu_timer_callback(timer_call_param_t param0, timer_call_param_t param1)
{
    chudcpu_data_t *chud_proc_info;
    boolean_t oldlevel;
    struct ppc_thread_state64 state;
    mach_msg_type_number_t count;

    oldlevel = ml_set_interrupts_enabled(FALSE);
    chud_proc_info = (chudcpu_data_t *)(getPerProc()->pp_chud);

    count = PPC_THREAD_STATE64_COUNT;
    if(chudxnu_thread_get_state(current_thread(), PPC_THREAD_STATE64, (thread_state_t)&state, &count, FALSE)==KERN_SUCCESS) {
        if(chud_proc_info->cpu_timer_callback_fn) {
            (chud_proc_info->cpu_timer_callback_fn)(PPC_THREAD_STATE64, (thread_state_t)&state, count);
        }
    }

    ml_set_interrupts_enabled(oldlevel);
}
kern_return_t chudxnu_cpu_timer_callback_enter(chudxnu_cpu_timer_callback_func_t func, uint32_t time, uint32_t units)
{
    chudcpu_data_t *chud_proc_info;
    boolean_t oldlevel;

    oldlevel = ml_set_interrupts_enabled(FALSE);
    chud_proc_info = (chudcpu_data_t *)(getPerProc()->pp_chud);

    timer_call_cancel(&(chud_proc_info->cpu_timer_call)); // cancel any existing callback for this cpu

    chud_proc_info->cpu_timer_callback_fn = func;

    clock_interval_to_deadline(time, units, &(chud_proc_info->t_deadline));
    timer_call_setup(&(chud_proc_info->cpu_timer_call), chudxnu_private_cpu_timer_callback, NULL);
    timer_call_enter(&(chud_proc_info->cpu_timer_call), chud_proc_info->t_deadline);

    ml_set_interrupts_enabled(oldlevel);
    return KERN_SUCCESS;
}
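/*
 * Minimal usage sketch (my_timer_hook is a hypothetical client function;
 * its parameter types follow the call site in
 * chudxnu_private_cpu_timer_callback above). The timer is armed with a
 * one-shot timer_call_enter(), so a periodic sampler re-arms itself from
 * inside the hook:
 *
 *   static void my_timer_hook(int flavor, thread_state_t state,
 *                             mach_msg_type_number_t count)
 *   {
 *       // ... record the interrupted thread state ...
 *       chudxnu_cpu_timer_callback_enter(my_timer_hook, 100, NSEC_PER_USEC);
 *   }
 *
 *   // arm: fire on this cpu in 100 microseconds
 *   chudxnu_cpu_timer_callback_enter(my_timer_hook, 100, NSEC_PER_USEC);
 */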
kern_return_t chudxnu_cpu_timer_callback_cancel(void)
{
    chudcpu_data_t *chud_proc_info;
    boolean_t oldlevel;

    oldlevel = ml_set_interrupts_enabled(FALSE);
    chud_proc_info = (chudcpu_data_t *)(getPerProc()->pp_chud);

    timer_call_cancel(&(chud_proc_info->cpu_timer_call));
    chud_proc_info->t_deadline = chud_proc_info->t_deadline | ~(chud_proc_info->t_deadline); // set to max value
    chud_proc_info->cpu_timer_callback_fn = NULL;

    ml_set_interrupts_enabled(oldlevel);
    return KERN_SUCCESS;
}
kern_return_t chudxnu_cpu_timer_callback_cancel_all(void)
{
    unsigned int cpu;
    chudcpu_data_t *chud_proc_info;

    for(cpu=0; cpu<real_ncpus; cpu++) {
        if ((PerProcTable[cpu].ppe_vaddr == 0)
            || (PerProcTable[cpu].ppe_vaddr->pp_chud == 0))
            continue;
        chud_proc_info = (chudcpu_data_t *)PerProcTable[cpu].ppe_vaddr->pp_chud;
        timer_call_cancel(&(chud_proc_info->cpu_timer_call));
        chud_proc_info->t_deadline = chud_proc_info->t_deadline | ~(chud_proc_info->t_deadline); // set to max value
        chud_proc_info->cpu_timer_callback_fn = NULL;
    }
    return KERN_SUCCESS;
}
#pragma mark **** trap ****
static chudxnu_trap_callback_func_t trap_callback_fn = NULL;
#define TRAP_ENTRY_POINT(t) ((t==T_RESET) ? 0x100 : \
                            (t==T_MACHINE_CHECK) ? 0x200 : \
                            (t==T_DATA_ACCESS) ? 0x300 : \
                            (t==T_DATA_SEGMENT) ? 0x380 : \
                            (t==T_INSTRUCTION_ACCESS) ? 0x400 : \
                            (t==T_INSTRUCTION_SEGMENT) ? 0x480 : \
                            (t==T_INTERRUPT) ? 0x500 : \
                            (t==T_ALIGNMENT) ? 0x600 : \
                            (t==T_PROGRAM) ? 0x700 : \
                            (t==T_FP_UNAVAILABLE) ? 0x800 : \
                            (t==T_DECREMENTER) ? 0x900 : \
                            (t==T_IO_ERROR) ? 0xa00 : \
                            (t==T_RESERVED) ? 0xb00 : \
                            (t==T_SYSTEM_CALL) ? 0xc00 : \
                            (t==T_TRACE) ? 0xd00 : \
                            (t==T_FP_ASSIST) ? 0xe00 : \
                            (t==T_PERF_MON) ? 0xf00 : \
                            (t==T_VMX) ? 0xf20 : \
                            (t==T_INVALID_EXCP0) ? 0x1000 : \
                            (t==T_INVALID_EXCP1) ? 0x1100 : \
                            (t==T_INVALID_EXCP2) ? 0x1200 : \
                            (t==T_INSTRUCTION_BKPT) ? 0x1300 : \
                            (t==T_SYSTEM_MANAGEMENT) ? 0x1400 : \
                            (t==T_SOFT_PATCH) ? 0x1500 : \
                            (t==T_ALTIVEC_ASSIST) ? 0x1600 : \
                            (t==T_THERMAL) ? 0x1700 : \
                            (t==T_ARCHDEP0) ? 0x1800 : \
                            (t==T_INSTRUMENTATION) ? 0x2000 : \
                            0x0)
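/*
 * TRAP_ENTRY_POINT maps xnu's T_* trap codes back to the architectural
 * PowerPC exception vector offsets (0x100 system reset, 0x300 DSI, 0x400
 * ISI, 0x500 external interrupt, 0x700 program check, 0x900 decrementer,
 * 0xf00 performance monitor, and so on). Unrecognized codes fall through
 * to 0x0, which the trap callback below treats as "do not deliver".
 */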
static kern_return_t chudxnu_private_trap_callback(int trapno, struct savearea *ssp, unsigned int dsisr, unsigned int dar)
{
    boolean_t oldlevel = ml_set_interrupts_enabled(FALSE);
    kern_return_t retval = KERN_FAILURE;
    uint32_t trapentry = TRAP_ENTRY_POINT(trapno);

    if(trapentry!=0x0) {
        if(trap_callback_fn) {
            struct ppc_thread_state64 state;
            mach_msg_type_number_t count = PPC_THREAD_STATE64_COUNT;
            chudxnu_copy_savearea_to_threadstate(PPC_THREAD_STATE64, (thread_state_t)&state, &count, ssp);
            retval = (trap_callback_fn)(trapentry, PPC_THREAD_STATE64, (thread_state_t)&state, count);
        }
    }

    ml_set_interrupts_enabled(oldlevel);

    return retval;
}
kern_return_t chudxnu_trap_callback_enter(chudxnu_trap_callback_func_t func)
{
    trap_callback_fn = func;
    perfTrapHook = chudxnu_private_trap_callback;
    __asm__ volatile("eieio");  /* force order */
    __asm__ volatile("sync");   /* force to memory */
    return KERN_SUCCESS;
}
kern_return_t chudxnu_trap_callback_cancel(void)
{
    trap_callback_fn = NULL;
    perfTrapHook = NULL;
    __asm__ volatile("eieio");  /* force order */
    __asm__ volatile("sync");   /* force to memory */
    return KERN_SUCCESS;
}
#pragma mark **** ast ****
static chudxnu_perfmon_ast_callback_func_t perfmon_ast_callback_fn = NULL;
static kern_return_t chudxnu_private_chud_ast_callback(int trapno, struct savearea *ssp, unsigned int dsisr, unsigned int dar)
{
    boolean_t oldlevel = ml_set_interrupts_enabled(FALSE);
    ast_t *myast = ast_pending();
    kern_return_t retval = KERN_FAILURE;

    if(*myast & AST_PPC_CHUD_URGENT) {
        *myast &= ~(AST_PPC_CHUD_URGENT | AST_PPC_CHUD);
        if((*myast & AST_PREEMPTION) != AST_PREEMPTION) *myast &= ~(AST_URGENT);
        retval = KERN_SUCCESS;
    } else if(*myast & AST_PPC_CHUD) {
        *myast &= ~(AST_PPC_CHUD);
        retval = KERN_SUCCESS;
    }

    if(perfmon_ast_callback_fn) {
        struct ppc_thread_state64 state;
        mach_msg_type_number_t count;
        count = PPC_THREAD_STATE64_COUNT;

        if(chudxnu_thread_get_state(current_thread(), PPC_THREAD_STATE64, (thread_state_t)&state, &count, FALSE)==KERN_SUCCESS) {
            (perfmon_ast_callback_fn)(PPC_THREAD_STATE64, (thread_state_t)&state, count);
        }
    }
#if 0
    // ASTs from ihandler go through thandler and are made to look like traps
    // always handle AST_PPC_CHUD_URGENT if there's a callback
    // only handle AST_PPC_CHUD if it's the only AST pending
    if(perfmon_ast_callback_fn && ((*myast & AST_PPC_CHUD_URGENT) || ((*myast & AST_PPC_CHUD) && !(*myast & AST_URGENT)))) {
        struct ppc_thread_state64 state;
        mach_msg_type_number_t count = PPC_THREAD_STATE64_COUNT;
        chudxnu_copy_savearea_to_threadstate(PPC_THREAD_STATE64, (thread_state_t)&state, &count, ssp);
        if(*myast & AST_PPC_CHUD_URGENT) {
            *myast &= ~(AST_PPC_CHUD_URGENT | AST_PPC_CHUD);
            if((*myast & AST_PREEMPTION) != AST_PREEMPTION) *myast &= ~(AST_URGENT);
            retval = KERN_SUCCESS;
        } else if(*myast & AST_PPC_CHUD) {
            *myast &= ~(AST_PPC_CHUD);
            retval = KERN_SUCCESS;
        }
        (perfmon_ast_callback_fn)(PPC_THREAD_STATE64, (thread_state_t)&state, count);
    }
#endif

    ml_set_interrupts_enabled(oldlevel);
    return retval;
}
kern_return_t chudxnu_perfmon_ast_callback_enter(chudxnu_perfmon_ast_callback_func_t func)
{
    perfmon_ast_callback_fn = func;
    perfASTHook = chudxnu_private_chud_ast_callback;
    __asm__ volatile("eieio");  /* force order */
    __asm__ volatile("sync");   /* force to memory */
    return KERN_SUCCESS;
}
kern_return_t chudxnu_perfmon_ast_callback_cancel(void)
{
    perfmon_ast_callback_fn = NULL;
    perfASTHook = NULL;
    __asm__ volatile("eieio");  /* force order */
    __asm__ volatile("sync");   /* force to memory */
    return KERN_SUCCESS;
}
kern_return_t chudxnu_perfmon_ast_send_urgent(boolean_t urgent)
{
    boolean_t oldlevel = ml_set_interrupts_enabled(FALSE);
    ast_t *myast = ast_pending();

    if(urgent) {
        *myast |= (AST_PPC_CHUD_URGENT | AST_URGENT);
    } else {
        *myast |= (AST_PPC_CHUD);
    }

    ml_set_interrupts_enabled(oldlevel);
    return KERN_SUCCESS;
}
kern_return_t chudxnu_perfmon_ast_send(void)
{
    return chudxnu_perfmon_ast_send_urgent(TRUE);
}
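/*
 * A performance-monitor interrupt handler cannot safely run the client
 * callback from interrupt context, so it posts an AST instead:
 * chudxnu_perfmon_ast_send() marks the current thread with
 * AST_PPC_CHUD_URGENT | AST_URGENT, and the callback later runs from
 * chudxnu_private_chud_ast_callback() when the AST is taken on the way
 * out of the kernel.
 */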
#pragma mark **** interrupt ****
static chudxnu_interrupt_callback_func_t interrupt_callback_fn = NULL;
//extern perfCallback perfIntHook; /* function hook into interrupt() */
static kern_return_t chudxnu_private_interrupt_callback(int trapno, struct savearea *ssp, unsigned int dsisr, unsigned int dar)
{
    if(interrupt_callback_fn) {
        struct ppc_thread_state64 state;
        mach_msg_type_number_t count = PPC_THREAD_STATE64_COUNT;
        chudxnu_copy_savearea_to_threadstate(PPC_THREAD_STATE64, (thread_state_t)&state, &count, ssp);
        return (interrupt_callback_fn)(TRAP_ENTRY_POINT(trapno), PPC_THREAD_STATE64, (thread_state_t)&state, count);
    } else {
        return KERN_FAILURE;
    }
}
kern_return_t chudxnu_interrupt_callback_enter(chudxnu_interrupt_callback_func_t func)
{
    interrupt_callback_fn = func;
    perfIntHook = chudxnu_private_interrupt_callback;
    __asm__ volatile("eieio");  /* force order */
    __asm__ volatile("sync");   /* force to memory */
    return KERN_SUCCESS;
}
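/*
 * Sketch of an interrupt-side client (my_interrupt_hook is hypothetical;
 * the parameter shapes follow the call site in
 * chudxnu_private_interrupt_callback). The hook receives the exception
 * vector offset plus the interrupted thread state rebuilt from the
 * savearea, and its return value is handed back to interrupt():
 *
 *   static kern_return_t my_interrupt_hook(uint32_t entry, int flavor,
 *           thread_state_t state, mach_msg_type_number_t count)
 *   {
 *       // ... inspect the interrupted state ...
 *       return KERN_SUCCESS;
 *   }
 *
 *   chudxnu_interrupt_callback_enter(my_interrupt_hook);
 */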
kern_return_t chudxnu_interrupt_callback_cancel(void)
{
    interrupt_callback_fn = NULL;
    perfIntHook = NULL;
    __asm__ volatile("eieio");  /* force order */
    __asm__ volatile("sync");   /* force to memory */
    return KERN_SUCCESS;
}
#pragma mark **** cpu signal ****
static chudxnu_cpusig_callback_func_t cpusig_callback_fn = NULL;
extern perfCallback perfCpuSigHook; /* function hook into cpu_signal_handler() */
static kern_return_t chudxnu_private_cpu_signal_handler(int request, struct savearea *ssp, unsigned int arg0, unsigned int arg1)
{
    if(cpusig_callback_fn) {
        struct ppc_thread_state64 state;
        mach_msg_type_number_t count = PPC_THREAD_STATE64_COUNT;
        chudxnu_copy_savearea_to_threadstate(PPC_THREAD_STATE64, (thread_state_t)&state, &count, ssp);
        (cpusig_callback_fn)(request, PPC_THREAD_STATE64, (thread_state_t)&state, count);
    }
    return KERN_SUCCESS; // ignored
}
kern_return_t chudxnu_cpusig_callback_enter(chudxnu_cpusig_callback_func_t func)
{
    cpusig_callback_fn = func;
    perfCpuSigHook = chudxnu_private_cpu_signal_handler;
    __asm__ volatile("eieio");  /* force order */
    __asm__ volatile("sync");   /* force to memory */
    return KERN_SUCCESS;
}
kern_return_t chudxnu_cpusig_callback_cancel(void)
{
    cpusig_callback_fn = NULL;
    perfCpuSigHook = NULL;
    __asm__ volatile("eieio");  /* force order */
    __asm__ volatile("sync");   /* force to memory */
    return KERN_SUCCESS;
}
kern_return_t chudxnu_cpusig_send(int otherCPU, uint32_t request)
{
    int thisCPU;
    kern_return_t retval = KERN_FAILURE;
    int retries = 0;
    boolean_t oldlevel;
    uint32_t temp[2];

    oldlevel = ml_set_interrupts_enabled(FALSE);
    thisCPU = cpu_number();

    if(thisCPU!=otherCPU) {
        temp[0] = 0xFFFFFFFF;       /* set sync flag */
        temp[1] = request;          /* set request */
        __asm__ volatile("eieio");  /* force order */
        __asm__ volatile("sync");   /* force to memory */

        do {
            retval = cpu_signal(otherCPU, SIGPcpureq, CPRQchud, (uint32_t)&temp);
        } while(retval!=KERN_SUCCESS && (retries++)<16);

        if(retries>=16) {
            retval = KERN_FAILURE;
        } else {
            retval = hw_cpu_sync(temp, LockTimeOut); /* wait for the other processor */
            if(!retval) {
                retval = KERN_FAILURE;
            } else {
                retval = KERN_SUCCESS;
            }
        }
    } else {
        retval = KERN_INVALID_ARGUMENT;
    }

    ml_set_interrupts_enabled(oldlevel);
    return retval;
}
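/*
 * chudxnu_cpusig_send() hand-delivers a CHUD request to another cpu:
 * temp[0] is the sync flag (0xFFFFFFFF = pending) and temp[1] carries the
 * request word. cpu_signal(..., SIGPcpureq, CPRQchud, ...) is retried up
 * to 16 times, then hw_cpu_sync() spins (bounded by LockTimeOut) until
 * the target clears the sync flag, i.e. until
 * chudxnu_private_cpu_signal_handler() has run over there. Signalling
 * the current cpu is rejected with KERN_INVALID_ARGUMENT.
 */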
#pragma mark **** timer ****
chud_timer_t chudxnu_timer_alloc(chudxnu_timer_callback_func_t func, uint32_t param0)
{
    return (chud_timer_t)thread_call_allocate((thread_call_func_t)func, (thread_call_param_t)param0);
}
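/*
 * The chud_timer_t calls are thin wrappers over kernel thread_call_t. A
 * minimal lifecycle sketch (my_thread_hook and the interval constants are
 * illustrative only):
 *
 *   chud_timer_t t = chudxnu_timer_alloc(my_thread_hook, 0);  // param0
 *   chudxnu_timer_callback_enter(t, 42, 500, NSEC_PER_USEC);  // param1, 500us
 *   // ...
 *   chudxnu_timer_free(t); // cancels any pending call, then frees
 */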
kern_return_t chudxnu_timer_callback_enter(chud_timer_t timer, uint32_t param1, uint32_t time, uint32_t units)
{
    uint64_t t_delay;
    clock_interval_to_deadline(time, units, &t_delay);
    thread_call_enter1_delayed((thread_call_t)timer, (thread_call_param_t)param1, t_delay);
    return KERN_SUCCESS;
}
kern_return_t chudxnu_timer_callback_cancel(chud_timer_t timer)
{
    thread_call_cancel((thread_call_t)timer);
    return KERN_SUCCESS;
}
kern_return_t chudxnu_timer_free(chud_timer_t timer)
{
    thread_call_cancel((thread_call_t)timer);
    thread_call_free((thread_call_t)timer);
    return KERN_SUCCESS;
}
#pragma mark **** CHUD syscall (PPC) ****
typedef int (*PPCcallEnt)(struct savearea *save);
extern PPCcallEnt PPCcalls[];

static chudxnu_syscall_callback_func_t syscall_callback_fn = NULL;
static int chudxnu_private_syscall_callback(struct savearea *ssp)
{
    if(ssp) {
        if(syscall_callback_fn) {
            struct ppc_thread_state64 state;
            kern_return_t retval;
            mach_msg_type_number_t count = PPC_THREAD_STATE64_COUNT;
            chudxnu_copy_savearea_to_threadstate(PPC_THREAD_STATE64, (thread_state_t)&state, &count, ssp);
            ssp->save_r3 = (syscall_callback_fn)(PPC_THREAD_STATE64, (thread_state_t)&state, count);
        } else {
            ssp->save_r3 = KERN_FAILURE;
        }
    }

    return 1; // check for ASTs (always)
}
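/*
 * Whatever the client callback returns is stored into save_r3, so it
 * becomes the syscall's return value as seen by the user-space caller;
 * with no callback registered the slot answers KERN_FAILURE. Returning 1
 * tells the PPC syscall dispatcher to check for pending ASTs on the way
 * out.
 */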
kern_return_t chudxnu_syscall_callback_enter(chudxnu_syscall_callback_func_t func)
{
    syscall_callback_fn = func;
    PPCcalls[9] = chudxnu_private_syscall_callback;
    __asm__ volatile("eieio");  /* force order */
    __asm__ volatile("sync");   /* force to memory */
    return KERN_SUCCESS;
}
kern_return_t chudxnu_syscall_callback_cancel(void)
{
    syscall_callback_fn = NULL;
    PPCcalls[9] = NULL;
    __asm__ volatile("eieio");  /* force order */
    __asm__ volatile("sync");   /* force to memory */
    return KERN_SUCCESS;
}
#pragma mark **** thread timer - DEPRECATED ****

static thread_call_t thread_timer_call = NULL;
static chudxnu_thread_timer_callback_func_t thread_timer_callback_fn = NULL;
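/*
 * The deprecated thread timer is a single one-shot: the private callback
 * frees and clears thread_timer_call before invoking the client function,
 * so a fresh chudxnu_thread_timer_callback_enter() is required for each
 * shot, and only one may be pending at a time.
 */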
static void chudxnu_private_thread_timer_callback(thread_call_param_t param0, thread_call_param_t param1)
{
    if(thread_timer_call) {
        thread_call_free(thread_timer_call);
        thread_timer_call = NULL;

        if(thread_timer_callback_fn) {
            (thread_timer_callback_fn)((uint32_t)param0);
        }
    }
}
kern_return_t chudxnu_thread_timer_callback_enter(chudxnu_thread_timer_callback_func_t func, uint32_t param, uint32_t time, uint32_t units)
{
    if(!thread_timer_call) {
        uint64_t t_delay;
        thread_timer_callback_fn = func;
        thread_timer_call = thread_call_allocate((thread_call_func_t)chudxnu_private_thread_timer_callback, (thread_call_param_t)param);
        clock_interval_to_deadline(time, units, &t_delay);
        thread_call_enter_delayed(thread_timer_call, t_delay);
        return KERN_SUCCESS;
    } else {
        return KERN_FAILURE; // thread timer call already pending
    }
}
kern_return_t chudxnu_thread_timer_callback_cancel(void)
{
    if(thread_timer_call) {
        thread_call_cancel(thread_timer_call);
        thread_call_free(thread_timer_call);
        thread_timer_call = NULL;
    }
    thread_timer_callback_fn = NULL;
    return KERN_SUCCESS;
}