/*
 * Copyright (c) 2003-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
24 #include <mach/boolean.h>
25 #include <mach/mach_types.h>
27 #include <kern/kern_types.h>
28 #include <kern/processor.h>
29 #include <kern/timer_call.h>
30 #include <kern/thread_call.h>
31 #include <kern/kalloc.h>
32 #include <kern/thread.h>
34 #include <ppc/machine_routines.h>
35 #include <ppc/cpu_data.h>
36 #include <ppc/cpu_internal.h>
37 #include <ppc/exception.h>
38 #include <ppc/thread.h>
41 #include <ppc/chud/chud_xnu.h>
42 #include <ppc/chud/chud_xnu_private.h>
/*
 * Tear down every CHUD hook at once: cpu timers, trap/interrupt/AST/
 * cpusig/kdebug/syscall callbacks and the deprecated thread timer.
 */
void chudxnu_cancel_all_callbacks(void)
{
	chudxnu_cpu_timer_callback_cancel_all();
	chudxnu_trap_callback_cancel();
	chudxnu_interrupt_callback_cancel();
	chudxnu_perfmon_ast_callback_cancel();
	chudxnu_cpusig_callback_cancel();
	chudxnu_kdebug_callback_cancel();
	chudxnu_thread_timer_callback_cancel();
	chudxnu_syscall_callback_cancel();
}
57 #pragma mark **** cpu timer ****
59 timer_call_data_t cpu_timer_call
;
61 chudxnu_cpu_timer_callback_func_t cpu_timer_callback_fn
;
64 static chudcpu_data_t chudcpu_boot_cpu
;
66 void *chudxnu_per_proc_alloc(boolean_t boot_processor
)
68 chudcpu_data_t
*chud_proc_info
;
71 chud_proc_info
= &chudcpu_boot_cpu
;
73 chud_proc_info
= (chudcpu_data_t
*)kalloc(sizeof(chudcpu_data_t
));
74 if (chud_proc_info
== (chudcpu_data_t
*)NULL
) {
78 bzero((char *)chud_proc_info
, sizeof(chudcpu_data_t
));
79 chud_proc_info
->t_deadline
= 0xFFFFFFFFFFFFFFFFULL
;
80 return (void *)chud_proc_info
;
83 void chudxnu_per_proc_free(void *per_proc_chud
)
85 if (per_proc_chud
== (void *)&chudcpu_boot_cpu
) {
88 kfree(per_proc_chud
,sizeof(chudcpu_data_t
));
92 static void chudxnu_private_cpu_timer_callback(timer_call_param_t param0
, timer_call_param_t param1
)
94 chudcpu_data_t
*chud_proc_info
;
96 struct ppc_thread_state64 state
;
97 mach_msg_type_number_t count
;
99 oldlevel
= ml_set_interrupts_enabled(FALSE
);
100 chud_proc_info
= (chudcpu_data_t
*)(getPerProc()->pp_chud
);
102 count
= PPC_THREAD_STATE64_COUNT
;
103 if(chudxnu_thread_get_state(current_thread(), PPC_THREAD_STATE64
, (thread_state_t
)&state
, &count
, FALSE
)==KERN_SUCCESS
) {
104 if(chud_proc_info
->cpu_timer_callback_fn
) {
105 (chud_proc_info
->cpu_timer_callback_fn
)(PPC_THREAD_STATE64
, (thread_state_t
)&state
, count
);
109 ml_set_interrupts_enabled(oldlevel
);
113 kern_return_t
chudxnu_cpu_timer_callback_enter(chudxnu_cpu_timer_callback_func_t func
, uint32_t time
, uint32_t units
)
115 chudcpu_data_t
*chud_proc_info
;
118 oldlevel
= ml_set_interrupts_enabled(FALSE
);
119 chud_proc_info
= (chudcpu_data_t
*)(getPerProc()->pp_chud
);
121 timer_call_cancel(&(chud_proc_info
->cpu_timer_call
)); // cancel any existing callback for this cpu
123 chud_proc_info
->cpu_timer_callback_fn
= func
;
125 clock_interval_to_deadline(time
, units
, &(chud_proc_info
->t_deadline
));
126 timer_call_setup(&(chud_proc_info
->cpu_timer_call
), chudxnu_private_cpu_timer_callback
, NULL
);
127 timer_call_enter(&(chud_proc_info
->cpu_timer_call
), chud_proc_info
->t_deadline
);
129 ml_set_interrupts_enabled(oldlevel
);
134 kern_return_t
chudxnu_cpu_timer_callback_cancel(void)
136 chudcpu_data_t
*chud_proc_info
;
139 oldlevel
= ml_set_interrupts_enabled(FALSE
);
140 chud_proc_info
= (chudcpu_data_t
*)(getPerProc()->pp_chud
);
142 timer_call_cancel(&(chud_proc_info
->cpu_timer_call
));
143 chud_proc_info
->t_deadline
= chud_proc_info
->t_deadline
| ~(chud_proc_info
->t_deadline
); // set to max value
144 chud_proc_info
->cpu_timer_callback_fn
= NULL
;
146 ml_set_interrupts_enabled(oldlevel
);
151 kern_return_t
chudxnu_cpu_timer_callback_cancel_all(void)
154 chudcpu_data_t
*chud_proc_info
;
156 for(cpu
=0; cpu
<real_ncpus
; cpu
++) {
157 if ((PerProcTable
[cpu
].ppe_vaddr
== 0)
158 || (PerProcTable
[cpu
].ppe_vaddr
->pp_chud
== 0))
160 chud_proc_info
= (chudcpu_data_t
*)PerProcTable
[cpu
].ppe_vaddr
->pp_chud
;
161 timer_call_cancel(&(chud_proc_info
->cpu_timer_call
));
162 chud_proc_info
->t_deadline
= chud_proc_info
->t_deadline
| ~(chud_proc_info
->t_deadline
); // set to max value
163 chud_proc_info
->cpu_timer_callback_fn
= NULL
;
168 #pragma mark **** trap ****
169 static chudxnu_trap_callback_func_t trap_callback_fn
= NULL
;
171 #define TRAP_ENTRY_POINT(t) ((t==T_RESET) ? 0x100 : \
172 (t==T_MACHINE_CHECK) ? 0x200 : \
173 (t==T_DATA_ACCESS) ? 0x300 : \
174 (t==T_DATA_SEGMENT) ? 0x380 : \
175 (t==T_INSTRUCTION_ACCESS) ? 0x400 : \
176 (t==T_INSTRUCTION_SEGMENT) ? 0x480 : \
177 (t==T_INTERRUPT) ? 0x500 : \
178 (t==T_ALIGNMENT) ? 0x600 : \
179 (t==T_PROGRAM) ? 0x700 : \
180 (t==T_FP_UNAVAILABLE) ? 0x800 : \
181 (t==T_DECREMENTER) ? 0x900 : \
182 (t==T_IO_ERROR) ? 0xa00 : \
183 (t==T_RESERVED) ? 0xb00 : \
184 (t==T_SYSTEM_CALL) ? 0xc00 : \
185 (t==T_TRACE) ? 0xd00 : \
186 (t==T_FP_ASSIST) ? 0xe00 : \
187 (t==T_PERF_MON) ? 0xf00 : \
188 (t==T_VMX) ? 0xf20 : \
189 (t==T_INVALID_EXCP0) ? 0x1000 : \
190 (t==T_INVALID_EXCP1) ? 0x1100 : \
191 (t==T_INVALID_EXCP2) ? 0x1200 : \
192 (t==T_INSTRUCTION_BKPT) ? 0x1300 : \
193 (t==T_SYSTEM_MANAGEMENT) ? 0x1400 : \
194 (t==T_SOFT_PATCH) ? 0x1500 : \
195 (t==T_ALTIVEC_ASSIST) ? 0x1600 : \
196 (t==T_THERMAL) ? 0x1700 : \
197 (t==T_ARCHDEP0) ? 0x1800 : \
198 (t==T_INSTRUMENTATION) ? 0x2000 : \
201 static kern_return_t
chudxnu_private_trap_callback(int trapno
, struct savearea
*ssp
, unsigned int dsisr
, unsigned int dar
)
203 boolean_t oldlevel
= ml_set_interrupts_enabled(FALSE
);
204 kern_return_t retval
= KERN_FAILURE
;
205 uint32_t trapentry
= TRAP_ENTRY_POINT(trapno
);
208 if(trap_callback_fn
) {
209 struct ppc_thread_state64 state
;
210 mach_msg_type_number_t count
= PPC_THREAD_STATE64_COUNT
;
211 chudxnu_copy_savearea_to_threadstate(PPC_THREAD_STATE64
, (thread_state_t
)&state
, &count
, ssp
);
212 retval
= (trap_callback_fn
)(trapentry
, PPC_THREAD_STATE64
, (thread_state_t
)&state
, count
);
216 ml_set_interrupts_enabled(oldlevel
);
222 kern_return_t
chudxnu_trap_callback_enter(chudxnu_trap_callback_func_t func
)
224 trap_callback_fn
= func
;
225 perfTrapHook
= chudxnu_private_trap_callback
;
226 __asm__
volatile("eieio"); /* force order */
227 __asm__
volatile("sync"); /* force to memory */
232 kern_return_t
chudxnu_trap_callback_cancel(void)
234 trap_callback_fn
= NULL
;
236 __asm__
volatile("eieio"); /* force order */
237 __asm__
volatile("sync"); /* force to memory */
241 #pragma mark **** ast ****
242 static chudxnu_perfmon_ast_callback_func_t perfmon_ast_callback_fn
= NULL
;
244 static kern_return_t
chudxnu_private_chud_ast_callback(int trapno
, struct savearea
*ssp
, unsigned int dsisr
, unsigned int dar
)
246 boolean_t oldlevel
= ml_set_interrupts_enabled(FALSE
);
247 ast_t
*myast
= ast_pending();
248 kern_return_t retval
= KERN_FAILURE
;
250 if(*myast
& AST_PPC_CHUD_URGENT
) {
251 *myast
&= ~(AST_PPC_CHUD_URGENT
| AST_PPC_CHUD
);
252 if((*myast
& AST_PREEMPTION
) != AST_PREEMPTION
) *myast
&= ~(AST_URGENT
);
253 retval
= KERN_SUCCESS
;
254 } else if(*myast
& AST_PPC_CHUD
) {
255 *myast
&= ~(AST_PPC_CHUD
);
256 retval
= KERN_SUCCESS
;
259 if(perfmon_ast_callback_fn
) {
260 struct ppc_thread_state64 state
;
261 mach_msg_type_number_t count
;
262 count
= PPC_THREAD_STATE64_COUNT
;
264 if(chudxnu_thread_get_state(current_thread(), PPC_THREAD_STATE64
, (thread_state_t
)&state
, &count
, FALSE
)==KERN_SUCCESS
) {
265 (perfmon_ast_callback_fn
)(PPC_THREAD_STATE64
, (thread_state_t
)&state
, count
);
270 // ASTs from ihandler go through thandler and are made to look like traps
271 // always handle AST_PPC_CHUD_URGENT if there's a callback
272 // only handle AST_PPC_CHUD if it's the only AST pending
273 if(perfmon_ast_callback_fn
&& ((*myast
& AST_PPC_CHUD_URGENT
) || ((*myast
& AST_PPC_CHUD
) && !(*myast
& AST_URGENT
)))) {
274 struct ppc_thread_state64 state
;
275 mach_msg_type_number_t count
= PPC_THREAD_STATE64_COUNT
;
276 chudxnu_copy_savearea_to_threadstate(PPC_THREAD_STATE64
, (thread_state_t
)&state
, &count
, ssp
);
277 if(*myast
& AST_PPC_CHUD_URGENT
) {
278 *myast
&= ~(AST_PPC_CHUD_URGENT
| AST_PPC_CHUD
);
279 if((*myast
& AST_PREEMPTION
) != AST_PREEMPTION
) *myast
&= ~(AST_URGENT
);
280 retval
= KERN_SUCCESS
;
281 } else if(*myast
& AST_PPC_CHUD
) {
282 *myast
&= ~(AST_PPC_CHUD
);
283 retval
= KERN_SUCCESS
;
285 (perfmon_ast_callback_fn
)(PPC_THREAD_STATE64
, (thread_state_t
)&state
, count
);
289 ml_set_interrupts_enabled(oldlevel
);
294 kern_return_t
chudxnu_perfmon_ast_callback_enter(chudxnu_perfmon_ast_callback_func_t func
)
296 perfmon_ast_callback_fn
= func
;
297 perfASTHook
= chudxnu_private_chud_ast_callback
;
298 __asm__
volatile("eieio"); /* force order */
299 __asm__
volatile("sync"); /* force to memory */
304 kern_return_t
chudxnu_perfmon_ast_callback_cancel(void)
306 perfmon_ast_callback_fn
= NULL
;
308 __asm__
volatile("eieio"); /* force order */
309 __asm__
volatile("sync"); /* force to memory */
314 kern_return_t
chudxnu_perfmon_ast_send_urgent(boolean_t urgent
)
316 boolean_t oldlevel
= ml_set_interrupts_enabled(FALSE
);
317 ast_t
*myast
= ast_pending();
320 *myast
|= (AST_PPC_CHUD_URGENT
| AST_URGENT
);
322 *myast
|= (AST_PPC_CHUD
);
325 ml_set_interrupts_enabled(oldlevel
);
330 kern_return_t
chudxnu_perfmon_ast_send(void)
332 return chudxnu_perfmon_ast_send_urgent(TRUE
);
335 #pragma mark **** interrupt ****
336 static chudxnu_interrupt_callback_func_t interrupt_callback_fn
= NULL
;
337 //extern perfCallback perfIntHook; /* function hook into interrupt() */
339 static kern_return_t
chudxnu_private_interrupt_callback(int trapno
, struct savearea
*ssp
, unsigned int dsisr
, unsigned int dar
)
341 if(interrupt_callback_fn
) {
342 struct ppc_thread_state64 state
;
343 mach_msg_type_number_t count
= PPC_THREAD_STATE64_COUNT
;
344 chudxnu_copy_savearea_to_threadstate(PPC_THREAD_STATE64
, (thread_state_t
)&state
, &count
, ssp
);
345 return (interrupt_callback_fn
)(TRAP_ENTRY_POINT(trapno
), PPC_THREAD_STATE64
, (thread_state_t
)&state
, count
);
352 kern_return_t
chudxnu_interrupt_callback_enter(chudxnu_interrupt_callback_func_t func
)
354 interrupt_callback_fn
= func
;
355 perfIntHook
= chudxnu_private_interrupt_callback
;
356 __asm__
volatile("eieio"); /* force order */
357 __asm__
volatile("sync"); /* force to memory */
362 kern_return_t
chudxnu_interrupt_callback_cancel(void)
364 interrupt_callback_fn
= NULL
;
366 __asm__
volatile("eieio"); /* force order */
367 __asm__
volatile("sync"); /* force to memory */
371 #pragma mark **** cpu signal ****
372 static chudxnu_cpusig_callback_func_t cpusig_callback_fn
= NULL
;
373 extern perfCallback perfCpuSigHook
; /* function hook into cpu_signal_handler() */
375 static kern_return_t
chudxnu_private_cpu_signal_handler(int request
, struct savearea
*ssp
, unsigned int arg0
, unsigned int arg1
)
377 if(cpusig_callback_fn
) {
378 struct ppc_thread_state64 state
;
379 mach_msg_type_number_t count
= PPC_THREAD_STATE64_COUNT
;
380 chudxnu_copy_savearea_to_threadstate(PPC_THREAD_STATE64
, (thread_state_t
)&state
, &count
, ssp
);
381 (cpusig_callback_fn
)(request
, PPC_THREAD_STATE64
, (thread_state_t
)&state
, count
);
383 return KERN_SUCCESS
; // ignored
387 kern_return_t
chudxnu_cpusig_callback_enter(chudxnu_cpusig_callback_func_t func
)
389 cpusig_callback_fn
= func
;
390 perfCpuSigHook
= chudxnu_private_cpu_signal_handler
;
391 __asm__
volatile("eieio"); /* force order */
392 __asm__
volatile("sync"); /* force to memory */
397 kern_return_t
chudxnu_cpusig_callback_cancel(void)
399 cpusig_callback_fn
= NULL
;
400 perfCpuSigHook
= NULL
;
401 __asm__
volatile("eieio"); /* force order */
402 __asm__
volatile("sync"); /* force to memory */
407 kern_return_t
chudxnu_cpusig_send(int otherCPU
, uint32_t request
)
410 kern_return_t retval
= KERN_FAILURE
;
415 oldlevel
= ml_set_interrupts_enabled(FALSE
);
416 thisCPU
= cpu_number();
418 if(thisCPU
!=otherCPU
) {
419 temp
[0] = 0xFFFFFFFF; /* set sync flag */
420 temp
[1] = request
; /* set request */
421 __asm__
volatile("eieio"); /* force order */
422 __asm__
volatile("sync"); /* force to memory */
425 retval
=cpu_signal(otherCPU
, SIGPcpureq
, CPRQchud
, (uint32_t)&temp
);
426 } while(retval
!=KERN_SUCCESS
&& (retries
++)<16);
429 retval
= KERN_FAILURE
;
431 retval
= hw_cpu_sync(temp
, LockTimeOut
); /* wait for the other processor */
433 retval
= KERN_FAILURE
;
435 retval
= KERN_SUCCESS
;
439 retval
= KERN_INVALID_ARGUMENT
;
442 ml_set_interrupts_enabled(oldlevel
);
446 #pragma mark **** timer ****
448 chud_timer_t
chudxnu_timer_alloc(chudxnu_timer_callback_func_t func
, uint32_t param0
)
450 return (chud_timer_t
)thread_call_allocate((thread_call_func_t
)func
, (thread_call_param_t
)param0
);
454 kern_return_t
chudxnu_timer_callback_enter(chud_timer_t timer
, uint32_t param1
, uint32_t time
, uint32_t units
)
457 clock_interval_to_deadline(time
, units
, &t_delay
);
458 thread_call_enter1_delayed((thread_call_t
)timer
, (thread_call_param_t
)param1
, t_delay
);
463 kern_return_t
chudxnu_timer_callback_cancel(chud_timer_t timer
)
465 thread_call_cancel((thread_call_t
)timer
);
470 kern_return_t
chudxnu_timer_free(chud_timer_t timer
)
472 thread_call_cancel((thread_call_t
)timer
);
473 thread_call_free((thread_call_t
)timer
);
477 #pragma mark **** CHUD syscall (PPC) ****
479 typedef int (*PPCcallEnt
)(struct savearea
*save
);
480 extern PPCcallEnt PPCcalls
[];
482 static chudxnu_syscall_callback_func_t syscall_callback_fn
= NULL
;
484 static int chudxnu_private_syscall_callback(struct savearea
*ssp
)
487 if(syscall_callback_fn
) {
488 struct ppc_thread_state64 state
;
489 kern_return_t retval
;
490 mach_msg_type_number_t count
= PPC_THREAD_STATE64_COUNT
;
491 chudxnu_copy_savearea_to_threadstate(PPC_THREAD_STATE64
, (thread_state_t
)&state
, &count
, ssp
);
492 ssp
->save_r3
= (syscall_callback_fn
)(PPC_THREAD_STATE64
, (thread_state_t
)&state
, count
);
494 ssp
->save_r3
= KERN_FAILURE
;
498 return 1; // check for ASTs (always)
502 kern_return_t
chudxnu_syscall_callback_enter(chudxnu_syscall_callback_func_t func
)
504 syscall_callback_fn
= func
;
505 PPCcalls
[9] = chudxnu_private_syscall_callback
;
506 __asm__
volatile("eieio"); /* force order */
507 __asm__
volatile("sync"); /* force to memory */
512 kern_return_t
chudxnu_syscall_callback_cancel(void)
514 syscall_callback_fn
= NULL
;
516 __asm__
volatile("eieio"); /* force order */
517 __asm__
volatile("sync"); /* force to memory */
521 #pragma mark **** thread timer - DEPRECATED ****
523 static thread_call_t thread_timer_call
= NULL
;
524 static chudxnu_thread_timer_callback_func_t thread_timer_callback_fn
= NULL
;
526 static void chudxnu_private_thread_timer_callback(thread_call_param_t param0
, thread_call_param_t param1
)
528 if(thread_timer_call
) {
529 thread_call_free(thread_timer_call
);
530 thread_timer_call
= NULL
;
532 if(thread_timer_callback_fn
) {
533 (thread_timer_callback_fn
)((uint32_t)param0
);
540 kern_return_t
chudxnu_thread_timer_callback_enter(chudxnu_thread_timer_callback_func_t func
, uint32_t param
, uint32_t time
, uint32_t units
)
542 if(!thread_timer_call
) {
544 thread_timer_callback_fn
= func
;
545 thread_timer_call
= thread_call_allocate((thread_call_func_t
)chudxnu_private_thread_timer_callback
, (thread_call_param_t
)param
);
546 clock_interval_to_deadline(time
, units
, &t_delay
);
547 thread_call_enter_delayed(thread_timer_call
, t_delay
);
550 return KERN_FAILURE
; // thread timer call already pending
556 kern_return_t
chudxnu_thread_timer_callback_cancel(void)
558 if(thread_timer_call
) {
559 thread_call_cancel(thread_timer_call
);
560 thread_call_free(thread_timer_call
);
561 thread_timer_call
= NULL
;
563 thread_timer_callback_fn
= NULL
;