/*
 * Copyright (c) 2003 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * Copyright (c) 1999-2003 Apple Computer, Inc.  All Rights Reserved.
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
#include <mach/boolean.h>
#include <mach/mach_types.h>

#include <kern/kern_types.h>
#include <kern/timer_call.h>
#include <kern/thread_call.h>

#include <ppc/machine_routines.h>
#include <ppc/exception.h>
36 extern kern_return_t
chud_copy_savearea_to_threadstate(thread_flavor_t flavor
, thread_state_t tstate
, mach_msg_type_number_t
*count
, struct savearea
*sv
);
37 extern kern_return_t
chud_copy_threadstate_to_savearea(struct savearea
*sv
, thread_flavor_t flavor
, thread_state_t tstate
, mach_msg_type_number_t
*count
);
/*
 * Tear down every CHUD callback registered through this module (plus the
 * exit and thread-timer callbacks owned by other CHUD files, declared
 * extern locally). Used when the CHUD client detaches.
 */
void chudxnu_cancel_all_callbacks(void)
{
    extern void chudxnu_exit_callback_cancel(void);
    extern void chudxnu_thread_timer_callback_cancel(void);

    chudxnu_cpu_timer_callback_cancel_all();
    chudxnu_trap_callback_cancel();
    chudxnu_interrupt_callback_cancel();
    chudxnu_perfmon_ast_callback_cancel();
    chudxnu_cpusig_callback_cancel();
    chudxnu_kdebug_callback_cancel();
    chudxnu_exit_callback_cancel();
    chudxnu_thread_timer_callback_cancel();
}
55 #pragma mark **** cpu timer ****
56 static timer_call_data_t cpu_timer_call
[NCPUS
] = {{0}, {0}};
57 static uint64_t t_deadline
[NCPUS
] = {0xFFFFFFFFFFFFFFFFULL
, 0xFFFFFFFFFFFFFFFFULL
};
59 typedef void (*chudxnu_cpu_timer_callback_func_t
)(thread_flavor_t flavor
, thread_state_t tstate
, mach_msg_type_number_t count
);
60 static chudxnu_cpu_timer_callback_func_t cpu_timer_callback_fn
[NCPUS
] = {NULL
, NULL
};
62 static void chudxnu_private_cpu_timer_callback(timer_call_param_t param0
, timer_call_param_t param1
)
66 struct ppc_thread_state64 state
;
67 mach_msg_type_number_t count
;
69 oldlevel
= ml_set_interrupts_enabled(FALSE
);
72 count
= PPC_THREAD_STATE64_COUNT
;
73 if(chudxnu_thread_get_state(current_act(), PPC_THREAD_STATE64
, (thread_state_t
)&state
, &count
, FALSE
)==KERN_SUCCESS
) {
74 if(cpu_timer_callback_fn
[cpu
]) {
75 (cpu_timer_callback_fn
[cpu
])(PPC_THREAD_STATE64
, (thread_state_t
)&state
, count
);
79 ml_set_interrupts_enabled(oldlevel
);
83 kern_return_t
chudxnu_cpu_timer_callback_enter(chudxnu_cpu_timer_callback_func_t func
, uint32_t time
, uint32_t units
)
88 oldlevel
= ml_set_interrupts_enabled(FALSE
);
91 timer_call_cancel(&(cpu_timer_call
[cpu
])); // cancel any existing callback for this cpu
93 cpu_timer_callback_fn
[cpu
] = func
;
95 clock_interval_to_deadline(time
, units
, &(t_deadline
[cpu
]));
96 timer_call_setup(&(cpu_timer_call
[cpu
]), chudxnu_private_cpu_timer_callback
, NULL
);
97 timer_call_enter(&(cpu_timer_call
[cpu
]), t_deadline
[cpu
]);
99 ml_set_interrupts_enabled(oldlevel
);
104 kern_return_t
chudxnu_cpu_timer_callback_cancel(void)
109 oldlevel
= ml_set_interrupts_enabled(FALSE
);
112 timer_call_cancel(&(cpu_timer_call
[cpu
]));
113 t_deadline
[cpu
] = t_deadline
[cpu
] | ~(t_deadline
[cpu
]); // set to max value
114 cpu_timer_callback_fn
[cpu
] = NULL
;
116 ml_set_interrupts_enabled(oldlevel
);
121 kern_return_t
chudxnu_cpu_timer_callback_cancel_all(void)
125 for(cpu
=0; cpu
<NCPUS
; cpu
++) {
126 timer_call_cancel(&(cpu_timer_call
[cpu
]));
127 t_deadline
[cpu
] = t_deadline
[cpu
] | ~(t_deadline
[cpu
]); // set to max value
128 cpu_timer_callback_fn
[cpu
] = NULL
;
133 #pragma mark **** trap and ast ****
134 typedef kern_return_t (*chudxnu_trap_callback_func_t
)(uint32_t trapentry
, thread_flavor_t flavor
, thread_state_t tstate
, mach_msg_type_number_t count
);
135 static chudxnu_trap_callback_func_t trap_callback_fn
= NULL
;
137 typedef kern_return_t (*perfTrap
)(int trapno
, struct savearea
*ssp
, unsigned int dsisr
, unsigned int dar
);
138 extern perfTrap perfTrapHook
; /* function hook into trap() */
140 typedef void (*chudxnu_perfmon_ast_callback_func_t
)(thread_flavor_t flavor
, thread_state_t tstate
, mach_msg_type_number_t count
);
141 static chudxnu_perfmon_ast_callback_func_t perfmon_ast_callback_fn
= NULL
;
/*
 * Map a kernel trap type code (T_*) to the corresponding PowerPC
 * exception-vector entry address. Unrecognized trap types map to 0x0.
 * (The terminating 0x0 arm was missing in the garbled source; without
 * it the conditional chain is unterminated.)
 */
#define TRAP_ENTRY_POINT(t) ((t==T_RESET) ? 0x100 : \
                            (t==T_MACHINE_CHECK) ? 0x200 : \
                            (t==T_DATA_ACCESS) ? 0x300 : \
                            (t==T_DATA_SEGMENT) ? 0x380 : \
                            (t==T_INSTRUCTION_ACCESS) ? 0x400 : \
                            (t==T_INSTRUCTION_SEGMENT) ? 0x480 : \
                            (t==T_INTERRUPT) ? 0x500 : \
                            (t==T_ALIGNMENT) ? 0x600 : \
                            (t==T_PROGRAM) ? 0x700 : \
                            (t==T_FP_UNAVAILABLE) ? 0x800 : \
                            (t==T_DECREMENTER) ? 0x900 : \
                            (t==T_IO_ERROR) ? 0xa00 : \
                            (t==T_RESERVED) ? 0xb00 : \
                            (t==T_SYSTEM_CALL) ? 0xc00 : \
                            (t==T_TRACE) ? 0xd00 : \
                            (t==T_FP_ASSIST) ? 0xe00 : \
                            (t==T_PERF_MON) ? 0xf00 : \
                            (t==T_VMX) ? 0xf20 : \
                            (t==T_INVALID_EXCP0) ? 0x1000 : \
                            (t==T_INVALID_EXCP1) ? 0x1100 : \
                            (t==T_INVALID_EXCP2) ? 0x1200 : \
                            (t==T_INSTRUCTION_BKPT) ? 0x1300 : \
                            (t==T_SYSTEM_MANAGEMENT) ? 0x1400 : \
                            (t==T_SOFT_PATCH) ? 0x1500 : \
                            (t==T_ALTIVEC_ASSIST) ? 0x1600 : \
                            (t==T_THERMAL) ? 0x1700 : \
                            (t==T_ARCHDEP0) ? 0x1800 : \
                            (t==T_INSTRUMENTATION) ? 0x2000 : \
                            0x0)
173 static kern_return_t
chudxnu_private_trap_callback(int trapno
, struct savearea
*ssp
, unsigned int dsisr
, unsigned int dar
)
175 boolean_t oldlevel
= ml_set_interrupts_enabled(FALSE
);
176 int cpu
= cpu_number();
178 kern_return_t retval
= KERN_FAILURE
;
179 uint32_t trapentry
= TRAP_ENTRY_POINT(trapno
);
181 // ASTs from ihandler go through thandler and are made to look like traps
182 if(perfmon_ast_callback_fn
&& (need_ast
[cpu
] & AST_PPC_CHUD
)) {
183 struct ppc_thread_state64 state
;
184 mach_msg_type_number_t count
= PPC_THREAD_STATE64_COUNT
;
185 chudxnu_copy_savearea_to_threadstate(PPC_THREAD_STATE64
, (thread_state_t
)&state
, &count
, ssp
);
186 (perfmon_ast_callback_fn
)(PPC_THREAD_STATE64
, (thread_state_t
)&state
, count
);
187 need_ast
[cpu
] &= ~(AST_PPC_CHUD
);
191 if(trap_callback_fn
) {
192 struct ppc_thread_state64 state
;
193 mach_msg_type_number_t count
= PPC_THREAD_STATE64_COUNT
;
194 chudxnu_copy_savearea_to_threadstate(PPC_THREAD_STATE64
, (thread_state_t
)&state
, &count
, ssp
);
195 retval
= (trap_callback_fn
)(trapentry
, PPC_THREAD_STATE64
, (thread_state_t
)&state
, count
);
199 ml_set_interrupts_enabled(oldlevel
);
205 kern_return_t
chudxnu_trap_callback_enter(chudxnu_trap_callback_func_t func
)
207 trap_callback_fn
= func
;
208 perfTrapHook
= chudxnu_private_trap_callback
;
209 __asm__
volatile("eieio"); /* force order */
210 __asm__
volatile("sync"); /* force to memory */
215 kern_return_t
chudxnu_trap_callback_cancel(void)
217 trap_callback_fn
= NULL
;
218 if(!perfmon_ast_callback_fn
) {
221 __asm__
volatile("eieio"); /* force order */
222 __asm__
volatile("sync"); /* force to memory */
227 kern_return_t
chudxnu_perfmon_ast_callback_enter(chudxnu_perfmon_ast_callback_func_t func
)
229 perfmon_ast_callback_fn
= func
;
230 perfTrapHook
= chudxnu_private_trap_callback
;
231 __asm__
volatile("eieio"); /* force order */
232 __asm__
volatile("sync"); /* force to memory */
237 kern_return_t
chudxnu_perfmon_ast_callback_cancel(void)
239 perfmon_ast_callback_fn
= NULL
;
240 if(!trap_callback_fn
) {
243 __asm__
volatile("eieio"); /* force order */
244 __asm__
volatile("sync"); /* force to memory */
249 kern_return_t
chudxnu_perfmon_ast_send(void)
254 oldlevel
= ml_set_interrupts_enabled(FALSE
);
257 need_ast
[cpu
] |= (AST_PPC_CHUD
| AST_URGENT
);
259 ml_set_interrupts_enabled(oldlevel
);
263 #pragma mark **** interrupt ****
264 typedef kern_return_t (*chudxnu_interrupt_callback_func_t
)(uint32_t trapentry
, thread_flavor_t flavor
, thread_state_t tstate
, mach_msg_type_number_t count
);
265 static chudxnu_interrupt_callback_func_t interrupt_callback_fn
= NULL
;
267 extern perfTrap perfIntHook
; /* function hook into interrupt() */
269 static kern_return_t
chudxnu_private_interrupt_callback(int trapno
, struct savearea
*ssp
, unsigned int dsisr
, unsigned int dar
)
271 if(interrupt_callback_fn
) {
272 struct ppc_thread_state64 state
;
273 mach_msg_type_number_t count
= PPC_THREAD_STATE64_COUNT
;
274 chudxnu_copy_savearea_to_threadstate(PPC_THREAD_STATE64
, (thread_state_t
)&state
, &count
, ssp
);
275 return (interrupt_callback_fn
)(TRAP_ENTRY_POINT(trapno
), PPC_THREAD_STATE64
, (thread_state_t
)&state
, count
);
282 kern_return_t
chudxnu_interrupt_callback_enter(chudxnu_interrupt_callback_func_t func
)
284 interrupt_callback_fn
= func
;
285 perfIntHook
= chudxnu_private_interrupt_callback
;
286 __asm__
volatile("eieio"); /* force order */
287 __asm__
volatile("sync"); /* force to memory */
292 kern_return_t
chudxnu_interrupt_callback_cancel(void)
294 interrupt_callback_fn
= NULL
;
296 __asm__
volatile("eieio"); /* force order */
297 __asm__
volatile("sync"); /* force to memory */
301 #pragma mark **** cpu signal ****
302 typedef kern_return_t (*chudxnu_cpusig_callback_func_t
)(int request
, thread_flavor_t flavor
, thread_state_t tstate
, mach_msg_type_number_t count
);
303 static chudxnu_cpusig_callback_func_t cpusig_callback_fn
= NULL
;
305 extern perfTrap perfCpuSigHook
; /* function hook into cpu_signal_handler() */
307 static kern_return_t
chudxnu_private_cpu_signal_handler(int request
, struct savearea
*ssp
, unsigned int arg0
, unsigned int arg1
)
309 if(cpusig_callback_fn
) {
310 struct ppc_thread_state64 state
;
311 mach_msg_type_number_t count
= PPC_THREAD_STATE64_COUNT
;
312 chudxnu_copy_savearea_to_threadstate(PPC_THREAD_STATE64
, (thread_state_t
)&state
, &count
, ssp
);
313 (cpusig_callback_fn
)(request
, PPC_THREAD_STATE64
, (thread_state_t
)&state
, count
);
315 return KERN_SUCCESS
; // ignored
319 kern_return_t
chudxnu_cpusig_callback_enter(chudxnu_cpusig_callback_func_t func
)
321 cpusig_callback_fn
= func
;
322 perfCpuSigHook
= chudxnu_private_cpu_signal_handler
;
323 __asm__
volatile("eieio"); /* force order */
324 __asm__
volatile("sync"); /* force to memory */
329 kern_return_t
chudxnu_cpusig_callback_cancel(void)
331 cpusig_callback_fn
= NULL
;
332 perfCpuSigHook
= NULL
;
333 __asm__
volatile("eieio"); /* force order */
334 __asm__
volatile("sync"); /* force to memory */
339 kern_return_t
chudxnu_cpusig_send(int otherCPU
, uint32_t request
)
342 kern_return_t retval
= KERN_FAILURE
;
347 oldlevel
= ml_set_interrupts_enabled(FALSE
);
348 thisCPU
= cpu_number();
350 if(thisCPU
!=otherCPU
) {
351 temp
[0] = 0xFFFFFFFF; /* set sync flag */
352 temp
[1] = request
; /* set request */
353 __asm__
volatile("eieio"); /* force order */
354 __asm__
volatile("sync"); /* force to memory */
357 retval
=cpu_signal(otherCPU
, SIGPcpureq
, CPRQchud
, (uint32_t)&temp
);
358 } while(retval
!=KERN_SUCCESS
&& (retries
++)<16);
361 retval
= KERN_FAILURE
;
363 retval
= hw_cpu_sync(temp
, LockTimeOut
); /* wait for the other processor */
365 retval
= KERN_FAILURE
;
367 retval
= KERN_SUCCESS
;
371 retval
= KERN_INVALID_ARGUMENT
;
374 ml_set_interrupts_enabled(oldlevel
);
378 #pragma mark **** thread timer ****
380 static thread_call_t thread_timer_call
= NULL
;
382 typedef void (*chudxnu_thread_timer_callback_func_t
)(uint32_t arg
);
383 static chudxnu_thread_timer_callback_func_t thread_timer_callback_fn
= NULL
;
385 static void chudxnu_private_thread_timer_callback(thread_call_param_t param0
, thread_call_param_t param1
)
387 if(thread_timer_call
) {
388 thread_call_free(thread_timer_call
);
389 thread_timer_call
= NULL
;
391 if(thread_timer_callback_fn
) {
392 (thread_timer_callback_fn
)((uint32_t)param0
);
398 kern_return_t
chudxnu_thread_timer_callback_enter(chudxnu_thread_timer_callback_func_t func
, uint32_t arg
, uint32_t time
, uint32_t units
)
400 if(!thread_timer_call
) {
402 thread_timer_callback_fn
= func
;
403 thread_timer_call
= thread_call_allocate((thread_call_func_t
)chudxnu_private_thread_timer_callback
, (thread_call_param_t
)arg
);
404 clock_interval_to_deadline(time
, units
, &t_delay
);
405 thread_call_enter_delayed(thread_timer_call
, t_delay
);
408 return KERN_FAILURE
; // thread timer call already pending
413 kern_return_t
chudxnu_thread_timer_callback_cancel(void)
415 if(thread_timer_call
) {
416 thread_call_free(thread_timer_call
);
417 thread_timer_call
= NULL
;
419 thread_timer_callback_fn
= NULL
;