/*
 * Copyright (c) 2003 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
#include <stdint.h>

#include <mach/boolean.h>
#include <mach/mach_types.h>

#include <ppc/machine_routines.h>
#include <ppc/exception.h>

#include <kern/timer_call.h>
#include <kern/kern_types.h>
33 extern kern_return_t
chud_copy_savearea_to_threadstate(thread_flavor_t flavor
, thread_state_t tstate
, mach_msg_type_number_t
*count
, struct savearea
*sv
);
34 extern kern_return_t
chud_copy_threadstate_to_savearea(struct savearea
*sv
, thread_flavor_t flavor
, thread_state_t tstate
, mach_msg_type_number_t
*count
);
/*
 * Cancel every registered chud callback in one shot: per-cpu timers,
 * trap, interrupt, perfmon-AST, cpu-signal, kdebug, exit and thread
 * timer callbacks.  Safe to call when nothing is registered (each
 * cancel routine tolerates a NULL callback).
 */
void chudxnu_cancel_all_callbacks(void)
{
    /* these two are implemented in other chud translation units */
    extern void chudxnu_exit_callback_cancel(void);
    extern void chudxnu_thread_timer_callback_cancel(void);

    chudxnu_cpu_timer_callback_cancel_all();
    chudxnu_trap_callback_cancel();
    chudxnu_interrupt_callback_cancel();
    chudxnu_perfmon_ast_callback_cancel();
    chudxnu_cpusig_callback_cancel();
    chudxnu_kdebug_callback_cancel();
    chudxnu_exit_callback_cancel();
    chudxnu_thread_timer_callback_cancel();
}
52 #pragma mark **** cpu timer ****
53 static timer_call_data_t cpu_timer_call
[NCPUS
] = {{0}, {0}};
54 static uint64_t t_deadline
[NCPUS
] = {0xFFFFFFFFFFFFFFFFULL
, 0xFFFFFFFFFFFFFFFFULL
};
56 typedef void (*chudxnu_cpu_timer_callback_func_t
)(thread_flavor_t flavor
, thread_state_t tstate
, mach_msg_type_number_t count
);
57 static chudxnu_cpu_timer_callback_func_t cpu_timer_callback_fn
[NCPUS
] = {NULL
, NULL
};
59 static void chudxnu_private_cpu_timer_callback(timer_call_param_t param0
, timer_call_param_t param1
)
63 struct ppc_thread_state64 state
;
64 mach_msg_type_number_t count
;
66 oldlevel
= ml_set_interrupts_enabled(FALSE
);
69 count
= PPC_THREAD_STATE64_COUNT
;
70 if(chudxnu_thread_get_state(current_act(), PPC_THREAD_STATE64
, (thread_state_t
)&state
, &count
, FALSE
)==KERN_SUCCESS
) {
71 if(cpu_timer_callback_fn
[cpu
]) {
72 (cpu_timer_callback_fn
[cpu
])(PPC_THREAD_STATE64
, (thread_state_t
)&state
, count
);
76 ml_set_interrupts_enabled(oldlevel
);
80 kern_return_t
chudxnu_cpu_timer_callback_enter(chudxnu_cpu_timer_callback_func_t func
, uint32_t time
, uint32_t units
)
85 oldlevel
= ml_set_interrupts_enabled(FALSE
);
88 timer_call_cancel(&(cpu_timer_call
[cpu
])); // cancel any existing callback for this cpu
90 cpu_timer_callback_fn
[cpu
] = func
;
92 clock_interval_to_deadline(time
, units
, &(t_deadline
[cpu
]));
93 timer_call_setup(&(cpu_timer_call
[cpu
]), chudxnu_private_cpu_timer_callback
, NULL
);
94 timer_call_enter(&(cpu_timer_call
[cpu
]), t_deadline
[cpu
]);
96 ml_set_interrupts_enabled(oldlevel
);
101 kern_return_t
chudxnu_cpu_timer_callback_cancel(void)
106 oldlevel
= ml_set_interrupts_enabled(FALSE
);
109 timer_call_cancel(&(cpu_timer_call
[cpu
]));
110 t_deadline
[cpu
] = t_deadline
[cpu
] | ~(t_deadline
[cpu
]); // set to max value
111 cpu_timer_callback_fn
[cpu
] = NULL
;
113 ml_set_interrupts_enabled(oldlevel
);
118 kern_return_t
chudxnu_cpu_timer_callback_cancel_all(void)
122 for(cpu
=0; cpu
<NCPUS
; cpu
++) {
123 timer_call_cancel(&(cpu_timer_call
[cpu
]));
124 t_deadline
[cpu
] = t_deadline
[cpu
] | ~(t_deadline
[cpu
]); // set to max value
125 cpu_timer_callback_fn
[cpu
] = NULL
;
130 #pragma mark **** trap and ast ****
131 typedef kern_return_t (*chudxnu_trap_callback_func_t
)(uint32_t trapentry
, thread_flavor_t flavor
, thread_state_t tstate
, mach_msg_type_number_t count
);
132 static chudxnu_trap_callback_func_t trap_callback_fn
= NULL
;
134 typedef kern_return_t (*perfTrap
)(int trapno
, struct savearea
*ssp
, unsigned int dsisr
, unsigned int dar
);
135 extern perfTrap perfTrapHook
; /* function hook into trap() */
137 typedef void (*chudxnu_perfmon_ast_callback_func_t
)(thread_flavor_t flavor
, thread_state_t tstate
, mach_msg_type_number_t count
);
138 static chudxnu_perfmon_ast_callback_func_t perfmon_ast_callback_fn
= NULL
;
/*
 * Map an exception code to its PowerPC vector entry-point address;
 * unrecognized codes map to 0x0 (the final default arm was truncated in
 * the damaged source and is restored here — without it the macro does
 * not parse).
 */
#define TRAP_ENTRY_POINT(t) ((t==T_RESET)               ? 0x100 : \
                             (t==T_MACHINE_CHECK)       ? 0x200 : \
                             (t==T_DATA_ACCESS)         ? 0x300 : \
                             (t==T_DATA_SEGMENT)        ? 0x380 : \
                             (t==T_INSTRUCTION_ACCESS)  ? 0x400 : \
                             (t==T_INSTRUCTION_SEGMENT) ? 0x480 : \
                             (t==T_INTERRUPT)           ? 0x500 : \
                             (t==T_ALIGNMENT)           ? 0x600 : \
                             (t==T_PROGRAM)             ? 0x700 : \
                             (t==T_FP_UNAVAILABLE)      ? 0x800 : \
                             (t==T_DECREMENTER)         ? 0x900 : \
                             (t==T_IO_ERROR)            ? 0xa00 : \
                             (t==T_RESERVED)            ? 0xb00 : \
                             (t==T_SYSTEM_CALL)         ? 0xc00 : \
                             (t==T_TRACE)               ? 0xd00 : \
                             (t==T_FP_ASSIST)           ? 0xe00 : \
                             (t==T_PERF_MON)            ? 0xf00 : \
                             (t==T_VMX)                 ? 0xf20 : \
                             (t==T_INVALID_EXCP0)       ? 0x1000 : \
                             (t==T_INVALID_EXCP1)       ? 0x1100 : \
                             (t==T_INVALID_EXCP2)       ? 0x1200 : \
                             (t==T_INSTRUCTION_BKPT)    ? 0x1300 : \
                             (t==T_SYSTEM_MANAGEMENT)   ? 0x1400 : \
                             (t==T_SOFT_PATCH)          ? 0x1500 : \
                             (t==T_ALTIVEC_ASSIST)      ? 0x1600 : \
                             (t==T_THERMAL)             ? 0x1700 : \
                             (t==T_ARCHDEP0)            ? 0x1800 : \
                             (t==T_INSTRUMENTATION)     ? 0x2000 : \
                                                          0x0)
170 static kern_return_t
chudxnu_private_trap_callback(int trapno
, struct savearea
*ssp
, unsigned int dsisr
, unsigned int dar
)
172 boolean_t oldlevel
= ml_set_interrupts_enabled(FALSE
);
173 int cpu
= cpu_number();
175 kern_return_t retval
= KERN_FAILURE
;
176 uint32_t trapentry
= TRAP_ENTRY_POINT(trapno
);
178 // ASTs from ihandler go through thandler and are made to look like traps
179 if(perfmon_ast_callback_fn
&& (need_ast
[cpu
] & AST_PPC_CHUD
)) {
180 struct ppc_thread_state64 state
;
181 mach_msg_type_number_t count
= PPC_THREAD_STATE64_COUNT
;
182 chudxnu_copy_savearea_to_threadstate(PPC_THREAD_STATE64
, (thread_state_t
)&state
, &count
, ssp
);
183 (perfmon_ast_callback_fn
)(PPC_THREAD_STATE64
, (thread_state_t
)&state
, count
);
184 need_ast
[cpu
] &= ~(AST_PPC_CHUD
);
188 if(trap_callback_fn
) {
189 struct ppc_thread_state64 state
;
190 mach_msg_type_number_t count
= PPC_THREAD_STATE64_COUNT
;
191 chudxnu_copy_savearea_to_threadstate(PPC_THREAD_STATE64
, (thread_state_t
)&state
, &count
, ssp
);
192 retval
= (trap_callback_fn
)(trapentry
, PPC_THREAD_STATE64
, (thread_state_t
)&state
, count
);
196 ml_set_interrupts_enabled(oldlevel
);
202 kern_return_t
chudxnu_trap_callback_enter(chudxnu_trap_callback_func_t func
)
204 trap_callback_fn
= func
;
205 perfTrapHook
= chudxnu_private_trap_callback
;
206 __asm__
volatile("eieio"); /* force order */
207 __asm__
volatile("sync"); /* force to memory */
212 kern_return_t
chudxnu_trap_callback_cancel(void)
214 trap_callback_fn
= NULL
;
215 if(!perfmon_ast_callback_fn
) {
218 __asm__
volatile("eieio"); /* force order */
219 __asm__
volatile("sync"); /* force to memory */
224 kern_return_t
chudxnu_perfmon_ast_callback_enter(chudxnu_perfmon_ast_callback_func_t func
)
226 perfmon_ast_callback_fn
= func
;
227 perfTrapHook
= chudxnu_private_trap_callback
;
228 __asm__
volatile("eieio"); /* force order */
229 __asm__
volatile("sync"); /* force to memory */
234 kern_return_t
chudxnu_perfmon_ast_callback_cancel(void)
236 perfmon_ast_callback_fn
= NULL
;
237 if(!trap_callback_fn
) {
240 __asm__
volatile("eieio"); /* force order */
241 __asm__
volatile("sync"); /* force to memory */
246 kern_return_t
chudxnu_perfmon_ast_send(void)
251 oldlevel
= ml_set_interrupts_enabled(FALSE
);
254 need_ast
[cpu
] |= (AST_PPC_CHUD
| AST_URGENT
);
256 ml_set_interrupts_enabled(oldlevel
);
260 #pragma mark **** interrupt ****
261 typedef kern_return_t (*chudxnu_interrupt_callback_func_t
)(uint32_t trapentry
, thread_flavor_t flavor
, thread_state_t tstate
, mach_msg_type_number_t count
);
262 static chudxnu_interrupt_callback_func_t interrupt_callback_fn
= NULL
;
264 extern perfTrap perfIntHook
; /* function hook into interrupt() */
266 static kern_return_t
chudxnu_private_interrupt_callback(int trapno
, struct savearea
*ssp
, unsigned int dsisr
, unsigned int dar
)
268 if(interrupt_callback_fn
) {
269 struct ppc_thread_state64 state
;
270 mach_msg_type_number_t count
= PPC_THREAD_STATE64_COUNT
;
271 chudxnu_copy_savearea_to_threadstate(PPC_THREAD_STATE64
, (thread_state_t
)&state
, &count
, ssp
);
272 return (interrupt_callback_fn
)(TRAP_ENTRY_POINT(trapno
), PPC_THREAD_STATE64
, (thread_state_t
)&state
, count
);
279 kern_return_t
chudxnu_interrupt_callback_enter(chudxnu_interrupt_callback_func_t func
)
281 interrupt_callback_fn
= func
;
282 perfIntHook
= chudxnu_private_interrupt_callback
;
283 __asm__
volatile("eieio"); /* force order */
284 __asm__
volatile("sync"); /* force to memory */
289 kern_return_t
chudxnu_interrupt_callback_cancel(void)
291 interrupt_callback_fn
= NULL
;
293 __asm__
volatile("eieio"); /* force order */
294 __asm__
volatile("sync"); /* force to memory */
298 #pragma mark **** cpu signal ****
299 typedef kern_return_t (*chudxnu_cpusig_callback_func_t
)(int request
, thread_flavor_t flavor
, thread_state_t tstate
, mach_msg_type_number_t count
);
300 static chudxnu_cpusig_callback_func_t cpusig_callback_fn
= NULL
;
302 extern perfTrap perfCpuSigHook
; /* function hook into cpu_signal_handler() */
304 static kern_return_t
chudxnu_private_cpu_signal_handler(int request
, struct savearea
*ssp
, unsigned int arg0
, unsigned int arg1
)
306 if(cpusig_callback_fn
) {
307 struct ppc_thread_state64 state
;
308 mach_msg_type_number_t count
= PPC_THREAD_STATE64_COUNT
;
309 chudxnu_copy_savearea_to_threadstate(PPC_THREAD_STATE64
, (thread_state_t
)&state
, &count
, ssp
);
310 (cpusig_callback_fn
)(request
, PPC_THREAD_STATE64
, (thread_state_t
)&state
, count
);
312 return KERN_SUCCESS
; // ignored
316 kern_return_t
chudxnu_cpusig_callback_enter(chudxnu_cpusig_callback_func_t func
)
318 cpusig_callback_fn
= func
;
319 perfCpuSigHook
= chudxnu_private_cpu_signal_handler
;
320 __asm__
volatile("eieio"); /* force order */
321 __asm__
volatile("sync"); /* force to memory */
326 kern_return_t
chudxnu_cpusig_callback_cancel(void)
328 cpusig_callback_fn
= NULL
;
329 perfCpuSigHook
= NULL
;
330 __asm__
volatile("eieio"); /* force order */
331 __asm__
volatile("sync"); /* force to memory */
336 kern_return_t
chudxnu_cpusig_send(int otherCPU
, uint32_t request
)
339 kern_return_t retval
= KERN_FAILURE
;
344 oldlevel
= ml_set_interrupts_enabled(FALSE
);
345 thisCPU
= cpu_number();
347 if(thisCPU
!=otherCPU
) {
348 temp
[0] = 0xFFFFFFFF; /* set sync flag */
349 temp
[1] = request
; /* set request */
350 __asm__
volatile("eieio"); /* force order */
351 __asm__
volatile("sync"); /* force to memory */
354 retval
=cpu_signal(otherCPU
, SIGPcpureq
, CPRQchud
, (uint32_t)&temp
);
355 } while(retval
!=KERN_SUCCESS
&& (retries
++)<16);
358 retval
= KERN_FAILURE
;
360 retval
= hw_cpu_sync(temp
, LockTimeOut
); /* wait for the other processor */
362 retval
= KERN_FAILURE
;
364 retval
= KERN_SUCCESS
;
368 retval
= KERN_INVALID_ARGUMENT
;
371 ml_set_interrupts_enabled(oldlevel
);
375 #pragma mark **** thread timer ****
377 static thread_call_t thread_timer_call
= NULL
;
379 typedef void (*chudxnu_thread_timer_callback_func_t
)(uint32_t arg
);
380 static chudxnu_thread_timer_callback_func_t thread_timer_callback_fn
= NULL
;
382 static void chudxnu_private_thread_timer_callback(thread_call_param_t param0
, thread_call_param_t param1
)
384 if(thread_timer_call
) {
385 thread_call_free(thread_timer_call
);
386 thread_timer_call
= NULL
;
388 if(thread_timer_callback_fn
) {
389 (thread_timer_callback_fn
)((uint32_t)param0
);
395 kern_return_t
chudxnu_thread_timer_callback_enter(chudxnu_thread_timer_callback_func_t func
, uint32_t arg
, uint32_t time
, uint32_t units
)
397 if(!thread_timer_call
) {
399 thread_timer_callback_fn
= func
;
400 thread_timer_call
= thread_call_allocate((thread_call_func_t
)chudxnu_private_thread_timer_callback
, (thread_call_param_t
)arg
);
401 clock_interval_to_deadline(time
, units
, &t_delay
);
402 thread_call_enter_delayed(thread_timer_call
, t_delay
);
405 return KERN_FAILURE
; // thread timer call already pending
410 kern_return_t
chudxnu_thread_timer_callback_cancel(void)
412 if(thread_timer_call
) {
413 thread_call_free(thread_timer_call
);
414 thread_timer_call
= NULL
;
416 thread_timer_callback_fn
= NULL
;