]> git.saurik.com Git - apple/xnu.git/blob - osfmk/ppc/chud/chud_osfmk_callback.c
xnu-517.tar.gz
[apple/xnu.git] / osfmk / ppc / chud / chud_osfmk_callback.c
1 /*
2 * Copyright (c) 2003 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved.
7 *
8 * This file contains Original Code and/or Modifications of Original Code
9 * as defined in and that are subject to the Apple Public Source License
10 * Version 2.0 (the 'License'). You may not use this file except in
11 * compliance with the License. Please obtain a copy of the License at
12 * http://www.opensource.apple.com/apsl/ and read it before using this
13 * file.
14 *
15 * The Original Code and all software distributed under the License are
16 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
17 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
18 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
20 * Please see the License for the specific language governing rights and
21 * limitations under the License.
22 *
23 * @APPLE_LICENSE_HEADER_END@
24 */
25
26 #include <stdint.h>
27 #include <mach/boolean.h>
28 #include <mach/mach_types.h>
29
30 #include <ppc/machine_routines.h>
31 #include <ppc/exception.h>
32 #include <kern/ast.h>
33 #include <kern/timer_call.h>
34 #include <kern/kern_types.h>
35
36 extern kern_return_t chud_copy_savearea_to_threadstate(thread_flavor_t flavor, thread_state_t tstate, mach_msg_type_number_t *count, struct savearea *sv);
37 extern kern_return_t chud_copy_threadstate_to_savearea(struct savearea *sv, thread_flavor_t flavor, thread_state_t tstate, mach_msg_type_number_t *count);
38
__private_extern__
void chudxnu_cancel_all_callbacks(void)
{
    /* These two cancel routines live in another translation unit, hence the
     * local extern declarations. */
    extern void chudxnu_exit_callback_cancel(void);
    extern void chudxnu_thread_timer_callback_cancel(void);

    /* Tear down every CHUD hook registered through this module: per-cpu
     * timers (all cpus), trap, interrupt, perfmon AST, cross-cpu signal,
     * kdebug, exit, and the one-shot thread timer. */
    chudxnu_cpu_timer_callback_cancel_all();
    chudxnu_trap_callback_cancel();
    chudxnu_interrupt_callback_cancel();
    chudxnu_perfmon_ast_callback_cancel();
    chudxnu_cpusig_callback_cancel();
    chudxnu_kdebug_callback_cancel();
    chudxnu_exit_callback_cancel();
    chudxnu_thread_timer_callback_cancel();
}
54
#pragma mark **** cpu timer ****
/* Per-cpu timer call records; the initializers cover two slots, so NCPUS is
 * expected to be 2 here -- TODO confirm against the build configuration. */
static timer_call_data_t cpu_timer_call[NCPUS] = {{0}, {0}};
/* Per-cpu deadline; all-ones (max uint64) is used as the "no timer armed"
 * sentinel (see the cancel routines below). */
static uint64_t t_deadline[NCPUS] = {0xFFFFFFFFFFFFFFFFULL, 0xFFFFFFFFFFFFFFFFULL};

/* Client-supplied handler invoked when the per-cpu timer fires; receives the
 * interrupted thread's state in the given flavor. */
typedef void (*chudxnu_cpu_timer_callback_func_t)(thread_flavor_t flavor, thread_state_t tstate, mach_msg_type_number_t count);
static chudxnu_cpu_timer_callback_func_t cpu_timer_callback_fn[NCPUS] = {NULL, NULL};
61
62 static void chudxnu_private_cpu_timer_callback(timer_call_param_t param0, timer_call_param_t param1)
63 {
64 int cpu;
65 boolean_t oldlevel;
66 struct ppc_thread_state64 state;
67 mach_msg_type_number_t count;
68
69 oldlevel = ml_set_interrupts_enabled(FALSE);
70 cpu = cpu_number();
71
72 count = PPC_THREAD_STATE64_COUNT;
73 if(chudxnu_thread_get_state(current_act(), PPC_THREAD_STATE64, (thread_state_t)&state, &count, FALSE)==KERN_SUCCESS) {
74 if(cpu_timer_callback_fn[cpu]) {
75 (cpu_timer_callback_fn[cpu])(PPC_THREAD_STATE64, (thread_state_t)&state, count);
76 }
77 }
78
79 ml_set_interrupts_enabled(oldlevel);
80 }
81
82 __private_extern__
83 kern_return_t chudxnu_cpu_timer_callback_enter(chudxnu_cpu_timer_callback_func_t func, uint32_t time, uint32_t units)
84 {
85 int cpu;
86 boolean_t oldlevel;
87
88 oldlevel = ml_set_interrupts_enabled(FALSE);
89 cpu = cpu_number();
90
91 timer_call_cancel(&(cpu_timer_call[cpu])); // cancel any existing callback for this cpu
92
93 cpu_timer_callback_fn[cpu] = func;
94
95 clock_interval_to_deadline(time, units, &(t_deadline[cpu]));
96 timer_call_setup(&(cpu_timer_call[cpu]), chudxnu_private_cpu_timer_callback, NULL);
97 timer_call_enter(&(cpu_timer_call[cpu]), t_deadline[cpu]);
98
99 ml_set_interrupts_enabled(oldlevel);
100 return KERN_SUCCESS;
101 }
102
103 __private_extern__
104 kern_return_t chudxnu_cpu_timer_callback_cancel(void)
105 {
106 int cpu;
107 boolean_t oldlevel;
108
109 oldlevel = ml_set_interrupts_enabled(FALSE);
110 cpu = cpu_number();
111
112 timer_call_cancel(&(cpu_timer_call[cpu]));
113 t_deadline[cpu] = t_deadline[cpu] | ~(t_deadline[cpu]); // set to max value
114 cpu_timer_callback_fn[cpu] = NULL;
115
116 ml_set_interrupts_enabled(oldlevel);
117 return KERN_SUCCESS;
118 }
119
120 __private_extern__
121 kern_return_t chudxnu_cpu_timer_callback_cancel_all(void)
122 {
123 int cpu;
124
125 for(cpu=0; cpu<NCPUS; cpu++) {
126 timer_call_cancel(&(cpu_timer_call[cpu]));
127 t_deadline[cpu] = t_deadline[cpu] | ~(t_deadline[cpu]); // set to max value
128 cpu_timer_callback_fn[cpu] = NULL;
129 }
130 return KERN_SUCCESS;
131 }
132
#pragma mark **** trap and ast ****
/* Client trap handler: receives the exception vector address (see
 * TRAP_ENTRY_POINT) plus the interrupted thread's state. */
typedef kern_return_t (*chudxnu_trap_callback_func_t)(uint32_t trapentry, thread_flavor_t flavor, thread_state_t tstate, mach_msg_type_number_t count);
static chudxnu_trap_callback_func_t trap_callback_fn = NULL;

typedef kern_return_t (*perfTrap)(int trapno, struct savearea *ssp, unsigned int dsisr, unsigned int dar);
extern perfTrap perfTrapHook; /* function hook into trap() */

/* Client perfmon-AST handler: invoked from the trap path when the
 * AST_PPC_CHUD bit is pending for the cpu. */
typedef void (*chudxnu_perfmon_ast_callback_func_t)(thread_flavor_t flavor, thread_state_t tstate, mach_msg_type_number_t count);
static chudxnu_perfmon_ast_callback_func_t perfmon_ast_callback_fn = NULL;
142
/*
 * Map a PPC trap code (T_* from ppc/exception.h) to the hardware exception
 * vector address it corresponds to; evaluates to 0x0 for any unlisted code.
 * The argument is now fully parenthesized so expression arguments expand
 * safely; note it is still evaluated many times, so pass a side-effect-free
 * expression.
 */
#define TRAP_ENTRY_POINT(t) (((t)==T_RESET) ? 0x100 : \
                             ((t)==T_MACHINE_CHECK) ? 0x200 : \
                             ((t)==T_DATA_ACCESS) ? 0x300 : \
                             ((t)==T_DATA_SEGMENT) ? 0x380 : \
                             ((t)==T_INSTRUCTION_ACCESS) ? 0x400 : \
                             ((t)==T_INSTRUCTION_SEGMENT) ? 0x480 : \
                             ((t)==T_INTERRUPT) ? 0x500 : \
                             ((t)==T_ALIGNMENT) ? 0x600 : \
                             ((t)==T_PROGRAM) ? 0x700 : \
                             ((t)==T_FP_UNAVAILABLE) ? 0x800 : \
                             ((t)==T_DECREMENTER) ? 0x900 : \
                             ((t)==T_IO_ERROR) ? 0xa00 : \
                             ((t)==T_RESERVED) ? 0xb00 : \
                             ((t)==T_SYSTEM_CALL) ? 0xc00 : \
                             ((t)==T_TRACE) ? 0xd00 : \
                             ((t)==T_FP_ASSIST) ? 0xe00 : \
                             ((t)==T_PERF_MON) ? 0xf00 : \
                             ((t)==T_VMX) ? 0xf20 : \
                             ((t)==T_INVALID_EXCP0) ? 0x1000 : \
                             ((t)==T_INVALID_EXCP1) ? 0x1100 : \
                             ((t)==T_INVALID_EXCP2) ? 0x1200 : \
                             ((t)==T_INSTRUCTION_BKPT) ? 0x1300 : \
                             ((t)==T_SYSTEM_MANAGEMENT) ? 0x1400 : \
                             ((t)==T_SOFT_PATCH) ? 0x1500 : \
                             ((t)==T_ALTIVEC_ASSIST) ? 0x1600 : \
                             ((t)==T_THERMAL) ? 0x1700 : \
                             ((t)==T_ARCHDEP0) ? 0x1800 : \
                             ((t)==T_INSTRUMENTATION) ? 0x2000 : \
                             0x0)
172
/*
 * Shared hook installed as perfTrapHook: runs on traps (and on ASTs, which
 * ihandler routes through thandler to look like traps -- see comment below).
 * First delivers any pending CHUD perfmon AST for this cpu, then forwards a
 * recognized trap to the registered trap callback, converting the saved
 * context to PPC_THREAD_STATE64 flavor in both cases.
 * Returns the trap callback's result, or KERN_FAILURE when no trap callback
 * ran (presumably "not handled" to the caller -- confirm against trap()).
 */
static kern_return_t chudxnu_private_trap_callback(int trapno, struct savearea *ssp, unsigned int dsisr, unsigned int dar)
{
    boolean_t oldlevel = ml_set_interrupts_enabled(FALSE);
    int cpu = cpu_number();

    kern_return_t retval = KERN_FAILURE;
    uint32_t trapentry = TRAP_ENTRY_POINT(trapno);

    // ASTs from ihandler go through thandler and are made to look like traps
    if(perfmon_ast_callback_fn && (need_ast[cpu] & AST_PPC_CHUD)) {
        struct ppc_thread_state64 state;
        mach_msg_type_number_t count = PPC_THREAD_STATE64_COUNT;
        chudxnu_copy_savearea_to_threadstate(PPC_THREAD_STATE64, (thread_state_t)&state, &count, ssp);
        (perfmon_ast_callback_fn)(PPC_THREAD_STATE64, (thread_state_t)&state, count);
        need_ast[cpu] &= ~(AST_PPC_CHUD);   // consume the AST bit so it doesn't re-fire
    }

    if(trapentry!=0x0) {   // 0x0 == trap number not recognized by TRAP_ENTRY_POINT
        if(trap_callback_fn) {
            struct ppc_thread_state64 state;
            mach_msg_type_number_t count = PPC_THREAD_STATE64_COUNT;
            chudxnu_copy_savearea_to_threadstate(PPC_THREAD_STATE64, (thread_state_t)&state, &count, ssp);
            retval = (trap_callback_fn)(trapentry, PPC_THREAD_STATE64, (thread_state_t)&state, count);
        }
    }

    ml_set_interrupts_enabled(oldlevel);

    return retval;
}
203
/*
 * Register a client trap handler and install the shared trap/AST hook.
 * The handler is stored before the hook so the hook never observes a stale
 * handler; the barriers below flush both stores.  Always KERN_SUCCESS.
 */
__private_extern__
kern_return_t chudxnu_trap_callback_enter(chudxnu_trap_callback_func_t func)
{
    trap_callback_fn = func;
    perfTrapHook = chudxnu_private_trap_callback;
    __asm__ volatile("eieio"); /* force order */
    __asm__ volatile("sync"); /* force to memory */
    return KERN_SUCCESS;
}
213
/*
 * Unregister the client trap handler.  perfTrapHook is shared with the
 * perfmon AST path, so it is only torn down when no AST handler remains
 * registered.  Always KERN_SUCCESS.
 */
__private_extern__
kern_return_t chudxnu_trap_callback_cancel(void)
{
    trap_callback_fn = NULL;
    if(!perfmon_ast_callback_fn) {
        perfTrapHook = NULL;
    }
    __asm__ volatile("eieio"); /* force order */
    __asm__ volatile("sync"); /* force to memory */
    return KERN_SUCCESS;
}
225
/*
 * Register a client perfmon-AST handler.  Installs the same shared hook as
 * the trap path (chudxnu_private_trap_callback delivers both).  The handler
 * is stored before the hook; the barriers flush both stores.
 * Always KERN_SUCCESS.
 */
__private_extern__
kern_return_t chudxnu_perfmon_ast_callback_enter(chudxnu_perfmon_ast_callback_func_t func)
{
    perfmon_ast_callback_fn = func;
    perfTrapHook = chudxnu_private_trap_callback;
    __asm__ volatile("eieio"); /* force order */
    __asm__ volatile("sync"); /* force to memory */
    return KERN_SUCCESS;
}
235
/*
 * Unregister the client perfmon-AST handler.  perfTrapHook is shared with
 * the trap path, so it is only torn down when no trap handler remains
 * registered.  Always KERN_SUCCESS.
 */
__private_extern__
kern_return_t chudxnu_perfmon_ast_callback_cancel(void)
{
    perfmon_ast_callback_fn = NULL;
    if(!trap_callback_fn) {
        perfTrapHook = NULL;
    }
    __asm__ volatile("eieio"); /* force order */
    __asm__ volatile("sync"); /* force to memory */
    return KERN_SUCCESS;
}
247
248 __private_extern__
249 kern_return_t chudxnu_perfmon_ast_send(void)
250 {
251 int cpu;
252 boolean_t oldlevel;
253
254 oldlevel = ml_set_interrupts_enabled(FALSE);
255 cpu = cpu_number();
256
257 need_ast[cpu] |= (AST_PPC_CHUD | AST_URGENT);
258
259 ml_set_interrupts_enabled(oldlevel);
260 return KERN_SUCCESS;
261 }
262
#pragma mark **** interrupt ****
/* Client interrupt handler: receives the exception vector address (see
 * TRAP_ENTRY_POINT) plus the interrupted thread's state. */
typedef kern_return_t (*chudxnu_interrupt_callback_func_t)(uint32_t trapentry, thread_flavor_t flavor, thread_state_t tstate, mach_msg_type_number_t count);
static chudxnu_interrupt_callback_func_t interrupt_callback_fn = NULL;

extern perfTrap perfIntHook; /* function hook into interrupt() */
268
269 static kern_return_t chudxnu_private_interrupt_callback(int trapno, struct savearea *ssp, unsigned int dsisr, unsigned int dar)
270 {
271 if(interrupt_callback_fn) {
272 struct ppc_thread_state64 state;
273 mach_msg_type_number_t count = PPC_THREAD_STATE64_COUNT;
274 chudxnu_copy_savearea_to_threadstate(PPC_THREAD_STATE64, (thread_state_t)&state, &count, ssp);
275 return (interrupt_callback_fn)(TRAP_ENTRY_POINT(trapno), PPC_THREAD_STATE64, (thread_state_t)&state, count);
276 } else {
277 return KERN_FAILURE;
278 }
279 }
280
/*
 * Register a client interrupt handler and install the interrupt hook.
 * The handler is stored before the hook; the barriers flush both stores.
 * Always KERN_SUCCESS.
 */
__private_extern__
kern_return_t chudxnu_interrupt_callback_enter(chudxnu_interrupt_callback_func_t func)
{
    interrupt_callback_fn = func;
    perfIntHook = chudxnu_private_interrupt_callback;
    __asm__ volatile("eieio"); /* force order */
    __asm__ volatile("sync"); /* force to memory */
    return KERN_SUCCESS;
}
290
/*
 * Unregister the client interrupt handler and remove the interrupt hook
 * (unlike perfTrapHook, perfIntHook has no co-owner).  Always KERN_SUCCESS.
 */
__private_extern__
kern_return_t chudxnu_interrupt_callback_cancel(void)
{
    interrupt_callback_fn = NULL;
    perfIntHook = NULL;
    __asm__ volatile("eieio"); /* force order */
    __asm__ volatile("sync"); /* force to memory */
    return KERN_SUCCESS;
}
300
#pragma mark **** cpu signal ****
/* Client cross-cpu signal handler: receives the request code sent by
 * chudxnu_cpusig_send() plus the interrupted thread's state. */
typedef kern_return_t (*chudxnu_cpusig_callback_func_t)(int request, thread_flavor_t flavor, thread_state_t tstate, mach_msg_type_number_t count);
static chudxnu_cpusig_callback_func_t cpusig_callback_fn = NULL;

extern perfTrap perfCpuSigHook; /* function hook into cpu_signal_handler() */
306
307 static kern_return_t chudxnu_private_cpu_signal_handler(int request, struct savearea *ssp, unsigned int arg0, unsigned int arg1)
308 {
309 if(cpusig_callback_fn) {
310 struct ppc_thread_state64 state;
311 mach_msg_type_number_t count = PPC_THREAD_STATE64_COUNT;
312 chudxnu_copy_savearea_to_threadstate(PPC_THREAD_STATE64, (thread_state_t)&state, &count, ssp);
313 (cpusig_callback_fn)(request, PPC_THREAD_STATE64, (thread_state_t)&state, count);
314 }
315 return KERN_SUCCESS; // ignored
316 }
317
/*
 * Register a client cross-cpu signal handler and install the cpu-signal
 * hook.  The handler is stored before the hook; the barriers flush both
 * stores.  Always KERN_SUCCESS.
 */
__private_extern__
kern_return_t chudxnu_cpusig_callback_enter(chudxnu_cpusig_callback_func_t func)
{
    cpusig_callback_fn = func;
    perfCpuSigHook = chudxnu_private_cpu_signal_handler;
    __asm__ volatile("eieio"); /* force order */
    __asm__ volatile("sync"); /* force to memory */
    return KERN_SUCCESS;
}
327
/*
 * Unregister the client cross-cpu signal handler and remove the hook.
 * Always KERN_SUCCESS.
 */
__private_extern__
kern_return_t chudxnu_cpusig_callback_cancel(void)
{
    cpusig_callback_fn = NULL;
    perfCpuSigHook = NULL;
    __asm__ volatile("eieio"); /* force order */
    __asm__ volatile("sync"); /* force to memory */
    return KERN_SUCCESS;
}
337
338 __private_extern__
339 kern_return_t chudxnu_cpusig_send(int otherCPU, uint32_t request)
340 {
341 int thisCPU;
342 kern_return_t retval = KERN_FAILURE;
343 int retries = 0;
344 boolean_t oldlevel;
345 uint32_t temp[2];
346
347 oldlevel = ml_set_interrupts_enabled(FALSE);
348 thisCPU = cpu_number();
349
350 if(thisCPU!=otherCPU) {
351 temp[0] = 0xFFFFFFFF; /* set sync flag */
352 temp[1] = request; /* set request */
353 __asm__ volatile("eieio"); /* force order */
354 __asm__ volatile("sync"); /* force to memory */
355
356 do {
357 retval=cpu_signal(otherCPU, SIGPcpureq, CPRQchud, (uint32_t)&temp);
358 } while(retval!=KERN_SUCCESS && (retries++)<16);
359
360 if(retries>=16) {
361 retval = KERN_FAILURE;
362 } else {
363 retval = hw_cpu_sync(temp, LockTimeOut); /* wait for the other processor */
364 if(!retval) {
365 retval = KERN_FAILURE;
366 } else {
367 retval = KERN_SUCCESS;
368 }
369 }
370 } else {
371 retval = KERN_INVALID_ARGUMENT;
372 }
373
374 ml_set_interrupts_enabled(oldlevel);
375 return retval;
376 }
377
#pragma mark **** thread timer ****

/* The single outstanding one-shot thread call; NULL when none is pending
 * (enter() refuses a second arm while this is non-NULL). */
static thread_call_t thread_timer_call = NULL;

/* Client handler invoked once when the thread timer fires, with the arg
 * captured at enter() time. */
typedef void (*chudxnu_thread_timer_callback_func_t)(uint32_t arg);
static chudxnu_thread_timer_callback_func_t thread_timer_callback_fn = NULL;
384
/*
 * Thread-call trampoline: frees the one-shot thread call and clears the
 * pending marker before invoking the client handler with the arg captured
 * at chudxnu_thread_timer_callback_enter() time (param0).
 * NOTE(review): thread_timer_call is tested and cleared without a lock --
 * presumably safe only because a single timer may be outstanding at a time;
 * confirm against callers.
 */
static void chudxnu_private_thread_timer_callback(thread_call_param_t param0, thread_call_param_t param1)
{
    if(thread_timer_call) {
        thread_call_free(thread_timer_call);
        thread_timer_call = NULL;

        if(thread_timer_callback_fn) {
            (thread_timer_callback_fn)((uint32_t)param0);
        }
    }
}
396
397 __private_extern__
398 kern_return_t chudxnu_thread_timer_callback_enter(chudxnu_thread_timer_callback_func_t func, uint32_t arg, uint32_t time, uint32_t units)
399 {
400 if(!thread_timer_call) {
401 uint64_t t_delay;
402 thread_timer_callback_fn = func;
403 thread_timer_call = thread_call_allocate((thread_call_func_t)chudxnu_private_thread_timer_callback, (thread_call_param_t)arg);
404 clock_interval_to_deadline(time, units, &t_delay);
405 thread_call_enter_delayed(thread_timer_call, t_delay);
406 return KERN_SUCCESS;
407 } else {
408 return KERN_FAILURE; // thread timer call already pending
409 }
410 }
411
412 __private_extern__
413 kern_return_t chudxnu_thread_timer_callback_cancel(void)
414 {
415 if(thread_timer_call) {
416 thread_call_free(thread_timer_call);
417 thread_timer_call = NULL;
418 }
419 thread_timer_callback_fn = NULL;
420 return KERN_SUCCESS;
421 }