/*
 * Copyright (c) 2003-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */

#include <stdint.h>
#include <mach/boolean.h>
#include <mach/mach_types.h>

#include <kern/kern_types.h>
#include <kern/processor.h>
#include <kern/timer_call.h>
#include <kern/thread_call.h>
#include <kern/kalloc.h>
#include <kern/thread.h>

#include <ppc/machine_routines.h>
#include <ppc/cpu_data.h>
#include <ppc/cpu_internal.h>
#include <ppc/exception.h>
#include <ppc/thread.h>
#include <ppc/trap.h>

#include <ppc/chud/chud_xnu.h>
#include <ppc/chud/chud_xnu_private.h>

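/*
 * This file implements the kernel side of the CHUD (Computer Hardware
 * Understanding Development) tools interface on PPC: a profiling client
 * can register callbacks that fire on per-CPU timers, traps, interrupts,
 * perfmon ASTs, cross-CPU signals, and a dedicated syscall slot.
 * chudxnu_cancel_all_callbacks() tears down every registration at once,
 * e.g. when the client goes away.
 */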
__private_extern__
void chudxnu_cancel_all_callbacks(void)
{
    chudxnu_cpu_timer_callback_cancel_all();
    chudxnu_trap_callback_cancel();
    chudxnu_interrupt_callback_cancel();
    chudxnu_perfmon_ast_callback_cancel();
    chudxnu_cpusig_callback_cancel();
    chudxnu_kdebug_callback_cancel();
    chudxnu_thread_timer_callback_cancel();
    chudxnu_syscall_callback_cancel();
}

#pragma mark **** cpu timer ****
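/*
 * Per-CPU timer state, hung off each processor's per-proc area
 * (pp_chud). The boot processor's record is allocated statically so it
 * is available before the allocator is; records for the other
 * processors are kalloc()ed as they come up.
 */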
typedef struct {
    timer_call_data_t                   cpu_timer_call;
    uint64_t                            t_deadline;
    chudxnu_cpu_timer_callback_func_t   cpu_timer_callback_fn;
} chudcpu_data_t;

static chudcpu_data_t chudcpu_boot_cpu;

void *chudxnu_per_proc_alloc(boolean_t boot_processor)
{
    chudcpu_data_t *chud_proc_info;

    if (boot_processor) {
        chud_proc_info = &chudcpu_boot_cpu;
    } else {
        chud_proc_info = (chudcpu_data_t *)kalloc(sizeof(chudcpu_data_t));
        if (chud_proc_info == NULL) {
            return NULL;
        }
    }
    bzero((char *)chud_proc_info, sizeof(chudcpu_data_t));
    chud_proc_info->t_deadline = 0xFFFFFFFFFFFFFFFFULL; /* i.e. "never" */
    return (void *)chud_proc_info;
}

void chudxnu_per_proc_free(void *per_proc_chud)
{
    if (per_proc_chud == (void *)&chudcpu_boot_cpu) {
        return; /* the boot processor's record is static, never freed */
    }
    kfree(per_proc_chud, sizeof(chudcpu_data_t));
}

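/*
 * Fires when this CPU's timer deadline expires. Runs with interrupts
 * disabled; snapshots the current thread's register state and hands it
 * to the registered callback, if any.
 */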
static void chudxnu_private_cpu_timer_callback(timer_call_param_t param0, timer_call_param_t param1)
{
    chudcpu_data_t *chud_proc_info;
    boolean_t oldlevel;
    struct ppc_thread_state64 state;
    mach_msg_type_number_t count;

    oldlevel = ml_set_interrupts_enabled(FALSE);
    chud_proc_info = (chudcpu_data_t *)(getPerProc()->pp_chud);

    count = PPC_THREAD_STATE64_COUNT;
    if (chudxnu_thread_get_state(current_thread(), PPC_THREAD_STATE64,
                                 (thread_state_t)&state, &count, FALSE) == KERN_SUCCESS) {
        if (chud_proc_info->cpu_timer_callback_fn) {
            (chud_proc_info->cpu_timer_callback_fn)(PPC_THREAD_STATE64, (thread_state_t)&state, count);
        }
    }

    ml_set_interrupts_enabled(oldlevel);
}

__private_extern__
kern_return_t chudxnu_cpu_timer_callback_enter(chudxnu_cpu_timer_callback_func_t func, uint32_t time, uint32_t units)
{
    chudcpu_data_t *chud_proc_info;
    boolean_t oldlevel;

    oldlevel = ml_set_interrupts_enabled(FALSE);
    chud_proc_info = (chudcpu_data_t *)(getPerProc()->pp_chud);

    timer_call_cancel(&(chud_proc_info->cpu_timer_call)); // cancel any existing callback for this cpu

    chud_proc_info->cpu_timer_callback_fn = func;

    clock_interval_to_deadline(time, units, &(chud_proc_info->t_deadline));
    timer_call_setup(&(chud_proc_info->cpu_timer_call), chudxnu_private_cpu_timer_callback, NULL);
    timer_call_enter(&(chud_proc_info->cpu_timer_call), chud_proc_info->t_deadline);

    ml_set_interrupts_enabled(oldlevel);
    return KERN_SUCCESS;
}
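
/*
 * A minimal usage sketch (hypothetical client code, not part of this
 * file), assuming the callback signature implied by the invocation in
 * chudxnu_private_cpu_timer_callback() above. The timer is one-shot, so
 * a sampling client re-arms it from inside its own handler:
 *
 *   static kern_return_t my_sampler(thread_state_flavor_t flavor,
 *                                   thread_state_t state,
 *                                   mach_msg_type_number_t count)
 *   {
 *       record_sample(state, count);        // hypothetical recorder
 *       // re-arm; my_time/my_units as given to clock_interval_to_deadline()
 *       chudxnu_cpu_timer_callback_enter(my_sampler, my_time, my_units);
 *       return KERN_SUCCESS;
 *   }
 */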

__private_extern__
kern_return_t chudxnu_cpu_timer_callback_cancel(void)
{
    chudcpu_data_t *chud_proc_info;
    boolean_t oldlevel;

    oldlevel = ml_set_interrupts_enabled(FALSE);
    chud_proc_info = (chudcpu_data_t *)(getPerProc()->pp_chud);

    timer_call_cancel(&(chud_proc_info->cpu_timer_call));
    chud_proc_info->t_deadline = 0xFFFFFFFFFFFFFFFFULL; // set to max value, i.e. "never"
    chud_proc_info->cpu_timer_callback_fn = NULL;

    ml_set_interrupts_enabled(oldlevel);
    return KERN_SUCCESS;
}

__private_extern__
kern_return_t chudxnu_cpu_timer_callback_cancel_all(void)
{
    unsigned int cpu;
    chudcpu_data_t *chud_proc_info;

    for (cpu = 0; cpu < real_ncpus; cpu++) {
        if ((PerProcTable[cpu].ppe_vaddr == 0)
            || (PerProcTable[cpu].ppe_vaddr->pp_chud == 0))
            continue;
        chud_proc_info = (chudcpu_data_t *)PerProcTable[cpu].ppe_vaddr->pp_chud;
        timer_call_cancel(&(chud_proc_info->cpu_timer_call));
        chud_proc_info->t_deadline = 0xFFFFFFFFFFFFFFFFULL; // set to max value, i.e. "never"
        chud_proc_info->cpu_timer_callback_fn = NULL;
    }
    return KERN_SUCCESS;
}

#pragma mark **** trap ****
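/*
 * TRAP_ENTRY_POINT() maps xnu's internal trap codes back to the PowerPC
 * architectural exception vector addresses (0x100 = system reset,
 * 0x300 = DSI, 0x400 = ISI, 0x700 = program, 0xf00 = performance
 * monitor, and so on), which is the numbering CHUD clients see.
 */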
static chudxnu_trap_callback_func_t trap_callback_fn = NULL;

#define TRAP_ENTRY_POINT(t) ((t==T_RESET)               ? 0x100 : \
                             (t==T_MACHINE_CHECK)       ? 0x200 : \
                             (t==T_DATA_ACCESS)         ? 0x300 : \
                             (t==T_DATA_SEGMENT)        ? 0x380 : \
                             (t==T_INSTRUCTION_ACCESS)  ? 0x400 : \
                             (t==T_INSTRUCTION_SEGMENT) ? 0x480 : \
                             (t==T_INTERRUPT)           ? 0x500 : \
                             (t==T_ALIGNMENT)           ? 0x600 : \
                             (t==T_PROGRAM)             ? 0x700 : \
                             (t==T_FP_UNAVAILABLE)      ? 0x800 : \
                             (t==T_DECREMENTER)         ? 0x900 : \
                             (t==T_IO_ERROR)            ? 0xa00 : \
                             (t==T_RESERVED)            ? 0xb00 : \
                             (t==T_SYSTEM_CALL)         ? 0xc00 : \
                             (t==T_TRACE)               ? 0xd00 : \
                             (t==T_FP_ASSIST)           ? 0xe00 : \
                             (t==T_PERF_MON)            ? 0xf00 : \
                             (t==T_VMX)                 ? 0xf20 : \
                             (t==T_INVALID_EXCP0)       ? 0x1000 : \
                             (t==T_INVALID_EXCP1)       ? 0x1100 : \
                             (t==T_INVALID_EXCP2)       ? 0x1200 : \
                             (t==T_INSTRUCTION_BKPT)    ? 0x1300 : \
                             (t==T_SYSTEM_MANAGEMENT)   ? 0x1400 : \
                             (t==T_SOFT_PATCH)          ? 0x1500 : \
                             (t==T_ALTIVEC_ASSIST)      ? 0x1600 : \
                             (t==T_THERMAL)             ? 0x1700 : \
                             (t==T_ARCHDEP0)            ? 0x1800 : \
                             (t==T_INSTRUMENTATION)     ? 0x2000 : \
                             0x0)

static kern_return_t chudxnu_private_trap_callback(int trapno, struct savearea *ssp, unsigned int dsisr, unsigned int dar)
{
    boolean_t oldlevel = ml_set_interrupts_enabled(FALSE);
    kern_return_t retval = KERN_FAILURE;
    uint32_t trapentry = TRAP_ENTRY_POINT(trapno);

    if (trapentry != 0x0) {
        if (trap_callback_fn) {
            struct ppc_thread_state64 state;
            mach_msg_type_number_t count = PPC_THREAD_STATE64_COUNT;
            chudxnu_copy_savearea_to_threadstate(PPC_THREAD_STATE64, (thread_state_t)&state, &count, ssp);
            retval = (trap_callback_fn)(trapentry, PPC_THREAD_STATE64, (thread_state_t)&state, count);
        }
    }

    ml_set_interrupts_enabled(oldlevel);

    return retval;
}

__private_extern__
kern_return_t chudxnu_trap_callback_enter(chudxnu_trap_callback_func_t func)
{
    trap_callback_fn = func;
    perfTrapHook = chudxnu_private_trap_callback;
    __asm__ volatile("eieio");  /* force order */
    __asm__ volatile("sync");   /* force to memory */
    return KERN_SUCCESS;
}

__private_extern__
kern_return_t chudxnu_trap_callback_cancel(void)
{
    trap_callback_fn = NULL;
    perfTrapHook = NULL;
    __asm__ volatile("eieio");  /* force order */
    __asm__ volatile("sync");   /* force to memory */
    return KERN_SUCCESS;
}

#pragma mark **** ast ****
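/*
 * The perfmon AST path: chudxnu_perfmon_ast_send_urgent() below posts an
 * AST_PPC_CHUD[_URGENT] bit on the current processor, and this handler
 * (installed as perfASTHook) consumes those bits when the AST is
 * delivered, then invokes the registered callback with the current
 * thread's state. KERN_SUCCESS indicates a CHUD AST bit was actually
 * pending.
 */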
static chudxnu_perfmon_ast_callback_func_t perfmon_ast_callback_fn = NULL;

static kern_return_t chudxnu_private_chud_ast_callback(int trapno, struct savearea *ssp, unsigned int dsisr, unsigned int dar)
{
    boolean_t oldlevel = ml_set_interrupts_enabled(FALSE);
    ast_t *myast = ast_pending();
    kern_return_t retval = KERN_FAILURE;

    if (*myast & AST_PPC_CHUD_URGENT) {
        *myast &= ~(AST_PPC_CHUD_URGENT | AST_PPC_CHUD);
        if ((*myast & AST_PREEMPTION) != AST_PREEMPTION) *myast &= ~(AST_URGENT);
        retval = KERN_SUCCESS;
    } else if (*myast & AST_PPC_CHUD) {
        *myast &= ~(AST_PPC_CHUD);
        retval = KERN_SUCCESS;
    }

    if (perfmon_ast_callback_fn) {
        struct ppc_thread_state64 state;
        mach_msg_type_number_t count;
        count = PPC_THREAD_STATE64_COUNT;

        if (chudxnu_thread_get_state(current_thread(), PPC_THREAD_STATE64,
                                     (thread_state_t)&state, &count, FALSE) == KERN_SUCCESS) {
            (perfmon_ast_callback_fn)(PPC_THREAD_STATE64, (thread_state_t)&state, count);
        }
    }

#if 0 /* disabled alternative: sample from the trap savearea instead of thread state */
    // ASTs from ihandler go through thandler and are made to look like traps
    // always handle AST_PPC_CHUD_URGENT if there's a callback
    // only handle AST_PPC_CHUD if it's the only AST pending
    if (perfmon_ast_callback_fn && ((*myast & AST_PPC_CHUD_URGENT) || ((*myast & AST_PPC_CHUD) && !(*myast & AST_URGENT)))) {
        struct ppc_thread_state64 state;
        mach_msg_type_number_t count = PPC_THREAD_STATE64_COUNT;
        chudxnu_copy_savearea_to_threadstate(PPC_THREAD_STATE64, (thread_state_t)&state, &count, ssp);
        if (*myast & AST_PPC_CHUD_URGENT) {
            *myast &= ~(AST_PPC_CHUD_URGENT | AST_PPC_CHUD);
            if ((*myast & AST_PREEMPTION) != AST_PREEMPTION) *myast &= ~(AST_URGENT);
            retval = KERN_SUCCESS;
        } else if (*myast & AST_PPC_CHUD) {
            *myast &= ~(AST_PPC_CHUD);
            retval = KERN_SUCCESS;
        }
        (perfmon_ast_callback_fn)(PPC_THREAD_STATE64, (thread_state_t)&state, count);
    }
#endif

    ml_set_interrupts_enabled(oldlevel);
    return retval;
}

__private_extern__
kern_return_t chudxnu_perfmon_ast_callback_enter(chudxnu_perfmon_ast_callback_func_t func)
{
    perfmon_ast_callback_fn = func;
    perfASTHook = chudxnu_private_chud_ast_callback;
    __asm__ volatile("eieio");  /* force order */
    __asm__ volatile("sync");   /* force to memory */
    return KERN_SUCCESS;
}

__private_extern__
kern_return_t chudxnu_perfmon_ast_callback_cancel(void)
{
    perfmon_ast_callback_fn = NULL;
    perfASTHook = NULL;
    __asm__ volatile("eieio");  /* force order */
    __asm__ volatile("sync");   /* force to memory */
    return KERN_SUCCESS;
}

__private_extern__
kern_return_t chudxnu_perfmon_ast_send_urgent(boolean_t urgent)
{
    boolean_t oldlevel = ml_set_interrupts_enabled(FALSE);
    ast_t *myast = ast_pending();

    if (urgent) {
        *myast |= (AST_PPC_CHUD_URGENT | AST_URGENT);
    } else {
        *myast |= (AST_PPC_CHUD);
    }

    ml_set_interrupts_enabled(oldlevel);
    return KERN_SUCCESS;
}

__private_extern__
kern_return_t chudxnu_perfmon_ast_send(void)
{
    return chudxnu_perfmon_ast_send_urgent(TRUE);
}

#pragma mark **** interrupt ****
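/*
 * Interrupt hook: mirrors the trap hook above, but fires from the
 * interrupt path via perfIntHook (declared elsewhere; note the
 * commented-out extern below) and reports the interrupt's vector
 * address to the client.
 */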
static chudxnu_interrupt_callback_func_t interrupt_callback_fn = NULL;
//extern perfCallback perfIntHook; /* function hook into interrupt() */

static kern_return_t chudxnu_private_interrupt_callback(int trapno, struct savearea *ssp, unsigned int dsisr, unsigned int dar)
{
    if (interrupt_callback_fn) {
        struct ppc_thread_state64 state;
        mach_msg_type_number_t count = PPC_THREAD_STATE64_COUNT;
        chudxnu_copy_savearea_to_threadstate(PPC_THREAD_STATE64, (thread_state_t)&state, &count, ssp);
        return (interrupt_callback_fn)(TRAP_ENTRY_POINT(trapno), PPC_THREAD_STATE64, (thread_state_t)&state, count);
    } else {
        return KERN_FAILURE;
    }
}

__private_extern__
kern_return_t chudxnu_interrupt_callback_enter(chudxnu_interrupt_callback_func_t func)
{
    interrupt_callback_fn = func;
    perfIntHook = chudxnu_private_interrupt_callback;
    __asm__ volatile("eieio");  /* force order */
    __asm__ volatile("sync");   /* force to memory */
    return KERN_SUCCESS;
}

__private_extern__
kern_return_t chudxnu_interrupt_callback_cancel(void)
{
    interrupt_callback_fn = NULL;
    perfIntHook = NULL;
    __asm__ volatile("eieio");  /* force order */
    __asm__ volatile("sync");   /* force to memory */
    return KERN_SUCCESS;
}

#pragma mark **** cpu signal ****
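/*
 * Cross-CPU signalling: a client on one processor can poke another with
 * a 32-bit request code. The receiving side lands in
 * chudxnu_private_cpu_signal_handler() (hooked in via perfCpuSigHook)
 * with that processor's interrupted register state.
 */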
static chudxnu_cpusig_callback_func_t cpusig_callback_fn = NULL;
extern perfCallback perfCpuSigHook; /* function hook into cpu_signal_handler() */

static kern_return_t chudxnu_private_cpu_signal_handler(int request, struct savearea *ssp, unsigned int arg0, unsigned int arg1)
{
    if (cpusig_callback_fn) {
        struct ppc_thread_state64 state;
        mach_msg_type_number_t count = PPC_THREAD_STATE64_COUNT;
        chudxnu_copy_savearea_to_threadstate(PPC_THREAD_STATE64, (thread_state_t)&state, &count, ssp);
        (cpusig_callback_fn)(request, PPC_THREAD_STATE64, (thread_state_t)&state, count);
    }
    return KERN_SUCCESS; // ignored
}

__private_extern__
kern_return_t chudxnu_cpusig_callback_enter(chudxnu_cpusig_callback_func_t func)
{
    cpusig_callback_fn = func;
    perfCpuSigHook = chudxnu_private_cpu_signal_handler;
    __asm__ volatile("eieio");  /* force order */
    __asm__ volatile("sync");   /* force to memory */
    return KERN_SUCCESS;
}

__private_extern__
kern_return_t chudxnu_cpusig_callback_cancel(void)
{
    cpusig_callback_fn = NULL;
    perfCpuSigHook = NULL;
    __asm__ volatile("eieio");  /* force order */
    __asm__ volatile("sync");   /* force to memory */
    return KERN_SUCCESS;
}

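/*
 * chudxnu_cpusig_send() hands the target CPU a two-word block: word 0 is
 * a sync flag (which hw_cpu_sync() waits on), word 1 the request code.
 * The send is retried up to 16 times, then the sender spins in
 * hw_cpu_sync() until the receiver acknowledges or LockTimeOut expires.
 * Signalling your own CPU is rejected as KERN_INVALID_ARGUMENT.
 */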
__private_extern__
kern_return_t chudxnu_cpusig_send(int otherCPU, uint32_t request)
{
    int thisCPU;
    kern_return_t retval = KERN_FAILURE;
    int retries = 0;
    boolean_t oldlevel;
    uint32_t temp[2];

    oldlevel = ml_set_interrupts_enabled(FALSE);
    thisCPU = cpu_number();

    if (thisCPU != otherCPU) {
        temp[0] = 0xFFFFFFFF;       /* set sync flag */
        temp[1] = request;          /* set request */
        __asm__ volatile("eieio");  /* force order */
        __asm__ volatile("sync");   /* force to memory */

        do {
            retval = cpu_signal(otherCPU, SIGPcpureq, CPRQchud, (uint32_t)&temp);
        } while (retval != KERN_SUCCESS && (retries++) < 16);

        if (retries >= 16) {
            retval = KERN_FAILURE;
        } else {
            retval = hw_cpu_sync(temp, LockTimeOut); /* wait for the other processor */
            if (!retval) {
                retval = KERN_FAILURE;
            } else {
                retval = KERN_SUCCESS;
            }
        }
    } else {
        retval = KERN_INVALID_ARGUMENT;
    }

    ml_set_interrupts_enabled(oldlevel);
    return retval;
}

#pragma mark **** timer ****
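/*
 * General-purpose one-shot timers: thin wrappers that expose the
 * kernel's thread_call machinery to CHUD clients through the opaque
 * chud_timer_t handle. param0 is bound at allocation, param1 at arming.
 */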
__private_extern__
chud_timer_t chudxnu_timer_alloc(chudxnu_timer_callback_func_t func, uint32_t param0)
{
    return (chud_timer_t)thread_call_allocate((thread_call_func_t)func, (thread_call_param_t)param0);
}

__private_extern__
kern_return_t chudxnu_timer_callback_enter(chud_timer_t timer, uint32_t param1, uint32_t time, uint32_t units)
{
    uint64_t t_delay;
    clock_interval_to_deadline(time, units, &t_delay); /* t_delay is an absolute deadline */
    thread_call_enter1_delayed((thread_call_t)timer, (thread_call_param_t)param1, t_delay);
    return KERN_SUCCESS;
}

__private_extern__
kern_return_t chudxnu_timer_callback_cancel(chud_timer_t timer)
{
    thread_call_cancel((thread_call_t)timer);
    return KERN_SUCCESS;
}

__private_extern__
kern_return_t chudxnu_timer_free(chud_timer_t timer)
{
    thread_call_cancel((thread_call_t)timer); /* make sure it isn't pending before freeing */
    thread_call_free((thread_call_t)timer);
    return KERN_SUCCESS;
}

#pragma mark **** CHUD syscall (PPC) ****

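/*
 * CHUD claims slot 9 of the PPCcalls[] PPC-specific syscall table,
 * giving user-space clients a direct trap into the registered callback.
 * The callback's return value is handed back to the caller in r3, the
 * PPC syscall return register.
 */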
typedef int (*PPCcallEnt)(struct savearea *save);
extern PPCcallEnt PPCcalls[];

static chudxnu_syscall_callback_func_t syscall_callback_fn = NULL;

static int chudxnu_private_syscall_callback(struct savearea *ssp)
{
    if (ssp) {
        if (syscall_callback_fn) {
            struct ppc_thread_state64 state;
            mach_msg_type_number_t count = PPC_THREAD_STATE64_COUNT;
            chudxnu_copy_savearea_to_threadstate(PPC_THREAD_STATE64, (thread_state_t)&state, &count, ssp);
            ssp->save_r3 = (syscall_callback_fn)(PPC_THREAD_STATE64, (thread_state_t)&state, count);
        } else {
            ssp->save_r3 = KERN_FAILURE;
        }
    }

    return 1; // check for ASTs (always)
}

__private_extern__
kern_return_t chudxnu_syscall_callback_enter(chudxnu_syscall_callback_func_t func)
{
    syscall_callback_fn = func;
    PPCcalls[9] = chudxnu_private_syscall_callback;
    __asm__ volatile("eieio");  /* force order */
    __asm__ volatile("sync");   /* force to memory */
    return KERN_SUCCESS;
}

__private_extern__
kern_return_t chudxnu_syscall_callback_cancel(void)
{
    syscall_callback_fn = NULL;
    PPCcalls[9] = NULL;
    __asm__ volatile("eieio");  /* force order */
    __asm__ volatile("sync");   /* force to memory */
    return KERN_SUCCESS;
}

#pragma mark **** thread timer - DEPRECATED ****
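/*
 * DEPRECATED: a single, global one-shot timer built on thread_call.
 * Unlike the chud_timer_t interface above, it supports only one pending
 * callback at a time; the per-CPU and chud_timer_t interfaces supersede
 * it.
 */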

static thread_call_t thread_timer_call = NULL;
static chudxnu_thread_timer_callback_func_t thread_timer_callback_fn = NULL;

static void chudxnu_private_thread_timer_callback(thread_call_param_t param0, thread_call_param_t param1)
{
    if (thread_timer_call) {
        thread_call_free(thread_timer_call);
        thread_timer_call = NULL;

        if (thread_timer_callback_fn) {
            (thread_timer_callback_fn)((uint32_t)param0);
        }
    }
}

// DEPRECATED
__private_extern__
kern_return_t chudxnu_thread_timer_callback_enter(chudxnu_thread_timer_callback_func_t func, uint32_t param, uint32_t time, uint32_t units)
{
    if (!thread_timer_call) {
        uint64_t t_delay;
        thread_timer_callback_fn = func;
        thread_timer_call = thread_call_allocate((thread_call_func_t)chudxnu_private_thread_timer_callback,
                                                 (thread_call_param_t)param);
        clock_interval_to_deadline(time, units, &t_delay);
        thread_call_enter_delayed(thread_timer_call, t_delay);
        return KERN_SUCCESS;
    } else {
        return KERN_FAILURE; // thread timer call already pending
    }
}

// DEPRECATED
__private_extern__
kern_return_t chudxnu_thread_timer_callback_cancel(void)
{
    if (thread_timer_call) {
        thread_call_cancel(thread_timer_call);
        thread_call_free(thread_timer_call);
        thread_timer_call = NULL;
    }
    thread_timer_callback_fn = NULL;
    return KERN_SUCCESS;
}