/*
 * Copyright (c) 2003-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_OSREFERENCE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the
 * License may not be used to create, or enable the creation or
 * redistribution of, unlawful or unlicensed copies of an Apple operating
 * system, or to circumvent, violate, or enable the circumvention or
 * violation of, any terms of an Apple operating system software license
 * agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_OSREFERENCE_HEADER_END@
 */

#include <stdint.h>
#include <mach/boolean.h>
#include <mach/mach_types.h>

#include <kern/kern_types.h>
#include <kern/processor.h>
#include <kern/thread_call.h>
#include <kern/kalloc.h>
#include <kern/thread.h>

#include <ppc/machine_routines.h>
#include <ppc/cpu_data.h>
#include <ppc/cpu_internal.h>
#include <ppc/exception.h>
#include <ppc/thread.h>
#include <ppc/trap.h>

#include <chud/chud_xnu.h>
#include <chud/chud_xnu_private.h>
/* Cancel every registered CHUD callback type at once: all hooks and all per-CPU timers. */
__private_extern__
void chudxnu_cancel_all_callbacks(void)
{
    chudxnu_cpu_timer_callback_cancel_all();
    chudxnu_trap_callback_cancel();
    chudxnu_interrupt_callback_cancel();
    chudxnu_perfmon_ast_callback_cancel();
    chudxnu_cpusig_callback_cancel();
    chudxnu_kdebug_callback_cancel();
    chudxnu_thread_timer_callback_cancel();
    chudxnu_syscall_callback_cancel();
}

static chudcpu_data_t chudcpu_boot_cpu;    /* statically allocated for the boot CPU */

void *chudxnu_per_proc_alloc(boolean_t boot_processor)
{
    chudcpu_data_t *chud_proc_info;

    if (boot_processor) {
        chud_proc_info = &chudcpu_boot_cpu;
    } else {
        chud_proc_info = (chudcpu_data_t *)kalloc(sizeof(chudcpu_data_t));
        if (chud_proc_info == NULL) {
            return NULL;
        }
    }
    bzero((char *)chud_proc_info, sizeof(chudcpu_data_t));
    chud_proc_info->t_deadline = 0xFFFFFFFFFFFFFFFFULL;    /* max value: no timer pending */
    return (void *)chud_proc_info;
}

void chudxnu_per_proc_free(void *per_proc_chud)
{
    if (per_proc_chud == (void *)&chudcpu_boot_cpu) {
        return;    /* the boot CPU's data is static; never kfree() it */
    } else {
        kfree(per_proc_chud, sizeof(chudcpu_data_t));
    }
}
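
/*
 * The callbacks below reach this structure via getPerProc()->pp_chud,
 * so the per-processor startup path is expected to stash the pointer
 * returned by chudxnu_per_proc_alloc() there.
 */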

static void chudxnu_private_cpu_timer_callback(timer_call_param_t param0, timer_call_param_t param1)
{
    chudcpu_data_t *chud_proc_info;
    boolean_t oldlevel;
    struct ppc_thread_state64 state;
    mach_msg_type_number_t count;
    chudxnu_cpu_timer_callback_func_t fn = NULL;

    oldlevel = ml_set_interrupts_enabled(FALSE);
    chud_proc_info = (chudcpu_data_t *)(getPerProc()->pp_chud);

    count = PPC_THREAD_STATE64_COUNT;
    if(chudxnu_thread_get_state(current_thread(), PPC_THREAD_STATE64, (thread_state_t)&state, &count, FALSE)==KERN_SUCCESS) {
        fn = chud_proc_info->cpu_timer_callback_fn;
        if(fn) {
            (fn)(PPC_THREAD_STATE64, (thread_state_t)&state, count);
        }
    }

    ml_set_interrupts_enabled(oldlevel);
}

__private_extern__
kern_return_t chudxnu_cpu_timer_callback_enter(chudxnu_cpu_timer_callback_func_t func, uint32_t time, uint32_t units)
{
    chudcpu_data_t *chud_proc_info;
    boolean_t oldlevel;

    oldlevel = ml_set_interrupts_enabled(FALSE);
    chud_proc_info = (chudcpu_data_t *)(getPerProc()->pp_chud);

    timer_call_cancel(&(chud_proc_info->cpu_timer_call)); // cancel any existing callback for this cpu

    chud_proc_info->cpu_timer_callback_fn = func;

    clock_interval_to_deadline(time, units, &(chud_proc_info->t_deadline));
    timer_call_setup(&(chud_proc_info->cpu_timer_call), chudxnu_private_cpu_timer_callback, NULL);
    timer_call_enter(&(chud_proc_info->cpu_timer_call), chud_proc_info->t_deadline);

    ml_set_interrupts_enabled(oldlevel);
    return KERN_SUCCESS;
}
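
/*
 * Illustrative sketch, not from the original source: registering a
 * one-shot timer callback on the current CPU. The callback signature is
 * assumed to match the chudxnu_cpu_timer_callback_func_t typedef in
 * chud_xnu.h, i.e. (flavor, state, count) returning kern_return_t,
 * consistent with the call site above; `my_timer_cb` and the use of
 * NSEC_PER_MSEC as the units scale factor are assumptions.
 *
 *    static kern_return_t
 *    my_timer_cb(thread_flavor_t flavor, thread_state_t state,
 *                mach_msg_type_number_t count)
 *    {
 *        // sample counters here; re-arm with another _enter() if desired
 *        return KERN_SUCCESS;
 *    }
 *
 *    // fire my_timer_cb on this CPU in roughly 100 ms
 *    chudxnu_cpu_timer_callback_enter(my_timer_cb, 100, NSEC_PER_MSEC);
 */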

__private_extern__
kern_return_t chudxnu_cpu_timer_callback_cancel(void)
{
    chudcpu_data_t *chud_proc_info;
    boolean_t oldlevel;

    oldlevel = ml_set_interrupts_enabled(FALSE);
    chud_proc_info = (chudcpu_data_t *)(getPerProc()->pp_chud);

    timer_call_cancel(&(chud_proc_info->cpu_timer_call));
    chud_proc_info->t_deadline = 0xFFFFFFFFFFFFFFFFULL; // set to max value (no timer pending)
    chud_proc_info->cpu_timer_callback_fn = NULL;

    ml_set_interrupts_enabled(oldlevel);
    return KERN_SUCCESS;
}

__private_extern__
kern_return_t chudxnu_cpu_timer_callback_cancel_all(void)
{
    unsigned int cpu;
    chudcpu_data_t *chud_proc_info;

    for(cpu=0; cpu<real_ncpus; cpu++) {
        if ((PerProcTable[cpu].ppe_vaddr == 0)
            || (PerProcTable[cpu].ppe_vaddr->pp_chud == 0))
            continue;
        chud_proc_info = (chudcpu_data_t *)PerProcTable[cpu].ppe_vaddr->pp_chud;
        timer_call_cancel(&(chud_proc_info->cpu_timer_call));
        chud_proc_info->t_deadline = 0xFFFFFFFFFFFFFFFFULL; // set to max value (no timer pending)
        chud_proc_info->cpu_timer_callback_fn = NULL;
    }
    return KERN_SUCCESS;
}

#pragma mark **** trap ****
static chudxnu_trap_callback_func_t trap_callback_fn = NULL;

/* Map an xnu PPC trap code to its exception vector entry address (0x0 if unrecognized). */
#define TRAP_ENTRY_POINT(t) ((t==T_RESET)               ? 0x100 : \
                             (t==T_MACHINE_CHECK)       ? 0x200 : \
                             (t==T_DATA_ACCESS)         ? 0x300 : \
                             (t==T_DATA_SEGMENT)        ? 0x380 : \
                             (t==T_INSTRUCTION_ACCESS)  ? 0x400 : \
                             (t==T_INSTRUCTION_SEGMENT) ? 0x480 : \
                             (t==T_INTERRUPT)           ? 0x500 : \
                             (t==T_ALIGNMENT)           ? 0x600 : \
                             (t==T_PROGRAM)             ? 0x700 : \
                             (t==T_FP_UNAVAILABLE)      ? 0x800 : \
                             (t==T_DECREMENTER)         ? 0x900 : \
                             (t==T_IO_ERROR)            ? 0xa00 : \
                             (t==T_RESERVED)            ? 0xb00 : \
                             (t==T_SYSTEM_CALL)         ? 0xc00 : \
                             (t==T_TRACE)               ? 0xd00 : \
                             (t==T_FP_ASSIST)           ? 0xe00 : \
                             (t==T_PERF_MON)            ? 0xf00 : \
                             (t==T_VMX)                 ? 0xf20 : \
                             (t==T_INVALID_EXCP0)       ? 0x1000 : \
                             (t==T_INVALID_EXCP1)       ? 0x1100 : \
                             (t==T_INVALID_EXCP2)       ? 0x1200 : \
                             (t==T_INSTRUCTION_BKPT)    ? 0x1300 : \
                             (t==T_SYSTEM_MANAGEMENT)   ? 0x1400 : \
                             (t==T_SOFT_PATCH)          ? 0x1500 : \
                             (t==T_ALTIVEC_ASSIST)      ? 0x1600 : \
                             (t==T_THERMAL)             ? 0x1700 : \
                             (t==T_ARCHDEP0)            ? 0x1800 : \
                             (t==T_INSTRUMENTATION)     ? 0x2000 : \
                             0x0)
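
/*
 * For example, TRAP_ENTRY_POINT(T_DATA_ACCESS) yields 0x300, the PowerPC
 * data-access (DSI) exception vector, while an unrecognized trap code
 * maps to 0x0, which the callback below treats as "no entry point".
 */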

static kern_return_t chudxnu_private_trap_callback(int trapno, struct savearea *ssp, unsigned int dsisr, unsigned int dar)
{
    boolean_t oldlevel = ml_set_interrupts_enabled(FALSE);
    kern_return_t retval = KERN_FAILURE;
    uint32_t trapentry = TRAP_ENTRY_POINT(trapno);
    chudxnu_trap_callback_func_t fn = trap_callback_fn;

    if(trapentry!=0x0) {
        if(fn) {
            struct ppc_thread_state64 state;
            mach_msg_type_number_t count = PPC_THREAD_STATE64_COUNT;
            chudxnu_copy_savearea_to_threadstate(PPC_THREAD_STATE64, (thread_state_t)&state, &count, ssp);
            retval = (fn)(trapentry, PPC_THREAD_STATE64, (thread_state_t)&state, count);
        }
    }

    ml_set_interrupts_enabled(oldlevel);

    return retval;
}

__private_extern__
kern_return_t chudxnu_trap_callback_enter(chudxnu_trap_callback_func_t func)
{
    trap_callback_fn = func;
    perfTrapHook = chudxnu_private_trap_callback;
    __asm__ volatile("eieio");    /* force order */
    __asm__ volatile("sync");     /* force to memory */
    return KERN_SUCCESS;
}

__private_extern__
kern_return_t chudxnu_trap_callback_cancel(void)
{
    trap_callback_fn = NULL;
    perfTrapHook = NULL;
    __asm__ volatile("eieio");    /* force order */
    __asm__ volatile("sync");     /* force to memory */
    return KERN_SUCCESS;
}
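
/*
 * Illustrative sketch, not from the original source: a trap callback
 * that claims only performance-monitor exceptions (vector 0xf00) and
 * declines everything else. By the perfTrapHook convention assumed
 * here, returning KERN_SUCCESS tells the trap path the exception was
 * consumed; `my_trap_cb` is a hypothetical name.
 *
 *    static kern_return_t
 *    my_trap_cb(uint32_t trapentry, thread_flavor_t flavor,
 *               thread_state_t state, mach_msg_type_number_t count)
 *    {
 *        if(trapentry == 0xf00) {        // T_PERF_MON vector
 *            // read/reset the performance monitor counters here
 *            return KERN_SUCCESS;        // handled
 *        }
 *        return KERN_FAILURE;            // let the kernel handle it
 *    }
 *
 *    chudxnu_trap_callback_enter(my_trap_cb);
 */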

#pragma mark **** ast ****
static chudxnu_perfmon_ast_callback_func_t perfmon_ast_callback_fn = NULL;

static kern_return_t chudxnu_private_chud_ast_callback(int trapno, struct savearea *ssp, unsigned int dsisr, unsigned int dar)
{
    boolean_t oldlevel = ml_set_interrupts_enabled(FALSE);
    ast_t *myast = ast_pending();
    kern_return_t retval = KERN_FAILURE;
    chudxnu_perfmon_ast_callback_func_t fn = perfmon_ast_callback_fn;

    /* consume any pending CHUD AST bits */
    if(*myast & AST_CHUD_URGENT) {
        *myast &= ~(AST_CHUD_URGENT | AST_CHUD);
        if((*myast & AST_PREEMPTION) != AST_PREEMPTION) *myast &= ~(AST_URGENT);
        retval = KERN_SUCCESS;
    } else if(*myast & AST_CHUD) {
        *myast &= ~(AST_CHUD);
        retval = KERN_SUCCESS;
    }

    if(fn) {
        struct ppc_thread_state64 state;
        mach_msg_type_number_t count;
        count = PPC_THREAD_STATE64_COUNT;

        if(chudxnu_thread_get_state(current_thread(), PPC_THREAD_STATE64, (thread_state_t)&state, &count, FALSE)==KERN_SUCCESS) {
            (fn)(PPC_THREAD_STATE64, (thread_state_t)&state, count);
        }
    }

#if 0
    // ASTs from ihandler go through thandler and are made to look like traps
    // always handle AST_CHUD_URGENT if there's a callback
    // only handle AST_CHUD if it's the only AST pending
    if(perfmon_ast_callback_fn && ((*myast & AST_CHUD_URGENT) || ((*myast & AST_CHUD) && !(*myast & AST_URGENT)))) {
        struct ppc_thread_state64 state;
        mach_msg_type_number_t count = PPC_THREAD_STATE64_COUNT;
        chudxnu_copy_savearea_to_threadstate(PPC_THREAD_STATE64, (thread_state_t)&state, &count, ssp);
        if(*myast & AST_CHUD_URGENT) {
            *myast &= ~(AST_CHUD_URGENT | AST_CHUD);
            if((*myast & AST_PREEMPTION) != AST_PREEMPTION) *myast &= ~(AST_URGENT);
            retval = KERN_SUCCESS;
        } else if(*myast & AST_CHUD) {
            *myast &= ~(AST_CHUD);
            retval = KERN_SUCCESS;
        }
        (perfmon_ast_callback_fn)(PPC_THREAD_STATE64, (thread_state_t)&state, count);
    }
#endif

    ml_set_interrupts_enabled(oldlevel);
    return retval;
}

__private_extern__
kern_return_t chudxnu_perfmon_ast_callback_enter(chudxnu_perfmon_ast_callback_func_t func)
{
    perfmon_ast_callback_fn = func;
    perfASTHook = chudxnu_private_chud_ast_callback;
    __asm__ volatile("eieio");    /* force order */
    __asm__ volatile("sync");     /* force to memory */
    return KERN_SUCCESS;
}

__private_extern__
kern_return_t chudxnu_perfmon_ast_callback_cancel(void)
{
    perfmon_ast_callback_fn = NULL;
    perfASTHook = NULL;
    __asm__ volatile("eieio");    /* force order */
    __asm__ volatile("sync");     /* force to memory */
    return KERN_SUCCESS;
}

__private_extern__
kern_return_t chudxnu_perfmon_ast_send_urgent(boolean_t urgent)
{
    boolean_t oldlevel = ml_set_interrupts_enabled(FALSE);
    ast_t *myast = ast_pending();

    if(urgent) {
        *myast |= (AST_CHUD_URGENT | AST_URGENT);
    } else {
        *myast |= (AST_CHUD);
    }

    ml_set_interrupts_enabled(oldlevel);
    return KERN_SUCCESS;
}

__private_extern__
kern_return_t chudxnu_perfmon_ast_send(void)
{
    return chudxnu_perfmon_ast_send_urgent(TRUE);
}
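
/*
 * Illustrative sketch, not from the original source: a common pattern is
 * to do minimal work at interrupt level (e.g. in a trap callback for
 * performance-monitor interrupts) and request an AST so the heavier
 * work runs in the perfmon AST callback on the way out of the kernel.
 * `my_trap_cb` and `my_ast_cb` are hypothetical names.
 *
 *    static kern_return_t
 *    my_trap_cb(uint32_t trapentry, thread_flavor_t flavor,
 *               thread_state_t state, mach_msg_type_number_t count)
 *    {
 *        chudxnu_perfmon_ast_send();    // urgent AST (send_urgent(TRUE))
 *        return KERN_SUCCESS;
 *    }
 *
 *    chudxnu_perfmon_ast_callback_enter(my_ast_cb);   // my_ast_cb does the real work
 *    chudxnu_trap_callback_enter(my_trap_cb);
 */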

#pragma mark **** interrupt ****
static chudxnu_interrupt_callback_func_t interrupt_callback_fn = NULL;
//extern perfCallback perfIntHook; /* function hook into interrupt() */

static kern_return_t chudxnu_private_interrupt_callback(int trapno, struct savearea *ssp, unsigned int dsisr, unsigned int dar)
{
    chudxnu_interrupt_callback_func_t fn = interrupt_callback_fn;

    if(fn) {
        struct ppc_thread_state64 state;
        mach_msg_type_number_t count = PPC_THREAD_STATE64_COUNT;
        chudxnu_copy_savearea_to_threadstate(PPC_THREAD_STATE64, (thread_state_t)&state, &count, ssp);
        return (fn)(TRAP_ENTRY_POINT(trapno), PPC_THREAD_STATE64, (thread_state_t)&state, count);
    } else {
        return KERN_FAILURE;
    }
}

__private_extern__
kern_return_t chudxnu_interrupt_callback_enter(chudxnu_interrupt_callback_func_t func)
{
    interrupt_callback_fn = func;
    perfIntHook = chudxnu_private_interrupt_callback;
    __asm__ volatile("eieio");    /* force order */
    __asm__ volatile("sync");     /* force to memory */
    return KERN_SUCCESS;
}

__private_extern__
kern_return_t chudxnu_interrupt_callback_cancel(void)
{
    interrupt_callback_fn = NULL;
    perfIntHook = NULL;
    __asm__ volatile("eieio");    /* force order */
    __asm__ volatile("sync");     /* force to memory */
    return KERN_SUCCESS;
}

#pragma mark **** cpu signal ****
static chudxnu_cpusig_callback_func_t cpusig_callback_fn = NULL;
extern perfCallback perfCpuSigHook; /* function hook into cpu_signal_handler() */

static kern_return_t chudxnu_private_cpu_signal_handler(int request, struct savearea *ssp, unsigned int arg0, unsigned int arg1)
{
    chudxnu_cpusig_callback_func_t fn = cpusig_callback_fn;

    if(fn) {
        struct ppc_thread_state64 state;
        mach_msg_type_number_t count = PPC_THREAD_STATE64_COUNT;
        chudxnu_copy_savearea_to_threadstate(PPC_THREAD_STATE64, (thread_state_t)&state, &count, ssp);
        (fn)(request, PPC_THREAD_STATE64, (thread_state_t)&state, count);
    }
    return KERN_SUCCESS; // retval is ignored by the caller
}

__private_extern__
kern_return_t chudxnu_cpusig_callback_enter(chudxnu_cpusig_callback_func_t func)
{
    cpusig_callback_fn = func;
    perfCpuSigHook = chudxnu_private_cpu_signal_handler;
    __asm__ volatile("eieio");    /* force order */
    __asm__ volatile("sync");     /* force to memory */
    return KERN_SUCCESS;
}

__private_extern__
kern_return_t chudxnu_cpusig_callback_cancel(void)
{
    cpusig_callback_fn = NULL;
    perfCpuSigHook = NULL;
    __asm__ volatile("eieio");    /* force order */
    __asm__ volatile("sync");     /* force to memory */
    return KERN_SUCCESS;
}

__private_extern__
kern_return_t chudxnu_cpusig_send(int otherCPU, uint32_t request)
{
    int thisCPU;
    kern_return_t retval = KERN_FAILURE;
    int retries = 0;
    boolean_t oldlevel;
    uint32_t temp[2];

    oldlevel = ml_set_interrupts_enabled(FALSE);
    thisCPU = cpu_number();

    if(thisCPU!=otherCPU) {
        temp[0] = 0xFFFFFFFF;         /* set sync flag */
        temp[1] = request;            /* set request */
        __asm__ volatile("eieio");    /* force order */
        __asm__ volatile("sync");     /* force to memory */

        /* retry the signal a bounded number of times in case the target is busy */
        do {
            retval = cpu_signal(otherCPU, SIGPcpureq, CPRQchud, (uint32_t)&temp);
        } while(retval!=KERN_SUCCESS && (retries++)<16);

        if(retries>=16) {
            retval = KERN_FAILURE;
        } else {
            retval = hw_cpu_sync(temp, LockTimeOut);    /* wait for the other processor */
            if(!retval) {
                retval = KERN_FAILURE;    /* hw_cpu_sync() timed out */
            } else {
                retval = KERN_SUCCESS;
            }
        }
    } else {
        retval = KERN_INVALID_ARGUMENT;    /* signalling the current cpu is not supported */
    }

    ml_set_interrupts_enabled(oldlevel);
    return retval;
}
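
/*
 * Illustrative sketch, not from the original source: sending a request
 * code to every other CPU. chudxnu_cpusig_send() refuses the caller's
 * own CPU (KERN_INVALID_ARGUMENT), so it is skipped explicitly here;
 * MY_CHUD_REQUEST is a hypothetical request code.
 *
 *    unsigned int cpu;
 *    for(cpu = 0; cpu < real_ncpus; cpu++) {
 *        if((int)cpu != cpu_number()) {
 *            (void)chudxnu_cpusig_send(cpu, MY_CHUD_REQUEST);
 *        }
 *    }
 */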