/*
 * Copyright (c) 2003-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <stdint.h>
#include <mach/boolean.h>
#include <mach/mach_types.h>

#include <kern/kern_types.h>
#include <kern/processor.h>
#include <kern/thread_call.h>
#include <kern/kalloc.h>
#include <kern/thread.h>

#include <ppc/machine_routines.h>
#include <ppc/cpu_data.h>
#include <ppc/cpu_internal.h>
#include <ppc/exception.h>
#include <ppc/thread.h>
#include <ppc/trap.h>

#include <chud/chud_xnu.h>
#include <chud/chud_xnu_private.h>

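/*
 * Convenience teardown: cancel every CHUD callback type registered
 * through this subsystem in a single call.
 */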
__private_extern__
void chudxnu_cancel_all_callbacks(void)
{
    chudxnu_cpu_timer_callback_cancel_all();
    chudxnu_trap_callback_cancel();
    chudxnu_interrupt_callback_cancel();
    chudxnu_perfmon_ast_callback_cancel();
    chudxnu_cpusig_callback_cancel();
    chudxnu_kdebug_callback_cancel();
    chudxnu_thread_timer_callback_cancel();
    chudxnu_syscall_callback_cancel();
}

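/*
 * Per-processor CHUD state. The boot processor uses static storage
 * (presumably because it is set up before kalloc() is usable); every other
 * CPU's block is kalloc()'d on demand. Either way the block is zero-filled
 * and its timer deadline initialized to "never".
 */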
static chudcpu_data_t chudcpu_boot_cpu;

void *chudxnu_per_proc_alloc(boolean_t boot_processor)
{
    chudcpu_data_t *chud_proc_info;

    if (boot_processor) {
        chud_proc_info = &chudcpu_boot_cpu;
    } else {
        chud_proc_info = (chudcpu_data_t *)kalloc(sizeof(chudcpu_data_t));
        if (chud_proc_info == (chudcpu_data_t *)NULL) {
            return (void *)NULL;
        }
    }
    bzero((char *)chud_proc_info, sizeof(chudcpu_data_t));
    chud_proc_info->t_deadline = 0xFFFFFFFFFFFFFFFFULL;
    return (void *)chud_proc_info;
}

void chudxnu_per_proc_free(void *per_proc_chud)
{
    if (per_proc_chud == (void *)&chudcpu_boot_cpu) {
        return;
    } else {
        kfree(per_proc_chud, sizeof(chudcpu_data_t));
    }
}

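/*
 * timer_call handler for the per-CPU sample timer. Runs with interrupts
 * disabled, captures the current thread's PPC_THREAD_STATE64, and hands it
 * to the callback registered for this CPU (if any).
 */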
static void chudxnu_private_cpu_timer_callback(timer_call_param_t param0, timer_call_param_t param1)
{
    chudcpu_data_t *chud_proc_info;
    boolean_t oldlevel;
    struct ppc_thread_state64 state;
    mach_msg_type_number_t count;
    chudxnu_cpu_timer_callback_func_t fn = NULL;

    oldlevel = ml_set_interrupts_enabled(FALSE);
    chud_proc_info = (chudcpu_data_t *)(getPerProc()->pp_chud);

    count = PPC_THREAD_STATE64_COUNT;
    if(chudxnu_thread_get_state(current_thread(), PPC_THREAD_STATE64, (thread_state_t)&state, &count, FALSE)==KERN_SUCCESS) {
        fn = chud_proc_info->cpu_timer_callback_fn;
        if(fn) {
            (fn)(PPC_THREAD_STATE64, (thread_state_t)&state, count);
        }
    }

    ml_set_interrupts_enabled(oldlevel);
}

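/*
 * Arm (or re-arm) the one-shot sample timer on the calling CPU: any pending
 * timer is cancelled first, then a new deadline is computed from time/units
 * and the timer_call is queued.
 */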
__private_extern__
kern_return_t chudxnu_cpu_timer_callback_enter(chudxnu_cpu_timer_callback_func_t func, uint32_t time, uint32_t units)
{
    chudcpu_data_t *chud_proc_info;
    boolean_t oldlevel;

    oldlevel = ml_set_interrupts_enabled(FALSE);
    chud_proc_info = (chudcpu_data_t *)(getPerProc()->pp_chud);

    timer_call_cancel(&(chud_proc_info->cpu_timer_call)); // cancel any existing callback for this cpu

    chud_proc_info->cpu_timer_callback_fn = func;

    clock_interval_to_deadline(time, units, &(chud_proc_info->t_deadline));
    timer_call_setup(&(chud_proc_info->cpu_timer_call), chudxnu_private_cpu_timer_callback, NULL);
    timer_call_enter(&(chud_proc_info->cpu_timer_call), chud_proc_info->t_deadline);

    ml_set_interrupts_enabled(oldlevel);
    return KERN_SUCCESS;
}

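/*
 * Cancel the sample timer on the calling CPU and push its deadline out to
 * the maximum value (effectively "never").
 */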
__private_extern__
kern_return_t chudxnu_cpu_timer_callback_cancel(void)
{
    chudcpu_data_t *chud_proc_info;
    boolean_t oldlevel;

    oldlevel = ml_set_interrupts_enabled(FALSE);
    chud_proc_info = (chudcpu_data_t *)(getPerProc()->pp_chud);

    timer_call_cancel(&(chud_proc_info->cpu_timer_call));
    chud_proc_info->t_deadline = chud_proc_info->t_deadline | ~(chud_proc_info->t_deadline); // set to max value
    chud_proc_info->cpu_timer_callback_fn = NULL;

    ml_set_interrupts_enabled(oldlevel);
    return KERN_SUCCESS;
}

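/*
 * Walk the per-processor table and cancel the sample timer on every CPU
 * that has CHUD state attached.
 */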
__private_extern__
kern_return_t chudxnu_cpu_timer_callback_cancel_all(void)
{
    unsigned int cpu;
    chudcpu_data_t *chud_proc_info;

    for(cpu=0; cpu<real_ncpus; cpu++) {
        if ((PerProcTable[cpu].ppe_vaddr == 0)
            || (PerProcTable[cpu].ppe_vaddr->pp_chud == 0))
            continue;
        chud_proc_info = (chudcpu_data_t *)PerProcTable[cpu].ppe_vaddr->pp_chud;
        timer_call_cancel(&(chud_proc_info->cpu_timer_call));
        chud_proc_info->t_deadline = chud_proc_info->t_deadline | ~(chud_proc_info->t_deadline); // set to max value
        chud_proc_info->cpu_timer_callback_fn = NULL;
    }
    return KERN_SUCCESS;
}

#pragma mark **** trap ****
static chudxnu_trap_callback_func_t trap_callback_fn = NULL;

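/*
 * Map an xnu trap code onto the architectural PowerPC exception vector
 * offset (e.g. 0x300 for a data access fault), which is the form CHUD
 * clients expect. Unknown trap codes map to 0x0.
 */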
#define TRAP_ENTRY_POINT(t) ((t==T_RESET) ? 0x100 : \
    (t==T_MACHINE_CHECK) ? 0x200 : \
    (t==T_DATA_ACCESS) ? 0x300 : \
    (t==T_DATA_SEGMENT) ? 0x380 : \
    (t==T_INSTRUCTION_ACCESS) ? 0x400 : \
    (t==T_INSTRUCTION_SEGMENT) ? 0x480 : \
    (t==T_INTERRUPT) ? 0x500 : \
    (t==T_ALIGNMENT) ? 0x600 : \
    (t==T_PROGRAM) ? 0x700 : \
    (t==T_FP_UNAVAILABLE) ? 0x800 : \
    (t==T_DECREMENTER) ? 0x900 : \
    (t==T_IO_ERROR) ? 0xa00 : \
    (t==T_RESERVED) ? 0xb00 : \
    (t==T_SYSTEM_CALL) ? 0xc00 : \
    (t==T_TRACE) ? 0xd00 : \
    (t==T_FP_ASSIST) ? 0xe00 : \
    (t==T_PERF_MON) ? 0xf00 : \
    (t==T_VMX) ? 0xf20 : \
    (t==T_INVALID_EXCP0) ? 0x1000 : \
    (t==T_INVALID_EXCP1) ? 0x1100 : \
    (t==T_INVALID_EXCP2) ? 0x1200 : \
    (t==T_INSTRUCTION_BKPT) ? 0x1300 : \
    (t==T_SYSTEM_MANAGEMENT) ? 0x1400 : \
    (t==T_SOFT_PATCH) ? 0x1500 : \
    (t==T_ALTIVEC_ASSIST) ? 0x1600 : \
    (t==T_THERMAL) ? 0x1700 : \
    (t==T_ARCHDEP0) ? 0x1800 : \
    (t==T_INSTRUMENTATION) ? 0x2000 : \
    0x0)

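/*
 * Trap hook installed into perfTrapHook. Converts the savearea to a
 * PPC_THREAD_STATE64 and forwards it to the registered callback; returns
 * the callback's verdict, or KERN_FAILURE when no callback is registered
 * or the trap code is unrecognized.
 */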
static kern_return_t chudxnu_private_trap_callback(int trapno, struct savearea *ssp, unsigned int dsisr, unsigned int dar)
{
    boolean_t oldlevel = ml_set_interrupts_enabled(FALSE);
    kern_return_t retval = KERN_FAILURE;
    uint32_t trapentry = TRAP_ENTRY_POINT(trapno);
    chudxnu_trap_callback_func_t fn = trap_callback_fn;

    if(trapentry!=0x0) {
        if(fn) {
            struct ppc_thread_state64 state;
            mach_msg_type_number_t count = PPC_THREAD_STATE64_COUNT;
            chudxnu_copy_savearea_to_threadstate(PPC_THREAD_STATE64, (thread_state_t)&state, &count, ssp);
            retval = (fn)(trapentry, PPC_THREAD_STATE64, (thread_state_t)&state, count);
        }
    }

    ml_set_interrupts_enabled(oldlevel);

    return retval;
}

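/*
 * Install/remove the trap hook. The eieio/sync pair below forces the new
 * hook values out to memory so other processors observe them before this
 * call returns; the same pattern is used by every enter/cancel pair in
 * this file.
 */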
__private_extern__
kern_return_t chudxnu_trap_callback_enter(chudxnu_trap_callback_func_t func)
{
    trap_callback_fn = func;
    perfTrapHook = chudxnu_private_trap_callback;
    __asm__ volatile("eieio");  /* force order */
    __asm__ volatile("sync");   /* force to memory */
    return KERN_SUCCESS;
}

__private_extern__
kern_return_t chudxnu_trap_callback_cancel(void)
{
    trap_callback_fn = NULL;
    perfTrapHook = NULL;
    __asm__ volatile("eieio");  /* force order */
    __asm__ volatile("sync");   /* force to memory */
    return KERN_SUCCESS;
}

#pragma mark **** ast ****
static chudxnu_perfmon_ast_callback_func_t perfmon_ast_callback_fn = NULL;

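/*
 * AST hook installed into perfASTHook. Consumes any pending AST_CHUD /
 * AST_CHUD_URGENT bits (reporting KERN_SUCCESS if one was pending), then
 * delivers the current thread state to the registered performance-monitor
 * callback.
 */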
static kern_return_t chudxnu_private_chud_ast_callback(int trapno, struct savearea *ssp, unsigned int dsisr, unsigned int dar)
{
    boolean_t oldlevel = ml_set_interrupts_enabled(FALSE);
    ast_t *myast = ast_pending();
    kern_return_t retval = KERN_FAILURE;
    chudxnu_perfmon_ast_callback_func_t fn = perfmon_ast_callback_fn;

    if(*myast & AST_CHUD_URGENT) {
        *myast &= ~(AST_CHUD_URGENT | AST_CHUD);
        if((*myast & AST_PREEMPTION) != AST_PREEMPTION) *myast &= ~(AST_URGENT);
        retval = KERN_SUCCESS;
    } else if(*myast & AST_CHUD) {
        *myast &= ~(AST_CHUD);
        retval = KERN_SUCCESS;
    }

    if(fn) {
        struct ppc_thread_state64 state;
        mach_msg_type_number_t count;
        count = PPC_THREAD_STATE64_COUNT;

        if(chudxnu_thread_get_state(current_thread(), PPC_THREAD_STATE64, (thread_state_t)&state, &count, FALSE)==KERN_SUCCESS) {
            (fn)(PPC_THREAD_STATE64, (thread_state_t)&state, count);
        }
    }

#if 0
    // ASTs from ihandler go through thandler and are made to look like traps
    // always handle AST_CHUD_URGENT if there's a callback
    // only handle AST_CHUD if it's the only AST pending
    if(perfmon_ast_callback_fn && ((*myast & AST_CHUD_URGENT) || ((*myast & AST_CHUD) && !(*myast & AST_URGENT)))) {
        struct ppc_thread_state64 state;
        mach_msg_type_number_t count = PPC_THREAD_STATE64_COUNT;
        chudxnu_copy_savearea_to_threadstate(PPC_THREAD_STATE64, (thread_state_t)&state, &count, ssp);
        if(*myast & AST_CHUD_URGENT) {
            *myast &= ~(AST_CHUD_URGENT | AST_CHUD);
            if((*myast & AST_PREEMPTION) != AST_PREEMPTION) *myast &= ~(AST_URGENT);
            retval = KERN_SUCCESS;
        } else if(*myast & AST_CHUD) {
            *myast &= ~(AST_CHUD);
            retval = KERN_SUCCESS;
        }
        (perfmon_ast_callback_fn)(PPC_THREAD_STATE64, (thread_state_t)&state, count);
    }
#endif

    ml_set_interrupts_enabled(oldlevel);
    return retval;
}

__private_extern__
kern_return_t chudxnu_perfmon_ast_callback_enter(chudxnu_perfmon_ast_callback_func_t func)
{
    perfmon_ast_callback_fn = func;
    perfASTHook = chudxnu_private_chud_ast_callback;
    __asm__ volatile("eieio");  /* force order */
    __asm__ volatile("sync");   /* force to memory */
    return KERN_SUCCESS;
}

__private_extern__
kern_return_t chudxnu_perfmon_ast_callback_cancel(void)
{
    perfmon_ast_callback_fn = NULL;
    perfASTHook = NULL;
    __asm__ volatile("eieio");  /* force order */
    __asm__ volatile("sync");   /* force to memory */
    return KERN_SUCCESS;
}

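/*
 * Post a CHUD AST against the current processor. Urgent requests also set
 * AST_URGENT so the AST is taken on the way out of the kernel rather than
 * waiting for the next context switch.
 */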
__private_extern__
kern_return_t chudxnu_perfmon_ast_send_urgent(boolean_t urgent)
{
    boolean_t oldlevel = ml_set_interrupts_enabled(FALSE);
    ast_t *myast = ast_pending();

    if(urgent) {
        *myast |= (AST_CHUD_URGENT | AST_URGENT);
    } else {
        *myast |= (AST_CHUD);
    }

    ml_set_interrupts_enabled(oldlevel);
    return KERN_SUCCESS;
}

__private_extern__
kern_return_t chudxnu_perfmon_ast_send(void)
{
    return chudxnu_perfmon_ast_send_urgent(TRUE);
}

#pragma mark **** interrupt ****
static chudxnu_interrupt_callback_func_t interrupt_callback_fn = NULL;
//extern perfCallback perfIntHook; /* function hook into interrupt() */

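/*
 * Interrupt hook installed into perfIntHook. Forwards the interrupted
 * context to the registered callback, tagged with the exception vector
 * offset for the interrupt source.
 */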
static kern_return_t chudxnu_private_interrupt_callback(int trapno, struct savearea *ssp, unsigned int dsisr, unsigned int dar)
{
    chudxnu_interrupt_callback_func_t fn = interrupt_callback_fn;

    if(fn) {
        struct ppc_thread_state64 state;
        mach_msg_type_number_t count = PPC_THREAD_STATE64_COUNT;
        chudxnu_copy_savearea_to_threadstate(PPC_THREAD_STATE64, (thread_state_t)&state, &count, ssp);
        return (fn)(TRAP_ENTRY_POINT(trapno), PPC_THREAD_STATE64, (thread_state_t)&state, count);
    } else {
        return KERN_FAILURE;
    }
}

__private_extern__
kern_return_t chudxnu_interrupt_callback_enter(chudxnu_interrupt_callback_func_t func)
{
    interrupt_callback_fn = func;
    perfIntHook = chudxnu_private_interrupt_callback;
    __asm__ volatile("eieio");  /* force order */
    __asm__ volatile("sync");   /* force to memory */
    return KERN_SUCCESS;
}

__private_extern__
kern_return_t chudxnu_interrupt_callback_cancel(void)
{
    interrupt_callback_fn = NULL;
    perfIntHook = NULL;
    __asm__ volatile("eieio");  /* force order */
    __asm__ volatile("sync");   /* force to memory */
    return KERN_SUCCESS;
}

#pragma mark **** cpu signal ****
static chudxnu_cpusig_callback_func_t cpusig_callback_fn = NULL;
extern perfCallback perfCpuSigHook; /* function hook into cpu_signal_handler() */

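/*
 * Handler installed into perfCpuSigHook; runs on the target CPU when a
 * CHUD cross-CPU signal arrives and passes the request code plus the
 * interrupted context to the registered callback.
 */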
static kern_return_t chudxnu_private_cpu_signal_handler(int request, struct savearea *ssp, unsigned int arg0, unsigned int arg1)
{
    chudxnu_cpusig_callback_func_t fn = cpusig_callback_fn;

    if(fn) {
        struct ppc_thread_state64 state;
        mach_msg_type_number_t count = PPC_THREAD_STATE64_COUNT;
        chudxnu_copy_savearea_to_threadstate(PPC_THREAD_STATE64, (thread_state_t)&state, &count, ssp);
        (fn)(request, PPC_THREAD_STATE64, (thread_state_t)&state, count);
    }
    return KERN_SUCCESS; // ignored
}

__private_extern__
kern_return_t chudxnu_cpusig_callback_enter(chudxnu_cpusig_callback_func_t func)
{
    cpusig_callback_fn = func;
    perfCpuSigHook = chudxnu_private_cpu_signal_handler;
    __asm__ volatile("eieio");  /* force order */
    __asm__ volatile("sync");   /* force to memory */
    return KERN_SUCCESS;
}

__private_extern__
kern_return_t chudxnu_cpusig_callback_cancel(void)
{
    cpusig_callback_fn = NULL;
    perfCpuSigHook = NULL;
    __asm__ volatile("eieio");  /* force order */
    __asm__ volatile("sync");   /* force to memory */
    return KERN_SUCCESS;
}

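/*
 * Send a CHUD request to another CPU and wait for it to be acknowledged.
 * temp[0] is a sync flag for the handshake (set here, presumably cleared on
 * the target side) and temp[1] carries the request code. The signal is
 * retried up to 16 times; on delivery the sender waits in hw_cpu_sync(),
 * bounded by LockTimeOut, for the other processor to respond. Signaling
 * yourself is rejected as KERN_INVALID_ARGUMENT.
 */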
__private_extern__
kern_return_t chudxnu_cpusig_send(int otherCPU, uint32_t request)
{
    int thisCPU;
    kern_return_t retval = KERN_FAILURE;
    int retries = 0;
    boolean_t oldlevel;
    uint32_t temp[2];

    oldlevel = ml_set_interrupts_enabled(FALSE);
    thisCPU = cpu_number();

    if(thisCPU!=otherCPU) {
        temp[0] = 0xFFFFFFFF;   /* set sync flag */
        temp[1] = request;      /* set request */
        __asm__ volatile("eieio");  /* force order */
        __asm__ volatile("sync");   /* force to memory */

        do {
            retval=cpu_signal(otherCPU, SIGPcpureq, CPRQchud, (uint32_t)&temp);
        } while(retval!=KERN_SUCCESS && (retries++)<16);

        if(retries>=16) {
            retval = KERN_FAILURE;
        } else {
            retval = hw_cpu_sync(temp, LockTimeOut); /* wait for the other processor */
            if(!retval) {
                retval = KERN_FAILURE;
            } else {
                retval = KERN_SUCCESS;
            }
        }
    } else {
        retval = KERN_INVALID_ARGUMENT;
    }

    ml_set_interrupts_enabled(oldlevel);
    return retval;
}