/*
 * osfmk/ppc/chud/chud_osfmk_callback.c
 * From apple/xnu, release xnu-517.12.7.
 */
1/*
2 * Copyright (c) 2003 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
e5568f75
A
5 *
6 * The contents of this file constitute Original Code as defined in and
7 * are subject to the Apple Public Source License Version 1.1 (the
8 * "License"). You may not use this file except in compliance with the
9 * License. Please obtain a copy of the License at
10 * http://www.apple.com/publicsource and read it before using this file.
11 *
12 * This Original Code and all software distributed under the License are
13 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
55e303ae
A
14 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
15 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
e5568f75
A
16 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
17 * License for the specific language governing rights and limitations
18 * under the License.
19 *
55e303ae
A
20 * @APPLE_LICENSE_HEADER_END@
21 */
22
23#include <stdint.h>
24#include <mach/boolean.h>
25#include <mach/mach_types.h>
26
27#include <ppc/machine_routines.h>
28#include <ppc/exception.h>
29#include <kern/ast.h>
30#include <kern/timer_call.h>
31#include <kern/kern_types.h>
32
33extern kern_return_t chud_copy_savearea_to_threadstate(thread_flavor_t flavor, thread_state_t tstate, mach_msg_type_number_t *count, struct savearea *sv);
34extern kern_return_t chud_copy_threadstate_to_savearea(struct savearea *sv, thread_flavor_t flavor, thread_state_t tstate, mach_msg_type_number_t *count);
35
__private_extern__
void chudxnu_cancel_all_callbacks(void)
{
    /* These two cancel routines live in other chud source files and have no
     * shared prototype header visible here, hence the local externs. */
    extern void chudxnu_exit_callback_cancel(void);
    extern void chudxnu_thread_timer_callback_cancel(void);

    /* Tear down every CHUD callback type this subsystem registers. */
    chudxnu_cpu_timer_callback_cancel_all();
    chudxnu_trap_callback_cancel();
    chudxnu_interrupt_callback_cancel();
    chudxnu_perfmon_ast_callback_cancel();
    chudxnu_cpusig_callback_cancel();
    chudxnu_kdebug_callback_cancel();
    chudxnu_exit_callback_cancel();
    chudxnu_thread_timer_callback_cancel();
}
51
#pragma mark **** cpu timer ****
/* Per-CPU timer state.  The initializer lists supply exactly two elements,
 * so NCPUS is assumed to be 2 on this configuration -- TODO confirm. */
static timer_call_data_t cpu_timer_call[NCPUS] = {{0}, {0}};
/* All-ones deadline means "no timer armed" (see the cancel routines). */
static uint64_t t_deadline[NCPUS] = {0xFFFFFFFFFFFFFFFFULL, 0xFFFFFFFFFFFFFFFFULL};

/* Client callback invoked from the per-CPU timer with the interrupted
 * thread's PPC_THREAD_STATE64 state. */
typedef void (*chudxnu_cpu_timer_callback_func_t)(thread_flavor_t flavor, thread_state_t tstate, mach_msg_type_number_t count);
static chudxnu_cpu_timer_callback_func_t cpu_timer_callback_fn[NCPUS] = {NULL, NULL};
58
59static void chudxnu_private_cpu_timer_callback(timer_call_param_t param0, timer_call_param_t param1)
60{
61 int cpu;
62 boolean_t oldlevel;
63 struct ppc_thread_state64 state;
64 mach_msg_type_number_t count;
65
66 oldlevel = ml_set_interrupts_enabled(FALSE);
67 cpu = cpu_number();
68
69 count = PPC_THREAD_STATE64_COUNT;
70 if(chudxnu_thread_get_state(current_act(), PPC_THREAD_STATE64, (thread_state_t)&state, &count, FALSE)==KERN_SUCCESS) {
71 if(cpu_timer_callback_fn[cpu]) {
72 (cpu_timer_callback_fn[cpu])(PPC_THREAD_STATE64, (thread_state_t)&state, count);
73 }
74 }
75
76 ml_set_interrupts_enabled(oldlevel);
77}
78
79__private_extern__
80kern_return_t chudxnu_cpu_timer_callback_enter(chudxnu_cpu_timer_callback_func_t func, uint32_t time, uint32_t units)
81{
82 int cpu;
83 boolean_t oldlevel;
84
85 oldlevel = ml_set_interrupts_enabled(FALSE);
86 cpu = cpu_number();
87
88 timer_call_cancel(&(cpu_timer_call[cpu])); // cancel any existing callback for this cpu
89
90 cpu_timer_callback_fn[cpu] = func;
91
92 clock_interval_to_deadline(time, units, &(t_deadline[cpu]));
93 timer_call_setup(&(cpu_timer_call[cpu]), chudxnu_private_cpu_timer_callback, NULL);
94 timer_call_enter(&(cpu_timer_call[cpu]), t_deadline[cpu]);
95
96 ml_set_interrupts_enabled(oldlevel);
97 return KERN_SUCCESS;
98}
99
100__private_extern__
101kern_return_t chudxnu_cpu_timer_callback_cancel(void)
102{
103 int cpu;
104 boolean_t oldlevel;
105
106 oldlevel = ml_set_interrupts_enabled(FALSE);
107 cpu = cpu_number();
108
109 timer_call_cancel(&(cpu_timer_call[cpu]));
110 t_deadline[cpu] = t_deadline[cpu] | ~(t_deadline[cpu]); // set to max value
111 cpu_timer_callback_fn[cpu] = NULL;
112
113 ml_set_interrupts_enabled(oldlevel);
114 return KERN_SUCCESS;
115}
116
117__private_extern__
118kern_return_t chudxnu_cpu_timer_callback_cancel_all(void)
119{
120 int cpu;
121
122 for(cpu=0; cpu<NCPUS; cpu++) {
123 timer_call_cancel(&(cpu_timer_call[cpu]));
124 t_deadline[cpu] = t_deadline[cpu] | ~(t_deadline[cpu]); // set to max value
125 cpu_timer_callback_fn[cpu] = NULL;
126 }
127 return KERN_SUCCESS;
128}
129
#pragma mark **** trap and ast ****
/* Client callback invoked on PPC traps with the exception-vector address
 * and the interrupted thread's state. */
typedef kern_return_t (*chudxnu_trap_callback_func_t)(uint32_t trapentry, thread_flavor_t flavor, thread_state_t tstate, mach_msg_type_number_t count);
static chudxnu_trap_callback_func_t trap_callback_fn = NULL;

/* Signature of the kernel's low-level performance-monitor hook. */
typedef kern_return_t (*perfTrap)(int trapno, struct savearea *ssp, unsigned int dsisr, unsigned int dar);
extern perfTrap perfTrapHook; /* function hook into trap() */

/* Client callback invoked when a CHUD perfmon AST is delivered
 * (the AST path is routed through the trap handler; see
 * chudxnu_private_trap_callback). */
typedef void (*chudxnu_perfmon_ast_callback_func_t)(thread_flavor_t flavor, thread_state_t tstate, mach_msg_type_number_t count);
static chudxnu_perfmon_ast_callback_func_t perfmon_ast_callback_fn = NULL;
139
/*
 * Map a PPC trap code (T_*) to its architectural exception-vector address
 * (0x100 = system reset, 0x200 = machine check, ...).  Unknown codes map
 * to 0x0, which callers treat as "not a reportable trap".
 * NOTE: as a chained conditional macro this may evaluate 't' many times --
 * pass only side-effect-free expressions.
 */
#define TRAP_ENTRY_POINT(t) ((t==T_RESET) ? 0x100 : \
                            (t==T_MACHINE_CHECK) ? 0x200 : \
                            (t==T_DATA_ACCESS) ? 0x300 : \
                            (t==T_DATA_SEGMENT) ? 0x380 : \
                            (t==T_INSTRUCTION_ACCESS) ? 0x400 : \
                            (t==T_INSTRUCTION_SEGMENT) ? 0x480 : \
                            (t==T_INTERRUPT) ? 0x500 : \
                            (t==T_ALIGNMENT) ? 0x600 : \
                            (t==T_PROGRAM) ? 0x700 : \
                            (t==T_FP_UNAVAILABLE) ? 0x800 : \
                            (t==T_DECREMENTER) ? 0x900 : \
                            (t==T_IO_ERROR) ? 0xa00 : \
                            (t==T_RESERVED) ? 0xb00 : \
                            (t==T_SYSTEM_CALL) ? 0xc00 : \
                            (t==T_TRACE) ? 0xd00 : \
                            (t==T_FP_ASSIST) ? 0xe00 : \
                            (t==T_PERF_MON) ? 0xf00 : \
                            (t==T_VMX) ? 0xf20 : \
                            (t==T_INVALID_EXCP0) ? 0x1000 : \
                            (t==T_INVALID_EXCP1) ? 0x1100 : \
                            (t==T_INVALID_EXCP2) ? 0x1200 : \
                            (t==T_INSTRUCTION_BKPT) ? 0x1300 : \
                            (t==T_SYSTEM_MANAGEMENT) ? 0x1400 : \
                            (t==T_SOFT_PATCH) ? 0x1500 : \
                            (t==T_ALTIVEC_ASSIST) ? 0x1600 : \
                            (t==T_THERMAL) ? 0x1700 : \
                            (t==T_ARCHDEP0) ? 0x1800 : \
                            (t==T_INSTRUMENTATION) ? 0x2000 : \
                            0x0)
169
170static kern_return_t chudxnu_private_trap_callback(int trapno, struct savearea *ssp, unsigned int dsisr, unsigned int dar)
171{
172 boolean_t oldlevel = ml_set_interrupts_enabled(FALSE);
173 int cpu = cpu_number();
174
175 kern_return_t retval = KERN_FAILURE;
176 uint32_t trapentry = TRAP_ENTRY_POINT(trapno);
177
178 // ASTs from ihandler go through thandler and are made to look like traps
179 if(perfmon_ast_callback_fn && (need_ast[cpu] & AST_PPC_CHUD)) {
180 struct ppc_thread_state64 state;
181 mach_msg_type_number_t count = PPC_THREAD_STATE64_COUNT;
182 chudxnu_copy_savearea_to_threadstate(PPC_THREAD_STATE64, (thread_state_t)&state, &count, ssp);
183 (perfmon_ast_callback_fn)(PPC_THREAD_STATE64, (thread_state_t)&state, count);
184 need_ast[cpu] &= ~(AST_PPC_CHUD);
185 }
186
187 if(trapentry!=0x0) {
188 if(trap_callback_fn) {
189 struct ppc_thread_state64 state;
190 mach_msg_type_number_t count = PPC_THREAD_STATE64_COUNT;
191 chudxnu_copy_savearea_to_threadstate(PPC_THREAD_STATE64, (thread_state_t)&state, &count, ssp);
192 retval = (trap_callback_fn)(trapentry, PPC_THREAD_STATE64, (thread_state_t)&state, count);
193 }
194 }
195
196 ml_set_interrupts_enabled(oldlevel);
197
198 return retval;
199}
200
/*
 * Register a trap callback and install the shared trap hook.
 * The callback pointer is stored before the hook pointer; the eieio/sync
 * pair pushes both stores to memory so other processors observe them.
 * Always returns KERN_SUCCESS.
 */
__private_extern__
kern_return_t chudxnu_trap_callback_enter(chudxnu_trap_callback_func_t func)
{
    trap_callback_fn = func;
    perfTrapHook = chudxnu_private_trap_callback;
    __asm__ volatile("eieio"); /* force order */
    __asm__ volatile("sync"); /* force to memory */
    return KERN_SUCCESS;
}
210
/*
 * Unregister the trap callback.  perfTrapHook is shared with the perfmon
 * AST path, so it is only removed when no perfmon AST callback remains.
 * Always returns KERN_SUCCESS.
 */
__private_extern__
kern_return_t chudxnu_trap_callback_cancel(void)
{
    trap_callback_fn = NULL;
    if(!perfmon_ast_callback_fn) {
        perfTrapHook = NULL;
    }
    __asm__ volatile("eieio"); /* force order */
    __asm__ volatile("sync"); /* force to memory */
    return KERN_SUCCESS;
}
222
/*
 * Register a perfmon AST callback.  Shares perfTrapHook with the trap
 * path (chudxnu_private_trap_callback handles both).  The eieio/sync pair
 * pushes the stores to memory.  Always returns KERN_SUCCESS.
 */
__private_extern__
kern_return_t chudxnu_perfmon_ast_callback_enter(chudxnu_perfmon_ast_callback_func_t func)
{
    perfmon_ast_callback_fn = func;
    perfTrapHook = chudxnu_private_trap_callback;
    __asm__ volatile("eieio"); /* force order */
    __asm__ volatile("sync"); /* force to memory */
    return KERN_SUCCESS;
}
232
/*
 * Unregister the perfmon AST callback.  The shared hook is only removed
 * when no trap callback remains either.  Always returns KERN_SUCCESS.
 */
__private_extern__
kern_return_t chudxnu_perfmon_ast_callback_cancel(void)
{
    perfmon_ast_callback_fn = NULL;
    if(!trap_callback_fn) {
        perfTrapHook = NULL;
    }
    __asm__ volatile("eieio"); /* force order */
    __asm__ volatile("sync"); /* force to memory */
    return KERN_SUCCESS;
}
244
/*
 * Request a CHUD perfmon AST on the calling CPU: sets AST_PPC_CHUD (so the
 * trap-path hook delivers the callback) plus AST_URGENT, with interrupts
 * disabled around the read-modify-write of need_ast.
 * Always returns KERN_SUCCESS.
 */
__private_extern__
kern_return_t chudxnu_perfmon_ast_send(void)
{
    int cpu;
    boolean_t oldlevel;

    oldlevel = ml_set_interrupts_enabled(FALSE);
    cpu = cpu_number();

    need_ast[cpu] |= (AST_PPC_CHUD | AST_URGENT);

    ml_set_interrupts_enabled(oldlevel);
    return KERN_SUCCESS;
}
259
#pragma mark **** interrupt ****
/* Client callback invoked on interrupts with the exception-vector address
 * and the interrupted thread's state. */
typedef kern_return_t (*chudxnu_interrupt_callback_func_t)(uint32_t trapentry, thread_flavor_t flavor, thread_state_t tstate, mach_msg_type_number_t count);
static chudxnu_interrupt_callback_func_t interrupt_callback_fn = NULL;

extern perfTrap perfIntHook; /* function hook into interrupt() */
265
266static kern_return_t chudxnu_private_interrupt_callback(int trapno, struct savearea *ssp, unsigned int dsisr, unsigned int dar)
267{
268 if(interrupt_callback_fn) {
269 struct ppc_thread_state64 state;
270 mach_msg_type_number_t count = PPC_THREAD_STATE64_COUNT;
271 chudxnu_copy_savearea_to_threadstate(PPC_THREAD_STATE64, (thread_state_t)&state, &count, ssp);
272 return (interrupt_callback_fn)(TRAP_ENTRY_POINT(trapno), PPC_THREAD_STATE64, (thread_state_t)&state, count);
273 } else {
274 return KERN_FAILURE;
275 }
276}
277
/*
 * Register an interrupt callback and install the interrupt hook.
 * The callback pointer is stored before the hook pointer; eieio/sync push
 * the stores to memory.  Always returns KERN_SUCCESS.
 */
__private_extern__
kern_return_t chudxnu_interrupt_callback_enter(chudxnu_interrupt_callback_func_t func)
{
    interrupt_callback_fn = func;
    perfIntHook = chudxnu_private_interrupt_callback;
    __asm__ volatile("eieio"); /* force order */
    __asm__ volatile("sync"); /* force to memory */
    return KERN_SUCCESS;
}
287
/*
 * Unregister the interrupt callback and remove the hook (perfIntHook is
 * not shared with any other path).  Always returns KERN_SUCCESS.
 */
__private_extern__
kern_return_t chudxnu_interrupt_callback_cancel(void)
{
    interrupt_callback_fn = NULL;
    perfIntHook = NULL;
    __asm__ volatile("eieio"); /* force order */
    __asm__ volatile("sync"); /* force to memory */
    return KERN_SUCCESS;
}
297
#pragma mark **** cpu signal ****
/* Client callback invoked when a CHUD cross-CPU signal arrives, with the
 * request code and the interrupted thread's state. */
typedef kern_return_t (*chudxnu_cpusig_callback_func_t)(int request, thread_flavor_t flavor, thread_state_t tstate, mach_msg_type_number_t count);
static chudxnu_cpusig_callback_func_t cpusig_callback_fn = NULL;

extern perfTrap perfCpuSigHook; /* function hook into cpu_signal_handler() */
303
304static kern_return_t chudxnu_private_cpu_signal_handler(int request, struct savearea *ssp, unsigned int arg0, unsigned int arg1)
305{
306 if(cpusig_callback_fn) {
307 struct ppc_thread_state64 state;
308 mach_msg_type_number_t count = PPC_THREAD_STATE64_COUNT;
309 chudxnu_copy_savearea_to_threadstate(PPC_THREAD_STATE64, (thread_state_t)&state, &count, ssp);
310 (cpusig_callback_fn)(request, PPC_THREAD_STATE64, (thread_state_t)&state, count);
311 }
312 return KERN_SUCCESS; // ignored
313}
314
/*
 * Register a cross-CPU-signal callback and install the cpu_signal_handler
 * hook.  The callback pointer is stored before the hook pointer; eieio/sync
 * push the stores to memory.  Always returns KERN_SUCCESS.
 */
__private_extern__
kern_return_t chudxnu_cpusig_callback_enter(chudxnu_cpusig_callback_func_t func)
{
    cpusig_callback_fn = func;
    perfCpuSigHook = chudxnu_private_cpu_signal_handler;
    __asm__ volatile("eieio"); /* force order */
    __asm__ volatile("sync"); /* force to memory */
    return KERN_SUCCESS;
}
324
/*
 * Unregister the cross-CPU-signal callback and remove its hook.
 * Always returns KERN_SUCCESS.
 */
__private_extern__
kern_return_t chudxnu_cpusig_callback_cancel(void)
{
    cpusig_callback_fn = NULL;
    perfCpuSigHook = NULL;
    __asm__ volatile("eieio"); /* force order */
    __asm__ volatile("sync"); /* force to memory */
    return KERN_SUCCESS;
}
334
335__private_extern__
336kern_return_t chudxnu_cpusig_send(int otherCPU, uint32_t request)
337{
338 int thisCPU;
339 kern_return_t retval = KERN_FAILURE;
340 int retries = 0;
341 boolean_t oldlevel;
342 uint32_t temp[2];
343
344 oldlevel = ml_set_interrupts_enabled(FALSE);
345 thisCPU = cpu_number();
346
347 if(thisCPU!=otherCPU) {
348 temp[0] = 0xFFFFFFFF; /* set sync flag */
349 temp[1] = request; /* set request */
350 __asm__ volatile("eieio"); /* force order */
351 __asm__ volatile("sync"); /* force to memory */
352
353 do {
354 retval=cpu_signal(otherCPU, SIGPcpureq, CPRQchud, (uint32_t)&temp);
355 } while(retval!=KERN_SUCCESS && (retries++)<16);
356
357 if(retries>=16) {
358 retval = KERN_FAILURE;
359 } else {
360 retval = hw_cpu_sync(temp, LockTimeOut); /* wait for the other processor */
361 if(!retval) {
362 retval = KERN_FAILURE;
363 } else {
364 retval = KERN_SUCCESS;
365 }
366 }
367 } else {
368 retval = KERN_INVALID_ARGUMENT;
369 }
370
371 ml_set_interrupts_enabled(oldlevel);
372 return retval;
373}
374
#pragma mark **** thread timer ****

/* Single outstanding delayed thread_call; non-NULL means a timer is pending. */
static thread_call_t thread_timer_call = NULL;

/* Client callback invoked when the thread timer fires; receives the arg
 * passed to chudxnu_thread_timer_callback_enter(). */
typedef void (*chudxnu_thread_timer_callback_func_t)(uint32_t arg);
static chudxnu_thread_timer_callback_func_t thread_timer_callback_fn = NULL;
381
382static void chudxnu_private_thread_timer_callback(thread_call_param_t param0, thread_call_param_t param1)
383{
384 if(thread_timer_call) {
385 thread_call_free(thread_timer_call);
386 thread_timer_call = NULL;
387
388 if(thread_timer_callback_fn) {
389 (thread_timer_callback_fn)((uint32_t)param0);
390 }
391 }
392}
393
394__private_extern__
395kern_return_t chudxnu_thread_timer_callback_enter(chudxnu_thread_timer_callback_func_t func, uint32_t arg, uint32_t time, uint32_t units)
396{
397 if(!thread_timer_call) {
398 uint64_t t_delay;
399 thread_timer_callback_fn = func;
400 thread_timer_call = thread_call_allocate((thread_call_func_t)chudxnu_private_thread_timer_callback, (thread_call_param_t)arg);
401 clock_interval_to_deadline(time, units, &t_delay);
402 thread_call_enter_delayed(thread_timer_call, t_delay);
403 return KERN_SUCCESS;
404 } else {
405 return KERN_FAILURE; // thread timer call already pending
406 }
407}
408
409__private_extern__
410kern_return_t chudxnu_thread_timer_callback_cancel(void)
411{
412 if(thread_timer_call) {
413 thread_call_free(thread_timer_call);
414 thread_timer_call = NULL;
415 }
416 thread_timer_callback_fn = NULL;
417 return KERN_SUCCESS;
418}