1 | /* | |
2 | * Copyright (c) 2003-2004 Apple Computer, Inc. All rights reserved. | |
3 | * | |
4 | * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ | |
5 | * | |
6 | * This file contains Original Code and/or Modifications of Original Code | |
7 | * as defined in and that are subject to the Apple Public Source License | |
8 | * Version 2.0 (the 'License'). You may not use this file except in | |
9 | * compliance with the License. The rights granted to you under the License | |
10 | * may not be used to create, or enable the creation or redistribution of, | |
11 | * unlawful or unlicensed copies of an Apple operating system, or to | |
12 | * circumvent, violate, or enable the circumvention or violation of, any | |
13 | * terms of an Apple operating system software license agreement. | |
14 | * | |
15 | * Please obtain a copy of the License at | |
16 | * http://www.opensource.apple.com/apsl/ and read it before using this file. | |
17 | * | |
18 | * The Original Code and all software distributed under the License are | |
19 | * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER | |
20 | * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, | |
21 | * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, | |
22 | * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. | |
23 | * Please see the License for the specific language governing rights and | |
24 | * limitations under the License. | |
25 | * | |
26 | * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ | |
27 | */ | |
28 | ||
29 | #include <stdint.h> | |
30 | #include <mach/boolean.h> | |
31 | #include <mach/mach_types.h> | |
32 | ||
33 | #include <kern/kern_types.h> | |
34 | #include <kern/processor.h> | |
35 | #include <kern/timer_call.h> | |
36 | #include <kern/thread_call.h> | |
37 | #include <kern/kalloc.h> | |
38 | #include <kern/thread.h> | |
39 | ||
40 | #include <ppc/machine_routines.h> | |
41 | #include <ppc/cpu_data.h> | |
42 | #include <ppc/cpu_internal.h> | |
43 | #include <ppc/exception.h> | |
44 | #include <ppc/thread.h> | |
45 | #include <ppc/trap.h> | |
46 | ||
47 | #include <ppc/chud/chud_xnu.h> | |
48 | #include <ppc/chud/chud_xnu_private.h> | |
49 | ||
/*
 * Cancel every CHUD callback type at once: cpu timers on all CPUs, the
 * trap/interrupt/AST/cpusig/kdebug/syscall hooks, and the deprecated
 * thread timer.  Leaves the kernel with no CHUD hooks installed.
 */
__private_extern__
void chudxnu_cancel_all_callbacks(void)
{
	chudxnu_cpu_timer_callback_cancel_all();
	chudxnu_trap_callback_cancel();
	chudxnu_interrupt_callback_cancel();
	chudxnu_perfmon_ast_callback_cancel();
	chudxnu_cpusig_callback_cancel();
	chudxnu_kdebug_callback_cancel();
	chudxnu_thread_timer_callback_cancel();
	chudxnu_syscall_callback_cancel();
}
62 | ||
#pragma mark **** cpu timer ****
/*
 * Per-processor CHUD state, reached through getPerProc()->pp_chud.
 */
typedef struct {
	timer_call_data_t cpu_timer_call;	/* timer-call storage for this CPU's timer */
	uint64_t t_deadline;			/* absolute deadline of the armed timer; all-ones when idle */
	chudxnu_cpu_timer_callback_func_t cpu_timer_callback_fn; /* registered callback, NULL if none */
} chudcpu_data_t;

/* Static instance for the boot processor — presumably needed before the
 * kernel allocator is usable; NOTE(review): confirm against cpu bring-up. */
static chudcpu_data_t chudcpu_boot_cpu;
71 | ||
72 | void *chudxnu_per_proc_alloc(boolean_t boot_processor) | |
73 | { | |
74 | chudcpu_data_t *chud_proc_info; | |
75 | ||
76 | if (boot_processor) { | |
77 | chud_proc_info = &chudcpu_boot_cpu; | |
78 | } else { | |
79 | chud_proc_info = (chudcpu_data_t *)kalloc(sizeof(chudcpu_data_t)); | |
80 | if (chud_proc_info == (chudcpu_data_t *)NULL) { | |
81 | return (void *)NULL; | |
82 | } | |
83 | } | |
84 | bzero((char *)chud_proc_info, sizeof(chudcpu_data_t)); | |
85 | chud_proc_info->t_deadline = 0xFFFFFFFFFFFFFFFFULL; | |
86 | return (void *)chud_proc_info; | |
87 | } | |
88 | ||
89 | void chudxnu_per_proc_free(void *per_proc_chud) | |
90 | { | |
91 | if (per_proc_chud == (void *)&chudcpu_boot_cpu) { | |
92 | return; | |
93 | } else { | |
94 | kfree(per_proc_chud,sizeof(chudcpu_data_t)); | |
95 | } | |
96 | } | |
97 | ||
98 | static void chudxnu_private_cpu_timer_callback(timer_call_param_t param0, timer_call_param_t param1) | |
99 | { | |
100 | chudcpu_data_t *chud_proc_info; | |
101 | boolean_t oldlevel; | |
102 | struct ppc_thread_state64 state; | |
103 | mach_msg_type_number_t count; | |
104 | ||
105 | oldlevel = ml_set_interrupts_enabled(FALSE); | |
106 | chud_proc_info = (chudcpu_data_t *)(getPerProc()->pp_chud); | |
107 | ||
108 | count = PPC_THREAD_STATE64_COUNT; | |
109 | if(chudxnu_thread_get_state(current_thread(), PPC_THREAD_STATE64, (thread_state_t)&state, &count, FALSE)==KERN_SUCCESS) { | |
110 | if(chud_proc_info->cpu_timer_callback_fn) { | |
111 | (chud_proc_info->cpu_timer_callback_fn)(PPC_THREAD_STATE64, (thread_state_t)&state, count); | |
112 | } | |
113 | } | |
114 | ||
115 | ml_set_interrupts_enabled(oldlevel); | |
116 | } | |
117 | ||
118 | __private_extern__ | |
119 | kern_return_t chudxnu_cpu_timer_callback_enter(chudxnu_cpu_timer_callback_func_t func, uint32_t time, uint32_t units) | |
120 | { | |
121 | chudcpu_data_t *chud_proc_info; | |
122 | boolean_t oldlevel; | |
123 | ||
124 | oldlevel = ml_set_interrupts_enabled(FALSE); | |
125 | chud_proc_info = (chudcpu_data_t *)(getPerProc()->pp_chud); | |
126 | ||
127 | timer_call_cancel(&(chud_proc_info->cpu_timer_call)); // cancel any existing callback for this cpu | |
128 | ||
129 | chud_proc_info->cpu_timer_callback_fn = func; | |
130 | ||
131 | clock_interval_to_deadline(time, units, &(chud_proc_info->t_deadline)); | |
132 | timer_call_setup(&(chud_proc_info->cpu_timer_call), chudxnu_private_cpu_timer_callback, NULL); | |
133 | timer_call_enter(&(chud_proc_info->cpu_timer_call), chud_proc_info->t_deadline); | |
134 | ||
135 | ml_set_interrupts_enabled(oldlevel); | |
136 | return KERN_SUCCESS; | |
137 | } | |
138 | ||
139 | __private_extern__ | |
140 | kern_return_t chudxnu_cpu_timer_callback_cancel(void) | |
141 | { | |
142 | chudcpu_data_t *chud_proc_info; | |
143 | boolean_t oldlevel; | |
144 | ||
145 | oldlevel = ml_set_interrupts_enabled(FALSE); | |
146 | chud_proc_info = (chudcpu_data_t *)(getPerProc()->pp_chud); | |
147 | ||
148 | timer_call_cancel(&(chud_proc_info->cpu_timer_call)); | |
149 | chud_proc_info->t_deadline = chud_proc_info->t_deadline | ~(chud_proc_info->t_deadline); // set to max value | |
150 | chud_proc_info->cpu_timer_callback_fn = NULL; | |
151 | ||
152 | ml_set_interrupts_enabled(oldlevel); | |
153 | return KERN_SUCCESS; | |
154 | } | |
155 | ||
156 | __private_extern__ | |
157 | kern_return_t chudxnu_cpu_timer_callback_cancel_all(void) | |
158 | { | |
159 | unsigned int cpu; | |
160 | chudcpu_data_t *chud_proc_info; | |
161 | ||
162 | for(cpu=0; cpu<real_ncpus; cpu++) { | |
163 | if ((PerProcTable[cpu].ppe_vaddr == 0) | |
164 | || (PerProcTable[cpu].ppe_vaddr->pp_chud == 0)) | |
165 | continue; | |
166 | chud_proc_info = (chudcpu_data_t *)PerProcTable[cpu].ppe_vaddr->pp_chud; | |
167 | timer_call_cancel(&(chud_proc_info->cpu_timer_call)); | |
168 | chud_proc_info->t_deadline = chud_proc_info->t_deadline | ~(chud_proc_info->t_deadline); // set to max value | |
169 | chud_proc_info->cpu_timer_callback_fn = NULL; | |
170 | } | |
171 | return KERN_SUCCESS; | |
172 | } | |
173 | ||
#pragma mark **** trap ****
/* Currently registered trap callback; NULL when no tool is attached. */
static chudxnu_trap_callback_func_t trap_callback_fn = NULL;

/*
 * Map a PPC trap code (T_*) to its architectural exception-vector
 * address (0x100, 0x200, ...).  Evaluates to 0x0 for trap codes with no
 * corresponding vector.  NOTE: expands its argument many times — only
 * pass side-effect-free expressions.
 */
#define TRAP_ENTRY_POINT(t) ((t==T_RESET) ? 0x100 : \
	(t==T_MACHINE_CHECK) ? 0x200 : \
	(t==T_DATA_ACCESS) ? 0x300 : \
	(t==T_DATA_SEGMENT) ? 0x380 : \
	(t==T_INSTRUCTION_ACCESS) ? 0x400 : \
	(t==T_INSTRUCTION_SEGMENT) ? 0x480 : \
	(t==T_INTERRUPT) ? 0x500 : \
	(t==T_ALIGNMENT) ? 0x600 : \
	(t==T_PROGRAM) ? 0x700 : \
	(t==T_FP_UNAVAILABLE) ? 0x800 : \
	(t==T_DECREMENTER) ? 0x900 : \
	(t==T_IO_ERROR) ? 0xa00 : \
	(t==T_RESERVED) ? 0xb00 : \
	(t==T_SYSTEM_CALL) ? 0xc00 : \
	(t==T_TRACE) ? 0xd00 : \
	(t==T_FP_ASSIST) ? 0xe00 : \
	(t==T_PERF_MON) ? 0xf00 : \
	(t==T_VMX) ? 0xf20 : \
	(t==T_INVALID_EXCP0) ? 0x1000 : \
	(t==T_INVALID_EXCP1) ? 0x1100 : \
	(t==T_INVALID_EXCP2) ? 0x1200 : \
	(t==T_INSTRUCTION_BKPT) ? 0x1300 : \
	(t==T_SYSTEM_MANAGEMENT) ? 0x1400 : \
	(t==T_SOFT_PATCH) ? 0x1500 : \
	(t==T_ALTIVEC_ASSIST) ? 0x1600 : \
	(t==T_THERMAL) ? 0x1700 : \
	(t==T_ARCHDEP0) ? 0x1800 : \
	(t==T_INSTRUMENTATION) ? 0x2000 : \
	0x0)
206 | ||
207 | static kern_return_t chudxnu_private_trap_callback(int trapno, struct savearea *ssp, unsigned int dsisr, unsigned int dar) | |
208 | { | |
209 | boolean_t oldlevel = ml_set_interrupts_enabled(FALSE); | |
210 | kern_return_t retval = KERN_FAILURE; | |
211 | uint32_t trapentry = TRAP_ENTRY_POINT(trapno); | |
212 | ||
213 | if(trapentry!=0x0) { | |
214 | if(trap_callback_fn) { | |
215 | struct ppc_thread_state64 state; | |
216 | mach_msg_type_number_t count = PPC_THREAD_STATE64_COUNT; | |
217 | chudxnu_copy_savearea_to_threadstate(PPC_THREAD_STATE64, (thread_state_t)&state, &count, ssp); | |
218 | retval = (trap_callback_fn)(trapentry, PPC_THREAD_STATE64, (thread_state_t)&state, count); | |
219 | } | |
220 | } | |
221 | ||
222 | ml_set_interrupts_enabled(oldlevel); | |
223 | ||
224 | return retval; | |
225 | } | |
226 | ||
/*
 * Register `func` as the trap callback and install the private handler
 * into perfTrapHook.  The callback pointer is stored before the hook so
 * the handler never fires with the callback unset; the eieio/sync pair
 * forces both stores to memory in order on PPC.  Always KERN_SUCCESS.
 */
__private_extern__
kern_return_t chudxnu_trap_callback_enter(chudxnu_trap_callback_func_t func)
{
	trap_callback_fn = func;
	perfTrapHook = chudxnu_private_trap_callback;
	__asm__ volatile("eieio"); /* force order */
	__asm__ volatile("sync"); /* force to memory */
	return KERN_SUCCESS;
}
236 | ||
/*
 * Remove the trap callback and the perfTrapHook.  The callback pointer
 * is cleared first; if a trap races with the teardown the handler sees a
 * NULL callback and returns KERN_FAILURE harmlessly.  eieio/sync force
 * the stores to memory in order on PPC.  Always returns KERN_SUCCESS.
 */
__private_extern__
kern_return_t chudxnu_trap_callback_cancel(void)
{
	trap_callback_fn = NULL;
	perfTrapHook = NULL;
	__asm__ volatile("eieio"); /* force order */
	__asm__ volatile("sync"); /* force to memory */
	return KERN_SUCCESS;
}
246 | ||
#pragma mark **** ast ****
/* Currently registered perfmon AST callback; NULL when none. */
static chudxnu_perfmon_ast_callback_func_t perfmon_ast_callback_fn = NULL;

/*
 * Installed as perfASTHook.  Consumes any pending CHUD ASTs
 * (AST_PPC_CHUD / AST_PPC_CHUD_URGENT) and, if a callback is registered,
 * captures the current thread's PPC64 state and delivers it.  Runs with
 * interrupts disabled.  trapno/ssp/dsisr/dar are unused in the live
 * path (required by the hook signature).
 *
 * Returns KERN_SUCCESS if a CHUD AST was pending and consumed,
 * KERN_FAILURE otherwise.
 */
static kern_return_t chudxnu_private_chud_ast_callback(int trapno, struct savearea *ssp, unsigned int dsisr, unsigned int dar)
{
	boolean_t oldlevel = ml_set_interrupts_enabled(FALSE);
	ast_t *myast = ast_pending();
	kern_return_t retval = KERN_FAILURE;

	if(*myast & AST_PPC_CHUD_URGENT) {
		/* Urgent CHUD AST: clear both CHUD bits; also drop AST_URGENT
		 * unless preemption is independently pending. */
		*myast &= ~(AST_PPC_CHUD_URGENT | AST_PPC_CHUD);
		if((*myast & AST_PREEMPTION) != AST_PREEMPTION) *myast &= ~(AST_URGENT);
		retval = KERN_SUCCESS;
	} else if(*myast & AST_PPC_CHUD) {
		*myast &= ~(AST_PPC_CHUD);
		retval = KERN_SUCCESS;
	}

	if(perfmon_ast_callback_fn) {
		struct ppc_thread_state64 state;
		mach_msg_type_number_t count;
		count = PPC_THREAD_STATE64_COUNT;

		if(chudxnu_thread_get_state(current_thread(), PPC_THREAD_STATE64, (thread_state_t)&state, &count, FALSE)==KERN_SUCCESS) {
			(perfmon_ast_callback_fn)(PPC_THREAD_STATE64, (thread_state_t)&state, count);
		}
	}

	/* Disabled alternative implementation kept for reference: it would
	 * gate callback delivery on the AST bits and use the savearea rather
	 * than chudxnu_thread_get_state(). */
#if 0
	// ASTs from ihandler go through thandler and are made to look like traps
	// always handle AST_PPC_CHUD_URGENT if there's a callback
	// only handle AST_PPC_CHUD if it's the only AST pending
	if(perfmon_ast_callback_fn && ((*myast & AST_PPC_CHUD_URGENT) || ((*myast & AST_PPC_CHUD) && !(*myast & AST_URGENT)))) {
		struct ppc_thread_state64 state;
		mach_msg_type_number_t count = PPC_THREAD_STATE64_COUNT;
		chudxnu_copy_savearea_to_threadstate(PPC_THREAD_STATE64, (thread_state_t)&state, &count, ssp);
		if(*myast & AST_PPC_CHUD_URGENT) {
			*myast &= ~(AST_PPC_CHUD_URGENT | AST_PPC_CHUD);
			if((*myast & AST_PREEMPTION) != AST_PREEMPTION) *myast &= ~(AST_URGENT);
			retval = KERN_SUCCESS;
		} else if(*myast & AST_PPC_CHUD) {
			*myast &= ~(AST_PPC_CHUD);
			retval = KERN_SUCCESS;
		}
		(perfmon_ast_callback_fn)(PPC_THREAD_STATE64, (thread_state_t)&state, count);
	}
#endif

	ml_set_interrupts_enabled(oldlevel);
	return retval;
}
298 | ||
/*
 * Register `func` as the perfmon AST callback and install the private
 * handler into perfASTHook.  Callback pointer is stored before the hook
 * so the handler never fires with it unset; eieio/sync force the stores
 * to memory in order on PPC.  Always returns KERN_SUCCESS.
 */
__private_extern__
kern_return_t chudxnu_perfmon_ast_callback_enter(chudxnu_perfmon_ast_callback_func_t func)
{
	perfmon_ast_callback_fn = func;
	perfASTHook = chudxnu_private_chud_ast_callback;
	__asm__ volatile("eieio"); /* force order */
	__asm__ volatile("sync"); /* force to memory */
	return KERN_SUCCESS;
}
308 | ||
/*
 * Remove the perfmon AST callback and the perfASTHook.  The callback is
 * cleared first so a racing AST sees NULL and skips delivery; eieio/sync
 * force the stores to memory in order on PPC.  Always KERN_SUCCESS.
 */
__private_extern__
kern_return_t chudxnu_perfmon_ast_callback_cancel(void)
{
	perfmon_ast_callback_fn = NULL;
	perfASTHook = NULL;
	__asm__ volatile("eieio"); /* force order */
	__asm__ volatile("sync"); /* force to memory */
	return KERN_SUCCESS;
}
318 | ||
319 | __private_extern__ | |
320 | kern_return_t chudxnu_perfmon_ast_send_urgent(boolean_t urgent) | |
321 | { | |
322 | boolean_t oldlevel = ml_set_interrupts_enabled(FALSE); | |
323 | ast_t *myast = ast_pending(); | |
324 | ||
325 | if(urgent) { | |
326 | *myast |= (AST_PPC_CHUD_URGENT | AST_URGENT); | |
327 | } else { | |
328 | *myast |= (AST_PPC_CHUD); | |
329 | } | |
330 | ||
331 | ml_set_interrupts_enabled(oldlevel); | |
332 | return KERN_SUCCESS; | |
333 | } | |
334 | ||
/*
 * Convenience wrapper: post an urgent perfmon AST on the current CPU.
 */
__private_extern__
kern_return_t chudxnu_perfmon_ast_send(void)
{
	return chudxnu_perfmon_ast_send_urgent(TRUE);
}
340 | ||
341 | #pragma mark **** interrupt **** | |
342 | static chudxnu_interrupt_callback_func_t interrupt_callback_fn = NULL; | |
343 | //extern perfCallback perfIntHook; /* function hook into interrupt() */ | |
344 | ||
345 | static kern_return_t chudxnu_private_interrupt_callback(int trapno, struct savearea *ssp, unsigned int dsisr, unsigned int dar) | |
346 | { | |
347 | if(interrupt_callback_fn) { | |
348 | struct ppc_thread_state64 state; | |
349 | mach_msg_type_number_t count = PPC_THREAD_STATE64_COUNT; | |
350 | chudxnu_copy_savearea_to_threadstate(PPC_THREAD_STATE64, (thread_state_t)&state, &count, ssp); | |
351 | return (interrupt_callback_fn)(TRAP_ENTRY_POINT(trapno), PPC_THREAD_STATE64, (thread_state_t)&state, count); | |
352 | } else { | |
353 | return KERN_FAILURE; | |
354 | } | |
355 | } | |
356 | ||
/*
 * Register `func` as the interrupt callback and install the private
 * handler into perfIntHook.  Callback pointer is stored before the hook;
 * eieio/sync force the stores to memory in order on PPC.
 * Always returns KERN_SUCCESS.
 */
__private_extern__
kern_return_t chudxnu_interrupt_callback_enter(chudxnu_interrupt_callback_func_t func)
{
	interrupt_callback_fn = func;
	perfIntHook = chudxnu_private_interrupt_callback;
	__asm__ volatile("eieio"); /* force order */
	__asm__ volatile("sync"); /* force to memory */
	return KERN_SUCCESS;
}
366 | ||
/*
 * Remove the interrupt callback and the perfIntHook.  The callback is
 * cleared first so a racing interrupt sees NULL and fails harmlessly;
 * eieio/sync force the stores to memory in order on PPC.
 * Always returns KERN_SUCCESS.
 */
__private_extern__
kern_return_t chudxnu_interrupt_callback_cancel(void)
{
	interrupt_callback_fn = NULL;
	perfIntHook = NULL;
	__asm__ volatile("eieio"); /* force order */
	__asm__ volatile("sync"); /* force to memory */
	return KERN_SUCCESS;
}
376 | ||
377 | #pragma mark **** cpu signal **** | |
378 | static chudxnu_cpusig_callback_func_t cpusig_callback_fn = NULL; | |
379 | extern perfCallback perfCpuSigHook; /* function hook into cpu_signal_handler() */ | |
380 | ||
381 | static kern_return_t chudxnu_private_cpu_signal_handler(int request, struct savearea *ssp, unsigned int arg0, unsigned int arg1) | |
382 | { | |
383 | if(cpusig_callback_fn) { | |
384 | struct ppc_thread_state64 state; | |
385 | mach_msg_type_number_t count = PPC_THREAD_STATE64_COUNT; | |
386 | chudxnu_copy_savearea_to_threadstate(PPC_THREAD_STATE64, (thread_state_t)&state, &count, ssp); | |
387 | (cpusig_callback_fn)(request, PPC_THREAD_STATE64, (thread_state_t)&state, count); | |
388 | } | |
389 | return KERN_SUCCESS; // ignored | |
390 | } | |
391 | ||
/*
 * Register `func` as the cpu-signal callback and install the private
 * handler into perfCpuSigHook.  Callback pointer is stored before the
 * hook; eieio/sync force the stores to memory in order on PPC.
 * Always returns KERN_SUCCESS.
 */
__private_extern__
kern_return_t chudxnu_cpusig_callback_enter(chudxnu_cpusig_callback_func_t func)
{
	cpusig_callback_fn = func;
	perfCpuSigHook = chudxnu_private_cpu_signal_handler;
	__asm__ volatile("eieio"); /* force order */
	__asm__ volatile("sync"); /* force to memory */
	return KERN_SUCCESS;
}
401 | ||
/*
 * Remove the cpu-signal callback and the perfCpuSigHook.  The callback
 * is cleared first so a racing signal sees NULL and skips delivery;
 * eieio/sync force the stores to memory in order on PPC.
 * Always returns KERN_SUCCESS.
 */
__private_extern__
kern_return_t chudxnu_cpusig_callback_cancel(void)
{
	cpusig_callback_fn = NULL;
	perfCpuSigHook = NULL;
	__asm__ volatile("eieio"); /* force order */
	__asm__ volatile("sync"); /* force to memory */
	return KERN_SUCCESS;
}
411 | ||
412 | __private_extern__ | |
413 | kern_return_t chudxnu_cpusig_send(int otherCPU, uint32_t request) | |
414 | { | |
415 | int thisCPU; | |
416 | kern_return_t retval = KERN_FAILURE; | |
417 | int retries = 0; | |
418 | boolean_t oldlevel; | |
419 | uint32_t temp[2]; | |
420 | ||
421 | oldlevel = ml_set_interrupts_enabled(FALSE); | |
422 | thisCPU = cpu_number(); | |
423 | ||
424 | if(thisCPU!=otherCPU) { | |
425 | temp[0] = 0xFFFFFFFF; /* set sync flag */ | |
426 | temp[1] = request; /* set request */ | |
427 | __asm__ volatile("eieio"); /* force order */ | |
428 | __asm__ volatile("sync"); /* force to memory */ | |
429 | ||
430 | do { | |
431 | retval=cpu_signal(otherCPU, SIGPcpureq, CPRQchud, (uint32_t)&temp); | |
432 | } while(retval!=KERN_SUCCESS && (retries++)<16); | |
433 | ||
434 | if(retries>=16) { | |
435 | retval = KERN_FAILURE; | |
436 | } else { | |
437 | retval = hw_cpu_sync(temp, LockTimeOut); /* wait for the other processor */ | |
438 | if(!retval) { | |
439 | retval = KERN_FAILURE; | |
440 | } else { | |
441 | retval = KERN_SUCCESS; | |
442 | } | |
443 | } | |
444 | } else { | |
445 | retval = KERN_INVALID_ARGUMENT; | |
446 | } | |
447 | ||
448 | ml_set_interrupts_enabled(oldlevel); | |
449 | return retval; | |
450 | } | |
451 | ||
#pragma mark **** timer ****
/*
 * Allocate a CHUD timer backed by a kernel thread_call.  `func` is
 * invoked with param0 (and the param1 given at enter time) when the
 * timer fires.  Caller owns the timer and must release it with
 * chudxnu_timer_free().
 */
__private_extern__
chud_timer_t chudxnu_timer_alloc(chudxnu_timer_callback_func_t func, uint32_t param0)
{
	return (chud_timer_t)thread_call_allocate((thread_call_func_t)func, (thread_call_param_t)param0);
}
458 | ||
459 | __private_extern__ | |
460 | kern_return_t chudxnu_timer_callback_enter(chud_timer_t timer, uint32_t param1, uint32_t time, uint32_t units) | |
461 | { | |
462 | uint64_t t_delay; | |
463 | clock_interval_to_deadline(time, units, &t_delay); | |
464 | thread_call_enter1_delayed((thread_call_t)timer, (thread_call_param_t)param1, t_delay); | |
465 | return KERN_SUCCESS; | |
466 | } | |
467 | ||
/*
 * Cancel a pending firing of `timer` (no-op if not pending).  The timer
 * itself remains allocated.  Always returns KERN_SUCCESS.
 */
__private_extern__
kern_return_t chudxnu_timer_callback_cancel(chud_timer_t timer)
{
	thread_call_cancel((thread_call_t)timer);
	return KERN_SUCCESS;
}
474 | ||
/*
 * Cancel any pending firing and release the timer's thread_call.  The
 * timer must not be used after this call.  Always returns KERN_SUCCESS.
 */
__private_extern__
kern_return_t chudxnu_timer_free(chud_timer_t timer)
{
	thread_call_cancel((thread_call_t)timer);
	thread_call_free((thread_call_t)timer);
	return KERN_SUCCESS;
}
482 | ||
483 | #pragma mark **** CHUD syscall (PPC) **** | |
484 | ||
485 | typedef int (*PPCcallEnt)(struct savearea *save); | |
486 | extern PPCcallEnt PPCcalls[]; | |
487 | ||
488 | static chudxnu_syscall_callback_func_t syscall_callback_fn = NULL; | |
489 | ||
490 | static int chudxnu_private_syscall_callback(struct savearea *ssp) | |
491 | { | |
492 | if(ssp) { | |
493 | if(syscall_callback_fn) { | |
494 | struct ppc_thread_state64 state; | |
495 | kern_return_t retval; | |
496 | mach_msg_type_number_t count = PPC_THREAD_STATE64_COUNT; | |
497 | chudxnu_copy_savearea_to_threadstate(PPC_THREAD_STATE64, (thread_state_t)&state, &count, ssp); | |
498 | ssp->save_r3 = (syscall_callback_fn)(PPC_THREAD_STATE64, (thread_state_t)&state, count); | |
499 | } else { | |
500 | ssp->save_r3 = KERN_FAILURE; | |
501 | } | |
502 | } | |
503 | ||
504 | return 1; // check for ASTs (always) | |
505 | } | |
506 | ||
/*
 * Register `func` as the CHUD syscall callback and install the private
 * handler into PPCcalls slot 9 (the table index reserved for CHUD —
 * NOTE(review): confirm slot assignment against the PPCcalls table
 * definition).  Callback pointer is stored before the table entry;
 * eieio/sync force the stores to memory in order on PPC.
 * Always returns KERN_SUCCESS.
 */
__private_extern__
kern_return_t chudxnu_syscall_callback_enter(chudxnu_syscall_callback_func_t func)
{
	syscall_callback_fn = func;
	PPCcalls[9] = chudxnu_private_syscall_callback;
	__asm__ volatile("eieio"); /* force order */
	__asm__ volatile("sync"); /* force to memory */
	return KERN_SUCCESS;
}
516 | ||
/*
 * Remove the CHUD syscall callback and clear PPCcalls slot 9.  The
 * callback is cleared first so a racing syscall gets KERN_FAILURE in
 * save_r3; eieio/sync force the stores to memory in order on PPC.
 * Always returns KERN_SUCCESS.
 */
__private_extern__
kern_return_t chudxnu_syscall_callback_cancel(void)
{
	syscall_callback_fn = NULL;
	PPCcalls[9] = NULL;
	__asm__ volatile("eieio"); /* force order */
	__asm__ volatile("sync"); /* force to memory */
	return KERN_SUCCESS;
}
526 | ||
#pragma mark **** thread timer - DEPRECATED ****

/* Single global thread timer (deprecated API): at most one pending call. */
static thread_call_t thread_timer_call = NULL;
static chudxnu_thread_timer_callback_func_t thread_timer_callback_fn = NULL;

/*
 * thread_call handler for the deprecated one-shot thread timer.  Frees
 * the thread_call and clears the global BEFORE invoking the user
 * callback, so the callback may immediately re-arm the timer.  param1 is
 * unused (thread_call signature).
 */
static void chudxnu_private_thread_timer_callback(thread_call_param_t param0, thread_call_param_t param1)
{
	if(thread_timer_call) {
		thread_call_free(thread_timer_call);
		thread_timer_call = NULL;

		if(thread_timer_callback_fn) {
			(thread_timer_callback_fn)((uint32_t)param0);
		}
	}
}
543 | ||
544 | // DEPRECATED | |
545 | __private_extern__ | |
546 | kern_return_t chudxnu_thread_timer_callback_enter(chudxnu_thread_timer_callback_func_t func, uint32_t param, uint32_t time, uint32_t units) | |
547 | { | |
548 | if(!thread_timer_call) { | |
549 | uint64_t t_delay; | |
550 | thread_timer_callback_fn = func; | |
551 | thread_timer_call = thread_call_allocate((thread_call_func_t)chudxnu_private_thread_timer_callback, (thread_call_param_t)param); | |
552 | clock_interval_to_deadline(time, units, &t_delay); | |
553 | thread_call_enter_delayed(thread_timer_call, t_delay); | |
554 | return KERN_SUCCESS; | |
555 | } else { | |
556 | return KERN_FAILURE; // thread timer call already pending | |
557 | } | |
558 | } | |
559 | ||
// DEPRECATED
/*
 * Cancel and release the global thread timer if one is pending, then
 * clear the registered callback.  Safe to call when nothing is armed.
 * Always returns KERN_SUCCESS.
 */
__private_extern__
kern_return_t chudxnu_thread_timer_callback_cancel(void)
{
	if(thread_timer_call) {
		/* Cancel first so the handler cannot run while we free. */
		thread_call_cancel(thread_timer_call);
		thread_call_free(thread_timer_call);
		thread_timer_call = NULL;
	}
	thread_timer_callback_fn = NULL;
	return KERN_SUCCESS;
}