/*
 * Copyright (c) 2003-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_OSREFERENCE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the
 * License may not be used to create, or enable the creation or
 * redistribution of, unlawful or unlicensed copies of an Apple operating
 * system, or to circumvent, violate, or enable the circumvention or
 * violation of, any terms of an Apple operating system software license
 * agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_OSREFERENCE_HEADER_END@
 */

#include <stdint.h>
#include <mach/boolean.h>
#include <mach/mach_types.h>

#include <kern/kern_types.h>
#include <kern/processor.h>
#include <kern/timer_call.h>
#include <kern/thread_call.h>
#include <kern/kalloc.h>
#include <kern/thread.h>

#include <ppc/machine_routines.h>
#include <ppc/cpu_data.h>
#include <ppc/cpu_internal.h>
#include <ppc/exception.h>
#include <ppc/thread.h>
#include <ppc/trap.h>

#include <ppc/chud/chud_xnu.h>
#include <ppc/chud/chud_xnu_private.h>

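/*
 * Cancel every callback CHUD may have registered through this file:
 * per-CPU timers, trap/interrupt/AST hooks, CPU signal and kdebug
 * hooks, the deprecated thread timer, and the CHUD syscall handler.
 */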
__private_extern__
void chudxnu_cancel_all_callbacks(void)
{
    chudxnu_cpu_timer_callback_cancel_all();
    chudxnu_trap_callback_cancel();
    chudxnu_interrupt_callback_cancel();
    chudxnu_perfmon_ast_callback_cancel();
    chudxnu_cpusig_callback_cancel();
    chudxnu_kdebug_callback_cancel();
    chudxnu_thread_timer_callback_cancel();
    chudxnu_syscall_callback_cancel();
}

#pragma mark **** cpu timer ****
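/*
 * Per-processor CHUD state: one timer_call plus its deadline and the
 * callback to invoke when it fires. Hung off each per_proc's pp_chud.
 */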
typedef struct {
    timer_call_data_t                   cpu_timer_call;
    uint64_t                            t_deadline;
    chudxnu_cpu_timer_callback_func_t   cpu_timer_callback_fn;
} chudcpu_data_t;

static chudcpu_data_t chudcpu_boot_cpu;

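/*
 * Allocate the per-CPU CHUD state. The boot processor uses static
 * storage (presumably because this can run before kalloc() is usable);
 * all other processors get theirs from kalloc().
 */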
void *chudxnu_per_proc_alloc(boolean_t boot_processor)
{
    chudcpu_data_t *chud_proc_info;

    if (boot_processor) {
        chud_proc_info = &chudcpu_boot_cpu;
    } else {
        chud_proc_info = (chudcpu_data_t *)kalloc(sizeof(chudcpu_data_t));
        if (chud_proc_info == (chudcpu_data_t *)NULL) {
            return (void *)NULL;
        }
    }
    bzero((char *)chud_proc_info, sizeof(chudcpu_data_t));
    chud_proc_info->t_deadline = 0xFFFFFFFFFFFFFFFFULL;
    return (void *)chud_proc_info;
}

void chudxnu_per_proc_free(void *per_proc_chud)
{
    if (per_proc_chud == (void *)&chudcpu_boot_cpu) {
        return;
    } else {
        kfree(per_proc_chud, sizeof(chudcpu_data_t));
    }
}

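/*
 * timer_call handler: runs when the per-CPU timer armed by
 * chudxnu_cpu_timer_callback_enter() expires. With interrupts disabled,
 * it samples the current thread's PPC_THREAD_STATE64 and hands it to
 * this CPU's registered callback, if any.
 */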
static void chudxnu_private_cpu_timer_callback(timer_call_param_t param0, timer_call_param_t param1)
{
    chudcpu_data_t *chud_proc_info;
    boolean_t oldlevel;
    struct ppc_thread_state64 state;
    mach_msg_type_number_t count;

    oldlevel = ml_set_interrupts_enabled(FALSE);
    chud_proc_info = (chudcpu_data_t *)(getPerProc()->pp_chud);

    count = PPC_THREAD_STATE64_COUNT;
    if(chudxnu_thread_get_state(current_thread(), PPC_THREAD_STATE64, (thread_state_t)&state, &count, FALSE) == KERN_SUCCESS) {
        if(chud_proc_info->cpu_timer_callback_fn) {
            (chud_proc_info->cpu_timer_callback_fn)(PPC_THREAD_STATE64, (thread_state_t)&state, count);
        }
    }

    ml_set_interrupts_enabled(oldlevel);
}

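/*
 * Arm (or re-arm) the timer callback on the current CPU: any pending
 * timer for this CPU is cancelled first, then a new deadline is
 * computed from the caller-supplied interval and the timer_call queued.
 */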
__private_extern__
kern_return_t chudxnu_cpu_timer_callback_enter(chudxnu_cpu_timer_callback_func_t func, uint32_t time, uint32_t units)
{
    chudcpu_data_t *chud_proc_info;
    boolean_t oldlevel;

    oldlevel = ml_set_interrupts_enabled(FALSE);
    chud_proc_info = (chudcpu_data_t *)(getPerProc()->pp_chud);

    timer_call_cancel(&(chud_proc_info->cpu_timer_call)); // cancel any existing callback for this cpu

    chud_proc_info->cpu_timer_callback_fn = func;

    clock_interval_to_deadline(time, units, &(chud_proc_info->t_deadline));
    timer_call_setup(&(chud_proc_info->cpu_timer_call), chudxnu_private_cpu_timer_callback, NULL);
    timer_call_enter(&(chud_proc_info->cpu_timer_call), chud_proc_info->t_deadline);

    ml_set_interrupts_enabled(oldlevel);
    return KERN_SUCCESS;
}

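/*
 * Cancel the timer callback on the current CPU only.
 */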
__private_extern__
kern_return_t chudxnu_cpu_timer_callback_cancel(void)
{
    chudcpu_data_t *chud_proc_info;
    boolean_t oldlevel;

    oldlevel = ml_set_interrupts_enabled(FALSE);
    chud_proc_info = (chudcpu_data_t *)(getPerProc()->pp_chud);

    timer_call_cancel(&(chud_proc_info->cpu_timer_call));
    chud_proc_info->t_deadline = 0xFFFFFFFFFFFFFFFFULL; // set to max value
    chud_proc_info->cpu_timer_callback_fn = NULL;

    ml_set_interrupts_enabled(oldlevel);
    return KERN_SUCCESS;
}

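/*
 * Cancel the timer callback on every CPU that has per-CPU CHUD state.
 */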
__private_extern__
kern_return_t chudxnu_cpu_timer_callback_cancel_all(void)
{
    unsigned int cpu;
    chudcpu_data_t *chud_proc_info;

    for(cpu = 0; cpu < real_ncpus; cpu++) {
        if ((PerProcTable[cpu].ppe_vaddr == 0)
            || (PerProcTable[cpu].ppe_vaddr->pp_chud == 0))
            continue;
        chud_proc_info = (chudcpu_data_t *)PerProcTable[cpu].ppe_vaddr->pp_chud;
        timer_call_cancel(&(chud_proc_info->cpu_timer_call));
        chud_proc_info->t_deadline = 0xFFFFFFFFFFFFFFFFULL; // set to max value
        chud_proc_info->cpu_timer_callback_fn = NULL;
    }
    return KERN_SUCCESS;
}

#pragma mark **** trap ****
static chudxnu_trap_callback_func_t trap_callback_fn = NULL;

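/*
 * Map an XNU trap code to the architectural PowerPC exception vector
 * offset (e.g. T_DATA_ACCESS -> 0x300, the DSI vector); unknown codes
 * map to 0x0.
 */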
#define TRAP_ENTRY_POINT(t) ((t==T_RESET)               ? 0x100 : \
                             (t==T_MACHINE_CHECK)       ? 0x200 : \
                             (t==T_DATA_ACCESS)         ? 0x300 : \
                             (t==T_DATA_SEGMENT)        ? 0x380 : \
                             (t==T_INSTRUCTION_ACCESS)  ? 0x400 : \
                             (t==T_INSTRUCTION_SEGMENT) ? 0x480 : \
                             (t==T_INTERRUPT)           ? 0x500 : \
                             (t==T_ALIGNMENT)           ? 0x600 : \
                             (t==T_PROGRAM)             ? 0x700 : \
                             (t==T_FP_UNAVAILABLE)      ? 0x800 : \
                             (t==T_DECREMENTER)         ? 0x900 : \
                             (t==T_IO_ERROR)            ? 0xa00 : \
                             (t==T_RESERVED)            ? 0xb00 : \
                             (t==T_SYSTEM_CALL)         ? 0xc00 : \
                             (t==T_TRACE)               ? 0xd00 : \
                             (t==T_FP_ASSIST)           ? 0xe00 : \
                             (t==T_PERF_MON)            ? 0xf00 : \
                             (t==T_VMX)                 ? 0xf20 : \
                             (t==T_INVALID_EXCP0)       ? 0x1000 : \
                             (t==T_INVALID_EXCP1)       ? 0x1100 : \
                             (t==T_INVALID_EXCP2)       ? 0x1200 : \
                             (t==T_INSTRUCTION_BKPT)    ? 0x1300 : \
                             (t==T_SYSTEM_MANAGEMENT)   ? 0x1400 : \
                             (t==T_SOFT_PATCH)          ? 0x1500 : \
                             (t==T_ALTIVEC_ASSIST)      ? 0x1600 : \
                             (t==T_THERMAL)             ? 0x1700 : \
                             (t==T_ARCHDEP0)            ? 0x1800 : \
                             (t==T_INSTRUMENTATION)     ? 0x2000 : \
                             0x0)

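/*
 * Trap hook installed via perfTrapHook. Converts the savearea to a
 * PPC_THREAD_STATE64 and passes it, along with the exception vector
 * offset, to the registered callback. The callback's return value is
 * propagated, presumably so KERN_SUCCESS can tell the trap handler the
 * exception was consumed while KERN_FAILURE lets normal handling run.
 */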
static kern_return_t chudxnu_private_trap_callback(int trapno, struct savearea *ssp, unsigned int dsisr, unsigned int dar)
{
    boolean_t oldlevel = ml_set_interrupts_enabled(FALSE);
    kern_return_t retval = KERN_FAILURE;
    uint32_t trapentry = TRAP_ENTRY_POINT(trapno);

    if(trapentry != 0x0) {
        if(trap_callback_fn) {
            struct ppc_thread_state64 state;
            mach_msg_type_number_t count = PPC_THREAD_STATE64_COUNT;
            chudxnu_copy_savearea_to_threadstate(PPC_THREAD_STATE64, (thread_state_t)&state, &count, ssp);
            retval = (trap_callback_fn)(trapentry, PPC_THREAD_STATE64, (thread_state_t)&state, count);
        }
    }

    ml_set_interrupts_enabled(oldlevel);

    return retval;
}

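/*
 * Install/remove the trap hook. The eieio/sync pair after each store
 * forces the new pointer out to memory so other processors observe the
 * hook change before it is relied upon; the same pattern recurs for
 * every hook in this file.
 */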
__private_extern__
kern_return_t chudxnu_trap_callback_enter(chudxnu_trap_callback_func_t func)
{
    trap_callback_fn = func;
    perfTrapHook = chudxnu_private_trap_callback;
    __asm__ volatile("eieio"); /* force order */
    __asm__ volatile("sync");  /* force to memory */
    return KERN_SUCCESS;
}

__private_extern__
kern_return_t chudxnu_trap_callback_cancel(void)
{
    trap_callback_fn = NULL;
    perfTrapHook = NULL;
    __asm__ volatile("eieio"); /* force order */
    __asm__ volatile("sync");  /* force to memory */
    return KERN_SUCCESS;
}

#pragma mark **** ast ****
static chudxnu_perfmon_ast_callback_func_t perfmon_ast_callback_fn = NULL;

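/*
 * AST hook installed via perfASTHook. Clears the pending CHUD AST bits
 * (an urgent CHUD AST also clears AST_URGENT unless preemption is still
 * pending) and then delivers the current thread state to the registered
 * performance-monitor callback.
 */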
static kern_return_t chudxnu_private_chud_ast_callback(int trapno, struct savearea *ssp, unsigned int dsisr, unsigned int dar)
{
    boolean_t oldlevel = ml_set_interrupts_enabled(FALSE);
    ast_t *myast = ast_pending();
    kern_return_t retval = KERN_FAILURE;

    if(*myast & AST_PPC_CHUD_URGENT) {
        *myast &= ~(AST_PPC_CHUD_URGENT | AST_PPC_CHUD);
        if((*myast & AST_PREEMPTION) != AST_PREEMPTION) *myast &= ~(AST_URGENT);
        retval = KERN_SUCCESS;
    } else if(*myast & AST_PPC_CHUD) {
        *myast &= ~(AST_PPC_CHUD);
        retval = KERN_SUCCESS;
    }

    if(perfmon_ast_callback_fn) {
        struct ppc_thread_state64 state;
        mach_msg_type_number_t count;
        count = PPC_THREAD_STATE64_COUNT;

        if(chudxnu_thread_get_state(current_thread(), PPC_THREAD_STATE64, (thread_state_t)&state, &count, FALSE) == KERN_SUCCESS) {
            (perfmon_ast_callback_fn)(PPC_THREAD_STATE64, (thread_state_t)&state, count);
        }
    }

#if 0
    // ASTs from ihandler go through thandler and are made to look like traps
    // always handle AST_PPC_CHUD_URGENT if there's a callback
    // only handle AST_PPC_CHUD if it's the only AST pending
    if(perfmon_ast_callback_fn && ((*myast & AST_PPC_CHUD_URGENT) || ((*myast & AST_PPC_CHUD) && !(*myast & AST_URGENT)))) {
        struct ppc_thread_state64 state;
        mach_msg_type_number_t count = PPC_THREAD_STATE64_COUNT;
        chudxnu_copy_savearea_to_threadstate(PPC_THREAD_STATE64, (thread_state_t)&state, &count, ssp);
        if(*myast & AST_PPC_CHUD_URGENT) {
            *myast &= ~(AST_PPC_CHUD_URGENT | AST_PPC_CHUD);
            if((*myast & AST_PREEMPTION) != AST_PREEMPTION) *myast &= ~(AST_URGENT);
            retval = KERN_SUCCESS;
        } else if(*myast & AST_PPC_CHUD) {
            *myast &= ~(AST_PPC_CHUD);
            retval = KERN_SUCCESS;
        }
        (perfmon_ast_callback_fn)(PPC_THREAD_STATE64, (thread_state_t)&state, count);
    }
#endif

    ml_set_interrupts_enabled(oldlevel);
    return retval;
}

__private_extern__
kern_return_t chudxnu_perfmon_ast_callback_enter(chudxnu_perfmon_ast_callback_func_t func)
{
    perfmon_ast_callback_fn = func;
    perfASTHook = chudxnu_private_chud_ast_callback;
    __asm__ volatile("eieio"); /* force order */
    __asm__ volatile("sync");  /* force to memory */
    return KERN_SUCCESS;
}

__private_extern__
kern_return_t chudxnu_perfmon_ast_callback_cancel(void)
{
    perfmon_ast_callback_fn = NULL;
    perfASTHook = NULL;
    __asm__ volatile("eieio"); /* force order */
    __asm__ volatile("sync");  /* force to memory */
    return KERN_SUCCESS;
}

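/*
 * Post a CHUD AST on the current processor; an urgent request also sets
 * AST_URGENT so it is acted upon promptly.
 */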
__private_extern__
kern_return_t chudxnu_perfmon_ast_send_urgent(boolean_t urgent)
{
    boolean_t oldlevel = ml_set_interrupts_enabled(FALSE);
    ast_t *myast = ast_pending();

    if(urgent) {
        *myast |= (AST_PPC_CHUD_URGENT | AST_URGENT);
    } else {
        *myast |= (AST_PPC_CHUD);
    }

    ml_set_interrupts_enabled(oldlevel);
    return KERN_SUCCESS;
}

__private_extern__
kern_return_t chudxnu_perfmon_ast_send(void)
{
    return chudxnu_perfmon_ast_send_urgent(TRUE);
}

#pragma mark **** interrupt ****
static chudxnu_interrupt_callback_func_t interrupt_callback_fn = NULL;
//extern perfCallback perfIntHook; /* function hook into interrupt() */

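/*
 * Interrupt hook installed via perfIntHook: forwards the interrupt's
 * vector offset and the interrupted thread state to the registered
 * callback.
 */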
static kern_return_t chudxnu_private_interrupt_callback(int trapno, struct savearea *ssp, unsigned int dsisr, unsigned int dar)
{
    if(interrupt_callback_fn) {
        struct ppc_thread_state64 state;
        mach_msg_type_number_t count = PPC_THREAD_STATE64_COUNT;
        chudxnu_copy_savearea_to_threadstate(PPC_THREAD_STATE64, (thread_state_t)&state, &count, ssp);
        return (interrupt_callback_fn)(TRAP_ENTRY_POINT(trapno), PPC_THREAD_STATE64, (thread_state_t)&state, count);
    } else {
        return KERN_FAILURE;
    }
}

__private_extern__
kern_return_t chudxnu_interrupt_callback_enter(chudxnu_interrupt_callback_func_t func)
{
    interrupt_callback_fn = func;
    perfIntHook = chudxnu_private_interrupt_callback;
    __asm__ volatile("eieio"); /* force order */
    __asm__ volatile("sync");  /* force to memory */
    return KERN_SUCCESS;
}

__private_extern__
kern_return_t chudxnu_interrupt_callback_cancel(void)
{
    interrupt_callback_fn = NULL;
    perfIntHook = NULL;
    __asm__ volatile("eieio"); /* force order */
    __asm__ volatile("sync");  /* force to memory */
    return KERN_SUCCESS;
}

#pragma mark **** cpu signal ****
static chudxnu_cpusig_callback_func_t cpusig_callback_fn = NULL;
extern perfCallback perfCpuSigHook; /* function hook into cpu_signal_handler() */

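/*
 * Handler for CHUD cross-CPU signal requests, installed via
 * perfCpuSigHook; the return value is ignored by the caller.
 */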
static kern_return_t chudxnu_private_cpu_signal_handler(int request, struct savearea *ssp, unsigned int arg0, unsigned int arg1)
{
    if(cpusig_callback_fn) {
        struct ppc_thread_state64 state;
        mach_msg_type_number_t count = PPC_THREAD_STATE64_COUNT;
        chudxnu_copy_savearea_to_threadstate(PPC_THREAD_STATE64, (thread_state_t)&state, &count, ssp);
        (cpusig_callback_fn)(request, PPC_THREAD_STATE64, (thread_state_t)&state, count);
    }
    return KERN_SUCCESS; // ignored
}

__private_extern__
kern_return_t chudxnu_cpusig_callback_enter(chudxnu_cpusig_callback_func_t func)
{
    cpusig_callback_fn = func;
    perfCpuSigHook = chudxnu_private_cpu_signal_handler;
    __asm__ volatile("eieio"); /* force order */
    __asm__ volatile("sync");  /* force to memory */
    return KERN_SUCCESS;
}

__private_extern__
kern_return_t chudxnu_cpusig_callback_cancel(void)
{
    cpusig_callback_fn = NULL;
    perfCpuSigHook = NULL;
    __asm__ volatile("eieio"); /* force order */
    __asm__ volatile("sync");  /* force to memory */
    return KERN_SUCCESS;
}

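/*
 * Send a CHUD request to another processor and wait for it to be
 * picked up. temp[] is the handshake block: temp[0] holds a sync flag
 * and temp[1] the request. The signal is retried until accepted (up to
 * 16 retries); hw_cpu_sync() then waits, bounded by LockTimeOut, for
 * the other processor to consume the block. Signalling yourself is
 * rejected with KERN_INVALID_ARGUMENT.
 */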
__private_extern__
kern_return_t chudxnu_cpusig_send(int otherCPU, uint32_t request)
{
    int thisCPU;
    kern_return_t retval = KERN_FAILURE;
    int retries = 0;
    boolean_t oldlevel;
    uint32_t temp[2];

    oldlevel = ml_set_interrupts_enabled(FALSE);
    thisCPU = cpu_number();

    if(thisCPU != otherCPU) {
        temp[0] = 0xFFFFFFFF;      /* set sync flag */
        temp[1] = request;         /* set request */
        __asm__ volatile("eieio"); /* force order */
        __asm__ volatile("sync");  /* force to memory */

        do {
            retval = cpu_signal(otherCPU, SIGPcpureq, CPRQchud, (uint32_t)&temp);
        } while(retval != KERN_SUCCESS && (retries++) < 16);

        if(retval != KERN_SUCCESS) {
            retval = KERN_FAILURE;
        } else {
            retval = hw_cpu_sync(temp, LockTimeOut); /* wait for the other processor */
            if(!retval) {
                retval = KERN_FAILURE;
            } else {
                retval = KERN_SUCCESS;
            }
        }
    } else {
        retval = KERN_INVALID_ARGUMENT;
    }

    ml_set_interrupts_enabled(oldlevel);
    return retval;
}

#pragma mark **** timer ****
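/*
 * Thin wrappers exposing kernel thread_call timers to CHUD: allocate a
 * timer bound to (func, param0), arm it with param1 and an interval
 * converted to an absolute deadline, cancel it, or cancel and free it.
 */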
__private_extern__
chud_timer_t chudxnu_timer_alloc(chudxnu_timer_callback_func_t func, uint32_t param0)
{
    return (chud_timer_t)thread_call_allocate((thread_call_func_t)func, (thread_call_param_t)param0);
}

__private_extern__
kern_return_t chudxnu_timer_callback_enter(chud_timer_t timer, uint32_t param1, uint32_t time, uint32_t units)
{
    uint64_t t_delay;
    clock_interval_to_deadline(time, units, &t_delay);
    thread_call_enter1_delayed((thread_call_t)timer, (thread_call_param_t)param1, t_delay);
    return KERN_SUCCESS;
}

__private_extern__
kern_return_t chudxnu_timer_callback_cancel(chud_timer_t timer)
{
    thread_call_cancel((thread_call_t)timer);
    return KERN_SUCCESS;
}

__private_extern__
kern_return_t chudxnu_timer_free(chud_timer_t timer)
{
    thread_call_cancel((thread_call_t)timer);
    thread_call_free((thread_call_t)timer);
    return KERN_SUCCESS;
}

#pragma mark **** CHUD syscall (PPC) ****

typedef int (*PPCcallEnt)(struct savearea *save);
extern PPCcallEnt PPCcalls[];

static chudxnu_syscall_callback_func_t syscall_callback_fn = NULL;

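/*
 * CHUD syscall entry: converts the savearea to thread state, runs the
 * registered callback, and stores its result in save_r3 (the PPC
 * syscall return register). Returns 1 so the dispatcher always checks
 * for ASTs.
 */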
static int chudxnu_private_syscall_callback(struct savearea *ssp)
{
    if(ssp) {
        if(syscall_callback_fn) {
            struct ppc_thread_state64 state;
            mach_msg_type_number_t count = PPC_THREAD_STATE64_COUNT;
            chudxnu_copy_savearea_to_threadstate(PPC_THREAD_STATE64, (thread_state_t)&state, &count, ssp);
            ssp->save_r3 = (syscall_callback_fn)(PPC_THREAD_STATE64, (thread_state_t)&state, count);
        } else {
            ssp->save_r3 = KERN_FAILURE;
        }
    }

    return 1; // check for ASTs (always)
}

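/*
 * Install/remove the CHUD handler in slot 9 of the PPCcalls dispatch
 * table, again flushing the pointer with eieio/sync.
 */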
__private_extern__
kern_return_t chudxnu_syscall_callback_enter(chudxnu_syscall_callback_func_t func)
{
    syscall_callback_fn = func;
    PPCcalls[9] = chudxnu_private_syscall_callback;
    __asm__ volatile("eieio"); /* force order */
    __asm__ volatile("sync");  /* force to memory */
    return KERN_SUCCESS;
}

__private_extern__
kern_return_t chudxnu_syscall_callback_cancel(void)
{
    syscall_callback_fn = NULL;
    PPCcalls[9] = NULL;
    __asm__ volatile("eieio"); /* force order */
    __asm__ volatile("sync");  /* force to memory */
    return KERN_SUCCESS;
}

#pragma mark **** thread timer - DEPRECATED ****

static thread_call_t thread_timer_call = NULL;
static chudxnu_thread_timer_callback_func_t thread_timer_callback_fn = NULL;

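/*
 * One-shot trampoline for the deprecated thread timer: frees the
 * thread_call before invoking the user's callback, presumably so a new
 * timer can be scheduled from within the callback itself.
 */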
static void chudxnu_private_thread_timer_callback(thread_call_param_t param0, thread_call_param_t param1)
{
    if(thread_timer_call) {
        thread_call_free(thread_timer_call);
        thread_timer_call = NULL;

        if(thread_timer_callback_fn) {
            (thread_timer_callback_fn)((uint32_t)param0);
        }
    }
}

// DEPRECATED
__private_extern__
kern_return_t chudxnu_thread_timer_callback_enter(chudxnu_thread_timer_callback_func_t func, uint32_t param, uint32_t time, uint32_t units)
{
    if(!thread_timer_call) {
        uint64_t t_delay;
        thread_timer_callback_fn = func;
        thread_timer_call = thread_call_allocate((thread_call_func_t)chudxnu_private_thread_timer_callback, (thread_call_param_t)param);
        clock_interval_to_deadline(time, units, &t_delay);
        thread_call_enter_delayed(thread_timer_call, t_delay);
        return KERN_SUCCESS;
    } else {
        return KERN_FAILURE; // thread timer call already pending
    }
}

// DEPRECATED
__private_extern__
kern_return_t chudxnu_thread_timer_callback_cancel(void)
{
    if(thread_timer_call) {
        thread_call_cancel(thread_timer_call);
        thread_call_free(thread_timer_call);
        thread_timer_call = NULL;
    }
    thread_timer_callback_fn = NULL;
    return KERN_SUCCESS;
}