/*
 * Copyright (c) 2003-2009 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <stdint.h>
#include <mach/boolean.h>
#include <mach/mach_types.h>

#include <kern/kern_types.h>
#include <kern/processor.h>
#include <kern/timer_call.h>
#include <kern/thread_call.h>
#include <kern/kalloc.h>
#include <kern/thread.h>

#include <libkern/OSAtomic.h>

#include <machine/machine_routines.h>
#include <machine/cpu_data.h>
#include <machine/trap.h>

#include <chud/chud_xnu.h>
#include <chud/chud_xnu_private.h>

#include <i386/misc_protos.h>
#include <i386/lapic.h>
#include <i386/mp.h>
#include <i386/machine_cpu.h>

#include <sys/kdebug.h>
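/*
 * Event codes for the CHUD operations below (apparently intended as kdebug
 * trace codes, given the preceding include; they are not referenced
 * elsewhere in this file).
 */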
#define CHUD_TIMER_CALLBACK_CANCEL 0
#define CHUD_TIMER_CALLBACK_ENTER 1
#define CHUD_TIMER_CALLBACK 2
#define CHUD_AST_SEND 3
#define CHUD_AST_CALLBACK 4
#define CHUD_CPUSIG_SEND 5
#define CHUD_CPUSIG_CALLBACK 6

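/*
 * Cancel every CHUD callback type in one shot, so no stale hooks remain
 * installed once a CHUD client detaches.
 */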
__private_extern__
void chudxnu_cancel_all_callbacks(void)
{
	chudxnu_cpusig_callback_cancel();
	chudxnu_cpu_timer_callback_cancel_all();
	chudxnu_interrupt_callback_cancel();
	chudxnu_perfmon_ast_callback_cancel();
	chudxnu_kdebug_callback_cancel();
	chudxnu_trap_callback_cancel();
	chudxnu_syscall_callback_cancel();
	chudxnu_dtrace_callback_cancel();
}

static lck_grp_t	chud_request_lck_grp;
static lck_grp_attr_t	chud_request_lck_grp_attr;
static lck_attr_t	chud_request_lck_attr;

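/*
 * Per-CPU CHUD state. The boot processor gets static storage, presumably
 * because it is set up before kalloc() is usable; every other CPU's
 * chudcpu_data_t comes from kalloc(). The lock group for the per-CPU
 * request queues is initialized once, on the boot-processor path.
 */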
static chudcpu_data_t chudcpu_boot_cpu;
void *
chudxnu_cpu_alloc(boolean_t boot_processor)
{
	chudcpu_data_t	*chud_proc_info;

	if (boot_processor) {
		chud_proc_info = &chudcpu_boot_cpu;

		lck_attr_setdefault(&chud_request_lck_attr);
		lck_grp_attr_setdefault(&chud_request_lck_grp_attr);
		lck_grp_init(&chud_request_lck_grp, "chud_request", &chud_request_lck_grp_attr);

	} else {
		chud_proc_info = (chudcpu_data_t *)
			kalloc(sizeof(chudcpu_data_t));
		if (chud_proc_info == (chudcpu_data_t *)NULL) {
			return (void *)NULL;
		}
	}
	bzero((char *)chud_proc_info, sizeof(chudcpu_data_t));
	chud_proc_info->t_deadline = 0xFFFFFFFFFFFFFFFFULL;

	mpqueue_init(&chud_proc_info->cpu_request_queue, &chud_request_lck_grp, &chud_request_lck_attr);

	/* timer_call_cancel() can be called before first usage, so init here: <rdar://problem/9320202> */
	timer_call_setup(&(chud_proc_info->cpu_timer_call), NULL, NULL);

	return (void *)chud_proc_info;
}

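/*
 * Release a per-CPU CHUD structure. The boot CPU's structure is static
 * and must never reach kfree(), hence the explicit check.
 */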
void
chudxnu_cpu_free(void *cp)
{
	if (cp == NULL || cp == (void *)&chudcpu_boot_cpu) {
		return;
	} else {
		kfree(cp, sizeof(chudcpu_data_t));
	}
}

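/*
 * Timer expiration handler: with interrupts disabled, sample the current
 * thread's register state and hand it to the registered per-CPU timer
 * callback, if any.
 */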
static void
chudxnu_private_cpu_timer_callback(
	timer_call_param_t param0,
	timer_call_param_t param1)
{
#pragma unused (param0)
#pragma unused (param1)
	chudcpu_data_t			*chud_proc_info;
	boolean_t			oldlevel;
	x86_thread_state_t		state;
	mach_msg_type_number_t		count;
	chudxnu_cpu_timer_callback_func_t fn;

	oldlevel = ml_set_interrupts_enabled(FALSE);
	chud_proc_info = (chudcpu_data_t *)(current_cpu_datap()->cpu_chud);

	count = x86_THREAD_STATE_COUNT;
	if (chudxnu_thread_get_state(current_thread(),
				     x86_THREAD_STATE,
				     (thread_state_t)&state,
				     &count,
				     FALSE) == KERN_SUCCESS) {
		fn = chud_proc_info->cpu_timer_callback_fn;
		if (fn) {
			(fn)(
				x86_THREAD_STATE,
				(thread_state_t)&state,
				count);
		}
	}

	ml_set_interrupts_enabled(oldlevel);
}

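/*
 * Arm a one-shot timer callback on the calling CPU. Any callback already
 * pending on this CPU is cancelled first; the new deadline is `time`,
 * expressed in the given `units`, from now.
 */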
__private_extern__ kern_return_t
chudxnu_cpu_timer_callback_enter(
	chudxnu_cpu_timer_callback_func_t func,
	uint32_t time,
	uint32_t units)
{
	chudcpu_data_t	*chud_proc_info;
	boolean_t	oldlevel;

	oldlevel = ml_set_interrupts_enabled(FALSE);
	chud_proc_info = (chudcpu_data_t *)(current_cpu_datap()->cpu_chud);

	// cancel any existing callback for this cpu
	timer_call_cancel(&(chud_proc_info->cpu_timer_call));

	chud_proc_info->cpu_timer_callback_fn = func;

	clock_interval_to_deadline(time, units, &(chud_proc_info->t_deadline));
	timer_call_setup(&(chud_proc_info->cpu_timer_call),
			 chudxnu_private_cpu_timer_callback, NULL);
	timer_call_enter(&(chud_proc_info->cpu_timer_call),
			 chud_proc_info->t_deadline,
			 TIMER_CALL_SYS_CRITICAL|TIMER_CALL_LOCAL);

	ml_set_interrupts_enabled(oldlevel);
	return KERN_SUCCESS;
}

__private_extern__ kern_return_t
chudxnu_cpu_timer_callback_cancel(void)
{
	chudcpu_data_t	*chud_proc_info;
	boolean_t	oldlevel;

	oldlevel = ml_set_interrupts_enabled(FALSE);
	chud_proc_info = (chudcpu_data_t *)(current_cpu_datap()->cpu_chud);

	timer_call_cancel(&(chud_proc_info->cpu_timer_call));

	// set the deadline to the max value (x | ~x == all ones):
	chud_proc_info->t_deadline |= ~(chud_proc_info->t_deadline);
	chud_proc_info->cpu_timer_callback_fn = NULL;

	ml_set_interrupts_enabled(oldlevel);
	return KERN_SUCCESS;
}

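/*
 * Cancel the timer callback on every CPU that has CHUD state, resetting
 * each deadline to the max value and clearing the callback pointer.
 */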
__private_extern__ kern_return_t
chudxnu_cpu_timer_callback_cancel_all(void)
{
	unsigned int	cpu;
	chudcpu_data_t	*chud_proc_info;

	for (cpu = 0; cpu < real_ncpus; cpu++) {
		chud_proc_info = (chudcpu_data_t *) cpu_data_ptr[cpu]->cpu_chud;
		if (chud_proc_info == NULL)
			continue;
		timer_call_cancel(&(chud_proc_info->cpu_timer_call));
		chud_proc_info->t_deadline |= ~(chud_proc_info->t_deadline);
		chud_proc_info->cpu_timer_callback_fn = NULL;
	}
	return KERN_SUCCESS;
}

#if 0
#pragma mark **** trap ****
#endif
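/*
 * Default trap callback: reports every trap as unhandled (KERN_FAILURE)
 * so normal trap processing continues when no real client callback is
 * installed. Keeping the pointer non-NULL by default fits the
 * OSCompareAndSwapPtr-based enter/cancel paths below.
 */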
static kern_return_t chud_null_trap(uint32_t trapentry, thread_flavor_t flavor,
	thread_state_t tstate, mach_msg_type_number_t count);
static chudxnu_trap_callback_func_t trap_callback_fn = chud_null_trap;

static kern_return_t chud_null_trap(uint32_t trapentry __unused, thread_flavor_t flavor __unused,
	thread_state_t tstate __unused, mach_msg_type_number_t count __unused) {
	return KERN_FAILURE;
}

static kern_return_t
chudxnu_private_trap_callback(
	int		trapno,
	void		*regs,
	int		unused1,
	int		unused2)
{
#pragma unused (regs)
#pragma unused (unused1)
#pragma unused (unused2)
	kern_return_t retval = KERN_FAILURE;
	chudxnu_trap_callback_func_t fn = trap_callback_fn;

	if (fn) {
		boolean_t	oldlevel;
		x86_thread_state_t state;
		mach_msg_type_number_t count;
		thread_t thread = current_thread();

		oldlevel = ml_set_interrupts_enabled(FALSE);

		/* prevent reentry into CHUD when dtracing */
		if (thread->t_chud & T_IN_CHUD) {
			/* restore interrupts */
			ml_set_interrupts_enabled(oldlevel);

			return KERN_FAILURE;	// not handled - pass off to dtrace
		}

		/* update the chud state bits */
		thread->t_chud |= T_IN_CHUD;

		count = x86_THREAD_STATE_COUNT;

		if (chudxnu_thread_get_state(thread,
					     x86_THREAD_STATE,
					     (thread_state_t)&state,
					     &count,
					     FALSE) == KERN_SUCCESS) {

			retval = (fn)(
					trapno,
					x86_THREAD_STATE,
					(thread_state_t)&state,
					count);
		}

		/* no longer in CHUD */
		thread->t_chud &= ~(T_IN_CHUD);

		ml_set_interrupts_enabled(oldlevel);
	}

	return retval;
}

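/*
 * Install the CHUD trap dispatcher: the first compare-and-swap claims the
 * global perfTrapHook only if nothing else holds it; the retry loop then
 * swaps the client's function into trap_callback_fn.
 */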
__private_extern__ kern_return_t
chudxnu_trap_callback_enter(chudxnu_trap_callback_func_t func)
{
	if (OSCompareAndSwapPtr(NULL, chudxnu_private_trap_callback,
		(void * volatile *)&perfTrapHook)) {

		chudxnu_trap_callback_func_t old = trap_callback_fn;
		while (!OSCompareAndSwapPtr(old, func,
			(void * volatile *)&trap_callback_fn)) {
			old = trap_callback_fn;
		}
		return KERN_SUCCESS;
	}
	return KERN_FAILURE;
}

__private_extern__ kern_return_t
chudxnu_trap_callback_cancel(void)
{
	if (OSCompareAndSwapPtr(chudxnu_private_trap_callback, NULL,
		(void * volatile *)&perfTrapHook)) {

		chudxnu_trap_callback_func_t old = trap_callback_fn;
		while (!OSCompareAndSwapPtr(old, chud_null_trap,
			(void * volatile *)&trap_callback_fn)) {
			old = trap_callback_fn;
		}
		return KERN_SUCCESS;
	}
	return KERN_FAILURE;
}

#if 0
#pragma mark **** ast ****
#endif
static kern_return_t chud_null_ast(thread_flavor_t flavor, thread_state_t tstate,
	mach_msg_type_number_t count);
static chudxnu_perfmon_ast_callback_func_t perfmon_ast_callback_fn = chud_null_ast;

static kern_return_t chud_null_ast(thread_flavor_t flavor __unused,
	thread_state_t tstate __unused, mach_msg_type_number_t count __unused) {
	return KERN_FAILURE;
}

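/*
 * AST dispatcher: consumes the AST_CHUD/AST_CHUD_URGENT bits from the
 * pending set and, when one of them fired, samples the current thread's
 * state for the registered perfmon callback.
 */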
static kern_return_t
chudxnu_private_chud_ast_callback(ast_t reasons, ast_t *myast)
{
	boolean_t	oldlevel = ml_set_interrupts_enabled(FALSE);
	kern_return_t	retval = KERN_FAILURE;
	chudxnu_perfmon_ast_callback_func_t fn = perfmon_ast_callback_fn;

	if (fn) {
		/* Only execute urgent callbacks if reasons specifies an urgent context. */
		if ((*myast & AST_CHUD_URGENT) && (reasons & (AST_URGENT | AST_CHUD_URGENT))) {
			*myast &= ~AST_CHUD_URGENT;

			/*
			 * If the only flag left is AST_URGENT, we can clear it;
			 * we know that we set it, but if there are also other
			 * bits set in reasons then someone else might still
			 * need AST_URGENT, so we'll leave it set. The normal
			 * machinery in ast_taken will ensure it gets cleared
			 * eventually, as necessary.
			 */
			if (AST_URGENT == *myast) {
				*myast = AST_NONE;
			}

			retval = KERN_SUCCESS;
		}

		/*
		 * Only execute non-urgent callbacks if reasons actually
		 * specifies AST_CHUD. This implies non-urgent callbacks since
		 * the only time this'll happen is if someone either calls
		 * ast_taken with AST_CHUD explicitly (not done at time of
		 * writing, but possible) or with AST_ALL, which of course
		 * includes AST_CHUD.
		 */
		if ((*myast & AST_CHUD) && (reasons & AST_CHUD)) {
			*myast &= ~AST_CHUD;
			retval = KERN_SUCCESS;
		}

		if (KERN_SUCCESS == retval) {
			x86_thread_state_t state;
			mach_msg_type_number_t count = x86_THREAD_STATE_COUNT;
			thread_t thread = current_thread();

			if (KERN_SUCCESS == chudxnu_thread_get_state(thread,
								     x86_THREAD_STATE,
								     (thread_state_t)&state,
								     &count,
								     (thread->task != kernel_task))) {
				(fn)(x86_THREAD_STATE, (thread_state_t)&state, count);
			}
		}
	}

	ml_set_interrupts_enabled(oldlevel);
	return retval;
}

__private_extern__ kern_return_t
chudxnu_perfmon_ast_callback_enter(chudxnu_perfmon_ast_callback_func_t func)
{
	if (OSCompareAndSwapPtr(NULL, chudxnu_private_chud_ast_callback,
		(void * volatile *)&perfASTHook)) {
		chudxnu_perfmon_ast_callback_func_t old = perfmon_ast_callback_fn;

		while (!OSCompareAndSwapPtr(old, func,
			(void * volatile *)&perfmon_ast_callback_fn)) {
			old = perfmon_ast_callback_fn;
		}

		return KERN_SUCCESS;
	}
	return KERN_FAILURE;
}

__private_extern__ kern_return_t
chudxnu_perfmon_ast_callback_cancel(void)
{
	if (OSCompareAndSwapPtr(chudxnu_private_chud_ast_callback, NULL,
		(void * volatile *)&perfASTHook)) {
		chudxnu_perfmon_ast_callback_func_t old = perfmon_ast_callback_fn;

		while (!OSCompareAndSwapPtr(old, chud_null_ast,
			(void * volatile *)&perfmon_ast_callback_fn)) {
			old = perfmon_ast_callback_fn;
		}

		return KERN_SUCCESS;
	}
	return KERN_FAILURE;
}

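/*
 * Request a perfmon AST on the current CPU. The urgent variant also sets
 * AST_URGENT so the AST is taken promptly rather than waiting for the
 * next ordinary AST check.
 */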
__private_extern__ kern_return_t
chudxnu_perfmon_ast_send_urgent(boolean_t urgent)
{
	boolean_t oldlevel = ml_set_interrupts_enabled(FALSE);
	ast_t *myast = ast_pending();

	if (urgent) {
		*myast |= (AST_CHUD_URGENT | AST_URGENT);
	} else {
		*myast |= (AST_CHUD);
	}

	ml_set_interrupts_enabled(oldlevel);
	return KERN_SUCCESS;
}

#if 0
#pragma mark **** interrupt ****
#endif
static kern_return_t chud_null_int(uint32_t trapentry, thread_flavor_t flavor,
	thread_state_t tstate, mach_msg_type_number_t count);
static chudxnu_interrupt_callback_func_t interrupt_callback_fn = chud_null_int;

static kern_return_t chud_null_int(uint32_t trapentry __unused, thread_flavor_t flavor __unused,
	thread_state_t tstate __unused, mach_msg_type_number_t count __unused) {
	return KERN_FAILURE;
}

static void
chudxnu_private_interrupt_callback(void *foo) __attribute__((used));

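/*
 * PMI handler, installed via lapic_set_pmi_func(): samples the interrupted
 * thread's state and delivers it to the client as X86_INTERRUPT_PERFMON.
 */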
static void
chudxnu_private_interrupt_callback(void *foo)
{
#pragma unused (foo)
	chudxnu_interrupt_callback_func_t fn = interrupt_callback_fn;

	if (fn) {
		boolean_t	oldlevel;
		x86_thread_state_t state;
		mach_msg_type_number_t count;

		oldlevel = ml_set_interrupts_enabled(FALSE);

		count = x86_THREAD_STATE_COUNT;
		if (chudxnu_thread_get_state(current_thread(),
					     x86_THREAD_STATE,
					     (thread_state_t)&state,
					     &count,
					     FALSE) == KERN_SUCCESS) {
			(fn)(
				X86_INTERRUPT_PERFMON,
				x86_THREAD_STATE,
				(thread_state_t)&state,
				count);
		}
		ml_set_interrupts_enabled(oldlevel);
	}
}

__private_extern__ kern_return_t
chudxnu_interrupt_callback_enter(chudxnu_interrupt_callback_func_t func)
{
	if (OSCompareAndSwapPtr(chud_null_int, func,
		(void * volatile *)&interrupt_callback_fn)) {
		lapic_set_pmi_func((i386_intr_func_t)chudxnu_private_interrupt_callback);
		return KERN_SUCCESS;
	}
	return KERN_FAILURE;
}

__private_extern__ kern_return_t
chudxnu_interrupt_callback_cancel(void)
{
	chudxnu_interrupt_callback_func_t old = interrupt_callback_fn;

	while (!OSCompareAndSwapPtr(old, chud_null_int,
		(void * volatile *)&interrupt_callback_fn)) {
		old = interrupt_callback_fn;
	}

	lapic_set_pmi_func(NULL);
	return KERN_SUCCESS;
}

#if 0
#pragma mark **** cpu signal ****
#endif
static chudxnu_cpusig_callback_func_t cpusig_callback_fn = NULL;

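/*
 * Runs on the target CPU for each dequeued cross-CPU request: samples the
 * current thread state and passes it, along with the request code, to the
 * registered cpusig callback. With no callback installed the request is
 * simply ignored.
 */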
static kern_return_t
chudxnu_private_cpu_signal_handler(int request)
{
	chudxnu_cpusig_callback_func_t fn = cpusig_callback_fn;

	if (fn) {
		x86_thread_state_t state;
		mach_msg_type_number_t count = x86_THREAD_STATE_COUNT;

		if (chudxnu_thread_get_state(current_thread(),
					     x86_THREAD_STATE,
					     (thread_state_t) &state, &count,
					     FALSE) == KERN_SUCCESS) {
			return (fn)(
					request, x86_THREAD_STATE,
					(thread_state_t) &state, count);
		} else {
			return KERN_FAILURE;
		}
	}
	return KERN_SUCCESS; //ignored
}
/*
 * chudxnu_cpu_signal_handler() is called from the IPI handler
 * when a CHUD signal arrives from another processor.
 */
__private_extern__ void
chudxnu_cpu_signal_handler(void)
{
	chudcpu_signal_request_t	*reqp;
	chudcpu_data_t			*chudinfop;

	chudinfop = (chudcpu_data_t *) current_cpu_datap()->cpu_chud;

	mpdequeue_head(&(chudinfop->cpu_request_queue),
		       (queue_entry_t *) &reqp);
	while (reqp != NULL) {
		chudxnu_private_cpu_signal_handler(reqp->req_code);
		reqp->req_sync = 0;	/* release the sender's spin-wait */
		mpdequeue_head(&(chudinfop->cpu_request_queue),
			       (queue_entry_t *) &reqp);
	}
}

__private_extern__ kern_return_t
chudxnu_cpusig_callback_enter(chudxnu_cpusig_callback_func_t func)
{
	if (OSCompareAndSwapPtr(NULL, func,
		(void * volatile *)&cpusig_callback_fn)) {
		return KERN_SUCCESS;
	}
	return KERN_FAILURE;
}

__private_extern__ kern_return_t
chudxnu_cpusig_callback_cancel(void)
{
	chudxnu_cpusig_callback_func_t old = cpusig_callback_fn;

	while (!OSCompareAndSwapPtr(old, NULL,
		(void * volatile *)&cpusig_callback_fn)) {
		old = cpusig_callback_fn;
	}

	return KERN_SUCCESS;
}

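/*
 * Send a CHUD request to another CPU: enqueue the request on the target's
 * queue, raise an MP_CHUD IPI, then spin until the target clears req_sync
 * or the LockTimeOut deadline expires (which panics). The request lives on
 * the sender's stack, which is why the spin-wait must not be skipped.
 */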
__private_extern__ kern_return_t
chudxnu_cpusig_send(int otherCPU, uint32_t request_code)
{
	int				thisCPU;
	kern_return_t			retval = KERN_FAILURE;
	chudcpu_signal_request_t	request;
	uint64_t			deadline;
	chudcpu_data_t			*target_chudp;
	boolean_t			old_level;

	disable_preemption();
	// force interrupts on for a cross CPU signal.
	old_level = chudxnu_set_interrupts_enabled(TRUE);
	thisCPU = cpu_number();

	if ((unsigned) otherCPU < real_ncpus &&
	    thisCPU != otherCPU &&
	    cpu_data_ptr[otherCPU]->cpu_running) {

		target_chudp = (chudcpu_data_t *)
			cpu_data_ptr[otherCPU]->cpu_chud;

		/* Fill out request */
		request.req_sync = 0xFFFFFFFF;		/* set sync flag */
		//request.req_type = CPRQchud;		/* set request type */
		request.req_code = request_code;	/* set request */

		/*
		 * Insert the new request in the target cpu's request queue
		 * and signal target cpu.
		 */
		mpenqueue_tail(&target_chudp->cpu_request_queue,
			       &request.req_entry);
		i386_signal_cpu(otherCPU, MP_CHUD, ASYNC);

		/* Wait for response or timeout */
		deadline = mach_absolute_time() + LockTimeOut;
		while (request.req_sync != 0) {
			if (mach_absolute_time() > deadline) {
				panic("chudxnu_cpusig_send(%d,%d) timed out\n",
					otherCPU, request_code);
			}
			cpu_pause();
		}
		retval = KERN_SUCCESS;
	} else {
		retval = KERN_INVALID_ARGUMENT;
	}

	chudxnu_set_interrupts_enabled(old_level);
	enable_preemption();
	return retval;
}