/*
 * Copyright (c) 2003-2007 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <stdint.h>
#include <mach/boolean.h>
#include <mach/mach_types.h>

#include <kern/kern_types.h>
#include <kern/processor.h>
#include <kern/thread_call.h>
#include <kern/kalloc.h>
#include <kern/thread.h>

#include <libkern/OSAtomic.h>

#include <ppc/machine_routines.h>
#include <ppc/cpu_data.h>
#include <ppc/cpu_internal.h>
#include <ppc/exception.h>
#include <ppc/thread.h>
#include <ppc/trap.h>

#include <chud/chud_xnu.h>
#include <chud/chud_xnu_private.h>

__private_extern__
void chudxnu_cancel_all_callbacks(void)
{
	chudxnu_cpu_timer_callback_cancel_all();
	chudxnu_trap_callback_cancel();
	chudxnu_interrupt_callback_cancel();
	chudxnu_perfmon_ast_callback_cancel();
	chudxnu_cpusig_callback_cancel();
	chudxnu_kdebug_callback_cancel();
	chudxnu_syscall_callback_cancel();
	chudxnu_dtrace_callback_cancel();
}

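/*
 * Per-processor CHUD state: the boot processor's chudcpu_data_t lives in
 * static storage, while every other processor's record is kalloc'd on
 * demand. t_deadline starts at the maximum 64-bit value, meaning "no timer
 * armed".
 */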
static chudcpu_data_t chudcpu_boot_cpu;

void *chudxnu_per_proc_alloc(boolean_t boot_processor)
{
	chudcpu_data_t *chud_proc_info;

	if (boot_processor) {
		chud_proc_info = &chudcpu_boot_cpu;
	} else {
		chud_proc_info = (chudcpu_data_t *)kalloc(sizeof(chudcpu_data_t));
		if (chud_proc_info == (chudcpu_data_t *)NULL) {
			return (void *)NULL;
		}
	}
	bzero((char *)chud_proc_info, sizeof(chudcpu_data_t));
	chud_proc_info->t_deadline = 0xFFFFFFFFFFFFFFFFULL;
	return (void *)chud_proc_info;
}

void chudxnu_per_proc_free(void *per_proc_chud)
{
	if (per_proc_chud == (void *)&chudcpu_boot_cpu) {
		return;
	} else {
		kfree(per_proc_chud, sizeof(chudcpu_data_t));
	}
}

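/*
 * Per-CPU timer callbacks: chudxnu_cpu_timer_callback_enter() arms a one-shot
 * timer_call on the calling CPU. When it fires, the handler below samples the
 * current thread's PPC_THREAD_STATE64 with interrupts disabled and hands it
 * to the registered callback.
 */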
static void
chudxnu_private_cpu_timer_callback(__unused timer_call_param_t param0,
				   __unused timer_call_param_t param1)
{
	chudcpu_data_t *chud_proc_info;
	boolean_t oldlevel;
	struct ppc_thread_state64 state;
	mach_msg_type_number_t count;
	chudxnu_cpu_timer_callback_func_t fn = NULL;

	oldlevel = ml_set_interrupts_enabled(FALSE);
	chud_proc_info = (chudcpu_data_t *)(getPerProc()->pp_chud);

	count = PPC_THREAD_STATE64_COUNT;
	if(chudxnu_thread_get_state(current_thread(), PPC_THREAD_STATE64, (thread_state_t)&state, &count, FALSE)==KERN_SUCCESS) {
		fn = chud_proc_info->cpu_timer_callback_fn;
		if(fn) {
			(fn)(PPC_THREAD_STATE64, (thread_state_t)&state, count);
		}
	}

	ml_set_interrupts_enabled(oldlevel);
}

__private_extern__
kern_return_t chudxnu_cpu_timer_callback_enter(chudxnu_cpu_timer_callback_func_t func, uint32_t time, uint32_t units)
{
	chudcpu_data_t *chud_proc_info;
	boolean_t oldlevel;

	oldlevel = ml_set_interrupts_enabled(FALSE);
	chud_proc_info = (chudcpu_data_t *)(getPerProc()->pp_chud);

	timer_call_cancel(&(chud_proc_info->cpu_timer_call)); // cancel any existing callback for this cpu

	chud_proc_info->cpu_timer_callback_fn = func;

	clock_interval_to_deadline(time, units, &(chud_proc_info->t_deadline));
	timer_call_setup(&(chud_proc_info->cpu_timer_call), chudxnu_private_cpu_timer_callback, NULL);
	timer_call_enter(&(chud_proc_info->cpu_timer_call), chud_proc_info->t_deadline);

	ml_set_interrupts_enabled(oldlevel);
	return KERN_SUCCESS;
}

__private_extern__
kern_return_t chudxnu_cpu_timer_callback_cancel(void)
{
	chudcpu_data_t *chud_proc_info;
	boolean_t oldlevel;

	oldlevel = ml_set_interrupts_enabled(FALSE);
	chud_proc_info = (chudcpu_data_t *)(getPerProc()->pp_chud);

	timer_call_cancel(&(chud_proc_info->cpu_timer_call));
	chud_proc_info->t_deadline = chud_proc_info->t_deadline | ~(chud_proc_info->t_deadline); // set to max value
	chud_proc_info->cpu_timer_callback_fn = NULL;

	ml_set_interrupts_enabled(oldlevel);
	return KERN_SUCCESS;
}

__private_extern__
kern_return_t chudxnu_cpu_timer_callback_cancel_all(void)
{
	unsigned int cpu;
	chudcpu_data_t *chud_proc_info;

	for(cpu=0; cpu<real_ncpus; cpu++) {
		if ((PerProcTable[cpu].ppe_vaddr == 0)
		    || (PerProcTable[cpu].ppe_vaddr->pp_chud == 0))
			continue;
		chud_proc_info = (chudcpu_data_t *)PerProcTable[cpu].ppe_vaddr->pp_chud;
		timer_call_cancel(&(chud_proc_info->cpu_timer_call));
		chud_proc_info->t_deadline = chud_proc_info->t_deadline | ~(chud_proc_info->t_deadline); // set to max value
		chud_proc_info->cpu_timer_callback_fn = NULL;
	}
	return KERN_SUCCESS;
}

#if 0
#pragma mark **** trap ****
#endif
static kern_return_t chud_null_trap(uint32_t trapentry, thread_flavor_t flavor,
	thread_state_t tstate, mach_msg_type_number_t count);
static chudxnu_trap_callback_func_t trap_callback_fn = chud_null_trap;

static kern_return_t chud_null_trap(uint32_t trapentry __unused, thread_flavor_t flavor __unused,
	thread_state_t tstate __unused, mach_msg_type_number_t count __unused) {
	return KERN_FAILURE;
}

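/*
 * Map an xnu trap code to the architected PowerPC exception vector offset
 * (e.g. 0x300 for a data access fault, 0x500 for an external interrupt).
 * Unknown trap codes map to 0x0 and are ignored by the trap callback.
 */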

#define TRAP_ENTRY_POINT(t) ((t==T_RESET) ? 0x100 : \
			(t==T_MACHINE_CHECK) ? 0x200 : \
			(t==T_DATA_ACCESS) ? 0x300 : \
			(t==T_DATA_SEGMENT) ? 0x380 : \
			(t==T_INSTRUCTION_ACCESS) ? 0x400 : \
			(t==T_INSTRUCTION_SEGMENT) ? 0x480 : \
			(t==T_INTERRUPT) ? 0x500 : \
			(t==T_ALIGNMENT) ? 0x600 : \
			(t==T_PROGRAM) ? 0x700 : \
			(t==T_FP_UNAVAILABLE) ? 0x800 : \
			(t==T_DECREMENTER) ? 0x900 : \
			(t==T_IO_ERROR) ? 0xa00 : \
			(t==T_RESERVED) ? 0xb00 : \
			(t==T_SYSTEM_CALL) ? 0xc00 : \
			(t==T_TRACE) ? 0xd00 : \
			(t==T_FP_ASSIST) ? 0xe00 : \
			(t==T_PERF_MON) ? 0xf00 : \
			(t==T_VMX) ? 0xf20 : \
			(t==T_INVALID_EXCP0) ? 0x1000 : \
			(t==T_INVALID_EXCP1) ? 0x1100 : \
			(t==T_INVALID_EXCP2) ? 0x1200 : \
			(t==T_INSTRUCTION_BKPT) ? 0x1300 : \
			(t==T_SYSTEM_MANAGEMENT) ? 0x1400 : \
			(t==T_SOFT_PATCH) ? 0x1500 : \
			(t==T_ALTIVEC_ASSIST) ? 0x1600 : \
			(t==T_THERMAL) ? 0x1700 : \
			(t==T_ARCHDEP0) ? 0x1800 : \
			(t==T_INSTRUMENTATION) ? 0x2000 : \
			0x0)

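/*
 * Low-level trap hook: converts the saved register context (savearea) into a
 * PPC_THREAD_STATE64 and passes it, together with the exception vector
 * offset, to the registered trap callback. Runs with interrupts disabled.
 */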
static kern_return_t
chudxnu_private_trap_callback(int trapno, struct savearea *ssp,
			      __unused unsigned int dsisr,
			      __unused addr64_t dar)
{
	boolean_t oldlevel = ml_set_interrupts_enabled(FALSE);
	kern_return_t retval = KERN_FAILURE;
	uint32_t trapentry = TRAP_ENTRY_POINT(trapno);
	chudxnu_trap_callback_func_t fn = trap_callback_fn;

	if(trapentry!=0x0) {
		if(fn) {
			struct ppc_thread_state64 state;
			mach_msg_type_number_t count = PPC_THREAD_STATE64_COUNT;
			chudxnu_copy_savearea_to_threadstate(PPC_THREAD_STATE64, (thread_state_t)&state, &count, ssp);
			retval = (fn)(trapentry, PPC_THREAD_STATE64, (thread_state_t)&state, count);
		}
	}

	ml_set_interrupts_enabled(oldlevel);

	return retval;
}

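/*
 * Callback registration is a two-step, lock-free sequence: first atomically
 * install the private handler into the kernel's perfTrapHook (this succeeds
 * only when no hook is installed, so a single client can register at a time),
 * then swap the client's function into trap_callback_fn with a
 * compare-and-swap loop. Cancellation reverses the sequence and restores the
 * null callback. The AST, interrupt and CPU-signal callbacks below follow the
 * same pattern.
 */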
__private_extern__ kern_return_t
chudxnu_trap_callback_enter(chudxnu_trap_callback_func_t func)
{
	if(OSCompareAndSwapPtr(NULL, chudxnu_private_trap_callback,
		(void * volatile *)&perfTrapHook)) {

		chudxnu_trap_callback_func_t old = trap_callback_fn;
		while(!OSCompareAndSwapPtr(old, func,
			(void * volatile *)&trap_callback_fn)) {
			old = trap_callback_fn;
		}

		return KERN_SUCCESS;
	}
	return KERN_FAILURE;
}

__private_extern__ kern_return_t
chudxnu_trap_callback_cancel(void)
{
	if(OSCompareAndSwapPtr(chudxnu_private_trap_callback, NULL,
		(void * volatile *)&perfTrapHook)) {

		chudxnu_trap_callback_func_t old = trap_callback_fn;
		while(!OSCompareAndSwapPtr(old, chud_null_trap,
			(void * volatile *)&trap_callback_fn)) {
			old = trap_callback_fn;
		}

		return KERN_SUCCESS;
	}
	return KERN_FAILURE;
}

#if 0
#pragma mark **** ast ****
#endif
static kern_return_t chud_null_ast(thread_flavor_t flavor, thread_state_t tstate,
	mach_msg_type_number_t count);
static chudxnu_perfmon_ast_callback_func_t perfmon_ast_callback_fn = chud_null_ast;

static kern_return_t chud_null_ast(thread_flavor_t flavor __unused,
	thread_state_t tstate __unused, mach_msg_type_number_t count __unused) {
	return KERN_FAILURE;
}

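/*
 * Perfmon AST handler: a pending AST_CHUD_URGENT is always consumed (along
 * with AST_CHUD, and AST_URGENT is cleared unless a preemption AST is still
 * pending); otherwise a pending AST_CHUD is consumed on its own. The
 * registered callback then receives the current thread's PPC_THREAD_STATE64.
 */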
static kern_return_t
chudxnu_private_chud_ast_callback(__unused int trapno,
				  __unused struct savearea *ssp,
				  __unused unsigned int dsisr,
				  __unused addr64_t dar)
{
	boolean_t oldlevel = ml_set_interrupts_enabled(FALSE);
	ast_t *myast = ast_pending();
	kern_return_t retval = KERN_FAILURE;
	chudxnu_perfmon_ast_callback_func_t fn = perfmon_ast_callback_fn;

	if(*myast & AST_CHUD_URGENT) {
		*myast &= ~(AST_CHUD_URGENT | AST_CHUD);
		if((*myast & AST_PREEMPTION) != AST_PREEMPTION) *myast &= ~(AST_URGENT);
		retval = KERN_SUCCESS;
	} else if(*myast & AST_CHUD) {
		*myast &= ~(AST_CHUD);
		retval = KERN_SUCCESS;
	}

	if(fn) {
		struct ppc_thread_state64 state;
		mach_msg_type_number_t count;
		count = PPC_THREAD_STATE64_COUNT;

		if(chudxnu_thread_get_state(current_thread(), PPC_THREAD_STATE64, (thread_state_t)&state, &count, FALSE)==KERN_SUCCESS) {
			(fn)(PPC_THREAD_STATE64, (thread_state_t)&state, count);
		}
	}

#if 0
	// ASTs from ihandler go through thandler and are made to look like traps
	// always handle AST_CHUD_URGENT if there's a callback
	// only handle AST_CHUD if it's the only AST pending
	if(perfmon_ast_callback_fn && ((*myast & AST_CHUD_URGENT) || ((*myast & AST_CHUD) && !(*myast & AST_URGENT)))) {
		struct ppc_thread_state64 state;
		mach_msg_type_number_t count = PPC_THREAD_STATE64_COUNT;
		chudxnu_copy_savearea_to_threadstate(PPC_THREAD_STATE64, (thread_state_t)&state, &count, ssp);
		if(*myast & AST_CHUD_URGENT) {
			*myast &= ~(AST_CHUD_URGENT | AST_CHUD);
			if((*myast & AST_PREEMPTION) != AST_PREEMPTION) *myast &= ~(AST_URGENT);
			retval = KERN_SUCCESS;
		} else if(*myast & AST_CHUD) {
			*myast &= ~(AST_CHUD);
			retval = KERN_SUCCESS;
		}
		(perfmon_ast_callback_fn)(PPC_THREAD_STATE64, (thread_state_t)&state, count);
	}
#endif

	ml_set_interrupts_enabled(oldlevel);
	return retval;
}

__private_extern__ kern_return_t
chudxnu_perfmon_ast_callback_enter(chudxnu_perfmon_ast_callback_func_t func)
{
	if(OSCompareAndSwapPtr(NULL, chudxnu_private_chud_ast_callback,
		(void * volatile *)&perfASTHook)) {
		chudxnu_perfmon_ast_callback_func_t old = perfmon_ast_callback_fn;

		while(!OSCompareAndSwapPtr(old, func,
			(void * volatile *)&perfmon_ast_callback_fn)) {
			old = perfmon_ast_callback_fn;
		}

		return KERN_SUCCESS;
	}
	return KERN_FAILURE;
}

__private_extern__ kern_return_t
chudxnu_perfmon_ast_callback_cancel(void)
{
	if(OSCompareAndSwapPtr(chudxnu_private_chud_ast_callback, NULL,
		(void * volatile *)&perfASTHook)) {
		chudxnu_perfmon_ast_callback_func_t old = perfmon_ast_callback_fn;

		while(!OSCompareAndSwapPtr(old, chud_null_ast,
			(void * volatile *)&perfmon_ast_callback_fn)) {
			old = perfmon_ast_callback_fn;
		}

		return KERN_SUCCESS;
	}
	return KERN_FAILURE;
}

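/*
 * Request a perfmon AST on the current processor; an urgent request also sets
 * AST_URGENT so the AST is taken as soon as possible.
 */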
__private_extern__
kern_return_t chudxnu_perfmon_ast_send_urgent(boolean_t urgent)
{
	boolean_t oldlevel = ml_set_interrupts_enabled(FALSE);
	ast_t *myast = ast_pending();

	if(urgent) {
		*myast |= (AST_CHUD_URGENT | AST_URGENT);
	} else {
		*myast |= (AST_CHUD);
	}

	ml_set_interrupts_enabled(oldlevel);
	return KERN_SUCCESS;
}

#if 0
#pragma mark **** interrupt ****
#endif
static kern_return_t chud_null_int(uint32_t trapentry, thread_flavor_t flavor,
	thread_state_t tstate, mach_msg_type_number_t count);
static chudxnu_interrupt_callback_func_t interrupt_callback_fn = chud_null_int;

static kern_return_t chud_null_int(uint32_t trapentry __unused, thread_flavor_t flavor __unused,
	thread_state_t tstate __unused, mach_msg_type_number_t count __unused) {
	return KERN_FAILURE;
}


static kern_return_t
chudxnu_private_interrupt_callback(int trapno, struct savearea *ssp,
				   __unused unsigned int dsisr,
				   __unused addr64_t dar)
{
	chudxnu_interrupt_callback_func_t fn = interrupt_callback_fn;

	if(fn) {
		struct ppc_thread_state64 state;
		mach_msg_type_number_t count = PPC_THREAD_STATE64_COUNT;
		chudxnu_copy_savearea_to_threadstate(PPC_THREAD_STATE64, (thread_state_t)&state, &count, ssp);
		return (fn)(TRAP_ENTRY_POINT(trapno), PPC_THREAD_STATE64, (thread_state_t)&state, count);
	} else {
		return KERN_FAILURE;
	}
}

__private_extern__
kern_return_t chudxnu_interrupt_callback_enter(chudxnu_interrupt_callback_func_t func)
{
	if(OSCompareAndSwapPtr(NULL, chudxnu_private_interrupt_callback,
		(void * volatile *)&perfIntHook)) {
		chudxnu_interrupt_callback_func_t old = interrupt_callback_fn;

		while(!OSCompareAndSwapPtr(old, func,
			(void * volatile *)&interrupt_callback_fn)) {
			old = interrupt_callback_fn;
		}

		return KERN_SUCCESS;
	}
	return KERN_FAILURE;
}

__private_extern__
kern_return_t chudxnu_interrupt_callback_cancel(void)
{
	if(OSCompareAndSwapPtr(chudxnu_private_interrupt_callback, NULL,
		(void * volatile *)&perfIntHook)) {
		chudxnu_interrupt_callback_func_t old = interrupt_callback_fn;

		while(!OSCompareAndSwapPtr(old, chud_null_int,
			(void * volatile *)&interrupt_callback_fn)) {
			old = interrupt_callback_fn;
		}

		return KERN_SUCCESS;
	}
	return KERN_FAILURE;
}

#if 0
#pragma mark **** cpu signal ****
#endif
static chudxnu_cpusig_callback_func_t cpusig_callback_fn = NULL;
extern perfCallback perfCpuSigHook; /* function hook into cpu_signal_handler() */

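/*
 * Invoked from cpu_signal_handler() via perfCpuSigHook when a CHUD cross-CPU
 * request arrives; it forwards the request and the interrupted register state
 * to the registered cpusig callback.
 */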
static kern_return_t
chudxnu_private_cpu_signal_handler(int request, struct savearea *ssp,
				   __unused unsigned int arg0,
				   __unused addr64_t arg1)
{
	chudxnu_cpusig_callback_func_t fn = cpusig_callback_fn;

	if(fn) {
		struct ppc_thread_state64 state;
		mach_msg_type_number_t count = PPC_THREAD_STATE64_COUNT;
		chudxnu_copy_savearea_to_threadstate(PPC_THREAD_STATE64, (thread_state_t)&state, &count, ssp);
		(fn)(request, PPC_THREAD_STATE64, (thread_state_t)&state, count);
	}
	return KERN_SUCCESS; // ignored
}

__private_extern__
kern_return_t chudxnu_cpusig_callback_enter(chudxnu_cpusig_callback_func_t func)
{
	if(OSCompareAndSwapPtr(NULL, chudxnu_private_cpu_signal_handler,
		(void * volatile *)&perfCpuSigHook)) {
		chudxnu_cpusig_callback_func_t old = cpusig_callback_fn;

		while(!OSCompareAndSwapPtr(old, func,
			(void * volatile *)&cpusig_callback_fn)) {
			old = cpusig_callback_fn;
		}

		return KERN_SUCCESS;
	}
	return KERN_FAILURE;
}

__private_extern__
kern_return_t chudxnu_cpusig_callback_cancel(void)
{
	if(OSCompareAndSwapPtr(chudxnu_private_cpu_signal_handler, NULL,
		(void * volatile *)&perfCpuSigHook)) {
		chudxnu_cpusig_callback_func_t old = cpusig_callback_fn;

		while(!OSCompareAndSwapPtr(old, NULL,
			(void * volatile *)&cpusig_callback_fn)) {
			old = cpusig_callback_fn;
		}

		return KERN_SUCCESS;
	}
	return KERN_FAILURE;
}

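/*
 * Send a CHUD request to another processor. temp[0] is a sync word (set to
 * 0xFFFFFFFF) and temp[1] carries the request; eieio/sync push both to memory
 * before the signal is raised. cpu_signal() is retried up to 16 times, then
 * hw_cpu_sync() waits (bounded by LockTimeOut) for the target processor to
 * acknowledge via the sync word. Signalling the local processor is rejected
 * with KERN_INVALID_ARGUMENT.
 */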
__private_extern__
kern_return_t chudxnu_cpusig_send(int otherCPU, uint32_t request)
{
	int thisCPU;
	kern_return_t retval = KERN_FAILURE;
	int retries = 0;
	boolean_t oldlevel;
	uint32_t temp[2];

	oldlevel = ml_set_interrupts_enabled(FALSE);
	thisCPU = cpu_number();

	if(thisCPU!=otherCPU) {
		temp[0] = 0xFFFFFFFF;		/* set sync flag */
		temp[1] = request;		/* set request */
		__asm__ volatile("eieio");	/* force order */
		__asm__ volatile("sync");	/* force to memory */

		do {
			retval=cpu_signal(otherCPU, SIGPcpureq, CPRQchud, (uint32_t)&temp);
		} while(retval!=KERN_SUCCESS && (retries++)<16);

		if(retries>=16) {
			retval = KERN_FAILURE;
		} else {
			retval = hw_cpu_sync(temp, LockTimeOut);	/* wait for the other processor */
			if(!retval) {
				retval = KERN_FAILURE;
			} else {
				retval = KERN_SUCCESS;
			}
		}
	} else {
		retval = KERN_INVALID_ARGUMENT;
	}

	ml_set_interrupts_enabled(oldlevel);
	return retval;
}