/*
 * Copyright (c) 2003-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <mach/boolean.h>
#include <mach/mach_types.h>

#include <kern/kern_types.h>
#include <kern/processor.h>
#include <kern/timer_call.h>
#include <kern/thread_call.h>
#include <kern/kalloc.h>
#include <kern/thread.h>

#include <machine/machine_routines.h>
#include <machine/cpu_data.h>
#include <machine/trap.h>

#include <chud/chud_xnu.h>
#include <chud/chud_xnu_private.h>

#include <i386/misc_protos.h>
#include <i386/mp.h>
#include <i386/machine_cpu.h>

#include <sys/kdebug.h>
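
// Code values for the MACHDBG_CODE(DBG_MACH_CHUD, ...) kdebug tracepoints
// emitted throughout this file.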
#define CHUD_TIMER_CALLBACK_CANCEL  0
#define CHUD_TIMER_CALLBACK_ENTER   1
#define CHUD_TIMER_CALLBACK         2
#define CHUD_AST_SEND               3
#define CHUD_AST_CALLBACK           4
#define CHUD_CPUSIG_SEND            5
#define CHUD_CPUSIG_CALLBACK        6

__private_extern__
void chudxnu_cancel_all_callbacks(void)
{
    chudxnu_cpusig_callback_cancel();
    chudxnu_cpu_timer_callback_cancel_all();
    chudxnu_interrupt_callback_cancel();
    chudxnu_perfmon_ast_callback_cancel();
    chudxnu_kdebug_callback_cancel();
    chudxnu_thread_timer_callback_cancel();
    chudxnu_trap_callback_cancel();
    chudxnu_syscall_callback_cancel();
}

#pragma mark **** cpu timer ****
static chudcpu_data_t chudcpu_boot_cpu;

void *
chudxnu_cpu_alloc(boolean_t boot_processor)
{
    chudcpu_data_t *chud_proc_info;

    if (boot_processor) {
        chud_proc_info = &chudcpu_boot_cpu;
    } else {
        chud_proc_info = (chudcpu_data_t *)
                             kalloc(sizeof(chudcpu_data_t));
        if (chud_proc_info == (chudcpu_data_t *)NULL) {
            return (void *)NULL;
        }
    }
    bzero((char *)chud_proc_info, sizeof(chudcpu_data_t));
    chud_proc_info->t_deadline = 0xFFFFFFFFFFFFFFFFULL;  // no timer armed yet
    mpqueue_init(&chud_proc_info->cpu_request_queue);

    return (void *)chud_proc_info;
}

void
chudxnu_cpu_free(void *cp)
{
    if (cp == NULL || cp == (void *)&chudcpu_boot_cpu) {
        return;  // never free the statically allocated boot-cpu record
    } else {
        kfree(cp, sizeof(chudcpu_data_t));
    }
}

static void
chudxnu_private_cpu_timer_callback(
    timer_call_param_t param0,
    timer_call_param_t param1)
{
#pragma unused (param0)
#pragma unused (param1)
    chudcpu_data_t *chud_proc_info;
    boolean_t oldlevel;
    x86_thread_state_t state;
    mach_msg_type_number_t count;
    chudxnu_cpu_timer_callback_func_t fn;

    oldlevel = ml_set_interrupts_enabled(FALSE);
    chud_proc_info = (chudcpu_data_t *)(current_cpu_datap()->cpu_chud);

    count = x86_THREAD_STATE_COUNT;
    if (chudxnu_thread_get_state(current_thread(),
                                 x86_THREAD_STATE,
                                 (thread_state_t)&state,
                                 &count,
                                 FALSE) == KERN_SUCCESS) {
        fn = chud_proc_info->cpu_timer_callback_fn;
        if (fn) {
            KERNEL_DEBUG_CONSTANT(
                MACHDBG_CODE(DBG_MACH_CHUD,
                             CHUD_TIMER_CALLBACK) | DBG_FUNC_NONE,
                (uint32_t)fn, 0, 0, 0, 0);
                //state.eip, state.cs, 0, 0);
            (fn)(
                x86_THREAD_STATE,
                (thread_state_t)&state,
                count);
        }
    }

    ml_set_interrupts_enabled(oldlevel);
}

__private_extern__ kern_return_t
chudxnu_cpu_timer_callback_enter(
    chudxnu_cpu_timer_callback_func_t func,
    uint32_t time,
    uint32_t units)
{
    chudcpu_data_t *chud_proc_info;
    boolean_t oldlevel;

    oldlevel = ml_set_interrupts_enabled(FALSE);
    chud_proc_info = (chudcpu_data_t *)(current_cpu_datap()->cpu_chud);

    // cancel any existing callback for this cpu
    timer_call_cancel(&(chud_proc_info->cpu_timer_call));

    chud_proc_info->cpu_timer_callback_fn = func;

    clock_interval_to_deadline(time, units, &(chud_proc_info->t_deadline));
    timer_call_setup(&(chud_proc_info->cpu_timer_call),
                     chudxnu_private_cpu_timer_callback, NULL);
    timer_call_enter(&(chud_proc_info->cpu_timer_call),
                     chud_proc_info->t_deadline);

    KERNEL_DEBUG_CONSTANT(
        MACHDBG_CODE(DBG_MACH_CHUD,
                     CHUD_TIMER_CALLBACK_ENTER) | DBG_FUNC_NONE,
        (uint32_t) func, time, units, 0, 0);

    ml_set_interrupts_enabled(oldlevel);
    return KERN_SUCCESS;
}
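
/*
 * Illustrative usage (my_sampler and the interval are hypothetical, not
 * part of this file): arm a one-shot sample on the current CPU 100us out.
 *
 *     extern kern_return_t my_sampler(thread_state_flavor_t flavor,
 *                                     thread_state_t state,
 *                                     mach_msg_type_number_t count);
 *
 *     chudxnu_cpu_timer_callback_enter(my_sampler, 100, NSEC_PER_USEC);
 *
 * The timer_call fires once per enter; a periodic sampler would re-arm
 * itself from inside its callback.
 */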

__private_extern__ kern_return_t
chudxnu_cpu_timer_callback_cancel(void)
{
    chudcpu_data_t *chud_proc_info;
    boolean_t oldlevel;

    oldlevel = ml_set_interrupts_enabled(FALSE);
    chud_proc_info = (chudcpu_data_t *)(current_cpu_datap()->cpu_chud);

    timer_call_cancel(&(chud_proc_info->cpu_timer_call));

    KERNEL_DEBUG_CONSTANT(
        MACHDBG_CODE(DBG_MACH_CHUD,
                     CHUD_TIMER_CALLBACK_CANCEL) | DBG_FUNC_NONE,
        0, 0, 0, 0, 0);

    // set t_deadline to its maximum value (all bits set)
    chud_proc_info->t_deadline |= ~(chud_proc_info->t_deadline);
    chud_proc_info->cpu_timer_callback_fn = NULL;

    ml_set_interrupts_enabled(oldlevel);
    return KERN_SUCCESS;
}

__private_extern__ kern_return_t
chudxnu_cpu_timer_callback_cancel_all(void)
{
    unsigned int cpu;
    chudcpu_data_t *chud_proc_info;

    for (cpu = 0; cpu < real_ncpus; cpu++) {
        chud_proc_info = (chudcpu_data_t *) cpu_data_ptr[cpu]->cpu_chud;
        if (chud_proc_info == NULL)
            continue;
        timer_call_cancel(&(chud_proc_info->cpu_timer_call));
        chud_proc_info->t_deadline |= ~(chud_proc_info->t_deadline);
        chud_proc_info->cpu_timer_callback_fn = NULL;
    }
    return KERN_SUCCESS;
}

#pragma mark **** trap ****
static chudxnu_trap_callback_func_t trap_callback_fn = NULL;

static kern_return_t
chudxnu_private_trap_callback(
    int trapno,
    void *regs,
    int unused1,
    int unused2)
{
#pragma unused (regs)
#pragma unused (unused1)
#pragma unused (unused2)
    kern_return_t retval = KERN_FAILURE;
    chudxnu_trap_callback_func_t fn = trap_callback_fn;

    if (fn) {
        boolean_t oldlevel;
        x86_thread_state_t state; // once we have a 64-bit-independent way to
                                  // determine if a thread is running kernel
                                  // code, we'll switch to x86_thread_state_t.
        mach_msg_type_number_t count;

        oldlevel = ml_set_interrupts_enabled(FALSE);

        count = x86_THREAD_STATE_COUNT;
        if (chudxnu_thread_get_state(current_thread(),
                                     x86_THREAD_STATE,
                                     (thread_state_t)&state,
                                     &count,
                                     FALSE) == KERN_SUCCESS) {
            retval = (fn)(
                trapno,
                x86_THREAD_STATE,
                (thread_state_t)&state,
                count);
        }
        ml_set_interrupts_enabled(oldlevel);
    }

    return retval;
}

__private_extern__ kern_return_t
chudxnu_trap_callback_enter(chudxnu_trap_callback_func_t func)
{
    trap_callback_fn = func;
    perfTrapHook = chudxnu_private_trap_callback;
    return KERN_SUCCESS;
}

__private_extern__ kern_return_t
chudxnu_trap_callback_cancel(void)
{
    trap_callback_fn = NULL;
    perfTrapHook = NULL;
    return KERN_SUCCESS;
}
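
/*
 * Note: perfTrapHook is the kernel's performance-tools trap hook; while a
 * CHUD trap callback is registered, trap notifications are routed through
 * chudxnu_private_trap_callback() above, which snapshots the interrupted
 * thread's state before invoking the registered function.
 */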

#pragma mark **** ast ****
static chudxnu_perfmon_ast_callback_func_t perfmon_ast_callback_fn = NULL;

static kern_return_t
chudxnu_private_chud_ast_callback(
    int trapno,
    void *regs,
    int unused1,
    int unused2)
{
#pragma unused (trapno)
#pragma unused (regs)
#pragma unused (unused1)
#pragma unused (unused2)
    boolean_t oldlevel = ml_set_interrupts_enabled(FALSE);
    ast_t *myast = ast_pending();
    kern_return_t retval = KERN_FAILURE;
    chudxnu_perfmon_ast_callback_func_t fn = perfmon_ast_callback_fn;

    if (*myast & AST_CHUD_URGENT) {
        *myast &= ~(AST_CHUD_URGENT | AST_CHUD);
        // leave AST_URGENT set only if preemption still needs it
        if ((*myast & AST_PREEMPTION) != AST_PREEMPTION)
            *myast &= ~(AST_URGENT);
        retval = KERN_SUCCESS;
    } else if (*myast & AST_CHUD) {
        *myast &= ~(AST_CHUD);
        retval = KERN_SUCCESS;
    }

    if (fn) {
        x86_thread_state_t state;
        mach_msg_type_number_t count;
        count = x86_THREAD_STATE_COUNT;

        if (chudxnu_thread_get_state(
                current_thread(),
                x86_THREAD_STATE,
                (thread_state_t) &state, &count,
                TRUE) == KERN_SUCCESS) {

            KERNEL_DEBUG_CONSTANT(
                MACHDBG_CODE(DBG_MACH_CHUD,
                             CHUD_AST_CALLBACK) | DBG_FUNC_NONE,
                (uint32_t) fn, 0, 0, 0, 0);

            (fn)(
                x86_THREAD_STATE,
                (thread_state_t) &state,
                count);
        }
    }

    ml_set_interrupts_enabled(oldlevel);
    return retval;
}

__private_extern__ kern_return_t
chudxnu_perfmon_ast_callback_enter(chudxnu_perfmon_ast_callback_func_t func)
{
    perfmon_ast_callback_fn = func;
    perfASTHook = chudxnu_private_chud_ast_callback;
    return KERN_SUCCESS;
}

__private_extern__ kern_return_t
chudxnu_perfmon_ast_callback_cancel(void)
{
    perfmon_ast_callback_fn = NULL;
    perfASTHook = NULL;
    return KERN_SUCCESS;
}
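
/*
 * Urgency semantics, as implemented below: an urgent send sets
 * AST_CHUD_URGENT | AST_URGENT, so the AST is taken the next time pending
 * ASTs are checked, even from kernel mode; a non-urgent send sets only
 * AST_CHUD, which is normally delivered on the next return to user mode.
 */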

__private_extern__ kern_return_t
chudxnu_perfmon_ast_send_urgent(boolean_t urgent)
{
    boolean_t oldlevel = ml_set_interrupts_enabled(FALSE);
    ast_t *myast = ast_pending();

    if (urgent) {
        *myast |= (AST_CHUD_URGENT | AST_URGENT);
    } else {
        *myast |= (AST_CHUD);
    }

    KERNEL_DEBUG_CONSTANT(
        MACHDBG_CODE(DBG_MACH_CHUD, CHUD_AST_SEND) | DBG_FUNC_NONE,
        urgent, 0, 0, 0, 0);

    ml_set_interrupts_enabled(oldlevel);
    return KERN_SUCCESS;
}

__private_extern__ kern_return_t
chudxnu_perfmon_ast_send(void)
{
    return chudxnu_perfmon_ast_send_urgent(TRUE);
}

#pragma mark **** interrupt ****
static chudxnu_interrupt_callback_func_t interrupt_callback_fn = NULL;

static void
chudxnu_private_interrupt_callback(void *foo)
{
#pragma unused (foo)
    chudxnu_interrupt_callback_func_t fn = interrupt_callback_fn;

    if (fn) {
        boolean_t oldlevel;
        x86_thread_state_t state;
        mach_msg_type_number_t count;

        oldlevel = ml_set_interrupts_enabled(FALSE);

        count = x86_THREAD_STATE_COUNT;
        if (chudxnu_thread_get_state(current_thread(),
                                     x86_THREAD_STATE,
                                     (thread_state_t)&state,
                                     &count,
                                     FALSE) == KERN_SUCCESS) {
            (fn)(
                X86_INTERRUPT_PERFMON,
                x86_THREAD_STATE,
                (thread_state_t)&state,
                count);
        }
        ml_set_interrupts_enabled(oldlevel);
    }
}

__private_extern__ kern_return_t
chudxnu_interrupt_callback_enter(chudxnu_interrupt_callback_func_t func)
{
    interrupt_callback_fn = func;
    lapic_set_pmi_func((i386_intr_func_t)chudxnu_private_interrupt_callback);
    return KERN_SUCCESS;
}

__private_extern__ kern_return_t
chudxnu_interrupt_callback_cancel(void)
{
    interrupt_callback_fn = NULL;
    lapic_set_pmi_func(NULL);
    return KERN_SUCCESS;
}
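
/*
 * Note: lapic_set_pmi_func() hooks the local APIC's performance-monitor
 * interrupt, so chudxnu_private_interrupt_callback() runs when a
 * performance counter overflows; X86_INTERRUPT_PERFMON identifies that
 * source to the registered function.
 */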

#pragma mark **** cpu signal ****
static chudxnu_cpusig_callback_func_t cpusig_callback_fn = NULL;

static kern_return_t
chudxnu_private_cpu_signal_handler(int request)
{
    chudxnu_cpusig_callback_func_t fn = cpusig_callback_fn;

    if (fn) {
        x86_thread_state_t state;
        mach_msg_type_number_t count = x86_THREAD_STATE_COUNT;

        if (chudxnu_thread_get_state(current_thread(),
                                     x86_THREAD_STATE,
                                     (thread_state_t) &state, &count,
                                     FALSE) == KERN_SUCCESS) {
            KERNEL_DEBUG_CONSTANT(
                MACHDBG_CODE(DBG_MACH_CHUD,
                             CHUD_CPUSIG_CALLBACK) | DBG_FUNC_NONE,
                (uint32_t)fn, request, 0, 0, 0);
            return (fn)(
                request, x86_THREAD_STATE,
                (thread_state_t) &state, count);
        } else {
            return KERN_FAILURE;
        }
    }
    return KERN_SUCCESS; //ignored
}
/*
 * chudxnu_cpu_signal_handler() is called from the IPI handler
 * when a CHUD signal arrives from another processor.
 */
__private_extern__ void
chudxnu_cpu_signal_handler(void)
{
    chudcpu_signal_request_t *reqp;
    chudcpu_data_t *chudinfop;

    chudinfop = (chudcpu_data_t *) current_cpu_datap()->cpu_chud;

    mpdequeue_head(&(chudinfop->cpu_request_queue),
                   (queue_entry_t *) &reqp);
    while (reqp != NULL) {
        chudxnu_private_cpu_signal_handler(reqp->req_code);
        reqp->req_sync = 0;  // clear the sync flag to acknowledge the sender
        mpdequeue_head(&(chudinfop->cpu_request_queue),
                       (queue_entry_t *) &reqp);
    }
}

__private_extern__ kern_return_t
chudxnu_cpusig_callback_enter(chudxnu_cpusig_callback_func_t func)
{
    cpusig_callback_fn = func;
    return KERN_SUCCESS;
}

__private_extern__ kern_return_t
chudxnu_cpusig_callback_cancel(void)
{
    cpusig_callback_fn = NULL;
    return KERN_SUCCESS;
}

__private_extern__ kern_return_t
chudxnu_cpusig_send(int otherCPU, uint32_t request_code)
{
    int thisCPU;
    kern_return_t retval = KERN_FAILURE;
    chudcpu_signal_request_t request;
    uint64_t deadline;
    chudcpu_data_t *target_chudp;
    boolean_t old_level;

    disable_preemption();
    // force interrupts on for a cross CPU signal.
    old_level = chudxnu_set_interrupts_enabled(TRUE);
    thisCPU = cpu_number();

    if ((unsigned) otherCPU < real_ncpus &&
        thisCPU != otherCPU &&
        cpu_data_ptr[otherCPU]->cpu_running) {

        target_chudp = (chudcpu_data_t *)
                           cpu_data_ptr[otherCPU]->cpu_chud;

        /* Fill out request */
        request.req_sync = 0xFFFFFFFF;      /* set sync flag */
        //request.req_type = CPRQchud;      /* set request type */
        request.req_code = request_code;    /* set request */

        KERNEL_DEBUG_CONSTANT(
            MACHDBG_CODE(DBG_MACH_CHUD,
                         CHUD_CPUSIG_SEND) | DBG_FUNC_NONE,
            otherCPU, request_code, 0, 0, 0);

        /*
         * Insert the new request in the target cpu's request queue
         * and signal target cpu.
         */
        mpenqueue_tail(&target_chudp->cpu_request_queue,
                       &request.req_entry);
        i386_signal_cpu(otherCPU, MP_CHUD, ASYNC);

        /* Wait for response or timeout */
        deadline = mach_absolute_time() + LockTimeOut;
        while (request.req_sync != 0) {
            if (mach_absolute_time() > deadline) {
                panic("chudxnu_cpusig_send(%d,%d) timed out\n",
                      otherCPU, request_code);
            }
        }
        retval = KERN_SUCCESS;
    } else {
        retval = KERN_INVALID_ARGUMENT;
    }

    chudxnu_set_interrupts_enabled(old_level);
    enable_preemption();
    return retval;
}
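
/*
 * Illustrative call sequence (handler name and request code hypothetical):
 *
 *     chudxnu_cpusig_callback_enter(my_cpusig_handler); // register handler
 *     kr = chudxnu_cpusig_send(1, MY_REQUEST);          // ask CPU 1 to run it
 *
 * The sender spins on request.req_sync until the target's
 * chudxnu_cpu_signal_handler() clears it, so the request structure can
 * safely live on the sender's stack.
 */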

#pragma mark **** CHUD syscall (PPC) ****

typedef int (*PPCcallEnt)(struct savearea *save);
extern PPCcallEnt PPCcalls[];

static chudxnu_syscall_callback_func_t syscall_callback_fn = NULL;

static int
chudxnu_private_syscall_callback(struct savearea *ssp)
{
    if (ssp) {
        if (syscall_callback_fn) {
            struct ppc_thread_state64 state;
            kern_return_t retval;
            mach_msg_type_number_t count = PPC_THREAD_STATE64_COUNT;
            chudxnu_copy_savearea_to_threadstate(PPC_THREAD_STATE64,
                (thread_state_t)&state, &count, ssp);
            ssp->save_r3 = (syscall_callback_fn)(PPC_THREAD_STATE64,
                (thread_state_t)&state, count);
        } else {
            ssp->save_r3 = KERN_FAILURE;
        }
    }

    return 1; // check for ASTs (always)
}

__private_extern__ kern_return_t
chudxnu_syscall_callback_enter(chudxnu_syscall_callback_func_t func)
{
    syscall_callback_fn = func;
    PPCcalls[9] = chudxnu_private_syscall_callback;
    __asm__ volatile("eieio");  /* force order */
    __asm__ volatile("sync");   /* force to memory */
    return KERN_SUCCESS;
}

__private_extern__ kern_return_t
chudxnu_syscall_callback_cancel(void)
{
    syscall_callback_fn = NULL;
    PPCcalls[9] = NULL;
    __asm__ volatile("eieio");  /* force order */
    __asm__ volatile("sync");   /* force to memory */
    return KERN_SUCCESS;
}