/*
 * Copyright (c) 2003-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_OSREFERENCE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the
 * License may not be used to create, or enable the creation or
 * redistribution of, unlawful or unlicensed copies of an Apple operating
 * system, or to circumvent, violate, or enable the circumvention or
 * violation of, any terms of an Apple operating system software license
 * agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_OSREFERENCE_HEADER_END@
 */
32 #include <mach/boolean.h>
33 #include <mach/mach_types.h>
35 #include <kern/kern_types.h>
36 #include <kern/processor.h>
37 #include <kern/timer_call.h>
38 #include <kern/thread_call.h>
39 #include <kern/kalloc.h>
40 #include <kern/thread.h>
42 #include <machine/machine_routines.h>
43 #include <machine/cpu_data.h>
44 #include <machine/trap.h>
46 #include <chud/chud_xnu.h>
47 #include <chud/chud_xnu_private.h>
49 #include <i386/misc_protos.h>
51 #include <i386/machine_cpu.h>
53 #include <sys/kdebug.h>
/*
 * kdebug sub-codes used with MACHDBG_CODE(DBG_MACH_CHUD, ...) to trace
 * CHUD callback installation, cancellation, and dispatch.
 */
#define CHUD_TIMER_CALLBACK_CANCEL	0
#define CHUD_TIMER_CALLBACK_ENTER	1
#define CHUD_TIMER_CALLBACK		2
#define CHUD_AST_SEND			3
#define CHUD_AST_CALLBACK		4
#define CHUD_CPUSIG_SEND		5
#define CHUD_CPUSIG_CALLBACK		6
/*
 * Cancel every registered CHUD callback (cpu signals, cpu timers,
 * PMI interrupts, perfmon ASTs, kdebug, thread timers, traps, and
 * syscalls).  Called when a CHUD client detaches so no stale hooks
 * remain installed in the kernel.
 */
void
chudxnu_cancel_all_callbacks(void)
{
	chudxnu_cpusig_callback_cancel();
	chudxnu_cpu_timer_callback_cancel_all();
	chudxnu_interrupt_callback_cancel();
	chudxnu_perfmon_ast_callback_cancel();
	chudxnu_kdebug_callback_cancel();
	chudxnu_thread_timer_callback_cancel();
	chudxnu_trap_callback_cancel();
	chudxnu_syscall_callback_cancel();
}
77 static chudcpu_data_t chudcpu_boot_cpu
;
79 chudxnu_cpu_alloc(boolean_t boot_processor
)
81 chudcpu_data_t
*chud_proc_info
;
85 chud_proc_info
= &chudcpu_boot_cpu
;
87 chud_proc_info
= (chudcpu_data_t
*)
88 kalloc(sizeof(chudcpu_data_t
));
89 if (chud_proc_info
== (chudcpu_data_t
*)NULL
) {
93 bzero((char *)chud_proc_info
, sizeof(chudcpu_data_t
));
94 chud_proc_info
->t_deadline
= 0xFFFFFFFFFFFFFFFFULL
;
95 mpqueue_init(&chud_proc_info
->cpu_request_queue
);
98 return (void *)chud_proc_info
;
102 chudxnu_cpu_free(void *cp
)
104 if (cp
== NULL
|| cp
== (void *)&chudcpu_boot_cpu
) {
107 kfree(cp
,sizeof(chudcpu_data_t
));
112 chudxnu_private_cpu_timer_callback(
113 timer_call_param_t param0
,
114 timer_call_param_t param1
)
116 #pragma unused (param0)
117 #pragma unused (param1)
118 chudcpu_data_t
*chud_proc_info
;
120 x86_thread_state_t state
;
121 mach_msg_type_number_t count
;
122 chudxnu_cpu_timer_callback_func_t fn
;
124 oldlevel
= ml_set_interrupts_enabled(FALSE
);
125 chud_proc_info
= (chudcpu_data_t
*)(current_cpu_datap()->cpu_chud
);
127 count
= x86_THREAD_STATE_COUNT
;
128 if (chudxnu_thread_get_state(current_thread(),
130 (thread_state_t
)&state
,
132 FALSE
) == KERN_SUCCESS
) {
133 fn
= chud_proc_info
->cpu_timer_callback_fn
;
135 KERNEL_DEBUG_CONSTANT(
136 MACHDBG_CODE(DBG_MACH_CHUD
,
137 CHUD_TIMER_CALLBACK
) | DBG_FUNC_NONE
,
138 (uint32_t)fn
, 0,0,0,0);
139 //state.eip, state.cs, 0, 0);
142 (thread_state_t
)&state
,
147 ml_set_interrupts_enabled(oldlevel
);
150 __private_extern__ kern_return_t
151 chudxnu_cpu_timer_callback_enter(
152 chudxnu_cpu_timer_callback_func_t func
,
156 chudcpu_data_t
*chud_proc_info
;
159 oldlevel
= ml_set_interrupts_enabled(FALSE
);
160 chud_proc_info
= (chudcpu_data_t
*)(current_cpu_datap()->cpu_chud
);
162 // cancel any existing callback for this cpu
163 timer_call_cancel(&(chud_proc_info
->cpu_timer_call
));
165 chud_proc_info
->cpu_timer_callback_fn
= func
;
167 clock_interval_to_deadline(time
, units
, &(chud_proc_info
->t_deadline
));
168 timer_call_setup(&(chud_proc_info
->cpu_timer_call
),
169 chudxnu_private_cpu_timer_callback
, NULL
);
170 timer_call_enter(&(chud_proc_info
->cpu_timer_call
),
171 chud_proc_info
->t_deadline
);
173 KERNEL_DEBUG_CONSTANT(
174 MACHDBG_CODE(DBG_MACH_CHUD
,
175 CHUD_TIMER_CALLBACK_ENTER
) | DBG_FUNC_NONE
,
176 (uint32_t) func
, time
, units
, 0, 0);
178 ml_set_interrupts_enabled(oldlevel
);
182 __private_extern__ kern_return_t
183 chudxnu_cpu_timer_callback_cancel(void)
185 chudcpu_data_t
*chud_proc_info
;
188 oldlevel
= ml_set_interrupts_enabled(FALSE
);
189 chud_proc_info
= (chudcpu_data_t
*)(current_cpu_datap()->cpu_chud
);
191 timer_call_cancel(&(chud_proc_info
->cpu_timer_call
));
193 KERNEL_DEBUG_CONSTANT(
194 MACHDBG_CODE(DBG_MACH_CHUD
,
195 CHUD_TIMER_CALLBACK_CANCEL
) | DBG_FUNC_NONE
,
199 chud_proc_info
->t_deadline
|= ~(chud_proc_info
->t_deadline
);
200 chud_proc_info
->cpu_timer_callback_fn
= NULL
;
202 ml_set_interrupts_enabled(oldlevel
);
206 __private_extern__ kern_return_t
207 chudxnu_cpu_timer_callback_cancel_all(void)
210 chudcpu_data_t
*chud_proc_info
;
212 for(cpu
=0; cpu
< real_ncpus
; cpu
++) {
213 chud_proc_info
= (chudcpu_data_t
*) cpu_data_ptr
[cpu
]->cpu_chud
;
214 if (chud_proc_info
== NULL
)
216 timer_call_cancel(&(chud_proc_info
->cpu_timer_call
));
217 chud_proc_info
->t_deadline
|= ~(chud_proc_info
->t_deadline
);
218 chud_proc_info
->cpu_timer_callback_fn
= NULL
;
223 #pragma mark **** trap ****
224 static chudxnu_trap_callback_func_t trap_callback_fn
= NULL
;
227 chudxnu_private_trap_callback(
233 #pragma unused (regs)
234 #pragma unused (unused1)
235 #pragma unused (unused2)
236 kern_return_t retval
= KERN_FAILURE
;
237 chudxnu_trap_callback_func_t fn
= trap_callback_fn
;
241 x86_thread_state_t state
; // once we have an 64bit- independent way to determine if a thread is
242 // running kernel code, we'll switch to x86_thread_state_t.
243 mach_msg_type_number_t count
;
245 oldlevel
= ml_set_interrupts_enabled(FALSE
);
247 count
= x86_THREAD_STATE_COUNT
;
248 if(chudxnu_thread_get_state(current_thread(),
250 (thread_state_t
)&state
,
252 FALSE
) == KERN_SUCCESS
) {
257 (thread_state_t
)&state
,
260 ml_set_interrupts_enabled(oldlevel
);
266 __private_extern__ kern_return_t
267 chudxnu_trap_callback_enter(chudxnu_trap_callback_func_t func
)
269 trap_callback_fn
= func
;
270 perfTrapHook
= chudxnu_private_trap_callback
;
274 __private_extern__ kern_return_t
275 chudxnu_trap_callback_cancel(void)
277 trap_callback_fn
= NULL
;
282 #pragma mark **** ast ****
284 chudxnu_perfmon_ast_callback_func_t perfmon_ast_callback_fn
= NULL
;
287 chudxnu_private_chud_ast_callback(
293 #pragma unused (trapno)
294 #pragma unused (regs)
295 #pragma unused (unused1)
296 #pragma unused (unused2)
297 boolean_t oldlevel
= ml_set_interrupts_enabled(FALSE
);
298 ast_t
*myast
= ast_pending();
299 kern_return_t retval
= KERN_FAILURE
;
300 chudxnu_perfmon_ast_callback_func_t fn
= perfmon_ast_callback_fn
;
302 if (*myast
& AST_CHUD_URGENT
) {
303 *myast
&= ~(AST_CHUD_URGENT
| AST_CHUD
);
304 if ((*myast
& AST_PREEMPTION
) != AST_PREEMPTION
)
305 *myast
&= ~(AST_URGENT
);
306 retval
= KERN_SUCCESS
;
307 } else if (*myast
& AST_CHUD
) {
308 *myast
&= ~(AST_CHUD
);
309 retval
= KERN_SUCCESS
;
313 x86_thread_state_t state
;
314 mach_msg_type_number_t count
;
315 count
= x86_THREAD_STATE_COUNT
;
317 if (chudxnu_thread_get_state(
320 (thread_state_t
) &state
, &count
,
321 TRUE
) == KERN_SUCCESS
) {
323 KERNEL_DEBUG_CONSTANT(
324 MACHDBG_CODE(DBG_MACH_CHUD
,
325 CHUD_AST_CALLBACK
) | DBG_FUNC_NONE
,
326 (uint32_t) fn
, 0, 0, 0, 0);
330 (thread_state_t
) &state
,
335 ml_set_interrupts_enabled(oldlevel
);
339 __private_extern__ kern_return_t
340 chudxnu_perfmon_ast_callback_enter(chudxnu_perfmon_ast_callback_func_t func
)
342 perfmon_ast_callback_fn
= func
;
343 perfASTHook
= chudxnu_private_chud_ast_callback
;
347 __private_extern__ kern_return_t
348 chudxnu_perfmon_ast_callback_cancel(void)
350 perfmon_ast_callback_fn
= NULL
;
355 __private_extern__ kern_return_t
356 chudxnu_perfmon_ast_send_urgent(boolean_t urgent
)
358 boolean_t oldlevel
= ml_set_interrupts_enabled(FALSE
);
359 ast_t
*myast
= ast_pending();
362 *myast
|= (AST_CHUD_URGENT
| AST_URGENT
);
364 *myast
|= (AST_CHUD
);
367 KERNEL_DEBUG_CONSTANT(
368 MACHDBG_CODE(DBG_MACH_CHUD
, CHUD_AST_SEND
) | DBG_FUNC_NONE
,
371 ml_set_interrupts_enabled(oldlevel
);
375 __private_extern__ kern_return_t
376 chudxnu_perfmon_ast_send(void)
378 return chudxnu_perfmon_ast_send_urgent(TRUE
);
381 #pragma mark **** interrupt ****
382 static chudxnu_interrupt_callback_func_t interrupt_callback_fn
= NULL
;
385 chudxnu_private_interrupt_callback(void *foo
)
388 chudxnu_interrupt_callback_func_t fn
= interrupt_callback_fn
;
392 x86_thread_state_t state
;
393 mach_msg_type_number_t count
;
395 oldlevel
= ml_set_interrupts_enabled(FALSE
);
397 count
= x86_THREAD_STATE_COUNT
;
398 if(chudxnu_thread_get_state(current_thread(),
400 (thread_state_t
)&state
,
402 FALSE
) == KERN_SUCCESS
) {
404 X86_INTERRUPT_PERFMON
,
406 (thread_state_t
)&state
,
409 ml_set_interrupts_enabled(oldlevel
);
413 __private_extern__ kern_return_t
414 chudxnu_interrupt_callback_enter(chudxnu_interrupt_callback_func_t func
)
416 interrupt_callback_fn
= func
;
417 lapic_set_pmi_func((i386_intr_func_t
)chudxnu_private_interrupt_callback
);
421 __private_extern__ kern_return_t
422 chudxnu_interrupt_callback_cancel(void)
424 interrupt_callback_fn
= NULL
;
425 lapic_set_pmi_func(NULL
);
429 #pragma mark **** cpu signal ****
430 static chudxnu_cpusig_callback_func_t cpusig_callback_fn
= NULL
;
433 chudxnu_private_cpu_signal_handler(int request
)
435 chudxnu_cpusig_callback_func_t fn
= cpusig_callback_fn
;
438 x86_thread_state_t state
;
439 mach_msg_type_number_t count
= x86_THREAD_STATE_COUNT
;
441 if (chudxnu_thread_get_state(current_thread(),
443 (thread_state_t
) &state
, &count
,
444 FALSE
) == KERN_SUCCESS
) {
445 KERNEL_DEBUG_CONSTANT(
446 MACHDBG_CODE(DBG_MACH_CHUD
,
447 CHUD_CPUSIG_CALLBACK
) | DBG_FUNC_NONE
,
448 (uint32_t)fn
, request
, 0, 0, 0);
450 request
, x86_THREAD_STATE
,
451 (thread_state_t
) &state
, count
);
456 return KERN_SUCCESS
; //ignored
459 * chudxnu_cpu_signal_handler() is called from the IPI handler
460 * when a CHUD signal arrives from another processor.
462 __private_extern__
void
463 chudxnu_cpu_signal_handler(void)
465 chudcpu_signal_request_t
*reqp
;
466 chudcpu_data_t
*chudinfop
;
468 chudinfop
= (chudcpu_data_t
*) current_cpu_datap()->cpu_chud
;
470 mpdequeue_head(&(chudinfop
->cpu_request_queue
),
471 (queue_entry_t
*) &reqp
);
472 while (reqp
!= NULL
) {
473 chudxnu_private_cpu_signal_handler(reqp
->req_code
);
475 mpdequeue_head(&(chudinfop
->cpu_request_queue
),
476 (queue_entry_t
*) &reqp
);
480 __private_extern__ kern_return_t
481 chudxnu_cpusig_callback_enter(chudxnu_cpusig_callback_func_t func
)
483 cpusig_callback_fn
= func
;
487 __private_extern__ kern_return_t
488 chudxnu_cpusig_callback_cancel(void)
490 cpusig_callback_fn
= NULL
;
494 __private_extern__ kern_return_t
495 chudxnu_cpusig_send(int otherCPU
, uint32_t request_code
)
498 kern_return_t retval
= KERN_FAILURE
;
499 chudcpu_signal_request_t request
;
501 chudcpu_data_t
*target_chudp
;
504 disable_preemption();
505 // force interrupts on for a cross CPU signal.
506 old_level
= chudxnu_set_interrupts_enabled(TRUE
);
507 thisCPU
= cpu_number();
509 if ((unsigned) otherCPU
< real_ncpus
&&
510 thisCPU
!= otherCPU
&&
511 cpu_data_ptr
[otherCPU
]->cpu_running
) {
513 target_chudp
= (chudcpu_data_t
*)
514 cpu_data_ptr
[otherCPU
]->cpu_chud
;
516 /* Fill out request */
517 request
.req_sync
= 0xFFFFFFFF; /* set sync flag */
518 //request.req_type = CPRQchud; /* set request type */
519 request
.req_code
= request_code
; /* set request */
521 KERNEL_DEBUG_CONSTANT(
522 MACHDBG_CODE(DBG_MACH_CHUD
,
523 CHUD_CPUSIG_SEND
) | DBG_FUNC_NONE
,
524 otherCPU
, request_code
, 0, 0, 0);
527 * Insert the new request in the target cpu's request queue
528 * and signal target cpu.
530 mpenqueue_tail(&target_chudp
->cpu_request_queue
,
532 i386_signal_cpu(otherCPU
, MP_CHUD
, ASYNC
);
534 /* Wait for response or timeout */
535 deadline
= mach_absolute_time() + LockTimeOut
;
536 while (request
.req_sync
!= 0) {
537 if (mach_absolute_time() > deadline
) {
538 panic("chudxnu_cpusig_send(%d,%d) timed out\n",
539 otherCPU
, request_code
);
543 retval
= KERN_SUCCESS
;
545 retval
= KERN_INVALID_ARGUMENT
;
548 chudxnu_set_interrupts_enabled(old_level
);
554 #pragma mark **** CHUD syscall (PPC) ****
556 typedef int (*PPCcallEnt
)(struct savearea
*save
);
557 extern PPCcallEnt PPCcalls
[];
559 static chudxnu_syscall_callback_func_t syscall_callback_fn
= NULL
;
562 chudxnu_private_syscall_callback(struct savearea
*ssp
)
565 if(syscall_callback_fn
) {
566 struct ppc_thread_state64 state
;
567 kern_return_t retval
;
568 mach_msg_type_number_t count
= PPC_THREAD_STATE64_COUNT
;
569 chudxnu_copy_savearea_to_threadstate(PPC_THREAD_STATE64
, (thread_state_t
)&state
, &count
, ssp
);
570 ssp
->save_r3
= (syscall_callback_fn
)(PPC_THREAD_STATE64
, (thread_state_t
)&state
, count
);
572 ssp
->save_r3
= KERN_FAILURE
;
576 return 1; // check for ASTs (always)
579 __private_extern__ kern_return_t
580 chudxnu_syscall_callback_enter(chudxnu_syscall_callback_func_t func
)
582 syscall_callback_fn
= func
;
583 PPCcalls
[9] = chudxnu_private_syscall_callback
;
584 __asm__
volatile("eieio"); /* force order */
585 __asm__
volatile("sync"); /* force to memory */
589 __private_extern__ kern_return_t
590 chudxnu_syscall_callback_cancel(void)
592 syscall_callback_fn
= NULL
;
594 __asm__
volatile("eieio"); /* force order */
595 __asm__
volatile("sync"); /* force to memory */