/*
 * Copyright (c) 2003-2007 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <mach/boolean.h>
#include <mach/mach_types.h>

#include <kern/kern_types.h>
#include <kern/processor.h>
#include <kern/timer_call.h>
#include <kern/thread_call.h>
#include <kern/kalloc.h>
#include <kern/thread.h>

#include <machine/machine_routines.h>
#include <machine/cpu_data.h>
#include <machine/trap.h>

#include <chud/chud_xnu.h>
#include <chud/chud_xnu_private.h>
#include <chud/chud_thread.h>

#include <i386/misc_protos.h>
#include <i386/machine_cpu.h>

#include <sys/kdebug.h>
/*
 * KERNEL_DEBUG_CONSTANT sub-codes for CHUD trace points, used with
 * MACHDBG_CODE(DBG_MACH_CHUD, ...) throughout this file.
 */
#define CHUD_TIMER_CALLBACK_CANCEL	0
#define CHUD_TIMER_CALLBACK_ENTER	1
#define CHUD_TIMER_CALLBACK		2
#define CHUD_AST_SEND			3
#define CHUD_AST_CALLBACK		4
#define CHUD_CPUSIG_SEND		5
#define CHUD_CPUSIG_CALLBACK		6
/*
 * Cancel every CHUD callback type at once (cpu signals, per-cpu timers,
 * PMI interrupts, perfmon ASTs, kdebug, traps, syscalls, dtrace).
 * Used to tear down all CHUD instrumentation in one call.
 */
void chudxnu_cancel_all_callbacks(void)
{
	chudxnu_cpusig_callback_cancel();
	chudxnu_cpu_timer_callback_cancel_all();
	chudxnu_interrupt_callback_cancel();
	chudxnu_perfmon_ast_callback_cancel();
	chudxnu_kdebug_callback_cancel();
	chudxnu_trap_callback_cancel();
	chudxnu_syscall_callback_cancel();
	chudxnu_dtrace_callback_cancel();
}
74 static chudcpu_data_t chudcpu_boot_cpu
;
76 chudxnu_cpu_alloc(boolean_t boot_processor
)
78 chudcpu_data_t
*chud_proc_info
;
81 chud_proc_info
= &chudcpu_boot_cpu
;
83 chud_proc_info
= (chudcpu_data_t
*)
84 kalloc(sizeof(chudcpu_data_t
));
85 if (chud_proc_info
== (chudcpu_data_t
*)NULL
) {
89 bzero((char *)chud_proc_info
, sizeof(chudcpu_data_t
));
90 chud_proc_info
->t_deadline
= 0xFFFFFFFFFFFFFFFFULL
;
91 mpqueue_init(&chud_proc_info
->cpu_request_queue
);
94 return (void *)chud_proc_info
;
98 chudxnu_cpu_free(void *cp
)
100 if (cp
== NULL
|| cp
== (void *)&chudcpu_boot_cpu
) {
103 kfree(cp
,sizeof(chudcpu_data_t
));
108 chudxnu_private_cpu_timer_callback(
109 timer_call_param_t param0
,
110 timer_call_param_t param1
)
112 #pragma unused (param0)
113 #pragma unused (param1)
114 chudcpu_data_t
*chud_proc_info
;
116 x86_thread_state_t state
;
117 mach_msg_type_number_t count
;
118 chudxnu_cpu_timer_callback_func_t fn
;
120 oldlevel
= ml_set_interrupts_enabled(FALSE
);
121 chud_proc_info
= (chudcpu_data_t
*)(current_cpu_datap()->cpu_chud
);
123 count
= x86_THREAD_STATE_COUNT
;
124 if (chudxnu_thread_get_state(current_thread(),
126 (thread_state_t
)&state
,
128 FALSE
) == KERN_SUCCESS
) {
129 fn
= chud_proc_info
->cpu_timer_callback_fn
;
131 KERNEL_DEBUG_CONSTANT(
132 MACHDBG_CODE(DBG_MACH_CHUD
,
133 CHUD_TIMER_CALLBACK
) | DBG_FUNC_NONE
,
134 (uint32_t)fn
, 0,0,0,0);
135 //state.eip, state.cs, 0, 0);
138 (thread_state_t
)&state
,
143 ml_set_interrupts_enabled(oldlevel
);
146 __private_extern__ kern_return_t
147 chudxnu_cpu_timer_callback_enter(
148 chudxnu_cpu_timer_callback_func_t func
,
152 chudcpu_data_t
*chud_proc_info
;
155 oldlevel
= ml_set_interrupts_enabled(FALSE
);
156 chud_proc_info
= (chudcpu_data_t
*)(current_cpu_datap()->cpu_chud
);
158 // cancel any existing callback for this cpu
159 timer_call_cancel(&(chud_proc_info
->cpu_timer_call
));
161 chud_proc_info
->cpu_timer_callback_fn
= func
;
163 clock_interval_to_deadline(time
, units
, &(chud_proc_info
->t_deadline
));
164 timer_call_setup(&(chud_proc_info
->cpu_timer_call
),
165 chudxnu_private_cpu_timer_callback
, NULL
);
166 timer_call_enter(&(chud_proc_info
->cpu_timer_call
),
167 chud_proc_info
->t_deadline
);
169 KERNEL_DEBUG_CONSTANT(
170 MACHDBG_CODE(DBG_MACH_CHUD
,
171 CHUD_TIMER_CALLBACK_ENTER
) | DBG_FUNC_NONE
,
172 (uint32_t) func
, time
, units
, 0, 0);
174 ml_set_interrupts_enabled(oldlevel
);
178 __private_extern__ kern_return_t
179 chudxnu_cpu_timer_callback_cancel(void)
181 chudcpu_data_t
*chud_proc_info
;
184 oldlevel
= ml_set_interrupts_enabled(FALSE
);
185 chud_proc_info
= (chudcpu_data_t
*)(current_cpu_datap()->cpu_chud
);
187 timer_call_cancel(&(chud_proc_info
->cpu_timer_call
));
189 KERNEL_DEBUG_CONSTANT(
190 MACHDBG_CODE(DBG_MACH_CHUD
,
191 CHUD_TIMER_CALLBACK_CANCEL
) | DBG_FUNC_NONE
,
195 chud_proc_info
->t_deadline
|= ~(chud_proc_info
->t_deadline
);
196 chud_proc_info
->cpu_timer_callback_fn
= NULL
;
198 ml_set_interrupts_enabled(oldlevel
);
202 __private_extern__ kern_return_t
203 chudxnu_cpu_timer_callback_cancel_all(void)
206 chudcpu_data_t
*chud_proc_info
;
208 for(cpu
=0; cpu
< real_ncpus
; cpu
++) {
209 chud_proc_info
= (chudcpu_data_t
*) cpu_data_ptr
[cpu
]->cpu_chud
;
210 if (chud_proc_info
== NULL
)
212 timer_call_cancel(&(chud_proc_info
->cpu_timer_call
));
213 chud_proc_info
->t_deadline
|= ~(chud_proc_info
->t_deadline
);
214 chud_proc_info
->cpu_timer_callback_fn
= NULL
;
219 #pragma mark **** trap ****
220 static chudxnu_trap_callback_func_t trap_callback_fn
= NULL
;
223 chudxnu_private_trap_callback(
229 #pragma unused (regs)
230 #pragma unused (unused1)
231 #pragma unused (unused2)
232 kern_return_t retval
= KERN_FAILURE
;
233 chudxnu_trap_callback_func_t fn
= trap_callback_fn
;
237 x86_thread_state_t state
;
238 mach_msg_type_number_t count
;
239 thread_t thread
= current_thread();
241 oldlevel
= ml_set_interrupts_enabled(FALSE
);
243 /* prevent reentry into CHUD when dtracing */
244 if(thread
->t_chud
& T_IN_CHUD
) {
245 /* restore interrupts */
246 ml_set_interrupts_enabled(oldlevel
);
248 return KERN_FAILURE
; // not handled - pass off to dtrace
251 /* update the chud state bits */
252 thread
->t_chud
|= T_IN_CHUD
;
254 count
= x86_THREAD_STATE_COUNT
;
256 if(chudxnu_thread_get_state(thread
,
258 (thread_state_t
)&state
,
260 FALSE
) == KERN_SUCCESS
) {
265 (thread_state_t
)&state
,
269 /* no longer in CHUD */
270 thread
->t_chud
&= ~(T_IN_CHUD
);
272 ml_set_interrupts_enabled(oldlevel
);
278 __private_extern__ kern_return_t
279 chudxnu_trap_callback_enter(chudxnu_trap_callback_func_t func
)
281 trap_callback_fn
= func
;
282 perfTrapHook
= chudxnu_private_trap_callback
;
286 __private_extern__ kern_return_t
287 chudxnu_trap_callback_cancel(void)
289 trap_callback_fn
= NULL
;
294 #pragma mark **** ast ****
296 chudxnu_perfmon_ast_callback_func_t perfmon_ast_callback_fn
= NULL
;
299 chudxnu_private_chud_ast_callback(
305 #pragma unused (trapno)
306 #pragma unused (regs)
307 #pragma unused (unused1)
308 #pragma unused (unused2)
309 boolean_t oldlevel
= ml_set_interrupts_enabled(FALSE
);
310 ast_t
*myast
= ast_pending();
311 kern_return_t retval
= KERN_FAILURE
;
312 chudxnu_perfmon_ast_callback_func_t fn
= perfmon_ast_callback_fn
;
314 if (*myast
& AST_CHUD_URGENT
) {
315 *myast
&= ~(AST_CHUD_URGENT
| AST_CHUD
);
316 if ((*myast
& AST_PREEMPTION
) != AST_PREEMPTION
)
317 *myast
&= ~(AST_URGENT
);
318 retval
= KERN_SUCCESS
;
319 } else if (*myast
& AST_CHUD
) {
320 *myast
&= ~(AST_CHUD
);
321 retval
= KERN_SUCCESS
;
325 x86_thread_state_t state
;
326 mach_msg_type_number_t count
;
327 count
= x86_THREAD_STATE_COUNT
;
329 if (chudxnu_thread_get_state(
332 (thread_state_t
) &state
, &count
,
333 TRUE
) == KERN_SUCCESS
) {
335 KERNEL_DEBUG_CONSTANT(
336 MACHDBG_CODE(DBG_MACH_CHUD
,
337 CHUD_AST_CALLBACK
) | DBG_FUNC_NONE
,
338 (uint32_t) fn
, 0, 0, 0, 0);
342 (thread_state_t
) &state
,
347 ml_set_interrupts_enabled(oldlevel
);
351 __private_extern__ kern_return_t
352 chudxnu_perfmon_ast_callback_enter(chudxnu_perfmon_ast_callback_func_t func
)
354 perfmon_ast_callback_fn
= func
;
355 perfASTHook
= chudxnu_private_chud_ast_callback
;
359 __private_extern__ kern_return_t
360 chudxnu_perfmon_ast_callback_cancel(void)
362 perfmon_ast_callback_fn
= NULL
;
367 __private_extern__ kern_return_t
368 chudxnu_perfmon_ast_send_urgent(boolean_t urgent
)
370 boolean_t oldlevel
= ml_set_interrupts_enabled(FALSE
);
371 ast_t
*myast
= ast_pending();
374 *myast
|= (AST_CHUD_URGENT
| AST_URGENT
);
376 *myast
|= (AST_CHUD
);
379 KERNEL_DEBUG_CONSTANT(
380 MACHDBG_CODE(DBG_MACH_CHUD
, CHUD_AST_SEND
) | DBG_FUNC_NONE
,
383 ml_set_interrupts_enabled(oldlevel
);
387 __private_extern__ kern_return_t
388 chudxnu_perfmon_ast_send(void)
390 return chudxnu_perfmon_ast_send_urgent(TRUE
);
393 #pragma mark **** interrupt ****
394 static chudxnu_interrupt_callback_func_t interrupt_callback_fn
= NULL
;
397 chudxnu_private_interrupt_callback(void *foo
)
400 chudxnu_interrupt_callback_func_t fn
= interrupt_callback_fn
;
404 x86_thread_state_t state
;
405 mach_msg_type_number_t count
;
407 oldlevel
= ml_set_interrupts_enabled(FALSE
);
409 count
= x86_THREAD_STATE_COUNT
;
410 if(chudxnu_thread_get_state(current_thread(),
412 (thread_state_t
)&state
,
414 FALSE
) == KERN_SUCCESS
) {
416 X86_INTERRUPT_PERFMON
,
418 (thread_state_t
)&state
,
421 ml_set_interrupts_enabled(oldlevel
);
425 __private_extern__ kern_return_t
426 chudxnu_interrupt_callback_enter(chudxnu_interrupt_callback_func_t func
)
428 interrupt_callback_fn
= func
;
429 lapic_set_pmi_func((i386_intr_func_t
)chudxnu_private_interrupt_callback
);
433 __private_extern__ kern_return_t
434 chudxnu_interrupt_callback_cancel(void)
436 interrupt_callback_fn
= NULL
;
437 lapic_set_pmi_func(NULL
);
441 #pragma mark **** cpu signal ****
442 static chudxnu_cpusig_callback_func_t cpusig_callback_fn
= NULL
;
445 chudxnu_private_cpu_signal_handler(int request
)
447 chudxnu_cpusig_callback_func_t fn
= cpusig_callback_fn
;
450 x86_thread_state_t state
;
451 mach_msg_type_number_t count
= x86_THREAD_STATE_COUNT
;
453 if (chudxnu_thread_get_state(current_thread(),
455 (thread_state_t
) &state
, &count
,
456 FALSE
) == KERN_SUCCESS
) {
457 KERNEL_DEBUG_CONSTANT(
458 MACHDBG_CODE(DBG_MACH_CHUD
,
459 CHUD_CPUSIG_CALLBACK
) | DBG_FUNC_NONE
,
460 (uint32_t)fn
, request
, 0, 0, 0);
462 request
, x86_THREAD_STATE
,
463 (thread_state_t
) &state
, count
);
468 return KERN_SUCCESS
; //ignored
471 * chudxnu_cpu_signal_handler() is called from the IPI handler
472 * when a CHUD signal arrives from another processor.
474 __private_extern__
void
475 chudxnu_cpu_signal_handler(void)
477 chudcpu_signal_request_t
*reqp
;
478 chudcpu_data_t
*chudinfop
;
480 chudinfop
= (chudcpu_data_t
*) current_cpu_datap()->cpu_chud
;
482 mpdequeue_head(&(chudinfop
->cpu_request_queue
),
483 (queue_entry_t
*) &reqp
);
484 while (reqp
!= NULL
) {
485 chudxnu_private_cpu_signal_handler(reqp
->req_code
);
487 mpdequeue_head(&(chudinfop
->cpu_request_queue
),
488 (queue_entry_t
*) &reqp
);
492 __private_extern__ kern_return_t
493 chudxnu_cpusig_callback_enter(chudxnu_cpusig_callback_func_t func
)
495 cpusig_callback_fn
= func
;
499 __private_extern__ kern_return_t
500 chudxnu_cpusig_callback_cancel(void)
502 cpusig_callback_fn
= NULL
;
506 __private_extern__ kern_return_t
507 chudxnu_cpusig_send(int otherCPU
, uint32_t request_code
)
510 kern_return_t retval
= KERN_FAILURE
;
511 chudcpu_signal_request_t request
;
513 chudcpu_data_t
*target_chudp
;
516 disable_preemption();
517 // force interrupts on for a cross CPU signal.
518 old_level
= chudxnu_set_interrupts_enabled(TRUE
);
519 thisCPU
= cpu_number();
521 if ((unsigned) otherCPU
< real_ncpus
&&
522 thisCPU
!= otherCPU
&&
523 cpu_data_ptr
[otherCPU
]->cpu_running
) {
525 target_chudp
= (chudcpu_data_t
*)
526 cpu_data_ptr
[otherCPU
]->cpu_chud
;
528 /* Fill out request */
529 request
.req_sync
= 0xFFFFFFFF; /* set sync flag */
530 //request.req_type = CPRQchud; /* set request type */
531 request
.req_code
= request_code
; /* set request */
533 KERNEL_DEBUG_CONSTANT(
534 MACHDBG_CODE(DBG_MACH_CHUD
,
535 CHUD_CPUSIG_SEND
) | DBG_FUNC_NONE
,
536 otherCPU
, request_code
, 0, 0, 0);
539 * Insert the new request in the target cpu's request queue
540 * and signal target cpu.
542 mpenqueue_tail(&target_chudp
->cpu_request_queue
,
544 i386_signal_cpu(otherCPU
, MP_CHUD
, ASYNC
);
546 /* Wait for response or timeout */
547 deadline
= mach_absolute_time() + LockTimeOut
;
548 while (request
.req_sync
!= 0) {
549 if (mach_absolute_time() > deadline
) {
550 panic("chudxnu_cpusig_send(%d,%d) timed out\n",
551 otherCPU
, request_code
);
555 retval
= KERN_SUCCESS
;
557 retval
= KERN_INVALID_ARGUMENT
;
560 chudxnu_set_interrupts_enabled(old_level
);