/*
 * Copyright (c) 2003-2009 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <mach/boolean.h>
#include <mach/mach_types.h>

#include <kern/kern_types.h>
#include <kern/processor.h>
#include <kern/timer_call.h>
#include <kern/thread_call.h>
#include <kern/kalloc.h>
#include <kern/thread.h>

#include <libkern/OSAtomic.h>

#include <machine/machine_routines.h>
#include <machine/cpu_data.h>
#include <machine/trap.h>

#include <chud/chud_xnu.h>
#include <chud/chud_xnu_private.h>

#include <i386/misc_protos.h>
#include <i386/lapic.h>
#include <i386/mp.h>
#include <i386/machine_cpu.h>

#include <sys/kdebug.h>
#define CHUD_TIMER_CALLBACK_CANCEL	0
#define CHUD_TIMER_CALLBACK_ENTER	1
#define CHUD_TIMER_CALLBACK		2
#define CHUD_AST_SEND			3
#define CHUD_AST_CALLBACK		4
#define CHUD_CPUSIG_SEND		5
#define CHUD_CPUSIG_CALLBACK		6
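/*
 * Note (added for clarity; an assumption about intent): these constants name
 * the CHUD code paths below for kdebug tracing, intended as event sub-codes
 * under DBG_MACH_CHUD from <sys/kdebug.h>, e.g. wrapped as
 * MACHDBG_CODE(DBG_MACH_CHUD, CHUD_TIMER_CALLBACK).
 */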
__private_extern__
void chudxnu_cancel_all_callbacks(void)
{
	chudxnu_cpusig_callback_cancel();
	chudxnu_cpu_timer_callback_cancel_all();
	chudxnu_interrupt_callback_cancel();
	chudxnu_perfmon_ast_callback_cancel();
	chudxnu_kdebug_callback_cancel();
	chudxnu_trap_callback_cancel();
	chudxnu_syscall_callback_cancel();
	chudxnu_dtrace_callback_cancel();
}
static lck_grp_t	chud_request_lck_grp;
static lck_grp_attr_t	chud_request_lck_grp_attr;
static lck_attr_t	chud_request_lck_attr;

static chudcpu_data_t	chudcpu_boot_cpu;
void *
chudxnu_cpu_alloc(boolean_t boot_processor)
{
	chudcpu_data_t	*chud_proc_info;

	if (boot_processor) {
		chud_proc_info = &chudcpu_boot_cpu;

		lck_attr_setdefault(&chud_request_lck_attr);
		lck_grp_attr_setdefault(&chud_request_lck_grp_attr);
		lck_grp_init(&chud_request_lck_grp, "chud_request",
			     &chud_request_lck_grp_attr);
	} else {
		chud_proc_info = (chudcpu_data_t *)
			kalloc(sizeof(chudcpu_data_t));
		if (chud_proc_info == (chudcpu_data_t *)NULL) {
			return (void *)NULL;
		}
	}
	bzero((char *)chud_proc_info, sizeof(chudcpu_data_t));
	chud_proc_info->t_deadline = 0xFFFFFFFFFFFFFFFFULL;

	mpqueue_init(&chud_proc_info->cpu_request_queue,
		     &chud_request_lck_grp, &chud_request_lck_attr);

	/* timer_call_cancel() can be called before first usage, so init here: <rdar://problem/9320202> */
	timer_call_setup(&(chud_proc_info->cpu_timer_call), NULL, NULL);

	return (void *)chud_proc_info;
}
void
chudxnu_cpu_free(void *cp)
{
	if (cp == NULL || cp == (void *)&chudcpu_boot_cpu) {
		return;
	} else {
		kfree(cp, sizeof(chudcpu_data_t));
	}
}
static void
chudxnu_private_cpu_timer_callback(
	timer_call_param_t param0,
	timer_call_param_t param1)
{
#pragma unused (param0)
#pragma unused (param1)
	chudcpu_data_t			*chud_proc_info;
	boolean_t			oldlevel;
	x86_thread_state_t		state;
	mach_msg_type_number_t		count;
	chudxnu_cpu_timer_callback_func_t fn;

	oldlevel = ml_set_interrupts_enabled(FALSE);
	chud_proc_info = (chudcpu_data_t *)(current_cpu_datap()->cpu_chud);

	count = x86_THREAD_STATE_COUNT;
	if (chudxnu_thread_get_state(current_thread(),
				     x86_THREAD_STATE,
				     (thread_state_t)&state,
				     &count,
				     FALSE) == KERN_SUCCESS) {
		fn = chud_proc_info->cpu_timer_callback_fn;
		if (fn) {
			(fn)(
				x86_THREAD_STATE,
				(thread_state_t)&state,
				count);
		}
	}

	ml_set_interrupts_enabled(oldlevel);
}
__private_extern__ kern_return_t
chudxnu_cpu_timer_callback_enter(
	chudxnu_cpu_timer_callback_func_t func,
	uint32_t time,
	uint32_t units)
{
	chudcpu_data_t	*chud_proc_info;
	boolean_t	oldlevel;

	oldlevel = ml_set_interrupts_enabled(FALSE);
	chud_proc_info = (chudcpu_data_t *)(current_cpu_datap()->cpu_chud);

	// cancel any existing callback for this cpu
	timer_call_cancel(&(chud_proc_info->cpu_timer_call));

	chud_proc_info->cpu_timer_callback_fn = func;

	clock_interval_to_deadline(time, units, &(chud_proc_info->t_deadline));
	timer_call_setup(&(chud_proc_info->cpu_timer_call),
			 chudxnu_private_cpu_timer_callback, NULL);
	timer_call_enter(&(chud_proc_info->cpu_timer_call),
			 chud_proc_info->t_deadline,
			 TIMER_CALL_SYS_CRITICAL|TIMER_CALL_LOCAL);

	ml_set_interrupts_enabled(oldlevel);
	return KERN_SUCCESS;
}
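/*
 * Illustrative sketch (not part of the original file): how a CHUD client
 * might arm the per-CPU timer above. The callback name, its body, and the
 * 10 ms interval are hypothetical; the entry point and the
 * (interval, scale-factor) convention of clock_interval_to_deadline() are
 * taken from the code above.
 */
#if 0
static kern_return_t
example_timer_cb(thread_flavor_t flavor, thread_state_t tstate,
		 mach_msg_type_number_t count)
{
#pragma unused (flavor, tstate, count)
	/* sample here; re-arm via chudxnu_cpu_timer_callback_enter() if periodic */
	return KERN_SUCCESS;
}

static void
example_arm_timer(void)
{
	/* fire once on this CPU, 10 milliseconds from now */
	chudxnu_cpu_timer_callback_enter(example_timer_cb, 10, NSEC_PER_MSEC);
}
#endif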
__private_extern__ kern_return_t
chudxnu_cpu_timer_callback_cancel(void)
{
	chudcpu_data_t	*chud_proc_info;
	boolean_t	oldlevel;

	oldlevel = ml_set_interrupts_enabled(FALSE);
	chud_proc_info = (chudcpu_data_t *)(current_cpu_datap()->cpu_chud);

	timer_call_cancel(&(chud_proc_info->cpu_timer_call));

	// set t_deadline to the max 64-bit value so it reads as "no deadline"
	chud_proc_info->t_deadline |= ~(chud_proc_info->t_deadline);
	chud_proc_info->cpu_timer_callback_fn = NULL;

	ml_set_interrupts_enabled(oldlevel);
	return KERN_SUCCESS;
}
__private_extern__ kern_return_t
chudxnu_cpu_timer_callback_cancel_all(void)
{
	unsigned int	cpu;
	chudcpu_data_t	*chud_proc_info;

	for (cpu = 0; cpu < real_ncpus; cpu++) {
		chud_proc_info = (chudcpu_data_t *) cpu_data_ptr[cpu]->cpu_chud;
		if (chud_proc_info == NULL)
			continue;
		timer_call_cancel(&(chud_proc_info->cpu_timer_call));
		chud_proc_info->t_deadline |= ~(chud_proc_info->t_deadline);
		chud_proc_info->cpu_timer_callback_fn = NULL;
	}
	return KERN_SUCCESS;
}
#pragma mark **** trap ****
static kern_return_t chud_null_trap(uint32_t trapentry, thread_flavor_t flavor,
	thread_state_t tstate, mach_msg_type_number_t count);
static chudxnu_trap_callback_func_t trap_callback_fn = chud_null_trap;

static kern_return_t chud_null_trap(uint32_t trapentry __unused, thread_flavor_t flavor __unused,
	thread_state_t tstate __unused, mach_msg_type_number_t count __unused) {
	return KERN_FAILURE;
}
static kern_return_t
chudxnu_private_trap_callback(
	int			trapno,
	void			*regs,
	int			unused1,
	int			unused2)
{
#pragma unused (regs)
#pragma unused (unused1)
#pragma unused (unused2)
	kern_return_t retval = KERN_FAILURE;
	chudxnu_trap_callback_func_t fn = trap_callback_fn;

	if (fn) {
		boolean_t		oldlevel;
		x86_thread_state_t	state;
		mach_msg_type_number_t	count;
		thread_t		thread = current_thread();

		oldlevel = ml_set_interrupts_enabled(FALSE);

		/* prevent reentry into CHUD when dtracing */
		if (thread->t_chud & T_IN_CHUD) {
			/* restore interrupts */
			ml_set_interrupts_enabled(oldlevel);

			return KERN_FAILURE;	// not handled - pass off to dtrace
		}

		/* update the chud state bits */
		thread->t_chud |= T_IN_CHUD;

		count = x86_THREAD_STATE_COUNT;
		if (chudxnu_thread_get_state(thread,
					     x86_THREAD_STATE,
					     (thread_state_t)&state,
					     &count,
					     FALSE) == KERN_SUCCESS) {
			retval = (fn)(
				trapno,
				x86_THREAD_STATE,
				(thread_state_t)&state,
				count);
		}

		/* no longer in CHUD */
		thread->t_chud &= ~(T_IN_CHUD);

		ml_set_interrupts_enabled(oldlevel);
	}

	return retval;
}
__private_extern__ kern_return_t
chudxnu_trap_callback_enter(chudxnu_trap_callback_func_t func)
{
	/* install the shared hook first, then atomically publish the client's fn */
	if (OSCompareAndSwapPtr(NULL, chudxnu_private_trap_callback,
		(void * volatile *)&perfTrapHook)) {

		chudxnu_trap_callback_func_t old = trap_callback_fn;
		while (!OSCompareAndSwapPtr(old, func,
			(void * volatile *)&trap_callback_fn)) {
			old = trap_callback_fn;
		}
		return KERN_SUCCESS;
	}
	return KERN_FAILURE;
}
__private_extern__ kern_return_t
chudxnu_trap_callback_cancel(void)
{
	if (OSCompareAndSwapPtr(chudxnu_private_trap_callback, NULL,
		(void * volatile *)&perfTrapHook)) {

		chudxnu_trap_callback_func_t old = trap_callback_fn;
		while (!OSCompareAndSwapPtr(old, chud_null_trap,
			(void * volatile *)&trap_callback_fn)) {
			old = trap_callback_fn;
		}
		return KERN_SUCCESS;
	}
	return KERN_FAILURE;
}
#pragma mark **** ast ****
static kern_return_t chud_null_ast(thread_flavor_t flavor, thread_state_t tstate,
	mach_msg_type_number_t count);
static chudxnu_perfmon_ast_callback_func_t perfmon_ast_callback_fn = chud_null_ast;

static kern_return_t chud_null_ast(thread_flavor_t flavor __unused,
	thread_state_t tstate __unused, mach_msg_type_number_t count __unused) {
	return KERN_FAILURE;
}
static kern_return_t
chudxnu_private_chud_ast_callback(ast_t reasons, ast_t *myast)
{
	boolean_t	oldlevel = ml_set_interrupts_enabled(FALSE);
	kern_return_t	retval = KERN_FAILURE;
	chudxnu_perfmon_ast_callback_func_t fn = perfmon_ast_callback_fn;

	if (fn) {
		/* Only execute urgent callbacks if reasons specifies an urgent context. */
		if ((*myast & AST_CHUD_URGENT) && (reasons & (AST_URGENT | AST_CHUD_URGENT))) {
			*myast &= ~AST_CHUD_URGENT;

			/*
			 * If the only flag left is AST_URGENT, we can clear it; we know
			 * that we set it, but if there are also other bits set in reasons
			 * then someone else might still need AST_URGENT, so we'll leave it
			 * set. The normal machinery in ast_taken will ensure it gets
			 * cleared eventually, as necessary.
			 */
			if (AST_URGENT == *myast) {
				*myast &= ~AST_URGENT;
			}

			retval = KERN_SUCCESS;
		}

		/*
		 * Only execute non-urgent callbacks if reasons actually specifies
		 * AST_CHUD. This implies non-urgent callbacks since the only time this
		 * will happen is if someone either calls ast_taken with AST_CHUD
		 * explicitly (not done at time of writing, but possible) or with
		 * AST_ALL, which of course includes AST_CHUD.
		 */
		if ((*myast & AST_CHUD) && (reasons & AST_CHUD)) {
			*myast &= ~AST_CHUD;
			retval = KERN_SUCCESS;
		}

		if (KERN_SUCCESS == retval) {
			x86_thread_state_t state;
			mach_msg_type_number_t count = x86_THREAD_STATE_COUNT;
			thread_t thread = current_thread();

			/* fetch user-mode state only if the AST interrupted user code */
			if (KERN_SUCCESS == chudxnu_thread_get_state(thread,
						x86_THREAD_STATE,
						(thread_state_t)&state,
						&count,
						(thread->task != kernel_task))) {
				(fn)(x86_THREAD_STATE, (thread_state_t)&state, count);
			}
		}
	}

	ml_set_interrupts_enabled(oldlevel);
	return retval;
}
__private_extern__ kern_return_t
chudxnu_perfmon_ast_callback_enter(chudxnu_perfmon_ast_callback_func_t func)
{
	if (OSCompareAndSwapPtr(NULL, chudxnu_private_chud_ast_callback,
		(void * volatile *)&perfASTHook)) {

		chudxnu_perfmon_ast_callback_func_t old = perfmon_ast_callback_fn;
		while (!OSCompareAndSwapPtr(old, func,
			(void * volatile *)&perfmon_ast_callback_fn)) {
			old = perfmon_ast_callback_fn;
		}
		return KERN_SUCCESS;
	}
	return KERN_FAILURE;
}
__private_extern__ kern_return_t
chudxnu_perfmon_ast_callback_cancel(void)
{
	if (OSCompareAndSwapPtr(chudxnu_private_chud_ast_callback, NULL,
		(void * volatile *)&perfASTHook)) {

		chudxnu_perfmon_ast_callback_func_t old = perfmon_ast_callback_fn;
		while (!OSCompareAndSwapPtr(old, chud_null_ast,
			(void * volatile *)&perfmon_ast_callback_fn)) {
			old = perfmon_ast_callback_fn;
		}
		return KERN_SUCCESS;
	}
	return KERN_FAILURE;
}
__private_extern__ kern_return_t
chudxnu_perfmon_ast_send_urgent(boolean_t urgent)
{
	boolean_t oldlevel = ml_set_interrupts_enabled(FALSE);
	ast_t *myast = ast_pending();

	if (urgent) {
		*myast |= (AST_CHUD_URGENT | AST_URGENT);
	} else {
		*myast |= (AST_CHUD);
	}

	ml_set_interrupts_enabled(oldlevel);
	return KERN_SUCCESS;
}
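/*
 * Illustrative sketch (not part of the original file): the intended flow is
 * that interrupt-level code (e.g. a PMI) requests an AST, and the registered
 * perfmon AST callback then runs with the interrupted thread's state once the
 * AST is taken. The callback name and body here are hypothetical.
 */
#if 0
static kern_return_t
example_ast_cb(thread_flavor_t flavor, thread_state_t tstate,
	       mach_msg_type_number_t count)
{
#pragma unused (flavor, tstate, count)
	return KERN_SUCCESS;
}

static void
example_request_ast(void)
{
	chudxnu_perfmon_ast_callback_enter(example_ast_cb);
	/* TRUE => set AST_CHUD_URGENT|AST_URGENT so the AST is taken promptly */
	chudxnu_perfmon_ast_send_urgent(TRUE);
}
#endif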
#pragma mark **** interrupt ****
static kern_return_t chud_null_int(uint32_t trapentry, thread_flavor_t flavor,
	thread_state_t tstate, mach_msg_type_number_t count);
static chudxnu_interrupt_callback_func_t interrupt_callback_fn = chud_null_int;

static kern_return_t chud_null_int(uint32_t trapentry __unused, thread_flavor_t flavor __unused,
	thread_state_t tstate __unused, mach_msg_type_number_t count __unused) {
	return KERN_FAILURE;
}
static void
chudxnu_private_interrupt_callback(void *foo) __attribute__((used));

static void
chudxnu_private_interrupt_callback(void *foo)
{
#pragma unused (foo)
	chudxnu_interrupt_callback_func_t fn = interrupt_callback_fn;

	if (fn) {
		boolean_t		oldlevel;
		x86_thread_state_t	state;
		mach_msg_type_number_t	count;

		oldlevel = ml_set_interrupts_enabled(FALSE);

		count = x86_THREAD_STATE_COUNT;
		if (chudxnu_thread_get_state(current_thread(),
					     x86_THREAD_STATE,
					     (thread_state_t)&state,
					     &count,
					     FALSE) == KERN_SUCCESS) {
			(fn)(
				X86_INTERRUPT_PERFMON,
				x86_THREAD_STATE,
				(thread_state_t)&state,
				count);
		}
		ml_set_interrupts_enabled(oldlevel);
	}
}
__private_extern__ kern_return_t
chudxnu_interrupt_callback_enter(chudxnu_interrupt_callback_func_t func)
{
	if (OSCompareAndSwapPtr(chud_null_int, func,
		(void * volatile *)&interrupt_callback_fn)) {
		lapic_set_pmi_func((i386_intr_func_t)chudxnu_private_interrupt_callback);
		return KERN_SUCCESS;
	}
	return KERN_FAILURE;
}
__private_extern__ kern_return_t
chudxnu_interrupt_callback_cancel(void)
{
	chudxnu_interrupt_callback_func_t old = interrupt_callback_fn;

	while (!OSCompareAndSwapPtr(old, chud_null_int,
		(void * volatile *)&interrupt_callback_fn)) {
		old = interrupt_callback_fn;
	}

	lapic_set_pmi_func(NULL);
	return KERN_SUCCESS;
}
#pragma mark **** cpu signal ****
static chudxnu_cpusig_callback_func_t cpusig_callback_fn = NULL;
static kern_return_t
chudxnu_private_cpu_signal_handler(int request)
{
	chudxnu_cpusig_callback_func_t fn = cpusig_callback_fn;

	if (fn) {
		x86_thread_state_t state;
		mach_msg_type_number_t count = x86_THREAD_STATE_COUNT;

		if (chudxnu_thread_get_state(current_thread(),
					     x86_THREAD_STATE,
					     (thread_state_t) &state, &count,
					     FALSE) == KERN_SUCCESS) {
			return (fn)(
				request, x86_THREAD_STATE,
				(thread_state_t) &state, count);
		} else {
			return KERN_FAILURE;
		}
	}
	return KERN_SUCCESS; //ignored
}
/*
 * chudxnu_cpu_signal_handler() is called from the IPI handler
 * when a CHUD signal arrives from another processor.
 */
__private_extern__ void
chudxnu_cpu_signal_handler(void)
{
	chudcpu_signal_request_t	*reqp;
	chudcpu_data_t			*chudinfop;

	chudinfop = (chudcpu_data_t *) current_cpu_datap()->cpu_chud;

	mpdequeue_head(&(chudinfop->cpu_request_queue),
		       (queue_entry_t *) &reqp);
	while (reqp != NULL) {
		chudxnu_private_cpu_signal_handler(reqp->req_code);
		/* clear the sync flag so the sender's spin-wait completes */
		reqp->req_sync = 0;
		mpdequeue_head(&(chudinfop->cpu_request_queue),
			       (queue_entry_t *) &reqp);
	}
}
__private_extern__ kern_return_t
chudxnu_cpusig_callback_enter(chudxnu_cpusig_callback_func_t func)
{
	if (OSCompareAndSwapPtr(NULL, func,
		(void * volatile *)&cpusig_callback_fn)) {
		return KERN_SUCCESS;
	}
	return KERN_FAILURE;
}
__private_extern__ kern_return_t
chudxnu_cpusig_callback_cancel(void)
{
	chudxnu_cpusig_callback_func_t old = cpusig_callback_fn;

	while (!OSCompareAndSwapPtr(old, NULL,
		(void * volatile *)&cpusig_callback_fn)) {
		old = cpusig_callback_fn;
	}

	return KERN_SUCCESS;
}
__private_extern__ kern_return_t
chudxnu_cpusig_send(int otherCPU, uint32_t request_code)
{
	int				thisCPU;
	kern_return_t			retval = KERN_FAILURE;
	chudcpu_signal_request_t	request;
	uint64_t			deadline;
	chudcpu_data_t			*target_chudp;
	boolean_t			old_level;

	disable_preemption();
	// force interrupts on for a cross CPU signal.
	old_level = chudxnu_set_interrupts_enabled(TRUE);
	thisCPU = cpu_number();

	if ((unsigned) otherCPU < real_ncpus &&
	    thisCPU != otherCPU &&
	    cpu_data_ptr[otherCPU]->cpu_running) {

		target_chudp = (chudcpu_data_t *)
					cpu_data_ptr[otherCPU]->cpu_chud;

		/* Fill out request */
		request.req_sync = 0xFFFFFFFF;		/* set sync flag */
		//request.req_type = CPRQchud;		/* set request type */
		request.req_code = request_code;	/* set request */

		/*
		 * Insert the new request in the target cpu's request queue
		 * and signal target cpu.
		 */
		mpenqueue_tail(&target_chudp->cpu_request_queue,
			       &request.req_entry);
		i386_signal_cpu(otherCPU, MP_CHUD, ASYNC);

		/* Wait for response or timeout */
		deadline = mach_absolute_time() + LockTimeOut;
		while (request.req_sync != 0) {
			if (mach_absolute_time() > deadline) {
				panic("chudxnu_cpusig_send(%d,%d) timed out\n",
					otherCPU, request_code);
			}
			cpu_pause();
		}
		retval = KERN_SUCCESS;
	} else {
		retval = KERN_INVALID_ARGUMENT;
	}

	chudxnu_set_interrupts_enabled(old_level);
	enable_preemption();
	return retval;
}
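/*
 * Illustrative sketch (not part of the original file): sending a hypothetical
 * request code to every other running CPU. EXAMPLE_REQ and the callback are
 * assumptions for illustration; the enter/send entry points and the
 * cpu_data_ptr[] checks mirror the code above.
 */
#if 0
#define EXAMPLE_REQ	0x1	/* hypothetical request code */

static kern_return_t
example_cpusig_cb(int request, thread_flavor_t flavor,
		  thread_state_t tstate, mach_msg_type_number_t count)
{
#pragma unused (request, flavor, tstate, count)
	return KERN_SUCCESS;
}

static void
example_signal_all_other_cpus(void)
{
	unsigned int cpu;

	chudxnu_cpusig_callback_enter(example_cpusig_cb);
	for (cpu = 0; cpu < real_ncpus; cpu++) {
		if (cpu != (unsigned int) cpu_number() &&
		    cpu_data_ptr[cpu]->cpu_running) {
			chudxnu_cpusig_send((int) cpu, EXAMPLE_REQ);
		}
	}
}
#endif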