/*
 * Copyright (c) 2003-2009 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <stdint.h>

#include <mach/boolean.h>
#include <mach/mach_types.h>

#include <kern/kern_types.h>
#include <kern/processor.h>
#include <kern/timer_call.h>
#include <kern/thread_call.h>
#include <kern/kalloc.h>
#include <kern/thread.h>

#include <libkern/OSAtomic.h>

#include <machine/machine_routines.h>
#include <machine/cpu_data.h>
#include <machine/trap.h>

#include <chud/chud_xnu.h>
#include <chud/chud_xnu_private.h>

#include <i386/misc_protos.h>
#include <i386/lapic.h>
#include <i386/mp.h>
#include <i386/machine_cpu.h>

#include <sys/kdebug.h>

#define CHUD_TIMER_CALLBACK_CANCEL	0
#define CHUD_TIMER_CALLBACK_ENTER	1
#define CHUD_TIMER_CALLBACK		2
#define CHUD_AST_SEND			3
#define CHUD_AST_CALLBACK		4
#define CHUD_CPUSIG_SEND		5
#define CHUD_CPUSIG_CALLBACK		6
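/*
 * The constants above are event codes for kdebug tracing of the CHUD
 * callback paths (see sys/kdebug.h).  A hypothetical trace point, for
 * illustration only, would look like:
 *
 *	KERNEL_DEBUG_CONSTANT(
 *		MACHDBG_CODE(DBG_MACH_CHUD, CHUD_TIMER_CALLBACK) | DBG_FUNC_NONE,
 *		0, 0, 0, 0, 0);
 */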
__private_extern__ void
chudxnu_cancel_all_callbacks(void)
{
	chudxnu_cpusig_callback_cancel();
	chudxnu_cpu_timer_callback_cancel_all();
	chudxnu_interrupt_callback_cancel();
	chudxnu_perfmon_ast_callback_cancel();
	chudxnu_kdebug_callback_cancel();
	chudxnu_trap_callback_cancel();
	chudxnu_syscall_callback_cancel();
	chudxnu_dtrace_callback_cancel();
}
static chudcpu_data_t chudcpu_boot_cpu;

void *
chudxnu_cpu_alloc(boolean_t boot_processor)
{
	chudcpu_data_t	*chud_proc_info;

	if (boot_processor) {
		chud_proc_info = &chudcpu_boot_cpu;
	} else {
		chud_proc_info = (chudcpu_data_t *)
			kalloc(sizeof(chudcpu_data_t));
		if (chud_proc_info == (chudcpu_data_t *)NULL) {
			return (void *)NULL;
		}
	}

	bzero((char *)chud_proc_info, sizeof(chudcpu_data_t));
	chud_proc_info->t_deadline = 0xFFFFFFFFFFFFFFFFULL;
	mpqueue_init(&chud_proc_info->cpu_request_queue);

	return (void *)chud_proc_info;
}
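/*
 * The boot processor is served from the static chudcpu_boot_cpu above,
 * presumably because this path can run before the kalloc zones are
 * usable; secondary CPUs get their chudcpu_data_t from kalloc().
 */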
void
chudxnu_cpu_free(void *cp)
{
	if (cp == NULL || cp == (void *)&chudcpu_boot_cpu) {
		return;
	} else {
		kfree(cp, sizeof(chudcpu_data_t));
	}
}
static void
chudxnu_private_cpu_timer_callback(
	timer_call_param_t param0,
	timer_call_param_t param1)
{
#pragma unused (param0)
#pragma unused (param1)
	chudcpu_data_t			*chud_proc_info;
	boolean_t			oldlevel;
	x86_thread_state_t		state;
	mach_msg_type_number_t		count;
	chudxnu_cpu_timer_callback_func_t fn;

	oldlevel = ml_set_interrupts_enabled(FALSE);
	chud_proc_info = (chudcpu_data_t *)(current_cpu_datap()->cpu_chud);

	count = x86_THREAD_STATE_COUNT;
	if (chudxnu_thread_get_state(current_thread(),
				     x86_THREAD_STATE,
				     (thread_state_t)&state,
				     &count,
				     FALSE) == KERN_SUCCESS) {
		fn = chud_proc_info->cpu_timer_callback_fn;
		if (fn) {
			(fn)(
				x86_THREAD_STATE,
				(thread_state_t)&state,
				count);
		}
	}

	ml_set_interrupts_enabled(oldlevel);
}
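/*
 * The timer callback runs with interrupts disabled and hands the
 * sampled thread state to the per-CPU client function.  timer_call
 * deadlines are one-shot, so a client wanting periodic samples must
 * re-arm from its callback via chudxnu_cpu_timer_callback_enter().
 */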
__private_extern__ kern_return_t
chudxnu_cpu_timer_callback_enter(
	chudxnu_cpu_timer_callback_func_t func,
	uint32_t time,
	uint32_t units)
{
	chudcpu_data_t	*chud_proc_info;
	boolean_t	oldlevel;

	oldlevel = ml_set_interrupts_enabled(FALSE);
	chud_proc_info = (chudcpu_data_t *)(current_cpu_datap()->cpu_chud);

	// cancel any existing callback for this cpu
	timer_call_cancel(&(chud_proc_info->cpu_timer_call));

	chud_proc_info->cpu_timer_callback_fn = func;

	clock_interval_to_deadline(time, units, &(chud_proc_info->t_deadline));
	timer_call_setup(&(chud_proc_info->cpu_timer_call),
			 chudxnu_private_cpu_timer_callback, NULL);
	timer_call_enter(&(chud_proc_info->cpu_timer_call),
			 chud_proc_info->t_deadline);

	ml_set_interrupts_enabled(oldlevel);
	return KERN_SUCCESS;
}
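/*
 * Illustrative (hypothetical) client usage -- sample the current CPU
 * once, 10ms from now; my_timer_handler is a client-supplied function
 * matching chudxnu_cpu_timer_callback_func_t:
 *
 *	static kern_return_t
 *	my_timer_handler(thread_flavor_t flavor, thread_state_t tstate,
 *			 mach_msg_type_number_t count);
 *
 *	chudxnu_cpu_timer_callback_enter(my_timer_handler,
 *					 10, 1000000 /* ns per unit *\/);
 */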
__private_extern__ kern_return_t
chudxnu_cpu_timer_callback_cancel(void)
{
	chudcpu_data_t	*chud_proc_info;
	boolean_t	oldlevel;

	oldlevel = ml_set_interrupts_enabled(FALSE);
	chud_proc_info = (chudcpu_data_t *)(current_cpu_datap()->cpu_chud);

	timer_call_cancel(&(chud_proc_info->cpu_timer_call));

	// set t_deadline to the max value:
	chud_proc_info->t_deadline |= ~(chud_proc_info->t_deadline);
	chud_proc_info->cpu_timer_callback_fn = NULL;

	ml_set_interrupts_enabled(oldlevel);
	return KERN_SUCCESS;
}
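/*
 * Note the idiom above: `x |= ~x' sets every bit of x, parking
 * t_deadline at its maximum (i.e. never-expiring) value without
 * hard-coding the width of the type.
 */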
__private_extern__ kern_return_t
chudxnu_cpu_timer_callback_cancel_all(void)
{
	unsigned int	cpu;
	chudcpu_data_t	*chud_proc_info;

	for (cpu = 0; cpu < real_ncpus; cpu++) {
		chud_proc_info = (chudcpu_data_t *) cpu_data_ptr[cpu]->cpu_chud;
		if (chud_proc_info == NULL)
			continue;
		timer_call_cancel(&(chud_proc_info->cpu_timer_call));
		chud_proc_info->t_deadline |= ~(chud_proc_info->t_deadline);
		chud_proc_info->cpu_timer_callback_fn = NULL;
	}
	return KERN_SUCCESS;
}
#pragma mark **** trap ****
static kern_return_t chud_null_trap(uint32_t trapentry, thread_flavor_t flavor,
	thread_state_t tstate, mach_msg_type_number_t count);
static chudxnu_trap_callback_func_t trap_callback_fn = chud_null_trap;

static kern_return_t chud_null_trap(uint32_t trapentry __unused,
	thread_flavor_t flavor __unused,
	thread_state_t tstate __unused,
	mach_msg_type_number_t count __unused)
{
	return KERN_FAILURE;
}
static kern_return_t
chudxnu_private_trap_callback(
	int		trapno,
	void		*regs,
	int		unused1,
	int		unused2)
{
#pragma unused (regs)
#pragma unused (unused1)
#pragma unused (unused2)
	kern_return_t retval = KERN_FAILURE;
	chudxnu_trap_callback_func_t fn = trap_callback_fn;

	if (fn) {
		boolean_t		oldlevel;
		x86_thread_state_t	state;
		mach_msg_type_number_t	count;
		thread_t thread = current_thread();

		oldlevel = ml_set_interrupts_enabled(FALSE);

		/* prevent reentry into CHUD when dtracing */
		if (thread->t_chud & T_IN_CHUD) {
			/* restore interrupts */
			ml_set_interrupts_enabled(oldlevel);

			return KERN_FAILURE;	// not handled - pass off to dtrace
		}

		/* update the chud state bits */
		thread->t_chud |= T_IN_CHUD;

		count = x86_THREAD_STATE_COUNT;

		if (chudxnu_thread_get_state(thread,
					     x86_THREAD_STATE,
					     (thread_state_t)&state,
					     &count,
					     FALSE) == KERN_SUCCESS) {
			retval = (fn)(
					trapno,
					x86_THREAD_STATE,
					(thread_state_t)&state,
					count);
		}

		/* no longer in CHUD */
		thread->t_chud &= ~(T_IN_CHUD);

		ml_set_interrupts_enabled(oldlevel);
	}

	return retval;
}
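/*
 * The T_IN_CHUD bit above is a per-thread reentrancy guard: a trap
 * taken while a CHUD handler is already running on this thread (e.g.
 * with dtrace active) is reported as unhandled so the normal trap
 * path, including dtrace, still gets to see it.
 */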
__private_extern__ kern_return_t
chudxnu_trap_callback_enter(chudxnu_trap_callback_func_t func)
{
	if (OSCompareAndSwapPtr(NULL, chudxnu_private_trap_callback,
		(void * volatile *)&perfTrapHook)) {

		chudxnu_trap_callback_func_t old = trap_callback_fn;
		while (!OSCompareAndSwapPtr(old, func,
			(void * volatile *)&trap_callback_fn)) {
			old = trap_callback_fn;
		}
		return KERN_SUCCESS;
	}
	return KERN_FAILURE;
}
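/*
 * Registration is two atomic steps: claim the kernel's global
 * perfTrapHook (so only one client can own the trap path), then
 * CAS-loop the private trap_callback_fn pointer over to the client
 * function.  chudxnu_trap_callback_cancel() below undoes both in the
 * same order.
 */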
__private_extern__ kern_return_t
chudxnu_trap_callback_cancel(void)
{
	if (OSCompareAndSwapPtr(chudxnu_private_trap_callback, NULL,
		(void * volatile *)&perfTrapHook)) {

		chudxnu_trap_callback_func_t old = trap_callback_fn;
		while (!OSCompareAndSwapPtr(old, chud_null_trap,
			(void * volatile *)&trap_callback_fn)) {
			old = trap_callback_fn;
		}
		return KERN_SUCCESS;
	}
	return KERN_FAILURE;
}
#pragma mark **** ast ****
static kern_return_t chud_null_ast(thread_flavor_t flavor, thread_state_t tstate,
	mach_msg_type_number_t count);
static chudxnu_perfmon_ast_callback_func_t perfmon_ast_callback_fn = chud_null_ast;

static kern_return_t chud_null_ast(thread_flavor_t flavor __unused,
	thread_state_t tstate __unused, mach_msg_type_number_t count __unused)
{
	return KERN_FAILURE;
}
static kern_return_t
chudxnu_private_chud_ast_callback(
	int		trapno,
	void		*regs,
	int		unused1,
	int		unused2)
{
#pragma unused (trapno)
#pragma unused (regs)
#pragma unused (unused1)
#pragma unused (unused2)
	boolean_t	oldlevel = ml_set_interrupts_enabled(FALSE);
	ast_t		*myast = ast_pending();
	kern_return_t	retval = KERN_FAILURE;
	chudxnu_perfmon_ast_callback_func_t fn = perfmon_ast_callback_fn;

	if (*myast & AST_CHUD_URGENT) {
		*myast &= ~(AST_CHUD_URGENT | AST_CHUD);
		if ((*myast & AST_PREEMPTION) != AST_PREEMPTION)
			*myast &= ~(AST_URGENT);
		retval = KERN_SUCCESS;
	} else if (*myast & AST_CHUD) {
		*myast &= ~(AST_CHUD);
		retval = KERN_SUCCESS;
	}

	if (fn) {
		x86_thread_state_t state;
		mach_msg_type_number_t count;
		count = x86_THREAD_STATE_COUNT;

		if (chudxnu_thread_get_state(
			current_thread(),
			x86_THREAD_STATE,
			(thread_state_t) &state, &count,
			TRUE) == KERN_SUCCESS) {

			(fn)(
				x86_THREAD_STATE,
				(thread_state_t) &state,
				count);
		}
	}

	ml_set_interrupts_enabled(oldlevel);
	return retval;
}
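/*
 * The AST bits are consumed here: AST_URGENT is cleared only when no
 * preemption AST is pending, so handling an urgent CHUD sample cannot
 * swallow an outstanding urgent preemption request.  Unlike the trap
 * and interrupt paths, the thread state is fetched with TRUE (user
 * state only), since ASTs are processed on the way back out to user
 * space.
 */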
__private_extern__ kern_return_t
chudxnu_perfmon_ast_callback_enter(chudxnu_perfmon_ast_callback_func_t func)
{
	if (OSCompareAndSwapPtr(NULL, chudxnu_private_chud_ast_callback,
		(void * volatile *)&perfASTHook)) {
		chudxnu_perfmon_ast_callback_func_t old = perfmon_ast_callback_fn;

		while (!OSCompareAndSwapPtr(old, func,
			(void * volatile *)&perfmon_ast_callback_fn)) {
			old = perfmon_ast_callback_fn;
		}
		return KERN_SUCCESS;
	}
	return KERN_FAILURE;
}
__private_extern__ kern_return_t
chudxnu_perfmon_ast_callback_cancel(void)
{
	if (OSCompareAndSwapPtr(chudxnu_private_chud_ast_callback, NULL,
		(void * volatile *)&perfASTHook)) {
		chudxnu_perfmon_ast_callback_func_t old = perfmon_ast_callback_fn;

		while (!OSCompareAndSwapPtr(old, chud_null_ast,
			(void * volatile *)&perfmon_ast_callback_fn)) {
			old = perfmon_ast_callback_fn;
		}
		return KERN_SUCCESS;
	}
	return KERN_FAILURE;
}
__private_extern__ kern_return_t
chudxnu_perfmon_ast_send_urgent(boolean_t urgent)
{
	boolean_t oldlevel = ml_set_interrupts_enabled(FALSE);
	ast_t *myast = ast_pending();

	if (urgent) {
		*myast |= (AST_CHUD_URGENT | AST_URGENT);
	} else {
		*myast |= (AST_CHUD);
	}

	ml_set_interrupts_enabled(oldlevel);
	return KERN_SUCCESS;
}
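/*
 * Setting the AST bits only marks work pending for the current
 * processor; chudxnu_private_chud_ast_callback() above runs when the
 * thread next processes its ASTs.  The urgent variant adds AST_URGENT
 * so that happens at the earliest opportunity.
 */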
#pragma mark **** interrupt ****
static kern_return_t chud_null_int(uint32_t trapentry, thread_flavor_t flavor,
	thread_state_t tstate, mach_msg_type_number_t count);
static chudxnu_interrupt_callback_func_t interrupt_callback_fn = chud_null_int;

static kern_return_t chud_null_int(uint32_t trapentry __unused,
	thread_flavor_t flavor __unused,
	thread_state_t tstate __unused,
	mach_msg_type_number_t count __unused)
{
	return KERN_FAILURE;
}
static void
chudxnu_private_interrupt_callback(void *foo)
{
#pragma unused (foo)
	chudxnu_interrupt_callback_func_t fn = interrupt_callback_fn;

	if (fn) {
		boolean_t		oldlevel;
		x86_thread_state_t	state;
		mach_msg_type_number_t	count;

		oldlevel = ml_set_interrupts_enabled(FALSE);

		count = x86_THREAD_STATE_COUNT;
		if (chudxnu_thread_get_state(current_thread(),
					     x86_THREAD_STATE,
					     (thread_state_t)&state,
					     &count,
					     FALSE) == KERN_SUCCESS) {
			(fn)(
				X86_INTERRUPT_PERFMON,
				x86_THREAD_STATE,
				(thread_state_t)&state,
				count);
		}
		ml_set_interrupts_enabled(oldlevel);
	}
}
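/*
 * This handler is wired to the local APIC performance-monitor
 * interrupt (PMI) via lapic_set_pmi_func() below, which is why the
 * client callback is invoked with X86_INTERRUPT_PERFMON: it fires on
 * performance-counter overflow.
 */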
__private_extern__ kern_return_t
chudxnu_interrupt_callback_enter(chudxnu_interrupt_callback_func_t func)
{
	if (OSCompareAndSwapPtr(chud_null_int, func,
		(void * volatile *)&interrupt_callback_fn)) {
		lapic_set_pmi_func((i386_intr_func_t)chudxnu_private_interrupt_callback);
		return KERN_SUCCESS;
	}
	return KERN_FAILURE;
}
__private_extern__ kern_return_t
chudxnu_interrupt_callback_cancel(void)
{
	chudxnu_interrupt_callback_func_t old = interrupt_callback_fn;

	while (!OSCompareAndSwapPtr(old, chud_null_int,
		(void * volatile *)&interrupt_callback_fn)) {
		old = interrupt_callback_fn;
	}

	lapic_set_pmi_func(NULL);
	return KERN_SUCCESS;
}
#pragma mark **** cpu signal ****
static chudxnu_cpusig_callback_func_t cpusig_callback_fn = NULL;

static kern_return_t
chudxnu_private_cpu_signal_handler(int request)
{
	chudxnu_cpusig_callback_func_t fn = cpusig_callback_fn;

	if (fn) {
		x86_thread_state_t state;
		mach_msg_type_number_t count = x86_THREAD_STATE_COUNT;

		if (chudxnu_thread_get_state(current_thread(),
					     x86_THREAD_STATE,
					     (thread_state_t) &state, &count,
					     FALSE) == KERN_SUCCESS) {
			return (fn)(
					request, x86_THREAD_STATE,
					(thread_state_t) &state, count);
		} else {
			return KERN_FAILURE;
		}
	}
	return KERN_SUCCESS; //ignored
}
/*
 * chudxnu_cpu_signal_handler() is called from the IPI handler
 * when a CHUD signal arrives from another processor.
 */
__private_extern__ void
chudxnu_cpu_signal_handler(void)
{
	chudcpu_signal_request_t	*reqp;
	chudcpu_data_t			*chudinfop;

	chudinfop = (chudcpu_data_t *) current_cpu_datap()->cpu_chud;

	mpdequeue_head(&(chudinfop->cpu_request_queue),
		       (queue_entry_t *) &reqp);
	while (reqp != NULL) {
		chudxnu_private_cpu_signal_handler(reqp->req_code);
		reqp->req_sync = 0;	/* Mark request as complete */
		mpdequeue_head(&(chudinfop->cpu_request_queue),
			       (queue_entry_t *) &reqp);
	}
}
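/*
 * Draining the queue (rather than handling one request per IPI) lets a
 * single interrupt service requests from several senders.  Clearing
 * req_sync is the completion handshake: it releases the sender
 * spinning in chudxnu_cpusig_send() below.
 */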
__private_extern__ kern_return_t
chudxnu_cpusig_callback_enter(chudxnu_cpusig_callback_func_t func)
{
	if (OSCompareAndSwapPtr(NULL, func,
		(void * volatile *)&cpusig_callback_fn)) {
		return KERN_SUCCESS;
	}
	return KERN_FAILURE;
}
__private_extern__ kern_return_t
chudxnu_cpusig_callback_cancel(void)
{
	chudxnu_cpusig_callback_func_t old = cpusig_callback_fn;

	while (!OSCompareAndSwapPtr(old, NULL,
		(void * volatile *)&cpusig_callback_fn)) {
		old = cpusig_callback_fn;
	}

	return KERN_SUCCESS;
}
__private_extern__ kern_return_t
chudxnu_cpusig_send(int otherCPU, uint32_t request_code)
{
	int				thisCPU;
	kern_return_t			retval = KERN_FAILURE;
	chudcpu_signal_request_t	request;
	uint64_t			deadline;
	chudcpu_data_t			*target_chudp;
	boolean_t			old_level;

	disable_preemption();
	// force interrupts on for a cross CPU signal.
	old_level = chudxnu_set_interrupts_enabled(TRUE);
	thisCPU = cpu_number();

	if ((unsigned) otherCPU < real_ncpus &&
	    thisCPU != otherCPU &&
	    cpu_data_ptr[otherCPU]->cpu_running) {

		target_chudp = (chudcpu_data_t *)
					cpu_data_ptr[otherCPU]->cpu_chud;

		/* Fill out request */
		request.req_sync = 0xFFFFFFFF;		/* set sync flag */
		//request.req_type = CPRQchud;		/* set request type */
		request.req_code = request_code;	/* set request */

		/*
		 * Insert the new request in the target cpu's request queue
		 * and signal target cpu.
		 */
		mpenqueue_tail(&target_chudp->cpu_request_queue,
			       &request.req_entry);
		i386_signal_cpu(otherCPU, MP_CHUD, ASYNC);

		/* Wait for response or timeout */
		deadline = mach_absolute_time() + LockTimeOut;
		while (request.req_sync != 0) {
			if (mach_absolute_time() > deadline) {
				panic("chudxnu_cpusig_send(%d,%d) timed out\n",
					otherCPU, request_code);
			}
		}
		retval = KERN_SUCCESS;
	} else {
		retval = KERN_INVALID_ARGUMENT;
	}

	chudxnu_set_interrupts_enabled(old_level);
	enable_preemption();
	return retval;
}
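/*
 * Illustrative (hypothetical) usage -- ask CPU 1 to run the registered
 * cpusig callback with a client-defined request code:
 *
 *	#define MY_REQ_SAMPLE	1	// hypothetical client request code
 *
 *	kern_return_t kr = chudxnu_cpusig_send(1, MY_REQ_SAMPLE);
 *	if (kr == KERN_INVALID_ARGUMENT) {
 *		// otherCPU out of range, not running, or the caller's CPU
 *	}
 *
 * The call is synchronous: it spins with interrupts forced on until
 * the target clears req_sync, and panics if LockTimeOut expires first.
 */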