/*
 * Copyright (c) 2003-2009 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <stdint.h>
#include <mach/boolean.h>
#include <mach/mach_types.h>

#include <kern/kern_types.h>
#include <kern/processor.h>
#include <kern/timer_call.h>
#include <kern/thread_call.h>
#include <kern/kalloc.h>
#include <kern/thread.h>

#include <libkern/OSAtomic.h>

#include <machine/machine_routines.h>
#include <machine/cpu_data.h>
#include <machine/trap.h>

#include <chud/chud_xnu.h>
#include <chud/chud_xnu_private.h>

#include <i386/misc_protos.h>
#include <i386/lapic.h>
#include <i386/mp.h>
#include <i386/machine_cpu.h>

#include <sys/kdebug.h>

#define CHUD_TIMER_CALLBACK_CANCEL 0
#define CHUD_TIMER_CALLBACK_ENTER 1
#define CHUD_TIMER_CALLBACK 2
#define CHUD_AST_SEND 3
#define CHUD_AST_CALLBACK 4
#define CHUD_CPUSIG_SEND 5
#define CHUD_CPUSIG_CALLBACK 6

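/*
 * Tear down every registered CHUD callback: cpu signals, per-cpu
 * timers, PMIs, perfmon ASTs, kdebug, traps, syscalls, and dtrace.
 */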
__private_extern__
void chudxnu_cancel_all_callbacks(void)
{
	chudxnu_cpusig_callback_cancel();
	chudxnu_cpu_timer_callback_cancel_all();
	chudxnu_interrupt_callback_cancel();
	chudxnu_perfmon_ast_callback_cancel();
	chudxnu_kdebug_callback_cancel();
	chudxnu_trap_callback_cancel();
	chudxnu_syscall_callback_cancel();
	chudxnu_dtrace_callback_cancel();
}

static lck_grp_t	chud_request_lck_grp;
static lck_grp_attr_t	chud_request_lck_grp_attr;
static lck_attr_t	chud_request_lck_attr;

static chudcpu_data_t	chudcpu_boot_cpu;
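/*
 * Allocate per-cpu CHUD state. The boot processor uses the statically
 * allocated chudcpu_boot_cpu and also initializes the lock group shared
 * by all cpu request queues; later cpus are kalloc'ed.
 */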
void *
chudxnu_cpu_alloc(boolean_t boot_processor)
{
	chudcpu_data_t	*chud_proc_info;

	if (boot_processor) {
		chud_proc_info = &chudcpu_boot_cpu;

		lck_attr_setdefault(&chud_request_lck_attr);
		lck_grp_attr_setdefault(&chud_request_lck_grp_attr);
		lck_grp_init(&chud_request_lck_grp, "chud_request", &chud_request_lck_grp_attr);

	} else {
		chud_proc_info = (chudcpu_data_t *)
			kalloc(sizeof(chudcpu_data_t));
		if (chud_proc_info == (chudcpu_data_t *)NULL) {
			return (void *)NULL;
		}
	}
	bzero((char *)chud_proc_info, sizeof(chudcpu_data_t));
	chud_proc_info->t_deadline = 0xFFFFFFFFFFFFFFFFULL;

	mpqueue_init(&chud_proc_info->cpu_request_queue, &chud_request_lck_grp, &chud_request_lck_attr);

	return (void *)chud_proc_info;
}

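/*
 * Release per-cpu CHUD state. The statically allocated boot-cpu
 * structure is never freed.
 */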
void
chudxnu_cpu_free(void *cp)
{
	if (cp == NULL || cp == (void *)&chudcpu_boot_cpu) {
		return;
	} else {
		kfree(cp, sizeof(chudcpu_data_t));
	}
}

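/*
 * Timer expiration handler: with interrupts disabled, snapshot the
 * current thread's register state and hand it to the per-cpu timer
 * callback, if one is registered.
 */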
static void
chudxnu_private_cpu_timer_callback(
	timer_call_param_t param0,
	timer_call_param_t param1)
{
#pragma unused (param0)
#pragma unused (param1)
	chudcpu_data_t			*chud_proc_info;
	boolean_t			oldlevel;
	x86_thread_state_t		state;
	mach_msg_type_number_t		count;
	chudxnu_cpu_timer_callback_func_t fn;

	oldlevel = ml_set_interrupts_enabled(FALSE);
	chud_proc_info = (chudcpu_data_t *)(current_cpu_datap()->cpu_chud);

	count = x86_THREAD_STATE_COUNT;
	if (chudxnu_thread_get_state(current_thread(),
				     x86_THREAD_STATE,
				     (thread_state_t)&state,
				     &count,
				     FALSE) == KERN_SUCCESS) {
		fn = chud_proc_info->cpu_timer_callback_fn;
		if (fn) {
			(fn)(x86_THREAD_STATE,
			     (thread_state_t)&state,
			     count);
		}
	}

	ml_set_interrupts_enabled(oldlevel);
}

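/*
 * Arm (or re-arm) the one-shot per-cpu timer on the current cpu,
 * replacing any callback already pending there. `time` is scaled by
 * `units` via clock_interval_to_deadline(). Hypothetical usage sketch
 * (my_timer_cb is an illustrative callback, not part of this file):
 *
 *	chudxnu_cpu_timer_callback_enter(my_timer_cb, 100, NSEC_PER_USEC);
 */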
__private_extern__ kern_return_t
chudxnu_cpu_timer_callback_enter(
	chudxnu_cpu_timer_callback_func_t func,
	uint32_t time,
	uint32_t units)
{
	chudcpu_data_t	*chud_proc_info;
	boolean_t	oldlevel;

	oldlevel = ml_set_interrupts_enabled(FALSE);
	chud_proc_info = (chudcpu_data_t *)(current_cpu_datap()->cpu_chud);

	// cancel any existing callback for this cpu
	timer_call_cancel(&(chud_proc_info->cpu_timer_call));

	chud_proc_info->cpu_timer_callback_fn = func;

	clock_interval_to_deadline(time, units, &(chud_proc_info->t_deadline));
	timer_call_setup(&(chud_proc_info->cpu_timer_call),
			 chudxnu_private_cpu_timer_callback, NULL);
	timer_call_enter(&(chud_proc_info->cpu_timer_call),
			 chud_proc_info->t_deadline,
			 TIMER_CALL_CRITICAL|TIMER_CALL_LOCAL);

	ml_set_interrupts_enabled(oldlevel);
	return KERN_SUCCESS;
}

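/*
 * Cancel any pending timer callback on the current cpu and push its
 * deadline back out to the maximum value.
 */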
__private_extern__ kern_return_t
chudxnu_cpu_timer_callback_cancel(void)
{
	chudcpu_data_t	*chud_proc_info;
	boolean_t	oldlevel;

	oldlevel = ml_set_interrupts_enabled(FALSE);
	chud_proc_info = (chudcpu_data_t *)(current_cpu_datap()->cpu_chud);

	timer_call_cancel(&(chud_proc_info->cpu_timer_call));

	// set deadline to max value (x |= ~x sets all bits):
	chud_proc_info->t_deadline |= ~(chud_proc_info->t_deadline);
	chud_proc_info->cpu_timer_callback_fn = NULL;

	ml_set_interrupts_enabled(oldlevel);
	return KERN_SUCCESS;
}

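/*
 * Cancel pending timer callbacks on every cpu that has CHUD state.
 */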
__private_extern__ kern_return_t
chudxnu_cpu_timer_callback_cancel_all(void)
{
	unsigned int	cpu;
	chudcpu_data_t	*chud_proc_info;

	for (cpu = 0; cpu < real_ncpus; cpu++) {
		chud_proc_info = (chudcpu_data_t *) cpu_data_ptr[cpu]->cpu_chud;
		if (chud_proc_info == NULL)
			continue;
		timer_call_cancel(&(chud_proc_info->cpu_timer_call));
		chud_proc_info->t_deadline |= ~(chud_proc_info->t_deadline);
		chud_proc_info->cpu_timer_callback_fn = NULL;
	}
	return KERN_SUCCESS;
}

#if 0
#pragma mark **** trap ****
#endif
static kern_return_t chud_null_trap(uint32_t trapentry, thread_flavor_t flavor,
	thread_state_t tstate, mach_msg_type_number_t count);
static chudxnu_trap_callback_func_t trap_callback_fn = chud_null_trap;

static kern_return_t chud_null_trap(uint32_t trapentry __unused, thread_flavor_t flavor __unused,
	thread_state_t tstate __unused, mach_msg_type_number_t count __unused) {
	return KERN_FAILURE;
}

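/*
 * Trap hook, invoked via perfTrapHook. Returns KERN_SUCCESS if the
 * registered callback handled the trap, KERN_FAILURE to pass it along
 * (e.g. to dtrace). The T_IN_CHUD thread flag prevents reentry.
 */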
static kern_return_t
chudxnu_private_trap_callback(
	int trapno,
	void *regs,
	int unused1,
	int unused2)
{
#pragma unused (regs)
#pragma unused (unused1)
#pragma unused (unused2)
	kern_return_t retval = KERN_FAILURE;
	chudxnu_trap_callback_func_t fn = trap_callback_fn;

	if (fn) {
		boolean_t oldlevel;
		x86_thread_state_t state;
		mach_msg_type_number_t count;
		thread_t thread = current_thread();

		oldlevel = ml_set_interrupts_enabled(FALSE);

		/* prevent reentry into CHUD when dtracing */
		if (thread->t_chud & T_IN_CHUD) {
			/* restore interrupts */
			ml_set_interrupts_enabled(oldlevel);

			return KERN_FAILURE; // not handled - pass off to dtrace
		}

		/* update the chud state bits */
		thread->t_chud |= T_IN_CHUD;

		count = x86_THREAD_STATE_COUNT;

		if (chudxnu_thread_get_state(thread,
					     x86_THREAD_STATE,
					     (thread_state_t)&state,
					     &count,
					     FALSE) == KERN_SUCCESS) {

			retval = (fn)(trapno,
				      x86_THREAD_STATE,
				      (thread_state_t)&state,
				      count);
		}

		/* no longer in CHUD */
		thread->t_chud &= ~(T_IN_CHUD);

		ml_set_interrupts_enabled(oldlevel);
	}

	return retval;
}

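/*
 * Install a trap callback: atomically claim the global perfTrapHook,
 * then swap the new function into trap_callback_fn with a CAS loop.
 * Fails if another hook is already installed.
 */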
__private_extern__ kern_return_t
chudxnu_trap_callback_enter(chudxnu_trap_callback_func_t func)
{
	if (OSCompareAndSwapPtr(NULL, chudxnu_private_trap_callback,
		(void * volatile *)&perfTrapHook)) {

		chudxnu_trap_callback_func_t old = trap_callback_fn;
		while (!OSCompareAndSwapPtr(old, func,
			(void * volatile *)&trap_callback_fn)) {
			old = trap_callback_fn;
		}
		return KERN_SUCCESS;
	}
	return KERN_FAILURE;
}

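/*
 * Remove the trap callback: unhook perfTrapHook and restore the null
 * handler. Fails if our hook was not the one installed.
 */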
__private_extern__ kern_return_t
chudxnu_trap_callback_cancel(void)
{
	if (OSCompareAndSwapPtr(chudxnu_private_trap_callback, NULL,
		(void * volatile *)&perfTrapHook)) {

		chudxnu_trap_callback_func_t old = trap_callback_fn;
		while (!OSCompareAndSwapPtr(old, chud_null_trap,
			(void * volatile *)&trap_callback_fn)) {
			old = trap_callback_fn;
		}
		return KERN_SUCCESS;
	}
	return KERN_FAILURE;
}

#if 0
#pragma mark **** ast ****
#endif
static kern_return_t chud_null_ast(thread_flavor_t flavor, thread_state_t tstate,
	mach_msg_type_number_t count);
static chudxnu_perfmon_ast_callback_func_t perfmon_ast_callback_fn = chud_null_ast;

static kern_return_t chud_null_ast(thread_flavor_t flavor __unused,
	thread_state_t tstate __unused, mach_msg_type_number_t count __unused) {
	return KERN_FAILURE;
}

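/*
 * AST hook, invoked via perfASTHook when an AST is taken. Consumes the
 * CHUD AST bits it owns and, if one fired, delivers the current
 * thread's state to the registered perfmon callback.
 */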
static kern_return_t
chudxnu_private_chud_ast_callback(ast_t reasons, ast_t *myast)
{
	boolean_t oldlevel = ml_set_interrupts_enabled(FALSE);
	kern_return_t retval = KERN_FAILURE;
	chudxnu_perfmon_ast_callback_func_t fn = perfmon_ast_callback_fn;

	if (fn) {
		/*
		 * Only execute urgent callbacks if reasons specifies an
		 * urgent context.
		 */
		if ((*myast & AST_CHUD_URGENT) && (reasons & (AST_URGENT | AST_CHUD_URGENT))) {
			*myast &= ~AST_CHUD_URGENT;

			/*
			 * If the only flag left is AST_URGENT, we can clear it;
			 * we know that we set it, but if there are also other
			 * bits set in reasons then someone else might still
			 * need AST_URGENT, so we'll leave it set. The normal
			 * machinery in ast_taken will ensure it gets cleared
			 * eventually, as necessary.
			 */
			if (AST_URGENT == *myast) {
				*myast = AST_NONE;
			}

			retval = KERN_SUCCESS;
		}

		/*
		 * Only execute non-urgent callbacks if reasons actually
		 * specifies AST_CHUD. This implies non-urgent callbacks since
		 * the only time this'll happen is if someone either calls
		 * ast_taken with AST_CHUD explicitly (not done at time of
		 * writing, but possible) or with AST_ALL, which of course
		 * includes AST_CHUD.
		 */
		if ((*myast & AST_CHUD) && (reasons & AST_CHUD)) {
			*myast &= ~AST_CHUD;
			retval = KERN_SUCCESS;
		}

		if (KERN_SUCCESS == retval) {
			x86_thread_state_t state;
			mach_msg_type_number_t count = x86_THREAD_STATE_COUNT;
			thread_t thread = current_thread();

			if (KERN_SUCCESS == chudxnu_thread_get_state(thread,
				x86_THREAD_STATE,
				(thread_state_t)&state,
				&count,
				(thread->task != kernel_task))) {
				(fn)(x86_THREAD_STATE, (thread_state_t)&state, count);
			}
		}
	}

	ml_set_interrupts_enabled(oldlevel);
	return retval;
}

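/*
 * Install a perfmon AST callback: atomically claim perfASTHook, then
 * swap the new function into perfmon_ast_callback_fn.
 */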
__private_extern__ kern_return_t
chudxnu_perfmon_ast_callback_enter(chudxnu_perfmon_ast_callback_func_t func)
{
	if (OSCompareAndSwapPtr(NULL, chudxnu_private_chud_ast_callback,
		(void * volatile *)&perfASTHook)) {
		chudxnu_perfmon_ast_callback_func_t old = perfmon_ast_callback_fn;

		while (!OSCompareAndSwapPtr(old, func,
			(void * volatile *)&perfmon_ast_callback_fn)) {
			old = perfmon_ast_callback_fn;
		}

		return KERN_SUCCESS;
	}
	return KERN_FAILURE;
}

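/*
 * Remove the perfmon AST callback: unhook perfASTHook and restore the
 * null handler.
 */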
__private_extern__ kern_return_t
chudxnu_perfmon_ast_callback_cancel(void)
{
	if (OSCompareAndSwapPtr(chudxnu_private_chud_ast_callback, NULL,
		(void * volatile *)&perfASTHook)) {
		chudxnu_perfmon_ast_callback_func_t old = perfmon_ast_callback_fn;

		while (!OSCompareAndSwapPtr(old, chud_null_ast,
			(void * volatile *)&perfmon_ast_callback_fn)) {
			old = perfmon_ast_callback_fn;
		}

		return KERN_SUCCESS;
	}
	return KERN_FAILURE;
}

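/*
 * Request a perfmon AST on the current cpu. Urgent requests also set
 * AST_URGENT so the AST is taken as soon as possible.
 */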
__private_extern__ kern_return_t
chudxnu_perfmon_ast_send_urgent(boolean_t urgent)
{
	boolean_t oldlevel = ml_set_interrupts_enabled(FALSE);
	ast_t *myast = ast_pending();

	if (urgent) {
		*myast |= (AST_CHUD_URGENT | AST_URGENT);
	} else {
		*myast |= (AST_CHUD);
	}

	ml_set_interrupts_enabled(oldlevel);
	return KERN_SUCCESS;
}

#if 0
#pragma mark **** interrupt ****
#endif
static kern_return_t chud_null_int(uint32_t trapentry, thread_flavor_t flavor,
	thread_state_t tstate, mach_msg_type_number_t count);
static chudxnu_interrupt_callback_func_t interrupt_callback_fn = chud_null_int;

static kern_return_t chud_null_int(uint32_t trapentry __unused, thread_flavor_t flavor __unused,
	thread_state_t tstate __unused, mach_msg_type_number_t count __unused) {
	return KERN_FAILURE;
}

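/*
 * PMI handler, installed via lapic_set_pmi_func(). Snapshots the
 * current thread's state and delivers it to the registered interrupt
 * callback as X86_INTERRUPT_PERFMON.
 */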
static void
chudxnu_private_interrupt_callback(void *foo) __attribute__((used));

static void
chudxnu_private_interrupt_callback(void *foo)
{
#pragma unused (foo)
	chudxnu_interrupt_callback_func_t fn = interrupt_callback_fn;

	if (fn) {
		boolean_t oldlevel;
		x86_thread_state_t state;
		mach_msg_type_number_t count;

		oldlevel = ml_set_interrupts_enabled(FALSE);

		count = x86_THREAD_STATE_COUNT;
		if (chudxnu_thread_get_state(current_thread(),
					     x86_THREAD_STATE,
					     (thread_state_t)&state,
					     &count,
					     FALSE) == KERN_SUCCESS) {
			(fn)(X86_INTERRUPT_PERFMON,
			     x86_THREAD_STATE,
			     (thread_state_t)&state,
			     count);
		}
		ml_set_interrupts_enabled(oldlevel);
	}
}

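/*
 * Install an interrupt (PMI) callback and point the local APIC's PMI
 * vector at the CHUD handler. Fails if a callback is already set.
 */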
__private_extern__ kern_return_t
chudxnu_interrupt_callback_enter(chudxnu_interrupt_callback_func_t func)
{
	if (OSCompareAndSwapPtr(chud_null_int, func,
		(void * volatile *)&interrupt_callback_fn)) {
		lapic_set_pmi_func((i386_intr_func_t)chudxnu_private_interrupt_callback);
		return KERN_SUCCESS;
	}
	return KERN_FAILURE;
}

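/*
 * Remove the interrupt callback and clear the local APIC PMI vector.
 */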
__private_extern__ kern_return_t
chudxnu_interrupt_callback_cancel(void)
{
	chudxnu_interrupt_callback_func_t old = interrupt_callback_fn;

	while (!OSCompareAndSwapPtr(old, chud_null_int,
		(void * volatile *)&interrupt_callback_fn)) {
		old = interrupt_callback_fn;
	}

	lapic_set_pmi_func(NULL);
	return KERN_SUCCESS;
}

#if 0
#pragma mark **** cpu signal ****
#endif
static chudxnu_cpusig_callback_func_t cpusig_callback_fn = NULL;

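/*
 * Run the registered cpu-signal callback for one dequeued request,
 * passing the current thread's state along with the request code.
 */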
static kern_return_t
chudxnu_private_cpu_signal_handler(int request)
{
	chudxnu_cpusig_callback_func_t fn = cpusig_callback_fn;

	if (fn) {
		x86_thread_state_t state;
		mach_msg_type_number_t count = x86_THREAD_STATE_COUNT;

		if (chudxnu_thread_get_state(current_thread(),
					     x86_THREAD_STATE,
					     (thread_state_t) &state, &count,
					     FALSE) == KERN_SUCCESS) {
			return (fn)(request, x86_THREAD_STATE,
				    (thread_state_t) &state, count);
		} else {
			return KERN_FAILURE;
		}
	}
	return KERN_SUCCESS; // no callback registered; result is ignored
}

/*
 * chudxnu_cpu_signal_handler() is called from the IPI handler
 * when a CHUD signal arrives from another processor.
 */
__private_extern__ void
chudxnu_cpu_signal_handler(void)
{
	chudcpu_signal_request_t	*reqp;
	chudcpu_data_t			*chudinfop;

	chudinfop = (chudcpu_data_t *) current_cpu_datap()->cpu_chud;

	mpdequeue_head(&(chudinfop->cpu_request_queue),
		       (queue_entry_t *) &reqp);
	while (reqp != NULL) {
		chudxnu_private_cpu_signal_handler(reqp->req_code);
		reqp->req_sync = 0;	/* ack: releases the spinning sender */
		mpdequeue_head(&(chudinfop->cpu_request_queue),
			       (queue_entry_t *) &reqp);
	}
}

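/*
 * Install a cpu-signal callback; fails if one is already registered.
 */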
__private_extern__ kern_return_t
chudxnu_cpusig_callback_enter(chudxnu_cpusig_callback_func_t func)
{
	if (OSCompareAndSwapPtr(NULL, func,
		(void * volatile *)&cpusig_callback_fn)) {
		return KERN_SUCCESS;
	}
	return KERN_FAILURE;
}

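/*
 * Remove the cpu-signal callback, retrying the CAS until it sticks.
 */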
__private_extern__ kern_return_t
chudxnu_cpusig_callback_cancel(void)
{
	chudxnu_cpusig_callback_func_t old = cpusig_callback_fn;

	while (!OSCompareAndSwapPtr(old, NULL,
		(void * volatile *)&cpusig_callback_fn)) {
		old = cpusig_callback_fn;
	}

	return KERN_SUCCESS;
}

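/*
 * Send a request to another cpu: build a request on this stack, queue
 * it on the target's request queue, raise an MP_CHUD IPI, then spin
 * until the target's handler clears req_sync (or panic after
 * LockTimeOut). Interrupts are forced on while waiting so this cpu can
 * still service incoming cross-calls. Hypothetical usage sketch
 * (MY_REQUEST is an illustrative request code, not defined here):
 *
 *	kern_return_t kr = chudxnu_cpusig_send(1, MY_REQUEST);
 */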
__private_extern__ kern_return_t
chudxnu_cpusig_send(int otherCPU, uint32_t request_code)
{
	int				thisCPU;
	kern_return_t			retval = KERN_FAILURE;
	chudcpu_signal_request_t	request;
	uint64_t			deadline;
	chudcpu_data_t			*target_chudp;
	boolean_t			old_level;

	disable_preemption();
	// force interrupts on for a cross CPU signal.
	old_level = chudxnu_set_interrupts_enabled(TRUE);
	thisCPU = cpu_number();

	if ((unsigned) otherCPU < real_ncpus &&
	    thisCPU != otherCPU &&
	    cpu_data_ptr[otherCPU]->cpu_running) {

		target_chudp = (chudcpu_data_t *)
			cpu_data_ptr[otherCPU]->cpu_chud;

		/* Fill out request */
		request.req_sync = 0xFFFFFFFF;		/* set sync flag */
		//request.req_type = CPRQchud;		/* set request type */
		request.req_code = request_code;	/* set request */

		/*
		 * Insert the new request in the target cpu's request queue
		 * and signal target cpu.
		 */
		mpenqueue_tail(&target_chudp->cpu_request_queue,
			       &request.req_entry);
		i386_signal_cpu(otherCPU, MP_CHUD, ASYNC);

		/* Wait for response or timeout */
		deadline = mach_absolute_time() + LockTimeOut;
		while (request.req_sync != 0) {
			if (mach_absolute_time() > deadline) {
				panic("chudxnu_cpusig_send(%d,%d) timed out\n",
				      otherCPU, request_code);
			}
			cpu_pause();
		}
		retval = KERN_SUCCESS;
	} else {
		retval = KERN_INVALID_ARGUMENT;
	}

	chudxnu_set_interrupts_enabled(old_level);
	enable_preemption();
	return retval;
}