/*
 * Copyright (c) 2003-2009 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <stdint.h>
#include <mach/boolean.h>
#include <mach/mach_types.h>

#include <kern/kern_types.h>
#include <kern/processor.h>
#include <kern/timer_call.h>
#include <kern/thread_call.h>
#include <kern/kalloc.h>
#include <kern/thread.h>

#include <libkern/OSAtomic.h>

#include <machine/machine_routines.h>
#include <machine/cpu_data.h>
#include <machine/trap.h>

#include <chud/chud_xnu.h>
#include <chud/chud_xnu_private.h>

#include <i386/misc_protos.h>
#include <i386/lapic.h>
#include <i386/mp.h>
#include <i386/machine_cpu.h>

#include <sys/kdebug.h>
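
/*
 * Event codes naming the CHUD callback paths below (timer, AST, and
 * cross-CPU signal). <sys/kdebug.h> is included above, which suggests
 * these are intended as kdebug trace points, though no trace calls
 * appear in this file.
 */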
#define CHUD_TIMER_CALLBACK_CANCEL 0
#define CHUD_TIMER_CALLBACK_ENTER 1
#define CHUD_TIMER_CALLBACK 2
#define CHUD_AST_SEND 3
#define CHUD_AST_CALLBACK 4
#define CHUD_CPUSIG_SEND 5
#define CHUD_CPUSIG_CALLBACK 6

__private_extern__
void chudxnu_cancel_all_callbacks(void)
{
    chudxnu_cpusig_callback_cancel();
    chudxnu_cpu_timer_callback_cancel_all();
    chudxnu_interrupt_callback_cancel();
    chudxnu_perfmon_ast_callback_cancel();
    chudxnu_kdebug_callback_cancel();
    chudxnu_trap_callback_cancel();
    chudxnu_syscall_callback_cancel();
    chudxnu_dtrace_callback_cancel();
}

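/*
 * Per-CPU CHUD state. The boot processor's record is statically
 * allocated, presumably because chudxnu_cpu_alloc() can be called for
 * the boot CPU before the kalloc allocator is ready; every other CPU's
 * record comes from kalloc().
 */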
static chudcpu_data_t chudcpu_boot_cpu;
void *
chudxnu_cpu_alloc(boolean_t boot_processor)
{
    chudcpu_data_t *chud_proc_info;

    if (boot_processor) {
        chud_proc_info = &chudcpu_boot_cpu;
    } else {
        chud_proc_info = (chudcpu_data_t *)
            kalloc(sizeof(chudcpu_data_t));
        if (chud_proc_info == (chudcpu_data_t *)NULL) {
            return (void *)NULL;
        }
    }
    bzero((char *)chud_proc_info, sizeof(chudcpu_data_t));
    chud_proc_info->t_deadline = 0xFFFFFFFFFFFFFFFFULL;
    mpqueue_init(&chud_proc_info->cpu_request_queue);

    return (void *)chud_proc_info;
}

void
chudxnu_cpu_free(void *cp)
{
    if (cp == NULL || cp == (void *)&chudcpu_boot_cpu) {
        return;
    } else {
        kfree(cp, sizeof(chudcpu_data_t));
    }
}

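/*
 * Timer expiration handler, run by the timer_call machinery. With
 * interrupts disabled, it samples the interrupted thread's register
 * state and hands it to this CPU's registered timer callback, if any.
 */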
static void
chudxnu_private_cpu_timer_callback(
    timer_call_param_t param0,
    timer_call_param_t param1)
{
#pragma unused (param0)
#pragma unused (param1)
    chudcpu_data_t *chud_proc_info;
    boolean_t oldlevel;
    x86_thread_state_t state;
    mach_msg_type_number_t count;
    chudxnu_cpu_timer_callback_func_t fn;

    oldlevel = ml_set_interrupts_enabled(FALSE);
    chud_proc_info = (chudcpu_data_t *)(current_cpu_datap()->cpu_chud);

    count = x86_THREAD_STATE_COUNT;
    if (chudxnu_thread_get_state(current_thread(),
            x86_THREAD_STATE,
            (thread_state_t)&state,
            &count,
            FALSE) == KERN_SUCCESS) {
        fn = chud_proc_info->cpu_timer_callback_fn;
        if (fn) {
            (fn)(
                x86_THREAD_STATE,
                (thread_state_t)&state,
                count);
        }
    }

    ml_set_interrupts_enabled(oldlevel);
}

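/*
 * Arm (or re-arm) the calling CPU's CHUD timer. The underlying
 * timer_call is one-shot: it fires once at the computed deadline, so a
 * client wanting periodic samples must re-enter from its own callback.
 * A sketch of hypothetical client code (my_sample_fn is not part of
 * this file):
 *
 *     // sample this CPU's thread state roughly one second from now
 *     chudxnu_cpu_timer_callback_enter(my_sample_fn, 1, NSEC_PER_SEC);
 *
 * where units is a scale factor as interpreted by
 * clock_interval_to_deadline().
 */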
__private_extern__ kern_return_t
chudxnu_cpu_timer_callback_enter(
    chudxnu_cpu_timer_callback_func_t func,
    uint32_t time,
    uint32_t units)
{
    chudcpu_data_t *chud_proc_info;
    boolean_t oldlevel;

    oldlevel = ml_set_interrupts_enabled(FALSE);
    chud_proc_info = (chudcpu_data_t *)(current_cpu_datap()->cpu_chud);

    // cancel any existing callback for this cpu
    timer_call_cancel(&(chud_proc_info->cpu_timer_call));

    chud_proc_info->cpu_timer_callback_fn = func;

    clock_interval_to_deadline(time, units, &(chud_proc_info->t_deadline));
    timer_call_setup(&(chud_proc_info->cpu_timer_call),
        chudxnu_private_cpu_timer_callback, NULL);
    timer_call_enter(&(chud_proc_info->cpu_timer_call),
        chud_proc_info->t_deadline);

    ml_set_interrupts_enabled(oldlevel);
    return KERN_SUCCESS;
}

__private_extern__ kern_return_t
chudxnu_cpu_timer_callback_cancel(void)
{
    chudcpu_data_t *chud_proc_info;
    boolean_t oldlevel;

    oldlevel = ml_set_interrupts_enabled(FALSE);
    chud_proc_info = (chudcpu_data_t *)(current_cpu_datap()->cpu_chud);

    timer_call_cancel(&(chud_proc_info->cpu_timer_call));

    // set the deadline to the maximum value (all bits set)
    chud_proc_info->t_deadline |= ~(chud_proc_info->t_deadline);
    chud_proc_info->cpu_timer_callback_fn = NULL;

    ml_set_interrupts_enabled(oldlevel);
    return KERN_SUCCESS;
}

__private_extern__ kern_return_t
chudxnu_cpu_timer_callback_cancel_all(void)
{
    unsigned int cpu;
    chudcpu_data_t *chud_proc_info;

    for (cpu = 0; cpu < real_ncpus; cpu++) {
        chud_proc_info = (chudcpu_data_t *) cpu_data_ptr[cpu]->cpu_chud;
        if (chud_proc_info == NULL)
            continue;
        timer_call_cancel(&(chud_proc_info->cpu_timer_call));
        chud_proc_info->t_deadline |= ~(chud_proc_info->t_deadline);
        chud_proc_info->cpu_timer_callback_fn = NULL;
    }
    return KERN_SUCCESS;
}

#if 0
#pragma mark **** trap ****
#endif
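
/*
 * The trap hook uses a null-callback pattern: when no client is
 * registered, trap_callback_fn points at chud_null_trap (which just
 * reports the trap as unhandled) instead of NULL, and handlers are
 * swapped with atomic compare-and-swap rather than a lock.
 */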
static kern_return_t chud_null_trap(uint32_t trapentry, thread_flavor_t flavor,
    thread_state_t tstate, mach_msg_type_number_t count);
static chudxnu_trap_callback_func_t trap_callback_fn = chud_null_trap;

static kern_return_t chud_null_trap(uint32_t trapentry __unused, thread_flavor_t flavor __unused,
    thread_state_t tstate __unused, mach_msg_type_number_t count __unused) {
    return KERN_FAILURE;
}

static kern_return_t
chudxnu_private_trap_callback(
    int trapno,
    void *regs,
    int unused1,
    int unused2)
{
#pragma unused (regs)
#pragma unused (unused1)
#pragma unused (unused2)
    kern_return_t retval = KERN_FAILURE;
    chudxnu_trap_callback_func_t fn = trap_callback_fn;

    if (fn) {
        boolean_t oldlevel;
        x86_thread_state_t state;
        mach_msg_type_number_t count;
        thread_t thread = current_thread();

        oldlevel = ml_set_interrupts_enabled(FALSE);

        /* prevent reentry into CHUD when dtracing */
        if (thread->t_chud & T_IN_CHUD) {
            /* restore interrupts */
            ml_set_interrupts_enabled(oldlevel);

            return KERN_FAILURE; // not handled - pass off to dtrace
        }

        /* update the chud state bits */
        thread->t_chud |= T_IN_CHUD;

        count = x86_THREAD_STATE_COUNT;

        if (chudxnu_thread_get_state(thread,
                x86_THREAD_STATE,
                (thread_state_t)&state,
                &count,
                FALSE) == KERN_SUCCESS) {

            retval = (fn)(
                trapno,
                x86_THREAD_STATE,
                (thread_state_t)&state,
                count);
        }

        /* no longer in CHUD */
        thread->t_chud &= ~(T_IN_CHUD);

        ml_set_interrupts_enabled(oldlevel);
    }

    return retval;
}

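/*
 * Registration claims the kernel's perfTrapHook with a compare-and-swap
 * from NULL, so at most one CHUD client owns the trap path at a time;
 * the CAS loop then publishes the client's function pointer.
 * Cancellation reverses both steps.
 */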
__private_extern__ kern_return_t
chudxnu_trap_callback_enter(chudxnu_trap_callback_func_t func)
{
    if (OSCompareAndSwapPtr(NULL, chudxnu_private_trap_callback,
        (void * volatile *)&perfTrapHook)) {

        chudxnu_trap_callback_func_t old = trap_callback_fn;
        while (!OSCompareAndSwapPtr(old, func,
            (void * volatile *)&trap_callback_fn)) {
            old = trap_callback_fn;
        }
        return KERN_SUCCESS;
    }
    return KERN_FAILURE;
}

__private_extern__ kern_return_t
chudxnu_trap_callback_cancel(void)
{
    if (OSCompareAndSwapPtr(chudxnu_private_trap_callback, NULL,
        (void * volatile *)&perfTrapHook)) {

        chudxnu_trap_callback_func_t old = trap_callback_fn;
        while (!OSCompareAndSwapPtr(old, chud_null_trap,
            (void * volatile *)&trap_callback_fn)) {
            old = trap_callback_fn;
        }
        return KERN_SUCCESS;
    }
    return KERN_FAILURE;
}

#if 0
#pragma mark **** ast ****
#endif
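
/*
 * The AST path mirrors the trap path: chud_null_ast stands in when no
 * client is registered, and chudxnu_private_chud_ast_callback below is
 * installed into perfASTHook. The callback consumes any pending
 * AST_CHUD / AST_CHUD_URGENT bits before delivering the interrupted
 * thread's state to the client.
 */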
static kern_return_t chud_null_ast(thread_flavor_t flavor, thread_state_t tstate,
    mach_msg_type_number_t count);
static chudxnu_perfmon_ast_callback_func_t perfmon_ast_callback_fn = chud_null_ast;

static kern_return_t chud_null_ast(thread_flavor_t flavor __unused,
    thread_state_t tstate __unused, mach_msg_type_number_t count __unused) {
    return KERN_FAILURE;
}

static kern_return_t
chudxnu_private_chud_ast_callback(
    int trapno,
    void *regs,
    int unused1,
    int unused2)
{
#pragma unused (trapno)
#pragma unused (regs)
#pragma unused (unused1)
#pragma unused (unused2)
    boolean_t oldlevel = ml_set_interrupts_enabled(FALSE);
    ast_t *myast = ast_pending();
    kern_return_t retval = KERN_FAILURE;
    chudxnu_perfmon_ast_callback_func_t fn = perfmon_ast_callback_fn;

    if (*myast & AST_CHUD_URGENT) {
        *myast &= ~(AST_CHUD_URGENT | AST_CHUD);
        if ((*myast & AST_PREEMPTION) != AST_PREEMPTION)
            *myast &= ~(AST_URGENT);
        retval = KERN_SUCCESS;
    } else if (*myast & AST_CHUD) {
        *myast &= ~(AST_CHUD);
        retval = KERN_SUCCESS;
    }

    if (fn) {
        x86_thread_state_t state;
        mach_msg_type_number_t count;
        count = x86_THREAD_STATE_COUNT;

        if (chudxnu_thread_get_state(
                current_thread(),
                x86_THREAD_STATE,
                (thread_state_t) &state, &count,
                TRUE) == KERN_SUCCESS) {

            (fn)(
                x86_THREAD_STATE,
                (thread_state_t) &state,
                count);
        }
    }

    ml_set_interrupts_enabled(oldlevel);
    return retval;
}

__private_extern__ kern_return_t
chudxnu_perfmon_ast_callback_enter(chudxnu_perfmon_ast_callback_func_t func)
{
    if (OSCompareAndSwapPtr(NULL, chudxnu_private_chud_ast_callback,
        (void * volatile *)&perfASTHook)) {
        chudxnu_perfmon_ast_callback_func_t old = perfmon_ast_callback_fn;

        while (!OSCompareAndSwapPtr(old, func,
            (void * volatile *)&perfmon_ast_callback_fn)) {
            old = perfmon_ast_callback_fn;
        }

        return KERN_SUCCESS;
    }
    return KERN_FAILURE;
}

__private_extern__ kern_return_t
chudxnu_perfmon_ast_callback_cancel(void)
{
    if (OSCompareAndSwapPtr(chudxnu_private_chud_ast_callback, NULL,
        (void * volatile *)&perfASTHook)) {
        chudxnu_perfmon_ast_callback_func_t old = perfmon_ast_callback_fn;

        while (!OSCompareAndSwapPtr(old, chud_null_ast,
            (void * volatile *)&perfmon_ast_callback_fn)) {
            old = perfmon_ast_callback_fn;
        }

        return KERN_SUCCESS;
    }
    return KERN_FAILURE;
}

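/*
 * Post a CHUD AST against the current processor. Passing TRUE also sets
 * AST_URGENT, so the AST is taken at the earliest opportunity rather
 * than at the next natural AST check. A hypothetical call from a PMI
 * or timer callback:
 *
 *     chudxnu_perfmon_ast_send_urgent(TRUE);  // deliver the AST ASAP
 */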
__private_extern__ kern_return_t
chudxnu_perfmon_ast_send_urgent(boolean_t urgent)
{
    boolean_t oldlevel = ml_set_interrupts_enabled(FALSE);
    ast_t *myast = ast_pending();

    if (urgent) {
        *myast |= (AST_CHUD_URGENT | AST_URGENT);
    } else {
        *myast |= (AST_CHUD);
    }

    ml_set_interrupts_enabled(oldlevel);
    return KERN_SUCCESS;
}

#if 0
#pragma mark **** interrupt ****
#endif
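
/*
 * Performance-monitor interrupt (PMI) plumbing: registering a client
 * points the local APIC's PMI vector at
 * chudxnu_private_interrupt_callback via lapic_set_pmi_func(), so the
 * handler below runs when a performance counter overflows and raises
 * its interrupt.
 */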
static kern_return_t chud_null_int(uint32_t trapentry, thread_flavor_t flavor,
    thread_state_t tstate, mach_msg_type_number_t count);
static chudxnu_interrupt_callback_func_t interrupt_callback_fn = chud_null_int;

static kern_return_t chud_null_int(uint32_t trapentry __unused, thread_flavor_t flavor __unused,
    thread_state_t tstate __unused, mach_msg_type_number_t count __unused) {
    return KERN_FAILURE;
}

static void
chudxnu_private_interrupt_callback(void *foo)
{
#pragma unused (foo)
    chudxnu_interrupt_callback_func_t fn = interrupt_callback_fn;

    if (fn) {
        boolean_t oldlevel;
        x86_thread_state_t state;
        mach_msg_type_number_t count;

        oldlevel = ml_set_interrupts_enabled(FALSE);

        count = x86_THREAD_STATE_COUNT;
        if (chudxnu_thread_get_state(current_thread(),
                x86_THREAD_STATE,
                (thread_state_t)&state,
                &count,
                FALSE) == KERN_SUCCESS) {
            (fn)(
                X86_INTERRUPT_PERFMON,
                x86_THREAD_STATE,
                (thread_state_t)&state,
                count);
        }
        ml_set_interrupts_enabled(oldlevel);
    }
}

__private_extern__ kern_return_t
chudxnu_interrupt_callback_enter(chudxnu_interrupt_callback_func_t func)
{
    if (OSCompareAndSwapPtr(chud_null_int, func,
        (void * volatile *)&interrupt_callback_fn)) {
        lapic_set_pmi_func((i386_intr_func_t)chudxnu_private_interrupt_callback);

        return KERN_SUCCESS;
    }
    return KERN_FAILURE;
}

__private_extern__ kern_return_t
chudxnu_interrupt_callback_cancel(void)
{
    chudxnu_interrupt_callback_func_t old = interrupt_callback_fn;

    while (!OSCompareAndSwapPtr(old, chud_null_int,
        (void * volatile *)&interrupt_callback_fn)) {
        old = interrupt_callback_fn;
    }

    lapic_set_pmi_func(NULL);
    return KERN_SUCCESS;
}

#if 0
#pragma mark **** cpu signal ****
#endif
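
/*
 * Cross-CPU signaling: each CPU keeps a CHUD request queue in its
 * chudcpu_data_t. A sender enqueues a chudcpu_signal_request_t on the
 * target's queue and kicks it with an MP_CHUD IPI; the target drains
 * the queue in chudxnu_cpu_signal_handler() below, running the
 * registered callback for each request.
 */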
static chudxnu_cpusig_callback_func_t cpusig_callback_fn = NULL;

static kern_return_t
chudxnu_private_cpu_signal_handler(int request)
{
    chudxnu_cpusig_callback_func_t fn = cpusig_callback_fn;

    if (fn) {
        x86_thread_state_t state;
        mach_msg_type_number_t count = x86_THREAD_STATE_COUNT;

        if (chudxnu_thread_get_state(current_thread(),
                x86_THREAD_STATE,
                (thread_state_t) &state, &count,
                FALSE) == KERN_SUCCESS) {
            return (fn)(
                request, x86_THREAD_STATE,
                (thread_state_t) &state, count);
        } else {
            return KERN_FAILURE;
        }
    }
    return KERN_SUCCESS; // no callback registered; request is ignored
}

/*
 * chudxnu_cpu_signal_handler() is called from the IPI handler
 * when a CHUD signal arrives from another processor.
 */
__private_extern__ void
chudxnu_cpu_signal_handler(void)
{
    chudcpu_signal_request_t *reqp;
    chudcpu_data_t *chudinfop;

    chudinfop = (chudcpu_data_t *) current_cpu_datap()->cpu_chud;

    mpdequeue_head(&(chudinfop->cpu_request_queue),
        (queue_entry_t *) &reqp);
    while (reqp != NULL) {
        chudxnu_private_cpu_signal_handler(reqp->req_code);
        reqp->req_sync = 0;
        mpdequeue_head(&(chudinfop->cpu_request_queue),
            (queue_entry_t *) &reqp);
    }
}

__private_extern__ kern_return_t
chudxnu_cpusig_callback_enter(chudxnu_cpusig_callback_func_t func)
{
    if (OSCompareAndSwapPtr(NULL, func,
        (void * volatile *)&cpusig_callback_fn)) {
        return KERN_SUCCESS;
    }
    return KERN_FAILURE;
}

__private_extern__ kern_return_t
chudxnu_cpusig_callback_cancel(void)
{
    chudxnu_cpusig_callback_func_t old = cpusig_callback_fn;

    while (!OSCompareAndSwapPtr(old, NULL,
        (void * volatile *)&cpusig_callback_fn)) {
        old = cpusig_callback_fn;
    }

    return KERN_SUCCESS;
}

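/*
 * Send request_code to otherCPU and wait for it to be serviced. The
 * handshake is the req_sync field: the sender initializes it non-zero,
 * the target clears it after handling the request (see
 * chudxnu_cpu_signal_handler above), and the sender spins until that
 * happens or LockTimeOut elapses, at which point it panics. A
 * hypothetical caller (MY_REQUEST is a client-defined request code):
 *
 *     kern_return_t kr = chudxnu_cpusig_send(1, MY_REQUEST);
 */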
__private_extern__ kern_return_t
chudxnu_cpusig_send(int otherCPU, uint32_t request_code)
{
    int thisCPU;
    kern_return_t retval = KERN_FAILURE;
    chudcpu_signal_request_t request;
    uint64_t deadline;
    chudcpu_data_t *target_chudp;
    boolean_t old_level;

    disable_preemption();
    // force interrupts on for a cross CPU signal.
    old_level = chudxnu_set_interrupts_enabled(TRUE);
    thisCPU = cpu_number();

    if ((unsigned) otherCPU < real_ncpus &&
        thisCPU != otherCPU &&
        cpu_data_ptr[otherCPU]->cpu_running) {

        target_chudp = (chudcpu_data_t *)
            cpu_data_ptr[otherCPU]->cpu_chud;

        /* Fill out request */
        request.req_sync = 0xFFFFFFFF; /* set sync flag */
        //request.req_type = CPRQchud; /* set request type */
        request.req_code = request_code; /* set request */

        /*
         * Insert the new request in the target cpu's request queue
         * and signal target cpu.
         */
        mpenqueue_tail(&target_chudp->cpu_request_queue,
            &request.req_entry);
        i386_signal_cpu(otherCPU, MP_CHUD, ASYNC);

        /* Wait for response or timeout */
        deadline = mach_absolute_time() + LockTimeOut;
        while (request.req_sync != 0) {
            if (mach_absolute_time() > deadline) {
                panic("chudxnu_cpusig_send(%d,%d) timed out\n",
                    otherCPU, request_code);
            }
            cpu_pause();
        }
        retval = KERN_SUCCESS;
    } else {
        retval = KERN_INVALID_ARGUMENT;
    }

    chudxnu_set_interrupts_enabled(old_level);
    enable_preemption();
    return retval;
}