/*
 * Copyright (c) 2003-2007 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <stdint.h>
#include <mach/boolean.h>
#include <mach/mach_types.h>

#include <kern/kern_types.h>
#include <kern/processor.h>
#include <kern/timer_call.h>
#include <kern/thread_call.h>
#include <kern/kalloc.h>
#include <kern/thread.h>

#include <machine/machine_routines.h>
#include <machine/cpu_data.h>
#include <machine/trap.h>

#include <chud/chud_xnu.h>
#include <chud/chud_xnu_private.h>
#include <chud/chud_thread.h>

#include <i386/misc_protos.h>
#include <i386/mp.h>
#include <i386/machine_cpu.h>

#include <sys/kdebug.h>
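
/*
 * kdebug sub-codes traced as MACHDBG_CODE(DBG_MACH_CHUD, ...) so the
 * callback activity below shows up in kernel trace output.
 */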
#define CHUD_TIMER_CALLBACK_CANCEL  0
#define CHUD_TIMER_CALLBACK_ENTER   1
#define CHUD_TIMER_CALLBACK         2
#define CHUD_AST_SEND               3
#define CHUD_AST_CALLBACK           4
#define CHUD_CPUSIG_SEND            5
#define CHUD_CPUSIG_CALLBACK        6

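/*
 * Tear down every CHUD callback type in one shot: cross-CPU signals,
 * per-CPU timers, PMIs, perfmon ASTs, kdebug, traps, syscalls and dtrace.
 */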
__private_extern__
void chudxnu_cancel_all_callbacks(void)
{
    chudxnu_cpusig_callback_cancel();
    chudxnu_cpu_timer_callback_cancel_all();
    chudxnu_interrupt_callback_cancel();
    chudxnu_perfmon_ast_callback_cancel();
    chudxnu_kdebug_callback_cancel();
    chudxnu_trap_callback_cancel();
    chudxnu_syscall_callback_cancel();
    chudxnu_dtrace_callback_cancel();
}

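/*
 * Per-CPU CHUD state: the boot processor uses a statically allocated
 * block; every other CPU gets its block from kalloc().
 */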
static chudcpu_data_t chudcpu_boot_cpu;
void *
chudxnu_cpu_alloc(boolean_t boot_processor)
{
    chudcpu_data_t *chud_proc_info;

    if (boot_processor) {
        chud_proc_info = &chudcpu_boot_cpu;
    } else {
        chud_proc_info = (chudcpu_data_t *)
            kalloc(sizeof(chudcpu_data_t));
        if (chud_proc_info == (chudcpu_data_t *)NULL) {
            return (void *)NULL;
        }
    }
    bzero((char *)chud_proc_info, sizeof(chudcpu_data_t));
    chud_proc_info->t_deadline = 0xFFFFFFFFFFFFFFFFULL;
    mpqueue_init(&chud_proc_info->cpu_request_queue);

    return (void *)chud_proc_info;
}

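/*
 * Release a per-CPU block obtained from chudxnu_cpu_alloc(); the static
 * boot-CPU block is deliberately never freed.
 */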
void
chudxnu_cpu_free(void *cp)
{
    if (cp == NULL || cp == (void *)&chudcpu_boot_cpu) {
        return;
    } else {
        kfree(cp, sizeof(chudcpu_data_t));
    }
}

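/*
 * Timer expiry handler: with interrupts disabled, sample the current
 * thread's register state and hand it to this CPU's registered timer
 * callback, emitting a kdebug trace point along the way.
 */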
static void
chudxnu_private_cpu_timer_callback(
    timer_call_param_t param0,
    timer_call_param_t param1)
{
#pragma unused (param0)
#pragma unused (param1)
    chudcpu_data_t *chud_proc_info;
    boolean_t oldlevel;
    x86_thread_state_t state;
    mach_msg_type_number_t count;
    chudxnu_cpu_timer_callback_func_t fn;

    oldlevel = ml_set_interrupts_enabled(FALSE);
    chud_proc_info = (chudcpu_data_t *)(current_cpu_datap()->cpu_chud);

    count = x86_THREAD_STATE_COUNT;
    if (chudxnu_thread_get_state(current_thread(),
                                 x86_THREAD_STATE,
                                 (thread_state_t)&state,
                                 &count,
                                 FALSE) == KERN_SUCCESS) {
        fn = chud_proc_info->cpu_timer_callback_fn;
        if (fn) {
            KERNEL_DEBUG_CONSTANT(
                MACHDBG_CODE(DBG_MACH_CHUD,
                             CHUD_TIMER_CALLBACK) | DBG_FUNC_NONE,
                (uint32_t)fn, 0, 0, 0, 0);
                //state.eip, state.cs, 0, 0);
            (fn)(
                x86_THREAD_STATE,
                (thread_state_t)&state,
                count);
        }
    }

    ml_set_interrupts_enabled(oldlevel);
}

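/*
 * Arm (or re-arm) this CPU's CHUD timer: any pending callback is
 * cancelled first, then the new callback is scheduled for a deadline
 * `time' units from now.
 */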
__private_extern__ kern_return_t
chudxnu_cpu_timer_callback_enter(
    chudxnu_cpu_timer_callback_func_t func,
    uint32_t time,
    uint32_t units)
{
    chudcpu_data_t *chud_proc_info;
    boolean_t oldlevel;

    oldlevel = ml_set_interrupts_enabled(FALSE);
    chud_proc_info = (chudcpu_data_t *)(current_cpu_datap()->cpu_chud);

    // cancel any existing callback for this cpu
    timer_call_cancel(&(chud_proc_info->cpu_timer_call));

    chud_proc_info->cpu_timer_callback_fn = func;

    clock_interval_to_deadline(time, units, &(chud_proc_info->t_deadline));
    timer_call_setup(&(chud_proc_info->cpu_timer_call),
                     chudxnu_private_cpu_timer_callback, NULL);
    timer_call_enter(&(chud_proc_info->cpu_timer_call),
                     chud_proc_info->t_deadline);

    KERNEL_DEBUG_CONSTANT(
        MACHDBG_CODE(DBG_MACH_CHUD,
                     CHUD_TIMER_CALLBACK_ENTER) | DBG_FUNC_NONE,
        (uint32_t) func, time, units, 0, 0);

    ml_set_interrupts_enabled(oldlevel);
    return KERN_SUCCESS;
}

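/*
 * Disarm this CPU's CHUD timer and reset the recorded deadline to the
 * maximum 64-bit value (the |= ~x idiom sets every bit).
 */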
__private_extern__ kern_return_t
chudxnu_cpu_timer_callback_cancel(void)
{
    chudcpu_data_t *chud_proc_info;
    boolean_t oldlevel;

    oldlevel = ml_set_interrupts_enabled(FALSE);
    chud_proc_info = (chudcpu_data_t *)(current_cpu_datap()->cpu_chud);

    timer_call_cancel(&(chud_proc_info->cpu_timer_call));

    KERNEL_DEBUG_CONSTANT(
        MACHDBG_CODE(DBG_MACH_CHUD,
                     CHUD_TIMER_CALLBACK_CANCEL) | DBG_FUNC_NONE,
        0, 0, 0, 0, 0);

    // set to max value:
    chud_proc_info->t_deadline |= ~(chud_proc_info->t_deadline);
    chud_proc_info->cpu_timer_callback_fn = NULL;

    ml_set_interrupts_enabled(oldlevel);
    return KERN_SUCCESS;
}

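/*
 * Same as above, but for every CPU that has CHUD state allocated.
 */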
__private_extern__ kern_return_t
chudxnu_cpu_timer_callback_cancel_all(void)
{
    unsigned int cpu;
    chudcpu_data_t *chud_proc_info;

    for (cpu = 0; cpu < real_ncpus; cpu++) {
        chud_proc_info = (chudcpu_data_t *) cpu_data_ptr[cpu]->cpu_chud;
        if (chud_proc_info == NULL)
            continue;
        timer_call_cancel(&(chud_proc_info->cpu_timer_call));
        chud_proc_info->t_deadline |= ~(chud_proc_info->t_deadline);
        chud_proc_info->cpu_timer_callback_fn = NULL;
    }
    return KERN_SUCCESS;
}

#pragma mark **** trap ****
static chudxnu_trap_callback_func_t trap_callback_fn = NULL;

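/*
 * Trap hook installed via perfTrapHook. Returns KERN_SUCCESS when the
 * registered callback claims the trap; KERN_FAILURE lets the kernel
 * (or dtrace) handle it. The T_IN_CHUD thread bit guards against
 * re-entering CHUD from a trap taken inside the callback itself.
 */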
static kern_return_t
chudxnu_private_trap_callback(
    int trapno,
    void *regs,
    int unused1,
    int unused2)
{
#pragma unused (regs)
#pragma unused (unused1)
#pragma unused (unused2)
    kern_return_t retval = KERN_FAILURE;
    chudxnu_trap_callback_func_t fn = trap_callback_fn;

    if (fn) {
        boolean_t oldlevel;
        x86_thread_state_t state;
        mach_msg_type_number_t count;
        thread_t thread = current_thread();

        oldlevel = ml_set_interrupts_enabled(FALSE);

        /* prevent reentry into CHUD when dtracing */
        if (thread->t_chud & T_IN_CHUD) {
            /* restore interrupts */
            ml_set_interrupts_enabled(oldlevel);

            return KERN_FAILURE; // not handled - pass off to dtrace
        }

        /* update the chud state bits */
        thread->t_chud |= T_IN_CHUD;

        count = x86_THREAD_STATE_COUNT;

        if (chudxnu_thread_get_state(thread,
                                     x86_THREAD_STATE,
                                     (thread_state_t)&state,
                                     &count,
                                     FALSE) == KERN_SUCCESS) {

            retval = (fn)(
                trapno,
                x86_THREAD_STATE,
                (thread_state_t)&state,
                count);
        }

        /* no longer in CHUD */
        thread->t_chud &= ~(T_IN_CHUD);

        ml_set_interrupts_enabled(oldlevel);
    }

    return retval;
}

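/*
 * The enter/cancel pairs below swap the static callback pointer and the
 * corresponding kernel hook in tandem.
 */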
__private_extern__ kern_return_t
chudxnu_trap_callback_enter(chudxnu_trap_callback_func_t func)
{
    trap_callback_fn = func;
    perfTrapHook = chudxnu_private_trap_callback;
    return KERN_SUCCESS;
}

__private_extern__ kern_return_t
chudxnu_trap_callback_cancel(void)
{
    trap_callback_fn = NULL;
    perfTrapHook = NULL;
    return KERN_SUCCESS;
}

#pragma mark **** ast ****
static
chudxnu_perfmon_ast_callback_func_t perfmon_ast_callback_fn = NULL;

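/*
 * AST hook installed via perfASTHook. Consumes any pending AST_CHUD /
 * AST_CHUD_URGENT bits (taking care not to clear AST_URGENT while a
 * preemption is still pending), then delivers the thread state to the
 * registered perfmon AST callback.
 */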
static kern_return_t
chudxnu_private_chud_ast_callback(
    int trapno,
    void *regs,
    int unused1,
    int unused2)
{
#pragma unused (trapno)
#pragma unused (regs)
#pragma unused (unused1)
#pragma unused (unused2)
    boolean_t oldlevel = ml_set_interrupts_enabled(FALSE);
    ast_t *myast = ast_pending();
    kern_return_t retval = KERN_FAILURE;
    chudxnu_perfmon_ast_callback_func_t fn = perfmon_ast_callback_fn;

    if (*myast & AST_CHUD_URGENT) {
        *myast &= ~(AST_CHUD_URGENT | AST_CHUD);
        if ((*myast & AST_PREEMPTION) != AST_PREEMPTION)
            *myast &= ~(AST_URGENT);
        retval = KERN_SUCCESS;
    } else if (*myast & AST_CHUD) {
        *myast &= ~(AST_CHUD);
        retval = KERN_SUCCESS;
    }

    if (fn) {
        x86_thread_state_t state;
        mach_msg_type_number_t count;
        count = x86_THREAD_STATE_COUNT;

        if (chudxnu_thread_get_state(
                current_thread(),
                x86_THREAD_STATE,
                (thread_state_t) &state, &count,
                TRUE) == KERN_SUCCESS) {

            KERNEL_DEBUG_CONSTANT(
                MACHDBG_CODE(DBG_MACH_CHUD,
                             CHUD_AST_CALLBACK) | DBG_FUNC_NONE,
                (uint32_t) fn, 0, 0, 0, 0);

            (fn)(
                x86_THREAD_STATE,
                (thread_state_t) &state,
                count);
        }
    }

    ml_set_interrupts_enabled(oldlevel);
    return retval;
}

__private_extern__ kern_return_t
chudxnu_perfmon_ast_callback_enter(chudxnu_perfmon_ast_callback_func_t func)
{
    perfmon_ast_callback_fn = func;
    perfASTHook = chudxnu_private_chud_ast_callback;
    return KERN_SUCCESS;
}

__private_extern__ kern_return_t
chudxnu_perfmon_ast_callback_cancel(void)
{
    perfmon_ast_callback_fn = NULL;
    perfASTHook = NULL;
    return KERN_SUCCESS;
}

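/*
 * Post a CHUD AST to the current processor. An urgent request also sets
 * AST_URGENT so it is handled at the earliest opportunity rather than
 * at the next ordinary AST delivery point.
 */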
__private_extern__ kern_return_t
chudxnu_perfmon_ast_send_urgent(boolean_t urgent)
{
    boolean_t oldlevel = ml_set_interrupts_enabled(FALSE);
    ast_t *myast = ast_pending();

    if (urgent) {
        *myast |= (AST_CHUD_URGENT | AST_URGENT);
    } else {
        *myast |= (AST_CHUD);
    }

    KERNEL_DEBUG_CONSTANT(
        MACHDBG_CODE(DBG_MACH_CHUD, CHUD_AST_SEND) | DBG_FUNC_NONE,
        urgent, 0, 0, 0, 0);

    ml_set_interrupts_enabled(oldlevel);
    return KERN_SUCCESS;
}

__private_extern__ kern_return_t
chudxnu_perfmon_ast_send(void)
{
    return chudxnu_perfmon_ast_send_urgent(TRUE);
}

#pragma mark **** interrupt ****
static chudxnu_interrupt_callback_func_t interrupt_callback_fn = NULL;

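/*
 * Local APIC PMI (performance-monitor interrupt) handler: samples the
 * interrupted thread's state and forwards it to the registered
 * interrupt callback as X86_INTERRUPT_PERFMON.
 */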
static void
chudxnu_private_interrupt_callback(void *foo)
{
#pragma unused (foo)
    chudxnu_interrupt_callback_func_t fn = interrupt_callback_fn;

    if (fn) {
        boolean_t oldlevel;
        x86_thread_state_t state;
        mach_msg_type_number_t count;

        oldlevel = ml_set_interrupts_enabled(FALSE);

        count = x86_THREAD_STATE_COUNT;
        if (chudxnu_thread_get_state(current_thread(),
                                     x86_THREAD_STATE,
                                     (thread_state_t)&state,
                                     &count,
                                     FALSE) == KERN_SUCCESS) {
            (fn)(
                X86_INTERRUPT_PERFMON,
                x86_THREAD_STATE,
                (thread_state_t)&state,
                count);
        }
        ml_set_interrupts_enabled(oldlevel);
    }
}

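/*
 * Registering an interrupt callback points the lapic PMI vector at the
 * private handler above; cancelling restores it to NULL.
 */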
__private_extern__ kern_return_t
chudxnu_interrupt_callback_enter(chudxnu_interrupt_callback_func_t func)
{
    interrupt_callback_fn = func;
    lapic_set_pmi_func((i386_intr_func_t)chudxnu_private_interrupt_callback);
    return KERN_SUCCESS;
}

__private_extern__ kern_return_t
chudxnu_interrupt_callback_cancel(void)
{
    interrupt_callback_fn = NULL;
    lapic_set_pmi_func(NULL);
    return KERN_SUCCESS;
}

#pragma mark **** cpu signal ****
static chudxnu_cpusig_callback_func_t cpusig_callback_fn = NULL;

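/*
 * Runs on the target CPU for each dequeued cross-CPU request, passing
 * the request code and local thread state to the registered callback.
 */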
static kern_return_t
chudxnu_private_cpu_signal_handler(int request)
{
    chudxnu_cpusig_callback_func_t fn = cpusig_callback_fn;

    if (fn) {
        x86_thread_state_t state;
        mach_msg_type_number_t count = x86_THREAD_STATE_COUNT;

        if (chudxnu_thread_get_state(current_thread(),
                                     x86_THREAD_STATE,
                                     (thread_state_t) &state, &count,
                                     FALSE) == KERN_SUCCESS) {
            KERNEL_DEBUG_CONSTANT(
                MACHDBG_CODE(DBG_MACH_CHUD,
                             CHUD_CPUSIG_CALLBACK) | DBG_FUNC_NONE,
                (uint32_t)fn, request, 0, 0, 0);
            return (fn)(
                request, x86_THREAD_STATE,
                (thread_state_t) &state, count);
        } else {
            return KERN_FAILURE;
        }
    }
    return KERN_SUCCESS; // ignored
}
/*
 * chudxnu_cpu_signal_handler() is called from the IPI handler
 * when a CHUD signal arrives from another processor.
 */
__private_extern__ void
chudxnu_cpu_signal_handler(void)
{
    chudcpu_signal_request_t *reqp;
    chudcpu_data_t *chudinfop;

    chudinfop = (chudcpu_data_t *) current_cpu_datap()->cpu_chud;

    mpdequeue_head(&(chudinfop->cpu_request_queue),
                   (queue_entry_t *) &reqp);
    while (reqp != NULL) {
        chudxnu_private_cpu_signal_handler(reqp->req_code);
        reqp->req_sync = 0;
        mpdequeue_head(&(chudinfop->cpu_request_queue),
                       (queue_entry_t *) &reqp);
    }
}

__private_extern__ kern_return_t
chudxnu_cpusig_callback_enter(chudxnu_cpusig_callback_func_t func)
{
    cpusig_callback_fn = func;
    return KERN_SUCCESS;
}

__private_extern__ kern_return_t
chudxnu_cpusig_callback_cancel(void)
{
    cpusig_callback_fn = NULL;
    return KERN_SUCCESS;
}

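/*
 * Send a CHUD signal to another CPU: the request is queued on the
 * target's request queue and an MP_CHUD IPI is sent; the caller then
 * spins (interrupts enabled, preemption disabled) until the target
 * clears req_sync, panicking if LockTimeOut elapses first.
 */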
__private_extern__ kern_return_t
chudxnu_cpusig_send(int otherCPU, uint32_t request_code)
{
    int thisCPU;
    kern_return_t retval = KERN_FAILURE;
    chudcpu_signal_request_t request;
    uint64_t deadline;
    chudcpu_data_t *target_chudp;
    boolean_t old_level;

    disable_preemption();
    // force interrupts on for a cross CPU signal.
    old_level = chudxnu_set_interrupts_enabled(TRUE);
    thisCPU = cpu_number();

    if ((unsigned) otherCPU < real_ncpus &&
        thisCPU != otherCPU &&
        cpu_data_ptr[otherCPU]->cpu_running) {

        target_chudp = (chudcpu_data_t *)
            cpu_data_ptr[otherCPU]->cpu_chud;

        /* Fill out request */
        request.req_sync = 0xFFFFFFFF;    /* set sync flag */
        //request.req_type = CPRQchud;    /* set request type */
        request.req_code = request_code;  /* set request */

        KERNEL_DEBUG_CONSTANT(
            MACHDBG_CODE(DBG_MACH_CHUD,
                         CHUD_CPUSIG_SEND) | DBG_FUNC_NONE,
            otherCPU, request_code, 0, 0, 0);

        /*
         * Insert the new request in the target cpu's request queue
         * and signal target cpu.
         */
        mpenqueue_tail(&target_chudp->cpu_request_queue,
                       &request.req_entry);
        i386_signal_cpu(otherCPU, MP_CHUD, ASYNC);

        /* Wait for response or timeout */
        deadline = mach_absolute_time() + LockTimeOut;
        while (request.req_sync != 0) {
            if (mach_absolute_time() > deadline) {
                panic("chudxnu_cpusig_send(%d,%d) timed out\n",
                      otherCPU, request_code);
            }
            cpu_pause();
        }
        retval = KERN_SUCCESS;
    } else {
        retval = KERN_INVALID_ARGUMENT;
    }

    chudxnu_set_interrupts_enabled(old_level);
    enable_preemption();
    return retval;
}