/*
 * Copyright (c) 2003-2007 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <stdint.h>
#include <mach/boolean.h>
#include <mach/mach_types.h>

#include <kern/kern_types.h>
#include <kern/processor.h>
#include <kern/timer_call.h>
#include <kern/thread_call.h>
#include <kern/kalloc.h>
#include <kern/thread.h>

#include <machine/machine_routines.h>
#include <machine/cpu_data.h>
#include <machine/trap.h>

#include <chud/chud_xnu.h>
#include <chud/chud_xnu_private.h>
#include <chud/chud_thread.h>

#include <i386/misc_protos.h>
#include <i386/lapic.h>
#include <i386/mp.h>
#include <i386/machine_cpu.h>

#include <sys/kdebug.h>

/* kdebug sub-codes for CHUD tracepoints, used with MACHDBG_CODE(DBG_MACH_CHUD, ...) */
#define CHUD_TIMER_CALLBACK_CANCEL	0
#define CHUD_TIMER_CALLBACK_ENTER	1
#define CHUD_TIMER_CALLBACK		2
#define CHUD_AST_SEND			3
#define CHUD_AST_CALLBACK		4
#define CHUD_CPUSIG_SEND		5
#define CHUD_CPUSIG_CALLBACK		6

__private_extern__
void chudxnu_cancel_all_callbacks(void)
{
	chudxnu_cpusig_callback_cancel();
	chudxnu_cpu_timer_callback_cancel_all();
	chudxnu_interrupt_callback_cancel();
	chudxnu_perfmon_ast_callback_cancel();
	chudxnu_kdebug_callback_cancel();
	chudxnu_trap_callback_cancel();
	chudxnu_syscall_callback_cancel();
	chudxnu_dtrace_callback_cancel();
}

static chudcpu_data_t chudcpu_boot_cpu;

/*
 * Allocate the per-CPU CHUD state. The boot processor uses statically
 * allocated storage; every other CPU is allocated from kalloc().
 */
void *
chudxnu_cpu_alloc(boolean_t boot_processor)
{
	chudcpu_data_t	*chud_proc_info;

	if (boot_processor) {
		chud_proc_info = &chudcpu_boot_cpu;
	} else {
		chud_proc_info = (chudcpu_data_t *)
					kalloc(sizeof(chudcpu_data_t));
		if (chud_proc_info == (chudcpu_data_t *)NULL) {
			return (void *)NULL;
		}
	}
	bzero((char *)chud_proc_info, sizeof(chudcpu_data_t));
	chud_proc_info->t_deadline = 0xFFFFFFFFFFFFFFFFULL;
	mpqueue_init(&chud_proc_info->cpu_request_queue);

	return (void *)chud_proc_info;
}

void
chudxnu_cpu_free(void *cp)
{
	if (cp == NULL || cp == (void *)&chudcpu_boot_cpu) {
		return;
	} else {
		kfree(cp, sizeof(chudcpu_data_t));
	}
}

static void
chudxnu_private_cpu_timer_callback(
	timer_call_param_t param0,
	timer_call_param_t param1)
{
#pragma unused (param0)
#pragma unused (param1)
	chudcpu_data_t			*chud_proc_info;
	boolean_t			oldlevel;
	x86_thread_state_t		state;
	mach_msg_type_number_t		count;
	chudxnu_cpu_timer_callback_func_t fn;

	oldlevel = ml_set_interrupts_enabled(FALSE);
	chud_proc_info = (chudcpu_data_t *)(current_cpu_datap()->cpu_chud);

	count = x86_THREAD_STATE_COUNT;
	if (chudxnu_thread_get_state(current_thread(),
				     x86_THREAD_STATE,
				     (thread_state_t)&state,
				     &count,
				     FALSE) == KERN_SUCCESS) {
		fn = chud_proc_info->cpu_timer_callback_fn;
		if (fn) {
			KERNEL_DEBUG_CONSTANT(
				MACHDBG_CODE(DBG_MACH_CHUD,
					CHUD_TIMER_CALLBACK) | DBG_FUNC_NONE,
				(uint32_t)fn, 0, 0, 0, 0);
				//state.eip, state.cs, 0, 0);
			(fn)(
				x86_THREAD_STATE,
				(thread_state_t)&state,
				count);
		}
	}

	ml_set_interrupts_enabled(oldlevel);
}

__private_extern__ kern_return_t
chudxnu_cpu_timer_callback_enter(
	chudxnu_cpu_timer_callback_func_t func,
	uint32_t time,
	uint32_t units)
{
	chudcpu_data_t	*chud_proc_info;
	boolean_t	oldlevel;

	oldlevel = ml_set_interrupts_enabled(FALSE);
	chud_proc_info = (chudcpu_data_t *)(current_cpu_datap()->cpu_chud);

	// cancel any existing callback for this cpu
	timer_call_cancel(&(chud_proc_info->cpu_timer_call));

	chud_proc_info->cpu_timer_callback_fn = func;

	clock_interval_to_deadline(time, units, &(chud_proc_info->t_deadline));
	timer_call_setup(&(chud_proc_info->cpu_timer_call),
			 chudxnu_private_cpu_timer_callback, NULL);
	timer_call_enter(&(chud_proc_info->cpu_timer_call),
			 chud_proc_info->t_deadline);

	KERNEL_DEBUG_CONSTANT(
		MACHDBG_CODE(DBG_MACH_CHUD,
			CHUD_TIMER_CALLBACK_ENTER) | DBG_FUNC_NONE,
		(uint32_t) func, time, units, 0, 0);

	ml_set_interrupts_enabled(oldlevel);
	return KERN_SUCCESS;
}

__private_extern__ kern_return_t
chudxnu_cpu_timer_callback_cancel(void)
{
	chudcpu_data_t	*chud_proc_info;
	boolean_t	oldlevel;

	oldlevel = ml_set_interrupts_enabled(FALSE);
	chud_proc_info = (chudcpu_data_t *)(current_cpu_datap()->cpu_chud);

	timer_call_cancel(&(chud_proc_info->cpu_timer_call));

	KERNEL_DEBUG_CONSTANT(
		MACHDBG_CODE(DBG_MACH_CHUD,
			CHUD_TIMER_CALLBACK_CANCEL) | DBG_FUNC_NONE,
		0, 0, 0, 0, 0);

	// set to max value:
	chud_proc_info->t_deadline |= ~(chud_proc_info->t_deadline);
	chud_proc_info->cpu_timer_callback_fn = NULL;

	ml_set_interrupts_enabled(oldlevel);
	return KERN_SUCCESS;
}

__private_extern__ kern_return_t
chudxnu_cpu_timer_callback_cancel_all(void)
{
	unsigned int	cpu;
	chudcpu_data_t	*chud_proc_info;

	for (cpu = 0; cpu < real_ncpus; cpu++) {
		chud_proc_info = (chudcpu_data_t *) cpu_data_ptr[cpu]->cpu_chud;
		if (chud_proc_info == NULL)
			continue;
		timer_call_cancel(&(chud_proc_info->cpu_timer_call));
		chud_proc_info->t_deadline |= ~(chud_proc_info->t_deadline);
		chud_proc_info->cpu_timer_callback_fn = NULL;
	}
	return KERN_SUCCESS;
}

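/*
 * Usage sketch (illustrative, not part of the original source): arming a
 * one-shot per-CPU timer callback. The callback and helper names below are
 * hypothetical; the parameter types mirror the invocation in
 * chudxnu_private_cpu_timer_callback() above (see chud_xnu.h for the
 * authoritative chudxnu_cpu_timer_callback_func_t typedef). The (time,
 * units) pair feeds clock_interval_to_deadline() directly, so units is the
 * length of one time tick in nanoseconds.
 */
#if 0
static kern_return_t
example_timer_cb(thread_flavor_t flavor, thread_state_t tstate,
		 mach_msg_type_number_t count)
{
	/* Runs on the CPU that armed the timer, with interrupts disabled. */
	/* Re-arm via chudxnu_cpu_timer_callback_enter() for periodic use. */
	return KERN_SUCCESS;
}

static void
example_arm_timer(void)
{
	/* Fire once, ~10 ms from now: 10 units of 1000000 ns each. */
	chudxnu_cpu_timer_callback_enter(example_timer_cb, 10, 1000000);
}
#endif
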
#pragma mark **** trap ****
static chudxnu_trap_callback_func_t trap_callback_fn = NULL;

static kern_return_t
chudxnu_private_trap_callback(
	int trapno,
	void *regs,
	int unused1,
	int unused2)
{
#pragma unused (regs)
#pragma unused (unused1)
#pragma unused (unused2)
	kern_return_t retval = KERN_FAILURE;
	chudxnu_trap_callback_func_t fn = trap_callback_fn;

	if (fn) {
		boolean_t oldlevel;
		x86_thread_state_t state;
		mach_msg_type_number_t count;
		thread_t thread = current_thread();

		oldlevel = ml_set_interrupts_enabled(FALSE);

		/* prevent reentry into CHUD when dtracing */
		if (thread->t_chud & T_IN_CHUD) {
			/* restore interrupts */
			ml_set_interrupts_enabled(oldlevel);

			return KERN_FAILURE;	// not handled - pass off to dtrace
		}

		/* update the chud state bits */
		thread->t_chud |= T_IN_CHUD;

		count = x86_THREAD_STATE_COUNT;

		if (chudxnu_thread_get_state(thread,
					     x86_THREAD_STATE,
					     (thread_state_t)&state,
					     &count,
					     FALSE) == KERN_SUCCESS) {

			retval = (fn)(
				trapno,
				x86_THREAD_STATE,
				(thread_state_t)&state,
				count);
		}

		/* no longer in CHUD */
		thread->t_chud &= ~(T_IN_CHUD);

		ml_set_interrupts_enabled(oldlevel);
	}

	return retval;
}

__private_extern__ kern_return_t
chudxnu_trap_callback_enter(chudxnu_trap_callback_func_t func)
{
	trap_callback_fn = func;
	perfTrapHook = chudxnu_private_trap_callback;
	return KERN_SUCCESS;
}

__private_extern__ kern_return_t
chudxnu_trap_callback_cancel(void)
{
	trap_callback_fn = NULL;
	perfTrapHook = NULL;
	return KERN_SUCCESS;
}

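/*
 * Usage sketch (illustrative, not part of the original source): a trap
 * callback returns KERN_SUCCESS to claim the trap and KERN_FAILURE to
 * decline it, just as the reentry guard above declines in favor of dtrace.
 * Names are hypothetical; the typedef in chud_xnu.h is authoritative.
 */
#if 0
static kern_return_t
example_trap_cb(uint32_t trapno, thread_flavor_t flavor,
		thread_state_t tstate, mach_msg_type_number_t count)
{
	/* Inspect the trap here; KERN_FAILURE lets the normal trap path */
	/* (including dtrace) handle it.                                 */
	return KERN_FAILURE;
}

static void
example_install_trap_hook(void)
{
	chudxnu_trap_callback_enter(example_trap_cb);
	/* ... later ... */
	chudxnu_trap_callback_cancel();
}
#endif
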
#pragma mark **** ast ****
static
chudxnu_perfmon_ast_callback_func_t perfmon_ast_callback_fn = NULL;

static kern_return_t
chudxnu_private_chud_ast_callback(
	int trapno,
	void *regs,
	int unused1,
	int unused2)
{
#pragma unused (trapno)
#pragma unused (regs)
#pragma unused (unused1)
#pragma unused (unused2)
	boolean_t oldlevel = ml_set_interrupts_enabled(FALSE);
	ast_t *myast = ast_pending();
	kern_return_t retval = KERN_FAILURE;
	chudxnu_perfmon_ast_callback_func_t fn = perfmon_ast_callback_fn;

	if (*myast & AST_CHUD_URGENT) {
		*myast &= ~(AST_CHUD_URGENT | AST_CHUD);
		if ((*myast & AST_PREEMPTION) != AST_PREEMPTION)
			*myast &= ~(AST_URGENT);
		retval = KERN_SUCCESS;
	} else if (*myast & AST_CHUD) {
		*myast &= ~(AST_CHUD);
		retval = KERN_SUCCESS;
	}

	if (fn) {
		x86_thread_state_t state;
		mach_msg_type_number_t count;
		count = x86_THREAD_STATE_COUNT;

		if (chudxnu_thread_get_state(
			current_thread(),
			x86_THREAD_STATE,
			(thread_state_t) &state, &count,
			TRUE) == KERN_SUCCESS) {

			KERNEL_DEBUG_CONSTANT(
				MACHDBG_CODE(DBG_MACH_CHUD,
					CHUD_AST_CALLBACK) | DBG_FUNC_NONE,
				(uint32_t) fn, 0, 0, 0, 0);

			(fn)(
				x86_THREAD_STATE,
				(thread_state_t) &state,
				count);
		}
	}

	ml_set_interrupts_enabled(oldlevel);
	return retval;
}

__private_extern__ kern_return_t
chudxnu_perfmon_ast_callback_enter(chudxnu_perfmon_ast_callback_func_t func)
{
	perfmon_ast_callback_fn = func;
	perfASTHook = chudxnu_private_chud_ast_callback;
	return KERN_SUCCESS;
}

__private_extern__ kern_return_t
chudxnu_perfmon_ast_callback_cancel(void)
{
	perfmon_ast_callback_fn = NULL;
	perfASTHook = NULL;
	return KERN_SUCCESS;
}

__private_extern__ kern_return_t
chudxnu_perfmon_ast_send_urgent(boolean_t urgent)
{
	boolean_t oldlevel = ml_set_interrupts_enabled(FALSE);
	ast_t *myast = ast_pending();

	if (urgent) {
		*myast |= (AST_CHUD_URGENT | AST_URGENT);
	} else {
		*myast |= (AST_CHUD);
	}

	KERNEL_DEBUG_CONSTANT(
		MACHDBG_CODE(DBG_MACH_CHUD, CHUD_AST_SEND) | DBG_FUNC_NONE,
		urgent, 0, 0, 0, 0);

	ml_set_interrupts_enabled(oldlevel);
	return KERN_SUCCESS;
}

__private_extern__ kern_return_t
chudxnu_perfmon_ast_send(void)
{
	return chudxnu_perfmon_ast_send_urgent(TRUE);
}

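/*
 * Usage sketch (illustrative, not part of the original source): the usual
 * perfmon pattern is to request an AST from interrupt context and do the
 * heavier sampling work in the AST callback, which receives user-mode
 * thread state (note the TRUE user_only argument passed to
 * chudxnu_thread_get_state() above). Names are hypothetical.
 */
#if 0
static kern_return_t
example_ast_cb(thread_flavor_t flavor, thread_state_t tstate,
	       mach_msg_type_number_t count)
{
	/* Record a sample of the interrupted user thread here. */
	return KERN_SUCCESS;
}

static void
example_install_ast_hook(void)
{
	chudxnu_perfmon_ast_callback_enter(example_ast_cb);
	chudxnu_perfmon_ast_send();	/* urgent AST on the current CPU */
}
#endif
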
#pragma mark **** interrupt ****
static chudxnu_interrupt_callback_func_t interrupt_callback_fn = NULL;

static void
chudxnu_private_interrupt_callback(void *foo)
{
#pragma unused (foo)
	chudxnu_interrupt_callback_func_t fn = interrupt_callback_fn;

	if (fn) {
		boolean_t oldlevel;
		x86_thread_state_t state;
		mach_msg_type_number_t count;

		oldlevel = ml_set_interrupts_enabled(FALSE);

		count = x86_THREAD_STATE_COUNT;
		if (chudxnu_thread_get_state(current_thread(),
					     x86_THREAD_STATE,
					     (thread_state_t)&state,
					     &count,
					     FALSE) == KERN_SUCCESS) {
			(fn)(
				X86_INTERRUPT_PERFMON,
				x86_THREAD_STATE,
				(thread_state_t)&state,
				count);
		}
		ml_set_interrupts_enabled(oldlevel);
	}
}

__private_extern__ kern_return_t
chudxnu_interrupt_callback_enter(chudxnu_interrupt_callback_func_t func)
{
	interrupt_callback_fn = func;
	lapic_set_pmi_func((i386_intr_func_t)chudxnu_private_interrupt_callback);
	return KERN_SUCCESS;
}

__private_extern__ kern_return_t
chudxnu_interrupt_callback_cancel(void)
{
	interrupt_callback_fn = NULL;
	lapic_set_pmi_func(NULL);
	return KERN_SUCCESS;
}

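/*
 * Usage sketch (illustrative, not part of the original source): registering
 * a PMI handler. chudxnu_interrupt_callback_enter() installs the private
 * trampoline as the local APIC performance-counter interrupt handler, and
 * the client callback is invoked with X86_INTERRUPT_PERFMON as its
 * interrupt number. Names are hypothetical.
 */
#if 0
static kern_return_t
example_pmi_cb(uint32_t interrupt, thread_flavor_t flavor,
	       thread_state_t tstate, mach_msg_type_number_t count)
{
	/* Keep PMI-context work minimal; defer anything heavy to an AST. */
	chudxnu_perfmon_ast_send();
	return KERN_SUCCESS;
}

static void
example_install_pmi_hook(void)
{
	chudxnu_interrupt_callback_enter(example_pmi_cb);
}
#endif
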
#pragma mark **** cpu signal ****
static chudxnu_cpusig_callback_func_t cpusig_callback_fn = NULL;

static kern_return_t
chudxnu_private_cpu_signal_handler(int request)
{
	chudxnu_cpusig_callback_func_t fn = cpusig_callback_fn;

	if (fn) {
		x86_thread_state_t state;
		mach_msg_type_number_t count = x86_THREAD_STATE_COUNT;

		if (chudxnu_thread_get_state(current_thread(),
					     x86_THREAD_STATE,
					     (thread_state_t) &state, &count,
					     FALSE) == KERN_SUCCESS) {
			KERNEL_DEBUG_CONSTANT(
				MACHDBG_CODE(DBG_MACH_CHUD,
					CHUD_CPUSIG_CALLBACK) | DBG_FUNC_NONE,
				(uint32_t)fn, request, 0, 0, 0);
			return (fn)(
				request, x86_THREAD_STATE,
				(thread_state_t) &state, count);
		} else {
			return KERN_FAILURE;
		}
	}
	return KERN_SUCCESS; // ignored
}

/*
 * chudxnu_cpu_signal_handler() is called from the IPI handler
 * when a CHUD signal arrives from another processor.
 */
__private_extern__ void
chudxnu_cpu_signal_handler(void)
{
	chudcpu_signal_request_t	*reqp;
	chudcpu_data_t			*chudinfop;

	chudinfop = (chudcpu_data_t *) current_cpu_datap()->cpu_chud;

	mpdequeue_head(&(chudinfop->cpu_request_queue),
		       (queue_entry_t *) &reqp);
	while (reqp != NULL) {
		chudxnu_private_cpu_signal_handler(reqp->req_code);
		reqp->req_sync = 0;
		mpdequeue_head(&(chudinfop->cpu_request_queue),
			       (queue_entry_t *) &reqp);
	}
}

__private_extern__ kern_return_t
chudxnu_cpusig_callback_enter(chudxnu_cpusig_callback_func_t func)
{
	cpusig_callback_fn = func;
	return KERN_SUCCESS;
}

__private_extern__ kern_return_t
chudxnu_cpusig_callback_cancel(void)
{
	cpusig_callback_fn = NULL;
	return KERN_SUCCESS;
}

__private_extern__ kern_return_t
chudxnu_cpusig_send(int otherCPU, uint32_t request_code)
{
	int				thisCPU;
	kern_return_t			retval = KERN_FAILURE;
	chudcpu_signal_request_t	request;
	uint64_t			deadline;
	chudcpu_data_t			*target_chudp;
	boolean_t			old_level;

	disable_preemption();
	// force interrupts on for a cross CPU signal.
	old_level = chudxnu_set_interrupts_enabled(TRUE);
	thisCPU = cpu_number();

	if ((unsigned) otherCPU < real_ncpus &&
	    thisCPU != otherCPU &&
	    cpu_data_ptr[otherCPU]->cpu_running) {

		target_chudp = (chudcpu_data_t *)
					cpu_data_ptr[otherCPU]->cpu_chud;

		/* Fill out request */
		request.req_sync = 0xFFFFFFFF;		/* set sync flag */
		//request.req_type = CPRQchud;		/* set request type */
		request.req_code = request_code;	/* set request */

		KERNEL_DEBUG_CONSTANT(
			MACHDBG_CODE(DBG_MACH_CHUD,
				CHUD_CPUSIG_SEND) | DBG_FUNC_NONE,
			otherCPU, request_code, 0, 0, 0);

		/*
		 * Insert the new request in the target cpu's request queue
		 * and signal target cpu.
		 */
		mpenqueue_tail(&target_chudp->cpu_request_queue,
			       &request.req_entry);
		i386_signal_cpu(otherCPU, MP_CHUD, ASYNC);

		/* Wait for response or timeout */
		deadline = mach_absolute_time() + LockTimeOut;
		while (request.req_sync != 0) {
			if (mach_absolute_time() > deadline) {
				panic("chudxnu_cpusig_send(%d,%d) timed out\n",
					otherCPU, request_code);
			}
			cpu_pause();
		}
		retval = KERN_SUCCESS;
	} else {
		retval = KERN_INVALID_ARGUMENT;
	}

	chudxnu_set_interrupts_enabled(old_level);
	enable_preemption();
	return retval;
}

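/*
 * Usage sketch (illustrative, not part of the original source): sending a
 * cross-CPU request. chudxnu_cpusig_send() spins, with interrupts enabled,
 * until the target CPU clears req_sync in chudxnu_cpu_signal_handler(),
 * and panics if that takes longer than LockTimeOut. The callback name and
 * request code below are hypothetical.
 */
#if 0
static kern_return_t
example_cpusig_cb(int request, thread_flavor_t flavor,
		  thread_state_t tstate, mach_msg_type_number_t count)
{
	/* Executes on the signaled CPU, from the MP_CHUD IPI path. */
	return KERN_SUCCESS;
}

static void
example_signal_cpu_one(void)
{
	chudxnu_cpusig_callback_enter(example_cpusig_cb);
	(void) chudxnu_cpusig_send(1, 42);	/* CPU 1, request code 42 */
}
#endif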