/*
 * Copyright (c) 2003-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_OSREFERENCE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the
 * License may not be used to create, or enable the creation or
 * redistribution of, unlawful or unlicensed copies of an Apple operating
 * system, or to circumvent, violate, or enable the circumvention or
 * violation of, any terms of an Apple operating system software license
 * agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_OSREFERENCE_HEADER_END@
 */

#include <stdint.h>
#include <mach/boolean.h>
#include <mach/mach_types.h>

#include <kern/kern_types.h>
#include <kern/processor.h>
#include <kern/timer_call.h>
#include <kern/thread_call.h>
#include <kern/kalloc.h>
#include <kern/thread.h>

#include <machine/machine_routines.h>
#include <machine/cpu_data.h>
#include <machine/trap.h>

#include <chud/chud_xnu.h>
#include <chud/chud_xnu_private.h>

#include <i386/misc_protos.h>
#include <i386/mp.h>
#include <i386/machine_cpu.h>

#include <sys/kdebug.h>

#define CHUD_TIMER_CALLBACK_CANCEL  0
#define CHUD_TIMER_CALLBACK_ENTER   1
#define CHUD_TIMER_CALLBACK         2
#define CHUD_AST_SEND               3
#define CHUD_AST_CALLBACK           4
#define CHUD_CPUSIG_SEND            5
#define CHUD_CPUSIG_CALLBACK        6

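/*
 * The constants above are kdebug subcodes. Each trace point in this file
 * combines one with the CHUD class, e.g.
 *
 *         MACHDBG_CODE(DBG_MACH_CHUD, CHUD_AST_SEND) | DBG_FUNC_NONE
 *
 * so a trace tool filtering on DBG_MACH_CHUD can distinguish timer, AST,
 * and cross-CPU signal events by subcode.
 */
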
__private_extern__
void chudxnu_cancel_all_callbacks(void)
{
        chudxnu_cpusig_callback_cancel();
        chudxnu_cpu_timer_callback_cancel_all();
        chudxnu_interrupt_callback_cancel();
        chudxnu_perfmon_ast_callback_cancel();
        chudxnu_kdebug_callback_cancel();
        chudxnu_thread_timer_callback_cancel();
        chudxnu_trap_callback_cancel();
#if XXX /* XXX is never defined: syscall callbacks are compiled out on i386 */
        chudxnu_syscall_callback_cancel();
#endif
}

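/*
 * Per-CPU CHUD state. The boot processor's state is statically allocated,
 * presumably so it is available before kalloc() can be used; all other
 * CPUs get theirs from kalloc(). t_deadline is initialized to all-ones,
 * the same "no deadline armed" sentinel the cancel paths store below.
 */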
static chudcpu_data_t chudcpu_boot_cpu;

void *
chudxnu_cpu_alloc(boolean_t boot_processor)
{
        chudcpu_data_t *chud_proc_info;

        if (boot_processor) {
                chud_proc_info = &chudcpu_boot_cpu;
        } else {
                chud_proc_info = (chudcpu_data_t *)kalloc(sizeof(chudcpu_data_t));
                if (chud_proc_info == NULL) {
                        return NULL;
                }
        }
        bzero((char *)chud_proc_info, sizeof(chudcpu_data_t));
        chud_proc_info->t_deadline = 0xFFFFFFFFFFFFFFFFULL;
        mpqueue_init(&chud_proc_info->cpu_request_queue);

        return (void *)chud_proc_info;
}

void
chudxnu_cpu_free(void *cp)
{
        if (cp == NULL || cp == (void *)&chudcpu_boot_cpu) {
                return; /* nothing to free for the statically allocated boot CPU */
        }
        kfree(cp, sizeof(chudcpu_data_t));
}

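/*
 * Fires when the per-CPU timer_call armed by chudxnu_cpu_timer_callback_enter()
 * expires: with interrupts disabled, snapshot the current thread's register
 * state and hand it to the registered callback, if any.
 */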
static void
chudxnu_private_cpu_timer_callback(
        timer_call_param_t param0,
        timer_call_param_t param1)
{
#pragma unused (param0)
#pragma unused (param1)
        chudcpu_data_t *chud_proc_info;
        boolean_t oldlevel;
        x86_thread_state_t state;
        mach_msg_type_number_t count;
        chudxnu_cpu_timer_callback_func_t fn;

        oldlevel = ml_set_interrupts_enabled(FALSE);
        chud_proc_info = (chudcpu_data_t *)(current_cpu_datap()->cpu_chud);

        count = x86_THREAD_STATE_COUNT;
        if (chudxnu_thread_get_state(current_thread(),
                                     x86_THREAD_STATE,
                                     (thread_state_t)&state,
                                     &count,
                                     FALSE) == KERN_SUCCESS) {
                fn = chud_proc_info->cpu_timer_callback_fn;
                if (fn) {
                        KERNEL_DEBUG_CONSTANT(
                                MACHDBG_CODE(DBG_MACH_CHUD,
                                             CHUD_TIMER_CALLBACK) | DBG_FUNC_NONE,
                                (uint32_t)fn, 0, 0, 0, 0);
                                //state.eip, state.cs, 0, 0);
                        (fn)(
                                x86_THREAD_STATE,
                                (thread_state_t)&state,
                                count);
                }
        }

        ml_set_interrupts_enabled(oldlevel);
}

__private_extern__ kern_return_t
chudxnu_cpu_timer_callback_enter(
        chudxnu_cpu_timer_callback_func_t func,
        uint32_t time,
        uint32_t units)
{
        chudcpu_data_t *chud_proc_info;
        boolean_t oldlevel;

        oldlevel = ml_set_interrupts_enabled(FALSE);
        chud_proc_info = (chudcpu_data_t *)(current_cpu_datap()->cpu_chud);

        /* Cancel any existing callback for this CPU. */
        timer_call_cancel(&(chud_proc_info->cpu_timer_call));

        chud_proc_info->cpu_timer_callback_fn = func;

        clock_interval_to_deadline(time, units, &(chud_proc_info->t_deadline));
        timer_call_setup(&(chud_proc_info->cpu_timer_call),
                         chudxnu_private_cpu_timer_callback, NULL);
        timer_call_enter(&(chud_proc_info->cpu_timer_call),
                         chud_proc_info->t_deadline);

        KERNEL_DEBUG_CONSTANT(
                MACHDBG_CODE(DBG_MACH_CHUD,
                             CHUD_TIMER_CALLBACK_ENTER) | DBG_FUNC_NONE,
                (uint32_t)func, time, units, 0, 0);

        ml_set_interrupts_enabled(oldlevel);
        return KERN_SUCCESS;
}

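/*
 * Usage sketch for chudxnu_cpu_timer_callback_enter(); the callback name and
 * the unit choice are hypothetical, and the signature is inferred from how
 * cpu_timer_callback_fn is invoked above:
 *
 *         static kern_return_t
 *         my_cpu_timer(thread_state_flavor_t flavor, thread_state_t state,
 *                      mach_msg_type_number_t count)
 *         {
 *                 // sample the interrupted state here
 *                 return KERN_SUCCESS;
 *         }
 *
 *         // fire once on this CPU, 10ms from now:
 *         chudxnu_cpu_timer_callback_enter(my_cpu_timer, 10, NSEC_PER_MSEC);
 *
 * The timer is one-shot: chudxnu_private_cpu_timer_callback() does not re-arm
 * it, so a periodic client must call _enter() again from its callback.
 */
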
__private_extern__ kern_return_t
chudxnu_cpu_timer_callback_cancel(void)
{
        chudcpu_data_t *chud_proc_info;
        boolean_t oldlevel;

        oldlevel = ml_set_interrupts_enabled(FALSE);
        chud_proc_info = (chudcpu_data_t *)(current_cpu_datap()->cpu_chud);

        timer_call_cancel(&(chud_proc_info->cpu_timer_call));

        KERNEL_DEBUG_CONSTANT(
                MACHDBG_CODE(DBG_MACH_CHUD,
                             CHUD_TIMER_CALLBACK_CANCEL) | DBG_FUNC_NONE,
                0, 0, 0, 0, 0);

        /* set to max value: */
        chud_proc_info->t_deadline = ~0ULL;
        chud_proc_info->cpu_timer_callback_fn = NULL;

        ml_set_interrupts_enabled(oldlevel);
        return KERN_SUCCESS;
}

__private_extern__ kern_return_t
chudxnu_cpu_timer_callback_cancel_all(void)
{
        unsigned int cpu;
        chudcpu_data_t *chud_proc_info;

        for (cpu = 0; cpu < real_ncpus; cpu++) {
                chud_proc_info = (chudcpu_data_t *)cpu_data_ptr[cpu]->cpu_chud;
                if (chud_proc_info == NULL)
                        continue;
                timer_call_cancel(&(chud_proc_info->cpu_timer_call));
                chud_proc_info->t_deadline = ~0ULL;     /* set to max value */
                chud_proc_info->cpu_timer_callback_fn = NULL;
        }
        return KERN_SUCCESS;
}

#pragma mark **** trap ****
static chudxnu_trap_callback_func_t trap_callback_fn = NULL;

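/*
 * Trap hook, installed into perfTrapHook by chudxnu_trap_callback_enter().
 * Judging from the return values used here, KERN_SUCCESS appears to tell
 * the trap handler that CHUD consumed the trap, while KERN_FAILURE lets
 * normal trap handling proceed; the trap-handler side of that contract
 * lives in the i386 trap code, not in this file.
 */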
static kern_return_t
chudxnu_private_trap_callback(
        int trapno,
        void *regs,
        int unused1,
        int unused2)
{
#pragma unused (regs)
#pragma unused (unused1)
#pragma unused (unused2)
        kern_return_t retval = KERN_FAILURE;
        chudxnu_trap_callback_func_t fn = trap_callback_fn;

        if (fn) {
                boolean_t oldlevel;
                x86_thread_state_t state;  // once there is a 64-bit-independent way
                                           // to determine whether a thread is running
                                           // kernel code, a more specific flavor than
                                           // the generic x86_THREAD_STATE may be used
                mach_msg_type_number_t count;

                oldlevel = ml_set_interrupts_enabled(FALSE);

                count = x86_THREAD_STATE_COUNT;
                if (chudxnu_thread_get_state(current_thread(),
                                             x86_THREAD_STATE,
                                             (thread_state_t)&state,
                                             &count,
                                             FALSE) == KERN_SUCCESS) {
                        retval = (fn)(
                                trapno,
                                x86_THREAD_STATE,
                                (thread_state_t)&state,
                                count);
                }
                ml_set_interrupts_enabled(oldlevel);
        }

        return retval;
}

__private_extern__ kern_return_t
chudxnu_trap_callback_enter(chudxnu_trap_callback_func_t func)
{
        trap_callback_fn = func;
        perfTrapHook = chudxnu_private_trap_callback;
        return KERN_SUCCESS;
}

__private_extern__ kern_return_t
chudxnu_trap_callback_cancel(void)
{
        trap_callback_fn = NULL;
        perfTrapHook = NULL;
        return KERN_SUCCESS;
}

#pragma mark **** ast ****
static chudxnu_perfmon_ast_callback_func_t perfmon_ast_callback_fn = NULL;

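/*
 * AST hook, installed into perfASTHook by chudxnu_perfmon_ast_callback_enter()
 * below. It consumes the pending AST_CHUD / AST_CHUD_URGENT bits posted by
 * chudxnu_perfmon_ast_send_urgent() and, if a callback is registered, delivers
 * the current thread's state to it. Note the final TRUE argument to
 * chudxnu_thread_get_state(): unlike the timer and trap paths, this one asks
 * for user-mode state only.
 */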
static kern_return_t
chudxnu_private_chud_ast_callback(
        int trapno,
        void *regs,
        int unused1,
        int unused2)
{
#pragma unused (trapno)
#pragma unused (regs)
#pragma unused (unused1)
#pragma unused (unused2)
        boolean_t oldlevel = ml_set_interrupts_enabled(FALSE);
        ast_t *myast = ast_pending();
        kern_return_t retval = KERN_FAILURE;
        chudxnu_perfmon_ast_callback_func_t fn = perfmon_ast_callback_fn;

        if (*myast & AST_CHUD_URGENT) {
                /* Consume the urgent CHUD bits; clear AST_URGENT only if no
                 * preemption ASTs remain pending, since they rely on it too. */
                *myast &= ~(AST_CHUD_URGENT | AST_CHUD);
                if ((*myast & AST_PREEMPTION) != AST_PREEMPTION)
                        *myast &= ~(AST_URGENT);
                retval = KERN_SUCCESS;
        } else if (*myast & AST_CHUD) {
                *myast &= ~(AST_CHUD);
                retval = KERN_SUCCESS;
        }

        if (fn) {
                x86_thread_state_t state;
                mach_msg_type_number_t count;
                count = x86_THREAD_STATE_COUNT;

                if (chudxnu_thread_get_state(
                        current_thread(),
                        x86_THREAD_STATE,
                        (thread_state_t)&state, &count,
                        TRUE) == KERN_SUCCESS) {

                        KERNEL_DEBUG_CONSTANT(
                                MACHDBG_CODE(DBG_MACH_CHUD,
                                             CHUD_AST_CALLBACK) | DBG_FUNC_NONE,
                                (uint32_t)fn, 0, 0, 0, 0);

                        (fn)(
                                x86_THREAD_STATE,
                                (thread_state_t)&state,
                                count);
                }
        }

        ml_set_interrupts_enabled(oldlevel);
        return retval;
}

__private_extern__ kern_return_t
chudxnu_perfmon_ast_callback_enter(chudxnu_perfmon_ast_callback_func_t func)
{
        perfmon_ast_callback_fn = func;
        perfASTHook = chudxnu_private_chud_ast_callback;
        return KERN_SUCCESS;
}

__private_extern__ kern_return_t
chudxnu_perfmon_ast_callback_cancel(void)
{
        perfmon_ast_callback_fn = NULL;
        perfASTHook = NULL;
        return KERN_SUCCESS;
}

__private_extern__ kern_return_t
chudxnu_perfmon_ast_send_urgent(boolean_t urgent)
{
        boolean_t oldlevel = ml_set_interrupts_enabled(FALSE);
        ast_t *myast = ast_pending();

        if (urgent) {
                *myast |= (AST_CHUD_URGENT | AST_URGENT);
        } else {
                *myast |= (AST_CHUD);
        }

        KERNEL_DEBUG_CONSTANT(
                MACHDBG_CODE(DBG_MACH_CHUD, CHUD_AST_SEND) | DBG_FUNC_NONE,
                urgent, 0, 0, 0, 0);

        ml_set_interrupts_enabled(oldlevel);
        return KERN_SUCCESS;
}

__private_extern__ kern_return_t
chudxnu_perfmon_ast_send(void)
{
        return chudxnu_perfmon_ast_send_urgent(TRUE);
}

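/*
 * Typical flow, sketched (the handler name is hypothetical): a performance-
 * monitor interrupt handler registered via chudxnu_interrupt_callback_enter()
 * below can do very little at interrupt context, so one common pattern is to
 * post an AST from the PMI path and do the heavy lifting in the AST callback:
 *
 *         static void my_pmi_work(void)
 *         {
 *                 chudxnu_perfmon_ast_send();  // urgent: AST_CHUD_URGENT|AST_URGENT
 *         }
 *
 * The perfmon AST callback registered above then runs with the thread's
 * user-mode state once the pending AST is taken.
 */
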
#pragma mark **** interrupt ****
static chudxnu_interrupt_callback_func_t interrupt_callback_fn = NULL;

static void
chudxnu_private_interrupt_callback(void *foo)
{
#pragma unused (foo)
        chudxnu_interrupt_callback_func_t fn = interrupt_callback_fn;

        if (fn) {
                boolean_t oldlevel;
                x86_thread_state_t state;
                mach_msg_type_number_t count;

                oldlevel = ml_set_interrupts_enabled(FALSE);

                count = x86_THREAD_STATE_COUNT;
                if (chudxnu_thread_get_state(current_thread(),
                                             x86_THREAD_STATE,
                                             (thread_state_t)&state,
                                             &count,
                                             FALSE) == KERN_SUCCESS) {
                        (fn)(
                                X86_INTERRUPT_PERFMON,
                                x86_THREAD_STATE,
                                (thread_state_t)&state,
                                count);
                }
                ml_set_interrupts_enabled(oldlevel);
        }
}

__private_extern__ kern_return_t
chudxnu_interrupt_callback_enter(chudxnu_interrupt_callback_func_t func)
{
        interrupt_callback_fn = func;
        lapic_set_pmi_func((i386_intr_func_t)chudxnu_private_interrupt_callback);
        return KERN_SUCCESS;
}

__private_extern__ kern_return_t
chudxnu_interrupt_callback_cancel(void)
{
        interrupt_callback_fn = NULL;
        lapic_set_pmi_func(NULL);
        return KERN_SUCCESS;
}

#pragma mark **** cpu signal ****
static chudxnu_cpusig_callback_func_t cpusig_callback_fn = NULL;

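/*
 * Cross-CPU signaling is a simple request/acknowledge protocol:
 * chudxnu_cpusig_send() enqueues a chudcpu_signal_request_t on the target
 * CPU's cpu_request_queue and raises an MP_CHUD IPI; the target drains the
 * queue in chudxnu_cpu_signal_handler(), invokes the registered callback
 * for each request, and clears req_sync to acknowledge. The sender spins
 * on req_sync (with a LockTimeOut deadline) because the request structure
 * lives on the sender's stack.
 */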
static kern_return_t
chudxnu_private_cpu_signal_handler(int request)
{
        chudxnu_cpusig_callback_func_t fn = cpusig_callback_fn;

        if (fn) {
                x86_thread_state_t state;
                mach_msg_type_number_t count = x86_THREAD_STATE_COUNT;

                if (chudxnu_thread_get_state(current_thread(),
                                             x86_THREAD_STATE,
                                             (thread_state_t)&state, &count,
                                             FALSE) == KERN_SUCCESS) {
                        KERNEL_DEBUG_CONSTANT(
                                MACHDBG_CODE(DBG_MACH_CHUD,
                                             CHUD_CPUSIG_CALLBACK) | DBG_FUNC_NONE,
                                (uint32_t)fn, request, 0, 0, 0);
                        return (fn)(
                                request, x86_THREAD_STATE,
                                (thread_state_t)&state, count);
                } else {
                        return KERN_FAILURE;
                }
        }
        return KERN_SUCCESS; /* return value ignored */
}

/*
 * chudxnu_cpu_signal_handler() is called from the IPI handler
 * when a CHUD signal arrives from another processor.
 */
__private_extern__ void
chudxnu_cpu_signal_handler(void)
{
        chudcpu_signal_request_t *reqp;
        chudcpu_data_t *chudinfop;

        chudinfop = (chudcpu_data_t *)current_cpu_datap()->cpu_chud;

        mpdequeue_head(&(chudinfop->cpu_request_queue),
                       (queue_entry_t *)&reqp);
        while (reqp != NULL) {
                chudxnu_private_cpu_signal_handler(reqp->req_code);
                reqp->req_sync = 0;     /* acknowledge; lets the sender stop spinning */
                mpdequeue_head(&(chudinfop->cpu_request_queue),
                               (queue_entry_t *)&reqp);
        }
}

__private_extern__ kern_return_t
chudxnu_cpusig_callback_enter(chudxnu_cpusig_callback_func_t func)
{
        cpusig_callback_fn = func;
        return KERN_SUCCESS;
}

__private_extern__ kern_return_t
chudxnu_cpusig_callback_cancel(void)
{
        cpusig_callback_fn = NULL;
        return KERN_SUCCESS;
}

__private_extern__ kern_return_t
chudxnu_cpusig_send(int otherCPU, uint32_t request_code)
{
        int thisCPU;
        kern_return_t retval = KERN_FAILURE;
        chudcpu_signal_request_t request;
        uint64_t deadline;
        chudcpu_data_t *target_chudp;
        boolean_t old_level;

        disable_preemption();
        /* Force interrupts on for a cross-CPU signal. */
        old_level = chudxnu_set_interrupts_enabled(TRUE);
        thisCPU = cpu_number();

        if ((unsigned)otherCPU < real_ncpus &&
            thisCPU != otherCPU &&
            cpu_data_ptr[otherCPU]->cpu_running) {

                target_chudp = (chudcpu_data_t *)
                        cpu_data_ptr[otherCPU]->cpu_chud;

                /* Fill out the request. */
                request.req_sync = 0xFFFFFFFF;          /* set sync flag */
                //request.req_type = CPRQchud;          /* set request type */
                request.req_code = request_code;        /* set request */

                KERNEL_DEBUG_CONSTANT(
                        MACHDBG_CODE(DBG_MACH_CHUD,
                                     CHUD_CPUSIG_SEND) | DBG_FUNC_NONE,
                        otherCPU, request_code, 0, 0, 0);

                /*
                 * Insert the new request in the target cpu's request queue
                 * and signal the target cpu.
                 */
                mpenqueue_tail(&target_chudp->cpu_request_queue,
                               &request.req_entry);
                i386_signal_cpu(otherCPU, MP_CHUD, ASYNC);

                /* Wait for a response or timeout. */
                deadline = mach_absolute_time() + LockTimeOut;
                while (request.req_sync != 0) {
                        if (mach_absolute_time() > deadline) {
                                panic("chudxnu_cpusig_send(%d,%d) timed out\n",
                                      otherCPU, request_code);
                        }
                        cpu_pause();
                }
                retval = KERN_SUCCESS;
        } else {
                retval = KERN_INVALID_ARGUMENT;
        }

        chudxnu_set_interrupts_enabled(old_level);
        enable_preemption();
        return retval;
}

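/*
 * Usage sketch (the callback name is hypothetical; the signature is inferred
 * from the invocation in chudxnu_private_cpu_signal_handler() above):
 *
 *         static kern_return_t
 *         my_cpusig_cb(int request, thread_state_flavor_t flavor,
 *                      thread_state_t state, mach_msg_type_number_t count)
 *         {
 *                 // act on behalf of the sending CPU
 *                 return KERN_SUCCESS;
 *         }
 *
 *         chudxnu_cpusig_callback_enter(my_cpusig_cb);
 *         chudxnu_cpusig_send(1, 42);     // run the callback on CPU 1
 */
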
#ifdef XXX /* never defined: leftover PPC syscall hook, disabled on i386 */
#pragma mark **** CHUD syscall (PPC) ****

typedef int (*PPCcallEnt)(struct savearea *save);
extern PPCcallEnt PPCcalls[];

static chudxnu_syscall_callback_func_t syscall_callback_fn = NULL;

static int
chudxnu_private_syscall_callback(struct savearea *ssp)
{
        if (ssp) {
                if (syscall_callback_fn) {
                        struct ppc_thread_state64 state;
                        kern_return_t retval;
                        mach_msg_type_number_t count = PPC_THREAD_STATE64_COUNT;
                        chudxnu_copy_savearea_to_threadstate(PPC_THREAD_STATE64,
                                                             (thread_state_t)&state,
                                                             &count, ssp);
                        ssp->save_r3 = (syscall_callback_fn)(PPC_THREAD_STATE64,
                                                             (thread_state_t)&state,
                                                             count);
                } else {
                        ssp->save_r3 = KERN_FAILURE;
                }
        }

        return 1; /* check for ASTs (always) */
}

__private_extern__ kern_return_t
chudxnu_syscall_callback_enter(chudxnu_syscall_callback_func_t func)
{
        syscall_callback_fn = func;
        PPCcalls[9] = chudxnu_private_syscall_callback;
        __asm__ volatile("eieio");      /* force order */
        __asm__ volatile("sync");       /* force to memory */
        return KERN_SUCCESS;
}

__private_extern__ kern_return_t
chudxnu_syscall_callback_cancel(void)
{
        syscall_callback_fn = NULL;
        PPCcalls[9] = NULL;
        __asm__ volatile("eieio");      /* force order */
        __asm__ volatile("sync");       /* force to memory */
        return KERN_SUCCESS;
}
#endif /* XXX */