/*
 * osfmk/chud/i386/chud_osfmk_callback_i386.c
 * (apple/xnu.git, release xnu-792.22.5)
 */
/*
 * Copyright (c) 2003-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <stdint.h>
#include <mach/boolean.h>
#include <mach/mach_types.h>

#include <kern/kern_types.h>
#include <kern/processor.h>
#include <kern/timer_call.h>
#include <kern/thread_call.h>
#include <kern/kalloc.h>
#include <kern/thread.h>

#include <machine/machine_routines.h>
#include <machine/cpu_data.h>
#include <machine/trap.h>

#include <chud/chud_xnu.h>
#include <chud/chud_xnu_private.h>

#include <i386/misc_protos.h>
#include <i386/mp.h>
#include <i386/machine_cpu.h>

#include <sys/kdebug.h>

/* kdebug sub-codes for the MACHDBG_CODE(DBG_MACH_CHUD, ...) trace points below */
#define CHUD_TIMER_CALLBACK_CANCEL  0
#define CHUD_TIMER_CALLBACK_ENTER   1
#define CHUD_TIMER_CALLBACK         2
#define CHUD_AST_SEND               3
#define CHUD_AST_CALLBACK           4
#define CHUD_CPUSIG_SEND            5
#define CHUD_CPUSIG_CALLBACK        6

__private_extern__
void chudxnu_cancel_all_callbacks(void)
{
    chudxnu_cpusig_callback_cancel();
    chudxnu_cpu_timer_callback_cancel_all();
    chudxnu_interrupt_callback_cancel();
    chudxnu_perfmon_ast_callback_cancel();
    chudxnu_kdebug_callback_cancel();
    chudxnu_thread_timer_callback_cancel();
    chudxnu_trap_callback_cancel();
#if XXX
    chudxnu_syscall_callback_cancel();
#endif
}

static chudcpu_data_t chudcpu_boot_cpu;

void *
chudxnu_cpu_alloc(boolean_t boot_processor)
{
    chudcpu_data_t *chud_proc_info;

    if (boot_processor) {
        chud_proc_info = &chudcpu_boot_cpu;
    } else {
        chud_proc_info = (chudcpu_data_t *)
            kalloc(sizeof(chudcpu_data_t));
        if (chud_proc_info == NULL) {
            return NULL;
        }
    }
    bzero((char *)chud_proc_info, sizeof(chudcpu_data_t));
    chud_proc_info->t_deadline = 0xFFFFFFFFFFFFFFFFULL;
    mpqueue_init(&chud_proc_info->cpu_request_queue);

    return (void *)chud_proc_info;
}
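
/*
 * Illustrative usage (not part of the original file): a minimal sketch of
 * attaching per-CPU CHUD state during processor bring-up. The cpu_chud
 * field matches its use elsewhere in this file; the call site and the
 * function name example_attach_chud are hypothetical.
 */
#if 0   /* example only */
static void
example_attach_chud(cpu_data_t *cdp, boolean_t is_boot_cpu)
{
    void *chud = chudxnu_cpu_alloc(is_boot_cpu);
    if (chud != NULL) {
        /* consumed below via current_cpu_datap()->cpu_chud */
        cdp->cpu_chud = chud;
    }
}
#endif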

void
chudxnu_cpu_free(void *cp)
{
    if (cp == NULL || cp == (void *)&chudcpu_boot_cpu) {
        return;     /* the boot CPU's state is statically allocated */
    } else {
        kfree(cp, sizeof(chudcpu_data_t));
    }
}

static void
chudxnu_private_cpu_timer_callback(
    timer_call_param_t param0,
    timer_call_param_t param1)
{
#pragma unused (param0)
#pragma unused (param1)
    chudcpu_data_t *chud_proc_info;
    boolean_t oldlevel;
    x86_thread_state_t state;
    mach_msg_type_number_t count;
    chudxnu_cpu_timer_callback_func_t fn;

    oldlevel = ml_set_interrupts_enabled(FALSE);
    chud_proc_info = (chudcpu_data_t *)(current_cpu_datap()->cpu_chud);

    count = x86_THREAD_STATE_COUNT;
    if (chudxnu_thread_get_state(current_thread(),
                                 x86_THREAD_STATE,
                                 (thread_state_t)&state,
                                 &count,
                                 FALSE) == KERN_SUCCESS) {
        fn = chud_proc_info->cpu_timer_callback_fn;
        if (fn) {
            KERNEL_DEBUG_CONSTANT(
                MACHDBG_CODE(DBG_MACH_CHUD,
                             CHUD_TIMER_CALLBACK) | DBG_FUNC_NONE,
                (uint32_t)fn, 0, 0, 0, 0);
                //state.eip, state.cs, 0, 0);
            (fn)(
                x86_THREAD_STATE,
                (thread_state_t)&state,
                count);
        }
    }

    ml_set_interrupts_enabled(oldlevel);
}

__private_extern__ kern_return_t
chudxnu_cpu_timer_callback_enter(
    chudxnu_cpu_timer_callback_func_t func,
    uint32_t time,
    uint32_t units)
{
    chudcpu_data_t *chud_proc_info;
    boolean_t oldlevel;

    oldlevel = ml_set_interrupts_enabled(FALSE);
    chud_proc_info = (chudcpu_data_t *)(current_cpu_datap()->cpu_chud);

    // cancel any existing callback for this cpu
    timer_call_cancel(&(chud_proc_info->cpu_timer_call));

    chud_proc_info->cpu_timer_callback_fn = func;

    clock_interval_to_deadline(time, units, &(chud_proc_info->t_deadline));
    timer_call_setup(&(chud_proc_info->cpu_timer_call),
                     chudxnu_private_cpu_timer_callback, NULL);
    timer_call_enter(&(chud_proc_info->cpu_timer_call),
                     chud_proc_info->t_deadline);

    KERNEL_DEBUG_CONSTANT(
        MACHDBG_CODE(DBG_MACH_CHUD,
                     CHUD_TIMER_CALLBACK_ENTER) | DBG_FUNC_NONE,
        (uint32_t)func, time, units, 0, 0);

    ml_set_interrupts_enabled(oldlevel);
    return KERN_SUCCESS;
}
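
/*
 * Illustrative usage (not part of the original file): a minimal sketch of
 * arming a one-shot timer callback on the current CPU. The callback
 * signature is inferred from how cpu_timer_callback_fn is invoked above
 * (flavor, state, count); example_timer_cb and the interval are
 * hypothetical, and the units value assumes the scale-factor convention of
 * clock_interval_to_deadline() (nanoseconds per tick).
 */
#if 0   /* example only */
static kern_return_t
example_timer_cb(thread_flavor_t flavor,
                 thread_state_t tstate,
                 mach_msg_type_number_t count)
{
    /* sample the interrupted thread's state here */
    return KERN_SUCCESS;
}

static void
example_arm_timer(void)
{
    /* fire once, 100 ms from now, on the CPU this runs on */
    chudxnu_cpu_timer_callback_enter(example_timer_cb, 100, 1000 * 1000);
}
#endif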

__private_extern__ kern_return_t
chudxnu_cpu_timer_callback_cancel(void)
{
    chudcpu_data_t *chud_proc_info;
    boolean_t oldlevel;

    oldlevel = ml_set_interrupts_enabled(FALSE);
    chud_proc_info = (chudcpu_data_t *)(current_cpu_datap()->cpu_chud);

    timer_call_cancel(&(chud_proc_info->cpu_timer_call));

    KERNEL_DEBUG_CONSTANT(
        MACHDBG_CODE(DBG_MACH_CHUD,
                     CHUD_TIMER_CALLBACK_CANCEL) | DBG_FUNC_NONE,
        0, 0, 0, 0, 0);

    // set the deadline to the maximum value (x |= ~x sets all bits)
    chud_proc_info->t_deadline |= ~(chud_proc_info->t_deadline);
    chud_proc_info->cpu_timer_callback_fn = NULL;

    ml_set_interrupts_enabled(oldlevel);
    return KERN_SUCCESS;
}

__private_extern__ kern_return_t
chudxnu_cpu_timer_callback_cancel_all(void)
{
    unsigned int cpu;
    chudcpu_data_t *chud_proc_info;

    for (cpu = 0; cpu < real_ncpus; cpu++) {
        chud_proc_info = (chudcpu_data_t *) cpu_data_ptr[cpu]->cpu_chud;
        if (chud_proc_info == NULL)
            continue;
        timer_call_cancel(&(chud_proc_info->cpu_timer_call));
        // reset the deadline to the maximum value, as in cancel above
        chud_proc_info->t_deadline |= ~(chud_proc_info->t_deadline);
        chud_proc_info->cpu_timer_callback_fn = NULL;
    }
    return KERN_SUCCESS;
}

#pragma mark **** trap ****
static chudxnu_trap_callback_func_t trap_callback_fn = NULL;

static kern_return_t
chudxnu_private_trap_callback(
    int trapno,
    void *regs,
    int unused1,
    int unused2)
{
#pragma unused (regs)
#pragma unused (unused1)
#pragma unused (unused2)
    kern_return_t retval = KERN_FAILURE;
    chudxnu_trap_callback_func_t fn = trap_callback_fn;

    if (fn) {
        boolean_t oldlevel;
        x86_thread_state_t state; // once we have a 64-bit-independent way to determine
                                  // whether a thread is running kernel code, switch to it here.
        mach_msg_type_number_t count;

        oldlevel = ml_set_interrupts_enabled(FALSE);

        count = x86_THREAD_STATE_COUNT;
        if (chudxnu_thread_get_state(current_thread(),
                                     x86_THREAD_STATE,
                                     (thread_state_t)&state,
                                     &count,
                                     FALSE) == KERN_SUCCESS) {
            retval = (fn)(
                trapno,
                x86_THREAD_STATE,
                (thread_state_t)&state,
                count);
        }
        ml_set_interrupts_enabled(oldlevel);
    }

    return retval;
}

__private_extern__ kern_return_t
chudxnu_trap_callback_enter(chudxnu_trap_callback_func_t func)
{
    trap_callback_fn = func;
    perfTrapHook = chudxnu_private_trap_callback;
    return KERN_SUCCESS;
}

__private_extern__ kern_return_t
chudxnu_trap_callback_cancel(void)
{
    trap_callback_fn = NULL;
    perfTrapHook = NULL;
    return KERN_SUCCESS;
}
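
/*
 * Illustrative usage (not part of the original file): a minimal sketch of a
 * trap callback. The signature is inferred from how trap_callback_fn is
 * invoked above (trapno, flavor, state, count); example_trap_cb is
 * hypothetical. Judging by the KERN_FAILURE default above, returning
 * KERN_FAILURE lets the kernel handle the trap normally, while KERN_SUCCESS
 * presumably claims it.
 */
#if 0   /* example only */
static kern_return_t
example_trap_cb(uint32_t trapno,
                thread_flavor_t flavor,
                thread_state_t tstate,
                mach_msg_type_number_t count)
{
    /* inspect the trap here; claim it only if it was ours */
    return KERN_FAILURE;
}

static void
example_hook_traps(void)
{
    chudxnu_trap_callback_enter(example_trap_cb);
    /* ... later ... */
    chudxnu_trap_callback_cancel();
}
#endif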

#pragma mark **** ast ****
static
chudxnu_perfmon_ast_callback_func_t perfmon_ast_callback_fn = NULL;

static kern_return_t
chudxnu_private_chud_ast_callback(
    int trapno,
    void *regs,
    int unused1,
    int unused2)
{
#pragma unused (trapno)
#pragma unused (regs)
#pragma unused (unused1)
#pragma unused (unused2)
    boolean_t oldlevel = ml_set_interrupts_enabled(FALSE);
    ast_t *myast = ast_pending();
    kern_return_t retval = KERN_FAILURE;
    chudxnu_perfmon_ast_callback_func_t fn = perfmon_ast_callback_fn;

    if (*myast & AST_CHUD_URGENT) {
        *myast &= ~(AST_CHUD_URGENT | AST_CHUD);
        // only clear AST_URGENT if preemption isn't also pending on it
        if ((*myast & AST_PREEMPTION) != AST_PREEMPTION)
            *myast &= ~(AST_URGENT);
        retval = KERN_SUCCESS;
    } else if (*myast & AST_CHUD) {
        *myast &= ~(AST_CHUD);
        retval = KERN_SUCCESS;
    }

    if (fn) {
        x86_thread_state_t state;
        mach_msg_type_number_t count;
        count = x86_THREAD_STATE_COUNT;

        if (chudxnu_thread_get_state(
                current_thread(),
                x86_THREAD_STATE,
                (thread_state_t)&state, &count,
                TRUE) == KERN_SUCCESS) {

            KERNEL_DEBUG_CONSTANT(
                MACHDBG_CODE(DBG_MACH_CHUD,
                             CHUD_AST_CALLBACK) | DBG_FUNC_NONE,
                (uint32_t)fn, 0, 0, 0, 0);

            (fn)(
                x86_THREAD_STATE,
                (thread_state_t)&state,
                count);
        }
    }

    ml_set_interrupts_enabled(oldlevel);
    return retval;
}

__private_extern__ kern_return_t
chudxnu_perfmon_ast_callback_enter(chudxnu_perfmon_ast_callback_func_t func)
{
    perfmon_ast_callback_fn = func;
    perfASTHook = chudxnu_private_chud_ast_callback;
    return KERN_SUCCESS;
}

__private_extern__ kern_return_t
chudxnu_perfmon_ast_callback_cancel(void)
{
    perfmon_ast_callback_fn = NULL;
    perfASTHook = NULL;
    return KERN_SUCCESS;
}

__private_extern__ kern_return_t
chudxnu_perfmon_ast_send_urgent(boolean_t urgent)
{
    boolean_t oldlevel = ml_set_interrupts_enabled(FALSE);
    ast_t *myast = ast_pending();

    if (urgent) {
        *myast |= (AST_CHUD_URGENT | AST_URGENT);
    } else {
        *myast |= (AST_CHUD);
    }

    KERNEL_DEBUG_CONSTANT(
        MACHDBG_CODE(DBG_MACH_CHUD, CHUD_AST_SEND) | DBG_FUNC_NONE,
        urgent, 0, 0, 0, 0);

    ml_set_interrupts_enabled(oldlevel);
    return KERN_SUCCESS;
}

__private_extern__ kern_return_t
chudxnu_perfmon_ast_send(void)
{
    return chudxnu_perfmon_ast_send_urgent(TRUE);
}
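
/*
 * Illustrative usage (not part of the original file): the usual pattern is
 * to do minimal work at interrupt level and request an AST so the perfmon
 * callback runs at a safer point. Only the chudxnu_* calls come from this
 * file; example_ast_cb and example_request_sample are hypothetical, with the
 * callback signature inferred from the invocation above.
 */
#if 0   /* example only */
static kern_return_t
example_ast_cb(thread_flavor_t flavor,
               thread_state_t tstate,
               mach_msg_type_number_t count)
{
    /* record a sample of the current thread here */
    return KERN_SUCCESS;
}

static void
example_request_sample(void)
{
    chudxnu_perfmon_ast_callback_enter(example_ast_cb);
    chudxnu_perfmon_ast_send();     /* equivalent to send_urgent(TRUE) */
}
#endif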

#pragma mark **** interrupt ****
static chudxnu_interrupt_callback_func_t interrupt_callback_fn = NULL;

static void
chudxnu_private_interrupt_callback(void *foo)
{
#pragma unused (foo)
    chudxnu_interrupt_callback_func_t fn = interrupt_callback_fn;

    if (fn) {
        boolean_t oldlevel;
        x86_thread_state_t state;
        mach_msg_type_number_t count;

        oldlevel = ml_set_interrupts_enabled(FALSE);

        count = x86_THREAD_STATE_COUNT;
        if (chudxnu_thread_get_state(current_thread(),
                                     x86_THREAD_STATE,
                                     (thread_state_t)&state,
                                     &count,
                                     FALSE) == KERN_SUCCESS) {
            (fn)(
                X86_INTERRUPT_PERFMON,
                x86_THREAD_STATE,
                (thread_state_t)&state,
                count);
        }
        ml_set_interrupts_enabled(oldlevel);
    }
}

__private_extern__ kern_return_t
chudxnu_interrupt_callback_enter(chudxnu_interrupt_callback_func_t func)
{
    interrupt_callback_fn = func;
    lapic_set_pmi_func((i386_intr_func_t)chudxnu_private_interrupt_callback);
    return KERN_SUCCESS;
}

__private_extern__ kern_return_t
chudxnu_interrupt_callback_cancel(void)
{
    interrupt_callback_fn = NULL;
    lapic_set_pmi_func(NULL);
    return KERN_SUCCESS;
}
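
/*
 * Illustrative usage (not part of the original file): a PMI (performance
 * monitor interrupt) handler registered through this API. Because it runs
 * in interrupt context, a common pattern is to defer heavier work via
 * chudxnu_perfmon_ast_send_urgent(). example_pmi_cb is hypothetical and its
 * signature is inferred from the invocation above.
 */
#if 0   /* example only */
static kern_return_t
example_pmi_cb(uint32_t trapentry,
               thread_flavor_t flavor,
               thread_state_t tstate,
               mach_msg_type_number_t count)
{
    /* trapentry is X86_INTERRUPT_PERFMON here; defer real work to the AST */
    chudxnu_perfmon_ast_send_urgent(TRUE);
    return KERN_SUCCESS;
}

static void
example_hook_pmi(void)
{
    chudxnu_interrupt_callback_enter(example_pmi_cb);
}
#endif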

#pragma mark **** cpu signal ****
static chudxnu_cpusig_callback_func_t cpusig_callback_fn = NULL;

static kern_return_t
chudxnu_private_cpu_signal_handler(int request)
{
    chudxnu_cpusig_callback_func_t fn = cpusig_callback_fn;

    if (fn) {
        x86_thread_state_t state;
        mach_msg_type_number_t count = x86_THREAD_STATE_COUNT;

        if (chudxnu_thread_get_state(current_thread(),
                                     x86_THREAD_STATE,
                                     (thread_state_t)&state, &count,
                                     FALSE) == KERN_SUCCESS) {
            KERNEL_DEBUG_CONSTANT(
                MACHDBG_CODE(DBG_MACH_CHUD,
                             CHUD_CPUSIG_CALLBACK) | DBG_FUNC_NONE,
                (uint32_t)fn, request, 0, 0, 0);
            return (fn)(
                request, x86_THREAD_STATE,
                (thread_state_t)&state, count);
        } else {
            return KERN_FAILURE;
        }
    }
    return KERN_SUCCESS;    // ignored
}

/*
 * chudxnu_cpu_signal_handler() is called from the IPI handler
 * when a CHUD signal arrives from another processor.
 */
__private_extern__ void
chudxnu_cpu_signal_handler(void)
{
    chudcpu_signal_request_t *reqp;
    chudcpu_data_t *chudinfop;

    chudinfop = (chudcpu_data_t *) current_cpu_datap()->cpu_chud;

    // drain this cpu's request queue, clearing each sender's sync flag
    mpdequeue_head(&(chudinfop->cpu_request_queue),
                   (queue_entry_t *) &reqp);
    while (reqp != NULL) {
        chudxnu_private_cpu_signal_handler(reqp->req_code);
        reqp->req_sync = 0;
        mpdequeue_head(&(chudinfop->cpu_request_queue),
                       (queue_entry_t *) &reqp);
    }
}

__private_extern__ kern_return_t
chudxnu_cpusig_callback_enter(chudxnu_cpusig_callback_func_t func)
{
    cpusig_callback_fn = func;
    return KERN_SUCCESS;
}

__private_extern__ kern_return_t
chudxnu_cpusig_callback_cancel(void)
{
    cpusig_callback_fn = NULL;
    return KERN_SUCCESS;
}

__private_extern__ kern_return_t
chudxnu_cpusig_send(int otherCPU, uint32_t request_code)
{
    int thisCPU;
    kern_return_t retval = KERN_FAILURE;
    chudcpu_signal_request_t request;
    uint64_t deadline;
    chudcpu_data_t *target_chudp;
    boolean_t old_level;

    disable_preemption();
    // force interrupts on for a cross CPU signal.
    old_level = chudxnu_set_interrupts_enabled(TRUE);
    thisCPU = cpu_number();

    if ((unsigned) otherCPU < real_ncpus &&
        thisCPU != otherCPU &&
        cpu_data_ptr[otherCPU]->cpu_running) {

        target_chudp = (chudcpu_data_t *)
            cpu_data_ptr[otherCPU]->cpu_chud;

        /* Fill out request */
        request.req_sync = 0xFFFFFFFF;       /* set sync flag */
        //request.req_type = CPRQchud;       /* set request type */
        request.req_code = request_code;     /* set request */

        KERNEL_DEBUG_CONSTANT(
            MACHDBG_CODE(DBG_MACH_CHUD,
                         CHUD_CPUSIG_SEND) | DBG_FUNC_NONE,
            otherCPU, request_code, 0, 0, 0);

        /*
         * Insert the new request in the target cpu's request queue
         * and signal target cpu.
         */
        mpenqueue_tail(&target_chudp->cpu_request_queue,
                       &request.req_entry);
        i386_signal_cpu(otherCPU, MP_CHUD, ASYNC);

        /* Wait for response or timeout */
        deadline = mach_absolute_time() + LockTimeOut;
        while (request.req_sync != 0) {
            if (mach_absolute_time() > deadline) {
                panic("chudxnu_cpusig_send(%d,%d) timed out\n",
                      otherCPU, request_code);
            }
            cpu_pause();
        }
        retval = KERN_SUCCESS;
    } else {
        retval = KERN_INVALID_ARGUMENT;
    }

    chudxnu_set_interrupts_enabled(old_level);
    enable_preemption();
    return retval;
}
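
/*
 * Illustrative usage (not part of the original file): a minimal sketch of
 * sending a request code to every other running CPU. The callback signature
 * is inferred from how cpusig_callback_fn is invoked above; the names
 * example_cpusig_cb, example_signal_all, and EXAMPLE_REQUEST are
 * hypothetical.
 */
#if 0   /* example only */
#define EXAMPLE_REQUEST 1

static kern_return_t
example_cpusig_cb(int request, thread_flavor_t flavor,
                  thread_state_t tstate, mach_msg_type_number_t count)
{
    /* runs on the signaled CPU, from the MP_CHUD IPI path */
    return KERN_SUCCESS;
}

static void
example_signal_all(void)
{
    unsigned int cpu;

    chudxnu_cpusig_callback_enter(example_cpusig_cb);
    for (cpu = 0; cpu < real_ncpus; cpu++) {
        if ((int) cpu != cpu_number())
            (void) chudxnu_cpusig_send(cpu, EXAMPLE_REQUEST);
    }
}
#endif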

#ifdef XXX
#pragma mark **** CHUD syscall (PPC) ****

typedef int (*PPCcallEnt)(struct savearea *save);
extern PPCcallEnt PPCcalls[];

static chudxnu_syscall_callback_func_t syscall_callback_fn = NULL;

static int
chudxnu_private_syscall_callback(struct savearea *ssp)
{
    if (ssp) {
        if (syscall_callback_fn) {
            struct ppc_thread_state64 state;
            kern_return_t retval;
            mach_msg_type_number_t count = PPC_THREAD_STATE64_COUNT;
            chudxnu_copy_savearea_to_threadstate(PPC_THREAD_STATE64,
                                                 (thread_state_t)&state,
                                                 &count, ssp);
            ssp->save_r3 = (syscall_callback_fn)(PPC_THREAD_STATE64,
                                                 (thread_state_t)&state,
                                                 count);
        } else {
            ssp->save_r3 = KERN_FAILURE;
        }
    }

    return 1;   // check for ASTs (always)
}

__private_extern__ kern_return_t
chudxnu_syscall_callback_enter(chudxnu_syscall_callback_func_t func)
{
    syscall_callback_fn = func;
    PPCcalls[9] = chudxnu_private_syscall_callback;
    __asm__ volatile("eieio");  /* force order */
    __asm__ volatile("sync");   /* force to memory */
    return KERN_SUCCESS;
}

__private_extern__ kern_return_t
chudxnu_syscall_callback_cancel(void)
{
    syscall_callback_fn = NULL;
    PPCcalls[9] = NULL;
    __asm__ volatile("eieio");  /* force order */
    __asm__ volatile("sync");   /* force to memory */
    return KERN_SUCCESS;
}
#endif  /* XXX */