/*
 * Copyright (c) 2003-2009 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <stdint.h>
#include <mach/boolean.h>
#include <mach/mach_types.h>

#include <kern/kern_types.h>
#include <kern/processor.h>
#include <kern/timer_call.h>
#include <kern/thread_call.h>
#include <kern/kalloc.h>
#include <kern/thread.h>

#include <libkern/OSAtomic.h>

#include <machine/machine_routines.h>
#include <machine/cpu_data.h>
#include <machine/trap.h>

#include <chud/chud_xnu.h>
#include <chud/chud_xnu_private.h>

#include <i386/misc_protos.h>
#include <i386/lapic.h>
#include <i386/mp.h>
#include <i386/machine_cpu.h>

#include <sys/kdebug.h>
#define CHUD_TIMER_CALLBACK_CANCEL	0
#define CHUD_TIMER_CALLBACK_ENTER	1
#define CHUD_TIMER_CALLBACK		2
#define CHUD_AST_SEND			3
#define CHUD_AST_CALLBACK		4
#define CHUD_CPUSIG_SEND		5
#define CHUD_CPUSIG_CALLBACK		6

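/* Cancel every registered CHUD callback: cpu signal, per-cpu timer, PMI interrupt, and perfmon AST. */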
__private_extern__
void chudxnu_cancel_all_callbacks(void)
{
	chudxnu_cpusig_callback_cancel();
	chudxnu_cpu_timer_callback_cancel_all();
	chudxnu_interrupt_callback_cancel();
	chudxnu_perfmon_ast_callback_cancel();
}

static lck_grp_t	chud_request_lck_grp;
static lck_grp_attr_t	chud_request_lck_grp_attr;
static lck_attr_t	chud_request_lck_attr;


static chudcpu_data_t chudcpu_boot_cpu;
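/*
 * Allocate (or, for the boot processor, statically provide) the per-cpu
 * CHUD state, then initialize its request queue and timer call.
 */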
void *
chudxnu_cpu_alloc(boolean_t boot_processor)
{
	chudcpu_data_t	*chud_proc_info;

	if (boot_processor) {
		chud_proc_info = &chudcpu_boot_cpu;

		lck_attr_setdefault(&chud_request_lck_attr);
		lck_grp_attr_setdefault(&chud_request_lck_grp_attr);
		lck_grp_init(&chud_request_lck_grp, "chud_request", &chud_request_lck_grp_attr);

	} else {
		chud_proc_info = (chudcpu_data_t *)
			kalloc(sizeof(chudcpu_data_t));
		if (chud_proc_info == (chudcpu_data_t *)NULL) {
			return (void *)NULL;
		}
	}
	bzero((char *)chud_proc_info, sizeof(chudcpu_data_t));
	chud_proc_info->t_deadline = 0xFFFFFFFFFFFFFFFFULL;

	mpqueue_init(&chud_proc_info->cpu_request_queue, &chud_request_lck_grp, &chud_request_lck_attr);

	/* timer_call_cancel() can be called before first usage, so init here: <rdar://problem/9320202> */
	timer_call_setup(&(chud_proc_info->cpu_timer_call), NULL, NULL);


	return (void *)chud_proc_info;
}

void
chudxnu_cpu_free(void *cp)
{
	if (cp == NULL || cp == (void *)&chudcpu_boot_cpu) {
		return;
	} else {
		kfree(cp, sizeof(chudcpu_data_t));
	}
}

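/*
 * Timer-call handler: with interrupts disabled, samples the current thread's
 * register state and hands it to the registered per-cpu timer callback.
 */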
static void
chudxnu_private_cpu_timer_callback(
	timer_call_param_t param0,
	timer_call_param_t param1)
{
#pragma unused (param0)
#pragma unused (param1)
	chudcpu_data_t			*chud_proc_info;
	boolean_t			oldlevel;
	x86_thread_state_t		state;
	mach_msg_type_number_t		count;
	chudxnu_cpu_timer_callback_func_t fn;

	oldlevel = ml_set_interrupts_enabled(FALSE);
	chud_proc_info = (chudcpu_data_t *)(current_cpu_datap()->cpu_chud);

	count = x86_THREAD_STATE_COUNT;
	if (chudxnu_thread_get_state(current_thread(),
				x86_THREAD_STATE,
				(thread_state_t)&state,
				&count,
				FALSE) == KERN_SUCCESS) {
		fn = chud_proc_info->cpu_timer_callback_fn;
		if (fn) {
			(fn)(
				x86_THREAD_STATE,
				(thread_state_t)&state,
				count);
		}
	}

	ml_set_interrupts_enabled(oldlevel);
}

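/*
 * Arm a one-shot timer callback on the current cpu: any existing callback is
 * cancelled first, then the deadline is computed from (time, units) and the
 * timer call is queued as local and SYS_CRITICAL.
 */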
__private_extern__ kern_return_t
chudxnu_cpu_timer_callback_enter(
	chudxnu_cpu_timer_callback_func_t func,
	uint32_t time,
	uint32_t units)
{
	chudcpu_data_t	*chud_proc_info;
	boolean_t	oldlevel;

	oldlevel = ml_set_interrupts_enabled(FALSE);
	chud_proc_info = (chudcpu_data_t *)(current_cpu_datap()->cpu_chud);

	// cancel any existing callback for this cpu
	timer_call_cancel(&(chud_proc_info->cpu_timer_call));

	chud_proc_info->cpu_timer_callback_fn = func;

	clock_interval_to_deadline(time, units, &(chud_proc_info->t_deadline));
	timer_call_setup(&(chud_proc_info->cpu_timer_call),
			chudxnu_private_cpu_timer_callback, NULL);
	timer_call_enter(&(chud_proc_info->cpu_timer_call),
			chud_proc_info->t_deadline,
			TIMER_CALL_SYS_CRITICAL|TIMER_CALL_LOCAL);

	ml_set_interrupts_enabled(oldlevel);
	return KERN_SUCCESS;
}

__private_extern__ kern_return_t
chudxnu_cpu_timer_callback_cancel(void)
{
	chudcpu_data_t	*chud_proc_info;
	boolean_t	oldlevel;

	oldlevel = ml_set_interrupts_enabled(FALSE);
	chud_proc_info = (chudcpu_data_t *)(current_cpu_datap()->cpu_chud);

	timer_call_cancel(&(chud_proc_info->cpu_timer_call));

	// set to max value:
	chud_proc_info->t_deadline |= ~(chud_proc_info->t_deadline);
	chud_proc_info->cpu_timer_callback_fn = NULL;

	ml_set_interrupts_enabled(oldlevel);
	return KERN_SUCCESS;
}

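/*
 * Cancel the timer callback on every cpu that has CHUD state allocated,
 * resetting each deadline to the max value.
 */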
__private_extern__ kern_return_t
chudxnu_cpu_timer_callback_cancel_all(void)
{
	unsigned int	cpu;
	chudcpu_data_t	*chud_proc_info;

	for (cpu = 0; cpu < real_ncpus; cpu++) {
		chud_proc_info = (chudcpu_data_t *) cpu_data_ptr[cpu]->cpu_chud;
		if (chud_proc_info == NULL)
			continue;
		timer_call_cancel(&(chud_proc_info->cpu_timer_call));
		chud_proc_info->t_deadline |= ~(chud_proc_info->t_deadline);
		chud_proc_info->cpu_timer_callback_fn = NULL;
	}
	return KERN_SUCCESS;
}

#if 0
#pragma mark **** ast ****
#endif
static kern_return_t chud_null_ast(thread_flavor_t flavor, thread_state_t tstate,
	mach_msg_type_number_t count);
static chudxnu_perfmon_ast_callback_func_t perfmon_ast_callback_fn = chud_null_ast;

static kern_return_t chud_null_ast(thread_flavor_t flavor __unused,
	thread_state_t tstate __unused, mach_msg_type_number_t count __unused) {
	return KERN_FAILURE;
}

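/*
 * AST-time hook (installed as perfASTHook below): consumes pending AST_CHUD /
 * AST_CHUD_URGENT bits and, if either applied, delivers the current thread's
 * register state to the registered perfmon AST callback.
 */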
static kern_return_t
chudxnu_private_chud_ast_callback(ast_t reasons, ast_t *myast)
{
	boolean_t	oldlevel = ml_set_interrupts_enabled(FALSE);
	kern_return_t	retval = KERN_FAILURE;
	chudxnu_perfmon_ast_callback_func_t fn = perfmon_ast_callback_fn;

	if (fn) {
		/* Only execute urgent callbacks if reasons specifies an urgent context. */
		if ((*myast & AST_CHUD_URGENT) && (reasons & (AST_URGENT | AST_CHUD_URGENT))) {
			*myast &= ~AST_CHUD_URGENT;

			/*
			 * If the only flag left is AST_URGENT, we can clear it; we know
			 * that we set it, but if there are also other bits set in reasons
			 * then someone else might still need AST_URGENT, so we'll leave it
			 * set. The normal machinery in ast_taken will ensure it gets
			 * cleared eventually, as necessary.
			 */
			if (AST_URGENT == *myast) {
				*myast = AST_NONE;
			}

			retval = KERN_SUCCESS;
		}

		/*
		 * Only execute non-urgent callbacks if reasons actually specifies
		 * AST_CHUD. This implies non-urgent callbacks since the only time this
		 * will happen is if someone either calls ast_taken with AST_CHUD
		 * explicitly (not done at time of writing, but possible) or with
		 * AST_ALL, which of course includes AST_CHUD.
		 */
		if ((*myast & AST_CHUD) && (reasons & AST_CHUD)) {
			*myast &= ~AST_CHUD;
			retval = KERN_SUCCESS;
		}

		if (KERN_SUCCESS == retval) {
			x86_thread_state_t state;
			mach_msg_type_number_t count = x86_THREAD_STATE_COUNT;
			thread_t thread = current_thread();

			if (KERN_SUCCESS == chudxnu_thread_get_state(thread,
					x86_THREAD_STATE,
					(thread_state_t)&state,
					&count,
					(thread->task != kernel_task))) {
				(fn)(x86_THREAD_STATE, (thread_state_t)&state, count);
			}
		}
	}

	ml_set_interrupts_enabled(oldlevel);
	return retval;
}

volatile perfASTCallback perfASTHook;

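/*
 * Atomically install the private AST hook (only if no hook is currently
 * installed), then swap the caller's function into perfmon_ast_callback_fn.
 */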
__private_extern__ kern_return_t
chudxnu_perfmon_ast_callback_enter(chudxnu_perfmon_ast_callback_func_t func)
{
	if (OSCompareAndSwapPtr(NULL, chudxnu_private_chud_ast_callback,
		(void * volatile *)&perfASTHook)) {
		chudxnu_perfmon_ast_callback_func_t old = perfmon_ast_callback_fn;

		while (!OSCompareAndSwapPtr(old, func,
			(void * volatile *)&perfmon_ast_callback_fn)) {
			old = perfmon_ast_callback_fn;
		}

		return KERN_SUCCESS;
	}
	return KERN_FAILURE;
}

__private_extern__ kern_return_t
chudxnu_perfmon_ast_callback_cancel(void)
{
	if (OSCompareAndSwapPtr(chudxnu_private_chud_ast_callback, NULL,
		(void * volatile *)&perfASTHook)) {
		chudxnu_perfmon_ast_callback_func_t old = perfmon_ast_callback_fn;

		while (!OSCompareAndSwapPtr(old, chud_null_ast,
			(void * volatile *)&perfmon_ast_callback_fn)) {
			old = perfmon_ast_callback_fn;
		}

		return KERN_SUCCESS;
	}
	return KERN_FAILURE;
}

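/*
 * Mark an AST_CHUD (or AST_CHUD_URGENT | AST_URGENT) request as pending on
 * the current cpu so the hook above runs the next time ASTs are taken.
 */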
__private_extern__ kern_return_t
chudxnu_perfmon_ast_send_urgent(boolean_t urgent)
{
	boolean_t oldlevel = ml_set_interrupts_enabled(FALSE);
	ast_t *myast = ast_pending();

	if (urgent) {
		*myast |= (AST_CHUD_URGENT | AST_URGENT);
	} else {
		*myast |= (AST_CHUD);
	}

	ml_set_interrupts_enabled(oldlevel);
	return KERN_SUCCESS;
}

#if 0
#pragma mark **** interrupt ****
#endif
static kern_return_t chud_null_int(uint32_t trapentry, thread_flavor_t flavor,
	thread_state_t tstate, mach_msg_type_number_t count);
static chudxnu_interrupt_callback_func_t interrupt_callback_fn = chud_null_int;

static kern_return_t chud_null_int(uint32_t trapentry __unused, thread_flavor_t flavor __unused,
	thread_state_t tstate __unused, mach_msg_type_number_t count __unused) {
	return KERN_FAILURE;
}

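/*
 * PMI (performance-monitor interrupt) handler: with interrupts disabled,
 * samples the interrupted thread's state and passes it to the registered
 * interrupt callback as an X86_INTERRUPT_PERFMON event.
 */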
static void
chudxnu_private_interrupt_callback(void *foo) __attribute__((used));

static void
chudxnu_private_interrupt_callback(void *foo)
{
#pragma unused (foo)
	chudxnu_interrupt_callback_func_t fn = interrupt_callback_fn;

	if (fn) {
		boolean_t		oldlevel;
		x86_thread_state_t	state;
		mach_msg_type_number_t	count;

		oldlevel = ml_set_interrupts_enabled(FALSE);

		count = x86_THREAD_STATE_COUNT;
		if (chudxnu_thread_get_state(current_thread(),
					x86_THREAD_STATE,
					(thread_state_t)&state,
					&count,
					FALSE) == KERN_SUCCESS) {
			(fn)(
				X86_INTERRUPT_PERFMON,
				x86_THREAD_STATE,
				(thread_state_t)&state,
				count);
		}
		ml_set_interrupts_enabled(oldlevel);
	}
}

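/*
 * Register a PMI callback: swap it in over the null handler and point the
 * local APIC's PMI vector at the private handler above.
 */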
__private_extern__ kern_return_t
chudxnu_interrupt_callback_enter(chudxnu_interrupt_callback_func_t func)
{
	if (OSCompareAndSwapPtr(chud_null_int, func,
		(void * volatile *)&interrupt_callback_fn)) {
		lapic_set_pmi_func((i386_intr_func_t)chudxnu_private_interrupt_callback);
		return KERN_SUCCESS;
	}
	return KERN_FAILURE;
}

__private_extern__ kern_return_t
chudxnu_interrupt_callback_cancel(void)
{
	chudxnu_interrupt_callback_func_t old = interrupt_callback_fn;

	while (!OSCompareAndSwapPtr(old, chud_null_int,
		(void * volatile *)&interrupt_callback_fn)) {
		old = interrupt_callback_fn;
	}

	lapic_set_pmi_func(NULL);
	return KERN_SUCCESS;
}

#if 0
#pragma mark **** cpu signal ****
#endif
static chudxnu_cpusig_callback_func_t cpusig_callback_fn = NULL;

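/*
 * Run the registered cpu-signal callback for one dequeued request, passing
 * the request code along with the current thread's register state.
 */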
static kern_return_t
chudxnu_private_cpu_signal_handler(int request)
{
	chudxnu_cpusig_callback_func_t fn = cpusig_callback_fn;

	if (fn) {
		x86_thread_state_t state;
		mach_msg_type_number_t count = x86_THREAD_STATE_COUNT;

		if (chudxnu_thread_get_state(current_thread(),
					x86_THREAD_STATE,
					(thread_state_t) &state, &count,
					FALSE) == KERN_SUCCESS) {
			return (fn)(
				request, x86_THREAD_STATE,
				(thread_state_t) &state, count);
		} else {
			return KERN_FAILURE;
		}
	}
	return KERN_SUCCESS; // ignored
}
/*
 * chudxnu_cpu_signal_handler() is called from the IPI handler
 * when a CHUD signal arrives from another processor.
 */
__private_extern__ void
chudxnu_cpu_signal_handler(void)
{
	chudcpu_signal_request_t	*reqp;
	chudcpu_data_t			*chudinfop;

	chudinfop = (chudcpu_data_t *) current_cpu_datap()->cpu_chud;

	mpdequeue_head(&(chudinfop->cpu_request_queue),
		(queue_entry_t *) &reqp);
	while (reqp != NULL) {
		chudxnu_private_cpu_signal_handler(reqp->req_code);
		reqp->req_sync = 0;
		mpdequeue_head(&(chudinfop->cpu_request_queue),
			(queue_entry_t *) &reqp);
	}
}

__private_extern__ kern_return_t
chudxnu_cpusig_callback_enter(chudxnu_cpusig_callback_func_t func)
{
	if (OSCompareAndSwapPtr(NULL, func,
		(void * volatile *)&cpusig_callback_fn)) {
		return KERN_SUCCESS;
	}
	return KERN_FAILURE;
}

__private_extern__ kern_return_t
chudxnu_cpusig_callback_cancel(void)
{
	chudxnu_cpusig_callback_func_t old = cpusig_callback_fn;

	while (!OSCompareAndSwapPtr(old, NULL,
		(void * volatile *)&cpusig_callback_fn)) {
		old = cpusig_callback_fn;
	}

	return KERN_SUCCESS;
}

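/*
 * Send a CHUD cpu-signal request to another processor: enqueue the request on
 * the target cpu's queue, raise an MP_CHUD IPI, then spin (with interrupts
 * enabled) until the target clears req_sync, panicking if LockTimeOut expires.
 */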
__private_extern__ kern_return_t
chudxnu_cpusig_send(int otherCPU, uint32_t request_code)
{
	int				thisCPU;
	kern_return_t			retval = KERN_FAILURE;
	chudcpu_signal_request_t	request;
	uint64_t			deadline;
	chudcpu_data_t			*target_chudp;
	boolean_t			old_level;

	disable_preemption();
	// force interrupts on for a cross CPU signal.
	old_level = chudxnu_set_interrupts_enabled(TRUE);
	thisCPU = cpu_number();

	if ((unsigned) otherCPU < real_ncpus &&
	    thisCPU != otherCPU &&
	    cpu_data_ptr[otherCPU]->cpu_running) {

		target_chudp = (chudcpu_data_t *)
			cpu_data_ptr[otherCPU]->cpu_chud;

		/* Fill out request */
		request.req_sync = 0xFFFFFFFF;		/* set sync flag */
		//request.req_type = CPRQchud;		/* set request type */
		request.req_code = request_code;	/* set request */

		/*
		 * Insert the new request in the target cpu's request queue
		 * and signal target cpu.
		 */
		mpenqueue_tail(&target_chudp->cpu_request_queue,
			&request.req_entry);
		i386_signal_cpu(otherCPU, MP_CHUD, ASYNC);

		/* Wait for response or timeout */
		deadline = mach_absolute_time() + LockTimeOut;
		while (request.req_sync != 0) {
			if (mach_absolute_time() > deadline) {
				panic("chudxnu_cpusig_send(%d,%d) timed out\n",
					otherCPU, request_code);
			}
			cpu_pause();
		}
		retval = KERN_SUCCESS;
	} else {
		retval = KERN_INVALID_ARGUMENT;
	}

	chudxnu_set_interrupts_enabled(old_level);
	enable_preemption();
	return retval;
}