git.saurik.com Git - apple/xnu.git/blob - osfmk/kern/machine.c (blob 24879020598a512c4240c290a7bf64a3c0f215da)
1 /*
2 * Copyright (c) 2000-2019 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * @OSF_COPYRIGHT@
30 */
31 /*
32 * Mach Operating System
33 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
34 * All Rights Reserved.
35 *
36 * Permission to use, copy, modify and distribute this software and its
37 * documentation is hereby granted, provided that both the copyright
38 * notice and this permission notice appear in all copies of the
39 * software, derivative works or modified versions, and any portions
40 * thereof, and that both notices appear in supporting documentation.
41 *
42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45 *
46 * Carnegie Mellon requests users of this software to return to
47 *
48 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
49 * School of Computer Science
50 * Carnegie Mellon University
51 * Pittsburgh PA 15213-3890
52 *
53 * any improvements or extensions that they make and grant Carnegie Mellon
54 * the rights to redistribute these changes.
55 */
56 /*
57 */
58 /*
59 * File: kern/machine.c
60 * Author: Avadis Tevanian, Jr.
61 * Date: 1987
62 *
63 * Support for machine independent machine abstraction.
64 */
65
66 #include <string.h>
67
68 #include <mach/mach_types.h>
69 #include <mach/boolean.h>
70 #include <mach/kern_return.h>
71 #include <mach/machine.h>
72 #include <mach/host_info.h>
73 #include <mach/host_reboot.h>
74 #include <mach/host_priv_server.h>
75 #include <mach/processor_server.h>
76
77 #include <kern/kern_types.h>
78 #include <kern/counters.h>
79 #include <kern/cpu_data.h>
80 #include <kern/cpu_quiesce.h>
81 #include <kern/ipc_host.h>
82 #include <kern/host.h>
83 #include <kern/machine.h>
84 #include <kern/misc_protos.h>
85 #include <kern/processor.h>
86 #include <kern/queue.h>
87 #include <kern/sched.h>
88 #include <kern/startup.h>
89 #include <kern/task.h>
90 #include <kern/thread.h>
91
92 #include <machine/commpage.h>
93 #include <machine/machine_routines.h>
94
95 #if HIBERNATION
96 #include <IOKit/IOHibernatePrivate.h>
97 #endif
98 #include <IOKit/IOPlatformExpert.h>
99
100 #if CONFIG_DTRACE
101 extern void (*dtrace_cpu_state_changed_hook)(int, boolean_t);
102 #endif
103
104 #if defined(__x86_64__)
105 #include <i386/panic_notify.h>
106 #include <libkern/OSDebug.h>
107 #endif
108
109 /*
110 * Exported variables:
111 */
112
113 struct machine_info machine_info;
114
115 /* Forwards */
116 static void
117 processor_doshutdown(processor_t processor);
118
119 static void
120 processor_offline(void * parameter, __unused wait_result_t result);
121
122 static void
123 processor_offline_intstack(processor_t processor) __dead2;
124
125 /*
126 * processor_up:
127 *
128 * Flag processor as up and running, and available
129 * for scheduling.
130 */
131 void
132 processor_up(
133 processor_t processor)
134 {
135 processor_set_t pset;
136 spl_t s;
137
138 s = splsched();
139 init_ast_check(processor);
140 pset = processor->processor_set;
141 pset_lock(pset);
142
143 ++pset->online_processor_count;
144 pset_update_processor_state(pset, processor, PROCESSOR_RUNNING);
145 os_atomic_inc(&processor_avail_count, relaxed);
146 if (processor->is_recommended) {
147 os_atomic_inc(&processor_avail_count_user, relaxed);
148 SCHED(pset_made_schedulable)(processor, pset, false);
149 }
150 if (processor->processor_primary == processor) {
151 os_atomic_inc(&primary_processor_avail_count, relaxed);
152 if (processor->is_recommended) {
153 os_atomic_inc(&primary_processor_avail_count_user, relaxed);
154 }
155 }
156 commpage_update_active_cpus();
157 pset_unlock(pset);
158 ml_cpu_up();
159 splx(s);
160
161 #if CONFIG_DTRACE
162 if (dtrace_cpu_state_changed_hook) {
163 (*dtrace_cpu_state_changed_hook)(processor->cpu_id, TRUE);
164 }
165 #endif
166 }
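/*
 * Illustrative sketch (not part of the original file): processor_up() above and
 * processor_doshutdown() below report CPU state changes through the
 * dtrace_cpu_state_changed_hook function pointer.  A consumer wanting those
 * notifications would install a function with the matching
 * (int cpu_id, boolean_t is_running) signature; the names and counter used here
 * are hypothetical assumptions.
 */
#if CONFIG_DTRACE
static unsigned int example_online_cpu_count;    /* hypothetical bookkeeping */

static void
example_cpu_state_changed(int cpu_id, boolean_t is_running)
{
	/* Invoked at splsched from processor_up() / processor_doshutdown(). */
	if (is_running) {
		os_atomic_inc(&example_online_cpu_count, relaxed);
	} else {
		os_atomic_dec(&example_online_cpu_count, relaxed);
	}
	(void)cpu_id;
}

static void
example_install_cpu_state_hook(void)
{
	/* Hypothetical installation point; the real hook is owned by DTrace. */
	dtrace_cpu_state_changed_hook = example_cpu_state_changed;
}
#endif /* CONFIG_DTRACE */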
167 #include <atm/atm_internal.h>
168
169 kern_return_t
170 host_reboot(
171 host_priv_t host_priv,
172 int options)
173 {
174 if (host_priv == HOST_PRIV_NULL) {
175 return KERN_INVALID_HOST;
176 }
177
178 assert(host_priv == &realhost);
179
180 #if DEVELOPMENT || DEBUG
181 if (options & HOST_REBOOT_DEBUGGER) {
182 Debugger("Debugger");
183 return KERN_SUCCESS;
184 }
185 #endif
186
187 if (options & HOST_REBOOT_UPSDELAY) {
188 // UPS power cutoff path
189 PEHaltRestart( kPEUPSDelayHaltCPU );
190 } else {
191 halt_all_cpus(!(options & HOST_REBOOT_HALT));
192 }
193
194 return KERN_SUCCESS;
195 }
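/*
 * Illustrative sketch (not part of the original file): the option bits accepted
 * by host_reboot() above.  A caller holding the privileged host port selects one
 * behavior; with no option bits set the machine reboots.  The helper name is a
 * hypothetical assumption.
 */
static kern_return_t
example_halt_machine(host_priv_t host_priv)
{
	/*
	 * Recognized options:
	 *   0                     - reboot (halt_all_cpus with reboot == TRUE)
	 *   HOST_REBOOT_HALT      - halt without rebooting
	 *   HOST_REBOOT_UPSDELAY  - UPS power-cutoff path via PEHaltRestart()
	 *   HOST_REBOOT_DEBUGGER  - drop into the debugger (DEVELOPMENT || DEBUG only)
	 */
	return host_reboot(host_priv, HOST_REBOOT_HALT);
}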
196
197 kern_return_t
198 processor_assign(
199 __unused processor_t processor,
200 __unused processor_set_t new_pset,
201 __unused boolean_t wait)
202 {
203 return KERN_FAILURE;
204 }
205
206 kern_return_t
207 processor_shutdown(
208 processor_t processor)
209 {
210 processor_set_t pset;
211 spl_t s;
212
213 ml_cpu_begin_state_transition(processor->cpu_id);
214 s = splsched();
215 pset = processor->processor_set;
216 pset_lock(pset);
217 if (processor->state == PROCESSOR_OFF_LINE) {
218 /*
219 * Success if already shut down.
220 */
221 pset_unlock(pset);
222 splx(s);
223 ml_cpu_end_state_transition(processor->cpu_id);
224
225 return KERN_SUCCESS;
226 }
227
228 if (!ml_cpu_can_exit(processor->cpu_id)) {
229 /*
230 * Failure if disallowed by arch code.
231 */
232 pset_unlock(pset);
233 splx(s);
234 ml_cpu_end_state_transition(processor->cpu_id);
235
236 return KERN_FAILURE;
237 }
238
239 if (processor->state == PROCESSOR_START) {
240 /*
241 * Failure if currently being started.
242 */
243 pset_unlock(pset);
244 splx(s);
245
246 return KERN_FAILURE;
247 }
248
249 /*
250 * If the processor is dispatching, let it finish.
251 */
252 while (processor->state == PROCESSOR_DISPATCHING) {
253 pset_unlock(pset);
254 splx(s);
255 delay(1);
256 s = splsched();
257 pset_lock(pset);
258 }
259
260 /*
261 * Success if already being shut down.
262 */
263 if (processor->state == PROCESSOR_SHUTDOWN) {
264 pset_unlock(pset);
265 splx(s);
266 ml_cpu_end_state_transition(processor->cpu_id);
267
268 return KERN_SUCCESS;
269 }
270
271 ml_broadcast_cpu_event(CPU_EXIT_REQUESTED, processor->cpu_id);
272 pset_update_processor_state(pset, processor, PROCESSOR_SHUTDOWN);
273 pset_unlock(pset);
274
275 processor_doshutdown(processor);
276 splx(s);
277
278 cpu_exit_wait(processor->cpu_id);
279 ml_cpu_end_state_transition(processor->cpu_id);
280 ml_broadcast_cpu_event(CPU_EXITED, processor->cpu_id);
281
282 return KERN_SUCCESS;
283 }
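/*
 * Illustrative sketch (not part of the original file): processor_shutdown() is
 * idempotent (KERN_SUCCESS if the CPU is already off line) and fails for CPUs
 * that ml_cpu_can_exit() says cannot be powered down, so a caller can simply
 * attempt the shutdown and test the result.  The function name is hypothetical.
 */
static bool
example_try_offline_cpu(processor_t processor)
{
	return processor_shutdown(processor) == KERN_SUCCESS;
}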
284
285 /*
286 * Called with interrupts disabled.
287 */
288 static void
289 processor_doshutdown(
290 processor_t processor)
291 {
292 thread_t self = current_thread();
293
294 /*
295 * Get onto the processor that is being shut down
296 */
297 processor_t prev = thread_bind(processor);
298 thread_block(THREAD_CONTINUE_NULL);
299
300 /* interrupts still disabled */
301 assert(ml_get_interrupts_enabled() == FALSE);
302
303 assert(processor == current_processor());
304 assert(processor->state == PROCESSOR_SHUTDOWN);
305
306 #if CONFIG_DTRACE
307 if (dtrace_cpu_state_changed_hook) {
308 (*dtrace_cpu_state_changed_hook)(processor->cpu_id, FALSE);
309 }
310 #endif
311
312 ml_cpu_down();
313
314 #if HIBERNATION
315 if (processor_avail_count < 2) {
316 hibernate_vm_lock();
317 hibernate_vm_unlock();
318 }
319 #endif
320
321 processor_set_t pset = processor->processor_set;
322
323 pset_lock(pset);
324 pset_update_processor_state(pset, processor, PROCESSOR_OFF_LINE);
325 --pset->online_processor_count;
326 os_atomic_dec(&processor_avail_count, relaxed);
327 if (processor->is_recommended) {
328 os_atomic_dec(&processor_avail_count_user, relaxed);
329 }
330 if (processor->processor_primary == processor) {
331 os_atomic_dec(&primary_processor_avail_count, relaxed);
332 if (processor->is_recommended) {
333 os_atomic_dec(&primary_processor_avail_count_user, relaxed);
334 }
335 }
336 commpage_update_active_cpus();
337 SCHED(processor_queue_shutdown)(processor);
338 /* pset lock dropped */
339 SCHED(rt_queue_shutdown)(processor);
340
341 thread_bind(prev);
342
343 /* interrupts still disabled */
344
345 /*
346 * Continue processor shutdown on the processor's idle thread.
347 * The handoff won't fail because the idle thread has a reserved stack.
348 * Switching to the idle thread leaves interrupts disabled,
349 * so we can't accidentally take an interrupt after the context switch.
350 */
351 thread_t shutdown_thread = processor->idle_thread;
352 shutdown_thread->continuation = processor_offline;
353 shutdown_thread->parameter = processor;
354
355 thread_run(self, NULL, NULL, shutdown_thread);
356 }
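/*
 * Illustrative sketch (not part of the original file): the bind/block/unbind
 * pattern processor_doshutdown() uses to migrate itself onto the target CPU.
 * thread_bind() only records the binding; the thread does not actually move
 * until it blocks and is rescheduled.  The function name is hypothetical.
 */
static void
example_run_on_processor(processor_t target)
{
	processor_t prev = thread_bind(target);   /* remember the previous binding */
	thread_block(THREAD_CONTINUE_NULL);       /* block so we are rescheduled onto 'target' */

	assert(current_processor() == target);
	/* ... perform per-CPU work here ... */

	thread_bind(prev);                        /* restore the previous binding */
	thread_block(THREAD_CONTINUE_NULL);       /* migrate back off, if necessary */
}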
357
358 /*
359 * Called in the context of the idle thread to shut down the processor
360 *
361 * A shut-down processor looks like it's 'running' the idle thread parked
362 * in this routine, but it's actually been powered off and has no hardware state.
363 */
364 static void
365 processor_offline(
366 void * parameter,
367 __unused wait_result_t result)
368 {
369 processor_t processor = (processor_t) parameter;
370 thread_t self = current_thread();
371 __assert_only thread_t old_thread = THREAD_NULL;
372
373 assert(processor == current_processor());
374 assert(self->state & TH_IDLE);
375 assert(processor->idle_thread == self);
376 assert(ml_get_interrupts_enabled() == FALSE);
377 assert(self->continuation == NULL);
378 assert(processor->processor_offlined == false);
379 assert(processor->running_timers_active == false);
380
381 bool enforce_quiesce_safety = gEnforceQuiesceSafety;
382
383 /*
384 * Scheduling is now disabled for this processor.
385 * Ensure that primitives that need scheduling (like mutexes) know this.
386 */
387 if (enforce_quiesce_safety) {
388 disable_preemption();
389 }
390
391 /* convince slave_main to come back here */
392 processor->processor_offlined = true;
393
394 /*
395 * Switch to the interrupt stack and shut down the processor.
396 *
397 * When the processor comes back, it will eventually call load_context which
398 * restores the context saved by machine_processor_shutdown, returning here.
399 */
400 old_thread = machine_processor_shutdown(self, processor_offline_intstack, processor);
401
402 /* old_thread should be NULL because we got here through Load_context */
403 assert(old_thread == THREAD_NULL);
404
405 assert(processor == current_processor());
406 assert(processor->idle_thread == current_thread());
407
408 assert(ml_get_interrupts_enabled() == FALSE);
409 assert(self->continuation == NULL);
410
411 /* Extract the machine_param value stashed by slave_main */
412 void * machine_param = self->parameter;
413 self->parameter = NULL;
414
415 /* Re-initialize the processor */
416 slave_machine_init(machine_param);
417
418 assert(processor->processor_offlined == true);
419 processor->processor_offlined = false;
420
421 if (enforce_quiesce_safety) {
422 enable_preemption();
423 }
424
425 /*
426 * Now that the processor is back, invoke the idle thread to find out what to do next.
427 * idle_thread will enable interrupts.
428 */
429 thread_block(idle_thread);
430 /*NOTREACHED*/
431 }
432
433 /*
434 * Complete the shutdown and place the processor offline.
435 *
436 * Called at splsched in the shutdown context
437 * (i.e. on the idle thread, on the interrupt stack)
438 *
439 * The onlining half of this is done in load_context().
440 */
441 static void
442 processor_offline_intstack(
443 processor_t processor)
444 {
445 assert(processor == current_processor());
446 assert(processor->active_thread == current_thread());
447
448 timer_stop(processor->current_state, processor->last_dispatch);
449
450 cpu_quiescent_counter_leave(processor->last_dispatch);
451
452 PMAP_DEACTIVATE_KERNEL(processor->cpu_id);
453
454 cpu_sleep();
455 panic("zombie processor");
456 /*NOTREACHED*/
457 }
458
459 kern_return_t
460 host_get_boot_info(
461 host_priv_t host_priv,
462 kernel_boot_info_t boot_info)
463 {
464 const char *src = "";
465 if (host_priv == HOST_PRIV_NULL) {
466 return KERN_INVALID_HOST;
467 }
468
469 assert(host_priv == &realhost);
470
471 /*
472 * Copy the operator-supplied string first, terminated by '\0', followed by
473 * standardized strings generated from the boot string.
474 */
475 src = machine_boot_info(boot_info, KERNEL_BOOT_INFO_MAX);
476 if (src != boot_info) {
477 (void) strncpy(boot_info, src, KERNEL_BOOT_INFO_MAX);
478 }
479
480 return KERN_SUCCESS;
481 }
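/*
 * Illustrative sketch (not part of the original file): fetching the boot info
 * string from inside the kernel.  kernel_boot_info_t is a fixed-size character
 * buffer of KERNEL_BOOT_INFO_MAX bytes; host_priv_self() is assumed to return
 * the privileged host object (&realhost).
 */
static void
example_log_boot_info(void)
{
	kernel_boot_info_t info;

	if (host_get_boot_info(host_priv_self(), info) == KERN_SUCCESS) {
		printf("boot info: %s\n", info);
	}
}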
482
483 #if CONFIG_DTRACE
484 #include <mach/sdt.h>
485 #endif
486
487 unsigned long long
488 ml_io_read(uintptr_t vaddr, int size)
489 {
490 unsigned long long result = 0;
491 unsigned char s1;
492 unsigned short s2;
493
494 #if defined(__x86_64__)
495 uint64_t sabs, eabs;
496 boolean_t istate, timeread = FALSE;
497 #if DEVELOPMENT || DEBUG
498 extern uint64_t simulate_stretched_io;
499 uintptr_t paddr = pmap_verify_noncacheable(vaddr);
500 #endif /* x86_64 DEVELOPMENT || DEBUG */
501 if (__improbable(reportphyreaddelayabs != 0)) {
502 istate = ml_set_interrupts_enabled(FALSE);
503 sabs = mach_absolute_time();
504 timeread = TRUE;
505 }
506
507 #if DEVELOPMENT || DEBUG
508 if (__improbable(timeread && simulate_stretched_io)) {
509 sabs -= simulate_stretched_io;
510 }
511 #endif /* x86_64 DEVELOPMENT || DEBUG */
512
513 #endif /* x86_64 */
514
515 switch (size) {
516 case 1:
517 s1 = *(volatile unsigned char *)vaddr;
518 result = s1;
519 break;
520 case 2:
521 s2 = *(volatile unsigned short *)vaddr;
522 result = s2;
523 break;
524 case 4:
525 result = *(volatile unsigned int *)vaddr;
526 break;
527 case 8:
528 result = *(volatile unsigned long long *)vaddr;
529 break;
530 default:
531 panic("Invalid size %d for ml_io_read(%p)", size, (void *)vaddr);
532 break;
533 }
534
535 #if defined(__x86_64__)
536 if (__improbable(timeread == TRUE)) {
537 eabs = mach_absolute_time();
538
539 #if DEVELOPMENT || DEBUG
540 iotrace(IOTRACE_IO_READ, vaddr, paddr, size, result, sabs, eabs - sabs);
541 #endif
542
543 if (__improbable((eabs - sabs) > reportphyreaddelayabs)) {
544 #if !(DEVELOPMENT || DEBUG)
545 uintptr_t paddr = kvtophys(vaddr);
546 #endif
547
548 (void)ml_set_interrupts_enabled(istate);
549
550 if (phyreadpanic && (machine_timeout_suspended() == FALSE)) {
551 panic_notify();
552 panic("Read from IO vaddr 0x%lx paddr 0x%lx took %llu ns, "
553 "result: 0x%llx (start: %llu, end: %llu), ceiling: %llu",
554 vaddr, paddr, (eabs - sabs), result, sabs, eabs,
555 reportphyreaddelayabs);
556 }
557
558 if (reportphyreadosbt) {
559 OSReportWithBacktrace("ml_io_read(v=%p, p=%p) size %d result 0x%llx "
560 "took %lluus",
561 (void *)vaddr, (void *)paddr, size, result,
562 (eabs - sabs) / NSEC_PER_USEC);
563 }
564 #if CONFIG_DTRACE
565 DTRACE_PHYSLAT5(physioread, uint64_t, (eabs - sabs),
566 uint64_t, vaddr, uint32_t, size, uint64_t, paddr, uint64_t, result);
567 #endif /* CONFIG_DTRACE */
568 } else if (__improbable(tracephyreaddelayabs > 0 && (eabs - sabs) > tracephyreaddelayabs)) {
569 #if !(DEVELOPMENT || DEBUG)
570 uintptr_t paddr = kvtophys(vaddr);
571 #endif
572
573 KDBG(MACHDBG_CODE(DBG_MACH_IO, DBC_MACH_IO_MMIO_READ),
574 (eabs - sabs), VM_KERNEL_UNSLIDE_OR_PERM(vaddr), paddr, result);
575
576 (void)ml_set_interrupts_enabled(istate);
577 } else {
578 (void)ml_set_interrupts_enabled(istate);
579 }
580 }
581 #endif /* x86_64 */
582 return result;
583 }
584
585 unsigned int
586 ml_io_read8(uintptr_t vaddr)
587 {
588 return (unsigned) ml_io_read(vaddr, 1);
589 }
590
591 unsigned int
592 ml_io_read16(uintptr_t vaddr)
593 {
594 return (unsigned) ml_io_read(vaddr, 2);
595 }
596
597 unsigned int
598 ml_io_read32(uintptr_t vaddr)
599 {
600 return (unsigned) ml_io_read(vaddr, 4);
601 }
602
603 unsigned long long
604 ml_io_read64(uintptr_t vaddr)
605 {
606 return ml_io_read(vaddr, 8);
607 }
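/*
 * Illustrative sketch (not part of the original file): the fixed-width read
 * wrappers above are the usual way to load from a device register once it has
 * been mapped uncached into the kernel address space.  The base address and
 * register offset used here are hypothetical.
 */
static uint32_t
example_read_status_register(uintptr_t mmio_base)
{
	/* 32-bit load from a hypothetical status register at offset 0x10. */
	return ml_io_read32(mmio_base + 0x10);
}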
608
609 /* ml_io_write* */
610
611 void
612 ml_io_write(uintptr_t vaddr, uint64_t val, int size)
613 {
614 #if defined(__x86_64__)
615 uint64_t sabs, eabs;
616 boolean_t istate, timewrite = FALSE;
617 #if DEVELOPMENT || DEBUG
618 extern uint64_t simulate_stretched_io;
619 uintptr_t paddr = pmap_verify_noncacheable(vaddr);
620 #endif /* x86_64 DEVELOPMENT || DEBUG */
621 if (__improbable(reportphywritedelayabs != 0)) {
622 istate = ml_set_interrupts_enabled(FALSE);
623 sabs = mach_absolute_time();
624 timewrite = TRUE;
625 }
626
627 #if DEVELOPMENT || DEBUG
628 if (__improbable(timewrite && simulate_stretched_io)) {
629 sabs -= simulate_stretched_io;
630 }
631 #endif /* x86_64 DEVELOPMENT || DEBUG */
632 #endif /* x86_64 */
633
634 switch (size) {
635 case 1:
636 *(volatile uint8_t *)vaddr = (uint8_t)val;
637 break;
638 case 2:
639 *(volatile uint16_t *)vaddr = (uint16_t)val;
640 break;
641 case 4:
642 *(volatile uint32_t *)vaddr = (uint32_t)val;
643 break;
644 case 8:
645 *(volatile uint64_t *)vaddr = (uint64_t)val;
646 break;
647 default:
648 panic("Invalid size %d for ml_io_write(%p, 0x%llx)", size, (void *)vaddr, val);
649 break;
650 }
651
652 #if defined(__x86_64__)
653 if (__improbable(timewrite == TRUE)) {
654 eabs = mach_absolute_time();
655
656 #if DEVELOPMENT || DEBUG
657 iotrace(IOTRACE_IO_WRITE, vaddr, paddr, size, val, sabs, eabs - sabs);
658 #endif
659
660 if (__improbable((eabs - sabs) > reportphywritedelayabs)) {
661 #if !(DEVELOPMENT || DEBUG)
662 uintptr_t paddr = kvtophys(vaddr);
663 #endif
664
665 (void)ml_set_interrupts_enabled(istate);
666
667 if (phywritepanic && (machine_timeout_suspended() == FALSE)) {
668 panic_notify();
669 panic("Write to IO vaddr %p paddr %p val 0x%llx took %llu ns,"
670 " (start: %llu, end: %llu), ceiling: %llu",
671 (void *)vaddr, (void *)paddr, val, (eabs - sabs), sabs, eabs,
672 reportphywritedelayabs);
673 }
674
675 if (reportphywriteosbt) {
676 OSReportWithBacktrace("ml_io_write size %d (v=%p, p=%p, 0x%llx) "
677 "took %lluus",
678 size, (void *)vaddr, (void *)paddr, val, (eabs - sabs) / NSEC_PER_USEC);
679 }
680 #if CONFIG_DTRACE
681 DTRACE_PHYSLAT5(physiowrite, uint64_t, (eabs - sabs),
682 uint64_t, vaddr, uint32_t, size, uint64_t, paddr, uint64_t, val);
683 #endif /* CONFIG_DTRACE */
684 } else if (__improbable(tracephywritedelayabs > 0 && (eabs - sabs) > tracephywritedelayabs)) {
685 #if !(DEVELOPMENT || DEBUG)
686 uintptr_t paddr = kvtophys(vaddr);
687 #endif
688
689 KDBG(MACHDBG_CODE(DBG_MACH_IO, DBC_MACH_IO_MMIO_WRITE),
690 (eabs - sabs), VM_KERNEL_UNSLIDE_OR_PERM(vaddr), paddr, val);
691
692 (void)ml_set_interrupts_enabled(istate);
693 } else {
694 (void)ml_set_interrupts_enabled(istate);
695 }
696 }
697 #endif /* x86_64 */
698 }
699
700 void
701 ml_io_write8(uintptr_t vaddr, uint8_t val)
702 {
703 ml_io_write(vaddr, val, 1);
704 }
705
706 void
707 ml_io_write16(uintptr_t vaddr, uint16_t val)
708 {
709 ml_io_write(vaddr, val, 2);
710 }
711
712 void
713 ml_io_write32(uintptr_t vaddr, uint32_t val)
714 {
715 ml_io_write(vaddr, val, 4);
716 }
717
718 void
719 ml_io_write64(uintptr_t vaddr, uint64_t val)
720 {
721 ml_io_write(vaddr, val, 8);
722 }
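/*
 * Illustrative sketch (not part of the original file): a read-modify-write of a
 * hypothetical 32-bit control register using the wrappers above.  On x86_64,
 * each access is individually timed and may panic or log a backtrace if it
 * exceeds the configured latency ceilings.
 */
static void
example_set_control_bit(uintptr_t mmio_base, uint32_t bit)
{
	uintptr_t reg = mmio_base + 0x14;        /* hypothetical control register */
	uint32_t  val = ml_io_read32(reg);       /* timed read */

	ml_io_write32(reg, val | bit);           /* timed write */
}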
723
724 struct cpu_callback_chain_elem {
725 cpu_callback_t fn;
726 void *param;
727 struct cpu_callback_chain_elem *next;
728 };
729
730 static struct cpu_callback_chain_elem *cpu_callback_chain;
731 static LCK_GRP_DECLARE(cpu_callback_chain_lock_grp, "cpu_callback_chain");
732 static LCK_SPIN_DECLARE(cpu_callback_chain_lock, &cpu_callback_chain_lock_grp);
733
734 void
735 cpu_event_register_callback(cpu_callback_t fn, void *param)
736 {
737 struct cpu_callback_chain_elem *new_elem;
738
739 new_elem = zalloc_permanent_type(struct cpu_callback_chain_elem);
740 if (!new_elem) {
741 panic("can't allocate cpu_callback_chain_elem");
742 }
743
744 lck_spin_lock(&cpu_callback_chain_lock);
745 new_elem->next = cpu_callback_chain;
746 new_elem->fn = fn;
747 new_elem->param = param;
748 os_atomic_store(&cpu_callback_chain, new_elem, release);
749 lck_spin_unlock(&cpu_callback_chain_lock);
750 }
751
752 __attribute__((noreturn))
753 void
754 cpu_event_unregister_callback(__unused cpu_callback_t fn)
755 {
756 panic("Unfortunately, cpu_event_unregister_callback is unimplemented.");
757 }
758
759 void
760 ml_broadcast_cpu_event(enum cpu_event event, unsigned int cpu_or_cluster)
761 {
762 struct cpu_callback_chain_elem *cursor;
763
764 cursor = os_atomic_load(&cpu_callback_chain, dependency);
765 for (; cursor != NULL; cursor = cursor->next) {
766 cursor->fn(cursor->param, event, cpu_or_cluster);
767 }
768 }
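/*
 * Illustrative sketch (not part of the original file): registering for the CPU
 * events broadcast by ml_broadcast_cpu_event() above.  The callback signature
 * mirrors the invocation in ml_broadcast_cpu_event(); because callbacks can
 * never be unregistered (see cpu_event_unregister_callback), the function and
 * its parameter must remain valid forever.  The names used here are
 * hypothetical assumptions.
 */
static void
example_cpu_event_cb(void *param, enum cpu_event event, unsigned int cpu_or_cluster)
{
	switch (event) {
	case CPU_EXIT_REQUESTED:
		/* quiesce any per-CPU work before the CPU is powered down */
		break;
	case CPU_EXITED:
		/* the CPU is now offline */
		break;
	default:
		break;
	}
	(void)param;
	(void)cpu_or_cluster;
}

static void
example_register_for_cpu_events(void)
{
	cpu_event_register_callback(example_cpu_event_cb, NULL);
}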