/*
 * Copyright (c) 2000-2019 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */
/*
 * File:   kern/machine.c
 * Author: Avadis Tevanian, Jr.
 * Date:   1987
 *
 * Support for machine-independent machine abstraction.
 */

#include <string.h>

#include <mach/mach_types.h>
#include <mach/boolean.h>
#include <mach/kern_return.h>
#include <mach/machine.h>
#include <mach/host_info.h>
#include <mach/host_reboot.h>
#include <mach/host_priv_server.h>
#include <mach/processor_server.h>

#include <kern/kern_types.h>
#include <kern/cpu_data.h>
#include <kern/cpu_quiesce.h>
#include <kern/ipc_host.h>
#include <kern/host.h>
#include <kern/machine.h>
#include <kern/misc_protos.h>
#include <kern/processor.h>
#include <kern/queue.h>
#include <kern/sched.h>
#include <kern/startup.h>
#include <kern/task.h>
#include <kern/thread.h>

#include <machine/commpage.h>
#include <machine/machine_routines.h>

#if HIBERNATION
#include <IOKit/IOHibernatePrivate.h>
#endif
#include <IOKit/IOPlatformExpert.h>

#if CONFIG_DTRACE
extern void (*dtrace_cpu_state_changed_hook)(int, boolean_t);
#endif

#if defined(__x86_64__)
#include <i386/panic_notify.h>
#include <libkern/OSDebug.h>
#endif

/*
 * Exported variables:
 */

struct machine_info machine_info;

/* Forwards */
static void
processor_doshutdown(processor_t processor);

static void
processor_offline(void * parameter, __unused wait_result_t result);

static void
processor_offline_intstack(processor_t processor) __dead2;

/*
 * processor_up:
 *
 * Flag processor as up and running, and available
 * for scheduling.
 */
void
processor_up(
    processor_t processor)
{
    processor_set_t pset;
    spl_t s;

    s = splsched();
    init_ast_check(processor);
    pset = processor->processor_set;
    pset_lock(pset);

    ++pset->online_processor_count;
    pset_update_processor_state(pset, processor, PROCESSOR_RUNNING);
    os_atomic_inc(&processor_avail_count, relaxed);
    if (processor->is_recommended) {
        os_atomic_inc(&processor_avail_count_user, relaxed);
        SCHED(pset_made_schedulable)(processor, pset, false);
    }
    if (processor->processor_primary == processor) {
        os_atomic_inc(&primary_processor_avail_count, relaxed);
        if (processor->is_recommended) {
            os_atomic_inc(&primary_processor_avail_count_user, relaxed);
        }
    }
    commpage_update_active_cpus();
    pset_unlock(pset);
    ml_cpu_up();
    splx(s);

#if CONFIG_DTRACE
    if (dtrace_cpu_state_changed_hook) {
        (*dtrace_cpu_state_changed_hook)(processor->cpu_id, TRUE);
    }
#endif
}
#include <atm/atm_internal.h>

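/*
 * host_reboot:
 *
 * Reboot or halt the machine on behalf of the privileged host port.
 * HOST_REBOOT_DEBUGGER (DEVELOPMENT/DEBUG kernels only) drops into the
 * kernel debugger, HOST_REBOOT_UPSDELAY requests a UPS-delay halt from
 * the platform expert, and otherwise the machine is halted or restarted
 * depending on HOST_REBOOT_HALT.
 */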
kern_return_t
host_reboot(
    host_priv_t host_priv,
    int options)
{
    if (host_priv == HOST_PRIV_NULL) {
        return KERN_INVALID_HOST;
    }

#if DEVELOPMENT || DEBUG
    if (options & HOST_REBOOT_DEBUGGER) {
        Debugger("Debugger");
        return KERN_SUCCESS;
    }
#endif

    if (options & HOST_REBOOT_UPSDELAY) {
        // UPS power cutoff path
        PEHaltRestart( kPEUPSDelayHaltCPU );
    } else {
        halt_all_cpus(!(options & HOST_REBOOT_HALT));
    }

    return KERN_SUCCESS;
}

kern_return_t
processor_assign(
    __unused processor_t processor,
    __unused processor_set_t new_pset,
    __unused boolean_t wait)
{
    return KERN_FAILURE;
}

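/*
 * processor_shutdown:
 *
 * Take a processor offline.  Returns KERN_SUCCESS if the processor is
 * already shut down (or already shutting down), KERN_FAILURE if the
 * architecture does not allow this CPU to exit or the CPU is still
 * starting; otherwise marks the processor PROCESSOR_SHUTDOWN, performs
 * the shutdown on that processor, and waits for it to exit.
 * CPU_EXIT_REQUESTED / CPU_EXITED events are broadcast around the
 * transition.
 */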
kern_return_t
processor_shutdown(
    processor_t processor)
{
    processor_set_t pset;
    spl_t s;

    ml_cpu_begin_state_transition(processor->cpu_id);
    s = splsched();
    pset = processor->processor_set;
    pset_lock(pset);
    if (processor->state == PROCESSOR_OFF_LINE) {
        /*
         * Success if already shutdown.
         */
        pset_unlock(pset);
        splx(s);
        ml_cpu_end_state_transition(processor->cpu_id);

        return KERN_SUCCESS;
    }

    if (!ml_cpu_can_exit(processor->cpu_id)) {
        /*
         * Failure if disallowed by arch code.
         */
        pset_unlock(pset);
        splx(s);
        ml_cpu_end_state_transition(processor->cpu_id);

        return KERN_FAILURE;
    }

    if (processor->state == PROCESSOR_START) {
        /*
         * Failure if currently being started.
         */
        pset_unlock(pset);
        splx(s);

        return KERN_FAILURE;
    }

    /*
     * If the processor is dispatching, let it finish.
     */
    while (processor->state == PROCESSOR_DISPATCHING) {
        pset_unlock(pset);
        splx(s);
        delay(1);
        s = splsched();
        pset_lock(pset);
    }

    /*
     * Success if already being shutdown.
     */
    if (processor->state == PROCESSOR_SHUTDOWN) {
        pset_unlock(pset);
        splx(s);
        ml_cpu_end_state_transition(processor->cpu_id);

        return KERN_SUCCESS;
    }

    ml_broadcast_cpu_event(CPU_EXIT_REQUESTED, processor->cpu_id);
    pset_update_processor_state(pset, processor, PROCESSOR_SHUTDOWN);
    pset_unlock(pset);

    processor_doshutdown(processor);
    splx(s);

    cpu_exit_wait(processor->cpu_id);
    ml_cpu_end_state_transition(processor->cpu_id);
    ml_broadcast_cpu_event(CPU_EXITED, processor->cpu_id);

    return KERN_SUCCESS;
}
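
#if 0
/*
 * Illustrative sketch (not part of this file): how a kernel subsystem
 * holding a processor_t might offline a CPU and later bring it back using
 * the routine above.  Assumes processor_start() is available as declared
 * in kern/processor.h; error handling is elided for brevity.
 */
static void
example_cycle_cpu(processor_t processor)
{
    /* Request that the CPU be taken offline; waits for it to exit. */
    if (processor_shutdown(processor) != KERN_SUCCESS) {
        return;
    }

    /* Later, bring the same CPU back online. */
    (void) processor_start(processor);
}
#endif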

/*
 * Called with interrupts disabled.
 */
static void
processor_doshutdown(
    processor_t processor)
{
    thread_t self = current_thread();

    /*
     * Get onto the processor to shutdown
     */
    processor_t prev = thread_bind(processor);
    thread_block(THREAD_CONTINUE_NULL);

    /* interrupts still disabled */
    assert(ml_get_interrupts_enabled() == FALSE);

    assert(processor == current_processor());
    assert(processor->state == PROCESSOR_SHUTDOWN);

#if CONFIG_DTRACE
    if (dtrace_cpu_state_changed_hook) {
        (*dtrace_cpu_state_changed_hook)(processor->cpu_id, FALSE);
    }
#endif

    ml_cpu_down();

#if HIBERNATION
    if (processor_avail_count < 2) {
        hibernate_vm_lock();
        hibernate_vm_unlock();
    }
#endif

    processor_set_t pset = processor->processor_set;

    pset_lock(pset);
    pset_update_processor_state(pset, processor, PROCESSOR_OFF_LINE);
    --pset->online_processor_count;
    os_atomic_dec(&processor_avail_count, relaxed);
    if (processor->is_recommended) {
        os_atomic_dec(&processor_avail_count_user, relaxed);
    }
    if (processor->processor_primary == processor) {
        os_atomic_dec(&primary_processor_avail_count, relaxed);
        if (processor->is_recommended) {
            os_atomic_dec(&primary_processor_avail_count_user, relaxed);
        }
    }
    commpage_update_active_cpus();
    SCHED(processor_queue_shutdown)(processor);
    /* pset lock dropped */
    SCHED(rt_queue_shutdown)(processor);

    thread_bind(prev);

    /* interrupts still disabled */

    /*
     * Continue processor shutdown on the processor's idle thread.
     * The handoff won't fail because the idle thread has a reserved stack.
     * Switching to the idle thread leaves interrupts disabled,
     * so we can't accidentally take an interrupt after the context switch.
     */
    thread_t shutdown_thread = processor->idle_thread;
    shutdown_thread->continuation = processor_offline;
    shutdown_thread->parameter = processor;

    thread_run(self, NULL, NULL, shutdown_thread);
}

/*
 * Called in the context of the idle thread to shut down the processor
 *
 * A shut-down processor looks like it's 'running' the idle thread parked
 * in this routine, but it's actually been powered off and has no hardware state.
 */
static void
processor_offline(
    void * parameter,
    __unused wait_result_t result)
{
    processor_t processor = (processor_t) parameter;
    thread_t self = current_thread();
    __assert_only thread_t old_thread = THREAD_NULL;

    assert(processor == current_processor());
    assert(self->state & TH_IDLE);
    assert(processor->idle_thread == self);
    assert(ml_get_interrupts_enabled() == FALSE);
    assert(self->continuation == NULL);
    assert(processor->processor_offlined == false);
    assert(processor->running_timers_active == false);

    bool enforce_quiesce_safety = gEnforceQuiesceSafety;

    /*
     * Scheduling is now disabled for this processor.
     * Ensure that primitives that need scheduling (like mutexes) know this.
     */
    if (enforce_quiesce_safety) {
        disable_preemption();
    }

    /* convince slave_main to come back here */
    processor->processor_offlined = true;

    /*
     * Switch to the interrupt stack and shut down the processor.
     *
     * When the processor comes back, it will eventually call load_context which
     * restores the context saved by machine_processor_shutdown, returning here.
     */
    old_thread = machine_processor_shutdown(self, processor_offline_intstack, processor);

    /* old_thread should be NULL because we got here through Load_context */
    assert(old_thread == THREAD_NULL);

    assert(processor == current_processor());
    assert(processor->idle_thread == current_thread());

    assert(ml_get_interrupts_enabled() == FALSE);
    assert(self->continuation == NULL);

    /* Extract the machine_param value stashed by slave_main */
    void * machine_param = self->parameter;
    self->parameter = NULL;

    /* Re-initialize the processor */
    slave_machine_init(machine_param);

    assert(processor->processor_offlined == true);
    processor->processor_offlined = false;

    if (enforce_quiesce_safety) {
        enable_preemption();
    }

    /*
     * Now that the processor is back, invoke the idle thread to find out what to do next.
     * idle_thread will enable interrupts.
     */
    thread_block(idle_thread);
    /*NOTREACHED*/
}

/*
 * Complete the shutdown and place the processor offline.
 *
 * Called at splsched in the shutdown context
 * (i.e. on the idle thread, on the interrupt stack)
 *
 * The onlining half of this is done in load_context().
 */
static void
processor_offline_intstack(
    processor_t processor)
{
    assert(processor == current_processor());
    assert(processor->active_thread == current_thread());

    timer_stop(processor->current_state, processor->last_dispatch);

    cpu_quiescent_counter_leave(processor->last_dispatch);

    PMAP_DEACTIVATE_KERNEL(processor->cpu_id);

    cpu_sleep();
    panic("zombie processor");
    /*NOTREACHED*/
}

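/*
 * host_get_boot_info:
 *
 * Return the machine-dependent boot info string, as produced by
 * machine_boot_info(), to a caller holding the privileged host port.
 */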
kern_return_t
host_get_boot_info(
    host_priv_t host_priv,
    kernel_boot_info_t boot_info)
{
    const char *src = "";
    if (host_priv == HOST_PRIV_NULL) {
        return KERN_INVALID_HOST;
    }

    /*
     * Copy the first operator string, terminated by '\0', followed by
     * standardized strings generated from the boot string.
     */
    src = machine_boot_info(boot_info, KERNEL_BOOT_INFO_MAX);
    if (src != boot_info) {
        (void) strncpy(boot_info, src, KERNEL_BOOT_INFO_MAX);
    }

    return KERN_SUCCESS;
}

#if CONFIG_DTRACE
#include <mach/sdt.h>
#endif

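/*
 * ml_io_read:
 *
 * Perform a volatile MMIO read of 1, 2, 4 or 8 bytes from a mapped
 * device register.  On x86_64, when reportphyreaddelayabs is set, the
 * access is timed with interrupts disabled and excessively slow reads
 * are reported (panic, backtrace, kdebug trace or DTrace probe,
 * depending on the configured thresholds).
 */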
unsigned long long
ml_io_read(uintptr_t vaddr, int size)
{
    unsigned long long result = 0;
    unsigned char s1;
    unsigned short s2;

#if defined(__x86_64__)
    uint64_t sabs, eabs;
    boolean_t istate, timeread = FALSE;
#if DEVELOPMENT || DEBUG
    extern uint64_t simulate_stretched_io;
    uintptr_t paddr = pmap_verify_noncacheable(vaddr);
#endif /* x86_64 DEVELOPMENT || DEBUG */
    if (__improbable(reportphyreaddelayabs != 0)) {
        istate = ml_set_interrupts_enabled(FALSE);
        sabs = mach_absolute_time();
        timeread = TRUE;
    }

#if DEVELOPMENT || DEBUG
    if (__improbable(timeread && simulate_stretched_io)) {
        sabs -= simulate_stretched_io;
    }
#endif /* x86_64 DEVELOPMENT || DEBUG */

#endif /* x86_64 */

    switch (size) {
    case 1:
        s1 = *(volatile unsigned char *)vaddr;
        result = s1;
        break;
    case 2:
        s2 = *(volatile unsigned short *)vaddr;
        result = s2;
        break;
    case 4:
        result = *(volatile unsigned int *)vaddr;
        break;
    case 8:
        result = *(volatile unsigned long long *)vaddr;
        break;
    default:
        panic("Invalid size %d for ml_io_read(%p)", size, (void *)vaddr);
        break;
    }

#if defined(__x86_64__)
    if (__improbable(timeread == TRUE)) {
        eabs = mach_absolute_time();

#if DEVELOPMENT || DEBUG
        iotrace(IOTRACE_IO_READ, vaddr, paddr, size, result, sabs, eabs - sabs);
#endif

        if (__improbable((eabs - sabs) > reportphyreaddelayabs)) {
#if !(DEVELOPMENT || DEBUG)
            uintptr_t paddr = kvtophys(vaddr);
#endif

            (void)ml_set_interrupts_enabled(istate);

            if (phyreadpanic && (machine_timeout_suspended() == FALSE)) {
                panic_notify();
                panic("Read from IO vaddr 0x%lx paddr 0x%lx took %llu ns, "
                    "result: 0x%llx (start: %llu, end: %llu), ceiling: %llu",
                    vaddr, paddr, (eabs - sabs), result, sabs, eabs,
                    reportphyreaddelayabs);
            }

            if (reportphyreadosbt) {
                OSReportWithBacktrace("ml_io_read(v=%p, p=%p) size %d result 0x%llx "
                    "took %lluus",
                    (void *)vaddr, (void *)paddr, size, result,
                    (eabs - sabs) / NSEC_PER_USEC);
            }
#if CONFIG_DTRACE
            DTRACE_PHYSLAT5(physioread, uint64_t, (eabs - sabs),
                uint64_t, vaddr, uint32_t, size, uint64_t, paddr, uint64_t, result);
#endif /* CONFIG_DTRACE */
        } else if (__improbable(tracephyreaddelayabs > 0 && (eabs - sabs) > tracephyreaddelayabs)) {
#if !(DEVELOPMENT || DEBUG)
            uintptr_t paddr = kvtophys(vaddr);
#endif

            KDBG(MACHDBG_CODE(DBG_MACH_IO, DBC_MACH_IO_MMIO_READ),
                (eabs - sabs), VM_KERNEL_UNSLIDE_OR_PERM(vaddr), paddr, result);

            (void)ml_set_interrupts_enabled(istate);
        } else {
            (void)ml_set_interrupts_enabled(istate);
        }
    }
#endif /* x86_64 */
    return result;
}

unsigned int
ml_io_read8(uintptr_t vaddr)
{
    return (unsigned) ml_io_read(vaddr, 1);
}

unsigned int
ml_io_read16(uintptr_t vaddr)
{
    return (unsigned) ml_io_read(vaddr, 2);
}

unsigned int
ml_io_read32(uintptr_t vaddr)
{
    return (unsigned) ml_io_read(vaddr, 4);
}

unsigned long long
ml_io_read64(uintptr_t vaddr)
{
    return ml_io_read(vaddr, 8);
}
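
#if 0
/*
 * Illustrative sketch (not part of this file): reading a 32-bit device
 * register through the checked accessor rather than a raw volatile
 * dereference.  `regs_base' and REG_STATUS_OFFSET are hypothetical names
 * standing in for a driver's mapped register block.
 */
static uint32_t
example_read_status(uintptr_t regs_base)
{
    /* ml_io_read32 routes through ml_io_read(vaddr, 4) above. */
    return (uint32_t) ml_io_read32(regs_base + REG_STATUS_OFFSET);
}
#endif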

/* ml_io_write* */

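/*
 * ml_io_write:
 *
 * Perform a volatile MMIO write of 1, 2, 4 or 8 bytes to a mapped
 * device register.  On x86_64 the write is timed and reported the same
 * way as ml_io_read() when reportphywritedelayabs is set.
 */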
void
ml_io_write(uintptr_t vaddr, uint64_t val, int size)
{
#if defined(__x86_64__)
    uint64_t sabs, eabs;
    boolean_t istate, timewrite = FALSE;
#if DEVELOPMENT || DEBUG
    extern uint64_t simulate_stretched_io;
    uintptr_t paddr = pmap_verify_noncacheable(vaddr);
#endif /* x86_64 DEVELOPMENT || DEBUG */
    if (__improbable(reportphywritedelayabs != 0)) {
        istate = ml_set_interrupts_enabled(FALSE);
        sabs = mach_absolute_time();
        timewrite = TRUE;
    }

#if DEVELOPMENT || DEBUG
    if (__improbable(timewrite && simulate_stretched_io)) {
        sabs -= simulate_stretched_io;
    }
#endif /* x86_64 DEVELOPMENT || DEBUG */
#endif /* x86_64 */

    switch (size) {
    case 1:
        *(volatile uint8_t *)vaddr = (uint8_t)val;
        break;
    case 2:
        *(volatile uint16_t *)vaddr = (uint16_t)val;
        break;
    case 4:
        *(volatile uint32_t *)vaddr = (uint32_t)val;
        break;
    case 8:
        *(volatile uint64_t *)vaddr = (uint64_t)val;
        break;
    default:
        panic("Invalid size %d for ml_io_write(%p, 0x%llx)", size, (void *)vaddr, val);
        break;
    }

#if defined(__x86_64__)
    if (__improbable(timewrite == TRUE)) {
        eabs = mach_absolute_time();

#if DEVELOPMENT || DEBUG
        iotrace(IOTRACE_IO_WRITE, vaddr, paddr, size, val, sabs, eabs - sabs);
#endif

        if (__improbable((eabs - sabs) > reportphywritedelayabs)) {
#if !(DEVELOPMENT || DEBUG)
            uintptr_t paddr = kvtophys(vaddr);
#endif

            (void)ml_set_interrupts_enabled(istate);

            if (phywritepanic && (machine_timeout_suspended() == FALSE)) {
                panic_notify();
                panic("Write to IO vaddr %p paddr %p val 0x%llx took %llu ns,"
                    " (start: %llu, end: %llu), ceiling: %llu",
                    (void *)vaddr, (void *)paddr, val, (eabs - sabs), sabs, eabs,
                    reportphywritedelayabs);
            }

            if (reportphywriteosbt) {
                OSReportWithBacktrace("ml_io_write size %d (v=%p, p=%p, 0x%llx) "
                    "took %lluus",
                    size, (void *)vaddr, (void *)paddr, val, (eabs - sabs) / NSEC_PER_USEC);
            }
#if CONFIG_DTRACE
            DTRACE_PHYSLAT5(physiowrite, uint64_t, (eabs - sabs),
                uint64_t, vaddr, uint32_t, size, uint64_t, paddr, uint64_t, val);
#endif /* CONFIG_DTRACE */
        } else if (__improbable(tracephywritedelayabs > 0 && (eabs - sabs) > tracephywritedelayabs)) {
#if !(DEVELOPMENT || DEBUG)
            uintptr_t paddr = kvtophys(vaddr);
#endif

            KDBG(MACHDBG_CODE(DBG_MACH_IO, DBC_MACH_IO_MMIO_WRITE),
                (eabs - sabs), VM_KERNEL_UNSLIDE_OR_PERM(vaddr), paddr, val);

            (void)ml_set_interrupts_enabled(istate);
        } else {
            (void)ml_set_interrupts_enabled(istate);
        }
    }
#endif /* x86_64 */
}

void
ml_io_write8(uintptr_t vaddr, uint8_t val)
{
    ml_io_write(vaddr, val, 1);
}

void
ml_io_write16(uintptr_t vaddr, uint16_t val)
{
    ml_io_write(vaddr, val, 2);
}

void
ml_io_write32(uintptr_t vaddr, uint32_t val)
{
    ml_io_write(vaddr, val, 4);
}

void
ml_io_write64(uintptr_t vaddr, uint64_t val)
{
    ml_io_write(vaddr, val, 8);
}

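/*
 * CPU event callbacks:
 *
 * Interested subsystems register a callback that is invoked for CPU
 * state-change events (see the ml_broadcast_cpu_event() calls above).
 * Registrations are prepended to a singly linked chain under a spin lock;
 * elements are permanently allocated and never removed, so
 * ml_broadcast_cpu_event() can walk the chain without taking the lock.
 */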
struct cpu_callback_chain_elem {
    cpu_callback_t fn;
    void *param;
    struct cpu_callback_chain_elem *next;
};

static struct cpu_callback_chain_elem *cpu_callback_chain;
static LCK_GRP_DECLARE(cpu_callback_chain_lock_grp, "cpu_callback_chain");
static LCK_SPIN_DECLARE(cpu_callback_chain_lock, &cpu_callback_chain_lock_grp);

void
cpu_event_register_callback(cpu_callback_t fn, void *param)
{
    struct cpu_callback_chain_elem *new_elem;

    new_elem = zalloc_permanent_type(struct cpu_callback_chain_elem);
    if (!new_elem) {
        panic("can't allocate cpu_callback_chain_elem");
    }

    lck_spin_lock(&cpu_callback_chain_lock);
    new_elem->next = cpu_callback_chain;
    new_elem->fn = fn;
    new_elem->param = param;
    os_atomic_store(&cpu_callback_chain, new_elem, release);
    lck_spin_unlock(&cpu_callback_chain_lock);
}

__attribute__((noreturn))
void
cpu_event_unregister_callback(__unused cpu_callback_t fn)
{
    panic("Unfortunately, cpu_event_unregister_callback is unimplemented.");
}

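/*
 * ml_broadcast_cpu_event:
 *
 * Deliver a CPU state-change event to every registered callback.
 * The chain is traversed lock-free: the dependency-ordered load pairs
 * with the release store in cpu_event_register_callback(), so a newly
 * published element is always observed fully initialized.
 */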
void
ml_broadcast_cpu_event(enum cpu_event event, unsigned int cpu_or_cluster)
{
    struct cpu_callback_chain_elem *cursor;

    cursor = os_atomic_load(&cpu_callback_chain, dependency);
    for (; cursor != NULL; cursor = cursor->next) {
        cursor->fn(cursor->param, event, cpu_or_cluster);
    }
}
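
#if 0
/*
 * Illustrative sketch (not part of this file): registering for CPU events.
 * The callback and its registration function are hypothetical names; real
 * callers pass whatever state they need through the `param' argument.
 * The callback runs for every event in `enum cpu_event' (for example
 * CPU_EXIT_REQUESTED and CPU_EXITED, broadcast above), so it must filter
 * on the events it cares about.
 */
static void
example_cpu_event_cb(__unused void *param, enum cpu_event event, unsigned int cpu_or_cluster)
{
    if (event == CPU_EXITED) {
        /* e.g. tear down per-CPU state for cpu_or_cluster */
    }
}

static void
example_register(void)
{
    /* Registration is permanent; there is no unregister path. */
    cpu_event_register_callback(example_cpu_event_cb, NULL);
}
#endif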