]> git.saurik.com Git - apple/xnu.git/blame - osfmk/kern/machine.c
xnu-7195.101.1.tar.gz
[apple/xnu.git] / osfmk / kern / machine.c
CommitLineData
1c79356b 1/*
0a7de745 2 * Copyright (c) 2000-2019 Apple Inc. All rights reserved.
1c79356b 3 *
2d21ac55 4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
0a7de745 5 *
2d21ac55
A
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
0a7de745 14 *
2d21ac55
A
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
0a7de745 17 *
2d21ac55
A
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
8f6c56a5
A
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
2d21ac55
A
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
0a7de745 25 *
2d21ac55 26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
1c79356b
A
27 */
28/*
29 * @OSF_COPYRIGHT@
30 */
0a7de745 31/*
1c79356b
A
32 * Mach Operating System
33 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
34 * All Rights Reserved.
0a7de745 35 *
1c79356b
A
36 * Permission to use, copy, modify and distribute this software and its
37 * documentation is hereby granted, provided that both the copyright
38 * notice and this permission notice appear in all copies of the
39 * software, derivative works or modified versions, and any portions
40 * thereof, and that both notices appear in supporting documentation.
0a7de745 41 *
1c79356b
A
42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
0a7de745 45 *
1c79356b 46 * Carnegie Mellon requests users of this software to return to
0a7de745 47 *
1c79356b
A
48 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
49 * School of Computer Science
50 * Carnegie Mellon University
51 * Pittsburgh PA 15213-3890
0a7de745 52 *
1c79356b
A
53 * any improvements or extensions that they make and grant Carnegie Mellon
54 * the rights to redistribute these changes.
55 */
56/*
57 */
58/*
59 * File: kern/machine.c
60 * Author: Avadis Tevanian, Jr.
61 * Date: 1987
62 *
63 * Support for machine independent machine abstraction.
64 */
65
1c79356b 66#include <string.h>
91447636
A
67
68#include <mach/mach_types.h>
1c79356b
A
69#include <mach/boolean.h>
70#include <mach/kern_return.h>
1c79356b
A
71#include <mach/machine.h>
72#include <mach/host_info.h>
73#include <mach/host_reboot.h>
91447636
A
74#include <mach/host_priv_server.h>
75#include <mach/processor_server.h>
76
77#include <kern/kern_types.h>
1c79356b 78#include <kern/cpu_data.h>
d9a64523 79#include <kern/cpu_quiesce.h>
1c79356b
A
80#include <kern/ipc_host.h>
81#include <kern/host.h>
1c79356b 82#include <kern/machine.h>
91447636 83#include <kern/misc_protos.h>
1c79356b
A
84#include <kern/processor.h>
85#include <kern/queue.h>
86#include <kern/sched.h>
cb323159 87#include <kern/startup.h>
1c79356b
A
88#include <kern/task.h>
89#include <kern/thread.h>
1c79356b 90
6d2010ae 91#include <machine/commpage.h>
f427ee49 92#include <machine/machine_routines.h>
6d2010ae 93
2d21ac55 94#if HIBERNATION
3a60a9f5 95#include <IOKit/IOHibernatePrivate.h>
2d21ac55 96#endif
0c530ab8 97#include <IOKit/IOPlatformExpert.h>
1c79356b 98
fe8ab488
A
99#if CONFIG_DTRACE
100extern void (*dtrace_cpu_state_changed_hook)(int, boolean_t);
101#endif
102
0a7de745 103#if defined(__x86_64__)
f427ee49 104#include <i386/panic_notify.h>
0a7de745
A
105#include <libkern/OSDebug.h>
106#endif
107
1c79356b
A
/*
 *	Exported variables:
 */

/* Global machine description shared with the rest of the kernel. */
struct machine_info     machine_info;
1c79356b
A
113
/* Forwards */

/* Second phase of processor_shutdown(); runs bound to the target CPU. */
static void
processor_doshutdown(processor_t processor);

/* Idle-thread continuation that powers the CPU down and back up. */
static void
processor_offline(void * parameter, __unused wait_result_t result);

/* Final shutdown step, run on the interrupt stack; never returns. */
static void
processor_offline_intstack(processor_t processor) __dead2;
123
/*
 *	processor_up:
 *
 *	Flag processor as up and running, and available
 *	for scheduling.  Bumps the pset's online count, the global
 *	available-processor counters (and the primary-CPU counters when
 *	this processor is its own primary), publishes the new active CPU
 *	count to the commpage, and finally notifies the platform layer.
 */
void
processor_up(
	processor_t                     processor)
{
	processor_set_t         pset;
	spl_t                           s;

	/* Interrupts stay off while the pset lock is held at splsched. */
	s = splsched();
	init_ast_check(processor);
	pset = processor->processor_set;
	pset_lock(pset);

	++pset->online_processor_count;
	pset_update_processor_state(pset, processor, PROCESSOR_RUNNING);
	os_atomic_inc(&processor_avail_count, relaxed);
	if (processor->is_recommended) {
		os_atomic_inc(&processor_avail_count_user, relaxed);
		/* Let the scheduler know this pset can take threads again. */
		SCHED(pset_made_schedulable)(processor, pset, false);
	}
	if (processor->processor_primary == processor) {
		os_atomic_inc(&primary_processor_avail_count, relaxed);
		if (processor->is_recommended) {
			os_atomic_inc(&primary_processor_avail_count_user, relaxed);
		}
	}
	/* Publish the updated active-CPU count to user space. */
	commpage_update_active_cpus();
	pset_unlock(pset);
	ml_cpu_up();
	splx(s);

#if CONFIG_DTRACE
	/* Tell dtrace this CPU came online. */
	if (dtrace_cpu_state_changed_hook) {
		(*dtrace_cpu_state_changed_hook)(processor->cpu_id, TRUE);
	}
#endif
}
fe8ab488 166#include <atm/atm_internal.h>
1c79356b
A
167
168kern_return_t
169host_reboot(
0a7de745
A
170 host_priv_t host_priv,
171 int options)
1c79356b 172{
0a7de745
A
173 if (host_priv == HOST_PRIV_NULL) {
174 return KERN_INVALID_HOST;
175 }
1c79356b 176
4bd07ac2 177#if DEVELOPMENT || DEBUG
1c79356b
A
178 if (options & HOST_REBOOT_DEBUGGER) {
179 Debugger("Debugger");
0a7de745 180 return KERN_SUCCESS;
1c79356b 181 }
4bd07ac2 182#endif
1c79356b 183
0a7de745
A
184 if (options & HOST_REBOOT_UPSDELAY) {
185 // UPS power cutoff path
186 PEHaltRestart( kPEUPSDelayHaltCPU );
187 } else {
188 halt_all_cpus(!(options & HOST_REBOOT_HALT));
189 }
9bccf70c 190
0a7de745 191 return KERN_SUCCESS;
1c79356b
A
192}
193
/*
 *	processor_assign:
 *
 *	Reassigning a processor to another processor set is not
 *	supported; unconditionally fails.
 */
kern_return_t
processor_assign(
	__unused processor_t            processor,
	__unused processor_set_t        new_pset,
	__unused boolean_t              wait)
{
	return KERN_FAILURE;
}
202
1c79356b
A
203kern_return_t
204processor_shutdown(
0a7de745 205 processor_t processor)
1c79356b 206{
0a7de745
A
207 processor_set_t pset;
208 spl_t s;
1c79356b 209
f427ee49 210 ml_cpu_begin_state_transition(processor->cpu_id);
1c79356b 211 s = splsched();
2d21ac55
A
212 pset = processor->processor_set;
213 pset_lock(pset);
91447636 214 if (processor->state == PROCESSOR_OFF_LINE) {
1c79356b 215 /*
91447636 216 * Success if already shutdown.
1c79356b 217 */
2d21ac55 218 pset_unlock(pset);
1c79356b 219 splx(s);
f427ee49 220 ml_cpu_end_state_transition(processor->cpu_id);
1c79356b 221
0a7de745 222 return KERN_SUCCESS;
1c79356b
A
223 }
224
f427ee49
A
225 if (!ml_cpu_can_exit(processor->cpu_id)) {
226 /*
227 * Failure if disallowed by arch code.
228 */
229 pset_unlock(pset);
230 splx(s);
231 ml_cpu_end_state_transition(processor->cpu_id);
232
233 return KERN_FAILURE;
234 }
235
55e303ae
A
236 if (processor->state == PROCESSOR_START) {
237 /*
238 * Failure if currently being started.
239 */
2d21ac55 240 pset_unlock(pset);
55e303ae 241 splx(s);
1c79356b 242
0a7de745 243 return KERN_FAILURE;
55e303ae 244 }
1c79356b 245
55e303ae 246 /*
2d21ac55 247 * If the processor is dispatching, let it finish.
55e303ae 248 */
2d21ac55
A
249 while (processor->state == PROCESSOR_DISPATCHING) {
250 pset_unlock(pset);
fe8ab488 251 splx(s);
2d21ac55 252 delay(1);
fe8ab488 253 s = splsched();
2d21ac55 254 pset_lock(pset);
91447636 255 }
2d21ac55
A
256
257 /*
258 * Success if already being shutdown.
259 */
260 if (processor->state == PROCESSOR_SHUTDOWN) {
261 pset_unlock(pset);
91447636 262 splx(s);
f427ee49 263 ml_cpu_end_state_transition(processor->cpu_id);
91447636 264
0a7de745 265 return KERN_SUCCESS;
55e303ae 266 }
1c79356b 267
f427ee49 268 ml_broadcast_cpu_event(CPU_EXIT_REQUESTED, processor->cpu_id);
d9a64523 269 pset_update_processor_state(pset, processor, PROCESSOR_SHUTDOWN);
2d21ac55 270 pset_unlock(pset);
1c79356b 271
55e303ae 272 processor_doshutdown(processor);
1c79356b 273 splx(s);
1c79356b 274
b0d623f7 275 cpu_exit_wait(processor->cpu_id);
f427ee49
A
276 ml_cpu_end_state_transition(processor->cpu_id);
277 ml_broadcast_cpu_event(CPU_EXITED, processor->cpu_id);
5353443c 278
0a7de745 279 return KERN_SUCCESS;
1c79356b
A
280}
281
/*
 * Called with interrupts disabled.
 *
 * First half of the shutdown: bind the calling thread to the target
 * processor, tear down its scheduling state, then hand off to the
 * processor's idle thread which completes the power-down.
 */
static void
processor_doshutdown(
	processor_t processor)
{
	thread_t self = current_thread();

	/*
	 * Get onto the processor to shutdown
	 */
	processor_t prev = thread_bind(processor);
	thread_block(THREAD_CONTINUE_NULL);

	/* interrupts still disabled */
	assert(ml_get_interrupts_enabled() == FALSE);

	assert(processor == current_processor());
	assert(processor->state == PROCESSOR_SHUTDOWN);

#if CONFIG_DTRACE
	/* Tell dtrace this CPU is going offline. */
	if (dtrace_cpu_state_changed_hook) {
		(*dtrace_cpu_state_changed_hook)(processor->cpu_id, FALSE);
	}
#endif

	ml_cpu_down();

#if HIBERNATION
	/*
	 * Cycle the hibernation VM lock when this is about to become
	 * the last running processor.
	 */
	if (processor_avail_count < 2) {
		hibernate_vm_lock();
		hibernate_vm_unlock();
	}
#endif

	processor_set_t pset = processor->processor_set;

	pset_lock(pset);
	pset_update_processor_state(pset, processor, PROCESSOR_OFF_LINE);
	--pset->online_processor_count;
	os_atomic_dec(&processor_avail_count, relaxed);
	if (processor->is_recommended) {
		os_atomic_dec(&processor_avail_count_user, relaxed);
	}
	if (processor->processor_primary == processor) {
		os_atomic_dec(&primary_processor_avail_count, relaxed);
		if (processor->is_recommended) {
			os_atomic_dec(&primary_processor_avail_count_user, relaxed);
		}
	}
	commpage_update_active_cpus();
	/* Drain this processor's run queues into the rest of the pset. */
	SCHED(processor_queue_shutdown)(processor);
	/* pset lock dropped */
	SCHED(rt_queue_shutdown)(processor);

	/* Restore the caller's previous binding. */
	thread_bind(prev);

	/* interrupts still disabled */

	/*
	 * Continue processor shutdown on the processor's idle thread.
	 * The handoff won't fail because the idle thread has a reserved stack.
	 * Switching to the idle thread leaves interrupts disabled,
	 * so we can't accidentally take an interrupt after the context switch.
	 */
	thread_t shutdown_thread = processor->idle_thread;
	shutdown_thread->continuation = processor_offline;
	shutdown_thread->parameter = processor;

	thread_run(self, NULL, NULL, shutdown_thread);
}
354
/*
 * Called in the context of the idle thread to shut down the processor
 *
 * A shut-down processor looks like it's 'running' the idle thread parked
 * in this routine, but it's actually been powered off and has no hardware state.
 */
static void
processor_offline(
	void * parameter,
	__unused wait_result_t result)
{
	processor_t processor = (processor_t) parameter;
	thread_t self = current_thread();
	__assert_only thread_t old_thread = THREAD_NULL;

	assert(processor == current_processor());
	assert(self->state & TH_IDLE);
	assert(processor->idle_thread == self);
	assert(ml_get_interrupts_enabled() == FALSE);
	assert(self->continuation == NULL);
	assert(processor->processor_offlined == false);
	assert(processor->running_timers_active == false);

	/* Sample once so disable/enable below are guaranteed to pair up. */
	bool enforce_quiesce_safety = gEnforceQuiesceSafety;

	/*
	 * Scheduling is now disabled for this processor.
	 * Ensure that primitives that need scheduling (like mutexes) know this.
	 */
	if (enforce_quiesce_safety) {
		disable_preemption();
	}

	/* convince slave_main to come back here */
	processor->processor_offlined = true;

	/*
	 * Switch to the interrupt stack and shut down the processor.
	 *
	 * When the processor comes back, it will eventually call load_context which
	 * restores the context saved by machine_processor_shutdown, returning here.
	 */
	old_thread = machine_processor_shutdown(self, processor_offline_intstack, processor);

	/* old_thread should be NULL because we got here through Load_context */
	assert(old_thread == THREAD_NULL);

	assert(processor == current_processor());
	assert(processor->idle_thread == current_thread());

	assert(ml_get_interrupts_enabled() == FALSE);
	assert(self->continuation == NULL);

	/* Extract the machine_param value stashed by slave_main */
	void * machine_param = self->parameter;
	self->parameter = NULL;

	/* Re-initialize the processor */
	slave_machine_init(machine_param);

	assert(processor->processor_offlined == true);
	processor->processor_offlined = false;

	if (enforce_quiesce_safety) {
		enable_preemption();
	}

	/*
	 * Now that the processor is back, invoke the idle thread to find out what to do next.
	 * idle_thread will enable interrupts.
	 */
	thread_block(idle_thread);
	/*NOTREACHED*/
}
91447636 429
cb323159
A
/*
 * Complete the shutdown and place the processor offline.
 *
 * Called at splsched in the shutdown context
 * (i.e. on the idle thread, on the interrupt stack)
 *
 * The onlining half of this is done in load_context().
 */
static void
processor_offline_intstack(
	processor_t processor)
{
	assert(processor == current_processor());
	assert(processor->active_thread == current_thread());

	/* Close out this CPU's current accounting interval. */
	timer_stop(processor->current_state, processor->last_dispatch);

	/* Leave the quiescent-counter set before the CPU disappears. */
	cpu_quiescent_counter_leave(processor->last_dispatch);

	PMAP_DEACTIVATE_KERNEL(processor->cpu_id);

	/* Power the CPU down; it only resumes via the online path. */
	cpu_sleep();
	panic("zombie processor");
	/*NOTREACHED*/
}
455
456kern_return_t
457host_get_boot_info(
0a7de745
A
458 host_priv_t host_priv,
459 kernel_boot_info_t boot_info)
1c79356b 460{
91447636 461 const char *src = "";
0a7de745
A
462 if (host_priv == HOST_PRIV_NULL) {
463 return KERN_INVALID_HOST;
464 }
1c79356b 465
1c79356b
A
466 /*
467 * Copy first operator string terminated by '\0' followed by
468 * standardized strings generated from boot string.
469 */
470 src = machine_boot_info(boot_info, KERNEL_BOOT_INFO_MAX);
0a7de745 471 if (src != boot_info) {
1c79356b 472 (void) strncpy(boot_info, src, KERNEL_BOOT_INFO_MAX);
0a7de745 473 }
1c79356b 474
0a7de745 475 return KERN_SUCCESS;
1c79356b 476}
813fb2f6
A
477
478#if CONFIG_DTRACE
479#include <mach/sdt.h>
480#endif
481
0a7de745
A
/*
 *	ml_io_read:
 *
 *	Perform a width-`size` (1, 2, 4, or 8 byte) volatile read from
 *	MMIO virtual address `vaddr` and return the value zero-extended
 *	to 64 bits.  Panics on any other size.
 *
 *	On x86_64, when reportphyreaddelayabs is configured, the access
 *	is timed with interrupts disabled; slow reads are panicked,
 *	backtraced, traced to dtrace, or logged to kdebug depending on
 *	the configured thresholds and flags.
 */
unsigned long long
ml_io_read(uintptr_t vaddr, int size)
{
	unsigned long long result = 0;
	unsigned char s1;
	unsigned short s2;

#if defined(__x86_64__)
	uint64_t sabs, eabs;
	boolean_t istate, timeread = FALSE;
#if DEVELOPMENT || DEBUG
	extern uint64_t simulate_stretched_io;
	uintptr_t paddr = pmap_verify_noncacheable(vaddr);
#endif /* x86_64 DEVELOPMENT || DEBUG */
	/* Only pay for timestamping when a report threshold is set. */
	if (__improbable(reportphyreaddelayabs != 0)) {
		istate = ml_set_interrupts_enabled(FALSE);
		sabs = mach_absolute_time();
		timeread = TRUE;
	}

#if DEVELOPMENT || DEBUG
	/* Test hook: pretend the access took longer than it really did. */
	if (__improbable(timeread && simulate_stretched_io)) {
		sabs -= simulate_stretched_io;
	}
#endif /* x86_64 DEVELOPMENT || DEBUG */

#endif /* x86_64 */

	switch (size) {
	case 1:
		s1 = *(volatile unsigned char *)vaddr;
		result = s1;
		break;
	case 2:
		s2 = *(volatile unsigned short *)vaddr;
		result = s2;
		break;
	case 4:
		result = *(volatile unsigned int *)vaddr;
		break;
	case 8:
		result = *(volatile unsigned long long *)vaddr;
		break;
	default:
		panic("Invalid size %d for ml_io_read(%p)", size, (void *)vaddr);
		break;
	}

#if defined(__x86_64__)
	if (__improbable(timeread == TRUE)) {
		eabs = mach_absolute_time();

#if DEVELOPMENT || DEBUG
		iotrace(IOTRACE_IO_READ, vaddr, paddr, size, result, sabs, eabs - sabs);
#endif

		if (__improbable((eabs - sabs) > reportphyreaddelayabs)) {
			/* Slow read: report per the configured policy. */
#if !(DEVELOPMENT || DEBUG)
			uintptr_t paddr = kvtophys(vaddr);
#endif

			(void)ml_set_interrupts_enabled(istate);

			if (phyreadpanic && (machine_timeout_suspended() == FALSE)) {
				panic_notify();
				panic("Read from IO vaddr 0x%lx paddr 0x%lx took %llu ns, "
				    "result: 0x%llx (start: %llu, end: %llu), ceiling: %llu",
				    vaddr, paddr, (eabs - sabs), result, sabs, eabs,
				    reportphyreaddelayabs);
			}

			if (reportphyreadosbt) {
				OSReportWithBacktrace("ml_io_read(v=%p, p=%p) size %d result 0x%llx "
				    "took %lluus",
				    (void *)vaddr, (void *)paddr, size, result,
				    (eabs - sabs) / NSEC_PER_USEC);
			}
#if CONFIG_DTRACE
			DTRACE_PHYSLAT5(physioread, uint64_t, (eabs - sabs),
			    uint64_t, vaddr, uint32_t, size, uint64_t, paddr, uint64_t, result);
#endif /* CONFIG_DTRACE */
		} else if (__improbable(tracephyreaddelayabs > 0 && (eabs - sabs) > tracephyreaddelayabs)) {
			/* Moderately slow read: emit a kdebug trace only. */
#if !(DEVELOPMENT || DEBUG)
			uintptr_t paddr = kvtophys(vaddr);
#endif

			KDBG(MACHDBG_CODE(DBG_MACH_IO, DBC_MACH_IO_MMIO_READ),
			    (eabs - sabs), VM_KERNEL_UNSLIDE_OR_PERM(vaddr), paddr, result);

			(void)ml_set_interrupts_enabled(istate);
		} else {
			(void)ml_set_interrupts_enabled(istate);
		}
	}
#endif /* x86_64 */
	return result;
}
579
0a7de745
A
/* Width-specific convenience wrappers around ml_io_read(). */

unsigned int
ml_io_read8(uintptr_t vaddr)
{
	return (unsigned int) ml_io_read(vaddr, 1);
}

unsigned int
ml_io_read16(uintptr_t vaddr)
{
	return (unsigned int) ml_io_read(vaddr, 2);
}

unsigned int
ml_io_read32(uintptr_t vaddr)
{
	return (unsigned int) ml_io_read(vaddr, 4);
}

unsigned long long
ml_io_read64(uintptr_t vaddr)
{
	return ml_io_read(vaddr, 8);
}
0a7de745
A
603
/* ml_io_write* */

/*
 *	ml_io_write:
 *
 *	Perform a width-`size` (1, 2, 4, or 8 byte) volatile write of
 *	`val` to MMIO virtual address `vaddr`.  Panics on any other size.
 *
 *	On x86_64, when reportphywritedelayabs is configured, the access
 *	is timed with interrupts disabled; slow writes are panicked,
 *	backtraced, traced to dtrace, or logged to kdebug depending on
 *	the configured thresholds and flags (mirrors ml_io_read).
 */
void
ml_io_write(uintptr_t vaddr, uint64_t val, int size)
{
#if defined(__x86_64__)
	uint64_t sabs, eabs;
	boolean_t istate, timewrite = FALSE;
#if DEVELOPMENT || DEBUG
	extern uint64_t simulate_stretched_io;
	uintptr_t paddr = pmap_verify_noncacheable(vaddr);
#endif /* x86_64 DEVELOPMENT || DEBUG */
	/* Only pay for timestamping when a report threshold is set. */
	if (__improbable(reportphywritedelayabs != 0)) {
		istate = ml_set_interrupts_enabled(FALSE);
		sabs = mach_absolute_time();
		timewrite = TRUE;
	}

#if DEVELOPMENT || DEBUG
	/* Test hook: pretend the access took longer than it really did. */
	if (__improbable(timewrite && simulate_stretched_io)) {
		sabs -= simulate_stretched_io;
	}
#endif /* x86_64 DEVELOPMENT || DEBUG */
#endif /* x86_64 */

	switch (size) {
	case 1:
		*(volatile uint8_t *)vaddr = (uint8_t)val;
		break;
	case 2:
		*(volatile uint16_t *)vaddr = (uint16_t)val;
		break;
	case 4:
		*(volatile uint32_t *)vaddr = (uint32_t)val;
		break;
	case 8:
		*(volatile uint64_t *)vaddr = (uint64_t)val;
		break;
	default:
		panic("Invalid size %d for ml_io_write(%p, 0x%llx)", size, (void *)vaddr, val);
		break;
	}

#if defined(__x86_64__)
	if (__improbable(timewrite == TRUE)) {
		eabs = mach_absolute_time();

#if DEVELOPMENT || DEBUG
		iotrace(IOTRACE_IO_WRITE, vaddr, paddr, size, val, sabs, eabs - sabs);
#endif

		if (__improbable((eabs - sabs) > reportphywritedelayabs)) {
			/* Slow write: report per the configured policy. */
#if !(DEVELOPMENT || DEBUG)
			uintptr_t paddr = kvtophys(vaddr);
#endif

			(void)ml_set_interrupts_enabled(istate);

			if (phywritepanic && (machine_timeout_suspended() == FALSE)) {
				panic_notify();
				panic("Write to IO vaddr %p paddr %p val 0x%llx took %llu ns,"
				    " (start: %llu, end: %llu), ceiling: %llu",
				    (void *)vaddr, (void *)paddr, val, (eabs - sabs), sabs, eabs,
				    reportphywritedelayabs);
			}

			if (reportphywriteosbt) {
				OSReportWithBacktrace("ml_io_write size %d (v=%p, p=%p, 0x%llx) "
				    "took %lluus",
				    size, (void *)vaddr, (void *)paddr, val, (eabs - sabs) / NSEC_PER_USEC);
			}
#if CONFIG_DTRACE
			DTRACE_PHYSLAT5(physiowrite, uint64_t, (eabs - sabs),
			    uint64_t, vaddr, uint32_t, size, uint64_t, paddr, uint64_t, val);
#endif /* CONFIG_DTRACE */
		} else if (__improbable(tracephywritedelayabs > 0 && (eabs - sabs) > tracephywritedelayabs)) {
			/* Moderately slow write: emit a kdebug trace only. */
#if !(DEVELOPMENT || DEBUG)
			uintptr_t paddr = kvtophys(vaddr);
#endif

			KDBG(MACHDBG_CODE(DBG_MACH_IO, DBC_MACH_IO_MMIO_WRITE),
			    (eabs - sabs), VM_KERNEL_UNSLIDE_OR_PERM(vaddr), paddr, val);

			(void)ml_set_interrupts_enabled(istate);
		} else {
			(void)ml_set_interrupts_enabled(istate);
		}
	}
#endif /* x86_64 */
}
694
/* Width-specific convenience wrappers around ml_io_write(). */

void
ml_io_write8(uintptr_t vaddr, uint8_t val)
{
	ml_io_write(vaddr, (uint64_t)val, 1);
}

void
ml_io_write16(uintptr_t vaddr, uint16_t val)
{
	ml_io_write(vaddr, (uint64_t)val, 2);
}

void
ml_io_write32(uintptr_t vaddr, uint32_t val)
{
	ml_io_write(vaddr, (uint64_t)val, 4);
}

void
ml_io_write64(uintptr_t vaddr, uint64_t val)
{
	ml_io_write(vaddr, val, 8);
}
f427ee49
A
718
/*
 * Registered CPU-event callbacks live on a singly linked chain of
 * permanently allocated nodes; nodes are never removed (see
 * cpu_event_unregister_callback).
 */
struct cpu_callback_chain_elem {
	cpu_callback_t  fn;     /* callback to invoke */
	void            *param; /* opaque argument handed back to fn */
	struct cpu_callback_chain_elem *next;
};

/* Chain head: written under the spinlock, read lock-free by broadcasters. */
static struct cpu_callback_chain_elem *cpu_callback_chain;
static LCK_GRP_DECLARE(cpu_callback_chain_lock_grp, "cpu_callback_chain");
static LCK_SPIN_DECLARE(cpu_callback_chain_lock, &cpu_callback_chain_lock_grp);
728
/*
 *	cpu_event_register_callback:
 *
 *	Prepend a new (fn, param) callback to the chain.  The node is
 *	fully initialized before being published with release ordering,
 *	so lock-free readers in ml_broadcast_cpu_event() always observe
 *	a consistent element.  Allocation is permanent; there is no
 *	corresponding free.
 */
void
cpu_event_register_callback(cpu_callback_t fn, void *param)
{
	struct cpu_callback_chain_elem *new_elem;

	new_elem = zalloc_permanent_type(struct cpu_callback_chain_elem);
	if (!new_elem) {
		panic("can't allocate cpu_callback_chain_elem");
	}

	lck_spin_lock(&cpu_callback_chain_lock);
	new_elem->next = cpu_callback_chain;
	new_elem->fn = fn;
	new_elem->param = param;
	/* release: publish only after fn/param/next are written */
	os_atomic_store(&cpu_callback_chain, new_elem, release);
	lck_spin_unlock(&cpu_callback_chain_lock);
}
746
/*
 *	cpu_event_unregister_callback:
 *
 *	Unregistration is not implemented (chain nodes are permanent);
 *	always panics.
 */
__attribute__((noreturn))
void
cpu_event_unregister_callback(__unused cpu_callback_t fn)
{
	panic("Unfortunately, cpu_event_unregister_callback is unimplemented.");
}
753
/*
 *	ml_broadcast_cpu_event:
 *
 *	Deliver a CPU lifecycle event to every registered callback.
 *	The chain is traversed lock-free; the dependency-ordered load
 *	pairs with the release store in cpu_event_register_callback()
 *	so each node's fields are seen fully initialized.
 */
void
ml_broadcast_cpu_event(enum cpu_event event, unsigned int cpu_or_cluster)
{
	struct cpu_callback_chain_elem *cursor;

	cursor = os_atomic_load(&cpu_callback_chain, dependency);
	for (; cursor != NULL; cursor = cursor->next) {
		cursor->fn(cursor->param, event, cpu_or_cluster);
	}
}