]> git.saurik.com Git - apple/xnu.git/blame - osfmk/kern/machine.c
xnu-7195.81.3.tar.gz
[apple/xnu.git] / osfmk / kern / machine.c
CommitLineData
1c79356b 1/*
0a7de745 2 * Copyright (c) 2000-2019 Apple Inc. All rights reserved.
1c79356b 3 *
2d21ac55 4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
0a7de745 5 *
2d21ac55
A
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
0a7de745 14 *
2d21ac55
A
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
0a7de745 17 *
2d21ac55
A
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
8f6c56a5
A
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
2d21ac55
A
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
0a7de745 25 *
2d21ac55 26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
1c79356b
A
27 */
28/*
29 * @OSF_COPYRIGHT@
30 */
0a7de745 31/*
1c79356b
A
32 * Mach Operating System
33 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
34 * All Rights Reserved.
0a7de745 35 *
1c79356b
A
36 * Permission to use, copy, modify and distribute this software and its
37 * documentation is hereby granted, provided that both the copyright
38 * notice and this permission notice appear in all copies of the
39 * software, derivative works or modified versions, and any portions
40 * thereof, and that both notices appear in supporting documentation.
0a7de745 41 *
1c79356b
A
42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
0a7de745 45 *
1c79356b 46 * Carnegie Mellon requests users of this software to return to
0a7de745 47 *
1c79356b
A
48 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
49 * School of Computer Science
50 * Carnegie Mellon University
51 * Pittsburgh PA 15213-3890
0a7de745 52 *
1c79356b
A
53 * any improvements or extensions that they make and grant Carnegie Mellon
54 * the rights to redistribute these changes.
55 */
56/*
57 */
58/*
59 * File: kern/machine.c
60 * Author: Avadis Tevanian, Jr.
61 * Date: 1987
62 *
63 * Support for machine independent machine abstraction.
64 */
65
1c79356b 66#include <string.h>
91447636
A
67
68#include <mach/mach_types.h>
1c79356b
A
69#include <mach/boolean.h>
70#include <mach/kern_return.h>
1c79356b
A
71#include <mach/machine.h>
72#include <mach/host_info.h>
73#include <mach/host_reboot.h>
91447636
A
74#include <mach/host_priv_server.h>
75#include <mach/processor_server.h>
76
77#include <kern/kern_types.h>
1c79356b
A
78#include <kern/counters.h>
79#include <kern/cpu_data.h>
d9a64523 80#include <kern/cpu_quiesce.h>
1c79356b
A
81#include <kern/ipc_host.h>
82#include <kern/host.h>
1c79356b 83#include <kern/machine.h>
91447636 84#include <kern/misc_protos.h>
1c79356b
A
85#include <kern/processor.h>
86#include <kern/queue.h>
87#include <kern/sched.h>
cb323159 88#include <kern/startup.h>
1c79356b
A
89#include <kern/task.h>
90#include <kern/thread.h>
1c79356b 91
6d2010ae 92#include <machine/commpage.h>
f427ee49 93#include <machine/machine_routines.h>
6d2010ae 94
2d21ac55 95#if HIBERNATION
3a60a9f5 96#include <IOKit/IOHibernatePrivate.h>
2d21ac55 97#endif
0c530ab8 98#include <IOKit/IOPlatformExpert.h>
1c79356b 99
fe8ab488
A
100#if CONFIG_DTRACE
101extern void (*dtrace_cpu_state_changed_hook)(int, boolean_t);
102#endif
103
0a7de745 104#if defined(__x86_64__)
f427ee49 105#include <i386/panic_notify.h>
0a7de745
A
106#include <libkern/OSDebug.h>
107#endif
108
1c79356b
A
/*
 * Exported variables:
 */

/* Machine-wide info (CPU counts, memory size) reported via host_info(). */
struct machine_info machine_info;

/* Forwards */

/* Runs processor shutdown on the target CPU; called with interrupts disabled. */
static void
processor_doshutdown(processor_t processor);

/* Idle-thread continuation that powers the processor down and back up. */
static void
processor_offline(void * parameter, __unused wait_result_t result);

/* Final shutdown step, executed on the interrupt stack; never returns. */
static void
processor_offline_intstack(processor_t processor) __dead2;
/*
 *	processor_up:
 *
 *	Flag processor as up and running, and available
 *	for scheduling.
 */
void
processor_up(
	processor_t processor)
{
	processor_set_t pset;
	spl_t s;

	/* Raise IPL: scheduler bookkeeping below must not be preempted. */
	s = splsched();
	init_ast_check(processor);
	pset = processor->processor_set;
	pset_lock(pset);

	++pset->online_processor_count;
	pset_update_processor_state(pset, processor, PROCESSOR_RUNNING);
	os_atomic_inc(&processor_avail_count, relaxed);
	if (processor->is_recommended) {
		os_atomic_inc(&processor_avail_count_user, relaxed);
		/* Tell the scheduler this pset has (re)gained a schedulable CPU. */
		SCHED(pset_made_schedulable)(processor, pset, false);
	}
	if (processor->processor_primary == processor) {
		/* Primary processors are tracked in separate availability counts. */
		os_atomic_inc(&primary_processor_avail_count, relaxed);
		if (processor->is_recommended) {
			os_atomic_inc(&primary_processor_avail_count_user, relaxed);
		}
	}
	/* Refresh the commpage's view of active CPUs for userspace. */
	commpage_update_active_cpus();
	pset_unlock(pset);
	ml_cpu_up();
	splx(s);

#if CONFIG_DTRACE
	/* Notify dtrace that this CPU has come online. */
	if (dtrace_cpu_state_changed_hook) {
		(*dtrace_cpu_state_changed_hook)(processor->cpu_id, TRUE);
	}
#endif
}
fe8ab488 167#include <atm/atm_internal.h>
1c79356b
A
168
/*
 *	host_reboot:
 *
 *	Reboot or halt the machine (or enter the debugger on
 *	DEVELOPMENT/DEBUG kernels), gated on the privileged host port.
 */
kern_return_t
host_reboot(
	host_priv_t host_priv,
	int options)
{
	if (host_priv == HOST_PRIV_NULL) {
		return KERN_INVALID_HOST;
	}

	assert(host_priv == &realhost);

#if DEVELOPMENT || DEBUG
	/* Debugger entry is compiled out of RELEASE kernels. */
	if (options & HOST_REBOOT_DEBUGGER) {
		Debugger("Debugger");
		return KERN_SUCCESS;
	}
#endif

	if (options & HOST_REBOOT_UPSDELAY) {
		// UPS power cutoff path
		PEHaltRestart( kPEUPSDelayHaltCPU );
	} else {
		/* HOST_REBOOT_HALT set => halt; clear => reboot. */
		halt_all_cpus(!(options & HOST_REBOOT_HALT));
	}

	return KERN_SUCCESS;
}
196
/*
 * Reassigning a processor to a different processor set is not
 * supported; this always fails.
 */
kern_return_t
processor_assign(
	__unused processor_t processor,
	__unused processor_set_t new_pset,
	__unused boolean_t wait)
{
	return KERN_FAILURE;
}
205
1c79356b
A
/*
 *	processor_shutdown:
 *
 *	Take a processor offline. Returns KERN_SUCCESS if the processor
 *	is already (or ends up) offline, KERN_FAILURE if the architecture
 *	disallows it or the processor is mid-startup.
 */
kern_return_t
processor_shutdown(
	processor_t processor)
{
	processor_set_t pset;
	spl_t s;

	/* Bracket the whole operation for arch-layer state tracking. */
	ml_cpu_begin_state_transition(processor->cpu_id);
	s = splsched();
	pset = processor->processor_set;
	pset_lock(pset);
	if (processor->state == PROCESSOR_OFF_LINE) {
		/*
		 * Success if already shutdown.
		 */
		pset_unlock(pset);
		splx(s);
		ml_cpu_end_state_transition(processor->cpu_id);

		return KERN_SUCCESS;
	}

	if (!ml_cpu_can_exit(processor->cpu_id)) {
		/*
		 * Failure if disallowed by arch code.
		 */
		pset_unlock(pset);
		splx(s);
		ml_cpu_end_state_transition(processor->cpu_id);

		return KERN_FAILURE;
	}

	if (processor->state == PROCESSOR_START) {
		/*
		 * Failure if currently being started.
		 *
		 * NOTE(review): this path returns without calling
		 * ml_cpu_end_state_transition(), unlike the other early
		 * returns above — confirm whether the arch layer tolerates
		 * an unmatched begin/end pair here.
		 */
		pset_unlock(pset);
		splx(s);

		return KERN_FAILURE;
	}

	/*
	 * If the processor is dispatching, let it finish.
	 */
	while (processor->state == PROCESSOR_DISPATCHING) {
		/* Drop locks and spin-wait; delay(1) runs with interrupts enabled. */
		pset_unlock(pset);
		splx(s);
		delay(1);
		s = splsched();
		pset_lock(pset);
	}

	/*
	 * Success if already being shutdown.
	 */
	if (processor->state == PROCESSOR_SHUTDOWN) {
		pset_unlock(pset);
		splx(s);
		ml_cpu_end_state_transition(processor->cpu_id);

		return KERN_SUCCESS;
	}

	/* Announce the impending exit before flipping the state. */
	ml_broadcast_cpu_event(CPU_EXIT_REQUESTED, processor->cpu_id);
	pset_update_processor_state(pset, processor, PROCESSOR_SHUTDOWN);
	pset_unlock(pset);

	/* Migrate to the target CPU and run the shutdown there. */
	processor_doshutdown(processor);
	splx(s);

	/* Wait for the CPU to actually leave the machine-dependent layer. */
	cpu_exit_wait(processor->cpu_id);
	ml_cpu_end_state_transition(processor->cpu_id);
	ml_broadcast_cpu_event(CPU_EXITED, processor->cpu_id);

	return KERN_SUCCESS;
}
284
/*
 * Called with interrupts disabled.
 *
 * Binds the calling thread to the target processor, unwinds that
 * processor's scheduler state, and hands off to its idle thread to
 * complete the power-down.
 */
static void
processor_doshutdown(
	processor_t processor)
{
	thread_t self = current_thread();

	/*
	 * Get onto the processor to shutdown
	 */
	processor_t prev = thread_bind(processor);
	thread_block(THREAD_CONTINUE_NULL);

	/* interrupts still disabled */
	assert(ml_get_interrupts_enabled() == FALSE);

	assert(processor == current_processor());
	assert(processor->state == PROCESSOR_SHUTDOWN);

#if CONFIG_DTRACE
	/* Notify dtrace that this CPU is going offline. */
	if (dtrace_cpu_state_changed_hook) {
		(*dtrace_cpu_state_changed_hook)(processor->cpu_id, FALSE);
	}
#endif

	ml_cpu_down();

#if HIBERNATION
	/* Last CPU standing: cycle the hibernation VM lock as a barrier. */
	if (processor_avail_count < 2) {
		hibernate_vm_lock();
		hibernate_vm_unlock();
	}
#endif

	processor_set_t pset = processor->processor_set;

	pset_lock(pset);
	pset_update_processor_state(pset, processor, PROCESSOR_OFF_LINE);
	--pset->online_processor_count;
	os_atomic_dec(&processor_avail_count, relaxed);
	if (processor->is_recommended) {
		os_atomic_dec(&processor_avail_count_user, relaxed);
	}
	if (processor->processor_primary == processor) {
		/* Mirror the primary-count bookkeeping done in processor_up(). */
		os_atomic_dec(&primary_processor_avail_count, relaxed);
		if (processor->is_recommended) {
			os_atomic_dec(&primary_processor_avail_count_user, relaxed);
		}
	}
	commpage_update_active_cpus();
	/* Drain this processor's run queues back into the pset. */
	SCHED(processor_queue_shutdown)(processor);
	/* pset lock dropped */
	SCHED(rt_queue_shutdown)(processor);

	/* Restore the caller's previous binding. */
	thread_bind(prev);

	/* interrupts still disabled */

	/*
	 * Continue processor shutdown on the processor's idle thread.
	 * The handoff won't fail because the idle thread has a reserved stack.
	 * Switching to the idle thread leaves interrupts disabled,
	 * so we can't accidentally take an interrupt after the context switch.
	 */
	thread_t shutdown_thread = processor->idle_thread;
	shutdown_thread->continuation = processor_offline;
	shutdown_thread->parameter = processor;

	thread_run(self, NULL, NULL, shutdown_thread);
}
357
/*
 * Called in the context of the idle thread to shut down the processor
 *
 * A shut-down processor looks like it's 'running' the idle thread parked
 * in this routine, but it's actually been powered off and has no hardware state.
 */
static void
processor_offline(
	void * parameter,
	__unused wait_result_t result)
{
	processor_t processor = (processor_t) parameter;
	thread_t self = current_thread();
	__assert_only thread_t old_thread = THREAD_NULL;

	/* Sanity: we must be the idle thread of the CPU we are offlining. */
	assert(processor == current_processor());
	assert(self->state & TH_IDLE);
	assert(processor->idle_thread == self);
	assert(ml_get_interrupts_enabled() == FALSE);
	assert(self->continuation == NULL);
	assert(processor->processor_offlined == false);
	assert(processor->running_timers_active == false);

	/* Sample the policy once so power-up pairs with power-down. */
	bool enforce_quiesce_safety = gEnforceQuiesceSafety;

	/*
	 * Scheduling is now disabled for this processor.
	 * Ensure that primitives that need scheduling (like mutexes) know this.
	 */
	if (enforce_quiesce_safety) {
		disable_preemption();
	}

	/* convince slave_main to come back here */
	processor->processor_offlined = true;

	/*
	 * Switch to the interrupt stack and shut down the processor.
	 *
	 * When the processor comes back, it will eventually call load_context which
	 * restores the context saved by machine_processor_shutdown, returning here.
	 */
	old_thread = machine_processor_shutdown(self, processor_offline_intstack, processor);

	/* old_thread should be NULL because we got here through Load_context */
	assert(old_thread == THREAD_NULL);

	assert(processor == current_processor());
	assert(processor->idle_thread == current_thread());

	assert(ml_get_interrupts_enabled() == FALSE);
	assert(self->continuation == NULL);

	/* Extract the machine_param value stashed by slave_main */
	void * machine_param = self->parameter;
	self->parameter = NULL;

	/* Re-initialize the processor */
	slave_machine_init(machine_param);

	assert(processor->processor_offlined == true);
	processor->processor_offlined = false;

	if (enforce_quiesce_safety) {
		enable_preemption();
	}

	/*
	 * Now that the processor is back, invoke the idle thread to find out what to do next.
	 * idle_thread will enable interrupts.
	 */
	thread_block(idle_thread);
	/*NOTREACHED*/
}
91447636 432
cb323159
A
/*
 * Complete the shutdown and place the processor offline.
 *
 * Called at splsched in the shutdown context
 * (i.e. on the idle thread, on the interrupt stack)
 *
 * The onlining half of this is done in load_context().
 */
static void
processor_offline_intstack(
	processor_t processor)
{
	assert(processor == current_processor());
	assert(processor->active_thread == current_thread());

	/* Close out this CPU's state timer at its last dispatch timestamp. */
	timer_stop(processor->current_state, processor->last_dispatch);

	/* Leave the quiescent-counter protocol before the CPU disappears. */
	cpu_quiescent_counter_leave(processor->last_dispatch);

	PMAP_DEACTIVATE_KERNEL(processor->cpu_id);

	/* Power down; execution resumes via load_context() on power-up. */
	cpu_sleep();
	panic("zombie processor");
	/*NOTREACHED*/
}
458
459kern_return_t
460host_get_boot_info(
0a7de745
A
461 host_priv_t host_priv,
462 kernel_boot_info_t boot_info)
1c79356b 463{
91447636 464 const char *src = "";
0a7de745
A
465 if (host_priv == HOST_PRIV_NULL) {
466 return KERN_INVALID_HOST;
467 }
1c79356b
A
468
469 assert(host_priv == &realhost);
470
471 /*
472 * Copy first operator string terminated by '\0' followed by
473 * standardized strings generated from boot string.
474 */
475 src = machine_boot_info(boot_info, KERNEL_BOOT_INFO_MAX);
0a7de745 476 if (src != boot_info) {
1c79356b 477 (void) strncpy(boot_info, src, KERNEL_BOOT_INFO_MAX);
0a7de745 478 }
1c79356b 479
0a7de745 480 return KERN_SUCCESS;
1c79356b 481}
813fb2f6
A
482
483#if CONFIG_DTRACE
484#include <mach/sdt.h>
485#endif
486
0a7de745
A
/*
 *	ml_io_read:
 *
 *	Perform a volatile MMIO read of 'size' bytes (1/2/4/8) from 'vaddr'.
 *	On x86_64, optionally time the access and panic/report/trace when it
 *	exceeds the configured latency ceilings.
 */
unsigned long long
ml_io_read(uintptr_t vaddr, int size)
{
	unsigned long long result = 0;
	unsigned char s1;
	unsigned short s2;

#if defined(__x86_64__)
	uint64_t sabs, eabs;
	boolean_t istate, timeread = FALSE;
#if DEVELOPMENT || DEBUG
	extern uint64_t simulate_stretched_io;
	/* Also validates that vaddr maps non-cacheable memory. */
	uintptr_t paddr = pmap_verify_noncacheable(vaddr);
#endif /* x86_64 DEVELOPMENT || DEBUG */
	/* Only time the access when a latency ceiling is configured. */
	if (__improbable(reportphyreaddelayabs != 0)) {
		istate = ml_set_interrupts_enabled(FALSE);
		sabs = mach_absolute_time();
		timeread = TRUE;
	}

#if DEVELOPMENT || DEBUG
	/* Test hook: pretend the read started earlier to fake a slow I/O. */
	if (__improbable(timeread && simulate_stretched_io)) {
		sabs -= simulate_stretched_io;
	}
#endif /* x86_64 DEVELOPMENT || DEBUG */

#endif /* x86_64 */

	switch (size) {
	case 1:
		s1 = *(volatile unsigned char *)vaddr;
		result = s1;
		break;
	case 2:
		s2 = *(volatile unsigned short *)vaddr;
		result = s2;
		break;
	case 4:
		result = *(volatile unsigned int *)vaddr;
		break;
	case 8:
		result = *(volatile unsigned long long *)vaddr;
		break;
	default:
		panic("Invalid size %d for ml_io_read(%p)", size, (void *)vaddr);
		break;
	}

#if defined(__x86_64__)
	if (__improbable(timeread == TRUE)) {
		eabs = mach_absolute_time();

#if DEVELOPMENT || DEBUG
		iotrace(IOTRACE_IO_READ, vaddr, paddr, size, result, sabs, eabs - sabs);
#endif

		if (__improbable((eabs - sabs) > reportphyreaddelayabs)) {
			/* On RELEASE, paddr wasn't computed above; derive it now. */
#if !(DEVELOPMENT || DEBUG)
			uintptr_t paddr = kvtophys(vaddr);
#endif

			(void)ml_set_interrupts_enabled(istate);

			if (phyreadpanic && (machine_timeout_suspended() == FALSE)) {
				panic_notify();
				panic("Read from IO vaddr 0x%lx paddr 0x%lx took %llu ns, "
				    "result: 0x%llx (start: %llu, end: %llu), ceiling: %llu",
				    vaddr, paddr, (eabs - sabs), result, sabs, eabs,
				    reportphyreaddelayabs);
			}

			if (reportphyreadosbt) {
				OSReportWithBacktrace("ml_io_read(v=%p, p=%p) size %d result 0x%llx "
				    "took %lluus",
				    (void *)vaddr, (void *)paddr, size, result,
				    (eabs - sabs) / NSEC_PER_USEC);
			}
#if CONFIG_DTRACE
			DTRACE_PHYSLAT5(physioread, uint64_t, (eabs - sabs),
			    uint64_t, vaddr, uint32_t, size, uint64_t, paddr, uint64_t, result);
#endif /* CONFIG_DTRACE */
		} else if (__improbable(tracephyreaddelayabs > 0 && (eabs - sabs) > tracephyreaddelayabs)) {
#if !(DEVELOPMENT || DEBUG)
			uintptr_t paddr = kvtophys(vaddr);
#endif

			/* Over the trace (but not report) threshold: emit a kdebug event. */
			KDBG(MACHDBG_CODE(DBG_MACH_IO, DBC_MACH_IO_MMIO_READ),
			    (eabs - sabs), VM_KERNEL_UNSLIDE_OR_PERM(vaddr), paddr, result);

			(void)ml_set_interrupts_enabled(istate);
		} else {
			(void)ml_set_interrupts_enabled(istate);
		}
	}
#endif /* x86_64 */
	return result;
}
584
0a7de745
A
/* Convenience wrapper: timed 1-byte MMIO read, narrowed to unsigned. */
unsigned int
ml_io_read8(uintptr_t vaddr)
{
	unsigned long long wide = ml_io_read(vaddr, 1);
	return (unsigned int) wide;
}
590
0a7de745
A
591unsigned int
592ml_io_read16(uintptr_t vaddr)
593{
813fb2f6
A
594 return (unsigned) ml_io_read(vaddr, 2);
595}
596
0a7de745
A
597unsigned int
598ml_io_read32(uintptr_t vaddr)
599{
813fb2f6
A
600 return (unsigned) ml_io_read(vaddr, 4);
601}
602
0a7de745
A
603unsigned long long
604ml_io_read64(uintptr_t vaddr)
605{
813fb2f6
A
606 return ml_io_read(vaddr, 8);
607}
0a7de745
A
608
609/* ml_io_write* */
610
/*
 *	ml_io_write:
 *
 *	Perform a volatile MMIO write of 'size' bytes (1/2/4/8) of 'val' to
 *	'vaddr'. On x86_64, optionally time the access and panic/report/trace
 *	when it exceeds the configured latency ceilings (mirrors ml_io_read).
 */
void
ml_io_write(uintptr_t vaddr, uint64_t val, int size)
{
#if defined(__x86_64__)
	uint64_t sabs, eabs;
	boolean_t istate, timewrite = FALSE;
#if DEVELOPMENT || DEBUG
	extern uint64_t simulate_stretched_io;
	/* Also validates that vaddr maps non-cacheable memory. */
	uintptr_t paddr = pmap_verify_noncacheable(vaddr);
#endif /* x86_64 DEVELOPMENT || DEBUG */
	/* Only time the access when a latency ceiling is configured. */
	if (__improbable(reportphywritedelayabs != 0)) {
		istate = ml_set_interrupts_enabled(FALSE);
		sabs = mach_absolute_time();
		timewrite = TRUE;
	}

#if DEVELOPMENT || DEBUG
	/* Test hook: pretend the write started earlier to fake a slow I/O. */
	if (__improbable(timewrite && simulate_stretched_io)) {
		sabs -= simulate_stretched_io;
	}
#endif /* x86_64 DEVELOPMENT || DEBUG */
#endif /* x86_64 */

	switch (size) {
	case 1:
		*(volatile uint8_t *)vaddr = (uint8_t)val;
		break;
	case 2:
		*(volatile uint16_t *)vaddr = (uint16_t)val;
		break;
	case 4:
		*(volatile uint32_t *)vaddr = (uint32_t)val;
		break;
	case 8:
		*(volatile uint64_t *)vaddr = (uint64_t)val;
		break;
	default:
		panic("Invalid size %d for ml_io_write(%p, 0x%llx)", size, (void *)vaddr, val);
		break;
	}

#if defined(__x86_64__)
	if (__improbable(timewrite == TRUE)) {
		eabs = mach_absolute_time();

#if DEVELOPMENT || DEBUG
		iotrace(IOTRACE_IO_WRITE, vaddr, paddr, size, val, sabs, eabs - sabs);
#endif

		if (__improbable((eabs - sabs) > reportphywritedelayabs)) {
			/* On RELEASE, paddr wasn't computed above; derive it now. */
#if !(DEVELOPMENT || DEBUG)
			uintptr_t paddr = kvtophys(vaddr);
#endif

			(void)ml_set_interrupts_enabled(istate);

			if (phywritepanic && (machine_timeout_suspended() == FALSE)) {
				panic_notify();
				panic("Write to IO vaddr %p paddr %p val 0x%llx took %llu ns,"
				    " (start: %llu, end: %llu), ceiling: %llu",
				    (void *)vaddr, (void *)paddr, val, (eabs - sabs), sabs, eabs,
				    reportphywritedelayabs);
			}

			if (reportphywriteosbt) {
				OSReportWithBacktrace("ml_io_write size %d (v=%p, p=%p, 0x%llx) "
				    "took %lluus",
				    size, (void *)vaddr, (void *)paddr, val, (eabs - sabs) / NSEC_PER_USEC);
			}
#if CONFIG_DTRACE
			DTRACE_PHYSLAT5(physiowrite, uint64_t, (eabs - sabs),
			    uint64_t, vaddr, uint32_t, size, uint64_t, paddr, uint64_t, val);
#endif /* CONFIG_DTRACE */
		} else if (__improbable(tracephywritedelayabs > 0 && (eabs - sabs) > tracephywritedelayabs)) {
#if !(DEVELOPMENT || DEBUG)
			uintptr_t paddr = kvtophys(vaddr);
#endif

			/* Over the trace (but not report) threshold: emit a kdebug event. */
			KDBG(MACHDBG_CODE(DBG_MACH_IO, DBC_MACH_IO_MMIO_WRITE),
			    (eabs - sabs), VM_KERNEL_UNSLIDE_OR_PERM(vaddr), paddr, val);

			(void)ml_set_interrupts_enabled(istate);
		} else {
			(void)ml_set_interrupts_enabled(istate);
		}
	}
#endif /* x86_64 */
}
699
/* Convenience wrapper: timed 1-byte MMIO write. */
void
ml_io_write8(uintptr_t vaddr, uint8_t val)
{
	ml_io_write(vaddr, (uint64_t) val, 1);
}
705
/* Convenience wrapper: timed 2-byte MMIO write. */
void
ml_io_write16(uintptr_t vaddr, uint16_t val)
{
	ml_io_write(vaddr, (uint64_t) val, 2);
}
711
/* Convenience wrapper: timed 4-byte MMIO write. */
void
ml_io_write32(uintptr_t vaddr, uint32_t val)
{
	ml_io_write(vaddr, (uint64_t) val, 4);
}
717
/* Convenience wrapper: timed 8-byte MMIO write. */
void
ml_io_write64(uintptr_t vaddr, uint64_t val)
{
	ml_io_write(vaddr, val, 8);
}
f427ee49
A
723
/*
 * Singly-linked list of callbacks invoked on CPU lifecycle events
 * (see ml_broadcast_cpu_event). Nodes are permanently allocated and
 * never removed, so readers can traverse lock-free.
 */
struct cpu_callback_chain_elem {
	cpu_callback_t fn;      /* callback to invoke */
	void *param;            /* opaque argument passed back to fn */
	struct cpu_callback_chain_elem *next;
};

/* Head of the chain; published with a release store, read with dependency ordering. */
static struct cpu_callback_chain_elem *cpu_callback_chain;
/* Spinlock serializing writers (registration); readers do not take it. */
static LCK_GRP_DECLARE(cpu_callback_chain_lock_grp, "cpu_callback_chain");
static LCK_SPIN_DECLARE(cpu_callback_chain_lock, &cpu_callback_chain_lock_grp);
733
/*
 *	cpu_event_register_callback:
 *
 *	Prepend (fn, param) to the CPU-event callback chain. The node is
 *	fully initialized before the release store publishes it, so
 *	lock-free readers never observe a partially built element.
 */
void
cpu_event_register_callback(cpu_callback_t fn, void *param)
{
	struct cpu_callback_chain_elem *new_elem;

	new_elem = zalloc_permanent_type(struct cpu_callback_chain_elem);
	if (!new_elem) {
		panic("can't allocate cpu_callback_chain_elem");
	}

	lck_spin_lock(&cpu_callback_chain_lock);
	new_elem->next = cpu_callback_chain;
	new_elem->fn = fn;
	new_elem->param = param;
	/* Release store pairs with the dependency load in ml_broadcast_cpu_event. */
	os_atomic_store(&cpu_callback_chain, new_elem, release);
	lck_spin_unlock(&cpu_callback_chain_lock);
}
751
/*
 * Unregistration is intentionally unsupported: chain nodes are
 * permanently allocated and traversed lock-free, so they can never
 * be safely removed. Always panics.
 */
__attribute__((noreturn))
void
cpu_event_unregister_callback(__unused cpu_callback_t fn)
{
	panic("Unfortunately, cpu_event_unregister_callback is unimplemented.");
}
758
759void
760ml_broadcast_cpu_event(enum cpu_event event, unsigned int cpu_or_cluster)
761{
762 struct cpu_callback_chain_elem *cursor;
763
764 cursor = os_atomic_load(&cpu_callback_chain, dependency);
765 for (; cursor != NULL; cursor = cursor->next) {
766 cursor->fn(cursor->param, event, cpu_or_cluster);
767 }
768}