/*
 * Copyright (c) 2000-2019 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 *	File:	kern/machine.c
 *	Author:	Avadis Tevanian, Jr.
 *	Date:	1987
 *
 *	Support for machine independent machine abstraction.
 */
#include <string.h>

#include <mach/mach_types.h>
#include <mach/boolean.h>
#include <mach/kern_return.h>
#include <mach/machine.h>
#include <mach/host_info.h>
#include <mach/host_reboot.h>
#include <mach/host_priv_server.h>
#include <mach/processor_server.h>

#include <kern/kern_types.h>
#include <kern/counters.h>
#include <kern/cpu_data.h>
#include <kern/cpu_quiesce.h>
#include <kern/ipc_host.h>
#include <kern/host.h>
#include <kern/machine.h>
#include <kern/misc_protos.h>
#include <kern/processor.h>
#include <kern/queue.h>
#include <kern/sched.h>
#include <kern/startup.h>
#include <kern/task.h>
#include <kern/thread.h>

#include <machine/commpage.h>

#if HIBERNATION
#include <IOKit/IOHibernatePrivate.h>
#endif
#include <IOKit/IOPlatformExpert.h>

#if CONFIG_DTRACE
extern void (*dtrace_cpu_state_changed_hook)(int, boolean_t);
#endif

#if defined(__x86_64__)
#include <i386/misc_protos.h>
#include <libkern/OSDebug.h>
#endif

/*
 *	Exported variables:
 */

struct machine_info     machine_info;

/* Forwards */
static void processor_doshutdown(processor_t processor);
static void processor_offline(void * parameter, __unused wait_result_t result);
static void processor_offline_intstack(processor_t processor) __dead2;
/*
 *	Flag processor as up and running, and available
 *	for scheduling.
 */
void
processor_up(
	processor_t             processor)
{
	processor_set_t         pset;
	spl_t                   s;
	boolean_t pset_online = false;

	s = splsched();
	init_ast_check(processor);
	pset = processor->processor_set;
	pset_lock(pset);
	if (pset->online_processor_count == 0) {
		/* About to bring the first processor of a pset online */
		pset_online = true;
	}
	++pset->online_processor_count;
	pset_update_processor_state(pset, processor, PROCESSOR_RUNNING);
	os_atomic_inc(&processor_avail_count, relaxed);
	if (processor->is_recommended) {
		os_atomic_inc(&processor_avail_count_user, relaxed);
	}
	commpage_update_active_cpus();
	if (pset_online) {
		/* New pset is coming up online; callout to the
		 * scheduler in case it wants to adjust runqs.
		 */
		SCHED(pset_made_schedulable)(processor, pset, true);
		/* pset lock dropped */
	} else {
		pset_unlock(pset);
	}
	splx(s);

#if CONFIG_DTRACE
	if (dtrace_cpu_state_changed_hook) {
		(*dtrace_cpu_state_changed_hook)(processor->cpu_id, TRUE);
	}
#endif
}
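
#if 0
/*
 * Usage sketch (illustrative only, not part of xnu): machine-dependent
 * bring-up code calls processor_up() from a newly started CPU once it can
 * run threads. `example_cpu_bringup` is an invented name for illustration.
 */
static void
example_cpu_bringup(processor_t processor)
{
	/* ...machine-dependent CPU state is initialized first... */
	processor_up(processor);        /* make the CPU visible to the scheduler */
}
#endif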
#include <atm/atm_internal.h>

kern_return_t
host_reboot(
	host_priv_t             host_priv,
	int                     options)
{
	if (host_priv == HOST_PRIV_NULL) {
		return KERN_INVALID_HOST;
	}

	assert(host_priv == &realhost);

#if DEVELOPMENT || DEBUG
	if (options & HOST_REBOOT_DEBUGGER) {
		Debugger("Debugger");
		return KERN_SUCCESS;
	}
#endif

	if (options & HOST_REBOOT_UPSDELAY) {
		// UPS power cutoff path
		PEHaltRestart( kPEUPSDelayHaltCPU );
	} else {
		halt_all_cpus(!(options & HOST_REBOOT_HALT));
	}

	return KERN_SUCCESS;
}
kern_return_t
processor_assign(
	__unused processor_t            processor,
	__unused processor_set_t        new_pset,
	__unused boolean_t              wait)
{
	return KERN_FAILURE;
}
kern_return_t
processor_shutdown(
	processor_t             processor)
{
	processor_set_t         pset;
	spl_t                   s;

	s = splsched();
	pset = processor->processor_set;
	pset_lock(pset);
	if (processor->state == PROCESSOR_OFF_LINE) {
		/*
		 * Success if already shutdown.
		 */
		pset_unlock(pset);
		splx(s);

		return KERN_SUCCESS;
	}

	if (processor->state == PROCESSOR_START) {
		/*
		 * Failure if currently being started.
		 */
		pset_unlock(pset);
		splx(s);

		return KERN_FAILURE;
	}

	/*
	 * If the processor is dispatching, let it finish.
	 */
	while (processor->state == PROCESSOR_DISPATCHING) {
		pset_unlock(pset);
		splx(s);
		delay(1);
		s = splsched();
		pset_lock(pset);
	}

	/*
	 * Success if already being shutdown.
	 */
	if (processor->state == PROCESSOR_SHUTDOWN) {
		pset_unlock(pset);
		splx(s);

		return KERN_SUCCESS;
	}

	pset_update_processor_state(pset, processor, PROCESSOR_SHUTDOWN);
	pset_unlock(pset);

	processor_doshutdown(processor);
	splx(s);

	cpu_exit_wait(processor->cpu_id);

	return KERN_SUCCESS;
}
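
/*
 * Note: summary of the state checks above. processor_shutdown() is
 * effectively a small state machine on processor->state:
 *   PROCESSOR_OFF_LINE    -> nothing to do, KERN_SUCCESS
 *   PROCESSOR_START       -> mid-startup, refuse with KERN_FAILURE
 *   PROCESSOR_DISPATCHING -> drop the pset lock and spin until it settles
 *   PROCESSOR_SHUTDOWN    -> a shutdown is already in flight, KERN_SUCCESS
 * Only a processor observed in a stable running state is moved to
 * PROCESSOR_SHUTDOWN and handed to processor_doshutdown().
 */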
/*
 * Called with interrupts disabled.
 */
static void
processor_doshutdown(
	processor_t processor)
{
	thread_t self = current_thread();

	/*
	 * Get onto the processor to shutdown
	 */
	processor_t prev = thread_bind(processor);
	thread_block(THREAD_CONTINUE_NULL);

	/* interrupts still disabled */
	assert(ml_get_interrupts_enabled() == FALSE);

	assert(processor == current_processor());
	assert(processor->state == PROCESSOR_SHUTDOWN);

#if CONFIG_DTRACE
	if (dtrace_cpu_state_changed_hook) {
		(*dtrace_cpu_state_changed_hook)(processor->cpu_id, FALSE);
	}
#endif

#if HIBERNATION
	if (processor_avail_count < 2) {
		hibernate_vm_lock();
		hibernate_vm_unlock();
	}
#endif

	processor_set_t pset = processor->processor_set;

	pset_lock(pset);
	pset_update_processor_state(pset, processor, PROCESSOR_OFF_LINE);
	--pset->online_processor_count;
	os_atomic_dec(&processor_avail_count, relaxed);
	if (processor->is_recommended) {
		os_atomic_dec(&processor_avail_count_user, relaxed);
	}
	commpage_update_active_cpus();
	SCHED(processor_queue_shutdown)(processor);
	/* pset lock dropped */
	SCHED(rt_queue_shutdown)(processor);

	thread_bind(prev);

	/* interrupts still disabled */

	/*
	 * Continue processor shutdown on the processor's idle thread.
	 * The handoff won't fail because the idle thread has a reserved stack.
	 * Switching to the idle thread leaves interrupts disabled,
	 * so we can't accidentally take an interrupt after the context switch.
	 */
	thread_t shutdown_thread = processor->idle_thread;
	shutdown_thread->continuation = processor_offline;
	shutdown_thread->parameter = processor;

	thread_run(self, NULL, NULL, shutdown_thread);
}
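
/*
 * Note: thread_run() above switches away to the idle thread, whose
 * continuation (processor_offline, below) completes the shutdown on the
 * doomed CPU. The bound `self` thread resumes later on another processor,
 * at which point processor_doshutdown() returns to its caller.
 */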
/*
 * Called in the context of the idle thread to shut down the processor
 *
 * A shut-down processor looks like it's 'running' the idle thread parked
 * in this routine, but it's actually been powered off and has no hardware state.
 */
static void
processor_offline(
	void * parameter,
	__unused wait_result_t result)
{
	processor_t processor = (processor_t) parameter;
	thread_t self = current_thread();
	__assert_only thread_t old_thread = THREAD_NULL;

	assert(processor == current_processor());
	assert(self->state & TH_IDLE);
	assert(processor->idle_thread == self);
	assert(ml_get_interrupts_enabled() == FALSE);
	assert(self->continuation == NULL);
	assert(processor->processor_offlined == false);

	bool enforce_quiesce_safety = gEnforceQuiesceSafety;

	/*
	 * Scheduling is now disabled for this processor.
	 * Ensure that primitives that need scheduling (like mutexes) know this.
	 */
	if (enforce_quiesce_safety) {
		disable_preemption();
	}

	/* convince slave_main to come back here */
	processor->processor_offlined = true;

	/*
	 * Switch to the interrupt stack and shut down the processor.
	 *
	 * When the processor comes back, it will eventually call load_context which
	 * restores the context saved by machine_processor_shutdown, returning here.
	 */
	old_thread = machine_processor_shutdown(self, processor_offline_intstack, processor);

	/* old_thread should be NULL because we got here through Load_context */
	assert(old_thread == THREAD_NULL);

	assert(processor == current_processor());
	assert(processor->idle_thread == current_thread());

	assert(ml_get_interrupts_enabled() == FALSE);
	assert(self->continuation == NULL);

	/* Extract the machine_param value stashed by slave_main */
	void * machine_param = self->parameter;
	self->parameter = NULL;

	/* Re-initialize the processor */
	slave_machine_init(machine_param);

	assert(processor->processor_offlined == true);
	processor->processor_offlined = false;

	if (enforce_quiesce_safety) {
		enable_preemption();
	}

	/*
	 * Now that the processor is back, invoke the idle thread to find out what to do next.
	 * idle_thread will enable interrupts.
	 */
	thread_block(idle_thread);
	/*NOTREACHED*/
}
/*
 * Complete the shutdown and place the processor offline.
 *
 * Called at splsched in the shutdown context
 * (i.e. on the idle thread, on the interrupt stack)
 *
 * The onlining half of this is done in load_context().
 */
static void
processor_offline_intstack(
	processor_t processor)
{
	assert(processor == current_processor());
	assert(processor->active_thread == current_thread());

	timer_stop(PROCESSOR_DATA(processor, current_state), processor->last_dispatch);

	cpu_quiescent_counter_leave(processor->last_dispatch);

	PMAP_DEACTIVATE_KERNEL(processor->cpu_id);

	cpu_sleep();
	panic("zombie processor");
	/*NOTREACHED*/
}
kern_return_t
host_get_boot_info(
	host_priv_t             host_priv,
	kernel_boot_info_t      boot_info)
{
	const char *src = "";
	if (host_priv == HOST_PRIV_NULL) {
		return KERN_INVALID_HOST;
	}

	assert(host_priv == &realhost);

	/*
	 * Copy first operator string terminated by '\0' followed by
	 * standardized strings generated from boot string.
	 */
	src = machine_boot_info(boot_info, KERNEL_BOOT_INFO_MAX);
	if (src != boot_info) {
		(void) strncpy(boot_info, src, KERNEL_BOOT_INFO_MAX);
	}

	return KERN_SUCCESS;
}
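
#if 0
/*
 * Caller-side sketch (user space, illustrative only): this routine is a MIG
 * call on the privileged host port; an unprivileged host port is rejected
 * with KERN_INVALID_HOST. `priv_port` is assumed to be obtained elsewhere.
 */
#include <stdio.h>
#include <mach/mach.h>

static void
example_print_boot_info(host_priv_t priv_port)
{
	kernel_boot_info_t info;        /* fixed KERNEL_BOOT_INFO_MAX buffer */
	if (host_get_boot_info(priv_port, info) == KERN_SUCCESS) {
		printf("boot info: %s\n", info);
	}
}
#endif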
#if CONFIG_DTRACE
#include <mach/sdt.h>
#endif

unsigned long long
ml_io_read(uintptr_t vaddr, int size)
{
	unsigned long long result = 0;
	unsigned char s1;
	unsigned short s2;

#if defined(__x86_64__)
	uint64_t sabs, eabs;
	boolean_t istate, timeread = FALSE;
#if DEVELOPMENT || DEBUG
	extern uint64_t simulate_stretched_io;
	uintptr_t paddr = pmap_verify_noncacheable(vaddr);
#endif /* x86_64 DEVELOPMENT || DEBUG */

	if (__improbable(reportphyreaddelayabs != 0)) {
		istate = ml_set_interrupts_enabled(FALSE);
		sabs = mach_absolute_time();
		timeread = TRUE;
	}

#if DEVELOPMENT || DEBUG
	if (__improbable(timeread && simulate_stretched_io)) {
		sabs -= simulate_stretched_io;
	}
#endif /* x86_64 DEVELOPMENT || DEBUG */
#endif /* defined(__x86_64__) */
	switch (size) {
	case 1:
		s1 = *(volatile unsigned char *)vaddr;
		result = s1;
		break;
	case 2:
		s2 = *(volatile unsigned short *)vaddr;
		result = s2;
		break;
	case 4:
		result = *(volatile unsigned int *)vaddr;
		break;
	case 8:
		result = *(volatile unsigned long long *)vaddr;
		break;
	default:
		panic("Invalid size %d for ml_io_read(%p)", size, (void *)vaddr);
		break;
	}
#if defined(__x86_64__)
	if (__improbable(timeread == TRUE)) {
		eabs = mach_absolute_time();

#if DEVELOPMENT || DEBUG
		iotrace(IOTRACE_IO_READ, vaddr, paddr, size, result, sabs, eabs - sabs);
#endif

		if (__improbable((eabs - sabs) > reportphyreaddelayabs)) {
#if !(DEVELOPMENT || DEBUG)
			uintptr_t paddr = kvtophys(vaddr);
#endif

			(void)ml_set_interrupts_enabled(istate);

			if (phyreadpanic && (machine_timeout_suspended() == FALSE)) {
				panic_io_port_read();
				panic("Read from IO vaddr 0x%lx paddr 0x%lx took %llu ns, "
				    "result: 0x%llx (start: %llu, end: %llu), ceiling: %llu",
				    vaddr, paddr, (eabs - sabs), result, sabs, eabs,
				    reportphyreaddelayabs);
			}

			if (reportphyreadosbt) {
				OSReportWithBacktrace("ml_io_read(v=%p, p=%p) size %d result 0x%llx "
				    "took %lluus",
				    (void *)vaddr, (void *)paddr, size, result,
				    (eabs - sabs) / NSEC_PER_USEC);
			}
#if CONFIG_DTRACE
			DTRACE_PHYSLAT5(physioread, uint64_t, (eabs - sabs),
			    uint64_t, vaddr, uint32_t, size, uint64_t, paddr, uint64_t, result);
#endif /* CONFIG_DTRACE */
		} else if (__improbable(tracephyreaddelayabs > 0 && (eabs - sabs) > tracephyreaddelayabs)) {
#if !(DEVELOPMENT || DEBUG)
			uintptr_t paddr = kvtophys(vaddr);
#endif

			KDBG(MACHDBG_CODE(DBG_MACH_IO, DBC_MACH_IO_MMIO_READ),
			    (eabs - sabs), VM_KERNEL_UNSLIDE_OR_PERM(vaddr), paddr, result);

			(void)ml_set_interrupts_enabled(istate);
		} else {
			(void)ml_set_interrupts_enabled(istate);
		}
	}
#endif /* defined(__x86_64__) */

	return result;
}
unsigned int
ml_io_read8(uintptr_t vaddr)
{
	return (unsigned) ml_io_read(vaddr, 1);
}

unsigned int
ml_io_read16(uintptr_t vaddr)
{
	return (unsigned) ml_io_read(vaddr, 2);
}

unsigned int
ml_io_read32(uintptr_t vaddr)
{
	return (unsigned) ml_io_read(vaddr, 4);
}

unsigned long long
ml_io_read64(uintptr_t vaddr)
{
	return ml_io_read(vaddr, 8);
}
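
#if 0
/*
 * Usage sketch (illustrative only): a driver with an uncached MMIO mapping
 * reads a device register through the checked accessors, so the x86_64
 * latency reporting above applies. `regs_base` and `STATUS_REG_OFFSET`
 * are invented names.
 */
#define STATUS_REG_OFFSET 0x10

static uint32_t
example_read_status(uintptr_t regs_base)
{
	return ml_io_read32(regs_base + STATUS_REG_OFFSET);
}
#endif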
void
ml_io_write(uintptr_t vaddr, uint64_t val, int size)
{
#if defined(__x86_64__)
	uint64_t sabs, eabs;
	boolean_t istate, timewrite = FALSE;
#if DEVELOPMENT || DEBUG
	extern uint64_t simulate_stretched_io;
	uintptr_t paddr = pmap_verify_noncacheable(vaddr);
#endif /* x86_64 DEVELOPMENT || DEBUG */

	if (__improbable(reportphywritedelayabs != 0)) {
		istate = ml_set_interrupts_enabled(FALSE);
		sabs = mach_absolute_time();
		timewrite = TRUE;
	}

#if DEVELOPMENT || DEBUG
	if (__improbable(timewrite && simulate_stretched_io)) {
		sabs -= simulate_stretched_io;
	}
#endif /* x86_64 DEVELOPMENT || DEBUG */
#endif /* defined(__x86_64__) */
	switch (size) {
	case 1:
		*(volatile uint8_t *)vaddr = (uint8_t)val;
		break;
	case 2:
		*(volatile uint16_t *)vaddr = (uint16_t)val;
		break;
	case 4:
		*(volatile uint32_t *)vaddr = (uint32_t)val;
		break;
	case 8:
		*(volatile uint64_t *)vaddr = (uint64_t)val;
		break;
	default:
		panic("Invalid size %d for ml_io_write(%p, 0x%llx)", size, (void *)vaddr, val);
		break;
	}
#if defined(__x86_64__)
	if (__improbable(timewrite == TRUE)) {
		eabs = mach_absolute_time();

#if DEVELOPMENT || DEBUG
		iotrace(IOTRACE_IO_WRITE, vaddr, paddr, size, val, sabs, eabs - sabs);
#endif

		if (__improbable((eabs - sabs) > reportphywritedelayabs)) {
#if !(DEVELOPMENT || DEBUG)
			uintptr_t paddr = kvtophys(vaddr);
#endif

			(void)ml_set_interrupts_enabled(istate);

			if (phywritepanic && (machine_timeout_suspended() == FALSE)) {
				panic_io_port_read();
				panic("Write to IO vaddr %p paddr %p val 0x%llx took %llu ns,"
				    " (start: %llu, end: %llu), ceiling: %llu",
				    (void *)vaddr, (void *)paddr, val, (eabs - sabs), sabs, eabs,
				    reportphywritedelayabs);
			}

			if (reportphywriteosbt) {
				OSReportWithBacktrace("ml_io_write size %d (v=%p, p=%p, 0x%llx) "
				    "took %lluus",
				    size, (void *)vaddr, (void *)paddr, val,
				    (eabs - sabs) / NSEC_PER_USEC);
			}
#if CONFIG_DTRACE
			DTRACE_PHYSLAT5(physiowrite, uint64_t, (eabs - sabs),
			    uint64_t, vaddr, uint32_t, size, uint64_t, paddr, uint64_t, val);
#endif /* CONFIG_DTRACE */
		} else if (__improbable(tracephywritedelayabs > 0 && (eabs - sabs) > tracephywritedelayabs)) {
#if !(DEVELOPMENT || DEBUG)
			uintptr_t paddr = kvtophys(vaddr);
#endif

			KDBG(MACHDBG_CODE(DBG_MACH_IO, DBC_MACH_IO_MMIO_WRITE),
			    (eabs - sabs), VM_KERNEL_UNSLIDE_OR_PERM(vaddr), paddr, val);

			(void)ml_set_interrupts_enabled(istate);
		} else {
			(void)ml_set_interrupts_enabled(istate);
		}
	}
#endif /* defined(__x86_64__) */
}
void
ml_io_write8(uintptr_t vaddr, uint8_t val)
{
	ml_io_write(vaddr, val, 1);
}

void
ml_io_write16(uintptr_t vaddr, uint16_t val)
{
	ml_io_write(vaddr, val, 2);
}

void
ml_io_write32(uintptr_t vaddr, uint32_t val)
{
	ml_io_write(vaddr, val, 4);
}

void
ml_io_write64(uintptr_t vaddr, uint64_t val)
{
	ml_io_write(vaddr, val, 8);
}
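
#if 0
/*
 * Usage sketch (illustrative only): posted MMIO writes are often followed
 * by a read-back so the write reaches the device before timing-sensitive
 * code proceeds. `regs_base` and `INT_ACK_OFFSET` are invented names.
 */
#define INT_ACK_OFFSET 0x14

static void
example_ack_interrupt(uintptr_t regs_base)
{
	ml_io_write32(regs_base + INT_ACK_OFFSET, 1);      /* ack the interrupt */
	(void) ml_io_read32(regs_base + INT_ACK_OFFSET);   /* read-back to post */
}
#endif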