]> git.saurik.com Git - apple/xnu.git/blob - osfmk/kern/machine.c
xnu-6153.61.1.tar.gz
[apple/xnu.git] / osfmk / kern / machine.c
1 /*
2 * Copyright (c) 2000-2019 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * @OSF_COPYRIGHT@
30 */
31 /*
32 * Mach Operating System
33 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
34 * All Rights Reserved.
35 *
36 * Permission to use, copy, modify and distribute this software and its
37 * documentation is hereby granted, provided that both the copyright
38 * notice and this permission notice appear in all copies of the
39 * software, derivative works or modified versions, and any portions
40 * thereof, and that both notices appear in supporting documentation.
41 *
42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45 *
46 * Carnegie Mellon requests users of this software to return to
47 *
48 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
49 * School of Computer Science
50 * Carnegie Mellon University
51 * Pittsburgh PA 15213-3890
52 *
53 * any improvements or extensions that they make and grant Carnegie Mellon
54 * the rights to redistribute these changes.
55 */
56 /*
57 */
58 /*
59 * File: kern/machine.c
60 * Author: Avadis Tevanian, Jr.
61 * Date: 1987
62 *
63 * Support for machine independent machine abstraction.
64 */
65
66 #include <string.h>
67
68 #include <mach/mach_types.h>
69 #include <mach/boolean.h>
70 #include <mach/kern_return.h>
71 #include <mach/machine.h>
72 #include <mach/host_info.h>
73 #include <mach/host_reboot.h>
74 #include <mach/host_priv_server.h>
75 #include <mach/processor_server.h>
76
77 #include <kern/kern_types.h>
78 #include <kern/counters.h>
79 #include <kern/cpu_data.h>
80 #include <kern/cpu_quiesce.h>
81 #include <kern/ipc_host.h>
82 #include <kern/host.h>
83 #include <kern/machine.h>
84 #include <kern/misc_protos.h>
85 #include <kern/processor.h>
86 #include <kern/queue.h>
87 #include <kern/sched.h>
88 #include <kern/startup.h>
89 #include <kern/task.h>
90 #include <kern/thread.h>
91
92 #include <machine/commpage.h>
93
94 #if HIBERNATION
95 #include <IOKit/IOHibernatePrivate.h>
96 #endif
97 #include <IOKit/IOPlatformExpert.h>
98
99 #if CONFIG_DTRACE
100 extern void (*dtrace_cpu_state_changed_hook)(int, boolean_t);
101 #endif
102
103 #if defined(__x86_64__)
104 #include <i386/misc_protos.h>
105 #include <libkern/OSDebug.h>
106 #endif
107
/*
 * Exported variables:
 */

/* Machine-wide description/counters (struct declared in <mach/machine.h>). */
struct machine_info machine_info;

/* Forwards */

/* Drive the shutdown of a processor from a bound thread; see body below. */
static void
processor_doshutdown(processor_t processor);

/* Idle-thread continuation that parks/re-initializes an offlined processor. */
static void
processor_offline(void * parameter, __unused wait_result_t result);

/* Final shutdown step, run on the interrupt stack; never returns. */
static void
processor_offline_intstack(processor_t processor) __dead2;
123
/*
 * processor_up:
 *
 * Flag processor as up and running, and available
 * for scheduling.
 *
 * Updates the pset's online count and the global avail counts,
 * refreshes the commpage active-cpu count, then notifies the
 * platform layer (ml_cpu_up) and, when built in, DTrace.
 */
void
processor_up(
	processor_t processor)
{
	processor_set_t pset;
	spl_t s;
	boolean_t pset_online = false;

	/* Raise to scheduler interrupt level while mutating scheduler state */
	s = splsched();
	init_ast_check(processor);
	pset = processor->processor_set;
	pset_lock(pset);
	if (pset->online_processor_count == 0) {
		/* About to bring the first processor of a pset online */
		pset_online = true;
	}
	++pset->online_processor_count;
	pset_update_processor_state(pset, processor, PROCESSOR_RUNNING);
	os_atomic_inc(&processor_avail_count, relaxed);
	if (processor->is_recommended) {
		os_atomic_inc(&processor_avail_count_user, relaxed);
	}
	commpage_update_active_cpus();
	if (pset_online) {
		/* New pset is coming up online; callout to the
		 * scheduler in case it wants to adjust runqs.
		 */
		SCHED(pset_made_schedulable)(processor, pset, true);
		/* pset lock dropped */
	} else {
		pset_unlock(pset);
	}
	ml_cpu_up();
	splx(s);

#if CONFIG_DTRACE
	if (dtrace_cpu_state_changed_hook) {
		(*dtrace_cpu_state_changed_hook)(processor->cpu_id, TRUE);
	}
#endif
}
171 #include <atm/atm_internal.h>
172
173 kern_return_t
174 host_reboot(
175 host_priv_t host_priv,
176 int options)
177 {
178 if (host_priv == HOST_PRIV_NULL) {
179 return KERN_INVALID_HOST;
180 }
181
182 assert(host_priv == &realhost);
183
184 #if DEVELOPMENT || DEBUG
185 if (options & HOST_REBOOT_DEBUGGER) {
186 Debugger("Debugger");
187 return KERN_SUCCESS;
188 }
189 #endif
190
191 if (options & HOST_REBOOT_UPSDELAY) {
192 // UPS power cutoff path
193 PEHaltRestart( kPEUPSDelayHaltCPU );
194 } else {
195 halt_all_cpus(!(options & HOST_REBOOT_HALT));
196 }
197
198 return KERN_SUCCESS;
199 }
200
/*
 * processor_assign:
 *
 * Reassigning a processor to a different processor set is not
 * supported here; this stub unconditionally fails.
 */
kern_return_t
processor_assign(
	__unused processor_t processor,
	__unused processor_set_t new_pset,
	__unused boolean_t wait)
{
	return KERN_FAILURE;
}
209
/*
 * processor_shutdown:
 *
 * Take a processor offline.  Idempotent: succeeds immediately if the
 * processor is already OFF_LINE or already in SHUTDOWN; fails if the
 * processor is still starting up.  Spins (with the pset lock dropped)
 * while the processor is DISPATCHING, then marks it SHUTDOWN, performs
 * the shutdown handoff, and waits for the CPU to actually exit.
 */
kern_return_t
processor_shutdown(
	processor_t processor)
{
	processor_set_t pset;
	spl_t s;

	s = splsched();
	pset = processor->processor_set;
	pset_lock(pset);
	if (processor->state == PROCESSOR_OFF_LINE) {
		/*
		 * Success if already shutdown.
		 */
		pset_unlock(pset);
		splx(s);

		return KERN_SUCCESS;
	}

	if (processor->state == PROCESSOR_START) {
		/*
		 * Failure if currently being started.
		 */
		pset_unlock(pset);
		splx(s);

		return KERN_FAILURE;
	}

	/*
	 * If the processor is dispatching, let it finish.
	 */
	while (processor->state == PROCESSOR_DISPATCHING) {
		/* Drop the lock and interrupts-off window while we wait */
		pset_unlock(pset);
		splx(s);
		delay(1);
		s = splsched();
		pset_lock(pset);
	}

	/*
	 * Success if already being shutdown.
	 */
	if (processor->state == PROCESSOR_SHUTDOWN) {
		pset_unlock(pset);
		splx(s);

		return KERN_SUCCESS;
	}

	pset_update_processor_state(pset, processor, PROCESSOR_SHUTDOWN);
	pset_unlock(pset);

	/* Runs on the target processor; see processor_doshutdown() */
	processor_doshutdown(processor);
	splx(s);

	/* Block until the hardware has actually gone offline */
	cpu_exit_wait(processor->cpu_id);

	return KERN_SUCCESS;
}
271
/*
 * processor_doshutdown:
 *
 * Shut down the target processor: bind the calling thread onto it,
 * notify DTrace and the platform layer, mark it OFF_LINE, drain its
 * run queues, and finally hand off to the processor's idle thread,
 * which completes the shutdown (see processor_offline).
 *
 * Called with interrupts disabled.
 */
static void
processor_doshutdown(
	processor_t processor)
{
	thread_t self = current_thread();

	/*
	 * Get onto the processor to shutdown
	 */
	processor_t prev = thread_bind(processor);
	thread_block(THREAD_CONTINUE_NULL);

	/* interrupts still disabled */
	assert(ml_get_interrupts_enabled() == FALSE);

	assert(processor == current_processor());
	assert(processor->state == PROCESSOR_SHUTDOWN);

#if CONFIG_DTRACE
	if (dtrace_cpu_state_changed_hook) {
		(*dtrace_cpu_state_changed_hook)(processor->cpu_id, FALSE);
	}
#endif

	ml_cpu_down();

#if HIBERNATION
	/*
	 * NOTE(review): lock/unlock pair appears to serialize against
	 * hibernation VM activity when this is the last available CPU —
	 * confirm against hibernate_vm_lock() semantics.
	 */
	if (processor_avail_count < 2) {
		hibernate_vm_lock();
		hibernate_vm_unlock();
	}
#endif

	processor_set_t pset = processor->processor_set;

	pset_lock(pset);
	pset_update_processor_state(pset, processor, PROCESSOR_OFF_LINE);
	--pset->online_processor_count;
	os_atomic_dec(&processor_avail_count, relaxed);
	if (processor->is_recommended) {
		os_atomic_dec(&processor_avail_count_user, relaxed);
	}
	commpage_update_active_cpus();
	SCHED(processor_queue_shutdown)(processor);
	/* pset lock dropped */
	SCHED(rt_queue_shutdown)(processor);

	/* Restore the previous binding of this thread */
	thread_bind(prev);

	/* interrupts still disabled */

	/*
	 * Continue processor shutdown on the processor's idle thread.
	 * The handoff won't fail because the idle thread has a reserved stack.
	 * Switching to the idle thread leaves interrupts disabled,
	 * so we can't accidentally take an interrupt after the context switch.
	 */
	thread_t shutdown_thread = processor->idle_thread;
	shutdown_thread->continuation = processor_offline;
	shutdown_thread->parameter = processor;

	thread_run(self, NULL, NULL, shutdown_thread);
}
338
/*
 * Called in the context of the idle thread to shut down the processor
 *
 * A shut-down processor looks like it's 'running' the idle thread parked
 * in this routine, but it's actually been powered off and has no hardware state.
 *
 * `parameter` is the processor_t being offlined; `result` is unused.
 * Does not return to its caller: when the processor is later powered
 * back on and re-initialized, this routine re-enters the scheduler via
 * thread_block(idle_thread).
 */
static void
processor_offline(
	void * parameter,
	__unused wait_result_t result)
{
	processor_t processor = (processor_t) parameter;
	thread_t self = current_thread();
	__assert_only thread_t old_thread = THREAD_NULL;

	/* Sanity: we must be the target processor's idle thread, interrupts off */
	assert(processor == current_processor());
	assert(self->state & TH_IDLE);
	assert(processor->idle_thread == self);
	assert(ml_get_interrupts_enabled() == FALSE);
	assert(self->continuation == NULL);
	assert(processor->processor_offlined == false);

	/* Sample once so the disable/enable pair below is always balanced */
	bool enforce_quiesce_safety = gEnforceQuiesceSafety;

	/*
	 * Scheduling is now disabled for this processor.
	 * Ensure that primitives that need scheduling (like mutexes) know this.
	 */
	if (enforce_quiesce_safety) {
		disable_preemption();
	}

	/* convince slave_main to come back here */
	processor->processor_offlined = true;

	/*
	 * Switch to the interrupt stack and shut down the processor.
	 *
	 * When the processor comes back, it will eventually call load_context which
	 * restores the context saved by machine_processor_shutdown, returning here.
	 */
	old_thread = machine_processor_shutdown(self, processor_offline_intstack, processor);

	/* old_thread should be NULL because we got here through Load_context */
	assert(old_thread == THREAD_NULL);

	assert(processor == current_processor());
	assert(processor->idle_thread == current_thread());

	assert(ml_get_interrupts_enabled() == FALSE);
	assert(self->continuation == NULL);

	/* Extract the machine_param value stashed by slave_main */
	void * machine_param = self->parameter;
	self->parameter = NULL;

	/* Re-initialize the processor */
	slave_machine_init(machine_param);

	assert(processor->processor_offlined == true);
	processor->processor_offlined = false;

	if (enforce_quiesce_safety) {
		enable_preemption();
	}

	/*
	 * Now that the processor is back, invoke the idle thread to find out what to do next.
	 * idle_thread will enable interrupts.
	 */
	thread_block(idle_thread);
	/*NOTREACHED*/
}
412
/*
 * Complete the shutdown and place the processor offline.
 *
 * Called at splsched in the shutdown context
 * (i.e. on the idle thread, on the interrupt stack)
 *
 * The onlining half of this is done in load_context().
 */
static void
processor_offline_intstack(
	processor_t processor)
{
	assert(processor == current_processor());
	assert(processor->active_thread == current_thread());

	/* Stop attributing time to this processor's current-state timer */
	timer_stop(PROCESSOR_DATA(processor, current_state), processor->last_dispatch);

	/* Leave the CPU quiescent-counter set before the CPU disappears */
	cpu_quiescent_counter_leave(processor->last_dispatch);

	PMAP_DEACTIVATE_KERNEL(processor->cpu_id);

	/* Powers down the CPU; must not return */
	cpu_sleep();
	panic("zombie processor");
	/*NOTREACHED*/
}
438
439 kern_return_t
440 host_get_boot_info(
441 host_priv_t host_priv,
442 kernel_boot_info_t boot_info)
443 {
444 const char *src = "";
445 if (host_priv == HOST_PRIV_NULL) {
446 return KERN_INVALID_HOST;
447 }
448
449 assert(host_priv == &realhost);
450
451 /*
452 * Copy first operator string terminated by '\0' followed by
453 * standardized strings generated from boot string.
454 */
455 src = machine_boot_info(boot_info, KERNEL_BOOT_INFO_MAX);
456 if (src != boot_info) {
457 (void) strncpy(boot_info, src, KERNEL_BOOT_INFO_MAX);
458 }
459
460 return KERN_SUCCESS;
461 }
462
463 #if CONFIG_DTRACE
464 #include <mach/sdt.h>
465 #endif
466
/*
 * ml_io_read:
 *
 * Read `size` bytes (1, 2, 4 or 8) from the memory-mapped I/O address
 * `vaddr` and return the value zero-extended to 64 bits.  Panics on
 * any other size.
 *
 * On x86_64, when the read-latency watchdog is armed
 * (reportphyreaddelayabs != 0) the access is timed with interrupts
 * disabled, and slow reads are traced, reported with a backtrace, or
 * panicked on according to the phyread*/tracephyread* tunables.
 */
unsigned long long
ml_io_read(uintptr_t vaddr, int size)
{
	unsigned long long result = 0;
	unsigned char s1;
	unsigned short s2;

#if defined(__x86_64__)
	uint64_t sabs, eabs;
	boolean_t istate, timeread = FALSE;
#if DEVELOPMENT || DEBUG
	extern uint64_t simulate_stretched_io;
	uintptr_t paddr = pmap_verify_noncacheable(vaddr);
#endif /* x86_64 DEVELOPMENT || DEBUG */
	if (__improbable(reportphyreaddelayabs != 0)) {
		/* Keep interrupts off so the measurement isn't inflated */
		istate = ml_set_interrupts_enabled(FALSE);
		sabs = mach_absolute_time();
		timeread = TRUE;
	}

#if DEVELOPMENT || DEBUG
	if (__improbable(timeread && simulate_stretched_io)) {
		/* Backdate the start time to exercise the slow-read paths */
		sabs -= simulate_stretched_io;
	}
#endif /* x86_64 DEVELOPMENT || DEBUG */

#endif /* x86_64 */

	switch (size) {
	case 1:
		s1 = *(volatile unsigned char *)vaddr;
		result = s1;
		break;
	case 2:
		s2 = *(volatile unsigned short *)vaddr;
		result = s2;
		break;
	case 4:
		result = *(volatile unsigned int *)vaddr;
		break;
	case 8:
		result = *(volatile unsigned long long *)vaddr;
		break;
	default:
		panic("Invalid size %d for ml_io_read(%p)", size, (void *)vaddr);
		break;
	}

#if defined(__x86_64__)
	if (__improbable(timeread == TRUE)) {
		eabs = mach_absolute_time();

#if DEVELOPMENT || DEBUG
		iotrace(IOTRACE_IO_READ, vaddr, paddr, size, result, sabs, eabs - sabs);
#endif

		if (__improbable((eabs - sabs) > reportphyreaddelayabs)) {
			/* Read exceeded the report ceiling */
#if !(DEVELOPMENT || DEBUG)
			uintptr_t paddr = kvtophys(vaddr);
#endif

			(void)ml_set_interrupts_enabled(istate);

			if (phyreadpanic && (machine_timeout_suspended() == FALSE)) {
				panic_io_port_read();
				panic("Read from IO vaddr 0x%lx paddr 0x%lx took %llu ns, "
				    "result: 0x%llx (start: %llu, end: %llu), ceiling: %llu",
				    vaddr, paddr, (eabs - sabs), result, sabs, eabs,
				    reportphyreaddelayabs);
			}

			if (reportphyreadosbt) {
				OSReportWithBacktrace("ml_io_read(v=%p, p=%p) size %d result 0x%llx "
				    "took %lluus",
				    (void *)vaddr, (void *)paddr, size, result,
				    (eabs - sabs) / NSEC_PER_USEC);
			}
#if CONFIG_DTRACE
			DTRACE_PHYSLAT5(physioread, uint64_t, (eabs - sabs),
			    uint64_t, vaddr, uint32_t, size, uint64_t, paddr, uint64_t, result);
#endif /* CONFIG_DTRACE */
		} else if (__improbable(tracephyreaddelayabs > 0 && (eabs - sabs) > tracephyreaddelayabs)) {
			/* Slower than the trace threshold: emit a kdebug event only */
#if !(DEVELOPMENT || DEBUG)
			uintptr_t paddr = kvtophys(vaddr);
#endif

			KDBG(MACHDBG_CODE(DBG_MACH_IO, DBC_MACH_IO_MMIO_READ),
			    (eabs - sabs), VM_KERNEL_UNSLIDE_OR_PERM(vaddr), paddr, result);

			(void)ml_set_interrupts_enabled(istate);
		} else {
			(void)ml_set_interrupts_enabled(istate);
		}
	}
#endif /* x86_64 */
	return result;
}
564
/*
 * ml_io_read8: 1-byte MMIO read wrapper around ml_io_read().
 */
unsigned int
ml_io_read8(uintptr_t vaddr)
{
	unsigned long long wide = ml_io_read(vaddr, 1);
	return (unsigned int)wide;
}
570
/*
 * ml_io_read16: 2-byte MMIO read wrapper around ml_io_read().
 */
unsigned int
ml_io_read16(uintptr_t vaddr)
{
	unsigned long long wide = ml_io_read(vaddr, 2);
	return (unsigned int)wide;
}
576
/*
 * ml_io_read32: 4-byte MMIO read wrapper around ml_io_read().
 */
unsigned int
ml_io_read32(uintptr_t vaddr)
{
	unsigned long long wide = ml_io_read(vaddr, 4);
	return (unsigned int)wide;
}
582
/*
 * ml_io_read64: 8-byte MMIO read wrapper around ml_io_read().
 */
unsigned long long
ml_io_read64(uintptr_t vaddr)
{
	unsigned long long wide = ml_io_read(vaddr, 8);
	return wide;
}
588
589 /* ml_io_write* */
590
/*
 * ml_io_write:
 *
 * Write the low `size` bytes (1, 2, 4 or 8) of `val` to the
 * memory-mapped I/O address `vaddr`.  Panics on any other size.
 *
 * On x86_64, when the write-latency watchdog is armed
 * (reportphywritedelayabs != 0) the access is timed with interrupts
 * disabled, and slow writes are traced, reported with a backtrace, or
 * panicked on according to the phywrite*/tracephywrite* tunables.
 */
void
ml_io_write(uintptr_t vaddr, uint64_t val, int size)
{
#if defined(__x86_64__)
	uint64_t sabs, eabs;
	boolean_t istate, timewrite = FALSE;
#if DEVELOPMENT || DEBUG
	extern uint64_t simulate_stretched_io;
	uintptr_t paddr = pmap_verify_noncacheable(vaddr);
#endif /* x86_64 DEVELOPMENT || DEBUG */
	if (__improbable(reportphywritedelayabs != 0)) {
		/* Keep interrupts off so the measurement isn't inflated */
		istate = ml_set_interrupts_enabled(FALSE);
		sabs = mach_absolute_time();
		timewrite = TRUE;
	}

#if DEVELOPMENT || DEBUG
	if (__improbable(timewrite && simulate_stretched_io)) {
		/* Backdate the start time to exercise the slow-write paths */
		sabs -= simulate_stretched_io;
	}
#endif /* x86_64 DEVELOPMENT || DEBUG */
#endif /* x86_64 */

	switch (size) {
	case 1:
		*(volatile uint8_t *)vaddr = (uint8_t)val;
		break;
	case 2:
		*(volatile uint16_t *)vaddr = (uint16_t)val;
		break;
	case 4:
		*(volatile uint32_t *)vaddr = (uint32_t)val;
		break;
	case 8:
		*(volatile uint64_t *)vaddr = (uint64_t)val;
		break;
	default:
		panic("Invalid size %d for ml_io_write(%p, 0x%llx)", size, (void *)vaddr, val);
		break;
	}

#if defined(__x86_64__)
	if (__improbable(timewrite == TRUE)) {
		eabs = mach_absolute_time();

#if DEVELOPMENT || DEBUG
		iotrace(IOTRACE_IO_WRITE, vaddr, paddr, size, val, sabs, eabs - sabs);
#endif

		if (__improbable((eabs - sabs) > reportphywritedelayabs)) {
			/* Write exceeded the report ceiling */
#if !(DEVELOPMENT || DEBUG)
			uintptr_t paddr = kvtophys(vaddr);
#endif

			(void)ml_set_interrupts_enabled(istate);

			if (phywritepanic && (machine_timeout_suspended() == FALSE)) {
				panic_io_port_read();
				panic("Write to IO vaddr %p paddr %p val 0x%llx took %llu ns,"
				    " (start: %llu, end: %llu), ceiling: %llu",
				    (void *)vaddr, (void *)paddr, val, (eabs - sabs), sabs, eabs,
				    reportphywritedelayabs);
			}

			if (reportphywriteosbt) {
				OSReportWithBacktrace("ml_io_write size %d (v=%p, p=%p, 0x%llx) "
				    "took %lluus",
				    size, (void *)vaddr, (void *)paddr, val, (eabs - sabs) / NSEC_PER_USEC);
			}
#if CONFIG_DTRACE
			DTRACE_PHYSLAT5(physiowrite, uint64_t, (eabs - sabs),
			    uint64_t, vaddr, uint32_t, size, uint64_t, paddr, uint64_t, val);
#endif /* CONFIG_DTRACE */
		} else if (__improbable(tracephywritedelayabs > 0 && (eabs - sabs) > tracephywritedelayabs)) {
			/* Slower than the trace threshold: emit a kdebug event only */
#if !(DEVELOPMENT || DEBUG)
			uintptr_t paddr = kvtophys(vaddr);
#endif

			KDBG(MACHDBG_CODE(DBG_MACH_IO, DBC_MACH_IO_MMIO_WRITE),
			    (eabs - sabs), VM_KERNEL_UNSLIDE_OR_PERM(vaddr), paddr, val);

			(void)ml_set_interrupts_enabled(istate);
		} else {
			(void)ml_set_interrupts_enabled(istate);
		}
	}
#endif /* x86_64 */
}
679
/*
 * ml_io_write8: 1-byte MMIO write wrapper around ml_io_write().
 */
void
ml_io_write8(uintptr_t vaddr, uint8_t val)
{
	ml_io_write(vaddr, (uint64_t)val, 1);
}
685
/*
 * ml_io_write16: 2-byte MMIO write wrapper around ml_io_write().
 */
void
ml_io_write16(uintptr_t vaddr, uint16_t val)
{
	ml_io_write(vaddr, (uint64_t)val, 2);
}
691
/*
 * ml_io_write32: 4-byte MMIO write wrapper around ml_io_write().
 */
void
ml_io_write32(uintptr_t vaddr, uint32_t val)
{
	ml_io_write(vaddr, (uint64_t)val, 4);
}
697
/*
 * ml_io_write64: 8-byte MMIO write wrapper around ml_io_write().
 */
void
ml_io_write64(uintptr_t vaddr, uint64_t val)
{
	ml_io_write(vaddr, val, 8);
}