osfmk/i386/machine_routines.c (apple/xnu, xnu-7195.101.1)
/*
 * Copyright (c) 2000-2012 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <i386/machine_routines.h>
#include <i386/io_map_entries.h>
#include <i386/cpuid.h>
#include <i386/fpu.h>
#include <mach/processor.h>
#include <kern/processor.h>
#include <kern/machine.h>

#include <kern/cpu_number.h>
#include <kern/thread.h>
#include <kern/thread_call.h>
#include <kern/policy_internal.h>

#include <prng/random.h>
#include <prng/entropy.h>
#include <i386/machine_cpu.h>
#include <i386/lapic.h>
#include <i386/bit_routines.h>
#include <i386/mp_events.h>
#include <i386/pmCPU.h>
#include <i386/trap.h>
#include <i386/tsc.h>
#include <i386/cpu_threads.h>
#include <i386/proc_reg.h>
#include <mach/vm_param.h>
#include <i386/pmap.h>
#include <i386/pmap_internal.h>
#include <i386/misc_protos.h>
#include <kern/timer_queue.h>
#include <vm/vm_map.h>
#if KPC
#include <kern/kpc.h>
#endif
#include <architecture/i386/pio.h>
#include <i386/cpu_data.h>
#if DEBUG
#define DBG(x...) kprintf("DBG: " x)
#else
#define DBG(x...)
#endif

#if MONOTONIC
#include <kern/monotonic.h>
#endif /* MONOTONIC */

extern void wakeup(void *);

uint64_t LockTimeOut;
uint64_t TLBTimeOut;
uint64_t LockTimeOutTSC;
uint32_t LockTimeOutUsec;
uint64_t MutexSpin;
uint64_t low_MutexSpin;
int64_t high_MutexSpin;
uint64_t LastDebuggerEntryAllowance;
uint64_t delay_spin_threshold;

extern uint64_t panic_restart_timeout;

boolean_t virtualized = FALSE;

decl_simple_lock_data(static, ml_timer_evaluation_slock);
uint32_t ml_timer_eager_evaluations;
uint64_t ml_timer_eager_evaluation_max;
static boolean_t ml_timer_evaluation_in_progress = FALSE;

LCK_GRP_DECLARE(max_cpus_grp, "max_cpus");
LCK_MTX_DECLARE(max_cpus_lock, &max_cpus_grp);
static int max_cpus_initialized = 0;
#define MAX_CPUS_SET    0x1
#define MAX_CPUS_WAIT   0x2

/* IO memory map services */

/* Map memory map IO space */
vm_offset_t
ml_io_map(
	vm_offset_t phys_addr,
	vm_size_t size)
{
	return io_map(phys_addr, size, VM_WIMG_IO);
}

/* boot memory allocation */
vm_offset_t
ml_static_malloc(
	__unused vm_size_t size)
{
	return (vm_offset_t)NULL;
}


void
ml_get_bouncepool_info(vm_offset_t *phys_addr, vm_size_t *size)
{
	*phys_addr = 0;
	*size = 0;
}


vm_offset_t
ml_static_ptovirt(
	vm_offset_t paddr)
{
#if defined(__x86_64__)
	return (vm_offset_t)(((unsigned long) paddr) | VM_MIN_KERNEL_ADDRESS);
#else
	return (vm_offset_t)((paddr) | LINEAR_KERNEL_ADDRESS);
#endif
}
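
/*
 * Illustrative note (added commentary, not in the original source): on
 * x86_64 ml_static_ptovirt() simply ORs the physical address into the
 * kernel's static window, so, assuming a page placed by the booter at
 * physical 0x200000 and covered by that boot-time mapping, the returned
 * kernel virtual address would be (0x200000 | VM_MIN_KERNEL_ADDRESS).
 * The reverse translation for wired pages is ml_vtophys(), defined below.
 */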

vm_offset_t
ml_static_slide(
	vm_offset_t vaddr)
{
	return VM_KERNEL_SLIDE(vaddr);
}

/*
 * base must be page-aligned, and size must be a multiple of PAGE_SIZE
 */
kern_return_t
ml_static_verify_page_protections(
	uint64_t base, uint64_t size, vm_prot_t prot)
{
	vm_prot_t pageprot;
	uint64_t offset;

	DBG("ml_static_verify_page_protections: vaddr 0x%llx sz 0x%llx prot 0x%x\n", base, size, prot);

	/*
	 * base must be within the static bounds, defined to be:
	 * (vm_kernel_stext, kc_highest_nonlinkedit_vmaddr)
	 */
#if DEVELOPMENT || DEBUG || KASAN
	assert(kc_highest_nonlinkedit_vmaddr > 0 && base > vm_kernel_stext && base < kc_highest_nonlinkedit_vmaddr);
#else /* On release kernels, assume this is a protection mismatch failure. */
	if (kc_highest_nonlinkedit_vmaddr == 0 || base < vm_kernel_stext || base >= kc_highest_nonlinkedit_vmaddr) {
		return KERN_FAILURE;
	}
#endif

	for (offset = 0; offset < size; offset += PAGE_SIZE) {
		if (pmap_get_prot(kernel_pmap, base + offset, &pageprot) == KERN_FAILURE) {
			return KERN_FAILURE;
		}
		if ((pageprot & prot) != prot) {
			return KERN_FAILURE;
		}
	}

	return KERN_SUCCESS;
}
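
/*
 * Illustrative usage sketch (hypothetical caller, not in the original
 * source): a subsystem that just changed the protections of a static range
 * could double-check its work with
 *
 *	if (ml_static_verify_page_protections(base, size, VM_PROT_READ) != KERN_SUCCESS) {
 *		panic("static range not readable");
 *	}
 *
 * where base is page-aligned and size is a multiple of PAGE_SIZE, as the
 * comment above requires.
 */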

vm_offset_t
ml_static_unslide(
	vm_offset_t vaddr)
{
	return VM_KERNEL_UNSLIDE(vaddr);
}

/*
 * Reclaim memory, by virtual address, that was used in early boot and is
 * no longer needed by the kernel.
 */
void
ml_static_mfree(
	vm_offset_t vaddr,
	vm_size_t size)
{
	addr64_t vaddr_cur;
	ppnum_t ppn;
	uint32_t freed_pages = 0;
	vm_size_t map_size;

	assert(vaddr >= VM_MIN_KERNEL_ADDRESS);

	assert((vaddr & (PAGE_SIZE - 1)) == 0); /* must be page aligned */

	for (vaddr_cur = vaddr; vaddr_cur < round_page_64(vaddr + size);) {
		map_size = pmap_query_pagesize(kernel_pmap, vaddr_cur);

		/* just skip if nothing mapped here */
		if (map_size == 0) {
			vaddr_cur += PAGE_SIZE;
			continue;
		}

		/*
		 * Can't free from the middle of a large page.
		 */
		assert((vaddr_cur & (map_size - 1)) == 0);

		ppn = pmap_find_phys(kernel_pmap, vaddr_cur);
		assert(ppn != (ppnum_t)NULL);

		pmap_remove(kernel_pmap, vaddr_cur, vaddr_cur + map_size);
		while (map_size > 0) {
			if (++kernel_pmap->stats.resident_count > kernel_pmap->stats.resident_max) {
				kernel_pmap->stats.resident_max = kernel_pmap->stats.resident_count;
			}

			assert(pmap_valid_page(ppn));
			if (IS_MANAGED_PAGE(ppn)) {
				vm_page_create(ppn, (ppn + 1));
				freed_pages++;
			}
			map_size -= PAGE_SIZE;
			vaddr_cur += PAGE_SIZE;
			ppn++;
		}
	}
	vm_page_lockspin_queues();
	vm_page_wire_count -= freed_pages;
	vm_page_wire_count_initial -= freed_pages;
	if (vm_page_wire_count_on_boot != 0) {
		assert(vm_page_wire_count_on_boot >= freed_pages);
		vm_page_wire_count_on_boot -= freed_pages;
	}
	vm_page_unlock_queues();

#if DEBUG
	kprintf("ml_static_mfree: Released 0x%x pages at VA %p, size:0x%llx, last ppn: 0x%x\n", freed_pages, (void *)vaddr, (uint64_t)size, ppn);
#endif
}
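
/*
 * Note on the loop above (added commentary, not in the original source):
 * the walk advances in PAGE_SIZE steps, but pmap_remove() tears down a whole
 * mapping at once, which may be a large page; that is why freeing from the
 * middle of a large page is asserted against.  The inner loop then hands
 * each 4K frame of that mapping back to the VM layer via vm_page_create(),
 * but only for pages it manages (IS_MANAGED_PAGE), and the wire counts are
 * debited by the number of pages actually released.
 */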

/* Change page protections for addresses previously loaded by efiboot */
kern_return_t
ml_static_protect(vm_offset_t vmaddr, vm_size_t size, vm_prot_t prot)
{
	boolean_t NX = !!!(prot & VM_PROT_EXECUTE), ro = !!!(prot & VM_PROT_WRITE);

	assert(prot & VM_PROT_READ);

	pmap_mark_range(kernel_pmap, vmaddr, size, NX, ro);

	return KERN_SUCCESS;
}

/* virtual to physical on wired pages */
vm_offset_t
ml_vtophys(
	vm_offset_t vaddr)
{
	return (vm_offset_t)kvtophys(vaddr);
}

/*
 * Routine:   ml_nofault_copy
 * Function:  Perform a physical mode copy if the source and
 *            destination have valid translations in the kernel pmap.
 *            If translations are present, they are assumed to
 *            be wired; i.e. no attempt is made to guarantee that the
 *            translations obtained remain valid for
 *            the duration of the copy process.
 */

vm_size_t
ml_nofault_copy(
	vm_offset_t virtsrc, vm_offset_t virtdst, vm_size_t size)
{
	addr64_t cur_phys_dst, cur_phys_src;
	uint32_t count, nbytes = 0;

	while (size > 0) {
		if (!(cur_phys_src = kvtophys(virtsrc))) {
			break;
		}
		if (!(cur_phys_dst = kvtophys(virtdst))) {
			break;
		}
		if (!pmap_valid_page(i386_btop(cur_phys_dst)) || !pmap_valid_page(i386_btop(cur_phys_src))) {
			break;
		}
		count = (uint32_t)(PAGE_SIZE - (cur_phys_src & PAGE_MASK));
		if (count > (PAGE_SIZE - (cur_phys_dst & PAGE_MASK))) {
			count = (uint32_t)(PAGE_SIZE - (cur_phys_dst & PAGE_MASK));
		}
		if (count > size) {
			count = (uint32_t)size;
		}

		bcopy_phys(cur_phys_src, cur_phys_dst, count);

		nbytes += count;
		virtsrc += count;
		virtdst += count;
		size -= count;
	}

	return nbytes;
}
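
/*
 * Added commentary (not in the original source): each iteration above copies
 * at most one page, clipping the chunk to whichever ends first of the source
 * page, the destination page, or the remaining size.  The return value is
 * the number of bytes actually copied, which may be less than the requested
 * size if either range loses its translation part way through.
 */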

/*
 * Routine:   ml_validate_nofault
 * Function:  Validate that this address range has valid translations
 *            in the kernel pmap.  If translations are present, they are
 *            assumed to be wired; i.e. no attempt is made to guarantee
 *            that the translations persist after the check.
 * Returns:   TRUE if the range is mapped and will not cause a fault,
 *            FALSE otherwise.
 */

boolean_t
ml_validate_nofault(
	vm_offset_t virtsrc, vm_size_t size)
{
	addr64_t cur_phys_src;
	uint32_t count;

	while (size > 0) {
		if (!(cur_phys_src = kvtophys(virtsrc))) {
			return FALSE;
		}
		if (!pmap_valid_page(i386_btop(cur_phys_src))) {
			return FALSE;
		}
		count = (uint32_t)(PAGE_SIZE - (cur_phys_src & PAGE_MASK));
		if (count > size) {
			count = (uint32_t)size;
		}

		virtsrc += count;
		size -= count;
	}

	return TRUE;
}

/* Interrupt handling */

/* Initialize Interrupts */
void
ml_init_interrupt(void)
{
	(void) ml_set_interrupts_enabled(TRUE);
}


/* Get Interrupts Enabled */
boolean_t
ml_get_interrupts_enabled(void)
{
	unsigned long flags;

	__asm__ volatile ("pushf; pop %0": "=r" (flags));
	return (flags & EFL_IF) != 0;
}

/* Set Interrupts Enabled */
boolean_t
ml_set_interrupts_enabled(boolean_t enable)
{
	unsigned long flags;
	boolean_t istate;

	__asm__ volatile ("pushf; pop %0" : "=r" (flags));

	assert(get_interrupt_level() ? (enable == FALSE) : TRUE);

	istate = ((flags & EFL_IF) != 0);

	if (enable) {
		__asm__ volatile ("sti;nop");

		if ((get_preemption_level() == 0) && (*ast_pending() & AST_URGENT)) {
			__asm__ volatile ("int %0" :: "N" (T_PREEMPT));
		}
	} else {
		if (istate) {
			__asm__ volatile ("cli");
		}
	}

	return istate;
}
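
/*
 * Illustrative usage sketch (hypothetical caller, not in the original
 * source): the return value is the previous interrupt state, so the common
 * save/disable/restore pattern is
 *
 *	boolean_t istate = ml_set_interrupts_enabled(FALSE);
 *	... critical section ...
 *	(void) ml_set_interrupts_enabled(istate);
 *
 * Note also the behavior above: re-enabling interrupts at preemption level
 * zero with an urgent AST pending immediately takes the preemption trap.
 */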

/* Early Set Interrupts Enabled */
boolean_t
ml_early_set_interrupts_enabled(boolean_t enable)
{
	if (enable == TRUE) {
		kprintf("Caller attempted to enable interrupts too early in "
		    "kernel startup. Halting.\n");
		hlt();
		/*NOTREACHED*/
	}

	/* On x86, do not allow interrupts to be enabled very early */
	return FALSE;
}

/* Check if running at interrupt context */
boolean_t
ml_at_interrupt_context(void)
{
	return get_interrupt_level() != 0;
}

void
ml_get_power_state(boolean_t *icp, boolean_t *pidlep)
{
	*icp = (get_interrupt_level() != 0);
	/* These will be technically inaccurate for interrupts that occur
	 * successively within a single "idle exit" event, but shouldn't
	 * matter statistically.
	 */
	*pidlep = (current_cpu_datap()->lcpu.package->num_idle == topoParms.nLThreadsPerPackage);
}

/* Generate a fake interrupt */
__dead2
void
ml_cause_interrupt(void)
{
	panic("ml_cause_interrupt not defined yet on Intel");
}

/*
 * TODO: transition users of this to kernel_thread_start_priority
 * ml_thread_policy is an unsupported KPI
 */
void
ml_thread_policy(
	thread_t thread,
	__unused unsigned policy_id,
	unsigned policy_info)
{
	if (policy_info & MACHINE_NETWORK_WORKLOOP) {
		thread_precedence_policy_data_t info;
		__assert_only kern_return_t kret;

		info.importance = 1;

		kret = thread_policy_set_internal(thread, THREAD_PRECEDENCE_POLICY,
		    (thread_policy_t)&info,
		    THREAD_PRECEDENCE_POLICY_COUNT);
		assert(kret == KERN_SUCCESS);
	}
}

/* Install an interrupt handler */
void
ml_install_interrupt_handler(
	void *nub,
	int source,
	void *target,
	IOInterruptHandler handler,
	void *refCon)
{
	boolean_t current_state;

	current_state = ml_set_interrupts_enabled(FALSE);

	PE_install_interrupt_handler(nub, source, target,
	    (IOInterruptHandler) handler, refCon);

	(void) ml_set_interrupts_enabled(current_state);
}


void
machine_signal_idle(
	processor_t processor)
{
	cpu_interrupt(processor->cpu_id);
}

__dead2
void
machine_signal_idle_deferred(
	__unused processor_t processor)
{
	panic("Unimplemented");
}

__dead2
void
machine_signal_idle_cancel(
	__unused processor_t processor)
{
	panic("Unimplemented");
}

static kern_return_t
register_cpu(
	uint32_t lapic_id,
	processor_t *processor_out,
	boolean_t boot_cpu )
{
	int target_cpu;
	cpu_data_t *this_cpu_datap;

	this_cpu_datap = cpu_data_alloc(boot_cpu);
	if (this_cpu_datap == NULL) {
		return KERN_FAILURE;
	}
	target_cpu = this_cpu_datap->cpu_number;
	assert((boot_cpu && (target_cpu == 0)) ||
	    (!boot_cpu && (target_cpu != 0)));

	lapic_cpu_map(lapic_id, target_cpu);

	/* The cpu_id is not known at registration phase. Just do
	 * lapic_id for now
	 */
	this_cpu_datap->cpu_phys_number = lapic_id;

	this_cpu_datap->cpu_console_buf = console_cpu_alloc(boot_cpu);
	if (this_cpu_datap->cpu_console_buf == NULL) {
		goto failed;
	}

#if KPC
	if (kpc_register_cpu(this_cpu_datap) != TRUE) {
		goto failed;
	}
#endif

	if (!boot_cpu) {
		cpu_thread_alloc(this_cpu_datap->cpu_number);
		if (this_cpu_datap->lcpu.core == NULL) {
			goto failed;
		}
	}

	/*
	 * processor_init() deferred to topology start
	 * because "slot numbers" a.k.a. logical processor numbers
	 * are not yet finalized.
	 */
	*processor_out = this_cpu_datap->cpu_processor;

	return KERN_SUCCESS;

failed:
	console_cpu_free(this_cpu_datap->cpu_console_buf);
#if KPC
	kpc_unregister_cpu(this_cpu_datap);
#endif /* KPC */

	return KERN_FAILURE;
}


kern_return_t
ml_processor_register(
	cpu_id_t cpu_id,
	uint32_t lapic_id,
	processor_t *processor_out,
	boolean_t boot_cpu,
	boolean_t start )
{
	static boolean_t done_topo_sort = FALSE;
	static uint32_t num_registered = 0;

	/* Register all CPUs first, and track max */
	if (start == FALSE) {
		num_registered++;

		DBG( "registering CPU lapic id %d\n", lapic_id );

		return register_cpu( lapic_id, processor_out, boot_cpu );
	}

	/* Sort by topology before we start anything */
	if (!done_topo_sort) {
		DBG( "about to start CPUs. %d registered\n", num_registered );

		cpu_topology_sort( num_registered );
		done_topo_sort = TRUE;
	}

	/* Assign the cpu ID */
	uint32_t cpunum = -1;
	cpu_data_t *this_cpu_datap = NULL;

	/* find cpu num and pointer */
	cpunum = ml_get_cpuid( lapic_id );

	if (cpunum == 0xFFFFFFFF) { /* never heard of it? */
		panic( "trying to start invalid/unregistered CPU %d\n", lapic_id );
	}

	this_cpu_datap = cpu_datap(cpunum);

	/* fix the CPU id */
	this_cpu_datap->cpu_id = cpu_id;

	/* allocate and initialize other per-cpu structures */
	if (!boot_cpu) {
		mp_cpus_call_cpu_init(cpunum);
		random_cpu_init(cpunum);
	}

	/* output arg */
	*processor_out = this_cpu_datap->cpu_processor;

	/* OK, try and start this CPU */
	return cpu_topology_start_cpu( cpunum );
}
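
/*
 * Added commentary (not in the original source): ml_processor_register() is
 * driven in two phases by its caller.  With start == FALSE every CPU is
 * registered via register_cpu() and counted; on the first call with
 * start == TRUE the registered set is sorted by topology exactly once, and
 * each subsequent start call resolves the lapic_id to a cpu number and asks
 * cpu_topology_start_cpu() to bring that CPU up.
 */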


void
ml_cpu_get_info(ml_cpu_info_t *cpu_infop)
{
	boolean_t os_supports_sse;
	i386_cpu_info_t *cpuid_infop;

	if (cpu_infop == NULL) {
		return;
	}

	/*
	 * Are we supporting MMX/SSE/SSE2/SSE3?
	 * As distinct from whether the cpu has these capabilities.
	 */
	os_supports_sse = !!(get_cr4() & CR4_OSXMM);

	if (ml_fpu_avx_enabled()) {
		cpu_infop->vector_unit = 9;
	} else if ((cpuid_features() & CPUID_FEATURE_SSE4_2) && os_supports_sse) {
		cpu_infop->vector_unit = 8;
	} else if ((cpuid_features() & CPUID_FEATURE_SSE4_1) && os_supports_sse) {
		cpu_infop->vector_unit = 7;
	} else if ((cpuid_features() & CPUID_FEATURE_SSSE3) && os_supports_sse) {
		cpu_infop->vector_unit = 6;
	} else if ((cpuid_features() & CPUID_FEATURE_SSE3) && os_supports_sse) {
		cpu_infop->vector_unit = 5;
	} else if ((cpuid_features() & CPUID_FEATURE_SSE2) && os_supports_sse) {
		cpu_infop->vector_unit = 4;
	} else if ((cpuid_features() & CPUID_FEATURE_SSE) && os_supports_sse) {
		cpu_infop->vector_unit = 3;
	} else if (cpuid_features() & CPUID_FEATURE_MMX) {
		cpu_infop->vector_unit = 2;
	} else {
		cpu_infop->vector_unit = 0;
	}

	cpuid_infop = cpuid_info();

	cpu_infop->cache_line_size = cpuid_infop->cache_linesize;

	cpu_infop->l1_icache_size = cpuid_infop->cache_size[L1I];
	cpu_infop->l1_dcache_size = cpuid_infop->cache_size[L1D];

	if (cpuid_infop->cache_size[L2U] > 0) {
		cpu_infop->l2_settings = 1;
		cpu_infop->l2_cache_size = cpuid_infop->cache_size[L2U];
	} else {
		cpu_infop->l2_settings = 0;
		cpu_infop->l2_cache_size = 0xFFFFFFFF;
	}

	if (cpuid_infop->cache_size[L3U] > 0) {
		cpu_infop->l3_settings = 1;
		cpu_infop->l3_cache_size = cpuid_infop->cache_size[L3U];
	} else {
		cpu_infop->l3_settings = 0;
		cpu_infop->l3_cache_size = 0xFFFFFFFF;
	}
}

int
ml_early_cpu_max_number(void)
{
	int n = max_ncpus;

	assert(startup_phase >= STARTUP_SUB_TUNABLES);
	if (max_cpus_from_firmware) {
		n = MIN(n, max_cpus_from_firmware);
	}
	return n - 1;
}

void
ml_set_max_cpus(unsigned int max_cpus)
{
	lck_mtx_lock(&max_cpus_lock);
	if (max_cpus_initialized != MAX_CPUS_SET) {
		if (max_cpus > 0 && max_cpus <= MAX_CPUS) {
			/*
			 * Note: max_cpus is the number of enabled processors
			 * that ACPI found; max_ncpus is the maximum number
			 * that the kernel supports or that the "cpus="
			 * boot-arg has set. Here we take the minimum.
			 */
			machine_info.max_cpus = (integer_t)MIN(max_cpus, max_ncpus);
		}
		if (max_cpus_initialized == MAX_CPUS_WAIT) {
			thread_wakeup((event_t) &max_cpus_initialized);
		}
		max_cpus_initialized = MAX_CPUS_SET;
	}
	lck_mtx_unlock(&max_cpus_lock);
}

unsigned int
ml_wait_max_cpus(void)
{
	lck_mtx_lock(&max_cpus_lock);
	while (max_cpus_initialized != MAX_CPUS_SET) {
		max_cpus_initialized = MAX_CPUS_WAIT;
		lck_mtx_sleep(&max_cpus_lock, LCK_SLEEP_DEFAULT, &max_cpus_initialized, THREAD_UNINT);
	}
	lck_mtx_unlock(&max_cpus_lock);
	return machine_info.max_cpus;
}
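
/*
 * Added commentary (not in the original source): ml_set_max_cpus() and
 * ml_wait_max_cpus() form a small producer/consumer pair over max_cpus_lock.
 * A waiter that arrives before the enabled-CPU count is known marks the
 * state MAX_CPUS_WAIT and sleeps on &max_cpus_initialized; the setter
 * publishes machine_info.max_cpus, wakes any such waiter, and latches the
 * state to MAX_CPUS_SET so later calls return immediately.
 */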

void
ml_panic_trap_to_debugger(__unused const char *panic_format_str,
    __unused va_list *panic_args,
    __unused unsigned int reason,
    __unused void *ctx,
    __unused uint64_t panic_options_mask,
    __unused unsigned long panic_caller)
{
	return;
}

static uint64_t
virtual_timeout_inflate64(unsigned int vti, uint64_t timeout, uint64_t max_timeout)
{
	if (vti >= 64) {
		return max_timeout;
	}

	if ((timeout << vti) >> vti != timeout) {
		return max_timeout;
	}

	if ((timeout << vti) > max_timeout) {
		return max_timeout;
	}

	return timeout << vti;
}
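
/*
 * Worked example (added commentary, not in the original source): with the
 * default vti of 6, a 250 ms timeout becomes 250 ms << 6 = 16 s.  The guards
 * above catch the two ways the shift can go wrong: a shift of 64 or more is
 * undefined for a 64-bit value, and a shift that does not round-trip
 * ((t << vti) >> vti != t) has overflowed.  In either case, and whenever the
 * inflated value would exceed max_timeout, the result is clamped to
 * max_timeout.
 */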

static uint32_t
virtual_timeout_inflate32(unsigned int vti, uint32_t timeout, uint32_t max_timeout)
{
	if (vti >= 32) {
		return max_timeout;
	}

	if ((timeout << vti) >> vti != timeout) {
		return max_timeout;
	}

	return timeout << vti;
}

/*
 * Some timeouts are later adjusted or used in calculations setting
 * other values. In order to avoid overflow, cap the max timeout at
 * 2^47 ns (~39 hours).
 */
static const uint64_t max_timeout_ns = 1ULL << 47;

/*
 * Inflate a timeout in absolutetime.
 */
static uint64_t
virtual_timeout_inflate_abs(unsigned int vti, uint64_t timeout)
{
	uint64_t max_timeout;
	nanoseconds_to_absolutetime(max_timeout_ns, &max_timeout);
	return virtual_timeout_inflate64(vti, timeout, max_timeout);
}

/*
 * Inflate a value in TSC ticks.
 */
static uint64_t
virtual_timeout_inflate_tsc(unsigned int vti, uint64_t timeout)
{
	const uint64_t max_timeout = tmrCvt(max_timeout_ns, tscFCvtn2t);
	return virtual_timeout_inflate64(vti, timeout, max_timeout);
}

/*
 * Inflate a timeout in microseconds.
 */
static uint32_t
virtual_timeout_inflate_us(unsigned int vti, uint64_t timeout)
{
	const uint32_t max_timeout = ~0;
	return virtual_timeout_inflate32(vti, timeout, max_timeout);
}

uint64_t
ml_get_timebase_entropy(void)
{
	return __builtin_ia32_rdtsc();
}

/*
 * Routine:   ml_init_lock_timeout
 * Function:
 */
void
ml_init_lock_timeout(void)
{
	uint64_t abstime;
	uint32_t mtxspin;
#if DEVELOPMENT || DEBUG
	uint64_t default_timeout_ns = NSEC_PER_SEC >> 2;
#else
	uint64_t default_timeout_ns = NSEC_PER_SEC >> 1;
#endif
	uint32_t slto;
	uint32_t prt;

	if (PE_parse_boot_argn("slto_us", &slto, sizeof(slto))) {
		default_timeout_ns = slto * NSEC_PER_USEC;
	}

	/*
	 * LockTimeOut is absolutetime, LockTimeOutTSC is in TSC ticks,
	 * and LockTimeOutUsec is in microseconds and it's 32-bits.
	 */
	LockTimeOutUsec = (uint32_t) (default_timeout_ns / NSEC_PER_USEC);
	nanoseconds_to_absolutetime(default_timeout_ns, &abstime);
	LockTimeOut = abstime;
	LockTimeOutTSC = tmrCvt(abstime, tscFCvtn2t);

	/*
	 * TLBTimeOut dictates the TLB flush timeout period. It defaults to
	 * LockTimeOut but can be overridden separately. In particular, a
	 * zero value inhibits the timeout-panic and cuts a trace event instead
	 * - see pmap_flush_tlbs().
	 */
	if (PE_parse_boot_argn("tlbto_us", &slto, sizeof(slto))) {
		default_timeout_ns = slto * NSEC_PER_USEC;
		nanoseconds_to_absolutetime(default_timeout_ns, &abstime);
		TLBTimeOut = (uint32_t) abstime;
	} else {
		TLBTimeOut = LockTimeOut;
	}

#if DEVELOPMENT || DEBUG
	reportphyreaddelayabs = LockTimeOut >> 1;
#endif
	if (PE_parse_boot_argn("phyreadmaxus", &slto, sizeof(slto))) {
		default_timeout_ns = slto * NSEC_PER_USEC;
		nanoseconds_to_absolutetime(default_timeout_ns, &abstime);
		reportphyreaddelayabs = abstime;
	}

	if (PE_parse_boot_argn("phywritemaxus", &slto, sizeof(slto))) {
		nanoseconds_to_absolutetime((uint64_t)slto * NSEC_PER_USEC, &abstime);
		reportphywritedelayabs = abstime;
	}

	if (PE_parse_boot_argn("tracephyreadus", &slto, sizeof(slto))) {
		nanoseconds_to_absolutetime((uint64_t)slto * NSEC_PER_USEC, &abstime);
		tracephyreaddelayabs = abstime;
	}

	if (PE_parse_boot_argn("tracephywriteus", &slto, sizeof(slto))) {
		nanoseconds_to_absolutetime((uint64_t)slto * NSEC_PER_USEC, &abstime);
		tracephywritedelayabs = abstime;
	}

	if (PE_parse_boot_argn("mtxspin", &mtxspin, sizeof(mtxspin))) {
		if (mtxspin > USEC_PER_SEC >> 4) {
			mtxspin = USEC_PER_SEC >> 4;
		}
		nanoseconds_to_absolutetime(mtxspin * NSEC_PER_USEC, &abstime);
	} else {
		nanoseconds_to_absolutetime(10 * NSEC_PER_USEC, &abstime);
	}
	MutexSpin = (unsigned int)abstime;
	low_MutexSpin = MutexSpin;
	/*
	 * high_MutexSpin should be initialized as low_MutexSpin * real_ncpus, but
	 * real_ncpus is not set at this time
	 */
	high_MutexSpin = -1;

	nanoseconds_to_absolutetime(4ULL * NSEC_PER_SEC, &LastDebuggerEntryAllowance);
	if (PE_parse_boot_argn("panic_restart_timeout", &prt, sizeof(prt))) {
		nanoseconds_to_absolutetime(prt * NSEC_PER_SEC, &panic_restart_timeout);
	}

	virtualized = ((cpuid_features() & CPUID_FEATURE_VMM) != 0);
	if (virtualized) {
		unsigned int vti;

		if (!PE_parse_boot_argn("vti", &vti, sizeof(vti))) {
			vti = 6;
		}
		printf("Timeouts adjusted for virtualization (<<%d)\n", vti);
		kprintf("Timeouts adjusted for virtualization (<<%d):\n", vti);
#define VIRTUAL_TIMEOUT_INFLATE_ABS(_timeout)                           \
MACRO_BEGIN                                                             \
	kprintf("%24s: 0x%016llx ", #_timeout, _timeout);               \
	_timeout = virtual_timeout_inflate_abs(vti, _timeout);          \
	kprintf("-> 0x%016llx\n", _timeout);                             \
MACRO_END

#define VIRTUAL_TIMEOUT_INFLATE_TSC(_timeout)                           \
MACRO_BEGIN                                                             \
	kprintf("%24s: 0x%016llx ", #_timeout, _timeout);               \
	_timeout = virtual_timeout_inflate_tsc(vti, _timeout);          \
	kprintf("-> 0x%016llx\n", _timeout);                             \
MACRO_END
#define VIRTUAL_TIMEOUT_INFLATE_US(_timeout)                            \
MACRO_BEGIN                                                             \
	kprintf("%24s: 0x%08x ", #_timeout, _timeout);                  \
	_timeout = virtual_timeout_inflate_us(vti, _timeout);           \
	kprintf("-> 0x%08x\n", _timeout);                                \
MACRO_END
		VIRTUAL_TIMEOUT_INFLATE_US(LockTimeOutUsec);
		VIRTUAL_TIMEOUT_INFLATE_ABS(LockTimeOut);
		VIRTUAL_TIMEOUT_INFLATE_TSC(LockTimeOutTSC);
		VIRTUAL_TIMEOUT_INFLATE_ABS(TLBTimeOut);
		VIRTUAL_TIMEOUT_INFLATE_ABS(MutexSpin);
		VIRTUAL_TIMEOUT_INFLATE_ABS(low_MutexSpin);
		VIRTUAL_TIMEOUT_INFLATE_ABS(reportphyreaddelayabs);
	}

	interrupt_latency_tracker_setup();
	simple_lock_init(&ml_timer_evaluation_slock, 0);
}
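
/*
 * Added commentary (not in the original source): the timeouts above can be
 * tuned with boot-args where present.  Hypothetical example values, with the
 * names and units taken from the code:
 *
 *	slto_us=500000     spinlock timeout, microseconds
 *	tlbto_us=0         disable the TLB-flush timeout panic (trace instead)
 *	mtxspin=100        mutex spin window, microseconds (capped at 1/16 s)
 *	vti=8              extra shift applied to timeouts under a hypervisor
 *
 * Without boot-args, the spinlock timeout is 1/4 s on DEVELOPMENT/DEBUG
 * kernels and 1/2 s otherwise, and vti defaults to 6 when a VMM is detected.
 */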

/*
 * Threshold above which we should attempt to block
 * instead of spinning for clock_delay_until().
 */

void
ml_init_delay_spin_threshold(int threshold_us)
{
	nanoseconds_to_absolutetime(threshold_us * NSEC_PER_USEC, &delay_spin_threshold);
}

boolean_t
ml_delay_should_spin(uint64_t interval)
{
	return (interval < delay_spin_threshold) ? TRUE : FALSE;
}

TUNABLE(uint32_t, yield_delay_us, "yield_delay_us", 0);

void
ml_delay_on_yield(void)
{
#if DEVELOPMENT || DEBUG
	if (yield_delay_us) {
		delay(yield_delay_us);
	}
#endif
}

/*
 * This is called from the machine-independent layer
 * to perform machine-dependent info updates. Defer to cpu_thread_init().
 */
void
ml_cpu_up(void)
{
	return;
}

/*
 * This is called from the machine-independent layer
 * to perform machine-dependent info updates.
 */
void
ml_cpu_down(void)
{
	i386_deactivate_cpu();

	return;
}

/*
 * The following are required for parts of the kernel
 * that cannot resolve these functions as inlines:
 */
extern thread_t current_act(void) __attribute__((const));
thread_t
current_act(void)
{
	return current_thread_fast();
}

#undef current_thread
extern thread_t current_thread(void) __attribute__((const));
thread_t
current_thread(void)
{
	return current_thread_fast();
}


boolean_t
ml_is64bit(void)
{
	return cpu_mode_is64bit();
}


boolean_t
ml_thread_is64bit(thread_t thread)
{
	return thread_is_64bit_addr(thread);
}


boolean_t
ml_state_is64bit(void *saved_state)
{
	return is_saved_state64(saved_state);
}

void
ml_cpu_set_ldt(int selector)
{
	/*
	 * Avoid loading the LDT
	 * if we're setting the KERNEL LDT and it's already set.
	 */
	if (selector == KERNEL_LDT &&
	    current_cpu_datap()->cpu_ldt == KERNEL_LDT) {
		return;
	}

	lldt(selector);
	current_cpu_datap()->cpu_ldt = selector;
}

void
ml_fp_setvalid(boolean_t value)
{
	fp_setvalid(value);
}

uint64_t
ml_cpu_int_event_time(void)
{
	return current_cpu_datap()->cpu_int_event_time;
}

vm_offset_t
ml_stack_remaining(void)
{
	uintptr_t local = (uintptr_t) &local;

	if (ml_at_interrupt_context() != 0) {
		return local - (current_cpu_datap()->cpu_int_stack_top - INTSTACK_SIZE);
	} else {
		return local - current_thread()->kernel_stack;
	}
}
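
/*
 * Added commentary (not in the original source): ml_stack_remaining() takes
 * the address of a local variable as a proxy for the current stack pointer
 * and subtracts the base of whichever stack is in use, the per-cpu interrupt
 * stack (cpu_int_stack_top - INTSTACK_SIZE) at interrupt context or the
 * thread's kernel_stack otherwise.  Stacks grow down, so the difference is
 * the number of bytes still available.
 */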

#if KASAN
vm_offset_t ml_stack_base(void);
vm_size_t ml_stack_size(void);

vm_offset_t
ml_stack_base(void)
{
	if (ml_at_interrupt_context()) {
		return current_cpu_datap()->cpu_int_stack_top - INTSTACK_SIZE;
	} else {
		return current_thread()->kernel_stack;
	}
}

vm_size_t
ml_stack_size(void)
{
	if (ml_at_interrupt_context()) {
		return INTSTACK_SIZE;
	} else {
		return kernel_stack_size;
	}
}
#endif

void
kernel_preempt_check(void)
{
	boolean_t intr;
	unsigned long flags;

	assert(get_preemption_level() == 0);

	if (__improbable(*ast_pending() & AST_URGENT)) {
		/*
		 * can handle interrupts and preemptions
		 * at this point
		 */
		__asm__ volatile ("pushf; pop %0" : "=r" (flags));

		intr = ((flags & EFL_IF) != 0);

		/*
		 * now cause the PRE-EMPTION trap
		 */
		if (intr == TRUE) {
			__asm__ volatile ("int %0" :: "N" (T_PREEMPT));
		}
	}
}

boolean_t
machine_timeout_suspended(void)
{
	return pmap_tlb_flush_timeout || spinlock_timed_out || panic_active() || mp_recent_debugger_activity() || ml_recent_wake();
}

/*
 * Eagerly evaluate all pending timer and thread callouts
 */
void
ml_timer_evaluate(void)
{
	KERNEL_DEBUG_CONSTANT(DECR_TIMER_RESCAN | DBG_FUNC_START, 0, 0, 0, 0, 0);

	uint64_t te_end, te_start = mach_absolute_time();
	simple_lock(&ml_timer_evaluation_slock, LCK_GRP_NULL);
	ml_timer_evaluation_in_progress = TRUE;
	thread_call_delayed_timer_rescan_all();
	mp_cpus_call(CPUMASK_ALL, ASYNC, timer_queue_expire_rescan, NULL);
	ml_timer_evaluation_in_progress = FALSE;
	ml_timer_eager_evaluations++;
	te_end = mach_absolute_time();
	ml_timer_eager_evaluation_max = MAX(ml_timer_eager_evaluation_max, (te_end - te_start));
	simple_unlock(&ml_timer_evaluation_slock);

	KERNEL_DEBUG_CONSTANT(DECR_TIMER_RESCAN | DBG_FUNC_END, 0, 0, 0, 0, 0);
}
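
/*
 * Added commentary (not in the original source): ml_timer_evaluate() rescans
 * all delayed thread calls and asks every CPU, via mp_cpus_call(), to rescan
 * its local timer queue.  The slock serializes concurrent eager evaluations,
 * and the evaluation counter and maximum duration are kept as diagnostics.
 */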

boolean_t
ml_timer_forced_evaluation(void)
{
	return ml_timer_evaluation_in_progress;
}

uint64_t
ml_energy_stat(__unused thread_t t)
{
	return 0;
}

void
ml_gpu_stat_update(uint64_t gpu_ns_delta)
{
	current_thread()->machine.thread_gpu_ns += gpu_ns_delta;
}

uint64_t
ml_gpu_stat(thread_t t)
{
	return t->machine.thread_gpu_ns;
}

int plctrace_enabled = 0;

void
_disable_preemption(void)
{
	disable_preemption_internal();
}

void
_enable_preemption(void)
{
	enable_preemption_internal();
}

void
plctrace_disable(void)
{
	plctrace_enabled = 0;
}

static boolean_t ml_quiescing;

void
ml_set_is_quiescing(boolean_t quiescing)
{
	ml_quiescing = quiescing;
}

boolean_t
ml_is_quiescing(void)
{
	return ml_quiescing;
}

uint64_t
ml_get_booter_memory_size(void)
{
	return 0;
}

void
machine_lockdown(void)
{
	x86_64_protect_data_const();
}

bool
ml_cpu_can_exit(__unused int cpu_id)
{
	return true;
}

void
ml_cpu_begin_state_transition(__unused int cpu_id)
{
}

void
ml_cpu_end_state_transition(__unused int cpu_id)
{
}

void
ml_cpu_begin_loop(void)
{
}

void
ml_cpu_end_loop(void)
{
}

size_t
ml_get_vm_reserved_regions(bool vm_is64bit, struct vm_reserved_region **regions)
{
#pragma unused(vm_is64bit)
	assert(regions != NULL);

	*regions = NULL;
	return 0;
}