]> git.saurik.com Git - apple/xnu.git/blame - osfmk/arm/machine_routines.c
xnu-7195.101.1.tar.gz
[apple/xnu.git] / osfmk / arm / machine_routines.c
CommitLineData
5ba3f43e
A
1/*
2 * Copyright (c) 2007-2016 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29#include <arm/proc_reg.h>
30#include <arm/machine_cpu.h>
31#include <arm/cpu_internal.h>
32#include <arm/cpuid.h>
33#include <arm/io_map_entries.h>
34#include <arm/cpu_data.h>
35#include <arm/cpu_data_internal.h>
f427ee49 36#include <arm/machine_routines.h>
5ba3f43e
A
37#include <arm/misc_protos.h>
38#include <arm/rtclock.h>
39#include <arm/caches_internal.h>
40#include <console/serial_protos.h>
41#include <kern/machine.h>
42#include <prng/random.h>
43#include <kern/startup.h>
44#include <kern/sched.h>
45#include <kern/thread.h>
46#include <mach/machine.h>
47#include <machine/atomic.h>
48#include <vm/pmap.h>
49#include <vm/vm_page.h>
f427ee49 50#include <vm/vm_map.h>
5ba3f43e
A
51#include <sys/kdebug.h>
52#include <kern/coalition.h>
53#include <pexpert/device_tree.h>
0a7de745 54#include <arm/cpuid_internal.h>
cb323159 55#include <arm/cpu_capabilities.h>
5ba3f43e
A
56
57#include <IOKit/IOPlatformExpert.h>
5ba3f43e
A
58
59#if KPC
60#include <kern/kpc.h>
61#endif
62
f427ee49
A
/* arm32 only supports a highly simplified topology, fixed at 1 cluster */
static ml_topology_cpu_t topology_cpu_array[MAX_CPUS];
/* The single SMP cluster; num_cpus/cpu_mask are filled in by ml_parse_cpu_topology(). */
static ml_topology_cluster_t topology_cluster = {
	.cluster_id = 0,
	.cluster_type = CLUSTER_TYPE_SMP,
	.first_cpu_id = 0,
};
/* Topology exported via ml_get_topology_info(); CPU counts populated at boot. */
static ml_topology_info_t topology_info = {
	.version = CPU_TOPOLOGY_VERSION,
	.num_clusters = 1,
	.max_cluster_id = 0,
	.cpus = topology_cpu_array,
	.clusters = &topology_cluster,
	.boot_cpu = &topology_cpu_array[0],
	.boot_cluster = &topology_cluster,
};
5ba3f43e
A
79
uint32_t LockTimeOut;           /* spin-lock timeout in absolutetime units; set in ml_init_lock_timeout() */
uint32_t LockTimeOutUsec;       /* the same timeout expressed in microseconds */
uint64_t TLockTimeOut;          /* set alongside LockTimeOut in ml_init_lock_timeout() */
uint64_t MutexSpin;             /* mutex adaptive-spin budget, absolutetime units */
extern uint32_t lockdown_done;  /* consulted by ml_static_protect() to refuse new executable mappings */
uint64_t low_MutexSpin;         /* lower spin bound; initialized equal to MutexSpin */
int64_t high_MutexSpin;         /* upper spin bound; kept == low_MutexSpin (see ml_init_lock_timeout()) */
87
5ba3f43e
A
88void
89machine_startup(__unused boot_args * args)
90{
5ba3f43e
A
91 machine_conf();
92
93 /*
94 * Kick off the kernel bootstrap.
95 */
96 kernel_bootstrap();
97 /* NOTREACHED */
98}
99
100char *
101machine_boot_info(
0a7de745
A
102 __unused char *buf,
103 __unused vm_size_t size)
5ba3f43e 104{
0a7de745 105 return PE_boot_args();
5ba3f43e
A
106}
107
0a7de745 108void
5ba3f43e
A
109slave_machine_init(__unused void *param)
110{
0a7de745
A
111 cpu_machine_init(); /* Initialize the processor */
112 clock_init(); /* Init the clock */
5ba3f43e
A
113}
114
115/*
116 * Routine: machine_processor_shutdown
117 * Function:
118 */
119thread_t
120machine_processor_shutdown(
0a7de745
A
121 __unused thread_t thread,
122 void (*doshutdown)(processor_t),
123 processor_t processor)
5ba3f43e 124{
0a7de745 125 return Shutdown_context(doshutdown, processor);
5ba3f43e
A
126}
127
5ba3f43e
A
/*
 * Routine: ml_init_lock_timeout
 * Function: Compute the spin-lock and mutex-spin timeouts from the
 * defaults and the "slto_us"/"mtxspin" boot-args.
 */
void
ml_init_lock_timeout(void)
{
	uint64_t abstime;
	uint64_t mtxspin;
	uint64_t default_timeout_ns = NSEC_PER_SEC >> 2; /* default: 1/4 second */
	uint32_t slto;

	/* "slto_us" boot-arg overrides the spin-lock timeout (in microseconds). */
	if (PE_parse_boot_argn("slto_us", &slto, sizeof(slto))) {
		default_timeout_ns = slto * NSEC_PER_USEC;
	}

	nanoseconds_to_absolutetime(default_timeout_ns, &abstime);
	LockTimeOutUsec = (uint32_t)(default_timeout_ns / NSEC_PER_USEC);
	LockTimeOut = (uint32_t)abstime;
	TLockTimeOut = LockTimeOut;

	/* "mtxspin" boot-arg (microseconds) sets the mutex spin budget, capped at 1/16 s. */
	if (PE_parse_boot_argn("mtxspin", &mtxspin, sizeof(mtxspin))) {
		if (mtxspin > USEC_PER_SEC >> 4) {
			mtxspin = USEC_PER_SEC >> 4;
		}
		nanoseconds_to_absolutetime(mtxspin * NSEC_PER_USEC, &abstime);
	} else {
		/* Default mutex spin budget: 10 microseconds. */
		nanoseconds_to_absolutetime(10 * NSEC_PER_USEC, &abstime);
	}
	MutexSpin = abstime;
	low_MutexSpin = MutexSpin;
	/*
	 * high_MutexSpin should be initialized as low_MutexSpin * real_ncpus, but
	 * real_ncpus is not set at this time
	 *
	 * NOTE: active spinning is disabled in arm. It can be activated
	 * by setting high_MutexSpin through the sysctl.
	 */
	high_MutexSpin = low_MutexSpin;
}
168
c3c9b80d
A
/*
 * This is called when all of the ml_processor_info_t structures have been
 * initialized and all the processors have been started through processor_start().
 *
 * Required by the scheduler subsystem.
 */
void
ml_cpu_init_completed(void)
{
	/* Nothing to do on arm32. */
}
179
5ba3f43e
A
180/*
181 * This is called from the machine-independent routine cpu_up()
182 * to perform machine-dependent info updates.
183 */
184void
185ml_cpu_up(void)
186{
cb323159
A
187 os_atomic_inc(&machine_info.physical_cpu, relaxed);
188 os_atomic_inc(&machine_info.logical_cpu, relaxed);
5ba3f43e
A
189}
190
/*
 * This is called from the machine-independent routine cpu_down()
 * to perform machine-dependent info updates.
 */
void
ml_cpu_down(void)
{
	cpu_data_t *cpu_data_ptr;

	/* This core no longer counts toward the active CPU totals. */
	os_atomic_dec(&machine_info.physical_cpu, relaxed);
	os_atomic_dec(&machine_info.logical_cpu, relaxed);

	/*
	 * If we want to deal with outstanding IPIs, we need to
	 * do relatively early in the processor_doshutdown path,
	 * as we pend decrementer interrupts using the IPI
	 * mechanism if we cannot immediately service them (if
	 * IRQ is masked). Do so now.
	 *
	 * We aren't on the interrupt stack here; would it make
	 * more sense to disable signaling and then enable
	 * interrupts? It might be a bit cleaner.
	 */
	cpu_data_ptr = getCpuDatap();
	cpu_data_ptr->cpu_running = FALSE;

	/* Drain any pending cross-CPU signals before the core goes offline. */
	cpu_signal_handler_internal(TRUE);
}
219
220/*
221 * Routine: ml_cpu_get_info
222 * Function:
223 */
224void
225ml_cpu_get_info(ml_cpu_info_t * ml_cpu_info)
226{
227 cache_info_t *cpuid_cache_info;
228
229 cpuid_cache_info = cache_info();
230 ml_cpu_info->vector_unit = 0;
231 ml_cpu_info->cache_line_size = cpuid_cache_info->c_linesz;
232 ml_cpu_info->l1_icache_size = cpuid_cache_info->c_isize;
233 ml_cpu_info->l1_dcache_size = cpuid_cache_info->c_dsize;
234
235#if (__ARM_ARCH__ >= 7)
236 ml_cpu_info->l2_settings = 1;
237 ml_cpu_info->l2_cache_size = cpuid_cache_info->c_l2size;
238#else
239 ml_cpu_info->l2_settings = 0;
240 ml_cpu_info->l2_cache_size = 0xFFFFFFFF;
241#endif
242 ml_cpu_info->l3_settings = 0;
243 ml_cpu_info->l3_cache_size = 0xFFFFFFFF;
244}
245
246unsigned int
247ml_get_machine_mem(void)
248{
0a7de745 249 return machine_info.memory_size;
5ba3f43e
A
250}
251
252/* Return max offset */
253vm_map_offset_t
254ml_get_max_offset(
0a7de745 255 boolean_t is64,
5ba3f43e
A
256 unsigned int option)
257{
0a7de745 258 unsigned int pmap_max_offset_option = 0;
5ba3f43e
A
259
260 switch (option) {
261 case MACHINE_MAX_OFFSET_DEFAULT:
262 pmap_max_offset_option = ARM_PMAP_MAX_OFFSET_DEFAULT;
0a7de745
A
263 break;
264 case MACHINE_MAX_OFFSET_MIN:
5ba3f43e 265 pmap_max_offset_option = ARM_PMAP_MAX_OFFSET_MIN;
0a7de745
A
266 break;
267 case MACHINE_MAX_OFFSET_MAX:
5ba3f43e 268 pmap_max_offset_option = ARM_PMAP_MAX_OFFSET_MAX;
0a7de745
A
269 break;
270 case MACHINE_MAX_OFFSET_DEVICE:
5ba3f43e 271 pmap_max_offset_option = ARM_PMAP_MAX_OFFSET_DEVICE;
0a7de745
A
272 break;
273 default:
5ba3f43e 274 panic("ml_get_max_offset(): Illegal option 0x%x\n", option);
0a7de745
A
275 break;
276 }
5ba3f43e
A
277 return pmap_max_offset(is64, pmap_max_offset_option);
278}
279
5ba3f43e
A
280void
281ml_panic_trap_to_debugger(__unused const char *panic_format_str,
0a7de745
A
282 __unused va_list *panic_args,
283 __unused unsigned int reason,
284 __unused void *ctx,
285 __unused uint64_t panic_options_mask,
286 __unused unsigned long panic_caller)
5ba3f43e
A
287{
288 return;
289}
290
291__attribute__((noreturn))
292void
293halt_all_cpus(boolean_t reboot)
294{
295 if (reboot) {
296 printf("MACH Reboot\n");
297 PEHaltRestart(kPERestartCPU);
298 } else {
299 printf("CPU halted\n");
300 PEHaltRestart(kPEHaltCPU);
301 }
0a7de745
A
302 while (1) {
303 ;
304 }
5ba3f43e
A
305}
306
307__attribute__((noreturn))
308void
309halt_cpu(void)
310{
311 halt_all_cpus(FALSE);
312}
313
314/*
315 * Routine: machine_signal_idle
316 * Function:
317 */
318void
319machine_signal_idle(
0a7de745 320 processor_t processor)
5ba3f43e
A
321{
322 cpu_signal(processor_to_cpu_datap(processor), SIGPnop, (void *)NULL, (void *)NULL);
323 KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_REMOTE_AST), processor->cpu_id, 0 /* nop */, 0, 0, 0);
324}
325
326void
327machine_signal_idle_deferred(
0a7de745 328 processor_t processor)
5ba3f43e
A
329{
330 cpu_signal_deferred(processor_to_cpu_datap(processor));
331 KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_REMOTE_DEFERRED_AST), processor->cpu_id, 0 /* nop */, 0, 0, 0);
332}
333
334void
335machine_signal_idle_cancel(
0a7de745 336 processor_t processor)
5ba3f43e
A
337{
338 cpu_signal_cancel(processor_to_cpu_datap(processor));
339 KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_REMOTE_CANCEL_AST), processor->cpu_id, 0 /* nop */, 0, 0, 0);
340}
341
342/*
343 * Routine: ml_install_interrupt_handler
344 * Function: Initialize Interrupt Handler
345 */
0a7de745 346void
5ba3f43e 347ml_install_interrupt_handler(
0a7de745
A
348 void *nub,
349 int source,
350 void *target,
351 IOInterruptHandler handler,
352 void *refCon)
5ba3f43e
A
353{
354 cpu_data_t *cpu_data_ptr;
355 boolean_t current_state;
356
357 current_state = ml_set_interrupts_enabled(FALSE);
358 cpu_data_ptr = getCpuDatap();
359
360 cpu_data_ptr->interrupt_nub = nub;
361 cpu_data_ptr->interrupt_source = source;
362 cpu_data_ptr->interrupt_target = target;
363 cpu_data_ptr->interrupt_handler = handler;
364 cpu_data_ptr->interrupt_refCon = refCon;
365
5ba3f43e 366 (void) ml_set_interrupts_enabled(current_state);
5ba3f43e
A
367}
368
/*
 * Routine: ml_init_interrupt
 * Function: Initialize Interrupts
 */
void
ml_init_interrupt(void)
{
	/* Nothing to do on arm32. */
}
377
378/*
379 * Routine: ml_init_timebase
380 * Function: register and setup Timebase, Decremeter services
381 */
0a7de745
A
382void
383ml_init_timebase(
384 void *args,
385 tbd_ops_t tbd_funcs,
386 vm_offset_t int_address,
387 vm_offset_t int_value)
5ba3f43e
A
388{
389 cpu_data_t *cpu_data_ptr;
390
391 cpu_data_ptr = (cpu_data_t *)args;
392
393 if ((cpu_data_ptr == &BootCpuData)
394 && (rtclock_timebase_func.tbd_fiq_handler == (void *)NULL)) {
395 rtclock_timebase_func = *tbd_funcs;
396 rtclock_timebase_addr = int_address;
397 rtclock_timebase_val = int_value;
398 }
399}
400
/*
 * Parse the /cpus node of the device tree and populate the fixed,
 * single-cluster arm32 topology structures. The "cpus" boot-arg can cap
 * the number of CPUs used. Panics if no CPU entry is found.
 */
void
ml_parse_cpu_topology(void)
{
	DTEntry entry, child;
	OpaqueDTEntryIterator iter;
	uint32_t cpu_boot_arg;
	int err;

	err = SecureDTLookupEntry(NULL, "/cpus", &entry);
	assert(err == kSuccess);

	err = SecureDTInitEntryIterator(entry, &iter);
	assert(err == kSuccess);

	/* Optionally limit the CPU count via the "cpus" boot-arg. */
	cpu_boot_arg = MAX_CPUS;
	PE_parse_boot_argn("cpus", &cpu_boot_arg, sizeof(cpu_boot_arg));

	ml_topology_cluster_t *cluster = &topology_info.clusters[0];
	unsigned int cpu_id = 0;
	while (kSuccess == SecureDTIterateEntries(&iter, &child)) {
#if MACH_ASSERT
		unsigned int propSize;
		void const *prop = NULL;
		/* The boot CPU (id 0) must already be marked "running" in the DT. */
		if (cpu_id == 0) {
			if (kSuccess != SecureDTGetProperty(child, "state", &prop, &propSize)) {
				panic("unable to retrieve state for cpu %u", cpu_id);
			}

			if (strncmp((char const *)prop, "running", propSize) != 0) {
				panic("cpu 0 has not been marked as running!");
			}
		}
		/* DT entries are expected in "reg" order matching our sequential IDs. */
		assert(kSuccess == SecureDTGetProperty(child, "reg", &prop, &propSize));
		assert(cpu_id == *((uint32_t const *)prop));
#endif
		if (cpu_id >= cpu_boot_arg) {
			break;
		}

		ml_topology_cpu_t *cpu = &topology_info.cpus[cpu_id];

		/* arm32: logical ID, physical ID, and cluster slot all coincide. */
		cpu->cpu_id = cpu_id;
		cpu->phys_id = cpu_id;
		cpu->cluster_type = cluster->cluster_type;

		cluster->num_cpus++;
		cluster->cpu_mask |= 1ULL << cpu_id;

		topology_info.num_cpus++;
		topology_info.max_cpu_id = cpu_id;

		cpu_id++;
	}

	if (cpu_id == 0) {
		panic("No cpus found!");
	}
}
459
f427ee49
A
460const ml_topology_info_t *
461ml_get_topology_info(void)
462{
463 return &topology_info;
464}
465
5ba3f43e
A
466unsigned int
467ml_get_cpu_count(void)
468{
f427ee49
A
469 return topology_info.num_cpus;
470}
471
472unsigned int
473ml_get_cluster_count(void)
474{
475 return topology_info.num_clusters;
5ba3f43e
A
476}
477
/* arm32: the boot CPU is always logical CPU 0. */
int
ml_get_boot_cpu_number(void)
{
	return 0;
}
483
484cluster_type_t
485ml_get_boot_cluster(void)
486{
487 return CLUSTER_TYPE_SMP;
488}
489
490int
491ml_get_cpu_number(uint32_t phys_id)
492{
f427ee49
A
493 if (phys_id > (uint32_t)ml_get_max_cpu_number()) {
494 return -1;
495 }
496
5ba3f43e
A
497 return (int)phys_id;
498}
499
f427ee49
A
500int
501ml_get_cluster_number(__unused uint32_t phys_id)
502{
503 return 0;
504}
505
5ba3f43e
A
506int
507ml_get_max_cpu_number(void)
508{
f427ee49
A
509 return topology_info.num_cpus - 1;
510}
511
512int
513ml_get_max_cluster_number(void)
514{
515 return topology_info.max_cluster_id;
516}
517
518unsigned int
519ml_get_first_cpu_id(unsigned int cluster_id)
520{
521 return topology_info.clusters[cluster_id].first_cpu_id;
5ba3f43e
A
522}
523
/*
 * Register a processor with the machine layer: allocate/initialize its
 * cpu_data, copy platform callbacks, and hand back the Mach processor,
 * IPI handler, and (null) PMI handler. Returns KERN_FAILURE if the CPU
 * was excluded by the "cpus" boot-arg or if allocation/registration fails.
 */
kern_return_t
ml_processor_register(ml_processor_info_t *in_processor_info,
    processor_t * processor_out, ipi_handler_t *ipi_handler_out,
    perfmon_interrupt_handler_func *pmi_handler_out)
{
	cpu_data_t *this_cpu_datap;
	boolean_t is_boot_cpu;

	const unsigned int max_cpu_id = ml_get_max_cpu_number();
	if (in_processor_info->phys_id > max_cpu_id) {
		/*
		 * The physical CPU ID indicates that we have more CPUs than
		 * this xnu build support. This probably means we have an
		 * incorrect board configuration.
		 *
		 * TODO: Should this just return a failure instead? A panic
		 * is simply a convenient way to catch bugs in the pexpert
		 * headers.
		 */
		panic("phys_id %u is too large for max_cpu_id (%u)", in_processor_info->phys_id, max_cpu_id);
	}

	/* Fail the registration if the number of CPUs has been limited by boot-arg. */
	if ((in_processor_info->phys_id >= topology_info.num_cpus) ||
	    (in_processor_info->log_id > (uint32_t)ml_get_max_cpu_number())) {
		return KERN_FAILURE;
	}

	/* Boot CPU uses the static BootCpuData; others allocate fresh cpu_data. */
	if (in_processor_info->log_id != (uint32_t)ml_get_boot_cpu_number()) {
		is_boot_cpu = FALSE;
		this_cpu_datap = cpu_data_alloc(FALSE);
		cpu_data_init(this_cpu_datap);
	} else {
		this_cpu_datap = &BootCpuData;
		is_boot_cpu = TRUE;
	}

	this_cpu_datap->cpu_id = in_processor_info->cpu_id;

	this_cpu_datap->cpu_console_buf = console_cpu_alloc(is_boot_cpu);
	if (this_cpu_datap->cpu_console_buf == (void *)(NULL)) {
		goto processor_register_error;
	}

	if (!is_boot_cpu) {
		if (cpu_data_register(this_cpu_datap) != KERN_SUCCESS) {
			goto processor_register_error;
		}
	}

	/* Copy platform-supplied callbacks and parameters into the per-CPU data. */
	this_cpu_datap->cpu_idle_notify = in_processor_info->processor_idle;
	this_cpu_datap->cpu_cache_dispatch = (cache_dispatch_t) in_processor_info->platform_cache_dispatch;
	nanoseconds_to_absolutetime((uint64_t) in_processor_info->powergate_latency, &this_cpu_datap->cpu_idle_latency);
	this_cpu_datap->cpu_reset_assist = kvtophys(in_processor_info->powergate_stub_addr);

	this_cpu_datap->idle_timer_notify = in_processor_info->idle_timer;
	this_cpu_datap->idle_timer_refcon = in_processor_info->idle_timer_refcon;

	this_cpu_datap->platform_error_handler = in_processor_info->platform_error_handler;
	this_cpu_datap->cpu_regmap_paddr = in_processor_info->regmap_paddr;
	this_cpu_datap->cpu_phys_id = in_processor_info->phys_id;
	this_cpu_datap->cpu_l2_access_penalty = in_processor_info->l2_access_penalty;

	processor_t processor = PERCPU_GET_RELATIVE(processor, cpu_data, this_cpu_datap);
	if (!is_boot_cpu) {
		processor_init(processor, this_cpu_datap->cpu_number,
		    processor_pset(master_processor));

		if (this_cpu_datap->cpu_l2_access_penalty) {
			/*
			 * Cores that have a non-zero L2 access penalty compared
			 * to the boot processor should be de-prioritized by the
			 * scheduler, so that threads use the cores with better L2
			 * preferentially.
			 */
			processor_set_primary(processor, master_processor);
		}
	}

	*processor_out = processor;
	*ipi_handler_out = cpu_signal_handler;
	*pmi_handler_out = NULL;
	if (in_processor_info->idle_tickle != (idle_tickle_t *) NULL) {
		*in_processor_info->idle_tickle = (idle_tickle_t) cpu_idle_tickle;
	}

#if KPC
	if (kpc_register_cpu(this_cpu_datap) != TRUE) {
		goto processor_register_error;
	}
#endif

	/* Seed this CPU's random state (boot CPU was seeded earlier). */
	if (!is_boot_cpu) {
		random_cpu_init(this_cpu_datap->cpu_number);
	}

	return KERN_SUCCESS;

processor_register_error:
#if KPC
	kpc_unregister_cpu(this_cpu_datap);
#endif
	if (!is_boot_cpu) {
		cpu_data_free(this_cpu_datap);
	}
	return KERN_FAILURE;
}
631
632void
633ml_init_arm_debug_interface(
0a7de745
A
634 void * in_cpu_datap,
635 vm_offset_t virt_address)
5ba3f43e
A
636{
637 ((cpu_data_t *)in_cpu_datap)->cpu_debug_interface_map = virt_address;
638 do_debugid();
639}
640
/*
 * Routine: init_ast_check
 * Function: No per-processor AST state needs initialization on arm32.
 */
void
init_ast_check(
	__unused processor_t processor)
{
}
650
651/*
652 * Routine: cause_ast_check
653 * Function:
654 */
655void
656cause_ast_check(
0a7de745 657 processor_t processor)
5ba3f43e
A
658{
659 if (current_processor() != processor) {
660 cpu_signal(processor_to_cpu_datap(processor), SIGPast, (void *)NULL, (void *)NULL);
661 KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_REMOTE_AST), processor->cpu_id, 1 /* ast */, 0, 0, 0);
662 }
663}
664
5ba3f43e
A
665extern uint32_t cpu_idle_count;
666
0a7de745
A
667void
668ml_get_power_state(boolean_t *icp, boolean_t *pidlep)
669{
5ba3f43e
A
670 *icp = ml_at_interrupt_context();
671 *pidlep = (cpu_idle_count == real_ncpus);
672}
673
/*
 * Routine: ml_cause_interrupt
 * Function: Generate a fake interrupt — not implemented on this platform.
 */
void
ml_cause_interrupt(void)
{
}
683
684/* Map memory map IO space */
685vm_offset_t
686ml_io_map(
0a7de745
A
687 vm_offset_t phys_addr,
688 vm_size_t size)
5ba3f43e 689{
0a7de745 690 return io_map(phys_addr, size, VM_WIMG_IO);
5ba3f43e
A
691}
692
cb323159
A
693/* Map memory map IO space (with protections specified) */
694vm_offset_t
695ml_io_map_with_prot(
696 vm_offset_t phys_addr,
697 vm_size_t size,
698 vm_prot_t prot)
699{
700 return io_map_with_prot(phys_addr, size, VM_WIMG_IO, prot);
701}
702
5ba3f43e
A
703vm_offset_t
704ml_io_map_wcomb(
0a7de745
A
705 vm_offset_t phys_addr,
706 vm_size_t size)
5ba3f43e 707{
0a7de745 708 return io_map(phys_addr, size, VM_WIMG_WCOMB);
5ba3f43e
A
709}
710
f427ee49
A
711void
712ml_io_unmap(vm_offset_t addr, vm_size_t sz)
713{
714 pmap_remove(kernel_pmap, addr, addr + sz);
715 kmem_free(kernel_map, addr, sz);
716}
717
5ba3f43e 718/* boot memory allocation */
0a7de745 719vm_offset_t
5ba3f43e 720ml_static_malloc(
0a7de745 721 __unused vm_size_t size)
5ba3f43e 722{
0a7de745 723 return (vm_offset_t) NULL;
5ba3f43e
A
724}
725
726vm_map_address_t
727ml_map_high_window(
0a7de745
A
728 vm_offset_t phys_addr,
729 vm_size_t len)
5ba3f43e
A
730{
731 return pmap_map_high_window_bd(phys_addr, len, VM_PROT_READ | VM_PROT_WRITE);
732}
733
734vm_offset_t
735ml_static_ptovirt(
0a7de745 736 vm_offset_t paddr)
5ba3f43e
A
737{
738 return phystokv(paddr);
739}
740
741vm_offset_t
742ml_static_vtop(
0a7de745 743 vm_offset_t vaddr)
5ba3f43e 744{
cb323159 745 assertf(((vm_address_t)(vaddr) - gVirtBase) < gPhysSize, "%s: illegal vaddr: %p", __func__, (void*)vaddr);
0a7de745 746 return (vm_address_t)(vaddr) - gVirtBase + gPhysBase;
5ba3f43e
A
747}
748
cb323159
A
749/*
750 * Return the maximum contiguous KVA range that can be accessed from this
751 * physical address. For arm64, we employ a segmented physical aperture
752 * relocation table which can limit the available range for a given PA to
753 * something less than the extent of physical memory. But here, we still
754 * have a flat physical aperture, so no such requirement exists.
755 */
756vm_map_address_t
757phystokv_range(pmap_paddr_t pa, vm_size_t *max_len)
758{
759 vm_size_t len = gPhysSize - (pa - gPhysBase);
760 if (*max_len > len) {
761 *max_len = len;
762 }
763 assertf((pa - gPhysBase) < gPhysSize, "%s: illegal PA: 0x%lx", __func__, (unsigned long)pa);
764 return pa - gPhysBase + gVirtBase;
765}
766
d9a64523
A
767vm_offset_t
768ml_static_slide(
769 vm_offset_t vaddr)
770{
771 return VM_KERNEL_SLIDE(vaddr);
772}
773
f427ee49
A
774kern_return_t
775ml_static_verify_page_protections(
776 uint64_t base, uint64_t size, vm_prot_t prot)
777{
778 /* XXX Implement Me */
779 (void)base;
780 (void)size;
781 (void)prot;
782 return KERN_FAILURE;
783}
784
785
d9a64523
A
786vm_offset_t
787ml_static_unslide(
788 vm_offset_t vaddr)
789{
790 return VM_KERNEL_UNSLIDE(vaddr);
791}
5ba3f43e
A
792
/*
 * Change the protections on a range of statically-mapped kernel memory.
 * vaddr must be page aligned; W+X requests panic, as do executable requests
 * after lockdown. Returns KERN_FAILURE if the range is below the kernel VA
 * space or covers a block mapping whose protections don't already match.
 * Only whole pages are modified; a trailing partial page is skipped.
 */
kern_return_t
ml_static_protect(
	vm_offset_t vaddr, /* kernel virtual address */
	vm_size_t size,
	vm_prot_t new_prot)
{
	pt_entry_t arm_prot = 0;
	pt_entry_t arm_block_prot = 0;
	vm_offset_t vaddr_cur;
	ppnum_t ppn;
	kern_return_t result = KERN_SUCCESS;

	if (vaddr < VM_MIN_KERNEL_ADDRESS) {
		return KERN_FAILURE;
	}

	assert((vaddr & (ARM_PGBYTES - 1)) == 0); /* must be page aligned */

	if ((new_prot & VM_PROT_WRITE) && (new_prot & VM_PROT_EXECUTE)) {
		panic("ml_static_protect(): WX request on %p", (void *) vaddr);
	}
	if (lockdown_done && (new_prot & VM_PROT_EXECUTE)) {
		panic("ml_static_protect(): attempt to inject executable mapping on %p", (void *) vaddr);
	}

	/* Set up the protection bits, and block bits so we can validate block mappings. */
	if (new_prot & VM_PROT_WRITE) {
		arm_prot |= ARM_PTE_AP(AP_RWNA);
		arm_block_prot |= ARM_TTE_BLOCK_AP(AP_RWNA);
	} else {
		arm_prot |= ARM_PTE_AP(AP_RONA);
		arm_block_prot |= ARM_TTE_BLOCK_AP(AP_RONA);
	}

	if (!(new_prot & VM_PROT_EXECUTE)) {
		arm_prot |= ARM_PTE_NX;
		arm_block_prot |= ARM_TTE_BLOCK_NX;
	}

	/* Walk the range page by page, rounding the end down to a page boundary. */
	for (vaddr_cur = vaddr;
	    vaddr_cur < ((vaddr + size) & ~ARM_PGMASK);
	    vaddr_cur += ARM_PGBYTES) {
		ppn = pmap_find_phys(kernel_pmap, vaddr_cur);
		if (ppn != (vm_offset_t) NULL) {
			tt_entry_t *ttp = &kernel_pmap->tte[ttenum(vaddr_cur)];
			tt_entry_t tte = *ttp;

			if ((tte & ARM_TTE_TYPE_MASK) != ARM_TTE_TYPE_TABLE) {
				if (((tte & ARM_TTE_TYPE_MASK) == ARM_TTE_TYPE_BLOCK) &&
				    ((tte & (ARM_TTE_BLOCK_APMASK | ARM_TTE_BLOCK_NX_MASK)) == arm_block_prot)) {
					/*
					 * We can support ml_static_protect on a block mapping if the mapping already has
					 * the desired protections. We still want to run checks on a per-page basis.
					 */
					continue;
				}

				result = KERN_FAILURE;
				break;
			}

			/* Leaf mapping: rewrite the AP/NX bits in place. */
			pt_entry_t *pte_p = (pt_entry_t *) ttetokv(tte) + ptenum(vaddr_cur);
			pt_entry_t ptmp = *pte_p;

			ptmp = (ptmp & ~(ARM_PTE_APMASK | ARM_PTE_NX_MASK)) | arm_prot;
			*pte_p = ptmp;
		}
	}

	/* Invalidate TLB entries for any pages actually visited. */
	if (vaddr_cur > vaddr) {
		flush_mmu_tlb_region(vaddr, (vm_size_t)(vaddr_cur - vaddr));
	}

	return result;
}
868
/*
 * Routine: ml_static_mfree
 * Function: Release a range of boot-time static kernel memory to the VM
 * free list. vaddr must be page aligned; only whole pages are freed, and
 * wired-page accounting is adjusted to match.
 */
void
ml_static_mfree(
	vm_offset_t vaddr,
	vm_size_t size)
{
	vm_offset_t vaddr_cur;
	ppnum_t ppn;
	uint32_t freed_pages = 0;
	uint32_t freed_kernelcache_pages = 0;

	/* It is acceptable (if bad) to fail to free. */
	if (vaddr < VM_MIN_KERNEL_ADDRESS) {
		return;
	}

	assert((vaddr & (PAGE_SIZE - 1)) == 0); /* must be page aligned */

	for (vaddr_cur = vaddr;
	    vaddr_cur < trunc_page_32(vaddr + size);
	    vaddr_cur += PAGE_SIZE) {
		ppn = pmap_find_phys(kernel_pmap, vaddr_cur);
		if (ppn != (vm_offset_t) NULL) {
			/*
			 * It is not acceptable to fail to update the protections on a page
			 * we will release to the VM. We need to either panic or continue.
			 * For now, we'll panic (to help flag if there is memory we can
			 * reclaim).
			 */
			if (ml_static_protect(vaddr_cur, PAGE_SIZE, VM_PROT_WRITE | VM_PROT_READ) != KERN_SUCCESS) {
				panic("Failed ml_static_mfree on %p", (void *) vaddr_cur);
			}
			/* Hand the page to the VM as a newly-created free page. */
			vm_page_create(ppn, (ppn + 1));
			freed_pages++;
			/* Pages inside the kernelcache image are counted separately. */
			if (vaddr_cur >= segLOWEST && vaddr_cur < end_kern) {
				freed_kernelcache_pages++;
			}
		}
	}
	/* Adjust the wired-page accounting under the page-queues lock. */
	vm_page_lockspin_queues();
	vm_page_wire_count -= freed_pages;
	vm_page_wire_count_initial -= freed_pages;
	vm_page_kernelcache_count -= freed_kernelcache_pages;
	vm_page_unlock_queues();
#if DEBUG
	kprintf("ml_static_mfree: Released 0x%x pages at VA %p, size:0x%llx, last ppn: 0x%x\n", freed_pages, (void *)vaddr, (uint64_t)size, ppn);
#endif
}
920
921
922/* virtual to physical on wired pages */
923vm_offset_t
924ml_vtophys(vm_offset_t vaddr)
925{
926 return kvtophys(vaddr);
927}
928
/*
 * Routine: ml_nofault_copy
 * Function: Perform a physical mode copy if the source and destination have
 * valid translations in the kernel pmap. If translations are present, they are
 * assumed to be wired; e.g., no attempt is made to guarantee that the
 * translations obtained remain valid for the duration of the copy process.
 * Returns the number of bytes copied, which may be less than size if a
 * translation is missing or invalid.
 */
vm_size_t
ml_nofault_copy(vm_offset_t virtsrc, vm_offset_t virtdst, vm_size_t size)
{
	addr64_t cur_phys_dst, cur_phys_src;
	uint32_t count, nbytes = 0;

	while (size > 0) {
		/* Stop at the first untranslatable or invalid page on either side. */
		if (!(cur_phys_src = kvtophys(virtsrc))) {
			break;
		}
		if (!(cur_phys_dst = kvtophys(virtdst))) {
			break;
		}
		if (!pmap_valid_address(trunc_page_64(cur_phys_dst)) ||
		    !pmap_valid_address(trunc_page_64(cur_phys_src))) {
			break;
		}
		/* Copy at most up to the nearer page boundary of the two buffers. */
		count = PAGE_SIZE - (cur_phys_src & PAGE_MASK);
		if (count > (PAGE_SIZE - (cur_phys_dst & PAGE_MASK))) {
			count = PAGE_SIZE - (cur_phys_dst & PAGE_MASK);
		}
		if (count > size) {
			count = size;
		}

		bcopy_phys(cur_phys_src, cur_phys_dst, count);

		nbytes += count;
		virtsrc += count;
		virtdst += count;
		size -= count;
	}

	return nbytes;
}
971
972/*
973 * Routine: ml_validate_nofault
974 * Function: Validate that ths address range has a valid translations
975 * in the kernel pmap. If translations are present, they are
976 * assumed to be wired; i.e. no attempt is made to guarantee
977 * that the translation persist after the check.
978 * Returns: TRUE if the range is mapped and will not cause a fault,
979 * FALSE otherwise.
980 */
981
0a7de745
A
982boolean_t
983ml_validate_nofault(
5ba3f43e
A
984 vm_offset_t virtsrc, vm_size_t size)
985{
986 addr64_t cur_phys_src;
987 uint32_t count;
988
989 while (size > 0) {
0a7de745 990 if (!(cur_phys_src = kvtophys(virtsrc))) {
5ba3f43e 991 return FALSE;
0a7de745
A
992 }
993 if (!pmap_valid_address(trunc_page_64(cur_phys_src))) {
5ba3f43e 994 return FALSE;
0a7de745 995 }
5ba3f43e 996 count = (uint32_t)(PAGE_SIZE - (cur_phys_src & PAGE_MASK));
0a7de745 997 if (count > size) {
5ba3f43e 998 count = (uint32_t)size;
0a7de745 999 }
5ba3f43e
A
1000
1001 virtsrc += count;
1002 size -= count;
1003 }
1004
1005 return TRUE;
1006}
1007
1008void
1009ml_get_bouncepool_info(vm_offset_t * phys_addr, vm_size_t * size)
1010{
1011 *phys_addr = 0;
1012 *size = 0;
1013}
1014
/*
 * Stubs for CPU Stepper
 */
void
active_rt_threads(__unused boolean_t active)
{
	/* No CPU-stepper support on arm32. */
}
1022
/* Scheduler urgency notification — no machine-level action on arm32. */
void
thread_tell_urgency(__unused thread_urgency_t urgency,
    __unused uint64_t rt_period,
    __unused uint64_t rt_deadline,
    __unused uint64_t sched_latency,
    __unused thread_t nthread)
{
}
1031
/* Run-count notification from the scheduler — unused on arm32. */
void
machine_run_count(__unused uint32_t count)
{
}
1036
1037processor_t
1038machine_choose_processor(__unused processor_set_t pset, processor_t processor)
1039{
0a7de745 1040 return processor;
5ba3f43e
A
1041}
1042
0a7de745
A
1043boolean_t
1044machine_timeout_suspended(void)
1045{
5ba3f43e
A
1046 return FALSE;
1047}
1048
0a7de745
A
1049kern_return_t
1050ml_interrupt_prewarm(__unused uint64_t deadline)
5ba3f43e
A
1051{
1052 return KERN_FAILURE;
1053}
1054
/*
 * Read the 64-bit hardware counter (MRRC p15, 0, c14 — presumably the
 * generic-timer physical count; confirm against the ARMv7 ARM). The
 * counter is read in 32-bit halves, so re-read the high word until it is
 * stable to guard against a carry between the reads.
 */
uint64_t
ml_get_hwclock(void)
{
	uint64_t high_first = 0;
	uint64_t high_second = 0;
	uint64_t low = 0;

	/* Order the counter read after prior instructions. */
	__builtin_arm_isb(ISB_SY);

	do {
		high_first = __builtin_arm_mrrc(15, 0, 14) >> 32;
		low = __builtin_arm_mrrc(15, 0, 14) & 0xFFFFFFFFULL;
		high_second = __builtin_arm_mrrc(15, 0, 14) >> 32;
	} while (high_first != high_second);

	return (high_first << 32) | (low);
}
1072
1073boolean_t
1074ml_delay_should_spin(uint64_t interval)
1075{
1076 cpu_data_t *cdp = getCpuDatap();
1077
1078 if (cdp->cpu_idle_latency) {
1079 return (interval < cdp->cpu_idle_latency) ? TRUE : FALSE;
1080 } else {
1081 /*
1082 * Early boot, latency is unknown. Err on the side of blocking,
1083 * which should always be safe, even if slow
1084 */
1085 return FALSE;
1086 }
1087}
1088
/* Hook called while yielding in a delay loop — nothing to do on arm32. */
void
ml_delay_on_yield(void)
{
}
e8c3f781 1093
0a7de745
A
1094boolean_t
1095ml_thread_is64bit(thread_t thread)
5ba3f43e 1096{
0a7de745 1097 return thread_is_64bit_addr(thread);
5ba3f43e
A
1098}
1099
/* Timer-queue re-evaluation hook — nothing to do on arm32. */
void
ml_timer_evaluate(void)
{
}
1104
/*
 * Report whether a forced timer evaluation is in progress; never the
 * case on this platform.
 */
boolean_t
ml_timer_forced_evaluation(void)
{
	return FALSE;
}
1110
/*
 * Per-thread energy statistics are not tracked on this platform;
 * always returns 0.
 */
uint64_t
ml_energy_stat(__unused thread_t t)
{
	return 0;
}
1116
1117
1118void
0a7de745
A
1119ml_gpu_stat_update(__unused uint64_t gpu_ns_delta)
1120{
5ba3f43e
A
1121 /*
1122 * For now: update the resource coalition stats of the
1123 * current thread's coalition
1124 */
1125 task_coalition_update_gpu_stats(current_task(), gpu_ns_delta);
5ba3f43e
A
1126}
1127
/*
 * Per-thread GPU statistics are not tracked on this platform;
 * always returns 0.
 */
uint64_t
ml_gpu_stat(__unused thread_t t)
{
	return 0;
}
1133
#if !CONFIG_SKIP_PRECISE_USER_KERNEL_TIME
/*
 * At a user<->kernel boundary, move both the per-processor state timer
 * and the per-thread timer into the appropriate accounting bucket
 * (system when entering the kernel, user when leaving), all stamped with
 * the same timebase sample so no time is lost or double-counted.
 */
static void
timer_state_event(boolean_t switch_to_kernel)
{
	thread_t thread = current_thread();
	/* Thread has opted out of precise user/kernel accounting. */
	if (!thread->precise_user_kernel_time) {
		return;
	}

	processor_t pd = current_processor();
	uint64_t now = ml_get_timebase();

	/* Processor-level: stop the old bucket, start the new one at 'now'. */
	timer_stop(pd->current_state, now);
	pd->current_state = (switch_to_kernel) ? &pd->system_state : &pd->user_state;
	timer_start(pd->current_state, now);

	/* Thread-level: same switch, same timestamp. */
	timer_stop(pd->thread_timer, now);
	pd->thread_timer = (switch_to_kernel) ? &thread->system_timer : &thread->user_timer;
	timer_start(pd->thread_timer, now);
}
1154
/*
 * Entering the kernel from user space: bill subsequent time as system.
 */
void
timer_state_event_user_to_kernel(void)
{
	timer_state_event(TRUE);
}
1160
/*
 * Returning to user space: bill subsequent time as user.
 */
void
timer_state_event_kernel_to_user(void)
{
	timer_state_event(FALSE);
}
#endif /* !CONFIG_SKIP_PRECISE_USER_KERNEL_TIME */
1167
0a7de745
A
1168uint32_t
1169get_arm_cpu_version(void)
1170{
1171 uint32_t value = machine_read_midr();
1172
1173 /* Compose the register values into 8 bits; variant[7:4], revision[3:0]. */
1174 return ((value & MIDR_REV_MASK) >> MIDR_REV_SHIFT) | ((value & MIDR_VAR_MASK) >> (MIDR_VAR_SHIFT - 4));
1175}
1176
/*
 * Whether user space may read the continuous hardware clock directly;
 * not permitted on this platform.
 */
boolean_t
user_cont_hwclock_allowed(void)
{
	return FALSE;
}
1182
/*
 * Report how user space may access the timebase: via the architected
 * counter when __ARM_TIME__ is built in, otherwise not at all.
 */
uint8_t
user_timebase_type(void)
{
#if __ARM_TIME__
	return USER_TIMEBASE_SPEC;
#else
	return USER_TIMEBASE_NONE;
#endif
}
1192
/*
 * The following are required for parts of the kernel
 * that cannot resolve these functions as inlines:
 */
/* __attribute__((const)) lets the compiler CSE repeated calls. */
extern thread_t current_act(void) __attribute__((const));
thread_t
current_act(void)
{
	return current_thread_fast();
}
1203
/*
 * Out-of-line current_thread(): the inline/macro form is #undef'd first
 * so this definition does not expand recursively; marked const so the
 * compiler may CSE repeated calls.
 */
#undef current_thread
extern thread_t current_thread(void) __attribute__((const));
thread_t
current_thread(void)
{
	return current_thread_fast();
}
1211
#if __ARM_USER_PROTECT__
/*
 * On kernel entry, switch TTBR0 to the thread's kernel page tables
 * (machine.kptw_ttb) with the kernel ASID, if not already active.
 * Returns the previous TTBR0 value so arm_user_protect_end() can
 * restore the user mapping.
 */
uintptr_t
arm_user_protect_begin(thread_t thread)
{
	uintptr_t ttbr0, asid = 0; // kernel asid

	ttbr0 = __builtin_arm_mrc(15, 0, 2, 0, 0); // Get TTBR0
	if (ttbr0 != thread->machine.kptw_ttb) {
		__builtin_arm_mcr(15, 0, thread->machine.kptw_ttb, 2, 0, 0); // Set TTBR0
		__builtin_arm_mcr(15, 0, asid, 13, 0, 1); // Set CONTEXTIDR
		__builtin_arm_isb(ISB_SY); // ensure the switch is visible before continuing
	}
	return ttbr0;
}
1226
/*
 * Counterpart to arm_user_protect_begin(): restore the thread's user
 * TTBR0/CONTEXTIDR. Skipped when the saved TTBR0 was already the kernel
 * tables or the thread has no distinct user tables.
 *
 * NOTE(review): when disable_interrupts is set, FIQ/IRQ are left masked
 * on return — presumably the caller restores them; confirm at call sites.
 */
void
arm_user_protect_end(thread_t thread, uintptr_t ttbr0, boolean_t disable_interrupts)
{
	if ((ttbr0 != thread->machine.kptw_ttb) && (thread->machine.uptw_ttb != thread->machine.kptw_ttb)) {
		if (disable_interrupts) {
			__asm__ volatile ("cpsid if" ::: "memory"); // Disable FIQ/IRQ
		}
		__builtin_arm_mcr(15, 0, thread->machine.uptw_ttb, 2, 0, 0); // Set TTBR0
		__builtin_arm_mcr(15, 0, thread->machine.asid, 13, 0, 1); // Set CONTEXTIDR with thread asid
		__builtin_arm_dsb(DSB_ISH);
		__builtin_arm_isb(ISB_SY);
	}
}
#endif // __ARM_USER_PROTECT__
/*
 * Finalize kernel virtual-memory protections and record that lockdown
 * has completed.
 */
void
machine_lockdown(void)
{
	arm_vm_prot_finalize(PE_state.bootArgs);
	/* NOTE(review): presumably read elsewhere to gate post-lockdown behavior. */
	lockdown_done = 1;
}
1248
/*
 * Stub: no additional lockdown initialization is needed on this platform.
 */
void
ml_lockdown_init(void)
{
}
1253
/*
 * Hibernation hook (before resume becomes active); no-op here.
 */
void
ml_hibernate_active_pre(void)
{
}
1258
/*
 * Hibernation hook (after resume becomes active); no-op here.
 */
void
ml_hibernate_active_post(void)
{
}
1263
/*
 * Report platform-reserved VM regions. This platform reserves none:
 * the out-parameter is cleared and a count of zero is returned.
 */
size_t
ml_get_vm_reserved_regions(bool vm_is64bit, struct vm_reserved_region **regions)
{
	(void)vm_is64bit; /* not consulted on this platform */
	assert(regions != NULL);

	*regions = NULL;
	return 0;
}