osfmk/arm/machine_routines.c (xnu-7195.60.75)
1 /*
2 * Copyright (c) 2007-2016 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 #include <arm/proc_reg.h>
30 #include <arm/machine_cpu.h>
31 #include <arm/cpu_internal.h>
32 #include <arm/cpuid.h>
33 #include <arm/io_map_entries.h>
34 #include <arm/cpu_data.h>
35 #include <arm/cpu_data_internal.h>
36 #include <arm/machine_routines.h>
37 #include <arm/misc_protos.h>
38 #include <arm/rtclock.h>
39 #include <arm/caches_internal.h>
40 #include <console/serial_protos.h>
41 #include <kern/machine.h>
42 #include <prng/random.h>
43 #include <kern/startup.h>
44 #include <kern/sched.h>
45 #include <kern/thread.h>
46 #include <mach/machine.h>
47 #include <machine/atomic.h>
48 #include <vm/pmap.h>
49 #include <vm/vm_page.h>
50 #include <vm/vm_map.h>
51 #include <sys/kdebug.h>
52 #include <kern/coalition.h>
53 #include <pexpert/device_tree.h>
54 #include <arm/cpuid_internal.h>
55 #include <arm/cpu_capabilities.h>
56
57 #include <IOKit/IOPlatformExpert.h>
58
59 #if KPC
60 #include <kern/kpc.h>
61 #endif
62
63 /* arm32 only supports a highly simplified topology, fixed at 1 cluster */
64 static ml_topology_cpu_t topology_cpu_array[MAX_CPUS];
65 static ml_topology_cluster_t topology_cluster = {
66 .cluster_id = 0,
67 .cluster_type = CLUSTER_TYPE_SMP,
68 .first_cpu_id = 0,
69 };
70 static ml_topology_info_t topology_info = {
71 .version = CPU_TOPOLOGY_VERSION,
72 .num_clusters = 1,
73 .max_cluster_id = 0,
74 .cpus = topology_cpu_array,
75 .clusters = &topology_cluster,
76 .boot_cpu = &topology_cpu_array[0],
77 .boot_cluster = &topology_cluster,
78 };
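
/*
 * Illustrative sketch (not part of the original source): how a client might
 * walk the fixed single-cluster topology above using the accessors defined
 * later in this file. The function name is hypothetical and the block is
 * compiled out.
 */
#if 0 /* example only */
static void
example_dump_topology(void)
{
	const ml_topology_info_t *info = ml_get_topology_info();

	kprintf("topology: %u cluster(s), %u cpu(s)\n",
	    ml_get_cluster_count(), ml_get_cpu_count());

	for (unsigned int i = 0; i < info->num_cpus; i++) {
		const ml_topology_cpu_t *cpu = &info->cpus[i];
		kprintf("  cpu %u: phys_id %u, cluster type %d\n",
		    cpu->cpu_id, cpu->phys_id, (int)cpu->cluster_type);
	}
}
#endif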
79
80 uint32_t LockTimeOut;
81 uint32_t LockTimeOutUsec;
82 uint64_t TLockTimeOut;
83 uint64_t MutexSpin;
84 extern uint32_t lockdown_done;
85 uint64_t low_MutexSpin;
86 int64_t high_MutexSpin;
87
88 void
89 machine_startup(__unused boot_args * args)
90 {
91 machine_conf();
92
93 /*
94 * Kick off the kernel bootstrap.
95 */
96 kernel_bootstrap();
97 /* NOTREACHED */
98 }
99
100 char *
101 machine_boot_info(
102 __unused char *buf,
103 __unused vm_size_t size)
104 {
105 return PE_boot_args();
106 }
107
108 void
109 slave_machine_init(__unused void *param)
110 {
111 cpu_machine_init(); /* Initialize the processor */
112 clock_init(); /* Init the clock */
113 }
114
115 /*
116 * Routine: machine_processor_shutdown
117  * Function: Switch to the shutdown context and invoke doshutdown() for the given processor.
118 */
119 thread_t
120 machine_processor_shutdown(
121 __unused thread_t thread,
122 void (*doshutdown)(processor_t),
123 processor_t processor)
124 {
125 return Shutdown_context(doshutdown, processor);
126 }
127
128 /*
129 * Routine: ml_init_lock_timeout
130  * Function: Initialize the lock, TLock, and mutex spin timeouts, honoring the slto_us and mtxspin boot-args.
131 */
132 void
133 ml_init_lock_timeout(void)
134 {
135 uint64_t abstime;
136 uint64_t mtxspin;
137 uint64_t default_timeout_ns = NSEC_PER_SEC >> 2;
138 uint32_t slto;
139
140 if (PE_parse_boot_argn("slto_us", &slto, sizeof(slto))) {
141 default_timeout_ns = slto * NSEC_PER_USEC;
142 }
143
144 nanoseconds_to_absolutetime(default_timeout_ns, &abstime);
145 LockTimeOutUsec = (uint32_t)(default_timeout_ns / NSEC_PER_USEC);
146 LockTimeOut = (uint32_t)abstime;
147 TLockTimeOut = LockTimeOut;
148
149 if (PE_parse_boot_argn("mtxspin", &mtxspin, sizeof(mtxspin))) {
150 if (mtxspin > USEC_PER_SEC >> 4) {
151 mtxspin = USEC_PER_SEC >> 4;
152 }
153 nanoseconds_to_absolutetime(mtxspin * NSEC_PER_USEC, &abstime);
154 } else {
155 nanoseconds_to_absolutetime(10 * NSEC_PER_USEC, &abstime);
156 }
157 MutexSpin = abstime;
158 low_MutexSpin = MutexSpin;
159 /*
160          * high_MutexSpin should be initialized as low_MutexSpin * real_ncpus, but
161          * real_ncpus is not set at this point in boot.
162          *
163          * NOTE: active spinning is disabled on arm. It can be enabled later by
164          * setting high_MutexSpin through the sysctl (see the sketch after this function).
165 */
166 high_MutexSpin = low_MutexSpin;
167 }
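
/*
 * Illustrative sketch (not part of the original source): how adaptive mutex
 * spinning could be enabled later, once real_ncpus is known, per the comment
 * above. The function name is hypothetical and the block is compiled out.
 */
#if 0 /* example only */
static void
example_enable_adaptive_mutex_spin(void)
{
	/* Scale the upper spin bound by the number of CPUs actually present. */
	high_MutexSpin = low_MutexSpin * real_ncpus;
}
#endif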
168
169 /*
170 * This is called from the machine-independent routine cpu_up()
171 * to perform machine-dependent info updates.
172 */
173 void
174 ml_cpu_up(void)
175 {
176 os_atomic_inc(&machine_info.physical_cpu, relaxed);
177 os_atomic_inc(&machine_info.logical_cpu, relaxed);
178 }
179
180 /*
181 * This is called from the machine-independent routine cpu_down()
182 * to perform machine-dependent info updates.
183 */
184 void
185 ml_cpu_down(void)
186 {
187 cpu_data_t *cpu_data_ptr;
188
189 os_atomic_dec(&machine_info.physical_cpu, relaxed);
190 os_atomic_dec(&machine_info.logical_cpu, relaxed);
191
192 /*
193          * If we want to deal with outstanding IPIs, we need to do
194          * so relatively early in the processor_doshutdown path,
195 * as we pend decrementer interrupts using the IPI
196 * mechanism if we cannot immediately service them (if
197 * IRQ is masked). Do so now.
198 *
199 * We aren't on the interrupt stack here; would it make
200 * more sense to disable signaling and then enable
201 * interrupts? It might be a bit cleaner.
202 */
203 cpu_data_ptr = getCpuDatap();
204 cpu_data_ptr->cpu_running = FALSE;
205
206 cpu_signal_handler_internal(TRUE);
207 }
208
209 /*
210 * Routine: ml_cpu_get_info
211  * Function: Fill in cache size and line-size information for the current CPU.
212 */
213 void
214 ml_cpu_get_info(ml_cpu_info_t * ml_cpu_info)
215 {
216 cache_info_t *cpuid_cache_info;
217
218 cpuid_cache_info = cache_info();
219 ml_cpu_info->vector_unit = 0;
220 ml_cpu_info->cache_line_size = cpuid_cache_info->c_linesz;
221 ml_cpu_info->l1_icache_size = cpuid_cache_info->c_isize;
222 ml_cpu_info->l1_dcache_size = cpuid_cache_info->c_dsize;
223
224 #if (__ARM_ARCH__ >= 7)
225 ml_cpu_info->l2_settings = 1;
226 ml_cpu_info->l2_cache_size = cpuid_cache_info->c_l2size;
227 #else
228 ml_cpu_info->l2_settings = 0;
229 ml_cpu_info->l2_cache_size = 0xFFFFFFFF;
230 #endif
231 ml_cpu_info->l3_settings = 0;
232 ml_cpu_info->l3_cache_size = 0xFFFFFFFF;
233 }
234
235 unsigned int
236 ml_get_machine_mem(void)
237 {
238 return machine_info.memory_size;
239 }
240
241 /* Return max offset */
242 vm_map_offset_t
243 ml_get_max_offset(
244 boolean_t is64,
245 unsigned int option)
246 {
247 unsigned int pmap_max_offset_option = 0;
248
249 switch (option) {
250 case MACHINE_MAX_OFFSET_DEFAULT:
251 pmap_max_offset_option = ARM_PMAP_MAX_OFFSET_DEFAULT;
252 break;
253 case MACHINE_MAX_OFFSET_MIN:
254 pmap_max_offset_option = ARM_PMAP_MAX_OFFSET_MIN;
255 break;
256 case MACHINE_MAX_OFFSET_MAX:
257 pmap_max_offset_option = ARM_PMAP_MAX_OFFSET_MAX;
258 break;
259 case MACHINE_MAX_OFFSET_DEVICE:
260 pmap_max_offset_option = ARM_PMAP_MAX_OFFSET_DEVICE;
261 break;
262 default:
263 panic("ml_get_max_offset(): Illegal option 0x%x\n", option);
264 break;
265 }
266 return pmap_max_offset(is64, pmap_max_offset_option);
267 }
268
269 void
270 ml_panic_trap_to_debugger(__unused const char *panic_format_str,
271 __unused va_list *panic_args,
272 __unused unsigned int reason,
273 __unused void *ctx,
274 __unused uint64_t panic_options_mask,
275 __unused unsigned long panic_caller)
276 {
277 return;
278 }
279
280 __attribute__((noreturn))
281 void
282 halt_all_cpus(boolean_t reboot)
283 {
284 if (reboot) {
285 printf("MACH Reboot\n");
286 PEHaltRestart(kPERestartCPU);
287 } else {
288 printf("CPU halted\n");
289 PEHaltRestart(kPEHaltCPU);
290 }
291 while (1) {
292 ;
293 }
294 }
295
296 __attribute__((noreturn))
297 void
298 halt_cpu(void)
299 {
300 halt_all_cpus(FALSE);
301 }
302
303 /*
304 * Routine: machine_signal_idle
305  * Function: Send a no-op IPI (SIGPnop) to the given processor to wake it from idle.
306 */
307 void
308 machine_signal_idle(
309 processor_t processor)
310 {
311 cpu_signal(processor_to_cpu_datap(processor), SIGPnop, (void *)NULL, (void *)NULL);
312 KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_REMOTE_AST), processor->cpu_id, 0 /* nop */, 0, 0, 0);
313 }
314
315 void
316 machine_signal_idle_deferred(
317 processor_t processor)
318 {
319 cpu_signal_deferred(processor_to_cpu_datap(processor));
320 KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_REMOTE_DEFERRED_AST), processor->cpu_id, 0 /* nop */, 0, 0, 0);
321 }
322
323 void
324 machine_signal_idle_cancel(
325 processor_t processor)
326 {
327 cpu_signal_cancel(processor_to_cpu_datap(processor));
328 KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_REMOTE_CANCEL_AST), processor->cpu_id, 0 /* nop */, 0, 0, 0);
329 }
330
331 /*
332 * Routine: ml_install_interrupt_handler
333  * Function: Record the platform interrupt nub, source, target, handler, and refCon for the current CPU.
334 */
335 void
336 ml_install_interrupt_handler(
337 void *nub,
338 int source,
339 void *target,
340 IOInterruptHandler handler,
341 void *refCon)
342 {
343 cpu_data_t *cpu_data_ptr;
344 boolean_t current_state;
345
346 current_state = ml_set_interrupts_enabled(FALSE);
347 cpu_data_ptr = getCpuDatap();
348
349 cpu_data_ptr->interrupt_nub = nub;
350 cpu_data_ptr->interrupt_source = source;
351 cpu_data_ptr->interrupt_target = target;
352 cpu_data_ptr->interrupt_handler = handler;
353 cpu_data_ptr->interrupt_refCon = refCon;
354
355 (void) ml_set_interrupts_enabled(current_state);
356 }
357
358 /*
359 * Routine: ml_init_interrupt
360 * Function: Initialize Interrupts
361 */
362 void
363 ml_init_interrupt(void)
364 {
365 }
366
367 /*
368 * Routine: ml_init_timebase
369  * Function: Register and set up the timebase and decrementer services.
370 */
371 void
372 ml_init_timebase(
373 void *args,
374 tbd_ops_t tbd_funcs,
375 vm_offset_t int_address,
376 vm_offset_t int_value)
377 {
378 cpu_data_t *cpu_data_ptr;
379
380 cpu_data_ptr = (cpu_data_t *)args;
381
382 if ((cpu_data_ptr == &BootCpuData)
383 && (rtclock_timebase_func.tbd_fiq_handler == (void *)NULL)) {
384 rtclock_timebase_func = *tbd_funcs;
385 rtclock_timebase_addr = int_address;
386 rtclock_timebase_val = int_value;
387 }
388 }
389
390 void
391 ml_parse_cpu_topology(void)
392 {
393 DTEntry entry, child;
394 OpaqueDTEntryIterator iter;
395 uint32_t cpu_boot_arg;
396 int err;
397
398 err = SecureDTLookupEntry(NULL, "/cpus", &entry);
399 assert(err == kSuccess);
400
401 err = SecureDTInitEntryIterator(entry, &iter);
402 assert(err == kSuccess);
403
404 cpu_boot_arg = MAX_CPUS;
405 PE_parse_boot_argn("cpus", &cpu_boot_arg, sizeof(cpu_boot_arg));
406
407 ml_topology_cluster_t *cluster = &topology_info.clusters[0];
408 unsigned int cpu_id = 0;
409 while (kSuccess == SecureDTIterateEntries(&iter, &child)) {
410 #if MACH_ASSERT
411 unsigned int propSize;
412 void const *prop = NULL;
413 if (cpu_id == 0) {
414 if (kSuccess != SecureDTGetProperty(child, "state", &prop, &propSize)) {
415 panic("unable to retrieve state for cpu %u", cpu_id);
416 }
417
418 if (strncmp((char const *)prop, "running", propSize) != 0) {
419 panic("cpu 0 has not been marked as running!");
420 }
421 }
422 assert(kSuccess == SecureDTGetProperty(child, "reg", &prop, &propSize));
423 assert(cpu_id == *((uint32_t const *)prop));
424 #endif
425 if (cpu_id >= cpu_boot_arg) {
426 break;
427 }
428
429 ml_topology_cpu_t *cpu = &topology_info.cpus[cpu_id];
430
431 cpu->cpu_id = cpu_id;
432 cpu->phys_id = cpu_id;
433 cpu->cluster_type = cluster->cluster_type;
434
435 cluster->num_cpus++;
436 cluster->cpu_mask |= 1ULL << cpu_id;
437
438 topology_info.num_cpus++;
439 topology_info.max_cpu_id = cpu_id;
440
441 cpu_id++;
442 }
443
444 if (cpu_id == 0) {
445 panic("No cpus found!");
446 }
447 }
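
/*
 * Illustrative sketch (not part of the original source): the shape of the
 * device-tree input that ml_parse_cpu_topology() walks above. The property
 * names ("reg", "state") come from the code; node names and values here are
 * hypothetical.
 *
 *     /cpus
 *         cpu0:  reg = <0>;  state = "running";
 *         cpu1:  reg = <1>;  state = "waiting";
 *
 * Each child node contributes one entry to topology_info.cpus[], capped by
 * the "cpus" boot-arg and MAX_CPUS.
 */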
448
449 const ml_topology_info_t *
450 ml_get_topology_info(void)
451 {
452 return &topology_info;
453 }
454
455 unsigned int
456 ml_get_cpu_count(void)
457 {
458 return topology_info.num_cpus;
459 }
460
461 unsigned int
462 ml_get_cluster_count(void)
463 {
464 return topology_info.num_clusters;
465 }
466
467 int
468 ml_get_boot_cpu_number(void)
469 {
470 return 0;
471 }
472
473 cluster_type_t
474 ml_get_boot_cluster(void)
475 {
476 return CLUSTER_TYPE_SMP;
477 }
478
479 int
480 ml_get_cpu_number(uint32_t phys_id)
481 {
482 if (phys_id > (uint32_t)ml_get_max_cpu_number()) {
483 return -1;
484 }
485
486 return (int)phys_id;
487 }
488
489 int
490 ml_get_cluster_number(__unused uint32_t phys_id)
491 {
492 return 0;
493 }
494
495 int
496 ml_get_max_cpu_number(void)
497 {
498 return topology_info.num_cpus - 1;
499 }
500
501 int
502 ml_get_max_cluster_number(void)
503 {
504 return topology_info.max_cluster_id;
505 }
506
507 unsigned int
508 ml_get_first_cpu_id(unsigned int cluster_id)
509 {
510 return topology_info.clusters[cluster_id].first_cpu_id;
511 }
512
513 kern_return_t
514 ml_processor_register(ml_processor_info_t *in_processor_info,
515 processor_t * processor_out, ipi_handler_t *ipi_handler_out,
516 perfmon_interrupt_handler_func *pmi_handler_out)
517 {
518 cpu_data_t *this_cpu_datap;
519 boolean_t is_boot_cpu;
520
521 const unsigned int max_cpu_id = ml_get_max_cpu_number();
522 if (in_processor_info->phys_id > max_cpu_id) {
523 /*
524 * The physical CPU ID indicates that we have more CPUs than
525          * this xnu build supports. This probably means we have an
526 * incorrect board configuration.
527 *
528 * TODO: Should this just return a failure instead? A panic
529 * is simply a convenient way to catch bugs in the pexpert
530 * headers.
531 */
532 panic("phys_id %u is too large for max_cpu_id (%u)", in_processor_info->phys_id, max_cpu_id);
533 }
534
535 /* Fail the registration if the number of CPUs has been limited by boot-arg. */
536 if ((in_processor_info->phys_id >= topology_info.num_cpus) ||
537 (in_processor_info->log_id > (uint32_t)ml_get_max_cpu_number())) {
538 return KERN_FAILURE;
539 }
540
541 if (in_processor_info->log_id != (uint32_t)ml_get_boot_cpu_number()) {
542 is_boot_cpu = FALSE;
543 this_cpu_datap = cpu_data_alloc(FALSE);
544 cpu_data_init(this_cpu_datap);
545 } else {
546 this_cpu_datap = &BootCpuData;
547 is_boot_cpu = TRUE;
548 }
549
550 this_cpu_datap->cpu_id = in_processor_info->cpu_id;
551
552 this_cpu_datap->cpu_console_buf = console_cpu_alloc(is_boot_cpu);
553 if (this_cpu_datap->cpu_console_buf == (void *)(NULL)) {
554 goto processor_register_error;
555 }
556
557 if (!is_boot_cpu) {
558 if (cpu_data_register(this_cpu_datap) != KERN_SUCCESS) {
559 goto processor_register_error;
560 }
561 }
562
563 this_cpu_datap->cpu_idle_notify = in_processor_info->processor_idle;
564 this_cpu_datap->cpu_cache_dispatch = (cache_dispatch_t) in_processor_info->platform_cache_dispatch;
565 nanoseconds_to_absolutetime((uint64_t) in_processor_info->powergate_latency, &this_cpu_datap->cpu_idle_latency);
566 this_cpu_datap->cpu_reset_assist = kvtophys(in_processor_info->powergate_stub_addr);
567
568 this_cpu_datap->idle_timer_notify = in_processor_info->idle_timer;
569 this_cpu_datap->idle_timer_refcon = in_processor_info->idle_timer_refcon;
570
571 this_cpu_datap->platform_error_handler = in_processor_info->platform_error_handler;
572 this_cpu_datap->cpu_regmap_paddr = in_processor_info->regmap_paddr;
573 this_cpu_datap->cpu_phys_id = in_processor_info->phys_id;
574 this_cpu_datap->cpu_l2_access_penalty = in_processor_info->l2_access_penalty;
575
576 processor_t processor = PERCPU_GET_RELATIVE(processor, cpu_data, this_cpu_datap);
577 if (!is_boot_cpu) {
578 processor_init(processor, this_cpu_datap->cpu_number,
579 processor_pset(master_processor));
580
581 if (this_cpu_datap->cpu_l2_access_penalty) {
582 /*
583 * Cores that have a non-zero L2 access penalty compared
584 * to the boot processor should be de-prioritized by the
585 * scheduler, so that threads use the cores with better L2
586 * preferentially.
587 */
588 processor_set_primary(processor, master_processor);
589 }
590 }
591
592 *processor_out = processor;
593 *ipi_handler_out = cpu_signal_handler;
594 *pmi_handler_out = NULL;
595 if (in_processor_info->idle_tickle != (idle_tickle_t *) NULL) {
596 *in_processor_info->idle_tickle = (idle_tickle_t) cpu_idle_tickle;
597 }
598
599 #if KPC
600 if (kpc_register_cpu(this_cpu_datap) != TRUE) {
601 goto processor_register_error;
602 }
603 #endif
604
605 if (!is_boot_cpu) {
606 random_cpu_init(this_cpu_datap->cpu_number);
607 }
608
609 return KERN_SUCCESS;
610
611 processor_register_error:
612 #if KPC
613 kpc_unregister_cpu(this_cpu_datap);
614 #endif
615 if (!is_boot_cpu) {
616 cpu_data_free(this_cpu_datap);
617 }
618 return KERN_FAILURE;
619 }
620
621 void
622 ml_init_arm_debug_interface(
623 void * in_cpu_datap,
624 vm_offset_t virt_address)
625 {
626 ((cpu_data_t *)in_cpu_datap)->cpu_debug_interface_map = virt_address;
627 do_debugid();
628 }
629
630 /*
631 * Routine: init_ast_check
632 * Function:
633 */
634 void
635 init_ast_check(
636 __unused processor_t processor)
637 {
638 }
639
640 /*
641 * Routine: cause_ast_check
642  * Function: Signal the given processor with SIGPast so it checks for pending ASTs (no-op if it is the current processor).
643 */
644 void
645 cause_ast_check(
646 processor_t processor)
647 {
648 if (current_processor() != processor) {
649 cpu_signal(processor_to_cpu_datap(processor), SIGPast, (void *)NULL, (void *)NULL);
650 KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_REMOTE_AST), processor->cpu_id, 1 /* ast */, 0, 0, 0);
651 }
652 }
653
654 extern uint32_t cpu_idle_count;
655
656 void
657 ml_get_power_state(boolean_t *icp, boolean_t *pidlep)
658 {
659 *icp = ml_at_interrupt_context();
660 *pidlep = (cpu_idle_count == real_ncpus);
661 }
662
663 /*
664 * Routine: ml_cause_interrupt
665 * Function: Generate a fake interrupt
666 */
667 void
668 ml_cause_interrupt(void)
669 {
670 return; /* BS_XXX */
671 }
672
673 /* Map memory-mapped IO space */
674 vm_offset_t
675 ml_io_map(
676 vm_offset_t phys_addr,
677 vm_size_t size)
678 {
679 return io_map(phys_addr, size, VM_WIMG_IO);
680 }
681
682 /* Map memory-mapped IO space (with the specified protection) */
683 vm_offset_t
684 ml_io_map_with_prot(
685 vm_offset_t phys_addr,
686 vm_size_t size,
687 vm_prot_t prot)
688 {
689 return io_map_with_prot(phys_addr, size, VM_WIMG_IO, prot);
690 }
691
692 vm_offset_t
693 ml_io_map_wcomb(
694 vm_offset_t phys_addr,
695 vm_size_t size)
696 {
697 return io_map(phys_addr, size, VM_WIMG_WCOMB);
698 }
699
700 void
701 ml_io_unmap(vm_offset_t addr, vm_size_t sz)
702 {
703 pmap_remove(kernel_pmap, addr, addr + sz);
704 kmem_free(kernel_map, addr, sz);
705 }
706
707 /* boot memory allocation */
708 vm_offset_t
709 ml_static_malloc(
710 __unused vm_size_t size)
711 {
712 return (vm_offset_t) NULL;
713 }
714
715 vm_map_address_t
716 ml_map_high_window(
717 vm_offset_t phys_addr,
718 vm_size_t len)
719 {
720 return pmap_map_high_window_bd(phys_addr, len, VM_PROT_READ | VM_PROT_WRITE);
721 }
722
723 vm_offset_t
724 ml_static_ptovirt(
725 vm_offset_t paddr)
726 {
727 return phystokv(paddr);
728 }
729
730 vm_offset_t
731 ml_static_vtop(
732 vm_offset_t vaddr)
733 {
734 assertf(((vm_address_t)(vaddr) - gVirtBase) < gPhysSize, "%s: illegal vaddr: %p", __func__, (void*)vaddr);
735 return (vm_address_t)(vaddr) - gVirtBase + gPhysBase;
736 }
737
738 /*
739  * Return the maximum contiguous KVA range that can be accessed from this
740  * physical address. For arm64, a segmented physical aperture relocation
741  * table can limit the available range for a given PA to something less than
742  * the extent of physical memory; here we still have a flat physical
743  * aperture, so no such restriction applies. (A caller sketch follows below.)
744 */
745 vm_map_address_t
746 phystokv_range(pmap_paddr_t pa, vm_size_t *max_len)
747 {
748 vm_size_t len = gPhysSize - (pa - gPhysBase);
749 if (*max_len > len) {
750 *max_len = len;
751 }
752 assertf((pa - gPhysBase) < gPhysSize, "%s: illegal PA: 0x%lx", __func__, (unsigned long)pa);
753 return pa - gPhysBase + gVirtBase;
754 }
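
/*
 * Illustrative caller sketch (not part of the original source): copying out
 * of the physical aperture while honoring the length clamp applied by
 * phystokv_range() above. The function name is hypothetical and the block is
 * compiled out.
 */
#if 0 /* example only */
static void
example_copy_from_phys(pmap_paddr_t pa, void *dst, vm_size_t len)
{
	vm_size_t max_len = len;
	vm_map_address_t kva = phystokv_range(pa, &max_len);

	/* max_len may have been reduced to the end of the physical aperture. */
	bcopy((const void *)kva, dst, max_len);
}
#endif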
755
756 vm_offset_t
757 ml_static_slide(
758 vm_offset_t vaddr)
759 {
760 return VM_KERNEL_SLIDE(vaddr);
761 }
762
763 kern_return_t
764 ml_static_verify_page_protections(
765 uint64_t base, uint64_t size, vm_prot_t prot)
766 {
767 /* XXX Implement Me */
768 (void)base;
769 (void)size;
770 (void)prot;
771 return KERN_FAILURE;
772 }
773
774
775 vm_offset_t
776 ml_static_unslide(
777 vm_offset_t vaddr)
778 {
779 return VM_KERNEL_UNSLIDE(vaddr);
780 }
781
782 kern_return_t
783 ml_static_protect(
784 vm_offset_t vaddr, /* kernel virtual address */
785 vm_size_t size,
786 vm_prot_t new_prot)
787 {
788 pt_entry_t arm_prot = 0;
789 pt_entry_t arm_block_prot = 0;
790 vm_offset_t vaddr_cur;
791 ppnum_t ppn;
792 kern_return_t result = KERN_SUCCESS;
793
794 if (vaddr < VM_MIN_KERNEL_ADDRESS) {
795 return KERN_FAILURE;
796 }
797
798 assert((vaddr & (ARM_PGBYTES - 1)) == 0); /* must be page aligned */
799
800 if ((new_prot & VM_PROT_WRITE) && (new_prot & VM_PROT_EXECUTE)) {
801 panic("ml_static_protect(): WX request on %p", (void *) vaddr);
802 }
803 if (lockdown_done && (new_prot & VM_PROT_EXECUTE)) {
804 panic("ml_static_protect(): attempt to inject executable mapping on %p", (void *) vaddr);
805 }
806
807 /* Set up the protection bits, and block bits so we can validate block mappings. */
808 if (new_prot & VM_PROT_WRITE) {
809 arm_prot |= ARM_PTE_AP(AP_RWNA);
810 arm_block_prot |= ARM_TTE_BLOCK_AP(AP_RWNA);
811 } else {
812 arm_prot |= ARM_PTE_AP(AP_RONA);
813 arm_block_prot |= ARM_TTE_BLOCK_AP(AP_RONA);
814 }
815
816 if (!(new_prot & VM_PROT_EXECUTE)) {
817 arm_prot |= ARM_PTE_NX;
818 arm_block_prot |= ARM_TTE_BLOCK_NX;
819 }
820
821 for (vaddr_cur = vaddr;
822 vaddr_cur < ((vaddr + size) & ~ARM_PGMASK);
823 vaddr_cur += ARM_PGBYTES) {
824 ppn = pmap_find_phys(kernel_pmap, vaddr_cur);
825 if (ppn != (vm_offset_t) NULL) {
826 tt_entry_t *ttp = &kernel_pmap->tte[ttenum(vaddr_cur)];
827 tt_entry_t tte = *ttp;
828
829 if ((tte & ARM_TTE_TYPE_MASK) != ARM_TTE_TYPE_TABLE) {
830 if (((tte & ARM_TTE_TYPE_MASK) == ARM_TTE_TYPE_BLOCK) &&
831 ((tte & (ARM_TTE_BLOCK_APMASK | ARM_TTE_BLOCK_NX_MASK)) == arm_block_prot)) {
832 /*
833 * We can support ml_static_protect on a block mapping if the mapping already has
834 * the desired protections. We still want to run checks on a per-page basis.
835 */
836 continue;
837 }
838
839 result = KERN_FAILURE;
840 break;
841 }
842
843 pt_entry_t *pte_p = (pt_entry_t *) ttetokv(tte) + ptenum(vaddr_cur);
844 pt_entry_t ptmp = *pte_p;
845
846 ptmp = (ptmp & ~(ARM_PTE_APMASK | ARM_PTE_NX_MASK)) | arm_prot;
847 *pte_p = ptmp;
848 }
849 }
850
851 if (vaddr_cur > vaddr) {
852 flush_mmu_tlb_region(vaddr, (vm_size_t)(vaddr_cur - vaddr));
853 }
854
855 return result;
856 }
857
858 /*
859 * Routine: ml_static_mfree
860  * Function: Release statically mapped (wired boot) memory in the given range back to the VM as free pages.
861 */
862 void
863 ml_static_mfree(
864 vm_offset_t vaddr,
865 vm_size_t size)
866 {
867 vm_offset_t vaddr_cur;
868 ppnum_t ppn;
869 uint32_t freed_pages = 0;
870 uint32_t freed_kernelcache_pages = 0;
871
872 /* It is acceptable (if bad) to fail to free. */
873 if (vaddr < VM_MIN_KERNEL_ADDRESS) {
874 return;
875 }
876
877 assert((vaddr & (PAGE_SIZE - 1)) == 0); /* must be page aligned */
878
879 for (vaddr_cur = vaddr;
880 vaddr_cur < trunc_page_32(vaddr + size);
881 vaddr_cur += PAGE_SIZE) {
882 ppn = pmap_find_phys(kernel_pmap, vaddr_cur);
883 if (ppn != (vm_offset_t) NULL) {
884 /*
885 * It is not acceptable to fail to update the protections on a page
886 * we will release to the VM. We need to either panic or continue.
887 * For now, we'll panic (to help flag if there is memory we can
888 * reclaim).
889 */
890 if (ml_static_protect(vaddr_cur, PAGE_SIZE, VM_PROT_WRITE | VM_PROT_READ) != KERN_SUCCESS) {
891 panic("Failed ml_static_mfree on %p", (void *) vaddr_cur);
892 }
893 vm_page_create(ppn, (ppn + 1));
894 freed_pages++;
895 if (vaddr_cur >= segLOWEST && vaddr_cur < end_kern) {
896 freed_kernelcache_pages++;
897 }
898 }
899 }
900 vm_page_lockspin_queues();
901 vm_page_wire_count -= freed_pages;
902 vm_page_wire_count_initial -= freed_pages;
903 vm_page_kernelcache_count -= freed_kernelcache_pages;
904 vm_page_unlock_queues();
905 #if DEBUG
906 kprintf("ml_static_mfree: Released 0x%x pages at VA %p, size:0x%llx, last ppn: 0x%x\n", freed_pages, (void *)vaddr, (uint64_t)size, ppn);
907 #endif
908 }
909
910
911 /* virtual to physical on wired pages */
912 vm_offset_t
913 ml_vtophys(vm_offset_t vaddr)
914 {
915 return kvtophys(vaddr);
916 }
917
918 /*
919 * Routine: ml_nofault_copy
920 * Function: Perform a physical mode copy if the source and destination have
921 * valid translations in the kernel pmap. If translations are present, they are
922  * assumed to be wired; i.e., no attempt is made to guarantee that the
923 * translations obtained remain valid for the duration of the copy process.
924 */
925 vm_size_t
926 ml_nofault_copy(vm_offset_t virtsrc, vm_offset_t virtdst, vm_size_t size)
927 {
928 addr64_t cur_phys_dst, cur_phys_src;
929 uint32_t count, nbytes = 0;
930
931 while (size > 0) {
932 if (!(cur_phys_src = kvtophys(virtsrc))) {
933 break;
934 }
935 if (!(cur_phys_dst = kvtophys(virtdst))) {
936 break;
937 }
938 if (!pmap_valid_address(trunc_page_64(cur_phys_dst)) ||
939 !pmap_valid_address(trunc_page_64(cur_phys_src))) {
940 break;
941 }
942 count = PAGE_SIZE - (cur_phys_src & PAGE_MASK);
943 if (count > (PAGE_SIZE - (cur_phys_dst & PAGE_MASK))) {
944 count = PAGE_SIZE - (cur_phys_dst & PAGE_MASK);
945 }
946 if (count > size) {
947 count = size;
948 }
949
950 bcopy_phys(cur_phys_src, cur_phys_dst, count);
951
952 nbytes += count;
953 virtsrc += count;
954 virtdst += count;
955 size -= count;
956 }
957
958 return nbytes;
959 }
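
/*
 * Illustrative caller sketch (not part of the original source): using
 * ml_nofault_copy() above and checking whether the whole range was copied.
 * The function name is hypothetical and the block is compiled out.
 */
#if 0 /* example only */
static boolean_t
example_nofault_copy_all(vm_offset_t src, vm_offset_t dst, vm_size_t len)
{
	/* ml_nofault_copy() returns the number of bytes copied before hitting an unmapped page. */
	return ml_nofault_copy(src, dst, len) == len;
}
#endif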
960
961 /*
962 * Routine: ml_validate_nofault
963  * Function: Validate that this address range has valid translations
964  * in the kernel pmap. If translations are present, they are
965  * assumed to be wired; i.e., no attempt is made to guarantee
966  * that the translations persist after the check.
967 * Returns: TRUE if the range is mapped and will not cause a fault,
968 * FALSE otherwise.
969 */
970
971 boolean_t
972 ml_validate_nofault(
973 vm_offset_t virtsrc, vm_size_t size)
974 {
975 addr64_t cur_phys_src;
976 uint32_t count;
977
978 while (size > 0) {
979 if (!(cur_phys_src = kvtophys(virtsrc))) {
980 return FALSE;
981 }
982 if (!pmap_valid_address(trunc_page_64(cur_phys_src))) {
983 return FALSE;
984 }
985 count = (uint32_t)(PAGE_SIZE - (cur_phys_src & PAGE_MASK));
986 if (count > size) {
987 count = (uint32_t)size;
988 }
989
990 virtsrc += count;
991 size -= count;
992 }
993
994 return TRUE;
995 }
996
997 void
998 ml_get_bouncepool_info(vm_offset_t * phys_addr, vm_size_t * size)
999 {
1000 *phys_addr = 0;
1001 *size = 0;
1002 }
1003
1004 /*
1005 * Stubs for CPU Stepper
1006 */
1007 void
1008 active_rt_threads(__unused boolean_t active)
1009 {
1010 }
1011
1012 void
1013 thread_tell_urgency(__unused thread_urgency_t urgency,
1014 __unused uint64_t rt_period,
1015 __unused uint64_t rt_deadline,
1016 __unused uint64_t sched_latency,
1017 __unused thread_t nthread)
1018 {
1019 }
1020
1021 void
1022 machine_run_count(__unused uint32_t count)
1023 {
1024 }
1025
1026 processor_t
1027 machine_choose_processor(__unused processor_set_t pset, processor_t processor)
1028 {
1029 return processor;
1030 }
1031
1032 boolean_t
1033 machine_timeout_suspended(void)
1034 {
1035 return FALSE;
1036 }
1037
1038 kern_return_t
1039 ml_interrupt_prewarm(__unused uint64_t deadline)
1040 {
1041 return KERN_FAILURE;
1042 }
1043
1044 uint64_t
1045 ml_get_hwclock(void)
1046 {
1047 uint64_t high_first = 0;
1048 uint64_t high_second = 0;
1049 uint64_t low = 0;
1050
1051 __builtin_arm_isb(ISB_SY);
1052         /* CNTPCT is read via MRRC p15, 0, c14; retry if the high word changed under us (low-word rollover). */
1053         do {
1054 high_first = __builtin_arm_mrrc(15, 0, 14) >> 32;
1055 low = __builtin_arm_mrrc(15, 0, 14) & 0xFFFFFFFFULL;
1056 high_second = __builtin_arm_mrrc(15, 0, 14) >> 32;
1057 } while (high_first != high_second);
1058
1059 return (high_first << 32) | (low);
1060 }
1061
1062 boolean_t
1063 ml_delay_should_spin(uint64_t interval)
1064 {
1065 cpu_data_t *cdp = getCpuDatap();
1066
1067 if (cdp->cpu_idle_latency) {
1068 return (interval < cdp->cpu_idle_latency) ? TRUE : FALSE;
1069 } else {
1070 /*
1071                  * Early in boot the idle latency is unknown. Err on the side of
1072                  * blocking, which should always be safe, even if slow.
1073 */
1074 return FALSE;
1075 }
1076 }
1077
1078 void
1079 ml_delay_on_yield(void)
1080 {
1081 }
1082
1083 boolean_t
1084 ml_thread_is64bit(thread_t thread)
1085 {
1086 return thread_is_64bit_addr(thread);
1087 }
1088
1089 void
1090 ml_timer_evaluate(void)
1091 {
1092 }
1093
1094 boolean_t
1095 ml_timer_forced_evaluation(void)
1096 {
1097 return FALSE;
1098 }
1099
1100 uint64_t
1101 ml_energy_stat(__unused thread_t t)
1102 {
1103 return 0;
1104 }
1105
1106
1107 void
1108 ml_gpu_stat_update(__unused uint64_t gpu_ns_delta)
1109 {
1110 /*
1111 * For now: update the resource coalition stats of the
1112 * current thread's coalition
1113 */
1114 task_coalition_update_gpu_stats(current_task(), gpu_ns_delta);
1115 }
1116
1117 uint64_t
1118 ml_gpu_stat(__unused thread_t t)
1119 {
1120 return 0;
1121 }
1122
1123 #if !CONFIG_SKIP_PRECISE_USER_KERNEL_TIME
1124 static void
1125 timer_state_event(boolean_t switch_to_kernel)
1126 {
1127 thread_t thread = current_thread();
1128 if (!thread->precise_user_kernel_time) {
1129 return;
1130 }
1131
1132 processor_t pd = current_processor();
1133 uint64_t now = ml_get_timebase();
1134
1135 timer_stop(pd->current_state, now);
1136 pd->current_state = (switch_to_kernel) ? &pd->system_state : &pd->user_state;
1137 timer_start(pd->current_state, now);
1138
1139 timer_stop(pd->thread_timer, now);
1140 pd->thread_timer = (switch_to_kernel) ? &thread->system_timer : &thread->user_timer;
1141 timer_start(pd->thread_timer, now);
1142 }
1143
1144 void
1145 timer_state_event_user_to_kernel(void)
1146 {
1147 timer_state_event(TRUE);
1148 }
1149
1150 void
1151 timer_state_event_kernel_to_user(void)
1152 {
1153 timer_state_event(FALSE);
1154 }
1155 #endif /* !CONFIG_SKIP_PRECISE_USER_KERNEL_TIME */
1156
1157 uint32_t
1158 get_arm_cpu_version(void)
1159 {
1160 uint32_t value = machine_read_midr();
1161
1162         /* Compose the MIDR fields into 8 bits: variant[7:4], revision[3:0] (worked example follows the function). */
1163 return ((value & MIDR_REV_MASK) >> MIDR_REV_SHIFT) | ((value & MIDR_VAR_MASK) >> (MIDR_VAR_SHIFT - 4));
1164 }
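
/*
 * Worked example (not part of the original source), assuming the usual MIDR
 * layout with revision in bits [3:0] and variant in bits [23:20]: for an
 * r2p1 part (variant 0x2, revision 0x1), the variant field shifts down by
 * MIDR_VAR_SHIFT - 4 = 16 into bits [7:4], so the function above returns
 * (0x1) | (0x2 << 4) = 0x21.
 */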
1165
1166 boolean_t
1167 user_cont_hwclock_allowed(void)
1168 {
1169 return FALSE;
1170 }
1171
1172 uint8_t
1173 user_timebase_type(void)
1174 {
1175 #if __ARM_TIME__
1176 return USER_TIMEBASE_SPEC;
1177 #else
1178 return USER_TIMEBASE_NONE;
1179 #endif
1180 }
1181
1182 /*
1183 * The following are required for parts of the kernel
1184 * that cannot resolve these functions as inlines:
1185 */
1186 extern thread_t current_act(void) __attribute__((const));
1187 thread_t
1188 current_act(void)
1189 {
1190 return current_thread_fast();
1191 }
1192
1193 #undef current_thread
1194 extern thread_t current_thread(void) __attribute__((const));
1195 thread_t
1196 current_thread(void)
1197 {
1198 return current_thread_fast();
1199 }
1200
1201 #if __ARM_USER_PROTECT__
1202 uintptr_t
1203 arm_user_protect_begin(thread_t thread)
1204 {
1205 uintptr_t ttbr0, asid = 0; // kernel asid
1206
1207 ttbr0 = __builtin_arm_mrc(15, 0, 2, 0, 0); // Get TTBR0
1208 if (ttbr0 != thread->machine.kptw_ttb) {
1209 __builtin_arm_mcr(15, 0, thread->machine.kptw_ttb, 2, 0, 0); // Set TTBR0
1210 __builtin_arm_mcr(15, 0, asid, 13, 0, 1); // Set CONTEXTIDR
1211 __builtin_arm_isb(ISB_SY);
1212 }
1213 return ttbr0;
1214 }
1215
1216 void
1217 arm_user_protect_end(thread_t thread, uintptr_t ttbr0, boolean_t disable_interrupts)
1218 {
1219 if ((ttbr0 != thread->machine.kptw_ttb) && (thread->machine.uptw_ttb != thread->machine.kptw_ttb)) {
1220 if (disable_interrupts) {
1221 __asm__ volatile ("cpsid if" ::: "memory"); // Disable FIQ/IRQ
1222 }
1223 __builtin_arm_mcr(15, 0, thread->machine.uptw_ttb, 2, 0, 0); // Set TTBR0
1224 __builtin_arm_mcr(15, 0, thread->machine.asid, 13, 0, 1); // Set CONTEXTIDR with thread asid
1225 __builtin_arm_dsb(DSB_ISH);
1226 __builtin_arm_isb(ISB_SY);
1227 }
1228 }
1229 #endif // __ARM_USER_PROTECT__
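
/*
 * Illustrative pairing sketch (not part of the original source): the helpers
 * above are intended to bracket kernel work that must run with the kernel
 * TTBR0 while __ARM_USER_PROTECT__ is in effect, e.g.:
 *
 *     uintptr_t ttbr0 = arm_user_protect_begin(thread);
 *     ... touch mappings not visible under the user TTBR0 ...
 *     arm_user_protect_end(thread, ttbr0, FALSE);
 */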
1230
1231 void
1232 machine_lockdown(void)
1233 {
1234 arm_vm_prot_finalize(PE_state.bootArgs);
1235 lockdown_done = 1;
1236 }
1237
1238 void
1239 ml_lockdown_init(void)
1240 {
1241 }
1242
1243 void
1244 ml_hibernate_active_pre(void)
1245 {
1246 }
1247
1248 void
1249 ml_hibernate_active_post(void)
1250 {
1251 }
1252
1253 size_t
1254 ml_get_vm_reserved_regions(bool vm_is64bit, struct vm_reserved_region **regions)
1255 {
1256 #pragma unused(vm_is64bit)
1257 assert(regions != NULL);
1258
1259 *regions = NULL;
1260 return 0;
1261 }