osfmk/arm/machine_routines.c (apple/xnu, xnu-4903.270.47)
1 /*
2 * Copyright (c) 2007-2016 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 #include <arm/proc_reg.h>
30 #include <arm/machine_cpu.h>
31 #include <arm/cpu_internal.h>
32 #include <arm/cpuid.h>
33 #include <arm/io_map_entries.h>
34 #include <arm/cpu_data.h>
35 #include <arm/cpu_data_internal.h>
36 #include <arm/misc_protos.h>
37 #include <arm/rtclock.h>
38 #include <arm/caches_internal.h>
39 #include <console/serial_protos.h>
40 #include <kern/machine.h>
41 #include <prng/random.h>
42 #include <kern/startup.h>
43 #include <kern/sched.h>
44 #include <kern/thread.h>
45 #include <mach/machine.h>
46 #include <machine/atomic.h>
47 #include <vm/pmap.h>
48 #include <vm/vm_page.h>
49 #include <sys/kdebug.h>
50 #include <kern/coalition.h>
51 #include <pexpert/device_tree.h>
52 #include <arm/cpuid_internal.h>
53
54 #include <IOKit/IOPlatformExpert.h>
55
56 #if KPC
57 #include <kern/kpc.h>
58 #endif
59
60 static int max_cpus_initialized = 0;
61 #define MAX_CPUS_SET 0x1
62 #define MAX_CPUS_WAIT 0x2
63
64 static unsigned int avail_cpus = 0;
65
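/*
 * Lock timeout globals, initialized by ml_init_lock_timeout(): LockTimeOut and
 * TLockTimeOut are expressed in absolute-time units, LockTimeOutUsec in
 * microseconds, and MutexSpin bounds how long a mutex is spun on before the
 * waiter blocks.
 */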
66 uint32_t LockTimeOut;
67 uint32_t LockTimeOutUsec;
68 uint64_t TLockTimeOut;
69 uint64_t MutexSpin;
70 boolean_t is_clock_configured = FALSE;
71
72 extern int mach_assert;
73 extern volatile uint32_t debug_enabled;
74
75 void machine_conf(void);
76
77 void
78 machine_startup(__unused boot_args * args)
79 {
80 int boot_arg;
81
82 PE_parse_boot_argn("assert", &mach_assert, sizeof(mach_assert));
83
84 if (PE_parse_boot_argn("preempt", &boot_arg, sizeof(boot_arg))) {
85 default_preemption_rate = boot_arg;
86 }
87 if (PE_parse_boot_argn("bg_preempt", &boot_arg, sizeof(boot_arg))) {
88 default_bg_preemption_rate = boot_arg;
89 }
90
91 machine_conf();
92
93 /*
94 * Kick off the kernel bootstrap.
95 */
96 kernel_bootstrap();
97 /* NOTREACHED */
98 }
99
100 char *
101 machine_boot_info(
102 __unused char *buf,
103 __unused vm_size_t size)
104 {
105 return PE_boot_args();
106 }
107
108 void
109 machine_conf(void)
110 {
111 machine_info.memory_size = mem_size;
112 }
113
114 void
115 machine_init(void)
116 {
117 debug_log_init();
118 clock_config();
119 is_clock_configured = TRUE;
120 if (debug_enabled) {
121 pmap_map_globals();
122 }
123 }
124
125 void
126 slave_machine_init(__unused void *param)
127 {
128 cpu_machine_init(); /* Initialize the processor */
129 clock_init(); /* Init the clock */
130 }
131
132 /*
133 * Routine: machine_processor_shutdown
134 * Function: switch to the shutdown context and invoke doshutdown for the processor
135 */
136 thread_t
137 machine_processor_shutdown(
138 __unused thread_t thread,
139 void (*doshutdown)(processor_t),
140 processor_t processor)
141 {
142 return Shutdown_context(doshutdown, processor);
143 }
144
145 /*
146 * Routine: ml_init_max_cpus
147 * Function: record the platform's maximum CPU count and wake any thread waiting for it
148 */
149 void
150 ml_init_max_cpus(unsigned int max_cpus)
151 {
152 boolean_t current_state;
153
154 current_state = ml_set_interrupts_enabled(FALSE);
155 if (max_cpus_initialized != MAX_CPUS_SET) {
156 machine_info.max_cpus = max_cpus;
157 machine_info.physical_cpu_max = max_cpus;
158 machine_info.logical_cpu_max = max_cpus;
159 if (max_cpus_initialized == MAX_CPUS_WAIT) {
160 thread_wakeup((event_t) &max_cpus_initialized);
161 }
162 max_cpus_initialized = MAX_CPUS_SET;
163 }
164 (void) ml_set_interrupts_enabled(current_state);
165 }
166
167 /*
168 * Routine: ml_get_max_cpus
169 * Function: return the maximum CPU count, blocking until ml_init_max_cpus() has set it
170 */
171 unsigned int
172 ml_get_max_cpus(void)
173 {
174 boolean_t current_state;
175
176 current_state = ml_set_interrupts_enabled(FALSE);
177 if (max_cpus_initialized != MAX_CPUS_SET) {
178 max_cpus_initialized = MAX_CPUS_WAIT;
179 assert_wait((event_t) &max_cpus_initialized, THREAD_UNINT);
180 (void) thread_block(THREAD_CONTINUE_NULL);
181 }
182 (void) ml_set_interrupts_enabled(current_state);
183 return machine_info.max_cpus;
184 }
185
186 /*
187 * Routine: ml_init_lock_timeout
188 * Function: derive the spin-lock and mutex-spin timeouts from the slto_us and mtxspin boot-args
189 */
190 void
191 ml_init_lock_timeout(void)
192 {
193 uint64_t abstime;
194 uint64_t mtxspin;
195 uint64_t default_timeout_ns = NSEC_PER_SEC >> 2;
196 uint32_t slto;
197
198 if (PE_parse_boot_argn("slto_us", &slto, sizeof(slto))) {
199 default_timeout_ns = slto * NSEC_PER_USEC;
200 }
201
202 nanoseconds_to_absolutetime(default_timeout_ns, &abstime);
203 LockTimeOutUsec = (uint32_t)(default_timeout_ns / NSEC_PER_USEC);
204 LockTimeOut = (uint32_t)abstime;
205 TLockTimeOut = LockTimeOut;
206
207 if (PE_parse_boot_argn("mtxspin", &mtxspin, sizeof(mtxspin))) {
208 if (mtxspin > USEC_PER_SEC >> 4) {
209 mtxspin = USEC_PER_SEC >> 4;
210 }
211 nanoseconds_to_absolutetime(mtxspin * NSEC_PER_USEC, &abstime);
212 } else {
213 nanoseconds_to_absolutetime(10 * NSEC_PER_USEC, &abstime);
214 }
215 MutexSpin = abstime;
216 }
217
218 /*
219 * This is called from the machine-independent routine cpu_up()
220 * to perform machine-dependent info updates.
221 */
222 void
223 ml_cpu_up(void)
224 {
225 hw_atomic_add(&machine_info.physical_cpu, 1);
226 hw_atomic_add(&machine_info.logical_cpu, 1);
227 }
228
229 /*
230 * This is called from the machine-independent routine cpu_down()
231 * to perform machine-dependent info updates.
232 */
233 void
234 ml_cpu_down(void)
235 {
236 cpu_data_t *cpu_data_ptr;
237
238 hw_atomic_sub(&machine_info.physical_cpu, 1);
239 hw_atomic_sub(&machine_info.logical_cpu, 1);
240
241 /*
242 * If we want to deal with outstanding IPIs, we need to
243 * do so relatively early in the processor_doshutdown path,
244 * as we pend decrementer interrupts using the IPI
245 * mechanism if we cannot immediately service them (if
246 * IRQ is masked). Do so now.
247 *
248 * We aren't on the interrupt stack here; would it make
249 * more sense to disable signaling and then enable
250 * interrupts? It might be a bit cleaner.
251 */
252 cpu_data_ptr = getCpuDatap();
253 cpu_data_ptr->cpu_running = FALSE;
254
255 cpu_signal_handler_internal(TRUE);
256 }
257
258 /*
259 * Routine: ml_cpu_get_info
260 * Function: report the cache line size and L1/L2 cache sizes of the current CPU
261 */
262 void
263 ml_cpu_get_info(ml_cpu_info_t * ml_cpu_info)
264 {
265 cache_info_t *cpuid_cache_info;
266
267 cpuid_cache_info = cache_info();
268 ml_cpu_info->vector_unit = 0;
269 ml_cpu_info->cache_line_size = cpuid_cache_info->c_linesz;
270 ml_cpu_info->l1_icache_size = cpuid_cache_info->c_isize;
271 ml_cpu_info->l1_dcache_size = cpuid_cache_info->c_dsize;
272
273 #if (__ARM_ARCH__ >= 7)
274 ml_cpu_info->l2_settings = 1;
275 ml_cpu_info->l2_cache_size = cpuid_cache_info->c_l2size;
276 #else
277 ml_cpu_info->l2_settings = 0;
278 ml_cpu_info->l2_cache_size = 0xFFFFFFFF;
279 #endif
280 ml_cpu_info->l3_settings = 0;
281 ml_cpu_info->l3_cache_size = 0xFFFFFFFF;
282 }
283
284 unsigned int
285 ml_get_machine_mem(void)
286 {
287 return machine_info.memory_size;
288 }
289
290 /* Return max offset */
291 vm_map_offset_t
292 ml_get_max_offset(
293 boolean_t is64,
294 unsigned int option)
295 {
296 unsigned int pmap_max_offset_option = 0;
297
298 switch (option) {
299 case MACHINE_MAX_OFFSET_DEFAULT:
300 pmap_max_offset_option = ARM_PMAP_MAX_OFFSET_DEFAULT;
301 break;
302 case MACHINE_MAX_OFFSET_MIN:
303 pmap_max_offset_option = ARM_PMAP_MAX_OFFSET_MIN;
304 break;
305 case MACHINE_MAX_OFFSET_MAX:
306 pmap_max_offset_option = ARM_PMAP_MAX_OFFSET_MAX;
307 break;
308 case MACHINE_MAX_OFFSET_DEVICE:
309 pmap_max_offset_option = ARM_PMAP_MAX_OFFSET_DEVICE;
310 break;
311 default:
312 panic("ml_get_max_offset(): Illegal option 0x%x\n", option);
313 break;
314 }
315 return pmap_max_offset(is64, pmap_max_offset_option);
316 }
317
318 boolean_t
319 ml_wants_panic_trap_to_debugger(void)
320 {
321 return FALSE;
322 }
323
324 void
325 ml_panic_trap_to_debugger(__unused const char *panic_format_str,
326 __unused va_list *panic_args,
327 __unused unsigned int reason,
328 __unused void *ctx,
329 __unused uint64_t panic_options_mask,
330 __unused unsigned long panic_caller)
331 {
332 return;
333 }
334
335 __attribute__((noreturn))
336 void
337 halt_all_cpus(boolean_t reboot)
338 {
339 if (reboot) {
340 printf("MACH Reboot\n");
341 PEHaltRestart(kPERestartCPU);
342 } else {
343 printf("CPU halted\n");
344 PEHaltRestart(kPEHaltCPU);
345 }
346 while (1) {
347 ;
348 }
349 }
350
351 __attribute__((noreturn))
352 void
353 halt_cpu(void)
354 {
355 halt_all_cpus(FALSE);
356 }
357
358 /*
359 * Routine: machine_signal_idle
360 * Function: wake an idle processor by sending it a SIGPnop cross-CPU signal
361 */
362 void
363 machine_signal_idle(
364 processor_t processor)
365 {
366 cpu_signal(processor_to_cpu_datap(processor), SIGPnop, (void *)NULL, (void *)NULL);
367 KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_REMOTE_AST), processor->cpu_id, 0 /* nop */, 0, 0, 0);
368 }
369
370 void
371 machine_signal_idle_deferred(
372 processor_t processor)
373 {
374 cpu_signal_deferred(processor_to_cpu_datap(processor));
375 KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_REMOTE_DEFERRED_AST), processor->cpu_id, 0 /* nop */, 0, 0, 0);
376 }
377
378 void
379 machine_signal_idle_cancel(
380 processor_t processor)
381 {
382 cpu_signal_cancel(processor_to_cpu_datap(processor));
383 KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_REMOTE_CANCEL_AST), processor->cpu_id, 0 /* nop */, 0, 0, 0);
384 }
385
386 /*
387 * Routine: ml_install_interrupt_handler
388 * Function: Initialize Interrupt Handler
389 */
390 void
391 ml_install_interrupt_handler(
392 void *nub,
393 int source,
394 void *target,
395 IOInterruptHandler handler,
396 void *refCon)
397 {
398 cpu_data_t *cpu_data_ptr;
399 boolean_t current_state;
400
401 current_state = ml_set_interrupts_enabled(FALSE);
402 cpu_data_ptr = getCpuDatap();
403
404 cpu_data_ptr->interrupt_nub = nub;
405 cpu_data_ptr->interrupt_source = source;
406 cpu_data_ptr->interrupt_target = target;
407 cpu_data_ptr->interrupt_handler = handler;
408 cpu_data_ptr->interrupt_refCon = refCon;
409
410 cpu_data_ptr->interrupts_enabled = TRUE;
411 (void) ml_set_interrupts_enabled(current_state);
412
413 initialize_screen(NULL, kPEAcquireScreen);
414 }
415
416 /*
417 * Routine: ml_init_interrupt
418 * Function: Initialize Interrupts
419 */
420 void
421 ml_init_interrupt(void)
422 {
423 }
424
425 /*
426 * Routine: ml_init_timebase
427 * Function: register and set up the timebase and decrementer services
428 */
429 void
430 ml_init_timebase(
431 void *args,
432 tbd_ops_t tbd_funcs,
433 vm_offset_t int_address,
434 vm_offset_t int_value)
435 {
436 cpu_data_t *cpu_data_ptr;
437
438 cpu_data_ptr = (cpu_data_t *)args;
439
440 if ((cpu_data_ptr == &BootCpuData)
441 && (rtclock_timebase_func.tbd_fiq_handler == (void *)NULL)) {
442 rtclock_timebase_func = *tbd_funcs;
443 rtclock_timebase_addr = int_address;
444 rtclock_timebase_val = int_value;
445 }
446 }
447
448 void
449 fiq_context_bootstrap(boolean_t enable_fiq)
450 {
451 fiq_context_init(enable_fiq);
452 }
453
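/*
 * Count the CPUs the platform reports by walking the /cpus node of the device
 * tree, clamp the count with the "cpus" boot-arg if one is supplied, and panic
 * if no CPUs are found.
 */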
454 void
455 ml_parse_cpu_topology(void)
456 {
457 DTEntry entry, child;
458 OpaqueDTEntryIterator iter;
459 uint32_t cpu_boot_arg;
460 int err;
461
462 err = DTLookupEntry(NULL, "/cpus", &entry);
463 assert(err == kSuccess);
464
465 err = DTInitEntryIterator(entry, &iter);
466 assert(err == kSuccess);
467
468 while (kSuccess == DTIterateEntries(&iter, &child)) {
469 #if MACH_ASSERT
470 unsigned int propSize;
471 void *prop = NULL;
472 if (avail_cpus == 0) {
473 if (kSuccess != DTGetProperty(child, "state", &prop, &propSize)) {
474 panic("unable to retrieve state for cpu %u", avail_cpus);
475 }
476
477 if (strncmp((char*)prop, "running", propSize) != 0) {
478 panic("cpu 0 has not been marked as running!");
479 }
480 }
481 assert(kSuccess == DTGetProperty(child, "reg", &prop, &propSize));
482 assert(avail_cpus == *((uint32_t*)prop));
483 #endif
484 ++avail_cpus;
485 }
486
487 cpu_boot_arg = avail_cpus;
488 if (PE_parse_boot_argn("cpus", &cpu_boot_arg, sizeof(cpu_boot_arg)) &&
489 (avail_cpus > cpu_boot_arg)) {
490 avail_cpus = cpu_boot_arg;
491 }
492
493 if (avail_cpus == 0) {
494 panic("No cpus found!");
495 }
496 }
497
498 unsigned int
499 ml_get_cpu_count(void)
500 {
501 return avail_cpus;
502 }
503
504 int
505 ml_get_boot_cpu_number(void)
506 {
507 return 0;
508 }
509
510 cluster_type_t
511 ml_get_boot_cluster(void)
512 {
513 return CLUSTER_TYPE_SMP;
514 }
515
516 int
517 ml_get_cpu_number(uint32_t phys_id)
518 {
519 return (int)phys_id;
520 }
521
522 int
523 ml_get_max_cpu_number(void)
524 {
525 return avail_cpus - 1;
526 }
527
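/*
 * Register a processor described by in_processor_info: validate its physical
 * and logical IDs against MAX_CPUS and the boot-arg-limited CPU count, set up
 * (or, for the boot CPU, reuse) its cpu_data_t and console buffer, wire up the
 * platform idle/cache/error callbacks, and return the processor_t together
 * with the IPI handler. Failures unwind any per-CPU state allocated here.
 */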
528 kern_return_t
529 ml_processor_register(ml_processor_info_t *in_processor_info,
530 processor_t * processor_out, ipi_handler_t *ipi_handler_out,
531 perfmon_interrupt_handler_func *pmi_handler_out)
532 {
533 cpu_data_t *this_cpu_datap;
534 boolean_t is_boot_cpu;
535
536 if (in_processor_info->phys_id >= MAX_CPUS) {
537 /*
538 * The physical CPU ID indicates that we have more CPUs than
539 * this xnu build supports. This probably means we have an
540 * incorrect board configuration.
541 *
542 * TODO: Should this just return a failure instead? A panic
543 * is simply a convenient way to catch bugs in the pexpert
544 * headers.
545 */
546 panic("phys_id %u is too large for MAX_CPUS (%u)", in_processor_info->phys_id, MAX_CPUS);
547 }
548
549 /* Fail the registration if the number of CPUs has been limited by boot-arg. */
550 if ((in_processor_info->phys_id >= avail_cpus) ||
551 (in_processor_info->log_id > (uint32_t)ml_get_max_cpu_number())) {
552 return KERN_FAILURE;
553 }
554
555 if (in_processor_info->log_id != (uint32_t)ml_get_boot_cpu_number()) {
556 is_boot_cpu = FALSE;
557 this_cpu_datap = cpu_data_alloc(FALSE);
558 cpu_data_init(this_cpu_datap);
559 } else {
560 this_cpu_datap = &BootCpuData;
561 is_boot_cpu = TRUE;
562 }
563
564 this_cpu_datap->cpu_id = in_processor_info->cpu_id;
565
566 this_cpu_datap->cpu_console_buf = console_cpu_alloc(is_boot_cpu);
567 if (this_cpu_datap->cpu_console_buf == (void *)(NULL)) {
568 goto processor_register_error;
569 }
570
571 if (!is_boot_cpu) {
572 if (cpu_data_register(this_cpu_datap) != KERN_SUCCESS) {
573 goto processor_register_error;
574 }
575 }
576
577 this_cpu_datap->cpu_idle_notify = (void *) in_processor_info->processor_idle;
578 this_cpu_datap->cpu_cache_dispatch = in_processor_info->platform_cache_dispatch;
579 nanoseconds_to_absolutetime((uint64_t) in_processor_info->powergate_latency, &this_cpu_datap->cpu_idle_latency);
580 this_cpu_datap->cpu_reset_assist = kvtophys(in_processor_info->powergate_stub_addr);
581
582 this_cpu_datap->idle_timer_notify = (void *) in_processor_info->idle_timer;
583 this_cpu_datap->idle_timer_refcon = in_processor_info->idle_timer_refcon;
584
585 this_cpu_datap->platform_error_handler = (void *) in_processor_info->platform_error_handler;
586 this_cpu_datap->cpu_regmap_paddr = in_processor_info->regmap_paddr;
587 this_cpu_datap->cpu_phys_id = in_processor_info->phys_id;
588 this_cpu_datap->cpu_l2_access_penalty = in_processor_info->l2_access_penalty;
589
590 if (!is_boot_cpu) {
591 processor_init((struct processor *)this_cpu_datap->cpu_processor,
592 this_cpu_datap->cpu_number, processor_pset(master_processor));
593
594 if (this_cpu_datap->cpu_l2_access_penalty) {
595 /*
596 * Cores that have a non-zero L2 access penalty compared
597 * to the boot processor should be de-prioritized by the
598 * scheduler, so that threads use the cores with better L2
599 * preferentially.
600 */
601 processor_set_primary(this_cpu_datap->cpu_processor,
602 master_processor);
603 }
604 }
605
606 *processor_out = this_cpu_datap->cpu_processor;
607 *ipi_handler_out = cpu_signal_handler;
608 *pmi_handler_out = NULL;
609 if (in_processor_info->idle_tickle != (idle_tickle_t *) NULL) {
610 *in_processor_info->idle_tickle = (idle_tickle_t) cpu_idle_tickle;
611 }
612
613 #if KPC
614 if (kpc_register_cpu(this_cpu_datap) != TRUE) {
615 goto processor_register_error;
616 }
617 #endif
618
619 if (!is_boot_cpu) {
620 early_random_cpu_init(this_cpu_datap->cpu_number);
621 }
622
623 return KERN_SUCCESS;
624
625 processor_register_error:
626 #if KPC
627 kpc_unregister_cpu(this_cpu_datap);
628 #endif
629 if (!is_boot_cpu) {
630 cpu_data_free(this_cpu_datap);
631 }
632 return KERN_FAILURE;
633 }
634
635 void
636 ml_init_arm_debug_interface(
637 void * in_cpu_datap,
638 vm_offset_t virt_address)
639 {
640 ((cpu_data_t *)in_cpu_datap)->cpu_debug_interface_map = virt_address;
641 do_debugid();
642 }
643
644 /*
645 * Routine: init_ast_check
646 * Function: nothing to do on ARM
647 */
648 void
649 init_ast_check(
650 __unused processor_t processor)
651 {
652 }
653
654 /*
655 * Routine: cause_ast_check
656 * Function: signal a remote processor to check for a pending AST
657 */
658 void
659 cause_ast_check(
660 processor_t processor)
661 {
662 if (current_processor() != processor) {
663 cpu_signal(processor_to_cpu_datap(processor), SIGPast, (void *)NULL, (void *)NULL);
664 KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_REMOTE_AST), processor->cpu_id, 1 /* ast */, 0, 0, 0);
665 }
666 }
667
668 extern uint32_t cpu_idle_count;
669
670 void
671 ml_get_power_state(boolean_t *icp, boolean_t *pidlep)
672 {
673 *icp = ml_at_interrupt_context();
674 *pidlep = (cpu_idle_count == real_ncpus);
675 }
676
677 /*
678 * Routine: ml_cause_interrupt
679 * Function: Generate a fake interrupt
680 */
681 void
682 ml_cause_interrupt(void)
683 {
684 return; /* BS_XXX */
685 }
686
687 /* Map memory-mapped IO space */
688 vm_offset_t
689 ml_io_map(
690 vm_offset_t phys_addr,
691 vm_size_t size)
692 {
693 return io_map(phys_addr, size, VM_WIMG_IO);
694 }
695
696 vm_offset_t
697 ml_io_map_wcomb(
698 vm_offset_t phys_addr,
699 vm_size_t size)
700 {
701 return io_map(phys_addr, size, VM_WIMG_WCOMB);
702 }
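/*
 * Illustrative (hypothetical) use from platform code: map a device register
 * window uncached and access it through the returned virtual address. The
 * names soc_uart_phys and UART_REGS_SIZE are placeholders, not symbols defined
 * in this file.
 *
 *     vm_offset_t uart_base = ml_io_map(soc_uart_phys, UART_REGS_SIZE);
 *     volatile uint32_t *uart_regs = (volatile uint32_t *)uart_base;
 */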
703
704 /* boot memory allocation */
705 vm_offset_t
706 ml_static_malloc(
707 __unused vm_size_t size)
708 {
709 return (vm_offset_t) NULL;
710 }
711
712 vm_map_address_t
713 ml_map_high_window(
714 vm_offset_t phys_addr,
715 vm_size_t len)
716 {
717 return pmap_map_high_window_bd(phys_addr, len, VM_PROT_READ | VM_PROT_WRITE);
718 }
719
720 vm_offset_t
721 ml_static_ptovirt(
722 vm_offset_t paddr)
723 {
724 return phystokv(paddr);
725 }
726
727 vm_offset_t
728 ml_static_vtop(
729 vm_offset_t vaddr)
730 {
731 if (((vm_address_t)(vaddr) - gVirtBase) >= gPhysSize) {
732 panic("ml_static_ptovirt(): illegal vaddr: %p\n", (void*)vaddr);
733 }
734 return (vm_address_t)(vaddr) - gVirtBase + gPhysBase;
735 }
736
737 vm_offset_t
738 ml_static_slide(
739 vm_offset_t vaddr)
740 {
741 return VM_KERNEL_SLIDE(vaddr);
742 }
743
744 vm_offset_t
745 ml_static_unslide(
746 vm_offset_t vaddr)
747 {
748 return VM_KERNEL_UNSLIDE(vaddr);
749 }
750
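/*
 * Change the protection of a page-aligned range of statically mapped kernel
 * memory by rewriting the AP and NX bits of the underlying PTEs. W+X requests
 * panic; block (section) mappings are accepted only if they already carry the
 * requested protection. The TLB is flushed for whatever part of the range was
 * processed.
 */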
751 kern_return_t
752 ml_static_protect(
753 vm_offset_t vaddr, /* kernel virtual address */
754 vm_size_t size,
755 vm_prot_t new_prot)
756 {
757 pt_entry_t arm_prot = 0;
758 pt_entry_t arm_block_prot = 0;
759 vm_offset_t vaddr_cur;
760 ppnum_t ppn;
761 kern_return_t result = KERN_SUCCESS;
762
763 if (vaddr < VM_MIN_KERNEL_ADDRESS) {
764 return KERN_FAILURE;
765 }
766
767 assert((vaddr & (ARM_PGBYTES - 1)) == 0); /* must be page aligned */
768
769 if ((new_prot & VM_PROT_WRITE) && (new_prot & VM_PROT_EXECUTE)) {
770 panic("ml_static_protect(): WX request on %p", (void *) vaddr);
771 }
772
773 /* Set up the protection bits, and block bits so we can validate block mappings. */
774 if (new_prot & VM_PROT_WRITE) {
775 arm_prot |= ARM_PTE_AP(AP_RWNA);
776 arm_block_prot |= ARM_TTE_BLOCK_AP(AP_RWNA);
777 } else {
778 arm_prot |= ARM_PTE_AP(AP_RONA);
779 arm_block_prot |= ARM_TTE_BLOCK_AP(AP_RONA);
780 }
781
782 if (!(new_prot & VM_PROT_EXECUTE)) {
783 arm_prot |= ARM_PTE_NX;
784 arm_block_prot |= ARM_TTE_BLOCK_NX;
785 }
786
787 for (vaddr_cur = vaddr;
788 vaddr_cur < ((vaddr + size) & ~ARM_PGMASK);
789 vaddr_cur += ARM_PGBYTES) {
790 ppn = pmap_find_phys(kernel_pmap, vaddr_cur);
791 if (ppn != (vm_offset_t) NULL) {
792 tt_entry_t *ttp = &kernel_pmap->tte[ttenum(vaddr_cur)];
793 tt_entry_t tte = *ttp;
794
795 if ((tte & ARM_TTE_TYPE_MASK) != ARM_TTE_TYPE_TABLE) {
796 if (((tte & ARM_TTE_TYPE_MASK) == ARM_TTE_TYPE_BLOCK) &&
797 ((tte & (ARM_TTE_BLOCK_APMASK | ARM_TTE_BLOCK_NX_MASK)) == arm_block_prot)) {
798 /*
799 * We can support ml_static_protect on a block mapping if the mapping already has
800 * the desired protections. We still want to run checks on a per-page basis.
801 */
802 continue;
803 }
804
805 result = KERN_FAILURE;
806 break;
807 }
808
809 pt_entry_t *pte_p = (pt_entry_t *) ttetokv(tte) + ptenum(vaddr_cur);
810 pt_entry_t ptmp = *pte_p;
811
812 ptmp = (ptmp & ~(ARM_PTE_APMASK | ARM_PTE_NX_MASK)) | arm_prot;
813 *pte_p = ptmp;
814 #ifndef __ARM_L1_PTW__
815 FlushPoC_DcacheRegion((vm_offset_t) pte_p, sizeof(*pte_p));
816 #endif
817 }
818 }
819
820 if (vaddr_cur > vaddr) {
821 flush_mmu_tlb_region(vaddr, (vm_size_t)(vaddr_cur - vaddr));
822 }
823
824 return result;
825 }
826
827 /*
828 * Routine: ml_static_mfree
829 * Function: return a range of wired boot memory to the VM as free pages
830 */
831 void
832 ml_static_mfree(
833 vm_offset_t vaddr,
834 vm_size_t size)
835 {
836 vm_offset_t vaddr_cur;
837 ppnum_t ppn;
838 uint32_t freed_pages = 0;
839
840 /* It is acceptable (if bad) to fail to free. */
841 if (vaddr < VM_MIN_KERNEL_ADDRESS) {
842 return;
843 }
844
845 assert((vaddr & (PAGE_SIZE - 1)) == 0); /* must be page aligned */
846
847 for (vaddr_cur = vaddr;
848 vaddr_cur < trunc_page_32(vaddr + size);
849 vaddr_cur += PAGE_SIZE) {
850 ppn = pmap_find_phys(kernel_pmap, vaddr_cur);
851 if (ppn != (vm_offset_t) NULL) {
852 /*
853 * It is not acceptable to fail to update the protections on a page
854 * we will release to the VM. We need to either panic or continue.
855 * For now, we'll panic (to help flag if there is memory we can
856 * reclaim).
857 */
858 if (ml_static_protect(vaddr_cur, PAGE_SIZE, VM_PROT_WRITE | VM_PROT_READ) != KERN_SUCCESS) {
859 panic("Failed ml_static_mfree on %p", (void *) vaddr_cur);
860 }
861 #if 0
862 /*
863 * Must NOT tear down the "V==P" mapping for vaddr_cur as the zone alias scheme
864 * relies on the persistence of these mappings for all time.
865 */
866 // pmap_remove(kernel_pmap, (addr64_t) vaddr_cur, (addr64_t) (vaddr_cur + PAGE_SIZE));
867 #endif
868 vm_page_create(ppn, (ppn + 1));
869 freed_pages++;
870 }
871 }
872 vm_page_lockspin_queues();
873 vm_page_wire_count -= freed_pages;
874 vm_page_wire_count_initial -= freed_pages;
875 vm_page_unlock_queues();
876 #if DEBUG
877 kprintf("ml_static_mfree: Released 0x%x pages at VA %p, size:0x%llx, last ppn: 0x%x\n", freed_pages, (void *)vaddr, (uint64_t)size, ppn);
878 #endif
879 }
880
881
882 /* virtual to physical on wired pages */
883 vm_offset_t
884 ml_vtophys(vm_offset_t vaddr)
885 {
886 return kvtophys(vaddr);
887 }
888
889 /*
890 * Routine: ml_nofault_copy
891 * Function: Perform a physical mode copy if the source and destination have
892 * valid translations in the kernel pmap. If translations are present, they are
893 * assumed to be wired; i.e., no attempt is made to guarantee that the
894 * translations obtained remain valid for the duration of the copy process.
895 */
896 vm_size_t
897 ml_nofault_copy(vm_offset_t virtsrc, vm_offset_t virtdst, vm_size_t size)
898 {
899 addr64_t cur_phys_dst, cur_phys_src;
900 uint32_t count, nbytes = 0;
901
902 while (size > 0) {
903 if (!(cur_phys_src = kvtophys(virtsrc))) {
904 break;
905 }
906 if (!(cur_phys_dst = kvtophys(virtdst))) {
907 break;
908 }
909 if (!pmap_valid_address(trunc_page_64(cur_phys_dst)) ||
910 !pmap_valid_address(trunc_page_64(cur_phys_src))) {
911 break;
912 }
913 count = PAGE_SIZE - (cur_phys_src & PAGE_MASK);
914 if (count > (PAGE_SIZE - (cur_phys_dst & PAGE_MASK))) {
915 count = PAGE_SIZE - (cur_phys_dst & PAGE_MASK);
916 }
917 if (count > size) {
918 count = size;
919 }
920
921 bcopy_phys(cur_phys_src, cur_phys_dst, count);
922
923 nbytes += count;
924 virtsrc += count;
925 virtdst += count;
926 size -= count;
927 }
928
929 return nbytes;
930 }
931
932 /*
933 * Routine: ml_validate_nofault
934 * Function: Validate that this address range has valid translations
935 * in the kernel pmap. If translations are present, they are
936 * assumed to be wired; i.e., no attempt is made to guarantee
937 * that the translations persist after the check.
938 * Returns: TRUE if the range is mapped and will not cause a fault,
939 * FALSE otherwise.
940 */
941
942 boolean_t
943 ml_validate_nofault(
944 vm_offset_t virtsrc, vm_size_t size)
945 {
946 addr64_t cur_phys_src;
947 uint32_t count;
948
949 while (size > 0) {
950 if (!(cur_phys_src = kvtophys(virtsrc))) {
951 return FALSE;
952 }
953 if (!pmap_valid_address(trunc_page_64(cur_phys_src))) {
954 return FALSE;
955 }
956 count = (uint32_t)(PAGE_SIZE - (cur_phys_src & PAGE_MASK));
957 if (count > size) {
958 count = (uint32_t)size;
959 }
960
961 virtsrc += count;
962 size -= count;
963 }
964
965 return TRUE;
966 }
967
968 void
969 ml_get_bouncepool_info(vm_offset_t * phys_addr, vm_size_t * size)
970 {
971 *phys_addr = 0;
972 *size = 0;
973 }
974
975 /*
976 * Stubs for CPU Stepper
977 */
978 void
979 active_rt_threads(__unused boolean_t active)
980 {
981 }
982
983 void
984 thread_tell_urgency(__unused thread_urgency_t urgency,
985 __unused uint64_t rt_period,
986 __unused uint64_t rt_deadline,
987 __unused uint64_t sched_latency,
988 __unused thread_t nthread)
989 {
990 }
991
992 void
993 machine_run_count(__unused uint32_t count)
994 {
995 }
996
997 processor_t
998 machine_choose_processor(__unused processor_set_t pset, processor_t processor)
999 {
1000 return processor;
1001 }
1002
1003 boolean_t
1004 machine_timeout_suspended(void)
1005 {
1006 return FALSE;
1007 }
1008
1009 kern_return_t
1010 ml_interrupt_prewarm(__unused uint64_t deadline)
1011 {
1012 return KERN_FAILURE;
1013 }
1014
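/*
 * Read the 64-bit generic-timer count (CNTPCT, CP15 c14). The high word is
 * sampled before and after the low word and the read is retried until both
 * samples agree, so a carry between the two halves cannot produce a torn
 * value.
 */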
1015 uint64_t
1016 ml_get_hwclock(void)
1017 {
1018 uint64_t high_first = 0;
1019 uint64_t high_second = 0;
1020 uint64_t low = 0;
1021
1022 __builtin_arm_isb(ISB_SY);
1023
1024 do {
1025 high_first = __builtin_arm_mrrc(15, 0, 14) >> 32;
1026 low = __builtin_arm_mrrc(15, 0, 14) & 0xFFFFFFFFULL;
1027 high_second = __builtin_arm_mrrc(15, 0, 14) >> 32;
1028 } while (high_first != high_second);
1029
1030 return (high_first << 32) | (low);
1031 }
1032
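/*
 * A delay shorter than this CPU's idle-entry latency is cheaper to spin
 * through than to sleep through: once cpu_idle_latency is known, intervals
 * below it return TRUE (spin) and everything else returns FALSE (block).
 */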
1033 boolean_t
1034 ml_delay_should_spin(uint64_t interval)
1035 {
1036 cpu_data_t *cdp = getCpuDatap();
1037
1038 if (cdp->cpu_idle_latency) {
1039 return (interval < cdp->cpu_idle_latency) ? TRUE : FALSE;
1040 } else {
1041 /*
1042 * Early boot, latency is unknown. Err on the side of blocking,
1043 * which should always be safe, even if slow
1044 */
1045 return FALSE;
1046 }
1047 }
1048
1049 void
1050 ml_delay_on_yield(void)
1051 {
1052 }
1053
1054 boolean_t
1055 ml_thread_is64bit(thread_t thread)
1056 {
1057 return thread_is_64bit_addr(thread);
1058 }
1059
1060 void
1061 ml_timer_evaluate(void)
1062 {
1063 }
1064
1065 boolean_t
1066 ml_timer_forced_evaluation(void)
1067 {
1068 return FALSE;
1069 }
1070
1071 uint64_t
1072 ml_energy_stat(__unused thread_t t)
1073 {
1074 return 0;
1075 }
1076
1077
1078 void
1079 ml_gpu_stat_update(__unused uint64_t gpu_ns_delta)
1080 {
1081 #if CONFIG_EMBEDDED
1082 /*
1083 * For now: update the resource coalition stats of the
1084 * current thread's coalition
1085 */
1086 task_coalition_update_gpu_stats(current_task(), gpu_ns_delta);
1087 #endif
1088 }
1089
1090 uint64_t
1091 ml_gpu_stat(__unused thread_t t)
1092 {
1093 return 0;
1094 }
1095
1096 #if !CONFIG_SKIP_PRECISE_USER_KERNEL_TIME
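/*
 * Precise user/kernel time accounting: at each boundary crossing, stop the
 * processor's current-state timer and the current thread timer at 'now', then
 * restart accounting against the system-side or user-side counterparts.
 */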
1097 static void
1098 timer_state_event(boolean_t switch_to_kernel)
1099 {
1100 thread_t thread = current_thread();
1101 if (!thread->precise_user_kernel_time) {
1102 return;
1103 }
1104
1105 processor_data_t *pd = &getCpuDatap()->cpu_processor->processor_data;
1106 uint64_t now = ml_get_timebase();
1107
1108 timer_stop(pd->current_state, now);
1109 pd->current_state = (switch_to_kernel) ? &pd->system_state : &pd->user_state;
1110 timer_start(pd->current_state, now);
1111
1112 timer_stop(pd->thread_timer, now);
1113 pd->thread_timer = (switch_to_kernel) ? &thread->system_timer : &thread->user_timer;
1114 timer_start(pd->thread_timer, now);
1115 }
1116
1117 void
1118 timer_state_event_user_to_kernel(void)
1119 {
1120 timer_state_event(TRUE);
1121 }
1122
1123 void
1124 timer_state_event_kernel_to_user(void)
1125 {
1126 timer_state_event(FALSE);
1127 }
1128 #endif /* !CONFIG_SKIP_PRECISE_USER_KERNEL_TIME */
1129
1130 uint32_t
1131 get_arm_cpu_version(void)
1132 {
1133 uint32_t value = machine_read_midr();
1134
1135 /* Compose the register values into 8 bits; variant[7:4], revision[3:0]. */
1136 return ((value & MIDR_REV_MASK) >> MIDR_REV_SHIFT) | ((value & MIDR_VAR_MASK) >> (MIDR_VAR_SHIFT - 4));
1137 }
1138
1139 boolean_t
1140 user_cont_hwclock_allowed(void)
1141 {
1142 return FALSE;
1143 }
1144
1145 boolean_t
1146 user_timebase_allowed(void)
1147 {
1148 #if __ARM_TIME__
1149 return TRUE;
1150 #else
1151 return FALSE;
1152 #endif
1153 }
1154
1155 /*
1156 * The following are required for parts of the kernel
1157 * that cannot resolve these functions as inlines:
1158 */
1159 extern thread_t current_act(void);
1160 thread_t
1161 current_act(void)
1162 {
1163 return current_thread_fast();
1164 }
1165
1166 #undef current_thread
1167 extern thread_t current_thread(void);
1168 thread_t
1169 current_thread(void)
1170 {
1171 return current_thread_fast();
1172 }
1173
1174 #if __ARM_USER_PROTECT__
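/*
 * With __ARM_USER_PROTECT__, the kernel runs on its own translation table.
 * arm_user_protect_begin() switches TTBR0 to the thread's kernel table (and
 * CONTEXTIDR to the kernel ASID) when needed and returns the previous TTBR0;
 * arm_user_protect_end() restores the thread's user table and ASID, optionally
 * masking IRQ/FIQ around the switch.
 */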
1175 uintptr_t
1176 arm_user_protect_begin(thread_t thread)
1177 {
1178 uintptr_t ttbr0, asid = 0; // kernel asid
1179
1180 ttbr0 = __builtin_arm_mrc(15, 0, 2, 0, 0); // Get TTBR0
1181 if (ttbr0 != thread->machine.kptw_ttb) {
1182 __builtin_arm_mcr(15, 0, thread->machine.kptw_ttb, 2, 0, 0); // Set TTBR0
1183 __builtin_arm_mcr(15, 0, asid, 13, 0, 1); // Set CONTEXTIDR
1184 __builtin_arm_isb(ISB_SY);
1185 }
1186 return ttbr0;
1187 }
1188
1189 void
1190 arm_user_protect_end(thread_t thread, uintptr_t ttbr0, boolean_t disable_interrupts)
1191 {
1192 if ((ttbr0 != thread->machine.kptw_ttb) && (thread->machine.uptw_ttb != thread->machine.kptw_ttb)) {
1193 if (disable_interrupts) {
1194 __asm__ volatile ("cpsid if" ::: "memory"); // Disable FIQ/IRQ
1195 }
1196 __builtin_arm_mcr(15, 0, thread->machine.uptw_ttb, 2, 0, 0); // Set TTBR0
1197 __builtin_arm_mcr(15, 0, thread->machine.asid, 13, 0, 1); // Set CONTEXTIDR with thread asid
1198 __builtin_arm_dsb(DSB_ISH);
1199 __builtin_arm_isb(ISB_SY);
1200 }
1201 }
1202 #endif // __ARM_USER_PROTECT__