/*
 * Copyright (c) 2007-2016 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <arm/proc_reg.h>
#include <arm/machine_cpu.h>
#include <arm/cpu_internal.h>
#include <arm/cpuid.h>
#include <arm/io_map_entries.h>
#include <arm/cpu_data.h>
#include <arm/cpu_data_internal.h>
#include <arm/misc_protos.h>
#include <arm/rtclock.h>
#include <arm/caches_internal.h>
#include <console/serial_protos.h>
#include <kern/machine.h>
#include <prng/random.h>
#include <kern/startup.h>
#include <kern/sched.h>
#include <kern/thread.h>
#include <mach/machine.h>
#include <machine/atomic.h>
#include <vm/pmap.h>
#include <vm/vm_page.h>
#include <sys/kdebug.h>
#include <kern/coalition.h>
#include <pexpert/device_tree.h>

#include <IOKit/IOPlatformExpert.h>

#if KPC
#include <kern/kpc.h>
#endif

static int max_cpus_initialized = 0;
#define MAX_CPUS_SET 0x1
#define MAX_CPUS_WAIT 0x2

static unsigned int avail_cpus = 0;

uint32_t LockTimeOut;
uint32_t LockTimeOutUsec;
uint64_t MutexSpin;
boolean_t is_clock_configured = FALSE;

extern int mach_assert;
extern volatile uint32_t debug_enabled;

void machine_conf(void);

void
machine_startup(__unused boot_args * args)
{
	int boot_arg;

	PE_parse_boot_argn("assert", &mach_assert, sizeof (mach_assert));

	if (PE_parse_boot_argn("preempt", &boot_arg, sizeof (boot_arg))) {
		default_preemption_rate = boot_arg;
	}
	if (PE_parse_boot_argn("bg_preempt", &boot_arg, sizeof (boot_arg))) {
		default_bg_preemption_rate = boot_arg;
	}

	machine_conf();

	/*
	 * Kick off the kernel bootstrap.
	 */
	kernel_bootstrap();
	/* NOTREACHED */
}

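/*
 * Routine: machine_boot_info
 * Function: Return the boot-args string recorded by the platform expert.
 */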
char *
machine_boot_info(
	__unused char *buf,
	__unused vm_size_t size)
{
	return (PE_boot_args());
}

void
machine_conf(void)
{
	machine_info.memory_size = mem_size;
}

void
machine_init(void)
{
	debug_log_init();
	clock_config();
	is_clock_configured = TRUE;
	if (debug_enabled)
		pmap_map_globals();
}

void
slave_machine_init(__unused void *param)
{
	cpu_machine_init();	/* Initialize the processor */
	clock_init();		/* Init the clock */
}

/*
 * Routine: machine_processor_shutdown
 * Function: Switch to the shutdown context and run doshutdown on the
 * given processor.
 */
thread_t
machine_processor_shutdown(
	__unused thread_t thread,
	void (*doshutdown) (processor_t),
	processor_t processor)
{
	return (Shutdown_context(doshutdown, processor));
}

/*
 * Routine: ml_init_max_cpus
 * Function: Record the maximum number of CPUs and wake any thread
 * waiting in ml_get_max_cpus().
 */
void
ml_init_max_cpus(unsigned int max_cpus)
{
	boolean_t current_state;

	current_state = ml_set_interrupts_enabled(FALSE);
	if (max_cpus_initialized != MAX_CPUS_SET) {
		machine_info.max_cpus = max_cpus;
		machine_info.physical_cpu_max = max_cpus;
		machine_info.logical_cpu_max = max_cpus;
		if (max_cpus_initialized == MAX_CPUS_WAIT)
			thread_wakeup((event_t) &max_cpus_initialized);
		max_cpus_initialized = MAX_CPUS_SET;
	}
	(void) ml_set_interrupts_enabled(current_state);
}

/*
 * Routine: ml_get_max_cpus
 * Function: Return the maximum number of CPUs, blocking until
 * ml_init_max_cpus() has been called.
 */
unsigned int
ml_get_max_cpus(void)
{
	boolean_t current_state;

	current_state = ml_set_interrupts_enabled(FALSE);
	if (max_cpus_initialized != MAX_CPUS_SET) {
		max_cpus_initialized = MAX_CPUS_WAIT;
		assert_wait((event_t) &max_cpus_initialized, THREAD_UNINT);
		(void) thread_block(THREAD_CONTINUE_NULL);
	}
	(void) ml_set_interrupts_enabled(current_state);
	return (machine_info.max_cpus);
}

/*
 * Routine: ml_init_lock_timeout
 * Function: Compute the lock and mutex-spin timeouts, honoring the
 * slto_us and mtxspin boot-args.
 */
void
ml_init_lock_timeout(void)
{
	uint64_t abstime;
	uint64_t mtxspin;
	uint64_t default_timeout_ns = NSEC_PER_SEC>>2;
	uint32_t slto;

	if (PE_parse_boot_argn("slto_us", &slto, sizeof (slto)))
		default_timeout_ns = slto * NSEC_PER_USEC;

	nanoseconds_to_absolutetime(default_timeout_ns, &abstime);
	LockTimeOutUsec = (uint32_t)(abstime / NSEC_PER_USEC);
	LockTimeOut = (uint32_t)abstime;

	if (PE_parse_boot_argn("mtxspin", &mtxspin, sizeof (mtxspin))) {
		if (mtxspin > USEC_PER_SEC>>4)
			mtxspin = USEC_PER_SEC>>4;
		nanoseconds_to_absolutetime(mtxspin*NSEC_PER_USEC, &abstime);
	} else {
		nanoseconds_to_absolutetime(10*NSEC_PER_USEC, &abstime);
	}
	MutexSpin = abstime;
}

/*
 * This is called from the machine-independent routine cpu_up()
 * to perform machine-dependent info updates.
 */
void
ml_cpu_up(void)
{
	hw_atomic_add(&machine_info.physical_cpu, 1);
	hw_atomic_add(&machine_info.logical_cpu, 1);
}

/*
 * This is called from the machine-independent routine cpu_down()
 * to perform machine-dependent info updates.
 */
void
ml_cpu_down(void)
{
	cpu_data_t *cpu_data_ptr;

	hw_atomic_sub(&machine_info.physical_cpu, 1);
	hw_atomic_sub(&machine_info.logical_cpu, 1);

	/*
	 * If we want to deal with outstanding IPIs, we need to do so
	 * relatively early in the processor_doshutdown path, as we pend
	 * decrementer interrupts using the IPI mechanism if we cannot
	 * immediately service them (if IRQ is masked).  Do so now.
	 *
	 * We aren't on the interrupt stack here; would it make
	 * more sense to disable signaling and then enable
	 * interrupts?  It might be a bit cleaner.
	 */
	cpu_data_ptr = getCpuDatap();
	cpu_data_ptr->cpu_running = FALSE;

	cpu_signal_handler_internal(TRUE);
}

/*
 * Routine: ml_cpu_get_info
 * Function: Fill in cache geometry information for the current CPU.
 */
void
ml_cpu_get_info(ml_cpu_info_t * ml_cpu_info)
{
	cache_info_t *cpuid_cache_info;

	cpuid_cache_info = cache_info();
	ml_cpu_info->vector_unit = 0;
	ml_cpu_info->cache_line_size = cpuid_cache_info->c_linesz;
	ml_cpu_info->l1_icache_size = cpuid_cache_info->c_isize;
	ml_cpu_info->l1_dcache_size = cpuid_cache_info->c_dsize;

#if (__ARM_ARCH__ >= 7)
	ml_cpu_info->l2_settings = 1;
	ml_cpu_info->l2_cache_size = cpuid_cache_info->c_l2size;
#else
	ml_cpu_info->l2_settings = 0;
	ml_cpu_info->l2_cache_size = 0xFFFFFFFF;
#endif
	ml_cpu_info->l3_settings = 0;
	ml_cpu_info->l3_cache_size = 0xFFFFFFFF;
}

unsigned int
ml_get_machine_mem(void)
{
	return (machine_info.memory_size);
}

/* Return max offset */
vm_map_offset_t
ml_get_max_offset(
	boolean_t is64,
	unsigned int option)
{
	unsigned int pmap_max_offset_option = 0;

	switch (option) {
	case MACHINE_MAX_OFFSET_DEFAULT:
		pmap_max_offset_option = ARM_PMAP_MAX_OFFSET_DEFAULT;
		break;
	case MACHINE_MAX_OFFSET_MIN:
		pmap_max_offset_option = ARM_PMAP_MAX_OFFSET_MIN;
		break;
	case MACHINE_MAX_OFFSET_MAX:
		pmap_max_offset_option = ARM_PMAP_MAX_OFFSET_MAX;
		break;
	case MACHINE_MAX_OFFSET_DEVICE:
		pmap_max_offset_option = ARM_PMAP_MAX_OFFSET_DEVICE;
		break;
	default:
		panic("ml_get_max_offset(): Illegal option 0x%x\n", option);
		break;
	}
	return pmap_max_offset(is64, pmap_max_offset_option);
}

boolean_t
ml_wants_panic_trap_to_debugger(void)
{
	return FALSE;
}

void
ml_panic_trap_to_debugger(__unused const char *panic_format_str,
	__unused va_list *panic_args,
	__unused unsigned int reason,
	__unused void *ctx,
	__unused uint64_t panic_options_mask,
	__unused unsigned long panic_caller)
{
	return;
}

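/*
 * Routine: halt_all_cpus
 * Function: Halt or restart the machine via the platform expert and
 * spin forever; does not return.
 */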
__attribute__((noreturn))
void
halt_all_cpus(boolean_t reboot)
{
	if (reboot) {
		printf("MACH Reboot\n");
		PEHaltRestart(kPERestartCPU);
	} else {
		printf("CPU halted\n");
		PEHaltRestart(kPEHaltCPU);
	}
	while (1);
}

__attribute__((noreturn))
void
halt_cpu(void)
{
	halt_all_cpus(FALSE);
}

/*
 * Routine: machine_signal_idle
 * Function: Send a no-op IPI to nudge an idle processor.
 */
void
machine_signal_idle(
	processor_t processor)
{
	cpu_signal(processor_to_cpu_datap(processor), SIGPnop, (void *)NULL, (void *)NULL);
	KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_REMOTE_AST), processor->cpu_id, 0 /* nop */, 0, 0, 0);
}

void
machine_signal_idle_deferred(
	processor_t processor)
{
	cpu_signal_deferred(processor_to_cpu_datap(processor));
	KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_REMOTE_DEFERRED_AST), processor->cpu_id, 0 /* nop */, 0, 0, 0);
}

void
machine_signal_idle_cancel(
	processor_t processor)
{
	cpu_signal_cancel(processor_to_cpu_datap(processor));
	KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_REMOTE_CANCEL_AST), processor->cpu_id, 0 /* nop */, 0, 0, 0);
}

/*
 * Routine: ml_install_interrupt_handler
 * Function: Initialize Interrupt Handler
 */
void
ml_install_interrupt_handler(
	void *nub,
	int source,
	void *target,
	IOInterruptHandler handler,
	void *refCon)
{
	cpu_data_t *cpu_data_ptr;
	boolean_t current_state;

	current_state = ml_set_interrupts_enabled(FALSE);
	cpu_data_ptr = getCpuDatap();

	cpu_data_ptr->interrupt_nub = nub;
	cpu_data_ptr->interrupt_source = source;
	cpu_data_ptr->interrupt_target = target;
	cpu_data_ptr->interrupt_handler = handler;
	cpu_data_ptr->interrupt_refCon = refCon;

	cpu_data_ptr->interrupts_enabled = TRUE;
	(void) ml_set_interrupts_enabled(current_state);

	initialize_screen(NULL, kPEAcquireScreen);
}

/*
 * Routine: ml_init_interrupt
 * Function: Initialize Interrupts
 */
void
ml_init_interrupt(void)
{
}

/*
 * Routine: ml_init_timebase
 * Function: Register and set up timebase and decrementer services
 */
void ml_init_timebase(
	void *args,
	tbd_ops_t tbd_funcs,
	vm_offset_t int_address,
	vm_offset_t int_value)
{
	cpu_data_t *cpu_data_ptr;

	cpu_data_ptr = (cpu_data_t *)args;

	if ((cpu_data_ptr == &BootCpuData)
	    && (rtclock_timebase_func.tbd_fiq_handler == (void *)NULL)) {
		rtclock_timebase_func = *tbd_funcs;
		rtclock_timebase_addr = int_address;
		rtclock_timebase_val = int_value;
	}
}

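/*
 * Routine: ml_parse_cpu_topology
 * Function: Walk the /cpus node of the device tree to count the available
 * CPUs, clamping the count to the "cpus" boot-arg if one is given.
 */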
void
ml_parse_cpu_topology(void)
{
	DTEntry entry, child;
	OpaqueDTEntryIterator iter;
	uint32_t cpu_boot_arg;
	int err;

	err = DTLookupEntry(NULL, "/cpus", &entry);
	assert(err == kSuccess);

	err = DTInitEntryIterator(entry, &iter);
	assert(err == kSuccess);

	while (kSuccess == DTIterateEntries(&iter, &child)) {

#if MACH_ASSERT
		unsigned int propSize;
		void *prop = NULL;
		if (avail_cpus == 0) {
			if (kSuccess != DTGetProperty(child, "state", &prop, &propSize))
				panic("unable to retrieve state for cpu %u", avail_cpus);

			if (strncmp((char*)prop, "running", propSize) != 0)
				panic("cpu 0 has not been marked as running!");
		}
		assert(kSuccess == DTGetProperty(child, "reg", &prop, &propSize));
		assert(avail_cpus == *((uint32_t*)prop));
#endif
		++avail_cpus;
	}

	cpu_boot_arg = avail_cpus;
	if (PE_parse_boot_argn("cpus", &cpu_boot_arg, sizeof(cpu_boot_arg)) &&
	    (avail_cpus > cpu_boot_arg))
		avail_cpus = cpu_boot_arg;

	if (avail_cpus == 0)
		panic("No cpus found!");
}

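/*
 * The accessors below report CPU counts and identifiers derived from the
 * topology parsed above; this file assumes a simple SMP layout in which
 * logical, physical, and boot CPU numbers coincide.
 */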
unsigned int
ml_get_cpu_count(void)
{
	return avail_cpus;
}

int
ml_get_boot_cpu_number(void)
{
	return 0;
}

cluster_type_t
ml_get_boot_cluster(void)
{
	return CLUSTER_TYPE_SMP;
}

int
ml_get_cpu_number(uint32_t phys_id)
{
	return (int)phys_id;
}

int
ml_get_max_cpu_number(void)
{
	return avail_cpus - 1;
}

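/*
 * Routine: ml_processor_register
 * Function: Register a CPU described by in_processor_info with the machine
 * layer: allocate and initialize its per-CPU data (the boot CPU reuses
 * BootCpuData), record the platform callbacks, and hand back the
 * processor_t and the IPI handler to the caller.
 */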
kern_return_t
ml_processor_register(
	ml_processor_info_t * in_processor_info,
	processor_t * processor_out,
	ipi_handler_t * ipi_handler)
{
	cpu_data_t *this_cpu_datap;
	boolean_t is_boot_cpu;

	if (in_processor_info->phys_id >= MAX_CPUS) {
		/*
		 * The physical CPU ID indicates that we have more CPUs than
		 * this xnu build supports. This probably means we have an
		 * incorrect board configuration.
		 *
		 * TODO: Should this just return a failure instead? A panic
		 * is simply a convenient way to catch bugs in the pexpert
		 * headers.
		 */
		panic("phys_id %u is too large for MAX_CPUS (%u)", in_processor_info->phys_id, MAX_CPUS);
	}

	/* Fail the registration if the number of CPUs has been limited by boot-arg. */
	if ((in_processor_info->phys_id >= avail_cpus) ||
	    (in_processor_info->log_id > (uint32_t)ml_get_max_cpu_number()))
		return KERN_FAILURE;

	if (in_processor_info->log_id != (uint32_t)ml_get_boot_cpu_number()) {
		is_boot_cpu = FALSE;
		this_cpu_datap = cpu_data_alloc(FALSE);
		cpu_data_init(this_cpu_datap);
	} else {
		this_cpu_datap = &BootCpuData;
		is_boot_cpu = TRUE;
	}

	this_cpu_datap->cpu_id = in_processor_info->cpu_id;

	this_cpu_datap->cpu_console_buf = console_cpu_alloc(is_boot_cpu);
	if (this_cpu_datap->cpu_console_buf == (void *)(NULL))
		goto processor_register_error;

	if (!is_boot_cpu) {
		if (cpu_data_register(this_cpu_datap) != KERN_SUCCESS)
			goto processor_register_error;
	}

	this_cpu_datap->cpu_idle_notify = (void *) in_processor_info->processor_idle;
	this_cpu_datap->cpu_cache_dispatch = in_processor_info->platform_cache_dispatch;
	nanoseconds_to_absolutetime((uint64_t) in_processor_info->powergate_latency, &this_cpu_datap->cpu_idle_latency);
	this_cpu_datap->cpu_reset_assist = kvtophys(in_processor_info->powergate_stub_addr);

	this_cpu_datap->idle_timer_notify = (void *) in_processor_info->idle_timer;
	this_cpu_datap->idle_timer_refcon = in_processor_info->idle_timer_refcon;

	this_cpu_datap->platform_error_handler = (void *) in_processor_info->platform_error_handler;
	this_cpu_datap->cpu_regmap_paddr = in_processor_info->regmap_paddr;
	this_cpu_datap->cpu_phys_id = in_processor_info->phys_id;
	this_cpu_datap->cpu_l2_access_penalty = in_processor_info->l2_access_penalty;

	if (!is_boot_cpu) {
		processor_init((struct processor *)this_cpu_datap->cpu_processor,
		    this_cpu_datap->cpu_number, processor_pset(master_processor));

		if (this_cpu_datap->cpu_l2_access_penalty) {
			/*
			 * Cores that have a non-zero L2 access penalty compared
			 * to the boot processor should be de-prioritized by the
			 * scheduler, so that threads use the cores with better L2
			 * preferentially.
			 */
			processor_set_primary(this_cpu_datap->cpu_processor,
			    master_processor);
		}
	}

	*processor_out = this_cpu_datap->cpu_processor;
	*ipi_handler = cpu_signal_handler;
	if (in_processor_info->idle_tickle != (idle_tickle_t *) NULL)
		*in_processor_info->idle_tickle = (idle_tickle_t) cpu_idle_tickle;

#if KPC
	if (kpc_register_cpu(this_cpu_datap) != TRUE)
		goto processor_register_error;
#endif

	if (!is_boot_cpu)
		prng_cpu_init(this_cpu_datap->cpu_number);

	return KERN_SUCCESS;

processor_register_error:
#if KPC
	kpc_unregister_cpu(this_cpu_datap);
#endif
	if (!is_boot_cpu)
		cpu_data_free(this_cpu_datap);
	return KERN_FAILURE;
}

void
ml_init_arm_debug_interface(
	void * in_cpu_datap,
	vm_offset_t virt_address)
{
	((cpu_data_t *)in_cpu_datap)->cpu_debug_interface_map = virt_address;
	do_debugid();
}

/*
 * Routine: init_ast_check
 * Function: AST check initialization; nothing to do on ARM.
 */
void
init_ast_check(
	__unused processor_t processor)
{
}

/*
 * Routine: cause_ast_check
 * Function: Signal the given processor to check for pending ASTs.
 */
void
cause_ast_check(
	processor_t processor)
{
	if (current_processor() != processor) {
		cpu_signal(processor_to_cpu_datap(processor), SIGPast, (void *)NULL, (void *)NULL);
		KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_REMOTE_AST), processor->cpu_id, 1 /* ast */, 0, 0, 0);
	}
}


/*
 * Routine: ml_at_interrupt_context
 * Function: Check if running at interrupt context
 */
boolean_t
ml_at_interrupt_context(void)
{
	boolean_t at_interrupt_context = FALSE;

	disable_preemption();
	at_interrupt_context = (getCpuDatap()->cpu_int_state != NULL);
	enable_preemption();

	return at_interrupt_context;
}

extern uint32_t cpu_idle_count;

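/*
 * Routine: ml_get_power_state
 * Function: Report whether the caller is at interrupt context and whether
 * every CPU is currently idle.
 */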
void ml_get_power_state(boolean_t *icp, boolean_t *pidlep) {
	*icp = ml_at_interrupt_context();
	*pidlep = (cpu_idle_count == real_ncpus);
}

/*
 * Routine: ml_cause_interrupt
 * Function: Generate a fake interrupt
 */
void
ml_cause_interrupt(void)
{
	return;	/* BS_XXX */
}

/* Map memory-mapped IO space */
vm_offset_t
ml_io_map(
	vm_offset_t phys_addr,
	vm_size_t size)
{
	return (io_map(phys_addr, size, VM_WIMG_IO));
}

vm_offset_t
ml_io_map_wcomb(
	vm_offset_t phys_addr,
	vm_size_t size)
{
	return (io_map(phys_addr, size, VM_WIMG_WCOMB));
}

/* boot memory allocation */
vm_offset_t
ml_static_malloc(
	__unused vm_size_t size)
{
	return ((vm_offset_t) NULL);
}

vm_map_address_t
ml_map_high_window(
	vm_offset_t phys_addr,
	vm_size_t len)
{
	return pmap_map_high_window_bd(phys_addr, len, VM_PROT_READ | VM_PROT_WRITE);
}

vm_offset_t
ml_static_ptovirt(
	vm_offset_t paddr)
{
	return phystokv(paddr);
}

vm_offset_t
ml_static_vtop(
	vm_offset_t vaddr)
{
	if (((vm_address_t)(vaddr) - gVirtBase) >= gPhysSize)
		panic("ml_static_vtop(): illegal vaddr: %p\n", (void*)vaddr);
	return ((vm_address_t)(vaddr) - gVirtBase + gPhysBase);
}


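/*
 * Routine: ml_static_protect
 * Function: Change the protections on a page-aligned, wired range of static
 * kernel memory. Writable+executable requests are rejected; block mappings
 * are accepted only if they already carry the requested protections.
 */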
kern_return_t
ml_static_protect(
	vm_offset_t vaddr, /* kernel virtual address */
	vm_size_t size,
	vm_prot_t new_prot)
{
	pt_entry_t arm_prot = 0;
	pt_entry_t arm_block_prot = 0;
	vm_offset_t vaddr_cur;
	ppnum_t ppn;
	kern_return_t result = KERN_SUCCESS;

	if (vaddr < VM_MIN_KERNEL_ADDRESS)
		return KERN_FAILURE;

	assert((vaddr & (ARM_PGBYTES - 1)) == 0); /* must be page aligned */

	if ((new_prot & VM_PROT_WRITE) && (new_prot & VM_PROT_EXECUTE)) {
		panic("ml_static_protect(): WX request on %p", (void *) vaddr);
	}

	/* Set up the protection bits, and block bits so we can validate block mappings. */
	if (new_prot & VM_PROT_WRITE) {
		arm_prot |= ARM_PTE_AP(AP_RWNA);
		arm_block_prot |= ARM_TTE_BLOCK_AP(AP_RWNA);
	} else {
		arm_prot |= ARM_PTE_AP(AP_RONA);
		arm_block_prot |= ARM_TTE_BLOCK_AP(AP_RONA);
	}

	if (!(new_prot & VM_PROT_EXECUTE)) {
		arm_prot |= ARM_PTE_NX;
		arm_block_prot |= ARM_TTE_BLOCK_NX;
	}

	for (vaddr_cur = vaddr;
	     vaddr_cur < ((vaddr + size) & ~ARM_PGMASK);
	     vaddr_cur += ARM_PGBYTES) {
		ppn = pmap_find_phys(kernel_pmap, vaddr_cur);
		if (ppn != (vm_offset_t) NULL) {
			tt_entry_t *ttp = &kernel_pmap->tte[ttenum(vaddr_cur)];
			tt_entry_t tte = *ttp;

			if ((tte & ARM_TTE_TYPE_MASK) != ARM_TTE_TYPE_TABLE) {
				if (((tte & ARM_TTE_TYPE_MASK) == ARM_TTE_TYPE_BLOCK) &&
				    ((tte & (ARM_TTE_BLOCK_APMASK | ARM_TTE_BLOCK_NX_MASK)) == arm_block_prot)) {
					/*
					 * We can support ml_static_protect on a block mapping if the mapping already has
					 * the desired protections.  We still want to run checks on a per-page basis.
					 */
					continue;
				}

				result = KERN_FAILURE;
				break;
			}

			pt_entry_t *pte_p = (pt_entry_t *) ttetokv(tte) + ptenum(vaddr_cur);
			pt_entry_t ptmp = *pte_p;

			ptmp = (ptmp & ~(ARM_PTE_APMASK | ARM_PTE_NX_MASK)) | arm_prot;
			*pte_p = ptmp;
#ifndef __ARM_L1_PTW__
			FlushPoC_DcacheRegion((vm_offset_t) pte_p, sizeof(*pte_p));
#endif
		}
	}

	if (vaddr_cur > vaddr)
		flush_mmu_tlb_region(vaddr, (vm_size_t)(vaddr_cur - vaddr));

	return result;
}

/*
 * Routine: ml_static_mfree
 * Function: Release a range of wired, statically mapped kernel memory to
 * the VM as free pages.
 */
void
ml_static_mfree(
	vm_offset_t vaddr,
	vm_size_t size)
{
	vm_offset_t vaddr_cur;
	ppnum_t ppn;
	uint32_t freed_pages = 0;

	/* It is acceptable (if bad) to fail to free. */
	if (vaddr < VM_MIN_KERNEL_ADDRESS)
		return;

	assert((vaddr & (PAGE_SIZE - 1)) == 0); /* must be page aligned */

	for (vaddr_cur = vaddr;
	     vaddr_cur < trunc_page_32(vaddr + size);
	     vaddr_cur += PAGE_SIZE) {
		ppn = pmap_find_phys(kernel_pmap, vaddr_cur);
		if (ppn != (vm_offset_t) NULL) {
			/*
			 * It is not acceptable to fail to update the protections on a page
			 * we will release to the VM.  We need to either panic or continue.
			 * For now, we'll panic (to help flag if there is memory we can
			 * reclaim).
			 */
			if (ml_static_protect(vaddr_cur, PAGE_SIZE, VM_PROT_WRITE | VM_PROT_READ) != KERN_SUCCESS) {
				panic("Failed ml_static_mfree on %p", (void *) vaddr_cur);
			}
#if 0
			/*
			 * Must NOT tear down the "V==P" mapping for vaddr_cur as the zone alias scheme
			 * relies on the persistence of these mappings for all time.
			 */
			// pmap_remove(kernel_pmap, (addr64_t) vaddr_cur, (addr64_t) (vaddr_cur + PAGE_SIZE));
#endif
			vm_page_create(ppn, (ppn + 1));
			freed_pages++;
		}
	}
	vm_page_lockspin_queues();
	vm_page_wire_count -= freed_pages;
	vm_page_wire_count_initial -= freed_pages;
	vm_page_unlock_queues();
#if DEBUG
	kprintf("ml_static_mfree: Released 0x%x pages at VA %p, size:0x%llx, last ppn: 0x%x\n", freed_pages, (void *)vaddr, (uint64_t)size, ppn);
#endif
}


/* virtual to physical on wired pages */
vm_offset_t
ml_vtophys(vm_offset_t vaddr)
{
	return kvtophys(vaddr);
}

/*
 * Routine: ml_nofault_copy
 * Function: Perform a physical mode copy if the source and destination have
 * valid translations in the kernel pmap. If translations are present, they are
 * assumed to be wired; i.e., no attempt is made to guarantee that the
 * translations obtained remain valid for the duration of the copy process.
 */
vm_size_t
ml_nofault_copy(vm_offset_t virtsrc, vm_offset_t virtdst, vm_size_t size)
{
	addr64_t cur_phys_dst, cur_phys_src;
	uint32_t count, nbytes = 0;

	while (size > 0) {
		if (!(cur_phys_src = kvtophys(virtsrc)))
			break;
		if (!(cur_phys_dst = kvtophys(virtdst)))
			break;
		if (!pmap_valid_address(trunc_page_64(cur_phys_dst)) ||
		    !pmap_valid_address(trunc_page_64(cur_phys_src)))
			break;
		count = PAGE_SIZE - (cur_phys_src & PAGE_MASK);
		if (count > (PAGE_SIZE - (cur_phys_dst & PAGE_MASK)))
			count = PAGE_SIZE - (cur_phys_dst & PAGE_MASK);
		if (count > size)
			count = size;

		bcopy_phys(cur_phys_src, cur_phys_dst, count);

		nbytes += count;
		virtsrc += count;
		virtdst += count;
		size -= count;
	}

	return nbytes;
}

/*
 * Routine: ml_validate_nofault
 * Function: Validate that this address range has valid translations
 * in the kernel pmap. If translations are present, they are
 * assumed to be wired; i.e. no attempt is made to guarantee
 * that the translations persist after the check.
 * Returns: TRUE if the range is mapped and will not cause a fault,
 * FALSE otherwise.
 */

boolean_t ml_validate_nofault(
	vm_offset_t virtsrc, vm_size_t size)
{
	addr64_t cur_phys_src;
	uint32_t count;

	while (size > 0) {
		if (!(cur_phys_src = kvtophys(virtsrc)))
			return FALSE;
		if (!pmap_valid_address(trunc_page_64(cur_phys_src)))
			return FALSE;
		count = (uint32_t)(PAGE_SIZE - (cur_phys_src & PAGE_MASK));
		if (count > size)
			count = (uint32_t)size;

		virtsrc += count;
		size -= count;
	}

	return TRUE;
}

void
ml_get_bouncepool_info(vm_offset_t * phys_addr, vm_size_t * size)
{
	*phys_addr = 0;
	*size = 0;
}

/*
 * Stubs for CPU Stepper
 */
void
active_rt_threads(__unused boolean_t active)
{
}

void
thread_tell_urgency(__unused int urgency,
	__unused uint64_t rt_period,
	__unused uint64_t rt_deadline,
	__unused uint64_t sched_latency,
	__unused thread_t nthread)
{
}

void
machine_run_count(__unused uint32_t count)
{
}

processor_t
machine_choose_processor(__unused processor_set_t pset, processor_t processor)
{
	return (processor);
}

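/*
 * Routine: ml_stack_remaining
 * Function: Return the number of bytes left on the current stack, using the
 * interrupt stack if the local frame lives there and the thread's kernel
 * stack otherwise.
 */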
vm_offset_t
ml_stack_remaining(void)
{
	uintptr_t local = (uintptr_t) &local;
	vm_offset_t intstack_top_ptr;

	intstack_top_ptr = getCpuDatap()->intstack_top;
	if ((local < intstack_top_ptr) && (local > intstack_top_ptr - INTSTACK_SIZE)) {
		return (local - (getCpuDatap()->intstack_top - INTSTACK_SIZE));
	} else {
		return (local - current_thread()->kernel_stack);
	}
}

boolean_t machine_timeout_suspended(void) {
	return FALSE;
}

kern_return_t
ml_interrupt_prewarm(__unused uint64_t deadline)
{
	return KERN_FAILURE;
}

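/*
 * Routine: ml_get_hwclock
 * Function: Read the 64-bit hardware count register via MRRC (the physical
 * count, CNTPCT, on ARMv7), re-reading until the high word is stable across
 * the read of the low word.
 */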
uint64_t
ml_get_hwclock(void)
{
	uint64_t high_first = 0;
	uint64_t high_second = 0;
	uint64_t low = 0;

	__builtin_arm_isb(ISB_SY);

	do {
		high_first = __builtin_arm_mrrc(15, 0, 14) >> 32;
		low = __builtin_arm_mrrc(15, 0, 14) & 0xFFFFFFFFULL;
		high_second = __builtin_arm_mrrc(15, 0, 14) >> 32;
	} while (high_first != high_second);

	return (high_first << 32) | (low);
}

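/*
 * Routine: ml_delay_should_spin
 * Function: Return TRUE if a delay of the given interval is shorter than
 * this CPU's idle-exit latency and should therefore spin rather than block.
 */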
boolean_t
ml_delay_should_spin(uint64_t interval)
{
	cpu_data_t *cdp = getCpuDatap();

	if (cdp->cpu_idle_latency) {
		return (interval < cdp->cpu_idle_latency) ? TRUE : FALSE;
	} else {
		/*
		 * Early boot, latency is unknown. Err on the side of blocking,
		 * which should always be safe, even if slow
		 */
		return FALSE;
	}
}

boolean_t ml_thread_is64bit(thread_t thread)
{
	return (thread_is_64bit(thread));
}

void ml_timer_evaluate(void) {
}

boolean_t
ml_timer_forced_evaluation(void) {
	return FALSE;
}

uint64_t
ml_energy_stat(__unused thread_t t) {
	return 0;
}


void
ml_gpu_stat_update(__unused uint64_t gpu_ns_delta) {
#if CONFIG_EMBEDDED
	/*
	 * For now: update the resource coalition stats of the
	 * current thread's coalition
	 */
	task_coalition_update_gpu_stats(current_task(), gpu_ns_delta);
#endif
}

uint64_t
ml_gpu_stat(__unused thread_t t) {
	return 0;
}

#if !CONFIG_SKIP_PRECISE_USER_KERNEL_TIME
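/*
 * Routine: timer_state_event
 * Function: Move the per-processor state and thread timers between user
 * and system accounting when crossing the user/kernel boundary.
 */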
static void
timer_state_event(boolean_t switch_to_kernel)
{
	thread_t thread = current_thread();
	if (!thread->precise_user_kernel_time) return;

	processor_data_t *pd = &getCpuDatap()->cpu_processor->processor_data;
	uint64_t now = ml_get_timebase();

	timer_stop(pd->current_state, now);
	pd->current_state = (switch_to_kernel) ? &pd->system_state : &pd->user_state;
	timer_start(pd->current_state, now);

	timer_stop(pd->thread_timer, now);
	pd->thread_timer = (switch_to_kernel) ? &thread->system_timer : &thread->user_timer;
	timer_start(pd->thread_timer, now);
}

void
timer_state_event_user_to_kernel(void)
{
	timer_state_event(TRUE);
}

void
timer_state_event_kernel_to_user(void)
{
	timer_state_event(FALSE);
}
#endif /* !CONFIG_SKIP_PRECISE_USER_KERNEL_TIME */

boolean_t
user_cont_hwclock_allowed(void)
{
	return FALSE;
}

boolean_t
user_timebase_allowed(void)
{
#if __ARM_TIME__
	return TRUE;
#else
	return FALSE;
#endif
}

/*
 * The following are required for parts of the kernel
 * that cannot resolve these functions as inlines:
 */
extern thread_t current_act(void);
thread_t
current_act(void)
{
	return current_thread_fast();
}

#undef current_thread
extern thread_t current_thread(void);
thread_t
current_thread(void)
{
	return current_thread_fast();
}

#if __ARM_USER_PROTECT__
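/*
 * arm_user_protect_begin() switches TTBR0 to the thread's kernel-only page
 * tables and the kernel ASID if they are not already active, returning the
 * previous TTBR0; arm_user_protect_end() restores the thread's user page
 * tables and ASID, optionally masking IRQ/FIQ around the switch.
 */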
uintptr_t
arm_user_protect_begin(thread_t thread)
{
	uintptr_t ttbr0, asid = 0;		// kernel asid

	ttbr0 = __builtin_arm_mrc(15,0,2,0,0);	// Get TTBR0
	if (ttbr0 != thread->machine.kptw_ttb) {
		__builtin_arm_mcr(15,0,thread->machine.kptw_ttb,2,0,0);	// Set TTBR0
		__builtin_arm_mcr(15,0,asid,13,0,1);			// Set CONTEXTIDR
		__builtin_arm_isb(ISB_SY);
	}
	return ttbr0;
}

void
arm_user_protect_end(thread_t thread, uintptr_t ttbr0, boolean_t disable_interrupts)
{
	if ((ttbr0 != thread->machine.kptw_ttb) && (thread->machine.uptw_ttb != thread->machine.kptw_ttb)) {
		if (disable_interrupts)
			__asm__ volatile ("cpsid if" ::: "memory");	// Disable FIQ/IRQ
		__builtin_arm_mcr(15,0,thread->machine.uptw_ttb,2,0,0);	// Set TTBR0
		__builtin_arm_mcr(15,0,thread->machine.asid,13,0,1);	// Set CONTEXTIDR with thread asid
		__builtin_arm_dsb(DSB_ISH);
		__builtin_arm_isb(ISB_SY);
	}
}
#endif // __ARM_USER_PROTECT__

void ml_task_set_rop_pid(__unused task_t task, __unused task_t parent_task, __unused boolean_t inherit)
{
	return;
}