/* apple/xnu (xnu-4570.31.3) - osfmk/arm/machine_routines.c */
/*
 * Copyright (c) 2007-2016 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <arm/proc_reg.h>
#include <arm/machine_cpu.h>
#include <arm/cpu_internal.h>
#include <arm/cpuid.h>
#include <arm/io_map_entries.h>
#include <arm/cpu_data.h>
#include <arm/cpu_data_internal.h>
#include <arm/misc_protos.h>
#include <arm/rtclock.h>
#include <arm/caches_internal.h>
#include <console/serial_protos.h>
#include <kern/machine.h>
#include <prng/random.h>
#include <kern/startup.h>
#include <kern/sched.h>
#include <kern/thread.h>
#include <mach/machine.h>
#include <machine/atomic.h>
#include <vm/pmap.h>
#include <vm/vm_page.h>
#include <sys/kdebug.h>
#include <kern/coalition.h>
#include <pexpert/device_tree.h>

#include <IOKit/IOPlatformExpert.h>

#if KPC
#include <kern/kpc.h>
#endif

static int max_cpus_initialized = 0;
#define MAX_CPUS_SET 0x1
#define MAX_CPUS_WAIT 0x2

static unsigned int avail_cpus = 0;

uint32_t LockTimeOut;
uint32_t LockTimeOutUsec;
uint64_t MutexSpin;
boolean_t is_clock_configured = FALSE;

extern int mach_assert;
extern volatile uint32_t debug_enabled;

void machine_conf(void);

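/*
 * Routine: machine_startup
 * Function: Parse early boot-args (assert, preempt, bg_preempt), apply
 * machine configuration, and hand control to kernel_bootstrap(), which
 * does not return.
 */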
void
machine_startup(__unused boot_args * args)
{
	int boot_arg;

	PE_parse_boot_argn("assert", &mach_assert, sizeof (mach_assert));

	if (PE_parse_boot_argn("preempt", &boot_arg, sizeof (boot_arg))) {
		default_preemption_rate = boot_arg;
	}
	if (PE_parse_boot_argn("bg_preempt", &boot_arg, sizeof (boot_arg))) {
		default_bg_preemption_rate = boot_arg;
	}

	machine_conf();

	/*
	 * Kick off the kernel bootstrap.
	 */
	kernel_bootstrap();
	/* NOTREACHED */
}

char *
machine_boot_info(
	__unused char *buf,
	__unused vm_size_t size)
{
	return (PE_boot_args());
}

void
machine_conf(void)
{
	machine_info.memory_size = mem_size;
}

void
machine_init(void)
{
	debug_log_init();
	clock_config();
	is_clock_configured = TRUE;
	if (debug_enabled)
		pmap_map_globals();
}

void
slave_machine_init(__unused void *param)
{
	cpu_machine_init();	/* Initialize the processor */
	clock_init();		/* Init the clock */
}

/*
 * Routine: machine_processor_shutdown
 * Function: Switch away from the current thread and run doshutdown for
 * the processor being shut down, via Shutdown_context().
 */
thread_t
machine_processor_shutdown(
	__unused thread_t thread,
	void (*doshutdown) (processor_t),
	processor_t processor)
{
	return (Shutdown_context(doshutdown, processor));
}

/*
 * Routine: ml_init_max_cpus
 * Function: Record the platform's maximum CPU counts and wake any thread
 * blocked in ml_get_max_cpus().
 */
void
ml_init_max_cpus(unsigned int max_cpus)
{
	boolean_t current_state;

	current_state = ml_set_interrupts_enabled(FALSE);
	if (max_cpus_initialized != MAX_CPUS_SET) {
		machine_info.max_cpus = max_cpus;
		machine_info.physical_cpu_max = max_cpus;
		machine_info.logical_cpu_max = max_cpus;
		if (max_cpus_initialized == MAX_CPUS_WAIT)
			thread_wakeup((event_t) & max_cpus_initialized);
		max_cpus_initialized = MAX_CPUS_SET;
	}
	(void) ml_set_interrupts_enabled(current_state);
}

/*
 * Routine: ml_get_max_cpus
 * Function: Return the maximum number of CPUs, blocking until
 * ml_init_max_cpus() has been called.
 */
unsigned int
ml_get_max_cpus(void)
{
	boolean_t current_state;

	current_state = ml_set_interrupts_enabled(FALSE);
	if (max_cpus_initialized != MAX_CPUS_SET) {
		max_cpus_initialized = MAX_CPUS_WAIT;
		assert_wait((event_t) & max_cpus_initialized, THREAD_UNINT);
		(void) thread_block(THREAD_CONTINUE_NULL);
	}
	(void) ml_set_interrupts_enabled(current_state);
	return (machine_info.max_cpus);
}

/*
 * Routine: ml_init_lock_timeout
 * Function: Compute the spinlock and mutex-spin timeouts, honoring the
 * slto_us and mtxspin boot-args.
 */
void
ml_init_lock_timeout(void)
{
	uint64_t abstime;
	uint64_t mtxspin;
	uint64_t default_timeout_ns = NSEC_PER_SEC>>2;
	uint32_t slto;

	if (PE_parse_boot_argn("slto_us", &slto, sizeof (slto)))
		default_timeout_ns = slto * NSEC_PER_USEC;

	nanoseconds_to_absolutetime(default_timeout_ns, &abstime);
	LockTimeOutUsec = (uint32_t)(abstime / NSEC_PER_USEC);
	LockTimeOut = (uint32_t)abstime;

	if (PE_parse_boot_argn("mtxspin", &mtxspin, sizeof (mtxspin))) {
		if (mtxspin > USEC_PER_SEC>>4)
			mtxspin = USEC_PER_SEC>>4;
		nanoseconds_to_absolutetime(mtxspin*NSEC_PER_USEC, &abstime);
	} else {
		nanoseconds_to_absolutetime(10*NSEC_PER_USEC, &abstime);
	}
	MutexSpin = abstime;
}

/*
 * This is called from the machine-independent routine cpu_up()
 * to perform machine-dependent info updates.
 */
void
ml_cpu_up(void)
{
	hw_atomic_add(&machine_info.physical_cpu, 1);
	hw_atomic_add(&machine_info.logical_cpu, 1);
}

/*
 * This is called from the machine-independent routine cpu_down()
 * to perform machine-dependent info updates.
 */
void
ml_cpu_down(void)
{
	cpu_data_t *cpu_data_ptr;

	hw_atomic_sub(&machine_info.physical_cpu, 1);
	hw_atomic_sub(&machine_info.logical_cpu, 1);

	/*
	 * If we want to deal with outstanding IPIs, we need to do so
	 * relatively early in the processor_doshutdown path, as we pend
	 * decrementer interrupts using the IPI mechanism if we cannot
	 * immediately service them (if IRQ is masked).  Do so now.
	 *
	 * We aren't on the interrupt stack here; would it make more sense
	 * to disable signaling and then enable interrupts?  It might be
	 * a bit cleaner.
	 */
	cpu_data_ptr = getCpuDatap();
	cpu_data_ptr->cpu_running = FALSE;

	cpu_signal_handler_internal(TRUE);
}

/*
 * Routine: ml_cpu_get_info
 * Function: Report cache geometry (line size, L1/L2 sizes) for this CPU.
 */
void
ml_cpu_get_info(ml_cpu_info_t * ml_cpu_info)
{
	cache_info_t *cpuid_cache_info;

	cpuid_cache_info = cache_info();
	ml_cpu_info->vector_unit = 0;
	ml_cpu_info->cache_line_size = cpuid_cache_info->c_linesz;
	ml_cpu_info->l1_icache_size = cpuid_cache_info->c_isize;
	ml_cpu_info->l1_dcache_size = cpuid_cache_info->c_dsize;

#if (__ARM_ARCH__ >= 7)
	ml_cpu_info->l2_settings = 1;
	ml_cpu_info->l2_cache_size = cpuid_cache_info->c_l2size;
#else
	ml_cpu_info->l2_settings = 0;
	ml_cpu_info->l2_cache_size = 0xFFFFFFFF;
#endif
	ml_cpu_info->l3_settings = 0;
	ml_cpu_info->l3_cache_size = 0xFFFFFFFF;
}

unsigned int
ml_get_machine_mem(void)
{
	return (machine_info.memory_size);
}

/* Return max offset */
vm_map_offset_t
ml_get_max_offset(
	boolean_t is64,
	unsigned int option)
{
	unsigned int pmap_max_offset_option = 0;

	switch (option) {
	case MACHINE_MAX_OFFSET_DEFAULT:
		pmap_max_offset_option = ARM_PMAP_MAX_OFFSET_DEFAULT;
		break;
	case MACHINE_MAX_OFFSET_MIN:
		pmap_max_offset_option = ARM_PMAP_MAX_OFFSET_MIN;
		break;
	case MACHINE_MAX_OFFSET_MAX:
		pmap_max_offset_option = ARM_PMAP_MAX_OFFSET_MAX;
		break;
	case MACHINE_MAX_OFFSET_DEVICE:
		pmap_max_offset_option = ARM_PMAP_MAX_OFFSET_DEVICE;
		break;
	default:
		panic("ml_get_max_offset(): Illegal option 0x%x\n", option);
		break;
	}
	return pmap_max_offset(is64, pmap_max_offset_option);
}

boolean_t
ml_wants_panic_trap_to_debugger(void)
{
	return FALSE;
}

void
ml_panic_trap_to_debugger(__unused const char *panic_format_str,
			  __unused va_list *panic_args,
			  __unused unsigned int reason,
			  __unused void *ctx,
			  __unused uint64_t panic_options_mask,
			  __unused unsigned long panic_caller)
{
	return;
}

__attribute__((noreturn))
void
halt_all_cpus(boolean_t reboot)
{
	if (reboot) {
		printf("MACH Reboot\n");
		PEHaltRestart(kPERestartCPU);
	} else {
		printf("CPU halted\n");
		PEHaltRestart(kPEHaltCPU);
	}
	while (1);
}

__attribute__((noreturn))
void
halt_cpu(void)
{
	halt_all_cpus(FALSE);
}

/*
 * Routine: machine_signal_idle
 * Function: Send a SIGPnop IPI to wake an idle processor.
 */
void
machine_signal_idle(
	processor_t processor)
{
	cpu_signal(processor_to_cpu_datap(processor), SIGPnop, (void *)NULL, (void *)NULL);
	KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_REMOTE_AST), processor->cpu_id, 0 /* nop */, 0, 0, 0);
}

void
machine_signal_idle_deferred(
	processor_t processor)
{
	cpu_signal_deferred(processor_to_cpu_datap(processor));
	KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_REMOTE_DEFERRED_AST), processor->cpu_id, 0 /* nop */, 0, 0, 0);
}

void
machine_signal_idle_cancel(
	processor_t processor)
{
	cpu_signal_cancel(processor_to_cpu_datap(processor));
	KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_REMOTE_CANCEL_AST), processor->cpu_id, 0 /* nop */, 0, 0, 0);
}

/*
 * Routine: ml_install_interrupt_handler
 * Function: Initialize Interrupt Handler
 */
void
ml_install_interrupt_handler(
	void *nub,
	int source,
	void *target,
	IOInterruptHandler handler,
	void *refCon)
{
	cpu_data_t *cpu_data_ptr;
	boolean_t current_state;

	current_state = ml_set_interrupts_enabled(FALSE);
	cpu_data_ptr = getCpuDatap();

	cpu_data_ptr->interrupt_nub = nub;
	cpu_data_ptr->interrupt_source = source;
	cpu_data_ptr->interrupt_target = target;
	cpu_data_ptr->interrupt_handler = handler;
	cpu_data_ptr->interrupt_refCon = refCon;

	cpu_data_ptr->interrupts_enabled = TRUE;
	(void) ml_set_interrupts_enabled(current_state);

	initialize_screen(NULL, kPEAcquireScreen);
}

/*
 * Routine: ml_init_interrupt
 * Function: Initialize Interrupts
 */
void
ml_init_interrupt(void)
{
}

/*
 * Routine: ml_init_timebase
 * Function: Register and set up timebase and decrementer services
 */
void ml_init_timebase(
	void *args,
	tbd_ops_t tbd_funcs,
	vm_offset_t int_address,
	vm_offset_t int_value)
{
	cpu_data_t *cpu_data_ptr;

	cpu_data_ptr = (cpu_data_t *)args;

	if ((cpu_data_ptr == &BootCpuData)
	    && (rtclock_timebase_func.tbd_fiq_handler == (void *)NULL)) {
		rtclock_timebase_func = *tbd_funcs;
		rtclock_timebase_addr = int_address;
		rtclock_timebase_val = int_value;
	}
}

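/*
 * Routine: ml_parse_cpu_topology
 * Function: Count the CPU entries under /cpus in the device tree, verify
 * that cpu 0 is marked "running", and clamp the available CPU count to
 * the "cpus" boot-arg if one was supplied.
 */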
void
ml_parse_cpu_topology(void)
{
	DTEntry entry, child;
	OpaqueDTEntryIterator iter;
	uint32_t cpu_boot_arg;
	int err;

	err = DTLookupEntry(NULL, "/cpus", &entry);
	assert(err == kSuccess);

	err = DTInitEntryIterator(entry, &iter);
	assert(err == kSuccess);

	while (kSuccess == DTIterateEntries(&iter, &child)) {

#if MACH_ASSERT
		unsigned int propSize;
		void *prop = NULL;
		if (avail_cpus == 0) {
			if (kSuccess != DTGetProperty(child, "state", &prop, &propSize))
				panic("unable to retrieve state for cpu %u", avail_cpus);

			if (strncmp((char*)prop, "running", propSize) != 0)
				panic("cpu 0 has not been marked as running!");
		}
		assert(kSuccess == DTGetProperty(child, "reg", &prop, &propSize));
		assert(avail_cpus == *((uint32_t*)prop));
#endif
		++avail_cpus;
	}

	cpu_boot_arg = avail_cpus;
	if (PE_parse_boot_argn("cpus", &cpu_boot_arg, sizeof(cpu_boot_arg)) &&
	    (avail_cpus > cpu_boot_arg))
		avail_cpus = cpu_boot_arg;

	if (avail_cpus == 0)
		panic("No cpus found!");
}

unsigned int
ml_get_cpu_count(void)
{
	return avail_cpus;
}

int
ml_get_boot_cpu_number(void)
{
	return 0;
}

cluster_type_t
ml_get_boot_cluster(void)
{
	return CLUSTER_TYPE_SMP;
}

int
ml_get_cpu_number(uint32_t phys_id)
{
	return (int)phys_id;
}

int
ml_get_max_cpu_number(void)
{
	return avail_cpus - 1;
}

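/*
 * Routine: ml_processor_register
 * Function: Register a CPU described by the platform expert: allocate and
 * initialize its per-CPU data, wire up idle/timer/error callbacks, and
 * hand back the processor_t and IPI handler.
 */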
kern_return_t
ml_processor_register(
	ml_processor_info_t * in_processor_info,
	processor_t * processor_out,
	ipi_handler_t * ipi_handler)
{
	cpu_data_t *this_cpu_datap;
	boolean_t is_boot_cpu;

	if (in_processor_info->phys_id >= MAX_CPUS) {
		/*
		 * The physical CPU ID indicates that we have more CPUs than
		 * this xnu build supports. This probably means we have an
		 * incorrect board configuration.
		 *
		 * TODO: Should this just return a failure instead? A panic
		 * is simply a convenient way to catch bugs in the pexpert
		 * headers.
		 */
528 panic("phys_id %u is too large for MAX_CPUS (%u)", in_processor_info->phys_id, MAX_CPUS);
529 }
530
531 /* Fail the registration if the number of CPUs has been limited by boot-arg. */
532 if ((in_processor_info->phys_id >= avail_cpus) ||
533 (in_processor_info->log_id > (uint32_t)ml_get_max_cpu_number()))
534 return KERN_FAILURE;
535
536 if (in_processor_info->log_id != (uint32_t)ml_get_boot_cpu_number()) {
537 is_boot_cpu = FALSE;
538 this_cpu_datap = cpu_data_alloc(FALSE);
539 cpu_data_init(this_cpu_datap);
540 } else {
541 this_cpu_datap = &BootCpuData;
542 is_boot_cpu = TRUE;
543 }
544
545 this_cpu_datap->cpu_id = in_processor_info->cpu_id;
546
547 this_cpu_datap->cpu_chud = chudxnu_cpu_alloc(is_boot_cpu);
548 if (this_cpu_datap->cpu_chud == (void *)NULL)
549 goto processor_register_error;
550 this_cpu_datap->cpu_console_buf = console_cpu_alloc(is_boot_cpu);
551 if (this_cpu_datap->cpu_console_buf == (void *)(NULL))
552 goto processor_register_error;
553
554 if (!is_boot_cpu) {
555 if (cpu_data_register(this_cpu_datap) != KERN_SUCCESS)
556 goto processor_register_error;
557 }
558
559 this_cpu_datap->cpu_idle_notify = (void *) in_processor_info->processor_idle;
560 this_cpu_datap->cpu_cache_dispatch = in_processor_info->platform_cache_dispatch;
561 nanoseconds_to_absolutetime((uint64_t) in_processor_info->powergate_latency, &this_cpu_datap->cpu_idle_latency);
562 this_cpu_datap->cpu_reset_assist = kvtophys(in_processor_info->powergate_stub_addr);
563
564 this_cpu_datap->idle_timer_notify = (void *) in_processor_info->idle_timer;
565 this_cpu_datap->idle_timer_refcon = in_processor_info->idle_timer_refcon;
566
567 this_cpu_datap->platform_error_handler = (void *) in_processor_info->platform_error_handler;
568 this_cpu_datap->cpu_regmap_paddr = in_processor_info->regmap_paddr;
569 this_cpu_datap->cpu_phys_id = in_processor_info->phys_id;
570 this_cpu_datap->cpu_l2_access_penalty = in_processor_info->l2_access_penalty;
571
572 if (!is_boot_cpu) {
573 processor_init((struct processor *)this_cpu_datap->cpu_processor,
574 this_cpu_datap->cpu_number, processor_pset(master_processor));
575
576 if (this_cpu_datap->cpu_l2_access_penalty) {
577 /*
578 * Cores that have a non-zero L2 access penalty compared
579 * to the boot processor should be de-prioritized by the
580 * scheduler, so that threads use the cores with better L2
581 * preferentially.
582 */
583 processor_set_primary(this_cpu_datap->cpu_processor,
584 master_processor);
585 }
586 }
587
588 *processor_out = this_cpu_datap->cpu_processor;
589 *ipi_handler = cpu_signal_handler;
590 if (in_processor_info->idle_tickle != (idle_tickle_t *) NULL)
591 *in_processor_info->idle_tickle = (idle_tickle_t) cpu_idle_tickle;
592
593 #if KPC
594 if (kpc_register_cpu(this_cpu_datap) != TRUE)
595 goto processor_register_error;
596 #endif
597
598 if (!is_boot_cpu)
599 prng_cpu_init(this_cpu_datap->cpu_number);
600
601 return KERN_SUCCESS;
602
603 processor_register_error:
604 #if KPC
605 kpc_unregister_cpu(this_cpu_datap);
606 #endif
607 if (this_cpu_datap->cpu_chud != (void *)NULL)
608 chudxnu_cpu_free(this_cpu_datap->cpu_chud);
609 if (!is_boot_cpu)
610 cpu_data_free(this_cpu_datap);
611 return KERN_FAILURE;
612 }
613
614 void
615 ml_init_arm_debug_interface(
616 void * in_cpu_datap,
617 vm_offset_t virt_address)
618 {
619 ((cpu_data_t *)in_cpu_datap)->cpu_debug_interface_map = virt_address;
620 do_debugid();
621 }
622
/*
 * Routine: init_ast_check
 * Function: No-op on this platform.
 */
void
init_ast_check(
	__unused processor_t processor)
{
}

/*
 * Routine: cause_ast_check
 * Function: Signal a remote processor so that it checks for pending ASTs.
 */
void
cause_ast_check(
	processor_t processor)
{
	if (current_processor() != processor) {
		cpu_signal(processor_to_cpu_datap(processor), SIGPast, (void *)NULL, (void *)NULL);
		KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_REMOTE_AST), processor->cpu_id, 1 /* ast */, 0, 0, 0);
	}
}


/*
 * Routine: ml_at_interrupt_context
 * Function: Check if running at interrupt context
 */
boolean_t
ml_at_interrupt_context(void)
{
	boolean_t at_interrupt_context = FALSE;

	disable_preemption();
	at_interrupt_context = (getCpuDatap()->cpu_int_state != NULL);
	enable_preemption();

	return at_interrupt_context;
}

extern uint32_t cpu_idle_count;

void ml_get_power_state(boolean_t *icp, boolean_t *pidlep) {
	*icp = ml_at_interrupt_context();
	*pidlep = (cpu_idle_count == real_ncpus);
}

/*
 * Routine: ml_cause_interrupt
 * Function: Generate a fake interrupt
 */
void
ml_cause_interrupt(void)
{
	return;		/* BS_XXX */
}

/* Map memory-mapped IO space */
vm_offset_t
ml_io_map(
	vm_offset_t phys_addr,
	vm_size_t size)
{
	return (io_map(phys_addr, size, VM_WIMG_IO));
}

vm_offset_t
ml_io_map_wcomb(
	vm_offset_t phys_addr,
	vm_size_t size)
{
	return (io_map(phys_addr, size, VM_WIMG_WCOMB));
}

/* boot memory allocation */
vm_offset_t
ml_static_malloc(
	__unused vm_size_t size)
{
	return ((vm_offset_t) NULL);
}

vm_map_address_t
ml_map_high_window(
	vm_offset_t phys_addr,
	vm_size_t len)
{
	return pmap_map_high_window_bd(phys_addr, len, VM_PROT_READ | VM_PROT_WRITE);
}

vm_offset_t
ml_static_ptovirt(
	vm_offset_t paddr)
{
	return phystokv(paddr);
}

vm_offset_t
ml_static_vtop(
	vm_offset_t vaddr)
{
	if (((vm_address_t)(vaddr) - gVirtBase) >= gPhysSize)
		panic("ml_static_vtop(): illegal vaddr: %p\n", (void*)vaddr);
	return ((vm_address_t)(vaddr) - gVirtBase + gPhysBase);
}


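/*
 * Routine: ml_static_protect
 * Function: Change the protection of wired kernel mappings page by page.
 * W+X requests are rejected; block mappings are accepted only if they
 * already carry the requested protection.
 */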
kern_return_t
ml_static_protect(
	vm_offset_t vaddr, /* kernel virtual address */
	vm_size_t size,
	vm_prot_t new_prot)
{
	pt_entry_t arm_prot = 0;
	pt_entry_t arm_block_prot = 0;
	vm_offset_t vaddr_cur;
	ppnum_t ppn;
	kern_return_t result = KERN_SUCCESS;

	if (vaddr < VM_MIN_KERNEL_ADDRESS)
		return KERN_FAILURE;

	assert((vaddr & (ARM_PGBYTES - 1)) == 0); /* must be page aligned */

	if ((new_prot & VM_PROT_WRITE) && (new_prot & VM_PROT_EXECUTE)) {
		panic("ml_static_protect(): WX request on %p", (void *) vaddr);
	}

	/* Set up the protection bits, and block bits so we can validate block mappings. */
	if (new_prot & VM_PROT_WRITE) {
		arm_prot |= ARM_PTE_AP(AP_RWNA);
		arm_block_prot |= ARM_TTE_BLOCK_AP(AP_RWNA);
	} else {
		arm_prot |= ARM_PTE_AP(AP_RONA);
		arm_block_prot |= ARM_TTE_BLOCK_AP(AP_RONA);
	}

	if (!(new_prot & VM_PROT_EXECUTE)) {
		arm_prot |= ARM_PTE_NX;
		arm_block_prot |= ARM_TTE_BLOCK_NX;
	}

	for (vaddr_cur = vaddr;
	     vaddr_cur < ((vaddr + size) & ~ARM_PGMASK);
	     vaddr_cur += ARM_PGBYTES) {
		ppn = pmap_find_phys(kernel_pmap, vaddr_cur);
		if (ppn != (vm_offset_t) NULL) {
			tt_entry_t *ttp = &kernel_pmap->tte[ttenum(vaddr_cur)];
			tt_entry_t tte = *ttp;

			if ((tte & ARM_TTE_TYPE_MASK) != ARM_TTE_TYPE_TABLE) {
				if (((tte & ARM_TTE_TYPE_MASK) == ARM_TTE_TYPE_BLOCK) &&
				    ((tte & (ARM_TTE_BLOCK_APMASK | ARM_TTE_BLOCK_NX_MASK)) == arm_block_prot)) {
					/*
					 * We can support ml_static_protect on a block mapping if the mapping already has
					 * the desired protections.  We still want to run checks on a per-page basis.
					 */
					continue;
				}

				result = KERN_FAILURE;
				break;
			}

			pt_entry_t *pte_p = (pt_entry_t *) ttetokv(tte) + ptenum(vaddr_cur);
			pt_entry_t ptmp = *pte_p;

			ptmp = (ptmp & ~(ARM_PTE_APMASK | ARM_PTE_NX_MASK)) | arm_prot;
			*pte_p = ptmp;
#ifndef __ARM_L1_PTW__
			FlushPoC_DcacheRegion((vm_offset_t) pte_p, sizeof(*pte_p));
#endif
		}
	}

	if (vaddr_cur > vaddr)
		flush_mmu_tlb_region(vaddr, (vm_size_t)(vaddr_cur - vaddr));

	return result;
}

/*
 * Routine: ml_static_mfree
 * Function: Release wired boot memory in [vaddr, vaddr + size) back to
 * the VM as free pages.
 */
void
ml_static_mfree(
	vm_offset_t vaddr,
	vm_size_t size)
{
	vm_offset_t vaddr_cur;
	ppnum_t ppn;
	uint32_t freed_pages = 0;

	/* It is acceptable (if bad) to fail to free. */
	if (vaddr < VM_MIN_KERNEL_ADDRESS)
		return;

	assert((vaddr & (PAGE_SIZE - 1)) == 0); /* must be page aligned */

	for (vaddr_cur = vaddr;
	     vaddr_cur < trunc_page_32(vaddr + size);
	     vaddr_cur += PAGE_SIZE) {
		ppn = pmap_find_phys(kernel_pmap, vaddr_cur);
		if (ppn != (vm_offset_t) NULL) {
			/*
			 * It is not acceptable to fail to update the protections on a page
			 * we will release to the VM.  We need to either panic or continue.
			 * For now, we'll panic (to help flag if there is memory we can
			 * reclaim).
			 */
			if (ml_static_protect(vaddr_cur, PAGE_SIZE, VM_PROT_WRITE | VM_PROT_READ) != KERN_SUCCESS) {
				panic("Failed ml_static_mfree on %p", (void *) vaddr_cur);
			}
#if 0
			/*
			 * Must NOT tear down the "V==P" mapping for vaddr_cur as the zone alias scheme
			 * relies on the persistence of these mappings for all time.
			 */
			// pmap_remove(kernel_pmap, (addr64_t) vaddr_cur, (addr64_t) (vaddr_cur + PAGE_SIZE));
#endif
			vm_page_create(ppn, (ppn + 1));
			freed_pages++;
		}
	}
	vm_page_lockspin_queues();
	vm_page_wire_count -= freed_pages;
	vm_page_wire_count_initial -= freed_pages;
	vm_page_unlock_queues();
#if DEBUG
	kprintf("ml_static_mfree: Released 0x%x pages at VA %p, size:0x%llx, last ppn: 0x%x\n", freed_pages, (void *)vaddr, (uint64_t)size, ppn);
#endif
}


/* virtual to physical on wired pages */
vm_offset_t
ml_vtophys(vm_offset_t vaddr)
{
	return kvtophys(vaddr);
}

/*
 * Routine: ml_nofault_copy
 * Function: Perform a physical mode copy if the source and destination have
 * valid translations in the kernel pmap.  If translations are present, they are
 * assumed to be wired; i.e., no attempt is made to guarantee that the
 * translations obtained remain valid for the duration of the copy process.
 */
vm_size_t
ml_nofault_copy(vm_offset_t virtsrc, vm_offset_t virtdst, vm_size_t size)
{
	addr64_t cur_phys_dst, cur_phys_src;
	uint32_t count, nbytes = 0;

	while (size > 0) {
		if (!(cur_phys_src = kvtophys(virtsrc)))
			break;
		if (!(cur_phys_dst = kvtophys(virtdst)))
			break;
		if (!pmap_valid_address(trunc_page_64(cur_phys_dst)) ||
		    !pmap_valid_address(trunc_page_64(cur_phys_src)))
			break;
		count = PAGE_SIZE - (cur_phys_src & PAGE_MASK);
		if (count > (PAGE_SIZE - (cur_phys_dst & PAGE_MASK)))
			count = PAGE_SIZE - (cur_phys_dst & PAGE_MASK);
		if (count > size)
			count = size;

		bcopy_phys(cur_phys_src, cur_phys_dst, count);

		nbytes += count;
		virtsrc += count;
		virtdst += count;
		size -= count;
	}

	return nbytes;
}

/*
 * Routine: ml_validate_nofault
 * Function: Validate that this address range has valid translations
 * in the kernel pmap.  If translations are present, they are
 * assumed to be wired; i.e., no attempt is made to guarantee
 * that the translations persist after the check.
 * Returns: TRUE if the range is mapped and will not cause a fault,
 * FALSE otherwise.
 */

boolean_t ml_validate_nofault(
	vm_offset_t virtsrc, vm_size_t size)
{
	addr64_t cur_phys_src;
	uint32_t count;

	while (size > 0) {
		if (!(cur_phys_src = kvtophys(virtsrc)))
			return FALSE;
		if (!pmap_valid_address(trunc_page_64(cur_phys_src)))
			return FALSE;
		count = (uint32_t)(PAGE_SIZE - (cur_phys_src & PAGE_MASK));
		if (count > size)
			count = (uint32_t)size;

		virtsrc += count;
		size -= count;
	}

	return TRUE;
}

void
ml_get_bouncepool_info(vm_offset_t * phys_addr, vm_size_t * size)
{
	*phys_addr = 0;
	*size = 0;
}

/*
 * Stubs for CPU Stepper
 */
void
active_rt_threads(__unused boolean_t active)
{
}

void
thread_tell_urgency(__unused int urgency,
		    __unused uint64_t rt_period,
		    __unused uint64_t rt_deadline,
		    __unused uint64_t sched_latency,
		    __unused thread_t nthread)
{
}

void
machine_run_count(__unused uint32_t count)
{
}

processor_t
machine_choose_processor(__unused processor_set_t pset, processor_t processor)
{
	return (processor);
}

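/*
 * Routine: ml_stack_remaining
 * Function: Return the number of bytes left on the current stack, using
 * the interrupt stack bounds if we are running on it and the thread's
 * kernel stack otherwise.
 */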
vm_offset_t
ml_stack_remaining(void)
{
	uintptr_t local = (uintptr_t) &local;
	vm_offset_t intstack_top_ptr;

	intstack_top_ptr = getCpuDatap()->intstack_top;
	if ((local < intstack_top_ptr) && (local > intstack_top_ptr - INTSTACK_SIZE)) {
		return (local - (getCpuDatap()->intstack_top - INTSTACK_SIZE));
	} else {
		return (local - current_thread()->kernel_stack);
	}
}

boolean_t machine_timeout_suspended(void) {
	return FALSE;
}

kern_return_t
ml_interrupt_prewarm(__unused uint64_t deadline)
{
	return KERN_FAILURE;
}

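/*
 * Routine: ml_get_hwclock
 * Function: Read the 64-bit hardware count as two 32-bit halves,
 * re-reading the high word until it is stable so that a carry between
 * the individual reads cannot produce a torn value.
 */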
uint64_t
ml_get_hwclock(void)
{
	uint64_t high_first = 0;
	uint64_t high_second = 0;
	uint64_t low = 0;

	__builtin_arm_isb(ISB_SY);

	do {
		high_first = __builtin_arm_mrrc(15, 0, 14) >> 32;
		low = __builtin_arm_mrrc(15, 0, 14) & 0xFFFFFFFFULL;
		high_second = __builtin_arm_mrrc(15, 0, 14) >> 32;
	} while (high_first != high_second);

	return (high_first << 32) | (low);
}

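/*
 * Routine: ml_delay_should_spin
 * Function: Return TRUE if a delay of the given length is shorter than
 * the CPU's idle-entry latency, so that spinning is cheaper than
 * blocking and waking back up.
 */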
boolean_t
ml_delay_should_spin(uint64_t interval)
{
	cpu_data_t *cdp = getCpuDatap();

	if (cdp->cpu_idle_latency) {
		return (interval < cdp->cpu_idle_latency) ? TRUE : FALSE;
	} else {
		/*
		 * Early boot, latency is unknown.  Err on the side of blocking,
		 * which should always be safe, even if slow.
		 */
		return FALSE;
	}
}

boolean_t ml_thread_is64bit(thread_t thread)
{
	return (thread_is_64bit(thread));
}

void ml_timer_evaluate(void) {
}

boolean_t
ml_timer_forced_evaluation(void) {
	return FALSE;
}

uint64_t
ml_energy_stat(__unused thread_t t) {
	return 0;
}


void
ml_gpu_stat_update(__unused uint64_t gpu_ns_delta) {
#if CONFIG_EMBEDDED
	/*
	 * For now: update the resource coalition stats of the
	 * current thread's coalition
	 */
	task_coalition_update_gpu_stats(current_task(), gpu_ns_delta);
#endif
}

uint64_t
ml_gpu_stat(__unused thread_t t) {
	return 0;
}

#if !CONFIG_SKIP_PRECISE_USER_KERNEL_TIME
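/*
 * Routine: timer_state_event
 * Function: Switch the per-processor state timer and the per-thread
 * timer between user and system accounting at a user/kernel boundary.
 */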
static void
timer_state_event(boolean_t switch_to_kernel)
{
	thread_t thread = current_thread();
	if (!thread->precise_user_kernel_time) return;

	processor_data_t *pd = &getCpuDatap()->cpu_processor->processor_data;
	uint64_t now = ml_get_timebase();

	timer_stop(pd->current_state, now);
	pd->current_state = (switch_to_kernel) ? &pd->system_state : &pd->user_state;
	timer_start(pd->current_state, now);

	timer_stop(pd->thread_timer, now);
	pd->thread_timer = (switch_to_kernel) ? &thread->system_timer : &thread->user_timer;
	timer_start(pd->thread_timer, now);
}

void
timer_state_event_user_to_kernel(void)
{
	timer_state_event(TRUE);
}

void
timer_state_event_kernel_to_user(void)
{
	timer_state_event(FALSE);
}
#endif /* !CONFIG_SKIP_PRECISE_USER_KERNEL_TIME */

boolean_t
user_cont_hwclock_allowed(void)
{
	return FALSE;
}

boolean_t
user_timebase_allowed(void)
{
#if __ARM_TIME__
	return TRUE;
#else
	return FALSE;
#endif
}

/*
 * The following are required for parts of the kernel
 * that cannot resolve these functions as inlines:
 */
extern thread_t current_act(void);
thread_t
current_act(void)
{
	return current_thread_fast();
}

#undef current_thread
extern thread_t current_thread(void);
thread_t
current_thread(void)
{
	return current_thread_fast();
}

#if __ARM_USER_PROTECT__
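/*
 * arm_user_protect_begin() switches TTBR0 to the thread's kernel-only
 * translation table (and the kernel ASID) and returns the previous TTBR0;
 * arm_user_protect_end() restores the user translation table and ASID if
 * they were switched away, optionally with FIQ/IRQ masked around the
 * update.
 */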
uintptr_t
arm_user_protect_begin(thread_t thread)
{
	uintptr_t ttbr0, asid = 0;				// kernel asid

	ttbr0 = __builtin_arm_mrc(15,0,2,0,0);			// Get TTBR0
	if (ttbr0 != thread->machine.kptw_ttb) {
		__builtin_arm_mcr(15,0,thread->machine.kptw_ttb,2,0,0);	// Set TTBR0
		__builtin_arm_mcr(15,0,asid,13,0,1);		// Set CONTEXTIDR
		__builtin_arm_isb(ISB_SY);
	}
	return ttbr0;
}

void
arm_user_protect_end(thread_t thread, uintptr_t ttbr0, boolean_t disable_interrupts)
{
	if ((ttbr0 != thread->machine.kptw_ttb) && (thread->machine.uptw_ttb != thread->machine.kptw_ttb)) {
		if (disable_interrupts)
			__asm__ volatile ("cpsid if" ::: "memory");	// Disable FIQ/IRQ
		__builtin_arm_mcr(15,0,thread->machine.uptw_ttb,2,0,0);	// Set TTBR0
		__builtin_arm_mcr(15,0,thread->machine.asid,13,0,1);	// Set CONTEXTIDR with thread asid
		__builtin_arm_dsb(DSB_ISH);
		__builtin_arm_isb(ISB_SY);
	}
}
#endif // __ARM_USER_PROTECT__

void ml_task_set_rop_pid(__unused task_t task, __unused task_t parent_task, __unused boolean_t inherit)
{
	return;
}