/*
 * Copyright (c) 2007-2016 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <arm/proc_reg.h>
#include <arm/machine_cpu.h>
#include <arm/cpu_internal.h>
#include <arm/cpuid.h>
#include <arm/io_map_entries.h>
#include <arm/cpu_data.h>
#include <arm/cpu_data_internal.h>
#include <arm/misc_protos.h>
#include <arm/rtclock.h>
#include <arm/caches_internal.h>
#include <console/serial_protos.h>
#include <kern/machine.h>
#include <prng/random.h>
#include <kern/startup.h>
#include <kern/sched.h>
#include <kern/thread.h>
#include <mach/machine.h>
#include <machine/atomic.h>
#include <vm/pmap.h>
#include <vm/vm_page.h>
#include <sys/kdebug.h>
#include <kern/coalition.h>
#include <pexpert/device_tree.h>
#include <arm/cpuid_internal.h>
#include <arm/cpu_capabilities.h>

#include <IOKit/IOPlatformExpert.h>

#if KPC
#include <kern/kpc.h>
#endif

static int max_cpus_initialized = 0;
#define MAX_CPUS_SET    0x1
#define MAX_CPUS_WAIT   0x2

static unsigned int avail_cpus = 0;

uint32_t LockTimeOut;
uint32_t LockTimeOutUsec;
uint64_t TLockTimeOut;
uint64_t MutexSpin;
boolean_t is_clock_configured = FALSE;

#if CONFIG_NONFATAL_ASSERTS
extern int mach_assert;
#endif
extern volatile uint32_t debug_enabled;

void machine_conf(void);

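/*
 *	Routine:        machine_startup
 *	Function:       Apply scheduler-related boot-args, configure the
 *			machine, and enter the machine-independent kernel
 *			bootstrap.  Does not return.
 */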
void
machine_startup(__unused boot_args * args)
{
	int boot_arg;

#if CONFIG_NONFATAL_ASSERTS
	PE_parse_boot_argn("assert", &mach_assert, sizeof(mach_assert));
#endif

	if (PE_parse_boot_argn("preempt", &boot_arg, sizeof(boot_arg))) {
		default_preemption_rate = boot_arg;
	}
	if (PE_parse_boot_argn("bg_preempt", &boot_arg, sizeof(boot_arg))) {
		default_bg_preemption_rate = boot_arg;
	}

	machine_conf();

	/*
	 * Kick off the kernel bootstrap.
	 */
	kernel_bootstrap();
	/* NOTREACHED */
}

char *
machine_boot_info(
	__unused char *buf,
	__unused vm_size_t size)
{
	return PE_boot_args();
}

void
machine_conf(void)
{
	machine_info.memory_size = mem_size;
}

void
machine_init(void)
{
	debug_log_init();
	clock_config();
	is_clock_configured = TRUE;
	if (debug_enabled) {
		pmap_map_globals();
	}
}

void
slave_machine_init(__unused void *param)
{
	cpu_machine_init();     /* Initialize the processor */
	clock_init();           /* Init the clock */
}

/*
 *	Routine:        machine_processor_shutdown
 *	Function:       Switch to a shutdown context and run doshutdown on the
 *			given processor; returns the thread that was running.
 */
thread_t
machine_processor_shutdown(
	__unused thread_t thread,
	void (*doshutdown)(processor_t),
	processor_t processor)
{
	return Shutdown_context(doshutdown, processor);
}

/*
 *	Routine:        ml_init_max_cpus
 *	Function:       Record the maximum CPU count reported by the platform
 *			and wake any thread blocked in ml_get_max_cpus().
 */
void
ml_init_max_cpus(unsigned int max_cpus)
{
	boolean_t current_state;

	current_state = ml_set_interrupts_enabled(FALSE);
	if (max_cpus_initialized != MAX_CPUS_SET) {
		machine_info.max_cpus = max_cpus;
		machine_info.physical_cpu_max = max_cpus;
		machine_info.logical_cpu_max = max_cpus;
		if (max_cpus_initialized == MAX_CPUS_WAIT) {
			thread_wakeup((event_t) &max_cpus_initialized);
		}
		max_cpus_initialized = MAX_CPUS_SET;
	}
	(void) ml_set_interrupts_enabled(current_state);
}

/*
 *	Routine:        ml_get_max_cpus
 *	Function:       Return the maximum CPU count, blocking until
 *			ml_init_max_cpus() has been called.
 */
unsigned int
ml_get_max_cpus(void)
{
	boolean_t current_state;

	current_state = ml_set_interrupts_enabled(FALSE);
	if (max_cpus_initialized != MAX_CPUS_SET) {
		max_cpus_initialized = MAX_CPUS_WAIT;
		assert_wait((event_t) &max_cpus_initialized, THREAD_UNINT);
		(void) thread_block(THREAD_CONTINUE_NULL);
	}
	(void) ml_set_interrupts_enabled(current_state);
	return machine_info.max_cpus;
}

/*
 *	Routine:        ml_init_lock_timeout
 *	Function:       Initialize the spinlock and mutex-spin timeouts from
 *			their defaults or from the slto_us/mtxspin boot-args.
 */
void
ml_init_lock_timeout(void)
{
	uint64_t abstime;
	uint64_t mtxspin;
	uint64_t default_timeout_ns = NSEC_PER_SEC >> 2;
	uint32_t slto;

	if (PE_parse_boot_argn("slto_us", &slto, sizeof(slto))) {
		default_timeout_ns = slto * NSEC_PER_USEC;
	}

	nanoseconds_to_absolutetime(default_timeout_ns, &abstime);
	LockTimeOutUsec = (uint32_t)(default_timeout_ns / NSEC_PER_USEC);
	LockTimeOut = (uint32_t)abstime;
	TLockTimeOut = LockTimeOut;

	if (PE_parse_boot_argn("mtxspin", &mtxspin, sizeof(mtxspin))) {
		if (mtxspin > USEC_PER_SEC >> 4) {
			mtxspin = USEC_PER_SEC >> 4;
		}
		nanoseconds_to_absolutetime(mtxspin * NSEC_PER_USEC, &abstime);
	} else {
		nanoseconds_to_absolutetime(10 * NSEC_PER_USEC, &abstime);
	}
	MutexSpin = abstime;
}

/*
 * This is called from the machine-independent routine cpu_up()
 * to perform machine-dependent info updates.
 */
void
ml_cpu_up(void)
{
	os_atomic_inc(&machine_info.physical_cpu, relaxed);
	os_atomic_inc(&machine_info.logical_cpu, relaxed);
}

/*
 * This is called from the machine-independent routine cpu_down()
 * to perform machine-dependent info updates.
 */
void
ml_cpu_down(void)
{
	cpu_data_t *cpu_data_ptr;

	os_atomic_dec(&machine_info.physical_cpu, relaxed);
	os_atomic_dec(&machine_info.logical_cpu, relaxed);

	/*
	 * If we want to deal with outstanding IPIs, we need to do so
	 * relatively early in the processor_doshutdown path, as we pend
	 * decrementer interrupts using the IPI mechanism if we cannot
	 * immediately service them (if IRQ is masked).  Do so now.
	 *
	 * We aren't on the interrupt stack here; would it make more sense
	 * to disable signaling and then enable interrupts?  It might be
	 * a bit cleaner.
	 */
	cpu_data_ptr = getCpuDatap();
	cpu_data_ptr->cpu_running = FALSE;

	cpu_signal_handler_internal(TRUE);
}

/*
 *	Routine:        ml_cpu_get_info
 *	Function:       Report the cache geometry (line size, L1/L2 sizes)
 *			for the current CPU.
 */
void
ml_cpu_get_info(ml_cpu_info_t * ml_cpu_info)
{
	cache_info_t *cpuid_cache_info;

	cpuid_cache_info = cache_info();
	ml_cpu_info->vector_unit = 0;
	ml_cpu_info->cache_line_size = cpuid_cache_info->c_linesz;
	ml_cpu_info->l1_icache_size = cpuid_cache_info->c_isize;
	ml_cpu_info->l1_dcache_size = cpuid_cache_info->c_dsize;

#if (__ARM_ARCH__ >= 7)
	ml_cpu_info->l2_settings = 1;
	ml_cpu_info->l2_cache_size = cpuid_cache_info->c_l2size;
#else
	ml_cpu_info->l2_settings = 0;
	ml_cpu_info->l2_cache_size = 0xFFFFFFFF;
#endif
	ml_cpu_info->l3_settings = 0;
	ml_cpu_info->l3_cache_size = 0xFFFFFFFF;
}

unsigned int
ml_get_machine_mem(void)
{
	return machine_info.memory_size;
}

/* Return max offset */
vm_map_offset_t
ml_get_max_offset(
	boolean_t is64,
	unsigned int option)
{
	unsigned int pmap_max_offset_option = 0;

	switch (option) {
	case MACHINE_MAX_OFFSET_DEFAULT:
		pmap_max_offset_option = ARM_PMAP_MAX_OFFSET_DEFAULT;
		break;
	case MACHINE_MAX_OFFSET_MIN:
		pmap_max_offset_option = ARM_PMAP_MAX_OFFSET_MIN;
		break;
	case MACHINE_MAX_OFFSET_MAX:
		pmap_max_offset_option = ARM_PMAP_MAX_OFFSET_MAX;
		break;
	case MACHINE_MAX_OFFSET_DEVICE:
		pmap_max_offset_option = ARM_PMAP_MAX_OFFSET_DEVICE;
		break;
	default:
		panic("ml_get_max_offset(): Illegal option 0x%x\n", option);
		break;
	}
	return pmap_max_offset(is64, pmap_max_offset_option);
}

boolean_t
ml_wants_panic_trap_to_debugger(void)
{
	return FALSE;
}

void
ml_panic_trap_to_debugger(__unused const char *panic_format_str,
    __unused va_list *panic_args,
    __unused unsigned int reason,
    __unused void *ctx,
    __unused uint64_t panic_options_mask,
    __unused unsigned long panic_caller)
{
	return;
}

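/*
 *	Routine:        halt_all_cpus
 *	Function:       Ask the platform expert to halt or restart the
 *			machine, then spin forever in case the request does
 *			not take effect immediately.
 */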
__attribute__((noreturn))
void
halt_all_cpus(boolean_t reboot)
{
	if (reboot) {
		printf("MACH Reboot\n");
		PEHaltRestart(kPERestartCPU);
	} else {
		printf("CPU halted\n");
		PEHaltRestart(kPEHaltCPU);
	}
	while (1) {
		;
	}
}

__attribute__((noreturn))
void
halt_cpu(void)
{
	halt_all_cpus(FALSE);
}

/*
 *	Routine:        machine_signal_idle
 *	Function:       Send a no-op IPI to kick an idle processor out of
 *			its low-power loop.
 */
void
machine_signal_idle(
	processor_t processor)
{
	cpu_signal(processor_to_cpu_datap(processor), SIGPnop, (void *)NULL, (void *)NULL);
	KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_REMOTE_AST), processor->cpu_id, 0 /* nop */, 0, 0, 0);
}

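/*
 * Deferred variant of machine_signal_idle: the wakeup IPI may be batched
 * and delivered later; machine_signal_idle_cancel revokes a still-pending
 * deferred signal.
 */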
void
machine_signal_idle_deferred(
	processor_t processor)
{
	cpu_signal_deferred(processor_to_cpu_datap(processor));
	KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_REMOTE_DEFERRED_AST), processor->cpu_id, 0 /* nop */, 0, 0, 0);
}

void
machine_signal_idle_cancel(
	processor_t processor)
{
	cpu_signal_cancel(processor_to_cpu_datap(processor));
	KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_REMOTE_CANCEL_AST), processor->cpu_id, 0 /* nop */, 0, 0, 0);
}

/*
 *	Routine:        ml_install_interrupt_handler
 *	Function:       Register the platform interrupt handler for this CPU
 *			and mark the CPU's interrupts as enabled.
 */
void
ml_install_interrupt_handler(
	void *nub,
	int source,
	void *target,
	IOInterruptHandler handler,
	void *refCon)
{
	cpu_data_t *cpu_data_ptr;
	boolean_t current_state;

	current_state = ml_set_interrupts_enabled(FALSE);
	cpu_data_ptr = getCpuDatap();

	cpu_data_ptr->interrupt_nub = nub;
	cpu_data_ptr->interrupt_source = source;
	cpu_data_ptr->interrupt_target = target;
	cpu_data_ptr->interrupt_handler = handler;
	cpu_data_ptr->interrupt_refCon = refCon;

	cpu_data_ptr->interrupts_enabled = TRUE;
	(void) ml_set_interrupts_enabled(current_state);

	initialize_screen(NULL, kPEAcquireScreen);
}

/*
 *	Routine:        ml_init_interrupt
 *	Function:       Initialize Interrupts
 */
void
ml_init_interrupt(void)
{
}

/*
 *	Routine:        ml_init_timebase
 *	Function:       Register and set up the timebase and decrementer
 *			services.
 */
void
ml_init_timebase(
	void *args,
	tbd_ops_t tbd_funcs,
	vm_offset_t int_address,
	vm_offset_t int_value)
{
	cpu_data_t *cpu_data_ptr;

	cpu_data_ptr = (cpu_data_t *)args;

	if ((cpu_data_ptr == &BootCpuData)
	    && (rtclock_timebase_func.tbd_fiq_handler == (void *)NULL)) {
		rtclock_timebase_func = *tbd_funcs;
		rtclock_timebase_addr = int_address;
		rtclock_timebase_val = int_value;
	}
}

void
fiq_context_bootstrap(boolean_t enable_fiq)
{
	fiq_context_init(enable_fiq);
}

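/*
 * Walk the /cpus node of the device tree to count the available CPUs,
 * optionally clamping the count with the "cpus" boot-arg.  When
 * MACH_ASSERT is enabled, also sanity-check that cpu 0 is marked
 * "running" and that the "reg" properties are dense and in order.
 */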
void
ml_parse_cpu_topology(void)
{
	DTEntry entry, child;
	OpaqueDTEntryIterator iter;
	uint32_t cpu_boot_arg;
	int err;

	err = DTLookupEntry(NULL, "/cpus", &entry);
	assert(err == kSuccess);

	err = DTInitEntryIterator(entry, &iter);
	assert(err == kSuccess);

	while (kSuccess == DTIterateEntries(&iter, &child)) {
#if MACH_ASSERT
		unsigned int propSize;
		void *prop = NULL;
		if (avail_cpus == 0) {
			if (kSuccess != DTGetProperty(child, "state", &prop, &propSize)) {
				panic("unable to retrieve state for cpu %u", avail_cpus);
			}

			if (strncmp((char*)prop, "running", propSize) != 0) {
				panic("cpu 0 has not been marked as running!");
			}
		}
		assert(kSuccess == DTGetProperty(child, "reg", &prop, &propSize));
		assert(avail_cpus == *((uint32_t*)prop));
#endif
		++avail_cpus;
	}

	cpu_boot_arg = avail_cpus;
	if (PE_parse_boot_argn("cpus", &cpu_boot_arg, sizeof(cpu_boot_arg)) &&
	    (avail_cpus > cpu_boot_arg)) {
		avail_cpus = cpu_boot_arg;
	}

	if (avail_cpus == 0) {
		panic("No cpus found!");
	}
}

unsigned int
ml_get_cpu_count(void)
{
	return avail_cpus;
}

int
ml_get_boot_cpu_number(void)
{
	return 0;
}

cluster_type_t
ml_get_boot_cluster(void)
{
	return CLUSTER_TYPE_SMP;
}

int
ml_get_cpu_number(uint32_t phys_id)
{
	return (int)phys_id;
}

int
ml_get_max_cpu_number(void)
{
	return avail_cpus - 1;
}

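/*
 * Register a processor with the kernel: allocate and initialize its
 * per-cpu data (unless it is the boot CPU), record the platform's idle,
 * cache, and timer callbacks, and hand back the processor_t plus the IPI
 * handler the platform should wire up.
 */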
kern_return_t
ml_processor_register(ml_processor_info_t *in_processor_info,
    processor_t * processor_out, ipi_handler_t *ipi_handler_out,
    perfmon_interrupt_handler_func *pmi_handler_out)
{
	cpu_data_t *this_cpu_datap;
	boolean_t is_boot_cpu;

	if (in_processor_info->phys_id >= MAX_CPUS) {
		/*
		 * The physical CPU ID indicates that we have more CPUs than
		 * this xnu build supports. This probably means we have an
		 * incorrect board configuration.
		 *
		 * TODO: Should this just return a failure instead? A panic
		 * is simply a convenient way to catch bugs in the pexpert
		 * headers.
		 */
		panic("phys_id %u is too large for MAX_CPUS (%u)", in_processor_info->phys_id, MAX_CPUS);
	}

	/* Fail the registration if the number of CPUs has been limited by boot-arg. */
	if ((in_processor_info->phys_id >= avail_cpus) ||
	    (in_processor_info->log_id > (uint32_t)ml_get_max_cpu_number())) {
		return KERN_FAILURE;
	}

	if (in_processor_info->log_id != (uint32_t)ml_get_boot_cpu_number()) {
		is_boot_cpu = FALSE;
		this_cpu_datap = cpu_data_alloc(FALSE);
		cpu_data_init(this_cpu_datap);
	} else {
		this_cpu_datap = &BootCpuData;
		is_boot_cpu = TRUE;
	}

	this_cpu_datap->cpu_id = in_processor_info->cpu_id;

	this_cpu_datap->cpu_console_buf = console_cpu_alloc(is_boot_cpu);
	if (this_cpu_datap->cpu_console_buf == (void *)(NULL)) {
		goto processor_register_error;
	}

	if (!is_boot_cpu) {
		if (cpu_data_register(this_cpu_datap) != KERN_SUCCESS) {
			goto processor_register_error;
		}
	}

	this_cpu_datap->cpu_idle_notify = (void *) in_processor_info->processor_idle;
	this_cpu_datap->cpu_cache_dispatch = in_processor_info->platform_cache_dispatch;
	nanoseconds_to_absolutetime((uint64_t) in_processor_info->powergate_latency, &this_cpu_datap->cpu_idle_latency);
	this_cpu_datap->cpu_reset_assist = kvtophys(in_processor_info->powergate_stub_addr);

	this_cpu_datap->idle_timer_notify = (void *) in_processor_info->idle_timer;
	this_cpu_datap->idle_timer_refcon = in_processor_info->idle_timer_refcon;

	this_cpu_datap->platform_error_handler = (void *) in_processor_info->platform_error_handler;
	this_cpu_datap->cpu_regmap_paddr = in_processor_info->regmap_paddr;
	this_cpu_datap->cpu_phys_id = in_processor_info->phys_id;
	this_cpu_datap->cpu_l2_access_penalty = in_processor_info->l2_access_penalty;

	if (!is_boot_cpu) {
		processor_init((struct processor *)this_cpu_datap->cpu_processor,
		    this_cpu_datap->cpu_number, processor_pset(master_processor));

		if (this_cpu_datap->cpu_l2_access_penalty) {
			/*
			 * Cores that have a non-zero L2 access penalty compared
			 * to the boot processor should be de-prioritized by the
			 * scheduler, so that threads use the cores with better L2
			 * preferentially.
			 */
			processor_set_primary(this_cpu_datap->cpu_processor,
			    master_processor);
		}
	}

	*processor_out = this_cpu_datap->cpu_processor;
	*ipi_handler_out = cpu_signal_handler;
	*pmi_handler_out = NULL;
	if (in_processor_info->idle_tickle != (idle_tickle_t *) NULL) {
		*in_processor_info->idle_tickle = (idle_tickle_t) cpu_idle_tickle;
	}

#if KPC
	if (kpc_register_cpu(this_cpu_datap) != TRUE) {
		goto processor_register_error;
	}
#endif

	if (!is_boot_cpu) {
		random_cpu_init(this_cpu_datap->cpu_number);
	}

	return KERN_SUCCESS;

processor_register_error:
#if KPC
	kpc_unregister_cpu(this_cpu_datap);
#endif
	if (!is_boot_cpu) {
		cpu_data_free(this_cpu_datap);
	}
	return KERN_FAILURE;
}

void
ml_init_arm_debug_interface(
	void * in_cpu_datap,
	vm_offset_t virt_address)
{
	((cpu_data_t *)in_cpu_datap)->cpu_debug_interface_map = virt_address;
	do_debugid();
}

/*
 *	Routine:        init_ast_check
 *	Function:       Nothing to do on ARM.
 */
void
init_ast_check(
	__unused processor_t processor)
{
}

/*
 *	Routine:        cause_ast_check
 *	Function:       Send an AST IPI so the target processor re-checks
 *			for pending ASTs.
 */
void
cause_ast_check(
	processor_t processor)
{
	if (current_processor() != processor) {
		cpu_signal(processor_to_cpu_datap(processor), SIGPast, (void *)NULL, (void *)NULL);
		KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_REMOTE_AST), processor->cpu_id, 1 /* ast */, 0, 0, 0);
	}
}

extern uint32_t cpu_idle_count;

void
ml_get_power_state(boolean_t *icp, boolean_t *pidlep)
{
	*icp = ml_at_interrupt_context();
	*pidlep = (cpu_idle_count == real_ncpus);
}

/*
 *	Routine:        ml_cause_interrupt
 *	Function:       Generate a fake interrupt
 */
void
ml_cause_interrupt(void)
{
	return; /* BS_XXX */
}

/* Map memory-mapped I/O space */
vm_offset_t
ml_io_map(
	vm_offset_t phys_addr,
	vm_size_t size)
{
	return io_map(phys_addr, size, VM_WIMG_IO);
}

/* Map memory-mapped I/O space (with the protections specified) */
vm_offset_t
ml_io_map_with_prot(
	vm_offset_t phys_addr,
	vm_size_t size,
	vm_prot_t prot)
{
	return io_map_with_prot(phys_addr, size, VM_WIMG_IO, prot);
}

vm_offset_t
ml_io_map_wcomb(
	vm_offset_t phys_addr,
	vm_size_t size)
{
	return io_map(phys_addr, size, VM_WIMG_WCOMB);
}

/* boot memory allocation */
vm_offset_t
ml_static_malloc(
	__unused vm_size_t size)
{
	return (vm_offset_t) NULL;
}

vm_map_address_t
ml_map_high_window(
	vm_offset_t phys_addr,
	vm_size_t len)
{
	return pmap_map_high_window_bd(phys_addr, len, VM_PROT_READ | VM_PROT_WRITE);
}

vm_offset_t
ml_static_ptovirt(
	vm_offset_t paddr)
{
	return phystokv(paddr);
}

vm_offset_t
ml_static_vtop(
	vm_offset_t vaddr)
{
	assertf(((vm_address_t)(vaddr) - gVirtBase) < gPhysSize, "%s: illegal vaddr: %p", __func__, (void*)vaddr);
	return (vm_address_t)(vaddr) - gVirtBase + gPhysBase;
}

/*
 * Return the maximum contiguous KVA range that can be accessed from this
 * physical address.  For arm64, we employ a segmented physical aperture
 * relocation table which can limit the available range for a given PA to
 * something less than the extent of physical memory.  But here, we still
 * have a flat physical aperture, so no such requirement exists.
 */
vm_map_address_t
phystokv_range(pmap_paddr_t pa, vm_size_t *max_len)
{
	vm_size_t len = gPhysSize - (pa - gPhysBase);
	if (*max_len > len) {
		*max_len = len;
	}
	assertf((pa - gPhysBase) < gPhysSize, "%s: illegal PA: 0x%lx", __func__, (unsigned long)pa);
	return pa - gPhysBase + gVirtBase;
}

vm_offset_t
ml_static_slide(
	vm_offset_t vaddr)
{
	return VM_KERNEL_SLIDE(vaddr);
}

vm_offset_t
ml_static_unslide(
	vm_offset_t vaddr)
{
	return VM_KERNEL_UNSLIDE(vaddr);
}

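/*
 * Change the protections on a range of statically mapped (wired) kernel
 * memory by rewriting the PTEs directly.  W+X requests panic, and block
 * mappings are accepted only if they already carry the requested
 * protections.
 */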
kern_return_t
ml_static_protect(
	vm_offset_t vaddr, /* kernel virtual address */
	vm_size_t size,
	vm_prot_t new_prot)
{
	pt_entry_t arm_prot = 0;
	pt_entry_t arm_block_prot = 0;
	vm_offset_t vaddr_cur;
	ppnum_t ppn;
	kern_return_t result = KERN_SUCCESS;

	if (vaddr < VM_MIN_KERNEL_ADDRESS) {
		return KERN_FAILURE;
	}

	assert((vaddr & (ARM_PGBYTES - 1)) == 0); /* must be page aligned */

	if ((new_prot & VM_PROT_WRITE) && (new_prot & VM_PROT_EXECUTE)) {
		panic("ml_static_protect(): WX request on %p", (void *) vaddr);
	}

	/* Set up the protection bits, and block bits so we can validate block mappings. */
	if (new_prot & VM_PROT_WRITE) {
		arm_prot |= ARM_PTE_AP(AP_RWNA);
		arm_block_prot |= ARM_TTE_BLOCK_AP(AP_RWNA);
	} else {
		arm_prot |= ARM_PTE_AP(AP_RONA);
		arm_block_prot |= ARM_TTE_BLOCK_AP(AP_RONA);
	}

	if (!(new_prot & VM_PROT_EXECUTE)) {
		arm_prot |= ARM_PTE_NX;
		arm_block_prot |= ARM_TTE_BLOCK_NX;
	}

	for (vaddr_cur = vaddr;
	    vaddr_cur < ((vaddr + size) & ~ARM_PGMASK);
	    vaddr_cur += ARM_PGBYTES) {
		ppn = pmap_find_phys(kernel_pmap, vaddr_cur);
		if (ppn != (vm_offset_t) NULL) {
			tt_entry_t *ttp = &kernel_pmap->tte[ttenum(vaddr_cur)];
			tt_entry_t tte = *ttp;

			if ((tte & ARM_TTE_TYPE_MASK) != ARM_TTE_TYPE_TABLE) {
				if (((tte & ARM_TTE_TYPE_MASK) == ARM_TTE_TYPE_BLOCK) &&
				    ((tte & (ARM_TTE_BLOCK_APMASK | ARM_TTE_BLOCK_NX_MASK)) == arm_block_prot)) {
					/*
					 * We can support ml_static_protect on a block mapping if the mapping already has
					 * the desired protections.  We still want to run checks on a per-page basis.
					 */
					continue;
				}

				result = KERN_FAILURE;
				break;
			}

			pt_entry_t *pte_p = (pt_entry_t *) ttetokv(tte) + ptenum(vaddr_cur);
			pt_entry_t ptmp = *pte_p;

			ptmp = (ptmp & ~(ARM_PTE_APMASK | ARM_PTE_NX_MASK)) | arm_prot;
			*pte_p = ptmp;
		}
	}

	if (vaddr_cur > vaddr) {
		flush_mmu_tlb_region(vaddr, (vm_size_t)(vaddr_cur - vaddr));
	}

	return result;
}

/*
 *	Routine:        ml_static_mfree
 *	Function:       Release a range of wired, statically mapped boot
 *			memory to the VM as free pages.
 */
void
ml_static_mfree(
	vm_offset_t vaddr,
	vm_size_t size)
{
	vm_offset_t vaddr_cur;
	ppnum_t ppn;
	uint32_t freed_pages = 0;

	/* It is acceptable (if bad) to fail to free. */
	if (vaddr < VM_MIN_KERNEL_ADDRESS) {
		return;
	}

	assert((vaddr & (PAGE_SIZE - 1)) == 0); /* must be page aligned */

	for (vaddr_cur = vaddr;
	    vaddr_cur < trunc_page_32(vaddr + size);
	    vaddr_cur += PAGE_SIZE) {
		ppn = pmap_find_phys(kernel_pmap, vaddr_cur);
		if (ppn != (vm_offset_t) NULL) {
			/*
			 * It is not acceptable to fail to update the protections on a page
			 * we will release to the VM.  We need to either panic or continue.
			 * For now, we'll panic (to help flag if there is memory we can
			 * reclaim).
			 */
			if (ml_static_protect(vaddr_cur, PAGE_SIZE, VM_PROT_WRITE | VM_PROT_READ) != KERN_SUCCESS) {
				panic("Failed ml_static_mfree on %p", (void *) vaddr_cur);
			}
#if 0
			/*
			 * Must NOT tear down the "V==P" mapping for vaddr_cur as the zone alias scheme
			 * relies on the persistence of these mappings for all time.
			 */
			// pmap_remove(kernel_pmap, (addr64_t) vaddr_cur, (addr64_t) (vaddr_cur + PAGE_SIZE));
#endif
			vm_page_create(ppn, (ppn + 1));
			freed_pages++;
		}
	}
	vm_page_lockspin_queues();
	vm_page_wire_count -= freed_pages;
	vm_page_wire_count_initial -= freed_pages;
	vm_page_unlock_queues();
#if DEBUG
	kprintf("ml_static_mfree: Released 0x%x pages at VA %p, size:0x%llx, last ppn: 0x%x\n", freed_pages, (void *)vaddr, (uint64_t)size, ppn);
#endif
}

/* virtual to physical on wired pages */
vm_offset_t
ml_vtophys(vm_offset_t vaddr)
{
	return kvtophys(vaddr);
}

/*
 *	Routine:        ml_nofault_copy
 *	Function:       Perform a physical mode copy if the source and destination have
 *	valid translations in the kernel pmap.  If translations are present, they are
 *	assumed to be wired; i.e., no attempt is made to guarantee that the
 *	translations obtained remain valid for the duration of the copy process.
 */
vm_size_t
ml_nofault_copy(vm_offset_t virtsrc, vm_offset_t virtdst, vm_size_t size)
{
	addr64_t cur_phys_dst, cur_phys_src;
	uint32_t count, nbytes = 0;

	while (size > 0) {
		if (!(cur_phys_src = kvtophys(virtsrc))) {
			break;
		}
		if (!(cur_phys_dst = kvtophys(virtdst))) {
			break;
		}
		if (!pmap_valid_address(trunc_page_64(cur_phys_dst)) ||
		    !pmap_valid_address(trunc_page_64(cur_phys_src))) {
			break;
		}
		count = PAGE_SIZE - (cur_phys_src & PAGE_MASK);
		if (count > (PAGE_SIZE - (cur_phys_dst & PAGE_MASK))) {
			count = PAGE_SIZE - (cur_phys_dst & PAGE_MASK);
		}
		if (count > size) {
			count = size;
		}

		bcopy_phys(cur_phys_src, cur_phys_dst, count);

		nbytes += count;
		virtsrc += count;
		virtdst += count;
		size -= count;
	}

	return nbytes;
}

/*
 *	Routine:        ml_validate_nofault
 *	Function:       Validate that the address range has valid translations
 *			in the kernel pmap.  If translations are present, they
 *			are assumed to be wired; i.e., no attempt is made to
 *			guarantee that the translations persist after the check.
 *	Returns:        TRUE if the range is mapped and will not cause a fault,
 *			FALSE otherwise.
 */
boolean_t
ml_validate_nofault(
	vm_offset_t virtsrc, vm_size_t size)
{
	addr64_t cur_phys_src;
	uint32_t count;

	while (size > 0) {
		if (!(cur_phys_src = kvtophys(virtsrc))) {
			return FALSE;
		}
		if (!pmap_valid_address(trunc_page_64(cur_phys_src))) {
			return FALSE;
		}
		count = (uint32_t)(PAGE_SIZE - (cur_phys_src & PAGE_MASK));
		if (count > size) {
			count = (uint32_t)size;
		}

		virtsrc += count;
		size -= count;
	}

	return TRUE;
}

void
ml_get_bouncepool_info(vm_offset_t * phys_addr, vm_size_t * size)
{
	*phys_addr = 0;
	*size = 0;
}

/*
 * Stubs for CPU Stepper
 */
void
active_rt_threads(__unused boolean_t active)
{
}

void
thread_tell_urgency(__unused thread_urgency_t urgency,
    __unused uint64_t rt_period,
    __unused uint64_t rt_deadline,
    __unused uint64_t sched_latency,
    __unused thread_t nthread)
{
}

void
machine_run_count(__unused uint32_t count)
{
}

processor_t
machine_choose_processor(__unused processor_set_t pset, processor_t processor)
{
	return processor;
}

boolean_t
machine_timeout_suspended(void)
{
	return FALSE;
}

kern_return_t
ml_interrupt_prewarm(__unused uint64_t deadline)
{
	return KERN_FAILURE;
}

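/*
 * Read the 64-bit hardware count register via CP15.  The high word is
 * sampled before and after the low word; if the two samples differ, the
 * low word rolled over mid-sequence and the read is retried.
 */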
uint64_t
ml_get_hwclock(void)
{
	uint64_t high_first = 0;
	uint64_t high_second = 0;
	uint64_t low = 0;

	__builtin_arm_isb(ISB_SY);

	do {
		high_first = __builtin_arm_mrrc(15, 0, 14) >> 32;
		low = __builtin_arm_mrrc(15, 0, 14) & 0xFFFFFFFFULL;
		high_second = __builtin_arm_mrrc(15, 0, 14) >> 32;
	} while (high_first != high_second);

	return (high_first << 32) | (low);
}

boolean_t
ml_delay_should_spin(uint64_t interval)
{
	cpu_data_t *cdp = getCpuDatap();

	if (cdp->cpu_idle_latency) {
		return (interval < cdp->cpu_idle_latency) ? TRUE : FALSE;
	} else {
		/*
		 * Early boot, latency is unknown.  Err on the side of blocking,
		 * which should always be safe, even if slow.
		 */
		return FALSE;
	}
}

void
ml_delay_on_yield(void)
{
}

boolean_t
ml_thread_is64bit(thread_t thread)
{
	return thread_is_64bit_addr(thread);
}

void
ml_timer_evaluate(void)
{
}

boolean_t
ml_timer_forced_evaluation(void)
{
	return FALSE;
}

uint64_t
ml_energy_stat(__unused thread_t t)
{
	return 0;
}

void
ml_gpu_stat_update(__unused uint64_t gpu_ns_delta)
{
#if CONFIG_EMBEDDED
	/*
	 * For now: update the resource coalition stats of the
	 * current thread's coalition
	 */
	task_coalition_update_gpu_stats(current_task(), gpu_ns_delta);
#endif
}

uint64_t
ml_gpu_stat(__unused thread_t t)
{
	return 0;
}

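/*
 * When precise user/kernel time accounting is enabled for a thread, each
 * user/kernel boundary crossing stops the outgoing processor-state and
 * thread timers and starts the incoming ones.
 */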
#if !CONFIG_SKIP_PRECISE_USER_KERNEL_TIME
static void
timer_state_event(boolean_t switch_to_kernel)
{
	thread_t thread = current_thread();
	if (!thread->precise_user_kernel_time) {
		return;
	}

	processor_data_t *pd = &getCpuDatap()->cpu_processor->processor_data;
	uint64_t now = ml_get_timebase();

	timer_stop(pd->current_state, now);
	pd->current_state = (switch_to_kernel) ? &pd->system_state : &pd->user_state;
	timer_start(pd->current_state, now);

	timer_stop(pd->thread_timer, now);
	pd->thread_timer = (switch_to_kernel) ? &thread->system_timer : &thread->user_timer;
	timer_start(pd->thread_timer, now);
}

void
timer_state_event_user_to_kernel(void)
{
	timer_state_event(TRUE);
}

void
timer_state_event_kernel_to_user(void)
{
	timer_state_event(FALSE);
}
#endif /* !CONFIG_SKIP_PRECISE_USER_KERNEL_TIME */

uint32_t
get_arm_cpu_version(void)
{
	uint32_t value = machine_read_midr();

	/* Compose the register values into 8 bits; variant[7:4], revision[3:0]. */
	return ((value & MIDR_REV_MASK) >> MIDR_REV_SHIFT) | ((value & MIDR_VAR_MASK) >> (MIDR_VAR_SHIFT - 4));
}

boolean_t
user_cont_hwclock_allowed(void)
{
	return FALSE;
}

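/*
 * Report whether user space may read the timebase directly:
 * USER_TIMEBASE_SPEC when __ARM_TIME__ is configured, otherwise
 * USER_TIMEBASE_NONE.
 */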
uint8_t
user_timebase_type(void)
{
#if __ARM_TIME__
	return USER_TIMEBASE_SPEC;
#else
	return USER_TIMEBASE_NONE;
#endif
}

/*
 * The following are required for parts of the kernel
 * that cannot resolve these functions as inlines:
 */
extern thread_t current_act(void) __attribute__((const));
thread_t
current_act(void)
{
	return current_thread_fast();
}

#undef current_thread
extern thread_t current_thread(void) __attribute__((const));
thread_t
current_thread(void)
{
	return current_thread_fast();
}

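/*
 * With __ARM_USER_PROTECT__, the kernel runs with the user address space
 * unmapped: these helpers switch TTBR0 (and the ASID in CONTEXTIDR)
 * between the thread's kernel and user translation tables around windows
 * where the kernel must touch user mappings.
 */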
#if __ARM_USER_PROTECT__
uintptr_t
arm_user_protect_begin(thread_t thread)
{
	uintptr_t ttbr0, asid = 0; // kernel asid

	ttbr0 = __builtin_arm_mrc(15, 0, 2, 0, 0); // Get TTBR0
	if (ttbr0 != thread->machine.kptw_ttb) {
		__builtin_arm_mcr(15, 0, thread->machine.kptw_ttb, 2, 0, 0); // Set TTBR0
		__builtin_arm_mcr(15, 0, asid, 13, 0, 1); // Set CONTEXTIDR
		__builtin_arm_isb(ISB_SY);
	}
	return ttbr0;
}

void
arm_user_protect_end(thread_t thread, uintptr_t ttbr0, boolean_t disable_interrupts)
{
	if ((ttbr0 != thread->machine.kptw_ttb) && (thread->machine.uptw_ttb != thread->machine.kptw_ttb)) {
		if (disable_interrupts) {
			__asm__ volatile ("cpsid if" ::: "memory"); // Disable FIQ/IRQ
		}
		__builtin_arm_mcr(15, 0, thread->machine.uptw_ttb, 2, 0, 0); // Set TTBR0
		__builtin_arm_mcr(15, 0, thread->machine.asid, 13, 0, 1); // Set CONTEXTIDR with thread asid
		__builtin_arm_dsb(DSB_ISH);
		__builtin_arm_isb(ISB_SY);
	}
}
#endif // __ARM_USER_PROTECT__