/*
 * Copyright (c) 2007-2016 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <arm/proc_reg.h>
#include <arm/machine_cpu.h>
#include <arm/cpu_internal.h>
#include <arm/cpuid.h>
#include <arm/io_map_entries.h>
#include <arm/cpu_data.h>
#include <arm/cpu_data_internal.h>
#include <arm/machine_routines.h>
#include <arm/misc_protos.h>
#include <arm/rtclock.h>
#include <arm/caches_internal.h>
#include <console/serial_protos.h>
#include <kern/machine.h>
#include <prng/random.h>
#include <kern/startup.h>
#include <kern/sched.h>
#include <kern/thread.h>
#include <mach/machine.h>
#include <machine/atomic.h>
#include <vm/pmap.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <sys/kdebug.h>
#include <kern/coalition.h>
#include <pexpert/device_tree.h>
#include <arm/cpuid_internal.h>
#include <arm/cpu_capabilities.h>

#include <IOKit/IOPlatformExpert.h>

#if KPC
#include <kern/kpc.h>
#endif

/* arm32 only supports a highly simplified topology, fixed at 1 cluster */
static ml_topology_cpu_t topology_cpu_array[MAX_CPUS];
static ml_topology_cluster_t topology_cluster = {
	.cluster_id = 0,
	.cluster_type = CLUSTER_TYPE_SMP,
	.first_cpu_id = 0,
};
static ml_topology_info_t topology_info = {
	.version = CPU_TOPOLOGY_VERSION,
	.num_clusters = 1,
	.max_cluster_id = 0,
	.cpus = topology_cpu_array,
	.clusters = &topology_cluster,
	.boot_cpu = &topology_cpu_array[0],
	.boot_cluster = &topology_cluster,
};

uint32_t LockTimeOut;
uint32_t LockTimeOutUsec;
uint64_t TLockTimeOut;
uint64_t MutexSpin;
extern uint32_t lockdown_done;
uint64_t low_MutexSpin;
int64_t high_MutexSpin;

void
machine_startup(__unused boot_args * args)
{
	machine_conf();

	/*
	 * Kick off the kernel bootstrap.
	 */
	kernel_bootstrap();
	/* NOTREACHED */
}

char *
machine_boot_info(
	__unused char *buf,
	__unused vm_size_t size)
{
	return PE_boot_args();
}

void
slave_machine_init(__unused void *param)
{
	cpu_machine_init();     /* Initialize the processor */
	clock_init();           /* Init the clock */
}

/*
 * Routine: machine_processor_shutdown
 * Function:
 */
thread_t
machine_processor_shutdown(
	__unused thread_t thread,
	void (*doshutdown)(processor_t),
	processor_t processor)
{
	return Shutdown_context(doshutdown, processor);
}

/*
 * Routine: ml_init_lock_timeout
 * Function:
 */
void
ml_init_lock_timeout(void)
{
	uint64_t abstime;
	uint64_t mtxspin;
	uint64_t default_timeout_ns = NSEC_PER_SEC >> 2;
	uint32_t slto;

	if (PE_parse_boot_argn("slto_us", &slto, sizeof(slto))) {
		default_timeout_ns = slto * NSEC_PER_USEC;
	}

	nanoseconds_to_absolutetime(default_timeout_ns, &abstime);
	LockTimeOutUsec = (uint32_t)(default_timeout_ns / NSEC_PER_USEC);
	LockTimeOut = (uint32_t)abstime;
	TLockTimeOut = LockTimeOut;

	if (PE_parse_boot_argn("mtxspin", &mtxspin, sizeof(mtxspin))) {
		if (mtxspin > USEC_PER_SEC >> 4) {
			mtxspin = USEC_PER_SEC >> 4;
		}
		nanoseconds_to_absolutetime(mtxspin * NSEC_PER_USEC, &abstime);
	} else {
		nanoseconds_to_absolutetime(10 * NSEC_PER_USEC, &abstime);
	}
	MutexSpin = abstime;
	low_MutexSpin = MutexSpin;
	/*
	 * high_MutexSpin should be initialized as low_MutexSpin * real_ncpus, but
	 * real_ncpus is not set at this time
	 *
	 * NOTE: active spinning is disabled in arm. It can be activated
	 * by setting high_MutexSpin through the sysctl.
	 */
	high_MutexSpin = low_MutexSpin;
}
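
/*
 * With the defaults above, the spinlock timeout is 250 ms (NSEC_PER_SEC >> 2)
 * and the mutex spin window is 10 us. Illustrative boot-args (example values,
 * not recommendations): "slto_us=500000" would raise the spinlock timeout to
 * 500 ms, and "mtxspin=100" would request a 100 us spin window; requests
 * above USEC_PER_SEC >> 4 (62.5 ms) are clamped.
 */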

/*
 * This is called from the machine-independent routine cpu_up()
 * to perform machine-dependent info updates.
 */
void
ml_cpu_up(void)
{
	os_atomic_inc(&machine_info.physical_cpu, relaxed);
	os_atomic_inc(&machine_info.logical_cpu, relaxed);
}

/*
 * This is called from the machine-independent routine cpu_down()
 * to perform machine-dependent info updates.
 */
void
ml_cpu_down(void)
{
	cpu_data_t *cpu_data_ptr;

	os_atomic_dec(&machine_info.physical_cpu, relaxed);
	os_atomic_dec(&machine_info.logical_cpu, relaxed);

	/*
	 * If we want to deal with outstanding IPIs, we need to do so
	 * relatively early in the processor_doshutdown path, as we
	 * pend decrementer interrupts using the IPI mechanism if we
	 * cannot immediately service them (if IRQ is masked).
	 * Do so now.
	 *
	 * We aren't on the interrupt stack here; would it make
	 * more sense to disable signaling and then enable
	 * interrupts? It might be a bit cleaner.
	 */
	cpu_data_ptr = getCpuDatap();
	cpu_data_ptr->cpu_running = FALSE;

	cpu_signal_handler_internal(TRUE);
}

/*
 * Routine: ml_cpu_get_info
 * Function:
 */
void
ml_cpu_get_info(ml_cpu_info_t * ml_cpu_info)
{
	cache_info_t *cpuid_cache_info;

	cpuid_cache_info = cache_info();
	ml_cpu_info->vector_unit = 0;
	ml_cpu_info->cache_line_size = cpuid_cache_info->c_linesz;
	ml_cpu_info->l1_icache_size = cpuid_cache_info->c_isize;
	ml_cpu_info->l1_dcache_size = cpuid_cache_info->c_dsize;

#if (__ARM_ARCH__ >= 7)
	ml_cpu_info->l2_settings = 1;
	ml_cpu_info->l2_cache_size = cpuid_cache_info->c_l2size;
#else
	ml_cpu_info->l2_settings = 0;
	ml_cpu_info->l2_cache_size = 0xFFFFFFFF;
#endif
	ml_cpu_info->l3_settings = 0;
	ml_cpu_info->l3_cache_size = 0xFFFFFFFF;
}

unsigned int
ml_get_machine_mem(void)
{
	return machine_info.memory_size;
}

/* Return max offset */
vm_map_offset_t
ml_get_max_offset(
	boolean_t is64,
	unsigned int option)
{
	unsigned int pmap_max_offset_option = 0;

	switch (option) {
	case MACHINE_MAX_OFFSET_DEFAULT:
		pmap_max_offset_option = ARM_PMAP_MAX_OFFSET_DEFAULT;
		break;
	case MACHINE_MAX_OFFSET_MIN:
		pmap_max_offset_option = ARM_PMAP_MAX_OFFSET_MIN;
		break;
	case MACHINE_MAX_OFFSET_MAX:
		pmap_max_offset_option = ARM_PMAP_MAX_OFFSET_MAX;
		break;
	case MACHINE_MAX_OFFSET_DEVICE:
		pmap_max_offset_option = ARM_PMAP_MAX_OFFSET_DEVICE;
		break;
	default:
		panic("ml_get_max_offset(): Illegal option 0x%x\n", option);
		break;
	}
	return pmap_max_offset(is64, pmap_max_offset_option);
}
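
/*
 * Illustrative call (a sketch, not taken from this file): the VM layer can
 * ask for the default map ceiling of a 32-bit task with
 *
 *	ml_get_max_offset(FALSE, MACHINE_MAX_OFFSET_DEFAULT);
 *
 * the concrete value is ultimately decided by pmap_max_offset().
 */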

void
ml_panic_trap_to_debugger(__unused const char *panic_format_str,
    __unused va_list *panic_args,
    __unused unsigned int reason,
    __unused void *ctx,
    __unused uint64_t panic_options_mask,
    __unused unsigned long panic_caller)
{
	return;
}

__attribute__((noreturn))
void
halt_all_cpus(boolean_t reboot)
{
	if (reboot) {
		printf("MACH Reboot\n");
		PEHaltRestart(kPERestartCPU);
	} else {
		printf("CPU halted\n");
		PEHaltRestart(kPEHaltCPU);
	}
	while (1) {
		;
	}
}

__attribute__((noreturn))
void
halt_cpu(void)
{
	halt_all_cpus(FALSE);
}

/*
 * Routine: machine_signal_idle
 * Function:
 */
void
machine_signal_idle(
	processor_t processor)
{
	cpu_signal(processor_to_cpu_datap(processor), SIGPnop, (void *)NULL, (void *)NULL);
	KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_REMOTE_AST), processor->cpu_id, 0 /* nop */, 0, 0, 0);
}

void
machine_signal_idle_deferred(
	processor_t processor)
{
	cpu_signal_deferred(processor_to_cpu_datap(processor));
	KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_REMOTE_DEFERRED_AST), processor->cpu_id, 0 /* nop */, 0, 0, 0);
}

void
machine_signal_idle_cancel(
	processor_t processor)
{
	cpu_signal_cancel(processor_to_cpu_datap(processor));
	KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_REMOTE_CANCEL_AST), processor->cpu_id, 0 /* nop */, 0, 0, 0);
}

/*
 * Routine: ml_install_interrupt_handler
 * Function: Initialize Interrupt Handler
 */
void
ml_install_interrupt_handler(
	void *nub,
	int source,
	void *target,
	IOInterruptHandler handler,
	void *refCon)
{
	cpu_data_t *cpu_data_ptr;
	boolean_t current_state;

	current_state = ml_set_interrupts_enabled(FALSE);
	cpu_data_ptr = getCpuDatap();

	cpu_data_ptr->interrupt_nub = nub;
	cpu_data_ptr->interrupt_source = source;
	cpu_data_ptr->interrupt_target = target;
	cpu_data_ptr->interrupt_handler = handler;
	cpu_data_ptr->interrupt_refCon = refCon;

	(void) ml_set_interrupts_enabled(current_state);
}

/*
 * Routine: ml_init_interrupt
 * Function: Initialize Interrupts
 */
void
ml_init_interrupt(void)
{
}

/*
 * Routine: ml_init_timebase
 * Function: register and set up Timebase, Decrementer services
 */
void
ml_init_timebase(
	void *args,
	tbd_ops_t tbd_funcs,
	vm_offset_t int_address,
	vm_offset_t int_value)
{
	cpu_data_t *cpu_data_ptr;

	cpu_data_ptr = (cpu_data_t *)args;

	if ((cpu_data_ptr == &BootCpuData)
	    && (rtclock_timebase_func.tbd_fiq_handler == (void *)NULL)) {
		rtclock_timebase_func = *tbd_funcs;
		rtclock_timebase_addr = int_address;
		rtclock_timebase_val = int_value;
	}
}
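
/*
 * Note: only the boot CPU installs the timebase/decrementer ops, and only if
 * no FIQ handler has been registered yet; the rtclock function table set up
 * here is a global shared by all CPUs.
 */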

void
ml_parse_cpu_topology(void)
{
	DTEntry entry, child;
	OpaqueDTEntryIterator iter;
	uint32_t cpu_boot_arg;
	int err;

	err = SecureDTLookupEntry(NULL, "/cpus", &entry);
	assert(err == kSuccess);

	err = SecureDTInitEntryIterator(entry, &iter);
	assert(err == kSuccess);

	cpu_boot_arg = MAX_CPUS;
	PE_parse_boot_argn("cpus", &cpu_boot_arg, sizeof(cpu_boot_arg));

	ml_topology_cluster_t *cluster = &topology_info.clusters[0];
	unsigned int cpu_id = 0;
	while (kSuccess == SecureDTIterateEntries(&iter, &child)) {
#if MACH_ASSERT
		unsigned int propSize;
		void const *prop = NULL;
		if (cpu_id == 0) {
			if (kSuccess != SecureDTGetProperty(child, "state", &prop, &propSize)) {
				panic("unable to retrieve state for cpu %u", cpu_id);
			}

			if (strncmp((char const *)prop, "running", propSize) != 0) {
				panic("cpu 0 has not been marked as running!");
			}
		}
		assert(kSuccess == SecureDTGetProperty(child, "reg", &prop, &propSize));
		assert(cpu_id == *((uint32_t const *)prop));
#endif
		if (cpu_id >= cpu_boot_arg) {
			break;
		}

		ml_topology_cpu_t *cpu = &topology_info.cpus[cpu_id];

		cpu->cpu_id = cpu_id;
		cpu->phys_id = cpu_id;
		cpu->cluster_type = cluster->cluster_type;

		cluster->num_cpus++;
		cluster->cpu_mask |= 1ULL << cpu_id;

		topology_info.num_cpus++;
		topology_info.max_cpu_id = cpu_id;

		cpu_id++;
	}

	if (cpu_id == 0) {
		panic("No cpus found!");
	}
}
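
/*
 * In short: each child of the /cpus device-tree node becomes one logical CPU
 * with sequential cpu_id == phys_id, all in the single SMP cluster; a
 * "cpus=N" boot-arg (illustrative: "cpus=1" for a uniprocessor bring-up)
 * truncates the enumeration.
 */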

const ml_topology_info_t *
ml_get_topology_info(void)
{
	return &topology_info;
}

unsigned int
ml_get_cpu_count(void)
{
	return topology_info.num_cpus;
}

unsigned int
ml_get_cluster_count(void)
{
	return topology_info.num_clusters;
}

int
ml_get_boot_cpu_number(void)
{
	return 0;
}

cluster_type_t
ml_get_boot_cluster(void)
{
	return CLUSTER_TYPE_SMP;
}

int
ml_get_cpu_number(uint32_t phys_id)
{
	if (phys_id > (uint32_t)ml_get_max_cpu_number()) {
		return -1;
	}

	return (int)phys_id;
}

int
ml_get_cluster_number(__unused uint32_t phys_id)
{
	return 0;
}

int
ml_get_max_cpu_number(void)
{
	return topology_info.num_cpus - 1;
}

int
ml_get_max_cluster_number(void)
{
	return topology_info.max_cluster_id;
}

unsigned int
ml_get_first_cpu_id(unsigned int cluster_id)
{
	return topology_info.clusters[cluster_id].first_cpu_id;
}

kern_return_t
ml_processor_register(ml_processor_info_t *in_processor_info,
    processor_t * processor_out, ipi_handler_t *ipi_handler_out,
    perfmon_interrupt_handler_func *pmi_handler_out)
{
	cpu_data_t *this_cpu_datap;
	boolean_t is_boot_cpu;

	const unsigned int max_cpu_id = ml_get_max_cpu_number();
	if (in_processor_info->phys_id > max_cpu_id) {
		/*
		 * The physical CPU ID indicates that we have more CPUs than
		 * this xnu build supports. This probably means we have an
		 * incorrect board configuration.
		 *
		 * TODO: Should this just return a failure instead? A panic
		 * is simply a convenient way to catch bugs in the pexpert
		 * headers.
		 */
		panic("phys_id %u is too large for max_cpu_id (%u)", in_processor_info->phys_id, max_cpu_id);
	}

	/* Fail the registration if the number of CPUs has been limited by boot-arg. */
	if ((in_processor_info->phys_id >= topology_info.num_cpus) ||
	    (in_processor_info->log_id > (uint32_t)ml_get_max_cpu_number())) {
		return KERN_FAILURE;
	}

	if (in_processor_info->log_id != (uint32_t)ml_get_boot_cpu_number()) {
		is_boot_cpu = FALSE;
		this_cpu_datap = cpu_data_alloc(FALSE);
		cpu_data_init(this_cpu_datap);
	} else {
		this_cpu_datap = &BootCpuData;
		is_boot_cpu = TRUE;
	}

	this_cpu_datap->cpu_id = in_processor_info->cpu_id;

	this_cpu_datap->cpu_console_buf = console_cpu_alloc(is_boot_cpu);
	if (this_cpu_datap->cpu_console_buf == (void *)(NULL)) {
		goto processor_register_error;
	}

	if (!is_boot_cpu) {
		if (cpu_data_register(this_cpu_datap) != KERN_SUCCESS) {
			goto processor_register_error;
		}
	}

	this_cpu_datap->cpu_idle_notify = in_processor_info->processor_idle;
	this_cpu_datap->cpu_cache_dispatch = (cache_dispatch_t) in_processor_info->platform_cache_dispatch;
	nanoseconds_to_absolutetime((uint64_t) in_processor_info->powergate_latency, &this_cpu_datap->cpu_idle_latency);
	this_cpu_datap->cpu_reset_assist = kvtophys(in_processor_info->powergate_stub_addr);

	this_cpu_datap->idle_timer_notify = in_processor_info->idle_timer;
	this_cpu_datap->idle_timer_refcon = in_processor_info->idle_timer_refcon;

	this_cpu_datap->platform_error_handler = in_processor_info->platform_error_handler;
	this_cpu_datap->cpu_regmap_paddr = in_processor_info->regmap_paddr;
	this_cpu_datap->cpu_phys_id = in_processor_info->phys_id;
	this_cpu_datap->cpu_l2_access_penalty = in_processor_info->l2_access_penalty;

	processor_t processor = PERCPU_GET_RELATIVE(processor, cpu_data, this_cpu_datap);
	if (!is_boot_cpu) {
		processor_init(processor, this_cpu_datap->cpu_number,
		    processor_pset(master_processor));

		if (this_cpu_datap->cpu_l2_access_penalty) {
			/*
			 * Cores that have a non-zero L2 access penalty compared
			 * to the boot processor should be de-prioritized by the
			 * scheduler, so that threads use the cores with better L2
			 * preferentially.
			 */
			processor_set_primary(processor, master_processor);
		}
	}

	*processor_out = processor;
	*ipi_handler_out = cpu_signal_handler;
	*pmi_handler_out = NULL;
	if (in_processor_info->idle_tickle != (idle_tickle_t *) NULL) {
		*in_processor_info->idle_tickle = (idle_tickle_t) cpu_idle_tickle;
	}

#if KPC
	if (kpc_register_cpu(this_cpu_datap) != TRUE) {
		goto processor_register_error;
	}
#endif

	if (!is_boot_cpu) {
		random_cpu_init(this_cpu_datap->cpu_number);
	}

	return KERN_SUCCESS;

processor_register_error:
#if KPC
	kpc_unregister_cpu(this_cpu_datap);
#endif
	if (!is_boot_cpu) {
		cpu_data_free(this_cpu_datap);
	}
	return KERN_FAILURE;
}
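
/*
 * Registration order matters here: the console buffer and, for a secondary
 * CPU, its cpu_data slot must exist before the caller is handed back the
 * IPI handler, and every failure funnels through processor_register_error
 * so a non-boot CPU's cpu_data is released rather than leaked.
 */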

void
ml_init_arm_debug_interface(
	void * in_cpu_datap,
	vm_offset_t virt_address)
{
	((cpu_data_t *)in_cpu_datap)->cpu_debug_interface_map = virt_address;
	do_debugid();
}

/*
 * Routine: init_ast_check
 * Function:
 */
void
init_ast_check(
	__unused processor_t processor)
{
}

/*
 * Routine: cause_ast_check
 * Function:
 */
void
cause_ast_check(
	processor_t processor)
{
	if (current_processor() != processor) {
		cpu_signal(processor_to_cpu_datap(processor), SIGPast, (void *)NULL, (void *)NULL);
		KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_REMOTE_AST), processor->cpu_id, 1 /* ast */, 0, 0, 0);
	}
}

extern uint32_t cpu_idle_count;

void
ml_get_power_state(boolean_t *icp, boolean_t *pidlep)
{
	*icp = ml_at_interrupt_context();
	*pidlep = (cpu_idle_count == real_ncpus);
}

/*
 * Routine: ml_cause_interrupt
 * Function: Generate a fake interrupt
 */
void
ml_cause_interrupt(void)
{
	return; /* BS_XXX */
}

/* Map memory-mapped IO space */
vm_offset_t
ml_io_map(
	vm_offset_t phys_addr,
	vm_size_t size)
{
	return io_map(phys_addr, size, VM_WIMG_IO);
}

/* Map memory-mapped IO space (with protections specified) */
vm_offset_t
ml_io_map_with_prot(
	vm_offset_t phys_addr,
	vm_size_t size,
	vm_prot_t prot)
{
	return io_map_with_prot(phys_addr, size, VM_WIMG_IO, prot);
}

vm_offset_t
ml_io_map_wcomb(
	vm_offset_t phys_addr,
	vm_size_t size)
{
	return io_map(phys_addr, size, VM_WIMG_WCOMB);
}
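
/*
 * Illustrative use (soc_uart_paddr is a hypothetical physical address):
 *
 *	vm_offset_t regs = ml_io_map(soc_uart_paddr, PAGE_SIZE);
 *
 * ml_io_map() creates a device-memory (VM_WIMG_IO) mapping, while
 * ml_io_map_wcomb() creates a write-combined one, the usual choice for
 * frame-buffer style regions.
 */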

void
ml_io_unmap(vm_offset_t addr, vm_size_t sz)
{
	pmap_remove(kernel_pmap, addr, addr + sz);
	kmem_free(kernel_map, addr, sz);
}

/* boot memory allocation */
vm_offset_t
ml_static_malloc(
	__unused vm_size_t size)
{
	return (vm_offset_t) NULL;
}

vm_map_address_t
ml_map_high_window(
	vm_offset_t phys_addr,
	vm_size_t len)
{
	return pmap_map_high_window_bd(phys_addr, len, VM_PROT_READ | VM_PROT_WRITE);
}

vm_offset_t
ml_static_ptovirt(
	vm_offset_t paddr)
{
	return phystokv(paddr);
}

vm_offset_t
ml_static_vtop(
	vm_offset_t vaddr)
{
	assertf(((vm_address_t)(vaddr) - gVirtBase) < gPhysSize, "%s: illegal vaddr: %p", __func__, (void*)vaddr);
	return (vm_address_t)(vaddr) - gVirtBase + gPhysBase;
}

/*
 * Return the maximum contiguous KVA range that can be accessed from this
 * physical address. For arm64, we employ a segmented physical aperture
 * relocation table which can limit the available range for a given PA to
 * something less than the extent of physical memory. But here, we still
 * have a flat physical aperture, so no such requirement exists.
 */
vm_map_address_t
phystokv_range(pmap_paddr_t pa, vm_size_t *max_len)
{
	vm_size_t len = gPhysSize - (pa - gPhysBase);
	if (*max_len > len) {
		*max_len = len;
	}
	assertf((pa - gPhysBase) < gPhysSize, "%s: illegal PA: 0x%lx", __func__, (unsigned long)pa);
	return pa - gPhysBase + gVirtBase;
}

vm_offset_t
ml_static_slide(
	vm_offset_t vaddr)
{
	return VM_KERNEL_SLIDE(vaddr);
}

kern_return_t
ml_static_verify_page_protections(
	uint64_t base, uint64_t size, vm_prot_t prot)
{
	/* XXX Implement Me */
	(void)base;
	(void)size;
	(void)prot;
	return KERN_FAILURE;
}


vm_offset_t
ml_static_unslide(
	vm_offset_t vaddr)
{
	return VM_KERNEL_UNSLIDE(vaddr);
}

kern_return_t
ml_static_protect(
	vm_offset_t vaddr, /* kernel virtual address */
	vm_size_t size,
	vm_prot_t new_prot)
{
	pt_entry_t arm_prot = 0;
	pt_entry_t arm_block_prot = 0;
	vm_offset_t vaddr_cur;
	ppnum_t ppn;
	kern_return_t result = KERN_SUCCESS;

	if (vaddr < VM_MIN_KERNEL_ADDRESS) {
		return KERN_FAILURE;
	}

	assert((vaddr & (ARM_PGBYTES - 1)) == 0); /* must be page aligned */

	if ((new_prot & VM_PROT_WRITE) && (new_prot & VM_PROT_EXECUTE)) {
		panic("ml_static_protect(): WX request on %p", (void *) vaddr);
	}
	if (lockdown_done && (new_prot & VM_PROT_EXECUTE)) {
		panic("ml_static_protect(): attempt to inject executable mapping on %p", (void *) vaddr);
	}

	/* Set up the protection bits, and block bits so we can validate block mappings. */
	if (new_prot & VM_PROT_WRITE) {
		arm_prot |= ARM_PTE_AP(AP_RWNA);
		arm_block_prot |= ARM_TTE_BLOCK_AP(AP_RWNA);
	} else {
		arm_prot |= ARM_PTE_AP(AP_RONA);
		arm_block_prot |= ARM_TTE_BLOCK_AP(AP_RONA);
	}

	if (!(new_prot & VM_PROT_EXECUTE)) {
		arm_prot |= ARM_PTE_NX;
		arm_block_prot |= ARM_TTE_BLOCK_NX;
	}

	for (vaddr_cur = vaddr;
	    vaddr_cur < ((vaddr + size) & ~ARM_PGMASK);
	    vaddr_cur += ARM_PGBYTES) {
		ppn = pmap_find_phys(kernel_pmap, vaddr_cur);
		if (ppn != (vm_offset_t) NULL) {
			tt_entry_t *ttp = &kernel_pmap->tte[ttenum(vaddr_cur)];
			tt_entry_t tte = *ttp;

			if ((tte & ARM_TTE_TYPE_MASK) != ARM_TTE_TYPE_TABLE) {
				if (((tte & ARM_TTE_TYPE_MASK) == ARM_TTE_TYPE_BLOCK) &&
				    ((tte & (ARM_TTE_BLOCK_APMASK | ARM_TTE_BLOCK_NX_MASK)) == arm_block_prot)) {
					/*
					 * We can support ml_static_protect on a block mapping if the mapping already has
					 * the desired protections. We still want to run checks on a per-page basis.
					 */
					continue;
				}

				result = KERN_FAILURE;
				break;
			}

			pt_entry_t *pte_p = (pt_entry_t *) ttetokv(tte) + ptenum(vaddr_cur);
			pt_entry_t ptmp = *pte_p;

			ptmp = (ptmp & ~(ARM_PTE_APMASK | ARM_PTE_NX_MASK)) | arm_prot;
			*pte_p = ptmp;
		}
	}

	if (vaddr_cur > vaddr) {
		flush_mmu_tlb_region(vaddr, (vm_size_t)(vaddr_cur - vaddr));
	}

	return result;
}
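
/*
 * A typical caller's sketch: to write-protect one page of static kernel
 * data after initialization,
 *
 *	ml_static_protect(page_addr, PAGE_SIZE, VM_PROT_READ);
 *
 * where page_addr is a page-aligned kernel VA (hypothetical here). W^X is
 * enforced above, so a VM_PROT_WRITE | VM_PROT_EXECUTE request panics.
 */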

/*
 * Routine: ml_static_mfree
 * Function:
 */
void
ml_static_mfree(
	vm_offset_t vaddr,
	vm_size_t size)
{
	vm_offset_t vaddr_cur;
	ppnum_t ppn;
	uint32_t freed_pages = 0;
	uint32_t freed_kernelcache_pages = 0;

	/* It is acceptable (if bad) to fail to free. */
	if (vaddr < VM_MIN_KERNEL_ADDRESS) {
		return;
	}

	assert((vaddr & (PAGE_SIZE - 1)) == 0); /* must be page aligned */

	for (vaddr_cur = vaddr;
	    vaddr_cur < trunc_page_32(vaddr + size);
	    vaddr_cur += PAGE_SIZE) {
		ppn = pmap_find_phys(kernel_pmap, vaddr_cur);
		if (ppn != (vm_offset_t) NULL) {
			/*
			 * It is not acceptable to fail to update the protections on a page
			 * we will release to the VM. We need to either panic or continue.
			 * For now, we'll panic (to help flag if there is memory we can
			 * reclaim).
			 */
			if (ml_static_protect(vaddr_cur, PAGE_SIZE, VM_PROT_WRITE | VM_PROT_READ) != KERN_SUCCESS) {
				panic("Failed ml_static_mfree on %p", (void *) vaddr_cur);
			}
			vm_page_create(ppn, (ppn + 1));
			freed_pages++;
			if (vaddr_cur >= segLOWEST && vaddr_cur < end_kern) {
				freed_kernelcache_pages++;
			}
		}
	}
	vm_page_lockspin_queues();
	vm_page_wire_count -= freed_pages;
	vm_page_wire_count_initial -= freed_pages;
	vm_page_kernelcache_count -= freed_kernelcache_pages;
	vm_page_unlock_queues();
#if DEBUG
	kprintf("ml_static_mfree: Released 0x%x pages at VA %p, size:0x%llx, last ppn: 0x%x\n", freed_pages, (void *)vaddr, (uint64_t)size, ppn);
#endif
}


/* virtual to physical on wired pages */
vm_offset_t
ml_vtophys(vm_offset_t vaddr)
{
	return kvtophys(vaddr);
}

/*
 * Routine: ml_nofault_copy
 * Function: Perform a physical mode copy if the source and destination have
 * valid translations in the kernel pmap. If translations are present, they are
 * assumed to be wired; i.e., no attempt is made to guarantee that the
 * translations obtained remain valid for the duration of the copy process.
 */
vm_size_t
ml_nofault_copy(vm_offset_t virtsrc, vm_offset_t virtdst, vm_size_t size)
{
	addr64_t cur_phys_dst, cur_phys_src;
	uint32_t count, nbytes = 0;

	while (size > 0) {
		if (!(cur_phys_src = kvtophys(virtsrc))) {
			break;
		}
		if (!(cur_phys_dst = kvtophys(virtdst))) {
			break;
		}
		if (!pmap_valid_address(trunc_page_64(cur_phys_dst)) ||
		    !pmap_valid_address(trunc_page_64(cur_phys_src))) {
			break;
		}
		count = PAGE_SIZE - (cur_phys_src & PAGE_MASK);
		if (count > (PAGE_SIZE - (cur_phys_dst & PAGE_MASK))) {
			count = PAGE_SIZE - (cur_phys_dst & PAGE_MASK);
		}
		if (count > size) {
			count = size;
		}

		bcopy_phys(cur_phys_src, cur_phys_dst, count);

		nbytes += count;
		virtsrc += count;
		virtdst += count;
		size -= count;
	}

	return nbytes;
}
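
/*
 * Each iteration above copies at most to the nearer of the source or
 * destination page boundary, i.e. count = min(size, bytes left in the source
 * page, bytes left in the destination page), so bcopy_phys never crosses a
 * page on either side.
 */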

/*
 * Routine: ml_validate_nofault
 * Function: Validate that this address range has valid translations
 * in the kernel pmap. If translations are present, they are
 * assumed to be wired; i.e. no attempt is made to guarantee
 * that the translations persist after the check.
 * Returns: TRUE if the range is mapped and will not cause a fault,
 * FALSE otherwise.
 */

boolean_t
ml_validate_nofault(
	vm_offset_t virtsrc, vm_size_t size)
{
	addr64_t cur_phys_src;
	uint32_t count;

	while (size > 0) {
		if (!(cur_phys_src = kvtophys(virtsrc))) {
			return FALSE;
		}
		if (!pmap_valid_address(trunc_page_64(cur_phys_src))) {
			return FALSE;
		}
		count = (uint32_t)(PAGE_SIZE - (cur_phys_src & PAGE_MASK));
		if (count > size) {
			count = (uint32_t)size;
		}

		virtsrc += count;
		size -= count;
	}

	return TRUE;
}

void
ml_get_bouncepool_info(vm_offset_t * phys_addr, vm_size_t * size)
{
	*phys_addr = 0;
	*size = 0;
}

/*
 * Stubs for CPU Stepper
 */
void
active_rt_threads(__unused boolean_t active)
{
}

void
thread_tell_urgency(__unused thread_urgency_t urgency,
    __unused uint64_t rt_period,
    __unused uint64_t rt_deadline,
    __unused uint64_t sched_latency,
    __unused thread_t nthread)
{
}

void
machine_run_count(__unused uint32_t count)
{
}

processor_t
machine_choose_processor(__unused processor_set_t pset, processor_t processor)
{
	return processor;
}

boolean_t
machine_timeout_suspended(void)
{
	return FALSE;
}

kern_return_t
ml_interrupt_prewarm(__unused uint64_t deadline)
{
	return KERN_FAILURE;
}

uint64_t
ml_get_hwclock(void)
{
	uint64_t high_first = 0;
	uint64_t high_second = 0;
	uint64_t low = 0;

	__builtin_arm_isb(ISB_SY);

	do {
		high_first = __builtin_arm_mrrc(15, 0, 14) >> 32;
		low = __builtin_arm_mrrc(15, 0, 14) & 0xFFFFFFFFULL;
		high_second = __builtin_arm_mrrc(15, 0, 14) >> 32;
	} while (high_first != high_second);

	return (high_first << 32) | (low);
}
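
/*
 * The high/low/high sequence above is the standard guard against the low
 * word wrapping between two 32-bit halves of a 64-bit counter read: if the
 * high word changed while the low word was sampled, the loop resamples.
 * (The MRRC from p15, op1=0, CRm=c14 reads the ARMv7 generic-timer count.)
 */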

boolean_t
ml_delay_should_spin(uint64_t interval)
{
	cpu_data_t *cdp = getCpuDatap();

	if (cdp->cpu_idle_latency) {
		return (interval < cdp->cpu_idle_latency) ? TRUE : FALSE;
	} else {
		/*
		 * Early boot, latency is unknown. Err on the side of blocking,
		 * which should always be safe, even if slow
		 */
		return FALSE;
	}
}

void
ml_delay_on_yield(void)
{
}

boolean_t
ml_thread_is64bit(thread_t thread)
{
	return thread_is_64bit_addr(thread);
}

void
ml_timer_evaluate(void)
{
}

boolean_t
ml_timer_forced_evaluation(void)
{
	return FALSE;
}

uint64_t
ml_energy_stat(__unused thread_t t)
{
	return 0;
}


void
ml_gpu_stat_update(__unused uint64_t gpu_ns_delta)
{
	/*
	 * For now: update the resource coalition stats of the
	 * current thread's coalition
	 */
	task_coalition_update_gpu_stats(current_task(), gpu_ns_delta);
}

uint64_t
ml_gpu_stat(__unused thread_t t)
{
	return 0;
}

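/*
 * Precise user/kernel time accounting: on each mode switch the helper below
 * moves the per-processor state timer and the per-thread timer between the
 * user and system buckets; threads opted out of precise accounting skip the
 * switch entirely.
 */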
#if !CONFIG_SKIP_PRECISE_USER_KERNEL_TIME
static void
timer_state_event(boolean_t switch_to_kernel)
{
	thread_t thread = current_thread();
	if (!thread->precise_user_kernel_time) {
		return;
	}

	processor_t pd = current_processor();
	uint64_t now = ml_get_timebase();

	timer_stop(pd->current_state, now);
	pd->current_state = (switch_to_kernel) ? &pd->system_state : &pd->user_state;
	timer_start(pd->current_state, now);

	timer_stop(pd->thread_timer, now);
	pd->thread_timer = (switch_to_kernel) ? &thread->system_timer : &thread->user_timer;
	timer_start(pd->thread_timer, now);
}

void
timer_state_event_user_to_kernel(void)
{
	timer_state_event(TRUE);
}

void
timer_state_event_kernel_to_user(void)
{
	timer_state_event(FALSE);
}
#endif /* !CONFIG_SKIP_PRECISE_USER_KERNEL_TIME */

uint32_t
get_arm_cpu_version(void)
{
	uint32_t value = machine_read_midr();

	/* Compose the register values into 8 bits; variant[7:4], revision[3:0]. */
	return ((value & MIDR_REV_MASK) >> MIDR_REV_SHIFT) | ((value & MIDR_VAR_MASK) >> (MIDR_VAR_SHIFT - 4));
}
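
/*
 * For example, a MIDR describing an r2p1 part (variant 2, revision 1)
 * yields 0x21 here: variant in bits [7:4], revision in bits [3:0].
 */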

boolean_t
user_cont_hwclock_allowed(void)
{
	return FALSE;
}

uint8_t
user_timebase_type(void)
{
#if __ARM_TIME__
	return USER_TIMEBASE_SPEC;
#else
	return USER_TIMEBASE_NONE;
#endif
}

/*
 * The following are required for parts of the kernel
 * that cannot resolve these functions as inlines:
 */
extern thread_t current_act(void) __attribute__((const));
thread_t
current_act(void)
{
	return current_thread_fast();
}

#undef current_thread
extern thread_t current_thread(void) __attribute__((const));
thread_t
current_thread(void)
{
	return current_thread_fast();
}

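/*
 * With __ARM_USER_PROTECT__, kernel and user run on separate TTBR0 page
 * tables: arm_user_protect_begin() switches to the kernel tables (with the
 * kernel ASID) and returns the previous TTBR0, which the matching
 * arm_user_protect_end() uses to restore the thread's user tables and ASID
 * on the way back out.
 */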
#if __ARM_USER_PROTECT__
uintptr_t
arm_user_protect_begin(thread_t thread)
{
	uintptr_t ttbr0, asid = 0; // kernel asid

	ttbr0 = __builtin_arm_mrc(15, 0, 2, 0, 0); // Get TTBR0
	if (ttbr0 != thread->machine.kptw_ttb) {
		__builtin_arm_mcr(15, 0, thread->machine.kptw_ttb, 2, 0, 0); // Set TTBR0
		__builtin_arm_mcr(15, 0, asid, 13, 0, 1); // Set CONTEXTIDR
		__builtin_arm_isb(ISB_SY);
	}
	return ttbr0;
}

void
arm_user_protect_end(thread_t thread, uintptr_t ttbr0, boolean_t disable_interrupts)
{
	if ((ttbr0 != thread->machine.kptw_ttb) && (thread->machine.uptw_ttb != thread->machine.kptw_ttb)) {
		if (disable_interrupts) {
			__asm__ volatile ("cpsid if" ::: "memory"); // Disable FIQ/IRQ
		}
		__builtin_arm_mcr(15, 0, thread->machine.uptw_ttb, 2, 0, 0); // Set TTBR0
		__builtin_arm_mcr(15, 0, thread->machine.asid, 13, 0, 1); // Set CONTEXTIDR with thread asid
		__builtin_arm_dsb(DSB_ISH);
		__builtin_arm_isb(ISB_SY);
	}
}
#endif // __ARM_USER_PROTECT__

void
machine_lockdown(void)
{
	arm_vm_prot_finalize(PE_state.bootArgs);
	lockdown_done = 1;
}

void
ml_lockdown_init(void)
{
}

void
ml_hibernate_active_pre(void)
{
}

void
ml_hibernate_active_post(void)
{
}

size_t
ml_get_vm_reserved_regions(bool vm_is64bit, struct vm_reserved_region **regions)
{
#pragma unused(vm_is64bit)
	assert(regions != NULL);

	*regions = NULL;
	return 0;
}