/*
 * Copyright (c) 2007-2016 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <arm/proc_reg.h>
#include <arm/machine_cpu.h>
#include <arm/cpu_internal.h>
#include <arm/cpuid.h>
#include <arm/io_map_entries.h>
#include <arm/cpu_data.h>
#include <arm/cpu_data_internal.h>
#include <arm/misc_protos.h>
#include <arm/rtclock.h>
#include <arm/caches_internal.h>
#include <console/serial_protos.h>
#include <kern/machine.h>
#include <prng/random.h>
#include <kern/startup.h>
#include <kern/sched.h>
#include <kern/thread.h>
#include <mach/machine.h>
#include <machine/atomic.h>
#include <vm/pmap.h>
#include <vm/vm_page.h>
#include <sys/kdebug.h>
#include <kern/coalition.h>
#include <pexpert/device_tree.h>

#include <IOKit/IOPlatformExpert.h>
#include <libkern/section_keywords.h>

#if KPC
#include <kern/kpc.h>
#endif

static int max_cpus_initialized = 0;
#define MAX_CPUS_SET	0x1
#define MAX_CPUS_WAIT	0x2

static unsigned int avail_cpus = 0;

uint32_t LockTimeOut;
uint32_t LockTimeOutUsec;
uint64_t MutexSpin;
boolean_t is_clock_configured = FALSE;

extern int mach_assert;
extern volatile uint32_t debug_enabled;
SECURITY_READ_ONLY_LATE(unsigned int) debug_boot_arg;

void machine_conf(void);

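/*
 * Routine: machine_startup
 * Function: Parse the debug- and preemption-related boot-args ("debug",
 *	"assert", "preempt", "bg_preempt"), record the machine configuration,
 *	and enter kernel_bootstrap(), which does not return.
 */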
void
machine_startup(__unused boot_args * args)
{
	int boot_arg;

#if MACH_KDP
	if (PE_parse_boot_argn("debug", &debug_boot_arg, sizeof (debug_boot_arg)) &&
	    debug_enabled) {
#if DEVELOPMENT || DEBUG
		if (debug_boot_arg & DB_HALT)
			halt_in_debugger = 1;
#endif
		if (debug_boot_arg & DB_NMI)
			panicDebugging = TRUE;
	} else {
		debug_boot_arg = 0;
	}
#endif

	PE_parse_boot_argn("assert", &mach_assert, sizeof (mach_assert));

	if (PE_parse_boot_argn("preempt", &boot_arg, sizeof (boot_arg))) {
		default_preemption_rate = boot_arg;
	}
	if (PE_parse_boot_argn("bg_preempt", &boot_arg, sizeof (boot_arg))) {
		default_bg_preemption_rate = boot_arg;
	}

	machine_conf();

	/*
	 * Kick off the kernel bootstrap.
	 */
	kernel_bootstrap();
	/* NOTREACHED */
}

char *
machine_boot_info(
	__unused char *buf,
	__unused vm_size_t size)
{
	return (PE_boot_args());
}

void
machine_conf(void)
{
	machine_info.memory_size = mem_size;
}

void
machine_init(void)
{
	debug_log_init();
	clock_config();
	is_clock_configured = TRUE;
	if (debug_enabled)
		pmap_map_globals();
}

void
slave_machine_init(__unused void *param)
{
	cpu_machine_init();	/* Initialize the processor */
	clock_init();		/* Init the clock */
}

/*
 * Routine: machine_processor_shutdown
 * Function:
 */
thread_t
machine_processor_shutdown(
	__unused thread_t thread,
	void (*doshutdown) (processor_t),
	processor_t processor)
{
	return (Shutdown_context(doshutdown, processor));
}

/*
 * Routine: ml_init_max_cpus
 * Function:
 */
void
ml_init_max_cpus(unsigned int max_cpus)
{
	boolean_t current_state;

	current_state = ml_set_interrupts_enabled(FALSE);
	if (max_cpus_initialized != MAX_CPUS_SET) {
		machine_info.max_cpus = max_cpus;
		machine_info.physical_cpu_max = max_cpus;
		machine_info.logical_cpu_max = max_cpus;
		if (max_cpus_initialized == MAX_CPUS_WAIT)
			thread_wakeup((event_t) & max_cpus_initialized);
		max_cpus_initialized = MAX_CPUS_SET;
	}
	(void) ml_set_interrupts_enabled(current_state);
}

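/*
 * Note: max_cpus_initialized doubles as a state flag and a wait event.
 * If ml_get_max_cpus() runs before ml_init_max_cpus() has published the
 * value, it blocks uninterruptibly on &max_cpus_initialized and is woken
 * by ml_init_max_cpus() above.
 */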
/*
 * Routine: ml_get_max_cpus
 * Function:
 */
unsigned int
ml_get_max_cpus(void)
{
	boolean_t current_state;

	current_state = ml_set_interrupts_enabled(FALSE);
	if (max_cpus_initialized != MAX_CPUS_SET) {
		max_cpus_initialized = MAX_CPUS_WAIT;
		assert_wait((event_t) & max_cpus_initialized, THREAD_UNINT);
		(void) thread_block(THREAD_CONTINUE_NULL);
	}
	(void) ml_set_interrupts_enabled(current_state);
	return (machine_info.max_cpus);
}

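/*
 * Note: lock timeouts are derived from boot-args:
 *   slto_us - lock timeout in microseconds (the default corresponds to 250 ms);
 *   mtxspin - mutex spin window in microseconds, capped at USEC_PER_SEC >> 4
 *             (default is 10 us).
 */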
/*
 * Routine: ml_init_lock_timeout
 * Function:
 */
void
ml_init_lock_timeout(void)
{
	uint64_t abstime;
	uint64_t mtxspin;
	uint64_t default_timeout_ns = NSEC_PER_SEC>>2;
	uint32_t slto;

	if (PE_parse_boot_argn("slto_us", &slto, sizeof (slto)))
		default_timeout_ns = slto * NSEC_PER_USEC;

	nanoseconds_to_absolutetime(default_timeout_ns, &abstime);
	LockTimeOutUsec = (uint32_t)(abstime / NSEC_PER_USEC);
	LockTimeOut = (uint32_t)abstime;

	if (PE_parse_boot_argn("mtxspin", &mtxspin, sizeof (mtxspin))) {
		if (mtxspin > USEC_PER_SEC>>4)
			mtxspin = USEC_PER_SEC>>4;
		nanoseconds_to_absolutetime(mtxspin*NSEC_PER_USEC, &abstime);
	} else {
		nanoseconds_to_absolutetime(10*NSEC_PER_USEC, &abstime);
	}
	MutexSpin = abstime;
}

/*
 * This is called from the machine-independent routine cpu_up()
 * to perform machine-dependent info updates.
 */
void
ml_cpu_up(void)
{
	hw_atomic_add(&machine_info.physical_cpu, 1);
	hw_atomic_add(&machine_info.logical_cpu, 1);
}

/*
 * This is called from the machine-independent routine cpu_down()
 * to perform machine-dependent info updates.
 */
void
ml_cpu_down(void)
{
	cpu_data_t *cpu_data_ptr;

	hw_atomic_sub(&machine_info.physical_cpu, 1);
	hw_atomic_sub(&machine_info.logical_cpu, 1);

	/*
	 * If we want to deal with outstanding IPIs, we need to do so
	 * relatively early in the processor_doshutdown path, as we
	 * pend decrementer interrupts using the IPI mechanism if we
	 * cannot immediately service them (if IRQ is masked).
	 * Do so now.
	 *
	 * We aren't on the interrupt stack here; would it make
	 * more sense to disable signaling and then enable
	 * interrupts? It might be a bit cleaner.
	 */
	cpu_data_ptr = getCpuDatap();
	cpu_data_ptr->cpu_running = FALSE;

	cpu_signal_handler_internal(TRUE);
}

/*
 * Routine: ml_cpu_get_info
 * Function:
 */
void
ml_cpu_get_info(ml_cpu_info_t * ml_cpu_info)
{
	cache_info_t *cpuid_cache_info;

	cpuid_cache_info = cache_info();
	ml_cpu_info->vector_unit = 0;
	ml_cpu_info->cache_line_size = cpuid_cache_info->c_linesz;
	ml_cpu_info->l1_icache_size = cpuid_cache_info->c_isize;
	ml_cpu_info->l1_dcache_size = cpuid_cache_info->c_dsize;

#if (__ARM_ARCH__ >= 7)
	ml_cpu_info->l2_settings = 1;
	ml_cpu_info->l2_cache_size = cpuid_cache_info->c_l2size;
#else
	ml_cpu_info->l2_settings = 0;
	ml_cpu_info->l2_cache_size = 0xFFFFFFFF;
#endif
	ml_cpu_info->l3_settings = 0;
	ml_cpu_info->l3_cache_size = 0xFFFFFFFF;
}

unsigned int
ml_get_machine_mem(void)
{
	return (machine_info.memory_size);
}

/* Return max offset */
vm_map_offset_t
ml_get_max_offset(
	boolean_t is64,
	unsigned int option)
{
	unsigned int pmap_max_offset_option = 0;

	switch (option) {
	case MACHINE_MAX_OFFSET_DEFAULT:
		pmap_max_offset_option = ARM_PMAP_MAX_OFFSET_DEFAULT;
		break;
	case MACHINE_MAX_OFFSET_MIN:
		pmap_max_offset_option = ARM_PMAP_MAX_OFFSET_MIN;
		break;
	case MACHINE_MAX_OFFSET_MAX:
		pmap_max_offset_option = ARM_PMAP_MAX_OFFSET_MAX;
		break;
	case MACHINE_MAX_OFFSET_DEVICE:
		pmap_max_offset_option = ARM_PMAP_MAX_OFFSET_DEVICE;
		break;
	default:
		panic("ml_get_max_offset(): Illegal option 0x%x\n", option);
		break;
	}
	return pmap_max_offset(is64, pmap_max_offset_option);
}

boolean_t
ml_wants_panic_trap_to_debugger(void)
{
	return FALSE;
}

void
ml_panic_trap_to_debugger(__unused const char *panic_format_str,
			  __unused va_list *panic_args,
			  __unused unsigned int reason,
			  __unused void *ctx,
			  __unused uint64_t panic_options_mask,
			  __unused unsigned long panic_caller)
{
	return;
}

__attribute__((noreturn))
void
halt_all_cpus(boolean_t reboot)
{
	if (reboot) {
		printf("MACH Reboot\n");
		PEHaltRestart(kPERestartCPU);
	} else {
		printf("CPU halted\n");
		PEHaltRestart(kPEHaltCPU);
	}
	while (1);
}

__attribute__((noreturn))
void
halt_cpu(void)
{
	halt_all_cpus(FALSE);
}

/*
 * Routine: machine_signal_idle
 * Function:
 */
void
machine_signal_idle(
	processor_t processor)
{
	cpu_signal(processor_to_cpu_datap(processor), SIGPnop, (void *)NULL, (void *)NULL);
	KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_REMOTE_AST), processor->cpu_id, 0 /* nop */, 0, 0, 0);
}

void
machine_signal_idle_deferred(
	processor_t processor)
{
	cpu_signal_deferred(processor_to_cpu_datap(processor));
	KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_REMOTE_DEFERRED_AST), processor->cpu_id, 0 /* nop */, 0, 0, 0);
}

void
machine_signal_idle_cancel(
	processor_t processor)
{
	cpu_signal_cancel(processor_to_cpu_datap(processor));
	KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_REMOTE_CANCEL_AST), processor->cpu_id, 0 /* nop */, 0, 0, 0);
}

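/*
 * Note: the handler registered below is stored in this CPU's cpu_data with
 * interrupts disabled; once interrupts_enabled is set, the screen is
 * (re)acquired via initialize_screen(..., kPEAcquireScreen).
 */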
/*
 * Routine: ml_install_interrupt_handler
 * Function: Initialize Interrupt Handler
 */
void
ml_install_interrupt_handler(
	void *nub,
	int source,
	void *target,
	IOInterruptHandler handler,
	void *refCon)
{
	cpu_data_t *cpu_data_ptr;
	boolean_t current_state;

	current_state = ml_set_interrupts_enabled(FALSE);
	cpu_data_ptr = getCpuDatap();

	cpu_data_ptr->interrupt_nub = nub;
	cpu_data_ptr->interrupt_source = source;
	cpu_data_ptr->interrupt_target = target;
	cpu_data_ptr->interrupt_handler = handler;
	cpu_data_ptr->interrupt_refCon = refCon;

	cpu_data_ptr->interrupts_enabled = TRUE;
	(void) ml_set_interrupts_enabled(current_state);

	initialize_screen(NULL, kPEAcquireScreen);
}

/*
 * Routine: ml_init_interrupt
 * Function: Initialize Interrupts
 */
void
ml_init_interrupt(void)
{
}

/*
 * Routine: ml_init_timebase
 * Function: register and set up Timebase, Decrementer services
 */
void ml_init_timebase(
	void *args,
	tbd_ops_t tbd_funcs,
	vm_offset_t int_address,
	vm_offset_t int_value)
{
	cpu_data_t *cpu_data_ptr;

	cpu_data_ptr = (cpu_data_t *)args;

	if ((cpu_data_ptr == &BootCpuData)
	    && (rtclock_timebase_func.tbd_fiq_handler == (void *)NULL)) {
		rtclock_timebase_func = *tbd_funcs;
		rtclock_timebase_addr = int_address;
		rtclock_timebase_val = int_value;
	}
}

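/*
 * Routine: ml_parse_cpu_topology
 * Function: Walk the "/cpus" node of the device tree and count the CPU
 *	entries into avail_cpus. A "cpus" boot-arg may only lower the count,
 *	and at least one CPU must remain.
 */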
void
ml_parse_cpu_topology(void)
{
	DTEntry entry, child;
	OpaqueDTEntryIterator iter;
	uint32_t cpu_boot_arg;
	int err;

	err = DTLookupEntry(NULL, "/cpus", &entry);
	assert(err == kSuccess);

	err = DTInitEntryIterator(entry, &iter);
	assert(err == kSuccess);

	while (kSuccess == DTIterateEntries(&iter, &child)) {

#if MACH_ASSERT
		unsigned int propSize;
		void *prop = NULL;
		if (avail_cpus == 0) {
			if (kSuccess != DTGetProperty(child, "state", &prop, &propSize))
				panic("unable to retrieve state for cpu %u", avail_cpus);

			if (strncmp((char*)prop, "running", propSize) != 0)
				panic("cpu 0 has not been marked as running!");
		}
		assert(kSuccess == DTGetProperty(child, "reg", &prop, &propSize));
		assert(avail_cpus == *((uint32_t*)prop));
#endif
		++avail_cpus;
	}

	cpu_boot_arg = avail_cpus;
	if (PE_parse_boot_argn("cpus", &cpu_boot_arg, sizeof(cpu_boot_arg)) &&
	    (avail_cpus > cpu_boot_arg))
		avail_cpus = cpu_boot_arg;

	if (avail_cpus == 0)
		panic("No cpus found!");
}

unsigned int
ml_get_cpu_count(void)
{
	return avail_cpus;
}

int
ml_get_boot_cpu_number(void)
{
	return 0;
}

cluster_type_t
ml_get_boot_cluster(void)
{
	return CLUSTER_TYPE_SMP;
}

int
ml_get_cpu_number(uint32_t phys_id)
{
	return (int)phys_id;
}

int
ml_get_max_cpu_number(void)
{
	return avail_cpus - 1;
}

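/*
 * Routine: ml_processor_register
 * Function: Validate the platform-supplied CPU description, set up (or, for
 *	the boot CPU, reuse) the per-CPU data, record the idle/timer/error
 *	callbacks, and return the processor_t along with the IPI handler.
 */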
kern_return_t
ml_processor_register(
	ml_processor_info_t * in_processor_info,
	processor_t * processor_out,
	ipi_handler_t * ipi_handler)
{
	cpu_data_t *this_cpu_datap;
	boolean_t is_boot_cpu;

	if (in_processor_info->phys_id >= MAX_CPUS) {
		/*
		 * The physical CPU ID indicates that we have more CPUs than
		 * this xnu build supports. This probably means we have an
		 * incorrect board configuration.
		 *
		 * TODO: Should this just return a failure instead? A panic
		 * is simply a convenient way to catch bugs in the pexpert
		 * headers.
		 */
544 | panic("phys_id %u is too large for MAX_CPUS (%u)", in_processor_info->phys_id, MAX_CPUS); | |
545 | } | |
546 | ||
547 | /* Fail the registration if the number of CPUs has been limited by boot-arg. */ | |
548 | if ((in_processor_info->phys_id >= avail_cpus) || | |
549 | (in_processor_info->log_id > (uint32_t)ml_get_max_cpu_number())) | |
550 | return KERN_FAILURE; | |
551 | ||
552 | if (in_processor_info->log_id != (uint32_t)ml_get_boot_cpu_number()) { | |
553 | is_boot_cpu = FALSE; | |
554 | this_cpu_datap = cpu_data_alloc(FALSE); | |
555 | cpu_data_init(this_cpu_datap); | |
556 | } else { | |
557 | this_cpu_datap = &BootCpuData; | |
558 | is_boot_cpu = TRUE; | |
559 | } | |
560 | ||
561 | this_cpu_datap->cpu_id = in_processor_info->cpu_id; | |
562 | ||
563 | this_cpu_datap->cpu_chud = chudxnu_cpu_alloc(is_boot_cpu); | |
564 | if (this_cpu_datap->cpu_chud == (void *)NULL) | |
565 | goto processor_register_error; | |
566 | this_cpu_datap->cpu_console_buf = console_cpu_alloc(is_boot_cpu); | |
567 | if (this_cpu_datap->cpu_console_buf == (void *)(NULL)) | |
568 | goto processor_register_error; | |
569 | ||
570 | if (!is_boot_cpu) { | |
571 | if (cpu_data_register(this_cpu_datap) != KERN_SUCCESS) | |
572 | goto processor_register_error; | |
573 | } | |
574 | ||
575 | this_cpu_datap->cpu_idle_notify = (void *) in_processor_info->processor_idle; | |
576 | this_cpu_datap->cpu_cache_dispatch = in_processor_info->platform_cache_dispatch; | |
577 | nanoseconds_to_absolutetime((uint64_t) in_processor_info->powergate_latency, &this_cpu_datap->cpu_idle_latency); | |
578 | this_cpu_datap->cpu_reset_assist = kvtophys(in_processor_info->powergate_stub_addr); | |
579 | ||
580 | this_cpu_datap->idle_timer_notify = (void *) in_processor_info->idle_timer; | |
581 | this_cpu_datap->idle_timer_refcon = in_processor_info->idle_timer_refcon; | |
582 | ||
583 | this_cpu_datap->platform_error_handler = (void *) in_processor_info->platform_error_handler; | |
584 | this_cpu_datap->cpu_regmap_paddr = in_processor_info->regmap_paddr; | |
585 | this_cpu_datap->cpu_phys_id = in_processor_info->phys_id; | |
586 | this_cpu_datap->cpu_l2_access_penalty = in_processor_info->l2_access_penalty; | |
587 | ||
588 | if (!is_boot_cpu) { | |
589 | processor_init((struct processor *)this_cpu_datap->cpu_processor, | |
590 | this_cpu_datap->cpu_number, processor_pset(master_processor)); | |
591 | ||
592 | if (this_cpu_datap->cpu_l2_access_penalty) { | |
593 | /* | |
594 | * Cores that have a non-zero L2 access penalty compared | |
595 | * to the boot processor should be de-prioritized by the | |
596 | * scheduler, so that threads use the cores with better L2 | |
597 | * preferentially. | |
598 | */ | |
599 | processor_set_primary(this_cpu_datap->cpu_processor, | |
600 | master_processor); | |
601 | } | |
602 | } | |
603 | ||
604 | *processor_out = this_cpu_datap->cpu_processor; | |
605 | *ipi_handler = cpu_signal_handler; | |
606 | if (in_processor_info->idle_tickle != (idle_tickle_t *) NULL) | |
607 | *in_processor_info->idle_tickle = (idle_tickle_t) cpu_idle_tickle; | |
608 | ||
609 | #if KPC | |
610 | if (kpc_register_cpu(this_cpu_datap) != TRUE) | |
611 | goto processor_register_error; | |
612 | #endif | |
613 | ||
614 | if (!is_boot_cpu) | |
615 | prng_cpu_init(this_cpu_datap->cpu_number); | |
616 | ||
617 | return KERN_SUCCESS; | |
618 | ||
619 | processor_register_error: | |
620 | #if KPC | |
621 | kpc_unregister_cpu(this_cpu_datap); | |
622 | #endif | |
623 | if (this_cpu_datap->cpu_chud != (void *)NULL) | |
624 | chudxnu_cpu_free(this_cpu_datap->cpu_chud); | |
625 | if (!is_boot_cpu) | |
626 | cpu_data_free(this_cpu_datap); | |
627 | return KERN_FAILURE; | |
628 | } | |
629 | ||
630 | void | |
631 | ml_init_arm_debug_interface( | |
632 | void * in_cpu_datap, | |
633 | vm_offset_t virt_address) | |
634 | { | |
635 | ((cpu_data_t *)in_cpu_datap)->cpu_debug_interface_map = virt_address; | |
636 | do_debugid(); | |
637 | } | |
638 | ||
639 | /* | |
640 | * Routine: init_ast_check | |
641 | * Function: | |
642 | */ | |
643 | void | |
644 | init_ast_check( | |
645 | __unused processor_t processor) | |
646 | { | |
647 | } | |
648 | ||
649 | /* | |
650 | * Routine: cause_ast_check | |
651 | * Function: | |
652 | */ | |
653 | void | |
654 | cause_ast_check( | |
655 | processor_t processor) | |
656 | { | |
657 | if (current_processor() != processor) { | |
658 | cpu_signal(processor_to_cpu_datap(processor), SIGPast, (void *)NULL, (void *)NULL); | |
659 | KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_REMOTE_AST), processor->cpu_id, 1 /* ast */, 0, 0, 0); | |
660 | } | |
661 | } | |
662 | ||
663 | ||
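/*
 * Note: interrupt context is inferred from the stack pointer; the current SP
 * is checked against this CPU's interrupt stack bounds,
 * (intstack_top - INTSTACK_SIZE, intstack_top).
 */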
/*
 * Routine: ml_at_interrupt_context
 * Function: Check if running at interrupt context
 */
boolean_t
ml_at_interrupt_context(void)
{
	vm_offset_t stack_ptr;
	vm_offset_t intstack_top_ptr;

	__asm__ volatile("mov %0, sp\n":"=r"(stack_ptr));
	intstack_top_ptr = getCpuDatap()->intstack_top;
	return ((stack_ptr < intstack_top_ptr) && (stack_ptr > intstack_top_ptr - INTSTACK_SIZE));
}

extern uint32_t cpu_idle_count;

void ml_get_power_state(boolean_t *icp, boolean_t *pidlep) {
	*icp = ml_at_interrupt_context();
	*pidlep = (cpu_idle_count == real_ncpus);
}

/*
 * Routine: ml_cause_interrupt
 * Function: Generate a fake interrupt
 */
void
ml_cause_interrupt(void)
{
	return;	/* BS_XXX */
}

/* Map memory-mapped IO space */
vm_offset_t
ml_io_map(
	vm_offset_t phys_addr,
	vm_size_t size)
{
	return (io_map(phys_addr, size, VM_WIMG_IO));
}

vm_offset_t
ml_io_map_wcomb(
	vm_offset_t phys_addr,
	vm_size_t size)
{
	return (io_map(phys_addr, size, VM_WIMG_WCOMB));
}

/* boot memory allocation */
vm_offset_t
ml_static_malloc(
	__unused vm_size_t size)
{
	return ((vm_offset_t) NULL);
}

vm_map_address_t
ml_map_high_window(
	vm_offset_t phys_addr,
	vm_size_t len)
{
	return pmap_map_high_window_bd(phys_addr, len, VM_PROT_READ | VM_PROT_WRITE);
}

vm_offset_t
ml_static_ptovirt(
	vm_offset_t paddr)
{
	return phystokv(paddr);
}

vm_offset_t
ml_static_vtop(
	vm_offset_t vaddr)
{
	if (((vm_address_t)(vaddr) - gVirtBase) >= gPhysSize)
		panic("ml_static_vtop(): illegal vaddr: %p\n", (void*)vaddr);
	return ((vm_address_t)(vaddr) - gVirtBase + gPhysBase);
}


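/*
 * Routine: ml_static_protect
 * Function: Walk the kernel page tables for [vaddr, vaddr + size) and rewrite
 *	the AP/NX bits to match new_prot. Block (section) mappings are only
 *	accepted if they already carry the requested protections; W+X requests
 *	panic. The affected TLB region is flushed on the way out.
 */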
kern_return_t
ml_static_protect(
	vm_offset_t vaddr, /* kernel virtual address */
	vm_size_t size,
	vm_prot_t new_prot)
{
	pt_entry_t arm_prot = 0;
	pt_entry_t arm_block_prot = 0;
	vm_offset_t vaddr_cur;
	ppnum_t ppn;
	kern_return_t result = KERN_SUCCESS;

	if (vaddr < VM_MIN_KERNEL_ADDRESS)
		return KERN_FAILURE;

	assert((vaddr & (ARM_PGBYTES - 1)) == 0); /* must be page aligned */

	if ((new_prot & VM_PROT_WRITE) && (new_prot & VM_PROT_EXECUTE)) {
		panic("ml_static_protect(): WX request on %p", (void *) vaddr);
	}

	/* Set up the protection bits, and block bits so we can validate block mappings. */
	if (new_prot & VM_PROT_WRITE) {
		arm_prot |= ARM_PTE_AP(AP_RWNA);
		arm_block_prot |= ARM_TTE_BLOCK_AP(AP_RWNA);
	} else {
		arm_prot |= ARM_PTE_AP(AP_RONA);
		arm_block_prot |= ARM_TTE_BLOCK_AP(AP_RONA);
	}

	if (!(new_prot & VM_PROT_EXECUTE)) {
		arm_prot |= ARM_PTE_NX;
		arm_block_prot |= ARM_TTE_BLOCK_NX;
	}

	for (vaddr_cur = vaddr;
	     vaddr_cur < ((vaddr + size) & ~ARM_PGMASK);
	     vaddr_cur += ARM_PGBYTES) {
		ppn = pmap_find_phys(kernel_pmap, vaddr_cur);
		if (ppn != (vm_offset_t) NULL) {
			tt_entry_t *ttp = &kernel_pmap->tte[ttenum(vaddr_cur)];
			tt_entry_t tte = *ttp;

			if ((tte & ARM_TTE_TYPE_MASK) != ARM_TTE_TYPE_TABLE) {
				if (((tte & ARM_TTE_TYPE_MASK) == ARM_TTE_TYPE_BLOCK) &&
				    ((tte & (ARM_TTE_BLOCK_APMASK | ARM_TTE_BLOCK_NX_MASK)) == arm_block_prot)) {
					/*
					 * We can support ml_static_protect on a block mapping if the mapping already has
					 * the desired protections. We still want to run checks on a per-page basis.
					 */
					continue;
				}

				result = KERN_FAILURE;
				break;
			}

			pt_entry_t *pte_p = (pt_entry_t *) ttetokv(tte) + ptenum(vaddr_cur);
			pt_entry_t ptmp = *pte_p;

			ptmp = (ptmp & ~(ARM_PTE_APMASK | ARM_PTE_NX_MASK)) | arm_prot;
			*pte_p = ptmp;
#ifndef __ARM_L1_PTW__
			FlushPoC_DcacheRegion((vm_offset_t) pte_p, sizeof(*pte_p));
#endif
		}
	}

	if (vaddr_cur > vaddr)
		flush_mmu_tlb_region(vaddr, (vm_size_t)(vaddr_cur - vaddr));

	return result;
}

/*
 * Routine: ml_static_mfree
 * Function:
 */
void
ml_static_mfree(
	vm_offset_t vaddr,
	vm_size_t size)
{
	vm_offset_t vaddr_cur;
	ppnum_t ppn;
	uint32_t freed_pages = 0;

	/* It is acceptable (if bad) to fail to free. */
	if (vaddr < VM_MIN_KERNEL_ADDRESS)
		return;

	assert((vaddr & (PAGE_SIZE - 1)) == 0); /* must be page aligned */

	for (vaddr_cur = vaddr;
	     vaddr_cur < trunc_page_32(vaddr + size);
	     vaddr_cur += PAGE_SIZE) {
		ppn = pmap_find_phys(kernel_pmap, vaddr_cur);
		if (ppn != (vm_offset_t) NULL) {
			/*
			 * It is not acceptable to fail to update the protections on a page
			 * we will release to the VM. We need to either panic or continue.
			 * For now, we'll panic (to help flag if there is memory we can
			 * reclaim).
			 */
			if (ml_static_protect(vaddr_cur, PAGE_SIZE, VM_PROT_WRITE | VM_PROT_READ) != KERN_SUCCESS) {
				panic("Failed ml_static_mfree on %p", (void *) vaddr_cur);
			}
#if 0
			/*
			 * Must NOT tear down the "V==P" mapping for vaddr_cur as the zone alias scheme
			 * relies on the persistence of these mappings for all time.
			 */
			// pmap_remove(kernel_pmap, (addr64_t) vaddr_cur, (addr64_t) (vaddr_cur + PAGE_SIZE));
#endif
			vm_page_create(ppn, (ppn + 1));
			freed_pages++;
		}
	}
	vm_page_lockspin_queues();
	vm_page_wire_count -= freed_pages;
	vm_page_wire_count_initial -= freed_pages;
	vm_page_unlock_queues();
#if DEBUG
	kprintf("ml_static_mfree: Released 0x%x pages at VA %p, size:0x%llx, last ppn: 0x%x\n", freed_pages, (void *)vaddr, (uint64_t)size, ppn);
#endif
}


/* virtual to physical on wired pages */
vm_offset_t
ml_vtophys(vm_offset_t vaddr)
{
	return kvtophys(vaddr);
}

/*
 * Routine: ml_nofault_copy
 * Function: Perform a physical mode copy if the source and destination have
 * valid translations in the kernel pmap. If translations are present, they are
 * assumed to be wired; i.e., no attempt is made to guarantee that the
 * translations obtained remain valid for the duration of the copy process.
 */
vm_size_t
ml_nofault_copy(vm_offset_t virtsrc, vm_offset_t virtdst, vm_size_t size)
{
	addr64_t cur_phys_dst, cur_phys_src;
	uint32_t count, nbytes = 0;

	while (size > 0) {
		if (!(cur_phys_src = kvtophys(virtsrc)))
			break;
		if (!(cur_phys_dst = kvtophys(virtdst)))
			break;
		if (!pmap_valid_address(trunc_page_64(cur_phys_dst)) ||
		    !pmap_valid_address(trunc_page_64(cur_phys_src)))
			break;
		count = PAGE_SIZE - (cur_phys_src & PAGE_MASK);
		if (count > (PAGE_SIZE - (cur_phys_dst & PAGE_MASK)))
			count = PAGE_SIZE - (cur_phys_dst & PAGE_MASK);
		if (count > size)
			count = size;

		bcopy_phys(cur_phys_src, cur_phys_dst, count);

		nbytes += count;
		virtsrc += count;
		virtdst += count;
		size -= count;
	}

	return nbytes;
}

/*
 * Routine: ml_validate_nofault
 * Function: Validate that this address range has valid translations
 * in the kernel pmap. If translations are present, they are
 * assumed to be wired; i.e., no attempt is made to guarantee
 * that the translations persist after the check.
 * Returns: TRUE if the range is mapped and will not cause a fault,
 * FALSE otherwise.
 */

boolean_t ml_validate_nofault(
	vm_offset_t virtsrc, vm_size_t size)
{
	addr64_t cur_phys_src;
	uint32_t count;

	while (size > 0) {
		if (!(cur_phys_src = kvtophys(virtsrc)))
			return FALSE;
		if (!pmap_valid_address(trunc_page_64(cur_phys_src)))
			return FALSE;
		count = (uint32_t)(PAGE_SIZE - (cur_phys_src & PAGE_MASK));
		if (count > size)
			count = (uint32_t)size;

		virtsrc += count;
		size -= count;
	}

	return TRUE;
}

void
ml_get_bouncepool_info(vm_offset_t * phys_addr, vm_size_t * size)
{
	*phys_addr = 0;
	*size = 0;
}

/*
 * Stubs for CPU Stepper
 */
void
active_rt_threads(__unused boolean_t active)
{
}

void
thread_tell_urgency(__unused int urgency,
		    __unused uint64_t rt_period,
		    __unused uint64_t rt_deadline,
		    __unused uint64_t sched_latency,
		    __unused thread_t nthread)
{
}

void
machine_run_count(__unused uint32_t count)
{
}

processor_t
machine_choose_processor(__unused processor_set_t pset, processor_t processor)
{
	return (processor);
}

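/*
 * ml_stack_remaining() estimates the space left on the current stack by
 * taking the address of a local variable and subtracting the base of either
 * the interrupt stack or the current thread's kernel stack.
 */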
vm_offset_t
ml_stack_remaining(void)
{
	uintptr_t local = (uintptr_t) &local;

	if (ml_at_interrupt_context()) {
		return (local - (getCpuDatap()->intstack_top - INTSTACK_SIZE));
	} else {
		return (local - current_thread()->kernel_stack);
	}
}

boolean_t machine_timeout_suspended(void) {
	return FALSE;
}

kern_return_t
ml_interrupt_prewarm(__unused uint64_t deadline)
{
	return KERN_FAILURE;
}

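/*
 * ml_get_hwclock() assembles the 64-bit count from two 32-bit halves read
 * via MRRC from CP15, c14 (the ARMv7 physical counter), retrying until the
 * high word is stable so that a carry between reads cannot yield a torn
 * value.
 */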
uint64_t
ml_get_hwclock(void)
{
	uint64_t high_first = 0;
	uint64_t high_second = 0;
	uint64_t low = 0;

	__builtin_arm_isb(ISB_SY);

	do {
		high_first = __builtin_arm_mrrc(15, 0, 14) >> 32;
		low = __builtin_arm_mrrc(15, 0, 14) & 0xFFFFFFFFULL;
		high_second = __builtin_arm_mrrc(15, 0, 14) >> 32;
	} while (high_first != high_second);

	return (high_first << 32) | (low);
}

boolean_t
ml_delay_should_spin(uint64_t interval)
{
	cpu_data_t *cdp = getCpuDatap();

	if (cdp->cpu_idle_latency) {
		return (interval < cdp->cpu_idle_latency) ? TRUE : FALSE;
	} else {
		/*
		 * Early boot, latency is unknown. Err on the side of blocking,
		 * which should always be safe, even if slow
		 */
		return FALSE;
	}
}

boolean_t ml_thread_is64bit(thread_t thread)
{
	return (thread_is_64bit(thread));
}

void ml_timer_evaluate(void) {
}

boolean_t
ml_timer_forced_evaluation(void) {
	return FALSE;
}

uint64_t
ml_energy_stat(__unused thread_t t) {
	return 0;
}


void
ml_gpu_stat_update(__unused uint64_t gpu_ns_delta) {
#if CONFIG_EMBEDDED
	/*
	 * For now: update the resource coalition stats of the
	 * current thread's coalition
	 */
	task_coalition_update_gpu_stats(current_task(), gpu_ns_delta);
#endif
}

uint64_t
ml_gpu_stat(__unused thread_t t) {
	return 0;
}

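/*
 * When precise user/kernel time accounting is enabled, timer_state_event()
 * stops the outgoing state and thread timers and starts the incoming ones
 * against a single timebase sample, so no interval is double-counted or lost
 * when crossing the user/kernel boundary.
 */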
#if !CONFIG_SKIP_PRECISE_USER_KERNEL_TIME
static void
timer_state_event(boolean_t switch_to_kernel)
{
	thread_t thread = current_thread();
	if (!thread->precise_user_kernel_time) return;

	processor_data_t *pd = &getCpuDatap()->cpu_processor->processor_data;
	uint64_t now = ml_get_timebase();

	timer_stop(pd->current_state, now);
	pd->current_state = (switch_to_kernel) ? &pd->system_state : &pd->user_state;
	timer_start(pd->current_state, now);

	timer_stop(pd->thread_timer, now);
	pd->thread_timer = (switch_to_kernel) ? &thread->system_timer : &thread->user_timer;
	timer_start(pd->thread_timer, now);
}

void
timer_state_event_user_to_kernel(void)
{
	timer_state_event(TRUE);
}

void
timer_state_event_kernel_to_user(void)
{
	timer_state_event(FALSE);
}
#endif /* !CONFIG_SKIP_PRECISE_USER_KERNEL_TIME */

boolean_t
user_cont_hwclock_allowed(void)
{
	return FALSE;
}

boolean_t
user_timebase_allowed(void)
{
#if __ARM_TIME__
	return TRUE;
#else
	return FALSE;
#endif
}

/*
 * The following are required for parts of the kernel
 * that cannot resolve these functions as inlines:
 */
extern thread_t current_act(void);
thread_t
current_act(void)
{
	return current_thread_fast();
}

#undef current_thread
extern thread_t current_thread(void);
thread_t
current_thread(void)
{
	return current_thread_fast();
}

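/*
 * With __ARM_USER_PROTECT__, arm_user_protect_begin() switches TTBR0 to the
 * thread's kernel translation table (kptw_ttb) and loads the kernel ASID
 * into CONTEXTIDR, returning the previous TTBR0; arm_user_protect_end()
 * restores the thread's user table (uptw_ttb) and ASID, optionally masking
 * FIQ/IRQ around the switch.
 */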
#if __ARM_USER_PROTECT__
uintptr_t
arm_user_protect_begin(thread_t thread)
{
	uintptr_t ttbr0, asid = 0;		// kernel asid

	ttbr0 = __builtin_arm_mrc(15, 0, 2, 0, 0);	// Get TTBR0
	if (ttbr0 != thread->machine.kptw_ttb) {
		__builtin_arm_mcr(15, 0, thread->machine.kptw_ttb, 2, 0, 0);	// Set TTBR0
		__builtin_arm_mcr(15, 0, asid, 13, 0, 1);	// Set CONTEXTIDR
		__builtin_arm_isb(ISB_SY);
	}
	return ttbr0;
}

void
arm_user_protect_end(thread_t thread, uintptr_t ttbr0, boolean_t disable_interrupts)
{
	if ((ttbr0 != thread->machine.kptw_ttb) && (thread->machine.uptw_ttb != thread->machine.kptw_ttb)) {
		if (disable_interrupts)
			__asm__ volatile ("cpsid if" ::: "memory");	// Disable FIQ/IRQ
		__builtin_arm_mcr(15, 0, thread->machine.uptw_ttb, 2, 0, 0);	// Set TTBR0
		__builtin_arm_mcr(15, 0, thread->machine.asid, 13, 0, 1);	// Set CONTEXTIDR with thread asid
		__builtin_arm_dsb(DSB_ISH);
		__builtin_arm_isb(ISB_SY);
	}
}
#endif // __ARM_USER_PROTECT__

void ml_task_set_rop_pid(__unused task_t task, __unused task_t parent_task, __unused boolean_t inherit)
{
	return;
}