/*
 * Copyright (c) 2007-2016 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 *	File:	arm/cpu.c
 *
 *	cpu specific routines
 */

#include <kern/kalloc.h>
#include <kern/machine.h>
#include <kern/cpu_number.h>
#include <kern/thread.h>
#include <kern/timer_queue.h>
#include <arm/cpu_data.h>
#include <arm/cpuid.h>
#include <arm/caches_internal.h>
#include <arm/cpu_data_internal.h>
#include <arm/cpu_internal.h>
#include <arm/misc_protos.h>
#include <arm/machine_cpu.h>
#include <arm/rtclock.h>
#include <arm/proc_reg.h>
#include <mach/processor_info.h>
#include <vm/pmap.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <pexpert/arm/board_config.h>
#include <pexpert/arm/protos.h>
#include <sys/kdebug.h>

#include <machine/atomic.h>

#if KPC
#include <kern/kpc.h>
#endif

extern unsigned int resume_idle_cpu;
extern unsigned int start_cpu;

unsigned int start_cpu_paddr;

extern boolean_t idle_enable;
extern unsigned int real_ncpus;
extern uint64_t wake_abstime;

extern void* wfi_inst;
unsigned wfi_fast = 1;
unsigned patch_to_nop = 0xe1a00000;	/* ARM "mov r0, r0", used to patch out WFI */

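/*
 * State shared with the boot/reset path through the low exception
 * vectors page: an 8-byte marker at offset 0x80 records whether the
 * system is suspended or running.  Read back to front, the two
 * signatures spell "SUSPMOSX" and "RUNNMOSX" (presumably suspend- and
 * running-OS X markers consumed by the boot firmware).
 */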
void *LowExceptionVectorsAddr;
#define IOS_STATE	(((vm_offset_t)LowExceptionVectorsAddr + 0x80))
#define IOS_STATE_SIZE	(0x08UL)
static const uint8_t suspend_signature[] = {'X', 'S', 'O', 'M', 'P', 'S', 'U', 'S'};
static const uint8_t running_signature[] = {'X', 'S', 'O', 'M', 'N', 'N', 'U', 'R'};

/*
 *	Routine:	cpu_bootstrap
 *	Function:	Nothing to do on this architecture.
 */
void
cpu_bootstrap(void)
{
}


/*
 *	Routine:	cpu_sleep
 *	Function:	Prepare the calling CPU for system sleep: switch to
 *			the kernel pmap, install the start_cpu reset handler,
 *			flag the sleep state, clean the D-cache to the point
 *			of coherency, and quiesce via the platform expert.
 */
void
cpu_sleep(void)
{
	cpu_data_t *cpu_data_ptr = getCpuDatap();

	pmap_switch_user_ttb(kernel_pmap);
	cpu_data_ptr->cpu_active_thread = current_thread();
	cpu_data_ptr->cpu_reset_handler = (vm_offset_t) start_cpu_paddr;
	cpu_data_ptr->cpu_flags |= SleepState;
	cpu_data_ptr->cpu_user_debug = NULL;

	CleanPoC_Dcache();

	PE_cpu_machine_quiesce(cpu_data_ptr->cpu_id);
}

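/* Number of CPUs currently in platform idle; maintained by machine_track_platform_idle(). */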
_Atomic uint32_t cpu_idle_count = 0;

/*
 *	Routine:	cpu_idle
 *	Function:	Idle loop for the calling CPU: notify the idle
 *			handler, rearm the idle timer as requested, and
 *			wait for the next interrupt in WFI.
 */
void __attribute__((noreturn))
cpu_idle(void)
{
	cpu_data_t *cpu_data_ptr = getCpuDatap();
	uint64_t new_idle_timeout_ticks = 0x0ULL, lastPop;

	if ((!idle_enable) || (cpu_data_ptr->cpu_signal & SIGPdisabled))
		Idle_load_context();
	if (!SetIdlePop())
		Idle_load_context();
	lastPop = cpu_data_ptr->rtcPop;

	pmap_switch_user_ttb(kernel_pmap);
	cpu_data_ptr->cpu_active_thread = current_thread();
	if (cpu_data_ptr->cpu_user_debug)
		arm_debug_set(NULL);
	cpu_data_ptr->cpu_user_debug = NULL;

	if (cpu_data_ptr->cpu_idle_notify)
		((processor_idle_t) cpu_data_ptr->cpu_idle_notify) (cpu_data_ptr->cpu_id, TRUE, &new_idle_timeout_ticks);

	if (cpu_data_ptr->idle_timer_notify != 0) {
		if (new_idle_timeout_ticks == 0x0ULL) {
			/* turn off the idle timer */
			cpu_data_ptr->idle_timer_deadline = 0x0ULL;
		} else {
			/* set the new idle timeout */
			clock_absolutetime_interval_to_deadline(new_idle_timeout_ticks, &cpu_data_ptr->idle_timer_deadline);
		}
		timer_resync_deadlines();
		if (cpu_data_ptr->rtcPop != lastPop)
			SetIdlePop();
	}

#if KPC
	kpc_idle();
#endif

	platform_cache_idle_enter();
	cpu_idle_wfi((boolean_t) wfi_fast);
	platform_cache_idle_exit();

	ClearIdlePop(TRUE);
	cpu_idle_exit();
}

/*
 *	Routine:	cpu_idle_exit
 *	Function:	Wake path out of cpu_idle(): restore the active
 *			thread's pmap, notify the idle handler of the exit,
 *			rearm the idle timer, and reload the idle context.
 */
void
cpu_idle_exit(void)
{
	uint64_t new_idle_timeout_ticks = 0x0ULL;
	cpu_data_t *cpu_data_ptr = getCpuDatap();

#if KPC
	kpc_idle_exit();
#endif

	pmap_set_pmap(cpu_data_ptr->cpu_active_thread->map->pmap, current_thread());

	if (cpu_data_ptr->cpu_idle_notify)
		((processor_idle_t) cpu_data_ptr->cpu_idle_notify) (cpu_data_ptr->cpu_id, FALSE, &new_idle_timeout_ticks);

	if (cpu_data_ptr->idle_timer_notify != 0) {
		if (new_idle_timeout_ticks == 0x0ULL) {
			/* turn off the idle timer */
			cpu_data_ptr->idle_timer_deadline = 0x0ULL;
		} else {
			/* set the new idle timeout */
			clock_absolutetime_interval_to_deadline(new_idle_timeout_ticks, &cpu_data_ptr->idle_timer_deadline);
		}
		timer_resync_deadlines();
	}

	Idle_load_context();
}

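/*
 *	Routine:	cpu_init
 *	Function:	Per-CPU initialization: record the CPU type and
 *			subtype from the CPUID information, set up the
 *			rtclock timer queue, and clear the wake statistics
 *			and sleep token.
 */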
void
cpu_init(void)
{
	cpu_data_t *cdp = getCpuDatap();
	arm_cpu_info_t *cpu_info_p;

	if (cdp->cpu_type != CPU_TYPE_ARM) {

		cdp->cpu_type = CPU_TYPE_ARM;

		timer_call_queue_init(&cdp->rtclock_timer.queue);
		cdp->rtclock_timer.deadline = EndOfAllTime;

		if (cdp == &BootCpuData) {
			do_cpuid();
			do_cacheid();
			do_mvfpid();
		} else {
			/*
			 * We initialize non-boot CPUs here; the boot CPU is
			 * dealt with as part of pmap_bootstrap.
			 */
			pmap_cpu_data_init();
		}
		/* ARM_SMP: Assuming identical cpu */
		do_debugid();

		cpu_info_p = cpuid_info();

		/* switch based on CPU's reported architecture */
		switch (cpu_info_p->arm_info.arm_arch) {
		case CPU_ARCH_ARMv4T:
		case CPU_ARCH_ARMv5T:
			cdp->cpu_subtype = CPU_SUBTYPE_ARM_V4T;
			break;
		case CPU_ARCH_ARMv5TE:
		case CPU_ARCH_ARMv5TEJ:
			if (cpu_info_p->arm_info.arm_implementor == CPU_VID_INTEL)
				cdp->cpu_subtype = CPU_SUBTYPE_ARM_XSCALE;
			else
				cdp->cpu_subtype = CPU_SUBTYPE_ARM_V5TEJ;
			break;
		case CPU_ARCH_ARMv6:
			cdp->cpu_subtype = CPU_SUBTYPE_ARM_V6;
			break;
		case CPU_ARCH_ARMv7:
			cdp->cpu_subtype = CPU_SUBTYPE_ARM_V7;
			break;
		case CPU_ARCH_ARMv7f:
			cdp->cpu_subtype = CPU_SUBTYPE_ARM_V7F;
			break;
		case CPU_ARCH_ARMv7s:
			cdp->cpu_subtype = CPU_SUBTYPE_ARM_V7S;
			break;
		case CPU_ARCH_ARMv7k:
			cdp->cpu_subtype = CPU_SUBTYPE_ARM_V7K;
			break;
		default:
			cdp->cpu_subtype = CPU_SUBTYPE_ARM_ALL;
			break;
		}

		cdp->cpu_threadtype = CPU_THREADTYPE_NONE;
	}
	cdp->cpu_stat.irq_ex_cnt_wake = 0;
	cdp->cpu_stat.ipi_cnt_wake = 0;
	cdp->cpu_stat.timer_cnt_wake = 0;
	cdp->cpu_running = TRUE;
	cdp->cpu_sleep_token_last = cdp->cpu_sleep_token;
	cdp->cpu_sleep_token = 0x0UL;
}

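/*
 *	Routine:	cpu_data_alloc
 *	Function:	Allocate the cpu_data_t and the IRQ/FIQ stacks for a
 *			CPU.  The boot CPU uses the statically allocated
 *			BootCpuData instead.
 */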
cpu_data_t *
cpu_data_alloc(boolean_t is_boot_cpu)
{
	cpu_data_t *cpu_data_ptr = NULL;

	if (is_boot_cpu)
		cpu_data_ptr = &BootCpuData;
	else {
		void *irq_stack = NULL;
		void *fiq_stack = NULL;

		if ((kmem_alloc(kernel_map, (vm_offset_t *)&cpu_data_ptr, sizeof(cpu_data_t), VM_KERN_MEMORY_CPU)) != KERN_SUCCESS)
			goto cpu_data_alloc_error;

		bzero((void *)cpu_data_ptr, sizeof(cpu_data_t));

		if ((irq_stack = kalloc(INTSTACK_SIZE)) == NULL)
			goto cpu_data_alloc_error;
#if __BIGGEST_ALIGNMENT__
		/* force 16-byte alignment */
		if ((uint32_t)irq_stack & 0x0F)
			irq_stack = (void *)((uint32_t)irq_stack + (0x10 - ((uint32_t)irq_stack & 0x0F)));
#endif
		cpu_data_ptr->intstack_top = (vm_offset_t)irq_stack + INTSTACK_SIZE;
		cpu_data_ptr->istackptr = cpu_data_ptr->intstack_top;

		if ((fiq_stack = kalloc(PAGE_SIZE)) == NULL)
			goto cpu_data_alloc_error;
#if __BIGGEST_ALIGNMENT__
		/* force 16-byte alignment */
		if ((uint32_t)fiq_stack & 0x0F)
			fiq_stack = (void *)((uint32_t)fiq_stack + (0x10 - ((uint32_t)fiq_stack & 0x0F)));
#endif
		cpu_data_ptr->fiqstack_top = (vm_offset_t)fiq_stack + PAGE_SIZE;
		cpu_data_ptr->fiqstackptr = cpu_data_ptr->fiqstack_top;
	}

	cpu_data_ptr->cpu_processor = cpu_processor_alloc(is_boot_cpu);
	if (cpu_data_ptr->cpu_processor == (struct processor *)NULL)
		goto cpu_data_alloc_error;

	return cpu_data_ptr;

cpu_data_alloc_error:
	panic("cpu_data_alloc() failed\n");
	return (cpu_data_t *)NULL;
}

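/*
 *	Routine:	cpu_data_free
 *	Function:	Release the per-CPU data and stacks set up by
 *			cpu_data_alloc().  The boot CPU's static data is
 *			never freed.
 */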
void
cpu_data_free(cpu_data_t *cpu_data_ptr)
{
	if (cpu_data_ptr == &BootCpuData)
		return;

	cpu_processor_free(cpu_data_ptr->cpu_processor);
	kfree((void *)(cpu_data_ptr->intstack_top - INTSTACK_SIZE), INTSTACK_SIZE);
	kfree((void *)(cpu_data_ptr->fiqstack_top - PAGE_SIZE), PAGE_SIZE);
	kmem_free(kernel_map, (vm_offset_t)cpu_data_ptr, sizeof(cpu_data_t));
}

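/*
 *	Routine:	cpu_data_init
 *	Function:	Set a cpu_data_t to its default, not-yet-started
 *			state.
 */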
void
cpu_data_init(cpu_data_t *cpu_data_ptr)
{
	uint32_t i = 0;

	cpu_data_ptr->cpu_flags = 0;
#if __arm__
	cpu_data_ptr->cpu_exc_vectors = (vm_offset_t)&ExceptionVectorsTable;
#endif
	cpu_data_ptr->interrupts_enabled = 0;
	cpu_data_ptr->cpu_int_state = 0;
	cpu_data_ptr->cpu_pending_ast = AST_NONE;
	cpu_data_ptr->cpu_cache_dispatch = (void *) 0;
	cpu_data_ptr->rtcPop = EndOfAllTime;
	cpu_data_ptr->rtclock_datap = &RTClockData;
	cpu_data_ptr->cpu_user_debug = NULL;
	cpu_data_ptr->cpu_base_timebase_low = 0;
	cpu_data_ptr->cpu_base_timebase_high = 0;
	cpu_data_ptr->cpu_idle_notify = (void *) 0;
	cpu_data_ptr->cpu_idle_latency = 0x0ULL;
	cpu_data_ptr->cpu_idle_pop = 0x0ULL;
	cpu_data_ptr->cpu_reset_type = 0x0UL;
	cpu_data_ptr->cpu_reset_handler = 0x0UL;
	cpu_data_ptr->cpu_reset_assist = 0x0UL;
	cpu_data_ptr->cpu_regmap_paddr = 0x0ULL;
	cpu_data_ptr->cpu_phys_id = 0x0UL;
	cpu_data_ptr->cpu_l2_access_penalty = 0;
	cpu_data_ptr->cpu_cluster_type = CLUSTER_TYPE_SMP;
	cpu_data_ptr->cpu_cluster_id = 0;
	cpu_data_ptr->cpu_l2_id = 0;
	cpu_data_ptr->cpu_l2_size = 0;
	cpu_data_ptr->cpu_l3_id = 0;
	cpu_data_ptr->cpu_l3_size = 0;

	cpu_data_ptr->cpu_signal = SIGPdisabled;

#if DEBUG || DEVELOPMENT
	cpu_data_ptr->failed_xcall = NULL;
	cpu_data_ptr->failed_signal = 0;
	cpu_data_ptr->failed_signal_count = 0;
#endif

	cpu_data_ptr->cpu_get_fiq_handler = NULL;
	cpu_data_ptr->cpu_tbd_hardware_addr = NULL;
	cpu_data_ptr->cpu_tbd_hardware_val = NULL;
	cpu_data_ptr->cpu_get_decrementer_func = NULL;
	cpu_data_ptr->cpu_set_decrementer_func = NULL;
	cpu_data_ptr->cpu_sleep_token = ARM_CPU_ON_SLEEP_PATH;
	cpu_data_ptr->cpu_sleep_token_last = 0x00000000UL;
	cpu_data_ptr->cpu_xcall_p0 = NULL;
	cpu_data_ptr->cpu_xcall_p1 = NULL;

#if __ARM_SMP__ && defined(ARMA7)
	cpu_data_ptr->cpu_CLWFlush_req = 0x0ULL;
	cpu_data_ptr->cpu_CLWFlush_last = 0x0ULL;
	cpu_data_ptr->cpu_CLWClean_req = 0x0ULL;
	cpu_data_ptr->cpu_CLWClean_last = 0x0ULL;
	cpu_data_ptr->cpu_CLW_active = 0x1UL;
#endif

	pmap_cpu_data_t *pmap_cpu_data_ptr = &cpu_data_ptr->cpu_pmap_cpu_data;

	pmap_cpu_data_ptr->cpu_user_pmap = (struct pmap *) NULL;
	pmap_cpu_data_ptr->cpu_user_pmap_stamp = 0;
	pmap_cpu_data_ptr->cpu_number = PMAP_INVALID_CPU_NUM;

	for (i = 0; i < (sizeof(pmap_cpu_data_ptr->cpu_asid_high_bits) / sizeof(*pmap_cpu_data_ptr->cpu_asid_high_bits)); i++) {
		pmap_cpu_data_ptr->cpu_asid_high_bits[i] = 0;
	}
	cpu_data_ptr->halt_status = CPU_NOT_HALTED;
}

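/*
 *	Routine:	cpu_data_register
 *	Function:	Assign the next free cpu number to this cpu_data_t
 *			and publish its virtual and physical addresses in
 *			CpuDataEntries.
 */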
kern_return_t
cpu_data_register(cpu_data_t *cpu_data_ptr)
{
	int cpu;

	/* OSIncrementAtomic returns the pre-increment value, i.e. this CPU's index */
	cpu = OSIncrementAtomic((SInt32*)&real_ncpus);
	if (real_ncpus > MAX_CPUS) {
		return KERN_FAILURE;
	}

	cpu_data_ptr->cpu_number = cpu;
	CpuDataEntries[cpu].cpu_data_vaddr = cpu_data_ptr;
	CpuDataEntries[cpu].cpu_data_paddr = (void *)ml_vtophys((vm_offset_t)cpu_data_ptr);
	return KERN_SUCCESS;
}

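/*
 *	Routine:	cpu_start
 *	Function:	Start the given CPU.  For the calling CPU this just
 *			finishes machine initialization; for another CPU it
 *			installs the reset handler and first thread, flushes
 *			the cpu_data to memory, and kicks the core through
 *			the platform expert (SMP configurations only).
 */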
kern_return_t
cpu_start(int cpu)
{
	kprintf("cpu_start() cpu: %d\n", cpu);
	if (cpu == cpu_number()) {
		cpu_machine_init();
		return KERN_SUCCESS;
	} else {
#if __ARM_SMP__
		cpu_data_t *cpu_data_ptr;
		thread_t first_thread;

		cpu_data_ptr = CpuDataEntries[cpu].cpu_data_vaddr;
		cpu_data_ptr->cpu_reset_handler = (vm_offset_t) start_cpu_paddr;

		cpu_data_ptr->cpu_pmap_cpu_data.cpu_user_pmap = NULL;

		if (cpu_data_ptr->cpu_processor->next_thread != THREAD_NULL)
			first_thread = cpu_data_ptr->cpu_processor->next_thread;
		else
			first_thread = cpu_data_ptr->cpu_processor->idle_thread;
		cpu_data_ptr->cpu_active_thread = first_thread;
		first_thread->machine.CpuDatap = cpu_data_ptr;

		flush_dcache((vm_offset_t)&CpuDataEntries[cpu], sizeof(cpu_data_entry_t), FALSE);
		flush_dcache((vm_offset_t)cpu_data_ptr, sizeof(cpu_data_t), FALSE);
		(void) PE_cpu_start(cpu_data_ptr->cpu_id, (vm_offset_t)NULL, (vm_offset_t)NULL);
		return KERN_SUCCESS;
#else
		return KERN_FAILURE;
#endif
	}
}

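/*
 *	Routine:	cpu_timebase_init
 *	Function:	Wire up the rtclock timebase accessors for this CPU
 *			and reset its decrementer and timebase snapshot.
 */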
void
cpu_timebase_init(boolean_t from_boot __unused)
{
	cpu_data_t *cdp = getCpuDatap();

	if (cdp->cpu_get_fiq_handler == NULL) {
		cdp->cpu_get_fiq_handler = rtclock_timebase_func.tbd_fiq_handler;
		cdp->cpu_get_decrementer_func = rtclock_timebase_func.tbd_get_decrementer;
		cdp->cpu_set_decrementer_func = rtclock_timebase_func.tbd_set_decrementer;
		cdp->cpu_tbd_hardware_addr = (void *)rtclock_timebase_addr;
		cdp->cpu_tbd_hardware_val = (void *)rtclock_timebase_val;
	}
	cdp->cpu_decrementer = 0x7FFFFFFFUL;
	cdp->cpu_timebase_low = 0x0UL;
	cdp->cpu_timebase_high = 0x0UL;

#if __arm__ && (__BIGGEST_ALIGNMENT__ > 4)
	/* For the newer ARMv7k ABI where 64-bit types are 64-bit aligned, but pointers
	 * are 32-bit. */
	cdp->cpu_base_timebase_low = rtclock_base_abstime_low;
	cdp->cpu_base_timebase_high = rtclock_base_abstime_high;
#else
	*((uint64_t *)&cdp->cpu_base_timebase_low) = rtclock_base_abstime;
#endif
}

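/*
 *	Routine:	ml_arm_sleep
 *	Function:	Final step of system sleep.  The boot CPU waits for
 *			every secondary to reach the sleep path and records
 *			the timebase fixup used on resume; secondaries just
 *			disable and clean their caches.  All CPUs then spin
 *			until power is removed.
 */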
__attribute__((noreturn))
void
ml_arm_sleep(void)
{
	cpu_data_t *cpu_data_ptr = getCpuDatap();

	if (cpu_data_ptr == &BootCpuData) {
		cpu_data_t *target_cdp;
		unsigned int cpu;

		for (cpu = 0; cpu < MAX_CPUS; cpu++) {
			target_cdp = (cpu_data_t *)CpuDataEntries[cpu].cpu_data_vaddr;
			if (target_cdp == (cpu_data_t *)NULL)
				break;

			if (target_cdp == cpu_data_ptr)
				continue;

			/* Spin until the secondary has published its sleep token */
			while (target_cdp->cpu_sleep_token != ARM_CPU_ON_SLEEP_PATH);
		}

		/* Now that the other cores have entered the sleep path, set
		 * the abstime fixup we'll use when we resume.*/
		rtclock_base_abstime = ml_get_timebase();
		wake_abstime = rtclock_base_abstime;

	} else {
		platform_cache_disable();
		CleanPoU_Dcache();
	}
	cpu_data_ptr->cpu_sleep_token = ARM_CPU_ON_SLEEP_PATH;
#if __ARM_SMP__ && defined(ARMA7)
	cpu_data_ptr->cpu_CLWFlush_req = 0;
	cpu_data_ptr->cpu_CLWClean_req = 0;
	__builtin_arm_dmb(DMB_ISH);
	cpu_data_ptr->cpu_CLW_active = 0;
#endif
	if (cpu_data_ptr == &BootCpuData) {
		platform_cache_disable();
		platform_cache_shutdown();
		/* Publish the suspend marker in the low-vector state area */
		bcopy((const void *)suspend_signature, (void *)(IOS_STATE), IOS_STATE_SIZE);
	} else
		CleanPoC_DcacheRegion((vm_offset_t) cpu_data_ptr, sizeof(cpu_data_t));

	__builtin_arm_dsb(DSB_SY);
	while (TRUE) {
#if __ARM_ENABLE_WFE_
		__builtin_arm_wfe();
#endif
	} /* Spin */
}

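/*
 *	Routine:	cpu_machine_idle_init
 *	Function:	On the boot pass, honor the "jtag" and "wfi"
 *			boot-args, map the low exception vectors page, and
 *			copy the vectors and reset-handler data down to
 *			physical low memory.  On every pass, publish the
 *			running signature (boot CPU only) and point this
 *			CPU's reset handler at resume_idle_cpu.
 */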
void
cpu_machine_idle_init(boolean_t from_boot)
{
	static const unsigned int *BootArgs_paddr = (unsigned int *)NULL;
	static const unsigned int *CpuDataEntries_paddr = (unsigned int *)NULL;
	static unsigned int resume_idle_cpu_paddr = (unsigned int)NULL;
	cpu_data_t *cpu_data_ptr = getCpuDatap();

	if (from_boot) {
		unsigned int jtag = 0;
		unsigned int wfi;

		/* "jtag=1" disables idle so an attached debugger can keep the core awake */
		if (PE_parse_boot_argn("jtag", &jtag, sizeof (jtag))) {
			if (jtag != 0)
				idle_enable = FALSE;
			else
				idle_enable = TRUE;
		} else
			idle_enable = TRUE;

		/* "wfi" boot-arg: 0 patches the WFI instruction to a NOP,
		 * 2 selects the slow WFI path, default (1) is fast WFI. */
		if (!PE_parse_boot_argn("wfi", &wfi, sizeof (wfi)))
			wfi = 1;

		if (wfi == 0)
			bcopy_phys((addr64_t)ml_static_vtop((vm_offset_t)&patch_to_nop),
			           (addr64_t)ml_static_vtop((vm_offset_t)&wfi_inst), sizeof(unsigned));
		if (wfi == 2)
			wfi_fast = 0;

		LowExceptionVectorsAddr = (void *)ml_io_map(ml_vtophys((vm_offset_t)gPhysBase), PAGE_SIZE);

		/* Copy Exception Vectors low, but don't touch the sleep token */
		bcopy((void *)&ExceptionLowVectorsBase, (void *)LowExceptionVectorsAddr, 0x90);
		bcopy(((void *)(((vm_offset_t)&ExceptionLowVectorsBase) + 0xA0)), ((void *)(((vm_offset_t)LowExceptionVectorsAddr) + 0xA0)), ARM_PGBYTES - 0xA0);

		start_cpu_paddr = ml_static_vtop((vm_offset_t)&start_cpu);

		BootArgs_paddr = (unsigned int *)ml_static_vtop((vm_offset_t)BootArgs);
		bcopy_phys((addr64_t)ml_static_vtop((vm_offset_t)&BootArgs_paddr),
		           (addr64_t)((unsigned int)(gPhysBase) +
		                      ((unsigned int)&(ResetHandlerData.boot_args) - (unsigned int)&ExceptionLowVectorsBase)),
		           4);

		CpuDataEntries_paddr = (unsigned int *)ml_static_vtop((vm_offset_t)CpuDataEntries);
		bcopy_phys((addr64_t)ml_static_vtop((vm_offset_t)&CpuDataEntries_paddr),
		           (addr64_t)((unsigned int)(gPhysBase) +
		                      ((unsigned int)&(ResetHandlerData.cpu_data_entries) - (unsigned int)&ExceptionLowVectorsBase)),
		           4);

		CleanPoC_DcacheRegion((vm_offset_t) phystokv((char *) (gPhysBase)), PAGE_SIZE);

		resume_idle_cpu_paddr = (unsigned int)ml_static_vtop((vm_offset_t)&resume_idle_cpu);
	}

	if (cpu_data_ptr == &BootCpuData) {
		/* Mark the system as running again in the low-vector state area */
		bcopy(((const void *)running_signature), (void *)(IOS_STATE), IOS_STATE_SIZE);
	}

	cpu_data_ptr->cpu_reset_handler = resume_idle_cpu_paddr;
	clean_dcache((vm_offset_t)cpu_data_ptr, sizeof(cpu_data_t), FALSE);
}

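/*
 *	Routine:	machine_track_platform_idle
 *	Function:	Track how many CPUs are currently in platform idle
 *			(cpu_idle_count).
 */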
void
machine_track_platform_idle(boolean_t entry)
{
	if (entry)
		(void)__c11_atomic_fetch_add(&cpu_idle_count, 1, __ATOMIC_RELAXED);
	else
		(void)__c11_atomic_fetch_sub(&cpu_idle_count, 1, __ATOMIC_RELAXED);
}