/*
 * Copyright (c) 2007-2016 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * File: arm/cpu.c
 *
 * cpu specific routines
 */

#include <kern/kalloc.h>
#include <kern/machine.h>
#include <kern/cpu_number.h>
#include <kern/thread.h>
#include <kern/timer_queue.h>
#include <arm/cpu_data.h>
#include <arm/cpuid.h>
#include <arm/caches_internal.h>
#include <arm/cpu_data_internal.h>
#include <arm/cpu_internal.h>
#include <arm/misc_protos.h>
#include <arm/machine_cpu.h>
#include <arm/rtclock.h>
#include <arm/proc_reg.h>
#include <mach/processor_info.h>
#include <vm/pmap.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <pexpert/arm/board_config.h>
#include <pexpert/arm/protos.h>
#include <sys/kdebug.h>

#include <machine/atomic.h>

#if KPC
#include <kern/kpc.h>
#endif

extern unsigned int resume_idle_cpu;
extern unsigned int start_cpu;

unsigned int start_cpu_paddr;

extern boolean_t idle_enable;
extern unsigned int real_ncpus;
extern uint64_t wake_abstime;

extern void* wfi_inst;
unsigned wfi_fast = 1;
unsigned patch_to_nop = 0xe1a00000;

void *LowExceptionVectorsAddr;
#define IOS_STATE       (((vm_offset_t)LowExceptionVectorsAddr + 0x80))
#define IOS_STATE_SIZE  (0x08UL)
static const uint8_t suspend_signature[] = {'X', 'S', 'O', 'M', 'P', 'S', 'U', 'S'};
static const uint8_t running_signature[] = {'X', 'S', 'O', 'M', 'N', 'N', 'U', 'R'};

/*
 *  Routine:    cpu_bootstrap
 *  Function:   Early CPU bootstrap; nothing is required here, so this is
 *              an empty stub.
 */
void
cpu_bootstrap(void)
{
}

/*
 *  Routine:    cpu_sleep
 *  Function:   Prepare the current CPU for sleep: switch to the kernel
 *              pmap, point the reset handler at start_cpu, mark the CPU
 *              as sleeping, clean the data cache to the point of
 *              coherency, and let the platform expert quiesce the CPU.
 */
void
cpu_sleep(void)
{
    cpu_data_t *cpu_data_ptr = getCpuDatap();
    pmap_switch_user_ttb(kernel_pmap);
    cpu_data_ptr->cpu_active_thread = current_thread();
    cpu_data_ptr->cpu_reset_handler = (vm_offset_t) start_cpu_paddr;
    cpu_data_ptr->cpu_flags |= SleepState;
    cpu_data_ptr->cpu_user_debug = NULL;

    CleanPoC_Dcache();

    PE_cpu_machine_quiesce(cpu_data_ptr->cpu_id);
}

_Atomic uint32_t cpu_idle_count = 0;

/*
 *  Routine:    cpu_idle
 *  Function:   Low-level idle loop: notify the registered idle handler,
 *              reprogram the idle timer if a new timeout was requested,
 *              then wait for an interrupt (WFI) before leaving idle.
 */
void __attribute__((noreturn))
cpu_idle(void)
{
    cpu_data_t *cpu_data_ptr = getCpuDatap();
    uint64_t new_idle_timeout_ticks = 0x0ULL, lastPop;

    if ((!idle_enable) || (cpu_data_ptr->cpu_signal & SIGPdisabled)) {
        Idle_load_context();
    }
    if (!SetIdlePop()) {
        Idle_load_context();
    }
    lastPop = cpu_data_ptr->rtcPop;

    pmap_switch_user_ttb(kernel_pmap);
    cpu_data_ptr->cpu_active_thread = current_thread();
    if (cpu_data_ptr->cpu_user_debug) {
        arm_debug_set(NULL);
    }
    cpu_data_ptr->cpu_user_debug = NULL;

    if (cpu_data_ptr->cpu_idle_notify) {
        ((processor_idle_t) cpu_data_ptr->cpu_idle_notify)(cpu_data_ptr->cpu_id, TRUE, &new_idle_timeout_ticks);
    }

    if (cpu_data_ptr->idle_timer_notify != 0) {
        if (new_idle_timeout_ticks == 0x0ULL) {
            /* turn off the idle timer */
            cpu_data_ptr->idle_timer_deadline = 0x0ULL;
        } else {
            /* set the new idle timeout */
            clock_absolutetime_interval_to_deadline(new_idle_timeout_ticks, &cpu_data_ptr->idle_timer_deadline);
        }
        timer_resync_deadlines();
        if (cpu_data_ptr->rtcPop != lastPop) {
            SetIdlePop();
        }
    }

#if KPC
    kpc_idle();
#endif

    platform_cache_idle_enter();
    cpu_idle_wfi((boolean_t) wfi_fast);
    platform_cache_idle_exit();

    ClearIdlePop(TRUE);
    cpu_idle_exit(FALSE);
}

/*
 *  Routine:    cpu_idle_exit
 *  Function:   Leave the idle state: restore the active thread's pmap,
 *              notify the registered idle handler, update the idle timer
 *              deadline, and reload the idle context.
 */
void
cpu_idle_exit(boolean_t from_reset __unused)
{
    uint64_t new_idle_timeout_ticks = 0x0ULL;
    cpu_data_t *cpu_data_ptr = getCpuDatap();

#if KPC
    kpc_idle_exit();
#endif

    pmap_set_pmap(cpu_data_ptr->cpu_active_thread->map->pmap, current_thread());

    if (cpu_data_ptr->cpu_idle_notify) {
        ((processor_idle_t) cpu_data_ptr->cpu_idle_notify)(cpu_data_ptr->cpu_id, FALSE, &new_idle_timeout_ticks);
    }

    if (cpu_data_ptr->idle_timer_notify != 0) {
        if (new_idle_timeout_ticks == 0x0ULL) {
            /* turn off the idle timer */
            cpu_data_ptr->idle_timer_deadline = 0x0ULL;
        } else {
            /* set the new idle timeout */
            clock_absolutetime_interval_to_deadline(new_idle_timeout_ticks, &cpu_data_ptr->idle_timer_deadline);
        }
        timer_resync_deadlines();
    }

    Idle_load_context();
}

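/*
 *  Routine:    cpu_init
 *  Function:   Per-CPU initialization: derive the Mach cpu_type/cpu_subtype
 *              from the CPUID information, initialize the rtclock timer
 *              queue, clear the wake statistics, and mark the CPU running.
 */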
void
cpu_init(void)
{
    cpu_data_t *cdp = getCpuDatap();
    arm_cpu_info_t *cpu_info_p;

    if (cdp->cpu_type != CPU_TYPE_ARM) {
        cdp->cpu_type = CPU_TYPE_ARM;

        timer_call_queue_init(&cdp->rtclock_timer.queue);
        cdp->rtclock_timer.deadline = EndOfAllTime;

        if (cdp == &BootCpuData) {
            do_cpuid();
            do_cacheid();
            do_mvfpid();
        } else {
            /*
             * We initialize non-boot CPUs here; the boot CPU is
             * dealt with as part of pmap_bootstrap.
             */
            pmap_cpu_data_init();
        }
        /* ARM_SMP: Assuming identical cpu */
        do_debugid();

        cpu_info_p = cpuid_info();

        /* switch based on CPU's reported architecture */
        switch (cpu_info_p->arm_info.arm_arch) {
        case CPU_ARCH_ARMv4T:
        case CPU_ARCH_ARMv5T:
            cdp->cpu_subtype = CPU_SUBTYPE_ARM_V4T;
            break;
        case CPU_ARCH_ARMv5TE:
        case CPU_ARCH_ARMv5TEJ:
            if (cpu_info_p->arm_info.arm_implementor == CPU_VID_INTEL) {
                cdp->cpu_subtype = CPU_SUBTYPE_ARM_XSCALE;
            } else {
                cdp->cpu_subtype = CPU_SUBTYPE_ARM_V5TEJ;
            }
            break;
        case CPU_ARCH_ARMv6:
            cdp->cpu_subtype = CPU_SUBTYPE_ARM_V6;
            break;
        case CPU_ARCH_ARMv7:
            cdp->cpu_subtype = CPU_SUBTYPE_ARM_V7;
            break;
        case CPU_ARCH_ARMv7f:
            cdp->cpu_subtype = CPU_SUBTYPE_ARM_V7F;
            break;
        case CPU_ARCH_ARMv7s:
            cdp->cpu_subtype = CPU_SUBTYPE_ARM_V7S;
            break;
        case CPU_ARCH_ARMv7k:
            cdp->cpu_subtype = CPU_SUBTYPE_ARM_V7K;
            break;
        default:
            cdp->cpu_subtype = CPU_SUBTYPE_ARM_ALL;
            break;
        }

        cdp->cpu_threadtype = CPU_THREADTYPE_NONE;
    }
    cdp->cpu_stat.irq_ex_cnt_wake = 0;
    cdp->cpu_stat.ipi_cnt_wake = 0;
    cdp->cpu_stat.timer_cnt_wake = 0;
    cdp->cpu_running = TRUE;
    cdp->cpu_sleep_token_last = cdp->cpu_sleep_token;
    cdp->cpu_sleep_token = 0x0UL;
}

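/*
 *  Routine:    cpu_stack_alloc
 *  Function:   Allocate the per-CPU IRQ and FIQ interrupt stacks, each
 *              bracketed by guard pages.
 */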
void
cpu_stack_alloc(cpu_data_t *cpu_data_ptr)
{
    vm_offset_t irq_stack = 0;
    vm_offset_t fiq_stack = 0;

    kern_return_t kr = kernel_memory_allocate(kernel_map, &irq_stack,
        INTSTACK_SIZE + (2 * PAGE_SIZE),
        PAGE_MASK,
        KMA_GUARD_FIRST | KMA_GUARD_LAST | KMA_KSTACK | KMA_KOBJECT,
        VM_KERN_MEMORY_STACK);
    if (kr != KERN_SUCCESS) {
        panic("Unable to allocate cpu interrupt stack\n");
    }

    cpu_data_ptr->intstack_top = irq_stack + PAGE_SIZE + INTSTACK_SIZE;
    cpu_data_ptr->istackptr = cpu_data_ptr->intstack_top;

    kr = kernel_memory_allocate(kernel_map, &fiq_stack,
        FIQSTACK_SIZE + (2 * PAGE_SIZE),
        PAGE_MASK,
        KMA_GUARD_FIRST | KMA_GUARD_LAST | KMA_KSTACK | KMA_KOBJECT,
        VM_KERN_MEMORY_STACK);
    if (kr != KERN_SUCCESS) {
        panic("Unable to allocate cpu exception stack\n");
    }

    cpu_data_ptr->fiqstack_top = fiq_stack + PAGE_SIZE + FIQSTACK_SIZE;
    cpu_data_ptr->fiqstackptr = cpu_data_ptr->fiqstack_top;
}

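/*
 *  Routine:    cpu_data_free
 *  Function:   Release a non-boot CPU's interrupt stacks and cpu_data_t,
 *              and unpublish its CpuDataEntries slot.
 */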
void
cpu_data_free(cpu_data_t *cpu_data_ptr)
{
    if ((cpu_data_ptr == NULL) || (cpu_data_ptr == &BootCpuData)) {
        return;
    }

    cpu_processor_free(cpu_data_ptr->cpu_processor);
    if (CpuDataEntries[cpu_data_ptr->cpu_number].cpu_data_vaddr == cpu_data_ptr) {
        OSDecrementAtomic((SInt32*)&real_ncpus);
        CpuDataEntries[cpu_data_ptr->cpu_number].cpu_data_vaddr = NULL;
        CpuDataEntries[cpu_data_ptr->cpu_number].cpu_data_paddr = 0;
        __builtin_arm_dmb(DMB_ISH); // Ensure prior stores to cpu array are visible
    }
    (kfree)((void *)(cpu_data_ptr->intstack_top - INTSTACK_SIZE), INTSTACK_SIZE);
    (kfree)((void *)(cpu_data_ptr->fiqstack_top - FIQSTACK_SIZE), FIQSTACK_SIZE);
    kmem_free(kernel_map, (vm_offset_t)cpu_data_ptr, sizeof(cpu_data_t));
}

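/*
 *  Routine:    cpu_data_init
 *  Function:   Initialize a cpu_data_t to its default state before the
 *              CPU is registered and started.
 */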
void
cpu_data_init(cpu_data_t *cpu_data_ptr)
{
    uint32_t i = 0;

    cpu_data_ptr->cpu_flags = 0;
#if __arm__
    cpu_data_ptr->cpu_exc_vectors = (vm_offset_t)&ExceptionVectorsTable;
#endif
    cpu_data_ptr->interrupts_enabled = 0;
    cpu_data_ptr->cpu_int_state = 0;
    cpu_data_ptr->cpu_pending_ast = AST_NONE;
    cpu_data_ptr->cpu_cache_dispatch = (void *) 0;
    cpu_data_ptr->rtcPop = EndOfAllTime;
    cpu_data_ptr->rtclock_datap = &RTClockData;
    cpu_data_ptr->cpu_user_debug = NULL;
    cpu_data_ptr->cpu_base_timebase_low = 0;
    cpu_data_ptr->cpu_base_timebase_high = 0;
    cpu_data_ptr->cpu_idle_notify = (void *) 0;
    cpu_data_ptr->cpu_idle_latency = 0x0ULL;
    cpu_data_ptr->cpu_idle_pop = 0x0ULL;
    cpu_data_ptr->cpu_reset_type = 0x0UL;
    cpu_data_ptr->cpu_reset_handler = 0x0UL;
    cpu_data_ptr->cpu_reset_assist = 0x0UL;
    cpu_data_ptr->cpu_regmap_paddr = 0x0ULL;
    cpu_data_ptr->cpu_phys_id = 0x0UL;
    cpu_data_ptr->cpu_l2_access_penalty = 0;
    cpu_data_ptr->cpu_cluster_type = CLUSTER_TYPE_SMP;
    cpu_data_ptr->cpu_cluster_id = 0;
    cpu_data_ptr->cpu_l2_id = 0;
    cpu_data_ptr->cpu_l2_size = 0;
    cpu_data_ptr->cpu_l3_id = 0;
    cpu_data_ptr->cpu_l3_size = 0;

    cpu_data_ptr->cpu_signal = SIGPdisabled;

    cpu_data_ptr->cpu_get_fiq_handler = NULL;
    cpu_data_ptr->cpu_tbd_hardware_addr = NULL;
    cpu_data_ptr->cpu_tbd_hardware_val = NULL;
    cpu_data_ptr->cpu_get_decrementer_func = NULL;
    cpu_data_ptr->cpu_set_decrementer_func = NULL;
    cpu_data_ptr->cpu_sleep_token = ARM_CPU_ON_SLEEP_PATH;
    cpu_data_ptr->cpu_sleep_token_last = 0x00000000UL;
    cpu_data_ptr->cpu_xcall_p0 = NULL;
    cpu_data_ptr->cpu_xcall_p1 = NULL;
    cpu_data_ptr->cpu_imm_xcall_p0 = NULL;
    cpu_data_ptr->cpu_imm_xcall_p1 = NULL;

#if __ARM_SMP__ && defined(ARMA7)
    cpu_data_ptr->cpu_CLWFlush_req = 0x0ULL;
    cpu_data_ptr->cpu_CLWFlush_last = 0x0ULL;
    cpu_data_ptr->cpu_CLWClean_req = 0x0ULL;
    cpu_data_ptr->cpu_CLWClean_last = 0x0ULL;
    cpu_data_ptr->cpu_CLW_active = 0x1UL;
#endif

    pmap_cpu_data_t * pmap_cpu_data_ptr = &cpu_data_ptr->cpu_pmap_cpu_data;

    pmap_cpu_data_ptr->cpu_user_pmap = (struct pmap *) NULL;
    pmap_cpu_data_ptr->cpu_user_pmap_stamp = 0;
    pmap_cpu_data_ptr->cpu_number = PMAP_INVALID_CPU_NUM;

    for (i = 0; i < (sizeof(pmap_cpu_data_ptr->cpu_asid_high_bits) / sizeof(*pmap_cpu_data_ptr->cpu_asid_high_bits)); i++) {
        pmap_cpu_data_ptr->cpu_asid_high_bits[i] = 0;
    }
    cpu_data_ptr->halt_status = CPU_NOT_HALTED;
}

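/*
 *  Routine:    cpu_data_register
 *  Function:   Assign the next logical cpu number and publish the CPU's
 *              data (virtual and physical addresses) in CpuDataEntries.
 */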
kern_return_t
cpu_data_register(cpu_data_t *cpu_data_ptr)
{
    int cpu;

    cpu = OSIncrementAtomic((SInt32*)&real_ncpus);
    if (real_ncpus > MAX_CPUS) {
        return KERN_FAILURE;
    }

    cpu_data_ptr->cpu_number = cpu;
    __builtin_arm_dmb(DMB_ISH); // Ensure prior stores to cpu data are visible
    CpuDataEntries[cpu].cpu_data_vaddr = cpu_data_ptr;
    CpuDataEntries[cpu].cpu_data_paddr = (void *)ml_vtophys((vm_offset_t)cpu_data_ptr);
    return KERN_SUCCESS;
}

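/*
 *  Routine:    cpu_start
 *  Function:   Start a secondary CPU: point it at the reset handler, give
 *              it its first thread, flush its cpu data to memory, and ask
 *              the platform expert to power it on.
 */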
kern_return_t
cpu_start(int cpu)
{
    kprintf("cpu_start() cpu: %d\n", cpu);
    if (cpu == cpu_number()) {
        cpu_machine_init();
        return KERN_SUCCESS;
    } else {
#if __ARM_SMP__
        cpu_data_t *cpu_data_ptr;
        thread_t first_thread;

        cpu_data_ptr = CpuDataEntries[cpu].cpu_data_vaddr;
        cpu_data_ptr->cpu_reset_handler = (vm_offset_t) start_cpu_paddr;

        cpu_data_ptr->cpu_pmap_cpu_data.cpu_user_pmap = NULL;

        if (cpu_data_ptr->cpu_processor->startup_thread != THREAD_NULL) {
            first_thread = cpu_data_ptr->cpu_processor->startup_thread;
        } else {
            first_thread = cpu_data_ptr->cpu_processor->idle_thread;
        }
        cpu_data_ptr->cpu_active_thread = first_thread;
        first_thread->machine.CpuDatap = cpu_data_ptr;

        flush_dcache((vm_offset_t)&CpuDataEntries[cpu], sizeof(cpu_data_entry_t), FALSE);
        flush_dcache((vm_offset_t)cpu_data_ptr, sizeof(cpu_data_t), FALSE);
        (void) PE_cpu_start(cpu_data_ptr->cpu_id, (vm_offset_t)NULL, (vm_offset_t)NULL);
        return KERN_SUCCESS;
#else
        return KERN_FAILURE;
#endif
    }
}

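/*
 *  Routine:    cpu_timebase_init
 *  Function:   Wire up the per-CPU timebase and decrementer accessors,
 *              zero the local timebase, and load the saved base timebase.
 */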
void
cpu_timebase_init(boolean_t from_boot __unused)
{
    cpu_data_t *cdp = getCpuDatap();

    if (cdp->cpu_get_fiq_handler == NULL) {
        cdp->cpu_get_fiq_handler = rtclock_timebase_func.tbd_fiq_handler;
        cdp->cpu_get_decrementer_func = rtclock_timebase_func.tbd_get_decrementer;
        cdp->cpu_set_decrementer_func = rtclock_timebase_func.tbd_set_decrementer;
        cdp->cpu_tbd_hardware_addr = (void *)rtclock_timebase_addr;
        cdp->cpu_tbd_hardware_val = (void *)rtclock_timebase_val;
    }
    cdp->cpu_decrementer = 0x7FFFFFFFUL;
    cdp->cpu_timebase_low = 0x0UL;
    cdp->cpu_timebase_high = 0x0UL;

#if __arm__ && (__BIGGEST_ALIGNMENT__ > 4)
    /* For the newer ARMv7k ABI where 64-bit types are 64-bit aligned, but pointers
     * are 32-bit. */
    cdp->cpu_base_timebase_low = rtclock_base_abstime_low;
    cdp->cpu_base_timebase_high = rtclock_base_abstime_high;
#else
    *((uint64_t *) &cdp->cpu_base_timebase_low) = rtclock_base_abstime;
#endif
}

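/*
 *  Routine:    ml_arm_sleep
 *  Function:   Final per-CPU step of system sleep. The boot CPU waits for
 *              the secondaries to reach the sleep path, records the
 *              timebase used to fix up abstime on resume, shuts the caches
 *              down, and writes the suspend signature; every CPU then
 *              spins (in WFE where enabled) until power is removed.
 */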
__attribute__((noreturn))
void
ml_arm_sleep(void)
{
    cpu_data_t *cpu_data_ptr = getCpuDatap();

    if (cpu_data_ptr == &BootCpuData) {
        cpu_data_t *target_cdp;
        unsigned int cpu;

        for (cpu = 0; cpu < MAX_CPUS; cpu++) {
            target_cdp = (cpu_data_t *)CpuDataEntries[cpu].cpu_data_vaddr;
            if (target_cdp == (cpu_data_t *)NULL) {
                break;
            }

            if (target_cdp == cpu_data_ptr) {
                continue;
            }

            while (target_cdp->cpu_sleep_token != ARM_CPU_ON_SLEEP_PATH) {
                ;
            }
        }

        /* Now that the other cores have entered the sleep path, set
         * the abstime fixup we'll use when we resume.*/
        rtclock_base_abstime = ml_get_timebase();
        wake_abstime = rtclock_base_abstime;
    } else {
        platform_cache_disable();
        CleanPoU_Dcache();
    }
    cpu_data_ptr->cpu_sleep_token = ARM_CPU_ON_SLEEP_PATH;
#if __ARM_SMP__ && defined(ARMA7)
    cpu_data_ptr->cpu_CLWFlush_req = 0;
    cpu_data_ptr->cpu_CLWClean_req = 0;
    __builtin_arm_dmb(DMB_ISH);
    cpu_data_ptr->cpu_CLW_active = 0;
#endif
    if (cpu_data_ptr == &BootCpuData) {
        platform_cache_disable();
        platform_cache_shutdown();
        bcopy((const void *)suspend_signature, (void *)(IOS_STATE), IOS_STATE_SIZE);
    } else {
        CleanPoC_DcacheRegion((vm_offset_t) cpu_data_ptr, sizeof(cpu_data_t));
    }

    __builtin_arm_dsb(DSB_SY);
    while (TRUE) {
#if __ARM_ENABLE_WFE_
        __builtin_arm_wfe();
#endif
    } /* Spin */
}

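/*
 *  Routine:    cpu_machine_idle_init
 *  Function:   Set up the idle/sleep machinery. On the boot path this
 *              honors the "jtag" and "wfi" boot-args, copies the low
 *              exception vectors into place, and stashes the physical
 *              addresses needed by the reset handler; every call installs
 *              the resume handler and, on the boot CPU, the running
 *              signature.
 */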
void
cpu_machine_idle_init(boolean_t from_boot)
{
    static const unsigned int *BootArgs_paddr = (unsigned int *)NULL;
    static const unsigned int *CpuDataEntries_paddr = (unsigned int *)NULL;
    static unsigned int resume_idle_cpu_paddr = (unsigned int)NULL;
    cpu_data_t *cpu_data_ptr = getCpuDatap();

    if (from_boot) {
        unsigned int jtag = 0;
        unsigned int wfi;

        if (PE_parse_boot_argn("jtag", &jtag, sizeof(jtag))) {
            if (jtag != 0) {
                idle_enable = FALSE;
            } else {
                idle_enable = TRUE;
            }
        } else {
            idle_enable = TRUE;
        }

        if (!PE_parse_boot_argn("wfi", &wfi, sizeof(wfi))) {
            wfi = 1;
        }

        if (wfi == 0) {
            bcopy_phys((addr64_t)ml_static_vtop((vm_offset_t)&patch_to_nop),
                (addr64_t)ml_static_vtop((vm_offset_t)&wfi_inst), sizeof(unsigned));
        }
        if (wfi == 2) {
            wfi_fast = 0;
        }

        LowExceptionVectorsAddr = (void *)ml_io_map(ml_vtophys((vm_offset_t)gPhysBase), PAGE_SIZE);

        /* Copy Exception Vectors low, but don't touch the sleep token */
        bcopy((void *)&ExceptionLowVectorsBase, (void *)LowExceptionVectorsAddr, 0x90);
        bcopy(((void *)(((vm_offset_t)&ExceptionLowVectorsBase) + 0xA0)), ((void *)(((vm_offset_t)LowExceptionVectorsAddr) + 0xA0)), ARM_PGBYTES - 0xA0);

        start_cpu_paddr = ml_static_vtop((vm_offset_t)&start_cpu);

        BootArgs_paddr = (unsigned int *)ml_static_vtop((vm_offset_t)BootArgs);
        bcopy_phys((addr64_t)ml_static_vtop((vm_offset_t)&BootArgs_paddr),
            (addr64_t)((unsigned int)(gPhysBase) +
            ((unsigned int)&(ResetHandlerData.boot_args) - (unsigned int)&ExceptionLowVectorsBase)),
            4);

        CpuDataEntries_paddr = (unsigned int *)ml_static_vtop((vm_offset_t)CpuDataEntries);
        bcopy_phys((addr64_t)ml_static_vtop((vm_offset_t)&CpuDataEntries_paddr),
            (addr64_t)((unsigned int)(gPhysBase) +
            ((unsigned int)&(ResetHandlerData.cpu_data_entries) - (unsigned int)&ExceptionLowVectorsBase)),
            4);

        CleanPoC_DcacheRegion((vm_offset_t) phystokv(gPhysBase), PAGE_SIZE);

        resume_idle_cpu_paddr = (unsigned int)ml_static_vtop((vm_offset_t)&resume_idle_cpu);
    }

    if (cpu_data_ptr == &BootCpuData) {
        bcopy(((const void *)running_signature), (void *)(IOS_STATE), IOS_STATE_SIZE);
    }

    cpu_data_ptr->cpu_reset_handler = resume_idle_cpu_paddr;
    clean_dcache((vm_offset_t)cpu_data_ptr, sizeof(cpu_data_t), FALSE);
}

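/*
 *  Routine:    machine_track_platform_idle
 *  Function:   Atomically track how many CPUs are currently in the
 *              platform idle state.
 */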
void
machine_track_platform_idle(boolean_t entry)
{
    if (entry) {
        os_atomic_inc(&cpu_idle_count, relaxed);
    } else {
        os_atomic_dec(&cpu_idle_count, relaxed);
    }
}