/*
 * Copyright (c) 2017-2019 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * File: arm/cpu_common.c
 *
 * cpu routines common to all supported arm variants
 */

#include <kern/kalloc.h>
#include <kern/machine.h>
#include <kern/cpu_number.h>
#include <kern/thread.h>
#include <kern/timer_queue.h>
#include <arm/cpu_data.h>
#include <arm/cpuid.h>
#include <arm/caches_internal.h>
#include <arm/cpu_data_internal.h>
#include <arm/cpu_internal.h>
#include <arm/misc_protos.h>
#include <arm/machine_cpu.h>
#include <arm/rtclock.h>
#include <mach/processor_info.h>
#include <machine/atomic.h>
#include <machine/config.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <pexpert/arm/protos.h>
#include <pexpert/device_tree.h>
#include <sys/kdebug.h>
#include <arm/machine_routines.h>
#include <libkern/OSAtomic.h>

#if KPERF
void kperf_signal_handler(unsigned int cpu_number);
#endif

cpu_data_t BootCpuData;
cpu_data_entry_t CpuDataEntries[MAX_CPUS];

struct processor BootProcessor;

unsigned int real_ncpus = 1;
boolean_t idle_enable = FALSE;
uint64_t wake_abstime = 0x0ULL;

#if defined(HAS_IPI)
extern unsigned int gFastIPI;
#endif /* defined(HAS_IPI) */

cpu_data_t *
cpu_datap(int cpu)
{
    assert(cpu < MAX_CPUS);
    return CpuDataEntries[cpu].cpu_data_vaddr;
}

kern_return_t
cpu_control(int slot_num,
    processor_info_t info,
    unsigned int count)
{
    printf("cpu_control(%d,%p,%d) not implemented\n",
        slot_num, info, count);
    return KERN_FAILURE;
}

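/*
 * Report how many natural-sized integers the caller must supply for the given
 * processor-info flavor; only the per-CPU statistics flavors are supported.
 */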
kern_return_t
cpu_info_count(processor_flavor_t flavor,
    unsigned int *count)
{
    switch (flavor) {
    case PROCESSOR_CPU_STAT:
        *count = PROCESSOR_CPU_STAT_COUNT;
        return KERN_SUCCESS;

    case PROCESSOR_CPU_STAT64:
        *count = PROCESSOR_CPU_STAT64_COUNT;
        return KERN_SUCCESS;

    default:
        *count = 0;
        return KERN_FAILURE;
    }
}

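/*
 * Copy the per-CPU event counters for 'slot_num' out to the caller, either as
 * 32-bit values (PROCESSOR_CPU_STAT) or 64-bit values (PROCESSOR_CPU_STAT64).
 */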
kern_return_t
cpu_info(processor_flavor_t flavor, int slot_num, processor_info_t info,
    unsigned int *count)
{
    cpu_data_t *cpu_data_ptr = CpuDataEntries[slot_num].cpu_data_vaddr;

    switch (flavor) {
    case PROCESSOR_CPU_STAT:
    {
        if (*count < PROCESSOR_CPU_STAT_COUNT) {
            return KERN_FAILURE;
        }

        processor_cpu_stat_t cpu_stat = (processor_cpu_stat_t)info;
        cpu_stat->irq_ex_cnt = (uint32_t)cpu_data_ptr->cpu_stat.irq_ex_cnt;
        cpu_stat->ipi_cnt = (uint32_t)cpu_data_ptr->cpu_stat.ipi_cnt;
        cpu_stat->timer_cnt = (uint32_t)cpu_data_ptr->cpu_stat.timer_cnt;
        cpu_stat->undef_ex_cnt = (uint32_t)cpu_data_ptr->cpu_stat.undef_ex_cnt;
        cpu_stat->unaligned_cnt = (uint32_t)cpu_data_ptr->cpu_stat.unaligned_cnt;
        cpu_stat->vfp_cnt = (uint32_t)cpu_data_ptr->cpu_stat.vfp_cnt;
        cpu_stat->vfp_shortv_cnt = 0;
        cpu_stat->data_ex_cnt = (uint32_t)cpu_data_ptr->cpu_stat.data_ex_cnt;
        cpu_stat->instr_ex_cnt = (uint32_t)cpu_data_ptr->cpu_stat.instr_ex_cnt;

        *count = PROCESSOR_CPU_STAT_COUNT;

        return KERN_SUCCESS;
    }

    case PROCESSOR_CPU_STAT64:
    {
        if (*count < PROCESSOR_CPU_STAT64_COUNT) {
            return KERN_FAILURE;
        }

        processor_cpu_stat64_t cpu_stat = (processor_cpu_stat64_t)info;
        cpu_stat->irq_ex_cnt = cpu_data_ptr->cpu_stat.irq_ex_cnt;
        cpu_stat->ipi_cnt = cpu_data_ptr->cpu_stat.ipi_cnt;
        cpu_stat->timer_cnt = cpu_data_ptr->cpu_stat.timer_cnt;
        cpu_stat->undef_ex_cnt = cpu_data_ptr->cpu_stat.undef_ex_cnt;
        cpu_stat->unaligned_cnt = cpu_data_ptr->cpu_stat.unaligned_cnt;
        cpu_stat->vfp_cnt = cpu_data_ptr->cpu_stat.vfp_cnt;
        cpu_stat->vfp_shortv_cnt = 0;
        cpu_stat->data_ex_cnt = cpu_data_ptr->cpu_stat.data_ex_cnt;
        cpu_stat->instr_ex_cnt = cpu_data_ptr->cpu_stat.instr_ex_cnt;
#if MONOTONIC
        cpu_stat->pmi_cnt = cpu_data_ptr->cpu_monotonic.mtc_npmis;
#endif /* MONOTONIC */

        *count = PROCESSOR_CPU_STAT64_COUNT;

        return KERN_SUCCESS;
    }

    default:
        return KERN_FAILURE;
    }
}

/*
 * Routine: cpu_doshutdown
 * Function: Invoke the given shutdown routine on the given processor.
 */
void
cpu_doshutdown(void (*doshutdown)(processor_t),
    processor_t processor)
{
    doshutdown(processor);
}

/*
 * Routine: cpu_idle_tickle
 * Function: Invoke this CPU's idle timer callback, if one is registered, and
 *    reprogram or cancel the idle timer deadline based on what it requests.
 */
void
cpu_idle_tickle(void)
{
    boolean_t intr;
    cpu_data_t *cpu_data_ptr;
    uint64_t new_idle_timeout_ticks = 0x0ULL;

    intr = ml_set_interrupts_enabled(FALSE);
    cpu_data_ptr = getCpuDatap();

    if (cpu_data_ptr->idle_timer_notify != (void *)NULL) {
        ((idle_timer_t)cpu_data_ptr->idle_timer_notify)(cpu_data_ptr->idle_timer_refcon, &new_idle_timeout_ticks);
        if (new_idle_timeout_ticks != 0x0ULL) {
            /* if a new idle timeout was requested set the new idle timer deadline */
            clock_absolutetime_interval_to_deadline(new_idle_timeout_ticks, &cpu_data_ptr->idle_timer_deadline);
        } else {
            /* turn off the idle timer */
            cpu_data_ptr->idle_timer_deadline = 0x0ULL;
        }
        timer_resync_deadlines();
    }
    (void) ml_set_interrupts_enabled(intr);
}

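/*
 * Drain any pending cross-call requests on this CPU: consume the
 * function/parameter pair published by cpu_signal_internal(), clear the
 * corresponding SIGPxcall/SIGPxcallImm bit, and invoke the function.
 */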
static void
cpu_handle_xcall(cpu_data_t *cpu_data_ptr)
{
    broadcastFunc xfunc;
    void *xparam;

    os_atomic_thread_fence(acquire);
    /* Come back around if cpu_signal_internal is running on another CPU and has just
     * added SIGPxcall to the pending mask, but hasn't yet assigned the call params. */
    if (cpu_data_ptr->cpu_xcall_p0 != NULL && cpu_data_ptr->cpu_xcall_p1 != NULL) {
        xfunc = cpu_data_ptr->cpu_xcall_p0;
        xparam = cpu_data_ptr->cpu_xcall_p1;
        cpu_data_ptr->cpu_xcall_p0 = NULL;
        cpu_data_ptr->cpu_xcall_p1 = NULL;
        os_atomic_thread_fence(acq_rel);
        os_atomic_andnot(&cpu_data_ptr->cpu_signal, SIGPxcall, relaxed);
        xfunc(xparam);
    }
    if (cpu_data_ptr->cpu_imm_xcall_p0 != NULL && cpu_data_ptr->cpu_imm_xcall_p1 != NULL) {
        xfunc = cpu_data_ptr->cpu_imm_xcall_p0;
        xparam = cpu_data_ptr->cpu_imm_xcall_p1;
        cpu_data_ptr->cpu_imm_xcall_p0 = NULL;
        cpu_data_ptr->cpu_imm_xcall_p1 = NULL;
        os_atomic_thread_fence(acq_rel);
        os_atomic_andnot(&cpu_data_ptr->cpu_signal, SIGPxcallImm, relaxed);
        xfunc(xparam);
    }
}

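/*
 * Common helper for cpu_broadcast_xcall() and cpu_broadcast_immediate_xcall():
 * signal every other started CPU with the given cross-call signal and
 * optionally invoke the function on the calling CPU as well.  When 'synch' is
 * non-NULL it is primed with the CPU count and the caller blocks until the
 * remaining count has been drained by the invoked function on each target CPU.
 * Returns the number of CPUs expected to run (or that already ran) 'func'.
 */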
static unsigned int
cpu_broadcast_xcall_internal(unsigned int signal,
    uint32_t *synch,
    boolean_t self_xcall,
    broadcastFunc func,
    void *parm)
{
    boolean_t intr;
    cpu_data_t *cpu_data_ptr;
    cpu_data_t *target_cpu_datap;
    unsigned int failsig;
    int cpu;
    int max_cpu = ml_get_max_cpu_number() + 1;

    intr = ml_set_interrupts_enabled(FALSE);
    cpu_data_ptr = getCpuDatap();

    failsig = 0;

    if (synch != NULL) {
        *synch = max_cpu;
        assert_wait((event_t)synch, THREAD_UNINT);
    }

    for (cpu = 0; cpu < max_cpu; cpu++) {
        target_cpu_datap = (cpu_data_t *)CpuDataEntries[cpu].cpu_data_vaddr;

        if (target_cpu_datap == cpu_data_ptr) {
            continue;
        }

        if ((target_cpu_datap == NULL) ||
            KERN_SUCCESS != cpu_signal(target_cpu_datap, signal, (void *)func, parm)) {
            failsig++;
        }
    }

    if (self_xcall) {
        func(parm);
    }

    (void) ml_set_interrupts_enabled(intr);

    if (synch != NULL) {
        if (os_atomic_sub(synch, (!self_xcall) ? failsig + 1 : failsig, relaxed) == 0) {
            clear_wait(current_thread(), THREAD_AWAKENED);
        } else {
            thread_block(THREAD_CONTINUE_NULL);
        }
    }

    if (!self_xcall) {
        return max_cpu - failsig - 1;
    } else {
        return max_cpu - failsig;
    }
}

unsigned int
cpu_broadcast_xcall(uint32_t *synch,
    boolean_t self_xcall,
    broadcastFunc func,
    void *parm)
{
    return cpu_broadcast_xcall_internal(SIGPxcall, synch, self_xcall, func, parm);
}

unsigned int
cpu_broadcast_immediate_xcall(uint32_t *synch,
    boolean_t self_xcall,
    broadcastFunc func,
    void *parm)
{
    return cpu_broadcast_xcall_internal(SIGPxcallImm, synch, self_xcall, func, parm);
}

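/*
 * Illustrative sketch (not compiled) of the broadcast contract: a caller that
 * passes a non-NULL 'synch' counter is expected to have its broadcast function
 * decrement that counter on every CPU and wake the waiter when it hits zero.
 * The names my_synch and my_flush_fn below are hypothetical.
 *
 *     static void
 *     my_flush_fn(void *arg)
 *     {
 *         uint32_t *synch = (uint32_t *)arg;
 *
 *         // ... per-CPU work; on remote CPUs this runs in IPI context ...
 *
 *         if (os_atomic_dec(synch, relaxed) == 0) {
 *             thread_wakeup((event_t)synch);
 *         }
 *     }
 *
 *     uint32_t my_synch;
 *     cpu_broadcast_xcall(&my_synch, TRUE, my_flush_fn, &my_synch);
 *
 * cpu_xcall()/cpu_immediate_xcall() below are the single-CPU equivalents.
 */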
static kern_return_t
cpu_xcall_internal(unsigned int signal, int cpu_number, broadcastFunc func, void *param)
{
    cpu_data_t *target_cpu_datap;

    if ((cpu_number < 0) || (cpu_number > ml_get_max_cpu_number())) {
        return KERN_INVALID_ARGUMENT;
    }

    if (func == NULL || param == NULL) {
        return KERN_INVALID_ARGUMENT;
    }

    target_cpu_datap = (cpu_data_t*)CpuDataEntries[cpu_number].cpu_data_vaddr;
    if (target_cpu_datap == NULL) {
        return KERN_INVALID_ARGUMENT;
    }

    return cpu_signal(target_cpu_datap, signal, (void*)func, param);
}

kern_return_t
cpu_xcall(int cpu_number, broadcastFunc func, void *param)
{
    return cpu_xcall_internal(SIGPxcall, cpu_number, func, param);
}

kern_return_t
cpu_immediate_xcall(int cpu_number, broadcastFunc func, void *param)
{
    return cpu_xcall_internal(SIGPxcallImm, cpu_number, func, param);
}

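/*
 * Post 'signal' in the target CPU's pending-signal mask and dispatch an IPI to
 * it (or a deferred wakeup IPI when 'defer' is TRUE; only SIGPnop may be
 * deferred).  For cross-call signals the function/parameter pair is published
 * before the IPI is sent, and any xcalls already pending against the calling
 * CPU are drained while we retry, to avoid deadlocking with a CPU that is
 * simultaneously trying to xcall us.
 */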
static kern_return_t
cpu_signal_internal(cpu_data_t *target_proc,
    unsigned int signal,
    void *p0,
    void *p1,
    boolean_t defer)
{
    unsigned int Check_SIGPdisabled;
    int current_signals;
    Boolean swap_success;
    boolean_t interruptible = ml_set_interrupts_enabled(FALSE);
    cpu_data_t *current_proc = getCpuDatap();

    /* We'll mandate that only IPIs meant to kick a core out of idle may ever be deferred. */
    if (defer) {
        assert(signal == SIGPnop);
    }

    if (current_proc != target_proc) {
        Check_SIGPdisabled = SIGPdisabled;
    } else {
        Check_SIGPdisabled = 0;
    }

    if ((signal == SIGPxcall) || (signal == SIGPxcallImm)) {
        do {
            current_signals = target_proc->cpu_signal;
            if ((current_signals & SIGPdisabled) == SIGPdisabled) {
                ml_set_interrupts_enabled(interruptible);
                return KERN_FAILURE;
            }
            swap_success = OSCompareAndSwap(current_signals & (~signal), current_signals | signal,
                &target_proc->cpu_signal);

            if (!swap_success && (signal == SIGPxcallImm) && (target_proc->cpu_signal & SIGPxcallImm)) {
                ml_set_interrupts_enabled(interruptible);
                return KERN_ALREADY_WAITING;
            }

            /* Drain pending xcalls on this cpu; the CPU we're trying to xcall may in turn
             * be trying to xcall us. Because we have interrupts disabled, that can deadlock,
             * so break the deadlock by draining pending xcalls. */
            if (!swap_success && (current_proc->cpu_signal & signal)) {
                cpu_handle_xcall(current_proc);
            }
        } while (!swap_success);

        if (signal == SIGPxcallImm) {
            target_proc->cpu_imm_xcall_p0 = p0;
            target_proc->cpu_imm_xcall_p1 = p1;
        } else {
            target_proc->cpu_xcall_p0 = p0;
            target_proc->cpu_xcall_p1 = p1;
        }
    } else {
        do {
            current_signals = target_proc->cpu_signal;
            if ((Check_SIGPdisabled != 0) && (current_signals & Check_SIGPdisabled) == SIGPdisabled) {
                ml_set_interrupts_enabled(interruptible);
                return KERN_FAILURE;
            }

            swap_success = OSCompareAndSwap(current_signals, current_signals | signal,
                &target_proc->cpu_signal);
        } while (!swap_success);
    }

    /*
     * Issue DSB here to guarantee: 1) prior stores to the pending signal mask and xcall params
     * will be visible to other cores when the IPI is dispatched, and 2) subsequent
     * instructions to signal the other cores will not execute until after the barrier.
     * DMB would be sufficient to guarantee 1) but not 2).
     */
    __builtin_arm_dsb(DSB_ISH);

    if (!(target_proc->cpu_signal & SIGPdisabled)) {
        if (defer) {
#if defined(HAS_IPI)
            if (gFastIPI) {
                ml_cpu_signal_deferred(target_proc->cpu_phys_id);
            } else {
                PE_cpu_signal_deferred(getCpuDatap()->cpu_id, target_proc->cpu_id);
            }
#else
            PE_cpu_signal_deferred(getCpuDatap()->cpu_id, target_proc->cpu_id);
#endif /* defined(HAS_IPI) */
        } else {
#if defined(HAS_IPI)
            if (gFastIPI) {
                ml_cpu_signal(target_proc->cpu_phys_id);
            } else {
                PE_cpu_signal(getCpuDatap()->cpu_id, target_proc->cpu_id);
            }
#else
            PE_cpu_signal(getCpuDatap()->cpu_id, target_proc->cpu_id);
#endif /* defined(HAS_IPI) */
        }
    }

    ml_set_interrupts_enabled(interruptible);
    return KERN_SUCCESS;
}

kern_return_t
cpu_signal(cpu_data_t *target_proc,
    unsigned int signal,
    void *p0,
    void *p1)
{
    return cpu_signal_internal(target_proc, signal, p0, p1, FALSE);
}

kern_return_t
cpu_signal_deferred(cpu_data_t *target_proc)
{
    return cpu_signal_internal(target_proc, SIGPnop, NULL, NULL, TRUE);
}

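/*
 * Retract a deferred wakeup IPI that was posted to 'target_proc' via
 * cpu_signal_deferred() but has not fired yet, provided the target is still
 * accepting signals.
 */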
void
cpu_signal_cancel(cpu_data_t *target_proc)
{
    /* TODO: Should we care about the state of a core as far as squashing deferred IPIs goes? */
    if (!(target_proc->cpu_signal & SIGPdisabled)) {
#if defined(HAS_IPI)
        if (gFastIPI) {
            ml_cpu_signal_retract(target_proc->cpu_phys_id);
        } else {
            PE_cpu_signal_cancel(getCpuDatap()->cpu_id, target_proc->cpu_id);
        }
#else
        PE_cpu_signal_cancel(getCpuDatap()->cpu_id, target_proc->cpu_id);
#endif /* defined(HAS_IPI) */
    }
}

void
cpu_signal_handler(void)
{
    cpu_signal_handler_internal(FALSE);
}

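/*
 * Core IPI handler: snapshot this CPU's pending-signal mask, update the
 * SIGPdisabled state as requested by 'disable_signal', then loop dispatching
 * each pending signal type until no enabled signals remain.
 */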
void
cpu_signal_handler_internal(boolean_t disable_signal)
{
    cpu_data_t *cpu_data_ptr = getCpuDatap();
    unsigned int cpu_signal;

    cpu_data_ptr->cpu_stat.ipi_cnt++;
    cpu_data_ptr->cpu_stat.ipi_cnt_wake++;

    SCHED_STATS_IPI(current_processor());

    cpu_signal = os_atomic_or(&cpu_data_ptr->cpu_signal, 0, relaxed);

    if ((!(cpu_signal & SIGPdisabled)) && (disable_signal == TRUE)) {
        os_atomic_or(&cpu_data_ptr->cpu_signal, SIGPdisabled, relaxed);
    } else if ((cpu_signal & SIGPdisabled) && (disable_signal == FALSE)) {
        os_atomic_andnot(&cpu_data_ptr->cpu_signal, SIGPdisabled, relaxed);
    }

    while (cpu_signal & ~SIGPdisabled) {
        if (cpu_signal & SIGPdec) {
            os_atomic_andnot(&cpu_data_ptr->cpu_signal, SIGPdec, relaxed);
            rtclock_intr(FALSE);
        }
#if KPERF
        if (cpu_signal & SIGPkptimer) {
            os_atomic_andnot(&cpu_data_ptr->cpu_signal, SIGPkptimer, relaxed);
            kperf_signal_handler((unsigned int)cpu_data_ptr->cpu_number);
        }
#endif
        if (cpu_signal & (SIGPxcall | SIGPxcallImm)) {
            cpu_handle_xcall(cpu_data_ptr);
        }
        if (cpu_signal & SIGPast) {
            os_atomic_andnot(&cpu_data_ptr->cpu_signal, SIGPast, relaxed);
            ast_check(cpu_data_ptr->cpu_processor);
        }
        if (cpu_signal & SIGPdebug) {
            os_atomic_andnot(&cpu_data_ptr->cpu_signal, SIGPdebug, relaxed);
            DebuggerXCall(cpu_data_ptr->cpu_int_state);
        }
#if __ARM_SMP__ && defined(ARMA7)
        if (cpu_signal & SIGPLWFlush) {
            os_atomic_andnot(&cpu_data_ptr->cpu_signal, SIGPLWFlush, relaxed);
            cache_xcall_handler(LWFlush);
        }
        if (cpu_signal & SIGPLWClean) {
            os_atomic_andnot(&cpu_data_ptr->cpu_signal, SIGPLWClean, relaxed);
            cache_xcall_handler(LWClean);
        }
#endif

        cpu_signal = os_atomic_or(&cpu_data_ptr->cpu_signal, 0, relaxed);
    }
}

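/*
 * Spin until the CPU being shut down has reached the sleep path, as indicated
 * by its cpu_sleep_token being set to ARM_CPU_ON_SLEEP_PATH.
 */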
void
cpu_exit_wait(int cpu)
{
    if (cpu != master_cpu) {
        cpu_data_t *cpu_data_ptr;

        cpu_data_ptr = CpuDataEntries[cpu].cpu_data_vaddr;
        while (!((*(volatile unsigned int*)&cpu_data_ptr->cpu_sleep_token) == ARM_CPU_ON_SLEEP_PATH)) {
        }
    }
}

boolean_t
cpu_can_exit(__unused int cpu)
{
    return TRUE;
}

void
cpu_machine_init(void)
{
    static boolean_t started = FALSE;
    cpu_data_t *cpu_data_ptr;

    cpu_data_ptr = getCpuDatap();
    started = ((cpu_data_ptr->cpu_flags & StartedState) == StartedState);
    if (cpu_data_ptr->cpu_cache_dispatch != (cache_dispatch_t) NULL) {
        platform_cache_init();
    }

    /* Note: this calls IOCPURunPlatformActiveActions when resuming on boot cpu */
    PE_cpu_machine_init(cpu_data_ptr->cpu_id, !started);

    cpu_data_ptr->cpu_flags |= StartedState;
    ml_init_interrupt();
}

processor_t
cpu_processor_alloc(boolean_t is_boot_cpu)
{
    processor_t proc;

    if (is_boot_cpu) {
        return &BootProcessor;
    }

    proc = kalloc(sizeof(*proc));
    if (!proc) {
        return NULL;
    }

    bzero((void *) proc, sizeof(*proc));
    return proc;
}

void
cpu_processor_free(processor_t proc)
{
    if (proc != NULL && proc != &BootProcessor) {
        kfree(proc, sizeof(*proc));
    }
}

processor_t
current_processor(void)
{
    return getCpuDatap()->cpu_processor;
}

processor_t
cpu_to_processor(int cpu)
{
    cpu_data_t *cpu_data = cpu_datap(cpu);
    if (cpu_data != NULL) {
        return cpu_data->cpu_processor;
    } else {
        return NULL;
    }
}

cpu_data_t *
processor_to_cpu_datap(processor_t processor)
{
    cpu_data_t *target_cpu_datap;

    assert(processor->cpu_id < MAX_CPUS);
    assert(CpuDataEntries[processor->cpu_id].cpu_data_vaddr != NULL);

    target_cpu_datap = (cpu_data_t*)CpuDataEntries[processor->cpu_id].cpu_data_vaddr;
    assert(target_cpu_datap->cpu_processor == processor);

    return target_cpu_datap;
}

cpu_data_t *
cpu_data_alloc(boolean_t is_boot_cpu)
{
    cpu_data_t *cpu_data_ptr = NULL;

    if (is_boot_cpu) {
        cpu_data_ptr = &BootCpuData;
    } else {
        if ((kmem_alloc(kernel_map, (vm_offset_t *)&cpu_data_ptr, sizeof(cpu_data_t), VM_KERN_MEMORY_CPU)) != KERN_SUCCESS) {
            goto cpu_data_alloc_error;
        }

        bzero((void *)cpu_data_ptr, sizeof(cpu_data_t));

        cpu_stack_alloc(cpu_data_ptr);
    }

    cpu_data_ptr->cpu_processor = cpu_processor_alloc(is_boot_cpu);
    if (cpu_data_ptr->cpu_processor == (struct processor *)NULL) {
        goto cpu_data_alloc_error;
    }

    return cpu_data_ptr;

cpu_data_alloc_error:
    panic("cpu_data_alloc() failed\n");
    return (cpu_data_t *)NULL;
}

ast_t *
ast_pending(void)
{
    return &getCpuDatap()->cpu_pending_ast;
}

cpu_type_t
slot_type(int slot_num)
{
    return cpu_datap(slot_num)->cpu_type;
}

cpu_subtype_t
slot_subtype(int slot_num)
{
    return cpu_datap(slot_num)->cpu_subtype;
}

cpu_threadtype_t
slot_threadtype(int slot_num)
{
    return cpu_datap(slot_num)->cpu_threadtype;
}

cpu_type_t
cpu_type(void)
{
    return getCpuDatap()->cpu_type;
}

cpu_subtype_t
cpu_subtype(void)
{
    return getCpuDatap()->cpu_subtype;
}

cpu_threadtype_t
cpu_threadtype(void)
{
    return getCpuDatap()->cpu_threadtype;
}

int
cpu_number(void)
{
    return getCpuDatap()->cpu_number;
}

uint64_t
ml_get_wake_timebase(void)
{
    return wake_abstime;
}