/*
 * Copyright (c) 2017-2019 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * File: arm/cpu_common.c
 *
 * cpu routines common to all supported arm variants
 */

#include <kern/kalloc.h>
#include <kern/machine.h>
#include <kern/cpu_number.h>
#include <kern/thread.h>
#include <kern/timer_queue.h>
#include <arm/cpu_data.h>
#include <arm/cpuid.h>
#include <arm/caches_internal.h>
#include <arm/cpu_data_internal.h>
#include <arm/cpu_internal.h>
#include <arm/misc_protos.h>
#include <arm/machine_cpu.h>
#include <arm/rtclock.h>
#include <mach/processor_info.h>
#include <machine/atomic.h>
#include <machine/config.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <pexpert/arm/protos.h>
#include <pexpert/device_tree.h>
#include <sys/kdebug.h>
#include <arm/machine_routines.h>
#include <libkern/OSAtomic.h>

#if KPERF
void kperf_signal_handler(unsigned int cpu_number);
#endif

cpu_data_t BootCpuData;
cpu_data_entry_t CpuDataEntries[MAX_CPUS];

struct processor BootProcessor;

unsigned int real_ncpus = 1;
boolean_t idle_enable = FALSE;
uint64_t wake_abstime = 0x0ULL;

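/*
 * Routine: cpu_datap
 * Function: Return the cpu_data_t for the given logical cpu number, or
 *   NULL if that entry has not been populated yet.
 */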
cpu_data_t *
cpu_datap(int cpu)
{
	assert(cpu < MAX_CPUS);
	return CpuDataEntries[cpu].cpu_data_vaddr;
}

kern_return_t
cpu_control(int slot_num,
    processor_info_t info,
    unsigned int count)
{
	printf("cpu_control(%d,%p,%d) not implemented\n",
	    slot_num, info, count);
	return KERN_FAILURE;
}

kern_return_t
cpu_info_count(processor_flavor_t flavor,
    unsigned int *count)
{
	switch (flavor) {
	case PROCESSOR_CPU_STAT:
		*count = PROCESSOR_CPU_STAT_COUNT;
		return KERN_SUCCESS;

	case PROCESSOR_CPU_STAT64:
		*count = PROCESSOR_CPU_STAT64_COUNT;
		return KERN_SUCCESS;

	default:
		*count = 0;
		return KERN_FAILURE;
	}
}

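/*
 * Routine: cpu_info
 * Function: Copy the per-CPU exception and interrupt counters for the
 *   given slot into the caller-supplied buffer, using the 32-bit or
 *   64-bit layout selected by the flavor.
 */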
kern_return_t
cpu_info(processor_flavor_t flavor, int slot_num, processor_info_t info,
    unsigned int *count)
{
	cpu_data_t *cpu_data_ptr = CpuDataEntries[slot_num].cpu_data_vaddr;

	switch (flavor) {
	case PROCESSOR_CPU_STAT:
	{
		if (*count < PROCESSOR_CPU_STAT_COUNT) {
			return KERN_FAILURE;
		}

		processor_cpu_stat_t cpu_stat = (processor_cpu_stat_t)info;
		cpu_stat->irq_ex_cnt = (uint32_t)cpu_data_ptr->cpu_stat.irq_ex_cnt;
		cpu_stat->ipi_cnt = (uint32_t)cpu_data_ptr->cpu_stat.ipi_cnt;
		cpu_stat->timer_cnt = (uint32_t)cpu_data_ptr->cpu_stat.timer_cnt;
		cpu_stat->undef_ex_cnt = (uint32_t)cpu_data_ptr->cpu_stat.undef_ex_cnt;
		cpu_stat->unaligned_cnt = (uint32_t)cpu_data_ptr->cpu_stat.unaligned_cnt;
		cpu_stat->vfp_cnt = (uint32_t)cpu_data_ptr->cpu_stat.vfp_cnt;
		cpu_stat->vfp_shortv_cnt = 0;
		cpu_stat->data_ex_cnt = (uint32_t)cpu_data_ptr->cpu_stat.data_ex_cnt;
		cpu_stat->instr_ex_cnt = (uint32_t)cpu_data_ptr->cpu_stat.instr_ex_cnt;

		*count = PROCESSOR_CPU_STAT_COUNT;

		return KERN_SUCCESS;
	}

	case PROCESSOR_CPU_STAT64:
	{
		if (*count < PROCESSOR_CPU_STAT64_COUNT) {
			return KERN_FAILURE;
		}

		processor_cpu_stat64_t cpu_stat = (processor_cpu_stat64_t)info;
		cpu_stat->irq_ex_cnt = cpu_data_ptr->cpu_stat.irq_ex_cnt;
		cpu_stat->ipi_cnt = cpu_data_ptr->cpu_stat.ipi_cnt;
		cpu_stat->timer_cnt = cpu_data_ptr->cpu_stat.timer_cnt;
		cpu_stat->undef_ex_cnt = cpu_data_ptr->cpu_stat.undef_ex_cnt;
		cpu_stat->unaligned_cnt = cpu_data_ptr->cpu_stat.unaligned_cnt;
		cpu_stat->vfp_cnt = cpu_data_ptr->cpu_stat.vfp_cnt;
		cpu_stat->vfp_shortv_cnt = 0;
		cpu_stat->data_ex_cnt = cpu_data_ptr->cpu_stat.data_ex_cnt;
		cpu_stat->instr_ex_cnt = cpu_data_ptr->cpu_stat.instr_ex_cnt;
#if MONOTONIC
		cpu_stat->pmi_cnt = cpu_data_ptr->cpu_monotonic.mtc_npmis;
#endif /* MONOTONIC */

		*count = PROCESSOR_CPU_STAT64_COUNT;

		return KERN_SUCCESS;
	}

	default:
		return KERN_FAILURE;
	}
}

/*
 * Routine: cpu_doshutdown
 * Function: Invoke the supplied shutdown continuation on the given
 *   processor.
 */
void
cpu_doshutdown(void (*doshutdown)(processor_t),
    processor_t processor)
{
	doshutdown(processor);
}

/*
 * Routine: cpu_idle_tickle
 * Function: Invoke the idle timer notify callback, if one is registered,
 *   and re-arm or cancel this CPU's idle timer deadline based on the
 *   timeout it returns.
 */
void
cpu_idle_tickle(void)
{
	boolean_t intr;
	cpu_data_t *cpu_data_ptr;
	uint64_t new_idle_timeout_ticks = 0x0ULL;

	intr = ml_set_interrupts_enabled(FALSE);
	cpu_data_ptr = getCpuDatap();

	if (cpu_data_ptr->idle_timer_notify != (void *)NULL) {
		((idle_timer_t)cpu_data_ptr->idle_timer_notify)(cpu_data_ptr->idle_timer_refcon, &new_idle_timeout_ticks);
		if (new_idle_timeout_ticks != 0x0ULL) {
			/* if a new idle timeout was requested set the new idle timer deadline */
			clock_absolutetime_interval_to_deadline(new_idle_timeout_ticks, &cpu_data_ptr->idle_timer_deadline);
		} else {
			/* turn off the idle timer */
			cpu_data_ptr->idle_timer_deadline = 0x0ULL;
		}
		timer_resync_deadlines();
	}
	(void) ml_set_interrupts_enabled(intr);
}

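/*
 * Routine: cpu_handle_xcall
 * Function: Run any pending cross-call (regular or immediate) on the
 *   current CPU, consuming the call parameters before clearing the
 *   corresponding SIGPxcall/SIGPxcallImm bit.
 */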
static void
cpu_handle_xcall(cpu_data_t *cpu_data_ptr)
{
	broadcastFunc xfunc;
	void *xparam;

	os_atomic_thread_fence(acquire);
	/* Come back around if cpu_signal_internal is running on another CPU and has just
	 * added SIGPxcall to the pending mask, but hasn't yet assigned the call params. */
	if (cpu_data_ptr->cpu_xcall_p0 != NULL && cpu_data_ptr->cpu_xcall_p1 != NULL) {
		xfunc = cpu_data_ptr->cpu_xcall_p0;
		xparam = cpu_data_ptr->cpu_xcall_p1;
		cpu_data_ptr->cpu_xcall_p0 = NULL;
		cpu_data_ptr->cpu_xcall_p1 = NULL;
		os_atomic_thread_fence(acq_rel);
		os_atomic_andnot(&cpu_data_ptr->cpu_signal, SIGPxcall, relaxed);
		xfunc(xparam);
	}
	if (cpu_data_ptr->cpu_imm_xcall_p0 != NULL && cpu_data_ptr->cpu_imm_xcall_p1 != NULL) {
		xfunc = cpu_data_ptr->cpu_imm_xcall_p0;
		xparam = cpu_data_ptr->cpu_imm_xcall_p1;
		cpu_data_ptr->cpu_imm_xcall_p0 = NULL;
		cpu_data_ptr->cpu_imm_xcall_p1 = NULL;
		os_atomic_thread_fence(acq_rel);
		os_atomic_andnot(&cpu_data_ptr->cpu_signal, SIGPxcallImm, relaxed);
		xfunc(xparam);
	}
}

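/*
 * Routine: cpu_broadcast_xcall_internal
 * Function: Signal every other populated CPU to run func(parm) with the
 *   given xcall signal, optionally calling func on this CPU as well.
 *   If synch is non-NULL it is initialized to the CPU count and used as
 *   a rendezvous counter: the caller subtracts the slots that will never
 *   check in (any CPU that could not be signalled, plus itself if it did
 *   not run func), then blocks until the count is driven to zero.
 *   Returns the number of CPUs on which func ran or was successfully
 *   signalled.
 */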
static unsigned int
cpu_broadcast_xcall_internal(unsigned int signal,
    uint32_t *synch,
    boolean_t self_xcall,
    broadcastFunc func,
    void *parm)
{
	boolean_t intr;
	cpu_data_t *cpu_data_ptr;
	cpu_data_t *target_cpu_datap;
	unsigned int failsig;
	int cpu;
	int max_cpu = ml_get_max_cpu_number() + 1;

	intr = ml_set_interrupts_enabled(FALSE);
	cpu_data_ptr = getCpuDatap();

	failsig = 0;

	if (synch != NULL) {
		*synch = max_cpu;
		assert_wait((event_t)synch, THREAD_UNINT);
	}

	for (cpu = 0; cpu < max_cpu; cpu++) {
		target_cpu_datap = (cpu_data_t *)CpuDataEntries[cpu].cpu_data_vaddr;

		if (target_cpu_datap == cpu_data_ptr) {
			continue;
		}

		if ((target_cpu_datap == NULL) ||
		    KERN_SUCCESS != cpu_signal(target_cpu_datap, signal, (void *)func, parm)) {
			failsig++;
		}
	}

	if (self_xcall) {
		func(parm);
	}

	(void) ml_set_interrupts_enabled(intr);

	if (synch != NULL) {
		if (os_atomic_sub(synch, (!self_xcall) ? failsig + 1 : failsig, relaxed) == 0) {
			clear_wait(current_thread(), THREAD_AWAKENED);
		} else {
			thread_block(THREAD_CONTINUE_NULL);
		}
	}

	if (!self_xcall) {
		return max_cpu - failsig - 1;
	} else {
		return max_cpu - failsig;
	}
}

unsigned int
cpu_broadcast_xcall(uint32_t *synch,
    boolean_t self_xcall,
    broadcastFunc func,
    void *parm)
{
	return cpu_broadcast_xcall_internal(SIGPxcall, synch, self_xcall, func, parm);
}

unsigned int
cpu_broadcast_immediate_xcall(uint32_t *synch,
    boolean_t self_xcall,
    broadcastFunc func,
    void *parm)
{
	return cpu_broadcast_xcall_internal(SIGPxcallImm, synch, self_xcall, func, parm);
}

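/*
 * Routine: cpu_xcall_internal
 * Function: Validate the target CPU number and call parameters, then
 *   signal that CPU to run func(param) as a cross-call.
 */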
static kern_return_t
cpu_xcall_internal(unsigned int signal, int cpu_number, broadcastFunc func, void *param)
{
	cpu_data_t *target_cpu_datap;

	if ((cpu_number < 0) || (cpu_number > ml_get_max_cpu_number())) {
		return KERN_INVALID_ARGUMENT;
	}

	if (func == NULL || param == NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	target_cpu_datap = (cpu_data_t*)CpuDataEntries[cpu_number].cpu_data_vaddr;
	if (target_cpu_datap == NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	return cpu_signal(target_cpu_datap, signal, (void*)func, param);
}

kern_return_t
cpu_xcall(int cpu_number, broadcastFunc func, void *param)
{
	return cpu_xcall_internal(SIGPxcall, cpu_number, func, param);
}

kern_return_t
cpu_immediate_xcall(int cpu_number, broadcastFunc func, void *param)
{
	return cpu_xcall_internal(SIGPxcallImm, cpu_number, func, param);
}

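/*
 * Routine: cpu_signal_internal
 * Function: Atomically merge the requested signal into the target CPU's
 *   pending mask (stashing the cross-call parameters for SIGPxcall and
 *   SIGPxcallImm), then raise an IPI to the target unless its signals
 *   are disabled.  Deferred requests are limited to SIGPnop idle kicks
 *   and are handed to the platform layer via PE_cpu_signal_deferred
 *   instead of being signalled immediately.
 */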
static kern_return_t
cpu_signal_internal(cpu_data_t *target_proc,
    unsigned int signal,
    void *p0,
    void *p1,
    boolean_t defer)
{
	unsigned int Check_SIGPdisabled;
	int current_signals;
	Boolean swap_success;
	boolean_t interruptible = ml_set_interrupts_enabled(FALSE);
	cpu_data_t *current_proc = getCpuDatap();

	/* We'll mandate that only IPIs meant to kick a core out of idle may ever be deferred. */
	if (defer) {
		assert(signal == SIGPnop);
	}

	if (current_proc != target_proc) {
		Check_SIGPdisabled = SIGPdisabled;
	} else {
		Check_SIGPdisabled = 0;
	}

	if ((signal == SIGPxcall) || (signal == SIGPxcallImm)) {
		do {
			current_signals = target_proc->cpu_signal;
			if ((current_signals & SIGPdisabled) == SIGPdisabled) {
				ml_set_interrupts_enabled(interruptible);
				return KERN_FAILURE;
			}
			swap_success = OSCompareAndSwap(current_signals & (~signal), current_signals | signal,
			    &target_proc->cpu_signal);

			if (!swap_success && (signal == SIGPxcallImm) && (target_proc->cpu_signal & SIGPxcallImm)) {
				ml_set_interrupts_enabled(interruptible);
				return KERN_ALREADY_WAITING;
			}

			/* Drain pending xcalls on this cpu; the CPU we're trying to xcall may in turn
			 * be trying to xcall us.  Since we have interrupts disabled, this can deadlock,
			 * so break the deadlock by draining pending xcalls. */
			if (!swap_success && (current_proc->cpu_signal & signal)) {
				cpu_handle_xcall(current_proc);
			}
		} while (!swap_success);

		if (signal == SIGPxcallImm) {
			target_proc->cpu_imm_xcall_p0 = p0;
			target_proc->cpu_imm_xcall_p1 = p1;
		} else {
			target_proc->cpu_xcall_p0 = p0;
			target_proc->cpu_xcall_p1 = p1;
		}
	} else {
		do {
			current_signals = target_proc->cpu_signal;
			if ((Check_SIGPdisabled != 0) && (current_signals & Check_SIGPdisabled) == SIGPdisabled) {
				ml_set_interrupts_enabled(interruptible);
				return KERN_FAILURE;
			}

			swap_success = OSCompareAndSwap(current_signals, current_signals | signal,
			    &target_proc->cpu_signal);
		} while (!swap_success);
	}

	/*
	 * Issue DSB here to guarantee: 1) prior stores to pending signal mask and xcall params
	 * will be visible to other cores when the IPI is dispatched, and 2) subsequent
	 * instructions to signal the other cores will not execute until after the barrier.
	 * DMB would be sufficient to guarantee 1) but not 2).
	 */
	__builtin_arm_dsb(DSB_ISH);

	if (!(target_proc->cpu_signal & SIGPdisabled)) {
		if (defer) {
			PE_cpu_signal_deferred(getCpuDatap()->cpu_id, target_proc->cpu_id);
		} else {
			PE_cpu_signal(getCpuDatap()->cpu_id, target_proc->cpu_id);
		}
	}

	ml_set_interrupts_enabled(interruptible);
	return KERN_SUCCESS;
}

kern_return_t
cpu_signal(cpu_data_t *target_proc,
    unsigned int signal,
    void *p0,
    void *p1)
{
	return cpu_signal_internal(target_proc, signal, p0, p1, FALSE);
}

kern_return_t
cpu_signal_deferred(cpu_data_t *target_proc)
{
	return cpu_signal_internal(target_proc, SIGPnop, NULL, NULL, TRUE);
}

void
cpu_signal_cancel(cpu_data_t *target_proc)
{
	/* TODO: Should we care about the state of a core as far as squashing deferred IPIs goes? */
	if (!(target_proc->cpu_signal & SIGPdisabled)) {
		PE_cpu_signal_cancel(getCpuDatap()->cpu_id, target_proc->cpu_id);
	}
}

void
cpu_signal_handler(void)
{
	cpu_signal_handler_internal(FALSE);
}

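/*
 * Routine: cpu_signal_handler_internal
 * Function: Service this CPU's pending signal mask, dispatching each set
 *   bit (decrementer, kperf timer, cross-calls, AST check, debugger and,
 *   on ARMA7, cache maintenance requests) until no enabled signals
 *   remain.  When disable_signal is TRUE the SIGPdisabled bit is set so
 *   that further IPIs are suppressed; when FALSE it is cleared.
 */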
void
cpu_signal_handler_internal(boolean_t disable_signal)
{
	cpu_data_t *cpu_data_ptr = getCpuDatap();
	unsigned int cpu_signal;

	cpu_data_ptr->cpu_stat.ipi_cnt++;
	cpu_data_ptr->cpu_stat.ipi_cnt_wake++;

	SCHED_STATS_IPI(current_processor());

	cpu_signal = os_atomic_or(&cpu_data_ptr->cpu_signal, 0, relaxed);

	if ((!(cpu_signal & SIGPdisabled)) && (disable_signal == TRUE)) {
		os_atomic_or(&cpu_data_ptr->cpu_signal, SIGPdisabled, relaxed);
	} else if ((cpu_signal & SIGPdisabled) && (disable_signal == FALSE)) {
		os_atomic_andnot(&cpu_data_ptr->cpu_signal, SIGPdisabled, relaxed);
	}

	while (cpu_signal & ~SIGPdisabled) {
		if (cpu_signal & SIGPdec) {
			os_atomic_andnot(&cpu_data_ptr->cpu_signal, SIGPdec, relaxed);
			rtclock_intr(FALSE);
		}
#if KPERF
		if (cpu_signal & SIGPkptimer) {
			os_atomic_andnot(&cpu_data_ptr->cpu_signal, SIGPkptimer, relaxed);
			kperf_signal_handler((unsigned int)cpu_data_ptr->cpu_number);
		}
#endif
		if (cpu_signal & (SIGPxcall | SIGPxcallImm)) {
			cpu_handle_xcall(cpu_data_ptr);
		}
		if (cpu_signal & SIGPast) {
			os_atomic_andnot(&cpu_data_ptr->cpu_signal, SIGPast, relaxed);
			ast_check(cpu_data_ptr->cpu_processor);
		}
		if (cpu_signal & SIGPdebug) {
			os_atomic_andnot(&cpu_data_ptr->cpu_signal, SIGPdebug, relaxed);
			DebuggerXCall(cpu_data_ptr->cpu_int_state);
		}
#if __ARM_SMP__ && defined(ARMA7)
		if (cpu_signal & SIGPLWFlush) {
			os_atomic_andnot(&cpu_data_ptr->cpu_signal, SIGPLWFlush, relaxed);
			cache_xcall_handler(LWFlush);
		}
		if (cpu_signal & SIGPLWClean) {
			os_atomic_andnot(&cpu_data_ptr->cpu_signal, SIGPLWClean, relaxed);
			cache_xcall_handler(LWClean);
		}
#endif

		cpu_signal = os_atomic_or(&cpu_data_ptr->cpu_signal, 0, relaxed);
	}
}

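/*
 * Routine: cpu_exit_wait
 * Function: For any CPU other than the master, spin until that CPU has
 *   marked itself as being on the sleep path by writing
 *   ARM_CPU_ON_SLEEP_PATH into its cpu_sleep_token.
 */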
void
cpu_exit_wait(int cpu)
{
	if (cpu != master_cpu) {
		cpu_data_t *cpu_data_ptr;

		cpu_data_ptr = CpuDataEntries[cpu].cpu_data_vaddr;
		while (!((*(volatile unsigned int*)&cpu_data_ptr->cpu_sleep_token) == ARM_CPU_ON_SLEEP_PATH)) {
		}
	}
}

boolean_t
cpu_can_exit(__unused int cpu)
{
	return TRUE;
}

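/*
 * Routine: cpu_machine_init
 * Function: Machine-dependent per-CPU start/resume initialization:
 *   initialize platform cache handling when a cache dispatch callback is
 *   present, call PE_cpu_machine_init (which runs the IOCPU platform
 *   active actions when resuming on the boot cpu), mark the CPU as
 *   started and initialize interrupts.
 */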
void
cpu_machine_init(void)
{
	static boolean_t started = FALSE;
	cpu_data_t *cpu_data_ptr;

	cpu_data_ptr = getCpuDatap();
	started = ((cpu_data_ptr->cpu_flags & StartedState) == StartedState);
	if (cpu_data_ptr->cpu_cache_dispatch != (cache_dispatch_t) NULL) {
		platform_cache_init();
	}

	/* Note: this calls IOCPURunPlatformActiveActions when resuming on boot cpu */
	PE_cpu_machine_init(cpu_data_ptr->cpu_id, !started);

	cpu_data_ptr->cpu_flags |= StartedState;
	ml_init_interrupt();
}

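/*
 * Routine: cpu_processor_alloc
 * Function: Return the statically allocated BootProcessor for the boot
 *   cpu, otherwise allocate and zero-fill a new processor structure.
 */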
processor_t
cpu_processor_alloc(boolean_t is_boot_cpu)
{
	processor_t proc;

	if (is_boot_cpu) {
		return &BootProcessor;
	}

	proc = kalloc(sizeof(*proc));
	if (!proc) {
		return NULL;
	}

	bzero((void *) proc, sizeof(*proc));
	return proc;
}

void
cpu_processor_free(processor_t proc)
{
	if (proc != NULL && proc != &BootProcessor) {
		kfree(proc, sizeof(*proc));
	}
}

processor_t
current_processor(void)
{
	return getCpuDatap()->cpu_processor;
}

processor_t
cpu_to_processor(int cpu)
{
	cpu_data_t *cpu_data = cpu_datap(cpu);
	if (cpu_data != NULL) {
		return cpu_data->cpu_processor;
	} else {
		return NULL;
	}
}

cpu_data_t *
processor_to_cpu_datap(processor_t processor)
{
	cpu_data_t *target_cpu_datap;

	assert(processor->cpu_id < MAX_CPUS);
	assert(CpuDataEntries[processor->cpu_id].cpu_data_vaddr != NULL);

	target_cpu_datap = (cpu_data_t*)CpuDataEntries[processor->cpu_id].cpu_data_vaddr;
	assert(target_cpu_datap->cpu_processor == processor);

	return target_cpu_datap;
}

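/*
 * Routine: cpu_data_alloc
 * Function: Return &BootCpuData for the boot cpu; otherwise allocate and
 *   zero a fresh cpu_data_t, set up its per-CPU stacks, and attach a
 *   processor structure.  Panics if any allocation fails.
 */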
cpu_data_t *
cpu_data_alloc(boolean_t is_boot_cpu)
{
	cpu_data_t *cpu_data_ptr = NULL;

	if (is_boot_cpu) {
		cpu_data_ptr = &BootCpuData;
	} else {
		if ((kmem_alloc(kernel_map, (vm_offset_t *)&cpu_data_ptr, sizeof(cpu_data_t), VM_KERN_MEMORY_CPU)) != KERN_SUCCESS) {
			goto cpu_data_alloc_error;
		}

		bzero((void *)cpu_data_ptr, sizeof(cpu_data_t));

		cpu_stack_alloc(cpu_data_ptr);
	}

	cpu_data_ptr->cpu_processor = cpu_processor_alloc(is_boot_cpu);
	if (cpu_data_ptr->cpu_processor == (struct processor *)NULL) {
		goto cpu_data_alloc_error;
	}

	return cpu_data_ptr;

cpu_data_alloc_error:
	panic("cpu_data_alloc() failed\n");
	return (cpu_data_t *)NULL;
}

ast_t *
ast_pending(void)
{
	return &getCpuDatap()->cpu_pending_ast;
}

cpu_type_t
slot_type(int slot_num)
{
	return cpu_datap(slot_num)->cpu_type;
}

cpu_subtype_t
slot_subtype(int slot_num)
{
	return cpu_datap(slot_num)->cpu_subtype;
}

cpu_threadtype_t
slot_threadtype(int slot_num)
{
	return cpu_datap(slot_num)->cpu_threadtype;
}

cpu_type_t
cpu_type(void)
{
	return getCpuDatap()->cpu_type;
}

cpu_subtype_t
cpu_subtype(void)
{
	return getCpuDatap()->cpu_subtype;
}

cpu_threadtype_t
cpu_threadtype(void)
{
	return getCpuDatap()->cpu_threadtype;
}

int
cpu_number(void)
{
	return getCpuDatap()->cpu_number;
}

uint64_t
ml_get_wake_timebase(void)
{
	return wake_abstime;
}