#include <kern/kalloc.h>
#include <kern/machine.h>
#include <kern/cpu_number.h>
+#include <kern/percpu.h>
#include <kern/thread.h>
#include <kern/timer_queue.h>
#include <arm/cpu_data.h>
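+/* Physical address of the secondary-CPU start entry point; installed as each
+ * CPU's cpu_reset_handler before the core is brought up via PE_cpu_start()
+ * below. */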
unsigned int start_cpu_paddr;
-extern boolean_t idle_enable;
-extern unsigned int real_ncpus;
-extern uint64_t wake_abstime;
+extern boolean_t idle_enable;
+extern unsigned int real_ncpus;
+extern uint64_t wake_abstime;
extern void* wfi_inst;
unsigned wfi_fast = 1;
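+/* 0xe1a00000 is the A32 encoding of "mov r0, r0", i.e. a NOP; it is copied
+ * over the WFI instruction when WFI is disabled via the wfi boot-arg. */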
unsigned patch_to_nop = 0xe1a00000;
-void *LowExceptionVectorsAddr;
-#define IOS_STATE (((vm_offset_t)LowExceptionVectorsAddr + 0x80))
-#define IOS_STATE_SIZE (0x08UL)
+void *LowExceptionVectorsAddr;
+#define IOS_STATE (((vm_offset_t)LowExceptionVectorsAddr + 0x80))
+#define IOS_STATE_SIZE (0x08UL)
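+/* Eight-byte tokens stamped into low physical memory at IOS_STATE on the way
+ * into and out of sleep, so the warm-boot path can (presumably) tell a
+ * suspended system from a running one. */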
static const uint8_t suspend_signature[] = {'X', 'S', 'O', 'M', 'P', 'S', 'U', 'S'};
static const uint8_t running_signature[] = {'X', 'S', 'O', 'M', 'N', 'N', 'U', 'R'};
CleanPoC_Dcache();
PE_cpu_machine_quiesce(cpu_data_ptr->cpu_id);
-
}
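+/* Number of cores currently in the platform idle loop; maintained by
+ * machine_track_platform_idle() at the bottom of this file. */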
_Atomic uint32_t cpu_idle_count = 0;
void __attribute__((noreturn))
cpu_idle(void)
{
cpu_data_t *cpu_data_ptr = getCpuDatap();
- uint64_t new_idle_timeout_ticks = 0x0ULL, lastPop;
+ uint64_t new_idle_timeout_ticks = 0x0ULL, lastPop;
- if ((!idle_enable) || (cpu_data_ptr->cpu_signal & SIGPdisabled))
+ if ((!idle_enable) || (cpu_data_ptr->cpu_signal & SIGPdisabled)) {
Idle_load_context();
- if (!SetIdlePop())
+ }
+ if (!SetIdlePop()) {
Idle_load_context();
+ }
lastPop = cpu_data_ptr->rtcPop;
pmap_switch_user_ttb(kernel_pmap);
cpu_data_ptr->cpu_active_thread = current_thread();
- if (cpu_data_ptr->cpu_user_debug)
+ if (cpu_data_ptr->cpu_user_debug) {
arm_debug_set(NULL);
+ }
cpu_data_ptr->cpu_user_debug = NULL;
- if (cpu_data_ptr->cpu_idle_notify)
- ((processor_idle_t) cpu_data_ptr->cpu_idle_notify) (cpu_data_ptr->cpu_id, TRUE, &new_idle_timeout_ticks);
+ if (cpu_data_ptr->cpu_idle_notify != NULL) {
+ cpu_data_ptr->cpu_idle_notify(cpu_data_ptr->cpu_id, TRUE, &new_idle_timeout_ticks);
+ }
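+	/* If the idle notify hook handed back a new timeout, reprogram the idle
+	 * timer before committing to idle. */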
if (cpu_data_ptr->idle_timer_notify != 0) {
		if (new_idle_timeout_ticks == 0x0ULL) {
			/* turn off the idle timer */
			cpu_data_ptr->idle_timer_deadline = 0x0ULL;
		} else {
			/* set the new idle timeout */
			clock_absolutetime_interval_to_deadline(new_idle_timeout_ticks, &cpu_data_ptr->idle_timer_deadline);
		}
timer_resync_deadlines();
- if (cpu_data_ptr->rtcPop != lastPop)
+ if (cpu_data_ptr->rtcPop != lastPop) {
+ /*
+ * Ignore the return value here: this CPU has called idle_notify and
+ * committed to going idle.
+ */
SetIdlePop();
+ }
}
#if KPC
kpc_idle();
-#endif
+#endif /* KPC */
platform_cache_idle_enter();
cpu_idle_wfi((boolean_t) wfi_fast);
platform_cache_idle_exit();
ClearIdlePop(TRUE);
- cpu_idle_exit();
+ cpu_idle_exit(FALSE);
}
/*
 *	Routine:	cpu_idle_exit
 *	Function:
 */
void
-cpu_idle_exit(void)
+cpu_idle_exit(boolean_t from_reset __unused)
{
- uint64_t new_idle_timeout_ticks = 0x0ULL;
+ uint64_t new_idle_timeout_ticks = 0x0ULL;
cpu_data_t *cpu_data_ptr = getCpuDatap();
#if KPC
	kpc_idle_exit();
#endif /* KPC */
	pmap_set_pmap(cpu_data_ptr->cpu_active_thread->map->pmap, current_thread());
- if (cpu_data_ptr->cpu_idle_notify)
- ((processor_idle_t) cpu_data_ptr->cpu_idle_notify) (cpu_data_ptr->cpu_id, FALSE, &new_idle_timeout_ticks);
+ if (cpu_data_ptr->cpu_idle_notify != NULL) {
+ cpu_data_ptr->cpu_idle_notify(cpu_data_ptr->cpu_id, FALSE, &new_idle_timeout_ticks);
+ }
if (cpu_data_ptr->idle_timer_notify != 0) {
if (new_idle_timeout_ticks == 0x0ULL) {
arm_cpu_info_t *cpu_info_p;
if (cdp->cpu_type != CPU_TYPE_ARM) {
-
cdp->cpu_type = CPU_TYPE_ARM;
timer_call_queue_init(&cdp->rtclock_timer.queue);
break;
case CPU_ARCH_ARMv5TE:
case CPU_ARCH_ARMv5TEJ:
- if (cpu_info_p->arm_info.arm_implementor == CPU_VID_INTEL)
+ if (cpu_info_p->arm_info.arm_implementor == CPU_VID_INTEL) {
cdp->cpu_subtype = CPU_SUBTYPE_ARM_XSCALE;
- else
+ } else {
cdp->cpu_subtype = CPU_SUBTYPE_ARM_V5TEJ;
+ }
break;
case CPU_ARCH_ARMv6:
cdp->cpu_subtype = CPU_SUBTYPE_ARM_V6;
}
cdp->cpu_stat.irq_ex_cnt_wake = 0;
cdp->cpu_stat.ipi_cnt_wake = 0;
- cdp->cpu_stat.timer_cnt_wake = 0;
cdp->cpu_running = TRUE;
cdp->cpu_sleep_token_last = cdp->cpu_sleep_token;
cdp->cpu_sleep_token = 0x0UL;
-
}
-cpu_data_t *
-cpu_data_alloc(boolean_t is_boot_cpu)
+void
+cpu_stack_alloc(cpu_data_t *cpu_data_ptr)
{
- cpu_data_t *cpu_data_ptr = NULL;
-
- if (is_boot_cpu)
- cpu_data_ptr = &BootCpuData;
- else {
- void *irq_stack = NULL;
- void *fiq_stack = NULL;
-
- if ((kmem_alloc(kernel_map, (vm_offset_t *)&cpu_data_ptr, sizeof(cpu_data_t), VM_KERN_MEMORY_CPU)) != KERN_SUCCESS)
- goto cpu_data_alloc_error;
-
- bzero((void *)cpu_data_ptr, sizeof(cpu_data_t));
-
- if ((irq_stack = kalloc(INTSTACK_SIZE)) == 0)
- goto cpu_data_alloc_error;
-#if __BIGGEST_ALIGNMENT__
- /* force 16-byte alignment */
- if ((uint32_t)irq_stack & 0x0F)
- irq_stack = (void *)((uint32_t)irq_stack + (0x10 - ((uint32_t)irq_stack & 0x0F)));
-#endif
- cpu_data_ptr->intstack_top = (vm_offset_t)irq_stack + INTSTACK_SIZE ;
- cpu_data_ptr->istackptr = cpu_data_ptr->intstack_top;
-
- if ((fiq_stack = kalloc(PAGE_SIZE)) == 0)
- goto cpu_data_alloc_error;
-#if __BIGGEST_ALIGNMENT__
- /* force 16-byte alignment */
- if ((uint32_t)fiq_stack & 0x0F)
- fiq_stack = (void *)((uint32_t)fiq_stack + (0x10 - ((uint32_t)fiq_stack & 0x0F)));
-#endif
- cpu_data_ptr->fiqstack_top = (vm_offset_t)fiq_stack + PAGE_SIZE ;
- cpu_data_ptr->fiqstackptr = cpu_data_ptr->fiqstack_top;
+ vm_offset_t irq_stack = 0;
+ vm_offset_t fiq_stack = 0;
+
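+	/* Allocate the IRQ stack with leading and trailing guard pages
+	 * (KMA_GUARD_FIRST/LAST) so an overflow or underflow faults instead of
+	 * silently corrupting adjacent memory. */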
+ kern_return_t kr = kernel_memory_allocate(kernel_map, &irq_stack,
+ INTSTACK_SIZE + (2 * PAGE_SIZE),
+ PAGE_MASK,
+ KMA_GUARD_FIRST | KMA_GUARD_LAST | KMA_KSTACK | KMA_KOBJECT,
+ VM_KERN_MEMORY_STACK);
+ if (kr != KERN_SUCCESS) {
+ panic("Unable to allocate cpu interrupt stack\n");
}
- cpu_data_ptr->cpu_processor = cpu_processor_alloc(is_boot_cpu);
- if (cpu_data_ptr->cpu_processor == (struct processor *)NULL)
- goto cpu_data_alloc_error;
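+	/* The usable stack starts one page in, past the leading guard page. */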
+ cpu_data_ptr->intstack_top = irq_stack + PAGE_SIZE + INTSTACK_SIZE;
+ cpu_data_ptr->istackptr = cpu_data_ptr->intstack_top;
- return cpu_data_ptr;
+ kr = kernel_memory_allocate(kernel_map, &fiq_stack,
+ FIQSTACK_SIZE + (2 * PAGE_SIZE),
+ PAGE_MASK,
+ KMA_GUARD_FIRST | KMA_GUARD_LAST | KMA_KSTACK | KMA_KOBJECT,
+ VM_KERN_MEMORY_STACK);
+ if (kr != KERN_SUCCESS) {
+ panic("Unable to allocate cpu exception stack\n");
+ }
-cpu_data_alloc_error:
- panic("cpu_data_alloc() failed\n");
- return (cpu_data_t *)NULL;
+ cpu_data_ptr->fiqstack_top = fiq_stack + PAGE_SIZE + FIQSTACK_SIZE;
+ cpu_data_ptr->fiqstackptr = cpu_data_ptr->fiqstack_top;
}
-
void
cpu_data_free(cpu_data_t *cpu_data_ptr)
{
- if (cpu_data_ptr == &BootCpuData)
- return;
+ if ((cpu_data_ptr == NULL) || (cpu_data_ptr == &BootCpuData)) {
+ return;
+ }
+
+ int cpu_number = cpu_data_ptr->cpu_number;
- cpu_processor_free( cpu_data_ptr->cpu_processor);
- kfree( (void *)(cpu_data_ptr->intstack_top - INTSTACK_SIZE), INTSTACK_SIZE);
- kfree( (void *)(cpu_data_ptr->fiqstack_top - PAGE_SIZE), PAGE_SIZE);
- kmem_free(kernel_map, (vm_offset_t)cpu_data_ptr, sizeof(cpu_data_t));
+ if (CpuDataEntries[cpu_number].cpu_data_vaddr == cpu_data_ptr) {
+ OSDecrementAtomic((SInt32*)&real_ncpus);
+ CpuDataEntries[cpu_number].cpu_data_vaddr = NULL;
+ CpuDataEntries[cpu_number].cpu_data_paddr = 0;
+ __builtin_arm_dmb(DMB_ISH); // Ensure prior stores to cpu array are visible
+ }
+ (kfree)((void *)(cpu_data_ptr->intstack_top - INTSTACK_SIZE), INTSTACK_SIZE);
+ (kfree)((void *)(cpu_data_ptr->fiqstack_top - FIQSTACK_SIZE), FIQSTACK_SIZE);
}
void
cpu_data_init(cpu_data_t *cpu_data_ptr)
{
- uint32_t i = 0;
-
cpu_data_ptr->cpu_flags = 0;
-#if __arm__
+#if __arm__
cpu_data_ptr->cpu_exc_vectors = (vm_offset_t)&ExceptionVectorsTable;
#endif
- cpu_data_ptr->interrupts_enabled = 0;
cpu_data_ptr->cpu_int_state = 0;
cpu_data_ptr->cpu_pending_ast = AST_NONE;
- cpu_data_ptr->cpu_cache_dispatch = (void *) 0;
+ cpu_data_ptr->cpu_cache_dispatch = NULL;
cpu_data_ptr->rtcPop = EndOfAllTime;
cpu_data_ptr->rtclock_datap = &RTClockData;
cpu_data_ptr->cpu_user_debug = NULL;
cpu_data_ptr->cpu_base_timebase_low = 0;
cpu_data_ptr->cpu_base_timebase_high = 0;
- cpu_data_ptr->cpu_idle_notify = (void *) 0;
+ cpu_data_ptr->cpu_idle_notify = NULL;
cpu_data_ptr->cpu_idle_latency = 0x0ULL;
cpu_data_ptr->cpu_idle_pop = 0x0ULL;
cpu_data_ptr->cpu_reset_type = 0x0UL;
cpu_data_ptr->cpu_signal = SIGPdisabled;
-#if DEBUG || DEVELOPMENT
- cpu_data_ptr->failed_xcall = NULL;
- cpu_data_ptr->failed_signal = 0;
- cpu_data_ptr->failed_signal_count = 0;
-#endif
-
cpu_data_ptr->cpu_get_fiq_handler = NULL;
cpu_data_ptr->cpu_tbd_hardware_addr = NULL;
cpu_data_ptr->cpu_tbd_hardware_val = NULL;
cpu_data_ptr->cpu_sleep_token_last = 0x00000000UL;
cpu_data_ptr->cpu_xcall_p0 = NULL;
cpu_data_ptr->cpu_xcall_p1 = NULL;
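+	/* Parameter slots for the separate immediate cross-call path, mirroring
+	 * the queued cpu_xcall slots above. */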
+ cpu_data_ptr->cpu_imm_xcall_p0 = NULL;
+ cpu_data_ptr->cpu_imm_xcall_p1 = NULL;
-#if __ARM_SMP__ && defined(ARMA7)
+#if defined(ARMA7)
cpu_data_ptr->cpu_CLWFlush_req = 0x0ULL;
cpu_data_ptr->cpu_CLWFlush_last = 0x0ULL;
cpu_data_ptr->cpu_CLWClean_req = 0x0ULL;
cpu_data_ptr->cpu_CLW_active = 0x1UL;
#endif
+#if !XNU_MONITOR
pmap_cpu_data_t * pmap_cpu_data_ptr = &cpu_data_ptr->cpu_pmap_cpu_data;
pmap_cpu_data_ptr->cpu_user_pmap = (struct pmap *) NULL;
pmap_cpu_data_ptr->cpu_user_pmap_stamp = 0;
pmap_cpu_data_ptr->cpu_number = PMAP_INVALID_CPU_NUM;
- for (i = 0; i < (sizeof(pmap_cpu_data_ptr->cpu_asid_high_bits) / sizeof(*pmap_cpu_data_ptr->cpu_asid_high_bits)); i++) {
- pmap_cpu_data_ptr->cpu_asid_high_bits[i] = 0;
- }
+ bzero(&(pmap_cpu_data_ptr->cpu_sw_asids[0]), sizeof(pmap_cpu_data_ptr->cpu_sw_asids));
+#endif
cpu_data_ptr->halt_status = CPU_NOT_HALTED;
}
int cpu;
cpu = OSIncrementAtomic((SInt32*)&real_ncpus);
- if (real_ncpus > MAX_CPUS) {
+ if (real_ncpus > ml_get_cpu_count()) {
return KERN_FAILURE;
}
cpu_data_ptr->cpu_number = cpu;
+ __builtin_arm_dmb(DMB_ISH); // Ensure prior stores to cpu data are visible
CpuDataEntries[cpu].cpu_data_vaddr = cpu_data_ptr;
- CpuDataEntries[cpu].cpu_data_paddr = (void *)ml_vtophys( (vm_offset_t)cpu_data_ptr);
+ CpuDataEntries[cpu].cpu_data_paddr = (void *)ml_vtophys((vm_offset_t)cpu_data_ptr);
return KERN_SUCCESS;
}
cpu_machine_init();
return KERN_SUCCESS;
} else {
-#if __ARM_SMP__
- cpu_data_t *cpu_data_ptr;
- thread_t first_thread;
+ cpu_data_t *cpu_data_ptr;
+ thread_t first_thread;
+ processor_t processor;
cpu_data_ptr = CpuDataEntries[cpu].cpu_data_vaddr;
cpu_data_ptr->cpu_reset_handler = (vm_offset_t) start_cpu_paddr;
+#if !XNU_MONITOR
cpu_data_ptr->cpu_pmap_cpu_data.cpu_user_pmap = NULL;
+#endif
- if (cpu_data_ptr->cpu_processor->next_thread != THREAD_NULL)
- first_thread = cpu_data_ptr->cpu_processor->next_thread;
- else
- first_thread = cpu_data_ptr->cpu_processor->idle_thread;
+ processor = PERCPU_GET_RELATIVE(processor, cpu_data, cpu_data_ptr);
+ if (processor->startup_thread != THREAD_NULL) {
+ first_thread = processor->startup_thread;
+ } else {
+ first_thread = processor->idle_thread;
+ }
cpu_data_ptr->cpu_active_thread = first_thread;
first_thread->machine.CpuDatap = cpu_data_ptr;
+ first_thread->machine.pcpu_data_base =
+ (vm_address_t)cpu_data_ptr - __PERCPU_ADDR(cpu_data);
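+		/* pcpu_data_base records this cpu_data_t as an offset from the
+		 * __PERCPU base, so PERCPU accessors running on the new CPU resolve
+		 * to it (an assumption based on the percpu scheme). */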
flush_dcache((vm_offset_t)&CpuDataEntries[cpu], sizeof(cpu_data_entry_t), FALSE);
flush_dcache((vm_offset_t)cpu_data_ptr, sizeof(cpu_data_t), FALSE);
(void) PE_cpu_start(cpu_data_ptr->cpu_id, (vm_offset_t)NULL, (vm_offset_t)NULL);
return KERN_SUCCESS;
-#else
- return KERN_FAILURE;
-#endif
}
}
cdp->cpu_base_timebase_low = rtclock_base_abstime_low;
cdp->cpu_base_timebase_high = rtclock_base_abstime_high;
#else
- *((uint64_t *) & cdp->cpu_base_timebase_low) = rtclock_base_abstime;
+ *((uint64_t *) &cdp->cpu_base_timebase_low) = rtclock_base_abstime;
#endif
}
cpu_data_t *cpu_data_ptr = getCpuDatap();
if (cpu_data_ptr == &BootCpuData) {
- cpu_data_t *target_cdp;
- unsigned int cpu;
+ cpu_data_t *target_cdp;
+ unsigned int cpu;
- for (cpu=0; cpu < MAX_CPUS; cpu++) {
+ const unsigned int max_cpu_id = ml_get_max_cpu_number();
+ for (cpu = 0; cpu <= max_cpu_id; cpu++) {
target_cdp = (cpu_data_t *)CpuDataEntries[cpu].cpu_data_vaddr;
- if(target_cdp == (cpu_data_t *)NULL)
+ if (target_cdp == (cpu_data_t *)NULL) {
break;
+ }
- if (target_cdp == cpu_data_ptr)
+ if (target_cdp == cpu_data_ptr) {
continue;
+ }
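+		/* Spin until the target core has published its sleep token and is
+		 * committed to the sleep path. */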
- while (target_cdp->cpu_sleep_token != ARM_CPU_ON_SLEEP_PATH);
+ while (target_cdp->cpu_sleep_token != ARM_CPU_ON_SLEEP_PATH) {
+ ;
+ }
}
/* Now that the other cores have entered the sleep path, set
* the abstime fixup we'll use when we resume.*/
rtclock_base_abstime = ml_get_timebase();
wake_abstime = rtclock_base_abstime;
-
} else {
platform_cache_disable();
CleanPoU_Dcache();
}
cpu_data_ptr->cpu_sleep_token = ARM_CPU_ON_SLEEP_PATH;
-#if __ARM_SMP__ && defined(ARMA7)
+#if defined(ARMA7)
cpu_data_ptr->cpu_CLWFlush_req = 0;
cpu_data_ptr->cpu_CLWClean_req = 0;
__builtin_arm_dmb(DMB_ISH);
platform_cache_disable();
platform_cache_shutdown();
bcopy((const void *)suspend_signature, (void *)(IOS_STATE), IOS_STATE_SIZE);
- } else
+ } else {
CleanPoC_DcacheRegion((vm_offset_t) cpu_data_ptr, sizeof(cpu_data_t));
+ }
__builtin_arm_dsb(DSB_SY);
while (TRUE) {
void
cpu_machine_idle_init(boolean_t from_boot)
{
- static const unsigned int *BootArgs_paddr = (unsigned int *)NULL;
- static const unsigned int *CpuDataEntries_paddr = (unsigned int *)NULL;
- static unsigned int resume_idle_cpu_paddr = (unsigned int )NULL;
- cpu_data_t *cpu_data_ptr = getCpuDatap();
+ static const unsigned int *BootArgs_paddr = (unsigned int *)NULL;
+ static const unsigned int *CpuDataEntries_paddr = (unsigned int *)NULL;
+ static unsigned int resume_idle_cpu_paddr = (unsigned int)NULL;
+ cpu_data_t *cpu_data_ptr = getCpuDatap();
if (from_boot) {
unsigned int jtag = 0;
unsigned int wfi;
- if (PE_parse_boot_argn("jtag", &jtag, sizeof (jtag))) {
- if (jtag != 0)
+ if (PE_parse_boot_argn("jtag", &jtag, sizeof(jtag))) {
+ if (jtag != 0) {
idle_enable = FALSE;
- else
+ } else {
idle_enable = TRUE;
- } else
+ }
+ } else {
idle_enable = TRUE;
+ }
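+	/* wfi boot-arg: 0 patches the WFI instruction itself to a NOP, 2 keeps
+	 * WFI but clears wfi_fast (presumably selecting the slower WFI variant
+	 * in cpu_idle_wfi). */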
- if (!PE_parse_boot_argn("wfi", &wfi, sizeof (wfi)))
+ if (!PE_parse_boot_argn("wfi", &wfi, sizeof(wfi))) {
wfi = 1;
+ }
- if (wfi == 0)
+ if (wfi == 0) {
bcopy_phys((addr64_t)ml_static_vtop((vm_offset_t)&patch_to_nop),
- (addr64_t)ml_static_vtop((vm_offset_t)&wfi_inst), sizeof(unsigned));
- if (wfi == 2)
+ (addr64_t)ml_static_vtop((vm_offset_t)&wfi_inst), sizeof(unsigned));
+ }
+ if (wfi == 2) {
wfi_fast = 0;
+ }
LowExceptionVectorsAddr = (void *)ml_io_map(ml_vtophys((vm_offset_t)gPhysBase), PAGE_SIZE);
BootArgs_paddr = (unsigned int *)ml_static_vtop((vm_offset_t)BootArgs);
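+		/* Stash the physical address of BootArgs into ResetHandlerData, at
+		 * its fixed offset from the low exception vectors, for use by the
+		 * warm-boot reset handler. */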
bcopy_phys((addr64_t)ml_static_vtop((vm_offset_t)&BootArgs_paddr),
- (addr64_t)((unsigned int)(gPhysBase) +
- ((unsigned int)&(ResetHandlerData.boot_args) - (unsigned int)&ExceptionLowVectorsBase)),
- 4);
+ (addr64_t)((unsigned int)(gPhysBase) +
+ ((unsigned int)&(ResetHandlerData.boot_args) - (unsigned int)&ExceptionLowVectorsBase)),
+ 4);
CpuDataEntries_paddr = (unsigned int *)ml_static_vtop((vm_offset_t)CpuDataEntries);
bcopy_phys((addr64_t)ml_static_vtop((vm_offset_t)&CpuDataEntries_paddr),
- (addr64_t)((unsigned int)(gPhysBase) +
- ((unsigned int)&(ResetHandlerData.cpu_data_entries) - (unsigned int)&ExceptionLowVectorsBase)),
- 4);
+ (addr64_t)((unsigned int)(gPhysBase) +
+ ((unsigned int)&(ResetHandlerData.cpu_data_entries) - (unsigned int)&ExceptionLowVectorsBase)),
+ 4);
- CleanPoC_DcacheRegion((vm_offset_t) phystokv((char *) (gPhysBase)), PAGE_SIZE);
+ CleanPoC_DcacheRegion((vm_offset_t) phystokv(gPhysBase), PAGE_SIZE);
resume_idle_cpu_paddr = (unsigned int)ml_static_vtop((vm_offset_t)&resume_idle_cpu);
-
}
if (cpu_data_ptr == &BootCpuData) {
bcopy(((const void *)running_signature), (void *)(IOS_STATE), IOS_STATE_SIZE);
-	};
+	}
cpu_data_ptr->cpu_reset_handler = resume_idle_cpu_paddr;
clean_dcache((vm_offset_t)cpu_data_ptr, sizeof(cpu_data_t), FALSE);
void
machine_track_platform_idle(boolean_t entry)
{
- if (entry)
- (void)__c11_atomic_fetch_add(&cpu_idle_count, 1, __ATOMIC_RELAXED);
- else
- (void)__c11_atomic_fetch_sub(&cpu_idle_count, 1, __ATOMIC_RELAXED);
+ if (entry) {
+ os_atomic_inc(&cpu_idle_count, relaxed);
+ } else {
+ os_atomic_dec(&cpu_idle_count, relaxed);
+ }
}
-