/*
- * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 2000-2009 Apple Inc. All rights reserved.
*
- * @APPLE_LICENSE_HEADER_START@
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
- * The contents of this file constitute Original Code as defined in and
- * are subject to the Apple Public Source License Version 1.1 (the
- * "License"). You may not use this file except in compliance with the
- * License. Please obtain a copy of the License at
- * http://www.apple.com/publicsource and read it before using this file.
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. The rights granted to you under the License
+ * may not be used to create, or enable the creation or redistribution of,
+ * unlawful or unlicensed copies of an Apple operating system, or to
+ * circumvent, violate, or enable the circumvention or violation of, any
+ * terms of an Apple operating system software license agreement.
*
- * This Original Code and all software distributed under the License are
- * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this file.
+ *
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
* EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
* INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
- * License for the specific language governing rights and limitations
- * under the License.
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
*
- * @APPLE_LICENSE_HEADER_END@
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
*/
+
+#include <mach/mach_types.h>
+
#include <ppc/machine_routines.h>
-#include <ppc/machine_cpu.h>
+#include <ppc/cpu_internal.h>
#include <ppc/exception.h>
+#include <ppc/io_map_entries.h>
#include <ppc/misc_protos.h>
+#include <ppc/savearea.h>
#include <ppc/Firmware.h>
-#include <vm/vm_page.h>
#include <ppc/pmap.h>
+#include <ppc/mem.h>
+#include <ppc/new_screen.h>
#include <ppc/proc_reg.h>
+#include <ppc/machine_cpu.h> /* for cpu_signal_handler() */
+#include <ppc/fpu_protos.h>
+#include <kern/kern_types.h>
#include <kern/processor.h>
+#include <kern/machine.h>
+
+#include <vm/vm_page.h>
-boolean_t get_interrupts_enabled(void);
+unsigned int LockTimeOut = 1250000000;
+unsigned int MutexSpin = 0;
+
+static int max_cpus_initialized = 0;
+
+uint32_t warFlags = 0;
+#define warDisMBpoff 0x80000000
+#define MAX_CPUS_SET 0x01
+#define MAX_CPUS_WAIT 0x02
+
+decl_simple_lock_data(, spsLock);
+unsigned int spsLockInit = 0;
+
+extern unsigned int hwllckPatch_isync;
+extern unsigned int hwulckPatch_isync;
+extern unsigned int hwulckbPatch_isync;
+extern unsigned int hwlmlckPatch_isync;
+extern unsigned int hwltlckPatch_isync;
+extern unsigned int hwcsatomicPatch_isync;
+extern unsigned int mlckePatch_isync;
+extern unsigned int mlckPatch_isync;
+extern unsigned int mltelckPatch_isync;
+extern unsigned int mltlckPatch_isync;
+extern unsigned int mulckePatch_isync;
+extern unsigned int mulckPatch_isync;
+extern unsigned int slckPatch_isync;
+extern unsigned int stlckPatch_isync;
+extern unsigned int sulckPatch_isync;
+extern unsigned int rwlePatch_isync;
+extern unsigned int rwlsPatch_isync;
+extern unsigned int rwlsePatch_isync;
+extern unsigned int rwlesPatch_isync;
+extern unsigned int rwtlePatch_isync;
+extern unsigned int rwtlsPatch_isync;
+extern unsigned int rwldPatch_isync;
+extern unsigned int hwulckPatch_eieio;
+extern unsigned int mulckPatch_eieio;
+extern unsigned int mulckePatch_eieio;
+extern unsigned int sulckPatch_eieio;
+extern unsigned int rwlesPatch_eieio;
+extern unsigned int rwldPatch_eieio;
+
+struct patch_up {
+ unsigned int *addr;
+ unsigned int data;
+};
+
+typedef struct patch_up patch_up_t;
+
+patch_up_t patch_up_table[] = {
+ {&hwllckPatch_isync, 0x60000000},
+ {&hwulckPatch_isync, 0x60000000},
+ {&hwulckbPatch_isync, 0x60000000},
+ {&hwlmlckPatch_isync, 0x60000000},
+ {&hwltlckPatch_isync, 0x60000000},
+ {&hwcsatomicPatch_isync, 0x60000000},
+ {&mlckePatch_isync, 0x60000000},
+ {&mlckPatch_isync, 0x60000000},
+ {&mltelckPatch_isync, 0x60000000},
+ {&mltlckPatch_isync, 0x60000000},
+ {&mulckePatch_isync, 0x60000000},
+ {&mulckPatch_isync, 0x60000000},
+ {&slckPatch_isync, 0x60000000},
+ {&stlckPatch_isync, 0x60000000},
+ {&sulckPatch_isync, 0x60000000},
+ {&rwlePatch_isync, 0x60000000},
+ {&rwlsPatch_isync, 0x60000000},
+ {&rwlsePatch_isync, 0x60000000},
+ {&rwlesPatch_isync, 0x60000000},
+ {&rwtlePatch_isync, 0x60000000},
+ {&rwtlsPatch_isync, 0x60000000},
+ {&rwldPatch_isync, 0x60000000},
+ {&hwulckPatch_eieio, 0x60000000},
+ {&mulckPatch_eieio, 0x60000000},
+ {&mulckePatch_eieio, 0x60000000},
+ {&sulckPatch_eieio, 0x60000000},
+ {&rwlesPatch_eieio, 0x60000000},
+ {&rwldPatch_eieio, 0x60000000},
+ {NULL, 0x00000000}
+};
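+
+/*
+ * Note on patch_up_table: 0x60000000 is the PowerPC no-op encoding
+ * (ori 0,0,0, i.e. "nop"). Each entry names a lock-path location that
+ * holds an isync or eieio barrier; when only one logical cpu is
+ * configured, ml_init_max_cpus() below overwrites those locations with
+ * nop, since the barriers only matter for cross-processor ordering.
+ */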
+
+extern int forcenap;
+extern boolean_t pmap_initialized;
/* Map memory map IO space */
vm_offset_t
ml_io_map(
vm_offset_t phys_addr,
vm_size_t size)
{
- return(io_map(phys_addr,size));
+ return(io_map(phys_addr,size,VM_WIMG_IO));
}
-/* static memory allocation */
+
+void ml_get_bouncepool_info(vm_offset_t *phys_addr, vm_size_t *size)
+{
+ *phys_addr = 0;
+ *size = 0;
+}
+
+
+/*
+ * Routine: ml_static_malloc
+ * Function: static memory allocation
+ */
vm_offset_t
ml_static_malloc(
vm_size_t size)
{
- extern vm_offset_t static_memory_end;
- extern boolean_t pmap_initialized;
vm_offset_t vaddr;
if (pmap_initialized)
return((vm_offset_t)NULL);
else {
vaddr = static_memory_end;
static_memory_end = round_page_32(vaddr+size);
return(vaddr);
}
}
+/*
+ * Routine: ml_static_ptovirt
+ * Function:
+ */
vm_offset_t
ml_static_ptovirt(
vm_offset_t paddr)
{
- extern vm_offset_t static_memory_end;
vm_offset_t vaddr;
/* Static memory is map V=R */
vaddr = paddr;
if ( (vaddr < static_memory_end) && (pmap_extract(kernel_pmap, vaddr) == paddr) )
return(vaddr);
else
return((vm_offset_t)NULL);
}
+/*
+ * Routine: ml_static_mfree
+ * Function:
+ */
void
ml_static_mfree(
vm_offset_t vaddr,
vm_size_t size)
{
vm_offset_t paddr_cur, vaddr_cur;
- for (vaddr_cur = round_page(vaddr);
- vaddr_cur < trunc_page(vaddr+size);
+ for (vaddr_cur = round_page_32(vaddr);
+ vaddr_cur < trunc_page_32(vaddr+size);
vaddr_cur += PAGE_SIZE) {
paddr_cur = pmap_extract(kernel_pmap, vaddr_cur);
if (paddr_cur != (vm_offset_t)NULL) {
vm_page_wire_count--;
- pmap_remove(kernel_pmap, vaddr_cur, vaddr_cur+PAGE_SIZE);
- vm_page_create(paddr_cur,paddr_cur+PAGE_SIZE);
+ pmap_remove(kernel_pmap, (addr64_t)vaddr_cur, (addr64_t)(vaddr_cur+PAGE_SIZE));
+ vm_page_create(paddr_cur>>12,(paddr_cur+PAGE_SIZE)>>12);
}
}
}
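+
+/*
+ * Note on ml_static_mfree(): the physical byte address is shifted right
+ * by 12 because vm_page_create() takes physical page numbers (4 KB pages)
+ * rather than byte addresses, while pmap_remove() takes 64-bit addr64_t
+ * byte addresses, hence the explicit casts above.
+ */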
-/* virtual to physical on wired pages */
+/*
+ * Routine: ml_vtophys
+ * Function: virtual to physical on static pages
+ */
vm_offset_t ml_vtophys(
vm_offset_t vaddr)
{
return(pmap_extract(kernel_pmap, vaddr));
}
-/* Initialize Interrupt Handler */
+/*
+ * Routine: ml_install_interrupt_handler
+ * Function: Initialize Interrupt Handler
+ */
void ml_install_interrupt_handler(
void *nub,
int source,
void *target,
IOInterruptHandler handler,
void *refCon)
{
- int current_cpu;
- boolean_t current_state;
+ struct per_proc_info *proc_info;
+ boolean_t current_state;
- current_cpu = cpu_number();
current_state = ml_get_interrupts_enabled();
+ proc_info = getPerProc();
- per_proc_info[current_cpu].interrupt_nub = nub;
- per_proc_info[current_cpu].interrupt_source = source;
- per_proc_info[current_cpu].interrupt_target = target;
- per_proc_info[current_cpu].interrupt_handler = handler;
- per_proc_info[current_cpu].interrupt_refCon = refCon;
+ proc_info->interrupt_nub = nub;
+ proc_info->interrupt_source = source;
+ proc_info->interrupt_target = target;
+ proc_info->interrupt_handler = handler;
+ proc_info->interrupt_refCon = refCon;
- per_proc_info[current_cpu].interrupts_enabled = TRUE;
+ proc_info->interrupts_enabled = TRUE;
(void) ml_set_interrupts_enabled(current_state);
- initialize_screen(0, kPEAcquireScreen);
+ initialize_screen(NULL, kPEAcquireScreen);
}
-/* Initialize Interrupts */
-void ml_init_interrupt(void)
-{
- int current_cpu;
- boolean_t current_state;
+/*
+ * Routine: ml_nofault_copy
+ * Function: Perform a physical mode copy if the source and
+ * destination have valid translations in the kernel pmap.
+ * If translations are present, they are assumed to
+ * be wired; i.e. no attempt is made to guarantee that the
+ * translations obtained remain valid for
+ * the duration of their use.
+ */
- current_state = ml_get_interrupts_enabled();
+vm_size_t ml_nofault_copy(
+ vm_offset_t virtsrc, vm_offset_t virtdst, vm_size_t size)
+{
+ addr64_t cur_phys_dst, cur_phys_src;
+ uint32_t count, pindex, nbytes = 0;
+
+ while (size > 0) {
+ if (!(cur_phys_src = kvtophys(virtsrc)))
+ break;
+ if (!(cur_phys_dst = kvtophys(virtdst)))
+ break;
+ if (!mapping_phys_lookup((cur_phys_src>>12), &pindex) ||
+ !mapping_phys_lookup((cur_phys_dst>>12), &pindex))
+ break;
+ count = PAGE_SIZE - (cur_phys_src & PAGE_MASK);
+ if (count > (PAGE_SIZE - (cur_phys_dst & PAGE_MASK)))
+ count = PAGE_SIZE - (cur_phys_dst & PAGE_MASK);
+ if (count > size)
+ count = size;
+
+ bcopy_phys(cur_phys_src, cur_phys_dst, count);
+
+ nbytes += count;
+ virtsrc += count;
+ virtdst += count;
+ size -= count;
+ }
- current_cpu = cpu_number();
- per_proc_info[current_cpu].interrupts_enabled = TRUE;
- (void) ml_set_interrupts_enabled(current_state);
+ return nbytes;
}
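+
+/*
+ * Minimal usage sketch for ml_nofault_copy() (hypothetical caller; src_va,
+ * dst_va, rec and error stand in for real variables): copy a fixed-size
+ * record and treat a short count as "a page had no valid translation".
+ *
+ *   vm_size_t copied = ml_nofault_copy(src_va, dst_va, sizeof (rec));
+ *   if (copied != sizeof (rec))
+ *       error = KERN_FAILURE;
+ */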
-boolean_t fake_get_interrupts_enabled(void)
+/*
+ * Routine: ml_init_interrupt
+ * Function: Initialize Interrupts
+ */
+void ml_init_interrupt(void)
{
- /*
- * The scheduler is not active on this cpu. There is no need to disable
- * preemption. The current thread wont be dispatched on anhother cpu.
- */
- return((per_proc_info[cpu_number()].cpu_flags & turnEEon) != 0);
-}
+ boolean_t current_state;
-boolean_t fake_set_interrupts_enabled(boolean_t enable)
-{
- boolean_t interrupt_state_prev;
+ current_state = ml_get_interrupts_enabled();
- /*
- * The scheduler is not active on this cpu. There is no need to disable
- * preemption. The current thread wont be dispatched on anhother cpu.
- */
- interrupt_state_prev =
- (per_proc_info[cpu_number()].cpu_flags & turnEEon) != 0;
- if (interrupt_state_prev != enable)
- per_proc_info[cpu_number()].cpu_flags ^= turnEEon;
- return(interrupt_state_prev);
+ getPerProc()->interrupts_enabled = TRUE;
+ (void) ml_set_interrupts_enabled(current_state);
}
-/* Get Interrupts Enabled */
+/*
+ * Routine: ml_get_interrupts_enabled
+ * Function: Get Interrupts Enabled
+ */
boolean_t ml_get_interrupts_enabled(void)
-{
- if (per_proc_info[cpu_number()].interrupts_enabled == TRUE)
- return(get_interrupts_enabled());
- else
- return(fake_get_interrupts_enabled());
-}
-
-boolean_t get_interrupts_enabled(void)
{
return((mfmsr() & MASK(MSR_EE)) != 0);
}
-/* Check if running at interrupt context */
+/*
+ * Routine: ml_at_interrupt_context
+ * Function: Check if running at interrupt context
+ */
boolean_t ml_at_interrupt_context(void)
{
boolean_t ret;
boolean_t current_state;
current_state = ml_set_interrupts_enabled(FALSE);
- ret = (per_proc_info[cpu_number()].istackptr == 0);
+ ret = (getPerProc()->istackptr == 0);
ml_set_interrupts_enabled(current_state);
return(ret);
}
-/* Generate a fake interrupt */
+/*
+ * Routine: ml_cause_interrupt
+ * Function: Generate a fake interrupt
+ */
void ml_cause_interrupt(void)
{
CreateFakeIO();
}
+/*
+ * Routine: ml_thread_policy
+ * Function:
+ */
void ml_thread_policy(
thread_t thread,
- unsigned policy_id,
+__unused unsigned policy_id,
unsigned policy_info)
{
- if ((policy_id == MACHINE_GROUP) &&
- ((per_proc_info[0].pf.Available) & pfSMPcap))
- thread_bind(thread, master_processor);
-
if (policy_info & MACHINE_NETWORK_WORKLOOP) {
spl_t s = splsched();
thread_lock(thread);
- thread->sched_mode |= TH_MODE_FORCEDPREEMPT;
set_priority(thread, thread->priority + 1);
thread_unlock(thread);
}
}
-void machine_idle(void)
-{
- if (per_proc_info[cpu_number()].interrupts_enabled == TRUE) {
- int cur_decr;
-
- machine_idle_ppc();
-
- /*
- * protect against a lost decrementer trap
- * if the current decrementer value is negative
- * by more than 10 ticks, re-arm it since it's
- * unlikely to fire at this point... a hardware
- * interrupt got us out of machine_idle and may
- * also be contributing to this state
- */
- cur_decr = isync_mfdec();
-
- if (cur_decr < -10) {
- mtdec(1);
- }
- }
-}
-
+/*
+ * Routine: machine_signal_idle
+ * Function:
+ */
void
machine_signal_idle(
processor_t processor)
{
- (void)cpu_signal(processor->slot_num, SIGPwake, 0, 0);
+ struct per_proc_info *proc_info;
+
+ proc_info = PROCESSOR_TO_PER_PROC(processor);
+
+ if (proc_info->pf.Available & (pfCanDoze|pfWillNap))
+ (void)cpu_signal(proc_info->cpu_number, SIGPwake, 0, 0);
}
+/*
+ * Routine: ml_processor_register
+ * Function:
+ */
kern_return_t
ml_processor_register(
- ml_processor_info_t *processor_info,
- processor_t *processor,
- ipi_handler_t *ipi_handler)
+ ml_processor_info_t *in_processor_info,
+ processor_t *processor_out,
+ ipi_handler_t *ipi_handler)
{
- kern_return_t ret;
- int target_cpu;
-
- if (processor_info->boot_cpu == FALSE) {
- if (cpu_register(&target_cpu) != KERN_SUCCESS)
+ struct per_proc_info *proc_info;
+ int donap;
+ boolean_t current_state;
+ boolean_t boot_processor;
+
+ if (in_processor_info->boot_cpu == FALSE) {
+ if (spsLockInit == 0) {
+ spsLockInit = 1;
+ simple_lock_init(&spsLock, 0);
+ }
+ boot_processor = FALSE;
+ proc_info = cpu_per_proc_alloc();
+ if (proc_info == (struct per_proc_info *)NULL)
return KERN_FAILURE;
+ proc_info->pp_cbfr = console_per_proc_alloc(FALSE);
+ if (proc_info->pp_cbfr == (void *)NULL)
+ goto processor_register_error;
} else {
- /* boot_cpu is always 0 */
- target_cpu= 0;
+ boot_processor = TRUE;
+ proc_info = PerProcTable[master_cpu].ppe_vaddr;
}
- per_proc_info[target_cpu].cpu_id = processor_info->cpu_id;
- per_proc_info[target_cpu].start_paddr = processor_info->start_paddr;
+ proc_info->pp_chud = chudxnu_per_proc_alloc(boot_processor);
+ if (proc_info->pp_chud == (void *)NULL)
+ goto processor_register_error;
- if(per_proc_info[target_cpu].pf.Available & pfCanNap)
- if(processor_info->supports_nap)
- per_proc_info[target_cpu].pf.Available |= pfWillNap;
+ if (!boot_processor)
+ if (cpu_per_proc_register(proc_info) != KERN_SUCCESS)
+ goto processor_register_error;
- if(processor_info->time_base_enable != (void(*)(cpu_id_t, boolean_t ))NULL)
- per_proc_info[target_cpu].time_base_enable = processor_info->time_base_enable;
+ proc_info->cpu_id = in_processor_info->cpu_id;
+ proc_info->start_paddr = in_processor_info->start_paddr;
+ if(in_processor_info->time_base_enable != (void(*)(cpu_id_t, boolean_t ))NULL)
+ proc_info->time_base_enable = in_processor_info->time_base_enable;
else
- per_proc_info[target_cpu].time_base_enable = (void(*)(cpu_id_t, boolean_t ))NULL;
-
- if(target_cpu == cpu_number())
- __asm__ volatile("mtsprg 2,%0" : : "r" (per_proc_info[target_cpu].pf.Available)); /* Set live value */
+ proc_info->time_base_enable = (void(*)(cpu_id_t, boolean_t ))NULL;
+
+ if((proc_info->pf.pfPowerModes & pmType) == pmPowerTune) {
+ proc_info->pf.pfPowerTune0 = in_processor_info->power_mode_0;
+ proc_info->pf.pfPowerTune1 = in_processor_info->power_mode_1;
+ }
+
+ donap = in_processor_info->supports_nap; /* Assume we use requested nap */
+ if(forcenap) donap = forcenap - 1; /* If there was an override, use that */
+
+ if((proc_info->pf.Available & pfCanNap)
+ && (donap)) {
+ proc_info->pf.Available |= pfWillNap;
+ current_state = ml_set_interrupts_enabled(FALSE);
+ if(proc_info == getPerProc())
+ __asm__ volatile("mtsprg 2,%0" : : "r" (proc_info->pf.Available)); /* Set live value */
+ (void) ml_set_interrupts_enabled(current_state);
+ }
- *processor = cpu_to_processor(target_cpu);
+ if (!boot_processor) {
+ (void)hw_atomic_add(&saveanchor.savetarget, FreeListMin); /* saveareas for this processor */
+ processor_init((struct processor *)proc_info->processor,
+ proc_info->cpu_number, processor_pset(master_processor));
+ }
+
+ *processor_out = (struct processor *)proc_info->processor;
*ipi_handler = cpu_signal_handler;
return KERN_SUCCESS;
+
+processor_register_error:
+ if (proc_info->pp_cbfr != (void *)NULL)
+ console_per_proc_free(proc_info->pp_cbfr);
+ if (proc_info->pp_chud != (void *)NULL)
+ chudxnu_per_proc_free(proc_info->pp_chud);
+ if (!boot_processor)
+ cpu_per_proc_free(proc_info);
+ return KERN_FAILURE;
}
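+
+/*
+ * Note on the error path above: processor_register_error frees whatever
+ * was allocated before the failure, and frees the per_proc structure
+ * itself only for non-boot processors, since the boot processor uses the
+ * preexisting PerProcTable[master_cpu] entry.
+ */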
+/*
+ * Routine: ml_enable_nap
+ * Function:
+ */
boolean_t
ml_enable_nap(int target_cpu, boolean_t nap_enabled)
{
- boolean_t prev_value = (per_proc_info[target_cpu].pf.Available & pfCanNap) && (per_proc_info[target_cpu].pf.Available & pfWillNap);
+ struct per_proc_info *proc_info;
+ boolean_t prev_value;
+ boolean_t current_state;
+
+ proc_info = PerProcTable[target_cpu].ppe_vaddr;
+
+ prev_value = (proc_info->pf.Available & pfCanNap) && (proc_info->pf.Available & pfWillNap);
- if(per_proc_info[target_cpu].pf.Available & pfCanNap) { /* Can the processor nap? */
- if (nap_enabled) per_proc_info[target_cpu].pf.Available |= pfWillNap; /* Is nap supported on this machine? */
- else per_proc_info[target_cpu].pf.Available &= ~pfWillNap; /* Clear if not */
+ if(forcenap) nap_enabled = forcenap - 1; /* If we are to force nap on or off, do it */
+
+ if(proc_info->pf.Available & pfCanNap) { /* Can the processor nap? */
+ if (nap_enabled) proc_info->pf.Available |= pfWillNap; /* Is nap supported on this machine? */
+ else proc_info->pf.Available &= ~pfWillNap; /* Clear if not */
}
- if(target_cpu == cpu_number())
- __asm__ volatile("mtsprg 2,%0" : : "r" (per_proc_info[target_cpu].pf.Available)); /* Set live value */
-
+ current_state = ml_set_interrupts_enabled(FALSE);
+ if(proc_info == getPerProc())
+ __asm__ volatile("mtsprg 2,%0" : : "r" (proc_info->pf.Available)); /* Set live value */
+ (void) ml_set_interrupts_enabled(current_state);
+
return (prev_value);
}
+/*
+ * Routine: ml_init_max_cpus
+ * Function:
+ */
+void
+ml_init_max_cpus(unsigned int max_cpus)
+{
+ boolean_t current_state;
+
+ current_state = ml_set_interrupts_enabled(FALSE);
+ if (max_cpus_initialized != MAX_CPUS_SET) {
+ if (max_cpus > 0 && max_cpus <= MAX_CPUS) {
+ /*
+ * Note: max_ncpus is the maximum number
+ * that the kernel supports or that the "cpus="
+ * boot-arg has set. Here we take the minimum.
+ */
+ machine_info.max_cpus = MIN(max_cpus, max_ncpus);
+ machine_info.physical_cpu_max = max_cpus;
+ machine_info.logical_cpu_max = max_cpus;
+ }
+ if (max_cpus_initialized == MAX_CPUS_WAIT)
+ wakeup((event_t)&max_cpus_initialized);
+ max_cpus_initialized = MAX_CPUS_SET;
+ }
+
+ if (machine_info.logical_cpu_max == 1) {
+ struct patch_up *patch_up_ptr = &patch_up_table[0];
+
+ while (patch_up_ptr->addr != NULL) {
+ /*
+ * Patch for V=R kernel text section
+ */
+ bcopy_phys((addr64_t)((unsigned int)(&patch_up_ptr->data)),
+ (addr64_t)((unsigned int)(patch_up_ptr->addr)), 4);
+ sync_cache64((addr64_t)((unsigned int)(patch_up_ptr->addr)),4);
+ patch_up_ptr++;
+ }
+ }
+
+ (void) ml_set_interrupts_enabled(current_state);
+}
+
+/*
+ * Routine: ml_get_max_cpus
+ * Function:
+ */
+unsigned int
+ml_get_max_cpus(void)
+{
+ boolean_t current_state;
+
+ current_state = ml_set_interrupts_enabled(FALSE);
+ if (max_cpus_initialized != MAX_CPUS_SET) {
+ max_cpus_initialized = MAX_CPUS_WAIT;
+ assert_wait((event_t)&max_cpus_initialized, THREAD_UNINT);
+ (void)thread_block(THREAD_CONTINUE_NULL);
+ }
+ (void) ml_set_interrupts_enabled(current_state);
+ return(machine_info.max_cpus);
+}
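+
+/*
+ * Note: ml_init_max_cpus() and ml_get_max_cpus() form a simple handshake
+ * on max_cpus_initialized: a caller of ml_get_max_cpus() that runs before
+ * the cpu count is known sets MAX_CPUS_WAIT and blocks; ml_init_max_cpus()
+ * issues the wakeup once machine_info has been filled in and the state
+ * becomes MAX_CPUS_SET.
+ */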
+
+/*
+ * This is called from the machine-independent routine cpu_up()
+ * to perform machine-dependent info updates.
+ */
+void
+ml_cpu_up(void)
+{
+ (void)hw_atomic_add(&machine_info.physical_cpu, 1);
+ (void)hw_atomic_add(&machine_info.logical_cpu, 1);
+}
+
+/*
+ * This is called from the machine-independent routine cpu_down()
+ * to perform machine-dependent info updates.
+ */
void
-ml_ppc_get_info(ml_ppc_cpu_info_t *cpu_info)
+ml_cpu_down(void)
{
- if (cpu_info == 0) return;
+ (void)hw_atomic_sub(&machine_info.physical_cpu, 1);
+ (void)hw_atomic_sub(&machine_info.logical_cpu, 1);
+}
+
+/*
+ * Routine: ml_cpu_get_info
+ * Function:
+ */
+void
+ml_cpu_get_info(ml_cpu_info_t *ml_cpu_info)
+{
+ struct per_proc_info *proc_info;
+
+ if (ml_cpu_info == 0) return;
- cpu_info->vector_unit = (per_proc_info[0].pf.Available & pfAltivec) != 0;
- cpu_info->cache_line_size = per_proc_info[0].pf.lineSize;
- cpu_info->l1_icache_size = per_proc_info[0].pf.l1iSize;
- cpu_info->l1_dcache_size = per_proc_info[0].pf.l1dSize;
+ proc_info = PerProcTable[master_cpu].ppe_vaddr;
+ ml_cpu_info->vector_unit = (proc_info->pf.Available & pfAltivec) != 0;
+ ml_cpu_info->cache_line_size = proc_info->pf.lineSize;
+ ml_cpu_info->l1_icache_size = proc_info->pf.l1iSize;
+ ml_cpu_info->l1_dcache_size = proc_info->pf.l1dSize;
- if (per_proc_info[0].pf.Available & pfL2) {
- cpu_info->l2_settings = per_proc_info[0].pf.l2cr;
- cpu_info->l2_cache_size = per_proc_info[0].pf.l2Size;
+ if (proc_info->pf.Available & pfL2) {
+ ml_cpu_info->l2_settings = proc_info->pf.l2cr;
+ ml_cpu_info->l2_cache_size = proc_info->pf.l2Size;
} else {
- cpu_info->l2_settings = 0;
- cpu_info->l2_cache_size = 0xFFFFFFFF;
+ ml_cpu_info->l2_settings = 0;
+ ml_cpu_info->l2_cache_size = 0xFFFFFFFF;
}
- if (per_proc_info[0].pf.Available & pfL3) {
- cpu_info->l3_settings = per_proc_info[0].pf.l3cr;
- cpu_info->l3_cache_size = per_proc_info[0].pf.l3Size;
+ if (proc_info->pf.Available & pfL3) {
+ ml_cpu_info->l3_settings = proc_info->pf.l3cr;
+ ml_cpu_info->l3_cache_size = proc_info->pf.l3Size;
} else {
- cpu_info->l3_settings = 0;
- cpu_info->l3_cache_size = 0xFFFFFFFF;
+ ml_cpu_info->l3_settings = 0;
+ ml_cpu_info->l3_cache_size = 0xFFFFFFFF;
}
}
+/*
+ * Routine: ml_enable_cache_level
+ * Function:
+ */
#define l2em 0x80000000
#define l3em 0x80000000
-
-extern int real_ncpus;
-
int
ml_enable_cache_level(int cache_level, int enable)
{
int old_mode;
unsigned long available, ccr;
+ struct per_proc_info *proc_info;
- if (real_ncpus != 1) return -1;
+ if (real_ncpus != 1) return -1; /* XXX: This test is not safe */
- available = per_proc_info[0].pf.Available;
+ proc_info = PerProcTable[master_cpu].ppe_vaddr;
+ available = proc_info->pf.Available;
if ((cache_level == 2) && (available & pfL2)) {
- ccr = per_proc_info[0].pf.l2cr;
+ ccr = proc_info->pf.l2cr;
old_mode = (ccr & l2em) ? TRUE : FALSE;
if (old_mode != enable) {
- if (enable) ccr = per_proc_info[0].pf.l2crOriginal;
+ if (enable) ccr = proc_info->pf.l2crOriginal;
else ccr = 0;
- per_proc_info[0].pf.l2cr = ccr;
+ proc_info->pf.l2cr = ccr;
cacheInit();
}
}
return old_mode;
}
if ((cache_level == 3) && (available & pfL3)) {
- ccr = per_proc_info[0].pf.l3cr;
+ ccr = proc_info->pf.l3cr;
old_mode = (ccr & l3em) ? TRUE : FALSE;
if (old_mode != enable) {
- if (enable) ccr = per_proc_info[0].pf.l3crOriginal;
+ if (enable) ccr = proc_info->pf.l3crOriginal;
else ccr = 0;
- per_proc_info[0].pf.l3cr = ccr;
+ proc_info->pf.l3cr = ccr;
cacheInit();
}
return old_mode;
}
return -1;
}
+
+/*
+ * Routine: ml_set_processor_speed
+ * Function:
+ */
void
-init_ast_check(processor_t processor)
+ml_set_processor_speed(unsigned long speed)
+{
+ struct per_proc_info *proc_info;
+ uint32_t cpu;
+ kern_return_t result;
+ boolean_t current_state;
+ unsigned int i;
+
+ proc_info = PerProcTable[master_cpu].ppe_vaddr;
+
+ switch (proc_info->pf.pfPowerModes & pmType) { /* Figure specific type */
+ case pmDualPLL:
+
+ ml_set_processor_speed_dpll(speed);
+ break;
+
+ case pmDFS:
+
+ for (cpu = 0; cpu < real_ncpus; cpu++) {
+ /*
+ * cpu_signal() returns after .5ms if it fails to signal a running cpu
+ * retry cpu_signal() for .1s to deal with long interrupt latency at boot
+ */
+ for (i=200; i>0; i--) {
+ current_state = ml_set_interrupts_enabled(FALSE);
+ if (cpu != (unsigned)cpu_number()) {
+ if (!(PerProcTable[cpu].ppe_vaddr->cpu_flags & SignalReady))
+ /*
+ * Target cpu is off-line, skip
+ */
+ result = KERN_SUCCESS;
+ else {
+ simple_lock(&spsLock);
+ result = cpu_signal(cpu, SIGPcpureq, CPRQsps, speed);
+ if (result == KERN_SUCCESS)
+ thread_sleep_simple_lock(&spsLock, &spsLock, THREAD_UNINT);
+ simple_unlock(&spsLock);
+ }
+ } else {
+ ml_set_processor_speed_dfs(speed);
+ result = KERN_SUCCESS;
+ }
+ (void) ml_set_interrupts_enabled(current_state);
+ if (result == KERN_SUCCESS)
+ break;
+ }
+ if (result != KERN_SUCCESS)
+ panic("ml_set_processor_speed(): Fail to set cpu%d speed\n", cpu);
+ }
+ break;
+
+ case pmPowerTune:
+
+ ml_set_processor_speed_powertune(speed);
+ break;
+
+ default:
+ break;
+
+ }
+ return;
+}
+
+/*
+ * Routine: ml_set_processor_speed_slave
+ * Function:
+ */
+void
+ml_set_processor_speed_slave(unsigned long speed)
+{
+ ml_set_processor_speed_dfs(speed);
+
+ simple_lock(&spsLock);
+ thread_wakeup(&spsLock);
+ simple_unlock(&spsLock);
+}
+
+/*
+ * Routine: ml_init_lock_timeout
+ * Function:
+ */
+void
+ml_init_lock_timeout(void)
+{
+ uint64_t abstime;
+ uint32_t mtxspin;
+
+ nanoseconds_to_absolutetime(NSEC_PER_SEC>>2, &abstime);
+ LockTimeOut = (unsigned int)abstime;
+
+ if (PE_parse_boot_argn("mtxspin", &mtxspin, sizeof (mtxspin))) {
+ if (mtxspin > USEC_PER_SEC>>4)
+ mtxspin = USEC_PER_SEC>>4;
+ nanoseconds_to_absolutetime(mtxspin*NSEC_PER_USEC, &abstime);
+ } else {
+ nanoseconds_to_absolutetime(10*NSEC_PER_USEC, &abstime);
+ }
+ MutexSpin = (unsigned int)abstime;
+}
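+
+/*
+ * For reference on the values above: NSEC_PER_SEC >> 2 is 250,000,000 ns,
+ * so LockTimeOut is 250 ms expressed in absolute-time units (the static
+ * initializer near the top of the file is just a placeholder until this
+ * runs). The "mtxspin" boot-arg is taken in microseconds and capped at
+ * USEC_PER_SEC >> 4 (62,500 us); without it the mutex spin window
+ * defaults to 10 us.
+ */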
+
+/*
+ * Routine: init_ast_check
+ * Function:
+ */
+void
+init_ast_check(
+ __unused processor_t processor)
{}
-
+
+/*
+ * Routine: cause_ast_check
+ * Function:
+ */
void
cause_ast_check(
processor_t processor)
{
- if ( processor != current_processor() &&
- per_proc_info[processor->slot_num].interrupts_enabled == TRUE )
- cpu_signal(processor->slot_num, SIGPast, NULL, NULL);
+ struct per_proc_info *proc_info;
+
+ proc_info = PROCESSOR_TO_PER_PROC(processor);
+
+ if (proc_info != getPerProc()
+ && proc_info->interrupts_enabled == TRUE)
+ cpu_signal(proc_info->cpu_number, SIGPast, (unsigned int)NULL, (unsigned int)NULL);
}
+/*
+ * Routine: machine_processor_shutdown
+ * Function:
+ */
thread_t
-switch_to_shutdown_context(
- thread_t thread,
- void (*doshutdown)(processor_t),
- processor_t processor)
+machine_processor_shutdown(
+ __unused thread_t thread,
+ __unused void (*doshutdown)(processor_t),
+ __unused processor_t processor)
{
CreateShutdownCTX();
- return((thread_t)(per_proc_info[cpu_number()].old_thread));
+ return((thread_t)(getPerProc()->old_thread));
}
-int
-set_be_bit()
-{
+
+void ml_mem_backoff(void) {
+
+ if(warFlags & warDisMBpoff) return; /* If backoff disabled, exit */
+
+ __asm__ volatile("sync");
+ __asm__ volatile("isync");
- int mycpu;
- boolean_t current_state;
+ return;
+}
- current_state = ml_set_interrupts_enabled(FALSE); /* Can't allow interruptions when mucking with per_proc flags */
- mycpu = cpu_number();
- per_proc_info[mycpu].cpu_flags |= traceBE;
- (void) ml_set_interrupts_enabled(current_state);
- return(1);
+
+
+/*
+ * Stubs for CPU Stepper
+ */
+void
+machine_run_count(__unused uint32_t count)
+{
}
-int
-clr_be_bit()
+boolean_t
+machine_processor_is_inactive(__unused processor_t processor)
{
- int mycpu;
- boolean_t current_state;
+ return(FALSE);
+}
- current_state = ml_set_interrupts_enabled(FALSE); /* Can't allow interruptions when mucking with per_proc flags */
- mycpu = cpu_number();
- per_proc_info[mycpu].cpu_flags &= ~traceBE;
- (void) ml_set_interrupts_enabled(current_state);
- return(1);
+processor_t
+machine_choose_processor(__unused processor_set_t pset, processor_t processor)
+{
+ return (processor);
}
-int
-be_tracing()
+vm_offset_t ml_stack_remaining(void)
{
- int mycpu = cpu_number();
- return(per_proc_info[mycpu].cpu_flags & traceBE);
+ uintptr_t local = (uintptr_t) &local;
+
+ if (ml_at_interrupt_context()) {
+ return (local - (getPerProc()->intstack_top_ss - INTSTACK_SIZE));
+ } else {
+ return (local - current_thread()->kernel_stack);
+ }
}
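+
+/*
+ * Note: ml_stack_remaining() approximates the current stack pointer by
+ * taking the address of a local, then subtracts the base of whichever
+ * stack is active: the per-cpu interrupt stack (intstack_top_ss minus
+ * INTSTACK_SIZE) at interrupt context, otherwise the current thread's
+ * kernel stack.
+ */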
+boolean_t machine_timeout_suspended(void) {
+ return FALSE;
+}