#include <kern/processor.h>
unsigned int max_cpus_initialized = 0;
-extern int forcenap;
#define MAX_CPUS_SET 0x1
#define MAX_CPUS_WAIT 0x2
return((vm_offset_t)NULL);
else {
vaddr = static_memory_end;
- static_memory_end = round_page_32(vaddr+size);
+ static_memory_end = round_page(vaddr+size);
return(vaddr);
}
}
{
vm_offset_t paddr_cur, vaddr_cur;
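+	/* Walk each page in the page-aligned range; unwire, unmap, and return to the free list every page still mapped */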
- for (vaddr_cur = round_page_32(vaddr);
- vaddr_cur < trunc_page_32(vaddr+size);
+ for (vaddr_cur = round_page(vaddr);
+ vaddr_cur < trunc_page(vaddr+size);
vaddr_cur += PAGE_SIZE) {
paddr_cur = pmap_extract(kernel_pmap, vaddr_cur);
if (paddr_cur != (vm_offset_t)NULL) {
vm_page_wire_count--;
- pmap_remove(kernel_pmap, (addr64_t)vaddr_cur, (addr64_t)(vaddr_cur+PAGE_SIZE));
- vm_page_create(paddr_cur>>12,(paddr_cur+PAGE_SIZE)>>12);
+ pmap_remove(kernel_pmap, vaddr_cur, vaddr_cur+PAGE_SIZE);
+ vm_page_create(paddr_cur,paddr_cur+PAGE_SIZE);
}
}
}
(void) ml_set_interrupts_enabled(current_state);
}
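+/*
+ * Return the software-maintained interrupt enable state (the turnEEon bit in
+ * cpu_flags); used by ml_get_interrupts_enabled() while interrupts_enabled is
+ * FALSE for this cpu.
+ */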
+boolean_t fake_get_interrupts_enabled(void)
+{
+ /*
+ * The scheduler is not active on this cpu. There is no need to disable
+ * preemption. The current thread won't be dispatched on another cpu.
+ */
+ return((per_proc_info[cpu_number()].cpu_flags & turnEEon) != 0);
+}
+
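+/*
+ * Set the software-maintained interrupt enable state (turnEEon) and return
+ * the previous value.
+ */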
+boolean_t fake_set_interrupts_enabled(boolean_t enable)
+{
+ boolean_t interrupt_state_prev;
+
+ /*
+ * The scheduler is not active on this cpu. There is no need to disable
+ * preemption. The current thread won't be dispatched on another cpu.
+ */
+ interrupt_state_prev =
+ (per_proc_info[cpu_number()].cpu_flags & turnEEon) != 0;
+ if (interrupt_state_prev != enable)
+ per_proc_info[cpu_number()].cpu_flags ^= turnEEon;
+ return(interrupt_state_prev);
+}
+
/* Get Interrupts Enabled */
boolean_t ml_get_interrupts_enabled(void)
+{
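+	/*
+	 * If this cpu's per_proc interrupts_enabled flag is set, read the real
+	 * MSR[EE] state; otherwise report the software-maintained (fake) state.
+	 */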
+ if (per_proc_info[cpu_number()].interrupts_enabled == TRUE)
+ return(get_interrupts_enabled());
+ else
+ return(fake_get_interrupts_enabled());
+}
+
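+/* Read the hardware interrupt enable state directly from MSR[EE] */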
+boolean_t get_interrupts_enabled(void)
{
return((mfmsr() & MASK(MSR_EE)) != 0);
}
ipi_handler_t *ipi_handler)
{
kern_return_t ret;
- int target_cpu, cpu;
- int donap;
+ int target_cpu;
if (processor_info->boot_cpu == FALSE) {
if (cpu_register(&target_cpu) != KERN_SUCCESS)
return KERN_FAILURE;
} else {
/* boot_cpu is always 0 */
- target_cpu = 0;
+ target_cpu = 0;
}
per_proc_info[target_cpu].cpu_id = processor_info->cpu_id;
per_proc_info[target_cpu].start_paddr = processor_info->start_paddr;
- donap = processor_info->supports_nap; /* Assume we use requested nap */
- if(forcenap) donap = forcenap - 1; /* If there was an override, use that */
-
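+	/* Nap only if the processor can nap and the registration requests it */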
if(per_proc_info[target_cpu].pf.Available & pfCanNap)
- if(donap)
+ if(processor_info->supports_nap)
per_proc_info[target_cpu].pf.Available |= pfWillNap;
if(processor_info->time_base_enable != (void(*)(cpu_id_t, boolean_t ))NULL)
{
boolean_t prev_value = (per_proc_info[target_cpu].pf.Available & pfCanNap) && (per_proc_info[target_cpu].pf.Available & pfWillNap);
- if(forcenap) nap_enabled = forcenap - 1; /* If we are to force nap on or off, do it */
-
if(per_proc_info[target_cpu].pf.Available & pfCanNap) { /* Can the processor nap? */
if (nap_enabled) per_proc_info[target_cpu].pf.Available |= pfWillNap; /* Is nap supported on this machine? */
else per_proc_info[target_cpu].pf.Available &= ~pfWillNap; /* Clear if not */
if(target_cpu == cpu_number())
__asm__ volatile("mtsprg 2,%0" : : "r" (per_proc_info[target_cpu].pf.Available)); /* Set live value */
-
+
return (prev_value);
}