diff --git a/osfmk/ppc/machine_routines.c b/osfmk/ppc/machine_routines.c
index 7f127ee81e844fa5b832e8027ffade5102fc4f5b..d4fb8e1ca5493cdcfd4479693c62a955808273b0 100644
--- a/osfmk/ppc/machine_routines.c
+++ b/osfmk/ppc/machine_routines.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
+ * Copyright (c) 2000-2009 Apple Inc. All rights reserved.
  *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
  * 
@@ -50,9 +50,7 @@
 unsigned int           LockTimeOut = 1250000000;
 unsigned int           MutexSpin = 0;
 
-decl_mutex_data(static,mcpus_lock);
-unsigned int           mcpus_lock_initialized = 0;
-unsigned int           mcpus_state = 0;
+static int max_cpus_initialized = 0;
 
 uint32_t warFlags = 0;
 #define warDisMBpoff   0x80000000
@@ -495,35 +493,30 @@ ml_enable_nap(int target_cpu, boolean_t nap_enabled)
  *     Function:
  */
 void
-ml_init_max_cpus(unsigned int mcpus)
+ml_init_max_cpus(unsigned int max_cpus)
 {
+       boolean_t current_state;
 
-       if (hw_compare_and_store(0,1,&mcpus_lock_initialized))
-               mutex_init(&mcpus_lock,0);
-       mutex_lock(&mcpus_lock);
-       if ((mcpus_state & MAX_CPUS_SET)
-           || (mcpus == 0)
-           || (mcpus > MAX_CPUS))
-               panic("ml_init_max_cpus(): Invalid call, max_cpus: %d\n", mcpus);
-
-       machine_info.max_cpus = mcpus;
-       machine_info.physical_cpu_max = mcpus;
-       machine_info.logical_cpu_max = mcpus;
-       mcpus_state |= MAX_CPUS_SET;
-
-       if (mcpus_state & MAX_CPUS_WAIT) {
-               mcpus_state |= ~MAX_CPUS_WAIT;
-               thread_wakeup((event_t)&mcpus_state);
+       current_state = ml_set_interrupts_enabled(FALSE);
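+       /*
+        * Publish the CPU counts with interrupts disabled, and wake any
+        * thread blocked in ml_get_max_cpus() waiting for them.
+        */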
+       if (max_cpus_initialized != MAX_CPUS_SET) {
+               if (max_cpus > 0 && max_cpus <= MAX_CPUS) {
+                       /*
+                        * Note: max_ncpus is the maximum number of CPUs
+                        * that the kernel supports or that the "cpus="
+                        * boot-arg has set; here we take the minimum.
+                        */
+                       machine_info.max_cpus = MIN(max_cpus, max_ncpus);
+                       machine_info.physical_cpu_max = max_cpus;
+                       machine_info.logical_cpu_max = max_cpus;
+               }
+               if (max_cpus_initialized == MAX_CPUS_WAIT)
+                       wakeup((event_t)&max_cpus_initialized);
+               max_cpus_initialized = MAX_CPUS_SET;
        }
-       mutex_unlock(&mcpus_lock);
-
+
        if (machine_info.logical_cpu_max == 1) {
-               struct patch_up *patch_up_ptr;
-               boolean_t current_state;
-
-               patch_up_ptr = &patch_up_table[0];
+               struct patch_up *patch_up_ptr = &patch_up_table[0];
 
-               current_state = ml_set_interrupts_enabled(FALSE);
                while (patch_up_ptr->addr != NULL) {
                        /*
                         * Patch for V=R kernel text section
@@ -533,8 +526,9 @@ ml_init_max_cpus(unsigned int mcpus)
                        sync_cache64((addr64_t)((unsigned int)(patch_up_ptr->addr)),4);
                        patch_up_ptr++;
                }
-               (void) ml_set_interrupts_enabled(current_state);
        }
+
+       (void) ml_set_interrupts_enabled(current_state);
 }
 
 /*
@@ -544,15 +538,15 @@ ml_init_max_cpus(unsigned int mcpus)
 unsigned int
 ml_get_max_cpus(void)
 {
-       if (hw_compare_and_store(0,1,&mcpus_lock_initialized))
-               mutex_init(&mcpus_lock,0);
-       mutex_lock(&mcpus_lock);
-       if (!(mcpus_state & MAX_CPUS_SET)) {
-               mcpus_state |= MAX_CPUS_WAIT;
-               thread_sleep_mutex((event_t)&mcpus_state,
-                                        &mcpus_lock, THREAD_UNINT);
+       boolean_t current_state;
+
+       current_state = ml_set_interrupts_enabled(FALSE);
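+       /*
+        * If ml_init_max_cpus() has not published the counts yet, mark
+        * that a waiter is present and block until it wakes us.
+        */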
+       if (max_cpus_initialized != MAX_CPUS_SET) {
+               max_cpus_initialized = MAX_CPUS_WAIT;
+               assert_wait((event_t)&max_cpus_initialized, THREAD_UNINT);
+               (void)thread_block(THREAD_CONTINUE_NULL);
        }
-       mutex_unlock(&mcpus_lock);
+       (void) ml_set_interrupts_enabled(current_state);
        return(machine_info.max_cpus);
 }
 
@@ -754,7 +748,7 @@ ml_init_lock_timeout(void)
        nanoseconds_to_absolutetime(NSEC_PER_SEC>>2, &abstime);
        LockTimeOut = (unsigned int)abstime;
 
-       if (PE_parse_boot_arg("mtxspin", &mtxspin)) {
+       if (PE_parse_boot_argn("mtxspin", &mtxspin, sizeof (mtxspin))) {
                if (mtxspin > USEC_PER_SEC>>4)
                        mtxspin =  USEC_PER_SEC>>4;
                nanoseconds_to_absolutetime(mtxspin*NSEC_PER_USEC, &abstime);
@@ -815,3 +809,39 @@ void ml_mem_backoff(void) {
        return;
 }
 
+
+
+/*
+ * Stubs for the CPU stepper: these scheduler callouts do nothing on
+ * PPC.  The run count is ignored, no processor is ever reported
+ * inactive, and the caller's suggested processor is returned unchanged.
+ */
+void
+machine_run_count(__unused uint32_t count)
+{
+}
+
+boolean_t
+machine_processor_is_inactive(__unused processor_t processor)
+{
+    return(FALSE);
+}
+
+processor_t
+machine_choose_processor(__unused processor_set_t pset, processor_t processor)
+{
+    return (processor);
+}
+
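+/*
+ * Return the number of bytes left on the current stack, measured from
+ * the address of a local variable down to the base of either the
+ * per-processor interrupt stack (at interrupt context) or the current
+ * thread's kernel stack.
+ */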
+vm_offset_t ml_stack_remaining(void)
+{
+       uintptr_t local = (uintptr_t) &local;
+
+       if (ml_at_interrupt_context()) {
+           return (local - (getPerProc()->intstack_top_ss - INTSTACK_SIZE));
+       } else {
+           return (local - current_thread()->kernel_stack);
+       }
+}
+
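+/*
+ * Timeouts are never reported as suspended on this platform.
+ */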
+boolean_t machine_timeout_suspended(void) {
+       return FALSE;
+}