[apple/xnu.git] / osfmk / kern / startup.c (xnu-1456.1.26)
index 54596deed7aa408dbe39184b0d86069bdce27927..fb673da769519a23dba6f0cd8cc90ef759b81478 100644 (file)
@@ -1,16 +1,19 @@
 /*
- * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 2000-2009 Apple Inc. All rights reserved.
  *
- * @APPLE_LICENSE_HEADER_START@
- * 
- * Copyright (c) 1999-2003 Apple Computer, Inc.  All Rights Reserved.
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
  * 
  * This file contains Original Code and/or Modifications of Original Code
  * as defined in and that are subject to the Apple Public Source License
  * Version 2.0 (the 'License'). You may not use this file except in
- * compliance with the License. Please obtain a copy of the License at
- * http://www.opensource.apple.com/apsl/ and read it before using this
- * file.
+ * compliance with the License. The rights granted to you under the License
+ * may not be used to create, or enable the creation or redistribution of,
+ * unlawful or unlicensed copies of an Apple operating system, or to
+ * circumvent, violate, or enable the circumvention or violation of, any
+ * terms of an Apple operating system software license agreement.
+ * 
+ * Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this file.
  * 
  * The Original Code and all software distributed under the License are
  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
@@ -20,7 +23,7 @@
  * Please see the License for the specific language governing rights and
  * limitations under the License.
  * 
- * @APPLE_LICENSE_HEADER_END@
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
  */
 /*
  * @OSF_COPYRIGHT@
  * any improvements or extensions that they make and grant Carnegie Mellon
  * the rights to redistribute these changes.
  */
+/*
+ * NOTICE: This file was modified by McAfee Research in 2004 to introduce
+ * support for mandatory and extensible security protections.  This notice
+ * is included in support of clause 2.2 (b) of the Apple Public License,
+ * Version 2.0.
+ */
 /*
  */
 
 #include <debug.h>
 #include <xpr_debug.h>
 #include <mach_kdp.h>
-#include <cpus.h>
-#include <mach_host.h>
 #include <norma_vm.h>
-#include <etap.h>
 
 #include <mach/boolean.h>
 #include <mach/machine.h>
+#include <mach/thread_act.h>
 #include <mach/task_special_ports.h>
 #include <mach/vm_param.h>
 #include <ipc/ipc_init.h>
 #include <kern/assert.h>
+#include <kern/mach_param.h>
 #include <kern/misc_protos.h>
 #include <kern/clock.h>
 #include <kern/cpu_number.h>
-#include <kern/etap_macros.h>
+#include <kern/ledger.h>
 #include <kern/machine.h>
 #include <kern/processor.h>
 #include <kern/sched_prim.h>
-#include <kern/mk_sp.h>
 #include <kern/startup.h>
 #include <kern/task.h>
 #include <kern/thread.h>
 #include <kern/timer.h>
-#include <kern/timer_call.h>
+#include <kern/wait_queue.h>
 #include <kern/xpr.h>
 #include <kern/zalloc.h>
+#include <kern/locks.h>
+#include <console/serial_protos.h>
 #include <vm/vm_kern.h>
 #include <vm/vm_init.h>
 #include <vm/vm_map.h>
 #include <vm/vm_object.h>
 #include <vm/vm_page.h>
 #include <vm/vm_pageout.h>
+#include <vm/vm_shared_region.h>
 #include <machine/pmap.h>
 #include <machine/commpage.h>
-#include <sys/version.h>
+#include <libkern/version.h>
+
+#if MACH_KDP
+#include <kdp/kdp.h>
+#endif
+
+#if CONFIG_MACF
+#include <security/mac_mach_internal.h>
+#endif
+
+#if CONFIG_COUNTERS
+#include <pmc/pmc.h>
+#endif
 
 #ifdef __ppc__
 #include <ppc/Firmware.h>
 #include <ppc/mappings.h>
 #endif
 
-/* Externs XXX */
-extern void    rtclock_reset(void);
+static void            kernel_bootstrap_thread(void);
+
+static void            load_context(
+                                       thread_t        thread);
+#if (defined(__i386__) || defined(__x86_64__)) && NCOPY_WINDOWS > 0
+extern void cpu_userwindow_init(int);
+extern void cpu_physwindow_init(int);
+#endif
+
+// libkern/OSKextLib.cpp
+extern void    OSKextRemoveKextBootstrap(void);
+
+void srv_setup(void);
+extern void bsd_srv_setup(int);
+extern unsigned int semaphore_max;
 
-/* Forwards */
-void           cpu_launch_first_thread(
-                       thread_t                        thread);
-void           start_kernel_threads(void);
-void        swapin_thread();
 
 /*
  *     Running in virtual memory, on the interrupt stack.
- *     Does not return.  Dispatches initial thread.
- *
- *     Assumes that master_cpu is set.
  */
+
+extern int srv;
+
 void
-setup_main(void)
+kernel_bootstrap(void)
 {
-       thread_t        startup_thread;
+       kern_return_t   result;
+       thread_t                thread;
 
-       sched_init();
+       printf("%s\n", version); /* log kernel version */
+
+#define kernel_bootstrap_kprintf(x...) /* kprintf("kernel_bootstrap: " x) */
+
+	/* i386_vm_init already checks for this; do it again anyway */
+	if (PE_parse_boot_argn("srv", &srv, sizeof (srv))) {
+		srv = 1;
+	}
+
+       srv_setup();
+
+       kernel_bootstrap_kprintf("calling lck_mod_init\n");
+       lck_mod_init();
+
+       kernel_bootstrap_kprintf("calling vm_mem_bootstrap\n");
        vm_mem_bootstrap();
-       ipc_bootstrap();
+
+       kernel_bootstrap_kprintf("calling vm_mem_init\n");
        vm_mem_init();
+
+       machine_info.memory_size = (uint32_t)mem_size;
+       machine_info.max_mem = max_mem;
+       machine_info.major_version = version_major;
+       machine_info.minor_version = version_minor;
+
+       kernel_bootstrap_kprintf("calling sched_init\n");
+       sched_init();
+
+       kernel_bootstrap_kprintf("calling wait_queue_bootstrap\n");
+       wait_queue_bootstrap();
+
+       kernel_bootstrap_kprintf("calling ipc_bootstrap\n");
+       ipc_bootstrap();
+
+#if CONFIG_MACF
+       mac_policy_init();
+#endif
+       kernel_bootstrap_kprintf("calling ipc_init\n");
        ipc_init();
 
        /*
         * As soon as the virtual memory system is up, we record
         * that this CPU is using the kernel pmap.
         */
+       kernel_bootstrap_kprintf("calling PMAP_ACTIVATE_KERNEL\n");
        PMAP_ACTIVATE_KERNEL(master_cpu);
 
-#ifdef __ppc__
+       kernel_bootstrap_kprintf("calling mapping_free_prime\n");
        mapping_free_prime();                                           /* Load up with temporary mapping blocks */
-#endif
 
+       kernel_bootstrap_kprintf("calling machine_init\n");
        machine_init();
-       kmod_init();
-       clock_init();
 
-       init_timers();
-       timer_call_initialize();
+       kernel_bootstrap_kprintf("calling clock_init\n");
+       clock_init();
 
-       machine_info.max_cpus = NCPUS;
-       machine_info.memory_size = mem_size;
-       machine_info.avail_cpus = 0;
-       machine_info.major_version = KERNEL_MAJOR_VERSION;
-       machine_info.minor_version = KERNEL_MINOR_VERSION;
 
        /*
         *      Initialize the IPC, task, and thread subsystems.
         */
+       kernel_bootstrap_kprintf("calling ledger_init\n");
        ledger_init();
+
+       kernel_bootstrap_kprintf("calling task_init\n");
        task_init();
-       act_init();
-       thread_init();
 
-       /*
-        *      Initialize the Event Trace Analysis Package.
-        *      Dynamic Phase: 2 of 2
-        */
-       etap_init_phase2();
+       kernel_bootstrap_kprintf("calling thread_init\n");
+       thread_init();
        
        /*
-        *      Create a kernel thread to start the other kernel
-        *      threads.
-        */
-       startup_thread = kernel_thread_with_priority(
-                                                                               kernel_task, MAXPRI_KERNEL,
-                                                                                       start_kernel_threads, TRUE, FALSE);
-       /*
-        * Pretend it is already running.
-        *
-        * We can do this without locking, because nothing
-        * else is running yet.
+        *      Create a kernel thread to execute the kernel bootstrap.
         */
-       startup_thread->state = TH_RUN;
-       hw_atomic_add(&startup_thread->processor_set->run_count, 1);
+       kernel_bootstrap_kprintf("calling kernel_thread_create\n");
+       result = kernel_thread_create((thread_continue_t)kernel_bootstrap_thread, NULL, MAXPRI_KERNEL, &thread);
 
-       /*
-        * Start the thread.
-        */
-       cpu_launch_first_thread(startup_thread);
+       if (result != KERN_SUCCESS) panic("kernel_bootstrap: result = %08X\n", result);
+
+       thread->state = TH_RUN;
+       thread_deallocate(thread);
+
+       kernel_bootstrap_kprintf("calling load_context - done\n");
+       load_context(thread);
        /*NOTREACHED*/
-       panic("cpu_launch_first_thread returns!");
 }
 
+int kth_started = 0;
+
 /*
- * Now running in a thread.  Create the rest of the kernel threads
- * and the bootstrap task.
+ * Now running in a thread.  Kick off other services,
+ * invoke user bootstrap, enter pageout loop.
  */
-void
-start_kernel_threads(void)
+static void
+kernel_bootstrap_thread(void)
 {
-       register int                            i;
-
-       thread_bind(current_thread(), cpu_to_processor(cpu_number()));
+       processor_t             processor = current_processor();
 
+#define kernel_bootstrap_thread_kprintf(x...) /* kprintf("kernel_bootstrap_thread: " x) */
+       kernel_bootstrap_thread_kprintf("calling idle_thread_create\n");
        /*
-        *      Create the idle threads and the other
-        *      service threads.
+        * Create the idle processor thread.
         */
-       for (i = 0; i < NCPUS; i++) {
-               processor_t             processor = cpu_to_processor(i);
-               thread_t                thread;
-               spl_t                   s;
-
-               thread = kernel_thread_with_priority(
-                                                                       kernel_task, MAXPRI_KERNEL,
-                                                                                       idle_thread, TRUE, FALSE);
-               s = splsched();
-               thread_lock(thread);
-               thread_bind_locked(thread, processor);
-               processor->idle_thread = thread;
-               thread->ref_count++;
-               thread->state |= TH_IDLE;
-               thread_go_locked(thread, THREAD_AWAKENED);
-               thread_unlock(thread);
-               splx(s);
-       }
+       idle_thread_create(processor);
 
        /*
-        * Initialize the thread reaper mechanism.
-        */
-       thread_reaper_init();
-
-       /*
-        * Initialize the stack swapin mechanism.
-        */
-       swapin_init();
-
-       /*
-        * Initialize the periodic scheduler mechanism.
+        * N.B. Do not stick anything else
+        * before this point.
+        *
+        * Start up the scheduler services.
         */
-       sched_tick_init();
+       kernel_bootstrap_thread_kprintf("calling sched_startup\n");
+       sched_startup();
 
        /*
-        * Initialize the thread callout mechanism.
+        * Remain on current processor as
+        * additional processors come online.
         */
-       thread_call_initialize();
+       kernel_bootstrap_thread_kprintf("calling thread_bind\n");
+       thread_bind(processor);
 
        /*
-        * Invoke some black magic.
+        * Kick off memory mapping adjustments.
         */
-#if __ppc__
+       kernel_bootstrap_thread_kprintf("calling mapping_adjust\n");
        mapping_adjust();
-#endif
 
        /*
         *      Create the clock service.
         */
+       kernel_bootstrap_thread_kprintf("calling clock_service_create\n");
        clock_service_create();
 
        /*
@@ -259,118 +288,217 @@ start_kernel_threads(void)
         */
        device_service_create();
 
-       shared_file_boot_time_init();
+       kth_started = 1;
+
+#if MACH_KDP
+	kernel_bootstrap_thread_kprintf("calling kdp_init\n");
+       kdp_init();
+#endif
+
+#if (defined(__i386__) || defined(__x86_64__)) && NCOPY_WINDOWS > 0
+       /*
+	 * Create and initialize the physical copy window for processor 0.
+	 * This is required before kicking off IOKit.
+        */
+       cpu_physwindow_init(0);
+#endif
+
+#if CONFIG_COUNTERS
+       pmc_bootstrap();
+#endif
 
 #ifdef IOKIT
-       {
-               PE_init_iokit();
-       }
+       PE_init_iokit();
 #endif
        
        (void) spllo();         /* Allow interruptions */
 
-    /*
-     * Fill in the comm area (mapped into every task address space.)
-     */
-    commpage_populate();
+#if (defined(__i386__) || defined(__x86_64__)) && NCOPY_WINDOWS > 0
+       /*
+        * Create and initialize the copy window for processor 0
+        * This also allocates window space for all other processors.
+        * However, this is dependent on the number of processors - so this call
+        * must be after IOKit has been started because IOKit performs processor
+        * discovery.
+        */
+       cpu_userwindow_init(0);
+#endif
+
+       /*
+        *      Initialize the shared region module.
+        */
+       vm_shared_region_init();
+       vm_commpage_init();
+
+#if CONFIG_MACF
+       mac_policy_initmach();
+#endif
 
        /*
         *      Start the user bootstrap.
         */
-       
 #ifdef MACH_BSD
-       { 
-               extern void bsd_init(void);
-               bsd_init();
-       }
+       bsd_init();
 #endif
 
-       thread_bind(current_thread(), PROCESSOR_NULL);
+	/*
+	 * Get rid of segments used to bootstrap kext loading. This removes
+	 * the KLD, PRELINK symtab, LINKEDIT, and symtab segments/load commands.
+	 */
+       OSKextRemoveKextBootstrap();
+
+       serial_keyboard_init();         /* Start serial keyboard if wanted */
+
+       vm_page_init_local_q();
+
+       thread_bind(PROCESSOR_NULL);
 
        /*
         *      Become the pageout daemon.
         */
-
        vm_pageout();
        /*NOTREACHED*/
 }
 
+/*
+ *     slave_main:
+ *
+ *     Load the first thread to start a processor.
+ */
 void
-slave_main(void)
+slave_main(void *machine_param)
 {
-       processor_t             myprocessor = current_processor();
+       processor_t             processor = current_processor();
        thread_t                thread;
 
-       myprocessor->cpu_data = get_cpu_data();
-       thread = myprocessor->next_thread;
-       myprocessor->next_thread = THREAD_NULL;
-       if (thread == THREAD_NULL) {
-               thread = machine_wake_thread;
-               machine_wake_thread = THREAD_NULL;
+       /*
+        *      Use the idle processor thread if there
+        *      is no dedicated start up thread.
+        */
+       if (processor->next_thread == THREAD_NULL) {
+               thread = processor->idle_thread;
+               thread->continuation = (thread_continue_t)processor_start_thread;
+               thread->parameter = machine_param;
+       }
+       else {
+               thread = processor->next_thread;
+               processor->next_thread = THREAD_NULL;
        }
-        thread_machine_set_current(thread);
-       if (thread == machine_wake_thread)
-               thread_bind(thread, myprocessor);
 
-       cpu_launch_first_thread(thread);
+       load_context(thread);
        /*NOTREACHED*/
-       panic("slave_main");
 }
 
 /*
- * Now running in a thread context
+ *     processor_start_thread:
+ *
+ *     First thread to execute on a started processor.
+ *
+ *     Called at splsched.
  */
 void
-start_cpu_thread(void)
+processor_start_thread(void *machine_param)
 {
-       processor_t     processor;
+       processor_t             processor = current_processor();
+       thread_t                self = current_thread();
 
-       processor = cpu_to_processor(cpu_number());
+       slave_machine_init(machine_param);
 
-       slave_machine_init();
-
-       if (processor->processor_self == IP_NULL) {
-               ipc_processor_init(processor);
-               ipc_processor_enable(processor);
-       }
+       /*
+        *      If running the idle processor thread,
+        *      reenter the idle loop, else terminate.
+        */
+       if (self == processor->idle_thread)
+               thread_block((thread_continue_t)idle_thread);
 
-       (void) thread_terminate(current_act());
+       thread_terminate(self);
+       /*NOTREACHED*/
 }
 
 /*
- *     Start up the first thread on a CPU.
+ *     load_context:
+ *
+ *     Start the first thread on a processor.
  */
-void
-cpu_launch_first_thread(
+static void
+load_context(
        thread_t                thread)
 {
-       register int    mycpu = cpu_number();
-       processor_t             processor = cpu_to_processor(mycpu);
+       processor_t             processor = current_processor();
 
-       processor->cpu_data->preemption_level = 0;
 
-       cpu_up(mycpu);
-       start_timer(&kernel_timer[mycpu]);
-       clock_get_uptime(&processor->last_dispatch);
+#define load_context_kprintf(x...) /* kprintf("load_context: " x) */
 
-       if (thread == THREAD_NULL || thread == processor->idle_thread)
-               panic("cpu_launch_first_thread");
+       load_context_kprintf("calling machine_set_current_thread\n");
+       machine_set_current_thread(thread);
 
-       rtclock_reset();                /* start realtime clock ticking */
-       PMAP_ACTIVATE_KERNEL(mycpu);
+       load_context_kprintf("calling processor_up\n");
+       processor_up(processor);
 
-       thread_machine_set_current(thread);
-       thread_lock(thread);
-       thread->state &= ~TH_UNINT;
-       thread->last_processor = processor;
+       PMAP_ACTIVATE_KERNEL(processor->cpu_id);
+
+       /*
+        * Acquire a stack if none attached.  The panic
+        * should never occur since the thread is expected
+        * to have reserved stack.
+        */
+       load_context_kprintf("stack %x, stackptr %x\n", 
+                            thread->kernel_stack, thread->machine.kstackptr);
+       if (!thread->kernel_stack) {
+               load_context_kprintf("calling stack_alloc_try\n");
+               if (!stack_alloc_try(thread))
+                       panic("load_context");
+       }
+
+       /*
+        * The idle processor threads are not counted as
+        * running for load calculations.
+        */
+       if (!(thread->state & TH_IDLE))
+               sched_run_incr();
+
+       processor->active_thread = thread;
        processor->current_pri = thread->sched_pri;
-       _mk_sp_thread_begin(thread, processor);
-       thread_unlock(thread);
-       timer_switch(&thread->system_timer);
+       processor->deadline = UINT64_MAX;
+       thread->last_processor = processor;
 
-       PMAP_ACTIVATE_USER(thread->top_act, mycpu);
+       processor->last_dispatch = mach_absolute_time();
+       timer_start(&thread->system_timer, processor->last_dispatch);
+       PROCESSOR_DATA(processor, thread_timer) = PROCESSOR_DATA(processor, kernel_timer) = &thread->system_timer;
 
-       /* preemption enabled by load_context */
-       load_context(thread);
+       timer_start(&PROCESSOR_DATA(processor, system_state), processor->last_dispatch);
+       PROCESSOR_DATA(processor, current_state) = &PROCESSOR_DATA(processor, system_state);
+
+       PMAP_ACTIVATE_USER(thread, processor->cpu_id);
+
+       load_context_kprintf("calling machine_load_context\n");
+       machine_load_context(thread);
        /*NOTREACHED*/
 }
+
+void
+srv_setup(void)
+{
+       int scale = 0;
+#if defined(__LP64__)
+	/* if memory is more than 16 GB, scale up the task and thread limits */
+	if ((srv != 0) && ((uint64_t)sane_size >= (uint64_t)(16 * 1024 * 1024 * 1024ULL))) {
+		scale = (int)((uint64_t)sane_size / (uint64_t)(8 * 1024 * 1024 * 1024ULL));
+		/* cap the scaling at 128 GB of memory */
+		if (scale > 16)
+			scale = 16;
+               task_max = 2500 * scale;
+               task_threadmax = task_max;
+               thread_max = task_max * 5;
+       } else
+               scale = 0;
+#endif
+       bsd_srv_setup(scale);
+
+       ipc_space_max = SPACE_MAX;
+       ipc_tree_entry_max = ITE_MAX;
+       ipc_port_max = PORT_MAX;
+       ipc_pset_max = SET_MAX;
+       semaphore_max = SEMAPHORE_MAX;
+}
+
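
For reference, here is a minimal standalone sketch (not part of the patch) of the sizing rule srv_setup() applies above, assuming only what the diff shows: scale = sane_size / 8 GB once memory reaches 16 GB, capped at 16, with task_max = 2500 * scale, task_threadmax = task_max, and thread_max = 5 * task_max. The helper name and the sample memory sizes are illustrative.

/*
 * Illustrative sketch only -- reproduces the arithmetic from srv_setup()
 * in the diff above so the resulting limits can be inspected in userland.
 */
#include <stdio.h>
#include <stdint.h>

static void srv_limits(uint64_t sane_size, int srv)
{
	const uint64_t GB = 1024ULL * 1024 * 1024;
	int scale = 0;

	if (srv != 0 && sane_size >= 16 * GB) {
		scale = (int)(sane_size / (8 * GB));
		if (scale > 16)			/* 16 * 8 GB = 128 GB cap */
			scale = 16;
	}

	if (scale != 0)
		printf("%4llu GB: scale=%2d task_max=%5d thread_max=%6d\n",
		    (unsigned long long)(sane_size / GB), scale,
		    2500 * scale, 2500 * scale * 5);
	else
		printf("%4llu GB: compile-time defaults (no scaling)\n",
		    (unsigned long long)(sane_size / GB));
}

int main(void)
{
	const uint64_t GB = 1024ULL * 1024 * 1024;
	uint64_t sizes[] = { 8 * GB, 16 * GB, 32 * GB, 128 * GB, 256 * GB };

	for (unsigned i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
		srv_limits(sizes[i], 1);
	return 0;
}

So a 32 GB machine with srv set gets task_max 10000 and thread_max 50000, and anything at or beyond 128 GB is pinned at 40000 and 200000. The diff itself only reads srv via PE_parse_boot_argn("srv", ...); on systems of this era that flag would normally be supplied through the boot-args nvram variable (e.g. srv=1), which is outside this file.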