X-Git-Url: https://git.saurik.com/apple/xnu.git/blobdiff_plain/6d2010ae8f7a6078e10b361c6962983bab233e0f..2dced7af2b695f87fe26496a3e73c219b7880cbc:/osfmk/kern/startup.c

diff --git a/osfmk/kern/startup.c b/osfmk/kern/startup.c
index 47290e3d8..53013fa79 100644
--- a/osfmk/kern/startup.c
+++ b/osfmk/kern/startup.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000-2009 Apple Inc. All rights reserved.
+ * Copyright (c) 2000-2010 Apple Inc. All rights reserved.
  *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
  *
@@ -69,7 +69,6 @@
 #include
 #include
 #include
-#include
 #include
 #include
 
@@ -81,19 +80,28 @@
 #include
 #include
 #include
+#include
 #include
 #include
 #include
 #include
 #include
+#if CONFIG_SCHED_SFI
+#include
+#endif
 #include
 #include
 #include
 #include
-#include
+#if CONFIG_TELEMETRY
+#include
+#endif
 #include
 #include
 #include
+#include
+#include
+#include
 #include
 #include
 #include
@@ -105,7 +113,28 @@
 #include
 #include
 #include
+#include
 #include
+#include
+
+#include
+
+
+#if CONFIG_ATM
+#include
+#endif
+
+#if CONFIG_CSR
+#include
+#endif
+
+#if CONFIG_BANK
+#include
+#endif
+
+#if ALTERNATE_DEBUGGER
+#include
+#endif
 
 #if MACH_KDP
 #include
@@ -115,10 +144,20 @@
 #include
 #endif
 
-#if CONFIG_COUNTERS
-#include
+#if KPC
+#include
 #endif
 
+#if KPERF
+#include
+#endif
+
+#if HYPERVISOR
+#include
+#endif
+
+
+#include
 static void		kernel_bootstrap_thread(void);
 
 static void		load_context(
@@ -128,12 +167,21 @@ extern void cpu_userwindow_init(int);
 extern void cpu_physwindow_init(int);
 #endif
 
+#if CONFIG_ECC_LOGGING
+#include
+#endif
+
+#if (defined(__i386__) || defined(__x86_64__)) && CONFIG_VMX
+#include
+#endif
+
 // libkern/OSKextLib.cpp
 extern void	OSKextRemoveKextBootstrap(void);
 
 void scale_setup(void);
 extern void bsd_scale_setup(int);
 extern unsigned int semaphore_max;
+extern void stackshot_lock_init(void);
 
 /*
  *	Running in virtual memory, on the interrupt stack.
@@ -143,24 +191,54 @@ extern int serverperfmode;
 
 /* size of kernel trace buffer, disabled by default */
 unsigned int new_nkdbufs = 0;
+unsigned int wake_nkdbufs = 0;
+unsigned int write_trace_on_panic = 0;
+unsigned int trace_typefilter = 0;
+boolean_t trace_serial = FALSE;
 
 /* mach leak logging */
 int log_leaks = 0;
 int turn_on_log_leaks = 0;
 
+static inline void
+kernel_bootstrap_log(const char *message)
+{
+//	kprintf("kernel_bootstrap: %s\n", message);
+	kernel_debug_string_simple(message);
+}
+
+static inline void
+kernel_bootstrap_thread_log(const char *message)
+{
+//	kprintf("kernel_bootstrap_thread: %s\n", message);
+	kernel_debug_string_simple(message);
+}
 
 void
 kernel_early_bootstrap(void)
 {
+	/* serverperfmode is needed by timer setup */
+	if (PE_parse_boot_argn("serverperfmode", &serverperfmode, sizeof (serverperfmode))) {
+		serverperfmode = 1;
+	}
 
 	lck_mod_init();
 
 	/*
	 * Initialize the timer callout world
	 */
-	timer_call_initialize();
+	timer_call_init();
+
+#if CONFIG_SCHED_SFI
+	/*
+	 * Configure SFI classes
+	 */
+	sfi_early_init();
+#endif
 }
 
+extern boolean_t IORamDiskBSDRoot(void);
+extern kern_return_t cpm_preallocate_early(void);
 
 void
 kernel_bootstrap(void)
@@ -171,23 +249,23 @@ kernel_bootstrap(void)
 
 	printf("%s\n", version); /* log kernel version */
 
-#define kernel_bootstrap_kprintf(x...) /* kprintf("kernel_bootstrap: " x) */
-
 	if (PE_parse_boot_argn("-l", namep, sizeof (namep)))	/* leaks logging */
 		turn_on_log_leaks = 1;
 
 	PE_parse_boot_argn("trace", &new_nkdbufs, sizeof (new_nkdbufs));
+	PE_parse_boot_argn("trace_wake", &wake_nkdbufs, sizeof (wake_nkdbufs));
+	PE_parse_boot_argn("trace_panic", &write_trace_on_panic, sizeof(write_trace_on_panic));
+	PE_parse_boot_argn("trace_typefilter", &trace_typefilter, sizeof(trace_typefilter));
 
-	/* i386_vm_init already checks for this ; do it aagin anyway */
-	if (PE_parse_boot_argn("serverperfmode", &serverperfmode, sizeof (serverperfmode))) {
-		serverperfmode = 1;
-	}
 	scale_setup();
 
-	kernel_bootstrap_kprintf("calling vm_mem_bootstrap\n");
+	kernel_bootstrap_log("vm_mem_bootstrap");
 	vm_mem_bootstrap();
 
-	kernel_bootstrap_kprintf("calling vm_mem_init\n");
+	kernel_bootstrap_log("cs_init");
+	cs_init();
+
+	kernel_bootstrap_log("vm_mem_init");
 	vm_mem_init();
 
 	machine_info.memory_size = (uint32_t)mem_size;
@@ -195,75 +273,107 @@ kernel_bootstrap(void)
 	machine_info.major_version = version_major;
 	machine_info.minor_version = version_minor;
 
-	kernel_bootstrap_kprintf("calling sched_init\n");
+
+#if CONFIG_TELEMETRY
+	kernel_bootstrap_log("telemetry_init");
+	telemetry_init();
+#endif
+
+#if CONFIG_CSR
+	kernel_bootstrap_log("csr_init");
+	csr_init();
+#endif
+
+	kernel_bootstrap_log("stackshot_lock_init");
+	stackshot_lock_init();
+
+	kernel_bootstrap_log("sched_init");
 	sched_init();
 
-	kernel_bootstrap_kprintf("calling wait_queue_bootstrap\n");
-	wait_queue_bootstrap();
+	kernel_bootstrap_log("waitq_bootstrap");
+	waitq_bootstrap();
 
-	kernel_bootstrap_kprintf("calling ipc_bootstrap\n");
+	kernel_bootstrap_log("ipc_bootstrap");
 	ipc_bootstrap();
 
 #if CONFIG_MACF
+	kernel_bootstrap_log("mac_policy_init");
 	mac_policy_init();
 #endif
-	kernel_bootstrap_kprintf("calling ipc_init\n");
+
+	kernel_bootstrap_log("ipc_init");
 	ipc_init();
 
 	/*
	 * As soon as the virtual memory system is up, we record
	 * that this CPU is using the kernel pmap.
	 */
-	kernel_bootstrap_kprintf("calling PMAP_ACTIVATE_KERNEL\n");
+	kernel_bootstrap_log("PMAP_ACTIVATE_KERNEL");
 	PMAP_ACTIVATE_KERNEL(master_cpu);
 
-	kernel_bootstrap_kprintf("calling mapping_free_prime\n");
+	kernel_bootstrap_log("mapping_free_prime");
 	mapping_free_prime();		/* Load up with temporary mapping blocks */
 
-	kernel_bootstrap_kprintf("calling machine_init\n");
+	kernel_bootstrap_log("machine_init");
 	machine_init();
 
-	kernel_bootstrap_kprintf("calling clock_init\n");
+	kernel_bootstrap_log("clock_init");
 	clock_init();
 
+	ledger_init();
+
 	/*
	 * Initialize the IPC, task, and thread subsystems.
	 */
-	kernel_bootstrap_kprintf("calling ledger_init\n");
-	ledger_init();
+#if CONFIG_COALITIONS
+	kernel_bootstrap_log("coalitions_init");
+	coalitions_init();
+#endif
 
-	kernel_bootstrap_kprintf("calling task_init\n");
+	kernel_bootstrap_log("task_init");
 	task_init();
 
-	kernel_bootstrap_kprintf("calling thread_init\n");
+	kernel_bootstrap_log("thread_init");
 	thread_init();
 
+#if CONFIG_ATM
+	/* Initialize the Activity Trace Resource Manager. */
+	kernel_bootstrap_log("atm_init");
+	atm_init();
+#endif
+
+#if CONFIG_BANK
+	/* Initialize the BANK Manager. */
+	kernel_bootstrap_log("bank_init");
+	bank_init();
+#endif
+
+	/* initialize the corpse config based on boot-args */
+	corpses_init();
+
 	/*
	 * Create a kernel thread to execute the kernel bootstrap.
	 */
-	kernel_bootstrap_kprintf("calling kernel_thread_create\n");
+	kernel_bootstrap_log("kernel_thread_create");
 	result = kernel_thread_create((thread_continue_t)kernel_bootstrap_thread, NULL, MAXPRI_KERNEL, &thread);
 	if (result != KERN_SUCCESS) panic("kernel_bootstrap: result = %08X\n", result);
 	thread->state = TH_RUN;
+	thread->last_made_runnable_time = mach_absolute_time();
 	thread_deallocate(thread);
 
-	/* transfer statistics from init thread to kernel */
-	thread_t init_thread = current_thread();
-	kernel_task->tkm_private.alloc = init_thread->tkm_private.alloc;
-	kernel_task->tkm_private.free = init_thread->tkm_private.free;
-	kernel_task->tkm_shared.alloc = init_thread->tkm_shared.alloc;
-	kernel_task->tkm_shared.free = init_thread->tkm_shared.free;
-
-	kernel_bootstrap_kprintf("calling load_context - done\n");
+	kernel_bootstrap_log("load_context - done");
 	load_context(thread);
 	/*NOTREACHED*/
 }
 
 int kth_started = 0;
 
+vm_offset_t vm_kernel_addrperm;
+vm_offset_t buf_kernel_addrperm;
+vm_offset_t vm_kernel_addrperm_ext;
+
 /*
  * Now running in a thread.  Kick off other services,
  * invoke user bootstrap, enter pageout loop.
@@ -274,7 +384,7 @@ kernel_bootstrap_thread(void)
 	processor_t		processor = current_processor();
 
 #define kernel_bootstrap_thread_kprintf(x...) /* kprintf("kernel_bootstrap_thread: " x) */
-	kernel_bootstrap_thread_kprintf("calling idle_thread_create\n");
+	kernel_bootstrap_thread_log("idle_thread_create");
 	/*
	 * Create the idle processor thread.
	 */
@@ -286,38 +396,47 @@ kernel_bootstrap_thread(void)
	 *
	 *	Start up the scheduler services.
	 */
-	kernel_bootstrap_thread_kprintf("calling sched_startup\n");
+	kernel_bootstrap_thread_log("sched_startup");
 	sched_startup();
 
 	/*
	 *	Thread lifecycle maintenance (teardown, stack allocation)
	 */
-	kernel_bootstrap_thread_kprintf("calling thread_daemon_init\n");
+	kernel_bootstrap_thread_log("thread_daemon_init");
 	thread_daemon_init();
-	
+
+	/* Create kernel map entry reserve */
+	vm_kernel_reserved_entry_init();
+
 	/*
	 *	Thread callout service.
	 */
-	kernel_bootstrap_thread_kprintf("calling thread_call_initialize\n");
+	kernel_bootstrap_thread_log("thread_call_initialize");
 	thread_call_initialize();
-	
+
 	/*
	 * Remain on current processor as
	 * additional processors come online.
	 */
-	kernel_bootstrap_thread_kprintf("calling thread_bind\n");
+	kernel_bootstrap_thread_log("thread_bind");
 	thread_bind(processor);
 
+	/*
+	 * Initialize ipc thread call support.
+	 */
+	kernel_bootstrap_thread_log("ipc_thread_call_init");
+	ipc_thread_call_init();
+
 	/*
	 * Kick off memory mapping adjustments.
	 */
-	kernel_bootstrap_thread_kprintf("calling mapping_adjust\n");
+	kernel_bootstrap_thread_log("mapping_adjust");
 	mapping_adjust();
 
 	/*
	 *	Create the clock service.
	 */
-	kernel_bootstrap_thread_kprintf("calling clock_service_create\n");
+	kernel_bootstrap_thread_log("clock_service_create");
 	clock_service_create();
 
 	/*
@@ -326,11 +445,6 @@ kernel_bootstrap_thread(void)
 	device_service_create();
 
 	kth_started = 1;
-
-#if MACH_KDP
-	kernel_bootstrap_kprintf("calling kdp_init\n");
-	kdp_init();
-#endif
 
 #if (defined(__i386__) || defined(__x86_64__)) && NCOPY_WINDOWS > 0
 	/*
@@ -340,22 +454,69 @@ kernel_bootstrap_thread(void)
 	cpu_physwindow_init(0);
 #endif
 
-#if CONFIG_COUNTERS
-	pmc_bootstrap();
+
+
+#if MACH_KDP
+	kernel_bootstrap_log("kdp_init");
+	kdp_init();
+#endif
+
+#if ALTERNATE_DEBUGGER
+	alternate_debugger_init();
+#endif
+
+#if KPC
+	kpc_init();
+#endif
+
+#if CONFIG_ECC_LOGGING
+	ecc_log_init();
+#endif
+
+#if KPERF
+	kperf_bootstrap();
+#endif
+
+#if HYPERVISOR
+	hv_support_init();
+#endif
+
+#if CONFIG_TELEMETRY
+	kernel_bootstrap_log("bootprofile_init");
+	bootprofile_init();
+#endif
+
+#if (defined(__i386__) || defined(__x86_64__)) && CONFIG_VMX
+	vmx_init();
 #endif
 
 #if (defined(__i386__) || defined(__x86_64__))
+	if (kdebug_serial) {
+		new_nkdbufs = 1;
+		if (trace_typefilter == 0)
+			trace_typefilter = 1;
+	}
 	if (turn_on_log_leaks && !new_nkdbufs)
 		new_nkdbufs = 200000;
-	start_kern_tracing(new_nkdbufs);
+	if (trace_typefilter)
+		start_kern_tracing_with_typefilter(new_nkdbufs,
+						   FALSE,
+						   trace_typefilter);
+	else
+		start_kern_tracing(new_nkdbufs, FALSE);
 	if (turn_on_log_leaks)
 		log_leaks = 1;
+
 #endif
 
+	kernel_bootstrap_log("prng_init");
+	prng_cpu_init(master_cpu);
+
 #ifdef	IOKIT
 	PE_init_iokit();
 #endif
-
+
+	assert(ml_get_interrupts_enabled() == FALSE);
 	(void) spllo();		/* Allow interruptions */
 
 #if (defined(__i386__) || defined(__x86_64__)) && NCOPY_WINDOWS > 0
@@ -372,7 +533,10 @@ kernel_bootstrap_thread(void)
 #if (!defined(__i386__) && !defined(__x86_64__))
 	if (turn_on_log_leaks && !new_nkdbufs)
 		new_nkdbufs = 200000;
-	start_kern_tracing(new_nkdbufs);
+	if (trace_typefilter)
+		start_kern_tracing_with_typefilter(new_nkdbufs, FALSE, trace_typefilter);
+	else
+		start_kern_tracing(new_nkdbufs, FALSE);
 	if (turn_on_log_leaks)
 		log_leaks = 1;
 #endif
@@ -382,11 +546,39 @@ kernel_bootstrap_thread(void)
	 */
 	vm_shared_region_init();
 	vm_commpage_init();
+	vm_commpage_text_init();
+
 
 #if CONFIG_MACF
+	kernel_bootstrap_log("mac_policy_initmach");
 	mac_policy_initmach();
 #endif
 
+#if CONFIG_SCHED_SFI
+	kernel_bootstrap_log("sfi_init");
+	sfi_init();
+#endif
+
+	/*
+	 * Initialize the globals used for permuting kernel
+	 * addresses that may be exported to userland as tokens
+	 * using VM_KERNEL_ADDRPERM()/VM_KERNEL_ADDRPERM_EXTERNAL().
+	 * Force the random number to be odd to avoid mapping a non-zero
+	 * word-aligned address to zero via addition.
+	 * Note: at this stage we can use the cryptographically secure PRNG
+	 * rather than early_random().
+	 */
+	read_random(&vm_kernel_addrperm, sizeof(vm_kernel_addrperm));
+	vm_kernel_addrperm |= 1;
+	read_random(&buf_kernel_addrperm, sizeof(buf_kernel_addrperm));
+	buf_kernel_addrperm |= 1;
+	read_random(&vm_kernel_addrperm_ext, sizeof(vm_kernel_addrperm_ext));
+	vm_kernel_addrperm_ext |= 1;
+
+	vm_set_restrictions();
+
+
+
 	/*
	 * Start the user bootstrap.
	 */
@@ -482,10 +674,10 @@ load_context(
 #define load_context_kprintf(x...)	/* kprintf("load_context: " x) */
 
-	load_context_kprintf("calling machine_set_current_thread\n");
+	load_context_kprintf("machine_set_current_thread\n");
 	machine_set_current_thread(thread);
 
-	load_context_kprintf("calling processor_up\n");
+	load_context_kprintf("processor_up\n");
 	processor_up(processor);
 
 	PMAP_ACTIVATE_KERNEL(processor->cpu_id);
@@ -495,10 +687,10 @@ load_context(
	 * should never occur since the thread is expected
	 * to have reserved stack.
	 */
-	load_context_kprintf("stack %x, stackptr %x\n",
+	load_context_kprintf("thread %p, stack %lx, stackptr %lx\n", thread,
 			     thread->kernel_stack, thread->machine.kstackptr);
 	if (!thread->kernel_stack) {
-		load_context_kprintf("calling stack_alloc_try\n");
+		load_context_kprintf("stack_alloc_try\n");
 		if (!stack_alloc_try(thread))
 			panic("load_context");
 	}
@@ -508,7 +700,7 @@ load_context(
	 * running for load calculations.
	 */
 	if (!(thread->state & TH_IDLE))
-		sched_run_incr();
+		sched_run_incr(thread);
 
 	processor->active_thread = thread;
 	processor->current_pri = thread->sched_pri;
@@ -525,7 +717,7 @@ load_context(
 
 	PMAP_ACTIVATE_USER(thread, processor->cpu_id);
 
-	load_context_kprintf("calling machine_load_context\n");
+	load_context_kprintf("machine_load_context\n");
 	machine_load_context(thread);
 	/*NOTREACHED*/
 }
@@ -559,7 +751,6 @@ scale_setup()
 	bsd_scale_setup(scale);
 
 	ipc_space_max = SPACE_MAX;
-	ipc_tree_entry_max = ITE_MAX;
 	ipc_port_max = PORT_MAX;
 	ipc_pset_max = SET_MAX;
 	semaphore_max = SEMAPHORE_MAX;