X-Git-Url: https://git.saurik.com/apple/xnu.git/blobdiff_plain/8ad349bb6ed4a0be06e34c92be0d98b92e078db4..2dced7af2b695f87fe26496a3e73c219b7880cbc:/osfmk/kern/startup.c?ds=sidebyside

diff --git a/osfmk/kern/startup.c b/osfmk/kern/startup.c
index cf1e83b07..53013fa79 100644
--- a/osfmk/kern/startup.c
+++ b/osfmk/kern/startup.c
@@ -1,31 +1,29 @@
 /*
- * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 2000-2010 Apple Inc. All rights reserved.
  *
- * @APPLE_LICENSE_OSREFERENCE_HEADER_START@
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
  *
- * This file contains Original Code and/or Modifications of Original Code
- * as defined in and that are subject to the Apple Public Source License
- * Version 2.0 (the 'License'). You may not use this file except in
- * compliance with the License. The rights granted to you under the
- * License may not be used to create, or enable the creation or
- * redistribution of, unlawful or unlicensed copies of an Apple operating
- * system, or to circumvent, violate, or enable the circumvention or
- * violation of, any terms of an Apple operating system software license
- * agreement.
- *
- * Please obtain a copy of the License at
- * http://www.opensource.apple.com/apsl/ and read it before using this
- * file.
- *
- * The Original Code and all software distributed under the License are
- * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
- * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
- * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
- * Please see the License for the specific language governing rights and
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. The rights granted to you under the License
+ * may not be used to create, or enable the creation or redistribution of,
+ * unlawful or unlicensed copies of an Apple operating system, or to
+ * circumvent, violate, or enable the circumvention or violation of, any
+ * terms of an Apple operating system software license agreement.
+ *
+ * Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this file.
+ *
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
+ * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
  * limitations under the License.
- *
- * @APPLE_LICENSE_OSREFERENCE_HEADER_END@
+ *
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
  */
 /*
  * @OSF_COPYRIGHT@
@@ -55,6 +53,12 @@
  * any improvements or extensions that they make and grant Carnegie Mellon
  * the rights to redistribute these changes.
  */
+/*
+ * NOTICE: This file was modified by McAfee Research in 2004 to introduce
+ * support for mandatory and extensible security protections. This notice
+ * is included in support of clause 2.2 (b) of the Apple Public License,
+ * Version 2.0.
+ */
 /*
  */
 
@@ -65,8 +69,6 @@
 #include
 #include
 #include
-#include
-#include
 #include
 #include
 
@@ -75,96 +77,303 @@
 #include
 #include
 #include
+#include
 #include
 #include
+#include
 #include
 #include
 #include
 #include
 #include
+#if CONFIG_SCHED_SFI
+#include
+#endif
 #include
 #include
 #include
 #include
+#if CONFIG_TELEMETRY
+#include
+#endif
 #include
 #include
 #include
-#include
+#include
+#include
+#include
+#include
 #include
 #include
 #include
 #include
 #include
 #include
+#include
 #include
 #include
 #include
+#include
+#include
+#include
+
+#include
+
+
+#if CONFIG_ATM
+#include
+#endif
+
+#if CONFIG_CSR
+#include
+#endif
+
+#if CONFIG_BANK
+#include
+#endif
+
+#if ALTERNATE_DEBUGGER
+#include
+#endif
+
+#if MACH_KDP
+#include
+#endif
+
+#if CONFIG_MACF
+#include
+#endif
+
+#if KPC
+#include
+#endif
 
-#ifdef __ppc__
-#include
-#include
-#include
+#if KPERF
+#include
 #endif
 
+#if HYPERVISOR
+#include
+#endif
+
+
+#include
 
 static void		kernel_bootstrap_thread(void);
 
 static void		load_context(
 					thread_t	thread);
+#if (defined(__i386__) || defined(__x86_64__)) && NCOPY_WINDOWS > 0
+extern void cpu_userwindow_init(int);
+extern void cpu_physwindow_init(int);
+#endif
+
+#if CONFIG_ECC_LOGGING
+#include
+#endif
+
+#if (defined(__i386__) || defined(__x86_64__)) && CONFIG_VMX
+#include
+#endif
+
+// libkern/OSKextLib.cpp
+extern void OSKextRemoveKextBootstrap(void);
+
+void scale_setup(void);
+extern void bsd_scale_setup(int);
+extern unsigned int semaphore_max;
+extern void stackshot_lock_init(void);
 
 /*
  *	Running in virtual memory, on the interrupt stack.
  */
+
+extern int serverperfmode;
+
+/* size of kernel trace buffer, disabled by default */
+unsigned int new_nkdbufs = 0;
+unsigned int wake_nkdbufs = 0;
+unsigned int write_trace_on_panic = 0;
+unsigned int trace_typefilter = 0;
+boolean_t trace_serial = FALSE;
+
+/* mach leak logging */
+int log_leaks = 0;
+int turn_on_log_leaks = 0;
+
+static inline void
+kernel_bootstrap_log(const char *message)
+{
+//	kprintf("kernel_bootstrap: %s\n", message);
+	kernel_debug_string_simple(message);
+}
+
+static inline void
+kernel_bootstrap_thread_log(const char *message)
+{
+//	kprintf("kernel_bootstrap_thread: %s\n", message);
+	kernel_debug_string_simple(message);
+}
+
+void
+kernel_early_bootstrap(void)
+{
+	/* serverperfmode is needed by timer setup */
+	if (PE_parse_boot_argn("serverperfmode", &serverperfmode, sizeof (serverperfmode))) {
+		serverperfmode = 1;
+	}
+
+	lck_mod_init();
+
+	/*
+	 * Initialize the timer callout world
+	 */
+	timer_call_init();
+
+#if CONFIG_SCHED_SFI
+	/*
+	 * Configure SFI classes
+	 */
+	sfi_early_init();
+#endif
+}
+
+extern boolean_t IORamDiskBSDRoot(void);
+extern kern_return_t cpm_preallocate_early(void);
+
 void
 kernel_bootstrap(void)
 {
 	kern_return_t	result;
-	thread_t	thread;
+	thread_t	thread;
+	char		namep[16];
 
-	lck_mod_init();
-	sched_init();
+	printf("%s\n", version); /* log kernel version */
+
+	if (PE_parse_boot_argn("-l", namep, sizeof (namep)))	/* leaks logging */
+		turn_on_log_leaks = 1;
+
+	PE_parse_boot_argn("trace", &new_nkdbufs, sizeof (new_nkdbufs));
+	PE_parse_boot_argn("trace_wake", &wake_nkdbufs, sizeof (wake_nkdbufs));
+	PE_parse_boot_argn("trace_panic", &write_trace_on_panic, sizeof(write_trace_on_panic));
+	PE_parse_boot_argn("trace_typefilter", &trace_typefilter, sizeof(trace_typefilter));
+
+	scale_setup();
+
+	kernel_bootstrap_log("vm_mem_bootstrap");
 	vm_mem_bootstrap();
-	ipc_bootstrap();
+
+	kernel_bootstrap_log("cs_init");
+	cs_init();
+
+	kernel_bootstrap_log("vm_mem_init");
 	vm_mem_init();
+
+	machine_info.memory_size = (uint32_t)mem_size;
+	machine_info.max_mem = max_mem;
+	machine_info.major_version = version_major;
+	machine_info.minor_version = version_minor;
+
+
+#if CONFIG_TELEMETRY
+	kernel_bootstrap_log("telemetry_init");
+	telemetry_init();
+#endif
+
+#if CONFIG_CSR
+	kernel_bootstrap_log("csr_init");
+	csr_init();
+#endif
+
+	kernel_bootstrap_log("stackshot_lock_init");
+	stackshot_lock_init();
+
+	kernel_bootstrap_log("sched_init");
+	sched_init();
+
+	kernel_bootstrap_log("waitq_bootstrap");
+	waitq_bootstrap();
+
+	kernel_bootstrap_log("ipc_bootstrap");
+	ipc_bootstrap();
+
+#if CONFIG_MACF
+	kernel_bootstrap_log("mac_policy_init");
+	mac_policy_init();
+#endif
+
+	kernel_bootstrap_log("ipc_init");
 	ipc_init();
 
 	/*
 	 * As soon as the virtual memory system is up, we record
 	 * that this CPU is using the kernel pmap.
 	 */
+	kernel_bootstrap_log("PMAP_ACTIVATE_KERNEL");
 	PMAP_ACTIVATE_KERNEL(master_cpu);
 
+	kernel_bootstrap_log("mapping_free_prime");
 	mapping_free_prime();		/* Load up with temporary mapping blocks */
 
+	kernel_bootstrap_log("machine_init");
 	machine_init();
 
-	kmod_init();
+
+	kernel_bootstrap_log("clock_init");
 	clock_init();
 
-	machine_info.memory_size = mem_size;
-	machine_info.max_mem = max_mem;
-	machine_info.major_version = version_major;
-	machine_info.minor_version = version_minor;
+	ledger_init();
 
 	/*
 	 *	Initialize the IPC, task, and thread subsystems.
 	 */
-	ledger_init();
+#if CONFIG_COALITIONS
+	kernel_bootstrap_log("coalitions_init");
+	coalitions_init();
+#endif
+
+	kernel_bootstrap_log("task_init");
 	task_init();
+
+	kernel_bootstrap_log("thread_init");
 	thread_init();
 
+#if CONFIG_ATM
+	/* Initialize the Activity Trace Resource Manager. */
+	kernel_bootstrap_log("atm_init");
+	atm_init();
+#endif
+
+#if CONFIG_BANK
+	/* Initialize the BANK Manager. */
+	kernel_bootstrap_log("bank_init");
+	bank_init();
+#endif
+
+	/* initialize the corpse config based on boot-args */
+	corpses_init();
+
 	/*
 	 *	Create a kernel thread to execute the kernel bootstrap.
 	 */
+	kernel_bootstrap_log("kernel_thread_create");
 	result = kernel_thread_create((thread_continue_t)kernel_bootstrap_thread, NULL, MAXPRI_KERNEL, &thread);
-	if (result != KERN_SUCCESS)
-		panic("kernel_bootstrap");
+
+	if (result != KERN_SUCCESS) panic("kernel_bootstrap: result = %08X\n", result);
 
 	thread->state = TH_RUN;
+	thread->last_made_runnable_time = mach_absolute_time();
 	thread_deallocate(thread);
 
+	kernel_bootstrap_log("load_context - done");
 	load_context(thread);
 	/*NOTREACHED*/
 }
 
+int kth_started = 0;
+
+vm_offset_t vm_kernel_addrperm;
+vm_offset_t buf_kernel_addrperm;
+vm_offset_t vm_kernel_addrperm_ext;
+
 /*
  * Now running in a thread.  Kick off other services,
  * invoke user bootstrap, enter pageout loop.
@@ -173,8 +382,9 @@ static void
 kernel_bootstrap_thread(void)
 {
 	processor_t		processor = current_processor();
-	thread_t		self = current_thread();
 
+#define kernel_bootstrap_thread_kprintf(x...) /* kprintf("kernel_bootstrap_thread: " x) */
+	kernel_bootstrap_thread_log("idle_thread_create");
 	/*
 	 * Create the idle processor thread.
 	 */
@@ -186,22 +396,47 @@ kernel_bootstrap_thread(void)
 	 *
 	 *	Start up the scheduler services.
 	 */
+	kernel_bootstrap_thread_log("sched_startup");
 	sched_startup();
 
+	/*
+	 * Thread lifecycle maintenance (teardown, stack allocation)
+	 */
+	kernel_bootstrap_thread_log("thread_daemon_init");
+	thread_daemon_init();
+
+	/* Create kernel map entry reserve */
+	vm_kernel_reserved_entry_init();
+
+	/*
+	 * Thread callout service.
+	 */
+	kernel_bootstrap_thread_log("thread_call_initialize");
+	thread_call_initialize();
+
 	/*
 	 * Remain on current processor as
 	 * additional processors come online.
 	 */
-	thread_bind(self, processor);
+	kernel_bootstrap_thread_log("thread_bind");
+	thread_bind(processor);
+
+	/*
+	 * Initialize ipc thread call support.
+	 */
+	kernel_bootstrap_thread_log("ipc_thread_call_init");
+	ipc_thread_call_init();
 
 	/*
 	 *	Kick off memory mapping adjustments.
 	 */
+	kernel_bootstrap_thread_log("mapping_adjust");
 	mapping_adjust();
 
 	/*
 	 *	Create the clock service.
 	 */
+	kernel_bootstrap_thread_log("clock_service_create");
 	clock_service_create();
 
 	/*
@@ -209,35 +444,159 @@ kernel_bootstrap_thread(void)
 	 */
 	device_service_create();
 
-	shared_file_boot_time_init(ENV_DEFAULT_ROOT, cpu_type());
+	kth_started = 1;
+
+#if (defined(__i386__) || defined(__x86_64__)) && NCOPY_WINDOWS > 0
+	/*
+	 * Create and initialize the physical copy window for processor 0
+	 * This is required before starting kicking off IOKit.
+	 */
+	cpu_physwindow_init(0);
+#endif
 
-#ifdef	IOKIT
-	{
-		PE_init_iokit();
+
+
+#if MACH_KDP
+	kernel_bootstrap_log("kdp_init");
+	kdp_init();
+#endif
+
+#if ALTERNATE_DEBUGGER
+	alternate_debugger_init();
+#endif
+
+#if KPC
+	kpc_init();
+#endif
+
+#if CONFIG_ECC_LOGGING
+	ecc_log_init();
+#endif
+
+#if KPERF
+	kperf_bootstrap();
+#endif
+
+#if HYPERVISOR
+	hv_support_init();
+#endif
+
+#if CONFIG_TELEMETRY
+	kernel_bootstrap_log("bootprofile_init");
+	bootprofile_init();
+#endif
+
+#if (defined(__i386__) || defined(__x86_64__)) && CONFIG_VMX
+	vmx_init();
+#endif
+
+#if (defined(__i386__) || defined(__x86_64__))
+	if (kdebug_serial) {
+		new_nkdbufs = 1;
+		if (trace_typefilter == 0)
+			trace_typefilter = 1;
 	}
+	if (turn_on_log_leaks && !new_nkdbufs)
+		new_nkdbufs = 200000;
+	if (trace_typefilter)
+		start_kern_tracing_with_typefilter(new_nkdbufs,
+						   FALSE,
+						   trace_typefilter);
+	else
+		start_kern_tracing(new_nkdbufs, FALSE);
+	if (turn_on_log_leaks)
+		log_leaks = 1;
+
 #endif
-	
+
+	kernel_bootstrap_log("prng_init");
+	prng_cpu_init(master_cpu);
+
+#ifdef IOKIT
+	PE_init_iokit();
+#endif
+
+	assert(ml_get_interrupts_enabled() == FALSE);
 	(void) spllo();		/* Allow interruptions */
 
-	/*
-	 *	Fill in the comm area (mapped into every task address space.)
-	 */
-	commpage_populate();
+#if (defined(__i386__) || defined(__x86_64__)) && NCOPY_WINDOWS > 0
+	/*
+	 * Create and initialize the copy window for processor 0
+	 * This also allocates window space for all other processors.
+	 * However, this is dependent on the number of processors - so this call
+	 * must be after IOKit has been started because IOKit performs processor
+	 * discovery.
+	 */
+	cpu_userwindow_init(0);
+#endif
+
+#if (!defined(__i386__) && !defined(__x86_64__))
+	if (turn_on_log_leaks && !new_nkdbufs)
+		new_nkdbufs = 200000;
+	if (trace_typefilter)
+		start_kern_tracing_with_typefilter(new_nkdbufs, FALSE, trace_typefilter);
+	else
+		start_kern_tracing(new_nkdbufs, FALSE);
+	if (turn_on_log_leaks)
+		log_leaks = 1;
+#endif
+
+	/*
+	 * Initialize the shared region module.
+	 */
+	vm_shared_region_init();
+	vm_commpage_init();
+	vm_commpage_text_init();
+
+
+#if CONFIG_MACF
+	kernel_bootstrap_log("mac_policy_initmach");
+	mac_policy_initmach();
+#endif
+
+#if CONFIG_SCHED_SFI
+	kernel_bootstrap_log("sfi_init");
+	sfi_init();
+#endif
+
+	/*
+	 * Initialize the globals used for permuting kernel
+	 * addresses that may be exported to userland as tokens
+	 * using VM_KERNEL_ADDRPERM()/VM_KERNEL_ADDRPERM_EXTERNAL().
+	 * Force the random number to be odd to avoid mapping a non-zero
+	 * word-aligned address to zero via addition.
+	 * Note: at this stage we can use the cryptographically secure PRNG
+	 * rather than early_random().
+	 */
+	read_random(&vm_kernel_addrperm, sizeof(vm_kernel_addrperm));
+	vm_kernel_addrperm |= 1;
+	read_random(&buf_kernel_addrperm, sizeof(buf_kernel_addrperm));
+	buf_kernel_addrperm |= 1;
+	read_random(&vm_kernel_addrperm_ext, sizeof(vm_kernel_addrperm_ext));
+	vm_kernel_addrperm_ext |= 1;
+
+	vm_set_restrictions();
+
+
 	/*
 	 *	Start the user bootstrap.
 	 */
 #ifdef	MACH_BSD
-	{
-		bsd_init();
-	}
+	bsd_init();
 #endif
 
-#if __ppc__
+	/*
+	 * Get rid of segments used to bootstrap kext loading. This removes
+	 * the KLD, PRELINK symtab, LINKEDIT, and symtab segments/load commands.
+	 */
+	OSKextRemoveKextBootstrap();
+
 	serial_keyboard_init();		/* Start serial keyboard if wanted */
-#endif
 
-	thread_bind(self, PROCESSOR_NULL);
+	vm_page_init_local_q();
+
+	thread_bind(PROCESSOR_NULL);
 
 	/*
 	 *	Become the pageout daemon.
@@ -252,7 +611,7 @@ kernel_bootstrap_thread(void)
  *	Load the first thread to start a processor.
  */
 void
-slave_main(void)
+slave_main(void *machine_param)
 {
 	processor_t		processor = current_processor();
 	thread_t		thread;
@@ -264,7 +623,7 @@ slave_main(void)
 	if (processor->next_thread == THREAD_NULL) {
 		thread = processor->idle_thread;
 		thread->continuation = (thread_continue_t)processor_start_thread;
-		thread->parameter = NULL;
+		thread->parameter = machine_param;
 	}
 	else {
 		thread = processor->next_thread;
@@ -283,12 +642,12 @@ slave_main(void)
  *	Called at splsched.
  */
 void
-processor_start_thread(void)
+processor_start_thread(void *machine_param)
 {
 	processor_t		processor = current_processor();
 	thread_t		self = current_thread();
 
-	slave_machine_init();
+	slave_machine_init(machine_param);
 
 	/*
 	 *	If running the idle processor thread,
@@ -312,17 +671,26 @@ load_context(
 {
 	processor_t		processor = current_processor();
 
+
+#define load_context_kprintf(x...) /* kprintf("load_context: " x) */
+
+	load_context_kprintf("machine_set_current_thread\n");
 	machine_set_current_thread(thread);
+
+	load_context_kprintf("processor_up\n");
 	processor_up(processor);
 
-	PMAP_ACTIVATE_KERNEL(PROCESSOR_DATA(processor, slot_num));
+	PMAP_ACTIVATE_KERNEL(processor->cpu_id);
 
 	/*
 	 * Acquire a stack if none attached.  The panic
 	 * should never occur since the thread is expected
 	 * to have reserved stack.
 	 */
+	load_context_kprintf("thread %p, stack %lx, stackptr %lx\n", thread,
+			     thread->kernel_stack, thread->machine.kstackptr);
 	if (!thread->kernel_stack) {
+		load_context_kprintf("stack_alloc_try\n");
 		if (!stack_alloc_try(thread))
 			panic("load_context");
 	}
@@ -332,19 +700,59 @@ load_context(
 	 *	running for load calculations.
 	 */
 	if (!(thread->state & TH_IDLE))
-		pset_run_incr(thread->processor_set);
+		sched_run_incr(thread);
 
 	processor->active_thread = thread;
 	processor->current_pri = thread->sched_pri;
+	processor->current_thmode = thread->sched_mode;
 	processor->deadline = UINT64_MAX;
 	thread->last_processor = processor;
 
 	processor->last_dispatch = mach_absolute_time();
-	timer_switch((uint32_t)processor->last_dispatch,
-			&PROCESSOR_DATA(processor, offline_timer));
+	timer_start(&thread->system_timer, processor->last_dispatch);
+	PROCESSOR_DATA(processor, thread_timer) = PROCESSOR_DATA(processor, kernel_timer) = &thread->system_timer;
 
-	PMAP_ACTIVATE_USER(thread, PROCESSOR_DATA(processor, slot_num));
+	timer_start(&PROCESSOR_DATA(processor, system_state), processor->last_dispatch);
+	PROCESSOR_DATA(processor, current_state) = &PROCESSOR_DATA(processor, system_state);
 
+	PMAP_ACTIVATE_USER(thread, processor->cpu_id);
+
+	load_context_kprintf("machine_load_context\n");
 	machine_load_context(thread);
 	/*NOTREACHED*/
 }
+
+void
+scale_setup()
+{
+	int scale = 0;
+#if defined(__LP64__)
+	typeof(task_max) task_max_base = task_max;
+
+	/* Raise limits for servers with >= 16G */
+	if ((serverperfmode != 0) && ((uint64_t)sane_size >= (uint64_t)(16 * 1024 * 1024 *1024ULL))) {
+		scale = (int)((uint64_t)sane_size / (uint64_t)(8 * 1024 * 1024 *1024ULL));
+		/* limit to 128 G */
+		if (scale > 16)
+			scale = 16;
+		task_max_base = 2500;
+	} else if ((uint64_t)sane_size >= (uint64_t)(3 * 1024 * 1024 *1024ULL))
+		scale = 2;
+
+	task_max = MAX(task_max, task_max_base * scale);
+
+	if (scale != 0) {
+		task_threadmax = task_max;
+		thread_max = task_max * 5;
+	}
+
+#endif
+
+	bsd_scale_setup(scale);
+
+	ipc_space_max = SPACE_MAX;
+	ipc_port_max = PORT_MAX;
+	ipc_pset_max = SET_MAX;
+	semaphore_max = SEMAPHORE_MAX;
+}
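
Editor's illustration of the VM_KERNEL_ADDRPERM comment added in this diff ("force the random number to be odd to avoid mapping a non-zero word-aligned address to zero via addition"). The sketch below is standalone userland C, not xnu code, and addrperm() is a simplified, hypothetical model of the permutation (zero passed through, everything else offset by the random constant); it only demonstrates why an even constant can cancel a word-aligned address to zero while an odd one cannot.

/*
 * A non-zero word-aligned address has its low bit clear, so
 * addr + perm can only wrap around to exactly zero when perm == -addr,
 * which is also even.  Forcing perm odd rules that collision out.
 */
#include <inttypes.h>
#include <stdio.h>

static uint64_t
addrperm(uint64_t addr, uint64_t perm)
{
	return addr ? addr + perm : 0;	/* simplified model, not the real macro */
}

int
main(void)
{
	uint64_t even_perm = (uint64_t)0 - 0x1000;	/* hypothetical even constant: -0x1000 mod 2^64 */
	uint64_t odd_perm  = even_perm | 1;		/* forced odd, as in the diff */
	uint64_t addr      = 0x1000;			/* non-zero, word-aligned kernel address */

	/* With an even constant the permuted value can collide with zero... */
	printf("even perm: %" PRIx64 "\n", addrperm(addr, even_perm));	/* prints 0 */
	/* ...with an odd constant it never can. */
	printf("odd  perm: %" PRIx64 "\n", addrperm(addr, odd_perm));	/* prints 1 */
	return 0;
}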