/*
- * Copyright (c) 2000-2009 Apple Inc. All rights reserved.
+ * Copyright (c) 2000-2010 Apple Inc. All rights reserved.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
#include <debug.h>
#include <xpr_debug.h>
#include <mach_kdp.h>
-#include <norma_vm.h>
#include <mach/boolean.h>
#include <mach/machine.h>
#include <machine/pmap.h>
#include <machine/commpage.h>
#include <libkern/version.h>
+#include <sys/kdebug.h>
#if MACH_KDP
#include <kdp/kdp.h>
#include <pmc/pmc.h>
#endif
-#ifdef __ppc__
-#include <ppc/Firmware.h>
-#include <ppc/mappings.h>
-#endif
-
static void kernel_bootstrap_thread(void);
static void load_context(
// libkern/OSKextLib.cpp
extern void OSKextRemoveKextBootstrap(void);
-void srv_setup(void);
-extern void bsd_srv_setup(int);
+void scale_setup(void);
+extern void bsd_scale_setup(int);
extern unsigned int semaphore_max;
-
/*
* Running in virtual memory, on the interrupt stack.
*/
-extern int srv;
+extern int serverperfmode;
+
+/* size of kernel trace buffer, disabled by default */
+unsigned int new_nkdbufs = 0;
+
+/* mach leak logging */
+int log_leaks = 0;
+int turn_on_log_leaks = 0;
+
+
+/*
+ * kernel_early_bootstrap:
+ * Earliest Mach initialization. Per this patch, lck_mod_init() was moved
+ * here out of kernel_bootstrap() (see the removed call below), so this
+ * presumably runs before kernel_bootstrap() -- confirm against the caller
+ * in the platform startup path.
+ */
+void
+kernel_early_bootstrap(void)
+{
+
+	/* Bring up the lock group/module machinery first. */
+	lck_mod_init();
+
+	/*
+	 * Initialize the timer callout world
+	 */
+	timer_call_initialize();
+}
+
void
kernel_bootstrap(void)
{
kern_return_t result;
- thread_t thread;
+ thread_t thread;
+ char namep[16];
printf("%s\n", version); /* log kernel version */
#define kernel_bootstrap_kprintf(x...) /* kprintf("kernel_bootstrap: " x) */
- /* i386_vm_init already checks for this ; do it aagin anyway */
- if (PE_parse_boot_argn("srv", &srv, sizeof (srv))) {
- srv = 1;
- }
+ if (PE_parse_boot_argn("-l", namep, sizeof (namep))) /* leaks logging */
+ turn_on_log_leaks = 1;
- srv_setup();
+ PE_parse_boot_argn("trace", &new_nkdbufs, sizeof (new_nkdbufs));
- kernel_bootstrap_kprintf("calling lck_mod_init\n");
- lck_mod_init();
+	/* i386_vm_init already checks for this; do it again anyway */
+ if (PE_parse_boot_argn("serverperfmode", &serverperfmode, sizeof (serverperfmode))) {
+ serverperfmode = 1;
+ }
+ scale_setup();
kernel_bootstrap_kprintf("calling vm_mem_bootstrap\n");
vm_mem_bootstrap();
kernel_bootstrap_kprintf("calling clock_init\n");
clock_init();
+ ledger_init();
/*
* Initialize the IPC, task, and thread subsystems.
*/
- kernel_bootstrap_kprintf("calling ledger_init\n");
- ledger_init();
-
kernel_bootstrap_kprintf("calling task_init\n");
task_init();
int kth_started = 0;
+vm_offset_t vm_kernel_addrperm;
+
/*
* Now running in a thread. Kick off other services,
* invoke user bootstrap, enter pageout loop.
kernel_bootstrap_thread_kprintf("calling sched_startup\n");
sched_startup();
+ /*
+ * Thread lifecycle maintenance (teardown, stack allocation)
+ */
+ kernel_bootstrap_thread_kprintf("calling thread_daemon_init\n");
+ thread_daemon_init();
+
+ /*
+ * Thread callout service.
+ */
+ kernel_bootstrap_thread_kprintf("calling thread_call_initialize\n");
+ thread_call_initialize();
+
/*
* Remain on current processor as
* additional processors come online.
device_service_create();
kth_started = 1;
-
-#if MACH_KDP
- kernel_bootstrap_kprintf("calling kdp_init\n");
- kdp_init();
-#endif
#if (defined(__i386__) || defined(__x86_64__)) && NCOPY_WINDOWS > 0
/*
cpu_physwindow_init(0);
#endif
+ vm_kernel_reserved_entry_init();
+
+#if MACH_KDP
+ kernel_bootstrap_kprintf("calling kdp_init\n");
+ kdp_init();
+#endif
+
#if CONFIG_COUNTERS
pmc_bootstrap();
#endif
+#if (defined(__i386__) || defined(__x86_64__))
+ if (turn_on_log_leaks && !new_nkdbufs)
+ new_nkdbufs = 200000;
+ start_kern_tracing(new_nkdbufs);
+ if (turn_on_log_leaks)
+ log_leaks = 1;
+#endif
+
#ifdef IOKIT
PE_init_iokit();
#endif
cpu_userwindow_init(0);
#endif
+#if (!defined(__i386__) && !defined(__x86_64__))
+ if (turn_on_log_leaks && !new_nkdbufs)
+ new_nkdbufs = 200000;
+ start_kern_tracing(new_nkdbufs);
+ if (turn_on_log_leaks)
+ log_leaks = 1;
+#endif
+
/*
* Initialize the shared region module.
*/
vm_shared_region_init();
vm_commpage_init();
+ vm_commpage_text_init();
#if CONFIG_MACF
mac_policy_initmach();
#endif
+ /*
+ * Initialize the global used for permuting kernel
+ * addresses that may be exported to userland as tokens
+ * using VM_KERNEL_ADDRPERM(). Force the random number
+ * to be odd to avoid mapping a non-zero
+ * word-aligned address to zero via addition.
+ */
+ vm_kernel_addrperm = (vm_offset_t)early_random() | 1;
+
/*
* Start the user bootstrap.
*/
* should never occur since the thread is expected
* to have reserved stack.
*/
- load_context_kprintf("stack %x, stackptr %x\n",
+ load_context_kprintf("thread %p, stack %x, stackptr %x\n", thread,
thread->kernel_stack, thread->machine.kstackptr);
if (!thread->kernel_stack) {
load_context_kprintf("calling stack_alloc_try\n");
processor->active_thread = thread;
processor->current_pri = thread->sched_pri;
+ processor->current_thmode = thread->sched_mode;
processor->deadline = UINT64_MAX;
thread->last_processor = processor;
}
void
-srv_setup()
+scale_setup()
{
int scale = 0;
#if defined(__LP64__)
- /* if memory is more than 16G, then apply rules for processes */
- if ((srv != 0) && ((uint64_t)sane_size >= (uint64_t)(16 * 1024 * 1024 *1024ULL))) {
+ typeof(task_max) task_max_base = task_max;
+
+ /* Raise limits for servers with >= 16G */
+ if ((serverperfmode != 0) && ((uint64_t)sane_size >= (uint64_t)(16 * 1024 * 1024 *1024ULL))) {
scale = (int)((uint64_t)sane_size / (uint64_t)(8 * 1024 * 1024 *1024ULL));
/* limit to 128 G */
if (scale > 16)
scale = 16;
- task_max = 2500 * scale;
+ task_max_base = 2500;
+ } else if ((uint64_t)sane_size >= (uint64_t)(3 * 1024 * 1024 *1024ULL))
+ scale = 2;
+
+ task_max = MAX(task_max, task_max_base * scale);
+
+ if (scale != 0) {
task_threadmax = task_max;
- thread_max = task_max * 5;
- } else
- scale = 0;
+ thread_max = task_max * 5;
+ }
+
#endif
- bsd_srv_setup(scale);
+
+ bsd_scale_setup(scale);
ipc_space_max = SPACE_MAX;
- ipc_tree_entry_max = ITE_MAX;
ipc_port_max = PORT_MAX;
ipc_pset_max = SET_MAX;
semaphore_max = SEMAPHORE_MAX;