[apple/xnu.git] / osfmk / kern / startup.c (xnu-3247.10.11)
/*
 * Copyright (c) 2000-2010 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *	Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
 *	School of Computer Science
 *	Carnegie Mellon University
 *	Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 * NOTICE: This file was modified by McAfee Research in 2004 to introduce
 * support for mandatory and extensible security protections. This notice
 * is included in support of clause 2.2 (b) of the Apple Public License,
 * Version 2.0.
 */
/*
 */

/*
 *	Mach kernel startup.
 */

#include <debug.h>
#include <xpr_debug.h>
#include <mach_kdp.h>

#include <mach/boolean.h>
#include <mach/machine.h>
#include <mach/thread_act.h>
#include <mach/task_special_ports.h>
#include <mach/vm_param.h>
#include <ipc/ipc_init.h>
#include <kern/assert.h>
#include <kern/mach_param.h>
#include <kern/misc_protos.h>
#include <kern/clock.h>
#include <kern/coalition.h>
#include <kern/cpu_number.h>
#include <kern/ledger.h>
#include <kern/machine.h>
#include <kern/processor.h>
#include <kern/sched_prim.h>
#if CONFIG_SCHED_SFI
#include <kern/sfi.h>
#endif
#include <kern/startup.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/timer.h>
#if CONFIG_TELEMETRY
#include <kern/telemetry.h>
#endif
#include <kern/xpr.h>
#include <kern/zalloc.h>
#include <kern/locks.h>
#include <kern/debug.h>
#include <corpses/task_corpse.h>
#include <prng/random.h>
#include <console/serial_protos.h>
#include <vm/vm_kern.h>
#include <vm/vm_init.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_shared_region.h>
#include <machine/pmap.h>
#include <machine/commpage.h>
#include <libkern/version.h>
#include <sys/codesign.h>
#include <sys/kdebug.h>
#include <sys/random.h>

#include <kern/waitq.h>


#if CONFIG_ATM
#include <atm/atm_internal.h>
#endif

#if CONFIG_CSR
#include <sys/csr.h>
#endif

#if CONFIG_BANK
#include <bank/bank_internal.h>
#endif

#if ALTERNATE_DEBUGGER
#include <arm64/alternate_debugger.h>
#endif

#if MACH_KDP
#include <kdp/kdp.h>
#endif

#if CONFIG_MACF
#include <security/mac_mach_internal.h>
#endif

#if KPC
#include <kern/kpc.h>
#endif

#if KPERF
#include <kperf/kperf.h>
#endif

#if HYPERVISOR
#include <kern/hv_support.h>
#endif


#include <i386/pmCPU.h>
static void kernel_bootstrap_thread(void);

static void load_context(
	thread_t thread);
#if (defined(__i386__) || defined(__x86_64__)) && NCOPY_WINDOWS > 0
extern void cpu_userwindow_init(int);
extern void cpu_physwindow_init(int);
#endif

#if CONFIG_ECC_LOGGING
#include <kern/ecc.h>
#endif

#if (defined(__i386__) || defined(__x86_64__)) && CONFIG_VMX
#include <i386/vmx/vmx_cpu.h>
#endif

// libkern/OSKextLib.cpp
extern void OSKextRemoveKextBootstrap(void);

void scale_setup(void);
extern void bsd_scale_setup(int);
extern unsigned int semaphore_max;
extern void stackshot_lock_init(void);

/*
 *	Running in virtual memory, on the interrupt stack.
 */

extern int serverperfmode;

/* size of kernel trace buffer, disabled by default */
unsigned int new_nkdbufs = 0;
unsigned int wake_nkdbufs = 0;
unsigned int write_trace_on_panic = 0;
unsigned int trace_typefilter = 0;
boolean_t trace_serial = FALSE;

/* mach leak logging */
int log_leaks = 0;
int turn_on_log_leaks = 0;

static inline void
kernel_bootstrap_log(const char *message)
{
	// kprintf("kernel_bootstrap: %s\n", message);
	kernel_debug_string_simple(message);
}

static inline void
kernel_bootstrap_thread_log(const char *message)
{
	// kprintf("kernel_bootstrap_thread: %s\n", message);
	kernel_debug_string_simple(message);
}

void
kernel_early_bootstrap(void)
{
	/* serverperfmode is needed by timer setup */
	if (PE_parse_boot_argn("serverperfmode", &serverperfmode, sizeof (serverperfmode))) {
		serverperfmode = 1;
	}

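	/* Lock module setup; this precedes subsystems that create kernel locks. */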
	lck_mod_init();

	/*
	 * Initialize the timer callout world
	 */
	timer_call_init();

#if CONFIG_SCHED_SFI
	/*
	 * Configure SFI classes
	 */
	sfi_early_init();
#endif
}

extern boolean_t IORamDiskBSDRoot(void);
extern kern_return_t cpm_preallocate_early(void);

void
kernel_bootstrap(void)
{
	kern_return_t	result;
	thread_t	thread;
	char		namep[16];

	printf("%s\n", version); /* log kernel version */

	if (PE_parse_boot_argn("-l", namep, sizeof (namep)))	/* leaks logging */
		turn_on_log_leaks = 1;

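	/*
	 * Kernel trace buffer sizing and behavior are driven by boot-args:
	 * "trace" (buffer size), "trace_wake", "trace_panic" and
	 * "trace_typefilter".  Illustrative example (values are arbitrary):
	 * boot-args="trace=100000 trace_typefilter=1".
	 */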
	PE_parse_boot_argn("trace", &new_nkdbufs, sizeof (new_nkdbufs));
	PE_parse_boot_argn("trace_wake", &wake_nkdbufs, sizeof (wake_nkdbufs));
	PE_parse_boot_argn("trace_panic", &write_trace_on_panic, sizeof(write_trace_on_panic));
	PE_parse_boot_argn("trace_typefilter", &trace_typefilter, sizeof(trace_typefilter));

	scale_setup();

	kernel_bootstrap_log("vm_mem_bootstrap");
	vm_mem_bootstrap();

	kernel_bootstrap_log("cs_init");
	cs_init();

	kernel_bootstrap_log("vm_mem_init");
	vm_mem_init();

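	/*
	 * Record memory size and kernel version numbers in machine_info
	 * for later consumers of this data.
	 */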
	machine_info.memory_size = (uint32_t)mem_size;
	machine_info.max_mem = max_mem;
	machine_info.major_version = version_major;
	machine_info.minor_version = version_minor;


#if CONFIG_TELEMETRY
	kernel_bootstrap_log("telemetry_init");
	telemetry_init();
#endif

#if CONFIG_CSR
	kernel_bootstrap_log("csr_init");
	csr_init();
#endif

	kernel_bootstrap_log("stackshot_lock_init");
	stackshot_lock_init();

	kernel_bootstrap_log("sched_init");
	sched_init();

	kernel_bootstrap_log("waitq_bootstrap");
	waitq_bootstrap();

	kernel_bootstrap_log("ipc_bootstrap");
	ipc_bootstrap();

#if CONFIG_MACF
	kernel_bootstrap_log("mac_policy_init");
	mac_policy_init();
#endif

	kernel_bootstrap_log("ipc_init");
	ipc_init();

	/*
	 * As soon as the virtual memory system is up, we record
	 * that this CPU is using the kernel pmap.
	 */
	kernel_bootstrap_log("PMAP_ACTIVATE_KERNEL");
	PMAP_ACTIVATE_KERNEL(master_cpu);

	kernel_bootstrap_log("mapping_free_prime");
	mapping_free_prime();	/* Load up with temporary mapping blocks */

	kernel_bootstrap_log("machine_init");
	machine_init();

	kernel_bootstrap_log("clock_init");
	clock_init();

	ledger_init();

	/*
	 *	Initialize the IPC, task, and thread subsystems.
	 */
#if CONFIG_COALITIONS
	kernel_bootstrap_log("coalitions_init");
	coalitions_init();
#endif

	kernel_bootstrap_log("task_init");
	task_init();

	kernel_bootstrap_log("thread_init");
	thread_init();

#if CONFIG_ATM
	/* Initialize the Activity Trace Resource Manager. */
	kernel_bootstrap_log("atm_init");
	atm_init();
#endif

#if CONFIG_BANK
	/* Initialize the BANK Manager. */
	kernel_bootstrap_log("bank_init");
	bank_init();
#endif

	/* initialize the corpse config based on boot-args */
	corpses_init();

	/*
	 * Create a kernel thread to execute the kernel bootstrap.
	 */
	kernel_bootstrap_log("kernel_thread_create");
	result = kernel_thread_create((thread_continue_t)kernel_bootstrap_thread, NULL, MAXPRI_KERNEL, &thread);

	if (result != KERN_SUCCESS) panic("kernel_bootstrap: result = %08X\n", result);

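	/*
	 * Mark the new bootstrap thread runnable and switch to it directly
	 * with load_context(); it is not dispatched through the scheduler.
	 */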
	thread->state = TH_RUN;
	thread->last_made_runnable_time = mach_absolute_time();
	thread_deallocate(thread);

	kernel_bootstrap_log("load_context - done");
	load_context(thread);
	/*NOTREACHED*/
}

int kth_started = 0;

vm_offset_t vm_kernel_addrperm;
vm_offset_t buf_kernel_addrperm;
vm_offset_t vm_kernel_addrperm_ext;

/*
 * Now running in a thread.  Kick off other services,
 * invoke user bootstrap, enter pageout loop.
 */
static void
kernel_bootstrap_thread(void)
{
	processor_t	processor = current_processor();

#define kernel_bootstrap_thread_kprintf(x...) /* kprintf("kernel_bootstrap_thread: " x) */
	kernel_bootstrap_thread_log("idle_thread_create");
	/*
	 * Create the idle processor thread.
	 */
	idle_thread_create(processor);

	/*
	 * N.B. Do not stick anything else
	 * before this point.
	 *
	 * Start up the scheduler services.
	 */
	kernel_bootstrap_thread_log("sched_startup");
	sched_startup();

	/*
	 * Thread lifecycle maintenance (teardown, stack allocation)
	 */
	kernel_bootstrap_thread_log("thread_daemon_init");
	thread_daemon_init();

	/* Create kernel map entry reserve */
	vm_kernel_reserved_entry_init();

	/*
	 * Thread callout service.
	 */
	kernel_bootstrap_thread_log("thread_call_initialize");
	thread_call_initialize();

	/*
	 * Remain on current processor as
	 * additional processors come online.
	 */
	kernel_bootstrap_thread_log("thread_bind");
	thread_bind(processor);

	/*
	 * Initialize ipc thread call support.
	 */
	kernel_bootstrap_thread_log("ipc_thread_call_init");
	ipc_thread_call_init();

	/*
	 * Kick off memory mapping adjustments.
	 */
	kernel_bootstrap_thread_log("mapping_adjust");
	mapping_adjust();

	/*
	 *	Create the clock service.
	 */
	kernel_bootstrap_thread_log("clock_service_create");
	clock_service_create();

	/*
	 *	Create the device service.
	 */
	device_service_create();

	kth_started = 1;

#if (defined(__i386__) || defined(__x86_64__)) && NCOPY_WINDOWS > 0
	/*
	 * Create and initialize the physical copy window for processor 0.
	 * This is required before kicking off IOKit.
	 */
	cpu_physwindow_init(0);
#endif

#if MACH_KDP
	kernel_bootstrap_log("kdp_init");
	kdp_init();
#endif

#if ALTERNATE_DEBUGGER
	alternate_debugger_init();
#endif

#if KPC
	kpc_init();
#endif

#if CONFIG_ECC_LOGGING
	ecc_log_init();
#endif

#if KPERF
	kperf_bootstrap();
#endif

#if HYPERVISOR
	hv_support_init();
#endif

#if CONFIG_TELEMETRY
	kernel_bootstrap_log("bootprofile_init");
	bootprofile_init();
#endif

#if (defined(__i386__) || defined(__x86_64__)) && CONFIG_VMX
	vmx_init();
#endif

#if (defined(__i386__) || defined(__x86_64__))
	if (kdebug_serial) {
		new_nkdbufs = 1;
		if (trace_typefilter == 0)
			trace_typefilter = 1;
	}
	if (turn_on_log_leaks && !new_nkdbufs)
		new_nkdbufs = 200000;
	if (trace_typefilter)
		start_kern_tracing_with_typefilter(new_nkdbufs,
						   FALSE,
						   trace_typefilter);
	else
		start_kern_tracing(new_nkdbufs, FALSE);
	if (turn_on_log_leaks)
		log_leaks = 1;

#endif

	kernel_bootstrap_log("prng_init");
	prng_cpu_init(master_cpu);

#ifdef IOKIT
	PE_init_iokit();
#endif

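	/*
	 * Interrupts are expected to still be disabled at this point;
	 * spllo() below lowers the priority level and allows them.
	 */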
	assert(ml_get_interrupts_enabled() == FALSE);
	(void) spllo();		/* Allow interruptions */

#if (defined(__i386__) || defined(__x86_64__)) && NCOPY_WINDOWS > 0
	/*
	 * Create and initialize the copy window for processor 0.
	 * This also allocates window space for all other processors.
	 * However, this is dependent on the number of processors, so this
	 * call must come after IOKit has been started because IOKit
	 * performs processor discovery.
	 */
	cpu_userwindow_init(0);
#endif

#if (!defined(__i386__) && !defined(__x86_64__))
	if (turn_on_log_leaks && !new_nkdbufs)
		new_nkdbufs = 200000;
	if (trace_typefilter)
		start_kern_tracing_with_typefilter(new_nkdbufs, FALSE, trace_typefilter);
	else
		start_kern_tracing(new_nkdbufs, FALSE);
	if (turn_on_log_leaks)
		log_leaks = 1;
#endif

	/*
	 *	Initialize the shared region module.
	 */
	vm_shared_region_init();
	vm_commpage_init();
	vm_commpage_text_init();


#if CONFIG_MACF
	kernel_bootstrap_log("mac_policy_initmach");
	mac_policy_initmach();
#endif

#if CONFIG_SCHED_SFI
	kernel_bootstrap_log("sfi_init");
	sfi_init();
#endif

	/*
	 * Initialize the globals used for permuting kernel
	 * addresses that may be exported to userland as tokens
	 * using VM_KERNEL_ADDRPERM()/VM_KERNEL_ADDRPERM_EXTERNAL().
	 * Force the random number to be odd to avoid mapping a non-zero
	 * word-aligned address to zero via addition.
	 * Note: at this stage we can use the cryptographically secure PRNG
	 * rather than early_random().
	 */
	read_random(&vm_kernel_addrperm, sizeof(vm_kernel_addrperm));
	vm_kernel_addrperm |= 1;
	read_random(&buf_kernel_addrperm, sizeof(buf_kernel_addrperm));
	buf_kernel_addrperm |= 1;
	read_random(&vm_kernel_addrperm_ext, sizeof(vm_kernel_addrperm_ext));
	vm_kernel_addrperm_ext |= 1;

	vm_set_restrictions();


	/*
	 *	Start the user bootstrap.
	 */
#ifdef MACH_BSD
	bsd_init();
#endif

	/*
	 * Get rid of segments used to bootstrap kext loading. This removes
	 * the KLD, PRELINK symtab, LINKEDIT, and symtab segments/load commands.
	 */
	OSKextRemoveKextBootstrap();

	serial_keyboard_init();		/* Start serial keyboard if wanted */

	vm_page_init_local_q();

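	/* Release the earlier binding to the boot processor. */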
	thread_bind(PROCESSOR_NULL);

	/*
	 *	Become the pageout daemon.
	 */
	vm_pageout();
	/*NOTREACHED*/
}

/*
 *	slave_main:
 *
 *	Load the first thread to start a processor.
 */
void
slave_main(void *machine_param)
{
	processor_t	processor = current_processor();
	thread_t	thread;

	/*
	 *	Use the idle processor thread if there
	 *	is no dedicated start up thread.
	 */
	if (processor->next_thread == THREAD_NULL) {
		thread = processor->idle_thread;
		thread->continuation = (thread_continue_t)processor_start_thread;
		thread->parameter = machine_param;
	}
	else {
		thread = processor->next_thread;
		processor->next_thread = THREAD_NULL;
	}

	load_context(thread);
	/*NOTREACHED*/
}

/*
 *	processor_start_thread:
 *
 *	First thread to execute on a started processor.
 *
 *	Called at splsched.
 */
void
processor_start_thread(void *machine_param)
{
	processor_t	processor = current_processor();
	thread_t	self = current_thread();

	slave_machine_init(machine_param);

	/*
	 *	If running the idle processor thread,
	 *	reenter the idle loop, else terminate.
	 */
	if (self == processor->idle_thread)
		thread_block((thread_continue_t)idle_thread);

	thread_terminate(self);
	/*NOTREACHED*/
}

/*
 *	load_context:
 *
 *	Start the first thread on a processor.
 */
static void
load_context(
	thread_t	thread)
{
	processor_t	processor = current_processor();


#define load_context_kprintf(x...) /* kprintf("load_context: " x) */

	load_context_kprintf("machine_set_current_thread\n");
	machine_set_current_thread(thread);

	load_context_kprintf("processor_up\n");
	processor_up(processor);

	PMAP_ACTIVATE_KERNEL(processor->cpu_id);

	/*
	 * Acquire a stack if none attached.  The panic
	 * should never occur since the thread is expected
	 * to have reserved stack.
	 */
	load_context_kprintf("thread %p, stack %lx, stackptr %lx\n", thread,
			     thread->kernel_stack, thread->machine.kstackptr);
	if (!thread->kernel_stack) {
		load_context_kprintf("stack_alloc_try\n");
		if (!stack_alloc_try(thread))
			panic("load_context");
	}

	/*
	 * The idle processor threads are not counted as
	 * running for load calculations.
	 */
	if (!(thread->state & TH_IDLE))
		sched_run_incr(thread);

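	/*
	 * Install the thread as this processor's active thread and start
	 * its timers before switching into its context below.
	 */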
	processor->active_thread = thread;
	processor->current_pri = thread->sched_pri;
	processor->current_thmode = thread->sched_mode;
	processor->deadline = UINT64_MAX;
	thread->last_processor = processor;

	processor->last_dispatch = mach_absolute_time();
	timer_start(&thread->system_timer, processor->last_dispatch);
	PROCESSOR_DATA(processor, thread_timer) = PROCESSOR_DATA(processor, kernel_timer) = &thread->system_timer;

	timer_start(&PROCESSOR_DATA(processor, system_state), processor->last_dispatch);
	PROCESSOR_DATA(processor, current_state) = &PROCESSOR_DATA(processor, system_state);

	PMAP_ACTIVATE_USER(thread, processor->cpu_id);

	load_context_kprintf("machine_load_context\n");
	machine_load_context(thread);
	/*NOTREACHED*/
}

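/*
 *	scale_setup:
 *
 *	Scale Mach object limits (tasks, threads, IPC spaces, ports, port
 *	sets, and semaphores) to the amount of physical memory, then pass
 *	the resulting scale factor to BSD via bsd_scale_setup().
 */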
void
scale_setup()
{
	int scale = 0;
#if defined(__LP64__)
	typeof(task_max) task_max_base = task_max;

	/* Raise limits for servers with >= 16G */
	if ((serverperfmode != 0) && ((uint64_t)sane_size >= (uint64_t)(16 * 1024 * 1024 * 1024ULL))) {
		scale = (int)((uint64_t)sane_size / (uint64_t)(8 * 1024 * 1024 * 1024ULL));
		/* limit to 128 G */
		if (scale > 16)
			scale = 16;
		task_max_base = 2500;
	} else if ((uint64_t)sane_size >= (uint64_t)(3 * 1024 * 1024 * 1024ULL))
		scale = 2;

	task_max = MAX(task_max, task_max_base * scale);

	if (scale != 0) {
		task_threadmax = task_max;
		thread_max = task_max * 5;
	}

#endif

	bsd_scale_setup(scale);

	ipc_space_max = SPACE_MAX;
	ipc_port_max = PORT_MAX;
	ipc_pset_max = SET_MAX;
	semaphore_max = SEMAPHORE_MAX;
}