/* apple/xnu: osfmk/kern/startup.c (xnu-2422.110.17) */
/*
 * Copyright (c) 2000-2010 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 * NOTICE: This file was modified by McAfee Research in 2004 to introduce
 * support for mandatory and extensible security protections. This notice
 * is included in support of clause 2.2 (b) of the Apple Public License,
 * Version 2.0.
 */
/*
 */

/*
 * Mach kernel startup.
 */

#include <debug.h>
#include <xpr_debug.h>
#include <mach_kdp.h>

#include <mach/boolean.h>
#include <mach/machine.h>
#include <mach/thread_act.h>
#include <mach/task_special_ports.h>
#include <mach/vm_param.h>
#include <ipc/ipc_init.h>
#include <kern/assert.h>
#include <kern/mach_param.h>
#include <kern/misc_protos.h>
#include <kern/clock.h>
#include <kern/cpu_number.h>
#include <kern/ledger.h>
#include <kern/machine.h>
#include <kern/processor.h>
#include <kern/sched_prim.h>
#include <kern/startup.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/timer.h>
#if CONFIG_TELEMETRY
#include <kern/telemetry.h>
#endif
#include <kern/wait_queue.h>
#include <kern/xpr.h>
#include <kern/zalloc.h>
#include <kern/locks.h>
#include <console/serial_protos.h>
#include <vm/vm_kern.h>
#include <vm/vm_init.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_shared_region.h>
#include <machine/pmap.h>
#include <machine/commpage.h>
#include <libkern/version.h>
#include <sys/codesign.h>
#include <sys/kdebug.h>

#if MACH_KDP
#include <kdp/kdp.h>
#endif

#if CONFIG_MACF
#include <security/mac_mach_internal.h>
#endif

#if CONFIG_COUNTERS
#include <pmc/pmc.h>
#endif

#if KPC
#include <kern/kpc.h>
#endif

#if KPERF
#include <kperf/kperf.h>
#endif

#include <i386/pmCPU.h>
static void kernel_bootstrap_thread(void);

static void load_context(
    thread_t thread);
#if (defined(__i386__) || defined(__x86_64__)) && NCOPY_WINDOWS > 0
extern void cpu_userwindow_init(int);
extern void cpu_physwindow_init(int);
#endif

// libkern/OSKextLib.cpp
extern void OSKextRemoveKextBootstrap(void);

void scale_setup(void);
extern void bsd_scale_setup(int);
extern unsigned int semaphore_max;
extern void stackshot_lock_init(void);

/*
 * Running in virtual memory, on the interrupt stack.
 */

extern int serverperfmode;

/* size of kernel trace buffer, disabled by default */
unsigned int new_nkdbufs = 0;
unsigned int wake_nkdbufs = 0;

/* mach leak logging */
int log_leaks = 0;
int turn_on_log_leaks = 0;


void
kernel_early_bootstrap(void)
{
    /* serverperfmode is needed by timer setup */
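    /* Note: the mere presence of the boot-arg enables it; any value parsed here is overwritten with 1 just below. */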
    if (PE_parse_boot_argn("serverperfmode", &serverperfmode, sizeof (serverperfmode))) {
        serverperfmode = 1;
    }

    lck_mod_init();

    /*
     * Initialize the timer callout world
     */
    timer_call_init();
}


void
kernel_bootstrap(void)
{
    kern_return_t result;
    thread_t thread;
    char namep[16];

    printf("%s\n", version); /* log kernel version */

#define kernel_bootstrap_kprintf(x...) /* kprintf("kernel_bootstrap: " x) */

    if (PE_parse_boot_argn("-l", namep, sizeof (namep))) /* leaks logging */
        turn_on_log_leaks = 1;

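    /*
     * kdebug buffer sizing via boot-args (illustrative note; see the kdebug
     * code for exact semantics): e.g. trace=100000 requests roughly that many
     * trace entries at boot, while trace_wake appears to size the buffer used
     * when tracing is started at system wake.
     */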
    PE_parse_boot_argn("trace", &new_nkdbufs, sizeof (new_nkdbufs));

    PE_parse_boot_argn("trace_wake", &wake_nkdbufs, sizeof (wake_nkdbufs));

    scale_setup();

    kernel_bootstrap_kprintf("calling vm_mem_bootstrap\n");
    vm_mem_bootstrap();

    kernel_bootstrap_kprintf("calling cs_init\n");
    cs_init();

    kernel_bootstrap_kprintf("calling vm_mem_init\n");
    vm_mem_init();

    machine_info.memory_size = (uint32_t)mem_size;
    machine_info.max_mem = max_mem;
    machine_info.major_version = version_major;
    machine_info.minor_version = version_minor;

#if CONFIG_TELEMETRY
    kernel_bootstrap_kprintf("calling telemetry_init\n");
    telemetry_init();
#endif

    kernel_bootstrap_kprintf("calling stackshot_lock_init\n");
    stackshot_lock_init();

    kernel_bootstrap_kprintf("calling sched_init\n");
    sched_init();

    kernel_bootstrap_kprintf("calling wait_queue_bootstrap\n");
    wait_queue_bootstrap();

    kernel_bootstrap_kprintf("calling ipc_bootstrap\n");
    ipc_bootstrap();

#if CONFIG_MACF
    mac_policy_init();
#endif
    kernel_bootstrap_kprintf("calling ipc_init\n");
    ipc_init();

    /*
     * As soon as the virtual memory system is up, we record
     * that this CPU is using the kernel pmap.
     */
    kernel_bootstrap_kprintf("calling PMAP_ACTIVATE_KERNEL\n");
    PMAP_ACTIVATE_KERNEL(master_cpu);

    kernel_bootstrap_kprintf("calling mapping_free_prime\n");
    mapping_free_prime();   /* Load up with temporary mapping blocks */

    kernel_bootstrap_kprintf("calling machine_init\n");
    machine_init();

    kernel_bootstrap_kprintf("calling clock_init\n");
    clock_init();

    ledger_init();

    /*
     * Initialize the IPC, task, and thread subsystems.
     */
    kernel_bootstrap_kprintf("calling task_init\n");
    task_init();

    kernel_bootstrap_kprintf("calling thread_init\n");
    thread_init();

    /*
     * Create a kernel thread to execute the kernel bootstrap.
     */
    kernel_bootstrap_kprintf("calling kernel_thread_create\n");
    result = kernel_thread_create((thread_continue_t)kernel_bootstrap_thread, NULL, MAXPRI_KERNEL, &thread);

    if (result != KERN_SUCCESS) panic("kernel_bootstrap: result = %08X\n", result);

    thread->state = TH_RUN;
    thread_deallocate(thread);

    kernel_bootstrap_kprintf("calling load_context - done\n");
    load_context(thread);
    /*NOTREACHED*/
}

int kth_started = 0;

vm_offset_t vm_kernel_addrperm;
vm_offset_t buf_kernel_addrperm;

/*
 * Now running in a thread.  Kick off other services,
 * invoke user bootstrap, enter pageout loop.
 */
static void
kernel_bootstrap_thread(void)
{
    processor_t processor = current_processor();

#define kernel_bootstrap_thread_kprintf(x...) /* kprintf("kernel_bootstrap_thread: " x) */
    kernel_bootstrap_thread_kprintf("calling idle_thread_create\n");
    /*
     * Create the idle processor thread.
     */
    idle_thread_create(processor);

    /*
     * N.B. Do not stick anything else
     * before this point.
     *
     * Start up the scheduler services.
     */
    kernel_bootstrap_thread_kprintf("calling sched_startup\n");
    sched_startup();

    /*
     * Thread lifecycle maintenance (teardown, stack allocation)
     */
    kernel_bootstrap_thread_kprintf("calling thread_daemon_init\n");
    thread_daemon_init();

    /* Create kernel map entry reserve */
    vm_kernel_reserved_entry_init();

    /*
     * Thread callout service.
     */
    kernel_bootstrap_thread_kprintf("calling thread_call_initialize\n");
    thread_call_initialize();

    /*
     * Remain on current processor as
     * additional processors come online.
     */
    kernel_bootstrap_thread_kprintf("calling thread_bind\n");
    thread_bind(processor);

    /*
     * Kick off memory mapping adjustments.
     */
    kernel_bootstrap_thread_kprintf("calling mapping_adjust\n");
    mapping_adjust();

    /*
     * Create the clock service.
     */
    kernel_bootstrap_thread_kprintf("calling clock_service_create\n");
    clock_service_create();

    /*
     * Create the device service.
     */
    device_service_create();

    kth_started = 1;

#if (defined(__i386__) || defined(__x86_64__)) && NCOPY_WINDOWS > 0
    /*
     * Create and initialize the physical copy window for processor 0.
     * This is required before kicking off IOKit.
     */
    cpu_physwindow_init(0);
#endif

#if MACH_KDP
    kernel_bootstrap_kprintf("calling kdp_init\n");
    kdp_init();
#endif

#if ALTERNATE_DEBUGGER
    alternate_debugger_init();
#endif

#if CONFIG_COUNTERS
    pmc_bootstrap();
#endif

#if KPC
    kpc_init();
#endif

#if KPERF
    kperf_bootstrap();
#endif

#if CONFIG_TELEMETRY
    kernel_bootstrap_kprintf("calling bootprofile_init\n");
    bootprofile_init();
#endif

#if (defined(__i386__) || defined(__x86_64__))
    if (turn_on_log_leaks && !new_nkdbufs)
        new_nkdbufs = 200000;
    start_kern_tracing(new_nkdbufs, FALSE);
    if (turn_on_log_leaks)
        log_leaks = 1;

#endif

#ifdef IOKIT
    PE_init_iokit();
#endif

    (void) spllo();     /* Allow interruptions */

#if (defined(__i386__) || defined(__x86_64__)) && NCOPY_WINDOWS > 0
    /*
     * Create and initialize the copy window for processor 0
     * This also allocates window space for all other processors.
     * However, this is dependent on the number of processors - so this call
     * must be after IOKit has been started because IOKit performs processor
     * discovery.
     */
    cpu_userwindow_init(0);
#endif

#if (!defined(__i386__) && !defined(__x86_64__))
    if (turn_on_log_leaks && !new_nkdbufs)
        new_nkdbufs = 200000;
    start_kern_tracing(new_nkdbufs, FALSE);
    if (turn_on_log_leaks)
        log_leaks = 1;
#endif

    /*
     * Initialize the shared region module.
     */
    vm_shared_region_init();
    vm_commpage_init();
    vm_commpage_text_init();

#if CONFIG_MACF
    mac_policy_initmach();
#endif

    /*
     * Initialize the global used for permuting kernel
     * addresses that may be exported to userland as tokens
     * using VM_KERNEL_ADDRPERM(). Force the random number
     * to be odd to avoid mapping a non-zero
     * word-aligned address to zero via addition.
     */
    vm_kernel_addrperm = (vm_offset_t)early_random() | 1;
    buf_kernel_addrperm = (vm_offset_t)early_random() | 1;
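    /*
     * Illustrative sketch only (the authoritative macro lives in
     * osfmk/mach/vm_param.h); the permutation is roughly:
     *
     *   VM_KERNEL_ADDRPERM(addr) == (addr == 0) ? 0 : addr + vm_kernel_addrperm
     *
     * Because the cookie is forced odd, adding it to a non-zero
     * word-aligned (even) kernel address can never yield zero.
     */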

    /*
     * Start the user bootstrap.
     */
#ifdef MACH_BSD
    bsd_init();
#endif

    /*
     * Get rid of segments used to bootstrap kext loading. This removes
     * the KLD, PRELINK symtab, LINKEDIT, and symtab segments/load commands.
     */
    OSKextRemoveKextBootstrap();

    serial_keyboard_init();     /* Start serial keyboard if wanted */

    vm_page_init_local_q();

    thread_bind(PROCESSOR_NULL);

    /*
     * Become the pageout daemon.
     */
    vm_pageout();
    /*NOTREACHED*/
}

/*
 * slave_main:
 *
 * Load the first thread to start a processor.
 */
void
slave_main(void *machine_param)
{
    processor_t processor = current_processor();
    thread_t thread;

    /*
     * Use the idle processor thread if there
     * is no dedicated start up thread.
     */
    if (processor->next_thread == THREAD_NULL) {
        thread = processor->idle_thread;
        thread->continuation = (thread_continue_t)processor_start_thread;
        thread->parameter = machine_param;
    }
    else {
        thread = processor->next_thread;
        processor->next_thread = THREAD_NULL;
    }

    load_context(thread);
    /*NOTREACHED*/
}

/*
 * processor_start_thread:
 *
 * First thread to execute on a started processor.
 *
 * Called at splsched.
 */
void
processor_start_thread(void *machine_param)
{
    processor_t processor = current_processor();
    thread_t self = current_thread();

    slave_machine_init(machine_param);

    /*
     * If running the idle processor thread,
     * reenter the idle loop, else terminate.
     */
    if (self == processor->idle_thread)
        thread_block((thread_continue_t)idle_thread);

    thread_terminate(self);
    /*NOTREACHED*/
}

/*
 * load_context:
 *
 * Start the first thread on a processor.
 */
static void
load_context(
    thread_t thread)
{
    processor_t processor = current_processor();

#define load_context_kprintf(x...) /* kprintf("load_context: " x) */

    load_context_kprintf("calling machine_set_current_thread\n");
    machine_set_current_thread(thread);

    load_context_kprintf("calling processor_up\n");
    processor_up(processor);

    PMAP_ACTIVATE_KERNEL(processor->cpu_id);

    /*
     * Acquire a stack if none attached.  The panic
     * should never occur since the thread is expected
     * to have a reserved stack.
     */
    load_context_kprintf("thread %p, stack %lx, stackptr %lx\n", thread,
                         thread->kernel_stack, thread->machine.kstackptr);
    if (!thread->kernel_stack) {
        load_context_kprintf("calling stack_alloc_try\n");
        if (!stack_alloc_try(thread))
            panic("load_context");
    }

    /*
     * The idle processor threads are not counted as
     * running for load calculations.
     */
    if (!(thread->state & TH_IDLE))
        sched_run_incr();

    processor->active_thread = thread;
    processor->current_pri = thread->sched_pri;
    processor->current_thmode = thread->sched_mode;
    processor->deadline = UINT64_MAX;
    thread->last_processor = processor;

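    /*
     * Start charging CPU time to the new thread: stamp the dispatch time,
     * begin its system timer, and point the per-processor accounting at
     * kernel (system) state.
     */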
    processor->last_dispatch = mach_absolute_time();
    timer_start(&thread->system_timer, processor->last_dispatch);
    PROCESSOR_DATA(processor, thread_timer) = PROCESSOR_DATA(processor, kernel_timer) = &thread->system_timer;

    timer_start(&PROCESSOR_DATA(processor, system_state), processor->last_dispatch);
    PROCESSOR_DATA(processor, current_state) = &PROCESSOR_DATA(processor, system_state);

    PMAP_ACTIVATE_USER(thread, processor->cpu_id);

    load_context_kprintf("calling machine_load_context\n");
    machine_load_context(thread);
    /*NOTREACHED*/
}

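/*
 * Sizing sketch (illustrative arithmetic, LP64 only): with serverperfmode
 * set and sane_size = 64GB, scale = 64/8 = 8, so task_max becomes
 * MAX(task_max, 2500 * 8) = 20000, task_threadmax tracks task_max, and
 * thread_max = 5 * task_max = 100000.  scale is capped at 16, so memory
 * beyond 128GB no longer raises the limits.  A non-server machine with at
 * least 3GB of memory gets scale = 2, which doubles the default task_max.
 */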
void
scale_setup()
{
    int scale = 0;
#if defined(__LP64__)
    typeof(task_max) task_max_base = task_max;

    /* Raise limits for servers with >= 16G */
    if ((serverperfmode != 0) && ((uint64_t)sane_size >= (uint64_t)(16 * 1024 * 1024 * 1024ULL))) {
        scale = (int)((uint64_t)sane_size / (uint64_t)(8 * 1024 * 1024 * 1024ULL));
        /* limit to 128 G */
        if (scale > 16)
            scale = 16;
        task_max_base = 2500;
    } else if ((uint64_t)sane_size >= (uint64_t)(3 * 1024 * 1024 * 1024ULL))
        scale = 2;

    task_max = MAX(task_max, task_max_base * scale);

    if (scale != 0) {
        task_threadmax = task_max;
        thread_max = task_max * 5;
    }

#endif

    bsd_scale_setup(scale);

    ipc_space_max = SPACE_MAX;
    ipc_port_max = PORT_MAX;
    ipc_pset_max = SET_MAX;
    semaphore_max = SEMAPHORE_MAX;
}