/* apple/xnu (xnu-6153.101.6) - osfmk/kern/startup.c */
/*
 * Copyright (c) 2000-2018 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 * NOTICE: This file was modified by McAfee Research in 2004 to introduce
 * support for mandatory and extensible security protections. This notice
 * is included in support of clause 2.2 (b) of the Apple Public License,
 * Version 2.0.
 */
/*
 */

/*
 * Mach kernel startup.
 */

#include <debug.h>
#include <mach_kdp.h>

#include <mach/boolean.h>
#include <mach/machine.h>
#include <mach/thread_act.h>
#include <mach/task_special_ports.h>
#include <mach/vm_param.h>
#include <ipc/ipc_init.h>
#include <kern/assert.h>
#include <kern/mach_param.h>
#include <kern/misc_protos.h>
#include <kern/clock.h>
#include <kern/coalition.h>
#include <kern/cpu_number.h>
#include <kern/cpu_quiesce.h>
#include <kern/ledger.h>
#include <kern/machine.h>
#include <kern/processor.h>
#include <kern/restartable.h>
#include <kern/sched_prim.h>
#include <kern/turnstile.h>
#if CONFIG_SCHED_SFI
#include <kern/sfi.h>
#endif
#include <kern/startup.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/timer.h>
#if CONFIG_TELEMETRY
#include <kern/telemetry.h>
#endif
#include <kern/zalloc.h>
#include <kern/locks.h>
#include <kern/debug.h>
#include <corpses/task_corpse.h>
#include <prng/random.h>
#include <console/serial_protos.h>
#include <vm/vm_kern.h>
#include <vm/vm_init.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_shared_region.h>
#include <machine/pmap.h>
#include <machine/commpage.h>
#include <libkern/version.h>
#include <sys/codesign.h>
#include <sys/kdebug.h>
#include <sys/random.h>
#include <sys/ktrace.h>
#include <libkern/section_keywords.h>

#include <kern/ltable.h>
#include <kern/waitq.h>
#include <ipc/ipc_voucher.h>
#include <voucher/ipc_pthread_priority_internal.h>
#include <mach/host_info.h>
#include <pthread/workqueue_internal.h>

#if CONFIG_XNUPOST
#include <tests/ktest.h>
#include <tests/xnupost.h>
#endif

#if CONFIG_ATM
#include <atm/atm_internal.h>
#endif

#if CONFIG_CSR
#include <sys/csr.h>
#endif

#include <bank/bank_internal.h>

#if ALTERNATE_DEBUGGER
#include <arm64/alternate_debugger.h>
#endif

#if MACH_KDP
#include <kdp/kdp.h>
#endif

#if CONFIG_MACF
#include <security/mac_mach_internal.h>
#if CONFIG_VNGUARD
extern void vnguard_policy_init(void);
#endif
#endif

#if KPC
#include <kern/kpc.h>
#endif

#if HYPERVISOR
#include <kern/hv_support.h>
#endif

#include <san/kasan.h>

#if defined(__arm__) || defined(__arm64__)
#include <arm/misc_protos.h> // for arm_vm_prot_finalize
#endif

#include <i386/pmCPU.h>
static void kernel_bootstrap_thread(void);

static void load_context(
    thread_t thread);
#if (defined(__i386__) || defined(__x86_64__)) && NCOPY_WINDOWS > 0
extern void cpu_userwindow_init(int);
extern void cpu_physwindow_init(int);
#endif

#if CONFIG_ECC_LOGGING
#include <kern/ecc.h>
#endif

#if (defined(__i386__) || defined(__x86_64__)) && CONFIG_VMX
#include <i386/vmx/vmx_cpu.h>
#endif

#if CONFIG_DTRACE
extern void dtrace_early_init(void);
extern void sdt_early_init(void);
#endif

// libkern/OSKextLib.cpp
extern void OSKextRemoveKextBootstrap(void);

void scale_setup(void);
extern void bsd_scale_setup(int);
extern unsigned int semaphore_max;
extern void stackshot_init(void);
extern void ktrace_init(void);
extern void oslog_init(void);

/*
 * Running in virtual memory, on the interrupt stack.
 */

extern int serverperfmode;

/* size of kernel trace buffer, disabled by default */
unsigned int new_nkdbufs = 0;
unsigned int wake_nkdbufs = 0;
unsigned int write_trace_on_panic = 0;
unsigned int trace_wrap = 0;
boolean_t trace_serial = FALSE;
boolean_t early_boot_complete = FALSE;

/* mach leak logging */
int log_leaks = 0;

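/*
 * Boot-progress logging helpers: each milestone string is recorded via
 * kernel_debug_string_early() (the early kdebug trace stream) so that boot
 * progress can be reconstructed later from a trace; the kprintf mirrors are
 * left commented out to keep early console output quiet.
 */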
static inline void
kernel_bootstrap_log(const char *message)
{
//	kprintf("kernel_bootstrap: %s\n", message);
    kernel_debug_string_early(message);
}

static inline void
kernel_bootstrap_thread_log(const char *message)
{
//	kprintf("kernel_bootstrap_thread: %s\n", message);
    kernel_debug_string_early(message);
}

void
kernel_early_bootstrap(void)
{
    /* serverperfmode is needed by timer setup */
    if (PE_parse_boot_argn("serverperfmode", &serverperfmode, sizeof(serverperfmode))) {
        serverperfmode = 1;
    }

#if CONFIG_SCHED_SFI
    /*
     * Configure SFI classes
     */
    sfi_early_init();
#endif
}


void
kernel_bootstrap(void)
{
    kern_return_t result;
    thread_t thread;
    char namep[16];

    printf("%s\n", version); /* log kernel version */

    if (PE_parse_boot_argn("-l", namep, sizeof(namep))) { /* leaks logging */
        log_leaks = 1;
    }

    PE_parse_boot_argn("trace", &new_nkdbufs, sizeof(new_nkdbufs));
    PE_parse_boot_argn("trace_wake", &wake_nkdbufs, sizeof(wake_nkdbufs));
    PE_parse_boot_argn("trace_panic", &write_trace_on_panic, sizeof(write_trace_on_panic));
    PE_parse_boot_argn("trace_wrap", &trace_wrap, sizeof(trace_wrap));
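    /*
     * Example (illustrative values, not from this file): booting with
     * "trace=0x40000" asks kdebug_init() (called later from
     * kernel_bootstrap_thread()) to preallocate roughly 256K trace events,
     * "trace_wake=0x10000" sizes the buffer re-armed on wake,
     * "trace_panic=1" requests that the trace buffer be written out on
     * panic, and "trace_wrap=1" lets the boot trace wrap instead of
     * stopping when full.
     */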

    scale_setup();

    kernel_bootstrap_log("vm_mem_bootstrap");
    vm_mem_bootstrap();

    kernel_bootstrap_log("cs_init");
    cs_init();

    kernel_bootstrap_log("vm_mem_init");
    vm_mem_init();

    machine_info.memory_size = (uint32_t)mem_size;
    machine_info.max_mem = max_mem;
    machine_info.major_version = version_major;
    machine_info.minor_version = version_minor;

    oslog_init();

#if KASAN
    kernel_bootstrap_log("kasan_late_init");
    kasan_late_init();
#endif

#if CONFIG_TELEMETRY
    kernel_bootstrap_log("telemetry_init");
    telemetry_init();
#endif

#if CONFIG_CSR
    kernel_bootstrap_log("csr_init");
    csr_init();
#endif

    if (PE_i_can_has_debugger(NULL)) {
        if (PE_parse_boot_argn("-show_pointers", &namep, sizeof(namep))) {
            doprnt_hide_pointers = FALSE;
        }
        if (PE_parse_boot_argn("-no_slto_panic", &namep, sizeof(namep))) {
            extern boolean_t spinlock_timeout_panic;
            spinlock_timeout_panic = FALSE;
        }
    }

    kernel_bootstrap_log("console_init");
    console_init();

    kernel_bootstrap_log("stackshot_init");
    stackshot_init();

    kernel_bootstrap_log("sched_init");
    sched_init();

    kernel_bootstrap_log("ltable_bootstrap");
    ltable_bootstrap();

    kernel_bootstrap_log("waitq_bootstrap");
    waitq_bootstrap();

    kernel_bootstrap_log("ipc_bootstrap");
    ipc_bootstrap();

#if CONFIG_MACF
    kernel_bootstrap_log("mac_policy_init");
    mac_policy_init();
#endif

    kernel_bootstrap_log("ipc_init");
    ipc_init();

    /*
     * As soon as the virtual memory system is up, we record
     * that this CPU is using the kernel pmap.
     */
    kernel_bootstrap_log("PMAP_ACTIVATE_KERNEL");
    PMAP_ACTIVATE_KERNEL(master_cpu);

    kernel_bootstrap_log("mapping_free_prime");
    mapping_free_prime(); /* Load up with temporary mapping blocks */

    kernel_bootstrap_log("machine_init");
    machine_init();

    kernel_bootstrap_log("thread_machine_init_template");
    thread_machine_init_template();

    kernel_bootstrap_log("clock_init");
    clock_init();

    ledger_init();

    /*
     * Initialize the IPC, task, and thread subsystems.
     */

#if CONFIG_COALITIONS
    kernel_bootstrap_log("coalitions_init");
    coalitions_init();
#endif

    kernel_bootstrap_log("task_init");
    task_init();

    kernel_bootstrap_log("thread_init");
    thread_init();

    kernel_bootstrap_log("restartable_init");
    restartable_init();

    kernel_bootstrap_log("workq_init");
    workq_init();

    kernel_bootstrap_log("turnstiles_init");
    turnstiles_init();

#if CONFIG_ATM
    /* Initialize the Activity Trace Resource Manager. */
    kernel_bootstrap_log("atm_init");
    atm_init();
#endif
    kernel_bootstrap_log("mach_init_activity_id");
    mach_init_activity_id();

    /* Initialize the BANK Manager. */
    kernel_bootstrap_log("bank_init");
    bank_init();

    kernel_bootstrap_log("ipc_pthread_priority_init");
    ipc_pthread_priority_init();

    /* initialize the corpse config based on boot-args */
    corpses_init();

    /* initialize host_statistics */
    host_statistics_init();

    /* initialize exceptions */
    exception_init();

#if CONFIG_SCHED_SFI
    kernel_bootstrap_log("sfi_init");
    sfi_init();
#endif

    /*
     * Create a kernel thread to execute the kernel bootstrap.
     */

    kernel_bootstrap_log("kernel_thread_create");
    result = kernel_thread_create((thread_continue_t)kernel_bootstrap_thread, NULL, MAXPRI_KERNEL, &thread);

    if (result != KERN_SUCCESS) {
        panic("kernel_bootstrap: result = %08X\n", result);
    }

    /* The static init_thread is re-used as the bootstrap thread */
    assert(thread == current_thread());

    /* TODO: do a proper thread_start() (without the thread_setrun()) */
    thread->state = TH_RUN;
    thread->last_made_runnable_time = mach_absolute_time();
    thread_set_thread_name(thread, "kernel_bootstrap_thread");

    thread_deallocate(thread);

    kernel_bootstrap_log("load_context - done");
    load_context(thread);
    /*NOTREACHED*/
}

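/*
 * kth_started is set to 1 in kernel_bootstrap_thread() once the clock and
 * device services have been created; presumably other early-boot code can
 * test it to tell whether the bootstrap thread has reached that point.
 */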
int kth_started = 0;

vm_offset_t vm_kernel_addrperm;
vm_offset_t buf_kernel_addrperm;
vm_offset_t vm_kernel_addrperm_ext;
uint64_t vm_kernel_addrhash_salt;
uint64_t vm_kernel_addrhash_salt_ext;

/*
 * Now running in a thread. Kick off other services,
 * invoke user bootstrap, enter pageout loop.
 */
static void
kernel_bootstrap_thread(void)
{
    processor_t processor = current_processor();

#define kernel_bootstrap_thread_kprintf(x...) /* kprintf("kernel_bootstrap_thread: " x) */
    kernel_bootstrap_thread_log("idle_thread_create");
    /*
     * Create the idle processor thread.
     */
    idle_thread_create(processor);

    /*
     * N.B. Do not stick anything else
     * before this point.
     *
     * Start up the scheduler services.
     */
    kernel_bootstrap_thread_log("sched_startup");
    sched_startup();

    /*
     * Thread lifecycle maintenance (teardown, stack allocation)
     */
    kernel_bootstrap_thread_log("thread_daemon_init");
    thread_daemon_init();

    /* Create kernel map entry reserve */
    vm_kernel_reserved_entry_init();

    /*
     * Thread callout service.
     */
    kernel_bootstrap_thread_log("thread_call_initialize");
    thread_call_initialize();

    /*
     * Remain on current processor as
     * additional processors come online.
     */
    kernel_bootstrap_thread_log("thread_bind");
    thread_bind(processor);

    /*
     * Initialize ipc thread call support.
     */
    kernel_bootstrap_thread_log("ipc_thread_call_init");
    ipc_thread_call_init();

    /*
     * Kick off memory mapping adjustments.
     */
    kernel_bootstrap_thread_log("mapping_adjust");
    mapping_adjust();

    /*
     * Create the clock service.
     */
    kernel_bootstrap_thread_log("clock_service_create");
    clock_service_create();

    /*
     * Create the device service.
     */
    device_service_create();

    kth_started = 1;

#if (defined(__i386__) || defined(__x86_64__)) && NCOPY_WINDOWS > 0
    /*
     * Create and initialize the physical copy window for processor 0.
     * This is required before kicking off IOKit.
     */
    cpu_physwindow_init(0);
#endif

    phys_carveout_init();

#if MACH_KDP
    kernel_bootstrap_log("kdp_init");
    kdp_init();
#endif

#if ALTERNATE_DEBUGGER
    alternate_debugger_init();
#endif

#if KPC
    kpc_init();
#endif

#if CONFIG_ECC_LOGGING
    ecc_log_init();
#endif

#if HYPERVISOR
    hv_support_init();
#endif

#if CONFIG_TELEMETRY
    kernel_bootstrap_log("bootprofile_init");
    bootprofile_init();
#endif

#if (defined(__i386__) || defined(__x86_64__)) && CONFIG_VMX
    vmx_init();
#endif

    kernel_bootstrap_thread_log("ktrace_init");
    ktrace_init();

    char trace_typefilter[256] = {};
    PE_parse_boot_arg_str("trace_typefilter", trace_typefilter,
        sizeof(trace_typefilter));
    kdebug_init(new_nkdbufs, trace_typefilter, trace_wrap);

#ifdef MACH_BSD
    kernel_bootstrap_log("bsd_early_init");
    bsd_early_init();
#endif

#if defined(__arm64__)
    ml_lockdown_init();
#endif

#ifdef IOKIT
    kernel_bootstrap_log("PE_init_iokit");
    PE_init_iokit();
#endif

    assert(ml_get_interrupts_enabled() == FALSE);

    /*
     * Past this point, kernel subsystems that expect to operate with
     * interrupts or preemption enabled may begin enforcement.
     */
    early_boot_complete = TRUE;

#if INTERRUPT_MASKED_DEBUG
    // Reset interrupts masked timeout before we enable interrupts
    ml_spin_debug_clear_self();
#endif
    (void) spllo(); /* Allow interruptions */

#if (defined(__i386__) || defined(__x86_64__)) && NCOPY_WINDOWS > 0
    /*
     * Create and initialize the copy window for processor 0.
     * This also allocates window space for all other processors.
     * However, this is dependent on the number of processors - so this call
     * must be after IOKit has been started because IOKit performs processor
     * discovery.
     */
    cpu_userwindow_init(0);
#endif

    /*
     * Initialize the shared region module.
     */
    vm_shared_region_init();
    vm_commpage_init();
    vm_commpage_text_init();

#if CONFIG_MACF
    kernel_bootstrap_log("mac_policy_initmach");
    mac_policy_initmach();
#if CONFIG_VNGUARD
    vnguard_policy_init();
#endif
#endif

#if CONFIG_DTRACE
    dtrace_early_init();
    sdt_early_init();
#endif


    /*
     * Get rid of segments used to bootstrap kext loading. This removes
     * the KLD, PRELINK symtab, LINKEDIT, and symtab segments/load commands.
     * Must be done prior to lockdown so that we can free (and possibly relocate)
     * the static KVA mappings used for the jettisoned bootstrap segments.
     */
    OSKextRemoveKextBootstrap();
#if defined(__arm__) || defined(__arm64__)
#if CONFIG_KERNEL_INTEGRITY
    machine_lockdown_preflight();
#endif
    /*
     * Finalize protections on statically mapped pages now that comm page mapping is established.
     */
    arm_vm_prot_finalize(PE_state.bootArgs);
#endif

    /*
     * Initialize the globals used for permuting kernel
     * addresses that may be exported to userland as tokens
     * using VM_KERNEL_ADDRPERM()/VM_KERNEL_ADDRPERM_EXTERNAL().
     * Force the random number to be odd to avoid mapping a non-zero
     * word-aligned address to zero via addition.
     * Note: at this stage we can use the cryptographically secure PRNG
     * rather than early_random().
     */
    read_random(&vm_kernel_addrperm, sizeof(vm_kernel_addrperm));
    vm_kernel_addrperm |= 1;
    read_random(&buf_kernel_addrperm, sizeof(buf_kernel_addrperm));
    buf_kernel_addrperm |= 1;
    read_random(&vm_kernel_addrperm_ext, sizeof(vm_kernel_addrperm_ext));
    vm_kernel_addrperm_ext |= 1;
    read_random(&vm_kernel_addrhash_salt, sizeof(vm_kernel_addrhash_salt));
    read_random(&vm_kernel_addrhash_salt_ext, sizeof(vm_kernel_addrhash_salt_ext));
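    /*
     * Rough sketch of how these values are consumed (see the authoritative
     * macros in osfmk/mach/vm_param.h): VM_KERNEL_ADDRPERM(addr) is
     * essentially
     *
     *     (addr == 0) ? 0 : addr + vm_kernel_addrperm
     *
     * so keeping the perm values odd guarantees that a non-zero
     * word-aligned kernel address can never permute to 0, while the
     * *_addrhash_salt values seed the non-reversible VM_KERNEL_ADDRHASH()
     * variants.
     */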

    vm_set_restrictions();


#ifdef CONFIG_XNUPOST
    kern_return_t result = kernel_list_tests();
    result = kernel_do_post();
    if (result != KERN_SUCCESS) {
        panic("kernel_do_post: Tests failed with result = 0x%08x\n", result);
    }
    kernel_bootstrap_log("kernel_do_post - done");
#endif /* CONFIG_XNUPOST */


    /*
     * Start the user bootstrap.
     */
#ifdef MACH_BSD
    bsd_init();
#endif

#if defined (__x86_64__)
    x86_64_protect_data_const();
#endif


    /*
     * Get rid of pages used for early boot tracing.
     */
    kdebug_free_early_buf();

    serial_keyboard_init(); /* Start serial keyboard if wanted */

    vm_page_init_local_q();

    thread_bind(PROCESSOR_NULL);

    /*
     * Now that all CPUs are available to run threads, this is essentially
     * a background thread. Take this opportunity to initialize and free
     * any remaining vm_pages that were delayed earlier by pmap_startup().
     */
    vm_free_delayed_pages();

    /*
     * Become the pageout daemon.
     */
    vm_pageout();
    /*NOTREACHED*/
}

/*
 * slave_main:
 *
 * Load the first thread to start a processor.
 * This path will also be used by the master processor
 * after being offlined.
 */
void
slave_main(void *machine_param)
{
    processor_t processor = current_processor();
    thread_t thread;

    /*
     * Use the idle processor thread if there
     * is no dedicated start up thread.
     */
    if (processor->processor_offlined == true) {
        /* Return to the saved processor_offline context */
        assert(processor->startup_thread == THREAD_NULL);

        thread = processor->idle_thread;
        thread->parameter = machine_param;
    } else if (processor->startup_thread) {
        thread = processor->startup_thread;
        processor->startup_thread = THREAD_NULL;
    } else {
        thread = processor->idle_thread;
        thread->continuation = processor_start_thread;
        thread->parameter = machine_param;
    }

    load_context(thread);
    /*NOTREACHED*/
}

/*
 * processor_start_thread:
 *
 * First thread to execute on a started processor.
 *
 * Called at splsched.
 */
void
processor_start_thread(void *machine_param,
    __unused wait_result_t result)
{
    processor_t processor = current_processor();
    thread_t self = current_thread();

    slave_machine_init(machine_param);

    /*
     * If running the idle processor thread,
     * reenter the idle loop, else terminate.
     */
    if (self == processor->idle_thread) {
        thread_block(idle_thread);
    }

    thread_terminate(self);
    /*NOTREACHED*/
}

/*
 * load_context:
 *
 * Start the first thread on a processor.
 * This may be the first thread ever run on a processor, or
 * it could be a processor that was previously offlined.
 */
static void __attribute__((noreturn))
load_context(
    thread_t thread)
{
    processor_t processor = current_processor();


#define load_context_kprintf(x...) /* kprintf("load_context: " x) */

    load_context_kprintf("machine_set_current_thread\n");
    machine_set_current_thread(thread);

    load_context_kprintf("processor_up\n");

    PMAP_ACTIVATE_KERNEL(processor->cpu_id);

    /*
     * Acquire a stack if none attached. The panic
     * should never occur since the thread is expected
     * to have reserved stack.
     */
    load_context_kprintf("thread %p, stack %lx, stackptr %lx\n", thread,
        thread->kernel_stack, thread->machine.kstackptr);
    if (!thread->kernel_stack) {
        load_context_kprintf("stack_alloc_try\n");
        if (!stack_alloc_try(thread)) {
            panic("load_context");
        }
    }

    /*
     * The idle processor threads are not counted as
     * running for load calculations.
     */
    if (!(thread->state & TH_IDLE)) {
        SCHED(run_count_incr)(thread);
    }

    processor->active_thread = thread;
    processor_state_update_explicit(processor, thread->sched_pri,
        SFI_CLASS_KERNEL, PSET_SMP, thread_get_perfcontrol_class(thread), THREAD_URGENCY_NONE);
    processor->current_is_bound = thread->bound_processor != PROCESSOR_NULL;
    processor->current_is_NO_SMT = false;
    processor->starting_pri = thread->sched_pri;
    processor->deadline = UINT64_MAX;
    thread->last_processor = processor;

    processor_up(processor);

    processor->last_dispatch = mach_absolute_time();
    timer_start(&thread->system_timer, processor->last_dispatch);
    PROCESSOR_DATA(processor, thread_timer) = PROCESSOR_DATA(processor, kernel_timer) = &thread->system_timer;

    timer_start(&PROCESSOR_DATA(processor, system_state), processor->last_dispatch);
    PROCESSOR_DATA(processor, current_state) = &PROCESSOR_DATA(processor, system_state);


    cpu_quiescent_counter_join(processor->last_dispatch);

    PMAP_ACTIVATE_USER(thread, processor->cpu_id);

    load_context_kprintf("machine_load_context\n");

#if __arm__ || __arm64__
#if __SMP__
    /* TODO: Should this be ordered? */
    thread->machine.machine_thread_flags |= MACHINE_THREAD_FLAGS_ON_CPU;
#endif /* __SMP__ */
#endif /* __arm__ || __arm64__ */

    machine_load_context(thread);
    /*NOTREACHED*/
}

void
scale_setup()
{
    int scale = 0;
#if defined(__LP64__)
    typeof(task_max) task_max_base = task_max;

    /* Raise limits for servers with >= 16G */
    if ((serverperfmode != 0) && ((uint64_t)sane_size >= (uint64_t)(16 * 1024 * 1024 * 1024ULL))) {
        scale = (int)((uint64_t)sane_size / (uint64_t)(8 * 1024 * 1024 * 1024ULL));
        /* limit to 128 G */
        if (scale > 16) {
            scale = 16;
        }
        task_max_base = 2500;
        /* Raise limits for machines with >= 3GB */
    } else if ((uint64_t)sane_size >= (uint64_t)(3 * 1024 * 1024 * 1024ULL)) {
        if ((uint64_t)sane_size < (uint64_t)(8 * 1024 * 1024 * 1024ULL)) {
            scale = 2;
        } else {
            /* limit to 64GB */
            scale = MIN(16, (int)((uint64_t)sane_size / (uint64_t)(4 * 1024 * 1024 * 1024ULL)));
        }
    }

    task_max = MAX(task_max, task_max_base * scale);

    if (scale != 0) {
        task_threadmax = task_max;
        thread_max = task_max * 5;
    }

#endif

    bsd_scale_setup(scale);

    ipc_space_max = SPACE_MAX;
    ipc_port_max = PORT_MAX;
    ipc_pset_max = SET_MAX;
    semaphore_max = SEMAPHORE_MAX;
}
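
/*
 * Worked example (illustrative): with serverperfmode=1 and sane_size = 32 GB,
 * scale = 32 / 8 = 4 and task_max_base = 2500, so task_max becomes
 * MAX(task_max, 2500 * 4), i.e. at least 10000; because scale != 0,
 * task_threadmax = task_max and thread_max = task_max * 5, before
 * bsd_scale_setup(4) applies the corresponding BSD-side scaling.
 */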