osfmk/kern/startup.c (apple/xnu, xnu-4570.61.1)
/*
 * Copyright (c) 2000-2010 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 * NOTICE: This file was modified by McAfee Research in 2004 to introduce
 * support for mandatory and extensible security protections. This notice
 * is included in support of clause 2.2 (b) of the Apple Public License,
 * Version 2.0.
 */
/*
 */

/*
 * Mach kernel startup.
 */

#include <debug.h>
#include <xpr_debug.h>
#include <mach_kdp.h>

#include <mach/boolean.h>
#include <mach/machine.h>
#include <mach/thread_act.h>
#include <mach/task_special_ports.h>
#include <mach/vm_param.h>
#include <ipc/ipc_init.h>
#include <kern/assert.h>
#include <kern/mach_param.h>
#include <kern/misc_protos.h>
#include <kern/clock.h>
#include <kern/coalition.h>
#include <kern/cpu_number.h>
#include <kern/ledger.h>
#include <kern/machine.h>
#include <kern/processor.h>
#include <kern/sched_prim.h>
#if CONFIG_SCHED_SFI
#include <kern/sfi.h>
#endif
#include <kern/startup.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/timer.h>
#if CONFIG_TELEMETRY
#include <kern/telemetry.h>
#endif
#include <kern/xpr.h>
#include <kern/zalloc.h>
#include <kern/locks.h>
#include <kern/debug.h>
#include <corpses/task_corpse.h>
#include <prng/random.h>
#include <console/serial_protos.h>
#include <vm/vm_kern.h>
#include <vm/vm_init.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_shared_region.h>
#include <machine/pmap.h>
#include <machine/commpage.h>
#include <libkern/version.h>
#include <sys/codesign.h>
#include <sys/kdebug.h>
#include <sys/random.h>
#include <sys/ktrace.h>

#include <kern/ltable.h>
#include <kern/waitq.h>
#include <ipc/ipc_voucher.h>
#include <voucher/ipc_pthread_priority_internal.h>
#include <mach/host_info.h>

#if CONFIG_ATM
#include <atm/atm_internal.h>
#endif

#if CONFIG_CSR
#include <sys/csr.h>
#endif

#include <bank/bank_internal.h>

#if ALTERNATE_DEBUGGER
#include <arm64/alternate_debugger.h>
#endif

#if MACH_KDP
#include <kdp/kdp.h>
#endif

#if CONFIG_MACF
#include <security/mac_mach_internal.h>
#if CONFIG_VNGUARD
extern void vnguard_policy_init(void);
#endif
#endif

#if KPC
#include <kern/kpc.h>
#endif

#if HYPERVISOR
#include <kern/hv_support.h>
#endif

#include <san/kasan.h>

#if defined(__arm__) || defined(__arm64__)
#include <arm/misc_protos.h> // for arm_vm_prot_finalize
#endif

#include <i386/pmCPU.h>
static void kernel_bootstrap_thread(void);

static void load_context(
    thread_t thread);
#if (defined(__i386__) || defined(__x86_64__)) && NCOPY_WINDOWS > 0
extern void cpu_userwindow_init(int);
extern void cpu_physwindow_init(int);
#endif

#if CONFIG_ECC_LOGGING
#include <kern/ecc.h>
#endif

#if (defined(__i386__) || defined(__x86_64__)) && CONFIG_VMX
#include <i386/vmx/vmx_cpu.h>
#endif

// libkern/OSKextLib.cpp
extern void OSKextRemoveKextBootstrap(void);

void scale_setup(void);
extern void bsd_scale_setup(int);
extern unsigned int semaphore_max;
extern void stackshot_init(void);
extern void ktrace_init(void);
extern void oslog_init(void);

/*
 * Running in virtual memory, on the interrupt stack.
 */

extern int serverperfmode;

/* size of kernel trace buffer, disabled by default */
unsigned int new_nkdbufs = 0;
unsigned int wake_nkdbufs = 0;
unsigned int write_trace_on_panic = 0;
static char trace_typefilter[64] = { 0 };
unsigned int trace_wrap = 0;
boolean_t trace_serial = FALSE;
boolean_t early_boot_complete = FALSE;

/* mach leak logging */
int log_leaks = 0;

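/*
 * Boot milestone logging: record each phase name via
 * kernel_debug_string_early() so bring-up progress shows up in early
 * boot traces (the kprintf calls are intentionally left commented out).
 */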
static inline void
kernel_bootstrap_log(const char *message)
{
//  kprintf("kernel_bootstrap: %s\n", message);
    kernel_debug_string_early(message);
}

static inline void
kernel_bootstrap_thread_log(const char *message)
{
//  kprintf("kernel_bootstrap_thread: %s\n", message);
    kernel_debug_string_early(message);
}

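/*
 * kernel_early_bootstrap: earliest machine-independent setup, run from
 * machine-dependent startup before kernel_bootstrap().  Brings up the
 * lock module, the timer callout world, and (when configured) the SFI
 * classes.
 */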
void
kernel_early_bootstrap(void)
{
    /* serverperfmode is needed by timer setup */
    if (PE_parse_boot_argn("serverperfmode", &serverperfmode, sizeof (serverperfmode))) {
        serverperfmode = 1;
    }

    lck_mod_init();

    /*
     * Initialize the timer callout world
     */
    timer_call_init();

#if CONFIG_SCHED_SFI
    /*
     * Configure SFI classes
     */
    sfi_early_init();
#endif
}

extern boolean_t IORamDiskBSDRoot(void);
extern kern_return_t cpm_preallocate_early(void);

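/*
 * kernel_bootstrap: bring up the core Mach subsystems (VM, IPC, scheduler,
 * tasks and threads), then hand off to kernel_bootstrap_thread() by loading
 * it as the first thread on the boot processor.
 */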
void
kernel_bootstrap(void)
{
    kern_return_t result;
    thread_t thread;
    char namep[16];

    printf("%s\n", version); /* log kernel version */

    if (PE_parse_boot_argn("-l", namep, sizeof (namep))) /* leaks logging */
        log_leaks = 1;

    PE_parse_boot_argn("trace", &new_nkdbufs, sizeof (new_nkdbufs));
    PE_parse_boot_argn("trace_wake", &wake_nkdbufs, sizeof (wake_nkdbufs));
    PE_parse_boot_argn("trace_panic", &write_trace_on_panic, sizeof(write_trace_on_panic));
    PE_parse_boot_arg_str("trace_typefilter", trace_typefilter, sizeof(trace_typefilter));
    PE_parse_boot_argn("trace_wrap", &trace_wrap, sizeof(trace_wrap));

    scale_setup();

    kernel_bootstrap_log("vm_mem_bootstrap");
    vm_mem_bootstrap();

    kernel_bootstrap_log("cs_init");
    cs_init();

    kernel_bootstrap_log("vm_mem_init");
    vm_mem_init();

    machine_info.memory_size = (uint32_t)mem_size;
    machine_info.max_mem = max_mem;
    machine_info.major_version = version_major;
    machine_info.minor_version = version_minor;

    oslog_init();

#if KASAN
    kernel_bootstrap_log("kasan_late_init");
    kasan_late_init();
#endif

#if CONFIG_TELEMETRY
    kernel_bootstrap_log("telemetry_init");
    telemetry_init();
#endif

#if CONFIG_CSR
    kernel_bootstrap_log("csr_init");
    csr_init();
#endif

    if (PE_i_can_has_debugger(NULL) &&
        PE_parse_boot_argn("-show_pointers", &namep, sizeof (namep))) {
        doprnt_hide_pointers = FALSE;
    }

    kernel_bootstrap_log("console_init");
    console_init();

    kernel_bootstrap_log("stackshot_init");
    stackshot_init();

    kernel_bootstrap_log("sched_init");
    sched_init();

    kernel_bootstrap_log("ltable_bootstrap");
    ltable_bootstrap();

    kernel_bootstrap_log("waitq_bootstrap");
    waitq_bootstrap();

    kernel_bootstrap_log("ipc_bootstrap");
    ipc_bootstrap();

#if CONFIG_MACF
    kernel_bootstrap_log("mac_policy_init");
    mac_policy_init();
#endif

    kernel_bootstrap_log("ipc_init");
    ipc_init();

    /*
     * As soon as the virtual memory system is up, we record
     * that this CPU is using the kernel pmap.
     */
    kernel_bootstrap_log("PMAP_ACTIVATE_KERNEL");
    PMAP_ACTIVATE_KERNEL(master_cpu);

    kernel_bootstrap_log("mapping_free_prime");
    mapping_free_prime();   /* Load up with temporary mapping blocks */

    kernel_bootstrap_log("machine_init");
    machine_init();

    kernel_bootstrap_log("clock_init");
    clock_init();

    ledger_init();

    /*
     * Initialize the IPC, task, and thread subsystems.
     */

#if CONFIG_COALITIONS
    kernel_bootstrap_log("coalitions_init");
    coalitions_init();
#endif

    kernel_bootstrap_log("task_init");
    task_init();

    kernel_bootstrap_log("thread_init");
    thread_init();

#if CONFIG_ATM
    /* Initialize the Activity Trace Resource Manager. */
    kernel_bootstrap_log("atm_init");
    atm_init();
#endif
    kernel_bootstrap_log("mach_init_activity_id");
    mach_init_activity_id();

    /* Initialize the BANK Manager. */
    kernel_bootstrap_log("bank_init");
    bank_init();

    kernel_bootstrap_log("ipc_pthread_priority_init");
    ipc_pthread_priority_init();

    /* initialize the corpse config based on boot-args */
    corpses_init();

    /* initialize host_statistics */
    host_statistics_init();

    /*
     * Create a kernel thread to execute the kernel bootstrap.
     */
    kernel_bootstrap_log("kernel_thread_create");
    result = kernel_thread_create((thread_continue_t)kernel_bootstrap_thread, NULL, MAXPRI_KERNEL, &thread);

    if (result != KERN_SUCCESS) panic("kernel_bootstrap: result = %08X\n", result);

    thread->state = TH_RUN;
    thread->last_made_runnable_time = mach_absolute_time();
    thread_deallocate(thread);

    kernel_bootstrap_log("load_context - done");
    load_context(thread);
    /*NOTREACHED*/
}

int kth_started = 0;

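/*
 * Constants used for permuting and hashing kernel addresses exported to
 * userland (see VM_KERNEL_ADDRPERM()/VM_KERNEL_ADDRPERM_EXTERNAL()).
 * Seeded from the secure PRNG in kernel_bootstrap_thread() below.
 */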
vm_offset_t vm_kernel_addrperm;
vm_offset_t buf_kernel_addrperm;
vm_offset_t vm_kernel_addrperm_ext;
uint64_t vm_kernel_addrhash_salt;
uint64_t vm_kernel_addrhash_salt_ext;

/*
 * Now running in a thread.  Kick off other services,
 * invoke user bootstrap, enter pageout loop.
 */
static void
kernel_bootstrap_thread(void)
{
    processor_t processor = current_processor();

#define kernel_bootstrap_thread_kprintf(x...) /* kprintf("kernel_bootstrap_thread: " x) */
    kernel_bootstrap_thread_log("idle_thread_create");
    /*
     * Create the idle processor thread.
     */
    idle_thread_create(processor);

    /*
     * N.B. Do not stick anything else
     * before this point.
     *
     * Start up the scheduler services.
     */
    kernel_bootstrap_thread_log("sched_startup");
    sched_startup();

    /*
     * Thread lifecycle maintenance (teardown, stack allocation)
     */
    kernel_bootstrap_thread_log("thread_daemon_init");
    thread_daemon_init();

    /* Create kernel map entry reserve */
    vm_kernel_reserved_entry_init();

    /*
     * Thread callout service.
     */
    kernel_bootstrap_thread_log("thread_call_initialize");
    thread_call_initialize();

    /*
     * Remain on current processor as
     * additional processors come online.
     */
    kernel_bootstrap_thread_log("thread_bind");
    thread_bind(processor);

#if __arm64__
    if (IORamDiskBSDRoot()) {
        cpm_preallocate_early();
    }
#endif /* __arm64__ */

    /*
     * Initialize ipc thread call support.
     */
    kernel_bootstrap_thread_log("ipc_thread_call_init");
    ipc_thread_call_init();

    /*
     * Kick off memory mapping adjustments.
     */
    kernel_bootstrap_thread_log("mapping_adjust");
    mapping_adjust();

    /*
     * Create the clock service.
     */
    kernel_bootstrap_thread_log("clock_service_create");
    clock_service_create();

    /*
     * Create the device service.
     */
    device_service_create();

    kth_started = 1;

#if (defined(__i386__) || defined(__x86_64__)) && NCOPY_WINDOWS > 0
    /*
     * Create and initialize the physical copy window for processor 0.
     * This is required before kicking off IOKit.
     */
    cpu_physwindow_init(0);
#endif

#if MACH_KDP
    kernel_bootstrap_log("kdp_init");
    kdp_init();
#endif

#if ALTERNATE_DEBUGGER
    alternate_debugger_init();
#endif

#if KPC
    kpc_init();
#endif

#if CONFIG_ECC_LOGGING
    ecc_log_init();
#endif

#if HYPERVISOR
    hv_support_init();
#endif

#if CONFIG_TELEMETRY
    kernel_bootstrap_log("bootprofile_init");
    bootprofile_init();
#endif

#if (defined(__i386__) || defined(__x86_64__)) && CONFIG_VMX
    vmx_init();
#endif

    kernel_bootstrap_thread_log("ktrace_init");
    ktrace_init();

    kdebug_init(new_nkdbufs, trace_typefilter, trace_wrap);

    kernel_bootstrap_log("prng_init");
    prng_cpu_init(master_cpu);

#ifdef MACH_BSD
    kernel_bootstrap_log("bsd_early_init");
    bsd_early_init();
#endif

#if defined(__arm64__)
    ml_lockdown_init();
#endif

#ifdef IOKIT
    kernel_bootstrap_log("PE_init_iokit");
    PE_init_iokit();
#endif

    assert(ml_get_interrupts_enabled() == FALSE);

    /*
     * Past this point, kernel subsystems that expect to operate with
     * interrupts or preemption enabled may begin enforcement.
     */
    early_boot_complete = TRUE;

#if INTERRUPT_MASKED_DEBUG
    // Reset interrupts masked timeout before we enable interrupts
    ml_spin_debug_clear_self();
#endif
    (void) spllo();     /* Allow interruptions */

#if (defined(__i386__) || defined(__x86_64__)) && NCOPY_WINDOWS > 0
    /*
     * Create and initialize the copy window for processor 0.
     * This also allocates window space for all other processors.
     * However, this is dependent on the number of processors - so this call
     * must be after IOKit has been started because IOKit performs processor
     * discovery.
     */
    cpu_userwindow_init(0);
#endif

    /*
     * Initialize the shared region module.
     */
    vm_shared_region_init();
    vm_commpage_init();
    vm_commpage_text_init();

#if CONFIG_MACF
    kernel_bootstrap_log("mac_policy_initmach");
    mac_policy_initmach();
#if CONFIG_VNGUARD
    vnguard_policy_init();
#endif
#endif

#if defined(__arm__) || defined(__arm64__)
#if CONFIG_KERNEL_INTEGRITY
    machine_lockdown_preflight();
#endif
    /*
     * Finalize protections on statically mapped pages now that comm page mapping is established.
     */
    arm_vm_prot_finalize(PE_state.bootArgs);
#endif

#if CONFIG_SCHED_SFI
    kernel_bootstrap_log("sfi_init");
    sfi_init();
#endif

    /*
     * Initialize the globals used for permuting kernel
     * addresses that may be exported to userland as tokens
     * using VM_KERNEL_ADDRPERM()/VM_KERNEL_ADDRPERM_EXTERNAL().
     * Force the random number to be odd to avoid mapping a non-zero
     * word-aligned address to zero via addition.
     * Note: at this stage we can use the cryptographically secure PRNG
     * rather than early_random().
     */
    read_random(&vm_kernel_addrperm, sizeof(vm_kernel_addrperm));
    vm_kernel_addrperm |= 1;
    read_random(&buf_kernel_addrperm, sizeof(buf_kernel_addrperm));
    buf_kernel_addrperm |= 1;
    read_random(&vm_kernel_addrperm_ext, sizeof(vm_kernel_addrperm_ext));
    vm_kernel_addrperm_ext |= 1;
    read_random(&vm_kernel_addrhash_salt, sizeof(vm_kernel_addrhash_salt));
    read_random(&vm_kernel_addrhash_salt_ext, sizeof(vm_kernel_addrhash_salt_ext));

    vm_set_restrictions();

    /*
     * Start the user bootstrap.
     */
#ifdef MACH_BSD
    bsd_init();
#endif

    /*
     * Get rid of segments used to bootstrap kext loading. This removes
     * the KLD, PRELINK symtab, LINKEDIT, and symtab segments/load commands.
     */
    OSKextRemoveKextBootstrap();

    /*
     * Get rid of pages used for early boot tracing.
     */
    kdebug_free_early_buf();

    serial_keyboard_init();     /* Start serial keyboard if wanted */

    vm_page_init_local_q();

    thread_bind(PROCESSOR_NULL);

    /*
     * Become the pageout daemon.
     */
    vm_pageout();
    /*NOTREACHED*/
}

/*
 * slave_main:
 *
 *  Load the first thread to start a processor.
 */
void
slave_main(void *machine_param)
{
    processor_t processor = current_processor();
    thread_t thread;

    /*
     * Use the idle processor thread if there
     * is no dedicated start up thread.
     */
    if (processor->next_thread == THREAD_NULL) {
        thread = processor->idle_thread;
        thread->continuation = (thread_continue_t)processor_start_thread;
        thread->parameter = machine_param;
    }
    else {
        thread = processor->next_thread;
        processor->next_thread = THREAD_NULL;
    }

    load_context(thread);
    /*NOTREACHED*/
}

/*
 * processor_start_thread:
 *
 *  First thread to execute on a started processor.
 *
 *  Called at splsched.
 */
void
processor_start_thread(void *machine_param)
{
    processor_t processor = current_processor();
    thread_t self = current_thread();

    slave_machine_init(machine_param);

    /*
     * If running the idle processor thread,
     * reenter the idle loop, else terminate.
     */
    if (self == processor->idle_thread)
        thread_block((thread_continue_t)idle_thread);

    thread_terminate(self);
    /*NOTREACHED*/
}

/*
 * load_context:
 *
 *  Start the first thread on a processor.
 */
static void __attribute__((noreturn))
load_context(
    thread_t thread)
{
    processor_t processor = current_processor();

#define load_context_kprintf(x...) /* kprintf("load_context: " x) */

    load_context_kprintf("machine_set_current_thread\n");
    machine_set_current_thread(thread);

    load_context_kprintf("processor_up\n");
    processor_up(processor);

    PMAP_ACTIVATE_KERNEL(processor->cpu_id);

    /*
     * Acquire a stack if none attached. The panic
     * should never occur since the thread is expected
     * to have reserved stack.
     */
    load_context_kprintf("thread %p, stack %lx, stackptr %lx\n", thread,
                         thread->kernel_stack, thread->machine.kstackptr);
    if (!thread->kernel_stack) {
        load_context_kprintf("stack_alloc_try\n");
        if (!stack_alloc_try(thread))
            panic("load_context");
    }

    /*
     * The idle processor threads are not counted as
     * running for load calculations.
     */
    if (!(thread->state & TH_IDLE))
        sched_run_incr(thread);

    processor->active_thread = thread;
    processor_state_update_explicit(processor, thread->sched_pri,
        SFI_CLASS_KERNEL, PSET_SMP, thread_get_perfcontrol_class(thread));
    processor->starting_pri = thread->sched_pri;
    processor->deadline = UINT64_MAX;
    thread->last_processor = processor;

    processor->last_dispatch = mach_absolute_time();
    timer_start(&thread->system_timer, processor->last_dispatch);
    PROCESSOR_DATA(processor, thread_timer) = PROCESSOR_DATA(processor, kernel_timer) = &thread->system_timer;

    timer_start(&PROCESSOR_DATA(processor, system_state), processor->last_dispatch);
    PROCESSOR_DATA(processor, current_state) = &PROCESSOR_DATA(processor, system_state);

    PMAP_ACTIVATE_USER(thread, processor->cpu_id);

    load_context_kprintf("machine_load_context\n");
    machine_load_context(thread);
    /*NOTREACHED*/
}

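/*
 * scale_setup: size kernel limits to the machine.  On LP64, scale the
 * task/thread maxima with physical memory (more aggressively when
 * serverperfmode is set), then let BSD scale its own tables and pin the
 * IPC space/port/set and semaphore limits to their compile-time maxima.
 */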
void
scale_setup()
{
    int scale = 0;
#if defined(__LP64__)
    typeof(task_max) task_max_base = task_max;

    /* Raise limits for servers with >= 16G */
    if ((serverperfmode != 0) && ((uint64_t)sane_size >= (uint64_t)(16 * 1024 * 1024 * 1024ULL))) {
        scale = (int)((uint64_t)sane_size / (uint64_t)(8 * 1024 * 1024 * 1024ULL));
        /* limit to 128 G */
        if (scale > 16)
            scale = 16;
        task_max_base = 2500;
    /* Raise limits for machines with >= 3GB */
    } else if ((uint64_t)sane_size >= (uint64_t)(3 * 1024 * 1024 * 1024ULL)) {
        if ((uint64_t)sane_size < (uint64_t)(8 * 1024 * 1024 * 1024ULL)) {
            scale = 2;
        } else {
            /* limit to 64GB */
            scale = MIN(16, (int)((uint64_t)sane_size / (uint64_t)(4 * 1024 * 1024 * 1024ULL)));
        }
    }

    task_max = MAX(task_max, task_max_base * scale);

    if (scale != 0) {
        task_threadmax = task_max;
        thread_max = task_max * 5;
    }

#endif

    bsd_scale_setup(scale);

    ipc_space_max = SPACE_MAX;
    ipc_port_max = PORT_MAX;
    ipc_pset_max = SET_MAX;
    semaphore_max = SEMAPHORE_MAX;
}