+__startup_func
+extern void
+kernel_startup_tunable_init(const struct startup_tunable_spec *spec)
+{
+	/*
+	 * If a boot-arg matching this tunable's name was supplied, parse it
+	 * straight into the tunable's backing variable.
+	 */
+	bool found = PE_parse_boot_argn(spec->name, spec->var_addr, spec->var_len);
+
+	if (found && spec->var_is_bool) {
+		/*
+		 * Collapse whatever byte the parser stored into a canonical
+		 * bool: conversion of a uint8_t to bool yields exactly 0 or 1.
+		 */
+		*(bool *)spec->var_addr = *(uint8_t *)spec->var_addr;
+	}
+}
+
+static void
+kernel_startup_log(startup_subsystem_id_t subsystem)
+{
+	/* Printable labels for boot subsystems, indexed by subsystem id. */
+	static const char *names[] = {
+		[STARTUP_SUB_TUNABLES] = "tunables",
+		[STARTUP_SUB_LOCKS_EARLY] = "locks_early",
+		[STARTUP_SUB_KPRINTF] = "kprintf",
+
+		[STARTUP_SUB_PMAP_STEAL] = "pmap_steal",
+		[STARTUP_SUB_VM_KERNEL] = "vm_kernel",
+		[STARTUP_SUB_KMEM] = "kmem",
+		[STARTUP_SUB_KMEM_ALLOC] = "kmem_alloc",
+		[STARTUP_SUB_ZALLOC] = "zalloc",
+		[STARTUP_SUB_PERCPU] = "percpu",
+		[STARTUP_SUB_LOCKS] = "locks",
+
+		[STARTUP_SUB_CODESIGNING] = "codesigning",
+		[STARTUP_SUB_OSLOG] = "oslog",
+		[STARTUP_SUB_MACH_IPC] = "mach_ipc",
+		[STARTUP_SUB_EARLY_BOOT] = "early_boot",
+
+		/* LOCKDOWN is special and its value won't fit here. */
+	};
+	/* Highest subsystem already announced; each name is logged once. */
+	static startup_subsystem_id_t logged = STARTUP_SUB_NONE;
+
+	if (subsystem > logged) {
+		size_t count = sizeof(names) / sizeof(names[0]);
+
+		/* Ids past the table, or gaps within it, are silently skipped. */
+		if (subsystem < count && names[subsystem] != NULL) {
+			kernel_bootstrap_log(names[subsystem]);
+		}
+		logged = subsystem;
+	}
+}
+
+__startup_func
+void
+kernel_startup_initialize_upto(startup_subsystem_id_t upto)
+{
+	/*
+	 * Run every registered startup callback whose subsystem id is <= upto,
+	 * in table order, advancing the global startup_phase as we go.
+	 * Resumes from startup_entry_cur, so successive calls with increasing
+	 * `upto` values each run only the not-yet-run entries.
+	 */
+	struct startup_entry *cur = startup_entry_cur;
+
+	/* Phases only move forward; callers must not re-request a past phase. */
+	assert(startup_phase < upto);
+
+	while (cur < startup_entries_end && cur->subsystem <= upto) {
+		/* Verbose tracing only once kprintf itself has been set up. */
+		if ((startup_debug & STARTUP_DEBUG_VERBOSE) &&
+		    startup_phase >= STARTUP_SUB_KPRINTF) {
+			kprintf("%s[%d, rank %d]: %p(%p)\n", __func__,
+			    cur->subsystem, cur->rank, cur->func, cur->arg);
+		}
+		/*
+		 * While a subsystem's callbacks run, startup_phase is held at
+		 * (subsystem - 1): the subsystem only counts as initialized
+		 * after all of its callbacks have completed.
+		 */
+		startup_phase = cur->subsystem - 1;
+		kernel_startup_log(cur->subsystem);
+		cur->func(cur->arg);
+		/* Persist progress so a later call resumes after this entry. */
+		startup_entry_cur = ++cur;
+	}
+	/* Log `upto` even if it had no entries of its own. */
+	kernel_startup_log(upto);
+
+	if ((startup_debug & STARTUP_DEBUG_VERBOSE) &&
+	    upto >= STARTUP_SUB_KPRINTF) {
+		kprintf("%s: reached phase %d\n", __func__, upto);
+	}
+	startup_phase = upto;
+}