+# Define KERNEL_BASE_OFFSET so it is known at compile time:
+CFLAGS_X86_64 += -DKERNEL_BASE_OFFSET=$(KERNEL_BASE_OFFSET)
+CFLAGS_X86_64H += -DKERNEL_BASE_OFFSET=$(KERNEL_BASE_OFFSET)
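+# (Illustrative sketch, not from this build: with -DKERNEL_BASE_OFFSET on the
+# command line, C sources can consume the value at compile time, e.g.
+#   static const unsigned long kernel_base_offset = KERNEL_BASE_OFFSET;
+# the variable name and type here are assumptions for illustration only.)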
+
+LDFLAGS_KERNEL_DEBUGX86_64 = $(LDFLAGS_KERNEL_RELEASEX86_64)
+LDFLAGS_KERNEL_DEVELOPMENTX86_64 = $(LDFLAGS_KERNEL_RELEASEX86_64)
+LDFLAGS_KERNEL_KASANX86_64 = $(LDFLAGS_KERNEL_DEVELOPMENTX86_64) \
+ -Wl,-sectalign,__HIB,__asan_globals,0x1000 \
+ -Wl,-sectalign,__HIB,__asan_liveness,0x1000 \
+ -Wl,-sectalign,__HIB,__mod_term_func,0x1000 \
+ -Wl,-rename_section,__HIB,__mod_init_func,__NULL,__mod_init_func \
+ -Wl,-rename_section,__HIB,__eh_frame,__NULL,__eh_frame
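+# (Assumed rationale: KASan instrumentation emits extra sections such as
+# __asan_globals; the sectalign flags keep them page-aligned within __HIB,
+# while the rename_section flags park __HIB's __mod_init_func/__eh_frame in
+# a scratch __NULL segment so they stay out of the hibernation image.)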
+LDFLAGS_KERNEL_PROFILEX86_64 = $(LDFLAGS_KERNEL_RELEASEX86_64)
+
+LDFLAGS_KERNEL_RELEASEX86_64H = $(LDFLAGS_KERNEL_RELEASEX86_64)
+LDFLAGS_KERNEL_DEBUGX86_64H = $(LDFLAGS_KERNEL_RELEASEX86_64H)
+LDFLAGS_KERNEL_DEVELOPMENTX86_64H = $(LDFLAGS_KERNEL_RELEASEX86_64H)
+LDFLAGS_KERNEL_KASANX86_64H = $(LDFLAGS_KERNEL_KASANX86_64)
+LDFLAGS_KERNEL_PROFILEX86_64H = $(LDFLAGS_KERNEL_RELEASEX86_64H)
+
+# We preload ___udivmoddi4 in order to work around an issue with LTO
+# builds on armv7.
+LDFLAGS_KERNEL_GENARM = \
+ -Wl,-pie \
+ -Wl,-static \
+ -Wl,-image_base,0x80001000 \
+ -Wl,-sectalign,__DATA,__const,0x1000 \
+ -Wl,-u,___udivmoddi4
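+# (How the -u workaround operates: ld64's -u <symbol> places <symbol> on the
+# initial undefined-symbol list, which forces the library member defining
+# ___udivmoddi4 into the link even though no reference to it becomes visible
+# until after LTO code generation.)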
+
+LDFLAGS_KERNEL_RELEASEARM = \
+ $(LDFLAGS_KERNEL_GENARM) \
+ $(LDFLAGS_KERNEL_STRIP_LTO)
+
+LDFLAGS_KERNEL_EXPORTS_RELEASEARM = \
+ -Wl,-exported_symbols_list,$(TARGET)/all-kpi.exp
+
+LDFLAGS_KERNEL_DEVELOPMENTARM = \
+ $(LDFLAGS_KERNEL_GENARM) \
+ $(LDFLAGS_NOSTRIP_FLAG)
+
+LDFLAGS_KERNEL_EXPORTS_DEVELOPMENTARM =
+
+LDFLAGS_KERNEL_DEBUGARM = $(LDFLAGS_KERNEL_DEVELOPMENTARM)
+LDFLAGS_KERNEL_EXPORTS_DEBUGARM = $(LDFLAGS_KERNEL_EXPORTS_DEVELOPMENTARM)
+
+# Offset image base by page to have iBoot load kernel TEXT correctly.
+# First page is used for various purposes: sleep token, reset vector.
+# We also need a 32MB offset, as this is the minimum block mapping size
+# for a 16KB page runtime, and we wish to use the first virtual block
+# to map the low globals page. We also need another 4MB to account for
+# the address space reserved by L4 (because the reservation is not a
+# multiple of the block size in alignment/length, we will implicitly map
+# it with our block mapping, and we therefore must reflect that the
+# first 4MB of the block mapping for xnu do not belong to xnu).
+# For the moment, kaliber has a unique memory layout (monitor at the top
+# of memory). Support this by breaking 16KB on other platforms and
+# mandating 32MB alignment. Image base (i.e. __TEXT) must be 16KB
+# aligned since ld64 will link with 16KB alignment for ARM64.
+#
+# We currently offset by an additional 32MB in order to reclaim memory.
+# We need a dedicated virtual page for the low globals. Our bootloader
+# may have a significant chunk of memory (up to an L2 entry in size)
+# that lies before the kernel. The additional 32MB of virtual padding
+# ensures that we have enough virtual address space to map all of that
+# memory as part of the V-to-P mapping.
+# 23355738 - put __PRELINK_TEXT first. We reserve enough room
+# for 0x0000000003000000 = 48MB of kexts
+#
+# 0xfffffff000000000 (32MB range for low globals)
+# 0xfffffff002000000 (32MB range to allow for large page physical slide)
+# 0xfffffff004000000 (16KB range to reserve the first available page)
+# 0xfffffff004004000 (48MB range for kexts)
+# 0xfffffff007004000 (Start of xnu proper).
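+# (Arithmetic check of the map above, as a sketch:
+#   0xfffffff000000000 + 0x02000000 (32MB) = 0xfffffff002000000
+#   0xfffffff002000000 + 0x02000000 (32MB) = 0xfffffff004000000
+#   0xfffffff004000000 + 0x00004000 (16KB) = 0xfffffff004004000  <- -segaddr,__PRELINK_TEXT
+#   0xfffffff004004000 + 0x03000000 (48MB) = 0xfffffff007004000  <- -image_base)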
+LDFLAGS_KERNEL_GENARM64 = \
+ -Wl,-pie \
+ -Wl,-static \
+ -Wl,-segaddr,__PRELINK_TEXT,0xfffffff004004000 \
+ -Wl,-image_base,0xfffffff007004000 \
+ -Wl,-sectalign,__DATA,__const,0x4000 \
+ -Wl,-rename_section,__DATA,__mod_init_func,__DATA_CONST,__mod_init_func \
+ -Wl,-rename_section,__DATA,__mod_term_func,__DATA_CONST,__mod_term_func \
+ -Wl,-rename_section,__DATA,__auth_ptr,__DATA_CONST,__auth_ptr \
+ -Wl,-rename_section,__DATA,__auth_got,__DATA_CONST,__auth_got \
+ -Wl,-rename_section,__DATA,__const,__DATA_CONST,__const \
+ -Wl,-rename_section,__TEXT,__text,__TEXT_EXEC,__text \
+ -Wl,-rename_section,__TEXT,__stubs,__TEXT_EXEC,__stubs \
+ -Wl,-rename_section,__TEXT,initcode,__TEXT_EXEC,initcode \
+ -Wl,-sectcreate,"__PLK_TEXT_EXEC",__text,/dev/null \
+ -Wl,-sectcreate,__PRELINK_DATA,__data,/dev/null \
+ -Wl,-sectcreate,"__PLK_DATA_CONST",__data,/dev/null \
+ -Wl,-sectcreate,"__PLK_LLVM_COV",__llvm_covmap,/dev/null \
+ -Wl,-sectcreate,"__PLK_LINKEDIT",__data,/dev/null
+
+
+LDFLAGS_KERNEL_SEGARM64 ?= \
+ -Wl,-segment_order,__TEXT:__DATA_CONST:__LINKEDIT:__TEXT_EXEC:__LAST:__KLD:__DATA:__BOOTDATA
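+# (-segment_order pins the relative placement of these segments in the output
+# instead of leaving it to ld64's defaults; the ?= assignment lets a platform
+# makefile override the ordering before this file is read.)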
+
+LDFLAGS_KERNEL_RELEASEARM64 = \
+ $(LDFLAGS_KERNEL_GENARM64) \
+ $(LDFLAGS_KERNEL_SEGARM64) \
+ $(LDFLAGS_KERNEL_STRIP_LTO)
+
+LDFLAGS_KERNEL_EXPORTS_RELEASEARM64 = \
+ -Wl,-exported_symbols_list,$(TARGET)/all-kpi.exp
+
+LDFLAGS_KERNEL_DEVELOPMENTARM64 = \
+ $(LDFLAGS_KERNEL_GENARM64) \
+ $(LDFLAGS_KERNEL_SEGARM64) \
+ $(LDFLAGS_NOSTRIP_FLAG)
+
+LDFLAGS_KERNEL_EXPORTS_DEVELOPMENTARM64 =
+
+LDFLAGS_KERNEL_KASANARM64 = $(LDFLAGS_KERNEL_DEVELOPMENTARM64)
+LDFLAGS_KERNEL_DEBUGARM64 = $(LDFLAGS_KERNEL_DEVELOPMENTARM64)
+
+LDFLAGS_KERNEL_EXPORTS_KASANARM64 = $(LDFLAGS_KERNEL_EXPORTS_DEVELOPMENTARM64)
+LDFLAGS_KERNEL_EXPORTS_DEBUGARM64 = $(LDFLAGS_KERNEL_EXPORTS_DEVELOPMENTARM64)
+
+LDFLAGS_KERNEL = $(LDFLAGS_KERNEL_GEN) \
+ $(LDFLAGS_KERNEL_SDK) \
+ $($(addsuffix $(CURRENT_ARCH_CONFIG),ARCH_FLAGS_)) \
+ $($(addsuffix $(CURRENT_ARCH_CONFIG),LDFLAGS_KERNEL_)) \
+ $($(addsuffix $(CURRENT_KERNEL_CONFIG),LDFLAGS_KERNEL_)) \
+ $($(addsuffix $(CURRENT_ARCH_CONFIG), $(addsuffix $(CURRENT_KERNEL_CONFIG),LDFLAGS_KERNEL_))) \
+ $(DEPLOYMENT_TARGET_FLAGS)
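+# (Worked example of the computed variable names: with
+# CURRENT_ARCH_CONFIG=ARM64 and CURRENT_KERNEL_CONFIG=RELEASE,
+#   $($(addsuffix $(CURRENT_ARCH_CONFIG),ARCH_FLAGS_))        -> $(ARCH_FLAGS_ARM64)
+#   $($(addsuffix $(CURRENT_ARCH_CONFIG),LDFLAGS_KERNEL_))    -> $(LDFLAGS_KERNEL_ARM64)
+#   $($(addsuffix $(CURRENT_KERNEL_CONFIG),LDFLAGS_KERNEL_))  -> $(LDFLAGS_KERNEL_RELEASE)
+#   and the nested addsuffix pair                             -> $(LDFLAGS_KERNEL_RELEASEARM64),
+# i.e. the per-config value defined above.)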
+
+
+LDFLAGS_KERNEL_EXPORTS = \
+ $($(addsuffix $(CURRENT_ARCH_CONFIG), $(addsuffix $(CURRENT_KERNEL_CONFIG),LDFLAGS_KERNEL_EXPORTS_))) \
+ -Wl,-alias_list,$(TARGET)/all-alias.exp
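+# (Same pattern as LDFLAGS_KERNEL: for RELEASE/ARM64 the nested addsuffix
+# resolves to $(LDFLAGS_KERNEL_EXPORTS_RELEASEARM64), i.e. the
+# exported_symbols_list flag defined above, plus the unconditional
+# -alias_list flag.)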