#include <kern/host.h>
#include <kern/kext_alloc.h>
#include <firehose/tracepoint_private.h>
+#include <firehose/chunk_private.h>
#include <os/firehose_buffer_private.h>
#include <vm/vm_kern.h>
#include <kextd/kextd_mach.h>
#include <IOKit/IOStatisticsPrivate.h>
#include <IOKit/IOBSD.h>
+#include <san/kasan.h>
+
#if PRAGMA_MARK
#pragma mark External & Internal Function Protos
#endif
#define VM_MAPPED_KEXTS 1
#define KASLR_KEXT_DEBUG 0
#define KASLR_IOREG_DEBUG 0
+#elif __arm__ || __arm64__
+#define VM_MAPPED_KEXTS 0
+#define KASLR_KEXT_DEBUG 0
#else
#error Unsupported architecture
#endif
/* version */ "0", // filled in in OSKext::initialize()
/* reference_count */ -1, // never adjusted; kernel never unloads
/* reference_list */ NULL,
- /* address */ NULL,
+ /* address */ 0,
/* size */ 0, // filled in in OSKext::initialize()
/* hdr_size */ 0,
/* start */ 0,
}
PE_parse_boot_argn("keepsyms", &sKeepSymbols, sizeof(sKeepSymbols));
+#if KASAN_DYNAMIC_BLACKLIST
+ /* needed for function lookup */
+ sKeepSymbols = true;
+#endif
/* Set up an OSKext instance to represent the kernel itself.
*/
kernel_segment_command_t * seg_to_remove = NULL;
+#if __arm__ || __arm64__
+ const char * dt_segment_name = NULL;
+ void * segment_paddress = NULL;
+ int segment_size = 0;
+#endif
/* This must be the very first thing done by this function.
*/
OSRuntimeUnloadCPPForSegment(seg_to_remove);
}
-#if __i386__ || __x86_64__
+#if __arm__ || __arm64__
+#if !(defined(KERNEL_INTEGRITY_KTRR))
+ /* Free the memory that was set up by bootx.
+ */
+ dt_segment_name = "Kernel-__KLD";
+ if (0 == IODTGetLoaderInfo(dt_segment_name, &segment_paddress, &segment_size)) {
+ /* We cannot free this with KTRR enabled, as we cannot
+ * update the permissions on the KLD range this late
+ * in the boot process.
+ */
+ IODTFreeLoaderInfo(dt_segment_name, (void *)segment_paddress,
+ (int)segment_size);
+ }
+#endif /* !(defined(KERNEL_INTEGRITY_KTRR)) */
+#elif __i386__ || __x86_64__
/* On x86, use the mapping data from the segment load command to
* unload KLD directly.
* This may invalidate any assumptions about "avail_start"
* managed memory, then copy the segment back in.
*/
#if CONFIG_KXLD
+#if (__arm__ || __arm64__)
+#error CONFIG_KXLD not expected for this arch
+#endif
if (!sKeepSymbols) {
kern_return_t mem_result;
void *seg_copy = NULL;
&seg_offset,
seg_length, /* mask */ 0,
VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE,
+ VM_MAP_KERNEL_FLAGS_NONE,
+ VM_KERN_MEMORY_NONE,
(ipc_port_t)NULL,
(vm_object_offset_t) 0,
/* copy */ FALSE,
kmem_free(kernel_map, seg_copy_offset, seg_length);
}
#else /* we are not CONFIG_KXLD */
+#if !(__arm__ || __arm64__)
#error CONFIG_KXLD is expected for this arch
+#endif
/*****
* Dump the LINKEDIT segment, unless keepsyms is set.
return foundKext;
}
+/*********************************************************************
+* Returns the UUID of the image containing the given (unslid) address:
+* the kernel's own UUID for addresses inside xnu proper, the owning
+* kext's UUID otherwise, or NULL if no loaded image contains the
+* address or the MAC policy denies the caller kext-query access.
+*********************************************************************/
+/* static */
+OSData *
+OSKext::copyKextUUIDForAddress(OSNumber *address)
+{
+	OSData *uuid = NULL;
+
+	if (!address) {
+		return NULL;
+	}
+
+	/* The caller supplies an unslid address; re-apply the kernel slide
+	 * before comparing against live text/kext ranges. */
+	uintptr_t addr = (uintptr_t)address->unsigned64BitValue() + vm_kernel_slide;
+
+#if CONFIG_MACF
+	/* Is the calling process allowed to query kext info? */
+	if (current_task() != kernel_task) {
+		int macCheckResult = 0;
+		kauth_cred_t cred = NULL;
+
+		cred = kauth_cred_get_with_ref();
+		macCheckResult = mac_kext_check_query(cred);
+		kauth_cred_unref(&cred);
+
+		if (macCheckResult != 0) {
+			OSKextLog(/* kext */ NULL,
+				kOSKextLogErrorLevel | kOSKextLogLoadFlag,
+				"Failed to query kext UUID (MAC policy error 0x%x).",
+				macCheckResult);
+			return NULL;
+		}
+	}
+#endif
+
+	if (((vm_offset_t)addr >= vm_kernel_stext) && ((vm_offset_t)addr < vm_kernel_etext)) {
+		/* address in xnu proper */
+		unsigned long uuid_len = 0;
+		uuid = OSData::withBytes(getuuidfromheader(&_mh_execute_header, &uuid_len), uuid_len);
+	} else {
+		/* Otherwise resolve the address through the loaded-kext
+		 * summaries, under the lock summaryForAddress() requires. */
+		IOLockLock(sKextSummariesLock);
+		OSKextLoadedKextSummary *summary = OSKext::summaryForAddress(addr);
+		if (summary) {
+			uuid = OSData::withBytes(summary->uuid, sizeof(uuid_t));
+		}
+		IOLockUnlock(sKextSummariesLock);
+	}
+
+	return uuid;
+}
/*********************************************************************
*********************************************************************/
OSReturn
OSKext::removeKext(
OSKext * aKext,
+#if CONFIG_EMBEDDED
+ __unused
+#endif
bool terminateServicesAndRemovePersonalitiesFlag)
{
+#if CONFIG_EMBEDDED
+ OSKextLog(aKext,
+ kOSKextLogErrorLevel |
+ kOSKextLogKextBookkeepingFlag,
+ "removeKext() called for %s, not supported on embedded",
+ aKext->getIdentifier() ? aKext->getIdentifierCString() : "unknown kext");
+
+ return kOSReturnSuccess;
+#else /* CONFIG_EMBEDDED */
OSReturn result = kOSKextReturnInUse;
OSKext * checkKext = NULL; // do not release
finish:
IORecursiveLockUnlock(sKextLock);
return result;
+#endif /* CONFIG_EMBEDDED */
}
/*********************************************************************
/*********************************************************************
*********************************************************************/
+#if defined (__arm__)
+#include <arm/arch.h>
+#endif
#if defined (__x86_64__)
#define ARCHNAME "x86_64"
+#elif defined (__arm64__)
+#define ARCHNAME "arm64"
+#elif defined (__arm__)
+
+#if defined (__ARM_ARCH_7S__)
+#define ARCHNAME "armv7s"
+#elif defined (__ARM_ARCH_7F__)
+#define ARCHNAME "armv7f"
+#elif defined (__ARM_ARCH_7K__)
+#define ARCHNAME "armv7k"
+#elif defined (_ARM_ARCH_7) /* umbrella for all remaining */
+#define ARCHNAME "armv7"
+#elif defined (_ARM_ARCH_6) /* umbrella for all armv6 */
+#define ARCHNAME "armv6"
+#endif
+
+#elif defined (__arm64__)
+#define ARCHNAME "arm64"
#else
#error architecture not supported
#endif
}
bzero(account, sizeof(*account));
account->loadTag = kmod_info->id;
+ account->site.refcount = 0;
account->site.flags = VM_TAG_KMOD;
account->kext = this;
goto finish;
}
+#if KASAN
+ kasan_load_kext((vm_offset_t)linkedExecutable->getBytesNoCopy(),
+ linkedExecutable->getLength(), getIdentifierCString());
+#else
+ if (lookupSection(KASAN_GLOBAL_SEGNAME, KASAN_GLOBAL_SECTNAME)) {
+ OSKextLog(this,
+ kOSKextLogErrorLevel | kOSKextLogLoadFlag,
+ "KASAN: cannot load KASAN-ified kext %s on a non-KASAN kernel\n",
+ getIdentifierCString()
+ );
+ result = KERN_FAILURE;
+ goto finish;
+ }
+#endif
+
result = kOSReturnSuccess;
finish:
* called only by loadExecutable()
*********************************************************************/
#if !VM_MAPPED_KEXTS
+#if defined(__arm__) || defined(__arm64__)
+/* Non-VM_MAPPED_KEXTS (arm/arm64) protection shim: applies new_prot to
+ * [start, end) via ml_static_protect().  Only kernel_map is supported
+ * (asserted); zero-length ranges and set_max requests are accepted as
+ * no-ops since there is no mechanism here to record a max protection. */
+static inline kern_return_t
+OSKext_protect(
+	vm_map_t map,
+	vm_map_offset_t start,
+	vm_map_offset_t end,
+	vm_prot_t new_prot,
+	boolean_t set_max)
+{
+#pragma unused(map)
+	assert(map == kernel_map); // we can handle KEXTs arising from the PRELINK segment and no others
+	assert(start <= end);
+	if (start >= end)
+		return KERN_SUCCESS; // Punt segments of length zero (e.g., headers) or less (i.e., blunders)
+	else if (set_max)
+		return KERN_SUCCESS; // Punt set_max, as there's no mechanism to record that state
+	else
+		return ml_static_protect(start, end - start, new_prot);
+}
+
+/* Non-VM_MAPPED_KEXTS wiring shim: always succeeds without doing any
+ * work, because PRELINK kexts are already resident (see comment below). */
+static inline kern_return_t
+OSKext_wire(
+	vm_map_t map,
+	vm_map_offset_t start,
+	vm_map_offset_t end,
+	vm_prot_t access_type,
+	boolean_t user_wire)
+{
+#pragma unused(map,start,end,access_type,user_wire)
+	return KERN_SUCCESS; // No-op as PRELINK kexts are cemented into physical memory at boot
+}
+#else
#error Unrecognized architecture
+#endif
#else
static inline kern_return_t
OSKext_protect(
vm_prot_t access_type,
boolean_t user_wire)
{
- return vm_map_wire(map, start, end, access_type | VM_PROT_MEMORY_TAG_MAKE(VM_KERN_MEMORY_KEXT), user_wire);
+ return vm_map_wire_kernel(map, start, end, access_type, VM_KERN_MEMORY_KEXT, user_wire);
}
#endif
seg = firstsegfromheader((kernel_mach_header_t *)kmod_info->address);
while (seg) {
+#if __arm__
+ /* We build all ARM kexts, so we can ensure they are aligned */
+ assert((seg->vmaddr & PAGE_MASK) == 0);
+ assert((seg->vmsize & PAGE_MASK) == 0);
+#endif
start = round_page(seg->vmaddr);
end = trunc_page(seg->vmaddr + seg->vmsize);
goto finish;
}
- if (hasOSMetaClassInstances()) {
+ if (!isLoaded()) {
+ result = kOSReturnSuccess;
+ goto finish;
+ }
+
+ if (isKernelComponent()) {
+ result = kOSKextReturnInvalidArgument;
+ goto finish;
+ }
+
+ if (metaClasses && !OSMetaClass::removeClasses(metaClasses)) {
OSKextLog(this,
kOSKextLogErrorLevel |
kOSKextLogLoadFlag | kOSKextLogKextBookkeepingFlag,
result = kOSKextReturnInUse;
goto finish;
}
-
- if (!isLoaded()) {
- result = kOSReturnSuccess;
- goto finish;
- }
-
- if (isKernelComponent()) {
- result = kOSKextReturnInvalidArgument;
- goto finish;
- }
/* Note that the kext is unloading before running any code that
* might be in the kext (request callbacks, module stop function).
/* Unwire and free the linked executable.
*/
if (linkedExecutable) {
+#if KASAN
+ kasan_unload_kext((vm_offset_t)linkedExecutable->getBytesNoCopy(), linkedExecutable->getLength());
+#endif
+
#if VM_MAPPED_KEXTS
if (!isInterface()) {
kernel_segment_command_t *seg = NULL;
if (responseObject) {
result = kOSReturnSuccess;
} else {
- OSKextLog(/* kext */ NULL,
- kOSKextLogErrorLevel |
- kOSKextLogIPCFlag,
- "Get UUID by Address failed.");
goto finish;
}
return position;
}
-
int OSKextGrabPgoDataLocked(OSKext *kext,
bool metadata,
uuid_t instance_uuid,
char *pBuffer,
uint64_t bufferSize)
{
-
int err = 0;
kernel_section_t *sect_prf_data = NULL;
return result;
}
-/*********************************************************************
-*********************************************************************/
-/* static */
-OSData *
-OSKext::copyKextUUIDForAddress(OSNumber *address)
-{
- OSKext *kext = NULL;
- OSData *uuid = NULL;
- vm_address_t vm_addr = 0;
-
- if (!address)
- goto finish;
-
-#if CONFIG_MACF
- /* Is the calling process allowed to query kext info? */
- if (current_task() != kernel_task) {
- int macCheckResult = 0;
- kauth_cred_t cred = NULL;
-
- cred = kauth_cred_get_with_ref();
- macCheckResult = mac_kext_check_query(cred);
- kauth_cred_unref(&cred);
-
- if (macCheckResult != 0) {
- OSKextLog(/* kext */ NULL,
- kOSKextLogErrorLevel | kOSKextLogLoadFlag,
- "Failed to query kext UUID (MAC policy error 0x%x).",
- macCheckResult);
- goto finish;
- }
- }
-#endif
-
- vm_addr = (vm_address_t)(address->unsigned64BitValue() + vm_kernel_slide);
-
- kext = OSKext::lookupKextWithAddress(vm_addr);
- if (kext) {
- uuid = kext->copyUUID();
- }
-
-finish:
- if (kext) {
- kext->release();
- }
- return uuid;
-}
-
-
/*********************************************************************
*********************************************************************/
/* static */
segp->filesize = 0;
}
}
+
#if 0
OSKextLog(/* kext */ NULL,
kOSKextLogErrorLevel |
}
#endif
segp->vmaddr = VM_KERNEL_UNSLIDE(segp->vmaddr);
-
+
for (secp = firstsect(segp); secp != NULL; secp = nextsect(segp, secp)) {
secp->addr = VM_KERNEL_UNSLIDE(secp->addr);
}
return result;
}
+
+/*********************************************************************
+* Busy timeout triage
+*********************************************************************/
+/* static */
+bool
+OSKext::isWaitingKextd(void)
+{
+	/* True while any request-callback records are outstanding —
+	 * presumably requests sent to userland kextd that have not yet
+	 * been answered (name suggests; confirm against callers). */
+	return sRequestCallbackRecords && sRequestCallbackRecords->getCount();
+}
+
/*********************************************************************
* Assumes sKextLock is held.
*********************************************************************/
vm_offset_t * addr,
unsigned int cnt,
int (* printf_func)(const char *fmt, ...),
- bool lockFlag,
- bool doUnslide)
+ uint32_t flags)
{
addr64_t summary_page = 0;
addr64_t last_summary_page = 0;
bool found_kmod = false;
u_int i = 0;
- if (lockFlag) {
+ if (kPrintKextsLock & flags) {
if (!sKextSummariesLock) return;
IOLockLock(sKextSummariesLock);
}
}
if (!found_kmod) {
- (*printf_func)(" Kernel Extensions in backtrace:\n");
+ if (!(kPrintKextsTerse & flags)) {
+ (*printf_func)(" Kernel Extensions in backtrace:\n");
+ }
found_kmod = true;
}
- printSummary(summary, printf_func, doUnslide);
+ printSummary(summary, printf_func, flags);
}
finish:
- if (lockFlag) {
+ if (kPrintKextsLock & flags) {
IOLockUnlock(sKextSummariesLock);
}
return FALSE;
}
-/* static */
-void *
-OSKext::kextForAddress(
-    const void * addr)
+/*
+ * Get the kext summary object for the kext where 'addr' lies. Must be called with
+ * sKextSummariesLock held.
+ *
+ * Returns NULL when 'addr' does not fall within any recorded summary.
+ */
+OSKextLoadedKextSummary *
+OSKext::summaryForAddress(const uintptr_t addr)
{
-    void *image = NULL;
-    u_int i;
-
-#if !VM_MAPPED_KEXTS
-    kernel_mach_header_t *mh = NULL;
-    kernel_segment_command_t *seg = NULL;
-#endif
-
-    if (((vm_offset_t)(uintptr_t)addr >= vm_kernel_stext) &&
-        ((vm_offset_t)(uintptr_t)addr < vm_kernel_etext)) {
-        return (void *)&_mh_execute_header;
-    }
-
-    if (!sKextSummariesLock) return image;
-    IOLockLock(sKextSummariesLock);
-
-    if (!gLoadedKextSummaries) {
-        goto finish;
-    }
+	for (unsigned i = 0; i < gLoadedKextSummaries->numSummaries; ++i) {
-    for (i = 0; i < gLoadedKextSummaries->numSummaries; ++i) {
-        OSKextLoadedKextSummary * summary;
+		OSKextLoadedKextSummary *summary = &gLoadedKextSummaries->summaries[i];
+		if (!summary->address) {
+			continue;
+		}
-        summary = gLoadedKextSummaries->summaries + i;
-        if (!summary->address) {
-            continue;
-        }
+#if VM_MAPPED_KEXTS
+	/* On our platforms that use VM_MAPPED_KEXTS, we currently do not
+	 * support split kexts, but we also may unmap the kexts, which can
+	 * race with the above codepath (see OSKext::unload). As such,
+	 * use a simple range lookup if we are using VM_MAPPED_KEXTS.
+	 */
+		if ((addr >= summary->address) && (addr < (summary->address + summary->size))) {
+			return summary;
+		}
+#else
+		/* Split kexts (non-VM_MAPPED): check each Mach-O segment of the kext. */
+		kernel_mach_header_t *mh = (kernel_mach_header_t *)summary->address;
+		kernel_segment_command_t *seg;
+
+		for (seg = firstsegfromheader(mh); seg != NULL; seg = nextsegfromheader(mh, seg)) {
+			if ((addr >= seg->vmaddr) && (addr < (seg->vmaddr + seg->vmsize))) {
+				return summary;
+			}
+		}
+#endif
+	}
-#if !VM_MAPPED_KEXTS
-    mh = (kernel_mach_header_t *)summary->address;
+	/* addr did not map to any kext */
+	return NULL;
+}
-    for (seg = firstsegfromheader(mh); seg != NULL; seg = nextsegfromheader(mh, seg)) {
-        if (((uint64_t)addr >= seg->vmaddr) &&
-            ((uint64_t)addr < (seg->vmaddr + seg->vmsize))) {
-            image = (void *)summary->address;
-            break;
-        }
-    }
+/*********************************************************************
+* Returns the base address (Mach-O header) of the loaded kext image
+* containing 'addr', &_mh_execute_header for addresses inside xnu
+* proper, or NULL when no match is found.
+*********************************************************************/
+/* static */
+void *
+OSKext::kextForAddress(const void *addr)
+{
+	void *image = NULL;
-    if (image) {
-        break;
-    }
-#else
-    /* On our platforms that use VM_MAPPED_KEXTS, we currently do not
-     * support split kexts, but we also may unmap the kexts, which can
-     * race with the above codepath (see OSKext::unload). As such,
-     * use a simple range lookup if we are using VM_MAPPED_KEXTS.
-     */
-    if (((uint64_t)(uintptr_t)addr >= summary->address) &&
-        ((uint64_t)(uintptr_t)addr < (summary->address + summary->size)))
-    {
-        image = (void *)(uintptr_t)summary->address;
-        break;
-    }
-#endif
-    }
+	/* Addresses inside xnu proper resolve to the kernel's own header. */
+	if (((vm_offset_t)(uintptr_t)addr >= vm_kernel_stext) &&
+		((vm_offset_t)(uintptr_t)addr < vm_kernel_etext)) {
+		return (void *)&_mh_execute_header;
+	}
-finish:
-    IOLockUnlock(sKextSummariesLock);
+	/* No summaries lock yet means no kext summaries have been recorded. */
+	if (!sKextSummariesLock) {
+		return NULL;
+	}
+	IOLockLock(sKextSummariesLock);
+	OSKextLoadedKextSummary *summary = OSKext::summaryForAddress((uintptr_t)addr);
+	if (summary) {
+		image = (void *)summary->address;
+	}
+	IOLockUnlock(sKextSummariesLock);
-    return image;
+	return image;
}
/*********************************************************************
void OSKext::printSummary(
OSKextLoadedKextSummary * summary,
int (* printf_func)(const char *fmt, ...),
- bool doUnslide)
+ uint32_t flags)
{
kmod_reference_t * kmod_ref = NULL;
uuid_string_t uuid;
}
(void) uuid_unparse(summary->uuid, uuid);
- if (doUnslide) {
+ if (kPrintKextsUnslide & flags) {
tmpAddr = VM_KERNEL_UNSLIDE(summary->address);
}
else {
tmpAddr = summary->address;
}
- (*printf_func)(" %s(%s)[%s]@0x%llx->0x%llx\n",
+ (*printf_func)("%s%s(%s)[%s]@0x%llx->0x%llx\n",
+ (kPrintKextsTerse & flags) ? "" : " ",
summary->name, version, uuid,
tmpAddr, tmpAddr + summary->size - 1);
+
+ if (kPrintKextsTerse & flags) return;
/* print dependency info */
for (kmod_ref = (kmod_reference_t *) summary->reference_list;
if (pmap_find_phys(kernel_pmap, (addr64_t)((uintptr_t)kmod_ref)) == 0) {
(*printf_func)(" kmod dependency scan stopped "
"due to missing dependency page: %p\n",
- doUnslide ? (void *)VM_KERNEL_UNSLIDE(kmod_ref) : kmod_ref);
+ (kPrintKextsUnslide & flags) ? (void *)VM_KERNEL_UNSLIDE(kmod_ref) : kmod_ref);
break;
}
rinfo = kmod_ref->info;
if (pmap_find_phys(kernel_pmap, (addr64_t)((uintptr_t)rinfo)) == 0) {
(*printf_func)(" kmod dependency scan stopped "
"due to missing kmod page: %p\n",
- doUnslide ? (void *)VM_KERNEL_UNSLIDE(rinfo) : rinfo);
+ (kPrintKextsUnslide & flags) ? (void *)VM_KERNEL_UNSLIDE(rinfo) : rinfo);
break;
}
/* locate UUID in gLoadedKextSummaries */
findSummaryUUID(rinfo->id, uuid);
- if (doUnslide) {
+ if (kPrintKextsUnslide & flags) {
tmpAddr = VM_KERNEL_UNSLIDE(rinfo->address);
}
else {
{
OSKextActiveAccount * active;
vm_allocation_site_t * site;
+ vm_allocation_site_t * releasesite;
+
uint32_t baseIdx;
uint32_t lim;
IOSimpleLockLock(sKextAccountsLock);
- site = NULL;
+ site = releasesite = NULL;
+
// bsearch sKextAccounts list
for (baseIdx = 0, lim = sKextAccountsCount; lim; lim >>= 1)
{
if ((address >= active->address) && (address < active->address_end))
{
site = &active->account->site;
- if (!site->tag) vm_tag_alloc_locked(site);
+ if (!site->tag) vm_tag_alloc_locked(site, &releasesite);
break;
}
else if (address > active->address)
// else move left
}
IOSimpleLockUnlock(sKextAccountsLock);
+ if (releasesite) kern_allocation_name_release(releasesite);
return (site);
}
extern "C" uint32_t
-OSKextGetKmodIDForSite(vm_allocation_site_t * site, char * name, vm_size_t namelen)
+OSKextGetKmodIDForSite(const vm_allocation_site_t * site, char * name, vm_size_t namelen)
{
OSKextAccount * account = (typeof(account)) site;
const char * kname;
#endif // CONFIG_KEC_FIPS
+#if CONFIG_IMAGEBOOT
+/* Look up the kext registered under bundle identifier 'name' and copy
+ * its UUID into 'uuid'.  Returns 0 on success, 1 when no such kext is
+ * loaded or it carries no UUID. */
+int OSKextGetUUIDForName(const char *name, uuid_t uuid)
+{
+	OSKext *kext = OSKext::lookupKextWithIdentifier(name);
+	if (!kext) {
+		return 1;
+	}
+
+	int result = 1;
+	OSData *uuid_data = kext->copyUUID();
+	if (uuid_data) {
+		memcpy(uuid, uuid_data->getBytesNoCopy(), sizeof(uuid_t));
+		OSSafeReleaseNULL(uuid_data);
+		result = 0;
+	}
+
+	/* lookupKextWithIdentifier() returns a retained reference (the
+	 * lookupKextWithAddress caller elsewhere in this file releases its
+	 * result the same way); drop it so repeated queries do not leak. */
+	kext->release();
+	return result;
+}
+#endif
+