/*
- * Copyright (c) 2008-2012 Apple Inc. All rights reserved.
+ * Copyright (c) 2008-2016 Apple Inc. All rights reserved.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
*/
extern "C" {
+#include <string.h>
#include <kern/clock.h>
#include <kern/host.h>
#include <kern/kext_alloc.h>
+#include <firehose/tracepoint_private.h>
+#include <firehose/chunk_private.h>
+#include <os/firehose_buffer_private.h>
+#include <vm/vm_kern.h>
#include <kextd/kextd_mach.h>
#include <libkern/kernel_mach_header.h>
#include <libkern/kext_panic_report.h>
// 04/18/11 - gab: <rdar://problem/9236163>
#include <sys/random.h>
+#include <sys/pgo.h>
+
#if CONFIG_MACF
#include <sys/kauth.h>
#include <security/mac_framework.h>
#include <IOKit/IOService.h>
#include <IOKit/IOStatisticsPrivate.h>
+#include <IOKit/IOBSD.h>
+
+#include <san/kasan.h>
#if PRAGMA_MARK
#pragma mark External & Internal Function Protos
#define VM_MAPPED_KEXTS 1
#define KASLR_KEXT_DEBUG 0
#define KASLR_IOREG_DEBUG 0
+#elif __arm__ || __arm64__
+#define VM_MAPPED_KEXTS 0
+#define KASLR_KEXT_DEBUG 0
#else
#error Unsupported architecture
#endif
*/
#define _kOSKextExecutableExternalDataKey "_OSKextExecutableExternalData"
+#define OS_LOG_HDR_VERSION 1
+#define NUM_OS_LOG_SECTIONS 2
+
+#define OS_LOG_SECT_IDX 0
+#define CSTRING_SECT_IDX 1
+
#if PRAGMA_MARK
#pragma mark Typedefs
#endif
* Typedefs
*********************************************************************/
+/*********************************************************************
+* osLogDataHeaderRef describes the header information of an OSData
+* object that is returned when querying for kOSBundleLogStringsKey.
+* We currently return information regarding 2 sections - os_log and
+* cstring. In the case that the os_log section doesn't exist, we just
+* return an offset and length of 0 for that section.
+*********************************************************************/
+typedef struct osLogDataHeader {
+ uint32_t version; /* OS_LOG_HDR_VERSION */
+ uint32_t sect_count; /* number of entries in sections[]; NUM_OS_LOG_SECTIONS */
+ struct {
+ uint32_t sect_offset; /* byte offset of section data from start of this header */
+ uint32_t sect_size; /* section length in bytes; 0 if the section is absent */
+ } sections[0]; /* variable-length trailer; [0] rather than C99 [] per existing kernel style */
+} osLogDataHeaderRef;
+
/*********************************************************************
* MkextEntryRef describes the contents of an OSData object
* referencing a file entry from an mkext so that we can uncompress
/* version */ "0", // filled in in OSKext::initialize()
/* reference_count */ -1, // never adjusted; kernel never unloads
/* reference_list */ NULL,
- /* address */ NULL,
+ /* address */ 0,
/* size */ 0, // filled in in OSKext::initialize()
/* hdr_size */ 0,
/* start */ 0,
* to automatically parse the list of loaded kexts.
**********/
static IOLock * sKextSummariesLock = NULL;
+extern "C" lck_spin_t vm_allocation_sites_lock;
+static IOSimpleLock * sKextAccountsLock = &vm_allocation_sites_lock;
void (*sLoadedKextSummariesUpdated)(void) = OSKextLoadedKextSummariesUpdated;
OSKextLoadedKextSummaryHeader * gLoadedKextSummaries __attribute__((used)) = NULL;
+uint64_t gLoadedKextSummariesTimestamp __attribute__((used)) = 0;
static size_t sLoadedKextSummariesAllocSize = 0;
+
+static OSKextActiveAccount * sKextAccounts;
+static uint32_t sKextAccountsCount;
};
/*********************************************************************
static OSKextLogSpec sKernelLogFilter = kDefaultKernelLogFilter;
static bool sBootArgLogFilterFound = false;
SYSCTL_UINT(_debug, OID_AUTO, kextlog, CTLFLAG_RW | CTLFLAG_LOCKED, &sKernelLogFilter,
- sKernelLogFilter, "kernel kext logging");
+ 0, "kernel kext logging");
static OSKextLogSpec sUserSpaceKextLogFilter = kOSKextLogSilentFilter;
static OSArray * sUserSpaceLogSpecArray = NULL;
* End scope for sKextInnerLock-protected variables.
*********************************************************************/
+
+/*********************************************************************
+ helper function used for collecting PGO data upon unload of a kext
+ */
+
+static int OSKextGrabPgoDataLocked(OSKext *kext,
+ bool metadata,
+ uuid_t instance_uuid,
+ uint64_t *pSize,
+ char *pBuffer,
+ uint64_t bufferSize);
+
+/**********************************************************************/
+
+
+
#if PRAGMA_MARK
#pragma mark OSData callbacks (need to move to OSData)
#endif
result = 0;
}
- OSSafeRelease(linkBuffer);
+ OSSafeReleaseNULL(linkBuffer);
return (kxld_addr_t)result;
}
}
PE_parse_boot_argn("keepsyms", &sKeepSymbols, sizeof(sKeepSymbols));
+#if KASAN_DYNAMIC_BLACKLIST
+ /* needed for function lookup */
+ sKeepSymbols = true;
+#endif
/* Set up an OSKext instance to represent the kernel itself.
*/
assert(kernelExecutable);
#if KASLR_KEXT_DEBUG
- IOLog("kaslr: kernel start 0x%lx end 0x%lx length %lu \n",
+ IOLog("kaslr: kernel start 0x%lx end 0x%lx length %lu vm_kernel_slide %llu (0x%016lx) \n",
(unsigned long)kernelStart,
(unsigned long)getlastaddr(),
- kernelLength);
+ kernelLength,
+ vm_kernel_slide, vm_kernel_slide);
#endif
sKernelKext->loadTag = sNextLoadTag++; // the kernel is load tag 0
registryRoot->setProperty(kOSKernelCPUTypeKey, kernelCPUType);
registryRoot->setProperty(kOSKernelCPUSubtypeKey, kernelCPUSubtype);
- OSSafeRelease(kernelCPUType);
- OSSafeRelease(kernelCPUSubtype);
+ OSSafeReleaseNULL(kernelCPUType);
+ OSSafeReleaseNULL(kernelCPUSubtype);
timestamp = __OSAbsoluteTimePtr(&last_loaded_timestamp);
*timestamp = 0;
kernel_segment_command_t * seg_to_remove = NULL;
+#if __arm__ || __arm64__
+ const char * dt_segment_name = NULL;
+ void * segment_paddress = NULL;
+ int segment_size = 0;
+#endif
/* This must be the very first thing done by this function.
*/
OSRuntimeUnloadCPPForSegment(seg_to_remove);
}
-#if __i386__ || __x86_64__
+#if __arm__ || __arm64__
+#if !(defined(KERNEL_INTEGRITY_KTRR))
+ /* Free the memory that was set up by bootx.
+ */
+ dt_segment_name = "Kernel-__KLD";
+ if (0 == IODTGetLoaderInfo(dt_segment_name, &segment_paddress, &segment_size)) {
+ /* We cannot free this with KTRR enabled, as we cannot
+ * update the permissions on the KLD range this late
+ * in the boot process.
+ */
+ IODTFreeLoaderInfo(dt_segment_name, (void *)segment_paddress,
+ (int)segment_size);
+ }
+#endif /* !(defined(KERNEL_INTEGRITY_KTRR)) */
+#elif __i386__ || __x86_64__
/* On x86, use the mapping data from the segment load command to
* unload KLD directly.
* This may invalidate any assumptions about "avail_start"
* managed memory, then copy the segment back in.
*/
#if CONFIG_KXLD
+#if (__arm__ || __arm64__)
+#error CONFIG_KXLD not expected for this arch
+#endif
if (!sKeepSymbols) {
kern_return_t mem_result;
void *seg_copy = NULL;
/* Allocate space for the LINKEDIT copy.
*/
mem_result = kmem_alloc(kernel_map, (vm_offset_t *) &seg_copy,
- seg_length);
+ seg_length, VM_KERN_MEMORY_KEXT);
if (mem_result != KERN_SUCCESS) {
OSKextLog(/* kext */ NULL,
kOSKextLogErrorLevel |
&seg_offset,
seg_length, /* mask */ 0,
VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE,
+ VM_MAP_KERNEL_FLAGS_NONE,
+ VM_KERN_MEMORY_NONE,
(ipc_port_t)NULL,
(vm_object_offset_t) 0,
/* copy */ FALSE,
kmem_free(kernel_map, seg_copy_offset, seg_length);
}
#else /* we are not CONFIG_KXLD */
+#if !(__arm__ || __arm64__)
#error CONFIG_KXLD is expected for this arch
+#endif
/*****
* Dump the LINKEDIT segment, unless keepsyms is set.
*/
if (!sKeepSymbols) {
- const char *dt_segment_name = "Kernel-__LINKEDIT";
+ dt_segment_name = "Kernel-__LINKEDIT";
if (0 == IODTGetLoaderInfo(dt_segment_name,
&segment_paddress, &segment_size)) {
#ifdef SECURE_KERNEL
finish:
IORecursiveLockUnlock(sKextLock);
- OSSafeRelease(prelinkedKexts);
- OSSafeRelease(kextIterator);
- OSSafeRelease(prelinkIterator);
+ OSSafeReleaseNULL(prelinkedKexts);
+ OSSafeReleaseNULL(kextIterator);
+ OSSafeReleaseNULL(prelinkIterator);
return;
}
IORecursiveLockUnlock(sKextLock);
- OSSafeRelease(exitRequest);
+ OSSafeReleaseNULL(exitRequest);
return;
}
*********************************************************************/
OSKext *
OSKext::withPrelinkedInfoDict(
- OSDictionary * anInfoDict)
+ OSDictionary * anInfoDict,
+ bool doCoalesedSlides)
{
OSKext * newKext = new OSKext;
- if (newKext && !newKext->initWithPrelinkedInfoDict(anInfoDict)) {
+ if (newKext && !newKext->initWithPrelinkedInfoDict(anInfoDict, doCoalesedSlides)) {
newKext->release();
return NULL;
}
*********************************************************************/
bool
OSKext::initWithPrelinkedInfoDict(
- OSDictionary * anInfoDict)
+ OSDictionary * anInfoDict,
+ bool doCoalesedSlides)
{
bool result = false;
OSString * kextPath = NULL; // do not release
goto finish;
}
#if KASLR_KEXT_DEBUG
- IOLog("kaslr: kext %s \n", getIdentifierCString());
+ IOLog("kaslr: doCoalesedSlides %d kext %s \n", doCoalesedSlides, getIdentifierCString());
#endif
/* Also get the executable's bundle-relative path if present.
length = (uint32_t) (lengthNum->unsigned32BitValue());
#if KASLR_KEXT_DEBUG
- IOLog("kaslr: unslid 0x%lx slid 0x%lx length %u - prelink executable \n",
+ IOLog("kaslr: unslid 0x%lx slid 0x%lx length %u - prelink executable \n",
(unsigned long)VM_KERNEL_UNSLIDE(data),
(unsigned long)data,
length);
anInfoDict->removeObject(kPrelinkExecutableLoadKey);
anInfoDict->removeObject(kPrelinkExecutableSizeKey);
- /* If the kext's load address differs from its source address, allocate
- * space in the kext map at the load address and copy the kext over.
- */
+ /* If the kext's load address differs from its source address, allocate
+ * space in the kext map at the load address and copy the kext over.
+ */
addressNum = OSDynamicCast(OSNumber, anInfoDict->getObject(kPrelinkExecutableSourceKey));
if (addressNum) {
srcData = (void *) ((intptr_t) (addressNum->unsigned64BitValue()) + vm_kernel_slide);
-
+
#if KASLR_KEXT_DEBUG
- IOLog("kaslr: unslid 0x%lx slid 0x%lx - prelink executable source \n",
- (unsigned long)VM_KERNEL_UNSLIDE(srcData),
+ IOLog("kaslr: unslid 0x%lx slid 0x%lx - prelink executable source \n",
+ (unsigned long)VM_KERNEL_UNSLIDE(srcData),
(unsigned long)srcData);
#endif
-
+
if (data != srcData) {
#if __LP64__
kern_return_t alloc_result;
-
+
alloc_result = kext_alloc((vm_offset_t *)&data, length, /* fixed */ TRUE);
if (alloc_result != KERN_SUCCESS) {
OSKextLog(this,
- kOSKextLogErrorLevel | kOSKextLogGeneralFlag,
- "Failed to allocate space for prelinked kext %s.",
- getIdentifierCString());
+ kOSKextLogErrorLevel | kOSKextLogGeneralFlag,
+ "Failed to allocate space for prelinked kext %s.",
+ getIdentifierCString());
goto finish;
}
memcpy(data, srcData, length);
#else
OSKextLog(this,
- kOSKextLogErrorLevel | kOSKextLogGeneralFlag,
- "Error: prelinked kext %s - source and load addresses "
- "differ on ILP32 architecture.",
- getIdentifierCString());
+ kOSKextLogErrorLevel | kOSKextLogGeneralFlag,
+ "Error: prelinked kext %s - source and load addresses "
+ "differ on ILP32 architecture.",
+ getIdentifierCString());
goto finish;
#endif /* __LP64__ */
}
-
+
anInfoDict->removeObject(kPrelinkExecutableSourceKey);
}
kmod_info = (kmod_info_t *) (intptr_t) (addressNum->unsigned64BitValue() + vm_kernel_slide);
kmod_info->address += vm_kernel_slide;
#if KASLR_KEXT_DEBUG
- IOLog("kaslr: unslid 0x%lx slid 0x%lx - kmod_info \n",
+ IOLog("kaslr: unslid 0x%lx slid 0x%lx - kmod_info \n",
(unsigned long)VM_KERNEL_UNSLIDE(kmod_info),
(unsigned long)kmod_info);
IOLog("kaslr: unslid 0x%lx slid 0x%lx - kmod_info->address \n",
}
}
+ result = slidePrelinkedExecutable(doCoalesedSlides);
+ if (result != kOSReturnSuccess) {
+ goto finish;
+ }
+
+ if (doCoalesedSlides == false) {
+ /* set VM protections now, wire later at kext load */
+ result = setVMAttributes(true, false);
+ if (result != KERN_SUCCESS) {
+ goto finish;
+ }
+ }
+
flags.prelinked = true;
/* If we created a kext from prelink info,
result = registerIdentifier();
finish:
- OSSafeRelease(prelinkedExecutable);
+ OSSafeReleaseNULL(prelinkedExecutable);
return result;
}
+/*********************************************************************
+* Walks every registered kext and applies VM protections to each one
+* that has an executable (interfaces and executable-less kexts are
+* skipped). Wiring is deferred until the kext is actually loaded --
+* see the setVMAttributes(true, false) call below.
+*********************************************************************/
+/* static */
+void OSKext::setAllVMAttributes(void)
+{
+ OSCollectionIterator * kextIterator = NULL; // must release
+ const OSSymbol * thisID = NULL; // do not release
+
+ /* sKextsByID is protected by sKextLock; hold it for the whole walk. */
+ IORecursiveLockLock(sKextLock);
+
+ kextIterator = OSCollectionIterator::withCollection(sKextsByID);
+ if (!kextIterator) {
+ goto finish;
+ }
+
+ while ((thisID = OSDynamicCast(OSSymbol, kextIterator->getNextObject()))) {
+ OSKext * thisKext; // do not release
+
+ thisKext = OSDynamicCast(OSKext, sKextsByID->getObject(thisID));
+ /* Only kexts with a real executable need VM protections. */
+ if (!thisKext || thisKext->isInterface() || !thisKext->declaresExecutable()) {
+ continue;
+ }
+
+ /* set VM protections now, wire later at kext load */
+ thisKext->setVMAttributes(true, false);
+ }
+
+finish:
+ IORecursiveLockUnlock(sKextLock);
+ OSSafeReleaseNULL(kextIterator);
+
+ return;
+}
+
/*********************************************************************
*********************************************************************/
OSKext *
result = registerIdentifier();
finish:
- OSSafeRelease(parsedXML);
- OSSafeRelease(kextPath);
- OSSafeRelease(errorString);
- OSSafeRelease(executable);
+ OSSafeReleaseNULL(parsedXML);
+ OSSafeReleaseNULL(kextPath);
+ OSSafeReleaseNULL(errorString);
+ OSSafeReleaseNULL(executable);
return result;
}
getIdentifierCString(), newVersionCString);
}
- OSSafeRelease(newUUID);
- OSSafeRelease(existingUUID);
+ OSSafeReleaseNULL(newUUID);
+ OSSafeReleaseNULL(existingUUID);
return result;
}
panic("Attempt to free loaded kext %s.", getIdentifierCString());
}
- OSSafeRelease(infoDict);
- OSSafeRelease(bundleID);
- OSSafeRelease(path);
- OSSafeRelease(executableRelPath);
- OSSafeRelease(dependencies);
- OSSafeRelease(linkedExecutable);
- OSSafeRelease(metaClasses);
- OSSafeRelease(interfaceUUID);
+ OSSafeReleaseNULL(infoDict);
+ OSSafeReleaseNULL(bundleID);
+ OSSafeReleaseNULL(path);
+ OSSafeReleaseNULL(executableRelPath);
+ OSSafeReleaseNULL(dependencies);
+ OSSafeReleaseNULL(linkedExecutable);
+ OSSafeReleaseNULL(metaClasses);
+ OSSafeReleaseNULL(interfaceUUID);
if (isInterface() && kmod_info) {
kfree(kmod_info, sizeof(kmod_info_t));
infoDict = OSDynamicCast(OSDictionary,
- mkextInfoDictArray->getObject(i));
+ mkextInfoDictArray->getObject(i));
/* Create the kext for the entry, then release it, because the
* kext system keeps them around until explicitly removed.
* Any creation/registration failures are already logged for us.
*/
- OSKext * newKext = OSKext::withMkext2Info(infoDict, mkextData);
- OSSafeRelease(newKext);
+ if (infoDict) {
+ OSKext * newKext = OSKext::withMkext2Info(infoDict, mkextData);
+ OSSafeReleaseNULL(newKext);
+ }
}
/* Even if we didn't keep any kexts from the mkext, we may have a load
finish:
- OSSafeRelease(parsedXML);
- OSSafeRelease(mkextPlistUncompressedData);
- OSSafeRelease(errorString);
+ OSSafeReleaseNULL(parsedXML);
+ OSSafeReleaseNULL(mkextPlistUncompressedData);
+ OSSafeReleaseNULL(errorString);
return result;
}
OSCollectionIterator * iterator = NULL; // must release
OSData * executable = NULL; // must release
- if (!super::init()) {
+ if (anInfoDict == NULL || !super::init()) {
goto finish;
}
/* Get the path. Don't look for an arch-specific path property.
*/
kextPath = OSDynamicCast(OSString,
- anInfoDict->getObject(kMKEXTBundlePathKey));
+ anInfoDict->getObject(kMKEXTBundlePathKey));
if (!setInfoDictionaryAndPath(anInfoDict, kextPath)) {
goto finish;
finish:
- OSSafeRelease(executable);
- OSSafeRelease(iterator);
+ OSSafeReleaseNULL(executable);
+ OSSafeReleaseNULL(iterator);
return result;
}
}
uint32_t allocSize = (uint32_t)allocSize64;
- zmem = (z_mem *)kalloc(allocSize);
+ zmem = (z_mem *)kalloc_tag(allocSize, VM_KERN_MEMORY_OSKEXT);
if (!zmem) {
goto finish;
}
}
if (KERN_SUCCESS != kmem_alloc(kernel_map,
- (vm_offset_t*)&uncompressedDataBuffer, fullSize)) {
+ (vm_offset_t*)&uncompressedDataBuffer, fullSize, VM_KERN_MEMORY_OSKEXT)) {
/* How's this for cheesy? The kernel is only asked to extract
* kext plists so we tailor the log messages.
if (zstream_inited) inflateEnd(&zstream);
if (!result) {
- OSSafeRelease(uncompressedData);
+ OSSafeReleaseNULL(uncompressedData);
}
return result;
IORecursiveLockUnlock(sKextLock);
- OSSafeRelease(mkextData);
- OSSafeRelease(mkextPlist);
- OSSafeRelease(serializer);
- OSSafeRelease(logInfoArray);
+ OSSafeReleaseNULL(mkextData);
+ OSSafeReleaseNULL(mkextPlist);
+ OSSafeReleaseNULL(serializer);
+ OSSafeReleaseNULL(logInfoArray);
return result;
}
logInfo = serializer->text();
logInfoLength = serializer->getLength();
- kmem_result = kmem_alloc(kernel_map, (vm_offset_t *)&buffer, round_page(logInfoLength));
+ kmem_result = kmem_alloc(kernel_map, (vm_offset_t *)&buffer, round_page(logInfoLength), VM_KERN_MEMORY_OSKEXT);
if (kmem_result != KERN_SUCCESS) {
OSKextLog(/* kext */ NULL,
kOSKextLogErrorLevel |
result = kOSReturnSuccess;
finish:
- OSSafeRelease(serializer);
+ OSSafeReleaseNULL(serializer);
return result;
}
(vm_address_t)thisKext->linkedExecutable->getBytesNoCopy();
vm_address_t kext_end = kext_start +
thisKext->linkedExecutable->getLength();
-
if ((kext_start <= address) && (address < kext_end)) {
foundKext = thisKext;
foundKext->retain();
return foundKext;
}
+/*********************************************************************
+* Returns the UUID of the binary image containing the given address
+* (supplied unslid; vm_kernel_slide is added here), as an OSData the
+* caller must release, or NULL on failure / no match.
+* For non-kernel tasks the MAC policy is consulted first
+* (mac_kext_check_query) so unprivileged callers cannot probe kext
+* load addresses.
+*********************************************************************/
+OSData *
+OSKext::copyKextUUIDForAddress(OSNumber *address)
+{
+ OSData *uuid = NULL;
+
+ if (!address) {
+ return NULL;
+ }
+
+ /* Caller passes an unslid address; reapply the KASLR slide. */
+ uintptr_t addr = (uintptr_t)address->unsigned64BitValue() + vm_kernel_slide;
+
+#if CONFIG_MACF
+ /* Is the calling process allowed to query kext info? */
+ if (current_task() != kernel_task) {
+ int macCheckResult = 0;
+ kauth_cred_t cred = NULL;
+
+ cred = kauth_cred_get_with_ref();
+ macCheckResult = mac_kext_check_query(cred);
+ kauth_cred_unref(&cred);
+
+ if (macCheckResult != 0) {
+ OSKextLog(/* kext */ NULL,
+ kOSKextLogErrorLevel | kOSKextLogLoadFlag,
+ "Failed to query kext UUID (MAC policy error 0x%x).",
+ macCheckResult);
+ return NULL;
+ }
+ }
+#endif
+
+ if (((vm_offset_t)addr >= vm_kernel_stext) && ((vm_offset_t)addr < vm_kernel_etext)) {
+ /* address in xnu proper */
+ unsigned long uuid_len = 0;
+ uuid = OSData::withBytes(getuuidfromheader(&_mh_execute_header, &uuid_len), uuid_len);
+ } else {
+ /* Otherwise search the loaded-kext summaries under their lock. */
+ IOLockLock(sKextSummariesLock);
+ OSKextLoadedKextSummary *summary = OSKext::summaryForAddress(addr);
+ if (summary) {
+ uuid = OSData::withBytes(summary->uuid, sizeof(uuid_t));
+ }
+ IOLockUnlock(sKextSummariesLock);
+ }
+
+ return uuid;
+}
+
+/*********************************************************************
+* Linear search of sLoadedKexts for a kext whose Mach-O UUID matches
+* 'wanted'. Returns the kext retained (caller must release), or NULL
+* if no loaded kext has that UUID. Kexts without a UUID are skipped.
+*********************************************************************/
+OSKext *
+OSKext::lookupKextWithUUID(uuid_t wanted)
+{
+ OSKext * foundKext = NULL; // returned
+ uint32_t count, i;
+
+ IORecursiveLockLock(sKextLock);
+
+ count = sLoadedKexts->getCount();
+
+ for (i = 0; i < count; i++) {
+ OSKext * thisKext = NULL;
+
+ thisKext = OSDynamicCast(OSKext, sLoadedKexts->getObject(i));
+ if (!thisKext) {
+ continue;
+ }
+
+ /* copyUUID() returns a retained OSData; release after copying out. */
+ OSData *uuid_data = thisKext->copyUUID();
+ if (!uuid_data) {
+ continue;
+ }
+
+ uuid_t uuid;
+ memcpy(&uuid, uuid_data->getBytesNoCopy(), sizeof(uuid));
+ uuid_data->release();
+
+ if (0 == uuid_compare(wanted, uuid)) {
+ /* Retain for the caller before dropping sKextLock. */
+ foundKext = thisKext;
+ foundKext->retain();
+ goto finish;
+ }
+
+ }
+
+finish:
+ IORecursiveLockUnlock(sKextLock);
+
+ return foundKext;
+}
+
+
+
+
/*********************************************************************
*********************************************************************/
/* static */
OSReturn
OSKext::removeKext(
OSKext * aKext,
+#if CONFIG_EMBEDDED
+ __unused
+#endif
bool terminateServicesAndRemovePersonalitiesFlag)
{
+#if CONFIG_EMBEDDED
+ OSKextLog(aKext,
+ kOSKextLogErrorLevel |
+ kOSKextLogKextBookkeepingFlag,
+ "removeKext() called for %s, not supported on embedded",
+ aKext->getIdentifier() ? aKext->getIdentifierCString() : "unknown kext");
+
+ return kOSReturnSuccess;
+#else /* CONFIG_EMBEDDED */
+
OSReturn result = kOSKextReturnInUse;
OSKext * checkKext = NULL; // do not release
#if CONFIG_MACF
}
#endif
+ /* make sure there are no resource requests in flight - 17187548 */
+ if (aKext->countRequestCallbacks()) {
+ goto finish;
+ }
+
/* If we are terminating, send the request to the IOCatalogue
* (which will actually call us right back but that's ok we have
* a recursive lock don't you know) but do not ask the IOCatalogue
finish:
IORecursiveLockUnlock(sKextLock);
return result;
+#endif /* CONFIG_EMBEDDED */
}
/*********************************************************************
myTempDict = OSDynamicCast(
OSDictionary,
theInfoDict->getObject("OSKextExcludeList"));
- if ( myTempDict ) {
- IORecursiveLockLock(sKextLock);
-
- /* get rid of old exclusion list */
- if (sExcludeListByID) {
- sExcludeListByID->flushCollection();
- OSSafeRelease(sExcludeListByID);
- }
- sExcludeListByID = OSDictionary::withDictionary(myTempDict, 0);
- IORecursiveLockUnlock(sKextLock);
+ if ( NULL == myTempDict ) {
+ /* 25322874 */
+ panic("Missing OSKextExcludeList dictionary\n");
+ }
+
+ IORecursiveLockLock(sKextLock);
+
+ /* get rid of old exclusion list */
+ if (sExcludeListByID) {
+ OSSafeReleaseNULL(sExcludeListByID);
}
+ sExcludeListByID = OSDictionary::withDictionary(myTempDict, 0);
+ IORecursiveLockUnlock(sKextLock);
+
break;
}
OSDictionary * myTempDict; // do not free
myTempDict = OSDynamicCast(OSDictionary,
myInfoDict->getObject("OSKextExcludeList"));
- if ( myTempDict ) {
- IORecursiveLockLock(sKextLock);
- // get rid of old exclude list
- if (sExcludeListByID) {
- sExcludeListByID->flushCollection();
- OSSafeRelease(sExcludeListByID);
- }
-
- sExcludeListByID = OSDictionary::withDictionary(myTempDict, 0);
- IORecursiveLockUnlock(sKextLock);
+ if ( NULL == myTempDict ) {
+ /* 25322874 */
+ panic("Missing OSKextExcludeList dictionary\n");
}
+
+ IORecursiveLockLock(sKextLock);
+ // get rid of old exclude list
+ if (sExcludeListByID) {
+ OSSafeReleaseNULL(sExcludeListByID);
+ }
+
+ sExcludeListByID = OSDictionary::withDictionary(myTempDict, 0);
+ IORecursiveLockUnlock(sKextLock);
break;
}
} // for (i = 0; i < theInfoArray->getCount()...
finish:
- OSSafeRelease(extractedExecutable);
+ OSSafeReleaseNULL(extractedExecutable);
return result;
}
/*********************************************************************
*********************************************************************/
+#if defined (__arm__)
+#include <arm/arch.h>
+#endif
#if defined (__x86_64__)
#define ARCHNAME "x86_64"
+#elif defined (__arm64__)
+#define ARCHNAME "arm64"
+#elif defined (__arm__)
+
+#if defined (__ARM_ARCH_7S__)
+#define ARCHNAME "armv7s"
+#elif defined (__ARM_ARCH_7F__)
+#define ARCHNAME "armv7f"
+#elif defined (__ARM_ARCH_7K__)
+#define ARCHNAME "armv7k"
+#elif defined (_ARM_ARCH_7) /* umbrella for all remaining */
+#define ARCHNAME "armv7"
+#elif defined (_ARM_ARCH_6) /* umbrella for all armv6 */
+#define ARCHNAME "armv6"
+#endif
+
+#elif defined (__arm64__)
+#define ARCHNAME "arm64"
#else
#error architecture not supported
#endif
/* Add 1 for the ARCH_SEPARATOR_CHAR, and 1 for the '\0'.
*/
keySize = 1 + 1 + strlen(key) + strlen(ARCHNAME);
- result = (char *)kalloc(keySize);
+ result = (char *)kalloc_tag(keySize, VM_KERN_MEMORY_OSKEXT);
if (!result) {
goto finish;
}
* string (or strings) that we will not allow to load
*/
versionString = OSDynamicCast(OSString, sExcludeListByID->getObject(bundleID));
- if (!versionString) {
+ if (versionString == NULL || versionString->getLength() > (sizeof(myBuffer) - 1)) {
return(false);
}
startOpt, startMatchingOpt, personalityNames);
finish:
- OSSafeRelease(kextIdentifier);
+ OSSafeReleaseNULL(kextIdentifier);
return result;
}
}
finish:
- OSSafeRelease(loadRequest);
- OSSafeRelease(kextIdentifierSymbol);
+ OSSafeReleaseNULL(loadRequest);
+ OSSafeReleaseNULL(kextIdentifierSymbol);
IORecursiveLockUnlock(sKextLock);
goto finish;
}
+ IORecursiveLockLock(sKextLock);
if (!sAllKextLoadIdentifiers->containsObject(kextIdentifierSymbol)) {
if (!sAllKextLoadIdentifiers->setObject(kextIdentifierSymbol)) {
fail = true;
kextIdentifier->getCStringNoCopy());
}
}
+ IORecursiveLockUnlock(sKextLock);
+
finish:
if (fail) {
"Failed to record kext %s as a candidate for inclusion in prelinked kernel.",
kextIdentifier->getCStringNoCopy());
}
- OSSafeRelease(kextIdentifierSymbol);
+ OSSafeReleaseNULL(kextIdentifierSymbol);
return;
}
if (!sKxldContext) {
kxldResult = kxld_create_context(&sKxldContext, &kern_allocate,
&kxld_log_callback, /* Flags */ (KXLDFlags) 0,
- /* cputype */ 0, /* cpusubtype */ 0);
+ /* cputype */ 0, /* cpusubtype */ 0, /* page size */ 0);
if (kxldResult) {
OSKextLog(this,
kOSKextLogErrorLevel |
goto finish;
}
+ pendingPgoHead.next = &pendingPgoHead;
+ pendingPgoHead.prev = &pendingPgoHead;
+
+ uuid_generate(instance_uuid);
+ account = IONew(OSKextAccount, 1);
+ if (!account) {
+ result = KERN_MEMORY_ERROR;
+ goto finish;
+ }
+ bzero(account, sizeof(*account));
+ account->loadTag = kmod_info->id;
+ account->site.refcount = 0;
+ account->site.flags = VM_TAG_KMOD;
+ account->kext = this;
+
flags.loaded = true;
/* Add the kext to the list of loaded kexts and update the kmod_info
#else
jettisonLinkeditSegment();
#endif /* CONFIG_DTRACE */
+
+#if !VM_MAPPED_KEXTS
+ /* If there is a page (or more) worth of padding after the end
+ * of the last data section but before the end of the data segment
+ * then free it in the same manner the LinkeditSegment is freed
+ */
+ jettisonDATASegmentPadding();
+#endif
}
loaded:
}
size = 1 + strlen(string);
- result = (char *)kalloc(size);
+ result = (char *)kalloc_tag(size, VM_KERN_MEMORY_OSKEXT);
if (!result) {
goto finish;
}
/*********************************************************************
*
*********************************************************************/
+
+/*********************************************************************
+* Finds the named section within the named segment of this kext's
+* linked executable. Returns a pointer into the in-memory Mach-O
+* headers (not a copy), or NULL if segment/section is not present.
+* NOTE(review): linkedExecutable is dereferenced without a NULL
+* check -- callers are presumably expected to ensure the kext has a
+* linked executable first; confirm at call sites.
+*********************************************************************/
+kernel_section_t *
+OSKext::lookupSection(const char *segname, const char *secname)
+{
+ kernel_section_t * found_section = NULL;
+ kernel_mach_header_t * mh = NULL;
+ kernel_segment_command_t * seg = NULL;
+ kernel_section_t * sec = NULL;
+
+ mh = (kernel_mach_header_t *)linkedExecutable->getBytesNoCopy();
+
+ /* Outer loop: segments; inner loop: sections of the matching segment. */
+ for (seg = firstsegfromheader(mh); seg != NULL; seg = nextsegfromheader(mh, seg)) {
+
+ if (0 != strcmp(seg->segname, segname)) {
+ continue;
+ }
+
+ for (sec = firstsect(seg); sec != NULL; sec = nextsect(seg, sec)) {
+
+ if (0 == strcmp(sec->sectname, secname)) {
+ found_section = sec;
+ goto out;
+ }
+ }
+ }
+
+ out:
+ return found_section;
+}
+
+/*********************************************************************
+*
+*********************************************************************/
+
OSReturn
-OSKext::slidePrelinkedExecutable()
-{
- OSReturn result = kOSKextReturnBadData;
- kernel_mach_header_t * mh = NULL;
- kernel_segment_command_t * seg = NULL;
- kernel_segment_command_t * linkeditSeg = NULL;
- kernel_section_t * sec = NULL;
- char * linkeditBase = NULL;
- bool haveLinkeditBase = false;
- char * relocBase = NULL;
- bool haveRelocBase = false;
- struct dysymtab_command * dysymtab = NULL;
- struct symtab_command * symtab = NULL;
- kernel_nlist_t * sym = NULL;
- struct relocation_info * reloc = NULL;
- uint32_t i = 0;
- int reloc_size;
- vm_offset_t new_kextsize;
+OSKext::slidePrelinkedExecutable(bool doCoalesedSlides)
+{
+ OSReturn result = kOSKextReturnBadData;
+ kernel_mach_header_t * mh = NULL;
+ kernel_segment_command_t * seg = NULL;
+ kernel_segment_command_t * linkeditSeg = NULL;
+ kernel_section_t * sec = NULL;
+ char * linkeditBase = NULL;
+ bool haveLinkeditBase = false;
+ char * relocBase = NULL;
+ bool haveRelocBase = false;
+ struct dysymtab_command * dysymtab = NULL;
+ struct linkedit_data_command * segmentSplitInfo = NULL;
+ struct symtab_command * symtab = NULL;
+ kernel_nlist_t * sym = NULL;
+ struct relocation_info * reloc = NULL;
+ uint32_t i = 0;
+ int reloc_size;
+ vm_offset_t new_kextsize;
if (linkedExecutable == NULL || vm_kernel_slide == 0) {
result = kOSReturnSuccess;
}
mh = (kernel_mach_header_t *)linkedExecutable->getBytesNoCopy();
+ segmentSplitInfo = (struct linkedit_data_command *) getcommandfromheader(mh, LC_SEGMENT_SPLIT_INFO);
for (seg = firstsegfromheader(mh); seg != NULL; seg = nextsegfromheader(mh, seg)) {
if (!seg->vmaddr) {
seg->vmaddr += vm_kernel_slide;
#if KASLR_KEXT_DEBUG
- IOLog("kaslr: segname %s unslid 0x%lx slid 0x%lx \n",
+ IOLog("kaslr: segname %s unslid 0x%lx slid 0x%lx \n",
seg->segname,
(unsigned long)VM_KERNEL_UNSLIDE(seg->vmaddr),
(unsigned long)seg->vmaddr);
sec->addr += vm_kernel_slide;
#if KASLR_KEXT_DEBUG
- IOLog("kaslr: sectname %s unslid 0x%lx slid 0x%lx \n",
+ IOLog("kaslr: sectname %s unslid 0x%lx slid 0x%lx \n",
sec->sectname,
(unsigned long)VM_KERNEL_UNSLIDE(sec->addr),
(unsigned long)sec->addr);
symtab = (struct symtab_command *) getcommandfromheader(mh, LC_SYMTAB);
- if (symtab != NULL) {
+ if (symtab != NULL && doCoalesedSlides == false) {
/* Some pseudo-kexts have symbol tables without segments.
* Ignore them. */
if (symtab->nsyms > 0 && haveLinkeditBase) {
}
}
}
-
- if (dysymtab != NULL) {
+
+ if (dysymtab != NULL && doCoalesedSlides == false) {
if (dysymtab->nextrel > 0) {
OSKextLog(this,
kOSKextLogErrorLevel | kOSKextLogLoadFlag |
/* We should free these relocations, not just delete the reference to them.
* <rdar://problem/10535549> Free relocations from PIE kexts.
+ *
+ * For now, we do not free LINKEDIT for kexts with split segments.
*/
new_kextsize = round_page(kmod_info->size - reloc_size);
-
- if ((kmod_info->size - new_kextsize) > PAGE_SIZE) {
+ if (((kmod_info->size - new_kextsize) > PAGE_SIZE) && (!segmentSplitInfo)) {
vm_offset_t endofkext = kmod_info->address + kmod_info->size;
vm_offset_t new_endofkext = kmod_info->address + new_kextsize;
vm_offset_t endofrelocInfo = (vm_offset_t) (((uint8_t *)reloc) + reloc_size);
}
if (isPrelinked()) {
- result = slidePrelinkedExecutable();
- if (result != kOSReturnSuccess) {
- goto finish;
- }
goto register_kmod;
}
+ /* <rdar://problem/21444003> all callers must be entitled */
+ if (FALSE == IOTaskHasEntitlement(current_task(), "com.apple.rootless.kext-management")) {
+ OSKextLog(this,
+ kOSKextLogErrorLevel | kOSKextLogLoadFlag,
+ "Not entitled to link kext '%s'",
+ getIdentifierCString());
+ result = kOSKextReturnNotPrivileged;
+ goto finish;
+ }
+
theExecutable = getExecutable();
if (!theExecutable) {
if (declaresExecutable()) {
goto finish;
}
- kxlddeps = (KXLDDependency *)kalloc(num_kxlddeps * sizeof(*kxlddeps));
+ kxlddeps = (KXLDDependency *)kalloc_tag(num_kxlddeps * sizeof(*kxlddeps), VM_KERN_MEMORY_OSKEXT);
if (!kxlddeps) {
OSKextLog(this,
kOSKextLogErrorLevel |
/* Whip up a fake kmod_info entry for the interface kext.
*/
- kmod_info = (kmod_info_t *)kalloc(sizeof(kmod_info_t));
+ kmod_info = (kmod_info_t *)kalloc_tag(sizeof(kmod_info_t), VM_KERN_MEMORY_OSKEXT);
if (!kmod_info) {
result = KERN_MEMORY_ERROR;
goto finish;
*/
num_kmod_refs = getNumDependencies();
if (num_kmod_refs) {
- kmod_info->reference_list = (kmod_reference_t *)kalloc(
- num_kmod_refs * sizeof(kmod_reference_t));
+ kmod_info->reference_list = (kmod_reference_t *)kalloc_tag(
+ num_kmod_refs * sizeof(kmod_reference_t), VM_KERN_MEMORY_OSKEXT);
if (!kmod_info->reference_list) {
result = KERN_MEMORY_ERROR;
goto finish;
(unsigned)kmod_info->id);
}
- result = setVMProtections();
+ /* if prelinked, VM protections are already set */
+ result = setVMAttributes(!isPrelinked(), true);
if (result != KERN_SUCCESS) {
goto finish;
}
+#if KASAN
+ kasan_load_kext((vm_offset_t)linkedExecutable->getBytesNoCopy(),
+ linkedExecutable->getLength(), getIdentifierCString());
+#else
+ if (lookupSection(KASAN_GLOBAL_SEGNAME, KASAN_GLOBAL_SECTNAME)) {
+ OSKextLog(this,
+ kOSKextLogErrorLevel | kOSKextLogLoadFlag,
+ "KASAN: cannot load KASAN-ified kext %s on a non-KASAN kernel\n",
+ getIdentifierCString()
+ );
+ result = KERN_FAILURE;
+ goto finish;
+ }
+#endif
+
result = kOSReturnSuccess;
finish:
- OSSafeRelease(linkDependencies);
+ OSSafeReleaseNULL(linkDependencies);
/* Clear up locally allocated dependency info.
*/
vm_size_t linkeditsize, kextsize;
OSData * data = NULL;
- /* 16K_XXX: To Remove */
- /* We don't currently guarantee alignment greater than 4KB for kext
- * segments, so we cannot always jettison __LINKEDIT cleanly, so let
- * it be for now.
- */
- if (!TEST_PAGE_SIZE_4K)
- return;
-
#if NO_KEXTD
/* We can free symbol tables for all embedded kexts because we don't
* support runtime kext linking.
return;
}
+/*********************************************************************
+* If there are whole pages that are unused between the last section
+* of the DATA segment and the end of the DATA segment then we can free
+* them
+*********************************************************************/
void
OSKext::jettisonDATASegmentPadding(void)
{
    kernel_mach_header_t * mh;
    kernel_segment_command_t * dataSeg;
    kernel_section_t * sec, * lastSec;
    vm_offset_t dataSegEnd, lastSecEnd;
    vm_size_t padSize;

    mh = (kernel_mach_header_t *)kmod_info->address;

    /* No __DATA segment means there is nothing to trim. */
    dataSeg = getsegbynamefromheader(mh, SEG_DATA);
    if (dataSeg == NULL) {
        return;
    }

    /* Walk to the last section of __DATA; any pad pages follow it. */
    lastSec = NULL;
    sec = firstsect(dataSeg);
    while (sec != NULL) {
        lastSec = sec;
        sec = nextsect(dataSeg, sec);
    }

    if (lastSec == NULL) {
        return;
    }

    /* Only a page-aligned segment can be trimmed in whole pages. */
    if ((dataSeg->vmaddr != round_page(dataSeg->vmaddr)) ||
        (dataSeg->vmsize != round_page(dataSeg->vmsize))) {
        return;
    }

    dataSegEnd = dataSeg->vmaddr + dataSeg->vmsize;
    lastSecEnd = round_page(lastSec->addr + lastSec->size);

    if (dataSegEnd <= lastSecEnd) {
        return;
    }

    padSize = dataSegEnd - lastSecEnd;

    /* Release the unused whole pages between the (rounded-up) end of
     * the last section and the end of the segment. */
    if (padSize >= PAGE_SIZE) {
#if VM_MAPPED_KEXTS
        kext_free(lastSecEnd, padSize);
#else
        ml_static_mfree(lastSecEnd, padSize);
#endif
    }
}
+
/*********************************************************************
*********************************************************************/
void
* called only by loadExecutable()
*********************************************************************/
#if !VM_MAPPED_KEXTS
-#error Unrecognized architecture
-#else
+#if defined(__arm__) || defined(__arm64__)
static inline kern_return_t
OSKext_protect(
vm_map_t map,
vm_prot_t new_prot,
boolean_t set_max)
{
- if (start == end) { // 10538581
- return(KERN_SUCCESS);
- }
- return vm_map_protect(map, start, end, new_prot, set_max);
+#pragma unused(map)
+ assert(map == kernel_map); // we can handle KEXTs arising from the PRELINK segment and no others
+ assert(start <= end);
+ if (start >= end)
+ return KERN_SUCCESS; // Punt segments of length zero (e.g., headers) or less (i.e., blunders)
+ else if (set_max)
+ return KERN_SUCCESS; // Punt set_max, as there's no mechanism to record that state
+ else
+ return ml_static_protect(start, end - start, new_prot);
}
static inline kern_return_t
vm_prot_t access_type,
boolean_t user_wire)
{
- return vm_map_wire(map, start, end, access_type, user_wire);
+#pragma unused(map,start,end,access_type,user_wire)
+ return KERN_SUCCESS; // No-op as PRELINK kexts are cemented into physical memory at boot
}
+#else
+#error Unrecognized architecture
#endif
-
-OSReturn
-OSKext::setVMProtections(void)
+#else
static inline kern_return_t
OSKext_protect(
    vm_map_t map,
    vm_map_offset_t start,
    vm_map_offset_t end,
    vm_prot_t new_prot,
    boolean_t set_max)
{
    /* Zero-length ranges are treated as success, not an error.
     * <rdar://problem/10538581> */
    if (start == end) { // 10538581
        return(KERN_SUCCESS);
    }
    return vm_map_protect(map, start, end, new_prot, set_max);
}
+
static inline kern_return_t
OSKext_wire(
    vm_map_t map,
    vm_map_offset_t start,
    vm_map_offset_t end,
    vm_prot_t access_type,
    boolean_t user_wire)
{
    /* Wire with the kext VM tag so the pages are attributed to kexts
     * in wired-memory accounting. */
    return vm_map_wire_kernel(map, start, end, access_type, VM_KERN_MEMORY_KEXT, user_wire);
}
+#endif
+
+OSReturn
+OSKext::setVMAttributes(bool protect, bool wire)
+{
+ vm_map_t kext_map = NULL;
kernel_segment_command_t * seg = NULL;
vm_map_offset_t start = 0;
vm_map_offset_t end = 0;
OSReturn result = kOSReturnError;
- if (!kmod_info->address && !kmod_info->size) {
+ if (isInterface() || !declaresExecutable()) {
result = kOSReturnSuccess;
goto finish;
}
goto finish;
}
+#if !VM_MAPPED_KEXTS
+ if (getcommandfromheader((kernel_mach_header_t *)kmod_info->address, LC_SEGMENT_SPLIT_INFO)) {
+ /* This is a split kext in a prelinked kernelcache; we'll let the
+ * platform code take care of protecting it. It is already wired.
+ */
+ /* TODO: Should this still allow protections for the first segment
+ * to go through, in the event that we have a mix of split and
+ * unsplit kexts?
+ */
+ result = KERN_SUCCESS;
+ goto finish;
+ }
+#endif
+
/* Protect the headers as read-only; they do not need to be wired */
- result = OSKext_protect(kext_map, kmod_info->address,
- kmod_info->address + kmod_info->hdr_size, VM_PROT_READ, TRUE);
+ result = (protect) ? OSKext_protect(kext_map, kmod_info->address,
+ kmod_info->address + kmod_info->hdr_size, VM_PROT_READ, TRUE)
+ : KERN_SUCCESS;
if (result != KERN_SUCCESS) {
goto finish;
}
/* Set the VM protections and wire down each of the segments */
seg = firstsegfromheader((kernel_mach_header_t *)kmod_info->address);
while (seg) {
+
+#if __arm__
+ /* We build all ARM kexts, so we can ensure they are aligned */
+ assert((seg->vmaddr & PAGE_MASK) == 0);
+ assert((seg->vmsize & PAGE_MASK) == 0);
+#endif
+
start = round_page(seg->vmaddr);
end = trunc_page(seg->vmaddr + seg->vmsize);
- result = OSKext_protect(kext_map, start, end, seg->maxprot, TRUE);
- if (result != KERN_SUCCESS) {
- OSKextLog(this,
- kOSKextLogErrorLevel |
- kOSKextLogLoadFlag,
- "Kext %s failed to set maximum VM protections "
- "for segment %s - 0x%x.",
- getIdentifierCString(), seg->segname, (int)result);
- goto finish;
- }
+ if (protect) {
+ result = OSKext_protect(kext_map, start, end, seg->maxprot, TRUE);
+ if (result != KERN_SUCCESS) {
+ OSKextLog(this,
+ kOSKextLogErrorLevel |
+ kOSKextLogLoadFlag,
+ "Kext %s failed to set maximum VM protections "
+ "for segment %s - 0x%x.",
+ getIdentifierCString(), seg->segname, (int)result);
+ goto finish;
+ }
- result = OSKext_protect(kext_map, start, end, seg->initprot, FALSE);
- if (result != KERN_SUCCESS) {
- OSKextLog(this,
- kOSKextLogErrorLevel |
- kOSKextLogLoadFlag,
- "Kext %s failed to set initial VM protections "
- "for segment %s - 0x%x.",
- getIdentifierCString(), seg->segname, (int)result);
- goto finish;
+ result = OSKext_protect(kext_map, start, end, seg->initprot, FALSE);
+ if (result != KERN_SUCCESS) {
+ OSKextLog(this,
+ kOSKextLogErrorLevel |
+ kOSKextLogLoadFlag,
+ "Kext %s failed to set initial VM protections "
+ "for segment %s - 0x%x.",
+ getIdentifierCString(), seg->segname, (int)result);
+ goto finish;
+ }
}
- if (segmentShouldBeWired(seg)) {
+ if (segmentShouldBeWired(seg) && wire) {
result = OSKext_wire(kext_map, start, end, seg->initprot, FALSE);
if (result != KERN_SUCCESS) {
goto finish;
/* Verify that the start/stop function lies within the kext's address range.
*/
- if (address < kmod_info->address + kmod_info->hdr_size ||
- kmod_info->address + kmod_info->size <= address)
- {
- OSKextLog(this,
- kOSKextLogErrorLevel |
- kOSKextLogLoadFlag,
- "Kext %s module %s pointer is outside of kext range "
- "(%s %p - kext at %p-%p)..",
- getIdentifierCString(),
- whichOp,
- whichOp,
- (void *)VM_KERNEL_UNSLIDE(address),
- (void *)VM_KERNEL_UNSLIDE(kmod_info->address),
- (void *)(VM_KERNEL_UNSLIDE(kmod_info->address) + kmod_info->size));
- result = kOSKextReturnBadData;
- goto finish;
+ if (getcommandfromheader((kernel_mach_header_t *)kmod_info->address, LC_SEGMENT_SPLIT_INFO)) {
+ /* This will likely be how we deal with split kexts; walk the segments to
+ * check that the function lies inside one of the segments of this kext.
+ */
+ for (seg = firstsegfromheader((kernel_mach_header_t *)kmod_info->address);
+ seg != NULL;
+ seg = nextsegfromheader((kernel_mach_header_t *)kmod_info->address, seg)) {
+ if ((address >= seg->vmaddr) && address < (seg->vmaddr + seg->vmsize)) {
+ break;
+ }
+ }
+
+ if (!seg) {
+ OSKextLog(this,
+ kOSKextLogErrorLevel |
+ kOSKextLogLoadFlag,
+ "Kext %s module %s pointer is outside of kext range "
+ "(%s %p - kext starts at %p).",
+ getIdentifierCString(),
+ whichOp,
+ whichOp,
+ (void *)VM_KERNEL_UNSLIDE(address),
+ (void *)VM_KERNEL_UNSLIDE(kmod_info->address));
+ result = kOSKextReturnBadData;
+ goto finish;
+ }
+
+ seg = NULL;
+ } else {
+ if (address < kmod_info->address + kmod_info->hdr_size ||
+ kmod_info->address + kmod_info->size <= address)
+ {
+ OSKextLog(this,
+ kOSKextLogErrorLevel |
+ kOSKextLogLoadFlag,
+ "Kext %s module %s pointer is outside of kext range "
+ "(%s %p - kext at %p-%p).",
+ getIdentifierCString(),
+ whichOp,
+ whichOp,
+ (void *)VM_KERNEL_UNSLIDE(address),
+ (void *)VM_KERNEL_UNSLIDE(kmod_info->address),
+ (void *)(VM_KERNEL_UNSLIDE(kmod_info->address) + kmod_info->size));
+ result = kOSKextReturnBadData;
+ goto finish;
+ }
}
/* Only do these checks before calling the start function;
return true;
}
+/*********************************************************************
+*********************************************************************/
+static void
+OSKextLogKextInfo(OSKext *aKext, uint64_t address, uint64_t size, firehose_tracepoint_code_t code)
+{
+
+ uint64_t stamp = 0;
+ firehose_tracepoint_id_u trace_id;
+ struct firehose_trace_uuid_info_s uuid_info_s;
+ firehose_trace_uuid_info_t uuid_info = &uuid_info_s;
+ size_t uuid_info_len = sizeof(struct firehose_trace_uuid_info_s);
+ OSData *uuid_data;
+
+ stamp = firehose_tracepoint_time(firehose_activity_flags_default);
+ trace_id.ftid_value = FIREHOSE_TRACE_ID_MAKE(firehose_tracepoint_namespace_metadata, _firehose_tracepoint_type_metadata_kext, (firehose_tracepoint_flags_t)0, code);
+
+ uuid_data = aKext->copyUUID();
+ if (uuid_data) {
+ memcpy(uuid_info->ftui_uuid, uuid_data->getBytesNoCopy(), sizeof(uuid_info->ftui_uuid));
+ OSSafeReleaseNULL(uuid_data);
+ }
+
+ uuid_info->ftui_size = size;
+ uuid_info->ftui_address = VM_KERNEL_UNSLIDE(address);
+
+ firehose_trace_metadata(firehose_stream_metadata, trace_id, stamp, uuid_info, uuid_info_len);
+ return;
+}
+
/*********************************************************************
*********************************************************************/
OSReturn
flags.starting = 1;
+ // Drop a log message so logd can grab the needed information to decode this kext
+ OSKextLogKextInfo(this, kmod_info->address, kmod_info->size, firehose_tracepoint_code_load);
+
#if !CONFIG_STATIC_CPPINIT
result = OSRuntimeInitializeCPP(kmod_info, NULL);
if (result == KERN_SUCCESS) {
}
#endif
#endif // CONFIG_KEC_FIPS
-
result = startfunc(kmod_info, kmodStartData);
#if !CONFIG_STATIC_CPPINIT
{
OSReturn result = kOSReturnError;
kern_return_t (*stopfunc)(kmod_info_t *, void *);
-
+
if (!isStarted() || isInterface()) {
result = kOSReturnSuccess;
goto finish;
}
finish:
+ // Drop a log message so logd can update this kext's metadata
+ OSKextLogKextInfo(this, kmod_info->address, kmod_info->size, firehose_tracepoint_code_unload);
return result;
}
OSReturn
OSKext::unload(void)
{
- OSReturn result = kOSReturnError;
- unsigned int index;
- uint32_t num_kmod_refs = 0;
+ OSReturn result = kOSReturnError;
+ unsigned int index;
+ uint32_t num_kmod_refs = 0;
+ OSKextAccount * freeAccount;
if (!sUnloadEnabled) {
OSKextLog(this,
goto finish;
}
- if (hasOSMetaClassInstances()) {
+ if (!isLoaded()) {
+ result = kOSReturnSuccess;
+ goto finish;
+ }
+
+ if (isKernelComponent()) {
+ result = kOSKextReturnInvalidArgument;
+ goto finish;
+ }
+
+ if (metaClasses && !OSMetaClass::removeClasses(metaClasses)) {
OSKextLog(this,
kOSKextLogErrorLevel |
kOSKextLogLoadFlag | kOSKextLogKextBookkeepingFlag,
result = kOSKextReturnInUse;
goto finish;
}
-
- if (!isLoaded()) {
- result = kOSReturnSuccess;
- goto finish;
- }
-
- if (isKernelComponent()) {
- result = kOSKextReturnInvalidArgument;
- goto finish;
- }
/* Note that the kext is unloading before running any code that
* might be in the kext (request callbacks, module stop function).
"Kext %s unloading.",
getIdentifierCString());
+ {
+ struct list_head *p;
+ struct list_head *prev;
+ struct list_head *next;
+ for (p = pendingPgoHead.next; p != &pendingPgoHead; p = next) {
+ OSKextGrabPgoStruct *s = container_of(p, OSKextGrabPgoStruct, list_head);
+ s->err = OSKextGrabPgoDataLocked(this, s->metadata, instance_uuid, s->pSize, s->pBuffer, s->bufferSize);
+ prev = p->prev;
+ next = p->next;
+ prev->next = next;
+ next->prev = prev;
+ p->prev = p;
+ p->next = p;
+ IORecursiveLockWakeup(sKextLock, s, false);
+ }
+ }
+
+
/* Even if we don't call the stop function, we want to be sure we
* have no OSMetaClass references before unloading the kext executable
* from memory. OSMetaClasses may have pointers into the kext executable
notifyKextUnloadObservers(this);
+ freeAccount = NULL;
+ IOSimpleLockLock(sKextAccountsLock);
+ account->kext = NULL;
+ if (account->site.tag) account->site.flags |= VM_TAG_UNLOAD;
+ else freeAccount = account;
+ IOSimpleLockUnlock(sKextAccountsLock);
+ if (freeAccount) IODelete(freeAccount, OSKextAccount, 1);
+
/* Unwire and free the linked executable.
*/
if (linkedExecutable) {
+#if KASAN
+ kasan_unload_kext((vm_offset_t)linkedExecutable->getBytesNoCopy(), linkedExecutable->getLength());
+#endif
+
#if VM_MAPPED_KEXTS
if (!isInterface()) {
kernel_segment_command_t *seg = NULL;
result = kOSReturnSuccess;
finish:
- OSSafeRelease(loadRequest);
+ OSSafeReleaseNULL(loadRequest);
return result;
}
*********************************************************************/
extern "C" {
+IOReturn OSKextSystemSleepOrWake(UInt32 messageType);
IOReturn OSKextSystemSleepOrWake(UInt32 messageType)
{
IORecursiveLockLock(sKextInnerLock);
finish:
IORecursiveLockUnlock(sKextLock);
- OSSafeRelease(prelinkRequest);
- OSSafeRelease(kextIterator);
+ OSSafeReleaseNULL(prelinkRequest);
+ OSSafeReleaseNULL(kextIterator);
return;
}
getIdentifierCString());
}
- OSSafeRelease(localLoopStack);
- OSSafeRelease(libraryIterator);
+ OSSafeReleaseNULL(localLoopStack);
+ OSSafeReleaseNULL(libraryIterator);
return result;
}
finish:
- OSSafeRelease(classIterator);
+ OSSafeReleaseNULL(classIterator);
return result;
}
theKext->reportOSMetaClassInstances(msgLogSpec);
finish:
- OSSafeRelease(theKext);
+ OSSafeReleaseNULL(theKext);
return;
}
}
finish:
- OSSafeRelease(classIterator);
+ OSSafeReleaseNULL(classIterator);
return;
}
} else if (predicate->isEqualTo(kKextRequestPredicateSendResource)) {
result = OSKext::dispatchResource(requestDict);
- } else if (predicate->isEqualTo(kKextRequestPredicateGetLoaded)) {
+ } else if (predicate->isEqualTo(kKextRequestPredicateGetUUIDByAddress)) {
+
+ OSNumber *lookupNum = NULL;
+ lookupNum = OSDynamicCast(OSNumber,
+ _OSKextGetRequestArgument(requestDict,
+ kKextRequestArgumentLookupAddressKey));
+
+ responseObject = OSKext::copyKextUUIDForAddress(lookupNum);
+ if (responseObject) {
+ result = kOSReturnSuccess;
+ } else {
+ goto finish;
+ }
+
+ } else if (predicate->isEqualTo(kKextRequestPredicateGetLoaded) ||
+ predicate->isEqualTo(kKextRequestPredicateGetLoadedByUUID)) {
OSBoolean * delayAutounloadBool = NULL;
OSObject * infoKeysRaw = NULL;
OSArray * infoKeys = NULL;
}
}
- responseObject = OSKext::copyLoadedKextInfo(kextIdentifiers, infoKeys);
+ if (predicate->isEqualTo(kKextRequestPredicateGetLoaded)) {
+ responseObject = OSKext::copyLoadedKextInfo(kextIdentifiers, infoKeys);
+ }
+ else if (predicate->isEqualTo(kKextRequestPredicateGetLoadedByUUID)) {
+ responseObject = OSKext::copyLoadedKextInfoByUUID(kextIdentifiers, infoKeys);
+ }
if (!responseObject) {
result = kOSKextReturnInternalError;
} else {
/* This kmem_alloc sets the return value of the function.
*/
kmem_result = kmem_alloc(kernel_map, (vm_offset_t *)&buffer,
- round_page(responseLength));
+ round_page(responseLength), VM_KERN_MEMORY_OSKEXT);
if (kmem_result != KERN_SUCCESS) {
OSKextLog(/* kext */ NULL,
kOSKextLogErrorLevel |
IORecursiveLockUnlock(sKextLock);
- OSSafeRelease(parsedXML);
- OSSafeRelease(errorString);
- OSSafeRelease(responseObject);
- OSSafeRelease(serializer);
- OSSafeRelease(logInfoArray);
+ OSSafeReleaseNULL(parsedXML);
+ OSSafeReleaseNULL(errorString);
+ OSSafeReleaseNULL(responseObject);
+ OSSafeReleaseNULL(serializer);
+ OSSafeReleaseNULL(logInfoArray);
+
+ return result;
+}
+
+
+// #include <InstrProfiling.h>
+extern "C" {
+
+ uint64_t __llvm_profile_get_size_for_buffer_internal(const char *DataBegin,
+ const char *DataEnd,
+ const char *CountersBegin,
+ const char *CountersEnd ,
+ const char *NamesBegin,
+ const char *NamesEnd);
+ int __llvm_profile_write_buffer_internal(char *Buffer,
+ const char *DataBegin,
+ const char *DataEnd,
+ const char *CountersBegin,
+ const char *CountersEnd ,
+ const char *NamesBegin,
+ const char *NamesEnd);
+}
+
+
/*********************************************************************
* Append one "key=value\0" pair at *position in pBuffer, advancing
* *position unconditionally so that a NULL pBuffer performs a pure
* sizing pass. The pair is only written -- and *num_pairs (if given)
* only incremented -- when the whole pair fits within bufferSize.
*********************************************************************/
static
void OSKextPgoMetadataPut(char *pBuffer,
                          size_t *position,
                          size_t bufferSize,
                          uint32_t *num_pairs,
                          const char *key,
                          const char *value)
{
    size_t strlen_key = strlen(key);
    size_t strlen_value = strlen(value);
    /* key + '=' + value + NUL, reusing the lengths computed above. */
    size_t len = strlen_key + 1 + strlen_value + 1;
    size_t start = *position;

    *position += len;
    if (pBuffer && bufferSize && *position <= bufferSize) {
        /* Compute the cursor only when actually writing; avoids
         * NULL-pointer arithmetic during the sizing pass. */
        char *pos = pBuffer + start;
        memcpy(pos, key, strlen_key); pos += strlen_key;
        *(pos++) = '=';
        memcpy(pos, value, strlen_value); pos += strlen_value;
        *(pos++) = 0;
        if (num_pairs) {
            (*num_pairs)++;
        }
    }
}
+
+
/*********************************************************************
* Sizing-pass companion of OSKextPgoMetadataPut: advance *position by
* the worst-case size of a "key=value\0" pair whose value may be up
* to value_max characters long.
*********************************************************************/
static
void OSKextPgoMetadataPutMax(size_t *position, const char *key, size_t value_max)
{
    const size_t key_len = strlen(key);
    *position += key_len + 1 /* '=' */ + value_max + 1 /* NUL */;
}
+
+
/*********************************************************************
* Write (or, when pBuffer is NULL, just measure) the metadata pairs
* for a kext's PGO dump: INSTANCE, UUID, TIMESTAMP, NAME and VERSION.
* The NULL-buffer sizing pass must reserve worst-case room for every
* value the write pass might later produce.
*********************************************************************/
static
void OSKextPgoMetadataPutAll(OSKext *kext,
                             uuid_t instance_uuid,
                             char *pBuffer,
                             size_t *position,
                             size_t bufferSize,
                             uint32_t *num_pairs)
{
    _static_assert_1_arg(sizeof(clock_sec_t) % 2 == 0);
    /* Decimal digits needed for clock_sec_t: log10(2^16) ≈ 4.82, so
     * allow 5 digits per 16 bits of the type. */
    const size_t max_secs_string_size = 5 * sizeof(clock_sec_t)/2;
    const size_t max_timestamp_string_size = max_secs_string_size + 1 + 6;

    if (!pBuffer) {
        /* Sizing pass: 36 chars is the length of an unparsed UUID. */
        OSKextPgoMetadataPutMax(position, "INSTANCE", 36);
        OSKextPgoMetadataPutMax(position, "UUID", 36);
        OSKextPgoMetadataPutMax(position, "TIMESTAMP", max_timestamp_string_size);
    } else {
        uuid_string_t instance_uuid_string;
        uuid_unparse(instance_uuid, instance_uuid_string);
        OSKextPgoMetadataPut(pBuffer, position, bufferSize, num_pairs,
                             "INSTANCE", instance_uuid_string);

        OSData *uuid_data;
        uuid_t uuid;
        uuid_string_t uuid_string;
        uuid_data = kext->copyUUID();
        if (uuid_data) {
            /* UUID pair is simply omitted if the kext has no UUID. */
            memcpy(uuid, uuid_data->getBytesNoCopy(), sizeof(uuid));
            OSSafeReleaseNULL(uuid_data);
            uuid_unparse(uuid, uuid_string);
            OSKextPgoMetadataPut(pBuffer, position, bufferSize, num_pairs,
                                 "UUID", uuid_string);
        }

        /* Wall-clock timestamp formatted as "secs.usecs". */
        clock_sec_t secs;
        clock_usec_t usecs;
        clock_get_calendar_microtime(&secs, &usecs);
        assert(usecs < 1000000);
        char timestamp[max_timestamp_string_size + 1];
        _static_assert_1_arg(sizeof(long) >= sizeof(clock_sec_t));
        snprintf(timestamp, sizeof(timestamp), "%lu.%06d", (unsigned long)secs, (int)usecs);
        OSKextPgoMetadataPut(pBuffer, position, bufferSize, num_pairs,
                             "TIMESTAMP", timestamp);
    }

    OSKextPgoMetadataPut(pBuffer, position, bufferSize, num_pairs,
                         "NAME", kext->getIdentifierCString());

    char versionCString[kOSKextVersionMaxLength];
    OSKextVersionGetString(kext->getVersion(), versionCString, kOSKextVersionMaxLength);
    OSKextPgoMetadataPut(pBuffer, position, bufferSize, num_pairs,
                         "VERSION", versionCString);

}
+
+static
+size_t OSKextPgoMetadataSize(OSKext *kext)
+{
+ size_t position = 0;
+ uuid_t fakeuuid = {};
+ OSKextPgoMetadataPutAll(kext, fakeuuid, NULL, &position, 0, NULL);
+ return position;
+}
+
/*********************************************************************
* Copy out raw LLVM profile (PGO) data for one kext, optionally
* followed by key=value metadata and a pgo_metadata_footer.
* Returns 0 on success; ENOTSUP if the kext is not instrumented,
* ERANGE if the caller's buffer is too small, EIO if the profile
* writer fails. A NULL pBuffer performs a size-only query via pSize.
* Called with sKextLock held (see OSKextGrabPgoData / OSKext::unload).
*********************************************************************/
int OSKextGrabPgoDataLocked(OSKext *kext,
                            bool metadata,
                            uuid_t instance_uuid,
                            uint64_t *pSize,
                            char *pBuffer,
                            uint64_t bufferSize)
{
    int err = 0;

    kernel_section_t *sect_prf_data = NULL;
    kernel_section_t *sect_prf_name = NULL;
    kernel_section_t *sect_prf_cnts = NULL;
    uint64_t size;
    size_t metadata_size = 0;

    /* An instrumented kext carries all three __llvm_prf_* sections. */
    sect_prf_data = kext->lookupSection("__DATA", "__llvm_prf_data");
    sect_prf_name = kext->lookupSection("__DATA", "__llvm_prf_name");
    sect_prf_cnts = kext->lookupSection("__DATA", "__llvm_prf_cnts");

    if (!sect_prf_data || !sect_prf_name || !sect_prf_cnts) {
        err = ENOTSUP;
        goto out;
    }

    /* Ask the profile runtime how large the raw dump will be. */
    size = __llvm_profile_get_size_for_buffer_internal(
        (const char*) sect_prf_data->addr, (const char*) sect_prf_data->addr + sect_prf_data->size,
        (const char*) sect_prf_cnts->addr, (const char*) sect_prf_cnts->addr + sect_prf_cnts->size,
        (const char*) sect_prf_name->addr, (const char*) sect_prf_name->addr + sect_prf_name->size);

    if (metadata) {
        metadata_size = OSKextPgoMetadataSize(kext);
        size += metadata_size;
        size += sizeof(pgo_metadata_footer);
    }


    /* Report the total even for size-only queries. */
    if (pSize) {
        *pSize = size;
    }

    if (pBuffer && bufferSize) {
        if (bufferSize < size) {
            err = ERANGE;
            goto out;
        }

        err = __llvm_profile_write_buffer_internal(
            pBuffer,
            (const char*) sect_prf_data->addr, (const char*) sect_prf_data->addr + sect_prf_data->size,
            (const char*) sect_prf_cnts->addr, (const char*) sect_prf_cnts->addr + sect_prf_cnts->size,
            (const char*) sect_prf_name->addr, (const char*) sect_prf_name->addr + sect_prf_name->size);

        if (err) {
            err = EIO;
            goto out;
        }

        if (metadata) {
            /* Buffer layout:
             *   [profile data][metadata pairs][pgo_metadata_footer]
             * with the footer flush against the end of the buffer. */
            char *end_of_buffer = pBuffer + size;
            struct pgo_metadata_footer *footerp = (struct pgo_metadata_footer *) (end_of_buffer - sizeof(struct pgo_metadata_footer));
            char *metadata_buffer = end_of_buffer - (sizeof(struct pgo_metadata_footer) + metadata_size);

            size_t metadata_position = 0;
            uint32_t num_pairs = 0;
            OSKextPgoMetadataPutAll(kext, instance_uuid, metadata_buffer, &metadata_position, metadata_size, &num_pairs);
            /* Zero-pad any slack between the written pairs and the
             * footer. */
            while (metadata_position < metadata_size) {
                metadata_buffer[metadata_position++] = 0;
            }

            /* Footer fields are big-endian on the wire. */
            struct pgo_metadata_footer footer;
            footer.magic = htonl(0x6d657461);
            footer.number_of_pairs = htonl( num_pairs );
            footer.offset_to_pairs = htonl( sizeof(struct pgo_metadata_footer) + metadata_size );
            memcpy(footerp, &footer, sizeof(footer));
        }

    }

out:
    return err;
}
+
+
/*********************************************************************
* Grab PGO data for the kext with the given UUID. If wait_for_unload
* is set, queue this request on the kext's pendingPgoHead list and
* sleep until OSKext::unload() services it (the unload path walks
* pendingPgoHead, fills each request and wakes the sleeper);
* otherwise the data is grabbed immediately under sKextLock.
* Returns 0, ENOENT (no such kext), EINTR (sleep aborted before
* unload), or an error from OSKextGrabPgoDataLocked.
*********************************************************************/
int
OSKextGrabPgoData(uuid_t uuid,
                  uint64_t *pSize,
                  char *pBuffer,
                  uint64_t bufferSize,
                  int wait_for_unload,
                  int metadata)
{
    int err = 0;
    OSKext *kext = NULL;


    IORecursiveLockLock(sKextLock);

    kext = OSKext::lookupKextWithUUID(uuid);
    if (!kext) {
        err = ENOENT;
        goto out;
    }

    if (wait_for_unload) {
        OSKextGrabPgoStruct s;

        s.metadata = metadata;
        s.pSize = pSize;
        s.pBuffer = pBuffer;
        s.bufferSize = bufferSize;
        /* EINTR stands unless the unload path overwrites it. */
        s.err = EINTR;

        /* Link s at the head of the kext's pending-PGO list. */
        struct list_head *prev = &kext->pendingPgoHead;
        struct list_head *next = kext->pendingPgoHead.next;

        s.list_head.prev = prev;
        s.list_head.next = next;

        prev->next = &s.list_head;
        next->prev = &s.list_head;

        /* Drop our reference; unload finds s through the list. */
        kext->release();
        kext = NULL;

        IORecursiveLockSleep(sKextLock, &s, THREAD_ABORTSAFE);

        /* Unlink s. If unload already detached it, s was left
         * self-linked, so this is a harmless no-op. */
        prev = s.list_head.prev;
        next = s.list_head.next;

        prev->next = next;
        next->prev = prev;

        err = s.err;

    } else {
        err = OSKextGrabPgoDataLocked(kext, metadata, kext->instance_uuid, pSize, pBuffer, bufferSize);
    }

 out:
    if (kext) {
        kext->release();
    }

    IORecursiveLockUnlock(sKextLock);

    return err;
}
+
/* Take sKextLock ahead of a PGO counter reset; paired with
 * OSKextResetPgoCountersUnlock(). */
void
OSKextResetPgoCountersLock()
{
    IORecursiveLockLock(sKextLock);
}
+
/* Release sKextLock after a PGO counter reset; paired with
 * OSKextResetPgoCountersLock(). */
void
OSKextResetPgoCountersUnlock()
{
    IORecursiveLockUnlock(sKextLock);
}
+
+
+extern unsigned int not_in_kdp;
+
+void
+OSKextResetPgoCounters()
+{
+ assert(!not_in_kdp);
+ uint32_t count = sLoadedKexts->getCount();
+ for (uint32_t i = 0; i < count; i++) {
+ OSKext *kext = OSDynamicCast(OSKext, sLoadedKexts->getObject(i));
+ kernel_section_t *sect_prf_cnts = kext->lookupSection("__DATA", "__llvm_prf_cnts");
+ if (!sect_prf_cnts) {
+ continue;
+ }
+ memset((void*)sect_prf_cnts->addr, 0, sect_prf_cnts->size);
+ }
+}
+
+OSDictionary *
+OSKext::copyLoadedKextInfoByUUID(
+ OSArray * kextIdentifiers,
+ OSArray * infoKeys)
+{
+ OSDictionary * result = NULL;
+ OSDictionary * kextInfo = NULL; // must release
+ uint32_t count, i;
+ uint32_t idCount = 0;
+ uint32_t idIndex = 0;
+
+ IORecursiveLockLock(sKextLock);
+
+#if CONFIG_MACF
+ /* Is the calling process allowed to query kext info? */
+ if (current_task() != kernel_task) {
+ int macCheckResult = 0;
+ kauth_cred_t cred = NULL;
+
+ cred = kauth_cred_get_with_ref();
+ macCheckResult = mac_kext_check_query(cred);
+ kauth_cred_unref(&cred);
+
+ if (macCheckResult != 0) {
+ OSKextLog(/* kext */ NULL,
+ kOSKextLogErrorLevel | kOSKextLogLoadFlag,
+ "Failed to query kext info (MAC policy error 0x%x).",
+ macCheckResult);
+ goto finish;
+ }
+ }
+#endif
+
+ /* Empty list of UUIDs is equivalent to no list (get all).
+ */
+ if (kextIdentifiers && !kextIdentifiers->getCount()) {
+ kextIdentifiers = NULL;
+ } else if (kextIdentifiers) {
+ idCount = kextIdentifiers->getCount();
+ }
+
+ /* Same for keys.
+ */
+ if (infoKeys && !infoKeys->getCount()) {
+ infoKeys = NULL;
+ }
+
+ count = sLoadedKexts->getCount();
+ result = OSDictionary::withCapacity(count);
+ if (!result) {
+ goto finish;
+ }
+
+ for (i = 0; i < count; i++) {
+ OSKext *thisKext = NULL; // do not release
+ Boolean includeThis = true;
+ uuid_t thisKextUUID;
+ OSData *uuid_data;
+ uuid_string_t uuid_key;
+
+ if (kextInfo) {
+ kextInfo->release();
+ kextInfo = NULL;
+ }
+
+ thisKext = OSDynamicCast(OSKext, sLoadedKexts->getObject(i));
+ if (!thisKext) {
+ continue;
+ }
+
+ uuid_data = thisKext->copyUUID();
+ if (!uuid_data) {
+ continue;
+ }
+
+ memcpy(&thisKextUUID, uuid_data->getBytesNoCopy(), sizeof(thisKextUUID));
+ OSSafeReleaseNULL(uuid_data);
+
+ uuid_unparse(thisKextUUID, uuid_key);
+
+ /* Skip current kext if we have a list of UUIDs and
+ * it isn't in the list.
+ */
+ if (kextIdentifiers) {
+ includeThis = false;
+
+ for (idIndex = 0; idIndex < idCount; idIndex++) {
+ const OSString* wantedUUID = OSDynamicCast(OSString,
+ kextIdentifiers->getObject(idIndex));
+
+ uuid_t uuid;
+ uuid_parse(wantedUUID->getCStringNoCopy(), uuid);
+
+ if (0 == uuid_compare(uuid, thisKextUUID)) {
+ includeThis = true;
+ break;
+ }
+
+ }
+ }
+
+ if (!includeThis) {
+ continue;
+ }
+
+ kextInfo = thisKext->copyInfo(infoKeys);
+ if (kextInfo) {
+ result->setObject(uuid_key, kextInfo);
+ }
+ }
+
+finish:
+ IORecursiveLockUnlock(sKextLock);
+
+ if (kextInfo) kextInfo->release();
return result;
}
IORecursiveLockLock(sKextLock);
+#if CONFIG_MACF
+ /* Is the calling process allowed to query kext info? */
+ if (current_task() != kernel_task) {
+ int macCheckResult = 0;
+ kauth_cred_t cred = NULL;
+
+ cred = kauth_cred_get_with_ref();
+ macCheckResult = mac_kext_check_query(cred);
+ kauth_cred_unref(&cred);
+
+ if (macCheckResult != 0) {
+ OSKextLog(/* kext */ NULL,
+ kOSKextLogErrorLevel | kOSKextLogLoadFlag,
+ "Failed to query kext info (MAC policy error 0x%x).",
+ macCheckResult);
+ goto finish;
+ }
+ }
+#endif
+
/* Empty list of bundle ids is equivalent to no list (get all).
*/
if (kextIdentifiers && !kextIdentifiers->getCount()) {
if (!result) {
goto finish;
}
+
+#if 0
+ OSKextLog(/* kext */ NULL,
+ kOSKextLogErrorLevel |
+ kOSKextLogGeneralFlag,
+ "kaslr: vm_kernel_slide 0x%lx \n",
+ vm_kernel_slide);
+ OSKextLog(/* kext */ NULL,
+ kOSKextLogErrorLevel |
+ kOSKextLogGeneralFlag,
+ "kaslr: vm_kernel_stext 0x%lx vm_kernel_etext 0x%lx \n",
+ vm_kernel_stext, vm_kernel_etext);
+ OSKextLog(/* kext */ NULL,
+ kOSKextLogErrorLevel |
+ kOSKextLogGeneralFlag,
+ "kaslr: vm_kernel_base 0x%lx vm_kernel_top 0x%lx \n",
+ vm_kernel_base, vm_kernel_top);
+ OSKextLog(/* kext */ NULL,
+ kOSKextLogErrorLevel |
+ kOSKextLogGeneralFlag,
+ "kaslr: vm_kext_base 0x%lx vm_kext_top 0x%lx \n",
+ vm_kext_base, vm_kext_top);
+ OSKextLog(/* kext */ NULL,
+ kOSKextLogErrorLevel |
+ kOSKextLogGeneralFlag,
+ "kaslr: vm_prelink_stext 0x%lx vm_prelink_etext 0x%lx \n",
+ vm_prelink_stext, vm_prelink_etext);
+ OSKextLog(/* kext */ NULL,
+ kOSKextLogErrorLevel |
+ kOSKextLogGeneralFlag,
+ "kaslr: vm_prelink_sinfo 0x%lx vm_prelink_einfo 0x%lx \n",
+ vm_prelink_sinfo, vm_prelink_einfo);
+ OSKextLog(/* kext */ NULL,
+ kOSKextLogErrorLevel |
+ kOSKextLogGeneralFlag,
+ "kaslr: vm_slinkedit 0x%lx vm_elinkedit 0x%lx \n",
+ vm_slinkedit, vm_elinkedit);
+#endif
+
for (i = 0; i < count; i++) {
OSKext * thisKext = NULL; // do not release
Boolean includeThis = true;
OSDictionary * result = NULL;
bool success = false;
OSData * headerData = NULL; // must release
+ OSData * logData = NULL; // must release
OSNumber * cpuTypeNumber = NULL; // must release
OSNumber * cpuSubtypeNumber = NULL; // must release
OSString * versionString = NULL; // do not release
- uint32_t executablePathCStringSize = 0;
+ uint32_t executablePathCStringSize = 0;
char * executablePathCString = NULL; // must release
OSString * executablePathString = NULL; // must release
OSData * uuid = NULL; // must release
*/
if (!infoKeys ||
_OSArrayContainsCString(infoKeys, kOSBundleMachOHeadersKey) ||
+ _OSArrayContainsCString(infoKeys, kOSBundleLogStringsKey) ||
_OSArrayContainsCString(infoKeys, kOSBundleCPUTypeKey) ||
_OSArrayContainsCString(infoKeys, kOSBundleCPUSubtypeKey))
{
linkedExecutable->getBytesNoCopy();
#if !SECURE_KERNEL
+ // do not return macho header info on shipping iOS - 19095897
if (!infoKeys || _OSArrayContainsCString(infoKeys, kOSBundleMachOHeadersKey)) {
kernel_mach_header_t * temp_kext_mach_hdr;
struct load_command * lcp;
segp->filesize = 0;
}
}
+
#if 0
OSKextLog(/* kext */ NULL,
kOSKextLogErrorLevel |
VM_KERNEL_UNSLIDE(segp->vmaddr),
segp->vmsize, segp->nsects);
if ( (VM_KERNEL_IS_SLID(segp->vmaddr) == false) &&
- (VM_KERNEL_IS_KEXT(segp->vmaddr) == false) &&
- (VM_KERNEL_IS_PRELINKTEXT(segp->vmaddr) == false) &&
- (VM_KERNEL_IS_PRELINKINFO(segp->vmaddr) == false) &&
- (VM_KERNEL_IS_KEXT_LINKEDIT(segp->vmaddr) == false) ) {
+ (VM_KERNEL_IS_KEXT(segp->vmaddr) == false) &&
+ (VM_KERNEL_IS_PRELINKTEXT(segp->vmaddr) == false) &&
+ (VM_KERNEL_IS_PRELINKINFO(segp->vmaddr) == false) &&
+ (VM_KERNEL_IS_KEXT_LINKEDIT(segp->vmaddr) == false) ) {
OSKextLog(/* kext */ NULL,
kOSKextLogErrorLevel |
kOSKextLogGeneralFlag,
}
#endif
segp->vmaddr = VM_KERNEL_UNSLIDE(segp->vmaddr);
-
+
for (secp = firstsect(segp); secp != NULL; secp = nextsect(segp, secp)) {
secp->addr = VM_KERNEL_UNSLIDE(secp->addr);
}
- }
+ }
lcp = (struct load_command *)((caddr_t)lcp + lcp->cmdsize);
}
result->setObject(kOSBundleMachOHeadersKey, headerData);
}
#endif // SECURE_KERNEL
+ if (_OSArrayContainsCString(infoKeys, kOSBundleLogStringsKey)) {
+ osLogDataHeaderRef *header;
+ char headerBytes[offsetof(osLogDataHeaderRef, sections) + NUM_OS_LOG_SECTIONS * sizeof(header->sections[0])];
+
+ void *os_log_data = NULL;
+ void *cstring_data = NULL;
+ unsigned long os_log_size = 0;
+ unsigned long cstring_size = 0;
+ uint32_t os_log_offset = 0;
+ uint32_t cstring_offset = 0;
+ bool res;
+
+ os_log_data = getsectdatafromheader(kext_mach_hdr, "__TEXT", "__os_log", &os_log_size);
+ os_log_offset = getsectoffsetfromheader(kext_mach_hdr, "__TEXT", "__os_log");
+ cstring_data = getsectdatafromheader(kext_mach_hdr, "__TEXT", "__cstring", &cstring_size);
+ cstring_offset = getsectoffsetfromheader(kext_mach_hdr, "__TEXT", "__cstring");
+
+ header = (osLogDataHeaderRef *) headerBytes;
+ header->version = OS_LOG_HDR_VERSION;
+ header->sect_count = NUM_OS_LOG_SECTIONS;
+ header->sections[OS_LOG_SECT_IDX].sect_offset = os_log_offset;
+ header->sections[OS_LOG_SECT_IDX].sect_size = (uint32_t) os_log_size;
+ header->sections[CSTRING_SECT_IDX].sect_offset = cstring_offset;
+ header->sections[CSTRING_SECT_IDX].sect_size = (uint32_t) cstring_size;
+
+
+ logData = OSData::withBytes(header, (u_int) (sizeof(osLogDataHeaderRef)));
+ if (!logData) {
+ goto finish;
+ }
+ res = logData->appendBytes(&(header->sections[0]), (u_int)(header->sect_count * sizeof(header->sections[0])));
+ if (!res) {
+ goto finish;
+ }
+ if (os_log_data) {
+ res = logData->appendBytes(os_log_data, (u_int)header->sections[OS_LOG_SECT_IDX].sect_size);
+ if (!res) {
+ goto finish;
+ }
+ }
+ if (cstring_data) {
+ res = logData->appendBytes(cstring_data, (u_int)header->sections[CSTRING_SECT_IDX].sect_size);
+ if (!res) {
+ goto finish;
+ }
+ }
+ result->setObject(kOSBundleLogStringsKey, logData);
+ }
+
if (!infoKeys || _OSArrayContainsCString(infoKeys, kOSBundleCPUTypeKey)) {
cpuTypeNumber = OSNumber::withNumber(
(uint64_t) kext_mach_hdr->cputype,
// +1 for slash, +1 for \0
executablePathCStringSize = pathLength + executableRelPath->getLength() + 2;
- executablePathCString = (char *)kalloc((executablePathCStringSize) *
- sizeof(char)); // +1 for \0
+ executablePathCString = (char *)kalloc_tag((executablePathCStringSize) *
+ sizeof(char), VM_KERN_MEMORY_OSKEXT); // +1 for \0
if (!executablePathCString) {
goto finish;
}
executablePathString = OSString::withCString(executablePathCString);
- if (!executablePathCString) {
+ if (!executablePathString) {
goto finish;
}
if (!infoKeys ||
_OSArrayContainsCString(infoKeys, kOSBundleLoadAddressKey) ||
_OSArrayContainsCString(infoKeys, kOSBundleLoadSizeKey) ||
+ _OSArrayContainsCString(infoKeys, kOSBundleExecLoadAddressKey) ||
+ _OSArrayContainsCString(infoKeys, kOSBundleExecLoadSizeKey) ||
_OSArrayContainsCString(infoKeys, kOSBundleWiredSizeKey))
{
if (isInterface() || linkedExecutable) {
/* These go to userspace via serialization, so we don't want any doubts
* about their size.
*/
- uint64_t loadAddress = 0;
- uint32_t loadSize = 0;
- uint32_t wiredSize = 0;
+ uint64_t loadAddress = 0;
+ uint32_t loadSize = 0;
+ uint32_t wiredSize = 0;
+ uint64_t execLoadAddress = 0;
+ uint32_t execLoadSize = 0;
/* Interfaces always report 0 load address & size.
* Just the way they roll.
* xxx - shouldn't have one!
*/
if (linkedExecutable /* && !isInterface() */) {
+ kernel_mach_header_t *mh = NULL;
+ kernel_segment_command_t *seg = NULL;
+
loadAddress = (uint64_t)linkedExecutable->getBytesNoCopy();
+ mh = (kernel_mach_header_t *)loadAddress;
loadAddress = VM_KERNEL_UNSLIDE(loadAddress);
loadSize = linkedExecutable->getLength();
-
+
+ /* Walk through the kext, looking for the first executable
+ * segment in case we were asked for its size/address.
+ */
+ for (seg = firstsegfromheader(mh); seg != NULL; seg = nextsegfromheader(mh, seg)) {
+ if (seg->initprot & VM_PROT_EXECUTE) {
+ execLoadAddress = VM_KERNEL_UNSLIDE(seg->vmaddr);
+ execLoadSize = seg->vmsize;
+ break;
+ }
+ }
+
/* If we have a kmod_info struct, calculated the wired size
* from that. Otherwise it's the full load size.
*/
result->setObject(kOSBundleLoadAddressKey, scratchNumber);
OSSafeReleaseNULL(scratchNumber);
}
+ if (!infoKeys || _OSArrayContainsCString(infoKeys, kOSBundleExecLoadAddressKey)) {
+ scratchNumber = OSNumber::withNumber(
+ (unsigned long long)(execLoadAddress),
+ /* numBits */ 8 * sizeof(execLoadAddress));
+ if (!scratchNumber) {
+ goto finish;
+ }
+ result->setObject(kOSBundleExecLoadAddressKey, scratchNumber);
+ OSSafeReleaseNULL(scratchNumber);
+ }
if (!infoKeys || _OSArrayContainsCString(infoKeys, kOSBundleLoadSizeKey)) {
scratchNumber = OSNumber::withNumber(
(unsigned long long)(loadSize),
result->setObject(kOSBundleLoadSizeKey, scratchNumber);
OSSafeReleaseNULL(scratchNumber);
}
+ if (!infoKeys || _OSArrayContainsCString(infoKeys, kOSBundleExecLoadSizeKey)) {
+ scratchNumber = OSNumber::withNumber(
+ (unsigned long long)(execLoadSize),
+ /* numBits */ 8 * sizeof(execLoadSize));
+ if (!scratchNumber) {
+ goto finish;
+ }
+ result->setObject(kOSBundleExecLoadSizeKey, scratchNumber);
+ OSSafeReleaseNULL(scratchNumber);
+ }
if (!infoKeys || _OSArrayContainsCString(infoKeys, kOSBundleWiredSizeKey)) {
scratchNumber = OSNumber::withNumber(
(unsigned long long)(wiredSize),
success = true;
finish:
- OSSafeRelease(headerData);
- OSSafeRelease(cpuTypeNumber);
- OSSafeRelease(cpuSubtypeNumber);
- OSSafeRelease(executablePathString);
- if (executablePathString) kfree(executablePathCString, executablePathCStringSize);
- OSSafeRelease(uuid);
- OSSafeRelease(scratchNumber);
- OSSafeRelease(dependencyLoadTags);
- OSSafeRelease(metaClassIterator);
- OSSafeRelease(metaClassInfo);
- OSSafeRelease(metaClassDict);
- OSSafeRelease(metaClassName);
- OSSafeRelease(superclassName);
+ OSSafeReleaseNULL(headerData);
+ OSSafeReleaseNULL(logData);
+ OSSafeReleaseNULL(cpuTypeNumber);
+ OSSafeReleaseNULL(cpuSubtypeNumber);
+ OSSafeReleaseNULL(executablePathString);
+ if (executablePathCString) kfree(executablePathCString, executablePathCStringSize);
+ OSSafeReleaseNULL(uuid);
+ OSSafeReleaseNULL(scratchNumber);
+ OSSafeReleaseNULL(dependencyLoadTags);
+ OSSafeReleaseNULL(metaClassIterator);
+ OSSafeReleaseNULL(metaClassInfo);
+ OSSafeReleaseNULL(metaClassDict);
+ OSSafeReleaseNULL(metaClassName);
+ OSSafeReleaseNULL(superclassName);
if (!success) {
OSSafeReleaseNULL(result);
}
callbackRecordOut);
finish:
- OSSafeRelease(requestTagNum);
+ OSSafeReleaseNULL(requestTagNum);
return result;
}
return result;
}
+
+/*********************************************************************
+* Busy timeout triage
+*********************************************************************/
+/* static */
+bool
+OSKext::isWaitingKextd(void)
+{
+ /* True while at least one request callback record is outstanding,
+ * i.e. a request has been sent (presumably to kextd -- confirm
+ * against the request machinery) and no reply has arrived yet.
+ * The first operand also guards against sRequestCallbackRecords
+ * not having been allocated yet.
+ */
+ return sRequestCallbackRecords && sRequestCallbackRecords->getCount();
+}
+
/*********************************************************************
* Assumes sKextLock is held.
*********************************************************************/
switch (logLevel) {
case kOSKextLogErrorLevel:
return VTRED VTBOLD;
- break;
case kOSKextLogWarningLevel:
return VTRED;
- break;
case kOSKextLogBasicLevel:
return VTYELLOW VTUNDER;
- break;
case kOSKextLogProgressLevel:
return VTYELLOW;
- break;
case kOSKextLogStepLevel:
return VTGREEN;
- break;
case kOSKextLogDetailLevel:
return VTCYAN;
- break;
case kOSKextLogDebugLevel:
return VTMAGENTA;
- break;
default:
return ""; // white
- break;
}
- return "";
}
inline bool logSpecMatch(
va_end(argList);
if (length + 1 >= sizeof(stackBuffer)) {
- allocBuffer = (char *)kalloc((length + 1) * sizeof(char));
+ allocBuffer = (char *)kalloc_tag((length + 1) * sizeof(char), VM_KERN_MEMORY_OSKEXT);
if (!allocBuffer) {
goto finish;
}
if (allocBuffer) {
kfree(allocBuffer, (length + 1) * sizeof(char));
}
- OSSafeRelease(logString);
- OSSafeRelease(logSpecNum);
+ OSSafeReleaseNULL(logString);
+ OSSafeReleaseNULL(logSpecNum);
return;
}
vm_offset_t * addr,
unsigned int cnt,
int (* printf_func)(const char *fmt, ...),
- bool lockFlag,
- bool doUnslide)
+ uint32_t flags)
{
addr64_t summary_page = 0;
addr64_t last_summary_page = 0;
bool found_kmod = false;
u_int i = 0;
- if (lockFlag) {
+ if (kPrintKextsLock & flags) {
+ if (!sKextSummariesLock) return;
IOLockLock(sKextSummariesLock);
}
}
if (!found_kmod) {
- (*printf_func)(" Kernel Extensions in backtrace:\n");
+ if (!(kPrintKextsTerse & flags)) {
+ (*printf_func)(" Kernel Extensions in backtrace:\n");
+ }
found_kmod = true;
}
- printSummary(summary, printf_func, doUnslide);
+ printSummary(summary, printf_func, flags);
}
finish:
- if (lockFlag) {
+ if (kPrintKextsLock & flags) {
IOLockUnlock(sKextSummariesLock);
}
return FALSE;
}
+/*
+ * Get the kext summary object for the kext where 'addr' lies. Must be called with
+ * sKextSummariesLock held.
+ *
+ * Returns the matching summary, or NULL if 'addr' does not fall inside
+ * any loaded kext.
+ */
+OSKextLoadedKextSummary *
+OSKext::summaryForAddress(const uintptr_t addr)
+{
+ for (unsigned i = 0; i < gLoadedKextSummaries->numSummaries; ++i) {
+
+ OSKextLoadedKextSummary *summary = &gLoadedKextSummaries->summaries[i];
+ if (!summary->address) {
+ /* Skip summaries with no recorded load address. */
+ continue;
+ }
+
+#if VM_MAPPED_KEXTS
+ /* On our platforms that use VM_MAPPED_KEXTS, we currently do not
+ * support split kexts, but we also may unmap the kexts, which can
+ * race with the above codepath (see OSKext::unload). As such,
+ * use a simple range lookup if we are using VM_MAPPED_KEXTS.
+ */
+ if ((addr >= summary->address) && (addr < (summary->address + summary->size))) {
+ return summary;
+ }
+#else
+ /* Split-kext platforms: the kext need not occupy one contiguous
+ * range, so test each segment's VM range individually.
+ */
+ kernel_mach_header_t *mh = (kernel_mach_header_t *)summary->address;
+ kernel_segment_command_t *seg;
+
+ for (seg = firstsegfromheader(mh); seg != NULL; seg = nextsegfromheader(mh, seg)) {
+ if ((addr >= seg->vmaddr) && (addr < (seg->vmaddr + seg->vmsize))) {
+ return summary;
+ }
+ }
+#endif
+ }
+
+ /* addr did not map to any kext */
+ return NULL;
+}
+
+/* static */
+/*
+ * Return the base (mach header) address of the binary image containing
+ * 'addr': the kernel's own image if 'addr' lies in kernel text, otherwise
+ * the load address of the kext whose summary covers 'addr'. Returns NULL
+ * when no image matches or when the summaries lock has not been created
+ * yet (early boot).
+ */
+void *
+OSKext::kextForAddress(const void *addr)
+{
+ void *image = NULL;
+
+ /* Anything in [vm_kernel_stext, vm_kernel_etext) belongs to the
+ * kernel's own executable image.
+ */
+ if (((vm_offset_t)(uintptr_t)addr >= vm_kernel_stext) &&
+ ((vm_offset_t)(uintptr_t)addr < vm_kernel_etext)) {
+ return (void *)&_mh_execute_header;
+ }
+
+ if (!sKextSummariesLock) {
+ return NULL;
+ }
+ IOLockLock(sKextSummariesLock);
+ OSKextLoadedKextSummary *summary = OSKext::summaryForAddress((uintptr_t)addr);
+ if (summary) {
+ image = (void *)summary->address;
+ }
+ IOLockUnlock(sKextSummariesLock);
+
+ return image;
+}
+
/*********************************************************************
* scan list of loaded kext summaries looking for a load address match and if
* found return the UUID C string. If not found then set empty string.
void OSKext::printSummary(
OSKextLoadedKextSummary * summary,
int (* printf_func)(const char *fmt, ...),
- bool doUnslide)
+ uint32_t flags)
{
kmod_reference_t * kmod_ref = NULL;
uuid_string_t uuid;
}
(void) uuid_unparse(summary->uuid, uuid);
- if (doUnslide) {
+ if (kPrintKextsUnslide & flags) {
tmpAddr = VM_KERNEL_UNSLIDE(summary->address);
}
else {
tmpAddr = summary->address;
}
- (*printf_func)(" %s(%s)[%s]@0x%llx->0x%llx\n",
+ (*printf_func)("%s%s(%s)[%s]@0x%llx->0x%llx\n",
+ (kPrintKextsTerse & flags) ? "" : " ",
summary->name, version, uuid,
tmpAddr, tmpAddr + summary->size - 1);
+
+ if (kPrintKextsTerse & flags) return;
/* print dependency info */
for (kmod_ref = (kmod_reference_t *) summary->reference_list;
if (pmap_find_phys(kernel_pmap, (addr64_t)((uintptr_t)kmod_ref)) == 0) {
(*printf_func)(" kmod dependency scan stopped "
- "due to missing dependency page: %p\n", kmod_ref);
+ "due to missing dependency page: %p\n",
+ (kPrintKextsUnslide & flags) ? (void *)VM_KERNEL_UNSLIDE(kmod_ref) : kmod_ref);
break;
}
rinfo = kmod_ref->info;
if (pmap_find_phys(kernel_pmap, (addr64_t)((uintptr_t)rinfo)) == 0) {
(*printf_func)(" kmod dependency scan stopped "
- "due to missing kmod page: %p\n", rinfo);
+ "due to missing kmod page: %p\n",
+ (kPrintKextsUnslide & flags) ? (void *)VM_KERNEL_UNSLIDE(rinfo) : rinfo);
break;
}
/* locate UUID in gLoadedKextSummaries */
findSummaryUUID(rinfo->id, uuid);
- if (doUnslide) {
+ if (kPrintKextsUnslide & flags) {
tmpAddr = VM_KERNEL_UNSLIDE(rinfo->address);
}
else {
uint32_t newlist_size = 0;
newlist_size = KEXT_PANICLIST_SIZE;
- newlist = (char *)kalloc(newlist_size);
+ newlist = (char *)kalloc_tag(newlist_size, VM_KERN_MEMORY_OSKEXT);
if (!newlist) {
OSKextLog(/* kext */ NULL,
u_int count;
u_int maxKexts;
u_int i, j;
+ OSKextActiveAccount * accountingList;
+ OSKextActiveAccount * prevAccountingList;
+ uint32_t idx, accountingListAlloc, accountingListCount, prevAccountingListCount;
+ prevAccountingList = NULL;
+ prevAccountingListCount = 0;
+
#if DEVELOPMENT || DEBUG
if (IORecursiveLockHaveLock(sKextLock) == false) {
panic("sKextLock must be held");
if (gLoadedKextSummaries == NULL || sLoadedKextSummariesAllocSize < size) {
if (gLoadedKextSummaries) {
- kmem_free(kernel_map,
- (vm_offset_t)gLoadedKextSummaries,
- sLoadedKextSummariesAllocSize);
+ kmem_free(kernel_map, (vm_offset_t)gLoadedKextSummaries, sLoadedKextSummariesAllocSize);
gLoadedKextSummaries = NULL;
+ gLoadedKextSummariesTimestamp = mach_absolute_time();
sLoadedKextSummariesAllocSize = 0;
}
- result = kmem_alloc(kernel_map,
- (vm_offset_t*)&summaryHeaderAlloc,
- size);
+ result = kmem_alloc(kernel_map, (vm_offset_t *)&summaryHeaderAlloc, size, VM_KERN_MEMORY_OSKEXT);
if (result != KERN_SUCCESS) goto finish;
summaryHeader = summaryHeaderAlloc;
summarySize = size;
bzero(summaryHeader, summarySize);
summaryHeader->version = kOSKextLoadedKextSummaryVersion;
summaryHeader->entry_size = sizeof(OSKextLoadedKextSummary);
-
+
/* Populate each kext summary.
*/
count = sLoadedKexts->getCount();
+ accountingListAlloc = 0;
for (i = 0, j = 0; i < count && j < maxKexts; ++i) {
aKext = OSDynamicCast(OSKext, sLoadedKexts->getObject(i));
if (!aKext || !aKext->isExecutable()) {
aKext->updateLoadedKextSummary(&summaryHeader->summaries[j++]);
summaryHeader->numSummaries++;
+ accountingListAlloc++;
}
-
+
+ accountingList = IONew(typeof(accountingList[0]), accountingListAlloc);
+ accountingListCount = 0;
+ for (i = 0, j = 0; i < count && j < maxKexts; ++i) {
+ aKext = OSDynamicCast(OSKext, sLoadedKexts->getObject(i));
+ if (!aKext || !aKext->isExecutable()) {
+ continue;
+ }
+
+ OSKextActiveAccount activeAccount;
+ aKext->updateActiveAccount(&activeAccount);
+ // order by address
+ for (idx = 0; idx < accountingListCount; idx++)
+ {
+ if (activeAccount.address < accountingList[idx].address) break;
+ }
+ bcopy(&accountingList[idx], &accountingList[idx + 1], (accountingListCount - idx) * sizeof(accountingList[0]));
+ accountingList[idx] = activeAccount;
+ accountingListCount++;
+ }
+ assert(accountingListCount == accountingListAlloc);
/* Write protect the buffer and move it into place.
*/
start = (vm_map_offset_t) summaryHeader;
end = start + summarySize;
-
+
result = vm_map_protect(kernel_map, start, end, VM_PROT_READ, FALSE);
- if (result != KERN_SUCCESS) goto finish;
-
+ if (result != KERN_SUCCESS)
+ goto finish;
+
gLoadedKextSummaries = summaryHeader;
+ gLoadedKextSummariesTimestamp = mach_absolute_time();
sLoadedKextSummariesAllocSize = summarySize;
summaryHeaderAlloc = NULL;
-
+
/* Call the magic breakpoint function through a static function pointer so
* the compiler can't optimize the function away.
*/
if (sLoadedKextSummariesUpdated) (*sLoadedKextSummariesUpdated)();
+ IOSimpleLockLock(sKextAccountsLock);
+ prevAccountingList = sKextAccounts;
+ prevAccountingListCount = sKextAccountsCount;
+ sKextAccounts = accountingList;
+ sKextAccountsCount = accountingListCount;
+ IOSimpleLockUnlock(sKextAccountsLock);
+
finish:
IOLockUnlock(sKextSummariesLock);
if (summaryHeaderAlloc) {
kmem_free(kernel_map, (vm_offset_t)summaryHeaderAlloc, summarySize);
}
+ if (prevAccountingList) {
+ IODelete(prevAccountingList, typeof(accountingList[0]), prevAccountingListCount);
+ }
return;
}
uuid = copyUUID();
if (uuid) {
memcpy(summary->uuid, uuid->getBytesNoCopy(), sizeof(summary->uuid));
- OSSafeRelease(uuid);
+ OSSafeReleaseNULL(uuid);
}
summary->address = kmod_info->address;
return;
}
+/*********************************************************************
+* Fill in 'accountp' with this kext's instruction address range and its
+* accounting object, for allocation-site attribution.
+*********************************************************************/
+
+void
+OSKext::updateActiveAccount(OSKextActiveAccount *accountp)
+{
+ kernel_mach_header_t *hdr = NULL;
+ kernel_segment_command_t *seg = NULL;
+
+ hdr = (kernel_mach_header_t *)kmod_info->address;
+
+ if (getcommandfromheader(hdr, LC_SEGMENT_SPLIT_INFO)) {
+ /* If this kext supports split segments, use the first
+ * executable segment as the range for instructions
+ * (and thus for backtracing).
+ */
+ for (seg = firstsegfromheader(hdr); seg != NULL; seg = nextsegfromheader(hdr, seg)) {
+ if (seg->initprot & VM_PROT_EXECUTE) {
+ break;
+ }
+ }
+ }
+
+ bzero(accountp, sizeof(*accountp));
+ /* seg is non-NULL only when a split kext had an executable segment. */
+ if (seg) {
+ accountp->address = seg->vmaddr;
+ if (accountp->address) {
+ accountp->address_end = seg->vmaddr + seg->vmsize;
+ }
+ } else {
+ /* For non-split kexts and for kexts without executable
+ * segments, just use the kmod_info range (as the kext
+ * is either all in one range or should not show up in
+ * instruction backtraces).
+ */
+ accountp->address = kmod_info->address;
+ if (accountp->address) {
+ accountp->address_end = kmod_info->address + kmod_info->size;
+ }
+ }
+ accountp->account = this->account;
+}
+
+/*
+ * Map a caller instruction address to the VM allocation site of the kext
+ * containing it, for memory accounting. Returns NULL when the address is
+ * not inside any tracked kext range.
+ */
+extern "C" const vm_allocation_site_t *
+OSKextGetAllocationSiteForCaller(uintptr_t address)
+{
+ OSKextActiveAccount * active;
+ vm_allocation_site_t * site;
+ vm_allocation_site_t * releasesite;
+
+ uint32_t baseIdx;
+ uint32_t lim;
+
+ IOSimpleLockLock(sKextAccountsLock);
+ site = releasesite = NULL;
+
+ // bsearch sKextAccounts list
+ // (the list is kept sorted by address when it is rebuilt)
+ for (baseIdx = 0, lim = sKextAccountsCount; lim; lim >>= 1)
+ {
+ active = &sKextAccounts[baseIdx + (lim >> 1)];
+ if ((address >= active->address) && (address < active->address_end))
+ {
+ site = &active->account->site;
+ /* Lazily assign a VM tag on first use; any site handed back
+ * via the out-parameter is released below, outside the
+ * spinlock.
+ */
+ if (!site->tag) vm_tag_alloc_locked(site, &releasesite);
+ break;
+ }
+ else if (address > active->address)
+ {
+ // move right
+ baseIdx += (lim >> 1) + 1;
+ lim--;
+ }
+ // else move left
+ }
+ IOSimpleLockUnlock(sKextAccountsLock);
+ if (releasesite) kern_allocation_name_release(releasesite);
+
+ return (site);
+}
+
+/*
+ * Return the load tag for the kext owning 'site' and, when 'name' is
+ * non-NULL, copy the kext's identifier (or the placeholder "<>" when no
+ * kext is attached) into it.
+ * NOTE(review): the cast assumes 'site' is the vm_allocation_site_t
+ * embedded in an OSKextAccount -- callers must only pass such sites;
+ * confirm the OSKextAccount layout before reuse.
+ */
+extern "C" uint32_t
+OSKextGetKmodIDForSite(const vm_allocation_site_t * site, char * name, vm_size_t namelen)
+{
+ OSKextAccount * account = (typeof(account)) site;
+ const char * kname;
+
+ if (name)
+ {
+ if (account->kext) kname = account->kext->getIdentifierCString();
+ else kname = "<>";
+ /* strlcpy always NUL-terminates within namelen. */
+ strlcpy(name, kname, namelen);
+ }
+
+ return (account->loadTag);
+}
+
+/*
+ * Free the OSKextAccount whose embedded allocation site is 'site'.
+ * NOTE(review): like OSKextGetKmodIDForSite, this assumes 'site' is
+ * embedded in an OSKextAccount -- confirm for any new caller.
+ */
+extern "C" void
+OSKextFreeSite(vm_allocation_site_t * site)
+{
+ OSKextAccount * freeAccount = (typeof(freeAccount)) site;
+ IODelete(freeAccount, OSKextAccount, 1);
+}
+
/*********************************************************************
*********************************************************************/
static void *
GetAppleTEXTHashForKext(OSKext * theKext, OSDictionary *theInfoDict)
{
- AppleTEXTHash_t my_ath = {1, 0, NULL};
+ AppleTEXTHash_t my_ath = {2, 0, NULL};
AppleTEXTHash_t * my_athp = NULL; // do not release
- OSDictionary * textHashDict = NULL; // do not release
OSData * segmentHash = NULL; // do not release
if (theKext == NULL || theInfoDict == NULL) {
return(NULL);
}
- textHashDict = OSDynamicCast(OSDictionary, theInfoDict->getObject(kAppleTextHashesKey));
- if (textHashDict == NULL) {
- return(NULL);
- }
-
- segmentHash = OSDynamicCast(OSData,
- textHashDict->getObject(ARCHNAME));
+ // Get the part of the plist associate with kAppleTextHashesKey and let
+ // the crypto library do further parsing (slice/architecture)
+ segmentHash = OSDynamicCast(OSData, theInfoDict->getObject(kAppleTextHashesKey));
+ // Support for ATH v1 while rolling out ATH v2 without revision locking submissions
+ // Remove this when v2 PLIST are supported
+ if (segmentHash == NULL) {
+ // If this fails, we may be dealing with a v1 PLIST
+ OSDictionary * textHashDict = NULL; // do not release
+ textHashDict = OSDynamicCast(OSDictionary, theInfoDict->getObject(kAppleTextHashesKey));
+ if (textHashDict == NULL) {
+ return(NULL);
+ }
+ my_ath.ath_version=1;
+ segmentHash = OSDynamicCast(OSData,textHashDict->getObject(ARCHNAME));
+ } // end of v2 rollout
+
if (segmentHash == NULL) {
return(NULL);
}
// KEC_FIPS type kexts never unload so we don't have to clean up our
// AppleTEXTHash_t
if (kmem_alloc(kernel_map, (vm_offset_t *) &my_athp,
- sizeof(AppleTEXTHash_t)) != KERN_SUCCESS) {
+ sizeof(AppleTEXTHash_t), VM_KERN_MEMORY_OSKEXT) != KERN_SUCCESS) {
return(NULL);
}
#endif // CONFIG_KEC_FIPS
+#if CONFIG_IMAGEBOOT
+/*
+ * Copy the UUID of the kext with bundle identifier 'name' into 'uuid'.
+ * Returns 0 on success, 1 when the kext is not found or has no UUID.
+ */
+int OSKextGetUUIDForName(const char *name, uuid_t uuid)
+{
+ OSKext *kext = OSKext::lookupKextWithIdentifier(name);
+ if (!kext) {
+ return 1;
+ }
+
+ int result = 1;
+ OSData *uuid_data = kext->copyUUID();
+ if (uuid_data) {
+ memcpy(uuid, uuid_data->getBytesNoCopy(), sizeof(uuid_t));
+ OSSafeReleaseNULL(uuid_data);
+ result = 0;
+ }
+
+ /* lookupKextWithIdentifier() returns a retained reference; the
+ * original code leaked it on every call. Release it here.
+ */
+ OSSafeReleaseNULL(kext);
+ return result;
+}
+#endif
+