C116F1A523F5BB39002D386B /* update_dyld_shared_cache-build.sh */ = {isa = PBXFileReference; lastKnownFileType = text.script.sh; path = "update_dyld_shared_cache-build.sh"; sourceTree = "<group>"; };
C11ECA8E233C307C0011726F /* SharedCacheBuilder.cpp */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.cpp.cpp; name = SharedCacheBuilder.cpp; path = "dyld3/shared-cache/SharedCacheBuilder.cpp"; sourceTree = "<group>"; };
C11ECA8F233C307C0011726F /* SharedCacheBuilder.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; name = SharedCacheBuilder.h; path = "dyld3/shared-cache/SharedCacheBuilder.h"; sourceTree = "<group>"; };
+ C141DF8825673EF00077621A /* PointerAuth.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; name = PointerAuth.h; path = dyld3/PointerAuth.h; sourceTree = "<group>"; };
C14965DB22BDCE7C00568D15 /* dyld_app_cache_util.cpp */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.cpp.cpp; name = dyld_app_cache_util.cpp; path = dyld3/dyld_app_cache_util.cpp; sourceTree = "<group>"; };
C14965E022BDCF6800568D15 /* dyld_app_cache_util */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.executable"; includeInIndex = 0; path = dyld_app_cache_util; sourceTree = BUILT_PRODUCTS_DIR; };
C14C3560230531820059E04C /* run-static */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.executable"; includeInIndex = 0; path = "run-static"; sourceTree = BUILT_PRODUCTS_DIR; };
C18F095221925E7600034B68 /* Map.h */,
F9F76FAE1E08CFF200828678 /* PathOverrides.cpp */,
F9F76FAF1E08CFF200828678 /* PathOverrides.h */,
+ C141DF8825673EF00077621A /* PointerAuth.h */,
C116F19A23F4B11B002D386B /* RootsChecker.cpp */,
C116F19B23F4B11B002D386B /* RootsChecker.h */,
F977DDC91E53BEA700609230 /* SharedCacheRuntime.cpp */,
GCC_WARN_UNUSED_FUNCTION = YES;
GCC_WARN_UNUSED_VARIABLE = YES;
INSTALL_PATH = "$(INSTALL_LOCATION)/usr/local/bin";
- MACOSX_DEPLOYMENT_TARGET = 10.11;
+ MACOSX_DEPLOYMENT_TARGET = 10.14;
MTL_ENABLE_DEBUG_INFO = YES;
ONLY_ACTIVE_ARCH = YES;
OTHER_CFLAGS = (
GCC_WARN_UNUSED_FUNCTION = YES;
GCC_WARN_UNUSED_VARIABLE = YES;
INSTALL_PATH = "$(INSTALL_LOCATION)/usr/local/bin";
- MACOSX_DEPLOYMENT_TARGET = 10.11;
+ MACOSX_DEPLOYMENT_TARGET = 10.14;
MTL_ENABLE_DEBUG_INFO = NO;
OTHER_CFLAGS = (
"-DBOM_SUPPORT=1",
// get base address of cache
__block uint64_t cacheUnslidBaseAddress = 0;
- sharedCache->forEachRegion(^(const void *content, uint64_t vmAddr, uint64_t size, uint32_t permissions,
- uint64_t flags) {
+ sharedCache->forEachRegion(^(const void *content, uint64_t vmAddr, uint64_t size,
+ uint32_t initProt, uint32_t maxProt, uint64_t flags) {
if ( cacheUnslidBaseAddress == 0 )
cacheUnslidBaseAddress = vmAddr;
});
#include <algorithm>
+#if __has_feature(ptrauth_calls)
+ #include <ptrauth.h>
+#endif
+
#include "dlfcn.h"
#include "AllImages.h"
{
log_apis("NSAddressOfSymbol(%p)\n", symbol);
+ if ( symbol == nullptr )
+ return nullptr;
+
// in dyld 1.0, NSSymbol was a pointer to the nlist entry in the symbol table
- return (void*)symbol;
+ void *result = (void*)symbol;
+
+#if __has_feature(ptrauth_calls)
+ __block const MachOLoaded *module = nullptr;
+ gAllImages.infoForImageMappedAt(symbol, ^(const LoadedImage& foundImage, uint8_t permissions) {
+ module = foundImage.loadedAddress();
+ });
+
+ int64_t slide = module->getSlide();
+ __block bool resultPointsToInstructions = false;
+ module->forEachSection(^(const MachOAnalyzer::SectionInfo& sectInfo, bool malformedSectionRange, bool& stop) {
+ uint64_t sectStartAddr = sectInfo.sectAddr + slide;
+ uint64_t sectEndAddr = sectStartAddr + sectInfo.sectSize;
+ if ( ((uint64_t)result >= sectStartAddr) && ((uint64_t)result < sectEndAddr) ) {
+ resultPointsToInstructions = (sectInfo.sectFlags & S_ATTR_PURE_INSTRUCTIONS) || (sectInfo.sectFlags & S_ATTR_SOME_INSTRUCTIONS);
+ stop = true;
+ }
+ });
+
+ if (resultPointsToInstructions) {
+ result = __builtin_ptrauth_sign_unauthenticated(result, ptrauth_key_asia, 0);
+ }
+#endif
+
+ return result;
}
NSModule NSModuleForSymbol(NSSymbol symbol)
extern "C" int __cxa_atexit(void (*func)(void *), void* arg, void* dso);
-
-VIS_HIDDEN bool gUseDyld3 = false;
+VIS_HIDDEN void* __ptrauth_dyld_address_auth gUseDyld3 = nullptr;
namespace dyld3 {
void Reaper::runTerminators(const LoadedImage& li)
{
+ // <rdar://problem/71820555> Don't run static terminator for arm64e
+ const MachOAnalyzer* ma = (MachOAnalyzer*)li.loadedAddress();
+ if ( ma->isArch("arm64e") )
+ return;
+
// Invoke every static terminator the closure recorded for this image.
if ( li.image()->hasTerminators() ) {
typedef void (*Terminator)();
li.image()->forEachTerminator(li.loadedAddress(), ^(const void* terminator) {
Terminator termFunc = (Terminator)terminator;
-#if __has_feature(ptrauth_calls)
- termFunc = (Terminator)__builtin_ptrauth_sign_unauthenticated((void*)termFunc, 0, 0);
-#endif
// NOTE(review): the manual ptrauth re-signing is removed by this hunk;
// on arm64e the function now returns early above instead of calling
// terminators at all.
termFunc();
log_initializers("dyld: called static terminator %p in %s\n", termFunc, li.image()->path());
});
}
}
-void AllImages::applyInterposingToDyldCache(const closure::Closure* closure)
+void AllImages::applyInterposingToDyldCache(const closure::Closure* closure, mach_port_t mach_task_self)
{
dyld3::ScopedTimer timer(DBG_DYLD_TIMING_APPLY_INTERPOSING, 0, 0, 0);
const uintptr_t cacheStart = (uintptr_t)_dyldCacheAddress;
__block closure::ImageNum lastCachedDylibImageNum = 0;
__block const closure::Image* lastCachedDylibImage = nullptr;
__block bool suspendedAccounting = false;
+
+ if ( closure->findAttributePayload(closure::TypedBytes::Type::cacheOverrides) == nullptr )
+ return;
+
+ // make the cache writable for this block
+ DyldSharedCache::DataConstScopedWriter patcher(_dyldCacheAddress, mach_task_self, (DyldSharedCache::DataConstLogFunc)&log_segments);
+
closure->forEachPatchEntry(^(const closure::Closure::PatchEntry& entry) {
if ( entry.overriddenDylibInCache != lastCachedDylibImageNum ) {
lastCachedDylibImage = closure::ImageArray::findImage(imagesArrays(), entry.overriddenDylibInCache);
// if closure adds images that override dyld cache, patch cache
if ( newClosure != nullptr )
- applyInterposingToDyldCache(newClosure);
+ applyInterposingToDyldCache(newClosure, mach_task_self());
runImageCallbacks(newImages);
#include <os/lock_private.h>
#include <mach-o/dyld_priv.h>
+#include <sys/types.h>
+
#include "Closure.h"
#include "Loading.h"
#include "MachOLoaded.h"
#include "DyldSharedCache.h"
+#include "PointerAuth.h"
#if TARGET_OS_OSX
void removeImages(const Array<LoadedImage>& unloadImages);
void runImageNotifiers(const Array<LoadedImage>& newImages);
void runImageCallbacks(const Array<LoadedImage>& newImages);
- void applyInterposingToDyldCache(const closure::Closure* closure);
+ void applyInterposingToDyldCache(const closure::Closure* closure, mach_port_t mach_task_self);
void runStartupInitialzers();
void runInitialzersBottomUp(const closure::Image* topImage);
void runLibSystemInitializer(LoadedImage& libSystem);
uintptr_t resolveTarget(closure::Image::ResolvedSymbolTarget target) const;
void addImmutableRange(uintptr_t start, uintptr_t end);
- void constructMachPorts(int slot);
- void teardownMachPorts(int slot);
- void forEachPortSlot(void (^callback)(int slot));
- void sendMachMessage(int slot, mach_msg_id_t msg_id, mach_msg_header_t* msg_buffer, mach_msg_size_t msg_size);
- void notifyMonitoringDyld(bool unloading, const Array<LoadedImage>& images);
-
static void runAllStaticTerminatorsHelper(void*);
typedef closure::ImageArray ImageArray;
- const closure::LaunchClosure* _mainClosure = nullptr;
+ typedef const closure::LaunchClosure* __ptrauth_dyld_address_auth MainClosurePtrType;
+
+ MainClosurePtrType _mainClosure = nullptr;
const DyldSharedCache* _dyldCacheAddress = nullptr;
const char* _dyldCachePath = nullptr;
uint64_t _dyldCacheSlide = 0;
#include <assert.h>
#include <stddef.h>
#include <mach/mach.h>
+#include <TargetConditionals.h>
#if !TARGET_OS_DRIVERKIT && (BUILDING_LIBDYLD || BUILDING_DYLD)
#include <CrashReporterClient.h>
return (_flags & kEnableCompactImageInfoMask);
}
+// Returns true when the kForceReadOnlyDataConstMask bit (1<<18) is set in the
+// dyld boot-arg flags, i.e. the boot-arg forces __DATA_CONST to stay read-only.
+bool BootArgs::forceReadOnlyDataConst() {
+ return (_flags & kForceReadOnlyDataConstMask);
+}
+
+// Returns true when the kForceReadWriteDataConstMask bit (1<<19) is set in the
+// dyld boot-arg flags, i.e. the boot-arg forces __DATA_CONST to be writable.
+bool BootArgs::forceReadWriteDataConst() {
+ return (_flags & kForceReadWriteDataConstMask);
+}
+
void BootArgs::setFlags(uint64_t flags) {
#if TARGET_IPHONE_SIMULATOR
return;
static bool forceDyld3();
static bool enableDyldTestMode();
static bool enableCompactImageInfo();
+ static bool forceReadOnlyDataConst();
+ static bool forceReadWriteDataConst();
static void setFlags(uint64_t flags);
private:
static const uint64_t kForceCustomerCacheMask = 1<<0;
static const uint64_t kForceDyld2CacheMask = 1<<15;
static const uint64_t kForceDyld3CacheMask = 1<<16;
static const uint64_t kEnableCompactImageInfoMask = 1<<17;
+ static const uint64_t kForceReadOnlyDataConstMask = 1<<18;
+ static const uint64_t kForceReadWriteDataConstMask = 1<<19;
//FIXME: Move this into __DATA_CONST once it is enabled for dyld
static uint64_t _flags;
};
if ( foundInCache && !fileFound ) {
ImageNum dyldCacheImageNum = dyldCacheImageIndex + 1;
for (BuilderLoadedImage& li: _loadedImages) {
- if ( li.overrideImageNum == dyldCacheImageNum ) {
+ if ( (li.overrideImageNum == dyldCacheImageNum) || (li.imageNum == dyldCacheImageNum) ) {
foundImage = &li;
result = true;
stop = true;
if ( _fileSystem.getRealPath(possiblePath, realPath) ) {
foundInCache = _dyldCache->hasImagePath(realPath, dyldCacheImageIndex);
if ( foundInCache ) {
- filePath = realPath;
+ ImageNum dyldCacheImageNum = dyldCacheImageIndex + 1;
+ const Image* image = _dyldImageArray->imageForNum(dyldCacheImageNum);
+ filePath = image->path();
#if BUILDING_LIBDYLD
// handle case where OS dylib was updated after this process launched
if ( foundInCache ) {
for (BuilderLoadedImage& li: _loadedImages) {
- if ( strcmp(li.path(), realPath) == 0 ) {
+ if ( strcmp(li.path(), filePath) == 0 ) {
foundImage = &li;
result = true;
stop = true;
if ( !markNeverUnload ) {
// If the parent didn't force us to be never unload, other conditions still may
- if ( mh->hasThreadLocalVariables() ) {
- markNeverUnload = true;
- } else if ( mh->hasObjC() && mh->isDylib() ) {
- markNeverUnload = true;
- } else {
- // record if image has DOF sections
- __block bool hasDOFs = false;
- mh->forEachDOFSection(_diag, ^(uint32_t offset) {
- hasDOFs = true;
- });
- if ( hasDOFs )
- markNeverUnload = true;
- }
+ markNeverUnload = mh->markNeverUnload(_diag);
}
// Set the path again just in case it was strdup'ed.
// build _loadedImages[] with every dylib in cache, followed by others
_nextIndex = 0;
for (const LoadedFileInfo& aDylibInfo : otherDylibs) {
+ auto *mh = (const MachOAnalyzer*)aDylibInfo.fileContent;
+
BuilderLoadedImage entry;
entry.loadedFileInfo = aDylibInfo;
entry.imageNum = _startImageNum + _nextIndex++;
entry.unmapWhenDone = false;
entry.contentRebased = false;
entry.hasInits = false;
- entry.markNeverUnload = false;
+ entry.markNeverUnload = mh->markNeverUnload(_diag);
entry.rtldLocal = false;
entry.isBadImage = false;
entry.mustBuildClosure = false;
if ( overrideOfCache )
vmAccountingSetSuspended(true, _logFixups);
if ( image->fixupsNotEncoded() ) {
+ // make the cache writable for this block
+ // We do this lazily, only if we find a symbol which needs to be overridden
+ DyldSharedCache::DataConstLazyScopedWriter patcher((const DyldSharedCache*)_dyldCacheAddress, mach_task_self(), (DyldSharedCache::DataConstLogFunc)_logSegments);
+ auto* patcherPtr = &patcher;
+
WrappedMachO wmo((MachOAnalyzer*)info.loadedAddress(), this, (void*)info.image());
wmo.forEachFixup(diag,
^(uint64_t fixupLocRuntimeOffset, PointerMetaData pmd, const FixupTarget& target, bool& stop) {
// Full dlopen closures don't patch weak defs. Bail out early if we are libdyld to match this behaviour
return;
#endif
+ patcherPtr->makeWriteable();
((const DyldSharedCache*)_dyldCacheAddress)->forEachPatchableUseOfExport(cachedDylibIndex, exportCacheOffset, ^(dyld_cache_patchable_location patchLoc) {
uintptr_t* loc = (uintptr_t*)(((uint8_t*)_dyldCacheAddress)+patchLoc.cacheOffset);
uintptr_t newImpl = (uintptr_t)(target.foundInImage._mh) + target.offsetInImage + DyldSharedCache::getAddend(patchLoc);
}
}
+// Returns true if this image should be marked never-unload: images with
+// thread-local variables, dylibs containing ObjC, and images with DOF
+// sections cannot be safely removed from the process once loaded.
+bool MachOAnalyzer::markNeverUnload(Diagnostics &diag) const {
+ bool neverUnload = false;
+
+ if ( hasThreadLocalVariables() ) {
+ neverUnload = true;
+ } else if ( hasObjC() && isDylib() ) {
+ neverUnload = true;
+ } else {
+ // record if image has DOF sections
+ __block bool hasDOFs = false;
+ forEachDOFSection(diag, ^(uint32_t offset) {
+ hasDOFs = true;
+ });
+ if ( hasDOFs )
+ neverUnload = true;
+ }
+ return neverUnload;
+}
+
+
bool MachOAnalyzer::canBePlacedInDyldCache(const char* path, void (^failureReason)(const char*)) const
{
if (!MachOFile::canBePlacedInDyldCache(path, failureReason))
return false;
}
- if ( !(isArch("x86_64") || isArch("x86_64h")) )
- return true;
-
- if ( hasChainedFixups() )
- return true;
-
// evict swift dylibs with split seg v1 info
if ( this->isSwiftLibrary() && this->isSplitSegV1() )
return false;
+ if ( hasChainedFixups() ) {
+ // Chained fixups assumes split seg v2. This is true for now as chained fixups is arm64e only
+ return this->isSplitSegV2();
+ }
+
+ if ( !(isArch("x86_64") || isArch("x86_64h")) )
+ return true;
+
__block bool rebasesOk = true;
uint64_t startVMAddr = preferredLoadAddress();
uint64_t endVMAddr = startVMAddr + mappedSize();
uint32_t chain_starts[1];
};
+// ld64 sometimes can't determine the size of __thread_starts accurately,
+// because these sections have to be given a size before everything is laid out,
+// and you don't know the actual size of the chains until everything is
+// laid out. In order to account for this, the linker puts trailing 0xFFFFFFFF at
+// the end of the section, which must be ignored when walking the chains. This
+// patch adjusts the section size accordingly.
+static uint32_t adjustStartsCount(uint32_t startsCount, const uint32_t* starts) {
+ // Walk backwards from the end of the section, dropping each trailing
+ // 0xFFFFFFFF padding entry; stop at the first real chain-start value.
+ for ( int i = startsCount; i > 0; --i )
+ {
+ if ( starts[i - 1] == 0xFFFFFFFF )
+ startsCount--;
+ else
+ break;
+ }
+ return startsCount;
+}
+
bool MachOAnalyzer::hasFirmwareChainStarts(uint16_t* pointerFormat, uint32_t* startsCount, const uint32_t** starts) const
{
if ( !this->isPreload() && !this->isStaticExecutable() )
}
if (const OldThreadsStartSection* sect = (OldThreadsStartSection*)this->findSectionContent("__TEXT", "__thread_starts", sectionSize) ) {
*pointerFormat = sect->stride8 ? DYLD_CHAINED_PTR_ARM64E : DYLD_CHAINED_PTR_ARM64E_FIRMWARE;
- *startsCount = (uint32_t)(sectionSize/4) - 1;
+ *startsCount = adjustStartsCount((uint32_t)(sectionSize/4) - 1, sect->chain_starts);
*starts = sect->chain_starts;
return true;
}
static bool sliceIsOSBinary(int fd, uint64_t sliceOffset, uint64_t sliceSize);
const MachOAnalyzer* remapIfZeroFill(Diagnostics& diag, const closure::FileSystem& fileSystem, closure::LoadedFileInfo& info) const;
+
+ bool markNeverUnload(Diagnostics &diag) const;
struct ObjCInfo {
uint32_t selRefCount;
return false;
}
+ // Only x86_64 is allowed to have RWX segments
+ if ( !isArch("x86_64") && !isArch("x86_64h") ) {
+ __block bool foundBadSegment = false;
+ forEachSegment(^(const SegmentInfo &info, bool &stop) {
+ if ( (info.protections & (VM_PROT_WRITE | VM_PROT_EXECUTE)) == (VM_PROT_WRITE | VM_PROT_EXECUTE) ) {
+ failureReason("Segments are not allowed to be both writable and executable");
+ foundBadSegment = true;
+ stop = true;
+ }
+ });
+ if ( foundBadSegment )
+ return false;
+ }
+
return true;
}
#endif
--- /dev/null
+/*
+ * Copyright (c) 2017 Apple Inc. All rights reserved.
+ *
+ * @APPLE_LICENSE_HEADER_START@
+ *
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this
+ * file.
+ *
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
+ * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
+ *
+ * @APPLE_LICENSE_HEADER_END@
+ */
+
+
+
+#ifndef __DYLD_POINTER_AUTH_H__
+#define __DYLD_POINTER_AUTH_H__
+
+#include <ptrauth.h>
+
+// Type qualifiers for pointers dyld stores in memory. When ptrauth is
+// available they sign the stored value with a process-dependent key, using
+// the storage address as a discriminator (the `1` enables address
+// discrimination; extra discriminator data is 0). On other archs they
+// expand to nothing and leave a plain pointer.
+#if __has_feature(ptrauth_calls)
+#define __ptrauth_dyld_address_auth __ptrauth(ptrauth_key_process_dependent_data, 1, 0)
+#define __ptrauth_dyld_function_ptr __ptrauth(ptrauth_key_process_dependent_code, 1, 0)
+#else
+#define __ptrauth_dyld_address_auth
+#define __ptrauth_dyld_function_ptr
+#endif
+
+namespace dyld3 {
+
+// On arm64e, signs the given pointer with the address of where it is stored.
+// Other archs just have a regular pointer
+#pragma clang diagnostic push
+#pragma clang diagnostic ignored "-Wptrauth-null-pointers"
+template<typename T>
+struct AuthenticatedValue {
+ static_assert(sizeof(T) <= sizeof(uintptr_t));
+
+ // Default-constructed value holds a null pointer signed against this
+ // object's own address.
+ AuthenticatedValue() {
+ this->value = ptrauth_sign_unauthenticated(nullptr, ptrauth_key_process_dependent_data, this);
+ }
+ ~AuthenticatedValue() = default;
+ // Copying/moving must authenticate against the source object's address and
+ // re-sign with the destination's address, because the signature is
+ // address-discriminated.
+ AuthenticatedValue(const AuthenticatedValue& other) {
+ this->value = ptrauth_auth_and_resign(other.value,
+ ptrauth_key_process_dependent_data, &other,
+ ptrauth_key_process_dependent_data, this);
+ }
+ // Move also resets the source to a signed null so it remains valid.
+ AuthenticatedValue(AuthenticatedValue&& other) {
+ this->value = ptrauth_auth_and_resign(other.value,
+ ptrauth_key_process_dependent_data, &other,
+ ptrauth_key_process_dependent_data, this);
+ other.value = ptrauth_sign_unauthenticated(nullptr, ptrauth_key_process_dependent_data, &other);
+ }
+
+ AuthenticatedValue& operator=(const AuthenticatedValue&) = delete;
+ AuthenticatedValue& operator=(AuthenticatedValue&&) = delete;
+
+ // Add a few convenience methods for interoperating with values of the given type
+ AuthenticatedValue& operator=(const T& other) {
+ this->value = ptrauth_sign_unauthenticated(other, ptrauth_key_process_dependent_data, this);
+ return *this;
+ }
+ // Comparisons authenticate the stored value before comparing.
+ bool operator==(const T& other) const {
+ return ptrauth_auth_data(this->value, ptrauth_key_process_dependent_data, this) == other;
+ }
+ bool operator!=(const T& other) const {
+ return ptrauth_auth_data(this->value, ptrauth_key_process_dependent_data, this) != other;
+ }
+
+private:
+ const void* value;
+};
+
+
+#pragma clang diagnostic pop
+
+} // namespace dyld3
+
+#endif // __DYLD_POINTER_AUTH_H__
+
#define VM_PROT_NOAUTH 0x40 /* must not interfere with normal prot assignments */
#endif
+extern bool gEnableSharedCacheDataConst;
+
namespace dyld {
extern void log(const char*, ...);
extern void logToConsole(const char* format, ...);
{
for (int i=0; i < mappingsCount; ++i) {
const char* mappingName = "";
- if ( mappings[i].sms_init_prot & VM_PROT_WRITE ) {
- if ( mappings[i].sms_init_prot & VM_PROT_NOAUTH ) {
+ if ( mappings[i].sms_max_prot & VM_PROT_WRITE ) {
+ if ( mappings[i].sms_max_prot & VM_PROT_NOAUTH ) {
// __DATA*
mappingName = "data";
} else {
mappingName);
}
}
+
+
+// Same as verboseSharedCacheMappings() but writes each mapping line via
+// dyld::logToConsole(). Writable mappings are labelled "data" when
+// VM_PROT_NOAUTH is set in sms_max_prot (__DATA*), otherwise "auth" (__AUTH*).
+static void verboseSharedCacheMappingsToConsole(const shared_file_mapping_slide_np mappings[DyldSharedCache::MaxMappings],
+ uint32_t mappingsCount)
+{
+ for (int i=0; i < mappingsCount; ++i) {
+ const char* mappingName = "";
+ if ( mappings[i].sms_max_prot & VM_PROT_WRITE ) {
+ if ( mappings[i].sms_max_prot & VM_PROT_NOAUTH ) {
+ // __DATA*
+ mappingName = "data";
+ } else {
+ // __AUTH*
+ mappingName = "auth";
+ }
+ }
+ // Mask to the standard read/write/execute bits for display.
+ uint32_t init_prot = mappings[i].sms_init_prot & (VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE);
+ uint32_t max_prot = mappings[i].sms_max_prot & (VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE);
+ dyld::logToConsole("dyld: mapping 0x%08llX->0x%08llX init=%x, max=%x %s%s%s%s\n",
+ mappings[i].sms_address, mappings[i].sms_address+mappings[i].sms_size-1,
+ init_prot, max_prot,
+ ((mappings[i].sms_init_prot & VM_PROT_READ) ? "read " : ""),
+ ((mappings[i].sms_init_prot & VM_PROT_WRITE) ? "write " : ""),
+ ((mappings[i].sms_init_prot & VM_PROT_EXECUTE) ? "execute " : ""),
+ mappingName);
+ }
+}
#endif
static bool preflightCacheFile(const SharedCacheOptions& options, SharedCacheLoadInfo* results, CacheInfo* info)
uint64_t slideInfoFileOffset = 0;
uint64_t slideInfoFileSize = 0;
vm_prot_t authProt = 0;
+ vm_prot_t initProt = fileMappings[i].initProt;
if ( cache->header.mappingOffset <= __offsetof(dyld_cache_header, mappingWithSlideOffset) ) {
// Old cache without the new slid mappings
if ( i == 1 ) {
slideInfoFileSize = slidableMappings[i].slideInfoFileSize;
if ( (slidableMappings[i].flags & DYLD_CACHE_MAPPING_AUTH_DATA) == 0 )
authProt = VM_PROT_NOAUTH;
+ if ( (slidableMappings[i].flags & DYLD_CACHE_MAPPING_CONST_DATA) != 0 ) {
+ // The cache was built with __DATA_CONST being read-only. We can override that
+ // with a boot-arg
+ if ( !gEnableSharedCacheDataConst )
+ initProt |= VM_PROT_WRITE;
+ }
}
// Add a file for each mapping
info->mappings[i].sms_slide_size = 0;
info->mappings[i].sms_slide_start = 0;
info->mappings[i].sms_max_prot = fileMappings[i].maxProt;
- info->mappings[i].sms_init_prot = fileMappings[i].initProt;
+ info->mappings[i].sms_init_prot = initProt;
if ( slideInfoFileSize != 0 ) {
uint64_t offsetInLinkEditRegion = (slideInfoFileOffset - linkeditMapping->fileOffset);
info->mappings[i].sms_slide_start = (user_addr_t)(linkeditMapping->address + offsetInLinkEditRegion);
// we don't know the path this cache was previously loaded from, assume default
getCachePath(options, sizeof(results->path), results->path);
if ( options.verbose ) {
- const shared_file_mapping_np* const mappings = (shared_file_mapping_np*)(cacheBaseAddress + existingCache->header.mappingOffset);
+ const dyld_cache_mapping_and_slide_info* const mappings = (const dyld_cache_mapping_and_slide_info*)(cacheBaseAddress + existingCache->header.mappingWithSlideOffset);
dyld::log("re-using existing shared cache (%s):\n", results->path);
shared_file_mapping_slide_np slidMappings[DyldSharedCache::MaxMappings];
for (int i=0; i < DyldSharedCache::MaxMappings; ++i) {
- slidMappings[i].sms_address = mappings[i].sfm_address;
- slidMappings[i].sms_size = mappings[i].sfm_size;
- slidMappings[i].sms_file_offset = mappings[i].sfm_file_offset;
- slidMappings[i].sms_max_prot = mappings[i].sfm_max_prot;
- slidMappings[i].sms_init_prot = mappings[i].sfm_init_prot;
-
+ slidMappings[i].sms_address = mappings[i].address;
+ slidMappings[i].sms_size = mappings[i].size;
+ slidMappings[i].sms_file_offset = mappings[i].fileOffset;
+ slidMappings[i].sms_max_prot = mappings[i].maxProt;
+ slidMappings[i].sms_init_prot = mappings[i].initProt;
slidMappings[i].sms_address += results->slide;
if ( existingCache->header.mappingOffset > __offsetof(dyld_cache_header, mappingWithSlideOffset) ) {
// New caches have slide info on each new mapping
slidMappings[i].sms_max_prot |= VM_PROT_NOAUTH;
slidMappings[i].sms_init_prot |= VM_PROT_NOAUTH;
}
+ if ( (slidableMappings[i].flags & DYLD_CACHE_MAPPING_CONST_DATA) != 0 ) {
+ // The cache was built with __DATA_CONST being read-only. We can override that
+ // with a boot-arg
+ if ( !gEnableSharedCacheDataConst )
+ slidMappings[i].sms_init_prot |= VM_PROT_WRITE;
+ }
}
}
verboseSharedCacheMappings(slidMappings, existingCache->header.mappingCount);
else {
results->errorMessage = "existing shared cache in memory is not compatible";
}
+
return true;
}
return false;
results->loadAddress = (const DyldSharedCache*)(info.mappings[0].sms_address);
if ( info.mappingsCount != 3 ) {
// We don't know our own slide any more as the kernel owns it, so ask for it again now
- if ( reuseExistingCache(options, results) )
+ if ( reuseExistingCache(options, results) ) {
+
+ // update mappings based on the slide the kernel chose
+ for (uint32_t i=0; i < info.mappingsCount; ++i) {
+ info.mappings[i].sms_address += results->slide;
+ if ( info.mappings[i].sms_slide_size != 0 )
+ info.mappings[i].sms_slide_start += (uint32_t)results->slide;
+ }
+
+ if ( options.verbose )
+ verboseSharedCacheMappingsToConsole(info.mappings, info.mappingsCount);
return true;
+ }
// Uh oh, we mapped the kernel, but we didn't find the slide
- dyld::logToConsole("dyld: error finding shared cache slide for system wide mapping\n");
+ if ( options.verbose )
+ dyld::logToConsole("dyld: error finding shared cache slide for system wide mapping\n");
return false;
}
}
#if TARGET_OS_SIMULATOR // simulator caches do not support sliding
return true;
#else
+
+ // Change __DATA_CONST to read-write for this block
+ DyldSharedCache::DataConstScopedWriter patcher(results->loadAddress, mach_task_self(), options.verbose ? &dyld::log : nullptr);
+
__block bool success = true;
for (int i=0; i < info.mappingsCount; ++i) {
if ( info.mappings[i].sms_slide_size == 0 )
#include <stdint.h>
#include "DyldSharedCache.h"
+#include "PointerAuth.h"
namespace dyld3 {
};
struct SharedCacheLoadInfo {
- const DyldSharedCache* loadAddress;
+ typedef const DyldSharedCache* __ptrauth_dyld_address_auth DyldCachePtrType;
+ DyldCachePtrType loadAddress;
long slide;
const char* errorMessage;
char path[256];
const fsid_t fsid,
const mach_header* load_addr)
{
- uint64_t id = kdebug_trace_string(code, 0, imagePath);
+ uint64_t id = kdebug_trace_string(KDBG_CODE(DBG_DYLD, DBG_DYLD_UUID, code), 0, imagePath);
#if __ARM_ARCH_7K__
uint32_t *uuid = (uint32_t *)uuid_bytes;
kdebug_trace(KDBG_CODE(DBG_DYLD, DBG_DYLD_UUID, code + 2), uuid[0],
((uint64_t)fsobjid.fid_generation << 32),
id, 0, 0);
#endif /* !__ARM_ARCH_7K__ */
- kdebug_trace_string(code, id, nullptr);
+ kdebug_trace_string(KDBG_CODE(DBG_DYLD, DBG_DYLD_UUID, code), id, nullptr);
}
// FIXME
#define DBG_DYLD_DEBUGGING_VM_UNMAP (KDBG_CODE(DBG_DYLD, DBG_DYLD_DEBUGGING_SUBCLASS, 1))
#define DBG_DYLD_DEBUGGING_MAP_LOOP (KDBG_CODE(DBG_DYLD, DBG_DYLD_DEBUGGING_SUBCLASS, 2))
#define DBG_DYLD_DEBUGGING_MARK (KDBG_CODE(DBG_DYLD, DBG_DYLD_DEBUGGING_SUBCLASS, 3))
-
+#define DBG_DYLD_TASK_NOTIFY_REGISTER (KDBG_CODE(DBG_DYLD, DBG_DYLD_DEBUGGING_SUBCLASS, 4))
+#define DBG_DYLD_TASK_NOTIFY_DEREGISTER (KDBG_CODE(DBG_DYLD, DBG_DYLD_DEBUGGING_SUBCLASS, 5))
#define VIS_HIDDEN __attribute__((visibility("hidden")))
VIS_HIDDEN const char** appleParams;
-extern bool gUseDyld3;
+extern void* __ptrauth_dyld_address_auth gUseDyld3;
+extern bool gEnableSharedCacheDataConst;
namespace dyld3 {
return argv0;
}
-static void entry_setVars(const mach_header* mainMH, int argc, const char* argv[], const char* envp[], const char* apple[], bool keysOff, bool platformBinariesOnly)
+static void entry_setVars(const mach_header* mainMH, int argc, const char* argv[], const char* envp[], const char* apple[],
+ bool keysOff, bool platformBinariesOnly, bool enableSharedCacheDataConst)
{
NXArgc = argc;
NXArgv = argv;
sVars.__prognamePtr = &__progname;
gAllImages.setProgramVars(&sVars, keysOff, platformBinariesOnly);
- gUseDyld3 = true;
+ gUseDyld3 = (void*)1;
setLoggingFromEnvs(envp);
+
+ gEnableSharedCacheDataConst = enableSharedCacheDataConst;
}
static void entry_setHaltFunction(void (*func)(const char* message) __attribute__((noreturn)) )
}
static void entry_setInitialImageList(const closure::LaunchClosure* closure,
- const DyldSharedCache* dyldCacheLoadAddress, const char* dyldCachePath,
- const Array<LoadedImage>& initialImages, LoadedImage& libSystem)
+ const DyldSharedCache* dyldCacheLoadAddress, const char* dyldCachePath,
+ const Array<LoadedImage>& initialImages, LoadedImage& libSystem,
+ mach_port_t mach_task_self)
{
gAllImages.init(closure, dyldCacheLoadAddress, dyldCachePath, initialImages);
- gAllImages.applyInterposingToDyldCache(closure);
+ gAllImages.applyInterposingToDyldCache(closure, mach_task_self);
// run initializer for libSytem.B.dylib
// this calls back into _dyld_initializer which calls gAllIimages.addImages()
#define __DYLD_ENTRY_VECTOR_H__
#include <mach-o/loader.h>
+#include <sys/types.h>
#include <Availability.h>
#include "Loading.h"
uint32_t vectorVersion; // should be kCurrentVectorVersion
uint32_t binaryFormatVersion; // should be dyld3::closure::kFormatVersion
- void (*setVars)(const mach_header* mainMH, int argc, const char* argv[], const char* envp[], const char* apple[], bool keysOff, bool platformBinariesOnly);
+ void (*setVars)(const mach_header* mainMH, int argc, const char* argv[], const char* envp[], const char* apple[],
+ bool keysOff, bool platformBinariesOnly, bool enableSharedCacheDataConst);
void (*setHaltFunction)(void (*func)(const char* message) __attribute__((noreturn)) );
void (*setOldAllImageInfo)(dyld_all_image_infos*);
void (*setInitialImageList)(const closure::LaunchClosure* closure,
- const DyldSharedCache* dyldCacheLoadAddress, const char* dyldCachePath,
- const Array<LoadedImage>& initialImages, LoadedImage& libSystem);
+ const DyldSharedCache* dyldCacheLoadAddress, const char* dyldCachePath,
+ const Array<LoadedImage>& initialImages, LoadedImage& libSystem,
+ mach_port_t mach_task_self);
void (*runInitialzersBottomUp)(const mach_header* topImageLoadAddress);
void (*startFunc)();
// added in version 3
// kclist needs this segment, even if its empty, so leave it in there
readOnlyTextRegion.bufferSize = align(offsetInRegion, 14);
readOnlyTextRegion.sizeInUse = readOnlyTextRegion.bufferSize;
- readOnlyTextRegion.permissions = VM_PROT_READ;
+ readOnlyTextRegion.initProt = VM_PROT_READ;
+ readOnlyTextRegion.maxProt = VM_PROT_READ;
readOnlyTextRegion.name = "__PRELINK_TEXT";
}
// align r/x region end
readExecuteRegion.bufferSize = align(offsetInRegion, 14);
readExecuteRegion.sizeInUse = readExecuteRegion.bufferSize;
- readExecuteRegion.permissions = VM_PROT_READ | VM_PROT_EXECUTE;
+ readExecuteRegion.initProt = VM_PROT_READ | VM_PROT_EXECUTE;
+ readExecuteRegion.maxProt = VM_PROT_READ | VM_PROT_EXECUTE;
readExecuteRegion.name = "__TEXT_EXEC";
}
// 6-bytes per jmpq
branchStubsRegion.bufferSize = align(branchTargetsFromKexts * 6, 14);
branchStubsRegion.sizeInUse = branchStubsRegion.bufferSize;
- branchStubsRegion.permissions = VM_PROT_READ | VM_PROT_EXECUTE;
+ branchStubsRegion.initProt = VM_PROT_READ | VM_PROT_EXECUTE;
+ branchStubsRegion.maxProt = VM_PROT_READ | VM_PROT_EXECUTE;
branchStubsRegion.name = "__BRANCH_STUBS";
}
// align r/o region end
dataConstRegion.bufferSize = align(offsetInRegion, 14);
dataConstRegion.sizeInUse = dataConstRegion.bufferSize;
- dataConstRegion.permissions = VM_PROT_READ;
+ dataConstRegion.initProt = VM_PROT_READ;
+ dataConstRegion.maxProt = VM_PROT_READ;
dataConstRegion.name = "__DATA_CONST";
}
// 8-bytes per GOT
branchGOTsRegion.bufferSize = align(branchTargetsFromKexts * 8, 14);
branchGOTsRegion.sizeInUse = branchGOTsRegion.bufferSize;
- branchGOTsRegion.permissions = VM_PROT_READ | VM_PROT_WRITE;
+ branchGOTsRegion.initProt = VM_PROT_READ | VM_PROT_WRITE;
+ branchGOTsRegion.maxProt = VM_PROT_READ | VM_PROT_WRITE;
branchGOTsRegion.name = "__BRANCH_GOTS";
}
// align r/w region end
readWriteRegion.bufferSize = align(offsetInRegion, 14);
readWriteRegion.sizeInUse = readWriteRegion.bufferSize;
- readWriteRegion.permissions = VM_PROT_READ | VM_PROT_WRITE;
+ readWriteRegion.initProt = VM_PROT_READ | VM_PROT_WRITE;
+ readWriteRegion.maxProt = VM_PROT_READ | VM_PROT_WRITE;
readWriteRegion.name = "__DATA";
}
hibernateRegion.bufferSize = align(offsetInRegion, 14);
hibernateRegion.sizeInUse = hibernateRegion.bufferSize;
- hibernateRegion.permissions = VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE;
+ hibernateRegion.initProt = VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE;
+ hibernateRegion.maxProt = VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE;
hibernateRegion.name = "__HIB";
}
return;
nonSplitSegRegions.emplace_back();
- nonSplitSegRegions.back().permissions = segInfo.protections;
+ nonSplitSegRegions.back().initProt = segInfo.protections;
+ nonSplitSegRegions.back().maxProt = segInfo.protections;
nonSplitSegRegions.back().name = "__REGION" + std::to_string(nonSplitSegRegions.size() - 1);
// Note we don't align the region offset as we have no split seg
// align region end
customRegion.bufferSize = align(offsetInRegion, 14);
customRegion.sizeInUse = customRegion.bufferSize;
- customRegion.permissions = VM_PROT_READ;
+ customRegion.initProt = VM_PROT_READ;
+ customRegion.maxProt = VM_PROT_READ;
customRegion.name = segment.segmentName;
}
}
// align region end
prelinkInfoRegion.bufferSize = align(xmlDataLength, 14);
prelinkInfoRegion.sizeInUse = prelinkInfoRegion.bufferSize;
- prelinkInfoRegion.permissions = VM_PROT_READ | VM_PROT_WRITE;
+ prelinkInfoRegion.initProt = VM_PROT_READ | VM_PROT_WRITE;
+ prelinkInfoRegion.maxProt = VM_PROT_READ | VM_PROT_WRITE;
prelinkInfoRegion.name = "__PRELINK_INFO";
}
}
// align r/o region end
_readOnlyRegion.bufferSize = align(offsetInRegion, 14);
_readOnlyRegion.sizeInUse = _readOnlyRegion.bufferSize;
- _readOnlyRegion.permissions = VM_PROT_READ;
+ _readOnlyRegion.initProt = VM_PROT_READ;
+ _readOnlyRegion.maxProt = VM_PROT_READ;
_readOnlyRegion.name = "__LINKEDIT";
// Add space in __LINKEDIT for chained fixups and classic relocs
uint64_t numBytes = align(numBytesForChainedFixups, 3) + align(numBytesForClassicRelocs, 3);
fixupsSubRegion.bufferSize = align(numBytes, 14);
fixupsSubRegion.sizeInUse = fixupsSubRegion.bufferSize;
- fixupsSubRegion.permissions = VM_PROT_READ;
+ fixupsSubRegion.initProt = VM_PROT_READ;
+ fixupsSubRegion.maxProt = VM_PROT_READ;
fixupsSubRegion.name = "__FIXUPS";
}
}
cacheHeaderRegion.bufferSize = cacheHeaderRegionSize;
cacheHeaderRegion.sizeInUse = cacheHeaderRegion.bufferSize;
cacheHeaderRegion.cacheFileOffset = 0;
- cacheHeaderRegion.permissions = VM_PROT_READ;
+ cacheHeaderRegion.initProt = VM_PROT_READ;
+ cacheHeaderRegion.maxProt = VM_PROT_READ;
cacheHeaderRegion.name = "__TEXT";
#if 0
}
for (auto& regionAndOffset : regions) {
- assert(regionAndOffset.first->permissions != 0);
+ assert(regionAndOffset.first->initProt != 0);
+ assert(regionAndOffset.first->maxProt != 0);
segment_command_64* loadCommand = (segment_command_64*)(cacheHeaderRegion.buffer + regionAndOffset.second);
header.segments.push_back({ loadCommand, regionAndOffset.first });
}
region->index = segmentIndex;
++segmentIndex;
- assert(region->permissions != 0);
+ assert(region->initProt != 0);
+ assert(region->maxProt != 0);
const char* name = region->name.c_str();
cmd->set_vmsize(region->sizeInUse);
cmd->set_fileoff(region->cacheFileOffset);
cmd->set_filesize(region->sizeInUse);
- cmd->set_maxprot(region->permissions);
- cmd->set_initprot(region->permissions);
+ cmd->set_maxprot(region->maxProt);
+ cmd->set_initprot(region->initProt);
cmd->set_nsects(0);
cmd->set_flags(0);
uint64_t sizeInUse = 0;
uint64_t unslidLoadAddress = 0;
uint64_t cacheFileOffset = 0;
- uint8_t permissions = 0;
+ uint8_t initProt = 0;
+ uint8_t maxProt = 0;
std::string name;
uint64_t index = ~0ULL; // The index of this region in the final binary
#include <sstream>
#endif
+#if (BUILDING_LIBDYLD || BUILDING_DYLD)
+// Global opt-in switch for the read-only __DATA_CONST shared cache region.
+// When false, DataConstLazyScopedWriter::makeWriteable() is a no-op, so the
+// cache mappings are never re-protected at runtime.  VIS_HIDDEN: internal to
+// dyld/libdyld only.
+VIS_HIDDEN bool gEnableSharedCacheDataConst = false;
+#endif
+
#if BUILDING_CACHE_BUILDER
DyldSharedCache::CreateResults DyldSharedCache::create(const CreateOptions& options,
return mappings[header.mappingCount-1].address + mappings[header.mappingCount-1].size;
}
-void DyldSharedCache::forEachRegion(void (^handler)(const void* content, uint64_t vmAddr, uint64_t size, uint32_t permissions,
- uint64_t flags)) const
+void DyldSharedCache::forEachRegion(void (^handler)(const void* content, uint64_t vmAddr, uint64_t size,
+ uint32_t initProt, uint32_t maxProt, uint64_t flags)) const
{
// <rdar://problem/49875993> sanity check cache header
if ( strncmp(header.magic, "dyld_v1", 7) != 0 )
const dyld_cache_mapping_info* mappings = (dyld_cache_mapping_info*)((char*)this + header.mappingOffset);
const dyld_cache_mapping_info* mappingsEnd = &mappings[header.mappingCount];
for (const dyld_cache_mapping_info* m=mappings; m < mappingsEnd; ++m) {
- handler((char*)this + m->fileOffset, m->address, m->size, m->initProt, 0);
+ handler((char*)this + m->fileOffset, m->address, m->size, m->initProt, m->maxProt, 0);
}
} else {
const dyld_cache_mapping_and_slide_info* mappings = (const dyld_cache_mapping_and_slide_info*)((char*)this + header.mappingWithSlideOffset);
const dyld_cache_mapping_and_slide_info* mappingsEnd = &mappings[header.mappingCount];
for (const dyld_cache_mapping_and_slide_info* m=mappings; m < mappingsEnd; ++m) {
- handler((char*)this + m->fileOffset, m->address, m->size, m->initProt, m->flags);
+ handler((char*)this + m->fileOffset, m->address, m->size, m->initProt, m->maxProt, m->flags);
}
}
}
__block std::vector<uint64_t> regionFileOffsets;
result.reserve(256*1024);
- forEachRegion(^(const void* content, uint64_t vmAddr, uint64_t size, uint32_t permissions,
- uint64_t flags) {
+ forEachRegion(^(const void* content, uint64_t vmAddr, uint64_t size,
+ uint32_t initProt, uint32_t maxProt, uint64_t flags) {
regionStartAddresses.push_back(vmAddr);
regionSizes.push_back(size);
regionFileOffsets.push_back((uint8_t*)content - (uint8_t*)this);
char lineBuffer[256];
const char* prot = "RW";
- if ( permissions == (VM_PROT_EXECUTE|VM_PROT_READ) )
+ if ( maxProt == (VM_PROT_EXECUTE|VM_PROT_READ) )
prot = "EX";
- else if ( permissions == VM_PROT_READ )
+ else if ( maxProt == VM_PROT_READ )
prot = "RO";
if ( size > 1024*1024 )
sprintf(lineBuffer, "mapping %s %4lluMB 0x%0llX -> 0x%0llX\n", prot, size/(1024*1024), vmAddr, vmAddr+size);
{
__block uint64_t startAddr = 0;
__block uint64_t endAddr = 0;
- forEachRegion(^(const void* content, uint64_t vmAddr, uint64_t size, uint32_t permissions,
- uint64_t flags) {
+ forEachRegion(^(const void* content, uint64_t vmAddr, uint64_t size,
+ uint32_t initProt, uint32_t maxProt, uint64_t flags) {
if ( startAddr == 0 )
startAddr = vmAddr;
uint64_t end = vmAddr+size;
}
}
+#if (BUILDING_LIBDYLD || BUILDING_DYLD)
+// Re-protects every mapping of this cache that is tagged
+// DYLD_CACHE_MAPPING_CONST_DATA (i.e. __DATA_CONST) in the given task to
+// 'permissions'.  Mappings without that flag are left untouched.
+// 'logFunc' may be nullptr; when set, each region change and any
+// vm_protect failure is logged through it.
+void DyldSharedCache::changeDataConstPermissions(mach_port_t machTask, uint32_t permissions,
+                                                 DataConstLogFunc logFunc) const {
+
+    const dyld_cache_mapping_info* mappings = (dyld_cache_mapping_info*)((char*)this + header.mappingOffset);
+    // Slide = where the cache is actually loaded minus its preferred load address.
+    uintptr_t slide = (uintptr_t)this - (uintptr_t)(mappings[0].address);
+
+    // Making shared cache pages writable must be copy-on-write so that other
+    // processes sharing the cache never observe this task's mutations.
+    if ( (permissions & VM_PROT_WRITE) != 0 )
+        permissions |= VM_PROT_COPY;
+
+    forEachRegion(^(const void *, uint64_t vmAddr, uint64_t size,
+                    uint32_t initProt, uint32_t maxProt, uint64_t flags) {
+        void* content = (void*)(vmAddr + slide);
+        // Only regions explicitly marked as const data are re-protected.
+        if ( ( flags & DYLD_CACHE_MAPPING_CONST_DATA) == 0 )
+            return;
+        if ( logFunc != nullptr ) {
+            logFunc("dyld: marking shared cache range 0x%x permissions: 0x%09lX -> 0x%09lX\n",
+                    permissions, (long)content, (long)content + size);
+        }
+        kern_return_t result = vm_protect(machTask, (vm_address_t)content, (vm_size_t)size, false, permissions);
+        if ( result != KERN_SUCCESS ) {
+            // Best effort: log and continue with the remaining regions.
+            if ( logFunc != nullptr )
+                logFunc("dyld: failed to mprotect shared cache due to: %d\n", result);
+        }
+    });
+}
+
+// Lazy RAII writer: construction only records the target cache/task/logger;
+// no protections change until makeWriteable() is called.
+DyldSharedCache::DataConstLazyScopedWriter::DataConstLazyScopedWriter(const DyldSharedCache* cache, mach_port_t machTask, DataConstLogFunc logFunc)
+    : cache(cache), machTask(machTask), logFunc(logFunc) {
+}
+
+// Restores __DATA_CONST to read-only — but only if makeWriteable() actually
+// made it writable during this scope.
+DyldSharedCache::DataConstLazyScopedWriter::~DataConstLazyScopedWriter() {
+    if ( wasMadeWritable )
+        cache->changeDataConstPermissions(machTask, VM_PROT_READ, logFunc);
+}
+
+// Makes the cache's __DATA_CONST mappings read-write for the remainder of
+// this object's scope.  Idempotent, and a no-op when the
+// gEnableSharedCacheDataConst feature is disabled or there is no cache.
+void DyldSharedCache::DataConstLazyScopedWriter::makeWriteable() {
+    // Already writable: nothing to do.
+    if ( wasMadeWritable )
+        return;
+    // Feature disabled: __DATA_CONST was never mapped read-only.
+    if ( !gEnableSharedCacheDataConst )
+        return;
+    if ( cache == nullptr )
+        return;
+    // Record first so the destructor knows to restore read-only.
+    wasMadeWritable = true;
+    cache->changeDataConstPermissions(machTask, VM_PROT_READ | VM_PROT_WRITE, logFunc);
+}
+
+// Eager variant: makes __DATA_CONST writable immediately on construction;
+// the wrapped lazy writer's destructor restores read-only at end of scope.
+DyldSharedCache::DataConstScopedWriter::DataConstScopedWriter(const DyldSharedCache* cache, mach_port_t machTask, DataConstLogFunc logFunc)
+    : writer(cache, machTask, logFunc) {
+    writer.makeWriteable();
+}
+#endif
+
#if !(BUILDING_LIBDYLD || BUILDING_DYLD)
// MRM map file generator
std::string DyldSharedCache::generateJSONMap(const char* disposition) const {
#include <TargetConditionals.h>
#include <uuid/uuid.h>
+#if (BUILDING_LIBDYLD || BUILDING_DYLD)
+#include <sys/types.h>
+#endif
+
#if !(BUILDING_LIBDYLD || BUILDING_DYLD)
#include <set>
#include <string>
//
// Iterates over each of the three regions in the cache
//
- void forEachRegion(void (^handler)(const void* content, uint64_t vmAddr, uint64_t size, uint32_t permissions,
- uint64_t flags)) const;
+ void forEachRegion(void (^handler)(const void* content, uint64_t vmAddr, uint64_t size,
+ uint32_t initProt, uint32_t maxProt, uint64_t flags)) const;
//
return dummy.arm64e.keyName();
}
+#if (BUILDING_LIBDYLD || BUILDING_DYLD)
+
+    // printf-style logger used by the __DATA_CONST protection helpers below.
+    typedef void (*DataConstLogFunc)(const char*, ...) __attribute__((format(printf, 1, 2)));
+    // Re-protects all DYLD_CACHE_MAPPING_CONST_DATA mappings of this cache in
+    // 'machTask' to 'permissions'.  'logFunc' may be nullptr.
+    void changeDataConstPermissions(mach_port_t machTask, uint32_t permissions, DataConstLogFunc logFunc) const;
+
+    // RAII helper that makes __DATA_CONST writable on demand (makeWriteable())
+    // and restores it to read-only in its destructor if it was made writable.
+    struct DataConstLazyScopedWriter {
+        DataConstLazyScopedWriter(const DyldSharedCache* cache, mach_port_t machTask, DataConstLogFunc logFunc);
+        ~DataConstLazyScopedWriter();
+
+        // Delete all other kinds of constructors to make sure we don't accidentally copy these around
+        DataConstLazyScopedWriter() = delete;
+        DataConstLazyScopedWriter(const DataConstLazyScopedWriter&) = delete;
+        DataConstLazyScopedWriter(DataConstLazyScopedWriter&&) = delete;
+        DataConstLazyScopedWriter& operator=(const DataConstLazyScopedWriter&) = delete;
+        DataConstLazyScopedWriter& operator=(DataConstLazyScopedWriter&&) = delete;
+
+        // First call flips __DATA_CONST to read-write; later calls are no-ops.
+        void makeWriteable();
+
+        const DyldSharedCache* cache = nullptr;
+        mach_port_t machTask = MACH_PORT_NULL;
+        DataConstLogFunc logFunc = nullptr;
+        bool wasMadeWritable = false;         // tracked so the dtor only restores if needed
+    };
+
+    // RAII helper that makes __DATA_CONST writable for its entire scope;
+    // implemented by eagerly triggering the lazy writer it wraps.
+    struct DataConstScopedWriter {
+        DataConstScopedWriter(const DyldSharedCache* cache, mach_port_t machTask, DataConstLogFunc logFunc);
+        ~DataConstScopedWriter() = default;
+
+        // Delete all other kinds of constructors to make sure we don't accidentally copy these around
+        DataConstScopedWriter() = delete;
+        DataConstScopedWriter(const DataConstScopedWriter&) = delete;
+        DataConstScopedWriter(DataConstScopedWriter&&) = delete;
+        DataConstScopedWriter& operator=(const DataConstScopedWriter&) = delete;
+        DataConstScopedWriter& operator=(DataConstScopedWriter&&) = delete;
+
+        DataConstLazyScopedWriter writer;
+    };
+#endif
+
#if !(BUILDING_LIBDYLD || BUILDING_DYLD)
// MRM map file generator
std::string generateJSONMap(const char* disposition) const;
void addObjcSegments(Diagnostics& diag, DyldSharedCache* cache, const mach_header* libobjcMH,
uint8_t* objcReadOnlyBuffer, uint64_t objcReadOnlyBufferSizeAllocated,
uint8_t* objcReadWriteBuffer, uint64_t objcReadWriteBufferSizeAllocated,
- uint32_t objcRwFileOffset)
+ uint64_t objcRwFileOffset)
{
// validate there is enough free space to add the load commands
const dyld3::MachOAnalyzer* libobjcMA = ((dyld3::MachOAnalyzer*)libobjcMH);
const std::map<void*, std::string>& missingWeakImports, Diagnostics& diag,
uint8_t* objcReadOnlyBuffer, uint64_t objcReadOnlyBufferSizeUsed, uint64_t objcReadOnlyBufferSizeAllocated,
uint8_t* objcReadWriteBuffer, uint64_t objcReadWriteBufferSizeAllocated,
- uint32_t objcRwFileOffset,
+ uint64_t objcRwFileOffset,
std::vector<CacheBuilder::DylibInfo> & allDylibs,
const std::vector<const IMPCaches::Selector*> & inlinedSelectors,
bool impCachesSuccess,
void SharedCacheBuilder::optimizeObjC(bool impCachesSuccess, const std::vector<const IMPCaches::Selector*> & inlinedSelectors)
{
- // FIXME: Can we move the objc RW content to the __DATA_CONST region?
- // For now, it is always at the end of the last region
- const Region* readWriteRegion = lastDataRegion();
- uint32_t objcRwFileOffset = (uint32_t)((_objcReadWriteBuffer - readWriteRegion->buffer) + readWriteRegion->cacheFileOffset);
if ( _archLayout->is64 )
doOptimizeObjC<Pointer64<LittleEndian>>((DyldSharedCache*)_readExecuteRegion.buffer,
_options.optimizeStubs,
_objcReadOnlyBufferSizeUsed,
_objcReadOnlyBufferSizeAllocated,
_objcReadWriteBuffer, _objcReadWriteBufferSizeAllocated,
- objcRwFileOffset, _sortedDylibs, inlinedSelectors, impCachesSuccess, _timeRecorder);
+ _objcReadWriteFileOffset, _sortedDylibs, inlinedSelectors, impCachesSuccess, _timeRecorder);
else
doOptimizeObjC<Pointer32<LittleEndian>>((DyldSharedCache*)_readExecuteRegion.buffer,
_options.optimizeStubs,
_objcReadOnlyBufferSizeUsed,
_objcReadOnlyBufferSizeAllocated,
_objcReadWriteBuffer, _objcReadWriteBufferSizeAllocated,
- objcRwFileOffset, _sortedDylibs, inlinedSelectors, impCachesSuccess, _timeRecorder);
+ _objcReadWriteFileOffset, _sortedDylibs, inlinedSelectors, impCachesSuccess, _timeRecorder);
}
static uint32_t hashTableSize(uint32_t maxElements, uint32_t perElementData)
if ( i == 0 ) {
assert(_dataRegions[i].cacheFileOffset == _readExecuteRegion.sizeInUse);
}
+
+ assert(_dataRegions[i].initProt != 0);
+ assert(_dataRegions[i].maxProt != 0);
+
mappings[i + 1].address = _dataRegions[i].unslidLoadAddress;
mappings[i + 1].fileOffset = _dataRegions[i].cacheFileOffset;
mappings[i + 1].size = _dataRegions[i].sizeInUse;
- mappings[i + 1].maxProt = VM_PROT_READ | VM_PROT_WRITE;
- mappings[i + 1].initProt = VM_PROT_READ | VM_PROT_WRITE;
+ mappings[i + 1].maxProt = _dataRegions[i].maxProt;
+ mappings[i + 1].initProt = _dataRegions[i].initProt;
}
assert(_readOnlyRegion.cacheFileOffset == (_dataRegions.back().cacheFileOffset + _dataRegions.back().sizeInUse));
mappings[mappingCount - 1].address = _readOnlyRegion.unslidLoadAddress;
flags |= DYLD_CACHE_MAPPING_CONST_DATA;
}
+ assert(_dataRegions[i].initProt != 0);
+ assert(_dataRegions[i].maxProt != 0);
+
slidableMappings[i + 1].address = _dataRegions[i].unslidLoadAddress;
slidableMappings[i + 1].fileOffset = _dataRegions[i].cacheFileOffset;
slidableMappings[i + 1].size = _dataRegions[i].sizeInUse;
- slidableMappings[i + 1].maxProt = VM_PROT_READ | VM_PROT_WRITE;
- slidableMappings[i + 1].initProt = VM_PROT_READ | VM_PROT_WRITE;
+ slidableMappings[i + 1].maxProt = _dataRegions[i].maxProt;
+ slidableMappings[i + 1].initProt = _dataRegions[i].initProt;
slidableMappings[i + 1].slideInfoFileOffset = _dataRegions[i].slideInfoFileOffset;
slidableMappings[i + 1].slideInfoFileSize = _dataRegions[i].slideInfoFileSize;
slidableMappings[i + 1].flags = flags;
uint64_t nextRegionFileOffset = _readExecuteRegion.sizeInUse;
const size_t dylibCount = _sortedDylibs.size();
- uint32_t dirtyDataSortIndexes[dylibCount];
+ BLOCK_ACCCESSIBLE_ARRAY(uint32_t, dirtyDataSortIndexes, dylibCount);
for (size_t i=0; i < dylibCount; ++i)
dirtyDataSortIndexes[i] = (uint32_t)i;
std::sort(&dirtyDataSortIndexes[0], &dirtyDataSortIndexes[dylibCount], [&](const uint32_t& a, const uint32_t& b) {
return _sortedDylibs[a].input->mappedFile.runtimePath < _sortedDylibs[b].input->mappedFile.runtimePath;
});
- // Work out if we'll have __AUTH regions, as the objc RW has to go at the end of __AUTH if it exists, or
- // the end of __DATA if we have no __AUTH
- __block bool foundAuthenticatedFixups = false;
+ bool supportsAuthFixups = false;
// This tracks which segments contain authenticated data, even if their name isn't __AUTH*
- std::map<const DylibInfo*, std::set<uint32_t>> authenticatedSegments;
-
+ std::set<uint32_t> authenticatedSegments[dylibCount];
if ( strcmp(_archLayout->archName, "arm64e") == 0 ) {
+ supportsAuthFixups = true;
+
for (DylibInfo& dylib : _sortedDylibs) {
- __block std::set<uint32_t>& authSegmentIndices = authenticatedSegments[&dylib];
+ uint64_t dylibIndex = &dylib - _sortedDylibs.data();
+ __block std::set<uint32_t>& authSegmentIndices = authenticatedSegments[dylibIndex];
// Put all __DATA_DIRTY segments in the __AUTH region first, then we don't need to walk their chains
dylib.input->mappedFile.mh->forEachSegment(^(const dyld3::MachOFile::SegmentInfo& segInfo, bool& stop) {
if ( strcmp(segInfo.segName, "__DATA_DIRTY") == 0 ) {
authSegmentIndices.insert(segInfo.segIndex);
- foundAuthenticatedFixups = true;
stop = true;
}
});
assert( (chainedFixupsFormat == DYLD_CHAINED_PTR_ARM64E) || (chainedFixupsFormat == DYLD_CHAINED_PTR_ARM64E_USERLAND) || (chainedFixupsFormat == DYLD_CHAINED_PTR_ARM64E_USERLAND24) );
if ( fixupLoc->arm64e.authRebase.auth ) {
- foundAuthenticatedFixups = true;
authSegmentIndices.insert(segIndex);
stopChain = true;
return;
}
}
- // __DATA
- {
- Region region;
- region.buffer = (uint8_t*)_fullAllocatedBuffer + addr - _archLayout->sharedMemoryStart;
- region.bufferSize = 0;
- region.sizeInUse = 0;
- region.unslidLoadAddress = addr;
- region.cacheFileOffset = nextRegionFileOffset;
- region.name = "__DATA";
+ // Categorize each segment in each binary
+ enum class SegmentType : uint8_t {
+ skip, // used for non-data segments we should ignore here
+ data,
+ dataDirty,
+ dataConst,
+ auth,
+ authDirty,
+ authConst,
+ };
- // layout all __DATA_CONST/__OBJC_CONST segments
- __block int dataConstSegmentCount = 0;
- for (DylibInfo& dylib : _sortedDylibs) {
- __block uint64_t textSegVmAddr = 0;
- __block std::set<uint32_t>& authSegmentIndices = authenticatedSegments[&dylib];
- dylib.input->mappedFile.mh->forEachSegment(^(const dyld3::MachOFile::SegmentInfo& segInfo, bool& stop) {
- if ( _options.platform == dyld3::Platform::watchOS_simulator && !_is64)
- return;
- if ( strcmp(segInfo.segName, "__TEXT") == 0 )
- textSegVmAddr = segInfo.vmAddr;
- if ( segInfo.protections != (VM_PROT_READ | VM_PROT_WRITE) )
- return;
- if ( (strcmp(segInfo.segName, "__DATA_CONST") != 0) && (strcmp(segInfo.segName, "__OBJC_CONST") != 0) )
- return;
+ BLOCK_ACCCESSIBLE_ARRAY(uint64_t, textSegVmAddrs, dylibCount);
+ BLOCK_ACCCESSIBLE_ARRAY(std::vector<SegmentType>, segmentTypes, dylibCount);
- // We may have coalesced the sections at the end of this segment. In that case, shrink the segment to remove them.
- __block size_t sizeOfSections = 0;
- __block bool foundCoalescedSection = false;
- dylib.input->mappedFile.mh->forEachSection(^(const dyld3::MachOAnalyzer::SectionInfo §Info, bool malformedSectionRange, bool &stopSection) {
- if (strcmp(sectInfo.segInfo.segName, segInfo.segName) != 0)
- return;
- if ( dylib.textCoalescer.sectionWasCoalesced(segInfo.segName, sectInfo.sectName)) {
- foundCoalescedSection = true;
- } else {
- sizeOfSections = sectInfo.sectAddr + sectInfo.sectSize - segInfo.vmAddr;
- }
- });
- if (!foundCoalescedSection)
- sizeOfSections = segInfo.sizeOfSections;
+ // Just in case __AUTH is used in a non-arm64e binary, we can force it to use data enums
+ SegmentType authSegment = supportsAuthFixups ? SegmentType::auth : SegmentType::data;
+ SegmentType authConstSegment = supportsAuthFixups ? SegmentType::authConst : SegmentType::dataConst;
- if ( authSegmentIndices.count(segInfo.segIndex) ) {
- // Only move this segment to __AUTH if it had content we didn't coalesce away
- if ( !foundCoalescedSection || (sizeOfSections != 0) ) {
- // Don't put authenticated __DATA_CONST/__OBJC_CONST in the non-AUTH __DATA mapping
- _diagnostics.verbose("%s: treating authenticated %s as __AUTH_CONST\n", dylib.dylibID.c_str(), segInfo.segName);
- return;
- }
- }
+ for (const DylibInfo& dylib : _sortedDylibs) {
+ uint64_t dylibIndex = &dylib - _sortedDylibs.data();
+ __block std::set<uint32_t>& authSegmentIndices = authenticatedSegments[dylibIndex];
+ __block std::vector<SegmentType>& dylibSegmentTypes = segmentTypes[dylibIndex];
+ uint64_t &textSegVmAddr = textSegVmAddrs[dylibIndex];
+ dylib.input->mappedFile.mh->forEachSegment(^(const dyld3::MachOFile::SegmentInfo& segInfo, bool& stop) {
+ if ( strcmp(segInfo.segName, "__TEXT") == 0 ) {
+ textSegVmAddr = segInfo.vmAddr;
+ }
- ++dataConstSegmentCount;
- // Pack __DATA_CONST segments
- addr = align(addr, segInfo.p2align);
- size_t copySize = std::min((size_t)segInfo.fileSize, (size_t)sizeOfSections);
- uint64_t offsetInRegion = addr - region.unslidLoadAddress;
- SegmentMappingInfo loc;
- loc.srcSegment = (uint8_t*)dylib.input->mappedFile.mh + segInfo.vmAddr - textSegVmAddr;
- loc.segName = segInfo.segName;
- loc.dstSegment = region.buffer + offsetInRegion;
- loc.dstCacheUnslidAddress = addr;
- loc.dstCacheFileOffset = (uint32_t)(region.cacheFileOffset + offsetInRegion);
- loc.dstCacheSegmentSize = (uint32_t)sizeOfSections;
- loc.dstCacheFileSize = (uint32_t)copySize;
- loc.copySegmentSize = (uint32_t)copySize;
- loc.srcSegmentIndex = segInfo.segIndex;
- dylib.cacheLocation.push_back(loc);
- addr += loc.dstCacheSegmentSize;
- });
- }
+ // Skip non-DATA segments
+ if ( segInfo.protections != (VM_PROT_READ | VM_PROT_WRITE) ) {
+ dylibSegmentTypes.push_back(SegmentType::skip);
+ return;
+ }
- // align __DATA_CONST region end
- addr = align(addr, _archLayout->sharedRegionAlignP2);
+ // If we don't have split seg v2, then all remaining segments must look like __DATA so that they
+ // stay contiguous
+ if (!dylib.input->mappedFile.mh->isSplitSegV2()) {
+ dylibSegmentTypes.push_back(SegmentType::data);
+ return;
+ }
- // Make space for the cfstrings
- if ( _coalescedText.cfStrings.bufferSize != 0 ) {
- // Keep __DATA segments 4K or more aligned
- addr = align(addr, 12);
- uint64_t offsetInRegion = addr - region.unslidLoadAddress;
+ __block bool supportsDataConst = true;
+ if ( dylib.input->mappedFile.mh->isSwiftLibrary() ) {
+ uint64_t objcConstSize = 0;
+ bool containsObjCSection = dylib.input->mappedFile.mh->findSectionContent(segInfo.segName, "__objc_const", objcConstSize);
+
+ // <rdar://problem/66284631> Don't put __objc_const read-only memory as Swift has method lists we can't see
+ if ( containsObjCSection )
+ supportsDataConst = false;
+ } else if ( !strcmp(dylib.input->mappedFile.mh->installName(), "/System/Library/Frameworks/Foundation.framework/Foundation") ||
+ !strcmp(dylib.input->mappedFile.mh->installName(), "/System/Library/Frameworks/Foundation.framework/Versions/C/Foundation") ) {
+ // <rdar://problem/69813664> _NSTheOneTruePredicate is incompatible with __DATA_CONST
+ supportsDataConst = false;
+ } else if ( !strcmp(dylib.input->mappedFile.mh->installName(), "/usr/lib/system/libdispatch.dylib") ) {
+ // rdar://72361509 (Speechrecognitiond crashing on AzulE18E123)
+ supportsDataConst = false;
+ } else if ( !strcmp(dylib.input->mappedFile.mh->installName(), "/System/Library/Frameworks/CoreFoundation.framework/CoreFoundation") ||
+ !strcmp(dylib.input->mappedFile.mh->installName(), "/System/Library/Frameworks/CoreFoundation.framework/Versions/A/CoreFoundation") ) {
+ // rdar://74112547 CF writes to kCFNull constant object
+ supportsDataConst = false;
+ }
- CacheCoalescedText::CFSection& cacheSection = _coalescedText.cfStrings;
- cacheSection.bufferAddr = region.buffer + offsetInRegion;
- cacheSection.bufferVMAddr = addr;
- cacheSection.cacheFileOffset = region.cacheFileOffset + offsetInRegion;
- addr += cacheSection.bufferSize;
- }
+ // Don't use data const for dylibs containing resolver functions. This will be fixed in ld64 by moving their pointer atoms to __DATA
+ if ( supportsDataConst && endsWith(segInfo.segName, "_CONST") ) {
+ dylib.input->mappedFile.mh->forEachExportedSymbol(_diagnostics,
+ ^(const char *symbolName, uint64_t imageOffset, uint64_t flags, uint64_t other, const char *importName, bool &stop) {
+ if ( (flags & EXPORT_SYMBOL_FLAGS_STUB_AND_RESOLVER ) != 0 ) {
+ _diagnostics.verbose("%s: preventing use of __DATA_CONST due to resolvers\n", dylib.dylibID.c_str());
+ supportsDataConst = false;
+ stop = true;
+ }
+ });
+ }
- // layout all __DATA_DIRTY segments, sorted (FIXME)
- for (size_t i=0; i < dylibCount; ++i) {
- DylibInfo& dylib = _sortedDylibs[dirtyDataSortIndexes[i]];
- __block uint64_t textSegVmAddr = 0;
- __block std::set<uint32_t>& authSegmentIndices = authenticatedSegments[&dylib];
- dylib.input->mappedFile.mh->forEachSegment(^(const dyld3::MachOFile::SegmentInfo& segInfo, bool& stop) {
- if ( _options.platform == dyld3::Platform::watchOS_simulator && !_is64)
- return;
- if ( strcmp(segInfo.segName, "__TEXT") == 0 )
- textSegVmAddr = segInfo.vmAddr;
- if ( segInfo.protections != (VM_PROT_READ | VM_PROT_WRITE) )
- return;
- if ( strcmp(segInfo.segName, "__DATA_DIRTY") != 0 )
- return;
- if ( authSegmentIndices.count(segInfo.segIndex) ) {
- // Don't put authenticated __DATA_DIRTY in the non-AUTH __DATA mapping
- // This is going to be true for all arm64e __DATA_DIRTY as we move it all, regardless of auth fixups.
- // Given that, don't issue a diagnostic as its really not helpful
- return;
- }
- // Pack __DATA_DIRTY segments
- addr = align(addr, segInfo.p2align);
- size_t copySize = std::min((size_t)segInfo.fileSize, (size_t)segInfo.sizeOfSections);
- uint64_t offsetInRegion = addr - region.unslidLoadAddress;
- SegmentMappingInfo loc;
- loc.srcSegment = (uint8_t*)dylib.input->mappedFile.mh + segInfo.vmAddr - textSegVmAddr;
- loc.segName = segInfo.segName;
- loc.dstSegment = region.buffer + offsetInRegion;
- loc.dstCacheUnslidAddress = addr;
- loc.dstCacheFileOffset = (uint32_t)(region.cacheFileOffset + offsetInRegion);
- loc.dstCacheSegmentSize = (uint32_t)segInfo.sizeOfSections;
- loc.dstCacheFileSize = (uint32_t)copySize;
- loc.copySegmentSize = (uint32_t)copySize;
- loc.srcSegmentIndex = segInfo.segIndex;
- dylib.cacheLocation.push_back(loc);
- addr += loc.dstCacheSegmentSize;
- });
- }
+ // If we are still allowed to use __DATA_CONST, then make sure that we are not using pointer based method lists. These may not be written in libobjc due
+ // to uniquing or sorting (as those are done in the builder), but clients can still call setIMP to mutate them.
+ if ( supportsDataConst && endsWith(segInfo.segName, "_CONST") ) {
+ uint64_t segStartVMAddr = segInfo.vmAddr;
+ uint64_t segEndVMAddr = segInfo.vmAddr + segInfo.vmSize;
- // align __DATA_DIRTY region end
- addr = align(addr, _archLayout->sharedRegionAlignP2);
+ auto vmAddrConverter = dylib.input->mappedFile.mh->makeVMAddrConverter(false);
+ const uint32_t pointerSize = dylib.input->mappedFile.mh->pointerSize();
- // layout all __DATA segments (and other r/w non-dirty, non-const, non-auth) segments
- for (DylibInfo& dylib : _sortedDylibs) {
- __block uint64_t textSegVmAddr = 0;
- __block std::set<uint32_t>& authSegmentIndices = authenticatedSegments[&dylib];
- dylib.input->mappedFile.mh->forEachSegment(^(const dyld3::MachOFile::SegmentInfo& segInfo, bool& stop) {
- if ( strcmp(segInfo.segName, "__TEXT") == 0 )
- textSegVmAddr = segInfo.vmAddr;
- if ( segInfo.protections != (VM_PROT_READ | VM_PROT_WRITE) )
- return;
- if ( _options.platform != dyld3::Platform::watchOS_simulator || _is64) {
- if ( strcmp(segInfo.segName, "__DATA_CONST") == 0 )
+ __block bool foundPointerBasedMethodList = false;
+ auto visitMethodList = ^(uint64_t methodListVMAddr) {
+ if ( foundPointerBasedMethodList )
return;
- if ( strcmp(segInfo.segName, "__DATA_DIRTY") == 0 )
+ if ( methodListVMAddr == 0 )
return;
- if ( strcmp(segInfo.segName, "__OBJC_CONST") == 0 )
+ // Ignore method lists in other segments
+ if ( (methodListVMAddr < segStartVMAddr) || (methodListVMAddr >= segEndVMAddr) )
return;
+ auto visitMethod = ^(uint64_t methodVMAddr, const dyld3::MachOAnalyzer::ObjCMethod& method) { };
+ bool isRelativeMethodList = false;
+ dylib.input->mappedFile.mh->forEachObjCMethod(methodListVMAddr, vmAddrConverter, visitMethod, &isRelativeMethodList);
+ if ( !isRelativeMethodList )
+ foundPointerBasedMethodList = true;
+ };
+
+ auto visitClass = ^(Diagnostics& diag, uint64_t classVMAddr,
+ uint64_t classSuperclassVMAddr, uint64_t classDataVMAddr,
+ const dyld3::MachOAnalyzer::ObjCClassInfo& objcClass, bool isMetaClass) {
+ visitMethodList(objcClass.baseMethodsVMAddr(pointerSize));
+ };
+
+ auto visitCategory = ^(Diagnostics& diag, uint64_t categoryVMAddr,
+ const dyld3::MachOAnalyzer::ObjCCategory& objcCategory) {
+ visitMethodList(objcCategory.instanceMethodsVMAddr);
+ visitMethodList(objcCategory.classMethodsVMAddr);
+ };
+
+ // Walk the class list
+ Diagnostics classDiag;
+ dylib.input->mappedFile.mh->forEachObjCClass(classDiag, vmAddrConverter, visitClass);
+
+ // Walk the category list
+ Diagnostics categoryDiag;
+ dylib.input->mappedFile.mh->forEachObjCCategory(categoryDiag, vmAddrConverter, visitCategory);
+
+ // Note we don't walk protocols as they don't have an IMP to set
+
+ if ( foundPointerBasedMethodList ) {
+ _diagnostics.verbose("%s: preventing use of read-only %s due to pointer based method list\n", dylib.dylibID.c_str(), segInfo.segName);
+ supportsDataConst = false;
}
- // Skip __AUTH* segments as they'll be handled elsewhere
- if ( strncmp(segInfo.segName, "__AUTH", 6) == 0 )
- return;
- if ( authSegmentIndices.count(segInfo.segIndex) ) {
- // Don't put authenticated __DATA in the non-AUTH __DATA mapping
- _diagnostics.verbose("%s: treating authenticated __DATA as __AUTH\n", dylib.dylibID.c_str());
- return;
- }
- bool forcePageAlignedData = false;
- if (_options.platform == dyld3::Platform::macOS) {
- forcePageAlignedData = dylib.input->mappedFile.mh->hasUnalignedPointerFixups();
- //if ( forcePageAlignedData )
- // warning("unaligned pointer in %s\n", dylib.input->mappedFile.runtimePath.c_str());
- }
- if ( (dataConstSegmentCount > 10) && !forcePageAlignedData ) {
- // Pack __DATA segments only if we also have __DATA_CONST segments
- addr = align(addr, segInfo.p2align);
- }
- else {
- // Keep __DATA segments 4K or more aligned
- addr = align(addr, std::max((int)segInfo.p2align, (int)12));
+ }
+
+ // __AUTH_CONST
+ if ( strcmp(segInfo.segName, "__AUTH_CONST") == 0 ) {
+ dylibSegmentTypes.push_back(supportsDataConst ? authConstSegment : authSegment);
+ return;
+ }
+
+ // __DATA_CONST
+ if ( (strcmp(segInfo.segName, "__DATA_CONST") == 0) || (strcmp(segInfo.segName, "__OBJC_CONST") == 0) ) {
+ if ( authSegmentIndices.count(segInfo.segIndex) ) {
+ // _diagnostics.verbose("%s: treating authenticated %s as __AUTH_CONST\n", dylib.dylibID.c_str(), segInfo.segName);
+ dylibSegmentTypes.push_back(supportsDataConst ? SegmentType::authConst : SegmentType::auth);
+ } else {
+ dylibSegmentTypes.push_back(supportsDataConst ? SegmentType::dataConst : SegmentType::data);
}
- size_t copySize = std::min((size_t)segInfo.fileSize, (size_t)segInfo.sizeOfSections);
- uint64_t offsetInRegion = addr - region.unslidLoadAddress;
- SegmentMappingInfo loc;
- loc.srcSegment = (uint8_t*)dylib.input->mappedFile.mh + segInfo.vmAddr - textSegVmAddr;
- loc.segName = segInfo.segName;
- loc.dstSegment = region.buffer + offsetInRegion;
- loc.dstCacheUnslidAddress = addr;
- loc.dstCacheFileOffset = (uint32_t)(region.cacheFileOffset + offsetInRegion);
- loc.dstCacheSegmentSize = (uint32_t)segInfo.sizeOfSections;
- loc.dstCacheFileSize = (uint32_t)copySize;
- loc.copySegmentSize = (uint32_t)copySize;
- loc.srcSegmentIndex = segInfo.segIndex;
- dylib.cacheLocation.push_back(loc);
- addr += loc.dstCacheSegmentSize;
- });
- }
+ return;
+ }
- if ( !foundAuthenticatedFixups ) {
- // reserve space for objc r/w optimization tables
- _objcReadWriteBufferSizeAllocated = align(computeReadWriteObjC((uint32_t)_sortedDylibs.size(), totalProtocolDefCount), 14);
- addr = align(addr, 4); // objc r/w section contains pointer and must be at least pointer align
- _objcReadWriteBuffer = region.buffer + (addr - region.unslidLoadAddress);
- addr += _objcReadWriteBufferSizeAllocated;
- }
+ // __DATA_DIRTY
+ if ( strcmp(segInfo.segName, "__DATA_DIRTY") == 0 ) {
+ if ( authSegmentIndices.count(segInfo.segIndex) ) {
+ dylibSegmentTypes.push_back(SegmentType::authDirty);
+ } else {
+ dylibSegmentTypes.push_back(SegmentType::dataDirty);
+ }
+ return;
+ }
- // align DATA region end
- addr = align(addr, _archLayout->sharedRegionAlignP2);
- uint64_t endDataAddress = addr;
- region.bufferSize = endDataAddress - region.unslidLoadAddress;
- region.sizeInUse = region.bufferSize;
+ // __AUTH
+ if ( strcmp(segInfo.segName, "__AUTH") == 0 ) {
+ dylibSegmentTypes.push_back(authSegment);
+ return;
+ }
- _dataRegions.push_back(region);
- nextRegionFileOffset = region.cacheFileOffset + region.sizeInUse;
+ // DATA
+ if ( authSegmentIndices.count(segInfo.segIndex) ) {
+ // _diagnostics.verbose("%s: treating authenticated %s as __AUTH\n", dylib.dylibID.c_str(), segInfo.segName);
+ dylibSegmentTypes.push_back(SegmentType::auth);
+ } else {
+ dylibSegmentTypes.push_back(SegmentType::data);
+ }
+ });
}
- // __AUTH
- if ( foundAuthenticatedFixups ) {
+ auto processDylibSegments = ^(SegmentType onlyType, Region& region) {
+ for (size_t unsortedDylibIndex = 0; unsortedDylibIndex != dylibCount; ++unsortedDylibIndex) {
+ size_t dylibIndex = unsortedDylibIndex;
+ if ( (onlyType == SegmentType::dataDirty) || (onlyType == SegmentType::authDirty) )
+ dylibIndex = dirtyDataSortIndexes[dylibIndex];
- // align __AUTH region
- addr = align((addr + _archLayout->sharedRegionPadding), _archLayout->sharedRegionAlignP2);
+ DylibInfo& dylib = _sortedDylibs[dylibIndex];
+ const std::vector<SegmentType>& dylibSegmentTypes = segmentTypes[dylibIndex];
+ const uint64_t textSegVmAddr = textSegVmAddrs[dylibIndex];
- Region region;
- region.buffer = (uint8_t*)_fullAllocatedBuffer + addr - _archLayout->sharedMemoryStart;
- region.bufferSize = 0;
- region.sizeInUse = 0;
- region.unslidLoadAddress = addr;
- region.cacheFileOffset = nextRegionFileOffset;
- region.name = "__AUTH";
+ bool forcePageAlignedData = false;
+ if ( (_options.platform == dyld3::Platform::macOS) && (onlyType == SegmentType::data) ) {
+ forcePageAlignedData = dylib.input->mappedFile.mh->hasUnalignedPointerFixups();
+ //if ( forcePageAlignedData )
+ // warning("unaligned pointer in %s\n", dylib.input->mappedFile.runtimePath.c_str());
+ }
- // layout all __AUTH_CONST segments
- __block int authConstSegmentCount = 0;
- for (DylibInfo& dylib : _sortedDylibs) {
- __block uint64_t textSegVmAddr = 0;
- __block std::set<uint32_t>& authSegmentIndices = authenticatedSegments[&dylib];
dylib.input->mappedFile.mh->forEachSegment(^(const dyld3::MachOFile::SegmentInfo& segInfo, bool& stop) {
- if ( _options.platform == dyld3::Platform::watchOS_simulator && !_is64)
- return;
- if ( strcmp(segInfo.segName, "__TEXT") == 0 )
- textSegVmAddr = segInfo.vmAddr;
- if ( segInfo.protections != (VM_PROT_READ | VM_PROT_WRITE) )
+ if ( dylibSegmentTypes[segInfo.segIndex] != onlyType )
return;
// We may have coalesced the sections at the end of this segment. In that case, shrink the segment to remove them.
if (!foundCoalescedSection)
sizeOfSections = segInfo.sizeOfSections;
- if ( strcmp(segInfo.segName, "__AUTH_CONST") == 0 ) {
- // We'll handle __AUTH_CONST here
- } else if ( (strcmp(segInfo.segName, "__DATA_CONST") == 0) || (strcmp(segInfo.segName, "__OBJC_CONST") == 0) ) {
- // And we'll also handle __DATA_CONST/__OBJC_CONST which may contain authenticated pointers
- if ( authSegmentIndices.count(segInfo.segIndex) == 0 ) {
- // This __DATA_CONST doesn't contain authenticated pointers so was handled earlier
- return;
- } else {
- // We only moved this segment to __AUTH if it had content we didn't coalesce away
- if ( foundCoalescedSection && (sizeOfSections == 0) ) {
- // This __DATA_CONST doesn't contain authenticated pointers so was handled earlier
- return;
- }
- }
- } else {
- // Not __AUTH_CONST or __DATA_CONST/__OBJC_CONST so skip this
- return;
+ if ( !forcePageAlignedData ) {
+ // Pack __DATA segments
+ addr = align(addr, segInfo.p2align);
+ }
+ else {
+ // Keep __DATA segments 4K or more aligned
+ addr = align(addr, std::max((int)segInfo.p2align, (int)12));
}
- ++authConstSegmentCount;
- // Pack __AUTH_CONST segments
- addr = align(addr, segInfo.p2align);
+
size_t copySize = std::min((size_t)segInfo.fileSize, (size_t)sizeOfSections);
uint64_t offsetInRegion = addr - region.unslidLoadAddress;
SegmentMappingInfo loc;
});
}
- // align __AUTH_CONST region end
+ // align region end
addr = align(addr, _archLayout->sharedRegionAlignP2);
+ };
- // __AUTH_DIRTY. Note this is really __DATA_DIRTY as we don't generate an __AUTH_DIRTY in ld64
- for (size_t i=0; i < dylibCount; ++i) {
- DylibInfo& dylib = _sortedDylibs[dirtyDataSortIndexes[i]];
- __block uint64_t textSegVmAddr = 0;
- dylib.input->mappedFile.mh->forEachSegment(^(const dyld3::MachOFile::SegmentInfo& segInfo, bool& stop) {
- if ( _options.platform == dyld3::Platform::watchOS_simulator && !_is64)
- return;
- if ( strcmp(segInfo.segName, "__TEXT") == 0 )
- textSegVmAddr = segInfo.vmAddr;
- if ( segInfo.protections != (VM_PROT_READ | VM_PROT_WRITE) )
- return;
- if ( strcmp(segInfo.segName, "__DATA_DIRTY") != 0 )
- return;
- // Pack __AUTH_DIRTY segments
- addr = align(addr, segInfo.p2align);
- size_t copySize = std::min((size_t)segInfo.fileSize, (size_t)segInfo.sizeOfSections);
- uint64_t offsetInRegion = addr - region.unslidLoadAddress;
- SegmentMappingInfo loc;
- loc.srcSegment = (uint8_t*)dylib.input->mappedFile.mh + segInfo.vmAddr - textSegVmAddr;
- loc.segName = segInfo.segName;
- loc.dstSegment = region.buffer + offsetInRegion;
- loc.dstCacheUnslidAddress = addr;
- loc.dstCacheFileOffset = (uint32_t)(region.cacheFileOffset + offsetInRegion);
- loc.dstCacheSegmentSize = (uint32_t)segInfo.sizeOfSections;
- loc.dstCacheFileSize = (uint32_t)copySize;
- loc.copySegmentSize = (uint32_t)copySize;
- loc.srcSegmentIndex = segInfo.segIndex;
- dylib.cacheLocation.push_back(loc);
- addr += loc.dstCacheSegmentSize;
- });
- }
+ struct DataRegion {
+ const char* regionName;
+ SegmentType dataSegment;
+ std::optional<SegmentType> dirtySegment;
+ // Note this is temporary as once all platforms/archs support __DATA_CONST, we can move to a DataRegion just for CONST
+ std::optional<SegmentType> dataConstSegment;
+ bool addCFStrings;
+ bool addObjCRW;
+ };
+ std::vector<DataRegion> dataRegions;
+
+ // We only support __DATA_CONST on arm64(e) for now.
+ bool supportDataConst = false;
+ //supportDataConst |= strcmp(_archLayout->archName, "arm64") == 0;
+ supportDataConst |= strcmp(_archLayout->archName, "arm64e") == 0;
+ if ( supportDataConst ) {
+ bool addObjCRWToData = !supportsAuthFixups;
+ DataRegion dataWriteRegion = { "__DATA", SegmentType::data, SegmentType::dataDirty, {}, false, addObjCRWToData };
+ DataRegion dataConstRegion = { "__DATA_CONST", SegmentType::dataConst, {}, {}, true, false };
+ DataRegion authWriteRegion = { "__AUTH", SegmentType::auth, SegmentType::authDirty, {}, false, !addObjCRWToData };
+ DataRegion authConstRegion = { "__AUTH_CONST", SegmentType::authConst, {}, {}, false, false };
+ dataRegions.push_back(dataWriteRegion);
+ dataRegions.push_back(dataConstRegion);
+ if ( supportsAuthFixups ) {
+ dataRegions.push_back(authWriteRegion);
+ dataRegions.push_back(authConstRegion);
+ }
+ } else {
+ DataRegion dataWriteRegion = { "__DATA", SegmentType::data, SegmentType::dataDirty, SegmentType::dataConst, false, true };
+ dataRegions.push_back(dataWriteRegion);
+ }
+
+ for (DataRegion& dataRegion : dataRegions)
+ {
+ Region region;
+ region.buffer = (uint8_t*)_fullAllocatedBuffer + addr - _archLayout->sharedMemoryStart;
+ region.bufferSize = 0;
+ region.sizeInUse = 0;
+ region.unslidLoadAddress = addr;
+ region.cacheFileOffset = nextRegionFileOffset;
+ region.name = dataRegion.regionName;
+ region.initProt = endsWith(dataRegion.regionName, "_CONST") ? VM_PROT_READ : (VM_PROT_READ | VM_PROT_WRITE);
+ region.maxProt = VM_PROT_READ | VM_PROT_WRITE;
- // align __AUTH_DIRTY region end
- addr = align(addr, _archLayout->sharedRegionAlignP2);
+ // layout all __DATA_DIRTY segments, sorted (FIXME)
+ if (dataRegion.dirtySegment.has_value())
+ processDylibSegments(*dataRegion.dirtySegment, region);
- // layout all __AUTH segments (and other r/w non-dirty, non-const, non-auth) segments
- for (DylibInfo& dylib : _sortedDylibs) {
- __block uint64_t textSegVmAddr = 0;
- __block std::set<uint32_t>& authSegmentIndices = authenticatedSegments[&dylib];
- dylib.input->mappedFile.mh->forEachSegment(^(const dyld3::MachOFile::SegmentInfo& segInfo, bool& stop) {
- if ( strcmp(segInfo.segName, "__TEXT") == 0 )
- textSegVmAddr = segInfo.vmAddr;
- if ( segInfo.protections != (VM_PROT_READ | VM_PROT_WRITE) )
- return;
- if ( _options.platform != dyld3::Platform::watchOS_simulator || _is64) {
- if ( strcmp(segInfo.segName, "__AUTH_CONST") == 0 )
- return;
- }
- if ( strncmp(segInfo.segName, "__AUTH", 6) == 0 ) {
- // We'll handle __AUTH* here
- } else {
- // And we'll also handle __DATA* which contains authenticated pointers
- if ( authSegmentIndices.count(segInfo.segIndex) == 0 ) {
- // This __DATA doesn't contain authenticated pointers so was handled earlier
- return;
- }
- if ( _options.platform != dyld3::Platform::watchOS_simulator || _is64) {
- if ( strcmp(segInfo.segName, "__DATA_CONST") == 0 )
- return;
- if ( strcmp(segInfo.segName, "__DATA_DIRTY") == 0 )
- return;
- if ( strcmp(segInfo.segName, "__OBJC_CONST") == 0 )
- return;
- }
- }
- bool forcePageAlignedData = false;
- if (_options.platform == dyld3::Platform::macOS) {
- forcePageAlignedData = dylib.input->mappedFile.mh->hasUnalignedPointerFixups();
- //if ( forcePageAlignedData )
- // warning("unaligned pointer in %s\n", dylib.input->mappedFile.runtimePath.c_str());
- }
- if ( (authConstSegmentCount > 10) && !forcePageAlignedData ) {
- // Pack __AUTH segments only if we also have __AUTH_CONST segments
- addr = align(addr, segInfo.p2align);
- }
- else {
- // Keep __AUTH segments 4K or more aligned
- addr = align(addr, std::max((int)segInfo.p2align, (int)12));
- }
- size_t copySize = std::min((size_t)segInfo.fileSize, (size_t)segInfo.sizeOfSections);
- uint64_t offsetInRegion = addr - region.unslidLoadAddress;
- SegmentMappingInfo loc;
- loc.srcSegment = (uint8_t*)dylib.input->mappedFile.mh + segInfo.vmAddr - textSegVmAddr;
- loc.segName = segInfo.segName;
- loc.dstSegment = region.buffer + offsetInRegion;
- loc.dstCacheUnslidAddress = addr;
- loc.dstCacheFileOffset = (uint32_t)(region.cacheFileOffset + offsetInRegion);
- loc.dstCacheSegmentSize = (uint32_t)segInfo.sizeOfSections;
- loc.dstCacheFileSize = (uint32_t)copySize;
- loc.copySegmentSize = (uint32_t)copySize;
- loc.srcSegmentIndex = segInfo.segIndex;
- dylib.cacheLocation.push_back(loc);
- addr += loc.dstCacheSegmentSize;
- });
+ // layout all __DATA segments (and other r/w non-dirty, non-const, non-auth) segments
+ processDylibSegments(dataRegion.dataSegment, region);
+
+ // When __DATA_CONST is not its own DataRegion, we fold it in to the __DATA DataRegion
+ if (dataRegion.dataConstSegment.has_value())
+ processDylibSegments(*dataRegion.dataConstSegment, region);
+
+ // Make space for the cfstrings
+ if ( (dataRegion.addCFStrings) && (_coalescedText.cfStrings.bufferSize != 0) ) {
+ // Keep __DATA segments 4K or more aligned
+ addr = align(addr, 12);
+ uint64_t offsetInRegion = addr - region.unslidLoadAddress;
+
+ CacheCoalescedText::CFSection& cacheSection = _coalescedText.cfStrings;
+ cacheSection.bufferAddr = region.buffer + offsetInRegion;
+ cacheSection.bufferVMAddr = addr;
+ cacheSection.cacheFileOffset = region.cacheFileOffset + offsetInRegion;
+ addr += cacheSection.bufferSize;
}
- // reserve space for objc r/w optimization tables
- _objcReadWriteBufferSizeAllocated = align(computeReadWriteObjC((uint32_t)_sortedDylibs.size(), totalProtocolDefCount), 14);
- addr = align(addr, 4); // objc r/w section contains pointer and must be at least pointer align
- _objcReadWriteBuffer = region.buffer + (addr - region.unslidLoadAddress);
- addr += _objcReadWriteBufferSizeAllocated;
+ if ( dataRegion.addObjCRW ) {
+ // reserve space for objc r/w optimization tables
+ _objcReadWriteBufferSizeAllocated = align(computeReadWriteObjC((uint32_t)_sortedDylibs.size(), totalProtocolDefCount), 14);
+ addr = align(addr, 4); // objc r/w section contains pointer and must be at least pointer align
+ _objcReadWriteBuffer = region.buffer + (addr - region.unslidLoadAddress);
+ _objcReadWriteFileOffset = (uint32_t)((_objcReadWriteBuffer - region.buffer) + region.cacheFileOffset);
+ addr += _objcReadWriteBufferSizeAllocated;
+
+
+ // align region end
+ addr = align(addr, _archLayout->sharedRegionAlignP2);
+ }
// align DATA region end
- addr = align(addr, _archLayout->sharedRegionAlignP2);
uint64_t endDataAddress = addr;
region.bufferSize = endDataAddress - region.unslidLoadAddress;
region.sizeInUse = region.bufferSize;
_dataRegions.push_back(region);
nextRegionFileOffset = region.cacheFileOffset + region.sizeInUse;
+
+ // Only arm64 and arm64e shared caches have enough space to pad between __DATA and __DATA_CONST
+ // All other caches are overflowing.
+ if ( !strcmp(_archLayout->archName, "arm64") || !strcmp(_archLayout->archName, "arm64e") )
+ addr = align((addr + _archLayout->sharedRegionPadding), _archLayout->sharedRegionAlignP2);
}
-#if 0
// Sanity check that we didn't put the same segment in 2 different ranges
for (DylibInfo& dylib : _sortedDylibs) {
- __block std::unordered_set<uint64_t> seenSegmentIndices;
+ std::unordered_set<uint64_t> seenSegmentIndices;
for (SegmentMappingInfo& segmentInfo : dylib.cacheLocation) {
if ( seenSegmentIndices.count(segmentInfo.srcSegmentIndex) != 0 ) {
_diagnostics.error("%s segment %s was duplicated in layout",
seenSegmentIndices.insert(segmentInfo.srcSegmentIndex);
}
}
-#endif
}
void SharedCacheBuilder::assignSegmentAddresses()
pint_t lastValue = (pint_t)P::getP(*lastLoc);
pint_t newValue = ((lastValue - valueAdd) & valueMask);
P::setP(*lastLoc, newValue);
- }
- if ( startValue & DYLD_CACHE_SLIDE4_PAGE_USE_EXTRA ) {
- // add end bit to extras
- pageExtras.back() |= DYLD_CACHE_SLIDE4_PAGE_EXTRA_END;
+ if ( startValue & DYLD_CACHE_SLIDE4_PAGE_USE_EXTRA ) {
+ // add end bit to extras
+ pageExtras.back() |= DYLD_CACHE_SLIDE4_PAGE_EXTRA_END;
+ }
}
pageStarts.push_back(startValue);
}
uint64_t _objcReadOnlyBufferSizeAllocated = 0;
uint8_t* _objcReadWriteBuffer = nullptr;
uint64_t _objcReadWriteBufferSizeAllocated = 0;
+ uint64_t _objcReadWriteFileOffset = 0;
uint64_t _selectorStringsFromExecutables = 0;
InstallNameToMA _installNameToCacheDylib;
std::unordered_map<std::string, uint32_t> _dataDirtySegsOrder;
}
});
// compute number of symbols in new symbol table
- const macho_nlist<P>* const mergedSymTabStart = (macho_nlist<P>*)(((uint8_t*)mapped_cache) + symtab->symoff);
- const macho_nlist<P>* const mergedSymTabend = &mergedSymTabStart[symtab->nsyms];
+ const macho_nlist<P>* mergedSymTabStart = (macho_nlist<P>*)(((uint8_t*)mapped_cache) + symtab->symoff);
+ const macho_nlist<P>* const mergedSymTabend = &mergedSymTabStart[symtab->nsyms];
uint32_t newSymCount = symtab->nsyms;
if ( localNlistCount != 0 ) {
- newSymCount = localNlistCount;
- for (const macho_nlist<P>* s = mergedSymTabStart; s != mergedSymTabend; ++s) {
- // skip any locals in cache
- if ( (s->n_type() & (N_TYPE|N_EXT)) == N_SECT )
- continue;
- ++newSymCount;
- }
+ // if we are recombining with unmapped locals, recompute new total size
+ newSymCount = localNlistCount + dynamicSymTab->nextdefsym + dynamicSymTab->nundefsym;
}
// add room for N_INDR symbols for re-exported symbols
// first pool entry is always empty string
newSymNames.push_back('\0');
+ // local symbols are first in dylibs, if this cache has unmapped locals, insert them all first
+ uint32_t undefSymbolShift = 0;
+ if ( localNlistCount != 0 ) {
+ const char* localStrings = cache->getLocalStrings();
+ undefSymbolShift = localNlistCount - dynamicSymTab->nlocalsym;
+ // update load command to reflect new count of locals
+ dynamicSymTab->ilocalsym = (uint32_t)newSymTab.size();
+ dynamicSymTab->nlocalsym = localNlistCount;
+ // copy local symbols
+ for (uint32_t i=0; i < localNlistCount; ++i) {
+ const char* localName = &localStrings[localNlists[i].n_strx()];
+ if ( localName > localStrings + cache->getLocalStringsSize() )
+ localName = "<corrupt local symbol name>";
+ macho_nlist<P> t = localNlists[i];
+ t.set_n_strx((uint32_t)newSymNames.size());
+ newSymNames.insert(newSymNames.end(),
+ localName,
+ localName + (strlen(localName) + 1));
+ newSymTab.push_back(t);
+ }
+ // now start copying symbol table from start of externs instead of start of locals
+ mergedSymTabStart = &mergedSymTabStart[dynamicSymTab->iextdefsym];
+ }
+ // copy full symbol table from cache (skipping locals if they were elsewhere)
for (const macho_nlist<P>* s = mergedSymTabStart; s != mergedSymTabend; ++s) {
- // if we have better local symbol info, skip any locals here
- if ( (localNlists != NULL) && ((s->n_type() & (N_TYPE|N_EXT)) == N_SECT) )
- continue;
macho_nlist<P> t = *s;
t.set_n_strx((uint32_t)newSymNames.size());
const char* symName = &mergedStringPoolStart[s->n_strx()];
importName + (strlen(importName) + 1));
newSymTab.push_back(t);
}
- if ( localNlistCount != 0 ) {
- const char* localStrings = cache->getLocalStrings();
- // update load command to reflect new count of locals
- dynamicSymTab->ilocalsym = (uint32_t)newSymTab.size();
- dynamicSymTab->nlocalsym = localNlistCount;
- // copy local symbols
- for (uint32_t i=0; i < localNlistCount; ++i) {
- const char* localName = &localStrings[localNlists[i].n_strx()];
- if ( localName > localStrings + cache->getLocalStringsSize() )
- localName = "<corrupt local symbol name>";
- macho_nlist<P> t = localNlists[i];
- t.set_n_strx((uint32_t)newSymNames.size());
- newSymNames.insert(newSymNames.end(),
- localName,
- localName + (strlen(localName) + 1));
- newSymTab.push_back(t);
- }
- }
if ( newSymCount != newSymTab.size() ) {
fprintf(stderr, "symbol count miscalculation\n");
const uint64_t newIndSymTabOffset = new_linkedit_data.size();
- // Copy indirect symbol table
+ // Copy (and adjust) indirect symbol table
const uint32_t* mergedIndSymTab = (uint32_t*)((char*)mapped_cache + dynamicSymTab->indirectsymoff);
new_linkedit_data.insert(new_linkedit_data.end(),
(char*)mergedIndSymTab,
(char*)(mergedIndSymTab + dynamicSymTab->nindirectsyms));
-
+ if ( undefSymbolShift != 0 ) {
+ uint32_t* newIndSymTab = (uint32_t*)&new_linkedit_data[newIndSymTabOffset];
+ for (int i=0; i < dynamicSymTab->nindirectsyms; ++i) {
+ newIndSymTab[i] += undefSymbolShift;
+ }
+ }
const uint64_t newStringPoolOffset = new_linkedit_data.size();
// pointer align string pool size
if ( dyldCache->hasSlideInfo() ) {
uint32_t pageSize = 0x4000; // fix me for intel
uint32_t possibleSlideValues = (uint32_t)(header->maxSlide/pageSize);
- uint32_t entropyBits = 32 - __builtin_clz(possibleSlideValues - 1);
+ uint32_t entropyBits = 0;
+ if ( possibleSlideValues > 1 )
+ entropyBits = __builtin_clz(possibleSlideValues - 1);
printf("ASLR entropy: %u-bits\n", entropyBits);
}
printf("mappings:\n");
- dyldCache->forEachRegion(^(const void *content, uint64_t vmAddr, uint64_t size, uint32_t permissions,
- uint64_t flags) {
+ dyldCache->forEachRegion(^(const void *content, uint64_t vmAddr, uint64_t size,
+ uint32_t initProt, uint32_t maxProt, uint64_t flags) {
std::string mappingName = "";
- if ( permissions & VM_PROT_EXECUTE ) {
+ if ( maxProt & VM_PROT_EXECUTE ) {
mappingName = "__TEXT";
- } else if ( permissions & VM_PROT_WRITE ) {
+ } else if ( maxProt & VM_PROT_WRITE ) {
// Start off with __DATA or __AUTH
if ( flags & DYLD_CACHE_MAPPING_AUTH_DATA )
mappingName = "__AUTH";
else if ( flags & DYLD_CACHE_MAPPING_CONST_DATA )
mappingName += "_CONST";
}
- else if ( permissions & VM_PROT_READ ) {
+ else if ( maxProt & VM_PROT_READ ) {
mappingName = "__LINKEDIT";
} else {
mappingName = "*unknown*";
#include <map>
#include <sys/stat.h>
+
static const uint64_t kMinBuildVersion = 1; //The minimum version BuildOptions struct we can support
static const uint64_t kMaxBuildVersion = 2; //The maximum version BuildOptions struct we can support
std::vector<CacheResult*> cacheResults;
std::vector<CacheResult> cacheResultStorage;
+
// The files to remove. These are in every copy of the caches we built
std::vector<const char*> filesToRemove;
buildInstance.builder.reset();
}
+
// Now that we have run all of the builds, collect the results
// First push file results for each of the shared caches we built
for (auto& buildInstance : builder->builders) {
//dyld::log(" interposedAddress: replacee=0x%08llX, replacement=0x%08llX, neverImage=%p, onlyImage=%p, inImage=%p\n",
// (uint64_t)it->replacee, (uint64_t)it->replacement, it->neverImage, it->onlyImage, inImage);
// replace all references to 'replacee' with 'replacement'
- if ( (address == it->replacee) && (inImage != it->neverImage) && ((it->onlyImage == NULL) || (inImage == it->onlyImage)) ) {
+ if ( (address == it->replacee) && (it->neverImage != inImage) && ((it->onlyImage == NULL) || (it->onlyImage == inImage)) ) {
if ( context.verboseInterposing ) {
dyld::log("dyld interposing: replace 0x%lX with 0x%lX\n", it->replacee, it->replacement);
}
return;
if (fgInterposingTuples.empty())
return;
+
+ // make the cache writable for this block
+ DyldSharedCache::DataConstScopedWriter patcher(context.dyldCache, mach_task_self(), (context.verboseMapping ? &dyld::log : nullptr));
+
// For each of the interposed addresses, see if any of them are in the shared cache. If so, find
// that image and apply its patch table to all uses.
uintptr_t cacheStart = (uintptr_t)context.dyldCache;
}
}
// bind lazies in this image
- this->doBindJustLazies(context);
+ DyldSharedCache::DataConstLazyScopedWriter patcher(context.dyldCache, mach_task_self(), context.verboseMapping ? &dyld::log : nullptr);
+ this->doBindJustLazies(context, patcher);
}
}
new (&context.weakDefMap) dyld3::Map<const char*, std::pair<const ImageLoader*, uintptr_t>, ImageLoader::HashCString, ImageLoader::EqualCString>();
context.weakDefMapInitialized = true;
}
+
+ // We might have to patch the shared cache __DATA_CONST. In that case, we'll create just a single
+ // patcher when needed.
+ DyldSharedCache::DataConstLazyScopedWriter patcher(context.dyldCache, mach_task_self(), context.verboseMapping ? &dyld::log : nullptr);
+
#if TARGET_OS_OSX
// only do alternate algorithm for dlopen(). Use traditional algorithm for launch
if ( !context.linkingMainExecutable ) {
}
}
if ( (targetAddr != 0) && (coalIterator.image != targetImage) ) {
+ if ( coalIterator.image->inSharedCache() )
+ patcher.makeWriteable();
coalIterator.image->updateUsesCoalIterator(coalIterator, targetAddr, (ImageLoader*)targetImage, 0, context);
if (weakDefIt == context.weakDefMap.end()) {
if (targetImage->neverUnload()) {
nameToCoalesce, iterators[i].image->getIndexedShortName((unsigned)iterators[i].imageIndex),
targetAddr, targetImage->getIndexedShortName(targetImageIndex));
}
- if ( ! iterators[i].image->weakSymbolsBound(imageIndexes[i]) )
+ if ( ! iterators[i].image->weakSymbolsBound(imageIndexes[i]) ) {
+ if ( iterators[i].image->inSharedCache() )
+ patcher.makeWriteable();
iterators[i].image->updateUsesCoalIterator(iterators[i], targetAddr, targetImage, targetImageIndex, context);
+ }
iterators[i].symbolMatches = false;
}
}
// but if main executable has non-weak override of operator new or delete it needs is handled here
for (const char* weakSymbolName : sTreatAsWeak) {
const ImageLoader* dummy;
- imagesNeedingCoalescing[i]->resolveWeak(context, weakSymbolName, true, false, &dummy);
+ imagesNeedingCoalescing[i]->resolveWeak(context, weakSymbolName, true, false, &dummy, patcher);
}
}
#if __arm64e__
while ( !coaler.done ) {
const ImageLoader* dummy;
// a side effect of resolveWeak() is to patch cache
- imagesNeedingCoalescing[i]->resolveWeak(context, coaler.symbolName, true, false, &dummy);
+ imagesNeedingCoalescing[i]->resolveWeak(context, coaler.symbolName, true, false, &dummy, patcher);
imagesNeedingCoalescing[i]->incrementCoalIterator(coaler);
}
}
// don't need to do any coalescing if only one image has overrides, or all have already been done
if ( (countOfImagesWithWeakDefinitionsNotInSharedCache > 0) && (countNotYetWeakBound > 0) ) {
+ // We might have to patch the shared cache __DATA_CONST. In that case, we'll create just a single
+ // patcher when needed.
+ DyldSharedCache::DataConstLazyScopedWriter patcher(context.dyldCache, mach_task_self(), context.verboseMapping ? &dyld::log : nullptr);
+
#if TARGET_OS_OSX
// only do alternate algorithm for dlopen(). Use traditional algorithm for launch
if ( !context.linkingMainExecutable ) {
}
}
if ( (targetAddr != 0) && (coalIterator.image != targetImage) ) {
+ if ( coalIterator.image->inSharedCache() )
+ patcher.makeWriteable();
coalIterator.image->updateUsesCoalIterator(coalIterator, targetAddr, (ImageLoader*)targetImage, 0, context);
if ( context.verboseWeakBind )
dyld::log("dyld: adjusting uses of %s in %s to use definition from %s\n", nameToCoalesce, coalIterator.image->getPath(), targetImage->getPath());
nameToCoalesce, iterators[i].image->getIndexedShortName((unsigned)iterators[i].imageIndex),
targetAddr, targetImage->getIndexedShortName(targetImageIndex));
}
- if ( ! iterators[i].image->weakSymbolsBound(imageIndexes[i]) )
+ if ( ! iterators[i].image->weakSymbolsBound(imageIndexes[i]) ) {
+ if ( iterators[i].image->inSharedCache() )
+ patcher.makeWriteable();
iterators[i].image->updateUsesCoalIterator(iterators[i], targetAddr, targetImage, targetImageIndex, context);
+ }
iterators[i].symbolMatches = false;
}
}
// but if main executable has non-weak override of operator new or delete it needs is handled here
for (const char* weakSymbolName : sTreatAsWeak) {
const ImageLoader* dummy;
- imagesNeedingCoalescing[i]->resolveWeak(context, weakSymbolName, true, false, &dummy);
+ imagesNeedingCoalescing[i]->resolveWeak(context, weakSymbolName, true, false, &dummy, patcher);
}
}
#if __arm64e__
else {
// support traditional arm64 app on an arm64e device
// look for weak def symbols in this image which may override the cache
+ patcher.makeWriteable();
ImageLoader::CoalIterator coaler;
imagesNeedingCoalescing[i]->initializeCoalIterator(coaler, i, 0);
imagesNeedingCoalescing[i]->incrementCoalIterator(coaler);
while ( !coaler.done ) {
const ImageLoader* dummy;
// a side effect of resolveWeak() is to patch cache
- imagesNeedingCoalescing[i]->resolveWeak(context, coaler.symbolName, true, false, &dummy);
+ imagesNeedingCoalescing[i]->resolveWeak(context, coaler.symbolName, true, false, &dummy, patcher);
imagesNeedingCoalescing[i]->incrementCoalIterator(coaler);
}
}
#endif
#include "DyldSharedCache.h"
-
#include "Map.h"
+#include "PointerAuth.h"
#if __arm__
#include <mach/vm_page_size.h>
virtual bool forceFlat() const = 0;
// called at runtime when a lazily bound function is first called
- virtual uintptr_t doBindLazySymbol(uintptr_t* lazyPointer, const LinkContext& context) = 0;
+ virtual uintptr_t doBindLazySymbol(uintptr_t* lazyPointer, const LinkContext& context,
+ DyldSharedCache::DataConstLazyScopedWriter& patcher) = 0;
// called at runtime when a fast lazily bound function is first called
virtual uintptr_t doBindFastLazySymbol(uint32_t lazyBindingInfoOffset, const LinkContext& context,
virtual void recursiveMakeDataReadOnly(const LinkContext& context);
virtual uintptr_t resolveWeak(const LinkContext& context, const char* symbolName, bool weak_import, bool runResolver,
- const ImageLoader** foundIn) { return 0; }
+ const ImageLoader** foundIn,
+ DyldSharedCache::DataConstLazyScopedWriter& patcher) { return 0; }
// triggered by DYLD_PRINT_STATISTICS to write info on work done and how fast
static void printStatistics(unsigned int imageCount, const InitializerTimingList& timingInfo);
bool objCMappedNotified() const { return fObjCMappedNotified; }
struct InterposeTuple {
- uintptr_t replacement;
- ImageLoader* neverImage; // don't apply replacement to this image
- ImageLoader* onlyImage; // only apply replacement to this image
- uintptr_t replacee;
+ uintptr_t replacement;
+ dyld3::AuthenticatedValue<const ImageLoader*> neverImage; // don't apply replacement to this image
+ dyld3::AuthenticatedValue<const ImageLoader*> onlyImage; // only apply replacement to this image
+ uintptr_t replacee;
};
static uintptr_t read_uleb128(const uint8_t*& p, const uint8_t* end);
virtual void doBind(const LinkContext& context, bool forceLazysBound, const ImageLoader* reExportParent) = 0;
// called later via API to force all lazy pointer to be bound
- virtual void doBindJustLazies(const LinkContext& context) = 0;
+ virtual void doBindJustLazies(const LinkContext& context, DyldSharedCache::DataConstLazyScopedWriter& patcher) = 0;
// if image has any dtrace DOF sections, append them to list to be registered
virtual void doGetDOFSections(const LinkContext& context, std::vector<DOFInfo>& dofs) = 0;
if ( !inCache && (startOfFileSegCmd == NULL) )
throw "malformed mach-o image: missing __TEXT segment that maps start of file";
// <rdar://problem/13145644> verify every segment does not overlap another segment
- if ( context.strictMachORequired ) {
+ if ( context.strictMachORequired && !inCache ) {
uintptr_t lastFileStart = 0;
uintptr_t linkeditFileStart = 0;
const struct load_command* cmd1 = startCmds;
virtual bool incrementCoalIterator(CoalIterator&) = 0;
virtual uintptr_t getAddressCoalIterator(CoalIterator&, const LinkContext& contex) = 0;
virtual void updateUsesCoalIterator(CoalIterator&, uintptr_t newAddr, ImageLoader* target, unsigned targetIndex, const LinkContext& context) = 0;
- virtual uintptr_t doBindLazySymbol(uintptr_t* lazyPointer, const LinkContext& context) = 0;
+ virtual uintptr_t doBindLazySymbol(uintptr_t* lazyPointer, const LinkContext& context,
+ DyldSharedCache::DataConstLazyScopedWriter& patcher) = 0;
virtual uintptr_t doBindFastLazySymbol(uint32_t lazyBindingInfoOffset, const LinkContext& context, void (*lock)(), void (*unlock)()) = 0;
virtual void doTermination(const LinkContext& context);
virtual bool needsInitialization();
virtual bool getUUID(uuid_t) const;
virtual void doRebase(const LinkContext& context);
virtual void doBind(const LinkContext& context, bool forceLazysBound, const ImageLoader* reExportParent) = 0;
- virtual void doBindJustLazies(const LinkContext& context) = 0;
+ virtual void doBindJustLazies(const LinkContext& context, DyldSharedCache::DataConstLazyScopedWriter& patcher) = 0;
virtual bool doInitialization(const LinkContext& context);
virtual void doGetDOFSections(const LinkContext& context, std::vector<ImageLoader::DOFInfo>& dofs);
virtual bool needsTermination();
throw "compressed LINKEDIT lazy binder called with classic LINKEDIT";
}
-uintptr_t ImageLoaderMachOClassic::doBindLazySymbol(uintptr_t* lazyPointer, const LinkContext& context)
+uintptr_t ImageLoaderMachOClassic::doBindLazySymbol(uintptr_t* lazyPointer, const LinkContext& context,
+ DyldSharedCache::DataConstLazyScopedWriter& patcher)
{
// scan for all lazy-pointer sections
const bool twoLevel = this->usesTwoLevelNameSpace();
CRSetCrashLogMessage2(NULL);
}
-void ImageLoaderMachOClassic::doBindJustLazies(const LinkContext& context)
+void ImageLoaderMachOClassic::doBindJustLazies(const LinkContext& context, DyldSharedCache::DataConstLazyScopedWriter& patcher)
{
// some API called requested that all lazy pointers in this image be force bound
this->bindIndirectSymbolPointers(context, false, true);
virtual bool libIsUpward(unsigned int) const;
virtual void setLibImage(unsigned int, ImageLoader*, bool, bool);
virtual void doBind(const LinkContext& context, bool forceLazysBound, const ImageLoader* reExportParent);
- virtual void doBindJustLazies(const LinkContext& context);
- virtual uintptr_t doBindLazySymbol(uintptr_t* lazyPointer, const LinkContext& context);
+ virtual void doBindJustLazies(const LinkContext& context, DyldSharedCache::DataConstLazyScopedWriter& patcher);
+ virtual uintptr_t doBindLazySymbol(uintptr_t* lazyPointer, const LinkContext& context,
+ DyldSharedCache::DataConstLazyScopedWriter& patcher);
virtual uintptr_t doBindFastLazySymbol(uint32_t lazyBindingInfoOffset, const LinkContext& context, void (*lock)(), void (*unlock)());
virtual const char* findClosestSymbol(const void* addr, const void** closestAddr) const;
virtual void initializeCoalIterator(CoalIterator&, unsigned int loadOrder, unsigned);
static void patchCacheUsesOf(const ImageLoader::LinkContext& context, const dyld3::closure::Image* overriddenImage,
- uint32_t cacheOffsetOfImpl, const char* symbolName, uintptr_t newImpl)
+ uint32_t cacheOffsetOfImpl, const char* symbolName, uintptr_t newImpl,
+ DyldSharedCache::DataConstLazyScopedWriter& patcher)
{
+ patcher.makeWriteable();
+
uintptr_t cacheStart = (uintptr_t)context.dyldCache;
uint32_t imageIndex = overriddenImage->imageNum() - (uint32_t)context.dyldCache->cachedDylibsImageArray()->startImageNum();
context.dyldCache->forEachPatchableUseOfExport(imageIndex, cacheOffsetOfImpl, ^(dyld_cache_patchable_location patchLocation) {
uintptr_t ImageLoaderMachOCompressed::resolveWeak(const LinkContext& context, const char* symbolName, bool weak_import,
- bool runResolver, const ImageLoader** foundIn)
+ bool runResolver, const ImageLoader** foundIn,
+ DyldSharedCache::DataConstLazyScopedWriter& patcher)
{
const Symbol* sym;
CoalesceNotifier notifier = nullptr;
__block uintptr_t foundOutsideCache = 0;
__block const char* foundOutsideCachePath = nullptr;
__block uintptr_t lastFoundInCache = 0;
+
if ( this->usesChainedFixups() ) {
notifier = ^(const Symbol* implSym, const ImageLoader* implIn, const mach_header* implMh) {
// This block is only called in dyld2 mode when a non-cached image is search for which weak-def implementation to use
uint32_t cacheOffsetOfImpl = (uint32_t)((uintptr_t)implAddr - (uintptr_t)context.dyldCache);
if ( context.verboseWeakBind )
dyld::log("dyld: weak bind, patching dyld cache uses of %s to use 0x%lX in %s\n", symbolName, foundOutsideCache, foundOutsideCachePath);
- patchCacheUsesOf(context, overriddenImage, cacheOffsetOfImpl, symbolName, foundOutsideCache);
+ patchCacheUsesOf(context, overriddenImage, cacheOffsetOfImpl, symbolName, foundOutsideCache, patcher);
}
}
}
uintptr_t ImageLoaderMachOCompressed::resolve(const LinkContext& context, const char* symbolName,
uint8_t symboFlags, long libraryOrdinal, const ImageLoader** targetImage,
+ DyldSharedCache::DataConstLazyScopedWriter& patcher,
LastLookup* last, bool runResolver)
{
*targetImage = NULL;
symbolAddress = this->resolveFlat(context, symbolName, weak_import, runResolver, targetImage);
}
else if ( libraryOrdinal == BIND_SPECIAL_DYLIB_WEAK_LOOKUP ) {
- symbolAddress = this->resolveWeak(context, symbolName, weak_import, runResolver, targetImage);
+ symbolAddress = this->resolveWeak(context, symbolName, weak_import, runResolver, targetImage, patcher);
}
else {
if ( libraryOrdinal == BIND_SPECIAL_DYLIB_MAIN_EXECUTABLE ) {
uintptr_t addr, uint8_t type, const char* symbolName,
uint8_t symbolFlags, intptr_t addend, long libraryOrdinal,
ExtraBindData *extraBindData,
- const char* msg, LastLookup* last, bool runResolver)
+ const char* msg, DyldSharedCache::DataConstLazyScopedWriter& patcher,
+ LastLookup* last, bool runResolver)
{
const ImageLoader* targetImage;
uintptr_t symbolAddress;
symbolAddress = 0;
targetImage = nullptr;
} else
- symbolAddress = image->resolve(context, symbolName, symbolFlags, libraryOrdinal, &targetImage, last, runResolver);
+ symbolAddress = image->resolve(context, symbolName, symbolFlags, libraryOrdinal, &targetImage, patcher, last, runResolver);
// do actual update
return image->bindLocation(context, image->imageBaseAddress(), addr, symbolAddress, type, symbolName, addend, image->getPath(), targetImage ? targetImage->getPath() : NULL, msg, extraBindData, image->fSlide);
segActualLoadAddress(segmentIndex), segmentEndAddress);
}
-
void ImageLoaderMachOCompressed::doBind(const LinkContext& context, bool forceLazysBound, const ImageLoader* reExportParent)
{
CRSetCrashLogMessage2(this->getPath());
// except weak which may now be inline with the regular binds
if ( this->participatesInCoalescing() && (fDyldInfo != nullptr) ) {
// run through all binding opcodes
+ DyldSharedCache::DataConstLazyScopedWriter patcher(context.dyldCache, mach_task_self(), context.verboseMapping ? &dyld::log : nullptr);
+ auto* patcherPtr = &patcher;
eachBind(context, ^(const LinkContext& ctx, ImageLoaderMachOCompressed* image,
uintptr_t addr, uint8_t type, const char* symbolName,
uint8_t symbolFlags, intptr_t addend, long libraryOrdinal,
return (uintptr_t)0;
return ImageLoaderMachOCompressed::bindAt(ctx, image, addr, type, symbolName, symbolFlags,
addend, libraryOrdinal, extraBindData,
- msg, last, runResolver);
+ msg, *patcherPtr, last, runResolver);
});
}
}
vmAccountingSetSuspended(context, bindingBecauseOfRoot);
if ( fChainedFixups != NULL ) {
+ DyldSharedCache::DataConstLazyScopedWriter patcher(context.dyldCache, mach_task_self(), context.verboseMapping ? &dyld::log : nullptr);
const dyld_chained_fixups_header* fixupsHeader = (dyld_chained_fixups_header*)(fLinkEditBase + fChainedFixups->dataoff);
- doApplyFixups(context, fixupsHeader);
+ doApplyFixups(context, fixupsHeader, patcher);
}
else if ( fDyldInfo != nullptr ) {
#if TEXT_RELOC_SUPPORT
this->makeTextSegmentWritable(context, true);
#endif
+ // make the cache writable for this block
+ DyldSharedCache::DataConstLazyScopedWriter patcher(context.dyldCache, mach_task_self(), context.verboseMapping ? &dyld::log : nullptr);
+ auto* patcherPtr = &patcher;
+ if ( this->inSharedCache() )
+ patcher.makeWriteable();
+
// run through all binding opcodes
eachBind(context, ^(const LinkContext& ctx, ImageLoaderMachOCompressed* image,
uintptr_t addr, uint8_t type, const char* symbolName,
const char* msg, LastLookup* last, bool runResolver) {
return ImageLoaderMachOCompressed::bindAt(ctx, image, addr, type, symbolName, symbolFlags,
addend, libraryOrdinal, extraBindData,
- msg, last, runResolver);
+ msg, *patcherPtr, last, runResolver);
});
#if TEXT_RELOC_SUPPORT
// if this image is in the shared cache, but depends on something no longer in the shared cache,
// there is no way to reset the lazy pointers, so force bind them now
if ( forceLazysBound || fInSharedCache )
- this->doBindJustLazies(context);
+ this->doBindJustLazies(context, patcher);
// this image is in cache, but something below it is not. If
// this image has lazy pointer to a resolver function, then
// See if this dylib overrides something in the dyld cache
uint32_t dyldCacheOverrideImageNum;
if ( context.dyldCache && context.dyldCache->header.builtFromChainedFixups && overridesCachedDylib(dyldCacheOverrideImageNum) ) {
+
+ // make the cache writable for this block
+ DyldSharedCache::DataConstLazyScopedWriter patcher(context.dyldCache, mach_task_self(), context.verboseMapping ? &dyld::log : nullptr);
+ auto* patcherPtr = &patcher;
+
// need to patch all other places in cache that point to the overridden dylib, to point to this dylib instead
const dyld3::closure::Image* overriddenImage = context.dyldCache->cachedDylibsImageArray()->imageForNum(dyldCacheOverrideImageNum);
uint32_t imageIndex = dyldCacheOverrideImageNum - (uint32_t)context.dyldCache->cachedDylibsImageArray()->startImageNum();
const ImageLoader* foundIn = nullptr;
if ( this->findExportedSymbolAddress(context, exportName, NULL, 0, false, &foundIn, &newImpl) ) {
//dyld::log(" patchCacheUsesOf(%s) found in %s\n", exportName, foundIn->getPath());
- patchCacheUsesOf(context, overriddenImage, cacheOffsetOfImpl, exportName, newImpl);
+ patchCacheUsesOf(context, overriddenImage, cacheOffsetOfImpl, exportName, newImpl, *patcherPtr);
}
else {
// <rdar://problem/59196856> allow patched impls to move between re-export sibling dylibs
if ( reExportedDep->findExportedSymbolAddress(context, exportName, NULL, 0, false, &foundInSibling, &siblingImpl) ) {
stop = true;
//dyld::log(" patchCacheUsesOf(%s) found in sibling %s\n", exportName, foundInSibling->getPath());
- patchCacheUsesOf(context, overriddenImage, cacheOffsetOfImpl, exportName, siblingImpl);
+ patchCacheUsesOf(context, overriddenImage, cacheOffsetOfImpl, exportName, siblingImpl, *patcherPtr);
}
});
}
}
-void ImageLoaderMachOCompressed::doBindJustLazies(const LinkContext& context)
+void ImageLoaderMachOCompressed::doBindJustLazies(const LinkContext& context, DyldSharedCache::DataConstLazyScopedWriter& patcher)
{
eachLazyBind(context, ^(const LinkContext& ctx, ImageLoaderMachOCompressed* image,
uintptr_t addr, uint8_t type, const char* symbolName,
const char* msg, LastLookup* last, bool runResolver) {
return ImageLoaderMachOCompressed::bindAt(ctx, image, addr, type, symbolName, symbolFlags,
addend, libraryOrdinal, extraBindData,
- msg, last, runResolver);
+ msg, patcher, last, runResolver);
});
}
-void ImageLoaderMachOCompressed::doApplyFixups(const LinkContext& context, const dyld_chained_fixups_header* fixupsHeader)
+void ImageLoaderMachOCompressed::doApplyFixups(const LinkContext& context, const dyld_chained_fixups_header* fixupsHeader,
+ DyldSharedCache::DataConstLazyScopedWriter& patcher)
{
const dyld3::MachOLoaded* ml = (dyld3::MachOLoaded*)machHeader();
const dyld_chained_starts_in_image* starts = (dyld_chained_starts_in_image*)((uint8_t*)fixupsHeader + fixupsHeader->starts_offset);
const ImageLoader* targetImage;
uint8_t symbolFlags = weakImport ? BIND_SYMBOL_FLAGS_WEAK_IMPORT : 0;
try {
- uintptr_t symbolAddress = this->resolve(context, symbolName, symbolFlags, libOrdinal, &targetImage, NULL, true);
+ uintptr_t symbolAddress = this->resolve(context, symbolName, symbolFlags, libOrdinal, &targetImage, patcher, NULL, true);
targetAddrs.push_back((void*)(symbolAddress + addend));
}
catch (const char* msg) {
// mach-o files advertise interposing by having a __DATA __interpose section
struct InterposeData { uintptr_t replacement; uintptr_t replacee; };
+    // FIXME: It seems wrong to need a patcher here, but resolve may call resolveWeak and patch the cache.
+    // That would require weak symbols in the interposing section though, which may not be supported.
+ DyldSharedCache::DataConstLazyScopedWriter patcher(context.dyldCache, mach_task_self(), context.verboseMapping ? &dyld::log : nullptr);
+ auto* patcherPtr = &patcher;
+
__block Diagnostics diag;
const dyld3::MachOAnalyzer* ma = (dyld3::MachOAnalyzer*)fMachOData;
ma->forEachInterposingSection(diag, ^(uint64_t vmOffset, uint64_t vmSize, bool& stopSections) {
const ImageLoader* targetImage;
uintptr_t targetBindAddress = 0;
try {
- targetBindAddress = this->resolve(context, symbolName, 0, libraryOrdinal, &targetImage, last, false);
+ targetBindAddress = this->resolve(context, symbolName, 0, libraryOrdinal, &targetImage, *patcherPtr, last, false);
}
catch (const char* msg) {
if ( !weakImport )
const ImageLoader* targetImage;
uintptr_t targetBindAddress = 0;
try {
- targetBindAddress = this->resolve(context, symbolName, 0, libOrdinal, &targetImage, last, false);
+ targetBindAddress = this->resolve(context, symbolName, 0, libOrdinal, &targetImage, *patcherPtr, last, false);
}
catch (const char* msg) {
if ( !weakImport )
// A program built targeting 10.5 will have hybrid stubs. When used with weak symbols
// the classic lazy loader is used even when running on 10.6
-uintptr_t ImageLoaderMachOCompressed::doBindLazySymbol(uintptr_t* lazyPointer, const LinkContext& context)
+uintptr_t ImageLoaderMachOCompressed::doBindLazySymbol(uintptr_t* lazyPointer, const LinkContext& context,
+ DyldSharedCache::DataConstLazyScopedWriter& patcher)
{
// only works with compressed LINKEDIT if classic symbol table is also present
const macho_nlist* symbolTable = NULL;
libraryOrdinal = BIND_SPECIAL_DYLIB_FLAT_LOOKUP;
uintptr_t ptrToBind = (uintptr_t)lazyPointer;
uintptr_t symbolAddr = bindAt(context, this, ptrToBind, BIND_TYPE_POINTER, symbolName, 0, 0, libraryOrdinal,
- NULL, "lazy ", NULL);
+ NULL, "lazy ", patcher, NULL);
++fgTotalLazyBindFixups;
return symbolAddr;
}
if ( lock != NULL )
lock();
}
+
+ DyldSharedCache::DataConstLazyScopedWriter patcher(context.dyldCache, mach_task_self(), context.verboseMapping ? &dyld::log : nullptr);
const uint8_t* const start = fLinkEditBase + fDyldInfo->lazy_bind_off;
const uint8_t* const end = &start[fDyldInfo->lazy_bind_size];
dyld::throwf("BIND_OPCODE_SET_SEGMENT_AND_OFFSET_ULEB has offset 0x%08lX beyond segment size (0x%08lX)", segOffset, segSize(segIndex));
uintptr_t address = segActualLoadAddress(segIndex) + segOffset;
result = bindAt(context, this, address, BIND_TYPE_POINTER, symbolName, 0, 0, libraryOrdinal,
- NULL, "lazy ", NULL, true);
+ NULL, "lazy ", patcher, NULL, true);
// <rdar://problem/24140465> Some old apps had multiple lazy symbols bound at once
} while (!doneAfterBind && !context.strictMachORequired);
// Note: all binds that happen as part of normal loading and fixups will have interposing applied.
// There is only two cases where we need to parse bind opcodes and apply interposing:
+ // make the cache writable for this block
+ DyldSharedCache::DataConstLazyScopedWriter patcher(context.dyldCache, mach_task_self(), context.verboseMapping ? &dyld::log : nullptr);
+ if ( ma->inDyldCache() )
+ patcher.makeWriteable();
+
// 1) Lazy pointers are either not bound yet, or in dyld cache they are prebound (to uninterposed target)
eachLazyBind(context, ^(const LinkContext& ctx, ImageLoaderMachOCompressed* image,
uintptr_t addr, uint8_t type, const char* symbolName,
virtual bool libIsUpward(unsigned int) const;
virtual void setLibImage(unsigned int, ImageLoader*, bool, bool);
virtual void doBind(const LinkContext& context, bool forceLazysBound, const ImageLoader* reExportParent);
- virtual void doBindJustLazies(const LinkContext& context);
- virtual uintptr_t doBindLazySymbol(uintptr_t* lazyPointer, const LinkContext& context);
+ virtual void doBindJustLazies(const LinkContext& context, DyldSharedCache::DataConstLazyScopedWriter& patcher);
+ virtual uintptr_t doBindLazySymbol(uintptr_t* lazyPointer, const LinkContext& context,
+ DyldSharedCache::DataConstLazyScopedWriter& patcher);
virtual uintptr_t doBindFastLazySymbol(uint32_t lazyBindingInfoOffset, const LinkContext& context, void (*lock)(), void (*unlock)());
virtual const char* findClosestSymbol(const void* addr, const void** closestAddr) const;
virtual void initializeCoalIterator(CoalIterator&, unsigned int loadOrder, unsigned);
virtual void resetPreboundLazyPointers(const LinkContext& context);
#endif
virtual uintptr_t resolveWeak(const LinkContext& context, const char* symbolName, bool weak_import, bool runResolver,
- const ImageLoader** foundIn);
+ const ImageLoader** foundIn, DyldSharedCache::DataConstLazyScopedWriter& patcher);
private:
uint8_t symboFlags, intptr_t addend, long libraryOrdinal,
ExtraBindData *extraBindData,
const char* msg,
+ DyldSharedCache::DataConstLazyScopedWriter& patcher,
LastLookup* last, bool runResolver=false);
void bindCompressed(const LinkContext& context);
void throwBadBindingAddress(uintptr_t address, uintptr_t segmentEndAddress, int segmentIndex,
const uint8_t* startOpcodes, const uint8_t* endOpcodes, const uint8_t* pos);
uintptr_t resolve(const LinkContext& context, const char* symbolName,
- uint8_t symboFlags, long libraryOrdinal, const ImageLoader** targetImage,
+ uint8_t symboFlags, long libraryOrdinal, const ImageLoader** targetImage,
+ DyldSharedCache::DataConstLazyScopedWriter& patcher,
LastLookup* last = NULL, bool runResolver=false);
uintptr_t resolveFlat(const LinkContext& context, const char* symbolName, bool weak_import, bool runResolver,
const ImageLoader** foundIn);
void updateOptimizedLazyPointers(const LinkContext& context);
void updateAlternateLazyPointer(uint8_t* stub, void** originalLazyPointerAddr, const LinkContext& context);
void registerEncryption(const struct encryption_info_command* encryptCmd, const LinkContext& context);
- void doApplyFixups(const LinkContext& context, const dyld_chained_fixups_header* fixupsHeader);
+ void doApplyFixups(const LinkContext& context, const dyld_chained_fixups_header* fixupsHeader,
+ DyldSharedCache::DataConstLazyScopedWriter& patcher);
const struct dyld_info_command* fDyldInfo;
const struct linkedit_data_command* fChainedFixups;
virtual bool isExecutable() const { unreachable(); }
virtual bool isPositionIndependentExecutable() const { unreachable(); }
virtual bool forceFlat() const { unreachable(); }
- virtual uintptr_t doBindLazySymbol(uintptr_t* lazyPointer, const LinkContext& context) { unreachable(); }
+ virtual uintptr_t doBindLazySymbol(uintptr_t* lazyPointer, const LinkContext& context,
+ DyldSharedCache::DataConstLazyScopedWriter& patcher) { unreachable(); }
virtual uintptr_t doBindFastLazySymbol(uint32_t lazyBindingInfoOffset, const LinkContext& context,
void (*lock)(), void (*unlock)()) { unreachable(); }
virtual void doTermination(const LinkContext& context) { unreachable(); }
virtual LibraryInfo doGetLibraryInfo(const LibraryInfo& requestorInfo) { return requestorInfo; }
virtual void doRebase(const LinkContext& context) { unreachable(); }
virtual void doBind(const LinkContext& context, bool forceLazysBound, const ImageLoader* reExportParent) { unreachable(); }
- virtual void doBindJustLazies(const LinkContext& context) { unreachable(); }
+ virtual void doBindJustLazies(const LinkContext& context, DyldSharedCache::DataConstLazyScopedWriter& patcher) { unreachable(); }
virtual void doGetDOFSections(const LinkContext& context, std::vector<DOFInfo>& dofs) { unreachable(); }
virtual void doInterpose(const LinkContext& context) { unreachable(); }
virtual bool doInitialization(const LinkContext& context) { unreachable(); }
#include <libproc.h>
#include <sys/param.h>
#include <mach/mach_time.h> // mach_absolute_time()
-#include <mach/mach_init.h>
+#include <mach/mach_init.h>
+#include <mach/mach_traps.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/syscall.h>
#include <mach-o/ldsyms.h>
#include <libkern/OSByteOrder.h>
#include <libkern/OSAtomic.h>
-#include <mach/mach.h>
#include <sys/sysctl.h>
#include <sys/mman.h>
#include <sys/dtrace.h>
#include <sys/fsgetpath.h>
#include <System/sys/content_protection.h>
-#define SUPPORT_LOGGING_TO_CONSOLE !(__i386__ || __x86_64__ || TARGET_OS_SIMULATOR)
+#define SUPPORT_LOGGING_TO_CONSOLE !TARGET_OS_SIMULATOR
#if SUPPORT_LOGGING_TO_CONSOLE
#include <paths.h> // for logging to console
#endif
// magic linker symbol for start of dyld binary
extern "C" const macho_header __dso_handle;
+extern bool gEnableSharedCacheDataConst;
+
//
// The file contains the core of dyld used to get a process to main().
static cpu_type_t sHostCPU;
static cpu_subtype_t sHostCPUsubtype;
#endif
-static ImageLoaderMachO* sMainExecutable = NULL;
+typedef ImageLoaderMachO* __ptrauth_dyld_address_auth MainExecutablePointerType;
+static MainExecutablePointerType sMainExecutable = NULL;
static size_t sInsertedDylibCount = 0;
static std::vector<ImageLoader*> sAllImages;
static std::vector<ImageLoader*> sImageRoots;
#endif
#if !TARGET_OS_SIMULATOR
-static void sendMessage(unsigned portSlot, mach_msg_id_t msgId, mach_msg_size_t sendSize, mach_msg_header_t* buffer, mach_msg_size_t bufferSize) {
- // Allocate a port to listen on in this monitoring task
- mach_port_t sendPort = dyld::gProcessInfo->notifyPorts[portSlot];
- if (sendPort == MACH_PORT_NULL) {
- return;
+#define DYLD_PROCESS_INFO_NOTIFY_MAGIC 0x49414E46
+
+struct RemoteNotificationResponder {
+ RemoteNotificationResponder(const RemoteNotificationResponder&) = delete;
+ RemoteNotificationResponder(RemoteNotificationResponder&&) = delete;
+ RemoteNotificationResponder() {
+ if (dyld::gProcessInfo->notifyPorts[0] != DYLD_PROCESS_INFO_NOTIFY_MAGIC) {
+ // No notifier found, early out
+ _namesCnt = 0;
+ return;
+ }
+ kern_return_t kr = task_dyld_process_info_notify_get(_names, &_namesCnt);
+ while (kr == KERN_NO_SPACE) {
+        // In the future the SPI may return the size we need, but for now we just double the count. Since we don't want to depend on the
+        // return value in _namesCnt we set it to have a minimum of 16, double the inline storage value
+ _namesCnt = std::max<uint32_t>(16, 2*_namesCnt);
+ _namesSize = _namesCnt*sizeof(mach_port_t);
+ kr = vm_allocate(mach_task_self(), (vm_address_t*)&_names, _namesSize, VM_FLAGS_ANYWHERE);
+ if (kr != KERN_SUCCESS) {
+ // We could not allocate memory, time to error out
+ break;
+ }
+ kr = task_dyld_process_info_notify_get(_names, &_namesCnt);
+ if (kr != KERN_SUCCESS) {
+            // We failed, so deallocate the memory. If the failure was KERN_NO_SPACE we will loop back and try again
+ (void)vm_deallocate(mach_task_self(), (vm_address_t)_names, _namesSize);
+ _namesSize = 0;
+ }
+ }
+ if (kr != KERN_SUCCESS) {
+ // We failed, set _namesCnt to 0 so nothing else will happen
+ _namesCnt = 0;
+ }
}
- mach_port_t replyPort = MACH_PORT_NULL;
- mach_port_options_t options = { .flags = MPO_CONTEXT_AS_GUARD | MPO_STRICT,
- .mpl = { 1 }};
- kern_return_t kr = mach_port_construct(mach_task_self(), &options, (mach_port_context_t)&replyPort, &replyPort);
- if (kr != KERN_SUCCESS) {
- return;
+ ~RemoteNotificationResponder() {
+ if (_namesCnt) {
+ for (auto i = 0; i < _namesCnt; ++i) {
+ (void)mach_port_deallocate(mach_task_self(), _names[i]);
+ }
+ if (_namesSize != 0) {
+ // We are not using inline memory, we need to free it
+ (void)vm_deallocate(mach_task_self(), (vm_address_t)_names, _namesSize);
+ }
+ }
}
- // Assemble a message
- mach_msg_header_t* h = buffer;
- h->msgh_bits = MACH_MSGH_BITS(MACH_MSG_TYPE_COPY_SEND,MACH_MSG_TYPE_MAKE_SEND_ONCE);
- h->msgh_id = msgId;
- h->msgh_local_port = replyPort;
- h->msgh_remote_port = sendPort;
- h->msgh_reserved = 0;
- h->msgh_size = sendSize;
- kr = mach_msg(h, MACH_SEND_MSG | MACH_RCV_MSG, h->msgh_size, bufferSize, replyPort, 0, MACH_PORT_NULL);
- mach_msg_destroy(h);
- if ( kr == MACH_SEND_INVALID_DEST ) {
- if (OSAtomicCompareAndSwap32(sendPort, 0, (volatile int32_t*)&dyld::gProcessInfo->notifyPorts[portSlot])) {
- mach_port_deallocate(mach_task_self(), sendPort);
+ void sendMessage(mach_msg_id_t msgId, mach_msg_size_t sendSize, mach_msg_header_t* buffer) {
+ if (_namesCnt == 0) { return; }
+ // Allocate a port to listen on in this monitoring task
+ mach_port_t replyPort = MACH_PORT_NULL;
+ mach_port_options_t options = { .flags = MPO_CONTEXT_AS_GUARD | MPO_STRICT, .mpl = { 1 }};
+ kern_return_t kr = mach_port_construct(mach_task_self(), &options, (mach_port_context_t)&replyPort, &replyPort);
+ if (kr != KERN_SUCCESS) {
+ return;
}
+ for (auto i = 0; i < _namesCnt; ++i) {
+ if (_names[i] == MACH_PORT_NULL) { continue; }
+ // Assemble a message
+ uint8_t replyBuffer[sizeof(mach_msg_header_t) + MAX_TRAILER_SIZE];
+ mach_msg_header_t* msg = buffer;
+ msg->msgh_bits = MACH_MSGH_BITS(MACH_MSG_TYPE_COPY_SEND,MACH_MSG_TYPE_MAKE_SEND_ONCE);
+ msg->msgh_id = msgId;
+ msg->msgh_local_port = replyPort;
+ msg->msgh_remote_port = _names[i];
+ msg->msgh_reserved = 0;
+ msg->msgh_size = sendSize;
+ kr = mach_msg_overwrite(msg, MACH_SEND_MSG | MACH_RCV_MSG, msg->msgh_size, sizeof(replyBuffer), replyPort, 0, MACH_PORT_NULL,
+ (mach_msg_header_t*)&replyBuffer[0], 0);
+ if (kr != KERN_SUCCESS) {
+            // Send failed, we may have been pseudo-received. Destroy the message
+ (void)mach_msg_destroy(msg);
+            // Mark the port as null. It does not matter why we failed... if it is a single message we will not retry, if it
+            // is a fragmented message then subsequent messages will not decode correctly
+ _names[i] = MACH_PORT_NULL;
+ }
+ }
+ (void)mach_port_destruct(mach_task_self(), replyPort, 0, (mach_port_context_t)&replyPort);
+ }
+
+ bool const active() const {
+ for (auto i = 0; i < _namesCnt; ++i) {
+ if (_names[i] != MACH_PORT_NULL) {
+ return true;
+ }
+ }
+ return false;
+ }
+private:
+ mach_port_t _namesArray[8] = {0};
+ mach_port_name_array_t _names = (mach_port_name_array_t)&_namesArray[0];
+ mach_msg_type_number_t _namesCnt = 8;
+ vm_size_t _namesSize = 0;
+};
+
+//FIXME: Remove this once we drop support for iOS 11 simulators
+// This is an enormous hack to keep remote introspection of older simulators working
+// It works by interposing mach_msg, and redirecting messages sent to a special port name. Messages to that port name will trigger a full set
+// of sends to all kernel registered notifiers. In this mode mach_msg_sim_interposed() must return KERN_SUCCESS or the older dyld_sim may
+// try to clean up the notifier array.
+kern_return_t mach_msg_sim_interposed( mach_msg_header_t* msg, mach_msg_option_t option, mach_msg_size_t send_size, mach_msg_size_t rcv_size,
+ mach_port_name_t rcv_name, mach_msg_timeout_t timeout, mach_port_name_t notify) {
+ if (msg->msgh_remote_port != DYLD_PROCESS_INFO_NOTIFY_MAGIC) {
+ // Not the magic port, so just pass through to the real mach_msg()
+ return mach_msg(msg, option, send_size, rcv_size, rcv_name, timeout, notify);
+ }
+
+    // The magic port. We know dyld_sim is trying to message observers, so let's call into our messaging code directly.
+    // This is kind of weird since we effectively built a buffer in dyld_sim, then pass it to mach_msg, which we interpose, unpack, and then
+    // pass to sendMessage() which then sends the buffer back out via mach_msg_overwrite(), but it should work at least as well as the old
+    // way.
+ RemoteNotificationResponder responder;
+ responder.sendMessage(msg->msgh_id, send_size, msg);
+
+ // We always return KERN_SUCCESS, otherwise old dyld_sims might clear the port
+ return KERN_SUCCESS;
+}
+
+static void notifyMonitoringDyld(RemoteNotificationResponder& responder, bool unloading, unsigned imageCount,
+ const struct mach_header* loadAddresses[], const char* imagePaths[])
+{
+    // Make sure there is at least enough room to hold the largest single file entry that can exist.
+ static_assert((MAXPATHLEN + sizeof(dyld_process_info_image_entry) + 1 + MAX_TRAILER_SIZE) <= DYLD_PROCESS_INFO_NOTIFY_MAX_BUFFER_SIZE);
+
+ unsigned entriesSize = imageCount*sizeof(dyld_process_info_image_entry);
+ unsigned pathsSize = 0;
+ for (unsigned j=0; j < imageCount; ++j) {
+ pathsSize += (strlen(imagePaths[j]) + 1);
+ }
+
+ unsigned totalSize = (sizeof(struct dyld_process_info_notify_header) + entriesSize + pathsSize + 127) & -128; // align
+    // The receiver has a fixed buffer of DYLD_PROCESS_INFO_NOTIFY_MAX_BUFFER_SIZE, which needs to hold both the message and a trailer.
+ // If the total size exceeds that we need to fragment the message.
+ if ( (totalSize + MAX_TRAILER_SIZE) > DYLD_PROCESS_INFO_NOTIFY_MAX_BUFFER_SIZE ) {
+ // Putting all image paths into one message would make buffer too big.
+ // Instead split into two messages. Recurse as needed until paths fit in buffer.
+ unsigned imageHalfCount = imageCount/2;
+ notifyMonitoringDyld(responder, unloading, imageHalfCount, loadAddresses, imagePaths);
+ notifyMonitoringDyld(responder, unloading, imageCount - imageHalfCount, &loadAddresses[imageHalfCount], &imagePaths[imageHalfCount]);
+ return;
+ }
+ uint8_t buffer[totalSize + MAX_TRAILER_SIZE];
+ dyld_process_info_notify_header* header = (dyld_process_info_notify_header*)buffer;
+ header->version = 1;
+ header->imageCount = imageCount;
+ header->imagesOffset = sizeof(dyld_process_info_notify_header);
+ header->stringsOffset = sizeof(dyld_process_info_notify_header) + entriesSize;
+ header->timestamp = dyld::gProcessInfo->infoArrayChangeTimestamp;
+ dyld_process_info_image_entry* entries = (dyld_process_info_image_entry*)&buffer[header->imagesOffset];
+ char* const pathPoolStart = (char*)&buffer[header->stringsOffset];
+ char* pathPool = pathPoolStart;
+ for (unsigned j=0; j < imageCount; ++j) {
+ strcpy(pathPool, imagePaths[j]);
+ uint32_t len = (uint32_t)strlen(pathPool);
+ bzero(entries->uuid, 16);
+ dyld3::MachOFile* mf = (dyld3::MachOFile*)loadAddresses[j];
+ mf->getUuid(entries->uuid);
+ entries->loadAddress = (uint64_t)loadAddresses[j];
+ entries->pathStringOffset = (uint32_t)(pathPool - pathPoolStart);
+ entries->pathLength = len;
+ pathPool += (len +1);
+ ++entries;
+ }
+ if (unloading) {
+ responder.sendMessage(DYLD_PROCESS_INFO_NOTIFY_UNLOAD_ID, totalSize, (mach_msg_header_t*)buffer);
+ } else {
+ responder.sendMessage(DYLD_PROCESS_INFO_NOTIFY_LOAD_ID, totalSize, (mach_msg_header_t*)buffer);
}
- mach_port_destruct(mach_task_self(), replyPort, 0, (mach_port_context_t)&replyPort);
}
static void notifyMonitoringDyld(bool unloading, unsigned imageCount, const struct mach_header* loadAddresses[],
const char* imagePaths[])
{
dyld3::ScopedTimer(DBG_DYLD_REMOTE_IMAGE_NOTIFIER, 0, 0, 0);
- for (int slot=0; slot < DYLD_MAX_PROCESS_INFO_NOTIFY_COUNT; ++slot) {
- if ( dyld::gProcessInfo->notifyPorts[slot] == 0) continue;
- unsigned entriesSize = imageCount*sizeof(dyld_process_info_image_entry);
- unsigned pathsSize = 0;
- for (unsigned j=0; j < imageCount; ++j) {
- pathsSize += (strlen(imagePaths[j]) + 1);
- }
-
- unsigned totalSize = (sizeof(struct dyld_process_info_notify_header) + entriesSize + pathsSize + 127) & -128; // align
- // The reciever has a fixed buffer of DYLD_PROCESS_INFO_NOTIFY_MAX_BUFFER_SIZE, whcih needs to hold both the message and a trailer.
- // If the total size exceeds that we need to fragment the message.
- if ( (totalSize + MAX_TRAILER_SIZE) > DYLD_PROCESS_INFO_NOTIFY_MAX_BUFFER_SIZE ) {
- // Putting all image paths into one message would make buffer too big.
- // Instead split into two messages. Recurse as needed until paths fit in buffer.
- unsigned imageHalfCount = imageCount/2;
- notifyMonitoringDyld(unloading, imageHalfCount, loadAddresses, imagePaths);
- notifyMonitoringDyld(unloading, imageCount - imageHalfCount, &loadAddresses[imageHalfCount], &imagePaths[imageHalfCount]);
- return;
- }
- uint8_t buffer[totalSize + MAX_TRAILER_SIZE];
- dyld_process_info_notify_header* header = (dyld_process_info_notify_header*)buffer;
- header->version = 1;
- header->imageCount = imageCount;
- header->imagesOffset = sizeof(dyld_process_info_notify_header);
- header->stringsOffset = sizeof(dyld_process_info_notify_header) + entriesSize;
- header->timestamp = dyld::gProcessInfo->infoArrayChangeTimestamp;
- dyld_process_info_image_entry* entries = (dyld_process_info_image_entry*)&buffer[header->imagesOffset];
- char* const pathPoolStart = (char*)&buffer[header->stringsOffset];
- char* pathPool = pathPoolStart;
- for (unsigned j=0; j < imageCount; ++j) {
- strcpy(pathPool, imagePaths[j]);
- uint32_t len = (uint32_t)strlen(pathPool);
- bzero(entries->uuid, 16);
- dyld3::MachOFile* mf = (dyld3::MachOFile*)loadAddresses[j];
- mf->getUuid(entries->uuid);
- entries->loadAddress = (uint64_t)loadAddresses[j];
- entries->pathStringOffset = (uint32_t)(pathPool - pathPoolStart);
- entries->pathLength = len;
- pathPool += (len +1);
- ++entries;
- }
- if (unloading) {
- sendMessage(slot, DYLD_PROCESS_INFO_NOTIFY_UNLOAD_ID, totalSize, (mach_msg_header_t*)buffer, totalSize);
- } else {
- sendMessage(slot, DYLD_PROCESS_INFO_NOTIFY_LOAD_ID, totalSize, (mach_msg_header_t*)buffer, totalSize);
- }
- }
+ RemoteNotificationResponder responder;
+ if (!responder.active()) { return; }
+ notifyMonitoringDyld(responder, unloading, imageCount, loadAddresses, imagePaths);
}
-static void notifyMonitoringDyldMain()
-{
+static void notifyMonitoringDyldMain() {
dyld3::ScopedTimer(DBG_DYLD_REMOTE_IMAGE_NOTIFIER, 0, 0, 0);
- for (int slot=0; slot < DYLD_MAX_PROCESS_INFO_NOTIFY_COUNT; ++slot) {
- if ( dyld::gProcessInfo->notifyPorts[slot] == 0) continue;
- uint8_t buffer[sizeof(mach_msg_header_t) + MAX_TRAILER_SIZE];
- sendMessage(slot, DYLD_PROCESS_INFO_NOTIFY_MAIN_ID, sizeof(mach_msg_header_t), (mach_msg_header_t*)buffer, sizeof(mach_msg_header_t) + MAX_TRAILER_SIZE);
- }
+ RemoteNotificationResponder responder;
+ uint8_t buffer[sizeof(mach_msg_header_t) + MAX_TRAILER_SIZE];
+ responder.sendMessage(DYLD_PROCESS_INFO_NOTIFY_MAIN_ID, sizeof(mach_msg_header_t), (mach_msg_header_t*)buffer);
}
#else
extern void notifyMonitoringDyldMain() VIS_HIDDEN;
static void terminationRecorder(ImageLoader* image)
{
- sImageFilesNeedingTermination.push_back(image);
+ bool add = true;
+#if __arm64e__
+ // <rdar://problem/71820555> Don't run static terminator for arm64e
+ const mach_header* mh = image->machHeader();
+ if ( (mh->cputype == CPU_TYPE_ARM64) && ((mh->cpusubtype & ~CPU_SUBTYPE_MASK) == CPU_SUBTYPE_ARM64E) )
+ add = false;
+#endif
+ if ( add )
+ sImageFilesNeedingTermination.push_back(image);
}
const char* getExecutablePath()
else if ( strcmp(key, "DYLD_USE_CLOSURES") == 0 ) {
// Handled elsewhere
}
+ else if ( strcmp(key, "DYLD_SHARED_REGION_DATA_CONST") == 0 ) {
+ // Handled elsewhere
+ }
else if ( strcmp(key, "DYLD_FORCE_INVALID_CACHE_CLOSURES") == 0 ) {
if ( dyld3::internalInstall() ) {
sForceInvalidSharedCacheClosureFormat = true;
#endif
if ( target == NULL )
throwf("image not found for lazy pointer at %p", lazyPointer);
- result = target->doBindLazySymbol(lazyPointer, gLinkContext);
+ DyldSharedCache::DataConstLazyScopedWriter patcher(gLinkContext.dyldCache, mach_task_self(), gLinkContext.verboseMapping ? &dyld::log : nullptr);
+ result = target->doBindLazySymbol(lazyPointer, gLinkContext, patcher);
}
catch (const char* message) {
dyld::log("dyld: lazy symbol binding failed: %s\n", message);
#if TARGET_OS_OSX
static void* getProcessInfo() { return dyld::gProcessInfo; }
static const SyscallHelpers sSysCalls = {
- 13,
+ 14,
// added in version 1
&open,
&close,
&getpid,
&mach_port_insert_right,
&mach_port_allocate,
- &mach_msg,
+ &mach_msg_sim_interposed,
// Added in version 6
&abort_with_payload,
// Added in version 7
&mach_msg_destroy,
&mach_port_construct,
&mach_port_destruct,
- // Add in version 13
+ // Added in version 13
&fstat,
- &vm_copy
+ &vm_copy,
+ // Added in version 14
+ &task_dyld_process_info_notify_get
};
__attribute__((noinline))
mainClosure->libDyldEntry(dyldEntry);
const dyld3::LibDyldEntryVector* libDyldEntry = (dyld3::LibDyldEntryVector*)loader.resolveTarget(dyldEntry);
+ // Set the logging function first so that libdyld can log from inside all other entry vector functions
+#if !TARGET_OS_SIMULATOR
+ if ( libDyldEntry->vectorVersion > 3 )
+ libDyldEntry->setLogFunction(&dyld::vlog);
+#endif
+
	// send info on all images to libdyld.dylib
- libDyldEntry->setVars(mainExecutableMH, argc, argv, envp, apple, sKeysDisabled, sOnlyPlatformArm64e);
+ libDyldEntry->setVars(mainExecutableMH, argc, argv, envp, apple, sKeysDisabled, sOnlyPlatformArm64e, gEnableSharedCacheDataConst);
#if TARGET_OS_OSX
uint32_t progVarsOffset;
if ( mainClosure->hasProgramVars(progVarsOffset) ) {
if ( libDyldEntry->vectorVersion > 2 )
libDyldEntry->setChildForkFunction(&_dyld_fork_child);
-#if !TARGET_OS_SIMULATOR
- if ( libDyldEntry->vectorVersion > 3 )
- libDyldEntry->setLogFunction(&dyld::vlog);
-#endif
if ( libDyldEntry->vectorVersion >= 9 )
libDyldEntry->setLaunchMode(sLaunchModeUsed);
libDyldEntry->setOldAllImageInfo(gProcessInfo);
dyld3::LoadedImage* libSys = loader.findImage(mainClosure->libSystemImageNum());
- libDyldEntry->setInitialImageList(mainClosure, dyldCache, sSharedCacheLoadInfo.path, allImages, *libSys);
+ libDyldEntry->setInitialImageList(mainClosure, dyldCache, sSharedCacheLoadInfo.path, allImages, *libSys,
+ mach_task_self());
// run initializers
CRSetCrashLogMessage("dyld3: launch, running initializers");
libDyldEntry->runInitialzersBottomUp((mach_header*)mainExecutableMH);
}
}
+ // Check if we should force the shared cache __DATA_CONST to read-only or read-write
+ if ( dyld3::BootArgs::forceReadWriteDataConst() ) {
+ gEnableSharedCacheDataConst = false;
+ } else if ( dyld3::BootArgs::forceReadOnlyDataConst() ) {
+ gEnableSharedCacheDataConst = true;
+ } else {
+ // __DATA_CONST is enabled by default for arm64(e) for now
+#if __arm64__ && __LP64__
+ gEnableSharedCacheDataConst = true;
+#else
+ gEnableSharedCacheDataConst = false;
+#endif
+ }
+ bool sharedCacheDataConstIsEnabled = gEnableSharedCacheDataConst;
+
+ if ( dyld3::internalInstall() ) {
+ if (const char* dataConst = _simple_getenv(envp, "DYLD_SHARED_REGION_DATA_CONST")) {
+ if ( strcmp(dataConst, "RW") == 0 ) {
+ gEnableSharedCacheDataConst = false;
+ } else if ( strcmp(dataConst, "RO") == 0 ) {
+ gEnableSharedCacheDataConst = true;
+ } else {
+ dyld::warn("unknown option to DYLD_SHARED_REGION_DATA_CONST. Valid options are: RW and RO\n");
+ }
+
+ }
+ }
+
+
#if TARGET_OS_OSX
if ( !gLinkContext.allowEnvVarsPrint && !gLinkContext.allowEnvVarsPath && !gLinkContext.allowEnvVarsSharedCache ) {
pruneEnvironmentVariables(envp, &apple);
#else
mapSharedCache(mainExecutableSlide);
#endif
+
+ // If this process wants a different __DATA_CONST state from the shared region, then override that now
+ if ( (sSharedCacheLoadInfo.loadAddress != nullptr) && (gEnableSharedCacheDataConst != sharedCacheDataConstIsEnabled) ) {
+ uint32_t permissions = gEnableSharedCacheDataConst ? VM_PROT_READ : (VM_PROT_READ | VM_PROT_WRITE);
+ sSharedCacheLoadInfo.loadAddress->changeDataConstPermissions(mach_task_self(), permissions,
+ (gLinkContext.verboseMapping ? &dyld::log : nullptr));
+ }
}
#if !TARGET_OS_SIMULATOR
ImageLoader* image = dyld::findImageContainingSymbol(symbol);
if ( image != NULL )
result = (void*)image->getExportedSymbolAddress(NSSymbolToSymbol(symbol), dyld::gLinkContext);
+
+#if __has_feature(ptrauth_calls)
+ // Sign the pointer if it points to a function
+ if ( result ) {
+ const ImageLoader* symbolImage = image;
+ if (!symbolImage->containsAddress(result)) {
+ symbolImage = dyld::findImageContainingAddress(result);
+ }
+ const macho_section *sect = symbolImage ? symbolImage->findSection(result) : NULL;
+ if ( sect && ((sect->flags & S_ATTR_PURE_INSTRUCTIONS) || (sect->flags & S_ATTR_SOME_INSTRUCTIONS)) )
+ result = __builtin_ptrauth_sign_unauthenticated(result, ptrauth_key_asia, 0);
+ }
+#endif
return result;
}
ImageLoader* image = dyld::findImageByMachHeader(mh);
if ( image == NULL )
return;
+
+ // make the cache writable for this block
+ DyldSharedCache::DataConstScopedWriter patcher(dyld::gLinkContext.dyldCache, mach_task_self(), (dyld::gLinkContext.verboseMapping ? &dyld::log : nullptr));
// make pass at bound references in this image and update them
dyld::gLinkContext.dynamicInterposeArray = array;
#include <mach-o/dyld.h>
#include <mach-o/dyld_priv.h>
+#include <ptrauth.h>
+
#include "dyld_cache_format.h"
#include "objc-shared-cache.h"
//
extern "C" int _dyld_func_lookup(const char* dyld_func_name, void **address);
+template<typename T>
+static void dyld_func_lookup_and_resign(const char *dyld_func_name, T *__ptrauth_dyld_function_ptr* address) {
+ void *funcAsVoidPtr;
+ int res = _dyld_func_lookup(dyld_func_name, &funcAsVoidPtr);
+ (void)res;
+
+ // If C function pointer discriminators are type-diverse this cast will be
+ // an authenticate and resign operation.
+ *address = reinterpret_cast<T *>(funcAsVoidPtr);
+}
+
#if TARGET_OS_IOS && !TARGET_OS_SIMULATOR
namespace dyld3 {
extern int compatFuncLookup(const char* name, void** address) __API_AVAILABLE(ios(13.0));
#endif
-extern bool gUseDyld3;
+extern void* __ptrauth_dyld_address_auth gUseDyld3;
// <rdar://problem/61161069> libdyld.dylib should use abort_with_payload() for asserts
dyld3::halt(str);
}
else {
- void (*p)(const char* msg) __attribute__((__noreturn__));
- _dyld_func_lookup("__dyld_halt", (void**)&p);
+ typedef void (*funcType)(const char* msg) __attribute__((__noreturn__));
+ static funcType __ptrauth_dyld_function_ptr p = NULL;
+ dyld_func_lookup_and_resign("__dyld_halt", &p);
p(str);
}
// halt() doesn't return, so we can't call _simple_sfree
typedef NSModule (*mcallback_t)(NSSymbol s, NSModule old, NSModule newhandler);
typedef void (*lcallback_t)(NSLinkEditErrors c, int errorNumber,
const char* fileName, const char* errorString);
- static void (*p)(ucallback_t undefined, mcallback_t multiple, lcallback_t linkEdit) = NULL;
+ typedef void (*funcType)(ucallback_t undefined, mcallback_t multiple, lcallback_t linkEdit);
+ static funcType __ptrauth_dyld_function_ptr p = NULL;
if(p == NULL)
- _dyld_func_lookup("__dyld_install_handlers", (void**)&p);
+ dyld_func_lookup_and_resign("__dyld_install_handlers", &p);
mcallback_t m = handlers->multiple;
p(handlers->undefined, m, handlers->linkEdit);
}
return dyld3::NSNameOfModule(module);
DYLD_LOCK_THIS_BLOCK;
- static const char* (*p)(NSModule module) = NULL;
+ typedef const char* (*funcType)(NSModule module);
+ static funcType __ptrauth_dyld_function_ptr p = NULL;
if(p == NULL)
- _dyld_func_lookup("__dyld_NSNameOfModule", (void**)&p);
+ dyld_func_lookup_and_resign("__dyld_NSNameOfModule", &p);
return(p(module));
}
return dyld3::NSLibraryNameForModule(module);
DYLD_LOCK_THIS_BLOCK;
- static const char* (*p)(NSModule module) = NULL;
+ typedef const char* (*funcType)(NSModule module);
+ static funcType __ptrauth_dyld_function_ptr p = NULL;
if(p == NULL)
- _dyld_func_lookup("__dyld_NSLibraryNameForModule", (void**)&p);
+ dyld_func_lookup_and_resign("__dyld_NSLibraryNameForModule", &p);
return(p(module));
}
return dyld3::NSIsSymbolNameDefined(symbolName);
DYLD_LOCK_THIS_BLOCK;
- static bool (*p)(const char* symbolName) = NULL;
+ typedef bool (*funcType)(const char* symbolName);
+ static funcType __ptrauth_dyld_function_ptr p = NULL;
if(p == NULL)
- _dyld_func_lookup("__dyld_NSIsSymbolNameDefined", (void**)&p);
+ dyld_func_lookup_and_resign("__dyld_NSIsSymbolNameDefined", &p);
return(p(symbolName));
}
return dyld3::NSIsSymbolNameDefinedWithHint(symbolName, libraryNameHint);
DYLD_LOCK_THIS_BLOCK;
- static bool (*p)(const char* symbolName,
- const char* libraryNameHint) = NULL;
+ typedef bool (*funcType)(const char* symbolName,
+ const char* libraryNameHint);
+ static funcType __ptrauth_dyld_function_ptr p = NULL;
if(p == NULL)
- _dyld_func_lookup("__dyld_NSIsSymbolNameDefinedWithHint", (void**)&p);
+ dyld_func_lookup_and_resign("__dyld_NSIsSymbolNameDefinedWithHint", &p);
return(p(symbolName, libraryNameHint));
}
return dyld3::NSIsSymbolNameDefinedInImage(image, symbolName);
DYLD_LOCK_THIS_BLOCK;
- static bool (*p)(const struct mach_header *image,
- const char* symbolName) = NULL;
+ typedef bool (*funcType)(const struct mach_header *image,
+ const char* symbolName);
+ static funcType __ptrauth_dyld_function_ptr p = NULL;
if(p == NULL)
- _dyld_func_lookup("__dyld_NSIsSymbolNameDefinedInImage", (void**)&p);
+ dyld_func_lookup_and_resign("__dyld_NSIsSymbolNameDefinedInImage", &p);
return(p(image, symbolName));
}
return dyld3::NSLookupAndBindSymbol(symbolName);
DYLD_LOCK_THIS_BLOCK;
- static NSSymbol (*p)(const char* symbolName) = NULL;
+ typedef NSSymbol (*funcType)(const char* symbolName);
+ static funcType __ptrauth_dyld_function_ptr p = NULL;
if(p == NULL)
- _dyld_func_lookup("__dyld_NSLookupAndBindSymbol", (void**)&p);
+ dyld_func_lookup_and_resign("__dyld_NSLookupAndBindSymbol", &p);
return(p(symbolName));
}
return dyld3::NSLookupAndBindSymbolWithHint(symbolName, libraryNameHint);
DYLD_LOCK_THIS_BLOCK;
- static NSSymbol (*p)(const char* symbolName,
- const char* libraryNameHint) = NULL;
+ typedef NSSymbol (*funcType)(const char* symbolName,
+ const char* libraryNameHint);
+ static funcType __ptrauth_dyld_function_ptr p = NULL;
if(p == NULL)
- _dyld_func_lookup("__dyld_NSLookupAndBindSymbolWithHint", (void**)&p);
+ dyld_func_lookup_and_resign("__dyld_NSLookupAndBindSymbolWithHint", &p);
return(p(symbolName, libraryNameHint));
}
return dyld3::NSLookupSymbolInModule(module, symbolName);
DYLD_LOCK_THIS_BLOCK;
- static NSSymbol (*p)(NSModule module, const char* symbolName) = NULL;
+ typedef NSSymbol (*funcType)(NSModule module, const char* symbolName);
+ static funcType __ptrauth_dyld_function_ptr p = NULL;
if(p == NULL)
- _dyld_func_lookup("__dyld_NSLookupSymbolInModule", (void**)&p);
+ dyld_func_lookup_and_resign("__dyld_NSLookupSymbolInModule", &p);
return(p(module, symbolName));
}
return dyld3::NSLookupSymbolInImage(image, symbolName, options);
DYLD_LOCK_THIS_BLOCK;
- static NSSymbol (*p)(const struct mach_header *image,
- const char* symbolName,
- uint32_t options) = NULL;
+ typedef NSSymbol (*funcType)(const struct mach_header *image,
+ const char* symbolName,
+ uint32_t options);
+ static funcType __ptrauth_dyld_function_ptr p = NULL;
if(p == NULL)
- _dyld_func_lookup("__dyld_NSLookupSymbolInImage", (void**)&p);
+ dyld_func_lookup_and_resign("__dyld_NSLookupSymbolInImage", &p);
return(p(image, symbolName, options));
}
return dyld3::NSNameOfSymbol(symbol);
DYLD_LOCK_THIS_BLOCK;
- static char * (*p)(NSSymbol symbol) = NULL;
+ typedef char * (*funcType)(NSSymbol symbol);
+ static funcType __ptrauth_dyld_function_ptr p = NULL;
if(p == NULL)
- _dyld_func_lookup("__dyld_NSNameOfSymbol",(void**)&p);
+ dyld_func_lookup_and_resign("__dyld_NSNameOfSymbol",&p);
return(p(symbol));
}
return dyld3::NSAddressOfSymbol(symbol);
DYLD_LOCK_THIS_BLOCK;
- static void * (*p)(NSSymbol symbol) = NULL;
+ typedef void * (*funcType)(NSSymbol symbol);
+ static funcType __ptrauth_dyld_function_ptr p = NULL;
if(p == NULL)
- _dyld_func_lookup("__dyld_NSAddressOfSymbol", (void**)&p);
+ dyld_func_lookup_and_resign("__dyld_NSAddressOfSymbol", &p);
return(p(symbol));
}
return dyld3::NSModuleForSymbol(symbol);
DYLD_LOCK_THIS_BLOCK;
- static NSModule (*p)(NSSymbol symbol) = NULL;
+ typedef NSModule (*funcType)(NSSymbol symbol);
+ static funcType __ptrauth_dyld_function_ptr p = NULL;
if(p == NULL)
- _dyld_func_lookup("__dyld_NSModuleForSymbol", (void**)&p);
+ dyld_func_lookup_and_resign("__dyld_NSModuleForSymbol", &p);
return(p(symbol));
}
return dyld3::NSAddLibrary(pathName);
DYLD_LOCK_THIS_BLOCK;
- static bool (*p)(const char* pathName) = NULL;
+ typedef bool (*funcType)(const char* pathName);
+ static funcType __ptrauth_dyld_function_ptr p = NULL;
if(p == NULL)
- _dyld_func_lookup("__dyld_NSAddLibrary", (void**)&p);
+ dyld_func_lookup_and_resign("__dyld_NSAddLibrary", &p);
return(p(pathName));
}
return dyld3::NSAddLibrary(pathName);
DYLD_LOCK_THIS_BLOCK;
- static bool (*p)(const char* pathName) = NULL;
+ typedef bool (*funcType)(const char* pathName);
+ static funcType __ptrauth_dyld_function_ptr p = NULL;
if(p == NULL)
- _dyld_func_lookup("__dyld_NSAddLibraryWithSearching", (void**)&p);
+ dyld_func_lookup_and_resign("__dyld_NSAddLibraryWithSearching", &p);
return(p(pathName));
}
return dyld3::NSAddImage(image_name, options);
DYLD_LOCK_THIS_BLOCK;
- static const struct mach_header * (*p)(const char* image_name,
- uint32_t options) = NULL;
+ typedef const struct mach_header * (*funcType)(const char* image_name,
+ uint32_t options);
+ static funcType __ptrauth_dyld_function_ptr p = NULL;
if(p == NULL)
- _dyld_func_lookup("__dyld_NSAddImage", (void**)&p);
+ dyld_func_lookup_and_resign("__dyld_NSAddImage", &p);
return(p(image_name, options));
}
#endif // DEPRECATED_APIS_SUPPORTED
return dyld3::NSCreateObjectFileImageFromFile(pathName, objectFileImage);
DYLD_LOCK_THIS_BLOCK;
- static NSObjectFileImageReturnCode (*p)(const char*, NSObjectFileImage*) = NULL;
+ typedef NSObjectFileImageReturnCode (*funcType)(const char*, NSObjectFileImage*);
+ static funcType __ptrauth_dyld_function_ptr p = NULL;
if(p == NULL)
- _dyld_func_lookup("__dyld_NSCreateObjectFileImageFromFile", (void**)&p);
+ dyld_func_lookup_and_resign("__dyld_NSCreateObjectFileImageFromFile", &p);
return p(pathName, objectFileImage);
}
return dyld3::NSCreateObjectFileImageFromMemory(address, size, objectFileImage);
DYLD_LOCK_THIS_BLOCK;
- static NSObjectFileImageReturnCode (*p)(const void*, size_t, NSObjectFileImage*) = NULL;
+ typedef NSObjectFileImageReturnCode (*funcType)(const void*, size_t, NSObjectFileImage*);
+ static funcType __ptrauth_dyld_function_ptr p = NULL;
if(p == NULL)
- _dyld_func_lookup("__dyld_NSCreateObjectFileImageFromMemory", (void**)&p);
+ dyld_func_lookup_and_resign("__dyld_NSCreateObjectFileImageFromMemory", &p);
return p(address, size, objectFileImage);
}
NSObjectFileImage *objectFileImage)
{
DYLD_LOCK_THIS_BLOCK;
- static NSObjectFileImageReturnCode (*p)(const char*, NSObjectFileImage*) = NULL;
+ typedef NSObjectFileImageReturnCode (*funcType)(const char*, NSObjectFileImage*);
+ static funcType __ptrauth_dyld_function_ptr p = NULL;
if(p == NULL)
- _dyld_func_lookup("__dyld_NSCreateCoreFileImageFromFile", (void**)&p);
+ dyld_func_lookup_and_resign("__dyld_NSCreateCoreFileImageFromFile", &p);
return p(pathName, objectFileImage);
}
#endif
return dyld3::NSDestroyObjectFileImage(objectFileImage);
DYLD_LOCK_THIS_BLOCK;
- static bool (*p)(NSObjectFileImage) = NULL;
+ typedef bool (*funcType)(NSObjectFileImage);
+ static funcType __ptrauth_dyld_function_ptr p = NULL;
if(p == NULL)
- _dyld_func_lookup("__dyld_NSDestroyObjectFileImage", (void**)&p);
+ dyld_func_lookup_and_resign("__dyld_NSDestroyObjectFileImage", &p);
return p(objectFileImage);
}
return dyld3::NSLinkModule(objectFileImage, moduleName, options);
DYLD_LOCK_THIS_BLOCK;
- static NSModule (*p)(NSObjectFileImage, const char*, unsigned long) = NULL;
+ typedef NSModule (*funcType)(NSObjectFileImage, const char*, unsigned long);
+ static funcType __ptrauth_dyld_function_ptr p = NULL;
if(p == NULL)
- _dyld_func_lookup("__dyld_NSLinkModule", (void**)&p);
+ dyld_func_lookup_and_resign("__dyld_NSLinkModule", &p);
return p(objectFileImage, moduleName, options);
}
return dyld3::NSSymbolDefinitionCountInObjectFileImage(objectFileImage);
DYLD_LOCK_THIS_BLOCK;
- static uint32_t (*p)(NSObjectFileImage) = NULL;
+ typedef uint32_t (*funcType)(NSObjectFileImage);
+ static funcType __ptrauth_dyld_function_ptr p = NULL;
if(p == NULL)
- _dyld_func_lookup("__dyld_NSSymbolDefinitionCountInObjectFileImage", (void**)&p);
+ dyld_func_lookup_and_resign("__dyld_NSSymbolDefinitionCountInObjectFileImage", &p);
return p(objectFileImage);
}
return dyld3::NSSymbolDefinitionNameInObjectFileImage(objectFileImage, ordinal);
DYLD_LOCK_THIS_BLOCK;
- static const char* (*p)(NSObjectFileImage, uint32_t) = NULL;
+ typedef const char* (*funcType)(NSObjectFileImage, uint32_t);
+ static funcType __ptrauth_dyld_function_ptr p = NULL;
if(p == NULL)
- _dyld_func_lookup("__dyld_NSSymbolDefinitionNameInObjectFileImage", (void**)&p);
+ dyld_func_lookup_and_resign("__dyld_NSSymbolDefinitionNameInObjectFileImage", &p);
return p(objectFileImage, ordinal);
}
return dyld3::NSSymbolReferenceCountInObjectFileImage(objectFileImage);
DYLD_LOCK_THIS_BLOCK;
- static uint32_t (*p)(NSObjectFileImage) = NULL;
+ typedef uint32_t (*funcType)(NSObjectFileImage);
+ static funcType __ptrauth_dyld_function_ptr p = NULL;
if(p == NULL)
- _dyld_func_lookup("__dyld_NSSymbolReferenceCountInObjectFileImage", (void**)&p);
+ dyld_func_lookup_and_resign("__dyld_NSSymbolReferenceCountInObjectFileImage", &p);
return p(objectFileImage);
}
return dyld3::NSSymbolReferenceNameInObjectFileImage(objectFileImage, ordinal, tentative_definition);
DYLD_LOCK_THIS_BLOCK;
- static const char* (*p)(NSObjectFileImage, uint32_t, bool*) = NULL;
+ typedef const char* (*funcType)(NSObjectFileImage, uint32_t, bool*);
+ static funcType __ptrauth_dyld_function_ptr p = NULL;
if(p == NULL)
- _dyld_func_lookup("__dyld_NSSymbolReferenceNameInObjectFileImage", (void**)&p);
+ dyld_func_lookup_and_resign("__dyld_NSSymbolReferenceNameInObjectFileImage", &p);
return p(objectFileImage, ordinal, tentative_definition);
}
return dyld3::NSIsSymbolDefinedInObjectFileImage(objectFileImage, symbolName);
DYLD_LOCK_THIS_BLOCK;
- static bool (*p)(NSObjectFileImage, const char*) = NULL;
+ typedef bool (*funcType)(NSObjectFileImage, const char*);
+ static funcType __ptrauth_dyld_function_ptr p = NULL;
if(p == NULL)
- _dyld_func_lookup("__dyld_NSIsSymbolDefinedInObjectFileImage", (void**)&p);
+ dyld_func_lookup_and_resign("__dyld_NSIsSymbolDefinedInObjectFileImage", &p);
return p(objectFileImage, symbolName);
}
return dyld3::NSGetSectionDataInObjectFileImage(objectFileImage, segmentName, sectionName, size);
DYLD_LOCK_THIS_BLOCK;
- static void* (*p)(NSObjectFileImage, const char*, const char*, unsigned long*) = NULL;
+ typedef void* (*funcType)(NSObjectFileImage, const char*, const char*, unsigned long*);
+ static funcType __ptrauth_dyld_function_ptr p = NULL;
if(p == NULL)
- _dyld_func_lookup("__dyld_NSGetSectionDataInObjectFileImage", (void**)&p);
+ dyld_func_lookup_and_resign("__dyld_NSGetSectionDataInObjectFileImage", &p);
return p(objectFileImage, segmentName, sectionName, size);
}
return dyld3::NSLinkEditError(c, errorNumber, fileName, errorString);
DYLD_LOCK_THIS_BLOCK;
- static void (*p)(NSLinkEditErrors *c,
- int *errorNumber,
- const char* *fileName,
- const char* *errorString) = NULL;
+ typedef void (*funcType)(NSLinkEditErrors *c,
+ int *errorNumber,
+ const char* *fileName,
+ const char* *errorString);
+ static funcType __ptrauth_dyld_function_ptr p = NULL;
if(p == NULL)
- _dyld_func_lookup("__dyld_link_edit_error", (void**)&p);
+ dyld_func_lookup_and_resign("__dyld_link_edit_error", &p);
if(p != NULL)
p(c, errorNumber, fileName, errorString);
}
return dyld3::NSUnLinkModule(module, options);
DYLD_LOCK_THIS_BLOCK;
- static bool (*p)(NSModule module, uint32_t options) = NULL;
+ typedef bool (*funcType)(NSModule module, uint32_t options);
+ static funcType __ptrauth_dyld_function_ptr p = NULL;
if(p == NULL)
- _dyld_func_lookup("__dyld_unlink_module", (void**)&p);
+ dyld_func_lookup_and_resign("__dyld_unlink_module", &p);
return p(module, options);
}
return dyld3::_NSGetExecutablePath(buf, bufsize);
DYLD_NO_LOCK_THIS_BLOCK;
- static int (*p)(char *buf, uint32_t *bufsize) = NULL;
+ typedef int (*funcType)(char *buf, uint32_t *bufsize);
+ static funcType __ptrauth_dyld_function_ptr p = NULL;
if(p == NULL)
- _dyld_func_lookup("__dyld__NSGetExecutablePath", (void**)&p);
+ dyld_func_lookup_and_resign("__dyld__NSGetExecutablePath", &p);
return(p(buf, bufsize));
}
return dyld3::_dyld_lookup_and_bind(symbol_name, address, module);
DYLD_LOCK_THIS_BLOCK;
- static void (*p)(const char*, void** , NSModule*) = NULL;
+ typedef void (*funcType)(const char*, void** , NSModule*);
+ static funcType __ptrauth_dyld_function_ptr p = NULL;
if(p == NULL)
- _dyld_func_lookup("__dyld_lookup_and_bind", (void**)&p);
+ dyld_func_lookup_and_resign("__dyld_lookup_and_bind", &p);
p(symbol_name, address, module);
}
NSModule* module)
{
DYLD_LOCK_THIS_BLOCK;
- static void (*p)(const char*, const char*, void**, NSModule*) = NULL;
+ typedef void (*funcType)(const char*, const char*, void**, NSModule*);
+ static funcType __ptrauth_dyld_function_ptr p = NULL;
if(p == NULL)
- _dyld_func_lookup("__dyld_lookup_and_bind_with_hint", (void**)&p);
+ dyld_func_lookup_and_resign("__dyld_lookup_and_bind_with_hint", &p);
p(symbol_name, library_name_hint, address, module);
}
NSModule* module)
{
DYLD_LOCK_THIS_BLOCK;
- static void (*p)(const char* , void**, NSModule*) = NULL;
+ typedef void (*funcType)(const char* , void**, NSModule*);
+ static funcType __ptrauth_dyld_function_ptr p = NULL;
if(p == NULL)
- _dyld_func_lookup("__dyld_lookup_and_bind_objc", (void**)&p);
+ dyld_func_lookup_and_resign("__dyld_lookup_and_bind_objc", &p);
p(symbol_name, address, module);
}
#endif
NSModule* module)
{
DYLD_LOCK_THIS_BLOCK;
- static void (*p)(const char*, void**, NSModule*) = NULL;
+ typedef void (*funcType)(const char*, void**, NSModule*);
+ static funcType __ptrauth_dyld_function_ptr p = NULL;
if(p == NULL)
- _dyld_func_lookup("__dyld_lookup_and_bind_fully", (void**)&p);
+ dyld_func_lookup_and_resign("__dyld_lookup_and_bind_fully", &p);
p(symbol_name, address, module);
}
const void* address)
{
DYLD_LOCK_THIS_BLOCK;
- static bool (*p)(const void*) = NULL;
+ typedef bool (*funcType)(const void*);
+ static funcType __ptrauth_dyld_function_ptr p = NULL;
if(p == NULL)
- _dyld_func_lookup("__dyld_bind_fully_image_containing_address", (void**)&p);
+ dyld_func_lookup_and_resign("__dyld_bind_fully_image_containing_address", &p);
return p(address);
}
#endif // DEPRECATED_APIS_SUPPORTED
return dyld3::_dyld_register_func_for_add_image(func);
DYLD_LOCK_THIS_BLOCK;
- typedef void (*callback_t)(const struct mach_header *mh, intptr_t vmaddr_slide);
- static void (*p)(callback_t func) = NULL;
+ // Func must be a "void *" because dyld itself calls it. DriverKit
+ // libdyld.dylib uses diversified C function pointers but its dyld (the
+ // plain OS one) doesn't, so it must be resigned with 0 discriminator.
+ typedef void (*funcType)(void *func);
+ static funcType __ptrauth_dyld_function_ptr p = NULL;
if(p == NULL)
- _dyld_func_lookup("__dyld_register_func_for_add_image", (void**)&p);
- p(func);
+ dyld_func_lookup_and_resign("__dyld_register_func_for_add_image", &p);
+ p(reinterpret_cast<void *>(func));
}
/*
return dyld3::_dyld_register_func_for_remove_image(func);
DYLD_LOCK_THIS_BLOCK;
- typedef void (*callback_t)(const struct mach_header *mh, intptr_t vmaddr_slide);
- static void (*p)(callback_t func) = NULL;
+ // Func must be a "void *" because dyld itself calls it. DriverKit
+ // libdyld.dylib uses diversified C function pointers but its dyld (the
+ // plain OS one) doesn't, so it must be resigned with 0 discriminator.
+ typedef void (*funcType)(void *func);
+ static funcType __ptrauth_dyld_function_ptr p = NULL;
if(p == NULL)
- _dyld_func_lookup("__dyld_register_func_for_remove_image", (void**)&p);
- p(func);
+ dyld_func_lookup_and_resign("__dyld_register_func_for_remove_image", &p);
+ p(reinterpret_cast<void *>(func));
}
#if OBSOLETE_DYLD_API
void (*func)(NSModule module))
{
DYLD_LOCK_THIS_BLOCK;
- static void (*p)(void (*func)(NSModule module)) = NULL;
+ // Func must be a "void *" because dyld itself calls it. DriverKit
+ // libdyld.dylib uses diversified C function pointers but its dyld (the
+ // plain OS one) doesn't, so it must be resigned with 0 discriminator.
+ typedef void (*funcType)(void *func);
+ static funcType __ptrauth_dyld_function_ptr p = NULL;
if(p == NULL)
- _dyld_func_lookup("__dyld_register_func_for_link_module", (void**)&p);
- p(func);
+ dyld_func_lookup_and_resign("__dyld_register_func_for_link_module", &p);
+ p(reinterpret_cast<void *>(func));
}
/*
void (*func)(NSModule module))
{
DYLD_LOCK_THIS_BLOCK;
- static void (*p)(void (*func)(NSModule module)) = NULL;
+ // Func must be a "void *" because dyld itself calls it. DriverKit
+ // libdyld.dylib uses diversified C function pointers but its dyld (the
+ // plain OS one) doesn't, so it must be resigned with 0 discriminator.
+ typedef void (*funcType)(void *func);
+ static funcType __ptrauth_dyld_function_ptr p = NULL;
if(p == NULL)
- _dyld_func_lookup("__dyld_register_func_for_unlink_module", (void**)&p);
- p(func);
+ dyld_func_lookup_and_resign("__dyld_register_func_for_unlink_module", &p);
+ p(reinterpret_cast<void *>(func));
}
/*
void (*func)(NSModule oldmodule, NSModule newmodule))
{
DYLD_LOCK_THIS_BLOCK;
- static void (*p)(void (*func)(NSModule oldmodule,
- NSModule newmodule)) = NULL;
+ // Func must be a "void *" because dyld itself calls it. DriverKit
+ // libdyld.dylib uses diversified C function pointers but its dyld (the
+ // plain OS one) doesn't, so it must be resigned with 0 discriminator.
+ typedef void (*funcType)(void *func);
+ static funcType __ptrauth_dyld_function_ptr p = NULL;
if(p == NULL)
- _dyld_func_lookup("__dyld_register_func_for_replace_module", (void**)&p);
- p(func);
+ dyld_func_lookup_and_resign("__dyld_register_func_for_replace_module", &p);
+ p(reinterpret_cast<void *>(func));
}
unsigned long *size)
{
DYLD_LOCK_THIS_BLOCK;
- static void (*p)(NSModule module,
- void **objc_module,
- unsigned long *size) = NULL;
+ typedef void (*funcType)(NSModule module,
+ void **objc_module,
+ unsigned long *size);
+ static funcType __ptrauth_dyld_function_ptr p = NULL;
if(p == NULL)
- _dyld_func_lookup("__dyld_get_objc_module_sect_for_module", (void**)&p);
+ dyld_func_lookup_and_resign("__dyld_get_objc_module_sect_for_module", &p);
p(module, objc_module, size);
}
return dyld3::_dyld_image_count();
DYLD_NO_LOCK_THIS_BLOCK;
- static uint32_t (*p)(void) = NULL;
+ typedef uint32_t (*funcType)(void);
+ static funcType __ptrauth_dyld_function_ptr p = NULL;
if(p == NULL)
- _dyld_func_lookup("__dyld_image_count", (void**)&p);
+ dyld_func_lookup_and_resign("__dyld_image_count", &p);
return(p());
}
return dyld3::_dyld_get_image_header(image_index);
DYLD_NO_LOCK_THIS_BLOCK;
- static struct mach_header * (*p)(uint32_t image_index) = NULL;
+ typedef struct mach_header * (*funcType)(uint32_t image_index);
+ static funcType __ptrauth_dyld_function_ptr p = NULL;
if(p == NULL)
- _dyld_func_lookup("__dyld_get_image_header", (void**)&p);
+ dyld_func_lookup_and_resign("__dyld_get_image_header", &p);
return(p(image_index));
}
return dyld3::_dyld_get_image_vmaddr_slide(image_index);
DYLD_NO_LOCK_THIS_BLOCK;
- static unsigned long (*p)(uint32_t image_index) = NULL;
+ typedef unsigned long (*funcType)(uint32_t image_index);
+ static funcType __ptrauth_dyld_function_ptr p = NULL;
if(p == NULL)
- _dyld_func_lookup("__dyld_get_image_vmaddr_slide", (void**)&p);
+ dyld_func_lookup_and_resign("__dyld_get_image_vmaddr_slide", &p);
return(p(image_index));
}
return dyld3::_dyld_get_image_name(image_index);
DYLD_NO_LOCK_THIS_BLOCK;
- static const char* (*p)(uint32_t image_index) = NULL;
+ typedef const char* (*funcType)(uint32_t image_index);
+ static funcType __ptrauth_dyld_function_ptr p = NULL;
if(p == NULL)
- _dyld_func_lookup("__dyld_get_image_name", (void**)&p);
+ dyld_func_lookup_and_resign("__dyld_get_image_name", &p);
return(p(image_index));
}
return dyld3::_dyld_get_prog_image_header();
DYLD_LOCK_THIS_BLOCK;
- static const struct mach_header * (*p)(void) = NULL;
+ typedef const struct mach_header * (*funcType)(void);
+ static funcType __ptrauth_dyld_function_ptr p = NULL;
if(p == NULL)
- _dyld_func_lookup("__dyld_get_prog_image_header", (void**)&p);
+ dyld_func_lookup_and_resign("__dyld_get_prog_image_header", &p);
return p();
}
return dyld3::_dyld_image_containing_address(address);
DYLD_LOCK_THIS_BLOCK;
- static bool (*p)(const void*) = NULL;
+ typedef bool (*funcType)(const void*);
+ static funcType __ptrauth_dyld_function_ptr p = NULL;
if(p == NULL)
- _dyld_func_lookup("__dyld_image_containing_address", (void**)&p);
+ dyld_func_lookup_and_resign("__dyld_image_containing_address", &p);
return(p(address));
}
return dyld3::_dyld_get_image_header_containing_address(address);
DYLD_LOCK_THIS_BLOCK;
- static const struct mach_header * (*p)(const void*) = NULL;
+ typedef const struct mach_header * (*funcType)(const void*);
+ static funcType __ptrauth_dyld_function_ptr p = NULL;
if(p == NULL)
- _dyld_func_lookup("__dyld_get_image_header_containing_address", (void**)&p);
+ dyld_func_lookup_and_resign("__dyld_get_image_header_containing_address", &p);
return p(address);
}
bool _dyld_launched_prebound(void)
{
DYLD_LOCK_THIS_BLOCK;
- static bool (*p)(void) = NULL;
+ typedef bool (*funcType)(void);
+ static funcType __ptrauth_dyld_function_ptr p = NULL;
if(p == NULL)
- _dyld_func_lookup("__dyld_launched_prebound", (void**)&p);
+ dyld_func_lookup_and_resign("__dyld_launched_prebound", &p);
return(p());
}
bool _dyld_all_twolevel_modules_prebound(void)
{
DYLD_LOCK_THIS_BLOCK;
- static bool (*p)(void) = NULL;
+ typedef bool (*funcType)(void);
+ static funcType __ptrauth_dyld_function_ptr p = NULL;
if(p == NULL)
- _dyld_func_lookup("__dyld_all_twolevel_modules_prebound", (void**)&p);
+ dyld_func_lookup_and_resign("__dyld_all_twolevel_modules_prebound", &p);
return(p());
}
#endif // DEPRECATED_APIS_SUPPORTED
// leave until dyld's that might call this are rare
}
+// FIXME: This is a mess. Why can't Driverkit have its own dyld?
+static int cxa_atexit_thunk(void (*func)(void *), void *arg, void *dso)
+{
+ // Func will have come from dyld and so be signed with 0 discriminator,
+ // resign it appropriately before passing to the real __cxa_atexit.
+ func = ptrauth_auth_and_resign(func, ptrauth_key_function_pointer, 0,
+ ptrauth_key_function_pointer,
+ ptrauth_function_pointer_type_discriminator(__typeof__(func)));
+ return __cxa_atexit(func, arg, dso);
+}
+
+template<typename FTy> static FTy *resign_for_dyld(FTy *func) {
+ return ptrauth_auth_and_resign(func, ptrauth_key_function_pointer,
+ ptrauth_function_pointer_type_discriminator(__typeof__(func)),
+ ptrauth_key_function_pointer, 0);
+}
+
// the table passed to dyld containing thread helpers
-static dyld::LibSystemHelpers sHelpers = { 13, &dyldGlobalLockAcquire, &dyldGlobalLockRelease,
- &getPerThreadBufferFor_dlerror, &malloc, &free, &__cxa_atexit,
- &shared_cache_missing, &shared_cache_out_of_date,
- NULL, NULL,
- &pthread_key_create, &pthread_setspecific,
- &malloc_size,
- &pthread_getspecific,
- &__cxa_finalize,
- address_of_start,
- &hasPerThreadBufferFor_dlerror,
- &isLaunchdOwned,
- &vm_allocate,
- &mmap,
- &__cxa_finalize_ranges
- };
+static dyld::LibSystemHelpers sHelpers = { 13 };
static const objc_opt::objc_opt_t* gObjCOpt = nullptr;
//
//
extern "C" void tlv_initializer();
void _dyld_initializer()
-{
- void (*p)(dyld::LibSystemHelpers*);
+{
+ sHelpers.acquireGlobalDyldLock = resign_for_dyld(&dyldGlobalLockAcquire);
+ sHelpers.releaseGlobalDyldLock = resign_for_dyld(&dyldGlobalLockRelease);
+ sHelpers.getThreadBufferFor_dlerror = resign_for_dyld(&getPerThreadBufferFor_dlerror);
+ sHelpers.malloc = resign_for_dyld(&malloc);
+ sHelpers.free = resign_for_dyld(&free);
+ sHelpers.cxa_atexit = resign_for_dyld(&cxa_atexit_thunk);
+ sHelpers.dyld_shared_cache_missing = resign_for_dyld(&shared_cache_missing);
+ sHelpers.dyld_shared_cache_out_of_date = resign_for_dyld(&shared_cache_out_of_date);
+ sHelpers.acquireDyldInitializerLock = NULL;
+ sHelpers.releaseDyldInitializerLock = NULL;
+ sHelpers.pthread_key_create = resign_for_dyld(&pthread_key_create);
+ sHelpers.pthread_setspecific = resign_for_dyld(&pthread_setspecific);
+ sHelpers.malloc_size = resign_for_dyld(&malloc_size);
+ sHelpers.pthread_getspecific = resign_for_dyld(&pthread_getspecific);
+ sHelpers.cxa_finalize = resign_for_dyld(&__cxa_finalize);
+ sHelpers.startGlueToCallExit = address_of_start;
+ sHelpers.hasPerThreadBufferFor_dlerror = resign_for_dyld(&hasPerThreadBufferFor_dlerror);
+ sHelpers.isLaunchdOwned = resign_for_dyld(&isLaunchdOwned);
+ sHelpers.vm_alloc = resign_for_dyld(&vm_allocate);
+ sHelpers.mmap = resign_for_dyld(&mmap);
+ sHelpers.cxa_finalize_ranges = resign_for_dyld(&__cxa_finalize_ranges);
+
+ typedef void (*funcType)(dyld::LibSystemHelpers*);
+ static funcType __ptrauth_dyld_function_ptr p = NULL;
// Get the optimized objc pointer now that the cache is loaded
const dyld_all_image_infos* allInfo = _dyld_get_all_image_infos();
#endif
}
else {
- _dyld_func_lookup("__dyld_register_thread_helpers", (void**)&p);
+ dyld_func_lookup_and_resign("__dyld_register_thread_helpers", &p);
if(p != NULL)
p(&sHelpers);
}
result = dyld3::dladdr(addr, info);
} else {
DYLD_LOCK_THIS_BLOCK;
- static int (*p)(const void* , Dl_info*) = NULL;
+ typedef int (*funcType)(const void* , Dl_info*);
+ static funcType __ptrauth_dyld_function_ptr p = NULL;
if(p == NULL)
- _dyld_func_lookup("__dyld_dladdr", (void**)&p);
+ dyld_func_lookup_and_resign("__dyld_dladdr", &p);
result = p(addr, info);
}
timer.setData4(result);
return dyld3::dlerror();
DYLD_LOCK_THIS_BLOCK;
- static char* (*p)() = NULL;
+ typedef char* (*funcType)();
+ static funcType __ptrauth_dyld_function_ptr p = NULL;
if(p == NULL)
- _dyld_func_lookup("__dyld_dlerror", (void**)&p);
+ dyld_func_lookup_and_resign("__dyld_dlerror", &p);
return(p());
}
}
DYLD_LOCK_THIS_BLOCK;
- static int (*p)(void* handle) = NULL;
+ typedef int (*funcType)(void* handle);
+ static funcType __ptrauth_dyld_function_ptr p = NULL;
if(p == NULL)
- _dyld_func_lookup("__dyld_dlclose", (void**)&p);
+ dyld_func_lookup_and_resign("__dyld_dlclose", &p);
result = p(handle);
timer.setData4(result);
return result;
// dlopen is special. locking is done inside dyld to allow initializer to run without lock
DYLD_NO_LOCK_THIS_BLOCK;
- static void* (*p)(const char* path, int, void*) = NULL;
+ typedef void* (*funcType)(const char* path, int, void*);
+ static funcType __ptrauth_dyld_function_ptr p = NULL;
if(p == NULL)
- _dyld_func_lookup("__dyld_dlopen_internal", (void**)&p);
+ dyld_func_lookup_and_resign("__dyld_dlopen_internal", &p);
result = p(path, mode, callerAddress);
// use asm block to prevent tail call optimization
// this is needed because dlopen uses __builtin_return_address() and depends on this glue being in the frame chain
}
DYLD_LOCK_THIS_BLOCK;
- static bool (*p)(const char* path, void* callerAddress) = NULL;
+ typedef bool (*funcType)(const char* path, void* callerAddress);
+ static funcType __ptrauth_dyld_function_ptr p = NULL;
if(p == NULL)
- _dyld_func_lookup("__dyld_dlopen_preflight_internal", (void**)&p);
+ dyld_func_lookup_and_resign("__dyld_dlopen_preflight_internal", &p);
result = p(path, __builtin_return_address(0));
timer.setData4(result);
return result;
}
DYLD_LOCK_THIS_BLOCK;
- static void* (*p)(void* handle, const char* symbol, void *callerAddress) = NULL;
+ typedef void* (*funcType)(void* handle, const char* symbol, void *callerAddress);
+ static funcType __ptrauth_dyld_function_ptr p = NULL;
if(p == NULL)
- _dyld_func_lookup("__dyld_dlsym_internal", (void**)&p);
+ dyld_func_lookup_and_resign("__dyld_dlsym_internal", &p);
result = p(handle, symbol, __builtin_return_address(0));
timer.setData4(result);
return result;
return dyld3::_dyld_get_all_image_infos();
DYLD_NO_LOCK_THIS_BLOCK;
- static struct dyld_all_image_infos* (*p)() = NULL;
+ typedef struct dyld_all_image_infos* (*funcType)();
+ static funcType __ptrauth_dyld_function_ptr p = NULL;
if(p == NULL)
- _dyld_func_lookup("__dyld_get_all_image_infos", (void**)&p);
+ dyld_func_lookup_and_resign("__dyld_get_all_image_infos", &p);
return p();
}
return dyld3::_dyld_find_unwind_sections(addr, info);
DYLD_NO_LOCK_THIS_BLOCK;
- static void* (*p)(void*, dyld_unwind_sections*) = NULL;
+ typedef void* (*funcType)(void*, dyld_unwind_sections*);
+ static funcType __ptrauth_dyld_function_ptr p = NULL;
if(p == NULL)
- _dyld_func_lookup("__dyld_find_unwind_sections", (void**)&p);
+ dyld_func_lookup_and_resign("__dyld_find_unwind_sections", &p);
return p(addr, info);
}
#endif
void* _dyld_fast_stub_entry(void* loadercache, long lazyinfo)
{
+ // Looks up dyld's "__dyld_fast_stub_entry" implementation once, caches it,
+ // and forwards the lazy-binding stub arguments to it.
	DYLD_NO_LOCK_THIS_BLOCK;
-    static void* (*p)(void*, long) = NULL;
+ // Store the cached entry point in a __ptrauth-qualified function pointer so
+ // dyld_func_lookup_and_resign() can re-sign it instead of keeping a raw value.
+    typedef void* (*funcType)(void*, long);
+    static funcType __ptrauth_dyld_function_ptr p = NULL;
	if(p == NULL)
-	    _dyld_func_lookup("__dyld_fast_stub_entry", (void**)&p);
+	    dyld_func_lookup_and_resign("__dyld_fast_stub_entry", &p);
	return p(loadercache, lazyinfo);
}
#endif
return dyld3::dyld_image_path_containing_address(addr);
DYLD_NO_LOCK_THIS_BLOCK;
- static const char* (*p)(const void*) = NULL;
+ typedef const char* (*funcType)(const void*);
+ static funcType __ptrauth_dyld_function_ptr p = NULL;
if(p == NULL)
- _dyld_func_lookup("__dyld_image_path_containing_address", (void**)&p);
+ dyld_func_lookup_and_resign("__dyld_image_path_containing_address", &p);
return p(addr);
}
return dyld3::dyld_image_header_containing_address(addr);
DYLD_NO_LOCK_THIS_BLOCK;
- static const mach_header* (*p)(const void*) = NULL;
+ typedef const mach_header* (*funcType)(const void*);
+ static funcType __ptrauth_dyld_function_ptr p = NULL;
if(p == NULL)
- _dyld_func_lookup("__dyld_get_image_header_containing_address", (void**)&p);
+ dyld_func_lookup_and_resign("__dyld_get_image_header_containing_address", &p);
return p(addr);
}
return dyld3::dyld_shared_cache_some_image_overridden();
DYLD_NO_LOCK_THIS_BLOCK;
- static bool (*p)() = NULL;
+ typedef bool (*funcType)();
+ static funcType __ptrauth_dyld_function_ptr p = NULL;
if(p == NULL)
- _dyld_func_lookup("__dyld_shared_cache_some_image_overridden", (void**)&p);
+ dyld_func_lookup_and_resign("__dyld_shared_cache_some_image_overridden", &p);
return p();
}
return dyld3::_dyld_get_shared_cache_uuid(uuid);
DYLD_NO_LOCK_THIS_BLOCK;
- static bool (*p)(uuid_t) = NULL;
+ typedef bool (*funcType)(uuid_t);
+ static funcType __ptrauth_dyld_function_ptr p = NULL;
if(p == NULL)
- _dyld_func_lookup("__dyld_get_shared_cache_uuid", (void**)&p);
+ dyld_func_lookup_and_resign("__dyld_get_shared_cache_uuid", &p);
return p(uuid);
}
return dyld3::_dyld_get_shared_cache_range(length);
DYLD_NO_LOCK_THIS_BLOCK;
- static const void* (*p)(size_t*) = NULL;
+ typedef const void* (*funcType)(size_t*);
+ static funcType __ptrauth_dyld_function_ptr p = NULL;
if(p == NULL)
- _dyld_func_lookup("__dyld_get_shared_cache_range", (void**)&p);
+ dyld_func_lookup_and_resign("__dyld_get_shared_cache_range", &p);
return p(length);
}
return dyld3::_dyld_images_for_addresses(count, addresses, infos);
DYLD_NO_LOCK_THIS_BLOCK;
- static const void (*p)(unsigned, const void*[], struct dyld_image_uuid_offset[]) = NULL;
+ typedef const void (*funcType)(unsigned, const void*[], struct dyld_image_uuid_offset[]);
+ static funcType __ptrauth_dyld_function_ptr p = NULL;
if(p == NULL)
- _dyld_func_lookup("__dyld_images_for_addresses", (void**)&p);
+ dyld_func_lookup_and_resign("__dyld_images_for_addresses", &p);
return p(count, addresses, infos);
}
return dyld3::_dyld_register_for_image_loads(func);
DYLD_NO_LOCK_THIS_BLOCK;
- static const void (*p)(void (*)(const mach_header* mh, const char* path, bool unloadable)) = NULL;
+ typedef const void (*funcType)(void (*)(const mach_header* mh, const char* path, bool unloadable));
+ static funcType __ptrauth_dyld_function_ptr p = NULL;
if(p == NULL)
- _dyld_func_lookup("__dyld_register_for_image_loads", (void**)&p);
+ dyld_func_lookup_and_resign("__dyld_register_for_image_loads", &p);
return p(func);
}
return dyld3::_dyld_register_for_bulk_image_loads(func);
DYLD_NO_LOCK_THIS_BLOCK;
- static const void (*p)(void (*)(unsigned imageCount, const mach_header* mhs[], const char* paths[])) = NULL;
+ typedef const void (*funcType)(void (*)(unsigned imageCount, const mach_header* mhs[], const char* paths[]));
+ static funcType __ptrauth_dyld_function_ptr p = NULL;
if(p == NULL)
- _dyld_func_lookup("__dyld_register_for_bulk_image_loads", (void**)&p);
+ dyld_func_lookup_and_resign("__dyld_register_for_bulk_image_loads", &p);
return p(func);
}
return dyld3::dyld_process_is_restricted();
DYLD_NO_LOCK_THIS_BLOCK;
- static bool (*p)() = NULL;
+ typedef bool (*funcType)();
+ static funcType __ptrauth_dyld_function_ptr p = NULL;
if(p == NULL)
- _dyld_func_lookup("__dyld_process_is_restricted", (void**)&p);
+ dyld_func_lookup_and_resign("__dyld_process_is_restricted", &p);
return p();
}
return dyld3::dyld_shared_cache_file_path();
DYLD_NO_LOCK_THIS_BLOCK;
- static const char* (*p)() = NULL;
+ typedef const char* (*funcType)();
+ static funcType __ptrauth_dyld_function_ptr p = NULL;
if(p == NULL)
- _dyld_func_lookup("__dyld_shared_cache_file_path", (void**)&p);
+ dyld_func_lookup_and_resign("__dyld_shared_cache_file_path", &p);
return p();
}
return dyld3::dyld_has_inserted_or_interposing_libraries();
DYLD_NO_LOCK_THIS_BLOCK;
- static bool (*p)() = NULL;
+ typedef bool (*funcType)();
+ static funcType __ptrauth_dyld_function_ptr p = NULL;
if (p == NULL)
- _dyld_func_lookup("__dyld_has_inserted_or_interposing_libraries", (void**)&p);
+ dyld_func_lookup_and_resign("__dyld_has_inserted_or_interposing_libraries", &p);
return p();
}
return dyld3::dyld_dynamic_interpose(mh, array, count);
DYLD_LOCK_THIS_BLOCK;
- static void (*p)(const struct mach_header* mh, const struct dyld_interpose_tuple array[], size_t count) = NULL;
+ typedef void (*funcType)(const struct mach_header* mh, const struct dyld_interpose_tuple array[], size_t count);
+ static funcType __ptrauth_dyld_function_ptr p = NULL;
if (p == NULL)
- _dyld_func_lookup("__dyld_dynamic_interpose", (void**)&p);
+ dyld_func_lookup_and_resign("__dyld_dynamic_interpose", &p);
p(mh, array, count);
}
return dyld3::_dyld_fork_child();
DYLD_NO_LOCK_THIS_BLOCK;
- static void (*p)() = NULL;
+ typedef void (*funcType)();
+ static funcType __ptrauth_dyld_function_ptr p = NULL;
if(p == NULL)
- _dyld_func_lookup("__dyld_fork_child", (void**)&p);
+ dyld_func_lookup_and_resign("__dyld_fork_child", &p);
return p();
}
return dyld3::_dyld_is_memory_immutable(addr, length);
DYLD_NO_LOCK_THIS_BLOCK;
- static bool (*p)(const void*, size_t) = NULL;
+ typedef bool (*funcType)(const void*, size_t);
+ static funcType __ptrauth_dyld_function_ptr p = NULL;
if(p == NULL)
- _dyld_func_lookup("__dyld_is_memory_immutable", (void**)&p);
+ dyld_func_lookup_and_resign("__dyld_is_memory_immutable", &p);
return p(addr, length);
}
return dyld3::_dyld_objc_notify_register(mapped, init, unmapped);
DYLD_LOCK_THIS_BLOCK;
- static bool (*p)(_dyld_objc_notify_mapped, _dyld_objc_notify_init, _dyld_objc_notify_unmapped) = NULL;
+ typedef bool (*funcType)(_dyld_objc_notify_mapped, _dyld_objc_notify_init, _dyld_objc_notify_unmapped);
+ static funcType __ptrauth_dyld_function_ptr p = NULL;
if(p == NULL)
- _dyld_func_lookup("__dyld_objc_notify_register", (void**)&p);
+ dyld_func_lookup_and_resign("__dyld_objc_notify_register", &p);
p(mapped, init, unmapped);
}
if ( gUseDyld3 )
return dyld3::_dyld_register_driverkit_main(mainFunc);
- static bool (*p)(void (*mainFunc)(void)) = NULL;
+ typedef bool (*funcType)(void *);
+ static funcType __ptrauth_dyld_function_ptr p = NULL;
if(p == NULL)
- _dyld_func_lookup("__dyld_register_driverkit_main", (void**)&p);
- p(mainFunc);
+ dyld_func_lookup_and_resign("__dyld_register_driverkit_main", &p);
+ p(reinterpret_cast<void *>(mainFunc));
}
// This is populated in the shared cache builder, so that the ranges are protected by __DATA_CONST
//
struct __DATA__dyld {
long lazy;
- int (*lookup)(const char*, void**);
+ void *lookup;
// ProgramVars
const void* mh;
int* NXArgcPtr;
__attribute__((visibility("hidden")))
int _dyld_func_lookup(const char* dyld_func_name, void **address)
{
+ // Resolves the named dyld entry point via the lookup pointer dyld published
+ // in myDyldSection, storing the result in *address.
-	return (*myDyldSection.lookup)(dyld_func_name, address);
+	// Cast lookup function here to resign from dyld's 0-discriminator to a real
+	// function pointer if needed.
+	// NOTE(review): this assigns a non-function pointer (myDyldSection.lookup)
+	// to a typed function pointer with no explicit cast — legal in C, ill-formed
+	// in C++; confirm this translation unit compiles as intended.
+	int (*lookupFn)(const char*, void**) = myDyldSection.lookup;
+	return lookupFn(dyld_func_name, address);
}
#if TARGET_OS_IOS && !TARGET_OS_SIMULATOR
// Add in version 13
DYLD_SYSCALL_VTABLE_ENTRY(fstat);
DYLD_SYSCALL_VTABLE_ENTRY(vm_copy);
+ // Add in version 14
+ DYLD_SYSCALL_VTABLE_ENTRY(task_dyld_process_info_notify_get);
};
extern const struct SyscallHelpers* gSyscallHelpers;
* @APPLE_LICENSE_HEADER_END@
*/
+#include <dlfcn.h>
#include <stdlib.h>
#include <string.h>
#include <limits.h>
std::swap(_localAddress, other._localAddress);
std::swap(_size, other._size);
std::swap(_kr, other._kr);
- std::swap(_shared, other._shared);
return *this;
}
+// RemoteBuffer constructors. The 'shared' mapping mode (and the _shared member)
+// is removed by this change: a RemoteBuffer now always owns a private local
+// copy of the remote memory, produced by RemoteBuffer::create().
-RemoteBuffer::RemoteBuffer() : _localAddress(0), _size(0), _kr(KERN_SUCCESS), _shared(false) {}
-RemoteBuffer::RemoteBuffer(std::tuple<mach_vm_address_t,vm_size_t,kern_return_t,bool> T)
- : _localAddress(std::get<0>(T)), _size(std::get<1>(T)), _kr(std::get<2>(T)), _shared(std::get<3>(T)) {}
+RemoteBuffer::RemoteBuffer() : _localAddress(0), _size(0), _kr(KERN_SUCCESS) {}
+RemoteBuffer::RemoteBuffer(std::tuple<mach_vm_address_t,vm_size_t,kern_return_t> T)
+ : _localAddress(std::get<0>(T)), _size(std::get<1>(T)), _kr(std::get<2>(T)) {}
-RemoteBuffer::RemoteBuffer(task_t task, mach_vm_address_t remote_address, size_t remote_size, bool shared, bool allow_truncation)
-: RemoteBuffer(RemoteBuffer::create(task, remote_address, remote_size, shared, allow_truncation)) {};
+RemoteBuffer::RemoteBuffer(task_t task, mach_vm_address_t remote_address, size_t remote_size, bool allow_truncation)
+: RemoteBuffer(RemoteBuffer::create(task, remote_address, remote_size, allow_truncation)) {};
+// Maps 'size' bytes at 'remote_address' of 'task' into this process, copies
+// them into a private malloc'd buffer, deallocates the temporary mapping, and
+// returns {buffer address, kern_return_t} (MACH_VM_MIN_ADDRESS on failure).
std::pair<mach_vm_address_t, kern_return_t>
-RemoteBuffer::map(task_t task, mach_vm_address_t remote_address, vm_size_t size, bool shared) {
+RemoteBuffer::map(task_t task, mach_vm_address_t remote_address, vm_size_t size) {
+ // mach_vm_remap_new() is the task_read-port variant of mach_vm_remap(); on
+ // the simulator it is resolved with dlsym() below so we can fall back to the
+ // old call on hosts that do not support task_read ports.
+  static kern_return_t (*mvrn)(vm_map_t, mach_vm_address_t*, mach_vm_size_t, mach_vm_offset_t, int, vm_map_read_t, mach_vm_address_t,
+                               boolean_t, vm_prot_t*, vm_prot_t*, vm_inherit_t) = nullptr;
    vm_prot_t cur_protection = VM_PROT_NONE;
-    vm_prot_t max_protection = VM_PROT_NONE;
-    int flags;
+    vm_prot_t max_protection = VM_PROT_READ;
    if (size == 0) {
        return std::make_pair(MACH_VM_MIN_ADDRESS, KERN_INVALID_ARGUMENT);
    }
-    if (shared) {
-        flags = VM_FLAGS_ANYWHERE | VM_FLAGS_RETURN_DATA_ADDR;
-    } else {
-        // <rdar://55343677>
-        // Since we are getting rid of the flag probing we have to make sure that simulator libdyld's do not use VM_FLAGS_RESILIENT_MEDIA
-        // FIXME: Remove this when simulator builds do not support back deployment to 10.14
+    mach_vm_address_t localAddress = 0;
#if TARGET_OS_SIMULATOR
-        flags = VM_FLAGS_ANYWHERE | VM_FLAGS_RETURN_DATA_ADDR | VM_FLAGS_RESILIENT_CODESIGN;
+    static dispatch_once_t onceToken;
+    dispatch_once(&onceToken, ^{
+        mvrn = (kern_return_t (*)(vm_map_t, mach_vm_address_t*, mach_vm_size_t, mach_vm_offset_t, int, vm_map_read_t, mach_vm_address_t,
+                                  boolean_t, vm_prot_t*, vm_prot_t*, vm_inherit_t))dlsym(RTLD_DEFAULT, "mach_vm_remap_new");
+        if (mvrn == nullptr) {
+            // We are running on a system that does not support task_read ports, use the old call
+            mvrn = (kern_return_t (*)(vm_map_t, mach_vm_address_t*, mach_vm_size_t, mach_vm_offset_t, int, vm_map_read_t, mach_vm_address_t,
+                                      boolean_t, vm_prot_t*, vm_prot_t*, vm_inherit_t))dlsym(RTLD_DEFAULT, "mach_vm_remap");
+        }
+    });
#else
-        flags = VM_FLAGS_ANYWHERE | VM_FLAGS_RETURN_DATA_ADDR | VM_FLAGS_RESILIENT_CODESIGN | VM_FLAGS_RESILIENT_MEDIA;
+    mvrn = &mach_vm_remap_new;
#endif
-    }
-    mach_vm_address_t localAddress = 0;
-    auto kr = mach_vm_remap(mach_task_self(),
+    auto kr = mvrn(mach_task_self(),
                    &localAddress,
                    size,
                    0,  // mask
-                    flags,
+                    VM_FLAGS_ANYWHERE | VM_FLAGS_RESILIENT_CODESIGN | VM_FLAGS_RESILIENT_MEDIA,
                    task,
                    remote_address,
-                    !shared,
+                    true,
                    &cur_protection,
                    &max_protection,
                    VM_INHERIT_NONE);
    // we are copying some memory in the middle of a mach-o that is on a USB drive that is disconnected after we perform
    // the mapping). Once we copy them into a local buffer the memory will be handled by the default pager instead of
    // potentially being backed by the mmap pager, and thus will be guaranteed not to mutate out from under us.
-    if (!shared) {
-        void* buffer = malloc(size);
-        if (buffer == nullptr) {
-            (void)vm_deallocate(mach_task_self(), (vm_address_t)localAddress, size);
-            return std::make_pair(MACH_VM_MIN_ADDRESS, KERN_NO_SPACE);
-        }
-        memcpy(buffer, (void *)localAddress, size);
+    void* buffer = malloc(size);
+    if (buffer == nullptr) {
        (void)vm_deallocate(mach_task_self(), (vm_address_t)localAddress, size);
-        return std::make_pair((vm_address_t)buffer, KERN_SUCCESS);
-    }
-    // A shared buffer was requested, if the permissions are not correct deallocate the region and return failure
-    if (cur_protection != (VM_PROT_READ|VM_PROT_WRITE)) {
-        if (localAddress != 0) {
-            (void)vm_deallocate(mach_task_self(), (size_t)localAddress, size);
-        }
-        return std::make_pair(MACH_VM_MIN_ADDRESS, KERN_PROTECTION_FAILURE);
+        return std::make_pair(MACH_VM_MIN_ADDRESS, KERN_NO_SPACE);
    }
-    // We have a successfully created shared buffer with the correct permissions, return it
-    return std::make_pair(localAddress, KERN_SUCCESS);
+    memcpy(buffer, (void *)localAddress, size);
+    (void)vm_deallocate(mach_task_self(), (vm_address_t)localAddress, size);
+    return std::make_pair((vm_address_t)buffer, KERN_SUCCESS);
}
+// Maps the requested remote range, retrying once with the size truncated to
+// the end of the containing page when 'allow_truncation' is set. Returns
+// {localAddress, mappedSize, kr}; on total failure the address is
+// MACH_VM_MIN_ADDRESS and the size is 0.
-std::tuple<mach_vm_address_t,vm_size_t,kern_return_t,bool> RemoteBuffer::create(task_t task,
+std::tuple<mach_vm_address_t,vm_size_t,kern_return_t> RemoteBuffer::create(task_t task,
                                                                                mach_vm_address_t remote_address,
                                                                                size_t size,
-                                                                                bool shared,
                                                                                bool allow_truncation) {
    mach_vm_address_t localAddress;
    kern_return_t kr;
    // Try the initial map
-    std::tie(localAddress, kr) = map(task, remote_address, size, shared);
-    if (kr == KERN_SUCCESS) return std::make_tuple(localAddress, size, kr, shared);
+    std::tie(localAddress, kr) = map(task, remote_address, size);
+    if (kr == KERN_SUCCESS) return std::make_tuple(localAddress, size, kr);
    // The first attempt failed, truncate if possible and try again. We only need to try once since the largest
    // truncatable buffer we map is less than a single page. To be more general we would need to try repeatedly in a
    // loop.
    if (allow_truncation) {
        size = PAGE_SIZE - remote_address%PAGE_SIZE;
-        std::tie(localAddress, kr) = map(task, remote_address, size, shared);
-        if (kr == KERN_SUCCESS) return std::make_tuple(localAddress, size, kr, shared);
+        std::tie(localAddress, kr) = map(task, remote_address, size);
+        if (kr == KERN_SUCCESS) return std::make_tuple(localAddress, size, kr);
    }
    // If we reach this then the mapping completely failed
-    return std::make_tuple(MACH_VM_MIN_ADDRESS, 0, kr, shared);
+    return std::make_tuple(MACH_VM_MIN_ADDRESS, 0, kr);
}
RemoteBuffer::~RemoteBuffer() {
    if (!_localAddress) { return; }
-
-    if (_shared) {
-        (void)vm_deallocate(mach_task_self(), (vm_address_t)_localAddress, _size);
-    } else {
-        free((void*)_localAddress);
-    }
+    // map() now always returns a private malloc'd copy, so plain free() is the
+    // only release path needed (the shared/vm_deallocate path is gone).
+    free((void*)_localAddress);
}
void *RemoteBuffer::getLocalAddress() const { return (void *)_localAddress; }
size_t RemoteBuffer::getSize() const { return _size; }
kern_return_t RemoteBuffer::getKernelReturn() const { return _kr; }
-void withRemoteBuffer(task_t task, mach_vm_address_t remote_address, size_t remote_size, bool shared, bool allow_truncation, kern_return_t *kr, void (^block)(void *buffer, size_t size)) {
+// Runs 'block' with a temporary private copy of remote_size bytes read from
+// 'task' at remote_address. 'kr' may be NULL (status is then discarded into a
+// local sink); 'block' is invoked only when the copy succeeded.
+void withRemoteBuffer(task_t task, mach_vm_address_t remote_address, size_t remote_size, bool allow_truncation, kern_return_t *kr, void (^block)(void *buffer, size_t size)) {
    kern_return_t krSink = KERN_SUCCESS;
    if (kr == nullptr) {
        kr = &krSink;
    }
-    RemoteBuffer buffer(task, remote_address, remote_size, shared, allow_truncation);
+    RemoteBuffer buffer(task, remote_address, remote_size, allow_truncation);
    *kr = buffer.getKernelReturn();
    if (*kr == KERN_SUCCESS) {
        block(buffer.getLocalAddress(), buffer.getSize());
if (result) {
// If it returned the process is suspended and there is nothing more to do
return std::move(result);
- } else {
- // Check to see if the process change timestamp is greater than 0, if not then sleep to let the process
- // finish initializing
- if (allImageInfo.infoArrayChangeTimestamp == 0) {
- usleep(1000 * 50); // 50ms
- }
}
+ usleep(1000 * 50); // 50ms
+ // Not exactly correct, but conveys that operation may succeed in the future
+ *kr = KERN_RESOURCE_SHORTAGE;
+ return nullptr;
}
// Test to see if there are no changes and we can exit early
return nullptr;
}
- for (uint32_t j=0; j < 10; ++j) {
- uint64_t currentTimestamp = allImageInfo.infoArrayChangeTimestamp;
- mach_vm_address_t infoArray = allImageInfo.infoArray;
- if (currentTimestamp == 0) continue;
- if (infoArray == 0) {
- // Check if the task is suspended mid dylib load and exit early
- mach_task_basic_info ti;
- mach_msg_type_number_t count = MACH_TASK_BASIC_INFO_COUNT;
- if ((*kr = task_info(task, MACH_TASK_BASIC_INFO, (task_info_t)&ti, &count))) {
- continue;
- }
+ uint64_t currentTimestamp = allImageInfo.infoArrayChangeTimestamp;
+ mach_vm_address_t infoArray = allImageInfo.infoArray;
+ if (infoArray == 0) {
+ usleep(1000 * 50); // 50ms
+ // Not exactly correct, but conveys that operation may succeed in the future
+ *kr = KERN_RESOURCE_SHORTAGE;
+ return nullptr;
+ };
- // The task is suspended, exit
- if (ti.suspend_count != 0) {
- // Not exactly correct, but conveys that operation may succeed in the future
- *kr = KERN_RESOURCE_SHORTAGE;
- return nullptr;
+ // For the moment we are going to truncate any image list longer than 8192 because some programs do
+ // terrible things that corrupt their own image lists and we need to stop clients from crashing
+ // reading them. We can try to do something more advanced in the future. rdar://27446361
+ uint32_t imageCount = allImageInfo.infoArrayCount;
+ imageCount = MIN(imageCount, 8192);
+ size_t imageArraySize = imageCount * sizeof(T2);
+
+ withRemoteBuffer(task, infoArray, imageArraySize, false, kr, ^(void *buffer, size_t size) {
+ // figure out how many path strings will need to be copied and their size
+ T2* imageArray = (T2 *)buffer;
+ const dyld_all_image_infos* myInfo = _dyld_get_all_image_infos();
+ bool sameCacheAsThisProcess = !allImageInfo.processDetachedFromSharedRegion
+ && !myInfo->processDetachedFromSharedRegion
+ && ((memcmp(myInfo->sharedCacheUUID, &allImageInfo.sharedCacheUUID[0], 16) == 0)
+ && (myInfo->sharedCacheSlide == allImageInfo.sharedCacheSlide));
+ unsigned countOfPathsNeedingCopying = 0;
+ if ( sameCacheAsThisProcess ) {
+ for (uint32_t i=0; i < imageCount; ++i) {
+ if ( !inCache(imageArray[i].imageFilePath) )
+ ++countOfPathsNeedingCopying;
}
- continue;
- };
-
- // For the moment we are going to truncate any image list longer than 8192 because some programs do
- // terrible things that corrupt their own image lists and we need to stop clients from crashing
- // reading them. We can try to do something more advanced in the future. rdar://27446361
- uint32_t imageCount = allImageInfo.infoArrayCount;
- imageCount = MIN(imageCount, 8192);
- size_t imageArraySize = imageCount * sizeof(T2);
-
- withRemoteBuffer(task, infoArray, imageArraySize, false, false, kr, ^(void *buffer, size_t size) {
- // figure out how many path strings will need to be copied and their size
- T2* imageArray = (T2 *)buffer;
- const dyld_all_image_infos* myInfo = _dyld_get_all_image_infos();
- bool sameCacheAsThisProcess = !allImageInfo.processDetachedFromSharedRegion
- && !myInfo->processDetachedFromSharedRegion
- && ((memcmp(myInfo->sharedCacheUUID, &allImageInfo.sharedCacheUUID[0], 16) == 0)
- && (myInfo->sharedCacheSlide == allImageInfo.sharedCacheSlide));
- unsigned countOfPathsNeedingCopying = 0;
- if ( sameCacheAsThisProcess ) {
- for (uint32_t i=0; i < imageCount; ++i) {
- if ( !inCache(imageArray[i].imageFilePath) )
- ++countOfPathsNeedingCopying;
- }
+ }
+ else {
+ countOfPathsNeedingCopying = imageCount+1;
+ }
+ unsigned imageCountWithDyld = imageCount+1;
+
+ // allocate result object
+ size_t allocationSize = sizeof(dyld_process_info_base)
+ + sizeof(dyld_process_cache_info)
+ + sizeof(dyld_process_aot_cache_info)
+ + sizeof(dyld_process_state_info)
+ + sizeof(ImageInfo)*(imageCountWithDyld)
+ + sizeof(dyld_aot_image_info_64)*(allImageInfo.aotInfoCount) // add the size necessary for aot info to this buffer
+ + sizeof(SegmentInfo)*imageCountWithDyld*10
+ + countOfPathsNeedingCopying*PATH_MAX;
+ void* storage = malloc(allocationSize);
+ if (storage == nullptr) {
+ *kr = KERN_NO_SPACE;
+ result = nullptr;
+ return;
+ }
+ auto info = dyld_process_info_ptr(new (storage) dyld_process_info_base(allImageInfo.platform, imageCountWithDyld, allImageInfo.aotInfoCount, allocationSize), deleter);
+ (void)info->reserveSpace(sizeof(dyld_process_info_base)+sizeof(dyld_process_cache_info)+sizeof(dyld_process_state_info)+sizeof(dyld_process_aot_cache_info));
+ (void)info->reserveSpace(sizeof(ImageInfo)*imageCountWithDyld);
+
+ // fill in base info
+ dyld_process_cache_info* cacheInfo = info->cacheInfo();
+ memcpy(cacheInfo->cacheUUID, &allImageInfo.sharedCacheUUID[0], 16);
+ cacheInfo->cacheBaseAddress = allImageInfo.sharedCacheBaseAddress;
+ cacheInfo->privateCache = allImageInfo.processDetachedFromSharedRegion;
+ // if no cache is used, allImageInfo has all zeros for cache UUID
+ cacheInfo->noCache = true;
+ for (int i=0; i < 16; ++i) {
+ if ( cacheInfo->cacheUUID[i] != 0 ) {
+ cacheInfo->noCache = false;
}
- else {
- countOfPathsNeedingCopying = imageCount+1;
+ }
+
+ // fill in aot shared cache info
+ dyld_process_aot_cache_info* aotCacheInfo = info->aotCacheInfo();
+ memcpy(aotCacheInfo->cacheUUID, &allImageInfo.aotSharedCacheUUID[0], 16);
+ aotCacheInfo->cacheBaseAddress = allImageInfo.aotSharedCacheBaseAddress;
+
+ dyld_process_state_info* stateInfo = info->stateInfo();
+ stateInfo->timestamp = currentTimestamp;
+ stateInfo->imageCount = imageCountWithDyld;
+ stateInfo->initialImageCount = (uint32_t)(allImageInfo.initialImageCount+1);
+ stateInfo->dyldState = dyld_process_state_dyld_initialized;
+
+ if ( allImageInfo.libSystemInitialized != 0 ) {
+ stateInfo->dyldState = dyld_process_state_libSystem_initialized;
+ if ( allImageInfo.initialImageCount != imageCount ) {
+ stateInfo->dyldState = dyld_process_state_program_running;
}
- unsigned imageCountWithDyld = imageCount+1;
-
- // allocate result object
- size_t allocationSize = sizeof(dyld_process_info_base)
- + sizeof(dyld_process_cache_info)
- + sizeof(dyld_process_aot_cache_info)
- + sizeof(dyld_process_state_info)
- + sizeof(ImageInfo)*(imageCountWithDyld)
- + sizeof(dyld_aot_image_info_64)*(allImageInfo.aotInfoCount) // add the size necessary for aot info to this buffer
- + sizeof(SegmentInfo)*imageCountWithDyld*10
- + countOfPathsNeedingCopying*PATH_MAX;
- void* storage = malloc(allocationSize);
- if (storage == nullptr) {
- *kr = KERN_NO_SPACE;
+ }
+ if ( allImageInfo.errorMessage != 0 ) {
+ stateInfo->dyldState = allImageInfo.terminationFlags ? dyld_process_state_terminated_before_inits : dyld_process_state_dyld_terminated;
+ }
+ // fill in info for dyld
+ if ( allImageInfo.dyldPath != 0 ) {
+ if ((*kr = info->addDyldImage(task, allImageInfo.dyldImageLoadAddress, allImageInfo.dyldPath, NULL))) {
+ *kr = KERN_FAILURE;
result = nullptr;
return;
}
- auto info = dyld_process_info_ptr(new (storage) dyld_process_info_base(allImageInfo.platform, imageCountWithDyld, allImageInfo.aotInfoCount, allocationSize), deleter);
- (void)info->reserveSpace(sizeof(dyld_process_info_base)+sizeof(dyld_process_cache_info)+sizeof(dyld_process_state_info)+sizeof(dyld_process_aot_cache_info));
- (void)info->reserveSpace(sizeof(ImageInfo)*imageCountWithDyld);
-
- // fill in base info
- dyld_process_cache_info* cacheInfo = info->cacheInfo();
- memcpy(cacheInfo->cacheUUID, &allImageInfo.sharedCacheUUID[0], 16);
- cacheInfo->cacheBaseAddress = allImageInfo.sharedCacheBaseAddress;
- cacheInfo->privateCache = allImageInfo.processDetachedFromSharedRegion;
- // if no cache is used, allImageInfo has all zeros for cache UUID
- cacheInfo->noCache = true;
- for (int i=0; i < 16; ++i) {
- if ( cacheInfo->cacheUUID[i] != 0 ) {
- cacheInfo->noCache = false;
- }
- }
-
- // fill in aot shared cache info
- dyld_process_aot_cache_info* aotCacheInfo = info->aotCacheInfo();
- memcpy(aotCacheInfo->cacheUUID, &allImageInfo.aotSharedCacheUUID[0], 16);
- aotCacheInfo->cacheBaseAddress = allImageInfo.aotSharedCacheBaseAddress;
-
- dyld_process_state_info* stateInfo = info->stateInfo();
- stateInfo->timestamp = currentTimestamp;
- stateInfo->imageCount = imageCountWithDyld;
- stateInfo->initialImageCount = (uint32_t)(allImageInfo.initialImageCount+1);
- stateInfo->dyldState = dyld_process_state_dyld_initialized;
-
- if ( allImageInfo.libSystemInitialized != 0 ) {
- stateInfo->dyldState = dyld_process_state_libSystem_initialized;
- if ( allImageInfo.initialImageCount != imageCount ) {
- stateInfo->dyldState = dyld_process_state_program_running;
- }
- }
- if ( allImageInfo.errorMessage != 0 ) {
- stateInfo->dyldState = allImageInfo.terminationFlags ? dyld_process_state_terminated_before_inits : dyld_process_state_dyld_terminated;
- }
- // fill in info for dyld
- if ( allImageInfo.dyldPath != 0 ) {
- if ((*kr = info->addDyldImage(task, allImageInfo.dyldImageLoadAddress, allImageInfo.dyldPath, NULL))) {
- result = nullptr;
- return;
- }
- }
- // fill in info for each image
- for (uint32_t i=0; i < imageCount; ++i) {
- if (!info->addImage(task, sameCacheAsThisProcess, imageArray[i].imageLoadAddress, imageArray[i].imageFilePath, NULL)) {
- result = nullptr;
- return;
- }
- }
- // sanity check internal data did not overflow
- if ( info->invalid() ) {
+ }
+ // fill in info for each image
+ for (uint32_t i=0; i < imageCount; ++i) {
+ if (!info->addImage(task, sameCacheAsThisProcess, imageArray[i].imageLoadAddress, imageArray[i].imageFilePath, NULL)) {
*kr = KERN_FAILURE;
result = nullptr;
return;
}
+ }
+ // sanity check internal data did not overflow
+ if ( info->invalid() ) {
+ *kr = KERN_FAILURE;
+ result = nullptr;
+ return;
+ }
- result = std::move(info);
- });
+ result = std::move(info);
+ });
- mach_vm_address_t aotImageArray = allImageInfo.aotInfoArray;
- // shortcircuit this code path if aotImageArray == 0 (32 vs 64 bit struct difference)
- // and if result == nullptr, since we need to append aot image infos to the process info struct
- if (aotImageArray != 0 && result != nullptr) {
- uint32_t aotImageCount = allImageInfo.aotInfoCount;
- size_t aotImageArraySize = aotImageCount * sizeof(dyld_aot_image_info_64);
-
- withRemoteBuffer(task, aotImageArray, aotImageArraySize, false, false, kr, ^(void *buffer, size_t size) {
- dyld_aot_image_info_64* imageArray = (dyld_aot_image_info_64*)buffer;
- for (uint32_t i = 0; i < aotImageCount; i++) {
- if (!result->addAotImage(imageArray[i])) {
- result = nullptr;
- return;
- }
+ mach_vm_address_t aotImageArray = allImageInfo.aotInfoArray;
+ // shortcircuit this code path if aotImageArray == 0 (32 vs 64 bit struct difference)
+ // and if result == nullptr, since we need to append aot image infos to the process info struct
+ if (aotImageArray != 0 && result != nullptr) {
+ uint32_t aotImageCount = allImageInfo.aotInfoCount;
+ size_t aotImageArraySize = aotImageCount * sizeof(dyld_aot_image_info_64);
+
+ withRemoteBuffer(task, aotImageArray, aotImageArraySize, false, kr, ^(void *buffer, size_t size) {
+ dyld_aot_image_info_64* imageArray = (dyld_aot_image_info_64*)buffer;
+ for (uint32_t i = 0; i < aotImageCount; i++) {
+ if (!result->addAotImage(imageArray[i])) {
+ *kr = KERN_FAILURE;
+ result = nullptr;
+ return;
}
- });
- }
-
- if (result) break;
+ }
+ });
}
-
return std::move(result);
}
if ( info.protection != (VM_PROT_READ|VM_PROT_EXECUTE) )
continue;
// read start of vm region to verify it is a mach header
- withRemoteObject(task, address, false, NULL, ^(mach_header_64 mhBuffer){
+ withRemoteObject(task, address, NULL, ^(mach_header_64 mhBuffer){
if ( (mhBuffer.magic != MH_MAGIC) && (mhBuffer.magic != MH_MAGIC_64) )
return;
// now know the region is the start of a mach-o file
const char* dyld_process_info_base::copyPath(task_t task, uint64_t stringAddressInTask)
{
__block const char* retval = "";
- withRemoteBuffer(task, stringAddressInTask, PATH_MAX, false, true, nullptr, ^(void *buffer, size_t size) {
+ withRemoteBuffer(task, stringAddressInTask, PATH_MAX, true, nullptr, ^(void *buffer, size_t size) {
retval = addString(static_cast<const char *>(buffer), size);
});
return retval;
__block bool done = false;
//Since the minimum we can reasonably map is a page, map that.
- withRemoteBuffer(task, remoteMH, PAGE_SIZE, false, false, &kr, ^(void * buffer, size_t size) {
+ withRemoteBuffer(task, remoteMH, PAGE_SIZE, false, &kr, ^(void * buffer, size_t size) {
const mach_header* mh = (const mach_header*)buffer;
headerPagesSize = sizeof(mach_header) + mh->sizeofcmds;
if (headerPagesSize <= PAGE_SIZE) {
if (kr != KERN_SUCCESS) {
return kr;
}
- withRemoteBuffer(task, remoteMH, headerPagesSize, false, false, &kr, ^(void * buffer, size_t size) {
+ withRemoteBuffer(task, remoteMH, headerPagesSize, false, &kr, ^(void * buffer, size_t size) {
addInfoFromLoadCommands((mach_header*)buffer, remoteMH, size);
});
}
}
//The kernel will return MACH_VM_MIN_ADDRESS for an executable that has not had dyld loaded
- if (task_dyld_info.all_image_info_addr == MACH_VM_MIN_ADDRESS)
+ if (task_dyld_info.all_image_info_addr == MACH_VM_MIN_ADDRESS) {
+ *kr = KERN_FAILURE;
return nullptr;
+ }
- // We use a true shared memory buffer here, that way by making sure that libdyld in both processes
- // reads and writes the the timestamp atomically we can make sure we get a coherent view of the
- // remote process.
- // That also means that we *MUST* directly read the memory, which is why we template the make() call
- withRemoteBuffer(task, task_dyld_info.all_image_info_addr, (size_t)task_dyld_info.all_image_info_size, true, false, kr, ^(void *buffer, size_t size) {
- dyld_process_info_ptr base;
- if (task_dyld_info.all_image_info_format == TASK_DYLD_ALL_IMAGE_INFO_32 ) {
- const dyld_all_image_infos_32* info = (const dyld_all_image_infos_32*)buffer;
- base = dyld_process_info_base::make<dyld_all_image_infos_32, dyld_image_info_32>(task, *info, timestamp, kr);
- } else {
- const dyld_all_image_infos_64* info = (const dyld_all_image_infos_64*)buffer;
- base = dyld_process_info_base::make<dyld_all_image_infos_64, dyld_image_info_64>(task, *info, timestamp, kr);
- }
- if (base) {
- result = base.release();
- }
- });
+ for (auto i = 0; i < 10; ++i) {
+ withRemoteBuffer(task, task_dyld_info.all_image_info_addr, (size_t)task_dyld_info.all_image_info_size, false, kr, ^(void *buffer, size_t size) {
+ dyld_process_info_ptr base;
+ if (task_dyld_info.all_image_info_format == TASK_DYLD_ALL_IMAGE_INFO_32 ) {
+ const dyld_all_image_infos_32* info = (const dyld_all_image_infos_32*)buffer;
+ base = dyld_process_info_base::make<dyld_all_image_infos_32, dyld_image_info_32>(task, *info, timestamp, kr);
+ } else {
+ const dyld_all_image_infos_64* info = (const dyld_all_image_infos_64*)buffer;
+ base = dyld_process_info_base::make<dyld_all_image_infos_64, dyld_image_info_64>(task, *info, timestamp, kr);
+ }
+ if (base) {
+ if (result) {
+ free((void*)result);
+ }
+ result = base.release();
+ }
+ });
+ if (kr == KERN_SUCCESS) { break; }
+ }
return result;
}
//FIXME: Refactor this out into a seperate file
struct VIS_HIDDEN RemoteBuffer {
RemoteBuffer();
- RemoteBuffer(task_t task, mach_vm_address_t remote_address, size_t remote_size, bool shared, bool allow_truncation);
+ RemoteBuffer(task_t task, mach_vm_address_t remote_address, size_t remote_size, bool allow_truncation);
RemoteBuffer& operator=(RemoteBuffer&& other);
~RemoteBuffer();
void *getLocalAddress() const;
kern_return_t getKernelReturn() const;
size_t getSize() const;
private:
- static std::pair<mach_vm_address_t, kern_return_t> map( task_t task, mach_vm_address_t remote_address,
- vm_size_t _size, bool shared);
- static std::tuple<mach_vm_address_t,vm_size_t,kern_return_t,bool>create( task_t task,
+ static std::pair<mach_vm_address_t, kern_return_t> map( task_t task, mach_vm_address_t remote_address, vm_size_t _size);
+ static std::tuple<mach_vm_address_t,vm_size_t,kern_return_t>create( task_t task,
mach_vm_address_t remote_address,
size_t remote_size,
- bool shared,
bool allow_truncation);
- RemoteBuffer(std::tuple<mach_vm_address_t,vm_size_t,kern_return_t,bool> T);
+ RemoteBuffer(std::tuple<mach_vm_address_t,vm_size_t,kern_return_t> T);
mach_vm_address_t _localAddress;
vm_size_t _size;
kern_return_t _kr;
- bool _shared;
};
// only called during libdyld set up
const struct mach_header* loadAddresses[],
const char* imagePaths[])) VIS_HIDDEN;
-void withRemoteBuffer(task_t task, mach_vm_address_t remote_address, size_t remote_size, bool shared, bool allow_truncation, kern_return_t *kr, void (^block)(void *buffer, size_t size)) __attribute__((visibility("hidden")));
+void withRemoteBuffer(task_t task, mach_vm_address_t remote_address, size_t remote_size, bool allow_truncation, kern_return_t *kr, void (^block)(void *buffer, size_t size)) __attribute__((visibility("hidden")));
template<typename T>
-VIS_HIDDEN void withRemoteObject(task_t task, mach_vm_address_t remote_address, bool shared, kern_return_t *kr, void (^block)(T t))
+VIS_HIDDEN void withRemoteObject(task_t task, mach_vm_address_t remote_address, kern_return_t *kr, void (^block)(T t))
{
- withRemoteBuffer(task, remote_address, sizeof(T), shared, false, kr, ^(void *buffer, size_t size) {
+ withRemoteBuffer(task, remote_address, sizeof(T), false, kr, ^(void *buffer, size_t size) {
block(*reinterpret_cast<T *>(buffer));
});
}
#include <mach-o/dyld_priv.h>
#include <mach-o/dyld_process_info.h>
#include <mach-o/dyld_images.h>
-
+#include <Block.h>
+#include <dlfcn.h>
#include "dyld_process_info_internal.h"
#include "Loading.h"
+#include "Tracing.h"
#include "AllImages.h"
extern "C" int _dyld_func_lookup(const char* name, void** address);
void retain();
void release();
- void setNotifyMain(NotifyMain notifyMain) const { _notifyMain = notifyMain; }
+ void setNotifyMain(NotifyMain notifyMain) const {
+ if (_notifyMain == notifyMain) { return; }
+ Block_release(_notifyMain);
+ _notifyMain = Block_copy(notifyMain);
+ }
// override new and delete so we don't need to link with libc++
static void* operator new(size_t sz) { return malloc(sz); }
private:
void handleEvent();
- void teardown();
+ void disconnect();
+ void teardownMachPorts();
void replyToMonitoredProcess(mach_msg_header_t& header);
+ kern_return_t task_dyld_process_info_notify_register(task_read_t target_task, mach_port_t notify);
+ kern_return_t task_dyld_process_info_notify_deregister(task_read_t target_task, mach_port_t notify);
+
RemoteBuffer _remoteAllImageInfoBuffer;
- uint32_t* _notifyMachPorts;
- uint32_t _notifySlot;
- mutable std::atomic<int32_t> _retainCount;
+ mutable std::atomic<uint32_t> _retainCount;
dispatch_queue_t _queue;
- Notify _notify;
- NotifyExit _notifyExit;
- mutable NotifyMain _notifyMain;
- task_t _targetTask;
- dispatch_source_t _machSource;
- mach_port_t _sendPortInTarget; // target is process being watched for image loading/unloading
- mach_port_t _receivePortInMonitor; // monitor is process being notified of image loading/unloading
- std::atomic<bool> _disabled;
+ mutable Notify _notify;
+ mutable NotifyExit _notifyExit;
+ mutable NotifyMain _notifyMain;
+ dispatch_source_t _machSource;
+ task_t _task;
+ mach_port_t _port; // monitor is process being notified of image loading/unloading
+ std::atomic<bool> _connected;
+#if TARGET_OS_SIMULATOR
+ uint32_t _portInTarget;
+#endif
};
+#if TARGET_OS_SIMULATOR
-dyld_process_info_notify_base::dyld_process_info_notify_base(dispatch_queue_t queue, Notify notify, NotifyExit notifyExit,
- task_t task, kern_return_t* kr) :
- _notifyMachPorts(nullptr), _notifySlot(0), _retainCount(1), _queue(queue), _notify(notify), _notifyExit(notifyExit),
- _notifyMain(nullptr), _targetTask(task), _machSource(nullptr), _sendPortInTarget(0), _receivePortInMonitor(0),
- _disabled(false)
-{
- assert(_disabled == false);
- dispatch_retain(_queue);
- // Allocate a port to listen on in this monitoring task
- mach_port_options_t options = { .flags = MPO_IMPORTANCE_RECEIVER | MPO_CONTEXT_AS_GUARD | MPO_STRICT,
- .mpl = { MACH_PORT_QLIMIT_DEFAULT }};
- if ((*kr = mach_port_construct(mach_task_self(), &options, (mach_port_context_t)this, &_receivePortInMonitor))) {
- teardown();
- return;
- }
- if (_targetTask == mach_task_self()) {
- _sendPortInTarget = _receivePortInMonitor;
- (void)mach_port_insert_right(_targetTask, _sendPortInTarget, _receivePortInMonitor, MACH_MSG_TYPE_MAKE_SEND);
- } else {
- // Insert a deadname right into the port to trigger notifications
- kern_return_t r = KERN_NAME_EXISTS;
- while (r == KERN_NAME_EXISTS) {
- _sendPortInTarget = MACH_PORT_NULL;
- //FIXME file radar
- r = mach_port_allocate(_targetTask, MACH_PORT_RIGHT_DEAD_NAME, &_sendPortInTarget);
- if (r != KERN_SUCCESS) {
- *kr = r;
- return;
- }
- (void)mach_port_deallocate(_targetTask, _sendPortInTarget);
- r = mach_port_insert_right(_targetTask, _sendPortInTarget, _receivePortInMonitor, MACH_MSG_TYPE_MAKE_SEND);
- }
- if (r != KERN_SUCCESS) {
- *kr = r;
- return;
- }
-
- // Notify us if the target dies
- mach_port_t previous = MACH_PORT_NULL;
- if ((*kr = mach_port_request_notification(_targetTask, _sendPortInTarget, MACH_NOTIFY_DEAD_NAME, 0, _receivePortInMonitor, MACH_MSG_TYPE_MAKE_SEND_ONCE, &previous))) {
- (void)mach_port_deallocate(_targetTask, _sendPortInTarget);
- (void)mach_port_destruct(mach_task_self(), _receivePortInMonitor, 0, (mach_port_context_t)this);
- teardown();
- return;
- }
- // This is a new port, if there is a previous notifier attached then something is wrong... abort.
- if (previous != MACH_PORT_NULL) {
- (void)mach_port_deallocate(mach_task_self(), previous);
- (void)mach_port_deallocate(_targetTask, _sendPortInTarget);
- (void)mach_port_destruct(mach_task_self(), _receivePortInMonitor, 0, (mach_port_context_t)this);
- teardown();
- return;
- }
- }
-
- // Setup the event handler for the port
- _machSource = dispatch_source_create(DISPATCH_SOURCE_TYPE_MACH_RECV, _receivePortInMonitor, 0, _queue);
- if (_machSource == nullptr) {
- (void)mach_port_deallocate(_targetTask, _sendPortInTarget);
- (void)mach_port_destruct(mach_task_self(), _receivePortInMonitor, 0, (mach_port_context_t)this);
- teardown();
- return;
- }
- dispatch_source_set_event_handler(_machSource, ^{
- handleEvent();
- });
- dispatch_source_set_cancel_handler(_machSource, ^{
- if ( _receivePortInMonitor != 0 ) {
- (void)mach_port_destruct(mach_task_self(), _receivePortInMonitor, 0, (mach_port_context_t)this);
- _receivePortInMonitor = 0;
- }
- });
- dispatch_activate(_machSource);
-
- // get location on all_image_infos in the target task
+// Maps the target task's dyld_all_image_infos into this process and passes the
+// local address of its notifyMachPorts array to the callback `f`.  Used to
+// emulate the task_dyld_process_info_notify_register/deregister syscalls when
+// the host libsystem does not provide them.
+// Returns the first failing kern_return_t from task_info()/mach_vm_remap(),
+// otherwise the result of `f`.
+template<typename F>
+kern_return_t withRemotePortArray(task_t target_task, F f) {
+ // Get the all image info
task_dyld_info_data_t taskDyldInfo;
mach_msg_type_number_t taskDyldInfoCount = TASK_DYLD_INFO_COUNT;
- if ((*kr = task_info(_targetTask, TASK_DYLD_INFO, (task_info_t)&taskDyldInfo, &taskDyldInfoCount))) {
- (void)mach_port_deallocate(_targetTask, _sendPortInTarget);
- teardown();
- return;
+ auto kr = task_info(target_task, TASK_DYLD_INFO, (task_info_t)&taskDyldInfo, &taskDyldInfoCount);
+ if (kr != KERN_SUCCESS) {
+ return kr;
}
- // Poke the portname of our port into the target task
- _remoteAllImageInfoBuffer = RemoteBuffer(_targetTask, taskDyldInfo.all_image_info_addr, (size_t)taskDyldInfo.all_image_info_size, true, false);
- *kr = _remoteAllImageInfoBuffer.getKernelReturn();
- if (*kr) {
- (void)mach_port_deallocate(_targetTask, _sendPortInTarget);
- teardown();
- return;
+
+ vm_prot_t cur_protection = VM_PROT_NONE;
+ vm_prot_t max_protection = VM_PROT_NONE;
+ mach_vm_address_t localAddress = 0;
+ mach_vm_size_t size = sizeof(dyld_all_image_infos_64);
+ // 32-bit processes carry the smaller all_image_infos layout
+ if ( taskDyldInfo.all_image_info_format == TASK_DYLD_ALL_IMAGE_INFO_32 ) {
+ size = sizeof(dyld_all_image_infos_32);
}
+ kr = mach_vm_remap(mach_task_self(),
+ &localAddress,
+ size,
+ 0, // mask
+ VM_FLAGS_ANYWHERE | VM_FLAGS_RETURN_DATA_ADDR| VM_FLAGS_RESILIENT_CODESIGN | VM_FLAGS_RESILIENT_MEDIA,
+ target_task,
+ taskDyldInfo.all_image_info_addr,
+ false,
+ &cur_protection,
+ &max_protection,
+ VM_INHERIT_NONE);
+ // BUGFIX: bail out if the remap failed instead of dereferencing an unmapped address
+ if (kr != KERN_SUCCESS) {
+ return kr;
+ }
static_assert(sizeof(mach_port_t) == sizeof(uint32_t), "machport size not 32-bits");
+ uint32_t* notifyMachPorts;
if ( taskDyldInfo.all_image_info_format == TASK_DYLD_ALL_IMAGE_INFO_32 ) {
- _notifyMachPorts = (uint32_t *)((uint8_t *)_remoteAllImageInfoBuffer.getLocalAddress() + offsetof(dyld_all_image_infos_32,notifyMachPorts));
+ notifyMachPorts = (uint32_t *)((uint8_t *)localAddress + offsetof(dyld_all_image_infos_32,notifyMachPorts));
} else {
- _notifyMachPorts = (uint32_t *)((uint8_t *)_remoteAllImageInfoBuffer.getLocalAddress() + offsetof(dyld_all_image_infos_64,notifyMachPorts));
+ notifyMachPorts = (uint32_t *)((uint8_t *)localAddress + offsetof(dyld_all_image_infos_64,notifyMachPorts));
}
+ kr = f(notifyMachPorts);
+ // BUGFIX: the remapped buffer lives in *our* address space, so it must be
+ // deallocated from mach_task_self(), not from the target task.
+ (void)vm_deallocate(mach_task_self(), localAddress, size);
+ return kr;
+}
-#if 0
- //If all the slots are filled we will sleep and retry a few times before giving up
- for (uint32_t i=0; i<10; ++i) {
- for (_notifySlot=0; _notifySlot < DYLD_MAX_PROCESS_INFO_NOTIFY_COUNT; ++_notifySlot) {
- if (OSAtomicCompareAndSwap32(0, _sendPortInTarget, (volatile int32_t*)&_notifyMachPorts[_notifySlot])) {
- break;
+#endif
+
+// Registers `notify` (a send right to our listening port) with the target task
+// so its dyld will message us about image loads/unloads.  If the simulator
+// host's libsystem provides the real syscall, forward to it; otherwise emulate
+// it by publishing the port name into the target's notifyMachPorts array.
+kern_return_t dyld_process_info_notify_base::task_dyld_process_info_notify_register(task_t target_task, mach_port_t notify) {
+#if TARGET_OS_SIMULATOR
+ static dispatch_once_t onceToken;
+ static kern_return_t (*tdpinr)(task_t, mach_port_t) = nullptr;
+ dispatch_once(&onceToken, ^{
+ tdpinr = (kern_return_t (*)(task_t, mach_port_t))dlsym(RTLD_DEFAULT, "task_dyld_process_info_notify_register");
+ });
+ if (tdpinr) {
+ return tdpinr(target_task, notify);
+ }
+ // Our libsystem does not have task_dyld_process_info_notify_register, emulate
+ return withRemotePortArray(target_task, [this,target_task,notify](uint32_t* portArray){
+ mach_port_t portInTarget = MACH_PORT_NULL;
+ // Reserve a name in the target (as a dead name), then convert it into a
+ // send right to `notify`.  Loop in case another thread claims the name first.
+ kern_return_t kr = KERN_NAME_EXISTS;
+ while (kr == KERN_NAME_EXISTS) {
+ portInTarget = MACH_PORT_NULL;
+ kr = mach_port_allocate(target_task, MACH_PORT_RIGHT_DEAD_NAME, &portInTarget);
+ if (kr != KERN_SUCCESS) {
+ return kr;
}
+ (void)mach_port_deallocate(target_task, portInTarget);
+ kr = mach_port_insert_right(target_task, portInTarget, notify, MACH_MSG_TYPE_MAKE_SEND);
}
- if (_notifySlot == DYLD_MAX_PROCESS_INFO_NOTIFY_COUNT) {
- // all the slots are filled, sleep and try again
- usleep(1000 * 50); // 50ms
- } else {
- // if _notifySlot is set we are done
- break;
+ // The insert failed with a hard error; clean up and propagate it
+ if (kr != KERN_SUCCESS) {
+ (void)mach_port_deallocate(target_task, portInTarget);
+ return kr;
+ }
- }
-#else
- for (_notifySlot=0; _notifySlot < DYLD_MAX_PROCESS_INFO_NOTIFY_COUNT; ++_notifySlot) {
- if (OSAtomicCompareAndSwap32(0, _sendPortInTarget, (volatile int32_t*)&_notifyMachPorts[_notifySlot])) {
- break;
+ // Publish the right into the first empty slot of the target's notify port array
+ for (uint8_t notifySlot=0; notifySlot < DYLD_MAX_PROCESS_INFO_NOTIFY_COUNT; ++notifySlot) {
+ if (OSAtomicCompareAndSwap32(0, portInTarget, (volatile int32_t*)&portArray[notifySlot])) {
+ _portInTarget = portInTarget;
+ return KERN_SUCCESS;
+ }
}
+ // The array was full, we need to fail
+ (void)mach_port_deallocate(target_task, portInTarget);
+ return KERN_UREFS_OVERFLOW;
+ });
+#else
+ return ::task_dyld_process_info_notify_register(target_task, notify);
+#endif
+}
+
+// Removes the notify right previously registered with `target_task`.
+kern_return_t dyld_process_info_notify_base::task_dyld_process_info_notify_deregister(task_t target_task, mach_port_t notify) {
+#if TARGET_OS_SIMULATOR
+ static dispatch_once_t onceToken;
+ static kern_return_t (*tdpind)(task_t, mach_port_t) = nullptr;
+ dispatch_once(&onceToken, ^{
+ tdpind = (kern_return_t (*)(task_t, mach_port_t))dlsym(RTLD_DEFAULT, "task_dyld_process_info_notify_deregister");
+ });
+ if (tdpind) {
+ return tdpind(target_task, notify);
}
+ // Our libsystem does not have task_dyld_process_info_notify_deregister, emulate
+ return withRemotePortArray(target_task, [this](uint32_t* portArray){
+ // Find the slot holding our right and clear it
+ for (uint8_t notifySlot=0; notifySlot < DYLD_MAX_PROCESS_INFO_NOTIFY_COUNT; ++notifySlot) {
+ // BUGFIX: swap our port *out* of the slot (expected old value _portInTarget,
+ // new value 0).  The arguments were reversed before, which registered the
+ // port into the first empty slot instead of clearing the one that held it.
+ if (OSAtomicCompareAndSwap32(_portInTarget, 0, (volatile int32_t*)&portArray[notifySlot])) {
+ return KERN_SUCCESS;
+ }
+ }
+ return KERN_FAILURE;
+ });
+#else
+ // The host libsystem provides the syscall; call it directly
+ return ::task_dyld_process_info_notify_deregister(target_task, notify);
#endif
+}
- if (_notifySlot == DYLD_MAX_PROCESS_INFO_NOTIFY_COUNT) {
- (void)mach_port_deallocate(_targetTask, _sendPortInTarget);
- teardown();
- *kr = KERN_UREFS_OVERFLOW;
+// Sets up monitoring of image load/unload activity in `task`.
+// On success (*kr == KERN_SUCCESS) the object is "connected": a receive port
+// has been allocated, registered with the target's dyld, and is drained by a
+// dispatch source on `queue`.  On any failure the mach state is torn down and
+// *kr holds the failing code.  Note _retainCount starts at 0: release()
+// treats a previous value of 0 as the last reference.
+dyld_process_info_notify_base::dyld_process_info_notify_base(dispatch_queue_t queue, Notify notify, NotifyExit notifyExit,
+ task_t task, kern_return_t* kr) :
+ _retainCount(0), _queue(queue), _notify(Block_copy(notify)), _notifyExit(Block_copy(notifyExit)),
+ _notifyMain(nullptr), _machSource(nullptr), _task(task), _port(MACH_PORT_NULL), _connected(false)
+#if TARGET_OS_SIMULATOR
+ , _portInTarget(0)
+#endif
+{
+ assert(kr != NULL);
+ dispatch_retain(_queue);
+ // Allocate a port to listen on in this monitoring task
+ mach_port_options_t options = { .flags = MPO_IMPORTANCE_RECEIVER | MPO_CONTEXT_AS_GUARD | MPO_STRICT, .mpl = { MACH_PORT_QLIMIT_DEFAULT }};
+ *kr = mach_port_construct(mach_task_self(), &options, (mach_port_context_t)this, &_port);
+ if (*kr != KERN_SUCCESS) {
+ teardownMachPorts();
return;
}
- *kr = KERN_SUCCESS;
+ // Ask for a NO_SENDERS notification on our own port: the only send rights
+ // live in the target, so NO_SENDERS means it exited or deregistered us.
+ mach_port_t previous = MACH_PORT_NULL;
+ *kr = mach_port_request_notification(mach_task_self(), _port, MACH_NOTIFY_NO_SENDERS, 1, _port, MACH_MSG_TYPE_MAKE_SEND_ONCE, &previous);
+ if ((*kr != KERN_SUCCESS) || previous != MACH_PORT_NULL) {
+ teardownMachPorts();
+ return;
+ }
+ // Hand the target task's dyld a send right to our port
+ //FIXME: Should we retry here if we fail?
+ *kr = task_dyld_process_info_notify_register(_task, _port);
+ dyld3::kdebug_trace_dyld_marker(DBG_DYLD_TASK_NOTIFY_REGISTER, (uint64_t)_task, (uint64_t)_port, *kr, 0);
+ if (*kr != KERN_SUCCESS) {
+ teardownMachPorts();
+ return;
+ }
+
+ // Setup the event handler for the port
+ _machSource = dispatch_source_create(DISPATCH_SOURCE_TYPE_MACH_RECV, _port, 0, _queue);
+ if (_machSource == nullptr) {
+ teardownMachPorts();
+ return;
+ }
+ dispatch_source_set_event_handler(_machSource, ^{ handleEvent(); });
+ dispatch_source_set_cancel_handler(_machSource, ^{ teardownMachPorts(); });
+ dispatch_activate(_machSource);
+ _connected = true;
}
dyld_process_info_notify_base::~dyld_process_info_notify_base() {
- if (!_disabled) {
- fprintf(stderr, "dyld: ~dyld_process_info_notify_base called while still enabled\n");
- }
+ if (_connected) { fprintf(stderr, "dyld: ~dyld_process_info_notify_base called while still connected\n"); }
+ Block_release(_notify);
+ Block_release(_notifyMain);
+ Block_release(_notifyExit);
dispatch_release(_queue);
}
-void dyld_process_info_notify_base::teardown() {
- if (!_disabled) {
- _disabled = true;
+// Deregisters our notify port from the target task and destroys our receive
+// right.  Idempotent: _port is zeroed after the first call, so later calls
+// (e.g. from the dispatch source's cancel handler) are no-ops.
+void dyld_process_info_notify_base::teardownMachPorts() {
+ if ( _port != 0 ) {
+ kern_return_t kr = task_dyld_process_info_notify_deregister(_task, _port);
+ dyld3::kdebug_trace_dyld_marker(DBG_DYLD_TASK_NOTIFY_DEREGISTER, (uint64_t)_task, (uint64_t)_port, kr, 0);
+ (void)mach_port_destruct(mach_task_self(), _port, 0, (mach_port_context_t)this);
+ _port = 0;
+ }
+}
+
+void dyld_process_info_notify_base::disconnect() {
+ if (_connected) {
+ _connected = false;
// The connection to the target is dead. Clean up ports
- if ( _remoteAllImageInfoBuffer.getLocalAddress() != 0 && _notifySlot < DYLD_MAX_PROCESS_INFO_NOTIFY_COUNT) {
- mach_port_t extractedPort = MACH_PORT_NULL;
- mach_msg_type_name_t extractedPortType;
- kern_return_t kr = mach_port_extract_right(_targetTask, _sendPortInTarget, MACH_MSG_TYPE_COPY_SEND, &extractedPort, &extractedPortType);
- if (kr == KERN_SUCCESS) {
- if (extractedPort == _receivePortInMonitor) {
- if (OSAtomicCompareAndSwap32(_sendPortInTarget, 0, (volatile int32_t*)&_notifyMachPorts[_notifySlot])) {
- (void)mach_port_deallocate(_targetTask, _sendPortInTarget);
- }
- }
- (void)mach_port_deallocate(mach_task_self(), extractedPort);
- }
- }
- _sendPortInTarget = 0;
if ( _machSource ) {
dispatch_source_cancel(_machSource);
dispatch_release(_machSource);
_machSource = NULL;
- }
+ }
if (_notifyExit) {
dispatch_async(_queue, ^{
+ // There was not a mach source, so any ports we still hold will not get torn down by its cancel handler
_notifyExit();
});
}
bool dyld_process_info_notify_base::enabled() const
{
- return !_disabled;
+ return _connected;
}
void dyld_process_info_notify_base::retain()
{
- _retainCount++;
+ _retainCount.fetch_add(1, std::memory_order_relaxed);
}
void dyld_process_info_notify_base::release()
{
- uint32_t newCount = --_retainCount;
-
- if ( newCount == 0 ) {
- teardown();
+ if (_retainCount.fetch_sub(1, std::memory_order_acq_rel) == 0) {
+ // When we subtracted the ref count was 0, which means it was the last reference
+ disconnect();
+ dispatch_async(_queue, ^{
+ delete this;
+ });
}
- dispatch_async(_queue, ^{
- delete this;
- });
}
void dyld_process_info_notify_base::replyToMonitoredProcess(mach_msg_header_t& header) {
if (r == KERN_SUCCESS) {
header.msgh_remote_port = MACH_PORT_NULL;
} else {
- teardown();
+ disconnect();
}
}
void dyld_process_info_notify_base::handleEvent() {
// References object may still exist even after the ports are dead. Disable event dispatching
// if the ports have been torn down.
- if (_disabled) {
- return;
- }
+ if (!_connected) { return; }
+
// This event handler block has an implicit reference to "this"
// if incrementing the count goes to one, that means the object may have already been destroyed
uint8_t messageBuffer[DYLD_PROCESS_INFO_NOTIFY_MAX_BUFFER_SIZE] = {};
mach_msg_header_t* h = (mach_msg_header_t*)messageBuffer;
- kern_return_t r = mach_msg(h, MACH_RCV_MSG | MACH_RCV_VOUCHER| MACH_RCV_TRAILER_ELEMENTS(MACH_RCV_TRAILER_AUDIT) | MACH_RCV_TRAILER_TYPE(MACH_MSG_TRAILER_FORMAT_0), 0, sizeof(messageBuffer)-sizeof(mach_msg_audit_trailer_t), _receivePortInMonitor, MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL);
+ kern_return_t r = mach_msg(h, MACH_RCV_MSG | MACH_RCV_VOUCHER| MACH_RCV_TRAILER_ELEMENTS(MACH_RCV_TRAILER_AUDIT) | MACH_RCV_TRAILER_TYPE(MACH_MSG_TRAILER_FORMAT_0), 0, sizeof(messageBuffer)-sizeof(mach_msg_audit_trailer_t), _port, MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL);
if ( r == KERN_SUCCESS && !(h->msgh_bits & MACH_MSGH_BITS_COMPLEX)) {
//fprintf(stderr, "received message id=0x%X, size=%d\n", h->msgh_id, h->msgh_size);
//fprintf(stderr, "Notifying about: %s\n", stringPool + entries[i].pathStringOffset);
_notify(isUnload, header->timestamp, entries[i].loadAddress, entries[i].uuid, stringPool + entries[i].pathStringOffset);
} else {
- teardown();
+ disconnect();
break;
}
}
// reply to dyld, so it can continue
replyToMonitoredProcess(*h);
} else {
- teardown();
+ disconnect();
}
}
else if ( h->msgh_id == DYLD_PROCESS_INFO_NOTIFY_MAIN_ID ) {
if (h->msgh_size != sizeof(mach_msg_header_t)) {
- teardown();
+ disconnect();
} else if ( _notifyMain != NULL ) {
_notifyMain();
}
replyToMonitoredProcess(*h);
- } else if ( h->msgh_id == MACH_NOTIFY_PORT_DELETED ) {
- mach_port_t deadPort = ((mach_port_deleted_notification_t *)h)->not_port;
+ } else if ( h->msgh_id == MACH_NOTIFY_NO_SENDERS ) {
// Validate this notification came from the kernel
const mach_msg_audit_trailer_t *audit_tlr = (mach_msg_audit_trailer_t *)((uint8_t *)h + round_msg(h->msgh_size));
if (audit_tlr->msgh_trailer_type == MACH_MSG_TRAILER_FORMAT_0
&& audit_tlr->msgh_trailer_size >= sizeof(mach_msg_audit_trailer_t)
// We cannot link to libbsm, so we are hardcoding the audit token offset (5)
// And the value the represents the kernel (0)
- && audit_tlr->msgh_audit.val[5] == 0
- && deadPort == _sendPortInTarget ) {
- teardown();
+ && audit_tlr->msgh_audit.val[5] == 0) {
+ disconnect();
}
}
else {
fprintf(stderr, "dyld: received unknown message id=0x%X, size=%d\n", h->msgh_id, h->msgh_size);
}
+ } else {
+ fprintf(stderr, "dyld: received unknown message id=0x%X, size=%d\n", h->msgh_id, h->msgh_size);
}
mach_msg_destroy(h);
}
#include <stdio.h>
#include <mach/mach.h>
#include <mach/mach_time.h>
+#include <mach/mach_traps.h>
#include <sys/stat.h>
#include <sys/mman.h>
#include <sys/stat.h>
return KERN_NOT_SUPPORTED;
}
+// Compatibility shim: forwards to the host's task_dyld_process_info_notify_get
+// when the syscall helpers are new enough (version >= 14).  Otherwise walks the
+// process's own all_image_infos notify-port array, bumping a send-right ref on
+// each registered port, and reports KERN_NOT_SUPPORTED.
+// NOTE(review): the fallback takes extra send-right refs but never fills
+// names_addr/names_count_addr for the caller — confirm callers expect this
+// behavior on old hosts.
+kern_return_t task_dyld_process_info_notify_get( mach_port_name_array_t names_addr, mach_msg_type_number_t *names_count_addr) {
+ if ( gSyscallHelpers->version >= 14 ) {
+ return gSyscallHelpers->task_dyld_process_info_notify_get(names_addr, names_count_addr);
+ }
+ struct dyld_all_image_infos* imageInfo = (struct dyld_all_image_infos*)(gSyscallHelpers->getProcessInfo());
+ for (int slot=0; slot < DYLD_MAX_PROCESS_INFO_NOTIFY_COUNT; ++slot) {
+ if ( imageInfo->notifyPorts[slot] != 0 ) {
+ // Bump the refs
+ (void)mach_port_mod_refs(mach_task_self(), imageInfo->notifyPorts[slot], MACH_PORT_RIGHT_SEND, 1);
+ }
+ }
+
+ return KERN_NOT_SUPPORTED;
+}
+
void abort_with_payload(uint32_t reason_namespace, uint64_t reason_code, void* payload, uint32_t payload_size, const char* reason_string, uint64_t reason_flags)
{
if ( gSyscallHelpers->version >= 6 )
gSyscallHelpers->notifyMonitoringDyld(unloading, imageCount, loadAddresses, imagePaths);
return;
}
-#if SUPPORT_HOST_10_11
- findHostFunctions();
- for (int slot=0; slot < DYLD_MAX_PROCESS_INFO_NOTIFY_COUNT; ++slot) {
- notifyMonitoringDyld(unloading, slot, imageCount, loadAddresses, imagePaths);
- }
-#endif
}
int* __error(void) {
#if ! TARGET_OS_SIMULATOR
#include <mach-o/dyld_process_info.h>
+ // <rdar://problem/69456906> dyld should mark _dyld_debugger_notification `noinline`
+ __attribute__ ((noinline))
void _dyld_debugger_notification(enum dyld_notify_mode mode, unsigned long count, uint64_t machHeaders[])
{
// Do nothing. This exists for the debugger to set a break point on to see what images have been loaded or unloaded.
ret lr
LlazyAllocate:
+#if __has_feature(ptrauth_returns)
+ pacibsp
+#endif
stp fp, lr, [sp, #-16]!
mov fp, sp
sub sp, sp, #288
mov sp, fp
ldp fp, lr, [sp], #16
- ret lr
+#if __has_feature(ptrauth_returns)
+ retab
+#else
+ ret
+#endif
#endif
target = ninja.findTarget(args[1])
target.addVariable("entitlements", "--entitlements $SRCROOT/testing/get_task_allow_entitlement.plist")
elif args[0] == "$TASK_FOR_PID_ENABLE":
- if platform != "macos":
- target = ninja.findTarget(args[1])
- target.addVariable("entitlements", "--entitlements $SRCROOT/testing/task_for_pid_entitlement.plist")
+ target = ninja.findTarget(args[1])
+ target.addVariable("entitlements", "--entitlements $SRCROOT/testing/task_read_for_pid_entitlement.plist")
elif args[0] in ["$CC", "$CXX"]:
tool = args[0][1:].lower()
sources = []
if platforms and platform not in platforms: return -1, archs, foundPlatform
effectiveArchs = list(set(archs) & set(restrictedArchs))
if effectiveArchs: return idx + len(directive) + len(match.group()), effectiveArchs, foundPlatform
- return len(line), archs, foundPlatform
+ return line.find(':')+1, archs, foundPlatform
return -1, archs, False
if __name__ == "__main__":
void set_exit_handler(_dyld_test_exit_handler_t EH);
void set_crash_handler(_dyld_test_crash_handler_t CH);
void set_launch_suspended(bool S);
- void set_launch_async(bool S);
void set_launch_arch(cpu_type_t A);
pid_t launch();
void *operator new(size_t size);
--- /dev/null
+
+__attribute__((section(("__RWX, __data"))))
+int data = 1;
+
+int _start() {
+ return data;
+}
\ No newline at end of file
--- /dev/null
+#!/usr/bin/python2.7
+
+import os
+import KernelCollection
+
+# Check errors from canBePlacedInKernelCollection()
+# All arm64* binaries cannot use RWX permissions
+
+def check(kernel_cache):
+    # Building a kernel with an RWX segment must fail: arm64 kernel collections
+    # reject segments that are both writable and executable.
+    kernel_cache.buildKernelCollection("arm64", "/rwx-permissions-error/main.kc", "/rwx-permissions-error/main.kernel", "", [], ["-json-errors"])
+    assert len(kernel_cache.dictionary()) == 1
+    # kernel
+    assert kernel_cache.dictionary()[0]["id"] == "com.apple.kernel"
+    # Exactly one error, naming the W+X segment restriction
+    assert len(kernel_cache.dictionary()[0]["errors"]) == 1
+    assert kernel_cache.dictionary()[0]["errors"][0] == "cannot be placed in kernel collection because: Segments are not allowed to be both writable and executable"
+
+
+# [~]> xcrun -sdk iphoneos cc -arch arm64 -Wl,-static -mkernel -nostdlib -Wl,-add_split_seg_info -Wl,-rename_section,__TEXT,__text,__TEXT_EXEC,__text -Wl,-e,__start -Wl,-pagezero_size,0x0 -Wl,-pie -segprot __RWX rwx rwx main.c -o main.kernel
+
Console,
XCTest
};
+ void emitBegin();
void runLeaks();
void dumpLogs();
void getLogsString(char** buffer);
}
_process::_process() : executablePath(nullptr), args(nullptr), env(nullptr), stdoutHandler(nullptr), stderrHandler(nullptr),
- crashHandler(nullptr), exitHandler(nullptr), pid(0), arch(currentArch), suspended(false), async(false) {}
+ crashHandler(nullptr), exitHandler(nullptr), pid(0), arch(currentArch), suspended(false) {}
_process::~_process() {
if (stdoutHandler) { Block_release(stdoutHandler);}
if (stderrHandler) { Block_release(stderrHandler);}
void _process::set_exit_handler(_dyld_test_exit_handler_t EH) { exitHandler = Block_copy(EH); }
void _process::set_crash_handler(_dyld_test_crash_handler_t CH) { crashHandler = Block_copy(CH); }
void _process::set_launch_suspended(bool S) { suspended = S; }
-void _process::set_launch_async(bool S) { async = S; }
void _process::set_launch_arch(cpu_type_t A) { arch = A; }
pid_t _process::launch() {
dispatch_queue_t queue = dispatch_queue_create("com.apple.dyld.test.launch", NULL);
- dispatch_block_t oneShotSemaphoreBlock = dispatch_block_create(DISPATCH_BLOCK_INHERIT_QOS_CLASS, ^{});
posix_spawn_file_actions_t fileActions = NULL;
posix_spawnattr_t attr = NULL;
dispatch_source_t stdoutSource = NULL;
if (stderrSource) {
dispatch_source_cancel(stderrSource);
}
- oneShotSemaphoreBlock();
dispatch_source_cancel(exitSource);
});
dispatch_resume(exitSource);
if (!suspended) {
kill(pid, SIGCONT);
}
- if (!async) {
- dispatch_block_wait(oneShotSemaphoreBlock, DISPATCH_TIME_FOREVER);
- }
- Block_release(oneShotSemaphoreBlock);
dispatch_release(queue);
return pid;
}
}
}
});
+}
+
+void TestState::emitBegin() {
if (output == BATS) {
printf("[BEGIN]");
if (checkForLeaks) {
if(!state->compare_exchange_strong(expected, newState)) {
newState->~TestState();
free(temp);
+ } else {
+ newState->emitBegin();
}
}
sState.store(*state);
+++ /dev/null
-<?xml version="1.0" encoding="UTF-8"?>
-<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
-<plist version="1.0">
-<dict>
- <key>com.apple.system-task-ports</key>
- <true/>
- <key>task_for_pid-allow</key>
- <true/>
-</dict>
-</plist>
--- /dev/null
+<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
+<plist version="1.0">
+<dict>
+ <key>com.apple.system-task-ports.read</key>
+ <true/>
+</dict>
+</plist>
extern struct mach_header __dso_handle;
+int patatino(void) {
+ return 666;
+}
+
int main(int argc, const char* argv[], const char* envp[], const char* apple[]) {
NSSymbol sym = NSLookupSymbolInImage(&__dso_handle, "_main", NSLOOKUPSYMBOLINIMAGE_OPTION_RETURN_ON_ERROR);
if ( sym == NULL ) {
FAIL("address returned %p is not &main=%p", mainAddr, &main);
}
+ NSSymbol sym2 = NSLookupSymbolInImage(&__dso_handle, "_patatino", NSLOOKUPSYMBOLINIMAGE_OPTION_RETURN_ON_ERROR);
+ if ( sym2 == NULL ) {
+ FAIL("cant' find patatino");
+ }
+ void* funcAddr = NSAddressOfSymbol(sym2);
+ if ( funcAddr == NULL ) {
+ FAIL("address returned for patatino is NULL");
+ }
+ // This returns a signed pointer, so we want to make sure we can call it without crashing.
+ int (*func_ptr)(void) = funcAddr;
+ int result = (*func_ptr)();
+ if ( result != 666 ) {
+ FAIL("can't call the function correctly");
+ }
+
// verify NULL works
if ( NSAddressOfSymbol(NULL) != NULL ) {
FAIL("NULL not handle");
}
// foo can't see libCarbon.dylib as it's name isn't correct
- void* handle2 = dlopen("libCarbon.dylib", RTLD_LAZY);
- if ( handle2 != NULL ) {
+ void* handle3 = dlopen("libCarbon.dylib", RTLD_LAZY);
+ if ( handle3 != NULL ) {
FAIL("dlopen-jna, libfoo should not be able to dlopen()");
}
}
}
// jna should see libCarbon.dylib as it's name is correct
- void* handle2 = dlopen("libCarbon.dylib", RTLD_LAZY);
- if ( handle2 == NULL ) {
+ void* handle3 = dlopen("libCarbon.dylib", RTLD_LAZY);
+ if ( handle3 == NULL ) {
FAIL("dlopen-jna, libjna not be able to dlopen(): %s", dlerror());
}
}
#include <spawn.h>
#include <errno.h>
#include <sys/uio.h>
+#include <sys/proc.h>
#include <sys/wait.h>
#include <sys/types.h>
#include <mach/mach.h>
{
kern_return_t result;
dyld_process_info info = _dyld_process_info_create(task, 0, &result);
+ LOG("_dyld_process_info_create(): return(%u), info(0x%llx)", result, (uint64_t)info);
+
if (result != KERN_SUCCESS) {
- FAIL("dyld_process_info() should succeed");
+ FAIL("_dyld_process_info_create() should succeed");
}
if (info == NULL) {
- FAIL("dyld_process_info(task, 0) alwats return a value");
+ FAIL("_dyld_process_info_create(task, 0) alwats return a value");
}
dyld_process_state_info stateInfo;
_dyld_process_info_release(info);
}
+
+
#if __x86_64__
cpu_type_t otherArch[] = { CPU_TYPE_I386 };
#elif __i386__
_process process;
process.set_executable_path(RUN_DIR "/linksWithCF.exe");
process.set_launch_suspended(launchSuspended);
- process.set_launch_async(true);
if (forceIOSMac) {
LOG("Launching native");
const char* env[] = { "TEST_OUTPUT=None", "DYLD_FORCE_PLATFORM=6", NULL};
LOG("launchTest pid: %d", pid);
task_t task;
- if (task_for_pid(mach_task_self(), pid, &task) != KERN_SUCCESS) {
- FAIL("task_for_pid() failed");
+ kern_return_t kr = task_read_for_pid(mach_task_self(), pid, &task);
+    LOG("task_read_for_pid(mach_task_self()): return(%u), task(%u)", kr, task);
+ if (kr != KERN_SUCCESS) {
+ FAIL("task_read_for_pid() failed");
}
- LOG("launchTest task: %u", task);
// wait until process is up and has suspended itself
if (!launchSuspended) {
int main(int argc, const char* argv[], const char* envp[], const char* apple[]) {
signal(SIGUSR1, SIG_IGN);
- TIMEOUT(120);
launchTest(false, false, false);
launchTest(false, true, false);
#if __MAC_OS_X_VERSION_MIN_REQUIRED
});
dispatch_main();
}
+
+++ /dev/null
-
-// BUILD: $CC target.c -o $BUILD_DIR/target.exe -DRUN_DIR="$RUN_DIR"
-// BUILD: $CC foo.c -o $BUILD_DIR/libfoo.dylib -dynamiclib
-// BUILD: $CXX main.cpp -o $BUILD_DIR/dyld_process_info_notify.exe -DRUN_DIR="$RUN_DIR"
-// BUILD: $TASK_FOR_PID_ENABLE $BUILD_DIR/dyld_process_info_notify.exe
-
-// RUN_TIMEOUT: 2400
-// XFAIL: $SUDO ./dyld_process_info_notify.exe $RUN_DIR/target.exe
-
-#include <stdio.h>
-#include <stdlib.h>
-#include <dlfcn.h>
-#include <unistd.h>
-#include <signal.h>
-#include <spawn.h>
-#include <errno.h>
-#include <mach/mach.h>
-#include <mach/machine.h>
-#include <mach-o/dyld_process_info.h>
-#include <dispatch/dispatch.h>
-#include <Availability.h>
-
-#include "test_support.h"
-
-extern char** environ;
-
-void launchTest(bool launchSuspended, bool disconnectEarly)
-{
- LOG("launchTest (%s)", launchSuspended ? "suspended" : "unsuspened");
- dispatch_queue_t queue = dispatch_queue_create("com.apple.dyld.test.dyld_process_info", NULL);
- // We do this instead of using a dispatch_semaphore to prevent priority inversions
- dispatch_block_t taskDone = dispatch_block_create(DISPATCH_BLOCK_INHERIT_QOS_CLASS, ^{});
- dispatch_block_t taskStarted = dispatch_block_create(DISPATCH_BLOCK_INHERIT_QOS_CLASS, ^{});
- pid_t pid;
-
- task_t task;
- char subTestNameBuffer[256];
- char *subTestName = &subTestNameBuffer[0];
- __block bool sawMainExecutable = false;
- __block bool sawlibSystem = false;
- __block bool gotTerminationNotice = false;
- __block bool gotEarlyNotice = false;
- __block bool gotMainNotice = false;
- __block bool gotMainNoticeBeforeAllInitialDylibs = false;
- __block bool gotFooNoticeBeforeMain = false;
-
- __block int libFooLoadCount = 0;
- __block int libFooUnloadCount = 0;
- __block dyld_process_info_notify handle;
-
- _process process;
- process.set_executable(RUN_DIR "/target.exe");
- const char* env[] = { "TEST_OUTPUT=None", NULL};
- process.set_env(env);
- process.set_launch_suspended(launchSuspended);
- if (!launchSuspended) {
- const char* args[] = {"suspend-in-main", NULL};
- _process_config_set_args(process, args);
- _process_config_set_stderr_handler(process, ^(int fd) {
- dispatch_semaphore_signal(taskStarted);
- });
- _process_config_set_exit_handler(process, ^(pid_t pid) {
- LOG("DIED (pid: %d)", pid);
- });
- }
- pid = process.launch(queue);
-
- if (!launchSuspended && dispatch_semaphore_wait(taskStarted, dispatch_time(DISPATCH_TIME_NOW, 5LL * NSEC_PER_SEC)) != 0) {
- FAIL("Child launch timeout");
- }
-#if 1
- snprintf(&subTestNameBuffer[0], 256, "%s (arch: %d)", launchSuspended ? "launch suspended" : "launch suspend-in-main", currentArch);
-
- if ( task_for_pid(mach_task_self(), pid, &task) != KERN_SUCCESS ) {
- FAIL("task_for_pid()");
- }
-
- kern_return_t kr;
- unsigned count = 0;
- do {
- handle = _dyld_process_info_notify(task, queue,
- ^(bool unload, uint64_t timestamp, uint64_t machHeader, const uuid_t uuid, const char* path) {
- if ( strstr(path, "/target.exe") != NULL )
- sawMainExecutable = true;
- if ( strstr(path, "/libSystem") != NULL )
- sawlibSystem = true;
- if ( strstr(path, "/libfoo.dylib") != NULL ) {
- if ( !gotMainNotice ) {
- gotFooNoticeBeforeMain = true;
- }
- if ( unload ) {
- ++libFooUnloadCount;
- } else {
- ++libFooLoadCount;
- }
- if ( disconnectEarly ) {
- LOG("EARLY DISCONNECT");
- gotEarlyNotice = true;
- dispatch_semaphore_signal(taskDone);
- }
- }
- },
- ^{
- LOG("TERMINATED (pid: %d)", pid);
- gotTerminationNotice = true;
- dispatch_semaphore_signal(taskDone);
- },
- &kr);
- ++count;
- if ( handle == NULL )
- LOG("_dyld_process_info_notify() returned NULL, result=%d, count=%d", kr, count);
- } while ( (handle == NULL) && (count < 5) );
-
- if ( handle == NULL ) {
- FAIL("%s: did not not get handle", subTestName);
- }
-
- if (launchSuspended) {
- // If the process starts suspended register for main(),
- // otherwise skip since this test is a race between
- // process setup and notification registration
- _dyld_process_info_notify_main(handle, ^{
- LOG("target entering main()");
- gotMainNotice = true;
- if ( !sawMainExecutable || !sawlibSystem )
- gotMainNoticeBeforeAllInitialDylibs = true;
- });
- kill(pid, SIGCONT);
- LOG("Sent SIGCONT");
- } else {
- kill(pid, SIGUSR1);
- LOG("Sent SIGUSR1");
- }
-
- // block waiting for notification that target has exited
- if (dispatch_semaphore_wait(taskDone, dispatch_time(DISPATCH_TIME_NOW, 10LL * NSEC_PER_SEC)) != 0) {
- FAIL("%s: did not get exit signal", subTestName);
- }
-
-// dispatch_release(taskDone);
-// dispatch_release(queue);
-// _dyld_process_info_notify_release(handle);
-
- // Do not run any tests associated with startup unless the kernel suspended us
- // before main()
- if (launchSuspended) {
- if ( !sawMainExecutable ) {
- FAIL("%s: did not get load notification of main executable", subTestName);
- }
-
- if ( !gotMainNotice ) {
- FAIL("%s: did not get notification of main()", subTestName);
- }
-
- if ( gotMainNoticeBeforeAllInitialDylibs ) {
- FAIL("%s: notification of main() arrived before all initial dylibs", subTestName);
- }
-
- if ( gotFooNoticeBeforeMain ) {
- FAIL("%s: notification of main() arrived after libfoo load notice", subTestName);
- }
-
- if ( !sawlibSystem ) {
- FAIL("%s: did not get load notification of libSystem", subTestName);
- }
- }
-
- if ( disconnectEarly ) {
- if ( libFooLoadCount != 1 ) {
- FAIL("%s: got %d load notifications about libFoo instead of 1", subTestName, libFooLoadCount);
- }
- if ( libFooUnloadCount != 0 ) {
- FAIL("%s: got %d unload notifications about libFoo instead of 1", subTestName, libFooUnloadCount);
- }
- }
- else {
- if ( libFooLoadCount != 3 ) {
- FAIL("%s: got %d load notifications about libFoo instead of 3", subTestName, libFooLoadCount);
- }
- if ( libFooUnloadCount != 3 ) {
- FAIL("%s: got %d unload notifications about libFoo instead of 3", subTestName, libFooUnloadCount);
- }
- }
-#endif
-}
-
-#if 0
-static void validateMaxNotifies(struct task_and_pid tp)
-{
- dispatch_queue_t serviceQueue = dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_HIGH, 0);
- dyld_process_info_notify handles[10];
- // This loop goes through 10 iterations
- // i = 0..7 Should succeed
- // i = 8 Should fail, but trigger a release that frees up a slot
- // i = 9 Should succeed
- for (int i=0; i < 10; ++i) {
- kern_return_t kr;
- handles[i] = _dyld_process_info_notify(tp.task, serviceQueue,
- ^(bool unload, uint64_t timestamp, uint64_t machHeader, const uuid_t uuid, const char* path) {
- LOG("unload=%d, 0x%012llX <%02X%02X%02X%02X-%02X%02X-%02X%02X-%02X%02X-%02X%02X%02X%02X%02X%02X> %s",
- unload, machHeader, uuid[0], uuid[1], uuid[2], uuid[3], uuid[4], uuid[5], uuid[6], uuid[7],
- uuid[8], uuid[9], uuid[10], uuid[11], uuid[12], uuid[13], uuid[14], uuid[15], path);
- },
- ^{
- LOG("target exited");
- },
- &kr);
- if ( handles[i] == NULL ) {
- if ( i == 8 ) {
- // expected failure, because only 8 simultaneous connections allowed
- // release one and try again
- _dyld_process_info_notify_release(handles[4]);
- handles[4] = NULL;
- }
- else {
- LOG("_dyld_process_info_notify() returned NULL and kern_result=%d, on count=%d", kr, i);
- killTest(tp);
- exit(0);
- }
- }
- }
- // release all
- for (int i=0; i < 10; ++i) {
- if ( handles[i] != NULL ) {
- _dyld_process_info_notify_release(handles[i]);
- }
- }
- dispatch_release(serviceQueue);
-}
-#endif
-
-static void testSelfAttach(void) {
- LOG("7");
- __block bool dylibLoadNotified = false;
- kern_return_t kr = KERN_SUCCESS;
- dispatch_queue_t queue = dispatch_queue_create("com.apple.dyld.test.dyld_process_info.self-attach", NULL);
- LOG("7.5");
- dyld_process_info_notify handle = _dyld_process_info_notify(mach_task_self(), queue,
- ^(bool unload, uint64_t timestamp, uint64_t machHeader, const uuid_t uuid, const char* path) {
- if ( strstr(path, "/libfoo.dylib") != NULL ) {
- dylibLoadNotified = true;
- }
- },
- ^{},
- &kr);
- LOG("8");
- if ( handle == NULL ) {
- LOG("_dyld_process_info_notify() returned NULL, result=%d", kr);
- }
- LOG("8.5");
- void* h = dlopen(RUN_DIR "/libfoo.dylib", 0);
- LOG("8.75");
- dlclose(h);
- if (!dylibLoadNotified) {
- FAIL("testSelfAttach");
- }
- LOG("9");
-}
-
-int main(int argc, const char* argv[], const char* envp[], const char* apple[]) {
-
- // test 1) launch test program suspended in same arch as this program
- launchTest(true, false);
-
- // test 2) launch test program in same arch as this program where it sleeps itself
- launchTest(false, false);
-// validateMaxNotifies(child);
-
- // test 3) launch test program where we disconnect from it after first dlopen
- launchTest(true, true);
-// monitor("disconnect", child, true, false);
-
- // test 4) attempt to monitor the monitoring process
-// testSelfAttach();
- PASS("Success");
-
-}
--- /dev/null
+
+// BUILD: $CC target.c -o $BUILD_DIR/target.exe -DRUN_DIR="$RUN_DIR"
+// BUILD: $CC foo.c -o $BUILD_DIR/libfoo.dylib -dynamiclib
+// BUILD: $CXX main.mm -o $BUILD_DIR/dyld_process_info_notify.exe -DRUN_DIR="$RUN_DIR"
+// BUILD: $TASK_FOR_PID_ENABLE $BUILD_DIR/dyld_process_info_notify.exe
+
+// RUN: $SUDO ./dyld_process_info_notify.exe
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <dlfcn.h>
+#include <unistd.h>
+#include <signal.h>
+#include <spawn.h>
+#include <errno.h>
+#include <libgen.h>
+#include <sys/proc.h>
+#include <mach/mach.h>
+#include <sys/param.h>
+#include <mach/machine.h>
+#include <mach-o/dyld_images.h>
+#include <mach-o/dyld_process_info.h>
+#include <dispatch/dispatch.h>
+#include <Availability.h>
+
+#include "test_support.h"
+
+//FIXME: We need to add some concurrent access tests
+//FIXME: Add cross architecture tests back now that arm64e macOS exists
+
+extern char** environ;
+
+// This is a one shot semaphore implementation that is QoS aware with integrated logging
+struct OneShotSemaphore {
+ OneShotSemaphore(const char* N) :_name(strdup(N)), _block(dispatch_block_create(DISPATCH_BLOCK_INHERIT_QOS_CLASS, ^{})) {}
+ bool wait() {
+ LOG("Waiting for semaphore %s", _name);
+ dispatch_time_t tenSecondFromNow = dispatch_time(DISPATCH_WALLTIME_NOW, 10 * NSEC_PER_SEC);
+ if (dispatch_block_wait(_block, tenSecondFromNow) != 0) {
+ LOG("Timeout for semaphore %s", _name);
+ return false;
+ }
+ return true;
+ }
+ void signal() {
+ LOG("Signalling semaphore %s", _name);
+ _block();
+ }
+private:
+ const char* _name;
+ dispatch_block_t _block;
+};
+
+void launchTest(bool launchSuspended, bool disconnectEarly)
+{
+
+    LOG("launchTest (%s)", launchSuspended ? "suspended" : "unsuspended");
+    LOG("launchTest (%s)", disconnectEarly ? "disconnect early" : "normal disconnect");
+ dispatch_queue_t queue = dispatch_queue_create("com.apple.dyld.test.dyld_process_info", NULL);
+ dispatch_queue_t signalQueue = dispatch_queue_create("com.apple.dyld.test.dyld_process_info.signals", NULL);
+
+    // We use these blocks as semaphores. We do it this way so we have ownership for QOS and so we get logging
+ __block OneShotSemaphore childReady("childReady");
+ __block OneShotSemaphore childExit("childExit");
+ __block OneShotSemaphore childDone("childDone");
+ __block OneShotSemaphore childExitNotification("childExitNotification");
+
+    // We control our interactions with the subordinate process via signals, but if we send signals before its signal handlers
+ // are installed it will terminate. We wait for it to SIGUSR1 us to indicate it is ready, so we need to setup a signal handler for
+ // that.
+ signal(SIGUSR1, SIG_IGN);
+ dispatch_source_t usr1SignalSource = dispatch_source_create(DISPATCH_SOURCE_TYPE_SIGNAL, SIGUSR1, 0, signalQueue);
+ dispatch_source_set_event_handler(usr1SignalSource, ^{
+ LOG("Got SIGUSR1");
+ childReady.signal();
+ });
+ dispatch_resume(usr1SignalSource);
+
+ signal(SIGUSR2, SIG_IGN);
+ dispatch_source_t usr2SignalSource = dispatch_source_create(DISPATCH_SOURCE_TYPE_SIGNAL, SIGUSR2, 0, signalQueue);
+ dispatch_source_set_event_handler(usr2SignalSource, ^{
+ LOG("Got SIGUSR2");
+ childDone.signal();
+ });
+ dispatch_resume(usr2SignalSource);
+
+ pid_t pid;
+ task_t task;
+ __block bool sawMainExecutable = false;
+ __block bool sawlibSystem = false;
+ __block bool gotMainNotice = false;
+ __block bool gotMainNoticeBeforeAllInitialDylibs = false;
+ __block bool gotFooNoticeBeforeMain = false;
+ __block int libFooLoadCount = 0;
+ __block int libFooUnloadCount = 0;
+ __block dyld_process_info_notify handle;
+
+ _process process;
+ process.set_executable_path(RUN_DIR "/target.exe");
+ const char* env[] = { "TEST_OUTPUT=None", NULL};
+ process.set_env(env);
+ process.set_launch_suspended(launchSuspended);
+ process.set_exit_handler(^(pid_t pid) {
+ // This is almost all logging code, the only functional element of it
+ // is calling the childExit() semaphore
+ int status = 0;
+ int dispStatus = 0;
+ (void)waitpid(pid, &status, 0);
+ const char* exitType = "UNKNOWN";
+ if (WIFEXITED(status)) {
+ exitType = "exit()";
+ dispStatus = WEXITSTATUS(status);
+ }
+ if (WIFSIGNALED(status)) {
+ exitType = "signal";
+ dispStatus = WTERMSIG(status);
+ }
+ LOG("DIED via %s (pid: %d, status: %d)", exitType, pid, dispStatus);
+ childExit.signal();
+ });
+
+ // Launch process
+ pid = process.launch();
+ LOG("launchTest pid (%u)", pid);
+ if ( task_read_for_pid(mach_task_self(), pid, &task) != KERN_SUCCESS ) {
+ FAIL("task_read_for_pid()");
+ }
+
+ // Attach notifier
+ kern_return_t kr;
+ unsigned count = 0;
+ do {
+ handle = _dyld_process_info_notify( task, queue,
+ ^(bool unload, uint64_t timestamp, uint64_t machHeader, const uuid_t uuid, const char* path) {
+ LOG("Handler called");
+ if ( strstr(path, "/target.exe") != NULL )
+ sawMainExecutable = true;
+ if ( strstr(path, "/libSystem") != NULL )
+ sawlibSystem = true;
+ if ( strstr(path, "/libfoo.dylib") != NULL ) {
+ if ( !gotMainNotice ) {
+ gotFooNoticeBeforeMain = true;
+ }
+ if ( unload ) {
+ ++libFooUnloadCount;
+ } else {
+ ++libFooLoadCount;
+ if (disconnectEarly) {
+ _dyld_process_info_notify_release(handle);
+ }
+ }
+ }
+ },
+ ^{
+ LOG("TERMINATED (pid: %d)", pid);
+ childExitNotification.signal();
+ },
+ &kr);
+ ++count;
+ if ( handle == NULL )
+ LOG("_dyld_process_info_notify() returned NULL, result=%d, count=%d", kr, count);
+ } while ( (handle == NULL) && (count < 5) );
+ LOG("launchTest handler registered");
+
+ if ( handle == NULL ) {
+        FAIL("Did not get handle");
+ }
+
+ // if suspended attach main notifier and unsuspend
+ if (launchSuspended) {
+ // If the process starts suspended register for main(),
+ // otherwise skip since this test is a race between
+ // process setup and notification registration
+ _dyld_process_info_notify_main(handle, ^{
+ LOG("target entering main()");
+ gotMainNotice = true;
+ if ( !sawMainExecutable || !sawlibSystem )
+ gotMainNoticeBeforeAllInitialDylibs = true;
+ });
+ kill(pid, SIGCONT);
+ LOG("Sent SIGCONT");
+ }
+
+ if (!childReady.wait()) {
+ FAIL("Timed out waiting for child to signal it is ready");
+ }
+ kill(pid, SIGUSR1);
+ LOG("Sent SIGUSR1");
+ if (!childDone.wait()) {
+ FAIL("Timed out waiting for child to finish dlopen()/dlclose() operations");
+ }
+ if (launchSuspended) {
+ if ( !sawMainExecutable ) {
+ FAIL("Did not get load notification of main executable");
+ }
+ if ( !gotMainNotice ) {
+ FAIL("Did not get notification of main()");
+ }
+ if ( gotMainNoticeBeforeAllInitialDylibs ) {
+ FAIL("Notification of main() arrived before all initial dylibs");
+ }
+ if ( gotFooNoticeBeforeMain ) {
+ FAIL("Notification of main() arrived after libfoo load notice");
+ }
+ if ( !sawlibSystem ) {
+ FAIL("Did not get load notification of libSystem");
+ }
+ }
+ kill(pid, SIGTERM);
+ LOG("Sent SIGTERM");
+ if (!childExitNotification.wait()) {
+ FAIL("Timed out waiting for child exit notification via _dyld_process_info_notify");
+ }
+ if ( disconnectEarly ) {
+ if ( libFooLoadCount != 1 ) {
+ FAIL("Got %d load notifications about libFoo instead of 1", libFooLoadCount);
+ }
+ if ( libFooUnloadCount != 0 ) {
+            FAIL("Got %d unload notifications about libFoo instead of 0", libFooUnloadCount);
+ }
+ } else {
+ if ( libFooLoadCount != 3 ) {
+ FAIL("Got %d load notifications about libFoo instead of 3", libFooLoadCount);
+ }
+ if ( libFooUnloadCount != 3 ) {
+ FAIL("Got %d unload notifications about libFoo instead of 3", libFooUnloadCount);
+ }
+ }
+ if (!childExit.wait()) {
+ FAIL("Timed out waiting for child cleanup");
+ }
+
+ // Tear down
+ dispatch_source_cancel(usr1SignalSource);
+ dispatch_source_cancel(usr2SignalSource);
+ if (!disconnectEarly) {
+ _dyld_process_info_notify_release(handle);
+ }
+}
+
+static void testSelfAttach(void) {
+ __block OneShotSemaphore teardownSempahore("self test teardownSempahore");
+ __block bool dylibLoadNotified = false;
+ kern_return_t kr = KERN_SUCCESS;
+ dispatch_queue_t queue = dispatch_queue_create("com.apple.dyld.test.dyld_process_info.self-attach", NULL);
+ dyld_process_info_notify handle = _dyld_process_info_notify(mach_task_self(), queue,
+ ^(bool unload, uint64_t timestamp, uint64_t machHeader, const uuid_t uuid, const char* path) {
+ if ( strstr(path, "/libfoo.dylib") != NULL ) {
+ dylibLoadNotified = true;
+ }
+ },
+ ^{ teardownSempahore.signal(); },
+ &kr);
+ if ( handle == NULL ) {
+ LOG("_dyld_process_info_notify() returned NULL, result=%d", kr);
+ }
+ void* h = dlopen(RUN_DIR "/libfoo.dylib", 0);
+ dlclose(h);
+ if (!dylibLoadNotified) {
+ FAIL("testSelfAttach");
+ }
+ _dyld_process_info_notify_release(handle);
+ teardownSempahore.wait();
+
+ // Get the all image info
+ task_dyld_info_data_t taskDyldInfo;
+ mach_msg_type_number_t taskDyldInfoCount = TASK_DYLD_INFO_COUNT;
+ if (task_info(mach_task_self(), TASK_DYLD_INFO, (task_info_t)&taskDyldInfo, &taskDyldInfoCount) != KERN_SUCCESS) {
+ FAIL("Could not find all image info");
+ }
+ dyld_all_image_infos* infos = (dyld_all_image_infos*)taskDyldInfo.all_image_info_addr;
+
+    // Verify every notify port slot was cleaned up (all entries expected to be 0)
+ uint8_t notifySlot;
+ for (uint8_t notifySlot = 0; notifySlot < DYLD_MAX_PROCESS_INFO_NOTIFY_COUNT; ++notifySlot) {
+ if (infos->notifyPorts[notifySlot] != 0) {
+ FAIL("Port array entry %u not cleaned up, expected 0, got %u", notifySlot, infos->notifyPorts[notifySlot]);
+ }
+ }
+}
+
+int main(int argc, const char* argv[], const char* envp[], const char* apple[]) {
+ // test 1) attempt to monitor the monitoring process
+ testSelfAttach();
+ // test 2) launch test program suspended and wait for it to run to completion
+ launchTest(true, false);
+ // test 3) launch test program in unsuspended and wait for it to run to completion
+ launchTest(false, false);
+ // test 4) launch test program suspended and disconnect from it after the first dlopen() in target.exe
+ launchTest(true, true);
+ // test 5) launch test program unsuspended and disconnect from it after the first dlopen() in target.exe
+ launchTest(false, true);
+
+ PASS("Success");
+}
#include <stdlib.h>
#include <string.h>
#include <dlfcn.h>
+#include <libgen.h>
#include <signal.h>
#include <unistd.h>
#include <mach/mach.h>
+#include <sys/param.h>
#include <dispatch/dispatch.h>
-void performDylibOperations(void) {
- for (int i=0; i < 3; ++i) {
- void* h = dlopen(RUN_DIR "/libfoo.dylib", 0);
- dlclose(h);
- }
- fprintf(stderr, "Done (pid: %d)\n", getpid());
- exit(0);
-}
+// The process starts, then sends its parent a SIGUSR1 to indicate it is ready
+// At that point it waits for SIGUSR1, and when it receives one it loads and unloads libfoo.dylib 3 times
+// The process remains running until it receives a SIGTERM
+
+// This process will clean itself up in the event its parent dies
int main(int argc, const char* argv[], const char* envp[], const char* apple[]) {
+ // Setup parent death handler
+ dispatch_source_t parentDeathSource = dispatch_source_create(DISPATCH_SOURCE_TYPE_PROC, getppid(), DISPATCH_PROC_EXIT, dispatch_get_main_queue());
+ dispatch_source_set_event_handler(parentDeathSource, ^{
+ exit(0);
+ });
+ dispatch_resume(parentDeathSource);
+
+ // Setup SIGTERM handler
+ signal(SIGTERM, SIG_IGN);
+ dispatch_source_t exitSignalSource = dispatch_source_create(DISPATCH_SOURCE_TYPE_SIGNAL, SIGTERM, 0, dispatch_get_main_queue());
+ dispatch_source_set_event_handler(exitSignalSource, ^{
+ exit(0);
+ });
+ dispatch_resume(exitSignalSource);
+
+ // Setup SIGUSR1 handler
signal(SIGUSR1, SIG_IGN);
- dispatch_source_t signalSource = dispatch_source_create(DISPATCH_SOURCE_TYPE_PROC, getppid(),
- DISPATCH_PROC_EXIT, dispatch_get_main_queue());
+ dispatch_source_t signalSource = dispatch_source_create(DISPATCH_SOURCE_TYPE_SIGNAL, SIGUSR1, 0, dispatch_get_main_queue());
dispatch_source_set_event_handler(signalSource, ^{
- exit(0);
+ for (int i=0; i < 3; ++i) {
+ void* h = dlopen(RUN_DIR "/libfoo.dylib", 0);
+ dlclose(h);
+ }
+ kill(getppid(), SIGUSR2);
});
dispatch_resume(signalSource);
- if ( (argc > 1) && (strcmp(argv[1], "suspend-in-main") == 0) ) {
- dispatch_source_t signalSourceSource = dispatch_source_create(DISPATCH_SOURCE_TYPE_SIGNAL,
- SIGUSR1, 0, dispatch_get_main_queue());
- dispatch_source_set_event_handler(signalSourceSource, ^{
- performDylibOperations();
- });
- dispatch_resume(signalSourceSource);
- dispatch_async(dispatch_get_main_queue(), ^{
- fprintf(stderr, "Ready (pid: %d)\n", getpid());
- });
- } else {
- performDylibOperations();
- }
-
+ // Message our parent to let them know our signal handlers are ready
+ kill(getppid(), SIGUSR1);
dispatch_main();
}
#include <signal.h>
#include <spawn.h>
#include <errno.h>
+#include <sys/proc.h>
#include <mach/mach.h>
#include <mach/machine.h>
#include <mach-o/dyld_process_info.h>
_process process;
process.set_executable_path(RUN_DIR "/target.exe");
process.set_launch_suspended(true);
- process.set_launch_async(true);
const char* env[] = { "TEST_OUTPUT=None", NULL};
process.set_env(env);
pid_t pid = process.launch();
task_t task;
- if (task_for_pid(mach_task_self(), pid, &task) != KERN_SUCCESS) {
- FAIL("task_for_pid() failed");
+ if (task_read_for_pid(mach_task_self(), pid, &task) != KERN_SUCCESS) {
+ FAIL("task_read_for_pid() failed");
}
dispatch_async(dispatch_get_main_queue(), ^{
{
_process dyldUsage;
dyldUsage.set_executable_path("/usr/local/bin/dyld_usage");
- dyldUsage.set_launch_async(true);
const char* args[] = { "-j", "dyld_usage_target.exe", NULL };
dyldUsage.set_args(args);
__block dispatch_data_t output = NULL;
-// BUILD(macos|x86_64): $CC bar.c -mmacosx-version-min=10.5 -dynamiclib -install_name $RUN_DIR/libbar.dylib -o $BUILD_DIR/libbar.dylib
-// BUILD(macos|x86_64): $CC foo.c -mmacosx-version-min=10.5 -dynamiclib $BUILD_DIR/libbar.dylib -sub_library libbar -install_name $RUN_DIR/libfoo.dylib -o $BUILD_DIR/libfoo.dylib
-// BUILD(macos|x86_64): $CC main.c -mmacosx-version-min=10.5 -o $BUILD_DIR/dylib-re-export.exe $BUILD_DIR/libfoo.dylib
+// BUILD(macos|x86_64): $CC bar.c -mmacosx-version-min=10.5 -dynamiclib -install_name $RUN_DIR/libbar.dylib -o $BUILD_DIR/libbar.dylib
+// BUILD(macos|x86_64): $CC foo.c -mmacosx-version-min=10.5 -dynamiclib $BUILD_DIR/libbar.dylib -sub_library libbar -install_name $RUN_DIR/libfoo.dylib -o $BUILD_DIR/libfoo.dylib
+// BUILD(macos|x86_64): $CC main.c -mmacosx-version-min=10.5 -o $BUILD_DIR/dylib-re-export.exe $BUILD_DIR/libfoo.dylib -L$BUILD_DIR
// BUILD(ios,tvos,watchos,bridgeos):
return true;
}
+#if !__arm64e__
#if SUPPORT_CUSTOM_SEGMENTS
__attribute__((section(("__MORETEXT,__text"))))
#endif
{
if ( gRanTerm != NULL )
*gRanTerm = true;
-}
\ No newline at end of file
+}
+#endif
FAIL("didn't dlclose");
}
+#if __arm64e__
+ if (ranTerm) {
+ FAIL("unexpectedly ran term");
+ }
+#else
if (!ranTerm) {
FAIL("didn't run term");
}
+#endif
PASS("Success");
}
// Create the base kernel collection
// BUILD(macos,ios,tvos,bridgeos|x86_64,arm64,arm64e): $CP extensions/kernel-export.kext/Info.plist $BUILD_DIR/extensions/kernel-export-kext/Info.plist
-// BUILD(macos,ios,tvos,bridgeos|x86_64,arm64,arm64e): $CC main.c -o $BUILD_DIR/kernel-auxkc-fixups.exe -Wl,-static -mkernel -nostdlib -Wl,-add_split_seg_info -Wl,-e,__start -Wl,-pie -Wl,-pagezero_size,0x0 -Wl,-install_name,/usr/lib/swift/split.seg.v2.hack -fno-stack-protector -fno-builtin -ffreestanding -Wl,-segprot,__HIB,rx,rx -Wl,-image_base,0x8000 -Wl,-segaddr,__HIB,0x4000 -fno-ptrauth-function-pointer-type-discrimination
+// BUILD(macos,ios,tvos,bridgeos|x86_64,arm64,arm64e): $CC main.c -o $BUILD_DIR/kernel-auxkc-fixups.exe -Wl,-static -mkernel -nostdlib -Wl,-add_split_seg_info -Wl,-e,__start -Wl,-pie -Wl,-pagezero_size,0x0 -Wl,-install_name,/usr/lib/swift/split.seg.v2.hack -fno-stack-protector -fno-builtin -ffreestanding -Wl,-segprot,__HIB,rx,rx -Wl,-image_base,0x8000 -Wl,-segaddr,__HIB,0x4000 -fno-ptrauth-function-pointer-type-discrimination -ftrivial-auto-var-init=uninitialized
// BUILD(macos,ios,tvos,bridgeos|x86_64,arm64,arm64e): $CC kernel-export.c -o $BUILD_DIR/extensions/kernel-export-kext/kernel-export -Wl,-kext -Wl,-kext_objects_dir,$BUILD_DIR/KextObjects -mkernel -nostdlib -Wl,-add_split_seg_info -Wl,-install_name,/usr/lib/swift/split.seg.v2.hack -fno-ptrauth-function-pointer-type-discrimination
// BUILD(macos,ios,tvos,bridgeos|x86_64,arm64,arm64e): $APP_CACHE_UTIL -create-kernel-collection $BUILD_DIR/kernel.kc -kernel $BUILD_DIR/kernel-auxkc-fixups.exe -extensions $BUILD_DIR/extensions -bundle-id com.apple.kernel.export $DEPENDS_ON $BUILD_DIR/extensions/kernel-export-kext/Info.plist $DEPENDS_ON $BUILD_DIR/extensions/kernel-export-kext/kernel-export
-// BOOT_ARGS: amfi=3 cs_enforcement_disable=1
+// BOOT_ARGSxx: amfi=3 cs_enforcement_disable=1
+// FIXME: re-enable for macOS when it works
+// xxBUILDxx(macos|x86_64): $CC main.c -o $BUILD_DIR/kernel-fixups.exe -Wl,-static -mkernel -nostdlib -Wl,-add_split_seg_info -Wl,-e,__start -Wl,-pie -Wl,-pagezero_size,0x0 -Wl,-install_name,/usr/lib/swift/split.seg.v2.hack -fno-stack-protector -fno-builtin -ffreestanding -Wl,-segprot,__HIB,rx,rx -Wl,-image_base,0x8000 -Wl,-segaddr,__HIB,0x4000
+// xxBUILDxx(macos|x86_64): $APP_CACHE_UTIL -create-kernel-collection $BUILD_DIR/kernel.kc -kernel $BUILD_DIR/kernel-fixups.exe
-// BUILD(macos|x86_64): $CC main.c -o $BUILD_DIR/kernel-fixups.exe -Wl,-static -mkernel -nostdlib -Wl,-add_split_seg_info -Wl,-e,__start -Wl,-pie -Wl,-pagezero_size,0x0 -Wl,-install_name,/usr/lib/swift/split.seg.v2.hack -fno-stack-protector -fno-builtin -ffreestanding -Wl,-segprot,__HIB,rx,rx -Wl,-image_base,0x8000 -Wl,-segaddr,__HIB,0x4000
-// BUILD(macos|x86_64): $APP_CACHE_UTIL -create-kernel-collection $BUILD_DIR/kernel.kc -kernel $BUILD_DIR/kernel-fixups.exe
+// BUILDxx(macos,ios,tvos,watchos,bridgeos):
-// BUILD(ios,tvos,watchos,bridgeos):
-
-// RUN_STATIC: $RUN_STATIC ./kernel.kc
+// xxRUN_STATIC: $RUN_STATIC ./kernel.kc
// This tests that unaligned fixups work in x86_64
// BOOT_ARGS: amfi=3 cs_enforcement_disable=1
-// BUILD(macos,ios,tvos,bridgeos|x86_64,arm64,arm64e): $CC main.c -o $BUILD_DIR/kernel-fixups.exe -Wl,-static -mkernel -nostdlib -Wl,-add_split_seg_info -Wl,-e,__start -Wl,-pie -Wl,-pagezero_size,0x0 -Wl,-install_name,/usr/lib/swift/split.seg.v2.hack -fno-stack-protector -fno-builtin -ffreestanding -Wl,-segprot,__HIB,rx,rx -Wl,-image_base,0x8000 -Wl,-segaddr,__HIB,0x4000 -fno-ptrauth-function-pointer-type-discrimination
+// BUILD(macos,ios,tvos,bridgeos|x86_64,arm64,arm64e): $CC main.c -o $BUILD_DIR/kernel-fixups.exe -Wl,-static -mkernel -nostdlib -Wl,-add_split_seg_info -Wl,-e,__start -Wl,-pie -Wl,-pagezero_size,0x0 -Wl,-install_name,/usr/lib/swift/split.seg.v2.hack -fno-stack-protector -fno-builtin -ffreestanding -Wl,-segprot,__HIB,rx,rx -Wl,-image_base,0x8000 -Wl,-segaddr,__HIB,0x4000 -fno-ptrauth-function-pointer-type-discrimination -ftrivial-auto-var-init=uninitialized
// BUILD(macos,ios,tvos,bridgeos|x86_64,arm64,arm64e): $APP_CACHE_UTIL -create-kernel-collection $BUILD_DIR/kernel.kc -kernel $BUILD_DIR/kernel-fixups.exe
// BUILD(watchos):
// BOOT_ARGS: amfi=3 cs_enforcement_disable=1
-// BUILD(macos,ios,tvos,bridgeos|x86_64,arm64,arm64e): $CC main.c -o $BUILD_DIR/kernel-hello-world.exe -Wl,-static -mkernel -nostdlib -Wl,-add_split_seg_info -Wl,-e,__start -Wl,-pie -Wl,-pagezero_size,0x0 -Wl,-install_name,/usr/lib/swift/split.seg.v2.hack -fno-stack-protector -fno-builtin -ffreestanding -Wl,-segprot,__HIB,rx,rx -Wl,-image_base,0x8000 -Wl,-segaddr,__HIB,0x4000 -fno-ptrauth-function-pointer-type-discrimination
+// BUILD(macos,ios,tvos,bridgeos|x86_64,arm64,arm64e): $CC main.c -o $BUILD_DIR/kernel-hello-world.exe -Wl,-static -mkernel -nostdlib -Wl,-add_split_seg_info -Wl,-e,__start -Wl,-pie -Wl,-pagezero_size,0x0 -Wl,-install_name,/usr/lib/swift/split.seg.v2.hack -fno-stack-protector -fno-builtin -ffreestanding -Wl,-segprot,__HIB,rx,rx -Wl,-image_base,0x8000 -Wl,-segaddr,__HIB,0x4000 -fno-ptrauth-function-pointer-type-discrimination -ftrivial-auto-var-init=uninitialized
// BUILD(macos,ios,tvos,bridgeos|x86_64,arm64,arm64e): $APP_CACHE_UTIL -create-kernel-collection $BUILD_DIR/kernel-hello-world.kc -kernel $BUILD_DIR/kernel-hello-world.exe -platform kernel
// BUILD(watchos):
// BOOT_ARGS: amfi=3 cs_enforcement_disable=1
// Create the base kernel collection
-// BUILD(macos,ios,tvos,bridgeos|x86_64,arm64,arm64e): $CC main.c -o $BUILD_DIR/kernel-auxkc-fixups.exe -Wl,-static -mkernel -nostdlib -Wl,-add_split_seg_info -Wl,-e,__start -Wl,-pie -Wl,-pagezero_size,0x0 -Wl,-install_name,/usr/lib/swift/split.seg.v2.hack -fno-stack-protector -fno-builtin -ffreestanding -Wl,-segprot,__HIB,rx,rx -Wl,-image_base,0x8000 -Wl,-segaddr,__HIB,0x4000 -fno-ptrauth-function-pointer-type-discrimination
+// BUILD(macos,ios,tvos,bridgeos|x86_64,arm64,arm64e): $CC main.c -o $BUILD_DIR/kernel-auxkc-fixups.exe -Wl,-static -mkernel -nostdlib -Wl,-add_split_seg_info -Wl,-e,__start -Wl,-pie -Wl,-pagezero_size,0x0 -Wl,-install_name,/usr/lib/swift/split.seg.v2.hack -fno-stack-protector -fno-builtin -ffreestanding -Wl,-segprot,__HIB,rx,rx -Wl,-image_base,0x8000 -Wl,-segaddr,__HIB,0x4000 -fno-ptrauth-function-pointer-type-discrimination -ftrivial-auto-var-init=uninitialized
// BUILD(macos,ios,tvos,bridgeos|x86_64,arm64,arm64e): $APP_CACHE_UTIL -create-kernel-collection $BUILD_DIR/kernel.kc -kernel $BUILD_DIR/kernel-auxkc-fixups.exe
// Create the pageable kernel collection
// Create the base kernel collection
// BUILD(macos,ios,tvos,bridgeos|x86_64,arm64,arm64e): $CP extensions/kernel-export.kext/Info.plist $BUILD_DIR/extensions/kernel-export-kext/Info.plist
-// BUILD(macos,ios,tvos,bridgeos|x86_64,arm64,arm64e): $CC main.c -o $BUILD_DIR/kernel-auxkc-fixups.exe -Wl,-static -mkernel -nostdlib -Wl,-add_split_seg_info -Wl,-e,__start -Wl,-pie -Wl,-pagezero_size,0x0 -Wl,-install_name,/usr/lib/swift/split.seg.v2.hack -fno-stack-protector -fno-builtin -ffreestanding -Wl,-segprot,__HIB,rx,rx -Wl,-image_base,0x8000 -Wl,-segaddr,__HIB,0x4000 -fno-ptrauth-function-pointer-type-discrimination
+// BUILD(macos,ios,tvos,bridgeos|x86_64,arm64,arm64e): $CC main.c -o $BUILD_DIR/kernel-auxkc-fixups.exe -Wl,-static -mkernel -nostdlib -Wl,-add_split_seg_info -Wl,-e,__start -Wl,-pie -Wl,-pagezero_size,0x0 -Wl,-install_name,/usr/lib/swift/split.seg.v2.hack -fno-stack-protector -fno-builtin -ffreestanding -Wl,-segprot,__HIB,rx,rx -Wl,-image_base,0x8000 -Wl,-segaddr,__HIB,0x4000 -fno-ptrauth-function-pointer-type-discrimination -ftrivial-auto-var-init=uninitialized
// BUILD(macos,ios,tvos,bridgeos|x86_64,arm64,arm64e): $CC kernel-export.c -o $BUILD_DIR/extensions/kernel-export-kext/kernel-export -Wl,-kext -Wl,-kext_objects_dir,$BUILD_DIR/KextObjects -mkernel -nostdlib -Wl,-add_split_seg_info -Wl,-install_name,/usr/lib/swift/split.seg.v2.hack -fno-ptrauth-function-pointer-type-discrimination
// BUILD(macos,ios,tvos,bridgeos|x86_64,arm64,arm64e): $APP_CACHE_UTIL -create-kernel-collection $BUILD_DIR/kernel.kc -kernel $BUILD_DIR/kernel-auxkc-fixups.exe -extensions $BUILD_DIR/extensions -bundle-id com.apple.kernel.export $DEPENDS_ON $BUILD_DIR/extensions/kernel-export-kext/Info.plist $DEPENDS_ON $BUILD_DIR/extensions/kernel-export-kext/kernel-export
--- /dev/null
+
+#include <new>
+
+// Allocates an int via the global operator new and returns the pointer.
+// The test's main executable interposes operator new, so weak coalescing
+// should route this allocation through that replacement definition.
+extern "C" void* foo() {
+    return new int(1);
+}
--- /dev/null
+
+// BUILD: $CC foo.cpp -lc++ -dynamiclib -install_name $RUN_DIR/libfoo.dylib -o $BUILD_DIR/libfoo.dylib
+// BUILD: $CC main.cpp -lc++ -o $BUILD_DIR/weak-coalesce-dlopen.exe -DRUN_DIR="$RUN_DIR"
+
+// RUN: ./weak-coalesce-dlopen.exe
+
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <dlfcn.h>
+#include <new>
+
+#include "test_support.h"
+
+extern void* foo();
+
+// Most recent pointer handed out by our replacement operator new below.
+// main() compares allocation results against this to prove the interposed
+// definition was the one actually called.
+void* lastAllocatedValue = NULL;
+
+// Replace the global operator new.  Weak-def coalescing should make every
+// image in the process — including the dlopen'ed libfoo.dylib — resolve
+// operator new to this definition instead of the one in libc++.
+// NOTE(review): unlike the standard replaceable operator new, this returns
+// NULL on malloc failure instead of throwing std::bad_alloc; acceptable
+// for a test, not for production code.
+void* operator new(size_t size) {
+    lastAllocatedValue = malloc(size);
+    return lastAllocatedValue;
+}
+
+// Verifies weak-symbol coalescing across dlopen: the operator new interposed
+// above must service allocations made both by this executable directly and
+// by code in a dylib loaded later via dlopen (libfoo.dylib's foo()).
+int main()
+{
+    // The value we allocate should come from our new function
+    int* value1 = new int(1);
+    if ( value1 != lastAllocatedValue ) {
+        FAIL("value1 (%p) != lastAllocatedValue (%p)", value1, lastAllocatedValue);
+    }
+
+    // dlopen foo which defines "foo"
+    // In dyld2, for chained fixups, this will run weakBindOld which patches the cache for
+    // weak defs. That patching will fail if the cache uses __DATA_CONST and was not marked as
+    // RW prior to patching
+    void* handle = dlopen(RUN_DIR "/libfoo.dylib", RTLD_FIRST);
+    if ( handle == NULL ) {
+        FAIL("dlopen(\"%s\") failed with: %s", RUN_DIR "/libfoo.dylib", dlerror());
+    }
+
+    const void* symFoo = dlsym(handle, "foo");
+    if ( symFoo == NULL ) {
+        FAIL("dlsym(handle, foo) failed");
+    }
+
+    // The value foo allocates should come from our new function
+    // (cast the dlsym result to foo's declared signature before calling)
+    void* value2 = ((__typeof(&foo))symFoo)();
+    if ( value2 != lastAllocatedValue ) {
+        FAIL("value2 (%p) != lastAllocatedValue (%p)", value2, lastAllocatedValue);
+    }
+
+    PASS("weak-coalesce-dlopen");
+}
+
--- /dev/null
+
+// BUILD: $CC main.cpp -lc++ -o $BUILD_DIR/weak-override-shared-cache.exe
+
+// RUN: ./weak-override-shared-cache.exe
+
+
+#include <stdexcept>
+#include <stdio.h>
+
+// Hack to get a strong definition of this symbol: the __asm alias gives this
+// variable the mangled name of libc++'s typeinfo for std::nested_exception
+// (__ZTISt16nested_exception), so this executable exports a strong (non-weak)
+// definition that should win over the weak one coming from the shared cache.
+__attribute__((used))
+void* hack __asm("__ZTISt16nested_exception");
+
+#include "test_support.h"
+
+// Throw/catch std::nested_exception* — matching the handler requires the
+// runtime to compare typeinfo for std::nested_exception, which this test
+// overrides with the strong "hack" definition above.  Presumably this checks
+// that the weak-def override of a shared-cache symbol still leaves exception
+// matching working — confirm against the dyld weak-coalescing patch logic.
+// NOTE(review): no return statement after the catch; PASS/FAIL from
+// test_support.h appear to terminate the process — verify they are noreturn.
+int main(int argc, const char* argv[], const char* envp[], const char* apple[]) {
+    try {
+        throw new std::nested_exception();
+    } catch (std::nested_exception* e) {
+        PASS("Success");
+    }
+    FAIL("Expected exception to be thrown");
+}
+