#include <sys/mount.h>
#include <sys/sysctl.h>
#include <libkern/OSAtomic.h>
+#include <string_view>
+#include <atomic>
+
#include "Tracing.h"
uint32_t ImageLoader::fgTotalPossibleLazyBindFixups = 0;
uint32_t ImageLoader::fgTotalSegmentsMapped = 0;
uint64_t ImageLoader::fgTotalBytesMapped = 0;
-uint64_t ImageLoader::fgTotalBytesPreFetched = 0;
uint64_t ImageLoader::fgTotalLoadLibrariesTime = 0;
uint64_t ImageLoader::fgTotalObjCSetupTime = 0;
uint64_t ImageLoader::fgTotalDebuggerPausedTime = 0;
ImageLoader::ImageLoader(const char* path, unsigned int libCount)
- : fPath(path), fRealPath(NULL), fDevice(0), fInode(0), fLastModified(0),
+ : fPath(path), fRealPath(NULL), fDevice(0), fInode(0), fLastModified(0),
fPathHash(0), fDlopenReferenceCount(0), fInitializerRecursiveLock(NULL),
- fDepth(0), fLoadOrder(fgLoadOrdinal++), fState(0), fLibraryCount(libCount),
- fAllLibraryChecksumsAndLoadAddressesMatch(false), fLeaveMapped(false), fNeverUnload(false),
+ fLoadOrder(fgLoadOrdinal++), fDepth(0), fObjCMappedNotified(false), fState(0), fLibraryCount(libCount),
+ fMadeReadOnly(false), fAllLibraryChecksumsAndLoadAddressesMatch(false), fLeaveMapped(false), fNeverUnload(false),
fHideSymbols(false), fMatchByInstallName(false),
fInterposed(false), fRegisteredDOF(false), fAllLazyPointersBound(false),
fBeingRemoved(false), fAddFuncNotified(false),
fPathOwnedByImage(false), fIsReferencedDownward(false),
fWeakSymbolsBound(false)
{
+#if __x86_64__
+ fAotPath = NULL;
+#endif
if ( fPath != NULL )
fPathHash = hash(fPath);
	if ( libCount > 512 )
		dyld::throwf("too many dependent dylibs in %s", path);
}

ImageLoader::~ImageLoader()
{
	if ( fRealPath != NULL )
		delete [] fRealPath;
if ( fPathOwnedByImage && (fPath != NULL) )
delete [] fPath;
+#if __x86_64__
+ if ( fAotPath != NULL )
+ delete [] fAotPath;
+#endif
}
void ImageLoader::setFileInfo(dev_t device, ino_t inode, time_t modDate)
{
	fDevice = device;
	fInode = inode;
	fLastModified = modDate;
}

void ImageLoader::setPath(const char* path)
{
- if ( fPathOwnedByImage && (fPath != NULL) )
+ if ( fPathOwnedByImage && (fPath != NULL) )
delete [] fPath;
fPath = new char[strlen(path)+1];
strcpy((char*)fPath, path);
fPathOwnedByImage = true; // delete fPath when this image is destructed
fPathHash = hash(fPath);
- fRealPath = NULL;
+ if ( fRealPath != NULL ) {
+ delete [] fRealPath;
+ fRealPath = NULL;
+ }
}
void ImageLoader::setPathUnowned(const char* path)
{
	if ( fPathOwnedByImage && (fPath != NULL) ) {
		delete [] fPath;
}
fPath = path;
- fPathOwnedByImage = false;
+ fPathOwnedByImage = false;
fPathHash = hash(fPath);
}
strcpy((char*)fRealPath, realPath);
}
-const char* ImageLoader::getRealPath() const
-{
- if ( fRealPath != NULL )
+
+const char* ImageLoader::getRealPath() const
+{
+ if ( fRealPath != NULL )
return fRealPath;
else
- return fPath;
+ return fPath;
}
-
uint32_t ImageLoader::hash(const char* path)
{
// this does not need to be a great hash
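	// A cheap multiplicative string hash is enough: it only exists to
	// short-circuit strcmp() calls when comparing image paths. Illustrative
	// sketch of a sufficient body (not necessarily the exact one dyld uses):
	//
	//   uint32_t h = 0;
	//   for (const char* s = path; *s != '\0'; ++s)
	//       h = h*5 + *s;
	//   return h;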
//dyld::log(" interposedAddress: replacee=0x%08llX, replacement=0x%08llX, neverImage=%p, onlyImage=%p, inImage=%p\n",
// (uint64_t)it->replacee, (uint64_t)it->replacement, it->neverImage, it->onlyImage, inImage);
// replace all references to 'replacee' with 'replacement'
- if ( (address == it->replacee) && (inImage != it->neverImage) && ((it->onlyImage == NULL) || (inImage == it->onlyImage)) ) {
+ if ( (address == it->replacee) && (it->neverImage != inImage) && ((it->onlyImage == NULL) || (it->onlyImage == inImage)) ) {
if ( context.verboseInterposing ) {
dyld::log("dyld interposing: replace 0x%lX with 0x%lX\n", it->replacee, it->replacement);
}
}
void ImageLoader::applyInterposingToDyldCache(const LinkContext& context) {
-#if USES_CHAINED_BINDS
if (!context.dyldCache)
return;
+ if (!context.dyldCache->header.builtFromChainedFixups)
+ return;
if (fgInterposingTuples.empty())
return;
+
+ // make the cache writable for this block
+ DyldSharedCache::DataConstScopedWriter patcher(context.dyldCache, mach_task_self(), (context.verboseMapping ? &dyld::log : nullptr));
+
// For each of the interposed addresses, see if any of them are in the shared cache. If so, find
// that image and apply its patch table to all uses.
uintptr_t cacheStart = (uintptr_t)context.dyldCache;
dyld3::closure::ImageNum imageInCache = imageIndex+1;
if ( context.verboseInterposing )
dyld::log("dyld: interpose: Found shared cache image %d for 0x%08llx\n", imageInCache, (uint64_t)it->replacee);
- const dyld3::closure::Image* image = context.dyldCache->cachedDylibsImageArray()->imageForNum(imageInCache);
- image->forEachPatchableExport(^(uint32_t cacheOffsetOfImpl, const char* exportName) {
+ context.dyldCache->forEachPatchableExport(imageIndex, ^(uint32_t cacheOffsetOfImpl, const char* exportName) {
// Skip patching anything other than this symbol
if (cacheOffsetOfImpl != cacheOffsetOfReplacee)
return;
- if ( context.verboseInterposing )
+ if ( context.verboseInterposing ) {
+ const dyld3::closure::Image* image = context.dyldCache->cachedDylibsImageArray()->imageForNum(imageInCache);
dyld::log("dyld: interpose: Patching uses of symbol %s in shared cache binary at %s\n", exportName, image->path());
+ }
uintptr_t newLoc = it->replacement;
- image->forEachPatchableUseOfExport(cacheOffsetOfImpl, ^(dyld3::closure::Image::PatchableExport::PatchLocation patchLocation) {
+ context.dyldCache->forEachPatchableUseOfExport(imageIndex, cacheOffsetOfImpl, ^(dyld_cache_patchable_location patchLocation) {
uintptr_t* loc = (uintptr_t*)(cacheStart+patchLocation.cacheOffset);
#if __has_feature(ptrauth_calls)
if ( patchLocation.authenticated ) {
- dyld3::MachOLoaded::ChainedFixupPointerOnDisk fixupInfo;
- fixupInfo.authRebase.auth = true;
- fixupInfo.authRebase.addrDiv = patchLocation.usesAddressDiversity;
- fixupInfo.authRebase.diversity = patchLocation.discriminator;
- fixupInfo.authRebase.key = patchLocation.key;
- *loc = fixupInfo.signPointer(loc, newLoc + patchLocation.getAddend());
+ dyld3::MachOLoaded::ChainedFixupPointerOnDisk ptr = *(dyld3::MachOLoaded::ChainedFixupPointerOnDisk*)loc;
+ ptr.arm64e.authRebase.auth = true;
+ ptr.arm64e.authRebase.addrDiv = patchLocation.usesAddressDiversity;
+ ptr.arm64e.authRebase.diversity = patchLocation.discriminator;
+ ptr.arm64e.authRebase.key = patchLocation.key;
+ *loc = ptr.arm64e.signPointer(loc, newLoc + DyldSharedCache::getAddend(patchLocation));
if ( context.verboseInterposing )
dyld::log("dyld: interpose: *%p = %p (JOP: diversity 0x%04X, addr-div=%d, key=%s)\n",
- loc, (void*)*loc, patchLocation.discriminator, patchLocation.usesAddressDiversity, patchLocation.keyName());
+ loc, (void*)*loc, patchLocation.discriminator, patchLocation.usesAddressDiversity, DyldSharedCache::keyName(patchLocation));
return;
}
#endif
if ( context.verboseInterposing )
- dyld::log("dyld: interpose: *%p = 0x%0llX (dyld cache patch) to %s\n", loc, newLoc + patchLocation.getAddend(), exportName);
- *loc = newLoc + patchLocation.getAddend();
+ dyld::log("dyld: interpose: *%p = 0x%0llX (dyld cache patch) to %s\n", loc, newLoc + DyldSharedCache::getAddend(patchLocation), exportName);
+ *loc = newLoc + (uintptr_t)DyldSharedCache::getAddend(patchLocation);
});
});
}
-#endif
}
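
// For context: the tuples consumed above are produced by client dylibs as
// (replacement, replacee) pairs in a __DATA,__interpose section, in the
// shape of the DYLD_INTERPOSE macro from <mach-o/dyld-interposing.h>.
// Minimal sketch of such a client (my_malloc is hypothetical):
//
//   static void* my_malloc(size_t size)
//   {
//       void* p = malloc(size);
//       fprintf(stderr, "malloc(%zu) -> %p\n", size, p);
//       return p;
//   }
//
//   __attribute__((used)) static const struct { const void* replacement; const void* replacee; }
//       sMallocInterpose __attribute__((section("__DATA,__interpose")))
//       = { (const void*)&my_malloc, (const void*)&malloc };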
void ImageLoader::addDynamicInterposingTuples(const struct dyld_interpose_tuple array[], size_t count)
// <rdar://problem/29099600> dyld should tell the kernel when it is doing root fix-ups
void ImageLoader::vmAccountingSetSuspended(const LinkContext& context, bool suspend)
{
-#if __arm__ || __arm64__
+#if TARGET_OS_IPHONE && !TARGET_OS_SIMULATOR
static bool sVmAccountingSuspended = false;
if ( suspend == sVmAccountingSuspended )
return;
uint64_t t1 = mach_absolute_time();
context.clearAllDepths();
- this->recursiveUpdateDepth(context.imageCount());
+ this->updateDepth(context.imageCount());
__block uint64_t t2, t3, t4, t5;
{
dyld3::ScopedTimer(DBG_DYLD_TIMING_APPLY_FIXUPS, 0, 0, 0);
t2 = mach_absolute_time();
- this->recursiveRebase(context);
+ this->recursiveRebaseWithAccounting(context);
context.notifyBatch(dyld_image_state_rebased, false);
t3 = mach_absolute_time();
t5 = mach_absolute_time();
}
- if ( !context.linkingMainExecutable )
- context.notifyBatch(dyld_image_state_bound, false);
- uint64_t t6 = mach_absolute_time();
-
- std::vector<DOFInfo> dofs;
- this->recursiveGetDOFSections(context, dofs);
- context.registerDOFs(dofs);
- uint64_t t7 = mach_absolute_time();
-
// interpose any dynamically loaded images
if ( !context.linkingMainExecutable && (fgInterposingTuples.size() != 0) ) {
dyld3::ScopedTimer timer(DBG_DYLD_TIMING_APPLY_INTERPOSING, 0, 0, 0);
this->recursiveApplyInterposing(context);
}
+ // now that all fixups are done, make __DATA_CONST segments read-only
+ if ( !context.linkingMainExecutable )
+ this->recursiveMakeDataReadOnly(context);
+
+ if ( !context.linkingMainExecutable )
+ context.notifyBatch(dyld_image_state_bound, false);
+ uint64_t t6 = mach_absolute_time();
+
+ if ( context.registerDOFs != NULL ) {
+ std::vector<DOFInfo> dofs;
+ this->recursiveGetDOFSections(context, dofs);
+ context.registerDOFs(dofs);
+ }
+ uint64_t t7 = mach_absolute_time();
+
// clear error strings
(*context.setErrorStrings)(0, NULL, NULL, NULL);
// Calling recursive init on all images in images list, building a new list of
// uninitialized upward dependencies.
for (uintptr_t i=0; i < images.count; ++i) {
- images.images[i]->recursiveInitialization(context, thisThread, images.images[i]->getPath(), timingInfo, ups);
+ images.imagesAndPaths[i].first->recursiveInitialization(context, thisThread, images.imagesAndPaths[i].second, timingInfo, ups);
}
// If any upward dependencies remain, init them.
if ( ups.count > 0 )
mach_port_t thisThread = mach_thread_self();
ImageLoader::UninitedUpwards up;
up.count = 1;
- up.images[0] = this;
+ up.imagesAndPaths[0] = { this, this->getPath() };
processInitializers(context, thisThread, timingInfo, up);
context.notifyBatch(dyld_image_state_initialized, false);
mach_port_deallocate(mach_task_self(), thisThread);
}
}
// bind lazies in this image
- this->doBindJustLazies(context);
+ DyldSharedCache::DataConstLazyScopedWriter patcher(context.dyldCache, mach_task_self(), context.verboseMapping ? &dyld::log : nullptr);
+ this->doBindJustLazies(context, patcher);
}
}
}
-unsigned int ImageLoader::recursiveUpdateDepth(unsigned int maxDepth)
+unsigned int ImageLoader::updateDepth(unsigned int maxDepth)
+{
+ STACK_ALLOC_ARRAY(ImageLoader*, danglingUpwards, maxDepth);
+ unsigned int depth = this->recursiveUpdateDepth(maxDepth, danglingUpwards);
+ for (auto& danglingUpward : danglingUpwards) {
+ if ( danglingUpward->fDepth != 0)
+ continue;
+ danglingUpward->recursiveUpdateDepth(maxDepth, danglingUpwards);
+ }
+ return depth;
+}
+
+unsigned int ImageLoader::recursiveUpdateDepth(unsigned int maxDepth, dyld3::Array<ImageLoader*>& danglingUpwards)
{
	// the purpose of this phase is to make the images sortable such that
	// in a sorted list of images, every image that an image depends on
	// occurs in the list before it
unsigned int minDependentDepth = maxDepth;
for(unsigned int i=0; i < libraryCount(); ++i) {
ImageLoader* dependentImage = libImage(i);
- if ( (dependentImage != NULL) && !libIsUpward(i) ) {
- unsigned int d = dependentImage->recursiveUpdateDepth(maxDepth);
- if ( d < minDependentDepth )
- minDependentDepth = d;
+ if ( dependentImage != NULL ) {
+ if ( libIsUpward(i) ) {
+ if ( dependentImage->fDepth == 0) {
+ if ( !danglingUpwards.contains(dependentImage) )
+ danglingUpwards.push_back(dependentImage);
+ }
+ } else {
+ unsigned int d = dependentImage->recursiveUpdateDepth(maxDepth, danglingUpwards);
+ if ( d < minDependentDepth )
+ minDependentDepth = d;
+ }
+ }
+ // <rdar://problem/60878811> make sure need to re-bind propagates up
+ if ( dependentImage != NULL ) {
+ if ( fAllLibraryChecksumsAndLoadAddressesMatch && !dependentImage->fAllLibraryChecksumsAndLoadAddressesMatch ) {
+ fAllLibraryChecksumsAndLoadAddressesMatch = false;
+ }
}
}
-
	// make me less deep than all my dependents
fDepth = minDependentDepth - 1;
+
}
-
return fDepth;
}
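
// A toy model of the invariant recursiveUpdateDepth() establishes: starting
// from depth 0 (unvisited), every image ends up strictly less deep than the
// images it depends on, so sorting by descending depth lists dependencies
// ahead of their clients. Hypothetical stand-alone sketch, not dyld's types:
struct ExampleImage { ExampleImage* deps[8]; unsigned depCount; unsigned depth; };

static unsigned exampleAssignDepth(ExampleImage& img, unsigned maxDepth)
{
	if ( img.depth != 0 )
		return img.depth;                // already visited; also breaks cycles
	img.depth = maxDepth;                // provisional value while recursing
	unsigned minDependentDepth = maxDepth;
	for (unsigned i = 0; i < img.depCount; ++i) {
		unsigned d = exampleAssignDepth(*img.deps[i], maxDepth);
		if ( d < minDependentDepth )
			minDependentDepth = d;
	}
	img.depth = minDependentDepth - 1;   // strictly less deep than every dependency
	return img.depth;
}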
for(unsigned int i=0; i < fLibraryCount; ++i){
ImageLoader* dependentLib;
bool depLibReExported = false;
- bool depLibRequired = false;
- bool depLibCheckSumsMatch = false;
DependentLibraryInfo& requiredLibInfo = libraryInfos[i];
if ( preflightOnly && context.inSharedCache(requiredLibInfo.name) ) {
// <rdar://problem/5910137> dlopen_preflight() on image in shared cache leaves it loaded but not objc initialized
}
try {
unsigned cacheIndex;
- bool enforceIOSMac = false;
- #if __MAC_OS_X_VERSION_MIN_REQUIRED
- const dyld3::MachOFile* mf = (dyld3::MachOFile*)this->machHeader();
- if ( mf->supportsPlatform(dyld3::Platform::iOSMac) && !mf->supportsPlatform(dyld3::Platform::macOS) )
- enforceIOSMac = true;
- #endif
- dependentLib = context.loadLibrary(requiredLibInfo.name, true, this->getPath(), &thisRPaths, enforceIOSMac, cacheIndex);
+ dependentLib = context.loadLibrary(requiredLibInfo.name, true, this->getPath(), &thisRPaths, cacheIndex);
if ( dependentLib == this ) {
		// found circular reference, perhaps DYLD_LIBRARY_PATH is causing this rdar://problem/3684168
- dependentLib = context.loadLibrary(requiredLibInfo.name, false, NULL, NULL, enforceIOSMac, cacheIndex);
+ dependentLib = context.loadLibrary(requiredLibInfo.name, false, NULL, NULL, cacheIndex);
if ( dependentLib != this )
dyld::warn("DYLD_ setting caused circular dependency in %s\n", this->getPath());
}
dependentLib->fIsReferencedDownward = true;
}
LibraryInfo actualInfo = dependentLib->doGetLibraryInfo(requiredLibInfo.info);
- depLibRequired = requiredLibInfo.required;
- depLibCheckSumsMatch = ( actualInfo.checksum == requiredLibInfo.info.checksum );
depLibReExported = requiredLibInfo.reExported;
if ( ! depLibReExported ) {
// for pre-10.5 binaries that did not use LC_REEXPORT_DYLIB
// tell each to load its dependents
for(unsigned int i=0; i < libraryCount(); ++i) {
ImageLoader* dependentImage = libImage(i);
- if ( dependentImage != NULL ) {
+ if ( dependentImage != NULL ) {
dependentImage->recursiveLoadLibraries(context, preflightOnly, thisRPaths, libraryInfos[i].name);
}
}
-
// do deep prebind check
if ( fAllLibraryChecksumsAndLoadAddressesMatch ) {
for(unsigned int i=0; i < libraryCount(); ++i){
ImageLoader* dependentImage = libImage(i);
- if ( dependentImage != NULL ) {
+ if ( dependentImage != NULL ) {
if ( !dependentImage->allDependentLibrariesAsWhenPreBound() )
fAllLibraryChecksumsAndLoadAddressesMatch = false;
}
}
}
-
+
// free rpaths (getRPaths() malloc'ed each string)
for(std::vector<const char*>::iterator it=rpathsFromThisImage.begin(); it != rpathsFromThisImage.end(); ++it) {
const char* str = *it;
}
}
+
+void ImageLoader::recursiveRebaseWithAccounting(const LinkContext& context)
+{
+ this->recursiveRebase(context);
+ vmAccountingSetSuspended(context, false);
+}
+
void ImageLoader::recursiveRebase(const LinkContext& context)
{
if ( fState < dyld_image_state_rebased ) {
}
}
+void ImageLoader::recursiveMakeDataReadOnly(const LinkContext& context)
+{
+ if ( ! fMadeReadOnly ) {
+ // break cycles
+ fMadeReadOnly = true;
+
+ try {
+ // handle lower level libraries first
+ for(unsigned int i=0; i < libraryCount(); ++i) {
+ ImageLoader* dependentImage = libImage(i);
+ if ( dependentImage != NULL )
+ dependentImage->recursiveMakeDataReadOnly(context);
+ }
+
+ // if this image has __DATA_CONST, make that segment read-only
+ makeDataReadOnly();
+ }
+ catch (const char* msg) {
+ fMadeReadOnly = false;
+ throw;
+ }
+ }
+}
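
// What makeDataReadOnly() boils down to: once every rebase and bind has been
// written, write permission is dropped on the __DATA_CONST segment. Minimal
// sketch under that assumption (the helper and its parameters are
// hypothetical; only the current protection is changed, so the segment can
// be made writable again later for lazy binding or dlopen):
static kern_return_t exampleProtectDataConst(vm_address_t segStart, vm_size_t segSize)
{
	return vm_protect(mach_task_self(), segStart, segSize,
	                  false /*set current, not maximum, protection*/, VM_PROT_READ);
}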
+
+
void ImageLoader::recursiveBindWithAccounting(const LinkContext& context, bool forceLazysBound, bool neverUnload)
{
- this->recursiveBind(context, forceLazysBound, neverUnload);
+ this->recursiveBind(context, forceLazysBound, neverUnload, nullptr);
vmAccountingSetSuspended(context, false);
}
-void ImageLoader::recursiveBind(const LinkContext& context, bool forceLazysBound, bool neverUnload)
+void ImageLoader::recursiveBind(const LinkContext& context, bool forceLazysBound, bool neverUnload, const ImageLoader* parent)
{
// Normally just non-lazy pointers are bound immediately.
// The exceptions are:
// bind lower level libraries first
for(unsigned int i=0; i < libraryCount(); ++i) {
ImageLoader* dependentImage = libImage(i);
- if ( dependentImage != NULL )
- dependentImage->recursiveBind(context, forceLazysBound, neverUnload);
+ if ( dependentImage != NULL ) {
+ const ImageLoader* reExportParent = nullptr;
+ if ( libReExported(i) )
+ reExportParent = this;
+ dependentImage->recursiveBind(context, forceLazysBound, neverUnload, reExportParent);
+ }
}
// bind this image
- this->doBind(context, forceLazysBound);
+ this->doBind(context, forceLazysBound, parent);
// mark if lazys are also bound
if ( forceLazysBound || this->usablePrebinding(context) )
fAllLazyPointersBound = true;
}
+
// These are mangled symbols for all the variants of operator new and delete
// which a main executable can define (non-weak) and override the
// weak-def implementation in the OS.
-static const char* sTreatAsWeak[] = {
+static const char* const sTreatAsWeak[] = {
"__Znwm", "__ZnwmRKSt9nothrow_t",
"__Znam", "__ZnamRKSt9nothrow_t",
"__ZdlPv", "__ZdlPvRKSt9nothrow_t", "__ZdlPvm",
"__ZdaPvSt11align_val_t", "__ZdaPvSt11align_val_tRKSt9nothrow_t", "__ZdaPvmSt11align_val_t"
};
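
// These mangled names cover a main executable that overrides operator
// new/delete with ordinary non-weak definitions, which must still win over
// the weak OS copies, e.g. (__Znwm is operator new(unsigned long)):
//
//   void* operator new(size_t size)
//   {
//       if ( void* p = malloc(size) )
//           return p;
//       abort();
//   }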
+size_t ImageLoader::HashCString::hash(const char* v) {
+ // FIXME: Use hash<string_view> when it has the correct visibility markup
+ return std::hash<std::string_view>{}(v);
+}
+bool ImageLoader::EqualCString::equal(const char* s1, const char* s2) {
+ return strcmp(s1, s2) == 0;
+}
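
// These functors give the weak-def map content-based keys: two distinct
// const char* pointers to equal strings hash and compare as one entry.
// Illustrative check (exampleSameKey is hypothetical):
static bool exampleSameKey(const char* a, const char* b)
{
	return (ImageLoader::HashCString::hash(a) == ImageLoader::HashCString::hash(b))
	       && ImageLoader::EqualCString::equal(a, b);
}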
void ImageLoader::weakBind(const LinkContext& context)
{
+
+ if (!context.useNewWeakBind) {
+ weakBindOld(context);
+ return;
+ }
+
if ( context.verboseWeakBind )
dyld::log("dyld: weak bind start:\n");
uint64_t t1 = mach_absolute_time();
+
	// get set of ImageLoaders that participate in coalescing
ImageLoader* imagesNeedingCoalescing[fgImagesRequiringCoalescing];
unsigned imageIndexes[fgImagesRequiringCoalescing];
// don't need to do any coalescing if only one image has overrides, or all have already been done
if ( (countOfImagesWithWeakDefinitionsNotInSharedCache > 0) && (countNotYetWeakBound > 0) ) {
+ if (!context.weakDefMapInitialized) {
+ // Initialize the weak def map as the link context doesn't run static initializers
+ new (&context.weakDefMap) dyld3::Map<const char*, std::pair<const ImageLoader*, uintptr_t>, ImageLoader::HashCString, ImageLoader::EqualCString>();
+ context.weakDefMapInitialized = true;
+ }
+
+ // We might have to patch the shared cache __DATA_CONST. In that case, we'll create just a single
+ // patcher when needed.
+ DyldSharedCache::DataConstLazyScopedWriter patcher(context.dyldCache, mach_task_self(), context.verboseMapping ? &dyld::log : nullptr);
+
+#if TARGET_OS_OSX
+ // only do alternate algorithm for dlopen(). Use traditional algorithm for launch
+ if ( !context.linkingMainExecutable ) {
+ // Don't take the memory hit of weak defs on the launch path until we hit a dlopen with more weak symbols to bind
+ if (!context.weakDefMapProcessedLaunchDefs) {
+ context.weakDefMapProcessedLaunchDefs = true;
+
+ // Walk the nlist for all binaries from launch and fill in the map with any other weak defs
+ for (int i=0; i < count; ++i) {
+ const ImageLoader* image = imagesNeedingCoalescing[i];
+ // skip images without defs. We've processed launch time refs already
+ if ( !image->hasCoalescedExports() )
+ continue;
+					// Only process binaries which have had their weak symbols bound, i.e., not the new ones we are
+					// processing now from this dlopen
+ if ( !image->weakSymbolsBound(imageIndexes[i]) )
+ continue;
+
+ Diagnostics diag;
+ const dyld3::MachOAnalyzer* ma = (const dyld3::MachOAnalyzer*)image->machHeader();
+ ma->forEachWeakDef(diag, ^(const char *symbolName, uint64_t imageOffset, bool isFromExportTrie) {
+ uintptr_t targetAddr = (uintptr_t)ma + (uintptr_t)imageOffset;
+ if ( isFromExportTrie ) {
+ // Avoid duplicating the string if we already have the symbol name
+ if ( context.weakDefMap.find(symbolName) != context.weakDefMap.end() )
+ return;
+ symbolName = strdup(symbolName);
+ }
+ context.weakDefMap.insert({ symbolName, { image, targetAddr } });
+ });
+ }
+ }
+
+ // Walk the nlist for all binaries in dlopen and fill in the map with any other weak defs
+ for (int i=0; i < count; ++i) {
+ const ImageLoader* image = imagesNeedingCoalescing[i];
+ if ( image->weakSymbolsBound(imageIndexes[i]) )
+ continue;
+ // skip images without defs. We'll process refs later
+ if ( !image->hasCoalescedExports() )
+ continue;
+ Diagnostics diag;
+ const dyld3::MachOAnalyzer* ma = (const dyld3::MachOAnalyzer*)image->machHeader();
+ ma->forEachWeakDef(diag, ^(const char *symbolName, uint64_t imageOffset, bool isFromExportTrie) {
+ uintptr_t targetAddr = (uintptr_t)ma + (uintptr_t)imageOffset;
+ if ( isFromExportTrie ) {
+ // Avoid duplicating the string if we already have the symbol name
+ if ( context.weakDefMap.find(symbolName) != context.weakDefMap.end() )
+ return;
+ symbolName = strdup(symbolName);
+ }
+ context.weakDefMap.insert({ symbolName, { image, targetAddr } });
+ });
+ }
+ // for all images that need weak binding
+ for (int i=0; i < count; ++i) {
+ ImageLoader* imageBeingFixedUp = imagesNeedingCoalescing[i];
+ if ( imageBeingFixedUp->weakSymbolsBound(imageIndexes[i]) )
+ continue; // weak binding already completed
+ bool imageBeingFixedUpInCache = imageBeingFixedUp->inSharedCache();
+
+ if ( context.verboseWeakBind )
+ dyld::log("dyld: checking for weak symbols in %s\n", imageBeingFixedUp->getPath());
+ // for all symbols that need weak binding in this image
+ ImageLoader::CoalIterator coalIterator;
+ imageBeingFixedUp->initializeCoalIterator(coalIterator, i, imageIndexes[i]);
+ while ( !imageBeingFixedUp->incrementCoalIterator(coalIterator) ) {
+ const char* nameToCoalesce = coalIterator.symbolName;
+ uintptr_t targetAddr = 0;
+					const ImageLoader* targetImage = nullptr;
+					// Search the map for a previous definition to use
+ auto weakDefIt = context.weakDefMap.find(nameToCoalesce);
+ if ( (weakDefIt != context.weakDefMap.end()) && (weakDefIt->second.first != nullptr) ) {
+						// Found a previous definition
+ targetImage = weakDefIt->second.first;
+ targetAddr = weakDefIt->second.second;
+ } else {
+ // scan all images looking for definition to use
+ for (int j=0; j < count; ++j) {
+ const ImageLoader* anImage = imagesNeedingCoalescing[j];
+ bool anImageInCache = anImage->inSharedCache();
+ // <rdar://problem/47986398> Don't look at images in dyld cache because cache is
+ // already coalesced. Only images outside cache can potentially override something in cache.
+ if ( anImageInCache && imageBeingFixedUpInCache )
+ continue;
+
+ //dyld::log("looking for %s in %s\n", nameToCoalesce, anImage->getPath());
+ const ImageLoader* foundIn;
+ const Symbol* sym = anImage->findExportedSymbol(nameToCoalesce, false, &foundIn);
+ if ( sym != NULL ) {
+ targetAddr = foundIn->getExportedSymbolAddress(sym, context);
+ targetImage = foundIn;
+ if ( context.verboseWeakBind )
+ dyld::log("dyld: found weak %s at 0x%lX in %s\n", nameToCoalesce, targetAddr, foundIn->getPath());
+ break;
+ }
+ }
+ }
+ if ( (targetAddr != 0) && (coalIterator.image != targetImage) ) {
+ if ( coalIterator.image->inSharedCache() )
+ patcher.makeWriteable();
+ coalIterator.image->updateUsesCoalIterator(coalIterator, targetAddr, (ImageLoader*)targetImage, 0, context);
+ if (weakDefIt == context.weakDefMap.end()) {
+ if (targetImage->neverUnload()) {
+ // Add never unload defs to the map for next time
+ context.weakDefMap.insert({ nameToCoalesce, { targetImage, targetAddr } });
+ if ( context.verboseWeakBind ) {
+ dyld::log("dyld: weak binding adding %s to map\n", nameToCoalesce);
+ }
+ } else {
+ // Add a placeholder for unloadable symbols which makes us fall back to the regular search
+ context.weakDefMap.insert({ nameToCoalesce, { targetImage, targetAddr } });
+ if ( context.verboseWeakBind ) {
+ dyld::log("dyld: weak binding adding unloadable placeholder %s to map\n", nameToCoalesce);
+ }
+ }
+ }
+ if ( context.verboseWeakBind )
+ dyld::log("dyld: adjusting uses of %s in %s to use definition from %s\n", nameToCoalesce, coalIterator.image->getPath(), targetImage->getPath());
+ }
+ }
+ imageBeingFixedUp->setWeakSymbolsBound(imageIndexes[i]);
+ }
+ }
+ else
+#endif // TARGET_OS_OSX
+ {
// make symbol iterators for each
ImageLoader::CoalIterator iterators[count];
ImageLoader::CoalIterator* sortedIts[count];
nameToCoalesce, iterators[i].image->getIndexedShortName((unsigned)iterators[i].imageIndex),
targetAddr, targetImage->getIndexedShortName(targetImageIndex));
}
- if ( ! iterators[i].image->weakSymbolsBound(imageIndexes[i]) )
+ if ( ! iterators[i].image->weakSymbolsBound(imageIndexes[i]) ) {
+ if ( iterators[i].image->inSharedCache() )
+ patcher.makeWriteable();
iterators[i].image->updateUsesCoalIterator(iterators[i], targetAddr, targetImage, targetImageIndex, context);
+ }
iterators[i].symbolMatches = false;
}
}
+ if (targetImage->neverUnload()) {
+ // Add never unload defs to the map for next time
+ context.weakDefMap.insert({ nameToCoalesce, { targetImage, targetAddr } });
+ if ( context.verboseWeakBind ) {
+ dyld::log("dyld: weak binding adding %s to map\n",
+ nameToCoalesce);
+ }
+ }
}
}
}
-#if __arm64e__
for (int i=0; i < count; ++i) {
+ if ( imagesNeedingCoalescing[i]->weakSymbolsBound(imageIndexes[i]) )
+ continue; // skip images already processed
+
if ( imagesNeedingCoalescing[i]->usesChainedFixups() ) {
// during binding of references to weak-def symbols, the dyld cache was patched
		// but if the main executable has a non-weak override of operator new or delete, it needs to be handled here
- if ( !imagesNeedingCoalescing[i]->weakSymbolsBound(imageIndexes[i]) ) {
- for (const char* weakSymbolName : sTreatAsWeak) {
- const ImageLoader* dummy;
- imagesNeedingCoalescing[i]->resolveWeak(context, weakSymbolName, true, false, &dummy);
- }
+ for (const char* weakSymbolName : sTreatAsWeak) {
+ const ImageLoader* dummy;
+ imagesNeedingCoalescing[i]->resolveWeak(context, weakSymbolName, true, false, &dummy, patcher);
}
}
+#if __arm64e__
else {
+ // support traditional arm64 app on an arm64e device
// look for weak def symbols in this image which may override the cache
ImageLoader::CoalIterator coaler;
imagesNeedingCoalescing[i]->initializeCoalIterator(coaler, i, 0);
imagesNeedingCoalescing[i]->incrementCoalIterator(coaler);
while ( !coaler.done ) {
- imagesNeedingCoalescing[i]->incrementCoalIterator(coaler);
const ImageLoader* dummy;
// a side effect of resolveWeak() is to patch cache
- imagesNeedingCoalescing[i]->resolveWeak(context, coaler.symbolName, true, false, &dummy);
+ imagesNeedingCoalescing[i]->resolveWeak(context, coaler.symbolName, true, false, &dummy, patcher);
+ imagesNeedingCoalescing[i]->incrementCoalIterator(coaler);
}
}
- }
#endif
+ }
// mark all as having all weak symbols bound
for(int i=0; i < count; ++i) {
imagesNeedingCoalescing[i]->setWeakSymbolsBound(imageIndexes[i]);
}
+ }
}
uint64_t t2 = mach_absolute_time();
}
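
// What weak coalescing guarantees: a weak definition emitted into several
// images collapses to a single address process-wide. For example, a header
//
//   inline int nextID() { static int n = 0; return ++n; }
//
// compiles into a weak definition of nextID() (and its guarded static) in
// every dylib that uses it; weakBind() picks one definition and repoints all
// other images at it, so 'n' stays unique in the process.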
+void ImageLoader::weakBindOld(const LinkContext& context)
+{
+ if ( context.verboseWeakBind )
+ dyld::log("dyld: weak bind start:\n");
+ uint64_t t1 = mach_absolute_time();
+	// get set of ImageLoaders that participate in coalescing
+ ImageLoader* imagesNeedingCoalescing[fgImagesRequiringCoalescing];
+ unsigned imageIndexes[fgImagesRequiringCoalescing];
+ int count = context.getCoalescedImages(imagesNeedingCoalescing, imageIndexes);
+
+ // count how many have not already had weakbinding done
+ int countNotYetWeakBound = 0;
+ int countOfImagesWithWeakDefinitionsNotInSharedCache = 0;
+ for(int i=0; i < count; ++i) {
+ if ( ! imagesNeedingCoalescing[i]->weakSymbolsBound(imageIndexes[i]) )
+ ++countNotYetWeakBound;
+ if ( ! imagesNeedingCoalescing[i]->inSharedCache() )
+ ++countOfImagesWithWeakDefinitionsNotInSharedCache;
+ }
+
+ // don't need to do any coalescing if only one image has overrides, or all have already been done
+ if ( (countOfImagesWithWeakDefinitionsNotInSharedCache > 0) && (countNotYetWeakBound > 0) ) {
+ // We might have to patch the shared cache __DATA_CONST. In that case, we'll create just a single
+ // patcher when needed.
+ DyldSharedCache::DataConstLazyScopedWriter patcher(context.dyldCache, mach_task_self(), context.verboseMapping ? &dyld::log : nullptr);
+
+#if TARGET_OS_OSX
+ // only do alternate algorithm for dlopen(). Use traditional algorithm for launch
+ if ( !context.linkingMainExecutable ) {
+ // for all images that need weak binding
+ for (int i=0; i < count; ++i) {
+ ImageLoader* imageBeingFixedUp = imagesNeedingCoalescing[i];
+ if ( imageBeingFixedUp->weakSymbolsBound(imageIndexes[i]) )
+ continue; // weak binding already completed
+ bool imageBeingFixedUpInCache = imageBeingFixedUp->inSharedCache();
+
+ if ( context.verboseWeakBind )
+ dyld::log("dyld: checking for weak symbols in %s\n", imageBeingFixedUp->getPath());
+ // for all symbols that need weak binding in this image
+ ImageLoader::CoalIterator coalIterator;
+ imageBeingFixedUp->initializeCoalIterator(coalIterator, i, imageIndexes[i]);
+ while ( !imageBeingFixedUp->incrementCoalIterator(coalIterator) ) {
+ const char* nameToCoalesce = coalIterator.symbolName;
+ uintptr_t targetAddr = 0;
+					const ImageLoader* targetImage = nullptr;
+ // scan all images looking for definition to use
+ for (int j=0; j < count; ++j) {
+ const ImageLoader* anImage = imagesNeedingCoalescing[j];
+ bool anImageInCache = anImage->inSharedCache();
+ // <rdar://problem/47986398> Don't look at images in dyld cache because cache is
+ // already coalesced. Only images outside cache can potentially override something in cache.
+ if ( anImageInCache && imageBeingFixedUpInCache )
+ continue;
+
+ //dyld::log("looking for %s in %s\n", nameToCoalesce, anImage->getPath());
+ const ImageLoader* foundIn;
+ const Symbol* sym = anImage->findExportedSymbol(nameToCoalesce, false, &foundIn);
+ if ( sym != NULL ) {
+ if ( (foundIn->getExportedSymbolInfo(sym) & ImageLoader::kWeakDefinition) == 0 ) {
+ // found non-weak def, use it and stop looking
+ targetAddr = foundIn->getExportedSymbolAddress(sym, context);
+ targetImage = foundIn;
+ if ( context.verboseWeakBind )
+ dyld::log("dyld: found strong %s at 0x%lX in %s\n", nameToCoalesce, targetAddr, foundIn->getPath());
+ break;
+ }
+ else {
+ // found weak-def, only use if no weak found yet
+ if ( targetAddr == 0 ) {
+ targetAddr = foundIn->getExportedSymbolAddress(sym, context);
+ targetImage = foundIn;
+ if ( context.verboseWeakBind )
+ dyld::log("dyld: found weak %s at 0x%lX in %s\n", nameToCoalesce, targetAddr, foundIn->getPath());
+ }
+ }
+ }
+ }
+ if ( (targetAddr != 0) && (coalIterator.image != targetImage) ) {
+ if ( coalIterator.image->inSharedCache() )
+ patcher.makeWriteable();
+ coalIterator.image->updateUsesCoalIterator(coalIterator, targetAddr, (ImageLoader*)targetImage, 0, context);
+ if ( context.verboseWeakBind )
+ dyld::log("dyld: adjusting uses of %s in %s to use definition from %s\n", nameToCoalesce, coalIterator.image->getPath(), targetImage->getPath());
+ }
+ }
+ imageBeingFixedUp->setWeakSymbolsBound(imageIndexes[i]);
+ }
+ }
+ else
+#endif // TARGET_OS_OSX
+ {
+ // make symbol iterators for each
+ ImageLoader::CoalIterator iterators[count];
+ ImageLoader::CoalIterator* sortedIts[count];
+ for(int i=0; i < count; ++i) {
+ imagesNeedingCoalescing[i]->initializeCoalIterator(iterators[i], i, imageIndexes[i]);
+ sortedIts[i] = &iterators[i];
+ if ( context.verboseWeakBind )
+ dyld::log("dyld: weak bind load order %d/%d for %s\n", i, count, imagesNeedingCoalescing[i]->getIndexedPath(imageIndexes[i]));
+ }
+
+ // walk all symbols keeping iterators in sync by
+ // only ever incrementing the iterator with the lowest symbol
+ int doneCount = 0;
+ while ( doneCount != count ) {
+ //for(int i=0; i < count; ++i)
+ // dyld::log("sym[%d]=%s ", sortedIts[i]->loadOrder, sortedIts[i]->symbolName);
+ //dyld::log("\n");
+ // increment iterator with lowest symbol
+ if ( sortedIts[0]->image->incrementCoalIterator(*sortedIts[0]) )
+ ++doneCount;
+ // re-sort iterators
+ for(int i=1; i < count; ++i) {
+ int result = strcmp(sortedIts[i-1]->symbolName, sortedIts[i]->symbolName);
+ if ( result == 0 )
+ sortedIts[i-1]->symbolMatches = true;
+ if ( result > 0 ) {
+					// new one is bigger than next, so swap
+ ImageLoader::CoalIterator* temp = sortedIts[i-1];
+ sortedIts[i-1] = sortedIts[i];
+ sortedIts[i] = temp;
+ }
+ if ( result < 0 )
+ break;
+ }
+ // process all matching symbols just before incrementing the lowest one that matches
+ if ( sortedIts[0]->symbolMatches && !sortedIts[0]->done ) {
+ const char* nameToCoalesce = sortedIts[0]->symbolName;
+ // pick first symbol in load order (and non-weak overrides weak)
+ uintptr_t targetAddr = 0;
+ ImageLoader* targetImage = NULL;
+ unsigned targetImageIndex = 0;
+ for(int i=0; i < count; ++i) {
+ if ( strcmp(iterators[i].symbolName, nameToCoalesce) == 0 ) {
+ if ( context.verboseWeakBind )
+ dyld::log("dyld: weak bind, found %s weak=%d in %s \n", nameToCoalesce, iterators[i].weakSymbol, iterators[i].image->getIndexedPath((unsigned)iterators[i].imageIndex));
+ if ( iterators[i].weakSymbol ) {
+ if ( targetAddr == 0 ) {
+ targetAddr = iterators[i].image->getAddressCoalIterator(iterators[i], context);
+ if ( targetAddr != 0 ) {
+ targetImage = iterators[i].image;
+ targetImageIndex = (unsigned)iterators[i].imageIndex;
+ }
+ }
+ }
+ else {
+ targetAddr = iterators[i].image->getAddressCoalIterator(iterators[i], context);
+ if ( targetAddr != 0 ) {
+ targetImage = iterators[i].image;
+ targetImageIndex = (unsigned)iterators[i].imageIndex;
+ // strong implementation found, stop searching
+ break;
+ }
+ }
+ }
+ }
+ // tell each to bind to this symbol (unless already bound)
+ if ( targetAddr != 0 ) {
+ if ( context.verboseWeakBind ) {
+ dyld::log("dyld: weak binding all uses of %s to copy from %s\n",
+ nameToCoalesce, targetImage->getIndexedShortName(targetImageIndex));
+ }
+ for(int i=0; i < count; ++i) {
+ if ( strcmp(iterators[i].symbolName, nameToCoalesce) == 0 ) {
+ if ( context.verboseWeakBind ) {
+ dyld::log("dyld: weak bind, setting all uses of %s in %s to 0x%lX from %s\n",
+ nameToCoalesce, iterators[i].image->getIndexedShortName((unsigned)iterators[i].imageIndex),
+ targetAddr, targetImage->getIndexedShortName(targetImageIndex));
+ }
+ if ( ! iterators[i].image->weakSymbolsBound(imageIndexes[i]) ) {
+ if ( iterators[i].image->inSharedCache() )
+ patcher.makeWriteable();
+ iterators[i].image->updateUsesCoalIterator(iterators[i], targetAddr, targetImage, targetImageIndex, context);
+ }
+ iterators[i].symbolMatches = false;
+ }
+ }
+ }
+
+ }
+ }
+
+ for (int i=0; i < count; ++i) {
+ if ( imagesNeedingCoalescing[i]->weakSymbolsBound(imageIndexes[i]) )
+ continue; // skip images already processed
+
+ if ( imagesNeedingCoalescing[i]->usesChainedFixups() ) {
+ // during binding of references to weak-def symbols, the dyld cache was patched
+			// but if the main executable has a non-weak override of operator new or delete, it needs to be handled here
+ for (const char* weakSymbolName : sTreatAsWeak) {
+ const ImageLoader* dummy;
+ imagesNeedingCoalescing[i]->resolveWeak(context, weakSymbolName, true, false, &dummy, patcher);
+ }
+ }
+#if __arm64e__
+ else {
+ // support traditional arm64 app on an arm64e device
+ // look for weak def symbols in this image which may override the cache
+ patcher.makeWriteable();
+ ImageLoader::CoalIterator coaler;
+ imagesNeedingCoalescing[i]->initializeCoalIterator(coaler, i, 0);
+ imagesNeedingCoalescing[i]->incrementCoalIterator(coaler);
+ while ( !coaler.done ) {
+ const ImageLoader* dummy;
+ // a side effect of resolveWeak() is to patch cache
+ imagesNeedingCoalescing[i]->resolveWeak(context, coaler.symbolName, true, false, &dummy, patcher);
+ imagesNeedingCoalescing[i]->incrementCoalIterator(coaler);
+ }
+ }
+#endif
+ }
+
+ // mark all as having all weak symbols bound
+ for(int i=0; i < count; ++i) {
+ imagesNeedingCoalescing[i]->setWeakSymbolsBound(imageIndexes[i]);
+ }
+ }
+ }
+
+ uint64_t t2 = mach_absolute_time();
+ fgTotalWeakBindTime += t2 - t1;
+
+ if ( context.verboseWeakBind )
+ dyld::log("dyld: weak bind end\n");
+}
+
+
void ImageLoader::recursiveGetDOFSections(const LinkContext& context, std::vector<DOFInfo>& dofs)
{
{
// try to set image's ivar fInitializerRecursiveLock to point to this lock_info
// keep trying until success (spin)
+#pragma clang diagnostic push
+#pragma clang diagnostic ignored "-Wdeprecated-declarations"
while ( ! OSAtomicCompareAndSwapPtrBarrier(NULL, &rlock, (void**)&fInitializerRecursiveLock) ) {
		// if fInitializerRecursiveLock already points to a different lock_info and it is for
		// the same thread we are on, then increment the lock count; otherwise continue to spin
if ( (fInitializerRecursiveLock != NULL) && (fInitializerRecursiveLock->thread == rlock.thread) )
break;
}
- ++(fInitializerRecursiveLock->count);
+#pragma clang diagnostic pop
+ ++(fInitializerRecursiveLock->count);
}
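
// The deprecated OSAtomicCompareAndSwapPtrBarrier() call above maps onto the
// <atomic> API this patch pulls in. A sketch of the same acquire protocol,
// assuming the lock slot were a std::atomic<recursive_lock*> (hypothetical):
//
//   recursive_lock* expected = nullptr;
//   while ( !lockSlot.compare_exchange_weak(expected, &rlock,
//                                           std::memory_order_acq_rel) ) {
//       if ( (expected != nullptr) && (expected->thread == rlock.thread) )
//           break;               // same thread re-entering: recursive acquire
//       expected = nullptr;      // held by another thread: keep spinning
//   }
//   ++(lockSlot.load(std::memory_order_relaxed)->count);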
void ImageLoader::recursiveSpinUnLock()
if ( dependentImage != NULL ) {
// don't try to initialize stuff "above" me yet
if ( libIsUpward(i) ) {
- uninitUps.images[uninitUps.count] = dependentImage;
+ uninitUps.imagesAndPaths[uninitUps.count] = { dependentImage, libPath(i) };
uninitUps.count++;
}
else if ( dependentImage->fDepth >= fDepth ) {
// record termination order
if ( this->needsTermination() )
context.terminationRecorder(this);
-
+
// let objc know we are about to initialize this image
uint64_t t1 = mach_absolute_time();
fState = dyld_image_state_dependents_initialized;
printTime(" total time", totalTime, totalTime);
dyld::log(" total images loaded: %d (%u from dyld shared cache)\n", imageCount, fgImagesUsedFromSharedCache);
- dyld::log(" total segments mapped: %u, into %llu pages with %llu pages pre-fetched\n", fgTotalSegmentsMapped, fgTotalBytesMapped/4096, fgTotalBytesPreFetched/4096);
+ dyld::log(" total segments mapped: %u, into %llu pages\n", fgTotalSegmentsMapped, fgTotalBytesMapped/4096);
printTime(" total images loading time", fgTotalLoadLibrariesTime, totalTime);
printTime(" total load time in ObjC", fgTotalObjCSetupTime, totalTime);
printTime(" total debugger pause time", fgTotalDebuggerPausedTime, totalTime);
bit += 7;
} while (byte & 0x80);
// sign extend negative numbers
- if ( (byte & 0x40) != 0 )
+ if ( ((byte & 0x40) != 0) && (bit < 64) )
result |= (~0ULL) << bit;
return (intptr_t)result;
}
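
// Stand-alone model of the decoder above, highlighting why sign extension is
// now guarded by (bit < 64): shifting a 64-bit value by 64 or more is
// undefined behavior, which an over-long encoding could otherwise trigger.
// Hypothetical sketch, not the routine dyld uses:
static int64_t exampleReadSleb128(const uint8_t*& p)
{
	uint64_t result = 0;
	int      bit = 0;
	uint8_t  byte;
	do {
		byte = *p++;
		if ( bit < 64 )
			result |= (uint64_t)(byte & 0x7f) << bit;
		bit += 7;
	} while ( byte & 0x80 );
	// sign extend negative numbers, but only if bits remain to fill
	if ( ((byte & 0x40) != 0) && (bit < 64) )
		result |= (~0ULL) << bit;
	return (int64_t)result;
}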
+void ImageLoader::forEachReExportDependent( void (^callback)(const ImageLoader*, bool& stop)) const
+{
+ bool stop = false;
+ for (unsigned int i=0; i < libraryCount(); ++i) {
+ if ( libReExported(i) ) {
+ if ( ImageLoader* dependentImage = libImage(i) ) {
+ callback(dependentImage, stop);
+ }
+ }
+ if (stop)
+ break;
+ }
+}
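
// Typical use of this visitor (hypothetical caller):
//
//   image->forEachReExportDependent(^(const ImageLoader* dep, bool& stop) {
//       dyld::log("re-exported dependent: %s\n", dep->getPath());
//   });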
+
VECTOR_NEVER_DESTRUCTED_IMPL(ImageLoader::InterposeTuple);
VECTOR_NEVER_DESTRUCTED_IMPL(ImagePair);