X-Git-Url: https://git.saurik.com/apple/dyld.git/blobdiff_plain/a9a4db61c29ec8455dcb07f607391cd82e933936..refs/heads/master:/src/ImageLoader.cpp

diff --git a/src/ImageLoader.cpp b/src/ImageLoader.cpp
index 839ab99..764b3b8 100644
--- a/src/ImageLoader.cpp
+++ b/src/ImageLoader.cpp
@@ -84,6 +84,9 @@ ImageLoader::ImageLoader(const char* path, unsigned int libCount)
 	fPathOwnedByImage(false), fIsReferencedDownward(false),
 	fWeakSymbolsBound(false)
 {
+#if __x86_64__
+	fAotPath = NULL;
+#endif
 	if ( fPath != NULL )
 		fPathHash = hash(fPath);
 	if ( libCount > 512 )
@@ -103,6 +106,10 @@ ImageLoader::~ImageLoader()
 		delete [] fRealPath;
 	if ( fPathOwnedByImage && (fPath != NULL) )
 		delete [] fPath;
+#if __x86_64__
+	if ( fAotPath != NULL )
+		delete [] fAotPath;
+#endif
 }
 
 void ImageLoader::setFileInfo(dev_t device, ino_t inode, time_t modDate)
@@ -389,7 +396,7 @@ uintptr_t ImageLoader::interposedAddress(const LinkContext& context, uintptr_t a
 		//dyld::log("  interposedAddress: replacee=0x%08llX, replacement=0x%08llX, neverImage=%p, onlyImage=%p, inImage=%p\n",
 		//		(uint64_t)it->replacee, (uint64_t)it->replacement, it->neverImage, it->onlyImage, inImage);
 		// replace all references to 'replacee' with 'replacement'
-		if ( (address == it->replacee) && (inImage != it->neverImage) && ((it->onlyImage == NULL) || (inImage == it->onlyImage)) ) {
+		if ( (address == it->replacee) && (it->neverImage != inImage) && ((it->onlyImage == NULL) || (it->onlyImage == inImage)) ) {
 			if ( context.verboseInterposing ) {
 				dyld::log("dyld interposing: replace 0x%lX with 0x%lX\n", it->replacee, it->replacement);
 			}
@@ -402,12 +409,14 @@ uintptr_t ImageLoader::interposedAddress(const LinkContext& context, uintptr_t a
 void ImageLoader::applyInterposingToDyldCache(const LinkContext& context) {
 	if (!context.dyldCache)
 		return;
-#if !__arm64e__ // until arm64e cache builder sets builtFromChainedFixups
 	if (!context.dyldCache->header.builtFromChainedFixups)
 		return;
-#endif
 	if (fgInterposingTuples.empty())
 		return;
+
+	// make the cache writable for this block
+	DyldSharedCache::DataConstScopedWriter patcher(context.dyldCache, mach_task_self(), (context.verboseMapping ? &dyld::log : nullptr));
+
 	// For each of the interposed addresses, see if any of them are in the shared cache.  If so, find
 	// that image and apply its patch table to all uses.
 	uintptr_t cacheStart = (uintptr_t)context.dyldCache;
@@ -475,7 +484,7 @@ void ImageLoader::addDynamicInterposingTuples(const struct dyld_interpose_tuple
 // dyld should tell the kernel when it is doing root fix-ups
 void ImageLoader::vmAccountingSetSuspended(const LinkContext& context, bool suspend)
 {
-#if __arm__ || __arm64__
+#if TARGET_OS_IPHONE && !TARGET_OS_SIMULATOR
 	static bool sVmAccountingSuspended = false;
 	if ( suspend == sVmAccountingSuspended )
 		return;
@@ -510,7 +519,7 @@ void ImageLoader::link(const LinkContext& context, bool forceLazysBound, bool pr
 	uint64_t t1 = mach_absolute_time();
 
 	context.clearAllDepths();
-	this->recursiveUpdateDepth(context.imageCount());
+	this->updateDepth(context.imageCount());
 
 	__block uint64_t t2, t3, t4, t5;
 	{
@@ -630,7 +639,8 @@ void ImageLoader::bindAllLazyPointers(const LinkContext& context, bool recursive
 			}
 		}
 		// bind lazies in this image
-		this->doBindJustLazies(context);
+		DyldSharedCache::DataConstLazyScopedWriter patcher(context.dyldCache, mach_task_self(), context.verboseMapping ? &dyld::log : nullptr);
+		this->doBindJustLazies(context, patcher);
 	}
 }
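The interposing hunks above reorder the comparison operands in interposedAddress() and, because the cache's __DATA_CONST is now kept read-only, take a DataConstScopedWriter before patching it. A minimal sketch of the tuple test itself; the types below are illustrative stand-ins, not dyld's actual InterposeTuple or ImageLoader:

```cpp
#include <cstdint>
#include <vector>

struct Image;   // opaque stand-in for ImageLoader

struct InterposeTuple {            // hypothetical mirror of an fgInterposingTuples entry
	uintptr_t    replacee;         // address to be replaced
	uintptr_t    replacement;      // address to substitute
	const Image* neverImage;       // image whose own references are left untouched
	const Image* onlyImage;        // if set, patch references from this image only
};

// Returns the interposed address for 'address' as seen from 'inImage',
// mirroring the condition in interposedAddress() above.
uintptr_t interposedAddress(const std::vector<InterposeTuple>& tuples,
                            uintptr_t address, const Image* inImage)
{
	for (const InterposeTuple& t : tuples) {
		if ( (address == t.replacee) && (t.neverImage != inImage)
		  && ((t.onlyImage == nullptr) || (t.onlyImage == inImage)) )
			return t.replacement;
	}
	return address;    // not interposed
}
```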
@@ -664,7 +674,19 @@ void ImageLoader::markedUsedRecursive(const std::vector<DynamicReference>& dynam
 	}
 }
 
-unsigned int ImageLoader::recursiveUpdateDepth(unsigned int maxDepth)
+unsigned int ImageLoader::updateDepth(unsigned int maxDepth)
+{
+	STACK_ALLOC_ARRAY(ImageLoader*, danglingUpwards, maxDepth);
+	unsigned int depth = this->recursiveUpdateDepth(maxDepth, danglingUpwards);
+	for (auto& danglingUpward : danglingUpwards) {
+		if ( danglingUpward->fDepth != 0)
+			continue;
+		danglingUpward->recursiveUpdateDepth(maxDepth, danglingUpwards);
+	}
+	return depth;
+}
+
+unsigned int ImageLoader::recursiveUpdateDepth(unsigned int maxDepth, dyld3::Array<ImageLoader*>& danglingUpwards)
 {
 	// the purpose of this phase is to make the images sortable such that
 	// in a sort list of images, every image that an image depends on
@@ -677,17 +699,29 @@ unsigned int ImageLoader::recursiveUpdateDepth(unsigned int maxDepth)
 		unsigned int minDependentDepth = maxDepth;
 		for(unsigned int i=0; i < libraryCount(); ++i) {
 			ImageLoader* dependentImage = libImage(i);
-			if ( (dependentImage != NULL) && !libIsUpward(i) ) {
-				unsigned int d = dependentImage->recursiveUpdateDepth(maxDepth);
-				if ( d < minDependentDepth )
-					minDependentDepth = d;
+			if ( dependentImage != NULL ) {
+				if ( libIsUpward(i) ) {
+					if ( dependentImage->fDepth == 0) {
+						if ( !danglingUpwards.contains(dependentImage) )
+							danglingUpwards.push_back(dependentImage);
+					}
+				} else {
+					unsigned int d = dependentImage->recursiveUpdateDepth(maxDepth, danglingUpwards);
+					if ( d < minDependentDepth )
+						minDependentDepth = d;
+				}
+			}
+			// make sure need to re-bind propagates up
+			if ( dependentImage != NULL ) {
+				if ( fAllLibraryChecksumsAndLoadAddressesMatch && !dependentImage->fAllLibraryChecksumsAndLoadAddressesMatch ) {
+					fAllLibraryChecksumsAndLoadAddressesMatch = false;
+				}
 			}
 		}
-
 		// make me less deep then all my dependents
 		fDepth = minDependentDepth - 1;
+
 	}
-
 	return fDepth;
 }
@@ -799,22 +833,21 @@ void ImageLoader::recursiveLoadLibraries(const LinkContext& context, bool prefli
 		// tell each to load its dependents
 		for(unsigned int i=0; i < libraryCount(); ++i) {
 			ImageLoader* dependentImage = libImage(i);
-			if ( dependentImage != NULL ) {	
+			if ( dependentImage != NULL ) {
 				dependentImage->recursiveLoadLibraries(context, preflightOnly, thisRPaths, libraryInfos[i].name);
 			}
 		}
-		
 		// do deep prebind check
 		if ( fAllLibraryChecksumsAndLoadAddressesMatch ) {
 			for(unsigned int i=0; i < libraryCount(); ++i){
 				ImageLoader* dependentImage = libImage(i);
-				if ( dependentImage != NULL ) {	
+				if ( dependentImage != NULL ) {
 					if ( !dependentImage->allDependentLibrariesAsWhenPreBound() )
 						fAllLibraryChecksumsAndLoadAddressesMatch = false;
 				}
 			}
 		}
-		
+
 		// free rpaths (getRPaths() malloc'ed each string)
 		for(std::vector<const char*>::iterator it=rpathsFromThisImage.begin(); it != rpathsFromThisImage.end(); ++it) {
 			const char* str = *it;
@@ -912,11 +945,11 @@ void ImageLoader::recursiveMakeDataReadOnly(const LinkContext& context)
 
 void ImageLoader::recursiveBindWithAccounting(const LinkContext& context, bool forceLazysBound, bool neverUnload)
 {
-	this->recursiveBind(context, forceLazysBound, neverUnload);
+	this->recursiveBind(context, forceLazysBound, neverUnload, nullptr);
 	vmAccountingSetSuspended(context, false);
 }
 
-void ImageLoader::recursiveBind(const LinkContext& context, bool forceLazysBound, bool neverUnload)
+void ImageLoader::recursiveBind(const LinkContext& context, bool forceLazysBound, bool neverUnload, const ImageLoader* parent)
 {
 	// Normally just non-lazy pointers are bound immediately.
 	// The exceptions are:
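The updateDepth()/recursiveUpdateDepth() split above changes how upward links are handled: the recursion no longer follows them (which could loop forever on cyclic upward edges); instead, any upward target that still has no depth is parked in danglingUpwards, and updateDepth() gives those images depths in a second pass. A toy sketch of the same two-pass scheme, with std::vector standing in for dyld's STACK_ALLOC_ARRAY/dyld3::Array:

```cpp
#include <algorithm>
#include <utility>
#include <vector>

struct Node {
	unsigned depth = 0;                        // 0 means "not yet computed", like fDepth
	std::vector<std::pair<Node*, bool>> deps;  // (dependent image, isUpwardLink)

	unsigned recursiveUpdateDepth(unsigned maxDepth, std::vector<Node*>& danglingUpwards) {
		if (depth == 0) {
			depth = maxDepth;                  // break dependency cycles
			unsigned minDependentDepth = maxDepth;
			for (auto& dep : deps) {
				Node* image = dep.first;
				if (dep.second) {
					// upward link: don't recurse (that may cycle), just remember
					// the target so the second pass can give it a depth
					if ( (image->depth == 0)
					  && (std::find(danglingUpwards.begin(), danglingUpwards.end(), image) == danglingUpwards.end()) )
						danglingUpwards.push_back(image);
				} else {
					minDependentDepth = std::min(minDependentDepth,
					    image->recursiveUpdateDepth(maxDepth, danglingUpwards));
				}
			}
			depth = minDependentDepth - 1;     // shallower than everything this depends on
		}
		return depth;
	}
};

unsigned updateDepth(Node& root, unsigned maxDepth) {
	std::vector<Node*> danglingUpwards;
	unsigned depth = root.recursiveUpdateDepth(maxDepth, danglingUpwards);
	// second pass: images reachable only through upward links still need depths;
	// index loop, since the recursion may append more entries as we go
	for (size_t i = 0; i < danglingUpwards.size(); ++i)
		if (danglingUpwards[i]->depth == 0)
			danglingUpwards[i]->recursiveUpdateDepth(maxDepth, danglingUpwards);
	return depth;
}
```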
@@ -930,11 +963,15 @@ void ImageLoader::recursiveBind(const LinkContext& context, bool forceLazysBound
 		// bind lower level libraries first
 		for(unsigned int i=0; i < libraryCount(); ++i) {
 			ImageLoader* dependentImage = libImage(i);
-			if ( dependentImage != NULL )
-				dependentImage->recursiveBind(context, forceLazysBound, neverUnload);
+			if ( dependentImage != NULL ) {
+				const ImageLoader* reExportParent = nullptr;
+				if ( libReExported(i) )
+					reExportParent = this;
+				dependentImage->recursiveBind(context, forceLazysBound, neverUnload, reExportParent);
+			}
 		}
 		// bind this image
-		this->doBind(context, forceLazysBound);
+		this->doBind(context, forceLazysBound, parent);
 		// mark if lazys are also bound
 		if ( forceLazysBound || this->usablePrebinding(context) )
 			fAllLazyPointersBound = true;
@@ -1012,7 +1049,12 @@ void ImageLoader::weakBind(const LinkContext& context)
 		new (&context.weakDefMap) dyld3::Map<const char*, std::pair<const ImageLoader*, uintptr_t>, ImageLoader::HashCString, ImageLoader::EqualCString>();
 		context.weakDefMapInitialized = true;
 	}
-#if __MAC_OS_X_VERSION_MIN_REQUIRED
+
+	// We might have to patch the shared cache __DATA_CONST.  In that case, we'll create just a single
+	// patcher when needed.
+	DyldSharedCache::DataConstLazyScopedWriter patcher(context.dyldCache, mach_task_self(), context.verboseMapping ? &dyld::log : nullptr);
+
+#if TARGET_OS_OSX
 	// only do alternate algorithm for dlopen(). Use traditional algorithm for launch
 	if ( !context.linkingMainExecutable ) {
 		// Don't take the memory hit of weak defs on the launch path until we hit a dlopen with more weak symbols to bind
@@ -1032,8 +1074,8 @@ void ImageLoader::weakBind(const LinkContext& context)
 
 			Diagnostics diag;
 			const dyld3::MachOAnalyzer* ma = (const dyld3::MachOAnalyzer*)image->machHeader();
-			ma->forEachWeakDef(diag, ^(const char *symbolName, uintptr_t imageOffset, bool isFromExportTrie) {
-				uintptr_t targetAddr = (uintptr_t)ma + imageOffset;
+			ma->forEachWeakDef(diag, ^(const char *symbolName, uint64_t imageOffset, bool isFromExportTrie) {
+				uintptr_t targetAddr = (uintptr_t)ma + (uintptr_t)imageOffset;
 				if ( isFromExportTrie ) {
 					// Avoid duplicating the string if we already have the symbol name
 					if ( context.weakDefMap.find(symbolName) != context.weakDefMap.end() )
@@ -1055,8 +1097,8 @@ void ImageLoader::weakBind(const LinkContext& context)
 				continue;
 			Diagnostics diag;
 			const dyld3::MachOAnalyzer* ma = (const dyld3::MachOAnalyzer*)image->machHeader();
-			ma->forEachWeakDef(diag, ^(const char *symbolName, uintptr_t imageOffset, bool isFromExportTrie) {
-				uintptr_t targetAddr = (uintptr_t)ma + imageOffset;
+			ma->forEachWeakDef(diag, ^(const char *symbolName, uint64_t imageOffset, bool isFromExportTrie) {
+				uintptr_t targetAddr = (uintptr_t)ma + (uintptr_t)imageOffset;
 				if ( isFromExportTrie ) {
 					// Avoid duplicating the string if we already have the symbol name
 					if ( context.weakDefMap.find(symbolName) != context.weakDefMap.end() )
@@ -1111,6 +1153,8 @@ void ImageLoader::weakBind(const LinkContext& context)
 					}
 				}
 				if ( (targetAddr != 0) && (coalIterator.image != targetImage) ) {
+					if ( coalIterator.image->inSharedCache() )
+						patcher.makeWriteable();
 					coalIterator.image->updateUsesCoalIterator(coalIterator, targetAddr, (ImageLoader*)targetImage, 0, context);
 					if (weakDefIt == context.weakDefMap.end()) {
 						if (targetImage->neverUnload()) {
@@ -1135,7 +1179,7 @@ void ImageLoader::weakBind(const LinkContext& context)
 			}
 		}
 	}
 	else
-#endif // __MAC_OS_X_VERSION_MIN_REQUIRED
+#endif // TARGET_OS_OSX
 	{
 		// make symbol iterators for each
 		ImageLoader::CoalIterator iterators[count];
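weakBind() now allocates a single DataConstLazyScopedWriter up front and calls patcher.makeWriteable() only at the points where a shared-cache image is actually about to be patched. A sketch of that lazy-writer idiom under assumed semantics (the real class operates on the cache's __DATA_CONST mappings; the stub below just logs):

```cpp
#include <cstdio>

class LazyScopedWriter {
public:
	~LazyScopedWriter() {
		if ( _madeWriteable )
			remapRegion(/*writable=*/false);   // restore read-only on scope exit
	}
	void makeWriteable() {
		if ( _madeWriteable )
			return;                            // idempotent: remap at most once
		remapRegion(/*writable=*/true);
		_madeWriteable = true;
	}
private:
	void remapRegion(bool writable) {
		// the real patcher would adjust the VM protections of the cache's
		// __DATA_CONST regions; this stand-in only logs the transition
		std::printf("remap __DATA_CONST %s\n", writable ? "rw-" : "r--");
	}
	bool _madeWriteable = false;
};
```

The point of the laziness is that most weak-bind passes never touch the shared cache, so both the remap and the matching re-protect in the destructor are skipped entirely unless makeWriteable() is actually called.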
@@ -1215,8 +1259,11 @@ void ImageLoader::weakBind(const LinkContext& context)
 						nameToCoalesce, iterators[i].image->getIndexedShortName((unsigned)iterators[i].imageIndex),
 						targetAddr, targetImage->getIndexedShortName(targetImageIndex));
 				}
-				if ( ! iterators[i].image->weakSymbolsBound(imageIndexes[i]) )
+				if ( ! iterators[i].image->weakSymbolsBound(imageIndexes[i]) ) {
+					if ( iterators[i].image->inSharedCache() )
+						patcher.makeWriteable();
 					iterators[i].image->updateUsesCoalIterator(iterators[i], targetAddr, targetImage, targetImageIndex, context);
+				}
 				iterators[i].symbolMatches = false;
 			}
 		}
@@ -1242,7 +1289,7 @@ void ImageLoader::weakBind(const LinkContext& context)
 			// but if main executable has non-weak override of operator new or delete it needs is handled here
 			for (const char* weakSymbolName : sTreatAsWeak) {
 				const ImageLoader* dummy;
-				imagesNeedingCoalescing[i]->resolveWeak(context, weakSymbolName, true, false, &dummy);
+				imagesNeedingCoalescing[i]->resolveWeak(context, weakSymbolName, true, false, &dummy, patcher);
 			}
 		}
 #if __arm64e__
@@ -1255,7 +1302,7 @@ void ImageLoader::weakBind(const LinkContext& context)
 			while ( !coaler.done ) {
 				const ImageLoader* dummy;
 				// a side effect of resolveWeak() is to patch cache
-				imagesNeedingCoalescing[i]->resolveWeak(context, coaler.symbolName, true, false, &dummy);
+				imagesNeedingCoalescing[i]->resolveWeak(context, coaler.symbolName, true, false, &dummy, patcher);
 				imagesNeedingCoalescing[i]->incrementCoalIterator(coaler);
 			}
 		}
@@ -1299,7 +1346,11 @@ void ImageLoader::weakBindOld(const LinkContext& context)
 
 	// don't need to do any coalescing if only one image has overrides, or all have already been done
 	if ( (countOfImagesWithWeakDefinitionsNotInSharedCache > 0) && (countNotYetWeakBound > 0) ) {
-#if __MAC_OS_X_VERSION_MIN_REQUIRED
+		// We might have to patch the shared cache __DATA_CONST.  In that case, we'll create just a single
+		// patcher when needed.
+		DyldSharedCache::DataConstLazyScopedWriter patcher(context.dyldCache, mach_task_self(), context.verboseMapping ? &dyld::log : nullptr);
+
+#if TARGET_OS_OSX
 		// only do alternate algorithm for dlopen(). Use traditional algorithm for launch
 		if ( !context.linkingMainExecutable ) {
 			// for all images that need weak binding
@@ -1351,6 +1402,8 @@ void ImageLoader::weakBindOld(const LinkContext& context)
 					}
 				}
 				if ( (targetAddr != 0) && (coalIterator.image != targetImage) ) {
+					if ( coalIterator.image->inSharedCache() )
+						patcher.makeWriteable();
 					coalIterator.image->updateUsesCoalIterator(coalIterator, targetAddr, (ImageLoader*)targetImage, 0, context);
 					if ( context.verboseWeakBind )
 						dyld::log("dyld: adjusting uses of %s in %s to use definition from %s\n", nameToCoalesce, coalIterator.image->getPath(), targetImage->getPath());
@@ -1360,7 +1413,7 @@ void ImageLoader::weakBindOld(const LinkContext& context)
 			}
 		}
 	}
 	else
-#endif // __MAC_OS_X_VERSION_MIN_REQUIRED
+#endif // TARGET_OS_OSX
 	{
 		// make symbol iterators for each
 		ImageLoader::CoalIterator iterators[count];
@@ -1440,8 +1493,11 @@ void ImageLoader::weakBindOld(const LinkContext& context)
 						nameToCoalesce, iterators[i].image->getIndexedShortName((unsigned)iterators[i].imageIndex),
 						targetAddr, targetImage->getIndexedShortName(targetImageIndex));
 				}
-				if ( ! iterators[i].image->weakSymbolsBound(imageIndexes[i]) )
+				if ( ! iterators[i].image->weakSymbolsBound(imageIndexes[i]) ) {
+					if ( iterators[i].image->inSharedCache() )
+						patcher.makeWriteable();
 					iterators[i].image->updateUsesCoalIterator(iterators[i], targetAddr, targetImage, targetImageIndex, context);
+				}
 				iterators[i].symbolMatches = false;
 			}
 		}
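Both weakBind() and weakBindOld() enforce the same coalescing rule: of all images exporting a given weak symbol, the first in load order provides the one canonical address, and every other image's uses are re-pointed at it (with makeWriteable() interposed when the image being patched lives in the shared cache). A deliberately simplified illustration of that rule; hash maps stand in for dyld's sorted CoalIterator walk, and the types are not dyld's:

```cpp
#include <cstdint>
#include <string>
#include <unordered_map>
#include <vector>

struct WeakImage {
	std::unordered_map<std::string, uintptr_t>  weakDefs;  // symbol -> definition address
	std::unordered_map<std::string, uintptr_t*> uses;      // symbol -> pointer slot to patch
};

void coalesce(std::vector<WeakImage*>& images, const std::string& symbol) {
	uintptr_t target = 0;
	for (WeakImage* img : images) {               // load order decides the winner
		auto def = img->weakDefs.find(symbol);
		if (def != img->weakDefs.end()) { target = def->second; break; }
	}
	if (target == 0)
		return;                                   // nobody defines it
	for (WeakImage* img : images) {               // re-point every use at the winner
		auto use = img->uses.find(symbol);
		if (use != img->uses.end())
			*use->second = target;
	}
}
```

dyld's real iterators advance through each image's sorted weak-symbol list in lockstep so all symbols are coalesced in one pass; the sketch handles a single symbol to keep the pick-one-definition, patch-all-users shape visible.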
@@ -1459,20 +1515,21 @@ void ImageLoader::weakBindOld(const LinkContext& context)
 			// but if main executable has non-weak override of operator new or delete it needs is handled here
 			for (const char* weakSymbolName : sTreatAsWeak) {
 				const ImageLoader* dummy;
-				imagesNeedingCoalescing[i]->resolveWeak(context, weakSymbolName, true, false, &dummy);
+				imagesNeedingCoalescing[i]->resolveWeak(context, weakSymbolName, true, false, &dummy, patcher);
 			}
 		}
 #if __arm64e__
 		else {
 			// support traditional arm64 app on an arm64e device
 			// look for weak def symbols in this image which may override the cache
+			patcher.makeWriteable();
 			ImageLoader::CoalIterator coaler;
 			imagesNeedingCoalescing[i]->initializeCoalIterator(coaler, i, 0);
 			imagesNeedingCoalescing[i]->incrementCoalIterator(coaler);
 			while ( !coaler.done ) {
 				const ImageLoader* dummy;
 				// a side effect of resolveWeak() is to patch cache
-				imagesNeedingCoalescing[i]->resolveWeak(context, coaler.symbolName, true, false, &dummy);
+				imagesNeedingCoalescing[i]->resolveWeak(context, coaler.symbolName, true, false, &dummy, patcher);
 				imagesNeedingCoalescing[i]->incrementCoalIterator(coaler);
 			}
 		}
@@ -1883,11 +1940,25 @@ intptr_t ImageLoader::read_sleb128(const uint8_t*& p, const uint8_t* end)
 		bit += 7;
 	} while (byte & 0x80);
 	// sign extend negative numbers
-	if ( (byte & 0x40) != 0 )
+	if ( ((byte & 0x40) != 0) && (bit < 64) )
 		result |= (~0ULL) << bit;
 	return (intptr_t)result;
 }
 
+void ImageLoader::forEachReExportDependent( void (^callback)(const ImageLoader*, bool& stop)) const
+{
+	bool stop = false;
+	for (unsigned int i=0; i < libraryCount(); ++i) {
+		if ( libReExported(i) ) {
+			if ( ImageLoader* dependentImage = libImage(i) ) {
+				callback(dependentImage, stop);
+			}
+		}
+		if (stop)
+			break;
+	}
+}
+
 
 VECTOR_NEVER_DESTRUCTED_IMPL(ImageLoader::InterposeTuple);
 VECTOR_NEVER_DESTRUCTED_IMPL(ImagePair);
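The read_sleb128() hunk guards the sign-extension shift: after a full 10-byte encoding of a 64-bit value, bit reaches 70, and (~0ULL) << bit would shift by at least the word width, which is undefined behavior in C and C++. A standalone version of the fixed decoder, with dyld's thrown C string replaced by an exception:

```cpp
#include <cstdint>
#include <stdexcept>

int64_t read_sleb128(const uint8_t*& p, const uint8_t* end)
{
	int64_t result = 0;
	int     bit    = 0;
	uint8_t byte;
	do {
		if (p == end)
			throw std::runtime_error("malformed sleb128");   // ran off the end
		byte = *p++;
		result |= (int64_t)((uint64_t)(byte & 0x7f) << bit); // accumulate 7 payload bits
		bit += 7;
	} while (byte & 0x80);                                   // high bit set means "more bytes"
	// sign extend negative numbers, but only while bits remain to fill;
	// once bit >= 64 the shift would be undefined behavior
	if ( ((byte & 0x40) != 0) && (bit < 64) )
		result |= (int64_t)((~0ULL) << bit);
	return result;
}
```

For example, the single byte 0x7f decodes to -1, while the 10-byte encoding of INT64_MIN (nine 0x80 bytes then 0x7f) finishes with bit == 70, where the guard skips the now-redundant sign extension.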