fPathOwnedByImage(false), fIsReferencedDownward(false),
fWeakSymbolsBound(false)
{
+#if __x86_64__
+ fAotPath = NULL;
+#endif
if ( fPath != NULL )
fPathHash = hash(fPath);
if ( libCount > 512 )
dyld::throwf("too many dependent dylibs in %s", path);
}
ImageLoader::~ImageLoader()
{
if ( fRealPath != NULL )
delete [] fRealPath;
if ( fPathOwnedByImage && (fPath != NULL) )
delete [] fPath;
+#if __x86_64__
+ if ( fAotPath != NULL )
+ delete [] fAotPath;
+#endif
}
void ImageLoader::setFileInfo(dev_t device, ino_t inode, time_t modDate)
//dyld::log(" interposedAddress: replacee=0x%08llX, replacement=0x%08llX, neverImage=%p, onlyImage=%p, inImage=%p\n",
// (uint64_t)it->replacee, (uint64_t)it->replacement, it->neverImage, it->onlyImage, inImage);
// replace all references to 'replacee' with 'replacement'
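// (a tuple applies when the use site is outside 'neverImage' and, if
// 'onlyImage' is set, only inside 'onlyImage')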
- if ( (address == it->replacee) && (inImage != it->neverImage) && ((it->onlyImage == NULL) || (inImage == it->onlyImage)) ) {
+ if ( (address == it->replacee) && (it->neverImage != inImage) && ((it->onlyImage == NULL) || (it->onlyImage == inImage)) ) {
if ( context.verboseInterposing ) {
dyld::log("dyld interposing: replace 0x%lX with 0x%lX\n", it->replacee, it->replacement);
}
return;
if (fgInterposingTuples.empty())
return;
+
+ // make the cache writable for this block
+ DyldSharedCache::DataConstScopedWriter patcher(context.dyldCache, mach_task_self(), (context.verboseMapping ? &dyld::log : nullptr));
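+ // (DataConstScopedWriter is taken to be an RAII helper: construction remaps
+ // the cache's __DATA_CONST region read-write, destruction restores read-only,
+ // so every exit from this scope re-protects the cache)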
+
// For each of the interposed addresses, see if any of them are in the shared cache. If so, find
// that image and apply its patch table to all uses.
uintptr_t cacheStart = (uintptr_t)context.dyldCache;
// <rdar://problem/29099600> dyld should tell the kernel when it is doing root fix-ups
void ImageLoader::vmAccountingSetSuspended(const LinkContext& context, bool suspend)
{
-#if __arm__ || __arm64__
+#if TARGET_OS_IPHONE && !TARGET_OS_SIMULATOR
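+ // (rationale inferred, not stated in the diff: vm accounting is an
+ // embedded-kernel feature, so key on platform rather than CPU architecture;
+ // __arm64__ alone would also match Apple silicon Macs and the simulator)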
static bool sVmAccountingSuspended = false;
if ( suspend == sVmAccountingSuspended )
return;
uint64_t t1 = mach_absolute_time();
context.clearAllDepths();
- this->recursiveUpdateDepth(context.imageCount());
+ this->updateDepth(context.imageCount());
__block uint64_t t2, t3, t4, t5;
{
}
}
// bind lazies in this image
- this->doBindJustLazies(context);
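+ // lazy __DATA_CONST writer: assumed to leave the shared cache read-only
+ // until makeWriteable() is first called, so a bind pass that never patches
+ // the cache pays no re-protection cost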
+ DyldSharedCache::DataConstLazyScopedWriter patcher(context.dyldCache, mach_task_self(), context.verboseMapping ? &dyld::log : nullptr);
+ this->doBindJustLazies(context, patcher);
}
}
}
-unsigned int ImageLoader::recursiveUpdateDepth(unsigned int maxDepth)
+unsigned int ImageLoader::updateDepth(unsigned int maxDepth)
+{
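+ // Pass 1: depth-sort the graph, deferring images reachable only through
+ // upward links (which may form cycles). Pass 2: any deferred image still at
+ // fDepth == 0 gets its own traversal. danglingUpwards can grow while being
+ // iterated; STACK_ALLOC_ARRAY reserves maxDepth slots up front, so (assuming
+ // dyld3::Array never reallocates within capacity) the iteration stays valid.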
+ STACK_ALLOC_ARRAY(ImageLoader*, danglingUpwards, maxDepth);
+ unsigned int depth = this->recursiveUpdateDepth(maxDepth, danglingUpwards);
+ for (auto& danglingUpward : danglingUpwards) {
+ if ( danglingUpward->fDepth != 0)
+ continue;
+ danglingUpward->recursiveUpdateDepth(maxDepth, danglingUpwards);
+ }
+ return depth;
+}
+
+unsigned int ImageLoader::recursiveUpdateDepth(unsigned int maxDepth, dyld3::Array<ImageLoader*>& danglingUpwards)
{
// the purpose of this phase is to make the images sortable such that
// in a sorted list of images, every image that an image depends on
// occurs in the list before it
unsigned int minDependentDepth = maxDepth;
for(unsigned int i=0; i < libraryCount(); ++i) {
ImageLoader* dependentImage = libImage(i);
- if ( (dependentImage != NULL) && !libIsUpward(i) ) {
- unsigned int d = dependentImage->recursiveUpdateDepth(maxDepth);
- if ( d < minDependentDepth )
- minDependentDepth = d;
+ if ( dependentImage != NULL ) {
+ if ( libIsUpward(i) ) {
+ if ( dependentImage->fDepth == 0) {
+ if ( !danglingUpwards.contains(dependentImage) )
+ danglingUpwards.push_back(dependentImage);
+ }
+ } else {
+ unsigned int d = dependentImage->recursiveUpdateDepth(maxDepth, danglingUpwards);
+ if ( d < minDependentDepth )
+ minDependentDepth = d;
+ }
+ }
+ // <rdar://problem/60878811> make sure the need to re-bind propagates up
+ if ( dependentImage != NULL ) {
+ if ( fAllLibraryChecksumsAndLoadAddressesMatch && !dependentImage->fAllLibraryChecksumsAndLoadAddressesMatch ) {
+ fAllLibraryChecksumsAndLoadAddressesMatch = false;
+ }
}
}
-
// make me less deep than all my dependents
fDepth = minDependentDepth - 1;
+
}
-
return fDepth;
}
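// Illustration (not from the source): with main -> libA -> libB and
// maxDepth == 3, libB has no dependents, so its minDependentDepth stays 3 and
// fDepth becomes 2; libA then gets 1 and main gets 0. Sorting by descending
// fDepth therefore places every image after all of its dependents.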
// tell each to load its dependents
for(unsigned int i=0; i < libraryCount(); ++i) {
ImageLoader* dependentImage = libImage(i);
- if ( dependentImage != NULL ) {
+ if ( dependentImage != NULL ) {
dependentImage->recursiveLoadLibraries(context, preflightOnly, thisRPaths, libraryInfos[i].name);
}
}
-
// do deep prebind check
if ( fAllLibraryChecksumsAndLoadAddressesMatch ) {
for(unsigned int i=0; i < libraryCount(); ++i){
ImageLoader* dependentImage = libImage(i);
- if ( dependentImage != NULL ) {
+ if ( dependentImage != NULL ) {
if ( !dependentImage->allDependentLibrariesAsWhenPreBound() )
fAllLibraryChecksumsAndLoadAddressesMatch = false;
}
}
}
-
+
// free rpaths (getRPaths() malloc'ed each string)
for(std::vector<const char*>::iterator it=rpathsFromThisImage.begin(); it != rpathsFromThisImage.end(); ++it) {
const char* str = *it;
void ImageLoader::recursiveBindWithAccounting(const LinkContext& context, bool forceLazysBound, bool neverUnload)
{
- this->recursiveBind(context, forceLazysBound, neverUnload);
+ this->recursiveBind(context, forceLazysBound, neverUnload, nullptr);
vmAccountingSetSuspended(context, false);
}
-void ImageLoader::recursiveBind(const LinkContext& context, bool forceLazysBound, bool neverUnload)
+void ImageLoader::recursiveBind(const LinkContext& context, bool forceLazysBound, bool neverUnload, const ImageLoader* parent)
{
// Normally just non-lazy pointers are bound immediately.
// The exceptions are:
// 1) DYLD_BIND_AT_LAUNCH will cause lazy pointers to be bound immediately
// 2) some APIs (e.g. RTLD_NOW) can cause lazy pointers to be bound immediately
// bind lower level libraries first
for(unsigned int i=0; i < libraryCount(); ++i) {
ImageLoader* dependentImage = libImage(i);
- if ( dependentImage != NULL )
- dependentImage->recursiveBind(context, forceLazysBound, neverUnload);
+ if ( dependentImage != NULL ) {
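+ // when the child is linked as re-exported, pass this image down as the
+ // coalescing parent so doBind() can resolve the child's symbols in the
+ // context of the umbrella that re-exports it (intent inferred from this diff)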
+ const ImageLoader* reExportParent = nullptr;
+ if ( libReExported(i) )
+ reExportParent = this;
+ dependentImage->recursiveBind(context, forceLazysBound, neverUnload, reExportParent);
+ }
}
// bind this image
- this->doBind(context, forceLazysBound);
+ this->doBind(context, forceLazysBound, parent);
// mark if lazys are also bound
if ( forceLazysBound || this->usablePrebinding(context) )
fAllLazyPointersBound = true;
new (&context.weakDefMap) dyld3::Map<const char*, std::pair<const ImageLoader*, uintptr_t>, ImageLoader::HashCString, ImageLoader::EqualCString>();
context.weakDefMapInitialized = true;
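// (placement new into storage owned by the LinkContext: the map is
// constructed lazily, the first time weak definitions need to be recorded)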
}
-#if __MAC_OS_X_VERSION_MIN_REQUIRED
+
+ // We might have to patch the shared cache __DATA_CONST. In that case, we'll create just a single
+ // patcher when needed.
+ DyldSharedCache::DataConstLazyScopedWriter patcher(context.dyldCache, mach_task_self(), context.verboseMapping ? &dyld::log : nullptr);
+
+#if TARGET_OS_OSX
// only do alternate algorithm for dlopen(). Use traditional algorithm for launch
if ( !context.linkingMainExecutable ) {
// Don't take the memory hit of weak defs on the launch path until we hit a dlopen with more weak symbols to bind
Diagnostics diag;
const dyld3::MachOAnalyzer* ma = (const dyld3::MachOAnalyzer*)image->machHeader();
- ma->forEachWeakDef(diag, ^(const char *symbolName, uintptr_t imageOffset, bool isFromExportTrie) {
- uintptr_t targetAddr = (uintptr_t)ma + imageOffset;
+ ma->forEachWeakDef(diag, ^(const char *symbolName, uint64_t imageOffset, bool isFromExportTrie) {
+ uintptr_t targetAddr = (uintptr_t)ma + (uintptr_t)imageOffset;
if ( isFromExportTrie ) {
// Avoid duplicating the string if we already have the symbol name
if ( context.weakDefMap.find(symbolName) != context.weakDefMap.end() )
return;
Diagnostics diag;
const dyld3::MachOAnalyzer* ma = (const dyld3::MachOAnalyzer*)image->machHeader();
- ma->forEachWeakDef(diag, ^(const char *symbolName, uintptr_t imageOffset, bool isFromExportTrie) {
- uintptr_t targetAddr = (uintptr_t)ma + imageOffset;
+ ma->forEachWeakDef(diag, ^(const char *symbolName, uint64_t imageOffset, bool isFromExportTrie) {
+ uintptr_t targetAddr = (uintptr_t)ma + (uintptr_t)imageOffset;
if ( isFromExportTrie ) {
// Avoid duplicating the string if we already have the symbol name
if ( context.weakDefMap.find(symbolName) != context.weakDefMap.end() )
}
}
if ( (targetAddr != 0) && (coalIterator.image != targetImage) ) {
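+ // about to rewrite pointers inside a shared-cache image, so drop the
+ // __DATA_CONST write protection first (assumed to be a no-op if the cache
+ // is already writable)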
+ if ( coalIterator.image->inSharedCache() )
+ patcher.makeWriteable();
coalIterator.image->updateUsesCoalIterator(coalIterator, targetAddr, (ImageLoader*)targetImage, 0, context);
if (weakDefIt == context.weakDefMap.end()) {
if (targetImage->neverUnload()) {
}
}
else
-#endif // __MAC_OS_X_VERSION_MIN_REQUIRED
+#endif // TARGET_OS_OSX
{
// make symbol iterators for each
ImageLoader::CoalIterator iterators[count];
nameToCoalesce, iterators[i].image->getIndexedShortName((unsigned)iterators[i].imageIndex),
targetAddr, targetImage->getIndexedShortName(targetImageIndex));
}
- if ( ! iterators[i].image->weakSymbolsBound(imageIndexes[i]) )
+ if ( ! iterators[i].image->weakSymbolsBound(imageIndexes[i]) ) {
+ if ( iterators[i].image->inSharedCache() )
+ patcher.makeWriteable();
iterators[i].image->updateUsesCoalIterator(iterators[i], targetAddr, targetImage, targetImageIndex, context);
+ }
iterators[i].symbolMatches = false;
}
}
// but if the main executable has a non-weak override of operator new or delete,
// it needs to be handled here
for (const char* weakSymbolName : sTreatAsWeak) {
const ImageLoader* dummy;
- imagesNeedingCoalescing[i]->resolveWeak(context, weakSymbolName, true, false, &dummy);
+ imagesNeedingCoalescing[i]->resolveWeak(context, weakSymbolName, true, false, &dummy, patcher);
}
}
#if __arm64e__
while ( !coaler.done ) {
const ImageLoader* dummy;
// a side effect of resolveWeak() is to patch the cache
- imagesNeedingCoalescing[i]->resolveWeak(context, coaler.symbolName, true, false, &dummy);
+ imagesNeedingCoalescing[i]->resolveWeak(context, coaler.symbolName, true, false, &dummy, patcher);
imagesNeedingCoalescing[i]->incrementCoalIterator(coaler);
}
}
// don't need to do any coalescing if only one image has overrides, or all have already been done
if ( (countOfImagesWithWeakDefinitionsNotInSharedCache > 0) && (countNotYetWeakBound > 0) ) {
-#if __MAC_OS_X_VERSION_MIN_REQUIRED
+ // We might have to patch the shared cache __DATA_CONST. In that case, we'll create just a single
+ // patcher when needed.
+ DyldSharedCache::DataConstLazyScopedWriter patcher(context.dyldCache, mach_task_self(), context.verboseMapping ? &dyld::log : nullptr);
+
+#if TARGET_OS_OSX
// only do alternate algorithm for dlopen(). Use traditional algorithm for launch
if ( !context.linkingMainExecutable ) {
// for all images that need weak binding
}
}
if ( (targetAddr != 0) && (coalIterator.image != targetImage) ) {
+ if ( coalIterator.image->inSharedCache() )
+ patcher.makeWriteable();
coalIterator.image->updateUsesCoalIterator(coalIterator, targetAddr, (ImageLoader*)targetImage, 0, context);
if ( context.verboseWeakBind )
dyld::log("dyld: adjusting uses of %s in %s to use definition from %s\n", nameToCoalesce, coalIterator.image->getPath(), targetImage->getPath());
}
}
else
-#endif // __MAC_OS_X_VERSION_MIN_REQUIRED
+#endif // TARGET_OS_OSX
{
// make symbol iterators for each
ImageLoader::CoalIterator iterators[count];
nameToCoalesce, iterators[i].image->getIndexedShortName((unsigned)iterators[i].imageIndex),
targetAddr, targetImage->getIndexedShortName(targetImageIndex));
}
- if ( ! iterators[i].image->weakSymbolsBound(imageIndexes[i]) )
+ if ( ! iterators[i].image->weakSymbolsBound(imageIndexes[i]) ) {
+ if ( iterators[i].image->inSharedCache() )
+ patcher.makeWriteable();
iterators[i].image->updateUsesCoalIterator(iterators[i], targetAddr, targetImage, targetImageIndex, context);
+ }
iterators[i].symbolMatches = false;
}
}
// but if the main executable has a non-weak override of operator new or delete,
// it needs to be handled here
for (const char* weakSymbolName : sTreatAsWeak) {
const ImageLoader* dummy;
- imagesNeedingCoalescing[i]->resolveWeak(context, weakSymbolName, true, false, &dummy);
+ imagesNeedingCoalescing[i]->resolveWeak(context, weakSymbolName, true, false, &dummy, patcher);
}
}
#if __arm64e__
else {
// support traditional arm64 app on an arm64e device
// look for weak def symbols in this image which may override the cache
+ patcher.makeWriteable();
ImageLoader::CoalIterator coaler;
imagesNeedingCoalescing[i]->initializeCoalIterator(coaler, i, 0);
imagesNeedingCoalescing[i]->incrementCoalIterator(coaler);
while ( !coaler.done ) {
const ImageLoader* dummy;
// a side effect of resolveWeak() is to patch the cache
- imagesNeedingCoalescing[i]->resolveWeak(context, coaler.symbolName, true, false, &dummy);
+ imagesNeedingCoalescing[i]->resolveWeak(context, coaler.symbolName, true, false, &dummy, patcher);
imagesNeedingCoalescing[i]->incrementCoalIterator(coaler);
}
}
bit += 7;
} while (byte & 0x80);
// sign extend negative numbers
- if ( (byte & 0x40) != 0 )
+ if ( ((byte & 0x40) != 0) && (bit < 64) )
result |= (~0ULL) << bit;
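// worked example: a 10-byte SLEB128 encoding leaves bit == 70 here, and
// "(~0ULL) << 70" would shift a 64-bit value by more than its width --
// undefined behavior in C/C++; once bit >= 64 the 64-bit result is already
// fully formed, so the sign extension can safely be skipped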
return (intptr_t)result;
}
+void ImageLoader::forEachReExportDependent( void (^callback)(const ImageLoader*, bool& stop)) const
+{
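+ // walk only the directly re-exported dependents; the callback may set
+ // 'stop' to end the walk early (there is no recursion into indirect
+ // re-exports)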
+ bool stop = false;
+ for (unsigned int i=0; i < libraryCount(); ++i) {
+ if ( libReExported(i) ) {
+ if ( ImageLoader* dependentImage = libImage(i) ) {
+ callback(dependentImage, stop);
+ }
+ }
+ if (stop)
+ break;
+ }
+}
+
VECTOR_NEVER_DESTRUCTED_IMPL(ImageLoader::InterposeTuple);
VECTOR_NEVER_DESTRUCTED_IMPL(ImagePair);