#include <stdint.h>
+#include <fcntl.h>
+#include <sys/types.h>
#include <sys/stat.h>
#include <sys/sysctl.h>
#include <mach/mach_time.h> // mach_absolute_time()
-#include <pthread/pthread.h>
#include <libkern/OSAtomic.h>
#include <vector>
#include <algorithm>
#include "AllImages.h"
-#include "MachOParser.h"
#include "libdyldEntryVector.h"
#include "Logging.h"
#include "Loading.h"
#include "Tracing.h"
-#include "LaunchCache.h"
#include "DyldSharedCache.h"
#include "PathOverrides.h"
-#include "DyldCacheParser.h"
+#include "Closure.h"
+#include "ClosureBuilder.h"
+#include "ClosureFileSystemPhysical.h"
extern const char** appleParams;
namespace dyld3 {
-class VIS_HIDDEN LoadedImage {
-public:
- enum class State { uninited=3, beingInited=2, inited=0 };
- typedef launch_cache::binary_format::Image BinaryImage;
-
- LoadedImage(const mach_header* mh, const BinaryImage* bi);
- bool operator==(const LoadedImage& rhs) const;
- void init(const mach_header* mh, const BinaryImage* bi);
- const mach_header* loadedAddress() const { return (mach_header*)((uintptr_t)_loadAddress & ~0x7ULL); }
- State state() const { return (State)((uintptr_t)_loadAddress & 0x3ULL); }
- const BinaryImage* image() const { return _image; }
- bool neverUnload() const { return ((uintptr_t)_loadAddress & 0x4ULL); }
- void setState(State s) { _loadAddress = (mach_header*)((((uintptr_t)_loadAddress) & ~0x3ULL) | (uintptr_t)s); }
- void setNeverUnload() { _loadAddress = (mach_header*)(((uintptr_t)_loadAddress) | 0x4ULL); }
-
-private:
- const mach_header* _loadAddress; // low bits: bit2=neverUnload, bit1/bit0 contain State
- const BinaryImage* _image;
-};
-
-
-bool LoadedImage::operator==(const LoadedImage& rhs) const
-{
- return (_image == rhs._image) && (loadedAddress() == rhs.loadedAddress());
-}
+///////////////////// AllImages ////////////////////////////
-struct VIS_HIDDEN DlopenCount {
- bool operator==(const DlopenCount& rhs) const;
- const mach_header* loadAddress;
- uintptr_t refCount;
-};
-
-bool DlopenCount::operator==(const DlopenCount& rhs) const
-{
- return (loadAddress == rhs.loadAddress) && (refCount == rhs.refCount);
-}
-
-LoadedImage::LoadedImage(const mach_header* mh, const BinaryImage* bi)
- : _loadAddress(mh), _image(bi)
-{
- assert(loadedAddress() == mh);
- setState(State::uninited);
-}
-
-void LoadedImage::init(const mach_header* mh, const BinaryImage* bi)
-{
- _loadAddress = mh;
- _image = bi;
- assert(loadedAddress() == mh);
- setState(State::uninited);
-}
-
-// forward reference
-template <typename T, int C> class ReaderWriterChunkedVector;
-
-template <typename T, int C>
-class VIS_HIDDEN ChunkedVector {
-public:
- static ChunkedVector<T,C>* make(uint32_t count);
-
- void forEach(uint32_t& startIndex, bool& outerStop, void (^callback)(uint32_t index, const T& value, bool& stop)) const;
- void forEach(uint32_t& startIndex, bool& outerStop, void (^callback)(uint32_t index, T& value, bool& stop));
- T* add(const T& value);
- T* add(uint32_t count, const T values[]);
- void remove(uint32_t index);
- uint32_t count() const { return _inUseCount; }
- uint32_t freeCount() const { return _allocCount - _inUseCount; }
-private:
- T& element(uint32_t index) { return ((T*)_elements)[index]; }
- const T& element(uint32_t index) const { return ((T*)_elements)[index]; }
-
- friend class ReaderWriterChunkedVector<T,C>;
-
- ChunkedVector<T,C>* _next = nullptr;
- uint32_t _allocCount = C;
- uint32_t _inUseCount = 0;
- uint8_t _elements[C*sizeof(T)] = { 0 };
-};
-
-template <typename T, int C>
-class VIS_HIDDEN ReaderWriterChunkedVector {
-public:
- T* add(uint32_t count, const T values[]);
- T* add(const T& value) { return add(1, &value); }
- T* addNoLock(uint32_t count, const T values[]);
- T* addNoLock(const T& value) { return addNoLock(1, &value); }
- void remove(const T& value);
- uint32_t count() const;
- void forEachWithReadLock(void (^callback)(uint32_t index, const T& value, bool& stop)) const;
- void forEachWithWriteLock(void (^callback)(uint32_t index, T& value, bool& stop));
- void forEachNoLock(void (^callback)(uint32_t index, const T& value, bool& stop)) const;
- T& operator[](size_t index);
- uint32_t countNoLock() const;
-
- void withReadLock(void (^withLock)()) const;
- void withWriteLock(void (^withLock)()) const;
- void acquireWriteLock();
- void releaseWriteLock();
- void dump(void (^callback)(const T& value)) const;
-
-private:
- mutable pthread_rwlock_t _lock = PTHREAD_RWLOCK_INITIALIZER;
- ChunkedVector<T,C> _firstChunk;
-};
-
-
-typedef void (*NotifyFunc)(const mach_header* mh, intptr_t slide);
-static ReaderWriterChunkedVector<NotifyFunc, 4> sLoadNotifiers;
-static ReaderWriterChunkedVector<NotifyFunc, 4> sUnloadNotifiers;
-static ReaderWriterChunkedVector<LoadedImage, 4> sLoadedImages;
-static ReaderWriterChunkedVector<DlopenCount, 4> sDlopenRefCounts;
-static ReaderWriterChunkedVector<const launch_cache::BinaryImageGroupData*, 4> sKnownGroups;
-#if __MAC_OS_X_VERSION_MIN_REQUIRED
-static ReaderWriterChunkedVector<__NSObjectFileImage, 2> sNSObjectFileImages;
-#endif
+AllImages gAllImages;
-///////////////////// ChunkedVector ////////////////////////////
-template <typename T, int C>
-ChunkedVector<T,C>* ChunkedVector<T,C>::make(uint32_t count)
+void AllImages::init(const closure::LaunchClosure* closure, const DyldSharedCache* dyldCacheLoadAddress, const char* dyldCachePath,
+ const Array<LoadedImage>& initialImages)
{
- size_t size = sizeof(ChunkedVector) + sizeof(T) * (count-C);
- ChunkedVector<T,C>* result = (ChunkedVector<T,C>*)malloc(size);
- result->_next = nullptr;
- result->_allocCount = count;
- result->_inUseCount = 0;
- return result;
-}
-
-template <typename T, int C>
-void ChunkedVector<T,C>::forEach(uint32_t& outerIndex, bool& outerStop, void (^callback)(uint32_t index, const T& value, bool& stop)) const
-{
- for (uint32_t i=0; i < _inUseCount; ++i) {
- callback(outerIndex, element(i), outerStop);
- ++outerIndex;
- if ( outerStop )
- break;
- }
-}
+ _mainClosure = closure;
+ _initialImages = &initialImages;
+ _dyldCacheAddress = dyldCacheLoadAddress;
+ _dyldCachePath = dyldCachePath;
-template <typename T, int C>
-void ChunkedVector<T,C>::forEach(uint32_t& outerIndex, bool& outerStop, void (^callback)(uint32_t index, T& value, bool& stop))
-{
- for (uint32_t i=0; i < _inUseCount; ++i) {
- callback(outerIndex, element(i), outerStop);
- ++outerIndex;
- if ( outerStop )
- break;
+ if ( _dyldCacheAddress ) {
+ const dyld_cache_mapping_info* const fileMappings = (dyld_cache_mapping_info*)((uint64_t)_dyldCacheAddress + _dyldCacheAddress->header.mappingOffset);
+ _dyldCacheSlide = (uint64_t)dyldCacheLoadAddress - fileMappings[0].address;
+ _imagesArrays.push_back(dyldCacheLoadAddress->cachedDylibsImageArray());
+ if ( auto others = dyldCacheLoadAddress->otherOSImageArray() )
+ _imagesArrays.push_back(others);
}
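+        // (fileMappings[0].address above is the cache's unslid base address, so the
+        //  subtraction yields the ASLR slide applied to the whole shared cache)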
-}
-
-template <typename T, int C>
-T* ChunkedVector<T,C>::add(const T& value)
-{
- return add(1, &value);
-}
-
-template <typename T, int C>
-T* ChunkedVector<T,C>::add(uint32_t count, const T values[])
-{
- assert(count <= (_allocCount - _inUseCount));
- T* result = &element(_inUseCount);
- memmove(result, values, sizeof(T)*count);
- _inUseCount += count;
- return result;
-}
+ _imagesArrays.push_back(_mainClosure->images());
-template <typename T, int C>
-void ChunkedVector<T,C>::remove(uint32_t index)
-{
- assert(index < _inUseCount);
- int moveCount = _inUseCount - index - 1;
- if ( moveCount >= 1 ) {
- memmove(&element(index), &element(index+1), sizeof(T)*moveCount);
+    // record first ImageNum to use for dlopen() calls
+ _mainClosure->images()->forEachImage(^(const dyld3::closure::Image* image, bool& stop) {
+ closure::ImageNum num = image->imageNum();
+ if ( num >= _nextImageNum )
+ _nextImageNum = num+1;
+ });
+
+ // Make temporary old image array, so libSystem initializers can be debugged
+ STACK_ALLOC_ARRAY(dyld_image_info, oldDyldInfo, initialImages.count());
+ for (const LoadedImage& li : initialImages) {
+ oldDyldInfo.push_back({li.loadedAddress(), li.image()->path(), 0});
}
- _inUseCount--;
-}
-
-
-///////////////////// ReaderWriterChunkedVector ////////////////////////////
-
-
-
-template <typename T, int C>
-void ReaderWriterChunkedVector<T,C>::withReadLock(void (^work)()) const
-{
- assert(pthread_rwlock_rdlock(&_lock) == 0);
- work();
- assert(pthread_rwlock_unlock(&_lock) == 0);
-}
+ _oldAllImageInfos->infoArray = &oldDyldInfo[0];
+ _oldAllImageInfos->infoArrayCount = (uint32_t)oldDyldInfo.count();
+ _oldAllImageInfos->notification(dyld_image_adding, _oldAllImageInfos->infoArrayCount, _oldAllImageInfos->infoArray);
+ _oldAllImageInfos->infoArray = nullptr;
+ _oldAllImageInfos->infoArrayCount = 0;
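+    // (infoArray is left null here; mirrorToOldAllImageInfos() later publishes the
+    //  permanent heap-allocated copy once _loadedImages is populated)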
-template <typename T, int C>
-void ReaderWriterChunkedVector<T,C>::withWriteLock(void (^work)()) const
-{
- assert(pthread_rwlock_wrlock(&_lock) == 0);
- work();
- assert(pthread_rwlock_unlock(&_lock) == 0);
+ _processDOFs = Loader::dtraceUserProbesEnabled();
}
-template <typename T, int C>
-void ReaderWriterChunkedVector<T,C>::acquireWriteLock()
+void AllImages::setProgramVars(ProgramVars* vars)
{
- assert(pthread_rwlock_wrlock(&_lock) == 0);
+ _programVars = vars;
+ const dyld3::MachOFile* mf = (dyld3::MachOFile*)_programVars->mh;
+ mf->forEachSupportedPlatform(^(dyld3::Platform platform, uint32_t minOS, uint32_t sdk) {
+ _platform = (dyld_platform_t)platform;
+ //FIXME assert there is only one?
+ });
}
-template <typename T, int C>
-void ReaderWriterChunkedVector<T,C>::releaseWriteLock()
+void AllImages::setRestrictions(bool allowAtPaths, bool allowEnvPaths)
{
- assert(pthread_rwlock_unlock(&_lock) == 0);
+ _allowAtPaths = allowAtPaths;
+ _allowEnvPaths = allowEnvPaths;
}
-template <typename T, int C>
-uint32_t ReaderWriterChunkedVector<T,C>::count() const
+void AllImages::applyInitialImages()
{
- __block uint32_t result = 0;
- withReadLock(^() {
- for (const ChunkedVector<T,C>* chunk = &_firstChunk; chunk != nullptr; chunk = chunk->_next) {
- result += chunk->count();
- }
- });
- return result;
+ addImages(*_initialImages);
+ runImageNotifiers(*_initialImages);
+ _initialImages = nullptr; // this was stack allocated
}
-template <typename T, int C>
-uint32_t ReaderWriterChunkedVector<T,C>::countNoLock() const
+void AllImages::withReadLock(void (^work)()) const
{
- uint32_t result = 0;
- for (const ChunkedVector<T,C>* chunk = &_firstChunk; chunk != nullptr; chunk = chunk->_next) {
- result += chunk->count();
- }
- return result;
+#ifdef OS_UNFAIR_RECURSIVE_LOCK_INIT
+ os_unfair_recursive_lock_lock(&_loadImagesLock);
+ work();
+ os_unfair_recursive_lock_unlock(&_loadImagesLock);
+#else
+ pthread_mutex_lock(&_loadImagesLock);
+ work();
+ pthread_mutex_unlock(&_loadImagesLock);
+#endif
}
-template <typename T, int C>
-T* ReaderWriterChunkedVector<T,C>::addNoLock(uint32_t count, const T values[])
+void AllImages::withWriteLock(void (^work)())
{
- T* result = nullptr;
- ChunkedVector<T,C>* lastChunk = &_firstChunk;
- while ( lastChunk->_next != nullptr )
- lastChunk = lastChunk->_next;
-
- if ( lastChunk->freeCount() >= count ) {
- // append to last chunk
- result = lastChunk->add(count, values);
- }
- else {
- // append new chunk
- uint32_t allocCount = count;
- uint32_t remainder = count % C;
- if ( remainder != 0 )
- allocCount = count + C - remainder;
- ChunkedVector<T,C>* newChunk = ChunkedVector<T,C>::make(allocCount);
- result = newChunk->add(count, values);
- lastChunk->_next = newChunk;
- }
-
- return result;
+#ifdef OS_UNFAIR_RECURSIVE_LOCK_INIT
+ os_unfair_recursive_lock_lock(&_loadImagesLock);
+ work();
+ os_unfair_recursive_lock_unlock(&_loadImagesLock);
+#else
+ pthread_mutex_lock(&_loadImagesLock);
+ work();
+ pthread_mutex_unlock(&_loadImagesLock);
+#endif
}
-template <typename T, int C>
-T* ReaderWriterChunkedVector<T,C>::add(uint32_t count, const T values[])
+void AllImages::withNotifiersLock(void (^work)()) const
{
- __block T* result = nullptr;
- withWriteLock(^() {
- result = addNoLock(count, values);
- });
- return result;
+#ifdef OS_UNFAIR_RECURSIVE_LOCK_INIT
+ os_unfair_recursive_lock_lock(&_notifiersLock);
+ work();
+ os_unfair_recursive_lock_unlock(&_notifiersLock);
+#else
+ pthread_mutex_lock(&_notifiersLock);
+ work();
+ pthread_mutex_unlock(&_notifiersLock);
+#endif
}
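+// Note: the three wrappers above use recursive locks where available -- an
+// add-image notifier may call back into dyld on the same thread (e.g. dladdr()),
+// and a non-recursive lock would deadlock on that re-entry.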
-template <typename T, int C>
-void ReaderWriterChunkedVector<T,C>::remove(const T& valueToRemove)
+void AllImages::mirrorToOldAllImageInfos()
{
- __block bool stopStorage = false;
- withWriteLock(^() {
- ChunkedVector<T,C>* chunkNowEmpty = nullptr;
- __block uint32_t indexStorage = 0;
- __block bool found = false;
- for (ChunkedVector<T,C>* chunk = &_firstChunk; chunk != nullptr; chunk = chunk->_next) {
- uint32_t chunkStartIndex = indexStorage;
- __block uint32_t foundIndex = 0;
- chunk->forEach(indexStorage, stopStorage, ^(uint32_t index, const T& value, bool& stop) {
- if ( value == valueToRemove ) {
- foundIndex = index - chunkStartIndex;
- found = true;
- stop = true;
- }
- });
- if ( found ) {
- chunk->remove(foundIndex);
- found = false;
- if ( chunk->count() == 0 )
- chunkNowEmpty = chunk;
- }
- }
- // if chunk is now empty, remove from linked list and free
- if ( chunkNowEmpty ) {
- for (ChunkedVector<T,C>* chunk = &_firstChunk; chunk != nullptr; chunk = chunk->_next) {
- if ( chunk->_next == chunkNowEmpty ) {
- chunk->_next = chunkNowEmpty->_next;
- if ( chunkNowEmpty != &_firstChunk )
- free(chunkNowEmpty);
- break;
- }
+ withReadLock(^(){
+ // set infoArray to NULL to denote it is in-use
+ _oldAllImageInfos->infoArray = nullptr;
+
+ // if array not large enough, re-alloc it
+ uint32_t imageCount = (uint32_t)_loadedImages.count();
+ if ( _oldArrayAllocCount < imageCount ) {
+ uint32_t newAllocCount = imageCount + 16;
+ dyld_image_info* newArray = (dyld_image_info*)::malloc(sizeof(dyld_image_info)*newAllocCount);
+ if ( _oldAllImageArray != nullptr ) {
+ ::memcpy(newArray, _oldAllImageArray, sizeof(dyld_image_info)*_oldAllImageInfos->infoArrayCount);
+ ::free(_oldAllImageArray);
}
+ _oldAllImageArray = newArray;
+ _oldArrayAllocCount = newAllocCount;
}
- });
-}
-template <typename T, int C>
-void ReaderWriterChunkedVector<T,C>::forEachWithReadLock(void (^callback)(uint32_t index, const T& value, bool& stop)) const
-{
- __block uint32_t index = 0;
- __block bool stop = false;
- withReadLock(^() {
- for (const ChunkedVector<T,C>* chunk = &_firstChunk; chunk != nullptr; chunk = chunk->_next) {
- chunk->forEach(index, stop, callback);
- if ( stop )
- break;
+ // fill out array to mirror current image list
+ int index = 0;
+ for (const LoadedImage& li : _loadedImages) {
+ _oldAllImageArray[index].imageLoadAddress = li.loadedAddress();
+ _oldAllImageArray[index].imageFilePath = imagePath(li.image());
+ _oldAllImageArray[index].imageFileModDate = 0;
+ ++index;
}
- });
-}
-template <typename T, int C>
-void ReaderWriterChunkedVector<T,C>::forEachWithWriteLock(void (^callback)(uint32_t index, T& value, bool& stop))
-{
- __block uint32_t index = 0;
- __block bool stop = false;
- withReadLock(^() {
- for (ChunkedVector<T,C>* chunk = &_firstChunk; chunk != nullptr; chunk = chunk->_next) {
- chunk->forEach(index, stop, callback);
- if ( stop )
- break;
- }
- });
-}
-
-template <typename T, int C>
-void ReaderWriterChunkedVector<T,C>::forEachNoLock(void (^callback)(uint32_t index, const T& value, bool& stop)) const
-{
- uint32_t index = 0;
- bool stop = false;
- for (const ChunkedVector<T,C>* chunk = &_firstChunk; chunk != nullptr; chunk = chunk->_next) {
- chunk->forEach(index, stop, callback);
- if ( stop )
- break;
- }
-}
+        // set infoArray back to base address of array (so other processes can now read it)
+ _oldAllImageInfos->infoArrayCount = imageCount;
+ _oldAllImageInfos->infoArrayChangeTimestamp = mach_absolute_time();
+ _oldAllImageInfos->infoArray = _oldAllImageArray;
-template <typename T, int C>
-T& ReaderWriterChunkedVector<T,C>::operator[](size_t targetIndex)
-{
- __block T* result = nullptr;
- forEachNoLock(^(uint32_t index, T const& value, bool& stop) {
- if ( index == targetIndex ) {
- result = (T*)&value;
- stop = true;
- }
- });
- return *result;
-}
-
-template <typename T, int C>
-void ReaderWriterChunkedVector<T,C>::dump(void (^callback)(const T& value)) const
-{
- log("dump ReaderWriterChunkedVector at %p\n", this);
- __block uint32_t index = 0;
- __block bool stop = false;
- withReadLock(^() {
- for (const ChunkedVector<T,C>* chunk = &_firstChunk; chunk != nullptr; chunk = chunk->_next) {
- log(" chunk at %p\n", chunk);
- chunk->forEach(index, stop, ^(uint32_t i, const T& value, bool& s) {
- callback(value);
- });
- }
});
}
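+// (the null-then-restore dance above is the long-standing protocol that lets
+//  debuggers snapshot the image list without a lock: infoArray == NULL means
+//  "update in progress, retry", and the timestamp orders successive snapshots)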
-
-
-///////////////////// AllImages ////////////////////////////
-
-
-AllImages gAllImages;
-
-
-
-void AllImages::init(const BinaryClosure* closure, const void* dyldCacheLoadAddress, const char* dyldCachePath,
- const dyld3::launch_cache::DynArray<loader::ImageInfo>& initialImages)
-{
- _mainClosure = closure;
- _initialImages = &initialImages;
- _dyldCacheAddress = dyldCacheLoadAddress;
- _dyldCachePath = dyldCachePath;
-
- if ( _dyldCacheAddress ) {
- const DyldSharedCache* cache = (DyldSharedCache*)_dyldCacheAddress;
- const dyld_cache_mapping_info* const fileMappings = (dyld_cache_mapping_info*)((uint64_t)_dyldCacheAddress + cache->header.mappingOffset);
- _dyldCacheSlide = (uint64_t)dyldCacheLoadAddress - fileMappings[0].address;
- }
-
- // Make temporary old image array, so libSystem initializers can be debugged
- uint32_t count = (uint32_t)initialImages.count();
- dyld_image_info oldDyldInfo[count];
- for (int i=0; i < count; ++i) {
- launch_cache::Image img(initialImages[i].imageData);
- oldDyldInfo[i].imageLoadAddress = initialImages[i].loadAddress;
- oldDyldInfo[i].imageFilePath = img.path();
- oldDyldInfo[i].imageFileModDate = 0;
- }
- _oldAllImageInfos->infoArray = oldDyldInfo;
- _oldAllImageInfos->infoArrayCount = count;
- _oldAllImageInfos->notification(dyld_image_adding, count, oldDyldInfo);
- _oldAllImageInfos->infoArray = nullptr;
- _oldAllImageInfos->infoArrayCount = 0;
-}
-
-void AllImages::setProgramVars(ProgramVars* vars)
-{
- _programVars = vars;
-}
-
-void AllImages::applyInitialImages()
-{
- addImages(*_initialImages);
- _initialImages = nullptr; // this was stack allocated
-}
-
-void AllImages::mirrorToOldAllImageInfos()
+void AllImages::addImages(const Array<LoadedImage>& newImages)
{
- // set infoArray to NULL to denote it is in-use
- _oldAllImageInfos->infoArray = nullptr;
-
- // if array not large enough, re-alloc it
- uint32_t imageCount = sLoadedImages.countNoLock();
- if ( _oldArrayAllocCount < imageCount ) {
- uint32_t newAllocCount = imageCount + 16;
- dyld_image_info* newArray = (dyld_image_info*)malloc(sizeof(dyld_image_info)*newAllocCount);
- if ( _oldAllImageArray != nullptr ) {
- memcpy(newArray, _oldAllImageArray, sizeof(dyld_image_info)*_oldAllImageInfos->infoArrayCount);
- free(_oldAllImageArray);
+ // copy into _loadedImages
+ withWriteLock(^(){
+ _loadedImages.append(newImages);
+        // if any image outside the shared cache was added, recompute bounds
+ for (const LoadedImage& li : newImages) {
+ if ( !((MachOAnalyzer*)li.loadedAddress())->inDyldCache() ) {
+ recomputeBounds();
+ break;
+ }
}
- _oldAllImageArray = newArray;
- _oldArrayAllocCount = newAllocCount;
- }
-
- // fill out array to mirror current image list
- sLoadedImages.forEachNoLock(^(uint32_t index, const LoadedImage& loadedImage, bool& stop) {
- launch_cache::Image img(loadedImage.image());
- _oldAllImageArray[index].imageLoadAddress = loadedImage.loadedAddress();
- _oldAllImageArray[index].imageFilePath = imagePath(loadedImage.image());
- _oldAllImageArray[index].imageFileModDate = 0;
});
-
- // set infoArray back to base address of array (so other process can now read)
- _oldAllImageInfos->infoArrayCount = imageCount;
- _oldAllImageInfos->infoArrayChangeTimestamp = mach_absolute_time();
- _oldAllImageInfos->infoArray = _oldAllImageArray;
}
-void AllImages::addImages(const launch_cache::DynArray<loader::ImageInfo>& newImages)
+void AllImages::runImageNotifiers(const Array<LoadedImage>& newImages)
{
uint32_t count = (uint32_t)newImages.count();
assert(count != 0);
- // build stack array of LoadedImage to copy into sLoadedImages
- STACK_ALLOC_DYNARRAY(LoadedImage, count, loadedImagesArray);
- for (uint32_t i=0; i < count; ++i) {
- loadedImagesArray[i].init(newImages[i].loadAddress, newImages[i].imageData);
- if (newImages[i].neverUnload)
- loadedImagesArray[i].setNeverUnload();
- }
- sLoadedImages.add(count, &loadedImagesArray[0]);
-
if ( _oldAllImageInfos != nullptr ) {
// sync to old all image infos struct
- if ( _initialImages != nullptr ) {
- // libSystem not initialized yet, don't use locks
- mirrorToOldAllImageInfos();
- }
- else {
- sLoadedImages.withReadLock(^{
- mirrorToOldAllImageInfos();
- });
- }
+ mirrorToOldAllImageInfos();
// tell debugger about new images
dyld_image_info oldDyldInfo[count];
- for (int i=0; i < count; ++i) {
- launch_cache::Image img(newImages[i].imageData);
- oldDyldInfo[i].imageLoadAddress = newImages[i].loadAddress;
- oldDyldInfo[i].imageFilePath = imagePath(newImages[i].imageData);
+ for (uint32_t i=0; i < count; ++i) {
+ oldDyldInfo[i].imageLoadAddress = newImages[i].loadedAddress();
+ oldDyldInfo[i].imageFilePath = imagePath(newImages[i].image());
oldDyldInfo[i].imageFileModDate = 0;
}
_oldAllImageInfos->notification(dyld_image_adding, count, oldDyldInfo);
}
// log loads
- for (int i=0; i < count; ++i) {
- launch_cache::Image img(newImages[i].imageData);
- log_loads("dyld: %s\n", imagePath(newImages[i].imageData));
+ for (const LoadedImage& li : newImages) {
+ log_loads("dyld: %s\n", imagePath(li.image()));
}
#if !TARGET_IPHONE_SIMULATOR
// call kdebug trace for each image
if (kdebug_is_enabled(KDBG_CODE(DBG_DYLD, DBG_DYLD_UUID, DBG_DYLD_UUID_MAP_A))) {
- for (uint32_t i=0; i < count; ++i) {
- launch_cache::Image img(newImages[i].imageData);
- struct stat stat_buf;
- fsid_t fsid = {{ 0, 0 }};
- fsobj_id_t fsobjid = { 0, 0 };
- if (img.isDiskImage() && stat(imagePath(newImages[i].imageData), &stat_buf) == 0 ) {
+ for (const LoadedImage& li : newImages) {
+ const closure::Image* image = li.image();
+ struct stat stat_buf;
+ fsid_t fsid = {{ 0, 0 }};
+ fsobj_id_t fsobjid = { 0, 0 };
+ if ( !image->inDyldCache() && (stat(imagePath(image), &stat_buf) == 0) ) {
fsobjid = *(fsobj_id_t*)&stat_buf.st_ino;
- fsid = {{ stat_buf.st_dev, 0 }};
+ fsid = {{ stat_buf.st_dev, 0 }};
}
- kdebug_trace_dyld_image(DBG_DYLD_UUID_MAP_A, img.uuid(), fsobjid, fsid, newImages[i].loadAddress);
+ uuid_t uuid;
+ image->getUuid(uuid);
+ kdebug_trace_dyld_image(DBG_DYLD_UUID_MAP_A, &uuid, fsobjid, fsid, li.loadedAddress());
}
}
#endif
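+    // (the 64-bit st_ino is bit-cast into the two 32-bit halves of a fsobj_id_t
+    //  so the kdebug tracepoint can identify the backing file)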
// call each _dyld_register_func_for_add_image function with each image
- const uint32_t existingNotifierCount = sLoadNotifiers.count();
- NotifyFunc existingNotifiers[existingNotifierCount];
- NotifyFunc* existingNotifierArray = existingNotifiers;
- sLoadNotifiers.forEachWithReadLock(^(uint32_t index, const NotifyFunc& func, bool& stop) {
- if ( index < existingNotifierCount )
- existingNotifierArray[index] = func;
- });
- // we don't want to hold lock while calling out, so prebuild array (with lock) then do calls on that array (without lock)
- for (uint32_t j=0; j < existingNotifierCount; ++j) {
- NotifyFunc func = existingNotifierArray[j];
- for (uint32_t i=0; i < count; ++i) {
- log_notifications("dyld: add notifier %p called with mh=%p\n", func, newImages[i].loadAddress);
- if (newImages[i].justUsedFromDyldCache) {
- func(newImages[i].loadAddress, _dyldCacheSlide);
- } else {
- MachOParser parser(newImages[i].loadAddress);
- func(newImages[i].loadAddress, parser.getSlide());
+ withNotifiersLock(^{
+ for (NotifyFunc func : _loadNotifiers) {
+ for (const LoadedImage& li : newImages) {
+ dyld3::ScopedTimer timer(DBG_DYLD_TIMING_FUNC_FOR_ADD_IMAGE, (uint64_t)li.loadedAddress(), (uint64_t)func, 0);
+ log_notifications("dyld: add notifier %p called with mh=%p\n", func, li.loadedAddress());
+ if ( li.image()->inDyldCache() )
+ func(li.loadedAddress(), (uintptr_t)_dyldCacheSlide);
+ else
+ func(li.loadedAddress(), li.loadedAddress()->getSlide());
}
}
- }
+ for (LoadNotifyFunc func : _loadNotifiers2) {
+ for (const LoadedImage& li : newImages) {
+ dyld3::ScopedTimer timer(DBG_DYLD_TIMING_FUNC_FOR_ADD_IMAGE, (uint64_t)li.loadedAddress(), (uint64_t)func, 0);
+ log_notifications("dyld: add notifier %p called with mh=%p\n", func, li.loadedAddress());
+ if ( li.image()->inDyldCache() )
+ func(li.loadedAddress(), li.image()->path(), false);
+ else
+ func(li.loadedAddress(), li.image()->path(), !li.image()->neverUnload());
+ }
+ }
+ });
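+    // (unlike the old copy-then-call scheme, calling out while holding
+    //  _notifiersLock is viable here because the lock is recursive: a notifier
+    //  that registers another notifier simply re-enters it)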
// call objc about images that use objc
if ( _objcNotifyMapped != nullptr ) {
const char* pathsBuffer[count];
const mach_header* mhBuffer[count];
uint32_t imagesWithObjC = 0;
- for (uint32_t i=0; i < count; ++i) {
- launch_cache::Image img(newImages[i].imageData);
- if ( img.hasObjC() ) {
- pathsBuffer[imagesWithObjC] = imagePath(newImages[i].imageData);
- mhBuffer[imagesWithObjC] = newImages[i].loadAddress;
+ for (const LoadedImage& li : newImages) {
+ const closure::Image* image = li.image();
+ if ( image->hasObjC() ) {
+ pathsBuffer[imagesWithObjC] = imagePath(image);
+ mhBuffer[imagesWithObjC] = li.loadedAddress();
++imagesWithObjC;
}
}
if ( imagesWithObjC != 0 ) {
+ dyld3::ScopedTimer timer(DBG_DYLD_TIMING_OBJC_MAP, 0, 0, 0);
(*_objcNotifyMapped)(imagesWithObjC, pathsBuffer, mhBuffer);
if ( log_notifications("dyld: objc-mapped-notifier called with %d images:\n", imagesWithObjC) ) {
                for (uint32_t i=0; i < imagesWithObjC; ++i) {
                    log_notifications("dyld:  objc-mapped: %p %s\n", mhBuffer[i], pathsBuffer[i]);
                }
            }
        }
    }

    // notify any processes tracking loads in this process
    notifyMonitorLoads(newImages);
}
-void AllImages::removeImages(const launch_cache::DynArray<loader::ImageInfo>& unloadImages)
+void AllImages::removeImages(const Array<LoadedImage>& unloadImages)
{
- uint32_t count = (uint32_t)unloadImages.count();
- assert(count != 0);
-
// call each _dyld_register_func_for_remove_image function with each image
- // do this before removing image from internal data structures so that the callback can query dyld about the image
- const uint32_t existingNotifierCount = sUnloadNotifiers.count();
- NotifyFunc existingNotifiers[existingNotifierCount];
- NotifyFunc* existingNotifierArray = existingNotifiers;
- sUnloadNotifiers.forEachWithReadLock(^(uint32_t index, const NotifyFunc& func, bool& stop) {
- if ( index < existingNotifierCount )
- existingNotifierArray[index] = func;
- });
- // we don't want to hold lock while calling out, so prebuild array (with lock) then do calls on that array (without lock)
- for (uint32_t j=0; j < existingNotifierCount; ++j) {
- NotifyFunc func = existingNotifierArray[j];
- for (uint32_t i=0; i < count; ++i) {
- MachOParser parser(unloadImages[i].loadAddress);
- log_notifications("dyld: remove notifier %p called with mh=%p\n", func, unloadImages[i].loadAddress);
- func(unloadImages[i].loadAddress, parser.getSlide());
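+    // (notifiers run before _loadedImages is touched, so a callback can still
+    //  query dyld about the image being unloaded)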
+ withNotifiersLock(^{
+ for (NotifyFunc func : _unloadNotifiers) {
+ for (const LoadedImage& li : unloadImages) {
+ dyld3::ScopedTimer timer(DBG_DYLD_TIMING_FUNC_FOR_REMOVE_IMAGE, (uint64_t)li.loadedAddress(), (uint64_t)func, 0);
+ log_notifications("dyld: remove notifier %p called with mh=%p\n", func, li.loadedAddress());
+ if ( li.image()->inDyldCache() )
+ func(li.loadedAddress(), (uintptr_t)_dyldCacheSlide);
+ else
+ func(li.loadedAddress(), li.loadedAddress()->getSlide());
+ }
}
- }
+ });
// call objc about images going away
if ( _objcNotifyUnmapped != nullptr ) {
- for (uint32_t i=0; i < count; ++i) {
- launch_cache::Image img(unloadImages[i].imageData);
- if ( img.hasObjC() ) {
- (*_objcNotifyUnmapped)(imagePath(unloadImages[i].imageData), unloadImages[i].loadAddress);
- log_notifications("dyld: objc-unmapped-notifier called with image %p %s\n", unloadImages[i].loadAddress, imagePath(unloadImages[i].imageData));
+ for (const LoadedImage& li : unloadImages) {
+ if ( li.image()->hasObjC() ) {
+ (*_objcNotifyUnmapped)(imagePath(li.image()), li.loadedAddress());
+ log_notifications("dyld: objc-unmapped-notifier called with image %p %s\n", li.loadedAddress(), imagePath(li.image()));
}
}
}
#if !TARGET_IPHONE_SIMULATOR
// call kdebug trace for each image
if (kdebug_is_enabled(KDBG_CODE(DBG_DYLD, DBG_DYLD_UUID, DBG_DYLD_UUID_MAP_A))) {
- for (uint32_t i=0; i < count; ++i) {
- launch_cache::Image img(unloadImages[i].imageData);
- struct stat stat_buf;
- fsid_t fsid = {{ 0, 0 }};
- fsobj_id_t fsobjid = { 0, 0 };
- if (stat(imagePath(unloadImages[i].imageData), &stat_buf) == 0 ) {
+ for (const LoadedImage& li : unloadImages) {
+ const closure::Image* image = li.image();
+ struct stat stat_buf;
+ fsid_t fsid = {{ 0, 0 }};
+ fsobj_id_t fsobjid = { 0, 0 };
+ if ( stat(imagePath(image), &stat_buf) == 0 ) {
fsobjid = *(fsobj_id_t*)&stat_buf.st_ino;
- fsid = {{ stat_buf.st_dev, 0 }};
+ fsid = {{ stat_buf.st_dev, 0 }};
}
- kdebug_trace_dyld_image(DBG_DYLD_UUID_UNMAP_A, img.uuid(), fsobjid, fsid, unloadImages[i].loadAddress);
+ uuid_t uuid;
+ image->getUuid(uuid);
+ kdebug_trace_dyld_image(DBG_DYLD_UUID_UNMAP_A, &uuid, fsobjid, fsid, li.loadedAddress());
}
}
#endif
- // remove each from sLoadedImages
- for (uint32_t i=0; i < count; ++i) {
- LoadedImage info(unloadImages[i].loadAddress, unloadImages[i].imageData);
- sLoadedImages.remove(info);
- }
+ // remove each from _loadedImages
+ withWriteLock(^(){
+ for (const LoadedImage& uli : unloadImages) {
+ for (LoadedImage& li : _loadedImages) {
+ if ( uli.loadedAddress() == li.loadedAddress() ) {
+ _loadedImages.erase(li);
+ break;
+ }
+ }
+ }
+ recomputeBounds();
+ });
// sync to old all image infos struct
- sLoadedImages.withReadLock(^{
- mirrorToOldAllImageInfos();
- });
+ mirrorToOldAllImageInfos();
// tell debugger about removed images
- dyld_image_info oldDyldInfo[count];
- for (int i=0; i < count; ++i) {
- launch_cache::Image img(unloadImages[i].imageData);
- oldDyldInfo[i].imageLoadAddress = unloadImages[i].loadAddress;
- oldDyldInfo[i].imageFilePath = imagePath(unloadImages[i].imageData);
- oldDyldInfo[i].imageFileModDate = 0;
- }
- _oldAllImageInfos->notification(dyld_image_removing, count, oldDyldInfo);
-
- // unmap images
- for (int i=0; i < count; ++i) {
- launch_cache::Image img(unloadImages[i].imageData);
- loader::unmapImage(unloadImages[i].imageData, unloadImages[i].loadAddress);
- log_loads("dyld: unloaded %s\n", imagePath(unloadImages[i].imageData));
+ STACK_ALLOC_ARRAY(dyld_image_info, oldDyldInfo, unloadImages.count());
+ for (const LoadedImage& li : unloadImages) {
+ oldDyldInfo.push_back({li.loadedAddress(), li.image()->path(), 0});
}
+ _oldAllImageInfos->notification(dyld_image_removing, (uint32_t)oldDyldInfo.count(), &oldDyldInfo[0]);
// notify any processes tracking loads in this process
notifyMonitorUnloads(unloadImages);
+
+ // finally, unmap images
+ for (const LoadedImage& li : unloadImages) {
+ if ( li.leaveMapped() ) {
+ log_loads("dyld: unloaded but left mmapped %s\n", imagePath(li.image()));
+ }
+ else {
+ // unmapImage() modifies parameter, so use copy
+ LoadedImage copy = li;
+ Loader::unmapImage(copy);
+ log_loads("dyld: unloaded %s\n", imagePath(li.image()));
+ }
+ }
+}
+
+// must be called with writeLock held
+void AllImages::recomputeBounds()
+{
+ _lowestNonCached = UINTPTR_MAX;
+ _highestNonCached = 0;
+ for (const LoadedImage& li : _loadedImages) {
+ const MachOLoaded* ml = li.loadedAddress();
+ uintptr_t start = (uintptr_t)ml;
+ if ( !((MachOAnalyzer*)ml)->inDyldCache() ) {
+ if ( start < _lowestNonCached )
+ _lowestNonCached = start;
+ uintptr_t end = start + (uintptr_t)(li.image()->vmSizeToMap());
+ if ( end > _highestNonCached )
+ _highestNonCached = end;
+ }
+ }
+}
+
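+// (_lowestNonCached/_highestNonCached bracket every image loaded outside the
+//  shared cache; immutableMemory() below uses them as a cheap range reject)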
+uint32_t AllImages::count() const
+{
+ return (uint32_t)_loadedImages.count();
+}
+
+bool AllImages::dyldCacheHasPath(const char* path) const
+{
+ uint32_t dyldCacheImageIndex;
+ if ( _dyldCacheAddress != nullptr )
+ return _dyldCacheAddress->hasImagePath(path, dyldCacheImageIndex);
+ return false;
+}
+
+const char* AllImages::imagePathByIndex(uint32_t index) const
+{
+ if ( index < _loadedImages.count() )
+ return imagePath(_loadedImages[index].image());
+ return nullptr;
}
-void AllImages::setNeverUnload(const loader::ImageInfo& existingImage)
+const mach_header* AllImages::imageLoadAddressByIndex(uint32_t index) const
{
- sLoadedImages.forEachWithWriteLock(^(uint32_t index, dyld3::LoadedImage &value, bool &stop) {
- if (value.image() == existingImage.imageData) {
- value.setNeverUnload();
- stop = true;
+ if ( index < _loadedImages.count() )
+ return _loadedImages[index].loadedAddress();
+ return nullptr;
+}
+
+bool AllImages::findImage(const mach_header* loadAddress, LoadedImage& foundImage) const
+{
+ __block bool result = false;
+ withReadLock(^(){
+ for (const LoadedImage& li : _loadedImages) {
+ if ( li.loadedAddress() == loadAddress ) {
+ foundImage = li;
+ result = true;
+ break;
+ }
}
});
+ return result;
}
-uint32_t AllImages::count() const
+void AllImages::forEachImage(void (^handler)(const LoadedImage& loadedImage, bool& stop)) const
{
- return sLoadedImages.count();
+ withReadLock(^{
+ bool stop = false;
+ for (const LoadedImage& li : _loadedImages) {
+ handler(li, stop);
+ if ( stop )
+ break;
+ }
+ });
}
-launch_cache::Image AllImages::findByLoadOrder(uint32_t index, const mach_header** loadAddress) const
-{
- __block const BinaryImage* foundImage = nullptr;
- sLoadedImages.forEachWithReadLock(^(uint32_t anIndex, const LoadedImage& loadedImage, bool& stop) {
- if ( anIndex == index ) {
- foundImage = loadedImage.image();
- *loadAddress = loadedImage.loadedAddress();
- stop = true;
+const char* AllImages::pathForImageMappedAt(const void* addr) const
+{
+ if ( _initialImages != nullptr ) {
+ // being called during libSystem initialization, so _loadedImages not allocated yet
+ for (const LoadedImage& li : *_initialImages) {
+ uint8_t permissions;
+ if ( li.image()->containsAddress(addr, li.loadedAddress(), &permissions) ) {
+ return li.image()->path();
+ }
+ }
+ return nullptr;
+ }
+
+ // if address is in cache, do fast search of TEXT segments in cache
+ __block const char* result = nullptr;
+ if ( (_dyldCacheAddress != nullptr) && (addr > _dyldCacheAddress) ) {
+ if ( addr < (void*)((uint8_t*)_dyldCacheAddress+_dyldCacheAddress->mappedSize()) ) {
+ uint64_t cacheSlide = (uint64_t)_dyldCacheAddress - _dyldCacheAddress->unslidLoadAddress();
+ uint64_t unslidTargetAddr = (uint64_t)addr - cacheSlide;
+ _dyldCacheAddress->forEachImageTextSegment(^(uint64_t loadAddressUnslid, uint64_t textSegmentSize, const unsigned char* dylibUUID, const char* installName, bool& stop) {
+ if ( (loadAddressUnslid <= unslidTargetAddr) && (unslidTargetAddr < loadAddressUnslid+textSegmentSize) ) {
+ result = installName;
+ stop = true;
+ }
+ });
+ if ( result != nullptr )
+ return result;
+ }
+ }
+
+ // slow path - search image list
+ infoForImageMappedAt(addr, ^(const LoadedImage& foundImage, uint8_t permissions) {
+ result = foundImage.image()->path();
+ });
+
+ return result;
+}
+
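+// (the cache fast path above compares against unslid __TEXT ranges and never
+//  takes the image-list lock, keeping lookups into the shared cache cheap)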
+void AllImages::infoForImageMappedAt(const void* addr, void (^handler)(const LoadedImage& foundImage, uint8_t permissions)) const
+{
+ __block uint8_t permissions;
+ if ( _initialImages != nullptr ) {
+ // being called during libSystem initialization, so _loadedImages not allocated yet
+ for (const LoadedImage& li : *_initialImages) {
+ if ( li.image()->containsAddress(addr, li.loadedAddress(), &permissions) ) {
+ handler(li, permissions);
+ break;
+ }
+ }
+ return;
+ }
+
+ withReadLock(^{
+ for (const LoadedImage& li : _loadedImages) {
+ if ( li.image()->containsAddress(addr, li.loadedAddress(), &permissions) ) {
+ handler(li, permissions);
+ break;
+ }
+ }
+ });
+}
+
+
+bool AllImages::infoForImageMappedAt(const void* addr, const MachOLoaded** ml, uint64_t* textSize, const char** path) const
+{
+ if ( _initialImages != nullptr ) {
+ // being called during libSystem initialization, so _loadedImages not allocated yet
+ for (const LoadedImage& li : *_initialImages) {
+ uint8_t permissions;
+ if ( li.image()->containsAddress(addr, li.loadedAddress(), &permissions) ) {
+ if ( ml != nullptr )
+ *ml = li.loadedAddress();
+ if ( path != nullptr )
+ *path = li.image()->path();
+ if ( textSize != nullptr ) {
+ *textSize = li.image()->textSize();
+ }
+ return true;
+ }
+ }
+ return false;
+ }
+
+ // if address is in cache, do fast search of TEXT segments in cache
+ __block bool result = false;
+ if ( (_dyldCacheAddress != nullptr) && (addr > _dyldCacheAddress) ) {
+ if ( addr < (void*)((uint8_t*)_dyldCacheAddress+_dyldCacheAddress->mappedSize()) ) {
+ uint64_t cacheSlide = (uint64_t)_dyldCacheAddress - _dyldCacheAddress->unslidLoadAddress();
+ uint64_t unslidTargetAddr = (uint64_t)addr - cacheSlide;
+ _dyldCacheAddress->forEachImageTextSegment(^(uint64_t loadAddressUnslid, uint64_t textSegmentSize, const unsigned char* dylibUUID, const char* installName, bool& stop) {
+ if ( (loadAddressUnslid <= unslidTargetAddr) && (unslidTargetAddr < loadAddressUnslid+textSegmentSize) ) {
+ if ( ml != nullptr )
+ *ml = (MachOLoaded*)(loadAddressUnslid + cacheSlide);
+ if ( path != nullptr )
+ *path = installName;
+ if ( textSize != nullptr )
+ *textSize = textSegmentSize;
+ stop = true;
+ result = true;
+ }
+ });
+ if ( result )
+ return result;
+ }
+ }
+
+ // slow path - search image list
+ infoForImageMappedAt(addr, ^(const LoadedImage& foundImage, uint8_t permissions) {
+ if ( ml != nullptr )
+ *ml = foundImage.loadedAddress();
+ if ( path != nullptr )
+ *path = foundImage.image()->path();
+ if ( textSize != nullptr )
+ *textSize = foundImage.image()->textSize();
+ result = true;
+ });
+
+ return result;
+}
+
+// same as infoForImageMappedAt(), but only look at images not in the dyld cache
+void AllImages::infoForNonCachedImageMappedAt(const void* addr, void (^handler)(const LoadedImage& foundImage, uint8_t permissions)) const
+{
+ __block uint8_t permissions;
+ if ( _initialImages != nullptr ) {
+ // being called during libSystem initialization, so _loadedImages not allocated yet
+ for (const LoadedImage& li : *_initialImages) {
+ if ( !((MachOAnalyzer*)li.loadedAddress())->inDyldCache() ) {
+ if ( li.image()->containsAddress(addr, li.loadedAddress(), &permissions) ) {
+ handler(li, permissions);
+ break;
+ }
+ }
+ }
+ return;
+ }
+
+ withReadLock(^{
+ for (const LoadedImage& li : _loadedImages) {
+ if ( !((MachOAnalyzer*)li.loadedAddress())->inDyldCache() ) {
+ if ( li.image()->containsAddress(addr, li.loadedAddress(), &permissions) ) {
+ handler(li, permissions);
+ break;
+ }
+ }
}
});
- return launch_cache::Image(foundImage);
}
-launch_cache::Image AllImages::findByLoadAddress(const mach_header* loadAddress) const
+bool AllImages::immutableMemory(const void* addr, size_t length) const
{
- __block const BinaryImage* foundImage = nullptr;
- sLoadedImages.forEachWithReadLock(^(uint32_t anIndex, const LoadedImage& loadedImage, bool& stop) {
- if ( loadedImage.loadedAddress() == loadAddress ) {
- foundImage = loadedImage.image();
- stop = true;
+ // quick check to see if in shared cache
+ if ( _dyldCacheAddress != nullptr ) {
+ bool readOnly;
+ if ( _dyldCacheAddress->inCache(addr, length, readOnly) ) {
+ return readOnly;
}
- });
- return launch_cache::Image(foundImage);
-}
+ }
-bool AllImages::findIndexForLoadAddress(const mach_header* loadAddress, uint32_t& index)
-{
__block bool result = false;
- sLoadedImages.forEachWithReadLock(^(uint32_t anIndex, const LoadedImage& loadedImage, bool& stop) {
- if ( loadedImage.loadedAddress() == loadAddress ) {
- index = anIndex;
- result = true;
- stop = true;
+ withReadLock(^() {
+        // quick reject: the range lies outside every non-cached image
+ if ( ((uintptr_t)addr < _lowestNonCached) || ((uintptr_t)addr+length > _highestNonCached) ) {
+ result = false;
+ return;
+ }
+ // slow walk through all images, only look at images not in dyld cache
+ for (const LoadedImage& li : _loadedImages) {
+ if ( !((MachOAnalyzer*)li.loadedAddress())->inDyldCache() ) {
+ uint8_t permissions;
+ if ( li.image()->containsAddress(addr, li.loadedAddress(), &permissions) ) {
+ result = ((permissions & VM_PROT_WRITE) == 0) && li.image()->neverUnload();
+ break;
+ }
+ }
}
});
+
return result;
}
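+// (this backs the _dyld_is_memory_immutable() SPI: memory counts as immutable
+//  only if it is mapped read-only and its image can never be unloaded)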
-void AllImages::forEachImage(void (^handler)(uint32_t imageIndex, const mach_header* loadAddress, const launch_cache::Image image, bool& stop)) const
+void AllImages::infoForImageWithLoadAddress(const MachOLoaded* mh, void (^handler)(const LoadedImage& foundImage)) const
{
- sLoadedImages.forEachWithReadLock(^(uint32_t imageIndex, const LoadedImage& loadedImage, bool& stop) {
- handler(imageIndex, loadedImage.loadedAddress(), launch_cache::Image(loadedImage.image()), stop);
+ withReadLock(^{
+ for (const LoadedImage& li : _loadedImages) {
+ if ( li.loadedAddress() == mh ) {
+ handler(li);
+ break;
+ }
+ }
});
}
-launch_cache::Image AllImages::findByOwnedAddress(const void* addr, const mach_header** loadAddress, uint8_t* permissions) const
+bool AllImages::findImageNum(closure::ImageNum imageNum, LoadedImage& foundImage) const
{
- if ( _initialImages != nullptr ) {
- // being called during libSystem initialization, so sLoadedImages not allocated yet
- for (int i=0; i < _initialImages->count(); ++i) {
- const loader::ImageInfo& entry = (*_initialImages)[i];
- launch_cache::Image anImage(entry.imageData);
- if ( anImage.containsAddress(addr, entry.loadAddress, permissions) ) {
- *loadAddress = entry.loadAddress;
- return entry.imageData;
+ if ( _initialImages != nullptr ) {
+ // being called during libSystem initialization, so _loadedImages not allocated yet
+ for (const LoadedImage& li : *_initialImages) {
+ if ( li.image()->representsImageNum(imageNum) ) {
+ foundImage = li;
+ return true;
}
}
- return launch_cache::Image(nullptr);
+ return false;
}
- // if address is in cache, do fast search of cache
- if ( (_dyldCacheAddress != nullptr) && (addr > _dyldCacheAddress) ) {
- const DyldSharedCache* dyldCache = (DyldSharedCache*)_dyldCacheAddress;
- if ( addr < (void*)((uint8_t*)_dyldCacheAddress+dyldCache->mappedSize()) ) {
- size_t cacheVmOffset = ((uint8_t*)addr - (uint8_t*)_dyldCacheAddress);
- DyldCacheParser cacheParser(dyldCache, false);
- launch_cache::ImageGroup cachedDylibsGroup(cacheParser.cachedDylibsGroup());
- uint32_t mhCacheOffset;
- uint8_t foundPermissions;
- launch_cache::Image image(cachedDylibsGroup.findImageByCacheOffset(cacheVmOffset, mhCacheOffset, foundPermissions));
- if ( image.valid() ) {
- *loadAddress = (mach_header*)((uint8_t*)_dyldCacheAddress + mhCacheOffset);
- if ( permissions != nullptr )
- *permissions = foundPermissions;
- return image;
- }
+ bool result = false;
+ for (const LoadedImage& li : _loadedImages) {
+ if ( li.image()->representsImageNum(imageNum) ) {
+ foundImage = li;
+ result = true;
+ break;
}
}
- __block const BinaryImage* foundImage = nullptr;
- sLoadedImages.forEachWithReadLock(^(uint32_t anIndex, const LoadedImage& loadedImage, bool& stop) {
- launch_cache::Image anImage(loadedImage.image());
- if ( anImage.containsAddress(addr, loadedImage.loadedAddress(), permissions) ) {
- *loadAddress = loadedImage.loadedAddress();
- foundImage = loadedImage.image();
- stop = true;
+ return result;
+}
+
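+// (findImageNum() deliberately takes no lock; callers such as findDependent()
+//  below invoke it while already inside withReadLock())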
+const MachOLoaded* AllImages::findDependent(const MachOLoaded* mh, uint32_t depIndex)
+{
+ __block const MachOLoaded* result = nullptr;
+ withReadLock(^{
+ for (const LoadedImage& li : _loadedImages) {
+ if ( li.loadedAddress() == mh ) {
+ closure::ImageNum depImageNum = li.image()->dependentImageNum(depIndex);
+ LoadedImage depLi;
+ if ( findImageNum(depImageNum, depLi) )
+ result = depLi.loadedAddress();
+ break;
+ }
}
});
- return launch_cache::Image(foundImage);
+ return result;
}
-const mach_header* AllImages::findLoadAddressByImage(const BinaryImage* targetImage) const
-{
- __block const mach_header* foundAddress = nullptr;
- sLoadedImages.forEachWithReadLock(^(uint32_t anIndex, const LoadedImage& loadedImage, bool& stop) {
- if ( targetImage == loadedImage.image() ) {
- foundAddress = loadedImage.loadedAddress();
- stop = true;
+
+void AllImages::breadthFirstRecurseDependents(Array<closure::ImageNum>& visited, const LoadedImage& nodeLi, bool& stopped, void (^handler)(const LoadedImage& aLoadedImage, bool& stop)) const
+{
+ // call handler on all direct dependents (unless already visited)
+ STACK_ALLOC_ARRAY(LoadedImage, dependentsToRecurse, 256);
+ nodeLi.image()->forEachDependentImage(^(uint32_t depIndex, closure::Image::LinkKind kind, closure::ImageNum depImageNum, bool& depStop) {
+ if ( kind == closure::Image::LinkKind::upward )
+ return;
+ if ( visited.contains(depImageNum) )
+ return;
+ LoadedImage depLi;
+ if ( !findImageNum(depImageNum, depLi) )
+ return;
+ handler(depLi, depStop);
+ visited.push_back(depImageNum);
+ if ( depStop ) {
+ stopped = true;
+ return;
}
+ dependentsToRecurse.push_back(depLi);
+ });
+ if ( stopped )
+ return;
+ // recurse on all dependents just visited
+ for (LoadedImage& depLi : dependentsToRecurse) {
+ breadthFirstRecurseDependents(visited, depLi, stopped, handler);
+ }
+}
+
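+// (the handler fires on all direct dependents before recursing, and `visited`
+//  keeps diamond-shaped dependency graphs from being walked more than once)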
+void AllImages::visitDependentsTopDown(const LoadedImage& start, void (^handler)(const LoadedImage& aLoadedImage, bool& stop)) const
+{
+ withReadLock(^{
+ STACK_ALLOC_ARRAY(closure::ImageNum, visited, count());
+ bool stop = false;
+ handler(start, stop);
+ if ( stop )
+ return;
+ visited.push_back(start.image()->imageNum());
+ breadthFirstRecurseDependents(visited, start, stop, handler);
});
- return foundAddress;
}
-const mach_header* AllImages::mainExecutable() const
+const MachOLoaded* AllImages::mainExecutable() const
{
assert(_programVars != nullptr);
- return (const mach_header*)_programVars->mh;
+ return (const MachOLoaded*)_programVars->mh;
}
-launch_cache::Image AllImages::mainExecutableImage() const
+const closure::Image* AllImages::mainExecutableImage() const
{
assert(_mainClosure != nullptr);
- const launch_cache::Closure mainClosure(_mainClosure);
- const dyld3::launch_cache::ImageGroup mainGroup = mainClosure.group();
- const uint32_t mainExecutableIndex = mainClosure.mainExecutableImageIndex();
- const dyld3::launch_cache::Image mainImage = mainGroup.image(mainExecutableIndex);
- return mainImage;
+ return _mainClosure->images()->imageForNum(_mainClosure->topImage());
}
void AllImages::setMainPath(const char* path )
{
    _mainExeOverridePath = path;
}
-const char* AllImages::imagePath(const BinaryImage* binImage) const
+const char* AllImages::imagePath(const closure::Image* image) const
{
#if __IPHONE_OS_VERSION_MIN_REQUIRED
// on iOS and watchOS, apps may be moved on device after closure built
if ( _mainExeOverridePath != nullptr ) {
- if ( binImage == mainExecutableImage().binaryData() )
+ if ( image == mainExecutableImage() )
return _mainExeOverridePath;
}
#endif
- launch_cache::Image image(binImage);
- return image.path();
-}
-
-void AllImages::setInitialGroups()
-{
- DyldCacheParser cacheParser((DyldSharedCache*)_dyldCacheAddress, false);
- sKnownGroups.addNoLock(cacheParser.cachedDylibsGroup());
- sKnownGroups.addNoLock(cacheParser.otherDylibsGroup());
- launch_cache::Closure closure(_mainClosure);
- sKnownGroups.addNoLock(closure.group().binaryData());
-}
-
-const launch_cache::binary_format::ImageGroup* AllImages::cachedDylibsGroup()
-{
- return sKnownGroups[0];
-}
-
-const launch_cache::binary_format::ImageGroup* AllImages::otherDylibsGroup()
-{
- return sKnownGroups[1];
-}
-
-const AllImages::BinaryImageGroup* AllImages::mainClosureGroup()
-{
- return sKnownGroups[2];
-}
-
-uint32_t AllImages::currentGroupsCount() const
-{
- return sKnownGroups.count();
-}
-
-void AllImages::copyCurrentGroups(ImageGroupList& groups) const
-{
- sKnownGroups.forEachWithReadLock(^(uint32_t index, const dyld3::launch_cache::binary_format::ImageGroup* const &grpData, bool &stop) {
- if ( index < groups.count() )
- groups[index] = grpData;
- });
+ return image->path();
}
-void AllImages::copyCurrentGroupsNoLock(ImageGroupList& groups) const
-{
- sKnownGroups.forEachNoLock(^(uint32_t index, const dyld3::launch_cache::binary_format::ImageGroup* const &grpData, bool &stop) {
- if ( index < groups.count() )
- groups[index] = grpData;
- });
-}
-
-const mach_header* AllImages::alreadyLoaded(uint64_t inode, uint64_t mtime, bool bumpRefCount)
-{
- __block const mach_header* result = nullptr;
- sLoadedImages.forEachWithReadLock(^(uint32_t anIndex, const LoadedImage& loadedImage, bool& stop) {
- launch_cache::Image img(loadedImage.image());
- if ( img.validateUsingModTimeAndInode() ) {
- if ( (img.fileINode() == inode) && (img.fileModTime() == mtime) ) {
- result = loadedImage.loadedAddress();
- if ( bumpRefCount && !loadedImage.neverUnload() )
- incRefCount(loadedImage.loadedAddress());
- stop = true;
- }
- }
- });
- return result;
-}
-
-const mach_header* AllImages::alreadyLoaded(const char* path, bool bumpRefCount)
-{
- __block const mach_header* result = nullptr;
- uint32_t targetHash = launch_cache::ImageGroup::hashFunction(path);
- sLoadedImages.forEachWithReadLock(^(uint32_t anIndex, const LoadedImage& loadedImage, bool& stop) {
- launch_cache::Image img(loadedImage.image());
- if ( (img.pathHash() == targetHash) && (strcmp(path, imagePath(loadedImage.image())) == 0) ) {
- result = loadedImage.loadedAddress();
- if ( bumpRefCount && !loadedImage.neverUnload() )
- incRefCount(loadedImage.loadedAddress());
- stop = true;
- }
- });
- if ( result == nullptr ) {
- // perhaps there was an image override
- launch_cache::ImageGroup mainGroup(mainClosureGroup());
- STACK_ALLOC_DYNARRAY(const launch_cache::BinaryImageGroupData*, currentGroupsCount(), currentGroupsList);
- copyCurrentGroups(currentGroupsList);
- mainGroup.forEachImageRefOverride(currentGroupsList, ^(launch_cache::Image standardDylib, launch_cache::Image overrideDyilb, bool& stop) {
- if ( strcmp(standardDylib.path(), path) == 0 ) {
- result = alreadyLoaded(overrideDyilb.path(), bumpRefCount);
- stop = true;
- }
- });
- }
- return result;
-}
-
-const mach_header* AllImages::alreadyLoaded(const BinaryImage* binImage, bool bumpRefCount)
-{
- const mach_header* result = findLoadAddressByImage(binImage);
- if ( result != nullptr ) {
- launch_cache::Image loadedImage(binImage);
- if ( bumpRefCount && !loadedImage.neverUnload() )
- incRefCount(result);
- }
- return result;
+dyld_platform_t AllImages::platform() const {
+ return _platform;
}
void AllImages::incRefCount(const mach_header* loadAddress)
{
- __block bool found = false;
- sDlopenRefCounts.forEachWithWriteLock(^(uint32_t index, DlopenCount& entry, bool& stop) {
+ for (DlopenCount& entry : _dlopenRefCounts) {
if ( entry.loadAddress == loadAddress ) {
- found = true;
+ // found existing DlopenCount entry, bump counter
entry.refCount += 1;
- stop = true;
+ return;
}
- });
- if ( !found ) {
- DlopenCount newEnty = { loadAddress, 1 };
- sDlopenRefCounts.add(newEnty);
}
+
+ // no existing DlopenCount, add new one
+ _dlopenRefCounts.push_back({ loadAddress, 1 });
}
void AllImages::decRefCount(const mach_header* loadAddress)
{
- __block bool refCountNowZero = false;
- sDlopenRefCounts.forEachWithWriteLock(^(uint32_t index, DlopenCount& entry, bool& stop) {
+ bool doCollect = false;
+ for (DlopenCount& entry : _dlopenRefCounts) {
if ( entry.loadAddress == loadAddress ) {
+            // found existing DlopenCount entry, decrement counter
entry.refCount -= 1;
- stop = true;
- if ( entry.refCount == 0 )
- refCountNowZero = true;
+ if ( entry.refCount == 0 ) {
+ _dlopenRefCounts.erase(entry);
+ doCollect = true;
+ break;
+ }
+ return;
}
- });
- if ( refCountNowZero ) {
- DlopenCount delEnty = { loadAddress, 0 };
- sDlopenRefCounts.remove(delEnty);
- garbageCollectImages();
}
+ if ( doCollect )
+ garbageCollectImages();
}
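+// Hypothetical usage sketch: a dlopen()/dlclose() pair on an unloadable image
+// drives these counters, e.g.:
+//   gAllImages.incRefCount(mh);   // dlopen() bumps the count
+//   gAllImages.decRefCount(mh);   // dlclose(); reaching zero triggers garbageCollectImages()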
#if __MAC_OS_X_VERSION_MIN_REQUIRED
-__NSObjectFileImage* AllImages::addNSObjectFileImage()
+NSObjectFileImage AllImages::addNSObjectFileImage(const OFIInfo& image)
{
- // look for empty slot first
- __block __NSObjectFileImage* result = nullptr;
- sNSObjectFileImages.forEachWithWriteLock(^(uint32_t index, __NSObjectFileImage& value, bool& stop) {
- if ( (value.path == nullptr) && (value.memSource == nullptr) ) {
- result = &value;
- stop = true;
- }
+ __block uint64_t imageNum = 0;
+ withWriteLock(^{
+ imageNum = ++_nextObjectFileImageNum;
+ _objectFileImages.push_back(image);
+ _objectFileImages.back().imageNum = imageNum;
});
- if ( result != nullptr )
- return result;
-
- // otherwise allocate new slot
- __NSObjectFileImage empty;
- return sNSObjectFileImages.add(empty);
-}
-
-bool AllImages::hasNSObjectFileImage(__NSObjectFileImage* ofi)
-{
- __block bool result = false;
- sNSObjectFileImages.forEachNoLock(^(uint32_t index, const __NSObjectFileImage& value, bool& stop) {
- if ( &value == ofi ) {
- result = ((value.memSource != nullptr) || (value.path != nullptr));
- stop = true;
+ return (NSObjectFileImage)imageNum;
+}
+
+bool AllImages::forNSObjectFileImage(NSObjectFileImage imageHandle,
+ void (^handler)(OFIInfo& image)) {
+ uint64_t imageNum = (uint64_t)imageHandle;
+ bool __block foundImage = false;
+ withReadLock(^{
+ for (OFIInfo& ofi : _objectFileImages) {
+ if ( ofi.imageNum == imageNum ) {
+ handler(ofi);
+ foundImage = true;
+ return;
+ }
}
});
- return result;
+
+ return foundImage;
}
-void AllImages::removeNSObjectFileImage(__NSObjectFileImage* ofi)
+void AllImages::removeNSObjectFileImage(NSObjectFileImage imageHandle)
{
- sNSObjectFileImages.forEachWithWriteLock(^(uint32_t index, __NSObjectFileImage& value, bool& stop) {
- if ( &value == ofi ) {
- // mark slot as empty
- ofi->path = nullptr;
- ofi->memSource = nullptr;
- ofi->memLength = 0;
- ofi->loadAddress = nullptr;
- ofi->binImage = nullptr;
- stop = true;
+ uint64_t imageNum = (uint64_t)imageHandle;
+ withWriteLock(^{
+ for (OFIInfo& ofi : _objectFileImages) {
+ if ( ofi.imageNum == imageNum ) {
+ _objectFileImages.erase(ofi);
+ return;
+ }
}
});
}
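+// (NSObjectFileImage handles are opaque monotonically increasing integers rather
+//  than pointers, so a stale or bogus handle fails the table lookup instead of
+//  dereferencing freed memory)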
class VIS_HIDDEN Reaper
{
public:
- Reaper(uint32_t count, const LoadedImage** unloadables, bool* inUseArray);
+ struct ImageAndUse
+ {
+ const LoadedImage* li;
+ bool inUse;
+ };
+ Reaper(Array<ImageAndUse>& unloadables, AllImages*);
void garbageCollect();
void finalizeDeadImages();
-
private:
- typedef launch_cache::binary_format::Image BinaryImage;
void markDirectlyDlopenedImagesAsUsed();
void markDependentOfInUseImages();
void markDependentsOf(const LoadedImage*);
- bool loadAddressIsUnloadable(const mach_header* loadAddr, uint32_t& index);
- bool imageIsUnloadable(const BinaryImage* binImage, uint32_t& foundIndex);
uint32_t inUseCount();
void dump(const char* msg);
- const LoadedImage** _unloadablesArray;
- bool* _inUseArray;
- uint32_t _arrayCount;
+ Array<ImageAndUse>& _unloadables;
+ AllImages* _allImages;
uint32_t _deadCount;
};
-Reaper::Reaper(uint32_t count, const LoadedImage** unloadables, bool* inUseArray)
- : _unloadablesArray(unloadables), _inUseArray(inUseArray),_arrayCount(count)
-{
-}
-
-
-bool Reaper::loadAddressIsUnloadable(const mach_header* loadAddr, uint32_t& foundIndex)
-{
- for (uint32_t i=0; i < _arrayCount; ++i) {
- if ( _unloadablesArray[i]->loadedAddress() == loadAddr ) {
- foundIndex = i;
- return true;
- }
- }
- return false;
-}
-
-bool Reaper::imageIsUnloadable(const BinaryImage* binImage, uint32_t& foundIndex)
+Reaper::Reaper(Array<ImageAndUse>& unloadables, AllImages* all)
+ : _unloadables(unloadables), _allImages(all), _deadCount(0)
{
- for (uint32_t i=0; i < _arrayCount; ++i) {
- if ( _unloadablesArray[i]->image() == binImage ) {
- foundIndex = i;
- return true;
- }
- }
- return false;
}
void Reaper::markDirectlyDlopenedImagesAsUsed()
{
- sDlopenRefCounts.forEachWithReadLock(^(uint32_t refCountIndex, const dyld3::DlopenCount& dlEntry, bool& stop) {
- if ( dlEntry.refCount != 0 ) {
- uint32_t foundIndex;
- if ( loadAddressIsUnloadable(dlEntry.loadAddress, foundIndex) ) {
- _inUseArray[foundIndex] = true;
+ for (AllImages::DlopenCount& entry : _allImages->_dlopenRefCounts) {
+ if ( entry.refCount != 0 ) {
+ for (ImageAndUse& iu : _unloadables) {
+ if ( iu.li->loadedAddress() == entry.loadAddress ) {
+ iu.inUse = true;
+ break;
+ }
}
- }
- });
+ }
+ }
}
uint32_t Reaper::inUseCount()
{
uint32_t count = 0;
- for (uint32_t i=0; i < _arrayCount; ++i) {
- if ( _inUseArray[i] )
+ for (ImageAndUse& iu : _unloadables) {
+ if ( iu.inUse )
++count;
}
return count;
}
-void Reaper::markDependentsOf(const LoadedImage* entry)
+void Reaper::markDependentsOf(const LoadedImage* li)
{
- const launch_cache::Image image(entry->image());
- STACK_ALLOC_DYNARRAY(const launch_cache::BinaryImageGroupData*, gAllImages.currentGroupsCount(), currentGroupsList);
- gAllImages.copyCurrentGroups(currentGroupsList);
- image.forEachDependentImage(currentGroupsList, ^(uint32_t depIndex, dyld3::launch_cache::Image depImage, dyld3::launch_cache::Image::LinkKind kind, bool& stop) {
- uint32_t foundIndex;
- if ( !depImage.neverUnload() && imageIsUnloadable(depImage.binaryData(), foundIndex) ) {
- _inUseArray[foundIndex] = true;
+ li->image()->forEachDependentImage(^(uint32_t depIndex, closure::Image::LinkKind kind, closure::ImageNum depImageNum, bool& stop) {
+ for (ImageAndUse& iu : _unloadables) {
+ if ( !iu.inUse && iu.li->image()->representsImageNum(depImageNum) ) {
+ iu.inUse = true;
+ break;
+ }
}
});
}
void Reaper::markDependentOfInUseImages()
{
- for (uint32_t i=0; i < _arrayCount; ++i) {
- if ( _inUseArray[i] )
- markDependentsOf(_unloadablesArray[i]);
+ for (ImageAndUse& iu : _unloadables) {
+ if ( iu.inUse )
+ markDependentsOf(iu.li);
}
}
void Reaper::dump(const char* msg)
{
//log("%s:\n", msg);
- for (uint32_t i=0; i < _arrayCount; ++i) {
- dyld3::launch_cache::Image image(_unloadablesArray[i]->image());
- //log(" in-used=%d %s\n", _inUseArray[i], image.path());
- }
+ //for (ImageAndUse& iu : _unloadables) {
+ // log(" in-used=%d %s\n", iu.inUse, iu.li->image()->path());
+ //}
}
void Reaper::garbageCollect()
lastCount = newCount;
} while (countChanged);
- _deadCount = _arrayCount - inUseCount();
+ _deadCount = (uint32_t)_unloadables.count() - inUseCount();
}
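// The mark phase above is a fixpoint computation: directly dlopen'ed images are
// the roots, and "in use" marks are propagated to dependents until a pass adds
// no new mark. A minimal standalone sketch of that loop, with illustrative
// types (not dyld API):
#if 0
struct FakeImage { std::vector<uint32_t> deps; bool inUse = false; };
static void markFixpoint(std::vector<FakeImage>& images)
{
    bool changed;
    do {
        changed = false;
        for (const FakeImage& img : images) {
            if ( !img.inUse )
                continue;
            for (uint32_t depIndex : img.deps) {
                if ( !images[depIndex].inUse ) {
                    images[depIndex].inUse = true;  // a dependent of a live image is live
                    changed = true;
                }
            }
        }
    } while (changed);
    // any image still !inUse is dead and can be finalized and unmapped
}
#endif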
void Reaper::finalizeDeadImages()
__cxa_range_t ranges[_deadCount];
__cxa_range_t* rangesArray = ranges;
__block unsigned int rangesCount = 0;
- for (uint32_t i=0; i < _arrayCount; ++i) {
- if ( _inUseArray[i] )
+ for (ImageAndUse& iu : _unloadables) {
+ if ( iu.inUse )
continue;
- dyld3::launch_cache::Image image(_unloadablesArray[i]->image());
- image.forEachDiskSegment(^(uint32_t segIndex, uint32_t fileOffset, uint32_t fileSize, int64_t vmOffset, uint64_t vmSize, uint8_t permissions, bool &stop) {
+ iu.li->image()->forEachDiskSegment(^(uint32_t segIndex, uint32_t fileOffset, uint32_t fileSize, int64_t vmOffset, uint64_t vmSize, uint8_t permissions, bool &stop) {
if ( permissions & VM_PROT_EXECUTE ) {
- rangesArray[rangesCount].addr = (char*)(_unloadablesArray[i]->loadedAddress()) + vmOffset;
+ rangesArray[rangesCount].addr = (char*)(iu.li->loadedAddress()) + vmOffset;
rangesArray[rangesCount].length = (size_t)vmSize;
++rangesCount;
}
// which calls garbageCollectImages() will just set a flag to re-do the garbage collection
// when the current pass is done.
//
-// Also note that this is done within the sLoadedImages writer lock, so any dlopen/dlclose
+// Also note that this is done within the _loadedImages writer lock, so any dlopen/dlclose
// on other threads is blocked while this garbage collection runs
//
void AllImages::garbageCollectImages()
return;
do {
- const uint32_t loadedImageCount = sLoadedImages.count();
- const LoadedImage* unloadables[loadedImageCount];
- bool unloadableInUse[loadedImageCount];
- const LoadedImage** unloadablesArray = unloadables;
- bool* unloadableInUseArray = unloadableInUse;
- __block uint32_t unloadableCount = 0;
- // do GC with lock, so no other images can be added during GC
- sLoadedImages.withReadLock(^() {
- sLoadedImages.forEachNoLock(^(uint32_t index, const LoadedImage& entry, bool& stop) {
- const launch_cache::Image image(entry.image());
- if ( !image.neverUnload() && !entry.neverUnload() ) {
- unloadablesArray[unloadableCount] = &entry;
- unloadableInUseArray[unloadableCount] = false;
- //log("unloadable[%d] %p %s\n", unloadableCount, entry.loadedAddress(), image.path());
- ++unloadableCount;
+ STACK_ALLOC_ARRAY(Reaper::ImageAndUse, unloadables, _loadedImages.count());
+ withReadLock(^{
+ for (const LoadedImage& li : _loadedImages) {
+ if ( !li.image()->neverUnload() /*&& !li.neverUnload()*/ ) {
+ unloadables.push_back({&li, false});
+ //fprintf(stderr, "unloadable[%lu] %p %s\n", unloadables.count(), li.loadedAddress(), li.image()->path());
}
- });
- // make reaper object to do garbage collection and notifications
- Reaper reaper(unloadableCount, unloadablesArray, unloadableInUseArray);
- reaper.garbageCollect();
+ }
+ });
+ // make reaper object to do garbage collection and notifications
+ Reaper reaper(unloadables, this);
+ reaper.garbageCollect();
- // FIXME: we should sort dead images so higher level ones are terminated first
+ // FIXME: we should sort dead images so higher level ones are terminated first
- // call cxa_finalize_ranges of dead images
- reaper.finalizeDeadImages();
+ // call cxa_finalize_ranges of dead images
+ reaper.finalizeDeadImages();
- // FIXME: call static terminators of dead images
+ // FIXME: call static terminators of dead images
- // FIXME: DOF unregister
- });
+ // FIXME: DOF unregister
- //log("sLoadedImages before GC removals:\n");
- //sLoadedImages.dump(^(const LoadedImage& entry) {
- // const launch_cache::Image image(entry.image());
- // log(" loadAddr=%p, path=%s\n", entry.loadedAddress(), image.path());
- //});
+ //fprintf(stderr, "_loadedImages before GC removals:\n");
+ //for (const LoadedImage& li : _loadedImages) {
+ // fprintf(stderr, " loadAddr=%p, path=%s\n", li.loadedAddress(), li.image()->path());
+ //}
// make copy of LoadedImages we want to remove
- // because unloadables[] points into ChunkVector we are shrinking
- uint32_t removalCount = 0;
- for (uint32_t i=0; i < unloadableCount; ++i) {
- if ( !unloadableInUse[i] )
- ++removalCount;
+ // because unloadables[] points into the _loadedImages storage we are shrinking
+ STACK_ALLOC_ARRAY(LoadedImage, unloadImages, _loadedImages.count());
+ for (const Reaper::ImageAndUse& iu : unloadables) {
+ if ( !iu.inUse )
+ unloadImages.push_back(*iu.li);
}
- if ( removalCount > 0 ) {
- STACK_ALLOC_DYNARRAY(loader::ImageInfo, removalCount, unloadImages);
- uint32_t removalIndex = 0;
- for (uint32_t i=0; i < unloadableCount; ++i) {
- if ( !unloadableInUse[i] ) {
- unloadImages[removalIndex].loadAddress = unloadables[i]->loadedAddress();
- unloadImages[removalIndex].imageData = unloadables[i]->image();
- ++removalIndex;
- }
- }
- // remove entries from sLoadedImages
+ // remove entries from _loadedImages
+ if ( !unloadImages.empty() ) {
removeImages(unloadImages);
- //log("sLoadedImages after GC removals:\n");
- //sLoadedImages.dump(^(const LoadedImage& entry) {
- // const launch_cache::Image image(entry.image());
- // //log(" loadAddr=%p, path=%s\n", entry.loadedAddress(), image.path());
- //});
+ //fprintf(stderr, "_loadedImages after GC removals:\n");
+ //for (const LoadedImage& li : _loadedImages) {
+ // fprintf(stderr, " loadAddr=%p, path=%s\n", li.loadedAddress(), li.image()->path());
+ //}
}
// if some other thread called GC during our work, redo GC on its behalf
-VIS_HIDDEN
-const launch_cache::binary_format::Image* AllImages::messageClosured(const char* path, const char* apiName, const char* closuredErrorMessages[3], int& closuredErrorMessagesCount)
+void AllImages::addLoadNotifier(NotifyFunc func)
{
- __block const launch_cache::binary_format::Image* result = nullptr;
- sKnownGroups.withWriteLock(^() {
- ClosureBuffer::CacheIdent cacheIdent;
- bzero(&cacheIdent, sizeof(cacheIdent));
- if ( _dyldCacheAddress != nullptr ) {
- const DyldSharedCache* dyldCache = (DyldSharedCache*)_dyldCacheAddress;
- dyldCache->getUUID(cacheIdent.cacheUUID);
- cacheIdent.cacheAddress = (unsigned long)_dyldCacheAddress;
- cacheIdent.cacheMappedSize = dyldCache->mappedSize();
+ // callback about already loaded images
+ withReadLock(^{
+ for (const LoadedImage& li : _loadedImages) {
+ dyld3::ScopedTimer timer(DBG_DYLD_TIMING_FUNC_FOR_ADD_IMAGE, (uint64_t)li.loadedAddress(), (uint64_t)func, 0);
+ log_notifications("dyld: add notifier %p called with mh=%p\n", func, li.loadedAddress());
+ if ( li.image()->inDyldCache() )
+ func(li.loadedAddress(), (uintptr_t)_dyldCacheSlide);
+ else
+ func(li.loadedAddress(), li.loadedAddress()->getSlide());
}
- gPathOverrides.forEachPathVariant(path, ^(const char* possiblePath, bool& stopVariants) {
- struct stat statBuf;
- if ( stat(possiblePath, &statBuf) == 0 ) {
- if ( S_ISDIR(statBuf.st_mode) ) {
- log_apis(" %s: path is directory: %s\n", apiName, possiblePath);
- if ( closuredErrorMessagesCount < 3 )
- closuredErrorMessages[closuredErrorMessagesCount++] = strdup("not a file");
- }
- else {
- // file exists, ask closured to build info for it
- STACK_ALLOC_DYNARRAY(const launch_cache::BinaryImageGroupData*, sKnownGroups.countNoLock(), currentGroupsList);
- gAllImages.copyCurrentGroupsNoLock(currentGroupsList);
- dyld3::launch_cache::DynArray<const dyld3::launch_cache::binary_format::ImageGroup*> nonCacheGroupList(currentGroupsList.count()-2, ¤tGroupsList[2]);
- const dyld3::launch_cache::binary_format::ImageGroup* closuredCreatedGroupData = nullptr;
- ClosureBuffer closureBuilderInput(cacheIdent, path, nonCacheGroupList, gPathOverrides);
- ClosureBuffer closureBuilderOutput = dyld3::closured_CreateImageGroup(closureBuilderInput);
- if ( !closureBuilderOutput.isError() ) {
- vm_protect(mach_task_self(), closureBuilderOutput.vmBuffer(), closureBuilderOutput.vmBufferSize(), false, VM_PROT_READ);
- closuredCreatedGroupData = closureBuilderOutput.imageGroup();
- log_apis(" %s: closured built ImageGroup for path: %s\n", apiName, possiblePath);
- sKnownGroups.addNoLock(closuredCreatedGroupData);
- launch_cache::ImageGroup group(closuredCreatedGroupData);
- result = group.imageBinary(0);
- stopVariants = true;
- }
- else {
- log_apis(" %s: closured failed for path: %s, error: %s\n", apiName, possiblePath, closureBuilderOutput.errorMessage());
- if ( closuredErrorMessagesCount < 3 ) {
- closuredErrorMessages[closuredErrorMessagesCount++] = strdup(closureBuilderOutput.errorMessage());
- }
- closureBuilderOutput.free();
- }
- }
- }
- else {
- log_apis(" %s: file does not exist for path: %s\n", apiName, possiblePath);
- }
- });
});
- return result;
-}
-
-const AllImages::BinaryImage* AllImages::findImageInKnownGroups(const char* path)
-{
- __block const AllImages::BinaryImage* result = nullptr;
- sKnownGroups.forEachWithReadLock(^(uint32_t index, const dyld3::launch_cache::binary_format::ImageGroup* const& grpData, bool& stop) {
- launch_cache::ImageGroup group(grpData);
- uint32_t ignore;
- if ( const AllImages::BinaryImage* binImage = group.findImageByPath(path, ignore) ) {
- result = binImage;
- stop = true;
- }
+ // add to list of functions to call about future loads
+ withNotifiersLock(^{
+ _loadNotifiers.push_back(func);
});
- return result;
}
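// Registration above follows a "replay then subscribe" pattern: the new callback
// is first invoked for every already-loaded image (under the images read lock),
// then appended to the notifier list (under the notifiers lock) so future loads
// reach it as well. A hedged sketch of the same shape, with illustrative names:
#if 0
static std::vector<void (*)(int)> sSubscribers;
static std::vector<int>           sPastEvents;   // stand-in for _loadedImages
static void addSubscriberWithReplay(void (*cb)(int))
{
    for (int event : sPastEvents)    // replay: deliver past events to the new subscriber
        cb(event);
    sSubscribers.push_back(cb);      // subscribe: future events are delivered too
}
#endif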
-bool AllImages::imageUnloadable(const launch_cache::Image& image, const mach_header* loadAddress) const
+void AllImages::addUnloadNotifier(NotifyFunc func)
{
- // check if statically determined in clousre that this can never be unloaded
- if ( image.neverUnload() )
- return false;
-
- // check if some runtime decision made this be never-unloadable
- __block bool foundAsNeverUnload = false;
- sLoadedImages.forEachWithReadLock(^(uint32_t anIndex, const LoadedImage& loadedImage, bool& stop) {
- if ( loadedImage.loadedAddress() == loadAddress ) {
- stop = true;
- if ( loadedImage.neverUnload() )
- foundAsNeverUnload = true;
- }
+ // add to list of functions to call about future unloads
+ withNotifiersLock(^{
+ _unloadNotifiers.push_back(func);
});
- if ( foundAsNeverUnload )
- return false;
-
- return true;
}
-void AllImages::addLoadNotifier(NotifyFunc func)
+void AllImages::addLoadNotifier(LoadNotifyFunc func)
{
// callback about already loaded images
- const uint32_t existingCount = sLoadedImages.count();
- const mach_header* existingMHs[existingCount];
- const mach_header** existingArray = existingMHs;
- sLoadedImages.forEachWithReadLock(^(uint32_t anIndex, const LoadedImage& loadedImage, bool& stop) {
- if ( anIndex < existingCount )
- existingArray[anIndex] = loadedImage.loadedAddress();
+ withReadLock(^{
+ for (const LoadedImage& li : _loadedImages) {
+ dyld3::ScopedTimer timer(DBG_DYLD_TIMING_FUNC_FOR_ADD_IMAGE, (uint64_t)li.loadedAddress(), (uint64_t)func, 0);
+ log_notifications("dyld: add notifier %p called with mh=%p\n", func, li.loadedAddress());
+ func(li.loadedAddress(), li.image()->path(), !li.image()->neverUnload());
+ }
});
- // we don't want to hold lock while calling out, so prebuild array (with lock) then do calls on that array (without lock)
- for (uint32_t i=0; i < existingCount; i++) {
- MachOParser parser(existingArray[i]);
- log_notifications("dyld: add notifier %p called with mh=%p\n", func, existingArray[i]);
- func(existingArray[i], parser.getSlide());
- }
// add to list of functions to call about future loads
- sLoadNotifiers.add(func);
+ withNotifiersLock(^{
+ _loadNotifiers2.push_back(func);
+ });
}
-void AllImages::addUnloadNotifier(NotifyFunc func)
-{
- // add to list of functions to call about future unloads
- sUnloadNotifiers.add(func);
-}
void AllImages::setObjCNotifiers(_dyld_objc_notify_mapped map, _dyld_objc_notify_init init, _dyld_objc_notify_unmapped unmap)
{
_objcNotifyUnmapped = unmap;
// callback about already loaded images
- uint32_t maxCount = count();
- const char* pathsBuffer[maxCount];
- const mach_header* mhBuffer[maxCount];
- __block const char** paths = pathsBuffer;
- __block const mach_header** mhs = mhBuffer;
- __block uint32_t imagesWithObjC = 0;
- sLoadedImages.forEachWithReadLock(^(uint32_t anIndex, const LoadedImage& loadedImage, bool& stop) {
- launch_cache::Image img(loadedImage.image());
- if ( img.hasObjC() ) {
- mhs[imagesWithObjC] = loadedImage.loadedAddress();
- paths[imagesWithObjC] = imagePath(loadedImage.image());
- ++imagesWithObjC;
- }
- });
- if ( imagesWithObjC != 0 ) {
- (*map)(imagesWithObjC, pathsBuffer, mhBuffer);
- if ( log_notifications("dyld: objc-mapped-notifier called with %d images:\n", imagesWithObjC) ) {
- for (uint32_t i=0; i < imagesWithObjC; ++i) {
- log_notifications("dyld: objc-mapped: %p %s\n", mhBuffer[i], pathsBuffer[i]);
+ uint32_t maxCount = count();
+ STACK_ALLOC_ARRAY(const mach_header*, mhs, maxCount);
+ STACK_ALLOC_ARRAY(const char*, paths, maxCount);
+ // don't need _mutex here because this is called while the process is still single-threaded
+ for (const LoadedImage& li : _loadedImages) {
+ if ( li.image()->hasObjC() ) {
+ paths.push_back(imagePath(li.image()));
+ mhs.push_back(li.loadedAddress());
+ }
+ }
+ if ( !mhs.empty() ) {
+ (*map)((uint32_t)mhs.count(), &paths[0], &mhs[0]);
+ if ( log_notifications("dyld: objc-mapped-notifier called with %ld images:\n", mhs.count()) ) {
+ for (uintptr_t i=0; i < mhs.count(); ++i) {
+ log_notifications("dyld: objc-mapped: %p %s\n", mhs[i], paths[i]);
}
}
}
}
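// Rather than one callback per image, the objc mapped-notifier is batched: two
// parallel stack arrays (paths and mach_headers) are filled for every image with
// ObjC content, then handed over in a single call. A hedged sketch of that
// batching shape, with an illustrative callback type (assumes <utility>):
#if 0
typedef void (*MappedBatchFunc)(unsigned count, const char* paths[], const void* mhs[]);
static void notifyBatched(MappedBatchFunc func, const std::vector<std::pair<const char*, const void*>>& images)
{
    std::vector<const char*> paths;
    std::vector<const void*> mhs;
    for (const auto& img : images) {
        paths.push_back(img.first);   // parallel arrays stay index-aligned
        mhs.push_back(img.second);
    }
    if ( !mhs.empty() )
        func((unsigned)mhs.size(), &paths[0], &mhs[0]);
}
#endif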
-void AllImages::vmAccountingSetSuspended(bool suspend)
+void AllImages::applyInterposingToDyldCache(const closure::Closure* closure)
{
-#if __arm__ || __arm64__
- // <rdar://problem/29099600> dyld should tell the kernel when it is doing fix-ups caused by roots
- log_fixups("vm.footprint_suspend=%d\n", suspend);
- int newValue = suspend ? 1 : 0;
- int oldValue = 0;
- size_t newlen = sizeof(newValue);
- size_t oldlen = sizeof(oldValue);
- sysctlbyname("vm.footprint_suspend", &oldValue, &oldlen, &newValue, newlen);
+ dyld3::ScopedTimer timer(DBG_DYLD_TIMING_APPLY_INTERPOSING, 0, 0, 0);
+ const uintptr_t cacheStart = (uintptr_t)_dyldCacheAddress;
+ __block closure::ImageNum lastCachedDylibImageNum = 0;
+ __block const closure::Image* lastCachedDylibImage = nullptr;
+ __block bool suspendedAccounting = false;
+ closure->forEachPatchEntry(^(const closure::Closure::PatchEntry& entry) {
+ if ( entry.overriddenDylibInCache != lastCachedDylibImageNum ) {
+ lastCachedDylibImage = closure::ImageArray::findImage(imagesArrays(), entry.overriddenDylibInCache);
+ assert(lastCachedDylibImage != nullptr);
+ lastCachedDylibImageNum = entry.overriddenDylibInCache;
+ }
+ if ( !suspendedAccounting ) {
+ Loader::vmAccountingSetSuspended(true, log_fixups);
+ suspendedAccounting = true;
+ }
+ uintptr_t newValue = 0;
+ LoadedImage foundImage;
+ switch ( entry.replacement.image.kind ) {
+ case closure::Image::ResolvedSymbolTarget::kindImage:
+ assert(findImageNum(entry.replacement.image.imageNum, foundImage));
+ newValue = (uintptr_t)(foundImage.loadedAddress()) + (uintptr_t)entry.replacement.image.offset;
+ break;
+ case closure::Image::ResolvedSymbolTarget::kindSharedCache:
+ newValue = (uintptr_t)_dyldCacheAddress + (uintptr_t)entry.replacement.sharedCache.offset;
+ break;
+ case closure::Image::ResolvedSymbolTarget::kindAbsolute:
+ // this means the symbol was missing in the cache override dylib, so set any uses to NULL
+ newValue = (uintptr_t)entry.replacement.absolute.value;
+ break;
+ default:
+ assert(0 && "bad replacement kind");
+ }
+ lastCachedDylibImage->forEachPatchableUseOfExport(entry.exportCacheOffset, ^(closure::Image::PatchableExport::PatchLocation patchLocation) {
+ uintptr_t* loc = (uintptr_t*)(cacheStart+patchLocation.cacheOffset);
+ #if __has_feature(ptrauth_calls)
+ if ( patchLocation.authenticated ) {
+ MachOLoaded::ChainedFixupPointerOnDisk fixupInfo;
+ fixupInfo.authRebase.auth = true;
+ fixupInfo.authRebase.addrDiv = patchLocation.usesAddressDiversity;
+ fixupInfo.authRebase.diversity = patchLocation.discriminator;
+ fixupInfo.authRebase.key = patchLocation.key;
+ *loc = fixupInfo.signPointer(loc, newValue + patchLocation.getAddend());
+ log_fixups("dyld: cache fixup: *%p = %p (JOP: diversity 0x%04X, addr-div=%d, key=%s)\n",
+ loc, (void*)*loc, patchLocation.discriminator, patchLocation.usesAddressDiversity, patchLocation.keyName());
+ return;
+ }
#endif
+ log_fixups("dyld: cache fixup: *%p = 0x%0lX (dyld cache patch)\n", loc, newValue + (uintptr_t)patchLocation.getAddend());
+ *loc = newValue + (uintptr_t)patchLocation.getAddend();
+ });
+ });
+ if ( suspendedAccounting )
+ Loader::vmAccountingSetSuspended(false, log_fixups);
}
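// Each patch entry above resolves to exactly one new address (image-relative,
// cache-relative, or absolute), which is then stored, plus a per-site addend,
// at every recorded use site in the cache. A hedged sketch of that
// resolve-then-store shape, with illustrative types (not the closure format):
#if 0
struct FakePatchSite  { uint64_t cacheOffset; intptr_t addend; };
struct FakePatchEntry { uintptr_t newValue; std::vector<FakePatchSite> sites; };
static void applyPatches(uint8_t* cacheStart, const std::vector<FakePatchEntry>& entries)
{
    for (const FakePatchEntry& entry : entries) {
        for (const FakePatchSite& site : entry.sites) {
            uintptr_t* loc = (uintptr_t*)(cacheStart + site.cacheOffset);
            *loc = entry.newValue + site.addend;   // arm64e would re-sign the pointer here
        }
    }
}
#endif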
-void AllImages::applyInterposingToDyldCache(const launch_cache::binary_format::Closure* closure, const dyld3::launch_cache::DynArray<loader::ImageInfo>& initialImages)
+void AllImages::runStartupInitialzers()
{
- launch_cache::Closure mainClosure(closure);
- launch_cache::ImageGroup mainGroup = mainClosure.group();
- DyldCacheParser cacheParser((DyldSharedCache*)_dyldCacheAddress, false);
- const launch_cache::binary_format::ImageGroup* dylibsGroupData = cacheParser.cachedDylibsGroup();
- launch_cache::ImageGroup dyldCacheDylibGroup(dylibsGroupData);
- __block bool suspendedAccounting = false;
- mainGroup.forEachDyldCacheSymbolOverride(^(uint32_t patchTableIndex, const launch_cache::binary_format::Image* imageData, uint32_t imageOffset, bool& stop) {
- bool foundInImages = false;
- for (int i=0; i < initialImages.count(); ++i) {
- if ( initialImages[i].imageData == imageData ) {
- foundInImages = true;
- uintptr_t replacement = (uintptr_t)(initialImages[i].loadAddress) + imageOffset;
- dyldCacheDylibGroup.forEachDyldCachePatchLocation(_dyldCacheAddress, patchTableIndex, ^(uintptr_t* locationToPatch, uintptr_t addend, bool& innerStop) {
- if ( !suspendedAccounting ) {
- vmAccountingSetSuspended(true);
- suspendedAccounting = true;
- }
- log_fixups("dyld: cache fixup: *%p = %p\n", locationToPatch, (void*)replacement);
- *locationToPatch = replacement + addend;
- });
- break;
+ __block bool mainExecutableInitializerNeedsToRun = true;
+ __block uint32_t imageIndex = 0;
+ while ( mainExecutableInitializerNeedsToRun ) {
+ __block const closure::Image* image = nullptr;
+ withReadLock(^{
+ image = _loadedImages[imageIndex].image();
+ if ( _loadedImages[imageIndex].loadedAddress()->isMainExecutable() )
+ mainExecutableInitializerNeedsToRun = false;
+ });
+ runInitialzersBottomUp(image);
+ ++imageIndex;
+ }
+}
+
+
+// Find the image in _loadedImages which has ImageNum == num.
+// Try indexHint first; if the hint is wrong, update it so the next lookup is faster.
+LoadedImage AllImages::findImageNum(closure::ImageNum num, uint32_t& indexHint)
+{
+ __block LoadedImage copy;
+ withReadLock(^{
+ if ( (indexHint >= _loadedImages.count()) || !_loadedImages[indexHint].image()->representsImageNum(num) ) {
+ indexHint = 0;
+ for (indexHint=0; indexHint < _loadedImages.count(); ++indexHint) {
+ if ( _loadedImages[indexHint].image()->representsImageNum(num) )
+ break;
+ }
+ assert(indexHint < _loadedImages.count());
+ }
+ copy = _loadedImages[indexHint];
+ });
+ return copy;
+}
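// The indexHint makes the common case O(1): the caller's cached index is checked
// first, and only on a miss does the code fall back to a linear scan that also
// refreshes the hint. A minimal standalone sketch of hint-accelerated lookup,
// with illustrative names (assumes <assert.h>):
#if 0
static uint32_t findWithHint(const std::vector<uint32_t>& keys, uint32_t key, uint32_t& hint)
{
    if ( (hint >= keys.size()) || (keys[hint] != key) ) {
        for (hint = 0; hint < keys.size(); ++hint) {
            if ( keys[hint] == key )
                break;
        }
        assert(hint < keys.size());   // callers guarantee the key is present
    }
    return hint;                      // hint now caches the index for the next call
}
#endif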
+
+
+// Change the state of the LoadedImage in _loadedImages which has ImageNum == num.
+// Only change the state if the current state is expectedCurrentState (a compare-and-swap).
+bool AllImages::swapImageState(closure::ImageNum num, uint32_t& indexHint, LoadedImage::State expectedCurrentState, LoadedImage::State newState)
+{
+ __block bool result = false;
+ withWriteLock(^{
+ if ( (indexHint >= _loadedImages.count()) || !_loadedImages[indexHint].image()->representsImageNum(num) ) {
+ indexHint = 0;
+ for (indexHint=0; indexHint < _loadedImages.count(); ++indexHint) {
+ if ( _loadedImages[indexHint].image()->representsImageNum(num) )
+ break;
}
+ assert(indexHint < _loadedImages.count());
}
- if ( !foundInImages ) {
- launch_cache::Image img(imageData);
- log_fixups("did not find loaded image to patch into cache: %s\n", img.path());
+ if ( _loadedImages[indexHint].state() == expectedCurrentState ) {
+ _loadedImages[indexHint].setState(newState);
+ result = true;
}
});
- if ( suspendedAccounting )
- vmAccountingSetSuspended(false);
+ return result;
+}
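// swapImageState() provides compare-and-swap semantics without atomics: the test
// of the expected state and the store of the new state happen together inside
// the write lock, so two threads racing to init the same image cannot both win.
// A hedged sketch of lock-based CAS, with illustrative names (assumes <pthread.h>):
#if 0
static bool lockedCompareAndSwap(pthread_mutex_t& lock, int& state, int expected, int desired)
{
    bool won = false;
    pthread_mutex_lock(&lock);
    if ( state == expected ) {
        state = desired;   // only the first thread to observe `expected` succeeds
        won = true;
    }
    pthread_mutex_unlock(&lock);
    return won;
}
#endif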
+
+// dyld3 pre-builds the order in which initializers need to be run (bottom up) as a list in the closure.
+// This method uses that list to run all initializers.
+// Because an initializer may call dlopen() and/or create threads, the _loadedImages array
+// may move under us. So, never keep a pointer into it. Always reference images by ImageNum,
+// and use the index hint to make lookups fast in the common case where _loadedImages has not moved.
+void AllImages::runInitialzersBottomUp(const closure::Image* topImage)
+{
+ // walk closure specified initializer list, already ordered bottom up
+ topImage->forEachImageToInitBefore(^(closure::ImageNum imageToInit, bool& stop) {
+ // get a copy of the LoadedImage for imageToInit, but don't keep a reference into _loadedImages, because it may move if initializers call dlopen()
+ uint32_t indexHint = 0;
+ LoadedImage loadedImageCopy = findImageNum(imageToInit, indexHint);
+ // skip if the image is already inited, or in process of being inited (dependency cycle)
+ if ( (loadedImageCopy.state() == LoadedImage::State::fixedUp) && swapImageState(imageToInit, indexHint, LoadedImage::State::fixedUp, LoadedImage::State::beingInited) ) {
+ // tell objc to run any +load methods in image
+ if ( (_objcNotifyInit != nullptr) && loadedImageCopy.image()->mayHavePlusLoads() ) {
+ dyld3::ScopedTimer timer(DBG_DYLD_TIMING_OBJC_INIT, (uint64_t)loadedImageCopy.loadedAddress(), 0, 0);
+ const char* path = imagePath(loadedImageCopy.image());
+ log_notifications("dyld: objc-init-notifier called with mh=%p, path=%s\n", loadedImageCopy.loadedAddress(), path);
+ (*_objcNotifyInit)(path, loadedImageCopy.loadedAddress());
+ }
+
+ // run all initializers in image
+ runAllInitializersInImage(loadedImageCopy.image(), loadedImageCopy.loadedAddress());
+
+ // advance state to inited
+ swapImageState(imageToInit, indexHint, LoadedImage::State::beingInited, LoadedImage::State::inited);
+ }
+ });
+}
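// The three-way state machine (fixedUp -> beingInited -> inited) combined with
// the swap above is what breaks dependency cycles: a cycle re-enters an image
// whose state is already beingInited, loses the swap, and simply skips it.
// A hedged sketch of that gate, with an illustrative enum (not the LoadedImage
// bit-packed state):
#if 0
enum class InitState { fixedUp, beingInited, inited };
static void initOnce(InitState& state, void (*runInitializers)())
{
    if ( state != InitState::fixedUp )
        return;                        // already inited, or a cycle back into this image
    state = InitState::beingInited;    // the real code swaps this under the write lock
    runInitializers();
    state = InitState::inited;
}
#endif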
+
+
+void AllImages::runLibSystemInitializer(const LoadedImage& libSystem)
+{
+ // run all initializers in libSystem.dylib
+ runAllInitializersInImage(libSystem.image(), libSystem.loadedAddress());
+
+ // Note: during libSystem's initialization, libdyld_initializer() is called which copies _initialImages to _loadedImages
+
+ // mark libSystem.dylib as inited, so a later recursive initialization pass won't run it again
+ for (LoadedImage& li : _loadedImages) {
+ if ( li.loadedAddress() == libSystem.loadedAddress() ) {
+ li.setState(LoadedImage::State::inited);
+ break;
+ }
+ }
}
-void AllImages::runLibSystemInitializer(const mach_header* libSystemAddress, const launch_cache::binary_format::Image* libSystemBinImage)
+void AllImages::runAllInitializersInImage(const closure::Image* image, const MachOLoaded* ml)
{
- // run all initializers in image
- launch_cache::Image libSystemImage(libSystemBinImage);
- libSystemImage.forEachInitializer(libSystemAddress, ^(const void* func) {
+ image->forEachInitializer(ml, ^(const void* func) {
Initializer initFunc = (Initializer)func;
- dyld3::kdebug_trace_dyld_duration(DBG_DYLD_TIMING_STATIC_INITIALIZER, (uint64_t)func, 0, ^{
+#if __has_feature(ptrauth_calls)
+ initFunc = (Initializer)__builtin_ptrauth_sign_unauthenticated((void*)initFunc, 0, 0);
+#endif
+ {
+ ScopedTimer(DBG_DYLD_TIMING_STATIC_INITIALIZER, (uint64_t)ml, (uint64_t)func, 0);
initFunc(NXArgc, NXArgv, environ, appleParams, _programVars);
- });
- log_initializers("called initialzer %p in %s\n", initFunc, libSystemImage.path());
- });
- // mark libSystem.dylib as being init, so later recursive-init would re-run it
- sLoadedImages.forEachWithWriteLock(^(uint32_t anIndex, LoadedImage& loadedImage, bool& stop) {
- if ( loadedImage.loadedAddress() == libSystemAddress ) {
- loadedImage.setState(LoadedImage::State::inited);
- stop = true;
}
+ log_initializers("dyld: called initializer %p in %s\n", initFunc, image->path());
});
}
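// On arm64e, the raw initializer address taken from the image is not a callable
// function pointer until it is signed; the __builtin_ptrauth_sign_unauthenticated
// call above stamps it with key 0 and a zero discriminator. A hedged sketch of
// the same guard around an arbitrary raw code address:
#if 0
typedef void (*PlainFunc)(void);
static void callRawAddress(const void* raw)
{
    PlainFunc f = (PlainFunc)raw;
#if __has_feature(ptrauth_calls)
    f = (PlainFunc)__builtin_ptrauth_sign_unauthenticated((void*)f, 0, 0);
#endif
    f();   // an unsigned pointer would fault at the branch on arm64e
}
#endif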
-void AllImages::runInitialzersBottomUp(const mach_header* imageLoadAddress)
+const MachOLoaded* AllImages::dlopen(Diagnostics& diag, const char* path, bool rtldNoLoad, bool rtldLocal, bool rtldNoDelete, bool fromOFI, const void* callerAddress)
{
- launch_cache::Image topImage = findByLoadAddress(imageLoadAddress);
- if ( topImage.isInvalid() )
- return;
-
- // closure contains list of intializers to run in-order
- STACK_ALLOC_DYNARRAY(const launch_cache::BinaryImageGroupData*, currentGroupsCount(), currentGroupsList);
- copyCurrentGroups(currentGroupsList);
- topImage.forEachInitBefore(currentGroupsList, ^(launch_cache::Image imageToInit) {
- // find entry
- __block LoadedImage* foundEntry = nullptr;
- sLoadedImages.forEachWithReadLock(^(uint32_t index, const LoadedImage& entry, bool& stop) {
- if ( entry.image() == imageToInit.binaryData() ) {
- foundEntry = (LoadedImage*)&entry;
- stop = true;
- }
- });
- assert(foundEntry != nullptr);
- pthread_mutex_lock(&_initializerLock);
- // Note, due to the large lock in dlopen, we can't be waiting on another thread
- // here, but its possible that we are in a dlopen which is initialising us again
- if ( foundEntry->state() == LoadedImage::State::beingInited ) {
- log_initializers("dyld: already initializing '%s'\n", imagePath(imageToInit.binaryData()));
- }
- // at this point, the image is either initialized or not
- // if not, initialize it on this thread
- if ( foundEntry->state() == LoadedImage::State::uninited ) {
- foundEntry->setState(LoadedImage::State::beingInited);
- // release initializer lock, so other threads can run initializers
- pthread_mutex_unlock(&_initializerLock);
- // tell objc to run any +load methods in image
- if ( (_objcNotifyInit != nullptr) && imageToInit.mayHavePlusLoads() ) {
- log_notifications("dyld: objc-init-notifier called with mh=%p, path=%s\n", foundEntry->loadedAddress(), imagePath(imageToInit.binaryData()));
- (*_objcNotifyInit)(imagePath(imageToInit.binaryData()), foundEntry->loadedAddress());
+ // quick check if path is in shared cache and already loaded
+ if ( _dyldCacheAddress != nullptr ) {
+ uint32_t dyldCacheImageIndex;
+ if ( _dyldCacheAddress->hasImagePath(path, dyldCacheImageIndex) ) {
+ uint64_t mTime;
+ uint64_t inode;
+ const MachOLoaded* mh = (MachOLoaded*)_dyldCacheAddress->getIndexedImageEntry(dyldCacheImageIndex, mTime, inode);
+ // Note: we do not need the read lock because this is within the global dlopen lock
+ for (const LoadedImage& li : _loadedImages) {
+ if ( li.loadedAddress() == mh ) {
+ return mh;
}
- // run all initializers in image
- imageToInit.forEachInitializer(foundEntry->loadedAddress(), ^(const void* func) {
- Initializer initFunc = (Initializer)func;
- dyld3::kdebug_trace_dyld_duration(DBG_DYLD_TIMING_STATIC_INITIALIZER, (uint64_t)func, 0, ^{
- initFunc(NXArgc, NXArgv, environ, appleParams, _programVars);
- });
- log_initializers("dyld: called initialzer %p in %s\n", initFunc, imageToInit.path());
- });
- // reaquire initializer lock to switch state to inited
- pthread_mutex_lock(&_initializerLock);
- foundEntry->setState(LoadedImage::State::inited);
}
- pthread_mutex_unlock(&_initializerLock);
- });
+ }
+ }
+
+ __block closure::ImageNum callerImageNum = 0;
+ STACK_ALLOC_ARRAY(LoadedImage, loadedList, 1024);
+ for (const LoadedImage& li : _loadedImages) {
+ loadedList.push_back(li);
+ uint8_t permissions;
+ if ( (callerImageNum == 0) && li.image()->containsAddress(callerAddress, li.loadedAddress(), &permissions) ) {
+ callerImageNum = li.image()->imageNum();
+ }
+ //fprintf(stderr, "mh=%p, image=%p, imageNum=0x%04X, path=%s\n", li.loadedAddress(), li.image(), li.image()->imageNum(), li.image()->path());
+ }
+ uintptr_t alreadyLoadedCount = loadedList.count();
+
+ // make closure
+ closure::ImageNum topImageNum = 0;
+ const closure::DlopenClosure* newClosure;
+
+ // First try with closures from the shared cache permitted.
+ // Then try again, forcing a new closure to be built
+ for (bool canUseSharedCacheClosure : { true, false }) {
+ closure::FileSystemPhysical fileSystem;
+ closure::ClosureBuilder::AtPath atPathHandling = (_allowAtPaths ? closure::ClosureBuilder::AtPath::all : closure::ClosureBuilder::AtPath::onlyInRPaths);
+ closure::ClosureBuilder cb(_nextImageNum, fileSystem, _dyldCacheAddress, true, closure::gPathOverrides, atPathHandling);
+ newClosure = cb.makeDlopenClosure(path, _mainClosure, loadedList, callerImageNum, rtldNoLoad, canUseSharedCacheClosure, &topImageNum);
+ if ( newClosure == closure::ClosureBuilder::sRetryDlopenClosure ) {
+ log_apis(" dlopen: closure builder needs to retry: %s\n", path);
+ assert(canUseSharedCacheClosure);
+ continue;
+ }
+ if ( (newClosure == nullptr) && (topImageNum == 0) ) {
+ if ( cb.diagnostics().hasError())
+ diag.error("%s", cb.diagnostics().errorMessage());
+ else if ( !rtldNoLoad )
+ diag.error("dlopen(): file not found: %s", path);
+ return nullptr;
+ }
+ // save off next available ImageNum for use by next call to dlopen()
+ _nextImageNum = cb.nextFreeImageNum();
+ break;
+ }
+
+ if ( newClosure != nullptr ) {
+ // if new closure contains an ImageArray, add it to list
+ if ( const closure::ImageArray* newArray = newClosure->images() ) {
+ appendToImagesArray(newArray);
+ }
+ log_apis(" dlopen: made closure: %p\n", newClosure);
+ }
+
+ // if already loaded, just bump refCount and return
+ if ( (newClosure == nullptr) && (topImageNum != 0) ) {
+ for (LoadedImage& li : _loadedImages) {
+ if ( li.image()->imageNum() == topImageNum ) {
+ // is already loaded
+ const MachOLoaded* topLoadAddress = li.loadedAddress();
+ if ( !li.image()->inDyldCache() )
+ incRefCount(topLoadAddress);
+ log_apis(" dlopen: already loaded as '%s'\n", li.image()->path());
+ // if previously opened with RTLD_LOCAL, but now opened with RTLD_GLOBAL, unhide it
+ if ( !rtldLocal && li.hideFromFlatSearch() )
+ li.setHideFromFlatSearch(false);
+ // if called with RTLD_NODELETE, mark it as never-unload
+ if ( rtldNoDelete )
+ li.markLeaveMapped();
+ return topLoadAddress;
+ }
+ }
+ }
+
+ // run loader to load all new images
+ Loader loader(loadedList, _dyldCacheAddress, imagesArrays(), &dyld3::log_loads, &dyld3::log_segments, &dyld3::log_fixups, &dyld3::log_dofs);
+ const closure::Image* topImage = closure::ImageArray::findImage(imagesArrays(), topImageNum);
+ if ( newClosure == nullptr ) {
+ if ( topImageNum < dyld3::closure::kLastDyldCacheImageNum )
+ log_apis(" dlopen: using image in dyld shared cache %p\n", topImage);
+ else
+ log_apis(" dlopen: using pre-built dlopen closure %p\n", topImage);
+ }
+ uintptr_t topIndex = loadedList.count();
+ LoadedImage topLoadedImage = LoadedImage::make(topImage);
+ if ( rtldLocal && !topImage->inDyldCache() )
+ topLoadedImage.setHideFromFlatSearch(true);
+ if ( rtldNoDelete && !topImage->inDyldCache() )
+ topLoadedImage.markLeaveMapped();
+ loader.addImage(topLoadedImage);
+
+
+ // recursively load all dependents and fill in allImages array
+ loader.completeAllDependents(diag, topIndex);
+ if ( diag.hasError() )
+ return nullptr;
+ loader.mapAndFixupAllImages(diag, _processDOFs, fromOFI, topIndex);
+ if ( diag.hasError() )
+ return nullptr;
+
+ const MachOLoaded* topLoadAddress = loadedList[topIndex].loadedAddress();
+
+ // bump dlopen refcount of image directly loaded
+ if ( !topImage->inDyldCache() )
+ incRefCount(topLoadAddress);
+
+ // tell gAllImages about new images
+ const uint32_t newImageCount = (uint32_t)(loadedList.count() - alreadyLoadedCount);
+ addImages(loadedList.subArray(alreadyLoadedCount, newImageCount));
+
+ // if closure adds images that override dyld cache, patch cache
+ if ( newClosure != nullptr )
+ applyInterposingToDyldCache(newClosure);
+
+ runImageNotifiers(loadedList.subArray(alreadyLoadedCount, newImageCount));
+
+ // run initializers
+ runInitialzersBottomUp(topImage);
+
+ return topLoadAddress;
+}
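// The dlopen() above is, in order: fast-path hits (shared cache image or an
// already-loaded top image, where only the refcount is bumped), closure build or
// reuse, mapping and fixup of the new subtree, cache patching, notifications,
// then bottom-up initializers. A hedged sketch of just the refcount fast path,
// with an illustrative map (assumes <map>; dyld's real refcounts live elsewhere):
#if 0
static std::map<const void*, uintptr_t> sRefCounts;   // loadAddress -> dlopen count
static const void* dlopenFastPath(const void* alreadyLoadedMH)
{
    if ( alreadyLoadedMH != nullptr ) {
        sRefCounts[alreadyLoadedMH] += 1;   // the RTLD handle is just the mach_header
        return alreadyLoadedMH;             // nothing new to map, fix up, or init
    }
    return nullptr;                          // caller falls through to the full load path
}
#endif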
+
+void AllImages::appendToImagesArray(const closure::ImageArray* newArray)
+{
+ _imagesArrays.push_back(newArray);
}
+const Array<const closure::ImageArray*>& AllImages::imagesArrays()
+{
+ return _imagesArrays.array();
+}
+
+bool AllImages::isRestricted() const
+{
+ return !_allowEnvPaths;
+}
+
+
+
} // namespace dyld3