#include "mach-o/dyld_images.h"
// in dyldStartup.s
-extern "C" void fast_stub_binding_helper_interface();
+extern "C" void stub_binding_helper_i386_old();
#if __x86_64__
// for PIE record end of program, to know where to start loading dylibs
if ( slide != 0 )
fgNextPIEDylibAddress = (uintptr_t)image->getEnd();
-
+
+ image->disableCoverageCheck();
image->instantiateFinish(context);
image->setMapped(context);
}
// create image by mapping in a mach-o file
-ImageLoaderMachOClassic* ImageLoaderMachOClassic::instantiateFromFile(const char* path, int fd, const uint8_t* fileData,
+ImageLoaderMachOClassic* ImageLoaderMachOClassic::instantiateFromFile(const char* path, int fd, const uint8_t* fileData, size_t lenFileData,
uint64_t offsetInFat, uint64_t lenInFat, const struct stat& info,
unsigned int segCount, unsigned int libCount,
const struct linkedit_data_command* codeSigCmd, const LinkContext& context)
// record info about file
image->setFileInfo(info.st_dev, info.st_ino, info.st_mtime);
- #if CODESIGNING_SUPPORT
// if this image is code signed, let kernel validate signature before mapping any pages from image
- if ( codeSigCmd != NULL )
- image->loadCodeSignature(codeSigCmd, fd, offsetInFat);
- #endif
+ image->loadCodeSignature(codeSigCmd, fd, offsetInFat, context);
+ // Validate that the first pages we read with pread() actually match the code signature
+ image->validateFirstPages(codeSigCmd, fd, fileData, lenFileData, offsetInFat, context);
+
// mmap segments
image->mapSegmentsClassic(fd, offsetInFat, lenInFat, info.st_size, context);
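// Illustrative sketch (not part of dyld's source): loadCodeSignature() ultimately hands the
// signature blob to the kernel before any pages are mapped. A minimal sketch of that
// registration, assuming the public fcntl(F_ADDFILESIGS) interface and a hypothetical helper
// name; error handling and newer F_ADDFILESIGS_* variants are elided.
#include <fcntl.h>
#include <stdint.h>
#include <mach-o/loader.h>

static int exampleRegisterCodeSignature(int fd, uint64_t offsetInFat,
                                        const struct linkedit_data_command* codeSigCmd)
{
	fsignatures_t siginfo;
	siginfo.fs_file_start = offsetInFat;                           // start of this architecture's slice in the (possibly fat) file
	siginfo.fs_blob_start = (void*)(uintptr_t)codeSigCmd->dataoff; // file offset of the LC_CODE_SIGNATURE blob
	siginfo.fs_blob_size  = codeSigCmd->datasize;                  // size of the blob
	return fcntl(fd, F_ADDFILESIGS, &siginfo);                     // < 0 means the kernel rejected the signature
}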
// remember this is from shared cache and cannot be unloaded
image->fInSharedCache = true;
image->setNeverUnload();
+ image->disableCoverageCheck();
// segments already mapped in cache
if ( context.verboseMapping ) {
// for compatibility, never unload dylibs loaded from memory
image->setNeverUnload();
+ image->disableCoverageCheck();
+
// bundle loads need path copied
if ( moduleName != NULL )
image->setPath(moduleName);
void ImageLoaderMachOClassic::instantiateFinish(const LinkContext& context)
{
// now that segments are mapped in, get real fMachOData, fLinkEditBase, and fSlide
- this->parseLoadCmds();
+ this->parseLoadCmds(context);
}
ImageLoaderMachOClassic::~ImageLoaderMachOClassic()
end += fSymbolTable[fDynamicInfo->iextdefsym+fDynamicInfo->nextdefsym-1].n_un.n_strx;
// round to whole pages
- start = start & (-4096);
- end = (end + 4095) & (-4096);
+ start = dyld_page_trunc(start);
+ end = dyld_page_round(end);
// skip if there is only one page
- if ( (end-start) > 4096 ) {
+ if ( (end-start) > dyld_page_size ) {
madvise((void*)start, end-start, MADV_WILLNEED);
fgTotalBytesPreFetched += (end-start);
if ( context.verboseMapping ) {
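// Illustrative sketch (not part of dyld's source): dyld_page_trunc()/dyld_page_round() are
// plain power-of-two arithmetic. A minimal sketch assuming a 4KB page size (the real value
// comes from dyld's own headers and is larger on some devices):
#include <stdint.h>

static const uintptr_t kExamplePageSize = 4096;  // assumed page size
static uintptr_t examplePageTrunc(uintptr_t addr) { return addr & ~(kExamplePageSize - 1); }                        // round down to page start
static uintptr_t examplePageRound(uintptr_t addr) { return (addr + kExamplePageSize - 1) & ~(kExamplePageSize - 1); } // round up to next page boundary
// e.g. examplePageTrunc(0x1234) == 0x1000 and examplePageRound(0x1234) == 0x2000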
while ( ! foundRoom ) {
foundRoom = true;
for(unsigned int i=0; i < regionCount; ++i) {
- vm_address_t addr = nextAltLoadAddress + regions[i].sfm_address - regions[0].sfm_address;
- vm_size_t size = regions[i].sfm_size ;
+ vm_address_t addr = (vm_address_t)(nextAltLoadAddress + regions[i].sfm_address - regions[0].sfm_address);
+ vm_size_t size = (vm_size_t)regions[i].sfm_size ;
r = vm_allocate(mach_task_self(), &addr, size, false /*only this range*/);
if ( 0 != r ) {
// no room here, deallocate what has succeeded so far
for(unsigned int j=0; j < i; ++j) {
- vm_address_t addr = nextAltLoadAddress + regions[j].sfm_address - regions[0].sfm_address;
- vm_size_t size = regions[j].sfm_size ;
+ addr = (vm_address_t)(nextAltLoadAddress + regions[j].sfm_address - regions[0].sfm_address);
+ size = (vm_size_t)(regions[j].sfm_size);
(void)vm_deallocate(mach_task_self(), addr, size);
}
nextAltLoadAddress += 0x00100000; // skip ahead 1MB and try again
}
// map in each region
- uintptr_t slide = nextAltLoadAddress - regions[0].sfm_address;
+ uintptr_t slide = (uintptr_t)(nextAltLoadAddress - regions[0].sfm_address);
this->setSlide(slide);
for(unsigned int i=0; i < regionCount; ++i) {
if ( ((regions[i].sfm_init_prot & VM_PROT_ZF) != 0) || (regions[i].sfm_size == 0) ) {
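// zero-fill regions need no mmap(); the vm_allocate() above already produced zeroed pages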
}
else {
void* mmapAddress = (void*)(uintptr_t)(regions[i].sfm_address + slide);
- size_t size = regions[i].sfm_size;
+ size_t size = (size_t)regions[i].sfm_size;
int protection = 0;
if ( regions[i].sfm_init_prot & VM_PROT_EXECUTE )
protection |= PROT_EXEC;
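// Illustrative sketch (not part of dyld's source): the conversion from Mach VM protections to
// mmap() protections continues symmetrically for read and write. A minimal sketch of the full
// mapping, using a hypothetical helper name:
#include <mach/vm_prot.h>
#include <sys/mman.h>

static int exampleMmapProtFromVmProt(vm_prot_t initProt)
{
	int protection = 0;
	if ( initProt & VM_PROT_EXECUTE )
		protection |= PROT_EXEC;    // executable pages
	if ( initProt & VM_PROT_READ )
		protection |= PROT_READ;    // readable pages
	if ( initProt & VM_PROT_WRITE )
		protection |= PROT_WRITE;   // writable pages
	return protection;
}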
return true;
if ( context.imageSuffix != NULL ) {
// when DYLD_IMAGE_SUFFIX is used, lastSlash string needs imageSuffix removed from end
- char reexportAndSuffix[strlen(context.imageSuffix)+strlen(exportThruName)+1];
- strcpy(reexportAndSuffix, exportThruName);
- strcat(reexportAndSuffix, context.imageSuffix);
- if ( strcmp(&lastSlash[1], reexportAndSuffix) == 0 )
- return true;
+ for(const char* const* suffix = context.imageSuffix; *suffix != NULL; ++suffix) {
+ char reexportAndSuffix[strlen(*suffix)+strlen(exportThruName)+1];
+ strcpy(reexportAndSuffix, exportThruName);
+ strcat(reexportAndSuffix, *suffix);
+ if ( strcmp(&lastSlash[1], reexportAndSuffix) == 0 )
+ return true;
+ }
}
}
}
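// Illustrative sketch (not part of dyld's source): context.imageSuffix changed from a single
// string to a NULL-terminated list, since DYLD_IMAGE_SUFFIX may now carry more than one
// suffix. A minimal sketch of the compare-with-each-suffix pattern used above, with a
// hypothetical helper name:
#include <string.h>

static bool exampleNameMatchesBasePlusAnySuffix(const char* name, const char* base,
                                                const char* const* suffixes)
{
	for (const char* const* s = suffixes; *s != NULL; ++s) {
		char baseAndSuffix[strlen(base) + strlen(*s) + 1];  // VLA, same idiom as the surrounding code
		strcpy(baseAndSuffix, base);
		strcat(baseAndSuffix, *s);
		if ( strcmp(name, baseAndSuffix) == 0 )
			return true;                                     // e.g. "Foo_debug" matches base "Foo" with suffix "_debug"
	}
	return false;
}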
const char* lastSlash = strrchr(childInstallPath, '/');
if ( lastSlash != NULL ) {
const char* firstDot = strchr(lastSlash, '.');
- int len;
+ size_t len;
if ( firstDot == NULL )
len = strlen(lastSlash);
else
return true;
if ( context.imageSuffix != NULL ) {
// when DYLD_IMAGE_SUFFIX is used, childLeafName string needs imageSuffix removed from end
- char aSubLibNameAndSuffix[strlen(context.imageSuffix)+strlen(aSubLibName)+1];
- strcpy(aSubLibNameAndSuffix, aSubLibName);
- strcat(aSubLibNameAndSuffix, context.imageSuffix);
- if ( strcmp(aSubLibNameAndSuffix, childLeafName) == 0 )
- return true;
+ for(const char* const* suffix = context.imageSuffix; *suffix != NULL; ++suffix) {
+ char aSubLibNameAndSuffix[strlen(*suffix)+strlen(aSubLibName)+1];
+ strcpy(aSubLibNameAndSuffix, aSubLibName);
+ strcat(aSubLibNameAndSuffix, *suffix);
+ if ( strcmp(aSubLibNameAndSuffix, childLeafName) == 0 )
+ return true;
+ }
}
}
break;
return true;
if ( context.imageSuffix != NULL ) {
// when DYLD_IMAGE_SUFFIX is used, lastSlash string needs imageSuffix removed from end
- char umbrellaAndSuffix[strlen(context.imageSuffix)+strlen(aSubUmbrellaName)+1];
- strcpy(umbrellaAndSuffix, aSubUmbrellaName);
- strcat(umbrellaAndSuffix, context.imageSuffix);
- if ( strcmp(umbrellaAndSuffix, &lastSlash[1]) == 0 )
- return true;
+ for(const char* const* suffix = context.imageSuffix; *suffix != NULL; ++suffix) {
+ char umbrellaAndSuffix[strlen(*suffix)+strlen(aSubUmbrellaName)+1];
+ strcpy(umbrellaAndSuffix, aSubUmbrellaName);
+ strcat(umbrellaAndSuffix, *suffix);
+ if ( strcmp(umbrellaAndSuffix, &lastSlash[1]) == 0 )
+ return true;
+ }
}
}
break;
{
// loop through all local (internal) relocation records looking for pre-bound-lazy-pointer values
const uintptr_t relocBase = this->getRelocBase();
- register const uintptr_t slide = this->fSlide;
+ const uintptr_t slide = this->fSlide;
const relocation_info* const relocsStart = (struct relocation_info*)(&fLinkEditBase[fDynamicInfo->locreloff]);
const relocation_info* const relocsEnd = &relocsStart[fDynamicInfo->nlocrel];
for (const relocation_info* reloc=relocsStart; reloc < relocsEnd; ++reloc) {
-void ImageLoaderMachOClassic::rebase(const LinkContext& context)
+void ImageLoaderMachOClassic::rebase(const LinkContext& context, uintptr_t slide)
{
CRSetCrashLogMessage2(this->getPath());
- register const uintptr_t slide = this->fSlide;
const uintptr_t relocBase = this->getRelocBase();
// prefetch any LINKEDIT pages needed
}
-const ImageLoader::Symbol* ImageLoaderMachOClassic::findExportedSymbol(const char* name, const ImageLoader** foundIn) const
+const ImageLoader::Symbol* ImageLoaderMachOClassic::findShallowExportedSymbol(const char* name, const ImageLoader** foundIn) const
{
const struct macho_nlist* sym = NULL;
if ( fDynamicInfo->tocoff == 0 )
}
-uintptr_t ImageLoaderMachOClassic::exportedSymbolAddress(const LinkContext& context, const Symbol* symbol, bool runResolver) const
+uintptr_t ImageLoaderMachOClassic::exportedSymbolAddress(const LinkContext& context, const Symbol* symbol, const ImageLoader* requestor, bool runResolver) const
{
const struct macho_nlist* sym = (macho_nlist*)symbol;
uintptr_t result = sym->n_value + fSlide;
}
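// Illustrative note (not part of dyld's source): n_value is the symbol's address at the
// image's preferred base address, so the runtime address is just the slid value:
//     fSlide         = actualLoadAddress - preferredLoadAddress
//     runtimeAddress = sym->n_value + fSlide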
uintptr_t ImageLoaderMachOClassic::resolveUndefined(const LinkContext& context, const struct macho_nlist* undefinedSymbol,
- bool twoLevel, bool dontCoalesce, const ImageLoader** foundIn)
+ bool twoLevel, bool dontCoalesce, bool runResolver, const ImageLoader** foundIn)
{
++fgTotalBindSymbolsResolved;
const char* symbolName = &fStrings[undefinedSymbol->n_un.n_strx];
}
const Symbol* sym;
if ( context.flatExportFinder(symbolName, &sym, foundIn) ) {
- if ( (*foundIn != this) && !(*foundIn)->neverUnload() )
- this->addDynamicReference(*foundIn);
+ if ( *foundIn != this )
+ context.addDynamicReference(this, const_cast<ImageLoader*>(*foundIn));
return (*foundIn)->getExportedSymbolAddress(sym, context, this);
}
// if a bundle is loaded privately the above will not find its exports
if ( this->isBundle() && this->hasHiddenExports() ) {
// look in self for needed symbol
- sym = this->findExportedSymbol(symbolName, foundIn);
+ sym = this->findShallowExportedSymbol(symbolName, foundIn);
if ( sym != NULL )
return (*foundIn)->getExportedSymbolAddress(sym, context, this);
}
// if reference is weak_import, then it is ok, just return 0
return 0;
}
- throwSymbolNotFound(context, symbolName, this->getPath(), "flat namespace");
+ throwSymbolNotFound(context, symbolName, this->getPath(), "", "flat namespace");
}
else {
// symbol requires searching images with coalesced symbols (not done during prebinding)
if ( !context.prebinding && !dontCoalesce && (symbolIsWeakReference(undefinedSymbol) || symbolIsWeakDefinition(undefinedSymbol)) ) {
const Symbol* sym;
- if ( context.coalescedExportFinder(symbolName, &sym, foundIn) ) {
- if ( (*foundIn != this) && !(*foundIn)->neverUnload() )
- this->addDynamicReference(*foundIn);
+ if ( context.coalescedExportFinder(symbolName, &sym, foundIn, nullptr) ) {
+ if ( *foundIn != this )
+ context.addDynamicReference(this, const_cast<ImageLoader*>(*foundIn));
return (*foundIn)->getExportedSymbolAddress(sym, context, this);
}
//throwSymbolNotFound(context, symbolName, this->getPath(), "coalesced namespace");
if ( context.flatExportFinder(symbolName, &sym, foundIn) )
return (*foundIn)->getExportedSymbolAddress(sym, context, this);
- throwSymbolNotFound(context, symbolName, this->getPath(), "dynamic lookup");
+ throwSymbolNotFound(context, symbolName, this->getPath(), "", "dynamic lookup");
}
else if ( ord <= libraryCount() ) {
target = libImage(ord-1);
//dyld::log("resolveUndefined(%s) in %s\n", symbolName, this->getPath());
throw "symbol not found";
}
-
- const Symbol* sym = target->findExportedSymbol(symbolName, true, foundIn);
- if ( sym!= NULL ) {
- return (*foundIn)->getExportedSymbolAddress(sym, context, this);
- }
- else if ( (undefinedSymbol->n_type & N_PEXT) != 0 ) {
+
+ uintptr_t address;
+ if ( target->findExportedSymbolAddress(context, symbolName, this, ord, runResolver, foundIn, &address) )
+ return address;
+
+ if ( (undefinedSymbol->n_type & N_PEXT) != 0 ) {
// don't know why the static linker did not eliminate the internal reference to a private extern definition
*foundIn = this;
return this->getSymbolAddress(undefinedSymbol, context, false);
}
// nowhere to be found
- throwSymbolNotFound(context, symbolName, this->getPath(), target->getPath());
+ throwSymbolNotFound(context, symbolName, this->getPath(), "", target->getPath());
}
}
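// Illustrative sketch (not part of dyld's source): in a two-level namespace the undefined
// symbol's n_desc encodes which loaded dylib should supply the definition. A minimal sketch of
// extracting that ordinal with the standard Mach-O macro; the special ordinals
// (SELF_LIBRARY_ORDINAL, DYNAMIC_LOOKUP_ORDINAL, EXECUTABLE_ORDINAL) are handled by the
// branches above rather than by indexing libImage():
#include <mach-o/nlist.h>
#include <mach-o/loader.h>

static uint8_t exampleLibraryOrdinal(const struct nlist_64* undefinedSymbol)
{
	// 1-based index into the image's LC_LOAD_DYLIB/LC_LOAD_WEAK_DYLIB commands
	return GET_LIBRARY_ORDINAL(undefinedSymbol->n_desc);
}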
// range of global symbols. To handle that case we do the coalescing now.
dontCoalesce = false;
}
- symbolAddr = this->resolveUndefined(context, undefinedSymbol, twoLevel, dontCoalesce, &image);
+ symbolAddr = this->resolveUndefined(context, undefinedSymbol, twoLevel, dontCoalesce, false, &image);
lastUndefinedSymbol = undefinedSymbol;
symbolAddrCached = false;
}
const uint8_t type = sect->flags & SECTION_TYPE;
uint32_t symbolIndex = INDIRECT_SYMBOL_LOCAL;
if ( type == S_LAZY_SYMBOL_POINTERS ) {
- const uint32_t pointerCount = sect->size / sizeof(uintptr_t);
+ const size_t pointerCount = sect->size / sizeof(uintptr_t);
uintptr_t* const symbolPointers = (uintptr_t*)(sect->addr + fSlide);
if ( (lazyPointer >= symbolPointers) && (lazyPointer < &symbolPointers[pointerCount]) ) {
const uint32_t indirectTableOffset = sect->reserved1;
- const uint32_t lazyIndex = lazyPointer - symbolPointers;
+ const size_t lazyIndex = lazyPointer - symbolPointers;
symbolIndex = indirectTable[indirectTableOffset + lazyIndex];
}
}
if ( symbolIndex != INDIRECT_SYMBOL_ABS && symbolIndex != INDIRECT_SYMBOL_LOCAL ) {
const char* symbolName = &fStrings[fSymbolTable[symbolIndex].n_un.n_strx];
const ImageLoader* image = NULL;
- uintptr_t symbolAddr = this->resolveUndefined(context, &fSymbolTable[symbolIndex], twoLevel, false, &image);
+ uintptr_t symbolAddr = this->resolveUndefined(context, &fSymbolTable[symbolIndex], twoLevel, false, true, &image);
symbolAddr = this->bindIndirectSymbol(lazyPointer, sect, symbolName, symbolAddr, image, context);
++fgTotalLazyBindFixups;
return symbolAddr;
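// Illustrative sketch (not part of dyld's source): each pointer slot in a
// S_LAZY_SYMBOL_POINTERS (or S_NON_LAZY_SYMBOL_POINTERS) section has a parallel entry in the
// indirect symbol table, starting at the section's reserved1 index. A minimal sketch of the
// slot-to-symbol-table-index mapping used above, with hypothetical parameter names:
#include <mach-o/loader.h>
#include <stdint.h>

static uint32_t exampleSymbolIndexForPointerSlot(const uint32_t* indirectTable,
                                                 const struct section_64* sect,
                                                 const uintptr_t* sectionStart,
                                                 const uintptr_t* pointerSlot)
{
	size_t slot = (size_t)(pointerSlot - sectionStart);   // which pointer within the section
	return indirectTable[sect->reserved1 + slot];         // index into the nlist symbol table
}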
-void ImageLoaderMachOClassic::initializeCoalIterator(CoalIterator& it, unsigned int loadOrder)
+void ImageLoaderMachOClassic::initializeCoalIterator(CoalIterator& it, unsigned int loadOrder, unsigned)
{
it.image = this;
it.symbolName = " ";
symbol_index = toc[it.curIndex-1].symbol_index;
}
else {
- symbol_index = fDynamicInfo->iextdefsym+it.curIndex-1;
+ symbol_index = fDynamicInfo->iextdefsym + (uint32_t)it.curIndex - 1;
}
const struct macho_nlist* sym = &fSymbolTable[symbol_index];
//dyld::log("getAddressCoalIterator() => 0x%llX, %s symbol_index=%d, in %s\n", (uint64_t)(sym->n_value + fSlide), &fStrings[sym->n_un.n_strx], symbol_index, this->getPath());
}
-void ImageLoaderMachOClassic::updateUsesCoalIterator(CoalIterator& it, uintptr_t value, ImageLoader* targetImage, const LinkContext& context)
+void ImageLoaderMachOClassic::updateUsesCoalIterator(CoalIterator& it, uintptr_t value, ImageLoader* targetImage, unsigned targetIndex, const LinkContext& context)
{
// flat_namespace images with classic LINKEDIT do not need late coalescing.
// They still need to be iterated because they may implement
symbol_index = toc[it.curIndex-1].symbol_index;
}
else {
- symbol_index = fDynamicInfo->iextdefsym+it.curIndex-1;
+ symbol_index = fDynamicInfo->iextdefsym + (uint32_t)it.curIndex - 1;
}
// if this image's copy of the symbol is neither a weak definition nor a weak reference, there is nothing to coalesce here
if ( reloc->r_pcrel )
type = BIND_TYPE_TEXT_PCREL32;
#endif
- this->bindLocation(context, (uintptr_t)location, value, targetImage, type, symbolName, addend, "weak ");
+ this->bindLocation(context, this->imageBaseAddress(), (uintptr_t)location, value, type, symbolName, addend, this->getPath(), targetImage ? targetImage->getPath() : NULL, "weak ", NULL, fSlide);
boundSomething = true;
}
}
case S_NON_LAZY_SYMBOL_POINTERS:
case S_LAZY_SYMBOL_POINTERS:
{
- uint32_t elementCount = sect->size / elementSize;
+ size_t elementCount = sect->size / elementSize;
const uint32_t indirectTableOffset = sect->reserved1;
uint8_t* ptrToBind = (uint8_t*)(sect->addr + fSlide);
//dyld::log(" scanning section %s of %s starting at %p\n", sect->sectname, this->getShortName(), ptrToBind);
- for (uint32_t j=0; j < elementCount; ++j, ptrToBind += elementSize) {
+ for (size_t j=0; j < elementCount; ++j, ptrToBind += elementSize) {
if ( indirectTable[indirectTableOffset + j] == symbol_index ) {
//dyld::log(" found symbol index match at %d/%d, ptrToBind=%p\n", j, elementCount, ptrToBind);
// update pointer
}
cmd = (const struct load_command*)(((char*)cmd)+cmd->cmdsize);
}
- if ( boundSomething && (targetImage != this) && !targetImage->neverUnload() )
- this->addDynamicReference(targetImage);
+ if ( boundSomething && (targetImage != this) ) {
+ context.addDynamicReference(this, targetImage);
+ }
// mark that this symbol has already been bound, so we don't try to bind again
it.type = 1;
bool isLazySymbol = false;
const uint8_t type = sect->flags & SECTION_TYPE;
uint32_t elementSize = sizeof(uintptr_t);
- uint32_t elementCount = sect->size / elementSize;
+ size_t elementCount = sect->size / elementSize;
if ( type == S_NON_LAZY_SYMBOL_POINTERS ) {
if ( ! bindNonLazys )
continue;
}
const uint32_t indirectTableOffset = sect->reserved1;
uint8_t* ptrToBind = (uint8_t*)(sect->addr + fSlide);
- for (uint32_t j=0; j < elementCount; ++j, ptrToBind += elementSize) {
+ for (size_t j=0; j < elementCount; ++j, ptrToBind += elementSize) {
#if LINKEDIT_USAGE_DEBUG
noteAccessedLinkEditAddress(&indirectTable[indirectTableOffset + j]);
#endif
// range of global symbols. To handle that case we do the coalescing now.
dontCoalesce = false;
}
- uintptr_t symbolAddr = resolveUndefined(context, sym, twoLevel, dontCoalesce, &image);
+ uintptr_t symbolAddr = resolveUndefined(context, sym, twoLevel, dontCoalesce, false, &image);
// update pointer
symbolAddr = this->bindIndirectSymbol((uintptr_t*)ptrToBind, sect, &fStrings[sym->n_un.n_strx], symbolAddr, image, context);
// update stats
const uint32_t* const indirectTable = (uint32_t*)&fLinkEditBase[fDynamicInfo->indirectsymoff];
uint8_t* start = (uint8_t*)(sect->addr + this->fSlide);
uint8_t* end = start + sect->size;
- uintptr_t dyldHandler = (uintptr_t)&fast_stub_binding_helper_interface;
+ uintptr_t dyldHandler = (uintptr_t)&stub_binding_helper_i386_old;
uint32_t entryIndex = 0;
for (uint8_t* entry = start; entry < end; entry += 5, ++entryIndex) {
bool installLazyHandler = true;
const char* symbolName = &fStrings[fSymbolTable[symbolIndex].n_un.n_strx];
const ImageLoader* image = NULL;
try {
- uintptr_t symbolAddr = this->resolveUndefined(context, &fSymbolTable[symbolIndex], this->usesTwoLevelNameSpace(), false, &image);
+ uintptr_t symbolAddr = this->resolveUndefined(context, &fSymbolTable[symbolIndex], this->usesTwoLevelNameSpace(), false, false, &image);
symbolAddr = this->bindIndirectSymbol((uintptr_t*)entry, sect, symbolName, symbolAddr, image, context);
++fgTotalBindFixups;
uint32_t rel32 = symbolAddr - (((uint32_t)entry)+5);
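// Illustrative sketch (not part of dyld's source): a classic i386 fast stub is five bytes;
// once the target is known dyld rewrites it into a direct JMP rel32, where the displacement is
// measured from the end of the instruction. A minimal sketch of forming that jump, assuming
// `entry` points at a writable 5-byte stub:
#include <stdint.h>

static void examplePatchStubToDirectJmp(uint8_t* entry, uint32_t targetAddr)
{
	uint32_t rel32 = targetAddr - ((uint32_t)(uintptr_t)entry + 5); // displacement relative to the end of the 5-byte JMP
	entry[0] = 0xE9;                                                // JMP rel32 opcode
	*((uint32_t*)&entry[1]) = rel32;                                // assume unaligned store of uint32_t is ok (as above)
}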
for (const struct macho_section* sect=sectionsStart; sect < sectionsEnd; ++sect) {
const uint8_t type = sect->flags & SECTION_TYPE;
if ( (type == S_NON_LAZY_SYMBOL_POINTERS) || (type == S_LAZY_SYMBOL_POINTERS) ) {
- const uint32_t pointerCount = sect->size / sizeof(uintptr_t);
+ const size_t pointerCount = sect->size / sizeof(uintptr_t);
uintptr_t* const symbolPointers = (uintptr_t*)(sect->addr + fSlide);
- for (uint32_t pointerIndex=0; pointerIndex < pointerCount; ++pointerIndex) {
- for (std::vector<InterposeTuple>::iterator it=fgInterposingTuples.begin(); it != fgInterposingTuples.end(); it++) {
- // replace all references to 'replacee' with 'replacement'
- if ( (symbolPointers[pointerIndex] == it->replacee) && (this != it->replacementImage) ) {
- if ( context.verboseInterposing ) {
- dyld::log("dyld: interposing: at %p replace 0x%lX with 0x%lX in %s\n",
- &symbolPointers[pointerIndex], it->replacee, it->replacement, this->getPath());
- }
- symbolPointers[pointerIndex] = it->replacement;
- }
- }
+ for (size_t pointerIndex=0; pointerIndex < pointerCount; ++pointerIndex) {
+ uintptr_t newValue = interposedAddress(context, symbolPointers[pointerIndex], this);
+ if ( newValue != symbolPointers[pointerIndex] )
+ symbolPointers[pointerIndex] = newValue;
}
}
#if __i386__
if ( entry[0] == 0xE9 ) { // 0xE9 == JMP
uint32_t rel32 = *((uint32_t*)&entry[1]); // assume unaligned load of uint32_t is ok
uint32_t target = (uint32_t)&entry[5] + rel32;
- for (std::vector<InterposeTuple>::iterator it=fgInterposingTuples.begin(); it != fgInterposingTuples.end(); it++) {
- // replace all references to 'replacee' with 'replacement'
- if ( (it->replacee == target) && (this != it->replacementImage) ) {
- if ( context.verboseInterposing ) {
- dyld::log("dyld: interposing: at %p replace JMP 0x%lX with JMP 0x%lX in %s\n",
- &entry[1], it->replacee, it->replacement, this->getPath());
- }
- uint32_t newRel32 = it->replacement - (uint32_t)&entry[5];
- *((uint32_t*)&entry[1]) = newRel32; // assume unaligned store of uint32_t is ok
- }
+ uint32_t newTarget = interposedAddress(context, target, this);
+ if ( newTarget != target ) {
+ uint32_t newRel32 = newTarget - (uint32_t)&entry[5];
+ *((uint32_t*)&entry[1]) = newRel32; // assume unaligned store of uint32_t is ok
}
}
}
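// Illustrative sketch (not part of dyld's source): the per-site tuple loops above were folded
// into a single lookup helper. A minimal sketch of what such a helper does over a hypothetical
// tuple array (the real one also ignores tuples whose replacement lives in the image currently
// being fixed up):
#include <stdint.h>
#include <stddef.h>

struct ExampleInterposeTuple { uintptr_t replacee; uintptr_t replacement; };

static uintptr_t exampleInterposedAddress(uintptr_t address,
                                          const ExampleInterposeTuple* tuples, size_t count)
{
	for (size_t i = 0; i < count; ++i) {
		if ( tuples[i].replacee == address )
			return tuples[i].replacement;   // redirect references to the replacee
	}
	return address;                          // leave unrelated addresses untouched
}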
case POINTER_RELOC:
{
uintptr_t* location = ((uintptr_t*)(reloc->r_address + relocBase));
- for (std::vector<InterposeTuple>::iterator it=fgInterposingTuples.begin(); it != fgInterposingTuples.end(); it++) {
+ uintptr_t value = *location;
+ uintptr_t newValue = interposedAddress(context, value, this);
+ if ( newValue != value )
+ *location = newValue;
+ }
+ break;
+ }
+ }
+ }
+}
+
+void ImageLoaderMachOClassic::dynamicInterpose(const LinkContext& context)
+{
+ if ( context.verboseInterposing )
+ dyld::log("dyld: dynamic interposing %lu tuples onto image: %s\n", context.dynamicInterposeCount, this->getPath());
+
+ // scan indirect symbols
+ const uint32_t cmd_count = ((macho_header*)fMachOData)->ncmds;
+ const struct load_command* const cmds = (struct load_command*)&fMachOData[sizeof(macho_header)];
+ const struct load_command* cmd = cmds;
+ for (uint32_t i = 0; i < cmd_count; ++i) {
+ switch (cmd->cmd) {
+ case LC_SEGMENT_COMMAND:
+ {
+ const struct macho_segment_command* seg = (struct macho_segment_command*)cmd;
+ const struct macho_section* const sectionsStart = (struct macho_section*)((char*)seg + sizeof(struct macho_segment_command));
+ const struct macho_section* const sectionsEnd = &sectionsStart[seg->nsects];
+ for (const struct macho_section* sect=sectionsStart; sect < sectionsEnd; ++sect) {
+ const uint8_t type = sect->flags & SECTION_TYPE;
+ if ( (type == S_NON_LAZY_SYMBOL_POINTERS) || (type == S_LAZY_SYMBOL_POINTERS) ) {
+ const size_t pointerCount = sect->size / sizeof(uintptr_t);
+ uintptr_t* const symbolPointers = (uintptr_t*)(sect->addr + fSlide);
+ for (size_t pointerIndex=0; pointerIndex < pointerCount; ++pointerIndex) {
+ for(size_t j=0; j < context.dynamicInterposeCount; ++j) {
+ // replace all references to 'replacee' with 'replacement'
+ if ( symbolPointers[pointerIndex] == (uintptr_t)context.dynamicInterposeArray[j].replacee ) {
+ if ( context.verboseInterposing ) {
+ dyld::log("dyld: dynamic interposing: at %p replace %p with %p in %s\n",
+ &symbolPointers[pointerIndex], context.dynamicInterposeArray[j].replacee, context.dynamicInterposeArray[j].replacement, this->getPath());
+ }
+ symbolPointers[pointerIndex] = (uintptr_t)context.dynamicInterposeArray[j].replacement;
+ }
+ }
+ }
+ }
+ }
+ }
+ break;
+ }
+ cmd = (const struct load_command*)(((char*)cmd)+cmd->cmdsize);
+ }
+
+ // scan external relocations
+ const uintptr_t relocBase = this->getRelocBase();
+ const relocation_info* const relocsStart = (struct relocation_info*)(&fLinkEditBase[fDynamicInfo->extreloff]);
+ const relocation_info* const relocsEnd = &relocsStart[fDynamicInfo->nextrel];
+ for (const relocation_info* reloc=relocsStart; reloc < relocsEnd; ++reloc) {
+ if (reloc->r_length == RELOC_SIZE) {
+ switch(reloc->r_type) {
+ case POINTER_RELOC:
+ {
+ uintptr_t* location = ((uintptr_t*)(reloc->r_address + relocBase));
+ for(size_t i=0; i < context.dynamicInterposeCount; ++i) {
// replace all references to 'replacee' with 'replacement'
- if ( (*location == it->replacee) && (this != it->replacementImage) ) {
+ if ( *location == (uintptr_t)context.dynamicInterposeArray[i].replacee ) {
if ( context.verboseInterposing ) {
- dyld::log("dyld: interposing: at %p replace 0x%lX with 0x%lX in %s\n",
- location, it->replacee, it->replacement, this->getPath());
+ dyld::log("dyld: dynamic interposing: at %p replace %p with %p in %s\n",
+ location, context.dynamicInterposeArray[i].replacee, context.dynamicInterposeArray[i].replacement, this->getPath());
}
- *location = it->replacement;
+ *location = (uintptr_t)context.dynamicInterposeArray[i].replacement;
}
}
}
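// Illustrative sketch (not part of dyld's source): dynamicInterpose() is the per-image worker
// behind dyld's dynamic interposing SPI. A hedged usage sketch, assuming the
// dyld_dynamic_interpose() entry point and tuple layout (replacement first, replacee second)
// declared in the private <mach-o/dyld_priv.h> header; my_getpid is a hypothetical
// replacement:
#include <mach-o/dyld.h>
#include <mach-o/dyld_priv.h>
#include <unistd.h>

static pid_t my_getpid(void)
{
	return 1;   // stand-in value; deliberately does not call getpid(), since that pointer may itself be rewritten
}

static void exampleInstallDynamicInterpose(void)
{
	struct dyld_interpose_tuple tuple = { (const void*)&my_getpid, (const void*)&getpid };
	// rewrite getpid references inside a single image (here: image 0, typically the main executable)
	dyld_dynamic_interpose(_dyld_get_image_header(0), &tuple, 1);
}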