#include <mach-o/ppc/reloc.h>
#endif
+#ifndef S_ATTR_SELF_MODIFYING_CODE
+ #define S_ATTR_SELF_MODIFYING_CODE 0x04000000
+#endif
+
#include "ImageLoaderMachO.h"
#include "mach-o/dyld_gdb.h"
uint32_t ImageLoaderMachO::fgHintedBinaryTreeSearchs = 0;
uint32_t ImageLoaderMachO::fgUnhintedBinaryTreeSearchs = 0;
+uint32_t ImageLoaderMachO::fgCountOfImagesWithWeakExports = 0;
//#define LINKEDIT_USAGE_DEBUG 1
this->parseLoadCmds();
}
+ImageLoaderMachO::~ImageLoaderMachO()
+{
+ // keep count of images with weak exports
+ if ( this->hasCoalescedExports() )
+ --fgCountOfImagesWithWeakExports;
+}
const struct load_command* cmd = cmds;
for (unsigned long i = 0; i < cmd_count; ++i) {
if ( cmd->cmd == LC_SEGMENT_COMMAND ) {
- fSegments.push_back(new SegmentMachO((struct macho_segment_command*)cmd, this, fileData));
+ if ( (((struct macho_segment_command*)cmd)->vmsize != 0) || !fIsSplitSeg )
+ fSegments.push_back(new SegmentMachO((struct macho_segment_command*)cmd, this, fileData));
}
cmd = (const struct load_command*)(((char*)cmd)+cmd->cmdsize);
}
}
}
+ // keep count of images with weak exports
+ if ( this->hasCoalescedExports() )
+ ++fgCountOfImagesWithWeakExports;
+
// walk load commands (mapped in at start of __TEXT segment)
const uint32_t cmd_count = ((macho_header*)fMachOData)->ncmds;
const struct load_command* const cmds = (struct load_command*)&fMachOData[sizeof(macho_header)];
return (const void*)seg->getActualLoadAddress();
}
+uintptr_t ImageLoaderMachO::bindIndirectSymbol(uintptr_t* ptrToBind, const struct macho_section* sect, const char* symbolName, uintptr_t targetAddr, ImageLoader* targetImage, const LinkContext& context)
+{
+ if ( context.verboseBind ) {
+ const char* path = NULL;
+ if ( targetImage != NULL )
+ path = targetImage->getShortName();
+ fprintf(stderr, "dyld: bind: %s:%s$%s = %s:%s, *0x%08lx = 0x%08lx\n",
+ this->getShortName(), symbolName, (((sect->flags & SECTION_TYPE)==S_NON_LAZY_SYMBOL_POINTERS) ? "non_lazy_ptr" : "lazy_ptr"),
+ path, symbolName, (uintptr_t)ptrToBind, targetAddr);
+ }
+ if ( context.bindingHandler != NULL ) {
+ const char* path = NULL;
+ if ( targetImage != NULL )
+ path = targetImage->getShortName();
+ targetAddr = (uintptr_t)context.bindingHandler(path, symbolName, (void *)targetAddr);
+ }
+#if __i386__
+ // i386 has special self-modifying stubs that change from "CALL rel32" to "JMP rel32"
+ if ( ((sect->flags & SECTION_TYPE) == S_SYMBOL_STUBS) && ((sect->flags & S_ATTR_SELF_MODIFYING_CODE) != 0) && (sect->reserved2 == 5) ) {
+ uint8_t* const jmpTableEntryToPatch = (uint8_t*)ptrToBind;
+ uint32_t rel32 = targetAddr - (((uint32_t)ptrToBind)+5);
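+ // note: the rel32 displacement of a JMP/CALL is relative to the end of the 5-byte instruction, hence the +5 above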
+ //fprintf(stderr, "rewriting stub at %p\n", jmpTableEntryToPatch);
+ jmpTableEntryToPatch[0] = 0xE9; // JMP rel32
+ jmpTableEntryToPatch[1] = rel32 & 0xFF;
+ jmpTableEntryToPatch[2] = (rel32 >> 8) & 0xFF;
+ jmpTableEntryToPatch[3] = (rel32 >> 16) & 0xFF;
+ jmpTableEntryToPatch[4] = (rel32 >> 24) & 0xFF;
+ }
+ else
+#endif
+ *ptrToBind = targetAddr;
+ return targetAddr;
+}
+
uintptr_t ImageLoaderMachO::doBindLazySymbol(uintptr_t* lazyPointer, const LinkContext& context)
{
const struct macho_section* const sectionsEnd = &sectionsStart[seg->nsects];
for (const struct macho_section* sect=sectionsStart; sect < sectionsEnd; ++sect) {
const uint8_t type = sect->flags & SECTION_TYPE;
+ uint32_t symbolIndex = INDIRECT_SYMBOL_LOCAL;
if ( type == S_LAZY_SYMBOL_POINTERS ) {
const uint32_t pointerCount = sect->size / sizeof(uintptr_t);
uintptr_t* const symbolPointers = (uintptr_t*)(sect->addr + fSlide);
if ( (lazyPointer >= symbolPointers) && (lazyPointer < &symbolPointers[pointerCount]) ) {
const uint32_t indirectTableOffset = sect->reserved1;
const uint32_t lazyIndex = lazyPointer - symbolPointers;
- uint32_t symbolIndex = indirectTable[indirectTableOffset + lazyIndex];
- if ( symbolIndex != INDIRECT_SYMBOL_ABS && symbolIndex != INDIRECT_SYMBOL_LOCAL ) {
- ImageLoader *image = NULL;
- const char *path = NULL;
- uintptr_t symbolAddr = this->resolveUndefined(context, &fSymbolTable[symbolIndex], twoLevel, &image);
- if ( context.verboseBind ) {
- if(NULL == path && NULL != image) {
- path = image->getShortName();
- }
- fprintf(stderr, "dyld: bind: %s:%s$%s = %s:%s, *0x%08lx = 0x%08lx\n",
- this->getShortName(), &fStrings[fSymbolTable[symbolIndex].n_un.n_strx], "lazy_ptr",
- path, &fStrings[fSymbolTable[symbolIndex].n_un.n_strx], (uintptr_t)&symbolPointers[lazyIndex], symbolAddr);
- }
- if ( NULL != context.bindingHandler ) {
- if(NULL == path && NULL != image) {
- path = image->getPath();
- }
- symbolAddr = (uintptr_t)context.bindingHandler(path, &fStrings[fSymbolTable[symbolIndex].n_un.n_strx], (void *)symbolAddr);
- }
- symbolPointers[lazyIndex] = symbolAddr;
- // update stats
- fgTotalLazyBindFixups++;
- return symbolPointers[lazyIndex];
- }
+ symbolIndex = indirectTable[indirectTableOffset + lazyIndex];
}
}
+ #if __i386__
+ else if ( (type == S_SYMBOL_STUBS) && (sect->flags & S_ATTR_SELF_MODIFYING_CODE) && (sect->reserved2 == 5) ) {
+ // 5-byte stubs on i386 are the new "fast stubs"
+ uint8_t* const jmpTableBase = (uint8_t*)(sect->addr + fSlide);
+ uint8_t* const jmpTableEnd = jmpTableBase + sect->size;
+ // the initial CALL instruction in a jump table entry pushes the address of the next entry, so back up 5 bytes
+ uint8_t* const jmpTableEntryToPatch = ((uint8_t*)lazyPointer) - 5;
+ lazyPointer = (uintptr_t*)jmpTableEntryToPatch;
+ if ( (jmpTableEntryToPatch >= jmpTableBase) && (jmpTableEntryToPatch < jmpTableEnd) ) {
+ const uint32_t indirectTableOffset = sect->reserved1;
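+ // each fast-stub entry is 5 bytes, so the byte offset divided by 5 gives this entry's index into the indirect symbol table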
+ const uint32_t entryIndex = (jmpTableEntryToPatch - jmpTableBase)/5;
+ symbolIndex = indirectTable[indirectTableOffset + entryIndex];
+ }
+ }
+ #endif
+ if ( symbolIndex != INDIRECT_SYMBOL_ABS && symbolIndex != INDIRECT_SYMBOL_LOCAL ) {
+ const char* symbolName = &fStrings[fSymbolTable[symbolIndex].n_un.n_strx];
+ ImageLoader* image = NULL;
+ uintptr_t symbolAddr = this->resolveUndefined(context, &fSymbolTable[symbolIndex], twoLevel, &image);
+ symbolAddr = this->bindIndirectSymbol(lazyPointer, sect, symbolName, symbolAddr, image, context);
+ ++fgTotalLazyBindFixups;
+ return symbolAddr;
+ }
}
}
break;
+
void ImageLoaderMachO::doBindIndirectSymbolPointers(const LinkContext& context, BindingLaziness bindness, bool onlyCoalescedSymbols)
{
// scan for all non-lazy-pointer sections
const struct macho_section* const sectionsEnd = &sectionsStart[seg->nsects];
for (const struct macho_section* sect=sectionsStart; sect < sectionsEnd; ++sect) {
const uint8_t type = sect->flags & SECTION_TYPE;
- const uint32_t pointerCount = sect->size / sizeof(uintptr_t);
+ uint32_t elementSize = sizeof(uintptr_t);
+ uint32_t elementCount = sect->size / elementSize;
if ( type == S_NON_LAZY_SYMBOL_POINTERS ) {
if ( (bindness == kLazyOnly) || (bindness == kLazyOnlyNoDependents) )
continue;
}
else if ( type == S_LAZY_SYMBOL_POINTERS ) {
// process each symbol pointer in this section
- fgTotalPossibleLazyBindFixups += pointerCount;
+ fgTotalPossibleLazyBindFixups += elementCount;
if ( bindness == kNonLazyOnly )
continue;
}
+ #if __i386__
+ else if ( (type == S_SYMBOL_STUBS) && (sect->flags & S_ATTR_SELF_MODIFYING_CODE) && (sect->reserved2 == 5) ) {
+ // process each jmp entry in this section
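+ // fast-stub entries are 5-byte instructions rather than pointer-sized slots, so step through the section 5 bytes at a time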
+ elementCount = sect->size / 5;
+ elementSize = 5;
+ fgTotalPossibleLazyBindFixups += elementCount;
+ if ( bindness == kNonLazyOnly )
+ continue;
+ }
+ #endif
else {
continue;
}
const uint32_t indirectTableOffset = sect->reserved1;
- uintptr_t* const symbolPointers = (uintptr_t*)(sect->addr + fSlide);
- for (uint32_t j=0; j < pointerCount; ++j) {
+ uint8_t* ptrToBind = (uint8_t*)(sect->addr + fSlide);
+ for (uint32_t j=0; j < elementCount; ++j, ptrToBind += elementSize) {
uint32_t symbolIndex = indirectTable[indirectTableOffset + j];
if ( symbolIndex == INDIRECT_SYMBOL_LOCAL) {
- symbolPointers[j] += this->fSlide;
+ *((uintptr_t*)ptrToBind) += this->fSlide;
}
else if ( symbolIndex == INDIRECT_SYMBOL_ABS) {
// do nothing since already has absolute address
continue;
uintptr_t symbolAddr;
symbolAddr = resolveUndefined(context, sym, twoLevel, &image);
- if ( context.verboseBind ) {
- const char *path = NULL;
- if(NULL != image) {
- path = image->getShortName();
- }
- const char *typeName;
- if ( type == S_LAZY_SYMBOL_POINTERS ) {
- typeName = "lazy_ptr";
- }
- else {
- typeName = "non_lazy_ptr";
- }
- fprintf(stderr, "dyld: bind: %s:%s$%s = %s:%s, *0x%08lx = 0x%08lx\n",
- this->getShortName(), &fStrings[sym->n_un.n_strx], typeName,
- path, &fStrings[sym->n_un.n_strx], (uintptr_t)&symbolPointers[j], symbolAddr);
- }
- symbolPointers[j] = symbolAddr;
+
+ // update pointer
+ symbolAddr = this->bindIndirectSymbol((uintptr_t*)ptrToBind, sect, &fStrings[sym->n_un.n_strx], symbolAddr, image, context);
}
}
// update stats
- fgTotalBindFixups += pointerCount;
+ fgTotalBindFixups += elementCount;
}
}
break;
// These are defined in dyldStartup.s
extern "C" void stub_binding_helper();
extern "C" bool dyld_func_lookup(const char* name, uintptr_t* address);
+extern "C" void fast_stub_binding_helper_interface();
-void ImageLoaderMachO::setupLazyPointerHandler()
+void ImageLoaderMachO::setupLazyPointerHandler(const LinkContext& context)
{
if ( fDATAdyld != NULL ) {
struct DATAdyld* dd = (struct DATAdyld*)(fDATAdyld->addr + fSlide);
// save = dd->stubBindHelper;
#endif
}
+#if __i386__
+ if ( ! this->usablePrebinding(context) || !this->usesTwoLevelNameSpace() ) {
+ // reset all "fast" stubs
+ const uint32_t cmd_count = ((macho_header*)fMachOData)->ncmds;
+ const struct load_command* const cmds = (struct load_command*)&fMachOData[sizeof(macho_header)];
+ const struct load_command* cmd = cmds;
+ for (uint32_t i = 0; i < cmd_count; ++i) {
+ switch (cmd->cmd) {
+ case LC_SEGMENT_COMMAND:
+ {
+ const struct macho_segment_command* seg = (struct macho_segment_command*)cmd;
+ const struct macho_section* const sectionsStart = (struct macho_section*)((char*)seg + sizeof(struct macho_segment_command));
+ const struct macho_section* const sectionsEnd = &sectionsStart[seg->nsects];
+ for (const struct macho_section* sect=sectionsStart; sect < sectionsEnd; ++sect) {
+ const uint8_t type = sect->flags & SECTION_TYPE;
+ if ( (type == S_SYMBOL_STUBS) && (sect->flags & S_ATTR_SELF_MODIFYING_CODE) && (sect->reserved2 == 5) ) {
+ // reset each jmp entry in this section
+ uint8_t* start = (uint8_t*)(sect->addr + this->fSlide);
+ uint8_t* end = start + sect->size;
+ uintptr_t dyldHandler = (uintptr_t)&fast_stub_binding_helper_interface;
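+ // rewrite every entry to CALL the dyld helper; the first call through a stub then lands in dyld,
+ // which binds the symbol and rewrites the entry to a direct JMP in bindIndirectSymbol()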
+ for (uint8_t* entry = start; entry < end; entry += 5) {
+ uint32_t rel32 = dyldHandler - (((uint32_t)entry)+5);
+ entry[0] = 0xE8; // CALL rel32
+ entry[1] = rel32 & 0xFF;
+ entry[2] = (rel32 >> 8) & 0xFF;
+ entry[3] = (rel32 >> 16) & 0xFF;
+ entry[4] = (rel32 >> 24) & 0xFF;
+ }
+ }
+ }
+ }
+ }
+ cmd = (const struct load_command*)(((char*)cmd)+cmd->cmdsize);
+ }
+ }
+#endif
}
bool ImageLoaderMachO::usablePrebinding(const LinkContext& context) const
void ImageLoaderMachO::doBind(const LinkContext& context, BindingLaziness bindness)
{
// set dyld entry points in image
- this->setupLazyPointerHandler();
+ this->setupLazyPointerHandler(context);
// if prebound and loaded at prebound address, and all libraries are same as when this was prebound, then no need to bind
// note: flat-namespace binaries need to be imports rebound (even if correctly prebound)
if ( this->usablePrebinding(context) && this->usesTwoLevelNameSpace() ) {
- // if image has coalesced symbols, then these need to be rebound
- if ( this->needsCoalescing() ) {
+ // if image has coalesced symbols, then these need to be rebound, unless this is the only image with weak symbols
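+ // (with only one image exporting weak symbols there is nothing to coalesce against, so the prebound bindings stand)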
+ if ( this->needsCoalescing() && (fgCountOfImagesWithWeakExports > 1) ) {
this->doBindExternalRelocations(context, true);
this->doBindIndirectSymbolPointers(context, kLazyAndNonLazy, true);
}
ImageLoader::printStatistics(imageCount);
fprintf(stderr, "total hinted binary tree searches: %d\n", fgHintedBinaryTreeSearchs);
fprintf(stderr, "total unhinted binary tree searches: %d\n", fgUnhintedBinaryTreeSearchs);
+ fprintf(stderr, "total images with weak exports: %d\n", fgCountOfImagesWithWeakExports);
#if LINKEDIT_USAGE_DEBUG
fprintf(stderr, "linkedit pages accessed (%lu):\n", sLinkEditPageBuckets.size());
void removeImage(ImageLoader* image)
{
- // flush find-by-address cache
- if ( sLastImageByAddressCache == image )
- sLastImageByAddressCache = NULL;
-
// if in termination list, pull it out and run terminator
for (std::vector<ImageLoader*>::iterator it=sImageFilesNeedingTermination.begin(); it != sImageFilesNeedingTermination.end(); it++) {
if ( *it == image ) {
}
}
+ // flush find-by-address cache
+ if ( sLastImageByAddressCache == image )
+ sLastImageByAddressCache = NULL;
+
// if in announcement list, pull it out
for (std::vector<ImageLoader*>::iterator it=sImagesToNotifyAboutOtherImages.begin(); it != sImagesToNotifyAboutOtherImages.end(); it++) {
if ( *it == image ) {
return NULL;
}
+ImageLoader* findImageContainingAddressThreadSafe(const void* addr)
+{
+ // do exhaustive search
+ // todo: consider maintaining a list sorted by address ranges and doing a binary search on that
+ const unsigned int imageCount = sAllImages.size();
+ for(unsigned int i=0; i < imageCount; ++i) {
+ ImageLoader* anImage = sAllImages[i];
+ if ( anImage->containsAddress(addr) ) {
+ return anImage;
+ }
+ }
+ return NULL;
+}
+
void forEachImageDo( void (*callback)(ImageLoader*, void* userData), void* userData)
{
#endif
// lookup and bind lazy pointer and get target address
try {
+ ImageLoader* target;
+ #if __i386__
+ // fast stubs pass NULL for mh and image is instead found via the location of stub (aka lazyPointer)
+ if ( mh == NULL )
+ target = dyld::findImageContainingAddressThreadSafe(lazyPointer);
+ else
+ target = dyld::findImageByMachHeader(mh);
+ #else
// note, target should always be mach-o, because only mach-o lazy handler wired up to this
- ImageLoader* target = dyld::findImageByMachHeader(mh);
+ target = dyld::findImageByMachHeader(mh);
+ #endif
if ( target == NULL )
throw "image not found for lazy pointer";
result = target->doBindLazySymbol(lazyPointer, gLinkContext);
}
+//
+// _pthread_keys is partitioned into a lower part that dyld will use and an upper
+// part that libSystem will use. We set __pthread_tsd_first to 1 as the start of
+// the lower part. Libc will take key #1 and C++ exceptions will take key #2,
+// leaving one free key (#3).
+//
+extern "C" {
+ extern int __pthread_tsd_first;
+}
+
+
//
// Entry point for dyld. The kernel loads dyld and jumps to __dyld_start which
// sets up some registers and calls this function.
uintptr_t
_main(const struct mach_header* mainExecutableMH, int argc, const char* argv[], const char* envp[], const char* apple[])
{
+ // set pthread keys to dyld range
+ __pthread_tsd_first = 1;
+
+ bool isEmulated = checkEmulation();
// Pickup the pointer to the exec path.
sExecPath = apple[0];
+ if (isEmulated) {
+ // under Rosetta
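+ // the guest executable's path appears to be passed as the next NUL-terminated string after apple[0]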
+ sExecPath = strdup(apple[0] + strlen(apple[0]) + 1);
+ }
if ( sExecPath[0] != '/' ) {
// have relative path, use cwd to make absolute
char cwdbuff[MAXPATHLEN];
}
uintptr_t result = 0;
sMainExecutableMachHeader = mainExecutableMH;
- bool isEmulated = checkEmulation();
checkEnvironmentVariables(envp, isEmulated);
if ( sEnv.DYLD_PRINT_OPTS )
printOptions(argv);