#include <mach-o/loader.h>
#include <mach-o/fat.h>
#include <mach-o/reloc.h>
-#include <mach-o/ppc/reloc.h>
#include <mach-o/x86_64/reloc.h>
#include <mach-o/arm/reloc.h>
#include <vector>
virtual cpu_type_t getArchitecture() const = 0;
virtual uint64_t getBaseAddress() const = 0;
virtual uint64_t getVMSize() const = 0;
- virtual void rebase() = 0;
+ virtual void rebase(std::vector<void*>&) = 0;
};
// Concrete overrides of the abstract rebaser interface for architecture A.
virtual cpu_type_t getArchitecture() const;
virtual uint64_t getBaseAddress() const;
virtual uint64_t getVMSize() const;
// rebase() now records the mapped address of every pointer it slides so
// callers can post-process them (filled in by applyRebaseInfo()/doRebase()).
- virtual void rebase();
+ virtual void rebase(std::vector<void*>&);
protected:
typedef typename A::P P;
void calculateRelocBase();
void adjustLoadCommands();
void adjustSymbolTable();
// NOTE(review): "optimzeStubs" is misspelled ("optimize"); declaration and
// definitions agree, so it links — rename file-wide in a follow-up.
+ void optimzeStubs();
+ void makeNoPicStub(uint8_t* stub, pint_t logicalAddress);
void adjustDATA();
void adjustCode();
- void applyRebaseInfo();
+ void applyRebaseInfo(std::vector<void*>& pointersInData);
void adjustExportInfo();
- void doRebase(int segIndex, uint64_t segOffset, uint8_t type);
+ void doRebase(int segIndex, uint64_t segOffset, uint8_t type, std::vector<void*>& pointersInData);
void adjustSegmentLoadCommand(macho_segment_command<P>* seg);
pint_t getSlideForVMAddress(pint_t vmaddress);
// strips arm64 high-nibble bits before any VM-address lookup (see definition)
+ pint_t maskedVMAddress(pint_t vmaddress);
pint_t* mappedAddressForVMAddress(pint_t vmaddress);
pint_t* mappedAddressForRelocAddress(pint_t r_address);
void adjustRelocBaseAddresses();
const macho_dyld_info_command<P>* fDyldInfo;
bool fSplittingSegments;
bool fOrignalVMRelocBaseAddressValid;
// VM address range of the x86_64 __stub_helper section; split-seg entries
// for "push" instructions inside it are ignored (<rdar://problem/8253549>)
+ pint_t fSkipSplitSegInfoStart;
+ pint_t fSkipSplitSegInfoEnd;
};
// Construct a Rebaser over an already-mapped mach-o image described by
// 'layout'. All cached pointers start out null/0; fHeader is taken from the
// first segment's mapped address. (Switch cases are elided in this view.)
template <typename A>
Rebaser<A>::Rebaser(const MachOLayoutAbstraction& layout)
- : fLayout(layout), fOrignalVMRelocBaseAddress(NULL), fLinkEditBase(NULL),
- fSymbolTable(NULL), fDynamicSymbolTable(NULL), fDyldInfo(NULL), fSplittingSegments(false), fOrignalVMRelocBaseAddressValid(false)
+ : fLayout(layout), fOrignalVMRelocBaseAddress(0), fLinkEditBase(0),
+ fSymbolTable(NULL), fDynamicSymbolTable(NULL), fDyldInfo(NULL), fSplittingSegments(false),
+ fOrignalVMRelocBaseAddressValid(false), fSkipSplitSegInfoStart(0), fSkipSplitSegInfoEnd(0)
{
fHeader = (const macho_header<P>*)fLayout.getSegments()[0].mappedAddress();
switch ( fHeader->filetype() ) {
// segments are split only when split-seg info exists AND slides are unequal
fSplittingSegments = layout.hasSplitSegInfo() && this->unequalSlides();
}
// Per-architecture mach-o CPU type for each Rebaser instantiation.
// (The PowerPC specialization was removed along with ppc support; the stray
// diff marker line has been dropped so the file compiles.)
template <> cpu_type_t Rebaser<x86>::getArchitecture() const { return CPU_TYPE_I386; }
template <> cpu_type_t Rebaser<x86_64>::getArchitecture() const { return CPU_TYPE_X86_64; }
template <> cpu_type_t Rebaser<arm>::getArchitecture() const { return CPU_TYPE_ARM; }
template <> cpu_type_t Rebaser<arm64>::getArchitecture() const { return CPU_TYPE_ARM64; }
template <typename A>
bool Rebaser<A>::unequalSlides() const
// Rebase this image for its new load address in the shared cache.
// pointersInData collects the mapped address of every pointer that is slid
// (appended by doRebase() via applyRebaseInfo()).
template <typename A>
-void Rebaser<A>::rebase()
+void Rebaser<A>::rebase(std::vector<void*>& pointersInData)
{
// update writable segments that have internal pointers
if ( fDyldInfo != NULL )
- this->applyRebaseInfo();
+ this->applyRebaseInfo(pointersInData);
else
this->adjustDATA();
// update symbol table
this->adjustSymbolTable();
+ // optimize stubs
+ this->optimzeStubs();
+
// update export info
if ( fDyldInfo != NULL )
this->adjustExportInfo();
case LC_LOAD_DYLIB:
case LC_LOAD_WEAK_DYLIB:
case LC_REEXPORT_DYLIB:
+ case LC_LOAD_UPWARD_DYLIB:
if ( (fHeader->flags() & MH_PREBOUND) != 0 ) {
// clear expected timestamps so that this image will load with invalid prebinding
macho_dylib_command<P>* dylib = (macho_dylib_command<P>*)cmd;
}
}
+template <>
+uint64_t Rebaser<arm64>::maskedVMAddress(pint_t vmaddress)
+{
+ return (vmaddress & 0x0FFFFFFFFFFFFFFF);
+}
+
+template <typename A>
+typename A::P::uint_t Rebaser<A>::maskedVMAddress(pint_t vmaddress)
+{
+ return vmaddress;
+}
// Returns the slide (newAddress - address) of the segment containing
// vmaddress. The address is first masked (arm64 strips its tag nibble).
// A zero-size segment never matches; an address equal to a segment's start
// matches even if it would fail the end-bound test.
// Throws (via throwf) if the address lies in no segment.
template <typename A>
typename A::P::uint_t Rebaser<A>::getSlideForVMAddress(pint_t vmaddress)
{
	pint_t vmaddr = this->maskedVMAddress(vmaddress);
	const std::vector<MachOLayoutAbstraction::Segment>& segments = fLayout.getSegments();
	for (std::vector<MachOLayoutAbstraction::Segment>::const_iterator it = segments.begin(); it != segments.end(); ++it) {
		const MachOLayoutAbstraction::Segment& seg = *it;
		if ( (seg.address() <= vmaddr) && (seg.size() != 0)
		  && ((vmaddr < (seg.address()+seg.size())) || (seg.address() == vmaddr)) ) {
			return seg.newAddress() - seg.address();
		}
	}
	throwf("vm address 0x%08llX not found", (uint64_t)vmaddr);
}
// Translates a VM address into a pointer into the locally-mapped copy of the
// segment that contains it. The address is first masked (arm64 strips its
// tag nibble). Throws (via throwf) if no segment contains the address.
template <typename A>
typename A::P::uint_t* Rebaser<A>::mappedAddressForVMAddress(pint_t vmaddress)
{
	pint_t vmaddr = this->maskedVMAddress(vmaddress);
	const std::vector<MachOLayoutAbstraction::Segment>& segments = fLayout.getSegments();
	for (std::vector<MachOLayoutAbstraction::Segment>::const_iterator it = segments.begin(); it != segments.end(); ++it) {
		const MachOLayoutAbstraction::Segment& seg = *it;
		if ( (seg.address() <= vmaddr) && (vmaddr < (seg.address()+seg.size())) ) {
			// offset within the segment, applied to its mapped base
			return (pint_t*)((vmaddr - seg.address()) + (uint8_t*)seg.mappedAddress());
		}
	}
	throwf("mappedAddressForVMAddress(0x%08llX) not found", (uint64_t)vmaddr);
}
template <typename A>
}
+template <>
+void Rebaser<arm>::makeNoPicStub(uint8_t* stub, pint_t logicalAddress)
+{
+ uint32_t* instructions = (uint32_t*)stub;
+ if ( (LittleEndian::get32(instructions[0]) == 0xE59FC004) &&
+ (LittleEndian::get32(instructions[1]) == 0xE08FC00C) &&
+ (LittleEndian::get32(instructions[2]) == 0xE59CF000) ) {
+ uint32_t lazyPtrAddress = instructions[3] + logicalAddress + 12;
+ LittleEndian::set32(instructions[0], 0xE59FC000); // ldr ip, [pc, #0]
+ LittleEndian::set32(instructions[1], 0xE59CF000); // ldr pc, [ip]
+ LittleEndian::set32(instructions[2], lazyPtrAddress); // .long L_foo$lazy_ptr
+ LittleEndian::set32(instructions[3], 0xE1A00000); // nop
+ }
+ else
+ fprintf(stderr, "unoptimized stub in %s at 0x%08X\n", fLayout.getFilePath(), logicalAddress);
+}
+
+
+#if 0
+// disable this optimization do allow cache to slide
+template <>
+void Rebaser<arm>::optimzeStubs()
+{
+ // convert pic stubs to no-pic stubs in dyld shared cache
+ const macho_load_command<P>* const cmds = (macho_load_command<P>*)((uint8_t*)fHeader + sizeof(macho_header<P>));
+ const uint32_t cmd_count = fHeader->ncmds();
+ const macho_load_command<P>* cmd = cmds;
+ for (uint32_t i = 0; i < cmd_count; ++i) {
+ if ( cmd->cmd() == macho_segment_command<P>::CMD ) {
+ macho_segment_command<P>* seg = (macho_segment_command<P>*)cmd;
+ macho_section<P>* const sectionsStart = (macho_section<P>*)((char*)seg + sizeof(macho_segment_command<P>));
+ macho_section<P>* const sectionsEnd = §ionsStart[seg->nsects()];
+ for(macho_section<P>* sect = sectionsStart; sect < sectionsEnd; ++sect) {
+ if ( (sect->flags() & SECTION_TYPE) == S_SYMBOL_STUBS ) {
+ const uint32_t stubSize = sect->reserved2();
+ // ARM PIC stubs are 4 32-bit instructions long
+ if ( stubSize == 16 ) {
+ uint32_t stubCount = sect->size() / 16;
+ pint_t stubLogicalAddress = sect->addr();
+ uint8_t* stubMappedAddress = (uint8_t*)mappedAddressForNewAddress(stubLogicalAddress);
+ for(uint32_t s=0; s < stubCount; ++s) {
+ makeNoPicStub(stubMappedAddress, stubLogicalAddress);
+ stubLogicalAddress += 16;
+ stubMappedAddress += 16;
+ }
+ }
+ }
+ }
+ }
+ cmd = (const macho_load_command<P>*)(((uint8_t*)cmd)+cmd->cmdsize());
+ }
+}
+#endif
+
+template <typename A>
+void Rebaser<A>::optimzeStubs()
+{
+ // other architectures don't need stubs changed in shared cache
+}
+
template <typename A>
void Rebaser<A>::adjustSymbolTable()
{
// since export info addresses are offsets from mach_header, everything in __TEXT is fine
// only __DATA addresses need to be updated
- const uint8_t* start = &fLinkEditBase[fDyldInfo->export_off()];
+ const uint8_t* start = fLayout.getDyldInfoExports();
const uint8_t* end = &start[fDyldInfo->export_size()];
std::vector<mach_o::trie::Entry> originalExports;
try {
while ( (newExportTrieBytes.size() % sizeof(pint_t)) != 0 )
newExportTrieBytes.push_back(0);
- // copy into place, zero pad
+ // allocate new buffer and set export_off to use new buffer instead
uint32_t newExportsSize = newExportTrieBytes.size();
- if ( newExportsSize > fDyldInfo->export_size() ) {
- // it is possible that the new export trie is larger than the old one
- // for those cases will malloc a block on the side and set up
- // export_off to point to it.
- uint8_t* sideTrie = new uint8_t[newExportsSize];
- memcpy(sideTrie, &newExportTrieBytes[0], newExportsSize);
- //fprintf(stderr, "set_export_off()=%ld, fLinkEditBase=%p, sideTrie=%p\n", (long)(sideTrie - fLinkEditBase), fLinkEditBase, sideTrie);
- // warning, export_off is only 32-bits so if the trie grows it must be allocated with 32-bits of fLinkeditBase
- int64_t offset = sideTrie - fLinkEditBase;
- int32_t offset32 = (int32_t)offset;
- if ( offset != offset32 )
- throw "internal error, new trie allocated to far from fLinkeditBase";
- ((macho_dyld_info_command<P>*)fDyldInfo)->set_export_off(offset32);
- ((macho_dyld_info_command<P>*)fDyldInfo)->set_export_size(newExportsSize);
- }
- else {
- uint8_t* trie = (uint8_t*)&fLinkEditBase[fDyldInfo->export_off()];
- memcpy(trie, &newExportTrieBytes[0], newExportsSize);
- bzero(trie+newExportsSize, fDyldInfo->export_size() - newExportsSize);
- ((macho_dyld_info_command<P>*)fDyldInfo)->set_export_size(newExportsSize);
- }
+ uint8_t* sideTrie = new uint8_t[newExportsSize];
+ memcpy(sideTrie, &newExportTrieBytes[0], newExportsSize);
+ fLayout.setDyldInfoExports(sideTrie);
+ ((macho_dyld_info_command<P>*)fDyldInfo)->set_export_off(0); // invalidate old trie
+ ((macho_dyld_info_command<P>*)fDyldInfo)->set_export_size(newExportsSize);
}
template <typename A>
void Rebaser<A>::doCodeUpdate(uint8_t kind, uint64_t address, int64_t codeToDataDelta, int64_t codeToImportDelta)
{
- //fprintf(stderr, "doCodeUpdate(kind=%d, address=0x%0llX, dataDelta=0x%08llX, importDelta=0x%08llX)\n", kind, address, codeToDataDelta, codeToImportDelta);
+ // begin hack for <rdar://problem/8253549> split seg info wrong for x86_64 stub helpers
+ if ( (fSkipSplitSegInfoStart <= address) && (address < fSkipSplitSegInfoEnd) ) {
+ uint8_t* p = (uint8_t*)mappedAddressForVMAddress(address);
+ // only ignore split seg info for "push" instructions
+ if ( p[-1] == 0x68 )
+ return;
+ }
+ // end hack for <rdar://problem/8253549>
+
+ //fprintf(stderr, "doCodeUpdate(kind=%d, address=0x%0llX, dataDelta=0x%08llX, importDelta=0x%08llX, path=%s)\n",
+ // kind, address, codeToDataDelta, codeToImportDelta, fLayout.getFilePath());
uint32_t* p;
uint32_t instruction;
uint32_t value;
value64 += codeToDataDelta;
A::P::E::set64(*(uint64_t*)p, value64);
break;
- case 3: // used only for ppc, an instruction that sets the hi16 of a register
- // adjust low 16 bits of instruction which contain hi16 of distance to something in DATA
- if ( (codeToDataDelta & 0xFFFF) != 0 )
- throwf("codeToDataDelta=0x%0llX is not a multiple of 64K", codeToDataDelta);
- p = (uint32_t*)mappedAddressForVMAddress(address);
- instruction = BigEndian::get32(*p);
- {
- uint16_t originalLo16 = instruction & 0x0000FFFF;
- uint16_t delta64Ks = codeToDataDelta >> 16;
- instruction = (instruction & 0xFFFF0000) | ((originalLo16+delta64Ks) & 0x0000FFFF);
- }
- BigEndian::set32(*p, instruction);
- break;
case 4: // only used for i386, a reference to something in the IMPORT segment
p = (uint32_t*)mappedAddressForVMAddress(address);
value = A::P::E::get32(*p);
value += codeToImportDelta;
A::P::E::set32(*p, value);
+ break;
+ case 5: // used by thumb2 movw
+ p = (uint32_t*)mappedAddressForVMAddress(address);
+ instruction = A::P::E::get32(*p);
+ // codeToDataDelta is always a multiple of 4096, so only top 4 bits of lo16 will ever need adjusting
+ value = (instruction & 0x0000000F) + (codeToDataDelta >> 12);
+ instruction = (instruction & 0xFFFFFFF0) | (value & 0x0000000F);
+ A::P::E::set32(*p, instruction);
+ break;
+ case 6: // used by ARM movw
+ p = (uint32_t*)mappedAddressForVMAddress(address);
+ instruction = A::P::E::get32(*p);
+ // codeToDataDelta is always a multiple of 4096, so only top 4 bits of lo16 will ever need adjusting
+ value = ((instruction & 0x000F0000) >> 16) + (codeToDataDelta >> 12);
+ instruction = (instruction & 0xFFF0FFFF) | ((value <<16) & 0x000F0000);
+ A::P::E::set32(*p, instruction);
+ break;
+ case 0x10:
+ case 0x11:
+ case 0x12:
+ case 0x13:
+ case 0x14:
+ case 0x15:
+ case 0x16:
+ case 0x17:
+ case 0x18:
+ case 0x19:
+ case 0x1A:
+ case 0x1B:
+ case 0x1C:
+ case 0x1D:
+ case 0x1E:
+ case 0x1F:
+ // used by thumb2 movt (low nibble of kind is high 4-bits of paired movw)
+ {
+ p = (uint32_t*)mappedAddressForVMAddress(address);
+ instruction = A::P::E::get32(*p);
+ // extract 16-bit value from instruction
+ uint32_t i = ((instruction & 0x00000400) >> 10);
+ uint32_t imm4 = (instruction & 0x0000000F);
+ uint32_t imm3 = ((instruction & 0x70000000) >> 28);
+ uint32_t imm8 = ((instruction & 0x00FF0000) >> 16);
+ uint32_t imm16 = (imm4 << 12) | (i << 11) | (imm3 << 8) | imm8;
+ // combine with codeToDataDelta and kind nibble
+ uint32_t targetValue = (imm16 << 16) | ((kind & 0xF) << 12);
+ uint32_t newTargetValue = targetValue + codeToDataDelta;
+ // construct new bits slices
+ uint32_t imm4_ = (newTargetValue & 0xF0000000) >> 28;
+ uint32_t i_ = (newTargetValue & 0x08000000) >> 27;
+ uint32_t imm3_ = (newTargetValue & 0x07000000) >> 24;
+ uint32_t imm8_ = (newTargetValue & 0x00FF0000) >> 16;
+ // update instruction to match codeToDataDelta
+ uint32_t newInstruction = (instruction & 0x8F00FBF0) | imm4_ | (i_ << 10) | (imm3_ << 28) | (imm8_ << 16);
+ A::P::E::set32(*p, newInstruction);
+ }
+ break;
+ case 0x20:
+ case 0x21:
+ case 0x22:
+ case 0x23:
+ case 0x24:
+ case 0x25:
+ case 0x26:
+ case 0x27:
+ case 0x28:
+ case 0x29:
+ case 0x2A:
+ case 0x2B:
+ case 0x2C:
+ case 0x2D:
+ case 0x2E:
+ case 0x2F:
+ // used by arm movt (low nibble of kind is high 4-bits of paired movw)
+ {
+ p = (uint32_t*)mappedAddressForVMAddress(address);
+ instruction = A::P::E::get32(*p);
+ // extract 16-bit value from instruction
+ uint32_t imm4 = ((instruction & 0x000F0000) >> 16);
+ uint32_t imm12 = (instruction & 0x00000FFF);
+ uint32_t imm16 = (imm4 << 12) | imm12;
+ // combine with codeToDataDelta and kind nibble
+ uint32_t targetValue = (imm16 << 16) | ((kind & 0xF) << 12);
+ uint32_t newTargetValue = targetValue + codeToDataDelta;
+ // construct new bits slices
+ uint32_t imm4_ = (newTargetValue & 0xF0000000) >> 28;
+ uint32_t imm12_ = (newTargetValue & 0x0FFF0000) >> 16;
+ // update instruction to match codeToDataDelta
+ uint32_t newInstruction = (instruction & 0xFFF0F000) | (imm4_ << 16) | imm12_;
+ A::P::E::set32(*p, newInstruction);
+ }
+ break;
+ case 3: // used for arm64 ADRP
+ p = (uint32_t*)mappedAddressForVMAddress(address);
+ instruction = A::P::E::get32(*p);
+ if ( (instruction & 0x9F000000) == 0x90000000 ) {
+ // codeToDataDelta is always a multiple of 4096, so only top 4 bits of lo16 will ever need adjusting
+ value64 = ((instruction & 0x60000000) >> 17) | ((instruction & 0x00FFFFE0) << 9);
+ value64 += codeToDataDelta;
+ instruction = (instruction & 0x9F00001F) | ((value64 << 17) & 0x60000000) | ((value64 >> 9) & 0x00FFFFE0);
+ A::P::E::set32(*p, instruction);
+ }
break;
default:
throwf("invalid kind=%d in split seg info", kind);
// get uleb128 compressed runs of code addresses to update
const uint8_t* infoStart = NULL;
const uint8_t* infoEnd = NULL;
+ const macho_segment_command<P>* seg;
const macho_load_command<P>* const cmds = (macho_load_command<P>*)((uint8_t*)fHeader + sizeof(macho_header<P>));
const uint32_t cmd_count = fHeader->ncmds();
const macho_load_command<P>* cmd = cmds;
infoEnd = &infoStart[segInfo->datasize()];
}
break;
+ // begin hack for <rdar://problem/8253549> split seg info wrong for x86_64 stub helpers
+ case macho_segment_command<P>::CMD:
+ seg = (macho_segment_command<P>*)cmd;
+ if ( (getArchitecture() == CPU_TYPE_X86_64) && (strcmp(seg->segname(), "__TEXT") == 0) ) {
+ const macho_section<P>* const sectionsStart = (macho_section<P>*)((char*)seg + sizeof(macho_segment_command<P>));
+ const macho_section<P>* const sectionsEnd = §ionsStart[seg->nsects()];
+ for(const macho_section<P>* sect = sectionsStart; sect < sectionsEnd; ++sect) {
+ if ( strcmp(sect->sectname(), "__stub_helper") == 0 ) {
+ fSkipSplitSegInfoStart = sect->addr();
+ fSkipSplitSegInfoEnd = sect->addr() + sect->size() - 16;
+ }
+ }
+ }
+ break;
+ // end hack for <rdar://problem/8253549> split seg info wrong for x86_64 stub helpers
}
cmd = (const macho_load_command<P>*)(((uint8_t*)cmd)+cmd->cmdsize());
}
const MachOLayoutAbstraction::Segment& dataSeg = *it;
if ( strcmp(dataSeg.name(), "__IMPORT") == 0 )
codeToImportDelta = (dataSeg.newAddress() - codeSeg.newAddress()) - (dataSeg.address() - codeSeg.address());
- else if ( dataSeg.writable() )
+ else if ( dataSeg.writable() ) {
+ if ( (strcmp(dataSeg.name(), "__DATA") != 0) && (strcmp(dataSeg.name(), "__OBJC") != 0) )
+ throwf("only one rw segment named '__DATA' can be used in dylibs placed in the dyld shared cache (%s)", fLayout.getFilePath());
codeToDataDelta = (dataSeg.newAddress() - codeSeg.newAddress()) - (dataSeg.address() - codeSeg.address());
+ }
}
// decompress and call doCodeUpdate() on each address
- for(const uint8_t* p = infoStart; *p != 0;) {
+ for(const uint8_t* p = infoStart; (*p != 0) && (p < infoEnd);) {
uint8_t kind = *p++;
p = this->doCodeUpdateForEachULEB128Address(p, kind, orgBaseAddress, codeToDataDelta, codeToImportDelta);
}
}
// Slide one pointer described by a rebase-info entry.
// segIndex/segOffset locate the pointer; type is a REBASE_TYPE_* constant.
// The mapped address of each rebased location is appended to pointersInData.
// (Local setup of mappedAddr/mappedAddrP/seg is elided in this view.)
template <typename A>
-void Rebaser<A>::doRebase(int segIndex, uint64_t segOffset, uint8_t type)
+void Rebaser<A>::doRebase(int segIndex, uint64_t segOffset, uint8_t type, std::vector<void*>& pointersInData)
{
const std::vector<MachOLayoutAbstraction::Segment>& segments = fLayout.getSegments();
// NOTE(review): looks like an off-by-one — segIndex == segments.size()
// passes this check yet would index one past the end; '>=' seems intended.
// Confirm before changing.
if ( segIndex > segments.size() )
switch ( type ) {
case REBASE_TYPE_POINTER:
valueP= P::getP(*mappedAddrP);
// wrap the slide lookup so a pointer that targets no segment produces a
// diagnostic naming the segment and offset of the bad pointer
- P::setP(*mappedAddrP, valueP + this->getSlideForVMAddress(valueP));
+ try {
+ P::setP(*mappedAddrP, valueP + this->getSlideForVMAddress(valueP));
+ }
+ catch (const char* msg) {
+ throwf("at offset=0x%08llX in seg=%s, pointer cannot be rebased because it does not point to __TEXT or __DATA. %s\n",
+ segOffset, seg.name(), msg);
+ }
break;
case REBASE_TYPE_TEXT_ABSOLUTE32:
default:
throw "bad rebase type";
}
// record where the pointer lives so callers can build slide info
+ pointersInData.push_back(mappedAddr);
}
// Walk the compressed dyld rebase info (REBASE_OPCODE_* stream between
// rebase_off and rebase_off+rebase_size in __LINKEDIT) and slide every
// pointer it describes via doRebase(); each rebased location is appended to
// pointersInData. (Opcode-loop setup and remaining opcodes elided in view.)
template <typename A>
-void Rebaser<A>::applyRebaseInfo()
+void Rebaser<A>::applyRebaseInfo(std::vector<void*>& pointersInData)
{
const uint8_t* p = &fLinkEditBase[fDyldInfo->rebase_off()];
const uint8_t* end = &p[fDyldInfo->rebase_size()];
break;
case REBASE_OPCODE_DO_REBASE_IMM_TIMES:
// rebase 'immediate' consecutive pointers
for (int i=0; i < immediate; ++i) {
- doRebase(segIndex, segOffset, type);
+ doRebase(segIndex, segOffset, type, pointersInData);
segOffset += sizeof(pint_t);
}
break;
case REBASE_OPCODE_DO_REBASE_ULEB_TIMES:
// rebase 'count' (uleb128-encoded) consecutive pointers
count = read_uleb128(p, end);
for (uint32_t i=0; i < count; ++i) {
- doRebase(segIndex, segOffset, type);
+ doRebase(segIndex, segOffset, type, pointersInData);
segOffset += sizeof(pint_t);
}
break;
case REBASE_OPCODE_DO_REBASE_ADD_ADDR_ULEB:
// rebase one pointer, then advance by an extra uleb128-encoded delta
- doRebase(segIndex, segOffset, type);
+ doRebase(segIndex, segOffset, type, pointersInData);
segOffset += read_uleb128(p, end) + sizeof(pint_t);
break;
case REBASE_OPCODE_DO_REBASE_ULEB_TIMES_SKIPPING_ULEB:
// rebase 'count' pointers with 'skip' extra bytes between each
count = read_uleb128(p, end);
skip = read_uleb128(p, end);
for (uint32_t i=0; i < count; ++i) {
- doRebase(segIndex, segOffset, type);
+ doRebase(segIndex, segOffset, type, pointersInData);
segOffset += skip + sizeof(pint_t);
}
break;
}
}
-template <>
-void Rebaser<ppc>::doLocalRelocation(const macho_relocation_info<P>* reloc)
-{
- if ( (reloc->r_address() & R_SCATTERED) == 0 ) {
- if ( reloc->r_type() == GENERIC_RELOC_VANILLA ) {
- pint_t* addr = this->mappedAddressForRelocAddress(reloc->r_address());
- pint_t value = P::getP(*addr);
- P::setP(*addr, value + this->getSlideForVMAddress(value));
- }
- }
- else {
- macho_scattered_relocation_info<P>* sreloc = (macho_scattered_relocation_info<P>*)reloc;
- if ( sreloc->r_type() == PPC_RELOC_PB_LA_PTR ) {
- sreloc->set_r_value( sreloc->r_value() + this->getSlideForVMAddress(sreloc->r_value()) );
- }
- else {
- throw "cannot rebase final linked image with scattered relocations";
- }
- }
-}
-
template <>
void Rebaser<x86>::doLocalRelocation(const macho_relocation_info<P>* reloc)
{
uint32_t fileOffset = OSSwapBigToHostInt32(archs[i].offset);
try {
switch ( OSSwapBigToHostInt32(archs[i].cputype) ) {
- case CPU_TYPE_POWERPC:
- fRebasers.push_back(new Rebaser<ppc>(&p[fileOffset]));
- break;
case CPU_TYPE_I386:
fRebasers.push_back(new Rebaser<x86>(&p[fileOffset]));
break;
}
else {
try {
- if ( (OSSwapBigToHostInt32(mh->magic) == MH_MAGIC) && (OSSwapBigToHostInt32(mh->cputype) == CPU_TYPE_POWERPC)) {
- fRebasers.push_back(new Rebaser<ppc>(mh));
- }
- else if ( (OSSwapLittleToHostInt32(mh->magic) == MH_MAGIC) && (OSSwapLittleToHostInt32(mh->cputype) == CPU_TYPE_I386)) {
+ if ( (OSSwapLittleToHostInt32(mh->magic) == MH_MAGIC) && (OSSwapLittleToHostInt32(mh->cputype) == CPU_TYPE_I386)) {
fRebasers.push_back(new Rebaser<x86>(mh));
}
else if ( (OSSwapLittleToHostInt32(mh->magic) == MH_MAGIC_64) && (OSSwapLittleToHostInt32(mh->cputype) == CPU_TYPE_X86_64)) {