uint32_t flags;
};
-#define OBJC_IMAGE_IS_REPLACEMENT (1<<0)
#define OBJC_IMAGE_SUPPORTS_GC (1<<1)
#define OBJC_IMAGE_REQUIRES_GC (1<<2)
#define OBJC_IMAGE_OPTIMIZED_BY_DYLD (1<<3)
#define OBJC_IMAGE_SUPPORTS_COMPACTION (1<<4)
+#define OBJC_IMAGE_IS_SIMULATED (1<<5)
class ObjCImageInfoAtom : public ld::Atom {
public:
ObjCImageInfoAtom(ld::File::ObjcConstraint objcConstraint,
- bool compaction, bool objcReplacementClasses, bool abi2);
+ bool compaction, bool abi2);
virtual const ld::File* file() const { return NULL; }
- virtual bool translationUnitSource(const char** dir, const char**) const
- { return false; }
virtual const char* name() const { return "objc image info"; }
virtual uint64_t size() const { return sizeof(objc_image_info); }
virtual uint64_t objectAddress() const { return 0; }
template <typename A>
ObjCImageInfoAtom<A>::ObjCImageInfoAtom(ld::File::ObjcConstraint objcConstraint, bool compaction,
- bool objcReplacementClasses, bool abi2)
+ bool abi2)
: ld::Atom(abi2 ? _s_sectionABI2 : _s_sectionABI1, ld::Atom::definitionRegular, ld::Atom::combineNever,
ld::Atom::scopeLinkageUnit, ld::Atom::typeUnclassified,
symbolTableNotIn, false, false, false, ld::Atom::Alignment(2))
{
uint32_t value = 0;
- if ( objcReplacementClasses )
- value = OBJC_IMAGE_IS_REPLACEMENT;
switch ( objcConstraint ) {
case ld::File::objcConstraintNone:
case ld::File::objcConstraintRetainRelease:
if ( compaction )
value |= OBJC_IMAGE_SUPPORTS_COMPACTION;
break;
+ case ld::File::objcConstraintRetainReleaseForSimulator:
+ value |= OBJC_IMAGE_IS_SIMULATED;
+ break;
}
_content.version = 0;
std::set<const ld::Atom*>& deadAtoms);
virtual const ld::File* file() const { return _file; }
- virtual bool translationUnitSource(const char** dir, const char**) const
- { return false; }
virtual const char* name() const { return "objc merged method list"; }
virtual uint64_t size() const { return _methodCount*3*sizeof(pint_t) + 8; }
virtual uint64_t objectAddress() const { return 0; }
virtual void setScope(Scope) { }
virtual void copyRawContent(uint8_t buffer[]) const {
bzero(buffer, size());
- A::P::E::set32(*((uint32_t*)(&buffer[0])), 24);
+ A::P::E::set32(*((uint32_t*)(&buffer[0])), 3*sizeof(pint_t)); // entry size
A::P::E::set32(*((uint32_t*)(&buffer[4])), _methodCount);
}
virtual ld::Fixup::iterator fixupsBegin() const { return (ld::Fixup*)&_fixups[0]; }
std::set<const ld::Atom*>& deadAtoms);
virtual const ld::File* file() const { return _file; }
- virtual bool translationUnitSource(const char** dir, const char**) const
- { return false; }
virtual const char* name() const { return "objc merged protocol list"; }
virtual uint64_t size() const { return (_protocolCount+1)*sizeof(pint_t); }
virtual uint64_t objectAddress() const { return 0; }
std::set<const ld::Atom*>& deadAtoms);
virtual const ld::File* file() const { return _file; }
- virtual bool translationUnitSource(const char** dir, const char**) const
- { return false; }
virtual const char* name() const { return "objc merged property list"; }
virtual uint64_t size() const { return _propertyCount*2*sizeof(pint_t) + 8; }
virtual uint64_t objectAddress() const { return 0; }
// overrides of ld::Atom
virtual const ld::File* file() const { return _atom->file(); }
- virtual bool translationUnitSource(const char** dir, const char** nm) const
- { return _atom->translationUnitSource(dir, nm); }
virtual const char* name() const { return _atom->name(); }
virtual uint64_t size() const { return _atom->size(); }
virtual uint64_t objectAddress() const { return _atom->objectAddress(); }
const std::set<const ld::Atom*>& _dead;
};
+	struct AtomSorter
+	{
+		// Strict-weak-ordering comparator for std::sort: orders atoms by
+		// file ordinal, then object address, then zero size, then symbol name.
+		// Only file based atoms are supported (file() != NULL).
+		bool operator()(const Atom* left, const Atom* right)
+		{
+			// an atom never compares less than itself (irreflexivity)
+			if (left==right) return false;
+			const File *leftf = left->file();
+			const File *rightf = right->file();
+
+			if (leftf == rightf) {
+				if (left->objectAddress() != right->objectAddress()) {
+					return left->objectAddress() < right->objectAddress();
+				} else {
+					// for atoms in the same file with the same address, zero sized
+					// atoms must sort before nonzero sized atoms
+					if ((left->size() == 0 && right->size() > 0) || (left->size() > 0 && right->size() == 0))
+						return left->size() < right->size();
+					// FIX: strcmp() returns any negative/zero/positive int; using it
+					// directly as bool makes both cmp(a,b) and cmp(b,a) true for any
+					// unequal names, violating the strict weak ordering std::sort
+					// requires (undefined behavior). Compare against 0 explicitly.
+					return strcmp(left->name(), right->name()) < 0;
+				}
+			}
+			return (leftf->ordinal() < rightf->ordinal());
+		}
+	};
+
+	// Sort atoms into a deterministic order (file ordinal, then object address,
+	// then size, then name) so iteration over them — and therefore linker
+	// output — is reproducible across runs, unlike pointer-keyed map order.
+	static void sortAtomVector(std::vector<const Atom*> &atoms) {
+		std::sort(atoms.begin(), atoms.end(), AtomSorter());
+	}
+
template <typename A>
void OptimizeCategories<A>::doit(const Options& opts, ld::Internal& state)
{
// build map of all classes in this image that have categories on them
typedef std::map<const ld::Atom*, std::vector<const ld::Atom*>*> CatMap;
CatMap classToCategories;
+ std::vector<const ld::Atom*> classOrder;
std::set<const ld::Atom*> deadAtoms;
ld::Internal::FinalSection* methodListSection = NULL;
for (std::vector<ld::Internal::FinalSection*>::iterator sit=state.sections.begin(); sit != state.sections.end(); ++sit) {
continue;
}
assert(categoryAtom != NULL);
- assert(categoryAtom->size() == Category<A>::size());
+ assert(categoryAtom->size() >= Category<A>::size());
// ignore categories also in __objc_nlcatlist
if ( nlcatListAtoms.count(categoryAtom) != 0 )
continue;
CatMap::iterator pos = classToCategories.find(categoryOnClassAtom);
if ( pos == classToCategories.end() ) {
classToCategories[categoryOnClassAtom] = new std::vector<const ld::Atom*>();
+ classOrder.push_back(categoryOnClassAtom);
}
classToCategories[categoryOnClassAtom]->push_back(categoryAtom);
// mark category atom and catlist atom as dead
// if found some categories
if ( classToCategories.size() != 0 ) {
assert(methodListSection != NULL);
+ sortAtomVector(classOrder);
// alter each class definition to have new method list which includes all category methods
- for (CatMap::iterator it=classToCategories.begin(); it != classToCategories.end(); ++it) {
- const ld::Atom* classAtom = it->first;
- const std::vector<const ld::Atom*>* categories = it->second;
+ for (std::vector<const ld::Atom*>::iterator it = classOrder.begin(); it != classOrder.end(); it++) {
+ const ld::Atom* classAtom = *it;
+ const std::vector<const ld::Atom*>* categories = classToCategories[classAtom];
assert(categories->size() != 0);
// if any category adds instance methods, generate new merged method list, and replace
if ( OptimizeCategories<A>::hasInstanceMethods(state, categories) ) {
// add image info atom
switch ( opts.architecture() ) {
+#if SUPPORT_ARCH_x86_64
case CPU_TYPE_X86_64:
state.addAtom(*new ObjCImageInfoAtom<x86_64>(state.objcObjectConstraint, compaction,
- state.hasObjcReplacementClasses, true));
+ true));
break;
+#endif
+#if SUPPORT_ARCH_i386
case CPU_TYPE_I386:
state.addAtom(*new ObjCImageInfoAtom<x86>(state.objcObjectConstraint, compaction,
- state.hasObjcReplacementClasses, opts.objCABIVersion2POverride() ? true : false));
+ opts.objCABIVersion2POverride() ? true : false));
break;
+#endif
+#if SUPPORT_ARCH_arm_any
case CPU_TYPE_ARM:
state.addAtom(*new ObjCImageInfoAtom<arm>(state.objcObjectConstraint, compaction,
- state.hasObjcReplacementClasses, true));
+ true));
break;
+#endif
+#if SUPPORT_ARCH_arm64
+ case CPU_TYPE_ARM64:
+ state.addAtom(*new ObjCImageInfoAtom<arm64>(state.objcObjectConstraint, compaction,
+ true));
+ break;
+#endif
default:
assert(0 && "unknown objc arch");
}
if ( opts.objcCategoryMerging() ) {
// optimize classes defined in this linkage unit by merging in categories also in this linkage unit
switch ( opts.architecture() ) {
+#if SUPPORT_ARCH_x86_64
case CPU_TYPE_X86_64:
OptimizeCategories<x86_64>::doit(opts, state);
break;
+#endif
+#if SUPPORT_ARCH_i386
case CPU_TYPE_I386:
- // disable optimization until fully tested
- //if ( opts.objCABIVersion2POverride() )
- // OptimizeCategories<x86>::doit(opts, state);
+ if ( opts.objCABIVersion2POverride() )
+ OptimizeCategories<x86>::doit(opts, state);
break;
+#endif
+#if SUPPORT_ARCH_arm_any
case CPU_TYPE_ARM:
- // disable optimization until fully tested
- //OptimizeCategories<arm>::doit(opts, state);
+ OptimizeCategories<arm>::doit(opts, state);
+ break;
+#endif
+#if SUPPORT_ARCH_arm64
+ case CPU_TYPE_ARM64:
+ // disabled until tested
break;
+#endif
default:
assert(0 && "unknown objc arch");
}