/* -*- mode: C++; c-basic-offset: 4; tab-width: 4 -*-
*
- * Copyright (c) 2010 Apple Inc. All rights reserved.
+ * Copyright (c) 2010-2011 Apple Inc. All rights reserved.
*
* @APPLE_LICENSE_HEADER_START@
*
uint32_t flags;
};
-#define OBJC_IMAGE_IS_REPLACEMENT (1<<0)
#define OBJC_IMAGE_SUPPORTS_GC (1<<1)
#define OBJC_IMAGE_REQUIRES_GC (1<<2)
#define OBJC_IMAGE_OPTIMIZED_BY_DYLD (1<<3)
#define OBJC_IMAGE_SUPPORTS_COMPACTION (1<<4)
+#define OBJC_IMAGE_IS_SIMULATED (1<<5)
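+// bits 0-7 of the flags word are feature bits; bits 8-15 carry the Swift
+// language version (see the swiftVersion shift in ObjCImageInfoAtom's constructor)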
class ObjCImageInfoAtom : public ld::Atom {
public:
ObjCImageInfoAtom(ld::File::ObjcConstraint objcConstraint,
- bool compaction, bool objcReplacementClasses, bool abi2);
+ bool compaction, bool abi2, uint8_t swiftVersion);
virtual const ld::File* file() const { return NULL; }
- virtual bool translationUnitSource(const char** dir, const char**) const
- { return false; }
virtual const char* name() const { return "objc image info"; }
virtual uint64_t size() const { return sizeof(objc_image_info); }
virtual uint64_t objectAddress() const { return 0; }
template <typename A>
ObjCImageInfoAtom<A>::ObjCImageInfoAtom(ld::File::ObjcConstraint objcConstraint, bool compaction,
- bool objcReplacementClasses, bool abi2)
+ bool abi2, uint8_t swiftVersion)
: ld::Atom(abi2 ? _s_sectionABI2 : _s_sectionABI1, ld::Atom::definitionRegular, ld::Atom::combineNever,
ld::Atom::scopeLinkageUnit, ld::Atom::typeUnclassified,
symbolTableNotIn, false, false, false, ld::Atom::Alignment(2))
{
uint32_t value = 0;
- if ( objcReplacementClasses )
- value = OBJC_IMAGE_IS_REPLACEMENT;
switch ( objcConstraint ) {
case ld::File::objcConstraintNone:
case ld::File::objcConstraintRetainRelease:
if ( compaction )
value |= OBJC_IMAGE_SUPPORTS_COMPACTION;
break;
+ case ld::File::objcConstraintRetainReleaseForSimulator:
+ value |= OBJC_IMAGE_IS_SIMULATED;
+ break;
}
+ // record the Swift language version in the final binary so the runtime can inspect it
+ value |= (swiftVersion << 8);
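+ // e.g. objcConstraintRetainReleaseForSimulator with swiftVersion 1 yields
+ // flags == OBJC_IMAGE_IS_SIMULATED | (1 << 8) == 0x120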
+
_content.version = 0;
A::P::E::set32(_content.flags, value);
}
std::set<const ld::Atom*>& deadAtoms);
virtual const ld::File* file() const { return _file; }
- virtual bool translationUnitSource(const char** dir, const char**) const
- { return false; }
virtual const char* name() const { return "objc merged method list"; }
virtual uint64_t size() const { return _methodCount*3*sizeof(pint_t) + 8; }
virtual uint64_t objectAddress() const { return 0; }
virtual void setScope(Scope) { }
virtual void copyRawContent(uint8_t buffer[]) const {
bzero(buffer, size());
- A::P::E::set32(*((uint32_t*)(&buffer[0])), 24);
+ A::P::E::set32(*((uint32_t*)(&buffer[0])), 3*sizeof(pint_t)); // entry size
A::P::E::set32(*((uint32_t*)(&buffer[4])), _methodCount);
}
virtual ld::Fixup::iterator fixupsBegin() const { return (ld::Fixup*)&_fixups[0]; }
std::set<const ld::Atom*>& deadAtoms);
virtual const ld::File* file() const { return _file; }
- virtual bool translationUnitSource(const char** dir, const char**) const
- { return false; }
virtual const char* name() const { return "objc merged protocol list"; }
virtual uint64_t size() const { return (_protocolCount+1)*sizeof(pint_t); }
virtual uint64_t objectAddress() const { return 0; }
std::set<const ld::Atom*>& deadAtoms);
virtual const ld::File* file() const { return _file; }
- virtual bool translationUnitSource(const char** dir, const char**) const
- { return false; }
virtual const char* name() const { return "objc merged property list"; }
virtual uint64_t size() const { return _propertyCount*2*sizeof(pint_t) + 8; }
virtual uint64_t objectAddress() const { return 0; }
// overrides of ld::Atom
virtual const ld::File* file() const { return _atom->file(); }
- virtual bool translationUnitSource(const char** dir, const char** nm) const
- { return _atom->translationUnitSource(dir, nm); }
virtual const char* name() const { return _atom->name(); }
virtual uint64_t size() const { return _atom->size(); }
virtual uint64_t objectAddress() const { return _atom->objectAddress(); }
template <typename A>
class Category : public ObjCData<A> {
public:
- static const ld::Atom* getClass(ld::Internal& state, const ld::Atom* contentAtom);
+ static const ld::Atom* getClass(ld::Internal& state, const ld::Atom* contentAtom, bool& hasAddend);
static const ld::Atom* getInstanceMethods(ld::Internal& state, const ld::Atom* contentAtom);
static const ld::Atom* getClassMethods(ld::Internal& state, const ld::Atom* contentAtom);
static const ld::Atom* getProtocols(ld::Internal& state, const ld::Atom* contentAtom);
template <typename A>
-const ld::Atom* Category<A>::getClass(ld::Internal& state, const ld::Atom* contentAtom)
+const ld::Atom* Category<A>::getClass(ld::Internal& state, const ld::Atom* contentAtom, bool& hasAddend)
{
- return ObjCData<A>::getPointerInContent(state, contentAtom, sizeof(pint_t)); // category_t.cls
+ return ObjCData<A>::getPointerInContent(state, contentAtom, sizeof(pint_t), &hasAddend); // category_t.cls
}
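+ // hasAddend is set when the category_t.cls fixup carries a non-zero addend;
+ // doit() below backs off merging for such classes (<rdar://problem/16107696>)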
template <typename A>
const std::set<const ld::Atom*>& _dead;
};
+ struct AtomSorter
+ {
+ bool operator()(const Atom* left, const Atom* right)
+ {
+ // sort by file ordinal, then object address, then zero-sized atoms first, then symbol name
+ // only file based atoms are supported (file() != NULL)
+ if (left==right) return false;
+ const File *leftf = left->file();
+ const File *rightf = right->file();
+
+ if (leftf == rightf) {
+ if (left->objectAddress() != right->objectAddress()) {
+ return left->objectAddress() < right->objectAddress();
+ } else {
+ // for atoms in the same file with the same address, zero sized
+ // atoms must sort before nonzero sized atoms
+ if ((left->size() == 0 && right->size() > 0) || (left->size() > 0 && right->size() == 0))
+ return left->size() < right->size();
+ return strcmp(left->name(), right->name());
+ }
+ }
+ return (leftf->ordinal() < rightf->ordinal());
+ }
+ };
+
+ static void sortAtomVector(std::vector<const Atom*> &atoms) {
+ std::sort(atoms.begin(), atoms.end(), AtomSorter());
+ }
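+ // doit() sorts classOrder with this so merge order is deterministic;
+ // classToCategories is keyed by atom pointers, which vary from run to run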
+
template <typename A>
void OptimizeCategories<A>::doit(const Options& opts, ld::Internal& state)
{
// build map of all classes in this image that have categories on them
typedef std::map<const ld::Atom*, std::vector<const ld::Atom*>*> CatMap;
CatMap classToCategories;
+ std::vector<const ld::Atom*> classOrder;
std::set<const ld::Atom*> deadAtoms;
ld::Internal::FinalSection* methodListSection = NULL;
for (std::vector<ld::Internal::FinalSection*>::iterator sit=state.sections.begin(); sit != state.sections.end(); ++sit) {
continue;
}
assert(categoryAtom != NULL);
- assert(categoryAtom->size() == Category<A>::size());
+ assert(categoryAtom->size() >= Category<A>::size());
// ignore categories also in __objc_nlcatlist
if ( nlcatListAtoms.count(categoryAtom) != 0 )
continue;
- const ld::Atom* categoryOnClassAtom = Category<A>::getClass(state, categoryAtom);
+ bool hasAddend = false;
+ const ld::Atom* categoryOnClassAtom = Category<A>::getClass(state, categoryAtom, hasAddend);
assert(categoryOnClassAtom != NULL);
+ // only look at classes defined in this image
if ( categoryOnClassAtom->definition() != ld::Atom::definitionProxy ) {
- // only look at classes defined in this image
+ // <rdar://problem/16107696> for now, back off optimization on new style classes
+ if ( hasAddend )
+ continue;
+ // <rdar://problem/17249777> don't apply categories to swift classes
+ if ( categoryOnClassAtom->hasFixupsOfKind(ld::Fixup::kindNoneGroupSubordinate) )
+ continue;
+
CatMap::iterator pos = classToCategories.find(categoryOnClassAtom);
if ( pos == classToCategories.end() ) {
classToCategories[categoryOnClassAtom] = new std::vector<const ld::Atom*>();
+ classOrder.push_back(categoryOnClassAtom);
}
classToCategories[categoryOnClassAtom]->push_back(categoryAtom);
// mark category atom and catlist atom as dead
// if found some categories
if ( classToCategories.size() != 0 ) {
assert(methodListSection != NULL);
+ sortAtomVector(classOrder);
// alter each class definition to have new method list which includes all category methods
- for (CatMap::iterator it=classToCategories.begin(); it != classToCategories.end(); ++it) {
- const ld::Atom* classAtom = it->first;
- const std::vector<const ld::Atom*>* categories = it->second;
+ for (std::vector<const ld::Atom*>::iterator it = classOrder.begin(); it != classOrder.end(); ++it) {
+ const ld::Atom* classAtom = *it;
+ const std::vector<const ld::Atom*>* categories = classToCategories[classAtom];
assert(categories->size() != 0);
// if any category adds instance methods, generate new merged method list, and replace
if ( OptimizeCategories<A>::hasInstanceMethods(state, categories) ) {
symbolTableNotIn, false, false, false, ld::Atom::Alignment(3)), _file(NULL), _methodCount(0)
{
unsigned int fixupCount = 0;
+ std::set<const ld::Atom*> baseMethodListMethodNameAtoms;
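+ // selector-name atoms from the base class's own method list, used below to
+ // warn when a category method overrides one of them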
// if base class has method list, then associate new method list with file defining class
if ( baseMethodList != NULL ) {
_file = baseMethodList->file();
_methodCount = MethodList<A>::count(state, baseMethodList);
deadAtoms.insert(baseMethodList);
fixupCount = baseMethodList->fixupsEnd() - baseMethodList->fixupsBegin();
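+ // each method_t entry is 3 pointers after the 8-byte header; the fixup at
+ // the start of an entry targets the method's selector-name cstring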
+ for (ld::Fixup::iterator fit=baseMethodList->fixupsBegin(); fit != baseMethodList->fixupsEnd(); ++fit) {
+ if ( (fit->offsetInAtom - 8) % (3*sizeof(pint_t)) == 0 ) {
+ assert(fit->binding == ld::Fixup::bindingsIndirectlyBound && "malformed method list");
+ const ld::Atom* target = state.indirectBindingTable[fit->u.bindingIndex];
+ assert(target->contentType() == ld::Atom::typeCString && "malformed method list");
+ baseMethodListMethodNameAtoms.insert(target);
+ }
+ }
}
for (std::vector<const ld::Atom*>::const_iterator ait=categories->begin(); ait != categories->end(); ++ait) {
const ld::Atom* categoryMethodListAtom;
// copy fixups and adjust offsets (in reverse order to simulate the objc runtime)
_fixups.reserve(fixupCount);
uint32_t slide = 0;
+ std::set<const ld::Atom*> categoryMethodNameAtoms;
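+ // selector names seen so far across this class's categories, used to warn
+ // when two categories add a method with the same name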
for (std::vector<const ld::Atom*>::const_reverse_iterator rit=categories->rbegin(); rit != categories->rend(); ++rit) {
const ld::Atom* categoryMethodListAtom;
if ( meta )
ld::Fixup fixup = *fit;
fixup.offsetInAtom += slide;
_fixups.push_back(fixup);
- //if ( fixup.binding == ld::Fixup::bindingDirectlyBound )
- // fprintf(stderr, "offset=0x%08X, name=%s\n", fixup.offsetInAtom, fixup.u.target->name());
+ if ( (fixup.offsetInAtom - 8) % (3*sizeof(pint_t)) == 0 ) {
+ // <rdar://problem/8642343> warning when a method is overridden in a category in the same link unit
+ assert(fixup.binding == ld::Fixup::bindingsIndirectlyBound && "malformed category method list");
+ const ld::Atom* target = state.indirectBindingTable[fixup.u.bindingIndex];
+ assert(target->contentType() == ld::Atom::typeCString && "malformed method list");
+ // this objc pass happens after cstrings are coalesced, so we can just compare the atom address instead of its content
+ if ( baseMethodListMethodNameAtoms.count(target) != 0 ) {
+ warning("%s method '%s' in category from %s overrides method from class in %s",
+ (meta ? "meta" : "instance"), target->rawContentPointer(),
+ categoryMethodListAtom->file()->path(), baseMethodList->file()->path() );
+ }
+ if ( categoryMethodNameAtoms.count(target) != 0 ) {
+ warning("%s method '%s' in category from %s conflicts with same method from another category",
+ (meta ? "meta" : "instance"), target->rawContentPointer(),
+ categoryMethodListAtom->file()->path());
+ }
+ categoryMethodNameAtoms.insert(target);
+ }
}
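+ // advance slide past this category's entries so the next (earlier) category's
+ // methods land after them in the merged list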
slide += 3*sizeof(pint_t) * MethodList<A>::count(state, categoryMethodListAtom);
}
// add image info atom
switch ( opts.architecture() ) {
+#if SUPPORT_ARCH_x86_64
case CPU_TYPE_X86_64:
state.addAtom(*new ObjCImageInfoAtom<x86_64>(state.objcObjectConstraint, compaction,
- state.hasObjcReplacementClasses, true));
+ true, state.swiftVersion));
break;
+#endif
+#if SUPPORT_ARCH_i386
case CPU_TYPE_I386:
state.addAtom(*new ObjCImageInfoAtom<x86>(state.objcObjectConstraint, compaction,
- state.hasObjcReplacementClasses, opts.objCABIVersion2POverride() ? true : false));
- break;
- case CPU_TYPE_POWERPC:
- state.addAtom(*new ObjCImageInfoAtom<ppc>(state.objcObjectConstraint, compaction,
- state.hasObjcReplacementClasses, false));
+ opts.objCABIVersion2POverride(), state.swiftVersion));
break;
+#endif
+#if SUPPORT_ARCH_arm_any
case CPU_TYPE_ARM:
state.addAtom(*new ObjCImageInfoAtom<arm>(state.objcObjectConstraint, compaction,
- state.hasObjcReplacementClasses, true));
+ true, state.swiftVersion));
break;
- case CPU_TYPE_POWERPC64:
- state.addAtom(*new ObjCImageInfoAtom<ppc64>(state.objcObjectConstraint, compaction,
- state.hasObjcReplacementClasses, true));
+#endif
+#if SUPPORT_ARCH_arm64
+ case CPU_TYPE_ARM64:
+ state.addAtom(*new ObjCImageInfoAtom<arm64>(state.objcObjectConstraint, compaction,
+ true, state.swiftVersion));
break;
+#endif
default:
assert(0 && "unknown objc arch");
}
if ( opts.objcCategoryMerging() ) {
// optimize classes defined in this linkage unit by merging in categories also in this linkage unit
switch ( opts.architecture() ) {
+#if SUPPORT_ARCH_x86_64
case CPU_TYPE_X86_64:
OptimizeCategories<x86_64>::doit(opts, state);
break;
+#endif
+#if SUPPORT_ARCH_i386
case CPU_TYPE_I386:
- // disable optimization until fully tested
- //if ( opts.objCABIVersion2POverride() )
- // OptimizeCategories<x86>::doit(opts, state);
+ if ( opts.objCABIVersion2POverride() )
+ OptimizeCategories<x86>::doit(opts, state);
break;
+#endif
+#if SUPPORT_ARCH_arm_any
case CPU_TYPE_ARM:
- // disable optimization until fully tested
- //OptimizeCategories<arm>::doit(opts, state);
+ OptimizeCategories<arm>::doit(opts, state);
break;
- case CPU_TYPE_POWERPC64:
- case CPU_TYPE_POWERPC:
+#endif
+#if SUPPORT_ARCH_arm64
+ case CPU_TYPE_ARM64:
+ // disabled until tested
break;
+#endif
default:
assert(0 && "unknown objc arch");
}