/*
 * Copyright (c) 1999-2007 Apple Inc.  All Rights Reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * objc-private.h
 * Copyright 1988-1996, NeXT Software, Inc.
 */

#ifndef _OBJC_PRIVATE_H_
#define _OBJC_PRIVATE_H_

#include "objc-config.h"

/* Isolate ourselves from the definitions of id and Class in the compiler
 * and public headers.
 */

#ifdef _OBJC_OBJC_H_
#error include objc-private.h before other headers
#endif

#define OBJC_TYPES_DEFINED 1
#undef OBJC_OLD_DISPATCH_PROTOTYPES
#define OBJC_OLD_DISPATCH_PROTOTYPES 0

#include <cstddef>  // for nullptr_t
#include <stdint.h>
#include <assert.h>

// An assert that's disabled for release builds but still ensures the expression compiles.
#ifdef NDEBUG
#define ASSERT(x) (void)sizeof(!(x))
#else
#define ASSERT(x) assert(x)
#endif
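
// Illustrative note (not part of the original header): in release builds the
// ASSERT() argument is still type-checked but never evaluated, because sizeof
// only inspects the type of its operand. A hypothetical use:
//
//     ASSERT(obj != nil);   // NDEBUG: expands to (void)sizeof(!(obj != nil)),
//                           // which compiles the expression but emits no code.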

// `this` is never NULL in C++ unless we encounter UB, but checking for what's impossible
// is the point of these asserts, so disable the corresponding warning, and let's hope
// we will reach the assert despite the UB
#define ASSERT_THIS_NOT_NULL \
    _Pragma("clang diagnostic push") \
    _Pragma("clang diagnostic ignored \"-Wundefined-bool-conversion\"") \
    ASSERT(this) \
    _Pragma("clang diagnostic pop")


struct objc_class;
struct objc_object;
struct category_t;

typedef struct objc_class *Class;
typedef struct objc_object *id;
typedef struct classref *classref_t;

namespace {
    struct SideTable;
};

#include "isa.h"

union isa_t {
    isa_t() { }
    isa_t(uintptr_t value) : bits(value) { }

    uintptr_t bits;

private:
    // Accessing the class requires custom ptrauth operations, so
    // force clients to go through setClass/getClass by making this
    // private.
    Class cls;

public:
#if defined(ISA_BITFIELD)
    struct {
        ISA_BITFIELD;  // defined in isa.h
    };

    bool isDeallocating() {
        return extra_rc == 0 && has_sidetable_rc == 0;
    }
    void setDeallocating() {
        extra_rc = 0;
        has_sidetable_rc = 0;
    }
#endif

    void setClass(Class cls, objc_object *obj);
    Class getClass(bool authenticated);
    Class getDecodedClass(bool authenticated);
};


struct objc_object {
private:
    isa_t isa;

public:

    // ISA() assumes this is NOT a tagged pointer object
    Class ISA(bool authenticated = false);

    // rawISA() assumes this is NOT a tagged pointer object or a non pointer ISA
    Class rawISA();

    // getIsa() allows this to be a tagged pointer object
    Class getIsa();

    uintptr_t isaBits() const;

    // initIsa() should be used to init the isa of new objects only.
    // If this object already has an isa, use changeIsa() for correctness.
    // initInstanceIsa(): objects with no custom RR/AWZ
    // initClassIsa(): class objects
    // initProtocolIsa(): protocol objects
    // initIsa(): other objects
    void initIsa(Class cls /*nonpointer=false*/);
    void initClassIsa(Class cls /*nonpointer=maybe*/);
    void initProtocolIsa(Class cls /*nonpointer=maybe*/);
    void initInstanceIsa(Class cls, bool hasCxxDtor);

    // changeIsa() should be used to change the isa of existing objects.
    // If this is a new object, use initIsa() for performance.
    Class changeIsa(Class newCls);

    bool hasNonpointerIsa();
    bool isTaggedPointer();
    bool isTaggedPointerOrNil();
    bool isBasicTaggedPointer();
    bool isExtTaggedPointer();
    bool isClass();

    // object may have associated objects?
    bool hasAssociatedObjects();
    void setHasAssociatedObjects();

    // object may be weakly referenced?
    bool isWeaklyReferenced();
    void setWeaklyReferenced_nolock();

    // object may have -.cxx_destruct implementation?
    bool hasCxxDtor();

    // Optimized calls to retain/release methods
    id retain();
    void release();
    id autorelease();

    // Implementations of retain/release methods
    id rootRetain();
    bool rootRelease();
    id rootAutorelease();
    bool rootTryRetain();
    bool rootReleaseShouldDealloc();
    uintptr_t rootRetainCount();

    // Implementation of dealloc methods
    bool rootIsDeallocating();
    void clearDeallocating();
    void rootDealloc();

private:
    void initIsa(Class newCls, bool nonpointer, bool hasCxxDtor);

    // Slow paths for inline control
    id rootAutorelease2();
    uintptr_t overrelease_error();

#if SUPPORT_NONPOINTER_ISA
    // Controls what parts of root{Retain,Release} to emit/inline
    // - Full means the full (slow) implementation
    // - Fast means the fastpaths only
    // - FastOrMsgSend means the fastpaths but checking whether we should call
    //   -retain/-release or Swift, for the usage of objc_{retain,release}
    enum class RRVariant {
        Full,
        Fast,
        FastOrMsgSend,
    };

    // Unified retain count manipulation for nonpointer isa
    inline id rootRetain(bool tryRetain, RRVariant variant);
    inline bool rootRelease(bool performDealloc, RRVariant variant);
    id rootRetain_overflow(bool tryRetain);
    uintptr_t rootRelease_underflow(bool performDealloc);

    void clearDeallocating_slow();

    // Side table retain count overflow for nonpointer isa
    struct SidetableBorrow { size_t borrowed, remaining; };

    void sidetable_lock();
    void sidetable_unlock();

    void sidetable_moveExtraRC_nolock(size_t extra_rc, bool isDeallocating, bool weaklyReferenced);
    bool sidetable_addExtraRC_nolock(size_t delta_rc);
    SidetableBorrow sidetable_subExtraRC_nolock(size_t delta_rc);
    size_t sidetable_getExtraRC_nolock();
    void sidetable_clearExtraRC_nolock();
#endif

    // Side-table-only retain count
    bool sidetable_isDeallocating();
    void sidetable_clearDeallocating();

    bool sidetable_isWeaklyReferenced();
    void sidetable_setWeaklyReferenced_nolock();

    id sidetable_retain(bool locked = false);
    id sidetable_retain_slow(SideTable& table);

    uintptr_t sidetable_release(bool locked = false, bool performDealloc = true);
    uintptr_t sidetable_release_slow(SideTable& table, bool performDealloc = true);

    bool sidetable_tryRetain();

    uintptr_t sidetable_retainCount();
#if DEBUG
    bool sidetable_present();
#endif
};


#if __OBJC2__
typedef struct method_t *Method;
typedef struct ivar_t *Ivar;
typedef struct category_t *Category;
typedef struct property_t *objc_property_t;
#else
typedef struct old_method *Method;
typedef struct old_ivar *Ivar;
typedef struct old_category *Category;
typedef struct old_property *objc_property_t;
#endif

// Public headers

#include "objc.h"
#include "runtime.h"
#include "objc-os.h"
#include "objc-abi.h"
#include "objc-api.h"
#include "objc-config.h"
#include "objc-internal.h"
#include "maptable.h"
#include "hashtable2.h"

/* Do not include message.h here. */
/* #include "message.h" */

#define __APPLE_API_PRIVATE
#include "objc-gdb.h"
#undef __APPLE_API_PRIVATE


// Private headers

#include "objc-ptrauth.h"

#if __OBJC2__
#include "objc-runtime-new.h"
#else
#include "objc-runtime-old.h"
#endif

#include "objc-references.h"
#include "objc-initialize.h"
#include "objc-loadmethod.h"


#define STRINGIFY(x) #x
#define STRINGIFY2(x) STRINGIFY(x)
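
// Illustrative note (not part of the original header): the two-level macro
// forces argument expansion before stringizing. With a hypothetical
//     #define VERS 7
// STRINGIFY(VERS) yields "VERS", while STRINGIFY2(VERS) yields "7".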

__BEGIN_DECLS

namespace objc {

struct SafeRanges {
private:
    struct Range {
        uintptr_t start;
        uintptr_t end;

        inline bool contains(uintptr_t ptr) const {
            uintptr_t m_start, m_end;
#if __arm64__
            // <rdar://problem/48304934> Force the compiler to use ldp
            // we really don't want 2 loads and 2 jumps.
            __asm__(
# if __LP64__
                    "ldp %x[one], %x[two], [%x[src]]"
# else
                    "ldp %w[one], %w[two], [%x[src]]"
# endif
                    : [one] "=r" (m_start), [two] "=r" (m_end)
                    : [src] "r" (this)
            );
#else
            m_start = start;
            m_end = end;
#endif
            return m_start <= ptr && ptr < m_end;
        }
    };

    struct Range shared_cache;
    struct Range *ranges;
    uint32_t count;
    uint32_t size : 31;
    uint32_t sorted : 1;

public:
    inline bool inSharedCache(uintptr_t ptr) const {
        return shared_cache.contains(ptr);
    }
    inline bool contains(uint16_t witness, uintptr_t ptr) const {
        return witness < count && ranges[witness].contains(ptr);
    }

    inline void setSharedCacheRange(uintptr_t start, uintptr_t end) {
        shared_cache = Range{start, end};
        add(start, end);
    }
    bool find(uintptr_t ptr, uint32_t &pos);
    void add(uintptr_t start, uintptr_t end);
    void remove(uintptr_t start, uintptr_t end);
};

extern struct SafeRanges dataSegmentsRanges;

static inline bool inSharedCache(uintptr_t ptr) {
    return dataSegmentsRanges.inSharedCache(ptr);
}

} // objc
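
// Illustrative sketch (not part of the original header): callers query the
// registered data segment ranges roughly like this, where `cls` is a
// hypothetical class pointer:
//
//     if (objc::inSharedCache((uintptr_t)cls)) {
//         // `cls` lives inside the dyld shared cache
//     }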

struct header_info;

// Split out the rw data from header info. For now put it in a huge array
// that far exceeds the space needed. In the future we'll just allocate
// this in the shared cache builder.
typedef struct header_info_rw {

    bool getLoaded() const {
        return isLoaded;
    }

    void setLoaded(bool v) {
        isLoaded = v ? 1 : 0;
    }

    bool getAllClassesRealized() const {
        return allClassesRealized;
    }

    void setAllClassesRealized(bool v) {
        allClassesRealized = v ? 1 : 0;
    }

    header_info *getNext() const {
        return (header_info *)(next << 2);
    }

    void setNext(header_info *v) {
        next = ((uintptr_t)v) >> 2;
    }

private:
#ifdef __LP64__
    uintptr_t isLoaded : 1;
    uintptr_t allClassesRealized : 1;
    uintptr_t next : 62;
#else
    uintptr_t isLoaded : 1;
    uintptr_t allClassesRealized : 1;
    uintptr_t next : 30;
#endif
} header_info_rw;
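
// Illustrative note (not part of the original header): setNext()/getNext()
// rely on header_info being at least 4-byte aligned, so the low two bits of
// the pointer are zero and it round-trips through the narrow `next` bitfield:
//
//     rw->setNext(hi);              // stores ((uintptr_t)hi) >> 2
//     ASSERT(rw->getNext() == hi);  // getNext() shifts back: (next << 2)
//
// where `rw` and `hi` are hypothetical header_info_rw* / header_info* values.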

struct header_info_rw* getPreoptimizedHeaderRW(const struct header_info *const hdr);

typedef struct header_info {
private:
    // Note: this is no longer a pointer, but an offset from this field's
    // own address to the mach header.
    intptr_t mhdr_offset;

    // Note: this is no longer a pointer, but an offset from this field's
    // own address to the image's objc_image_info.
    intptr_t info_offset;

    // Offset from this location to the non-lazy class list
    intptr_t nlclslist_offset;
    uintptr_t nlclslist_count;

    // Offset from this location to the non-lazy category list
    intptr_t nlcatlist_offset;
    uintptr_t nlcatlist_count;

    // Offset from this location to the category list
    intptr_t catlist_offset;
    uintptr_t catlist_count;

    // Offset from this location to the category list 2
    intptr_t catlist2_offset;
    uintptr_t catlist2_count;

    // Do not add fields without editing ObjCModernAbstraction.hpp
public:

    header_info_rw *getHeaderInfoRW() {
        header_info_rw *preopt =
            isPreoptimized() ? getPreoptimizedHeaderRW(this) : nil;
        if (preopt) return preopt;
        else return &rw_data[0];
    }

    const headerType *mhdr() const {
        return (const headerType *)(((intptr_t)&mhdr_offset) + mhdr_offset);
    }

    void setmhdr(const headerType *mhdr) {
        mhdr_offset = (intptr_t)mhdr - (intptr_t)&mhdr_offset;
    }

    const objc_image_info *info() const {
        return (const objc_image_info *)(((intptr_t)&info_offset) + info_offset);
    }

    void setinfo(const objc_image_info *info) {
        info_offset = (intptr_t)info - (intptr_t)&info_offset;
    }

    const classref_t *nlclslist(size_t *outCount) const;

    void set_nlclslist(const void *list) {
        nlclslist_offset = (intptr_t)list - (intptr_t)&nlclslist_offset;
    }

    category_t * const *nlcatlist(size_t *outCount) const;

    void set_nlcatlist(const void *list) {
        nlcatlist_offset = (intptr_t)list - (intptr_t)&nlcatlist_offset;
    }

    category_t * const *catlist(size_t *outCount) const;

    void set_catlist(const void *list) {
        catlist_offset = (intptr_t)list - (intptr_t)&catlist_offset;
    }

    category_t * const *catlist2(size_t *outCount) const;

    void set_catlist2(const void *list) {
        catlist2_offset = (intptr_t)list - (intptr_t)&catlist2_offset;
    }

    bool isLoaded() {
        return getHeaderInfoRW()->getLoaded();
    }

    void setLoaded(bool v) {
        getHeaderInfoRW()->setLoaded(v);
    }

    bool areAllClassesRealized() {
        return getHeaderInfoRW()->getAllClassesRealized();
    }

    void setAllClassesRealized(bool v) {
        getHeaderInfoRW()->setAllClassesRealized(v);
    }

    header_info *getNext() {
        return getHeaderInfoRW()->getNext();
    }

    void setNext(header_info *v) {
        getHeaderInfoRW()->setNext(v);
    }

    bool isBundle() {
        return mhdr()->filetype == MH_BUNDLE;
    }

    const char *fname() const {
        return dyld_image_path_containing_address(mhdr());
    }

    bool isPreoptimized() const;

    bool hasPreoptimizedSelectors() const;

    bool hasPreoptimizedClasses() const;

    bool hasPreoptimizedProtocols() const;

    bool hasPreoptimizedSectionLookups() const;

#if !__OBJC2__
    struct old_protocol **proto_refs;
    struct objc_module *mod_ptr;
    size_t mod_count;
# if TARGET_OS_WIN32
    struct objc_module **modules;
    size_t moduleCount;
    struct old_protocol **protocols;
    size_t protocolCount;
    void *imageinfo;
    size_t imageinfoBytes;
    SEL *selrefs;
    size_t selrefCount;
    struct objc_class **clsrefs;
    size_t clsrefCount;
    TCHAR *moduleName;
# endif
#endif

private:
    // Images in the shared cache will have an empty array here while those
    // allocated at run time will allocate a single entry.
    header_info_rw rw_data[];
} header_info;
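
// Illustrative sketch (not part of the original header): the *_offset fields
// in header_info are self-relative (offsets from the field's own address), so
// the structure stays valid no matter where the image or shared cache slides:
//
//     header_info *hi = ...;                // hypothetical
//     hi->setmhdr(mh);                      // stores (intptr_t)mh - (intptr_t)&mhdr_offset
//     const headerType *same = hi->mhdr();  // adds the offset back; same == mh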

extern header_info *FirstHeader;
extern header_info *LastHeader;

extern void appendHeader(header_info *hi);
extern void removeHeader(header_info *hi);

extern objc_image_info *_getObjcImageInfo(const headerType *head, size_t *size);
extern bool _hasObjcContents(const header_info *hi);


// Mach-O segment and section names are 16 bytes and may be un-terminated.

static inline bool segnameEquals(const char *lhs, const char *rhs) {
    return 0 == strncmp(lhs, rhs, 16);
}

static inline bool segnameStartsWith(const char *segname, const char *prefix) {
    return 0 == strncmp(segname, prefix, strlen(prefix));
}

static inline bool sectnameEquals(const char *lhs, const char *rhs) {
    return segnameEquals(lhs, rhs);
}

static inline bool sectnameStartsWith(const char *sectname, const char *prefix) {
    return segnameStartsWith(sectname, prefix);
}
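
// Illustrative usage (not part of the original header), with `seg` and `sect`
// being hypothetical segment_command_64 / section_64 pointers:
//
//     if (segnameEquals(seg->segname, "__DATA")) { /* ... */ }
//     if (sectnameStartsWith(sect->sectname, "__objc_")) { /* ... */ }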


#if __OBJC2__
extern bool didCallDyldNotifyRegister;
#endif


/* selectors */
extern void sel_init(size_t selrefCount);
extern SEL sel_registerNameNoLock(const char *str, bool copy);

extern SEL SEL_cxx_construct;
extern SEL SEL_cxx_destruct;

/* preoptimization */
extern void preopt_init(void);
extern void disableSharedCacheOptimizations(void);
extern bool isPreoptimized(void);
extern bool noMissingWeakSuperclasses(void);
extern header_info *preoptimizedHinfoForHeader(const headerType *mhdr);

extern Protocol *getPreoptimizedProtocol(const char *name);
extern Protocol *getSharedCachePreoptimizedProtocol(const char *name);

extern unsigned getPreoptimizedClassUnreasonableCount();
extern Class getPreoptimizedClass(const char *name);
extern Class* copyPreoptimizedClasses(const char *name, int *outCount);

extern Class _calloc_class(size_t size);

/* method lookup */
enum {
    LOOKUP_INITIALIZE = 1,
    LOOKUP_RESOLVER = 2,
    LOOKUP_NIL = 4,
    LOOKUP_NOCACHE = 8,
};
extern IMP lookUpImpOrForward(id obj, SEL, Class cls, int behavior);
extern IMP lookUpImpOrForwardTryCache(id obj, SEL, Class cls, int behavior = 0);
extern IMP lookUpImpOrNilTryCache(id obj, SEL, Class cls, int behavior = 0);
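
// Illustrative sketch (not part of the original header): `behavior` is a
// bitwise OR of the LOOKUP_* flags above. For example, a lookup that may run
// +initialize and the method resolver but returns nil instead of forwarding
// might look like:
//
//     IMP imp = lookUpImpOrForward(obj, sel, cls,
//                                  LOOKUP_INITIALIZE | LOOKUP_RESOLVER | LOOKUP_NIL);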

extern IMP lookupMethodInClassAndLoadCache(Class cls, SEL sel);

struct IMPAndSEL {
    IMP imp;
    SEL sel;
};

extern IMPAndSEL _method_getImplementationAndName(Method m);

extern BOOL class_respondsToSelector_inst(id inst, SEL sel, Class cls);
extern Class class_initialize(Class cls, id inst);

extern bool objcMsgLogEnabled;
extern bool logMessageSend(bool isClassMethod,
                           const char *objectsClass,
                           const char *implementingClass,
                           SEL selector);

/* message dispatcher */

#if !OBJC_OLD_DISPATCH_PROTOTYPES
extern void _objc_msgForward_impcache(void);
#else
extern id _objc_msgForward_impcache(id, SEL, ...);
#endif

/* errors */
extern id(*badAllocHandler)(Class);
extern id _objc_callBadAllocHandler(Class cls) __attribute__((cold, noinline));
extern void __objc_error(id, const char *, ...) __attribute__((cold, format (printf, 2, 3), noreturn));
extern void _objc_inform(const char *fmt, ...) __attribute__((cold, format(printf, 1, 2)));
extern void _objc_inform_on_crash(const char *fmt, ...) __attribute__((cold, format (printf, 1, 2)));
extern void _objc_inform_now_and_on_crash(const char *fmt, ...) __attribute__((cold, format (printf, 1, 2)));
extern void _objc_inform_deprecated(const char *oldname, const char *newname) __attribute__((cold, noinline));
extern void inform_duplicate(const char *name, Class oldCls, Class cls);

/* magic */
extern Class _objc_getFreedObjectClass (void);

/* map table additions */
extern void *NXMapKeyCopyingInsert(NXMapTable *table, const void *key, const void *value);
extern void *NXMapKeyFreeingRemove(NXMapTable *table, const void *key);

/* hash table additions */
extern unsigned _NXHashCapacity(NXHashTable *table);
extern void _NXHashRehashToCapacity(NXHashTable *table, unsigned newCapacity);

/* property attribute parsing */
extern const char *copyPropertyAttributeString(const objc_property_attribute_t *attrs, unsigned int count);
extern objc_property_attribute_t *copyPropertyAttributeList(const char *attrs, unsigned int *outCount);
extern char *copyPropertyAttributeValue(const char *attrs, const char *name);

/* locking */

class monitor_locker_t : nocopy_t {
    monitor_t& lock;
  public:
    monitor_locker_t(monitor_t& newLock) : lock(newLock) { lock.enter(); }
    ~monitor_locker_t() { lock.leave(); }
};

class recursive_mutex_locker_t : nocopy_t {
    recursive_mutex_t& lock;
  public:
    recursive_mutex_locker_t(recursive_mutex_t& newLock)
        : lock(newLock) { lock.lock(); }
    ~recursive_mutex_locker_t() { lock.unlock(); }
};


/* Exceptions */
struct alt_handler_list;
extern void exception_init(void);
extern void _destroyAltHandlerList(struct alt_handler_list *list);

/* Class change notifications (gdb only for now) */
#define OBJC_CLASS_ADDED (1<<0)
#define OBJC_CLASS_REMOVED (1<<1)
#define OBJC_CLASS_IVARS_CHANGED (1<<2)
#define OBJC_CLASS_METHODS_CHANGED (1<<3)
extern void gdb_objc_class_changed(Class cls, unsigned long changes, const char *classname)
    __attribute__((noinline));


// Settings from environment variables
#define OPTION(var, env, help) extern bool var;
#include "objc-env.h"
#undef OPTION
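
// Illustrative note (not part of the original header): this is the X-macro
// pattern. objc-env.h holds lines shaped like the hypothetical example
//     OPTION(PrintImages, OBJC_PRINT_IMAGES, "log image loading")
// so including it under the definition above declares `extern bool PrintImages;`
// for every option, while other translation units re-include it with different
// OPTION definitions to define the flags and parse the environment variables.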

extern void environ_init(void);
extern void runtime_init(void);

extern void logReplacedMethod(const char *className, SEL s, bool isMeta, const char *catName, IMP oldImp, IMP newImp);


// objc per-thread storage
typedef struct {
    struct _objc_initializing_classes *initializingClasses; // for +initialize
    struct SyncCache *syncCache; // for @synchronized
    struct alt_handler_list *handlerList; // for exception alt handlers
    char *printableNames[4]; // temporary demangled names for logging
    const char **classNameLookups; // for objc_getClass() hooks
    unsigned classNameLookupsAllocated;
    unsigned classNameLookupsUsed;

    // If you add new fields here, don't forget to update
    // _objc_pthread_destroyspecific()

} _objc_pthread_data;

extern _objc_pthread_data *_objc_fetch_pthread_data(bool create);
extern void tls_init(void);

// encoding.h
extern unsigned int encoding_getNumberOfArguments(const char *typedesc);
extern unsigned int encoding_getSizeOfArguments(const char *typedesc);
extern unsigned int encoding_getArgumentInfo(const char *typedesc, unsigned int arg, const char **type, int *offset);
extern void encoding_getReturnType(const char *t, char *dst, size_t dst_len);
extern char * encoding_copyReturnType(const char *t);
extern void encoding_getArgumentType(const char *t, unsigned int index, char *dst, size_t dst_len);
extern char *encoding_copyArgumentType(const char *t, unsigned int index);

// sync.h
extern void _destroySyncCache(struct SyncCache *cache);

// arr
extern void arr_init(void);
extern id objc_autoreleaseReturnValue(id obj);

// block trampolines
extern void _imp_implementationWithBlock_init(void);
extern IMP _imp_implementationWithBlockNoCopy(id block);

// layout.h
typedef struct {
    uint8_t *bits;
    size_t bitCount;
    size_t bitsAllocated;
    bool weak;
} layout_bitmap;
extern layout_bitmap layout_bitmap_create(const unsigned char *layout_string, size_t layoutStringInstanceSize, size_t instanceSize, bool weak);
extern layout_bitmap layout_bitmap_create_empty(size_t instanceSize, bool weak);
extern void layout_bitmap_free(layout_bitmap bits);
extern const unsigned char *layout_string_create(layout_bitmap bits);
extern void layout_bitmap_set_ivar(layout_bitmap bits, const char *type, size_t offset);
extern void layout_bitmap_grow(layout_bitmap *bits, size_t newCount);
extern void layout_bitmap_slide(layout_bitmap *bits, size_t oldPos, size_t newPos);
extern void layout_bitmap_slide_anywhere(layout_bitmap *bits, size_t oldPos, size_t newPos);
extern bool layout_bitmap_splat(layout_bitmap dst, layout_bitmap src,
                                size_t oldSrcInstanceSize);
extern bool layout_bitmap_or(layout_bitmap dst, layout_bitmap src, const char *msg);
extern bool layout_bitmap_clear(layout_bitmap dst, layout_bitmap src, const char *msg);
extern void layout_bitmap_print(layout_bitmap bits);


// fixme runtime
extern bool MultithreadedForkChild;
extern id objc_noop_imp(id self, SEL _cmd);
extern Class look_up_class(const char *aClassName, bool includeUnconnected, bool includeClassHandler);
extern "C" void map_images(unsigned count, const char * const paths[],
                           const struct mach_header * const mhdrs[]);
extern void map_images_nolock(unsigned count, const char * const paths[],
                              const struct mach_header * const mhdrs[]);
extern void load_images(const char *path, const struct mach_header *mh);
extern void unmap_image(const char *path, const struct mach_header *mh);
extern void unmap_image_nolock(const struct mach_header *mh);
extern void _read_images(header_info **hList, uint32_t hCount, int totalClasses, int unoptimizedTotalClass);
extern void _unload_image(header_info *hi);

extern const header_info *_headerForClass(Class cls);

extern Class _class_remap(Class cls);
extern Ivar _class_getVariable(Class cls, const char *name);

extern unsigned _class_createInstancesFromZone(Class cls, size_t extraBytes, void *zone, id *results, unsigned num_requested);

extern const char *_category_getName(Category cat);
extern const char *_category_getClassName(Category cat);
extern Class _category_getClass(Category cat);
extern IMP _category_getLoadMethod(Category cat);

enum {
    OBJECT_CONSTRUCT_NONE = 0,
    OBJECT_CONSTRUCT_FREE_ONFAILURE = 1,
    OBJECT_CONSTRUCT_CALL_BADALLOC = 2,
};
extern id object_cxxConstructFromClass(id obj, Class cls, int flags);
extern void object_cxxDestruct(id obj);

extern void fixupCopiedIvars(id newObject, id oldObject);
extern Class _class_getClassForIvar(Class cls, Ivar ivar);


#define OBJC_WARN_DEPRECATED \
    do { \
        static int warned = 0; \
        if (!warned) { \
            warned = 1; \
            _objc_inform_deprecated(__FUNCTION__, NULL); \
        } \
    } while (0) \

__END_DECLS


#ifndef STATIC_ASSERT
#   define STATIC_ASSERT(x) _STATIC_ASSERT2(x, __LINE__)
#   define _STATIC_ASSERT2(x, line) _STATIC_ASSERT3(x, line)
#   define _STATIC_ASSERT3(x, line) \
        typedef struct { \
            int _static_assert[(x) ? 0 : -1]; \
        } _static_assert_ ## line __attribute__((unavailable))
#endif
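
// Illustrative usage (not part of the original header): the macro defines a
// struct whose array size is negative when the condition is false, which
// fails the build at that line, e.g.
//
//     STATIC_ASSERT(sizeof(uint32_t) == 4);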

#define countof(arr) (sizeof(arr) / sizeof((arr)[0]))


static __inline uint32_t _objc_strhash(const char *s) {
    uint32_t hash = 0;
    for (;;) {
        int a = *s++;
        if (0 == a) break;
        hash += (hash << 8) + a;
    }
    return hash;
}

#if __cplusplus

template <typename T>
static inline T log2u(T x) {
    return (x<2) ? 0 : log2u(x>>1)+1;
}

template <typename T>
static inline T exp2u(T x) {
    return (1 << x);
}

template <typename T>
static T exp2m1u(T x) {
    return (1 << x) - 1;
}

#endif
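
// Illustrative values (not part of the original header): these helpers are
// integer-only, e.g.
//
//     log2u(7)   == 2    // floor(log2(7))
//     exp2u(3)   == 8    // 1 << 3
//     exp2m1u(4) == 15   // (1 << 4) - 1, i.e. a mask of 4 one bits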

// Misalignment-safe integer types
__attribute__((aligned(1))) typedef uintptr_t unaligned_uintptr_t;
__attribute__((aligned(1))) typedef intptr_t unaligned_intptr_t;
__attribute__((aligned(1))) typedef uint64_t unaligned_uint64_t;
__attribute__((aligned(1))) typedef int64_t unaligned_int64_t;
__attribute__((aligned(1))) typedef uint32_t unaligned_uint32_t;
__attribute__((aligned(1))) typedef int32_t unaligned_int32_t;
__attribute__((aligned(1))) typedef uint16_t unaligned_uint16_t;
__attribute__((aligned(1))) typedef int16_t unaligned_int16_t;


// Global operator new and delete. We must not use any app overrides.
// This ALSO REQUIRES each of these be in libobjc's unexported symbol list.
#if __cplusplus && !defined(TEST_OVERRIDES_NEW)
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Winline-new-delete"
#include <new>
inline void* operator new(std::size_t size) { return malloc(size); }
inline void* operator new[](std::size_t size) { return malloc(size); }
inline void* operator new(std::size_t size, const std::nothrow_t&) noexcept(true) { return malloc(size); }
inline void* operator new[](std::size_t size, const std::nothrow_t&) noexcept(true) { return malloc(size); }
inline void operator delete(void* p) noexcept(true) { free(p); }
inline void operator delete[](void* p) noexcept(true) { free(p); }
inline void operator delete(void* p, const std::nothrow_t&) noexcept(true) { free(p); }
inline void operator delete[](void* p, const std::nothrow_t&) noexcept(true) { free(p); }
#pragma clang diagnostic pop
#endif


class TimeLogger {
    uint64_t mStart;
    bool mRecord;
 public:
    TimeLogger(bool record = true)
     : mStart(nanoseconds())
     , mRecord(record)
    { }

    void log(const char *msg) {
        if (mRecord) {
            uint64_t end = nanoseconds();
            _objc_inform("%.2f ms: %s", (end - mStart) / 1000000.0, msg);
            mStart = nanoseconds();
        }
    }
};
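
// Illustrative usage (not part of the original header): a TimeLogger measures
// elapsed intervals and resets its start time after each log() call, e.g.
//
//     TimeLogger ts(SomeLoggingFlag);   // hypothetical flag; logs only if true
//     doSomeWork();                     // hypothetical
//     ts.log("time to do some work");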

enum { CacheLineSize = 64 };

// StripedMap<T> is a map of void* -> T, sized appropriately
// for cache-friendly lock striping.
// For example, this may be used as StripedMap<spinlock_t>
// or as StripedMap<SomeStruct> where SomeStruct stores a spin lock.
template<typename T>
class StripedMap {
#if TARGET_OS_IPHONE && !TARGET_OS_SIMULATOR
    enum { StripeCount = 8 };
#else
    enum { StripeCount = 64 };
#endif

    struct PaddedT {
        T value alignas(CacheLineSize);
    };

    PaddedT array[StripeCount];

    static unsigned int indexForPointer(const void *p) {
        uintptr_t addr = reinterpret_cast<uintptr_t>(p);
        return ((addr >> 4) ^ (addr >> 9)) % StripeCount;
    }

 public:
    T& operator[] (const void *p) {
        return array[indexForPointer(p)].value;
    }
    const T& operator[] (const void *p) const {
        return const_cast<StripedMap<T> *>(this)->operator[](p);
    }

    // Shortcuts for StripedMaps of locks.
    void lockAll() {
        for (unsigned int i = 0; i < StripeCount; i++) {
            array[i].value.lock();
        }
    }

    void unlockAll() {
        for (unsigned int i = 0; i < StripeCount; i++) {
            array[i].value.unlock();
        }
    }

    void forceResetAll() {
        for (unsigned int i = 0; i < StripeCount; i++) {
            array[i].value.forceReset();
        }
    }

    void defineLockOrder() {
        for (unsigned int i = 1; i < StripeCount; i++) {
            lockdebug_lock_precedes_lock(&array[i-1].value, &array[i].value);
        }
    }

    void precedeLock(const void *newlock) {
        // assumes defineLockOrder is also called
        lockdebug_lock_precedes_lock(&array[StripeCount-1].value, newlock);
    }

    void succeedLock(const void *oldlock) {
        // assumes defineLockOrder is also called
        lockdebug_lock_precedes_lock(oldlock, &array[0].value);
    }

    const void *getLock(int i) {
        if (i < StripeCount) return &array[i].value;
        else return nil;
    }

#if DEBUG
    StripedMap() {
        // Verify alignment expectations.
        uintptr_t base = (uintptr_t)&array[0].value;
        uintptr_t delta = (uintptr_t)&array[1].value - base;
        ASSERT(delta % CacheLineSize == 0);
        ASSERT(base % CacheLineSize == 0);
    }
#else
    constexpr StripedMap() {}
#endif
};
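
// Illustrative sketch (not part of the original header): a StripedMap keyed by
// object address spreads lock contention across StripeCount cache-line-sized
// slots, e.g. with a hypothetical spinlock_t:
//
//     static StripedMap<spinlock_t> Locks;
//
//     void withLockFor(const void *obj) {
//         spinlock_t &lock = Locks[obj];   // same obj always maps to the same stripe
//         lock.lock();
//         // ... critical section ...
//         lock.unlock();
//     }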


// DisguisedPtr<T> acts like pointer type T*, except the
// stored value is disguised to hide it from tools like `leaks`.
// nil is disguised as itself so zero-filled memory works as expected,
// which means 0x80..00 is also disguised as itself but we don't care.
// Note that weak_entry_t knows about this encoding.
template <typename T>
class DisguisedPtr {
    uintptr_t value;

    static uintptr_t disguise(T* ptr) {
        return -(uintptr_t)ptr;
    }

    static T* undisguise(uintptr_t val) {
        return (T*)-val;
    }

 public:
    DisguisedPtr() { }
    DisguisedPtr(T* ptr)
        : value(disguise(ptr)) { }
    DisguisedPtr(const DisguisedPtr<T>& ptr)
        : value(ptr.value) { }

    DisguisedPtr<T>& operator = (T* rhs) {
        value = disguise(rhs);
        return *this;
    }
    DisguisedPtr<T>& operator = (const DisguisedPtr<T>& rhs) {
        value = rhs.value;
        return *this;
    }

    operator T* () const {
        return undisguise(value);
    }
    T* operator -> () const {
        return undisguise(value);
    }
    T& operator * () const {
        return *undisguise(value);
    }
    T& operator [] (size_t i) const {
        return undisguise(value)[i];
    }

    // pointer arithmetic operators omitted
    // because we don't currently use them anywhere
};
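
// Illustrative sketch (not part of the original header): DisguisedPtr stores
// the two's-complement negation of the pointer, so the raw word never looks
// like a heap address to tools like `leaks`, yet it round-trips exactly:
//
//     objc_object *obj = ...;              // hypothetical
//     DisguisedPtr<objc_object> dp = obj;  // stored as -(uintptr_t)obj
//     objc_object *back = dp;              // back == obj; nil stays nil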

// fixme type id is weird and not identical to objc_object*
static inline bool operator == (DisguisedPtr<objc_object> lhs, id rhs) {
    return lhs == (objc_object *)rhs;
}
static inline bool operator != (DisguisedPtr<objc_object> lhs, id rhs) {
    return lhs != (objc_object *)rhs;
}


// Storage for a thread-safe chained hook function.
// get() returns the value for calling.
// set() installs a new function and returns the old one for chaining.
// More precisely, set() writes the old value to a variable supplied by
// the caller. get() and set() use appropriate barriers so that the old
// value is safely stored in that variable before the newly installed
// function can be called and chain to it.
//
// T1: store to old variable; store-release to hook variable
// T2: load-acquire from hook variable; call it; called hook loads old variable

template <typename Fn>
class ChainedHookFunction {
    std::atomic<Fn> hook{nil};

public:
    constexpr ChainedHookFunction(Fn f) : hook{f} { };

    Fn get() {
        return hook.load(std::memory_order_acquire);
    }

    void set(Fn newValue, Fn *oldVariable)
    {
        Fn oldValue = hook.load(std::memory_order_relaxed);
        do {
            *oldVariable = oldValue;
        } while (!hook.compare_exchange_weak(oldValue, newValue,
                                             std::memory_order_release,
                                             std::memory_order_relaxed));
    }
};
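
// Illustrative sketch (not part of the original header): installing a hook
// while preserving the previous one for chaining, with `HookFn` standing in
// for a hypothetical hook function pointer type:
//
//     static ChainedHookFunction<HookFn> Hook{nil};
//     static HookFn OldHook;
//
//     Hook.set(MyHook, &OldHook);   // OldHook is written before MyHook can run
//     HookFn current = Hook.get();  // load-acquire; safe to call if non-nil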


// A small vector for use as a global variable. Only supports appending and
// iteration. Stores up to N elements inline, and more than that in a heap
// allocation. There is no attempt to amortize reallocation cost; this is
// intended to be used in situations where a small number of elements is
// common, more might happen, and significantly more is very rare.
//
// This does not clean up its allocation, and thus cannot be used as a local
// variable or member of something with limited lifetime.

template <typename T, unsigned InlineCount>
class GlobalSmallVector {
    static_assert(std::is_pod<T>::value, "SmallVector requires POD types");

protected:
    unsigned count{0};
    union {
        T inlineElements[InlineCount];
        T *elements{nullptr};
    };

public:
    void append(const T &val) {
        if (count < InlineCount) {
            // We have space. Store the new value inline.
            inlineElements[count] = val;
        } else if (count == InlineCount) {
            // Inline storage is full. Switch to a heap allocation.
            T *newElements = (T *)malloc((count + 1) * sizeof(T));
            memcpy(newElements, inlineElements, count * sizeof(T));
            newElements[count] = val;
            elements = newElements;
        } else {
            // Resize the heap allocation and append.
            elements = (T *)realloc(elements, (count + 1) * sizeof(T));
            elements[count] = val;
        }
        count++;
    }

    const T *begin() const {
        return count <= InlineCount ? inlineElements : elements;
    }

    const T *end() const {
        return begin() + count;
    }
};

// A small vector that cleans up its internal memory allocation when destroyed.
template <typename T, unsigned InlineCount>
class SmallVector: public GlobalSmallVector<T, InlineCount> {
public:
    ~SmallVector() {
        if (this->count > InlineCount)
            free(this->elements);
    }

    template <unsigned OtherCount>
    void initFrom(const GlobalSmallVector<T, OtherCount> &other) {
        ASSERT(this->count == 0);
        this->count = (unsigned)(other.end() - other.begin());
        if (this->count > InlineCount) {
            this->elements = (T *)memdup(other.begin(), this->count * sizeof(T));
        } else {
            memcpy(this->inlineElements, other.begin(), this->count * sizeof(T));
        }
    }
};
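
// Illustrative usage (not part of the original header): append and iterate,
// with the first InlineCount elements stored inline and later ones on the heap:
//
//     GlobalSmallVector<Class, 2> classes;   // hypothetical global
//     classes.append(cls1);                  // cls1..cls3 are hypothetical
//     classes.append(cls2);
//     classes.append(cls3);                  // spills to a malloc'd buffer
//     for (Class c : classes) { /* ... */ }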

// Pointer hash function.
// This is not a terrific hash, but it is fast
// and not outrageously flawed for our purposes.

// Based on principles from http://locklessinc.com/articles/fast_hash/
// and evaluation ideas from http://floodyberry.com/noncryptohashzoo/
#if __LP64__
static inline uint32_t ptr_hash(uint64_t key)
{
    key ^= key >> 4;
    key *= 0x8a970be7488fda55;
    key ^= __builtin_bswap64(key);
    return (uint32_t)key;
}
#else
static inline uint32_t ptr_hash(uint32_t key)
{
    key ^= key >> 4;
    key *= 0x5052acdb;
    key ^= __builtin_bswap32(key);
    return key;
}
#endif

/*
  Higher-quality hash function. This is measurably slower in some workloads.
#if __LP64__
uint32_t ptr_hash(uint64_t key)
{
    key -= __builtin_bswap64(key);
    key *= 0x8a970be7488fda55;
    key ^= __builtin_bswap64(key);
    key *= 0x8a970be7488fda55;
    key ^= __builtin_bswap64(key);
    return (uint32_t)key;
}
#else
static uint32_t ptr_hash(uint32_t key)
{
    key -= __builtin_bswap32(key);
    key *= 0x5052acdb;
    key ^= __builtin_bswap32(key);
    key *= 0x5052acdb;
    key ^= __builtin_bswap32(key);
    return key;
}
#endif
*/


// Lock declarations
#include "objc-locks.h"

// Inlined parts of objc_object's implementation
#include "objc-object.h"

#endif /* _OBJC_PRIVATE_H_ */