2 * Copyright (c) 2017 Apple Inc. All rights reserved.
4 * @APPLE_LICENSE_HEADER_START@
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. Please obtain a copy of the License at
10 * http://www.opensource.apple.com/apsl/ and read it before using this
11 * file.
13 * The Original Code and all software distributed under the License are
14 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
15 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
16 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
18 * Please see the License for the specific language governing rights and
19 * limitations under the License.
21 * @APPLE_LICENSE_HEADER_END@
27 #include <sys/sysctl.h>
28 #include <mach/mach_time.h> // mach_absolute_time()
29 #include <pthread/pthread.h>
30 #include <libkern/OSAtomic.h>
35 #include "AllImages.h"
36 #include "MachOParser.h"
37 #include "libdyldEntryVector.h"
41 #include "LaunchCache.h"
42 #include "DyldSharedCache.h"
43 #include "PathOverrides.h"
44 #include "DyldCacheParser.h"
46 extern const char** appleParams
;
// should be a header for these
// Describes one contiguous address range whose static destructors
// (registered via __cxa_atexit) should be run when the range is unloaded.
// NOTE(review): member list reconstructed — the original declaration lines
// were lost in extraction; libc++abi declares this as { addr, length }.
struct __cxa_range_t {
    const void* addr;    // start of range
    size_t      length;  // size of range in bytes
};
// Runs C++ static terminators whose code lies within any of the given ranges.
extern "C" void __cxa_finalize_ranges(const __cxa_range_t ranges[], unsigned int count);
55 VIS_HIDDEN
bool gUseDyld3
= false;
60 class VIS_HIDDEN LoadedImage
{
62 enum class State
{ uninited
=3, beingInited
=2, inited
=0 };
63 typedef launch_cache::binary_format::Image BinaryImage
;
65 LoadedImage(const mach_header
* mh
, const BinaryImage
* bi
);
66 bool operator==(const LoadedImage
& rhs
) const;
67 void init(const mach_header
* mh
, const BinaryImage
* bi
);
68 const mach_header
* loadedAddress() const { return (mach_header
*)((uintptr_t)_loadAddress
& ~0x7ULL
); }
69 State
state() const { return (State
)((uintptr_t)_loadAddress
& 0x3ULL
); }
70 const BinaryImage
* image() const { return _image
; }
71 bool neverUnload() const { return ((uintptr_t)_loadAddress
& 0x4ULL
); }
72 void setState(State s
) { _loadAddress
= (mach_header
*)((((uintptr_t)_loadAddress
) & ~0x3ULL
) | (uintptr_t)s
); }
73 void setNeverUnload() { _loadAddress
= (mach_header
*)(((uintptr_t)_loadAddress
) | 0x4ULL
); }
76 const mach_header
* _loadAddress
; // low bits: bit2=neverUnload, bit1/bit0 contain State
77 const BinaryImage
* _image
;
81 bool LoadedImage::operator==(const LoadedImage
& rhs
) const
83 return (_image
== rhs
._image
) && (loadedAddress() == rhs
.loadedAddress());
88 struct VIS_HIDDEN DlopenCount
{
89 bool operator==(const DlopenCount
& rhs
) const;
90 const mach_header
* loadAddress
;
94 bool DlopenCount::operator==(const DlopenCount
& rhs
) const
96 return (loadAddress
== rhs
.loadAddress
) && (refCount
== rhs
.refCount
);
99 LoadedImage::LoadedImage(const mach_header
* mh
, const BinaryImage
* bi
)
100 : _loadAddress(mh
), _image(bi
)
102 assert(loadedAddress() == mh
);
103 setState(State::uninited
);
106 void LoadedImage::init(const mach_header
* mh
, const BinaryImage
* bi
)
110 assert(loadedAddress() == mh
);
111 setState(State::uninited
);
115 template <typename T
, int C
> class ReaderWriterChunkedVector
;
117 template <typename T
, int C
>
118 class VIS_HIDDEN ChunkedVector
{
120 static ChunkedVector
<T
,C
>* make(uint32_t count
);
122 void forEach(uint32_t& startIndex
, bool& outerStop
, void (^callback
)(uint32_t index
, const T
& value
, bool& stop
)) const;
123 void forEach(uint32_t& startIndex
, bool& outerStop
, void (^callback
)(uint32_t index
, T
& value
, bool& stop
));
124 T
* add(const T
& value
);
125 T
* add(uint32_t count
, const T values
[]);
126 void remove(uint32_t index
);
127 uint32_t count() const { return _inUseCount
; }
128 uint32_t freeCount() const { return _allocCount
- _inUseCount
; }
130 T
& element(uint32_t index
) { return ((T
*)_elements
)[index
]; }
131 const T
& element(uint32_t index
) const { return ((T
*)_elements
)[index
]; }
133 friend class ReaderWriterChunkedVector
<T
,C
>;
135 ChunkedVector
<T
,C
>* _next
= nullptr;
136 uint32_t _allocCount
= C
;
137 uint32_t _inUseCount
= 0;
138 uint8_t _elements
[C
*sizeof(T
)] = { 0 };
141 template <typename T
, int C
>
142 class VIS_HIDDEN ReaderWriterChunkedVector
{
144 T
* add(uint32_t count
, const T values
[]);
145 T
* add(const T
& value
) { return add(1, &value
); }
146 T
* addNoLock(uint32_t count
, const T values
[]);
147 T
* addNoLock(const T
& value
) { return addNoLock(1, &value
); }
148 void remove(const T
& value
);
149 uint32_t count() const;
150 void forEachWithReadLock(void (^callback
)(uint32_t index
, const T
& value
, bool& stop
)) const;
151 void forEachWithWriteLock(void (^callback
)(uint32_t index
, T
& value
, bool& stop
));
152 void forEachNoLock(void (^callback
)(uint32_t index
, const T
& value
, bool& stop
)) const;
153 T
& operator[](size_t index
);
154 uint32_t countNoLock() const;
156 void withReadLock(void (^withLock
)()) const;
157 void withWriteLock(void (^withLock
)()) const;
158 void acquireWriteLock();
159 void releaseWriteLock();
160 void dump(void (^callback
)(const T
& value
)) const;
163 mutable pthread_rwlock_t _lock
= PTHREAD_RWLOCK_INITIALIZER
;
164 ChunkedVector
<T
,C
> _firstChunk
;
168 typedef void (*NotifyFunc
)(const mach_header
* mh
, intptr_t slide
);
170 static ReaderWriterChunkedVector
<NotifyFunc
, 4> sLoadNotifiers
;
171 static ReaderWriterChunkedVector
<NotifyFunc
, 4> sUnloadNotifiers
;
172 static ReaderWriterChunkedVector
<LoadedImage
, 4> sLoadedImages
;
173 static ReaderWriterChunkedVector
<DlopenCount
, 4> sDlopenRefCounts
;
174 static ReaderWriterChunkedVector
<const launch_cache::BinaryImageGroupData
*, 4> sKnownGroups
;
175 #if __MAC_OS_X_VERSION_MIN_REQUIRED
176 static ReaderWriterChunkedVector
<__NSObjectFileImage
, 2> sNSObjectFileImages
;
180 ///////////////////// ChunkedVector ////////////////////////////
182 template <typename T
, int C
>
183 ChunkedVector
<T
,C
>* ChunkedVector
<T
,C
>::make(uint32_t count
)
185 size_t size
= sizeof(ChunkedVector
) + sizeof(T
) * (count
-C
);
186 ChunkedVector
<T
,C
>* result
= (ChunkedVector
<T
,C
>*)malloc(size
);
187 result
->_next
= nullptr;
188 result
->_allocCount
= count
;
189 result
->_inUseCount
= 0;
193 template <typename T
, int C
>
194 void ChunkedVector
<T
,C
>::forEach(uint32_t& outerIndex
, bool& outerStop
, void (^callback
)(uint32_t index
, const T
& value
, bool& stop
)) const
196 for (uint32_t i
=0; i
< _inUseCount
; ++i
) {
197 callback(outerIndex
, element(i
), outerStop
);
204 template <typename T
, int C
>
205 void ChunkedVector
<T
,C
>::forEach(uint32_t& outerIndex
, bool& outerStop
, void (^callback
)(uint32_t index
, T
& value
, bool& stop
))
207 for (uint32_t i
=0; i
< _inUseCount
; ++i
) {
208 callback(outerIndex
, element(i
), outerStop
);
215 template <typename T
, int C
>
216 T
* ChunkedVector
<T
,C
>::add(const T
& value
)
218 return add(1, &value
);
221 template <typename T
, int C
>
222 T
* ChunkedVector
<T
,C
>::add(uint32_t count
, const T values
[])
224 assert(count
<= (_allocCount
- _inUseCount
));
225 T
* result
= &element(_inUseCount
);
226 memmove(result
, values
, sizeof(T
)*count
);
227 _inUseCount
+= count
;
231 template <typename T
, int C
>
232 void ChunkedVector
<T
,C
>::remove(uint32_t index
)
234 assert(index
< _inUseCount
);
235 int moveCount
= _inUseCount
- index
- 1;
236 if ( moveCount
>= 1 ) {
237 memmove(&element(index
), &element(index
+1), sizeof(T
)*moveCount
);
243 ///////////////////// ReaderWriterChunkedVector ////////////////////////////
247 template <typename T
, int C
>
248 void ReaderWriterChunkedVector
<T
,C
>::withReadLock(void (^work
)()) const
250 assert(pthread_rwlock_rdlock(&_lock
) == 0);
252 assert(pthread_rwlock_unlock(&_lock
) == 0);
255 template <typename T
, int C
>
256 void ReaderWriterChunkedVector
<T
,C
>::withWriteLock(void (^work
)()) const
258 assert(pthread_rwlock_wrlock(&_lock
) == 0);
260 assert(pthread_rwlock_unlock(&_lock
) == 0);
263 template <typename T
, int C
>
264 void ReaderWriterChunkedVector
<T
,C
>::acquireWriteLock()
266 assert(pthread_rwlock_wrlock(&_lock
) == 0);
269 template <typename T
, int C
>
270 void ReaderWriterChunkedVector
<T
,C
>::releaseWriteLock()
272 assert(pthread_rwlock_unlock(&_lock
) == 0);
275 template <typename T
, int C
>
276 uint32_t ReaderWriterChunkedVector
<T
,C
>::count() const
278 __block
uint32_t result
= 0;
280 for (const ChunkedVector
<T
,C
>* chunk
= &_firstChunk
; chunk
!= nullptr; chunk
= chunk
->_next
) {
281 result
+= chunk
->count();
287 template <typename T
, int C
>
288 uint32_t ReaderWriterChunkedVector
<T
,C
>::countNoLock() const
291 for (const ChunkedVector
<T
,C
>* chunk
= &_firstChunk
; chunk
!= nullptr; chunk
= chunk
->_next
) {
292 result
+= chunk
->count();
297 template <typename T
, int C
>
298 T
* ReaderWriterChunkedVector
<T
,C
>::addNoLock(uint32_t count
, const T values
[])
301 ChunkedVector
<T
,C
>* lastChunk
= &_firstChunk
;
302 while ( lastChunk
->_next
!= nullptr )
303 lastChunk
= lastChunk
->_next
;
305 if ( lastChunk
->freeCount() >= count
) {
306 // append to last chunk
307 result
= lastChunk
->add(count
, values
);
311 uint32_t allocCount
= count
;
312 uint32_t remainder
= count
% C
;
313 if ( remainder
!= 0 )
314 allocCount
= count
+ C
- remainder
;
315 ChunkedVector
<T
,C
>* newChunk
= ChunkedVector
<T
,C
>::make(allocCount
);
316 result
= newChunk
->add(count
, values
);
317 lastChunk
->_next
= newChunk
;
323 template <typename T
, int C
>
324 T
* ReaderWriterChunkedVector
<T
,C
>::add(uint32_t count
, const T values
[])
326 __block T
* result
= nullptr;
328 result
= addNoLock(count
, values
);
333 template <typename T
, int C
>
334 void ReaderWriterChunkedVector
<T
,C
>::remove(const T
& valueToRemove
)
336 __block
bool stopStorage
= false;
338 ChunkedVector
<T
,C
>* chunkNowEmpty
= nullptr;
339 __block
uint32_t indexStorage
= 0;
340 __block
bool found
= false;
341 for (ChunkedVector
<T
,C
>* chunk
= &_firstChunk
; chunk
!= nullptr; chunk
= chunk
->_next
) {
342 uint32_t chunkStartIndex
= indexStorage
;
343 __block
uint32_t foundIndex
= 0;
344 chunk
->forEach(indexStorage
, stopStorage
, ^(uint32_t index
, const T
& value
, bool& stop
) {
345 if ( value
== valueToRemove
) {
346 foundIndex
= index
- chunkStartIndex
;
352 chunk
->remove(foundIndex
);
354 if ( chunk
->count() == 0 )
355 chunkNowEmpty
= chunk
;
358 // if chunk is now empty, remove from linked list and free
359 if ( chunkNowEmpty
) {
360 for (ChunkedVector
<T
,C
>* chunk
= &_firstChunk
; chunk
!= nullptr; chunk
= chunk
->_next
) {
361 if ( chunk
->_next
== chunkNowEmpty
) {
362 chunk
->_next
= chunkNowEmpty
->_next
;
363 if ( chunkNowEmpty
!= &_firstChunk
)
372 template <typename T
, int C
>
373 void ReaderWriterChunkedVector
<T
,C
>::forEachWithReadLock(void (^callback
)(uint32_t index
, const T
& value
, bool& stop
)) const
375 __block
uint32_t index
= 0;
376 __block
bool stop
= false;
378 for (const ChunkedVector
<T
,C
>* chunk
= &_firstChunk
; chunk
!= nullptr; chunk
= chunk
->_next
) {
379 chunk
->forEach(index
, stop
, callback
);
386 template <typename T
, int C
>
387 void ReaderWriterChunkedVector
<T
,C
>::forEachWithWriteLock(void (^callback
)(uint32_t index
, T
& value
, bool& stop
))
389 __block
uint32_t index
= 0;
390 __block
bool stop
= false;
392 for (ChunkedVector
<T
,C
>* chunk
= &_firstChunk
; chunk
!= nullptr; chunk
= chunk
->_next
) {
393 chunk
->forEach(index
, stop
, callback
);
400 template <typename T
, int C
>
401 void ReaderWriterChunkedVector
<T
,C
>::forEachNoLock(void (^callback
)(uint32_t index
, const T
& value
, bool& stop
)) const
405 for (const ChunkedVector
<T
,C
>* chunk
= &_firstChunk
; chunk
!= nullptr; chunk
= chunk
->_next
) {
406 chunk
->forEach(index
, stop
, callback
);
412 template <typename T
, int C
>
413 T
& ReaderWriterChunkedVector
<T
,C
>::operator[](size_t targetIndex
)
415 __block T
* result
= nullptr;
416 forEachNoLock(^(uint32_t index
, T
const& value
, bool& stop
) {
417 if ( index
== targetIndex
) {
425 template <typename T
, int C
>
426 void ReaderWriterChunkedVector
<T
,C
>::dump(void (^callback
)(const T
& value
)) const
428 log("dump ReaderWriterChunkedVector at %p\n", this);
429 __block
uint32_t index
= 0;
430 __block
bool stop
= false;
432 for (const ChunkedVector
<T
,C
>* chunk
= &_firstChunk
; chunk
!= nullptr; chunk
= chunk
->_next
) {
433 log(" chunk at %p\n", chunk
);
434 chunk
->forEach(index
, stop
, ^(uint32_t i
, const T
& value
, bool& s
) {
443 ///////////////////// AllImages ////////////////////////////
446 AllImages gAllImages
;
450 void AllImages::init(const BinaryClosure
* closure
, const void* dyldCacheLoadAddress
, const char* dyldCachePath
,
451 const dyld3::launch_cache::DynArray
<loader::ImageInfo
>& initialImages
)
453 _mainClosure
= closure
;
454 _initialImages
= &initialImages
;
455 _dyldCacheAddress
= dyldCacheLoadAddress
;
456 _dyldCachePath
= dyldCachePath
;
458 if ( _dyldCacheAddress
) {
459 const DyldSharedCache
* cache
= (DyldSharedCache
*)_dyldCacheAddress
;
460 const dyld_cache_mapping_info
* const fileMappings
= (dyld_cache_mapping_info
*)((uint64_t)_dyldCacheAddress
+ cache
->header
.mappingOffset
);
461 _dyldCacheSlide
= (uint64_t)dyldCacheLoadAddress
- fileMappings
[0].address
;
464 // Make temporary old image array, so libSystem initializers can be debugged
465 uint32_t count
= (uint32_t)initialImages
.count();
466 dyld_image_info oldDyldInfo
[count
];
467 for (int i
=0; i
< count
; ++i
) {
468 launch_cache::Image
img(initialImages
[i
].imageData
);
469 oldDyldInfo
[i
].imageLoadAddress
= initialImages
[i
].loadAddress
;
470 oldDyldInfo
[i
].imageFilePath
= img
.path();
471 oldDyldInfo
[i
].imageFileModDate
= 0;
473 _oldAllImageInfos
->infoArray
= oldDyldInfo
;
474 _oldAllImageInfos
->infoArrayCount
= count
;
475 _oldAllImageInfos
->notification(dyld_image_adding
, count
, oldDyldInfo
);
476 _oldAllImageInfos
->infoArray
= nullptr;
477 _oldAllImageInfos
->infoArrayCount
= 0;
480 void AllImages::setProgramVars(ProgramVars
* vars
)
485 void AllImages::applyInitialImages()
487 addImages(*_initialImages
);
488 _initialImages
= nullptr; // this was stack allocated
491 void AllImages::mirrorToOldAllImageInfos()
493 // set infoArray to NULL to denote it is in-use
494 _oldAllImageInfos
->infoArray
= nullptr;
496 // if array not large enough, re-alloc it
497 uint32_t imageCount
= sLoadedImages
.countNoLock();
498 if ( _oldArrayAllocCount
< imageCount
) {
499 uint32_t newAllocCount
= imageCount
+ 16;
500 dyld_image_info
* newArray
= (dyld_image_info
*)malloc(sizeof(dyld_image_info
)*newAllocCount
);
501 if ( _oldAllImageArray
!= nullptr ) {
502 memcpy(newArray
, _oldAllImageArray
, sizeof(dyld_image_info
)*_oldAllImageInfos
->infoArrayCount
);
503 free(_oldAllImageArray
);
505 _oldAllImageArray
= newArray
;
506 _oldArrayAllocCount
= newAllocCount
;
509 // fill out array to mirror current image list
510 sLoadedImages
.forEachNoLock(^(uint32_t index
, const LoadedImage
& loadedImage
, bool& stop
) {
511 launch_cache::Image
img(loadedImage
.image());
512 _oldAllImageArray
[index
].imageLoadAddress
= loadedImage
.loadedAddress();
513 _oldAllImageArray
[index
].imageFilePath
= imagePath(loadedImage
.image());
514 _oldAllImageArray
[index
].imageFileModDate
= 0;
517 // set infoArray back to base address of array (so other process can now read)
518 _oldAllImageInfos
->infoArrayCount
= imageCount
;
519 _oldAllImageInfos
->infoArrayChangeTimestamp
= mach_absolute_time();
520 _oldAllImageInfos
->infoArray
= _oldAllImageArray
;
523 void AllImages::addImages(const launch_cache::DynArray
<loader::ImageInfo
>& newImages
)
525 uint32_t count
= (uint32_t)newImages
.count();
528 // build stack array of LoadedImage to copy into sLoadedImages
529 STACK_ALLOC_DYNARRAY(LoadedImage
, count
, loadedImagesArray
);
530 for (uint32_t i
=0; i
< count
; ++i
) {
531 loadedImagesArray
[i
].init(newImages
[i
].loadAddress
, newImages
[i
].imageData
);
532 if (newImages
[i
].neverUnload
)
533 loadedImagesArray
[i
].setNeverUnload();
535 sLoadedImages
.add(count
, &loadedImagesArray
[0]);
537 if ( _oldAllImageInfos
!= nullptr ) {
538 // sync to old all image infos struct
539 if ( _initialImages
!= nullptr ) {
540 // libSystem not initialized yet, don't use locks
541 mirrorToOldAllImageInfos();
544 sLoadedImages
.withReadLock(^{
545 mirrorToOldAllImageInfos();
549 // tell debugger about new images
550 dyld_image_info oldDyldInfo
[count
];
551 for (int i
=0; i
< count
; ++i
) {
552 launch_cache::Image
img(newImages
[i
].imageData
);
553 oldDyldInfo
[i
].imageLoadAddress
= newImages
[i
].loadAddress
;
554 oldDyldInfo
[i
].imageFilePath
= imagePath(newImages
[i
].imageData
);
555 oldDyldInfo
[i
].imageFileModDate
= 0;
557 _oldAllImageInfos
->notification(dyld_image_adding
, count
, oldDyldInfo
);
561 for (int i
=0; i
< count
; ++i
) {
562 launch_cache::Image
img(newImages
[i
].imageData
);
563 log_loads("dyld: %s\n", imagePath(newImages
[i
].imageData
));
566 #if !TARGET_IPHONE_SIMULATOR
567 // call kdebug trace for each image
568 if (kdebug_is_enabled(KDBG_CODE(DBG_DYLD
, DBG_DYLD_UUID
, DBG_DYLD_UUID_MAP_A
))) {
569 for (uint32_t i
=0; i
< count
; ++i
) {
570 launch_cache::Image
img(newImages
[i
].imageData
);
571 struct stat stat_buf
;
572 fsid_t fsid
= {{ 0, 0 }};
573 fsobj_id_t fsobjid
= { 0, 0 };
574 if (img
.isDiskImage() && stat(imagePath(newImages
[i
].imageData
), &stat_buf
) == 0 ) {
575 fsobjid
= *(fsobj_id_t
*)&stat_buf
.st_ino
;
576 fsid
= {{ stat_buf
.st_dev
, 0 }};
578 kdebug_trace_dyld_image(DBG_DYLD_UUID_MAP_A
, img
.uuid(), fsobjid
, fsid
, newImages
[i
].loadAddress
);
582 // call each _dyld_register_func_for_add_image function with each image
583 const uint32_t existingNotifierCount
= sLoadNotifiers
.count();
584 NotifyFunc existingNotifiers
[existingNotifierCount
];
585 NotifyFunc
* existingNotifierArray
= existingNotifiers
;
586 sLoadNotifiers
.forEachWithReadLock(^(uint32_t index
, const NotifyFunc
& func
, bool& stop
) {
587 if ( index
< existingNotifierCount
)
588 existingNotifierArray
[index
] = func
;
590 // we don't want to hold lock while calling out, so prebuild array (with lock) then do calls on that array (without lock)
591 for (uint32_t j
=0; j
< existingNotifierCount
; ++j
) {
592 NotifyFunc func
= existingNotifierArray
[j
];
593 for (uint32_t i
=0; i
< count
; ++i
) {
594 log_notifications("dyld: add notifier %p called with mh=%p\n", func
, newImages
[i
].loadAddress
);
595 if (newImages
[i
].justUsedFromDyldCache
) {
596 func(newImages
[i
].loadAddress
, _dyldCacheSlide
);
598 MachOParser
parser(newImages
[i
].loadAddress
);
599 func(newImages
[i
].loadAddress
, parser
.getSlide());
604 // call objc about images that use objc
605 if ( _objcNotifyMapped
!= nullptr ) {
606 const char* pathsBuffer
[count
];
607 const mach_header
* mhBuffer
[count
];
608 uint32_t imagesWithObjC
= 0;
609 for (uint32_t i
=0; i
< count
; ++i
) {
610 launch_cache::Image
img(newImages
[i
].imageData
);
611 if ( img
.hasObjC() ) {
612 pathsBuffer
[imagesWithObjC
] = imagePath(newImages
[i
].imageData
);
613 mhBuffer
[imagesWithObjC
] = newImages
[i
].loadAddress
;
617 if ( imagesWithObjC
!= 0 ) {
618 (*_objcNotifyMapped
)(imagesWithObjC
, pathsBuffer
, mhBuffer
);
619 if ( log_notifications("dyld: objc-mapped-notifier called with %d images:\n", imagesWithObjC
) ) {
620 for (uint32_t i
=0; i
< imagesWithObjC
; ++i
) {
621 log_notifications("dyld: objc-mapped: %p %s\n", mhBuffer
[i
], pathsBuffer
[i
]);
627 // notify any processes tracking loads in this process
628 notifyMonitorLoads(newImages
);
631 void AllImages::removeImages(const launch_cache::DynArray
<loader::ImageInfo
>& unloadImages
)
633 uint32_t count
= (uint32_t)unloadImages
.count();
636 // call each _dyld_register_func_for_remove_image function with each image
637 // do this before removing image from internal data structures so that the callback can query dyld about the image
638 const uint32_t existingNotifierCount
= sUnloadNotifiers
.count();
639 NotifyFunc existingNotifiers
[existingNotifierCount
];
640 NotifyFunc
* existingNotifierArray
= existingNotifiers
;
641 sUnloadNotifiers
.forEachWithReadLock(^(uint32_t index
, const NotifyFunc
& func
, bool& stop
) {
642 if ( index
< existingNotifierCount
)
643 existingNotifierArray
[index
] = func
;
645 // we don't want to hold lock while calling out, so prebuild array (with lock) then do calls on that array (without lock)
646 for (uint32_t j
=0; j
< existingNotifierCount
; ++j
) {
647 NotifyFunc func
= existingNotifierArray
[j
];
648 for (uint32_t i
=0; i
< count
; ++i
) {
649 MachOParser
parser(unloadImages
[i
].loadAddress
);
650 log_notifications("dyld: remove notifier %p called with mh=%p\n", func
, unloadImages
[i
].loadAddress
);
651 func(unloadImages
[i
].loadAddress
, parser
.getSlide());
655 // call objc about images going away
656 if ( _objcNotifyUnmapped
!= nullptr ) {
657 for (uint32_t i
=0; i
< count
; ++i
) {
658 launch_cache::Image
img(unloadImages
[i
].imageData
);
659 if ( img
.hasObjC() ) {
660 (*_objcNotifyUnmapped
)(imagePath(unloadImages
[i
].imageData
), unloadImages
[i
].loadAddress
);
661 log_notifications("dyld: objc-unmapped-notifier called with image %p %s\n", unloadImages
[i
].loadAddress
, imagePath(unloadImages
[i
].imageData
));
666 #if !TARGET_IPHONE_SIMULATOR
667 // call kdebug trace for each image
668 if (kdebug_is_enabled(KDBG_CODE(DBG_DYLD
, DBG_DYLD_UUID
, DBG_DYLD_UUID_MAP_A
))) {
669 for (uint32_t i
=0; i
< count
; ++i
) {
670 launch_cache::Image
img(unloadImages
[i
].imageData
);
671 struct stat stat_buf
;
672 fsid_t fsid
= {{ 0, 0 }};
673 fsobj_id_t fsobjid
= { 0, 0 };
674 if (stat(imagePath(unloadImages
[i
].imageData
), &stat_buf
) == 0 ) {
675 fsobjid
= *(fsobj_id_t
*)&stat_buf
.st_ino
;
676 fsid
= {{ stat_buf
.st_dev
, 0 }};
678 kdebug_trace_dyld_image(DBG_DYLD_UUID_UNMAP_A
, img
.uuid(), fsobjid
, fsid
, unloadImages
[i
].loadAddress
);
683 // remove each from sLoadedImages
684 for (uint32_t i
=0; i
< count
; ++i
) {
685 LoadedImage
info(unloadImages
[i
].loadAddress
, unloadImages
[i
].imageData
);
686 sLoadedImages
.remove(info
);
689 // sync to old all image infos struct
690 sLoadedImages
.withReadLock(^{
691 mirrorToOldAllImageInfos();
694 // tell debugger about removed images
695 dyld_image_info oldDyldInfo
[count
];
696 for (int i
=0; i
< count
; ++i
) {
697 launch_cache::Image
img(unloadImages
[i
].imageData
);
698 oldDyldInfo
[i
].imageLoadAddress
= unloadImages
[i
].loadAddress
;
699 oldDyldInfo
[i
].imageFilePath
= imagePath(unloadImages
[i
].imageData
);
700 oldDyldInfo
[i
].imageFileModDate
= 0;
702 _oldAllImageInfos
->notification(dyld_image_removing
, count
, oldDyldInfo
);
705 for (int i
=0; i
< count
; ++i
) {
706 launch_cache::Image
img(unloadImages
[i
].imageData
);
707 loader::unmapImage(unloadImages
[i
].imageData
, unloadImages
[i
].loadAddress
);
708 log_loads("dyld: unloaded %s\n", imagePath(unloadImages
[i
].imageData
));
711 // notify any processes tracking loads in this process
712 notifyMonitorUnloads(unloadImages
);
715 void AllImages::setNeverUnload(const loader::ImageInfo
& existingImage
)
717 sLoadedImages
.forEachWithWriteLock(^(uint32_t index
, dyld3::LoadedImage
&value
, bool &stop
) {
718 if (value
.image() == existingImage
.imageData
) {
719 value
.setNeverUnload();
725 uint32_t AllImages::count() const
727 return sLoadedImages
.count();
731 launch_cache::Image
AllImages::findByLoadOrder(uint32_t index
, const mach_header
** loadAddress
) const
733 __block
const BinaryImage
* foundImage
= nullptr;
734 sLoadedImages
.forEachWithReadLock(^(uint32_t anIndex
, const LoadedImage
& loadedImage
, bool& stop
) {
735 if ( anIndex
== index
) {
736 foundImage
= loadedImage
.image();
737 *loadAddress
= loadedImage
.loadedAddress();
741 return launch_cache::Image(foundImage
);
744 launch_cache::Image
AllImages::findByLoadAddress(const mach_header
* loadAddress
) const
746 __block
const BinaryImage
* foundImage
= nullptr;
747 sLoadedImages
.forEachWithReadLock(^(uint32_t anIndex
, const LoadedImage
& loadedImage
, bool& stop
) {
748 if ( loadedImage
.loadedAddress() == loadAddress
) {
749 foundImage
= loadedImage
.image();
753 return launch_cache::Image(foundImage
);
756 bool AllImages::findIndexForLoadAddress(const mach_header
* loadAddress
, uint32_t& index
)
758 __block
bool result
= false;
759 sLoadedImages
.forEachWithReadLock(^(uint32_t anIndex
, const LoadedImage
& loadedImage
, bool& stop
) {
760 if ( loadedImage
.loadedAddress() == loadAddress
) {
769 void AllImages::forEachImage(void (^handler
)(uint32_t imageIndex
, const mach_header
* loadAddress
, const launch_cache::Image image
, bool& stop
)) const
771 sLoadedImages
.forEachWithReadLock(^(uint32_t imageIndex
, const LoadedImage
& loadedImage
, bool& stop
) {
772 handler(imageIndex
, loadedImage
.loadedAddress(), launch_cache::Image(loadedImage
.image()), stop
);
776 launch_cache::Image
AllImages::findByOwnedAddress(const void* addr
, const mach_header
** loadAddress
, uint8_t* permissions
) const
778 if ( _initialImages
!= nullptr ) {
779 // being called during libSystem initialization, so sLoadedImages not allocated yet
780 for (int i
=0; i
< _initialImages
->count(); ++i
) {
781 const loader::ImageInfo
& entry
= (*_initialImages
)[i
];
782 launch_cache::Image
anImage(entry
.imageData
);
783 if ( anImage
.containsAddress(addr
, entry
.loadAddress
, permissions
) ) {
784 *loadAddress
= entry
.loadAddress
;
785 return entry
.imageData
;
788 return launch_cache::Image(nullptr);
791 // if address is in cache, do fast search of cache
792 if ( (_dyldCacheAddress
!= nullptr) && (addr
> _dyldCacheAddress
) ) {
793 const DyldSharedCache
* dyldCache
= (DyldSharedCache
*)_dyldCacheAddress
;
794 if ( addr
< (void*)((uint8_t*)_dyldCacheAddress
+dyldCache
->mappedSize()) ) {
795 size_t cacheVmOffset
= ((uint8_t*)addr
- (uint8_t*)_dyldCacheAddress
);
796 DyldCacheParser
cacheParser(dyldCache
, false);
797 launch_cache::ImageGroup
cachedDylibsGroup(cacheParser
.cachedDylibsGroup());
798 uint32_t mhCacheOffset
;
799 uint8_t foundPermissions
;
800 launch_cache::Image
image(cachedDylibsGroup
.findImageByCacheOffset(cacheVmOffset
, mhCacheOffset
, foundPermissions
));
801 if ( image
.valid() ) {
802 *loadAddress
= (mach_header
*)((uint8_t*)_dyldCacheAddress
+ mhCacheOffset
);
803 if ( permissions
!= nullptr )
804 *permissions
= foundPermissions
;
810 __block
const BinaryImage
* foundImage
= nullptr;
811 sLoadedImages
.forEachWithReadLock(^(uint32_t anIndex
, const LoadedImage
& loadedImage
, bool& stop
) {
812 launch_cache::Image
anImage(loadedImage
.image());
813 if ( anImage
.containsAddress(addr
, loadedImage
.loadedAddress(), permissions
) ) {
814 *loadAddress
= loadedImage
.loadedAddress();
815 foundImage
= loadedImage
.image();
819 return launch_cache::Image(foundImage
);
822 const mach_header
* AllImages::findLoadAddressByImage(const BinaryImage
* targetImage
) const
824 __block
const mach_header
* foundAddress
= nullptr;
825 sLoadedImages
.forEachWithReadLock(^(uint32_t anIndex
, const LoadedImage
& loadedImage
, bool& stop
) {
826 if ( targetImage
== loadedImage
.image() ) {
827 foundAddress
= loadedImage
.loadedAddress();
834 const mach_header
* AllImages::mainExecutable() const
836 assert(_programVars
!= nullptr);
837 return (const mach_header
*)_programVars
->mh
;
840 launch_cache::Image
AllImages::mainExecutableImage() const
842 assert(_mainClosure
!= nullptr);
843 const launch_cache::Closure
mainClosure(_mainClosure
);
844 const dyld3::launch_cache::ImageGroup mainGroup
= mainClosure
.group();
845 const uint32_t mainExecutableIndex
= mainClosure
.mainExecutableImageIndex();
846 const dyld3::launch_cache::Image mainImage
= mainGroup
.image(mainExecutableIndex
);
850 void AllImages::setMainPath(const char* path
)
852 _mainExeOverridePath
= path
;
855 const char* AllImages::imagePath(const BinaryImage
* binImage
) const
857 #if __IPHONE_OS_VERSION_MIN_REQUIRED
858 // on iOS and watchOS, apps may be moved on device after closure built
859 if ( _mainExeOverridePath
!= nullptr ) {
860 if ( binImage
== mainExecutableImage().binaryData() )
861 return _mainExeOverridePath
;
864 launch_cache::Image
image(binImage
);
868 void AllImages::setInitialGroups()
870 DyldCacheParser
cacheParser((DyldSharedCache
*)_dyldCacheAddress
, false);
871 sKnownGroups
.addNoLock(cacheParser
.cachedDylibsGroup());
872 sKnownGroups
.addNoLock(cacheParser
.otherDylibsGroup());
873 launch_cache::Closure
closure(_mainClosure
);
874 sKnownGroups
.addNoLock(closure
.group().binaryData());
877 const launch_cache::binary_format::ImageGroup
* AllImages::cachedDylibsGroup()
879 return sKnownGroups
[0];
882 const launch_cache::binary_format::ImageGroup
* AllImages::otherDylibsGroup()
884 return sKnownGroups
[1];
887 const AllImages::BinaryImageGroup
* AllImages::mainClosureGroup()
889 return sKnownGroups
[2];
892 uint32_t AllImages::currentGroupsCount() const
894 return sKnownGroups
.count();
897 void AllImages::copyCurrentGroups(ImageGroupList
& groups
) const
899 sKnownGroups
.forEachWithReadLock(^(uint32_t index
, const dyld3::launch_cache::binary_format::ImageGroup
* const &grpData
, bool &stop
) {
900 if ( index
< groups
.count() )
901 groups
[index
] = grpData
;
905 void AllImages::copyCurrentGroupsNoLock(ImageGroupList
& groups
) const
907 sKnownGroups
.forEachNoLock(^(uint32_t index
, const dyld3::launch_cache::binary_format::ImageGroup
* const &grpData
, bool &stop
) {
908 if ( index
< groups
.count() )
909 groups
[index
] = grpData
;
913 const mach_header
* AllImages::alreadyLoaded(uint64_t inode
, uint64_t mtime
, bool bumpRefCount
)
915 __block
const mach_header
* result
= nullptr;
916 sLoadedImages
.forEachWithReadLock(^(uint32_t anIndex
, const LoadedImage
& loadedImage
, bool& stop
) {
917 launch_cache::Image
img(loadedImage
.image());
918 if ( img
.validateUsingModTimeAndInode() ) {
919 if ( (img
.fileINode() == inode
) && (img
.fileModTime() == mtime
) ) {
920 result
= loadedImage
.loadedAddress();
921 if ( bumpRefCount
&& !loadedImage
.neverUnload() )
922 incRefCount(loadedImage
.loadedAddress());
930 const mach_header
* AllImages::alreadyLoaded(const char* path
, bool bumpRefCount
)
932 __block
const mach_header
* result
= nullptr;
933 uint32_t targetHash
= launch_cache::ImageGroup::hashFunction(path
);
934 sLoadedImages
.forEachWithReadLock(^(uint32_t anIndex
, const LoadedImage
& loadedImage
, bool& stop
) {
935 launch_cache::Image
img(loadedImage
.image());
936 if ( (img
.pathHash() == targetHash
) && (strcmp(path
, imagePath(loadedImage
.image())) == 0) ) {
937 result
= loadedImage
.loadedAddress();
938 if ( bumpRefCount
&& !loadedImage
.neverUnload() )
939 incRefCount(loadedImage
.loadedAddress());
943 if ( result
== nullptr ) {
944 // perhaps there was an image override
945 launch_cache::ImageGroup
mainGroup(mainClosureGroup());
946 STACK_ALLOC_DYNARRAY(const launch_cache::BinaryImageGroupData
*, currentGroupsCount(), currentGroupsList
);
947 copyCurrentGroups(currentGroupsList
);
948 mainGroup
.forEachImageRefOverride(currentGroupsList
, ^(launch_cache::Image standardDylib
, launch_cache::Image overrideDyilb
, bool& stop
) {
949 if ( strcmp(standardDylib
.path(), path
) == 0 ) {
950 result
= alreadyLoaded(overrideDyilb
.path(), bumpRefCount
);
958 const mach_header
* AllImages::alreadyLoaded(const BinaryImage
* binImage
, bool bumpRefCount
)
960 const mach_header
* result
= findLoadAddressByImage(binImage
);
961 if ( result
!= nullptr ) {
962 launch_cache::Image
loadedImage(binImage
);
963 if ( bumpRefCount
&& !loadedImage
.neverUnload() )
969 void AllImages::incRefCount(const mach_header
* loadAddress
)
971 __block
bool found
= false;
972 sDlopenRefCounts
.forEachWithWriteLock(^(uint32_t index
, DlopenCount
& entry
, bool& stop
) {
973 if ( entry
.loadAddress
== loadAddress
) {
980 DlopenCount newEnty
= { loadAddress
, 1 };
981 sDlopenRefCounts
.add(newEnty
);
985 void AllImages::decRefCount(const mach_header
* loadAddress
)
987 __block
bool refCountNowZero
= false;
988 sDlopenRefCounts
.forEachWithWriteLock(^(uint32_t index
, DlopenCount
& entry
, bool& stop
) {
989 if ( entry
.loadAddress
== loadAddress
) {
992 if ( entry
.refCount
== 0 )
993 refCountNowZero
= true;
996 if ( refCountNowZero
) {
997 DlopenCount delEnty
= { loadAddress
, 0 };
998 sDlopenRefCounts
.remove(delEnty
);
999 garbageCollectImages();
1004 #if __MAC_OS_X_VERSION_MIN_REQUIRED
1005 __NSObjectFileImage
* AllImages::addNSObjectFileImage()
1007 // look for empty slot first
1008 __block __NSObjectFileImage
* result
= nullptr;
1009 sNSObjectFileImages
.forEachWithWriteLock(^(uint32_t index
, __NSObjectFileImage
& value
, bool& stop
) {
1010 if ( (value
.path
== nullptr) && (value
.memSource
== nullptr) ) {
1015 if ( result
!= nullptr )
1018 // otherwise allocate new slot
1019 __NSObjectFileImage empty
;
1020 return sNSObjectFileImages
.add(empty
);
1023 bool AllImages::hasNSObjectFileImage(__NSObjectFileImage
* ofi
)
1025 __block
bool result
= false;
1026 sNSObjectFileImages
.forEachNoLock(^(uint32_t index
, const __NSObjectFileImage
& value
, bool& stop
) {
1027 if ( &value
== ofi
) {
1028 result
= ((value
.memSource
!= nullptr) || (value
.path
!= nullptr));
1035 void AllImages::removeNSObjectFileImage(__NSObjectFileImage
* ofi
)
1037 sNSObjectFileImages
.forEachWithWriteLock(^(uint32_t index
, __NSObjectFileImage
& value
, bool& stop
) {
1038 if ( &value
== ofi
) {
1039 // mark slot as empty
1040 ofi
->path
= nullptr;
1041 ofi
->memSource
= nullptr;
1043 ofi
->loadAddress
= nullptr;
1044 ofi
->binImage
= nullptr;
1052 class VIS_HIDDEN Reaper
1055 Reaper(uint32_t count
, const LoadedImage
** unloadables
, bool* inUseArray
);
1056 void garbageCollect();
1057 void finalizeDeadImages();
1060 typedef launch_cache::binary_format::Image BinaryImage
;
1062 void markDirectlyDlopenedImagesAsUsed();
1063 void markDependentOfInUseImages();
1064 void markDependentsOf(const LoadedImage
*);
1065 bool loadAddressIsUnloadable(const mach_header
* loadAddr
, uint32_t& index
);
1066 bool imageIsUnloadable(const BinaryImage
* binImage
, uint32_t& foundIndex
);
1067 uint32_t inUseCount();
1068 void dump(const char* msg
);
1070 const LoadedImage
** _unloadablesArray
;
1072 uint32_t _arrayCount
;
1073 uint32_t _deadCount
;
1076 Reaper::Reaper(uint32_t count
, const LoadedImage
** unloadables
, bool* inUseArray
)
1077 : _unloadablesArray(unloadables
), _inUseArray(inUseArray
),_arrayCount(count
)
1082 bool Reaper::loadAddressIsUnloadable(const mach_header
* loadAddr
, uint32_t& foundIndex
)
1084 for (uint32_t i
=0; i
< _arrayCount
; ++i
) {
1085 if ( _unloadablesArray
[i
]->loadedAddress() == loadAddr
) {
1093 bool Reaper::imageIsUnloadable(const BinaryImage
* binImage
, uint32_t& foundIndex
)
1095 for (uint32_t i
=0; i
< _arrayCount
; ++i
) {
1096 if ( _unloadablesArray
[i
]->image() == binImage
) {
1104 void Reaper::markDirectlyDlopenedImagesAsUsed()
1106 sDlopenRefCounts
.forEachWithReadLock(^(uint32_t refCountIndex
, const dyld3::DlopenCount
& dlEntry
, bool& stop
) {
1107 if ( dlEntry
.refCount
!= 0 ) {
1108 uint32_t foundIndex
;
1109 if ( loadAddressIsUnloadable(dlEntry
.loadAddress
, foundIndex
) ) {
1110 _inUseArray
[foundIndex
] = true;
1116 uint32_t Reaper::inUseCount()
1119 for (uint32_t i
=0; i
< _arrayCount
; ++i
) {
1120 if ( _inUseArray
[i
] )
1126 void Reaper::markDependentsOf(const LoadedImage
* entry
)
1128 const launch_cache::Image
image(entry
->image());
1129 STACK_ALLOC_DYNARRAY(const launch_cache::BinaryImageGroupData
*, gAllImages
.currentGroupsCount(), currentGroupsList
);
1130 gAllImages
.copyCurrentGroups(currentGroupsList
);
1131 image
.forEachDependentImage(currentGroupsList
, ^(uint32_t depIndex
, dyld3::launch_cache::Image depImage
, dyld3::launch_cache::Image::LinkKind kind
, bool& stop
) {
1132 uint32_t foundIndex
;
1133 if ( !depImage
.neverUnload() && imageIsUnloadable(depImage
.binaryData(), foundIndex
) ) {
1134 _inUseArray
[foundIndex
] = true;
1139 void Reaper::markDependentOfInUseImages()
1141 for (uint32_t i
=0; i
< _arrayCount
; ++i
) {
1142 if ( _inUseArray
[i
] )
1143 markDependentsOf(_unloadablesArray
[i
]);
1147 void Reaper::dump(const char* msg
)
1149 //log("%s:\n", msg);
1150 for (uint32_t i
=0; i
< _arrayCount
; ++i
) {
1151 dyld3::launch_cache::Image
image(_unloadablesArray
[i
]->image());
1152 //log(" in-used=%d %s\n", _inUseArray[i], image.path());
1156 void Reaper::garbageCollect()
1158 //dump("all unloadable images");
1160 // mark all dylibs directly dlopen'ed as in use
1161 markDirectlyDlopenedImagesAsUsed();
1163 //dump("directly dlopen()'ed marked");
1165 // iteratively mark dependents of in-use dylibs as in-use until in-use count stops changing
1166 uint32_t lastCount
= inUseCount();
1167 bool countChanged
= false;
1169 markDependentOfInUseImages();
1170 //dump("dependents marked");
1171 uint32_t newCount
= inUseCount();
1172 countChanged
= (newCount
!= lastCount
);
1173 lastCount
= newCount
;
1174 } while (countChanged
);
1176 _deadCount
= _arrayCount
- inUseCount();
1179 void Reaper::finalizeDeadImages()
1181 if ( _deadCount
== 0 )
1183 __cxa_range_t ranges
[_deadCount
];
1184 __cxa_range_t
* rangesArray
= ranges
;
1185 __block
unsigned int rangesCount
= 0;
1186 for (uint32_t i
=0; i
< _arrayCount
; ++i
) {
1187 if ( _inUseArray
[i
] )
1189 dyld3::launch_cache::Image
image(_unloadablesArray
[i
]->image());
1190 image
.forEachDiskSegment(^(uint32_t segIndex
, uint32_t fileOffset
, uint32_t fileSize
, int64_t vmOffset
, uint64_t vmSize
, uint8_t permissions
, bool &stop
) {
1191 if ( permissions
& VM_PROT_EXECUTE
) {
1192 rangesArray
[rangesCount
].addr
= (char*)(_unloadablesArray
[i
]->loadedAddress()) + vmOffset
;
1193 rangesArray
[rangesCount
].length
= (size_t)vmSize
;
1198 __cxa_finalize_ranges(ranges
, rangesCount
);
1202 // This function is called at the end of dlclose() when the reference count goes to zero.
1203 // The dylib being unloaded may have brought in other dependent dylibs when it was loaded.
1204 // Those dependent dylibs need to be unloaded, but only if they are not referenced by
1205 // something else. We use a standard mark and sweep garbage collection.
1207 // The tricky part is that when a dylib is unloaded it may have a termination function that
1208 // can run and itself call dlclose() on yet another dylib. The problem is that this
1209 // sort of garbage collection is not re-entrant. Instead a terminator's call to dlclose()
1210 // which calls garbageCollectImages() will just set a flag to re-do the garbage collection
1211 // when the current pass is done.
1213 // Also note that this is done within the sLoadedImages writer lock, so any dlopen/dlclose
1214 // on other threads are blocked while this garbage collections runs
1216 void AllImages::garbageCollectImages()
1218 // if some other thread is currently GC'ing images, let other thread do the work
1219 int32_t newCount
= OSAtomicIncrement32(&_gcCount
);
1220 if ( newCount
!= 1 )
1224 const uint32_t loadedImageCount
= sLoadedImages
.count();
1225 const LoadedImage
* unloadables
[loadedImageCount
];
1226 bool unloadableInUse
[loadedImageCount
];
1227 const LoadedImage
** unloadablesArray
= unloadables
;
1228 bool* unloadableInUseArray
= unloadableInUse
;
1229 __block
uint32_t unloadableCount
= 0;
1230 // do GC with lock, so no other images can be added during GC
1231 sLoadedImages
.withReadLock(^() {
1232 sLoadedImages
.forEachNoLock(^(uint32_t index
, const LoadedImage
& entry
, bool& stop
) {
1233 const launch_cache::Image
image(entry
.image());
1234 if ( !image
.neverUnload() && !entry
.neverUnload() ) {
1235 unloadablesArray
[unloadableCount
] = &entry
;
1236 unloadableInUseArray
[unloadableCount
] = false;
1237 //log("unloadable[%d] %p %s\n", unloadableCount, entry.loadedAddress(), image.path());
1241 // make reaper object to do garbage collection and notifications
1242 Reaper
reaper(unloadableCount
, unloadablesArray
, unloadableInUseArray
);
1243 reaper
.garbageCollect();
1245 // FIXME: we should sort dead images so higher level ones are terminated first
1247 // call cxa_finalize_ranges of dead images
1248 reaper
.finalizeDeadImages();
1250 // FIXME: call static terminators of dead images
1252 // FIXME: DOF unregister
1255 //log("sLoadedImages before GC removals:\n");
1256 //sLoadedImages.dump(^(const LoadedImage& entry) {
1257 // const launch_cache::Image image(entry.image());
1258 // log(" loadAddr=%p, path=%s\n", entry.loadedAddress(), image.path());
1261 // make copy of LoadedImages we want to remove
1262 // because unloadables[] points into ChunkVector we are shrinking
1263 uint32_t removalCount
= 0;
1264 for (uint32_t i
=0; i
< unloadableCount
; ++i
) {
1265 if ( !unloadableInUse
[i
] )
1268 if ( removalCount
> 0 ) {
1269 STACK_ALLOC_DYNARRAY(loader::ImageInfo
, removalCount
, unloadImages
);
1270 uint32_t removalIndex
= 0;
1271 for (uint32_t i
=0; i
< unloadableCount
; ++i
) {
1272 if ( !unloadableInUse
[i
] ) {
1273 unloadImages
[removalIndex
].loadAddress
= unloadables
[i
]->loadedAddress();
1274 unloadImages
[removalIndex
].imageData
= unloadables
[i
]->image();
1278 // remove entries from sLoadedImages
1279 removeImages(unloadImages
);
1281 //log("sLoadedImages after GC removals:\n");
1282 //sLoadedImages.dump(^(const LoadedImage& entry) {
1283 // const launch_cache::Image image(entry.image());
1284 // //log(" loadAddr=%p, path=%s\n", entry.loadedAddress(), image.path());
1288 // if some other thread called GC during our work, redo GC on its behalf
1289 newCount
= OSAtomicDecrement32(&_gcCount
);
1291 while (newCount
> 0);
1297 const launch_cache::binary_format::Image
* AllImages::messageClosured(const char* path
, const char* apiName
, const char* closuredErrorMessages
[3], int& closuredErrorMessagesCount
)
1299 __block
const launch_cache::binary_format::Image
* result
= nullptr;
1300 sKnownGroups
.withWriteLock(^() {
1301 ClosureBuffer::CacheIdent cacheIdent
;
1302 bzero(&cacheIdent
, sizeof(cacheIdent
));
1303 if ( _dyldCacheAddress
!= nullptr ) {
1304 const DyldSharedCache
* dyldCache
= (DyldSharedCache
*)_dyldCacheAddress
;
1305 dyldCache
->getUUID(cacheIdent
.cacheUUID
);
1306 cacheIdent
.cacheAddress
= (unsigned long)_dyldCacheAddress
;
1307 cacheIdent
.cacheMappedSize
= dyldCache
->mappedSize();
1309 gPathOverrides
.forEachPathVariant(path
, ^(const char* possiblePath
, bool& stopVariants
) {
1310 struct stat statBuf
;
1311 if ( stat(possiblePath
, &statBuf
) == 0 ) {
1312 if ( S_ISDIR(statBuf
.st_mode
) ) {
1313 log_apis(" %s: path is directory: %s\n", apiName
, possiblePath
);
1314 if ( closuredErrorMessagesCount
< 3 )
1315 closuredErrorMessages
[closuredErrorMessagesCount
++] = strdup("not a file");
1318 // file exists, ask closured to build info for it
1319 STACK_ALLOC_DYNARRAY(const launch_cache::BinaryImageGroupData
*, sKnownGroups
.countNoLock(), currentGroupsList
);
1320 gAllImages
.copyCurrentGroupsNoLock(currentGroupsList
);
1321 dyld3::launch_cache::DynArray
<const dyld3::launch_cache::binary_format::ImageGroup
*> nonCacheGroupList(currentGroupsList
.count()-2, ¤tGroupsList
[2]);
1322 const dyld3::launch_cache::binary_format::ImageGroup
* closuredCreatedGroupData
= nullptr;
1323 ClosureBuffer
closureBuilderInput(cacheIdent
, path
, nonCacheGroupList
, gPathOverrides
);
1324 ClosureBuffer closureBuilderOutput
= dyld3::closured_CreateImageGroup(closureBuilderInput
);
1325 if ( !closureBuilderOutput
.isError() ) {
1326 vm_protect(mach_task_self(), closureBuilderOutput
.vmBuffer(), closureBuilderOutput
.vmBufferSize(), false, VM_PROT_READ
);
1327 closuredCreatedGroupData
= closureBuilderOutput
.imageGroup();
1328 log_apis(" %s: closured built ImageGroup for path: %s\n", apiName
, possiblePath
);
1329 sKnownGroups
.addNoLock(closuredCreatedGroupData
);
1330 launch_cache::ImageGroup
group(closuredCreatedGroupData
);
1331 result
= group
.imageBinary(0);
1332 stopVariants
= true;
1335 log_apis(" %s: closured failed for path: %s, error: %s\n", apiName
, possiblePath
, closureBuilderOutput
.errorMessage());
1336 if ( closuredErrorMessagesCount
< 3 ) {
1337 closuredErrorMessages
[closuredErrorMessagesCount
++] = strdup(closureBuilderOutput
.errorMessage());
1339 closureBuilderOutput
.free();
1344 log_apis(" %s: file does not exist for path: %s\n", apiName
, possiblePath
);
1352 const AllImages::BinaryImage
* AllImages::findImageInKnownGroups(const char* path
)
1354 __block
const AllImages::BinaryImage
* result
= nullptr;
1355 sKnownGroups
.forEachWithReadLock(^(uint32_t index
, const dyld3::launch_cache::binary_format::ImageGroup
* const& grpData
, bool& stop
) {
1356 launch_cache::ImageGroup
group(grpData
);
1358 if ( const AllImages::BinaryImage
* binImage
= group
.findImageByPath(path
, ignore
) ) {
1366 bool AllImages::imageUnloadable(const launch_cache::Image
& image
, const mach_header
* loadAddress
) const
1368 // check if statically determined in clousre that this can never be unloaded
1369 if ( image
.neverUnload() )
1372 // check if some runtime decision made this be never-unloadable
1373 __block
bool foundAsNeverUnload
= false;
1374 sLoadedImages
.forEachWithReadLock(^(uint32_t anIndex
, const LoadedImage
& loadedImage
, bool& stop
) {
1375 if ( loadedImage
.loadedAddress() == loadAddress
) {
1377 if ( loadedImage
.neverUnload() )
1378 foundAsNeverUnload
= true;
1381 if ( foundAsNeverUnload
)
1387 void AllImages::addLoadNotifier(NotifyFunc func
)
1389 // callback about already loaded images
1390 const uint32_t existingCount
= sLoadedImages
.count();
1391 const mach_header
* existingMHs
[existingCount
];
1392 const mach_header
** existingArray
= existingMHs
;
1393 sLoadedImages
.forEachWithReadLock(^(uint32_t anIndex
, const LoadedImage
& loadedImage
, bool& stop
) {
1394 if ( anIndex
< existingCount
)
1395 existingArray
[anIndex
] = loadedImage
.loadedAddress();
1397 // we don't want to hold lock while calling out, so prebuild array (with lock) then do calls on that array (without lock)
1398 for (uint32_t i
=0; i
< existingCount
; i
++) {
1399 MachOParser
parser(existingArray
[i
]);
1400 log_notifications("dyld: add notifier %p called with mh=%p\n", func
, existingArray
[i
]);
1401 func(existingArray
[i
], parser
.getSlide());
1404 // add to list of functions to call about future loads
1405 sLoadNotifiers
.add(func
);
1408 void AllImages::addUnloadNotifier(NotifyFunc func
)
1410 // add to list of functions to call about future unloads
1411 sUnloadNotifiers
.add(func
);
1414 void AllImages::setObjCNotifiers(_dyld_objc_notify_mapped map
, _dyld_objc_notify_init init
, _dyld_objc_notify_unmapped unmap
)
1416 _objcNotifyMapped
= map
;
1417 _objcNotifyInit
= init
;
1418 _objcNotifyUnmapped
= unmap
;
1420 // callback about already loaded images
1421 uint32_t maxCount
= count();
1422 const char* pathsBuffer
[maxCount
];
1423 const mach_header
* mhBuffer
[maxCount
];
1424 __block
const char** paths
= pathsBuffer
;
1425 __block
const mach_header
** mhs
= mhBuffer
;
1426 __block
uint32_t imagesWithObjC
= 0;
1427 sLoadedImages
.forEachWithReadLock(^(uint32_t anIndex
, const LoadedImage
& loadedImage
, bool& stop
) {
1428 launch_cache::Image
img(loadedImage
.image());
1429 if ( img
.hasObjC() ) {
1430 mhs
[imagesWithObjC
] = loadedImage
.loadedAddress();
1431 paths
[imagesWithObjC
] = imagePath(loadedImage
.image());
1435 if ( imagesWithObjC
!= 0 ) {
1436 (*map
)(imagesWithObjC
, pathsBuffer
, mhBuffer
);
1437 if ( log_notifications("dyld: objc-mapped-notifier called with %d images:\n", imagesWithObjC
) ) {
1438 for (uint32_t i
=0; i
< imagesWithObjC
; ++i
) {
1439 log_notifications("dyld: objc-mapped: %p %s\n", mhBuffer
[i
], pathsBuffer
[i
]);
1445 void AllImages::vmAccountingSetSuspended(bool suspend
)
1447 #if __arm__ || __arm64__
1448 // <rdar://problem/29099600> dyld should tell the kernel when it is doing fix-ups caused by roots
1449 log_fixups("vm.footprint_suspend=%d\n", suspend
);
1450 int newValue
= suspend
? 1 : 0;
1452 size_t newlen
= sizeof(newValue
);
1453 size_t oldlen
= sizeof(oldValue
);
1454 sysctlbyname("vm.footprint_suspend", &oldValue
, &oldlen
, &newValue
, newlen
);
1458 void AllImages::applyInterposingToDyldCache(const launch_cache::binary_format::Closure
* closure
, const dyld3::launch_cache::DynArray
<loader::ImageInfo
>& initialImages
)
1460 launch_cache::Closure
mainClosure(closure
);
1461 launch_cache::ImageGroup mainGroup
= mainClosure
.group();
1462 DyldCacheParser
cacheParser((DyldSharedCache
*)_dyldCacheAddress
, false);
1463 const launch_cache::binary_format::ImageGroup
* dylibsGroupData
= cacheParser
.cachedDylibsGroup();
1464 launch_cache::ImageGroup
dyldCacheDylibGroup(dylibsGroupData
);
1465 __block
bool suspendedAccounting
= false;
1466 mainGroup
.forEachDyldCacheSymbolOverride(^(uint32_t patchTableIndex
, const launch_cache::binary_format::Image
* imageData
, uint32_t imageOffset
, bool& stop
) {
1467 bool foundInImages
= false;
1468 for (int i
=0; i
< initialImages
.count(); ++i
) {
1469 if ( initialImages
[i
].imageData
== imageData
) {
1470 foundInImages
= true;
1471 uintptr_t replacement
= (uintptr_t)(initialImages
[i
].loadAddress
) + imageOffset
;
1472 dyldCacheDylibGroup
.forEachDyldCachePatchLocation(_dyldCacheAddress
, patchTableIndex
, ^(uintptr_t* locationToPatch
, uintptr_t addend
, bool& innerStop
) {
1473 if ( !suspendedAccounting
) {
1474 vmAccountingSetSuspended(true);
1475 suspendedAccounting
= true;
1477 log_fixups("dyld: cache fixup: *%p = %p\n", locationToPatch
, (void*)replacement
);
1478 *locationToPatch
= replacement
+ addend
;
1483 if ( !foundInImages
) {
1484 launch_cache::Image
img(imageData
);
1485 log_fixups("did not find loaded image to patch into cache: %s\n", img
.path());
1488 if ( suspendedAccounting
)
1489 vmAccountingSetSuspended(false);
1492 void AllImages::runLibSystemInitializer(const mach_header
* libSystemAddress
, const launch_cache::binary_format::Image
* libSystemBinImage
)
1494 // run all initializers in image
1495 launch_cache::Image
libSystemImage(libSystemBinImage
);
1496 libSystemImage
.forEachInitializer(libSystemAddress
, ^(const void* func
) {
1497 Initializer initFunc
= (Initializer
)func
;
1498 dyld3::kdebug_trace_dyld_duration(DBG_DYLD_TIMING_STATIC_INITIALIZER
, (uint64_t)func
, 0, ^{
1499 initFunc(NXArgc
, NXArgv
, environ
, appleParams
, _programVars
);
1501 log_initializers("called initialzer %p in %s\n", initFunc
, libSystemImage
.path());
1504 // mark libSystem.dylib as being init, so later recursive-init would re-run it
1505 sLoadedImages
.forEachWithWriteLock(^(uint32_t anIndex
, LoadedImage
& loadedImage
, bool& stop
) {
1506 if ( loadedImage
.loadedAddress() == libSystemAddress
) {
1507 loadedImage
.setState(LoadedImage::State::inited
);
1513 void AllImages::runInitialzersBottomUp(const mach_header
* imageLoadAddress
)
1515 launch_cache::Image topImage
= findByLoadAddress(imageLoadAddress
);
1516 if ( topImage
.isInvalid() )
1519 // closure contains list of intializers to run in-order
1520 STACK_ALLOC_DYNARRAY(const launch_cache::BinaryImageGroupData
*, currentGroupsCount(), currentGroupsList
);
1521 copyCurrentGroups(currentGroupsList
);
1522 topImage
.forEachInitBefore(currentGroupsList
, ^(launch_cache::Image imageToInit
) {
1524 __block LoadedImage
* foundEntry
= nullptr;
1525 sLoadedImages
.forEachWithReadLock(^(uint32_t index
, const LoadedImage
& entry
, bool& stop
) {
1526 if ( entry
.image() == imageToInit
.binaryData() ) {
1527 foundEntry
= (LoadedImage
*)&entry
;
1531 assert(foundEntry
!= nullptr);
1532 pthread_mutex_lock(&_initializerLock
);
1533 // Note, due to the large lock in dlopen, we can't be waiting on another thread
1534 // here, but its possible that we are in a dlopen which is initialising us again
1535 if ( foundEntry
->state() == LoadedImage::State::beingInited
) {
1536 log_initializers("dyld: already initializing '%s'\n", imagePath(imageToInit
.binaryData()));
1538 // at this point, the image is either initialized or not
1539 // if not, initialize it on this thread
1540 if ( foundEntry
->state() == LoadedImage::State::uninited
) {
1541 foundEntry
->setState(LoadedImage::State::beingInited
);
1542 // release initializer lock, so other threads can run initializers
1543 pthread_mutex_unlock(&_initializerLock
);
1544 // tell objc to run any +load methods in image
1545 if ( (_objcNotifyInit
!= nullptr) && imageToInit
.mayHavePlusLoads() ) {
1546 log_notifications("dyld: objc-init-notifier called with mh=%p, path=%s\n", foundEntry
->loadedAddress(), imagePath(imageToInit
.binaryData()));
1547 (*_objcNotifyInit
)(imagePath(imageToInit
.binaryData()), foundEntry
->loadedAddress());
1549 // run all initializers in image
1550 imageToInit
.forEachInitializer(foundEntry
->loadedAddress(), ^(const void* func
) {
1551 Initializer initFunc
= (Initializer
)func
;
1552 dyld3::kdebug_trace_dyld_duration(DBG_DYLD_TIMING_STATIC_INITIALIZER
, (uint64_t)func
, 0, ^{
1553 initFunc(NXArgc
, NXArgv
, environ
, appleParams
, _programVars
);
1555 log_initializers("dyld: called initialzer %p in %s\n", initFunc
, imageToInit
.path());
1557 // reaquire initializer lock to switch state to inited
1558 pthread_mutex_lock(&_initializerLock
);
1559 foundEntry
->setState(LoadedImage::State::inited
);
1561 pthread_mutex_unlock(&_initializerLock
);
1566 } // namespace dyld3