/*
 * Copyright (c) 2017 Apple Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
#include <sys/sysctl.h>
#include <mach/mach_time.h> // mach_absolute_time()
#include <pthread/pthread.h>
#include <libkern/OSAtomic.h>

#include "AllImages.h"
#include "MachOParser.h"
#include "libdyldEntryVector.h"
#include "LaunchCache.h"
#include "DyldSharedCache.h"
#include "PathOverrides.h"
#include "DyldCacheParser.h"

extern const char** appleParams;
// should be a header for these
struct __cxa_range_t {
    const void* addr;
    size_t      length;
};
extern "C" void __cxa_finalize_ranges(const __cxa_range_t ranges[], unsigned int count);
VIS_HIDDEN bool gUseDyld3 = false;

namespace dyld3 {
class VIS_HIDDEN LoadedImage {
public:
    enum class State { uninited=3, beingInited=2, inited=0 };
    typedef launch_cache::binary_format::Image BinaryImage;

    LoadedImage(const mach_header* mh, const BinaryImage* bi);
    bool operator==(const LoadedImage& rhs) const;
    void init(const mach_header* mh, const BinaryImage* bi);

    const mach_header*  loadedAddress() const    { return (mach_header*)((uintptr_t)_loadAddress & ~0x7ULL); }
    State               state() const            { return (State)((uintptr_t)_loadAddress & 0x3ULL); }
    const BinaryImage*  image() const            { return _image; }
    bool                neverUnload() const      { return ((uintptr_t)_loadAddress & 0x4ULL); }
    void                setState(State s)        { _loadAddress = (mach_header*)((((uintptr_t)_loadAddress) & ~0x3ULL) | (uintptr_t)s); }
    void                setNeverUnload()         { _loadAddress = (mach_header*)(((uintptr_t)_loadAddress) | 0x4ULL); }

private:
    const mach_header*  _loadAddress;   // low bits: bit2=neverUnload, bit1/bit0 contain State
    const BinaryImage*  _image;
};
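
// Note: LoadedImage packs its bookkeeping bits into the low bits of _loadAddress
// rather than using separate fields.  Mach-O load addresses are page aligned, so the
// low three bits of the pointer are always zero and can carry state: bits 0-1 hold
// the State enum and bit 2 holds the never-unload flag.  loadedAddress() masks with
// ~0x7 to recover the real pointer.  For example, a dylib mapped at 0x10F4A0000 in
// state beingInited (2) with never-unload set (bit 2) is stored as 0x10F4A0006.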
bool LoadedImage::operator==(const LoadedImage& rhs) const
{
    return (_image == rhs._image) && (loadedAddress() == rhs.loadedAddress());
}
struct VIS_HIDDEN DlopenCount {
    bool operator==(const DlopenCount& rhs) const;
    const mach_header*  loadAddress;
    uintptr_t           refCount;
};
bool DlopenCount::operator==(const DlopenCount& rhs) const
{
    return (loadAddress == rhs.loadAddress) && (refCount == rhs.refCount);
}
LoadedImage::LoadedImage(const mach_header* mh, const BinaryImage* bi)
    : _loadAddress(mh), _image(bi)
{
    assert(loadedAddress() == mh);
    setState(State::uninited);
}
void LoadedImage::init(const mach_header* mh, const BinaryImage* bi)
{
    _loadAddress = mh;
    _image       = bi;
    assert(loadedAddress() == mh);
    setState(State::uninited);
}
template <typename T, int C> class ReaderWriterChunkedVector;

template <typename T, int C>
class VIS_HIDDEN ChunkedVector {
public:
    static ChunkedVector<T,C>* make(uint32_t count);

    void        forEach(uint32_t& startIndex, bool& outerStop, void (^callback)(uint32_t index, const T& value, bool& stop)) const;
    void        forEach(uint32_t& startIndex, bool& outerStop, void (^callback)(uint32_t index, T& value, bool& stop));
    T*          add(const T& value);
    T*          add(uint32_t count, const T values[]);
    void        remove(uint32_t index);
    uint32_t    count() const     { return _inUseCount; }
    uint32_t    freeCount() const { return _allocCount - _inUseCount; }

private:
    T&          element(uint32_t index)       { return ((T*)_elements)[index]; }
    const T&    element(uint32_t index) const { return ((T*)_elements)[index]; }

    friend class ReaderWriterChunkedVector<T,C>;

    ChunkedVector<T,C>*  _next       = nullptr;
    uint32_t             _allocCount = C;
    uint32_t             _inUseCount = 0;
    uint8_t              _elements[C*sizeof(T)] = { 0 };
};
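
// ChunkedVector stores its first C elements inline in _elements and chains overflow
// into additional heap-allocated chunks through _next, so elements never move once
// added.  That address stability is relied on elsewhere in this file (e.g. pointers
// into sLoadedImages and sNSObjectFileImages are handed out and compared by address).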
template <typename T, int C>
class VIS_HIDDEN ReaderWriterChunkedVector {
public:
    T*          add(uint32_t count, const T values[]);
    T*          add(const T& value)       { return add(1, &value); }
    T*          addNoLock(uint32_t count, const T values[]);
    T*          addNoLock(const T& value) { return addNoLock(1, &value); }
    void        remove(const T& value);
    uint32_t    count() const;
    void        forEachWithReadLock(void (^callback)(uint32_t index, const T& value, bool& stop)) const;
    void        forEachWithWriteLock(void (^callback)(uint32_t index, T& value, bool& stop));
    void        forEachNoLock(void (^callback)(uint32_t index, const T& value, bool& stop)) const;
    T&          operator[](size_t index);
    uint32_t    countNoLock() const;

    void        withReadLock(void (^withLock)()) const;
    void        withWriteLock(void (^withLock)()) const;
    void        acquireWriteLock();
    void        releaseWriteLock();
    void        dump(void (^callback)(const T& value)) const;

private:
    mutable pthread_rwlock_t  _lock = PTHREAD_RWLOCK_INITIALIZER;
    ChunkedVector<T,C>        _firstChunk;
};
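
// ReaderWriterChunkedVector pairs the chunked storage above with a pthread rwlock:
// many readers may iterate concurrently, writers are exclusive, and the *NoLock
// variants exist for early-startup paths that run before locks may be used.
// A minimal usage sketch (hypothetical element type, not from this file):
//
//     static ReaderWriterChunkedVector<int, 4> sValues;
//     sValues.add(42);                                     // takes the write lock
//     sValues.forEachWithReadLock(^(uint32_t index, const int& value, bool& stop) {
//         if ( value == 42 )
//             stop = true;                                 // ends iteration early
//     });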
typedef void (*NotifyFunc)(const mach_header* mh, intptr_t slide);

static ReaderWriterChunkedVector<NotifyFunc, 4>                                 sLoadNotifiers;
static ReaderWriterChunkedVector<NotifyFunc, 4>                                 sUnloadNotifiers;
static ReaderWriterChunkedVector<LoadedImage, 4>                                sLoadedImages;
static ReaderWriterChunkedVector<DlopenCount, 4>                                sDlopenRefCounts;
static ReaderWriterChunkedVector<const launch_cache::BinaryImageGroupData*, 4>  sKnownGroups;
#if __MAC_OS_X_VERSION_MIN_REQUIRED
static ReaderWriterChunkedVector<__NSObjectFileImage, 2>                        sNSObjectFileImages;
#endif
///////////////////// ChunkedVector ////////////////////////////
template <typename T, int C>
ChunkedVector<T,C>* ChunkedVector<T,C>::make(uint32_t count)
{
    size_t size = sizeof(ChunkedVector) + sizeof(T) * (count-C);
    ChunkedVector<T,C>* result = (ChunkedVector<T,C>*)malloc(size);
    result->_next       = nullptr;
    result->_allocCount = count;
    result->_inUseCount = 0;
    return result;
}
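
// Note: make() relies on the struct already reserving space for C inline elements,
// so only (count-C) extra element slots are appended to the malloc'd block.  For
// example, with C=4 and count=8, sizeof(ChunkedVector) covers the first 4 elements
// and the allocation adds room for the remaining 4.  Its only caller, addNoLock(),
// always passes a count rounded up to at least C.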
template <typename T, int C>
void ChunkedVector<T,C>::forEach(uint32_t& outerIndex, bool& outerStop, void (^callback)(uint32_t index, const T& value, bool& stop)) const
{
    for (uint32_t i=0; i < _inUseCount; ++i) {
        callback(outerIndex, element(i), outerStop);
        ++outerIndex;
        if ( outerStop )
            break;
    }
}
template <typename T, int C>
void ChunkedVector<T,C>::forEach(uint32_t& outerIndex, bool& outerStop, void (^callback)(uint32_t index, T& value, bool& stop))
{
    for (uint32_t i=0; i < _inUseCount; ++i) {
        callback(outerIndex, element(i), outerStop);
        ++outerIndex;
        if ( outerStop )
            break;
    }
}
template <typename T, int C>
T* ChunkedVector<T,C>::add(const T& value)
{
    return add(1, &value);
}
template <typename T, int C>
T* ChunkedVector<T,C>::add(uint32_t count, const T values[])
{
    assert(count <= (_allocCount - _inUseCount));
    T* result = &element(_inUseCount);
    memmove(result, values, sizeof(T)*count);
    _inUseCount += count;
    return result;
}
template <typename T, int C>
void ChunkedVector<T,C>::remove(uint32_t index)
{
    assert(index < _inUseCount);
    int moveCount = _inUseCount - index - 1;
    if ( moveCount >= 1 ) {
        memmove(&element(index), &element(index+1), sizeof(T)*moveCount);
    }
    --_inUseCount;
}
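
// Note: remove() compacts the chunk with memmove, so stored types must be safe to
// move bitwise.  The element types used in this file (LoadedImage, DlopenCount,
// plain function pointers, __NSObjectFileImage) are all trivially copyable structs,
// so this holds.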
///////////////////// ReaderWriterChunkedVector ////////////////////////////
template <typename T, int C>
void ReaderWriterChunkedVector<T,C>::withReadLock(void (^work)()) const
{
    assert(pthread_rwlock_rdlock(&_lock) == 0);
    work();
    assert(pthread_rwlock_unlock(&_lock) == 0);
}
template <typename T, int C>
void ReaderWriterChunkedVector<T,C>::withWriteLock(void (^work)()) const
{
    assert(pthread_rwlock_wrlock(&_lock) == 0);
    work();
    assert(pthread_rwlock_unlock(&_lock) == 0);
}
template <typename T, int C>
void ReaderWriterChunkedVector<T,C>::acquireWriteLock()
{
    assert(pthread_rwlock_wrlock(&_lock) == 0);
}
template <typename T, int C>
void ReaderWriterChunkedVector<T,C>::releaseWriteLock()
{
    assert(pthread_rwlock_unlock(&_lock) == 0);
}
template <typename T, int C>
uint32_t ReaderWriterChunkedVector<T,C>::count() const
{
    __block uint32_t result = 0;
    withReadLock(^() {
        for (const ChunkedVector<T,C>* chunk = &_firstChunk; chunk != nullptr; chunk = chunk->_next) {
            result += chunk->count();
        }
    });
    return result;
}
template <typename T, int C>
uint32_t ReaderWriterChunkedVector<T,C>::countNoLock() const
{
    uint32_t result = 0;
    for (const ChunkedVector<T,C>* chunk = &_firstChunk; chunk != nullptr; chunk = chunk->_next) {
        result += chunk->count();
    }
    return result;
}
template <typename T, int C>
T* ReaderWriterChunkedVector<T,C>::addNoLock(uint32_t count, const T values[])
{
    T* result = nullptr;
    ChunkedVector<T,C>* lastChunk = &_firstChunk;
    while ( lastChunk->_next != nullptr )
        lastChunk = lastChunk->_next;

    if ( lastChunk->freeCount() >= count ) {
        // append to last chunk
        result = lastChunk->add(count, values);
    }
    else {
        // append new chunk
        uint32_t allocCount = count;
        uint32_t remainder = count % C;
        if ( remainder != 0 )
            allocCount = count + C - remainder;
        ChunkedVector<T,C>* newChunk = ChunkedVector<T,C>::make(allocCount);
        result = newChunk->add(count, values);
        lastChunk->_next = newChunk;
    }

    return result;
}
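
// Note: when the last chunk lacks room, addNoLock() rounds the new chunk's capacity
// up to a multiple of C (e.g. C=4, count=10 -> allocCount=12) and places the entire
// new batch in that one chunk; it never splits a batch across chunks, which is what
// lets add() return a single contiguous pointer to the copied elements.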
template <typename T, int C>
T* ReaderWriterChunkedVector<T,C>::add(uint32_t count, const T values[])
{
    __block T* result = nullptr;
    withWriteLock(^() {
        result = addNoLock(count, values);
    });
    return result;
}
template <typename T, int C>
void ReaderWriterChunkedVector<T,C>::remove(const T& valueToRemove)
{
    __block bool stopStorage = false;
    withWriteLock(^() {
        ChunkedVector<T,C>* chunkNowEmpty = nullptr;
        __block uint32_t indexStorage = 0;
        __block bool found = false;
        for (ChunkedVector<T,C>* chunk = &_firstChunk; chunk != nullptr; chunk = chunk->_next) {
            uint32_t chunkStartIndex = indexStorage;
            __block uint32_t foundIndex = 0;
            chunk->forEach(indexStorage, stopStorage, ^(uint32_t index, const T& value, bool& stop) {
                if ( value == valueToRemove ) {
                    foundIndex = index - chunkStartIndex;
                    found = true;
                    stop = true;
                }
            });
            if ( found ) {
                chunk->remove(foundIndex);
                if ( chunk->count() == 0 )
                    chunkNowEmpty = chunk;
                break;
            }
        }
        // if chunk is now empty, remove from linked list and free
        if ( chunkNowEmpty ) {
            for (ChunkedVector<T,C>* chunk = &_firstChunk; chunk != nullptr; chunk = chunk->_next) {
                if ( chunk->_next == chunkNowEmpty ) {
                    chunk->_next = chunkNowEmpty->_next;
                    if ( chunkNowEmpty != &_firstChunk )
                        free(chunkNowEmpty);
                    break;
                }
            }
        }
    });
}
template <typename T, int C>
void ReaderWriterChunkedVector<T,C>::forEachWithReadLock(void (^callback)(uint32_t index, const T& value, bool& stop)) const
{
    __block uint32_t index = 0;
    __block bool stop = false;
    withReadLock(^() {
        for (const ChunkedVector<T,C>* chunk = &_firstChunk; chunk != nullptr; chunk = chunk->_next) {
            chunk->forEach(index, stop, callback);
            if ( stop )
                break;
        }
    });
}
template <typename T, int C>
void ReaderWriterChunkedVector<T,C>::forEachWithWriteLock(void (^callback)(uint32_t index, T& value, bool& stop))
{
    __block uint32_t index = 0;
    __block bool stop = false;
    withWriteLock(^() {
        for (ChunkedVector<T,C>* chunk = &_firstChunk; chunk != nullptr; chunk = chunk->_next) {
            chunk->forEach(index, stop, callback);
            if ( stop )
                break;
        }
    });
}
template <typename T, int C>
void ReaderWriterChunkedVector<T,C>::forEachNoLock(void (^callback)(uint32_t index, const T& value, bool& stop)) const
{
    uint32_t index = 0;
    bool stop = false;
    for (const ChunkedVector<T,C>* chunk = &_firstChunk; chunk != nullptr; chunk = chunk->_next) {
        chunk->forEach(index, stop, callback);
        if ( stop )
            break;
    }
}
template <typename T, int C>
T& ReaderWriterChunkedVector<T,C>::operator[](size_t targetIndex)
{
    __block T* result = nullptr;
    forEachNoLock(^(uint32_t index, T const& value, bool& stop) {
        if ( index == targetIndex ) {
            result = (T*)&value;
            stop = true;
        }
    });
    return *result;
}
template <typename T, int C>
void ReaderWriterChunkedVector<T,C>::dump(void (^callback)(const T& value)) const
{
    log("dump ReaderWriterChunkedVector at %p\n", this);
    __block uint32_t index = 0;
    __block bool stop = false;
    withReadLock(^() {
        for (const ChunkedVector<T,C>* chunk = &_firstChunk; chunk != nullptr; chunk = chunk->_next) {
            log(" chunk at %p\n", chunk);
            chunk->forEach(index, stop, ^(uint32_t i, const T& value, bool& s) {
                callback(value);
            });
        }
    });
}
///////////////////// AllImages ////////////////////////////
AllImages gAllImages;
void AllImages::init(const BinaryClosure* closure, const void* dyldCacheLoadAddress, const char* dyldCachePath,
                     const dyld3::launch_cache::DynArray<loader::ImageInfo>& initialImages)
{
    _mainClosure      = closure;
    _initialImages    = &initialImages;
    _dyldCacheAddress = dyldCacheLoadAddress;
    _dyldCachePath    = dyldCachePath;

    // Make temporary old image array, so libSystem initializers can be debugged
    uint32_t count = (uint32_t)initialImages.count();
    dyld_image_info oldDyldInfo[count];
    for (int i=0; i < count; ++i) {
        launch_cache::Image img(initialImages[i].imageData);
        oldDyldInfo[i].imageLoadAddress = initialImages[i].loadAddress;
        oldDyldInfo[i].imageFilePath    = img.path();
        oldDyldInfo[i].imageFileModDate = 0;
    }
    _oldAllImageInfos->infoArray      = oldDyldInfo;
    _oldAllImageInfos->infoArrayCount = count;
    _oldAllImageInfos->notification(dyld_image_adding, count, oldDyldInfo);
    _oldAllImageInfos->infoArray      = nullptr;
    _oldAllImageInfos->infoArrayCount = 0;
}
void AllImages::setProgramVars(ProgramVars* vars)
{
    _programVars = vars;
}
void AllImages::applyInitialImages()
{
    addImages(*_initialImages);
    _initialImages = nullptr;  // this was stack allocated
}
void AllImages::mirrorToOldAllImageInfos()
{
    // set infoArray to NULL to denote it is in-use
    _oldAllImageInfos->infoArray = nullptr;

    // if array not large enough, re-alloc it
    uint32_t imageCount = sLoadedImages.countNoLock();
    if ( _oldArrayAllocCount < imageCount ) {
        uint32_t newAllocCount    = imageCount + 16;
        dyld_image_info* newArray = (dyld_image_info*)malloc(sizeof(dyld_image_info)*newAllocCount);
        if ( _oldAllImageArray != nullptr ) {
            memcpy(newArray, _oldAllImageArray, sizeof(dyld_image_info)*_oldAllImageInfos->infoArrayCount);
            free(_oldAllImageArray);
        }
        _oldAllImageArray   = newArray;
        _oldArrayAllocCount = newAllocCount;
    }

    // fill out array to mirror current image list
    sLoadedImages.forEachNoLock(^(uint32_t index, const LoadedImage& loadedImage, bool& stop) {
        launch_cache::Image img(loadedImage.image());
        _oldAllImageArray[index].imageLoadAddress = loadedImage.loadedAddress();
        _oldAllImageArray[index].imageFilePath    = imagePath(loadedImage.image());
        _oldAllImageArray[index].imageFileModDate = 0;
    });

    // set infoArray back to base address of array (so other process can now read)
    _oldAllImageInfos->infoArrayCount           = imageCount;
    _oldAllImageInfos->infoArrayChangeTimestamp = mach_absolute_time();
    _oldAllImageInfos->infoArray                = _oldAllImageArray;
}
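
// Note: the infoArray field doubles as a handshake with out-of-process inspectors
// (debuggers): it is set to nullptr while the list is being rebuilt above, and only
// restored to the array's base address once the contents and timestamp are
// consistent again.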
void AllImages::addImages(const launch_cache::DynArray<loader::ImageInfo>& newImages)
{
    uint32_t count = (uint32_t)newImages.count();

    // build stack array of LoadedImage to copy into sLoadedImages
    STACK_ALLOC_DYNARRAY(LoadedImage, count, loadedImagesArray);
    for (uint32_t i=0; i < count; ++i) {
        loadedImagesArray[i].init(newImages[i].loadAddress, newImages[i].imageData);
        if (newImages[i].neverUnload)
            loadedImagesArray[i].setNeverUnload();
    }
    sLoadedImages.add(count, &loadedImagesArray[0]);

    if ( _oldAllImageInfos != nullptr ) {
        // sync to old all image infos struct
        if ( _initialImages != nullptr ) {
            // libSystem not initialized yet, don't use locks
            mirrorToOldAllImageInfos();
        }
        else {
            sLoadedImages.withReadLock(^{
                mirrorToOldAllImageInfos();
            });
        }
        // tell debugger about new images
        dyld_image_info oldDyldInfo[count];
        for (int i=0; i < count; ++i) {
            launch_cache::Image img(newImages[i].imageData);
            oldDyldInfo[i].imageLoadAddress = newImages[i].loadAddress;
            oldDyldInfo[i].imageFilePath    = imagePath(newImages[i].imageData);
            oldDyldInfo[i].imageFileModDate = 0;
        }
        _oldAllImageInfos->notification(dyld_image_adding, count, oldDyldInfo);
    }

    // log loads
    for (int i=0; i < count; ++i) {
        launch_cache::Image img(newImages[i].imageData);
        log_loads("dyld: %s\n", imagePath(newImages[i].imageData));
    }
#if !TARGET_IPHONE_SIMULATOR
    // call kdebug trace for each image
    if (kdebug_is_enabled(KDBG_CODE(DBG_DYLD, DBG_DYLD_UUID, DBG_DYLD_UUID_MAP_A))) {
        for (uint32_t i=0; i < count; ++i) {
            launch_cache::Image img(newImages[i].imageData);
            struct stat stat_buf;
            fsid_t fsid = {{ 0, 0 }};
            fsobj_id_t fsobjid = { 0, 0 };
            if (img.isDiskImage() && stat(imagePath(newImages[i].imageData), &stat_buf) == 0 ) {
                fsobjid = *(fsobj_id_t*)&stat_buf.st_ino;
                fsid = {{ stat_buf.st_dev, 0 }};
            }
            kdebug_trace_dyld_image(DBG_DYLD_UUID_MAP_A, img.uuid(), fsobjid, fsid, newImages[i].loadAddress);
        }
    }
#endif
    // call each _dyld_register_func_for_add_image function with each image
    const uint32_t existingNotifierCount = sLoadNotifiers.count();
    NotifyFunc existingNotifiers[existingNotifierCount];
    NotifyFunc* existingNotifierArray = existingNotifiers;
    sLoadNotifiers.forEachWithReadLock(^(uint32_t index, const NotifyFunc& func, bool& stop) {
        if ( index < existingNotifierCount )
            existingNotifierArray[index] = func;
    });
    // we don't want to hold lock while calling out, so prebuild array (with lock) then do calls on that array (without lock)
    for (uint32_t j=0; j < existingNotifierCount; ++j) {
        NotifyFunc func = existingNotifierArray[j];
        for (uint32_t i=0; i < count; ++i) {
            MachOParser parser(newImages[i].loadAddress);
            log_notifications("dyld: add notifier %p called with mh=%p\n", func, newImages[i].loadAddress);
            func(newImages[i].loadAddress, parser.getSlide());
        }
    }
    // call objc about images that use objc
    if ( _objcNotifyMapped != nullptr ) {
        const char* pathsBuffer[count];
        const mach_header* mhBuffer[count];
        uint32_t imagesWithObjC = 0;
        for (uint32_t i=0; i < count; ++i) {
            launch_cache::Image img(newImages[i].imageData);
            if ( img.hasObjC() ) {
                pathsBuffer[imagesWithObjC] = imagePath(newImages[i].imageData);
                mhBuffer[imagesWithObjC]    = newImages[i].loadAddress;
                ++imagesWithObjC;
            }
        }
        if ( imagesWithObjC != 0 ) {
            (*_objcNotifyMapped)(imagesWithObjC, pathsBuffer, mhBuffer);
            if ( log_notifications("dyld: objc-mapped-notifier called with %d images:\n", imagesWithObjC) ) {
                for (uint32_t i=0; i < imagesWithObjC; ++i) {
                    log_notifications("dyld: objc-mapped: %p %s\n", mhBuffer[i], pathsBuffer[i]);
                }
            }
        }
    }

    // notify any processes tracking loads in this process
    notifyMonitorLoads(newImages);
}
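
// Both addImages() and removeImages() use the same callout discipline: snapshot the
// registered notifier functions into a stack array while holding the read lock, then
// invoke them after the lock is released, so third-party callbacks can safely call
// back into dyld (e.g. dladdr or dlopen) without deadlocking on these vectors.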
void AllImages::removeImages(const launch_cache::DynArray<loader::ImageInfo>& unloadImages)
{
    uint32_t count = (uint32_t)unloadImages.count();

    // call each _dyld_register_func_for_remove_image function with each image
    // do this before removing image from internal data structures so that the callback can query dyld about the image
    const uint32_t existingNotifierCount = sUnloadNotifiers.count();
    NotifyFunc existingNotifiers[existingNotifierCount];
    NotifyFunc* existingNotifierArray = existingNotifiers;
    sUnloadNotifiers.forEachWithReadLock(^(uint32_t index, const NotifyFunc& func, bool& stop) {
        if ( index < existingNotifierCount )
            existingNotifierArray[index] = func;
    });
    // we don't want to hold lock while calling out, so prebuild array (with lock) then do calls on that array (without lock)
    for (uint32_t j=0; j < existingNotifierCount; ++j) {
        NotifyFunc func = existingNotifierArray[j];
        for (uint32_t i=0; i < count; ++i) {
            MachOParser parser(unloadImages[i].loadAddress);
            log_notifications("dyld: remove notifier %p called with mh=%p\n", func, unloadImages[i].loadAddress);
            func(unloadImages[i].loadAddress, parser.getSlide());
        }
    }
646 if ( _objcNotifyUnmapped
!= nullptr ) {
647 for (uint32_t i
=0; i
< count
; ++i
) {
648 launch_cache::Image
img(unloadImages
[i
].imageData
);
649 if ( img
.hasObjC() ) {
650 (*_objcNotifyUnmapped
)(imagePath(unloadImages
[i
].imageData
), unloadImages
[i
].loadAddress
);
651 log_notifications("dyld: objc-unmapped-notifier called with image %p %s\n", unloadImages
[i
].loadAddress
, imagePath(unloadImages
[i
].imageData
));
656 #if !TARGET_IPHONE_SIMULATOR
657 // call kdebug trace for each image
658 if (kdebug_is_enabled(KDBG_CODE(DBG_DYLD
, DBG_DYLD_UUID
, DBG_DYLD_UUID_MAP_A
))) {
659 for (uint32_t i
=0; i
< count
; ++i
) {
660 launch_cache::Image
img(unloadImages
[i
].imageData
);
661 struct stat stat_buf
;
662 fsid_t fsid
= {{ 0, 0 }};
663 fsobj_id_t fsobjid
= { 0, 0 };
664 if (stat(imagePath(unloadImages
[i
].imageData
), &stat_buf
) == 0 ) {
665 fsobjid
= *(fsobj_id_t
*)&stat_buf
.st_ino
;
666 fsid
= {{ stat_buf
.st_dev
, 0 }};
668 kdebug_trace_dyld_image(DBG_DYLD_UUID_UNMAP_A
, img
.uuid(), fsobjid
, fsid
, unloadImages
[i
].loadAddress
);
    // remove each from sLoadedImages
    for (uint32_t i=0; i < count; ++i) {
        LoadedImage info(unloadImages[i].loadAddress, unloadImages[i].imageData);
        sLoadedImages.remove(info);
    }
    // sync to old all image infos struct
    sLoadedImages.withReadLock(^{
        mirrorToOldAllImageInfos();
    });

    // tell debugger about removed images
    dyld_image_info oldDyldInfo[count];
    for (int i=0; i < count; ++i) {
        launch_cache::Image img(unloadImages[i].imageData);
        oldDyldInfo[i].imageLoadAddress = unloadImages[i].loadAddress;
        oldDyldInfo[i].imageFilePath    = imagePath(unloadImages[i].imageData);
        oldDyldInfo[i].imageFileModDate = 0;
    }
    _oldAllImageInfos->notification(dyld_image_removing, count, oldDyldInfo);

    // unmap images
    for (int i=0; i < count; ++i) {
        launch_cache::Image img(unloadImages[i].imageData);
        loader::unmapImage(unloadImages[i].imageData, unloadImages[i].loadAddress);
        log_loads("dyld: unloaded %s\n", imagePath(unloadImages[i].imageData));
    }

    // notify any processes tracking loads in this process
    notifyMonitorUnloads(unloadImages);
}
void AllImages::setNeverUnload(const loader::ImageInfo& existingImage)
{
    sLoadedImages.forEachWithWriteLock(^(uint32_t index, dyld3::LoadedImage& value, bool& stop) {
        if (value.image() == existingImage.imageData) {
            value.setNeverUnload();
            stop = true;
        }
    });
}
uint32_t AllImages::count() const
{
    return sLoadedImages.count();
}
AllImages::findByLoadOrder(uint32_t index
, const mach_header
** loadAddress
) const
723 __block
const BinaryImage
* foundImage
= nullptr;
724 sLoadedImages
.forEachWithReadLock(^(uint32_t anIndex
, const LoadedImage
& loadedImage
, bool& stop
) {
725 if ( anIndex
== index
) {
726 foundImage
= loadedImage
.image();
727 *loadAddress
= loadedImage
.loadedAddress();
731 return launch_cache::Image(foundImage
);
launch_cache::Image AllImages::findByLoadAddress(const mach_header* loadAddress) const
{
    __block const BinaryImage* foundImage = nullptr;
    sLoadedImages.forEachWithReadLock(^(uint32_t anIndex, const LoadedImage& loadedImage, bool& stop) {
        if ( loadedImage.loadedAddress() == loadAddress ) {
            foundImage = loadedImage.image();
            stop = true;
        }
    });
    return launch_cache::Image(foundImage);
}
bool AllImages::findIndexForLoadAddress(const mach_header* loadAddress, uint32_t& index)
{
    __block bool result = false;
    sLoadedImages.forEachWithReadLock(^(uint32_t anIndex, const LoadedImage& loadedImage, bool& stop) {
        if ( loadedImage.loadedAddress() == loadAddress ) {
            index = anIndex;
            result = true;
            stop = true;
        }
    });
    return result;
}
void AllImages::forEachImage(void (^handler)(uint32_t imageIndex, const mach_header* loadAddress, const launch_cache::Image image, bool& stop)) const
{
    sLoadedImages.forEachWithReadLock(^(uint32_t imageIndex, const LoadedImage& loadedImage, bool& stop) {
        handler(imageIndex, loadedImage.loadedAddress(), launch_cache::Image(loadedImage.image()), stop);
    });
}
launch_cache::Image AllImages::findByOwnedAddress(const void* addr, const mach_header** loadAddress, uint8_t* permissions) const
{
    if ( _initialImages != nullptr ) {
        // being called during libSystem initialization, so sLoadedImages not allocated yet
        for (int i=0; i < _initialImages->count(); ++i) {
            const loader::ImageInfo& entry = (*_initialImages)[i];
            launch_cache::Image anImage(entry.imageData);
            if ( anImage.containsAddress(addr, entry.loadAddress, permissions) ) {
                *loadAddress = entry.loadAddress;
                return entry.imageData;
            }
        }
        return launch_cache::Image(nullptr);
    }

    // if address is in cache, do fast search of cache
    if ( (_dyldCacheAddress != nullptr) && (addr > _dyldCacheAddress) ) {
        const DyldSharedCache* dyldCache = (DyldSharedCache*)_dyldCacheAddress;
        if ( addr < (void*)((uint8_t*)_dyldCacheAddress+dyldCache->mappedSize()) ) {
            size_t cacheVmOffset = ((uint8_t*)addr - (uint8_t*)_dyldCacheAddress);
            DyldCacheParser cacheParser(dyldCache, false);
            launch_cache::ImageGroup cachedDylibsGroup(cacheParser.cachedDylibsGroup());
            uint32_t mhCacheOffset;
            uint8_t foundPermissions;
            launch_cache::Image image(cachedDylibsGroup.findImageByCacheOffset(cacheVmOffset, mhCacheOffset, foundPermissions));
            if ( image.valid() ) {
                *loadAddress = (mach_header*)((uint8_t*)_dyldCacheAddress + mhCacheOffset);
                if ( permissions != nullptr )
                    *permissions = foundPermissions;
            }
            return image;
        }
    }

    __block const BinaryImage* foundImage = nullptr;
    sLoadedImages.forEachWithReadLock(^(uint32_t anIndex, const LoadedImage& loadedImage, bool& stop) {
        launch_cache::Image anImage(loadedImage.image());
        if ( anImage.containsAddress(addr, loadedImage.loadedAddress(), permissions) ) {
            *loadAddress = loadedImage.loadedAddress();
            foundImage   = loadedImage.image();
            stop = true;
        }
    });
    return launch_cache::Image(foundImage);
}
const mach_header* AllImages::findLoadAddressByImage(const BinaryImage* targetImage) const
{
    __block const mach_header* foundAddress = nullptr;
    sLoadedImages.forEachWithReadLock(^(uint32_t anIndex, const LoadedImage& loadedImage, bool& stop) {
        if ( targetImage == loadedImage.image() ) {
            foundAddress = loadedImage.loadedAddress();
            stop = true;
        }
    });
    return foundAddress;
}
* AllImages::mainExecutable() const
826 assert(_programVars
!= nullptr);
827 return (const mach_header
*)_programVars
->mh
;
launch_cache::Image AllImages::mainExecutableImage() const
{
    assert(_mainClosure != nullptr);
    const launch_cache::Closure mainClosure(_mainClosure);
    const dyld3::launch_cache::ImageGroup mainGroup = mainClosure.group();
    const uint32_t mainExecutableIndex = mainClosure.mainExecutableImageIndex();
    const dyld3::launch_cache::Image mainImage = mainGroup.image(mainExecutableIndex);
    return mainImage;
}
void AllImages::setMainPath(const char* path)
{
    _mainExeOverridePath = path;
}
const char* AllImages::imagePath(const BinaryImage* binImage) const
{
#if __IPHONE_OS_VERSION_MIN_REQUIRED
    // on iOS and watchOS, apps may be moved on device after closure built
    if ( _mainExeOverridePath != nullptr ) {
        if ( binImage == mainExecutableImage().binaryData() )
            return _mainExeOverridePath;
    }
#endif
    launch_cache::Image image(binImage);
    return image.path();
}
void AllImages::setInitialGroups()
{
    DyldCacheParser cacheParser((DyldSharedCache*)_dyldCacheAddress, false);
    sKnownGroups.addNoLock(cacheParser.cachedDylibsGroup());
    sKnownGroups.addNoLock(cacheParser.otherDylibsGroup());
    launch_cache::Closure closure(_mainClosure);
    sKnownGroups.addNoLock(closure.group().binaryData());
}
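
// The three addNoLock() calls above establish a fixed ordering in sKnownGroups that
// the accessors below depend on: index 0 = dylibs in the shared cache, index 1 =
// other known dylibs, index 2 = the main closure's group.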
const launch_cache::binary_format::ImageGroup* AllImages::cachedDylibsGroup()
{
    return sKnownGroups[0];
}

const launch_cache::binary_format::ImageGroup* AllImages::otherDylibsGroup()
{
    return sKnownGroups[1];
}

const AllImages::BinaryImageGroup* AllImages::mainClosureGroup()
{
    return sKnownGroups[2];
}
uint32_t AllImages::currentGroupsCount() const
{
    return sKnownGroups.count();
}
& groups
) const
889 sKnownGroups
.forEachWithReadLock(^(uint32_t index
, const dyld3::launch_cache::binary_format::ImageGroup
* const &grpData
, bool &stop
) {
890 if ( index
< groups
.count() )
891 groups
[index
] = grpData
;
void AllImages::copyCurrentGroupsNoLock(ImageGroupList& groups) const
{
    sKnownGroups.forEachNoLock(^(uint32_t index, const dyld3::launch_cache::binary_format::ImageGroup* const& grpData, bool& stop) {
        if ( index < groups.count() )
            groups[index] = grpData;
    });
}
const mach_header* AllImages::alreadyLoaded(uint64_t inode, uint64_t mtime, bool bumpRefCount)
{
    __block const mach_header* result = nullptr;
    sLoadedImages.forEachWithReadLock(^(uint32_t anIndex, const LoadedImage& loadedImage, bool& stop) {
        launch_cache::Image img(loadedImage.image());
        if ( img.validateUsingModTimeAndInode() ) {
            if ( (img.fileINode() == inode) && (img.fileModTime() == mtime) ) {
                result = loadedImage.loadedAddress();
                if ( bumpRefCount && !loadedImage.neverUnload() )
                    incRefCount(loadedImage.loadedAddress());
                stop = true;
            }
        }
    });
    return result;
}
const mach_header* AllImages::alreadyLoaded(const char* path, bool bumpRefCount)
{
    __block const mach_header* result = nullptr;
    uint32_t targetHash = launch_cache::ImageGroup::hashFunction(path);
    sLoadedImages.forEachWithReadLock(^(uint32_t anIndex, const LoadedImage& loadedImage, bool& stop) {
        launch_cache::Image img(loadedImage.image());
        if ( (img.pathHash() == targetHash) && (strcmp(path, imagePath(loadedImage.image())) == 0) ) {
            result = loadedImage.loadedAddress();
            if ( bumpRefCount && !loadedImage.neverUnload() )
                incRefCount(loadedImage.loadedAddress());
            stop = true;
        }
    });
    if ( result == nullptr ) {
        // perhaps there was an image override
        launch_cache::ImageGroup mainGroup(mainClosureGroup());
        STACK_ALLOC_DYNARRAY(const launch_cache::BinaryImageGroupData*, currentGroupsCount(), currentGroupsList);
        copyCurrentGroups(currentGroupsList);
        mainGroup.forEachImageRefOverride(currentGroupsList, ^(launch_cache::Image standardDylib, launch_cache::Image overrideDyilb, bool& stop) {
            if ( strcmp(standardDylib.path(), path) == 0 ) {
                result = alreadyLoaded(overrideDyilb.path(), bumpRefCount);
                stop = true;
            }
        });
    }
    return result;
}
const mach_header* AllImages::alreadyLoaded(const BinaryImage* binImage, bool bumpRefCount)
{
    const mach_header* result = findLoadAddressByImage(binImage);
    if ( result != nullptr ) {
        launch_cache::Image loadedImage(binImage);
        if ( bumpRefCount && !loadedImage.neverUnload() )
            incRefCount(result);
    }
    return result;
}
void AllImages::incRefCount(const mach_header* loadAddress)
{
    __block bool found = false;
    sDlopenRefCounts.forEachWithWriteLock(^(uint32_t index, DlopenCount& entry, bool& stop) {
        if ( entry.loadAddress == loadAddress ) {
            found = true;
            entry.refCount += 1;
            stop = true;
        }
    });
    if ( !found ) {
        DlopenCount newEnty = { loadAddress, 1 };
        sDlopenRefCounts.add(newEnty);
    }
}
void AllImages::decRefCount(const mach_header* loadAddress)
{
    __block bool refCountNowZero = false;
    sDlopenRefCounts.forEachWithWriteLock(^(uint32_t index, DlopenCount& entry, bool& stop) {
        if ( entry.loadAddress == loadAddress ) {
            entry.refCount -= 1;
            stop = true;
            if ( entry.refCount == 0 )
                refCountNowZero = true;
        }
    });
    if ( refCountNowZero ) {
        DlopenCount delEnty = { loadAddress, 0 };
        sDlopenRefCounts.remove(delEnty);
        garbageCollectImages();
    }
}
#if __MAC_OS_X_VERSION_MIN_REQUIRED
__NSObjectFileImage* AllImages::addNSObjectFileImage()
{
    // look for empty slot first
    __block __NSObjectFileImage* result = nullptr;
    sNSObjectFileImages.forEachWithWriteLock(^(uint32_t index, __NSObjectFileImage& value, bool& stop) {
        if ( (value.path == nullptr) && (value.memSource == nullptr) ) {
            result = &value;
            stop = true;
        }
    });
    if ( result != nullptr )
        return result;

    // otherwise allocate new slot
    __NSObjectFileImage empty;
    return sNSObjectFileImages.add(empty);
}
bool AllImages::hasNSObjectFileImage(__NSObjectFileImage* ofi)
{
    __block bool result = false;
    sNSObjectFileImages.forEachNoLock(^(uint32_t index, const __NSObjectFileImage& value, bool& stop) {
        if ( &value == ofi ) {
            result = ((value.memSource != nullptr) || (value.path != nullptr));
            stop = true;
        }
    });
    return result;
}
void AllImages::removeNSObjectFileImage(__NSObjectFileImage* ofi)
{
    sNSObjectFileImages.forEachWithWriteLock(^(uint32_t index, __NSObjectFileImage& value, bool& stop) {
        if ( &value == ofi ) {
            // mark slot as empty
            ofi->path        = nullptr;
            ofi->memSource   = nullptr;
            ofi->memLength   = 0;
            ofi->loadAddress = nullptr;
            ofi->binImage    = nullptr;
            stop = true;
        }
    });
}
#endif
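
// Note: slots in sNSObjectFileImages are recycled (marked empty) rather than erased,
// because the __NSObjectFileImage pointers handed out by addNSObjectFileImage() are
// validated by address in hasNSObjectFileImage(); the chunked storage above
// guarantees those addresses stay valid.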
class VIS_HIDDEN Reaper
{
public:
    Reaper(uint32_t count, const LoadedImage** unloadables, bool* inUseArray);
    void garbageCollect();
    void finalizeDeadImages();

private:
    typedef launch_cache::binary_format::Image BinaryImage;

    void     markDirectlyDlopenedImagesAsUsed();
    void     markDependentOfInUseImages();
    void     markDependentsOf(const LoadedImage*);
    bool     loadAddressIsUnloadable(const mach_header* loadAddr, uint32_t& index);
    bool     imageIsUnloadable(const BinaryImage* binImage, uint32_t& foundIndex);
    uint32_t inUseCount();
    void     dump(const char* msg);

    const LoadedImage**  _unloadablesArray;
    bool*                _inUseArray;
    uint32_t             _arrayCount;
    uint32_t             _deadCount;
};
Reaper::Reaper(uint32_t count, const LoadedImage** unloadables, bool* inUseArray)
    : _unloadablesArray(unloadables), _inUseArray(inUseArray), _arrayCount(count)
{
}
bool Reaper::loadAddressIsUnloadable(const mach_header* loadAddr, uint32_t& foundIndex)
{
    for (uint32_t i=0; i < _arrayCount; ++i) {
        if ( _unloadablesArray[i]->loadedAddress() == loadAddr ) {
            foundIndex = i;
            return true;
        }
    }
    return false;
}
bool Reaper::imageIsUnloadable(const BinaryImage* binImage, uint32_t& foundIndex)
{
    for (uint32_t i=0; i < _arrayCount; ++i) {
        if ( _unloadablesArray[i]->image() == binImage ) {
            foundIndex = i;
            return true;
        }
    }
    return false;
}
void Reaper::markDirectlyDlopenedImagesAsUsed()
{
    sDlopenRefCounts.forEachWithReadLock(^(uint32_t refCountIndex, const dyld3::DlopenCount& dlEntry, bool& stop) {
        if ( dlEntry.refCount != 0 ) {
            uint32_t foundIndex;
            if ( loadAddressIsUnloadable(dlEntry.loadAddress, foundIndex) ) {
                _inUseArray[foundIndex] = true;
            }
        }
    });
}
uint32_t Reaper::inUseCount()
{
    uint32_t count = 0;
    for (uint32_t i=0; i < _arrayCount; ++i) {
        if ( _inUseArray[i] )
            ++count;
    }
    return count;
}
void Reaper::markDependentsOf(const LoadedImage* entry)
{
    const launch_cache::Image image(entry->image());
    STACK_ALLOC_DYNARRAY(const launch_cache::BinaryImageGroupData*, gAllImages.currentGroupsCount(), currentGroupsList);
    gAllImages.copyCurrentGroups(currentGroupsList);
    image.forEachDependentImage(currentGroupsList, ^(uint32_t depIndex, dyld3::launch_cache::Image depImage, dyld3::launch_cache::Image::LinkKind kind, bool& stop) {
        uint32_t foundIndex;
        if ( !depImage.neverUnload() && imageIsUnloadable(depImage.binaryData(), foundIndex) ) {
            _inUseArray[foundIndex] = true;
        }
    });
}
void Reaper::markDependentOfInUseImages()
{
    for (uint32_t i=0; i < _arrayCount; ++i) {
        if ( _inUseArray[i] )
            markDependentsOf(_unloadablesArray[i]);
    }
}
void Reaper::dump(const char* msg)
{
    //log("%s:\n", msg);
    for (uint32_t i=0; i < _arrayCount; ++i) {
        dyld3::launch_cache::Image image(_unloadablesArray[i]->image());
        //log("  in-used=%d  %s\n", _inUseArray[i], image.path());
    }
}
void Reaper::garbageCollect()
{
    //dump("all unloadable images");

    // mark all dylibs directly dlopen'ed as in use
    markDirectlyDlopenedImagesAsUsed();

    //dump("directly dlopen()'ed marked");

    // iteratively mark dependents of in-use dylibs as in-use until in-use count stops changing
    uint32_t lastCount = inUseCount();
    bool countChanged = false;
    do {
        markDependentOfInUseImages();
        //dump("dependents marked");
        uint32_t newCount = inUseCount();
        countChanged = (newCount != lastCount);
        lastCount = newCount;
    } while (countChanged);

    _deadCount = _arrayCount - inUseCount();
}
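
// The loop above is the "mark" phase of the mark-and-sweep described below: starting
// from dylibs with a non-zero dlopen ref count, it repeatedly propagates in-use marks
// to their dependents until a fixed point is reached (no new images marked).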
void Reaper::finalizeDeadImages()
{
    if ( _deadCount == 0 )
        return;
    __cxa_range_t ranges[_deadCount];
    __cxa_range_t* rangesArray = ranges;
    __block unsigned int rangesCount = 0;
    for (uint32_t i=0; i < _arrayCount; ++i) {
        if ( _inUseArray[i] )
            continue;
        dyld3::launch_cache::Image image(_unloadablesArray[i]->image());
        image.forEachDiskSegment(^(uint32_t segIndex, uint32_t fileOffset, uint32_t fileSize, int64_t vmOffset, uint64_t vmSize, uint8_t permissions, bool& stop) {
            if ( permissions & VM_PROT_EXECUTE ) {
                rangesArray[rangesCount].addr   = (char*)(_unloadablesArray[i]->loadedAddress()) + vmOffset;
                rangesArray[rangesCount].length = (size_t)vmSize;
                ++rangesCount;
            }
        });
    }
    __cxa_finalize_ranges(ranges, rangesCount);
}
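
// Note (assumption about the libSystem API): __cxa_finalize_ranges() runs the
// registered atexit/__cxa_atexit terminators whose function pointers fall inside the
// given address ranges.  Collecting only the executable (VM_PROT_EXECUTE) segments
// of dead images should suffice, since terminator code must live in an executable
// segment.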
// This function is called at the end of dlclose() when the reference count goes to zero.
// The dylib being unloaded may have brought in other dependent dylibs when it was loaded.
// Those dependent dylibs need to be unloaded, but only if they are not referenced by
// something else.  We use a standard mark and sweep garbage collection.
//
// The tricky part is that when a dylib is unloaded it may have a termination function that
// can run and itself call dlclose() on yet another dylib.  The problem is that this
// sort of garbage collection is not re-entrant.  Instead a terminator's call to dlclose()
// which calls garbageCollectImages() will just set a flag to re-do the garbage collection
// when the current pass is done.
//
// Also note that this is done within the sLoadedImages writer lock, so any dlopen/dlclose
// on other threads are blocked while this garbage collection runs.
void AllImages::garbageCollectImages()
{
    // if some other thread is currently GC'ing images, let other thread do the work
    int32_t newCount = OSAtomicIncrement32(&_gcCount);
    if ( newCount != 1 )
        return;

    do {
        const uint32_t      loadedImageCount = sLoadedImages.count();
        const LoadedImage*  unloadables[loadedImageCount];
        bool                unloadableInUse[loadedImageCount];
        const LoadedImage** unloadablesArray = unloadables;
        bool*               unloadableInUseArray = unloadableInUse;
        __block uint32_t    unloadableCount = 0;
        // do GC with lock, so no other images can be added during GC
        sLoadedImages.withReadLock(^() {
            sLoadedImages.forEachNoLock(^(uint32_t index, const LoadedImage& entry, bool& stop) {
                const launch_cache::Image image(entry.image());
                if ( !image.neverUnload() && !entry.neverUnload() ) {
                    unloadablesArray[unloadableCount]     = &entry;
                    unloadableInUseArray[unloadableCount] = false;
                    //log("unloadable[%d] %p %s\n", unloadableCount, entry.loadedAddress(), image.path());
                    ++unloadableCount;
                }
            });
            // make reaper object to do garbage collection and notifications
            Reaper reaper(unloadableCount, unloadablesArray, unloadableInUseArray);
            reaper.garbageCollect();

            // FIXME: we should sort dead images so higher level ones are terminated first

            // call cxa_finalize_ranges of dead images
            reaper.finalizeDeadImages();

            // FIXME: call static terminators of dead images
            // FIXME: DOF unregister
        });

        //log("sLoadedImages before GC removals:\n");
        //sLoadedImages.dump(^(const LoadedImage& entry) {
        //    const launch_cache::Image image(entry.image());
        //    log("   loadAddr=%p, path=%s\n", entry.loadedAddress(), image.path());
        //});

        // make copy of LoadedImages we want to remove
        // because unloadables[] points into ChunkVector we are shrinking
        uint32_t removalCount = 0;
        for (uint32_t i=0; i < unloadableCount; ++i) {
            if ( !unloadableInUse[i] )
                ++removalCount;
        }
        if ( removalCount > 0 ) {
            STACK_ALLOC_DYNARRAY(loader::ImageInfo, removalCount, unloadImages);
            uint32_t removalIndex = 0;
            for (uint32_t i=0; i < unloadableCount; ++i) {
                if ( !unloadableInUse[i] ) {
                    unloadImages[removalIndex].loadAddress = unloadables[i]->loadedAddress();
                    unloadImages[removalIndex].imageData   = unloadables[i]->image();
                    ++removalIndex;
                }
            }
            // remove entries from sLoadedImages
            removeImages(unloadImages);
        }

        //log("sLoadedImages after GC removals:\n");
        //sLoadedImages.dump(^(const LoadedImage& entry) {
        //    const launch_cache::Image image(entry.image());
        //    log("   loadAddr=%p, path=%s\n", entry.loadedAddress(), image.path());
        //});

        // if some other thread called GC during our work, redo GC on its behalf
        newCount = OSAtomicDecrement32(&_gcCount);
    } while (newCount > 0);
}
const launch_cache::binary_format::Image* AllImages::messageClosured(const char* path, const char* apiName, const char* closuredErrorMessages[3], int& closuredErrorMessagesCount)
{
    __block const launch_cache::binary_format::Image* result = nullptr;
    sKnownGroups.withWriteLock(^() {
        ClosureBuffer::CacheIdent cacheIdent;
        bzero(&cacheIdent, sizeof(cacheIdent));
        if ( _dyldCacheAddress != nullptr ) {
            const DyldSharedCache* dyldCache = (DyldSharedCache*)_dyldCacheAddress;
            dyldCache->getUUID(cacheIdent.cacheUUID);
            cacheIdent.cacheAddress    = (unsigned long)_dyldCacheAddress;
            cacheIdent.cacheMappedSize = dyldCache->mappedSize();
        }
        gPathOverrides.forEachPathVariant(path, ^(const char* possiblePath, bool& stopVariants) {
            struct stat statBuf;
            if ( stat(possiblePath, &statBuf) == 0 ) {
                if ( S_ISDIR(statBuf.st_mode) ) {
                    log_apis("   %s: path is directory: %s\n", apiName, possiblePath);
                    if ( closuredErrorMessagesCount < 3 )
                        closuredErrorMessages[closuredErrorMessagesCount++] = strdup("not a file");
                }
                else {
                    // file exists, ask closured to build info for it
                    STACK_ALLOC_DYNARRAY(const launch_cache::BinaryImageGroupData*, sKnownGroups.countNoLock(), currentGroupsList);
                    gAllImages.copyCurrentGroupsNoLock(currentGroupsList);
                    dyld3::launch_cache::DynArray<const dyld3::launch_cache::binary_format::ImageGroup*> nonCacheGroupList(currentGroupsList.count()-2, &currentGroupsList[2]);
                    const dyld3::launch_cache::binary_format::ImageGroup* closuredCreatedGroupData = nullptr;
                    ClosureBuffer closureBuilderInput(cacheIdent, path, nonCacheGroupList, gPathOverrides);
                    ClosureBuffer closureBuilderOutput = dyld3::closured_CreateImageGroup(closureBuilderInput);
                    if ( !closureBuilderOutput.isError() ) {
                        vm_protect(mach_task_self(), closureBuilderOutput.vmBuffer(), closureBuilderOutput.vmBufferSize(), false, VM_PROT_READ);
                        closuredCreatedGroupData = closureBuilderOutput.imageGroup();
                        log_apis("   %s: closured built ImageGroup for path: %s\n", apiName, possiblePath);
                        sKnownGroups.addNoLock(closuredCreatedGroupData);
                        launch_cache::ImageGroup group(closuredCreatedGroupData);
                        result = group.imageBinary(0);
                        stopVariants = true;
                    }
                    else {
                        log_apis("   %s: closured failed for path: %s, error: %s\n", apiName, possiblePath, closureBuilderOutput.errorMessage());
                        if ( closuredErrorMessagesCount < 3 ) {
                            closuredErrorMessages[closuredErrorMessagesCount++] = strdup(closureBuilderOutput.errorMessage());
                        }
                        closureBuilderOutput.free();
                    }
                }
            }
            else {
                log_apis("   %s: file does not exist for path: %s\n", apiName, possiblePath);
            }
        });
    });

    return result;
}
const AllImages::BinaryImage* AllImages::findImageInKnownGroups(const char* path)
{
    __block const AllImages::BinaryImage* result = nullptr;
    sKnownGroups.forEachWithReadLock(^(uint32_t index, const dyld3::launch_cache::binary_format::ImageGroup* const& grpData, bool& stop) {
        launch_cache::ImageGroup group(grpData);
        uint32_t ignore;
        if ( const AllImages::BinaryImage* binImage = group.findImageByPath(path, ignore) ) {
            result = binImage;
            stop = true;
        }
    });
    return result;
}
bool AllImages::imageUnloadable(const launch_cache::Image& image, const mach_header* loadAddress) const
{
    // check if statically determined in closure that this can never be unloaded
    if ( image.neverUnload() )
        return false;

    // check if some runtime decision made this be never-unloadable
    __block bool foundAsNeverUnload = false;
    sLoadedImages.forEachWithReadLock(^(uint32_t anIndex, const LoadedImage& loadedImage, bool& stop) {
        if ( loadedImage.loadedAddress() == loadAddress ) {
            stop = true;
            if ( loadedImage.neverUnload() )
                foundAsNeverUnload = true;
        }
    });
    if ( foundAsNeverUnload )
        return false;

    return true;
}
void AllImages::addLoadNotifier(NotifyFunc func)
{
    // callback about already loaded images
    const uint32_t existingCount = sLoadedImages.count();
    const mach_header* existingMHs[existingCount];
    const mach_header** existingArray = existingMHs;
    sLoadedImages.forEachWithReadLock(^(uint32_t anIndex, const LoadedImage& loadedImage, bool& stop) {
        if ( anIndex < existingCount )
            existingArray[anIndex] = loadedImage.loadedAddress();
    });
    // we don't want to hold lock while calling out, so prebuild array (with lock) then do calls on that array (without lock)
    for (uint32_t i=0; i < existingCount; i++) {
        MachOParser parser(existingArray[i]);
        log_notifications("dyld: add notifier %p called with mh=%p\n", func, existingArray[i]);
        func(existingArray[i], parser.getSlide());
    }

    // add to list of functions to call about future loads
    sLoadNotifiers.add(func);
}
void AllImages::addUnloadNotifier(NotifyFunc func)
{
    // add to list of functions to call about future unloads
    sUnloadNotifiers.add(func);
}
void AllImages::setObjCNotifiers(_dyld_objc_notify_mapped map, _dyld_objc_notify_init init, _dyld_objc_notify_unmapped unmap)
{
    _objcNotifyMapped   = map;
    _objcNotifyInit     = init;
    _objcNotifyUnmapped = unmap;

    // callback about already loaded images
    uint32_t maxCount = count();
    const char* pathsBuffer[maxCount];
    const mach_header* mhBuffer[maxCount];
    __block const char** paths = pathsBuffer;
    __block const mach_header** mhs = mhBuffer;
    __block uint32_t imagesWithObjC = 0;
    sLoadedImages.forEachWithReadLock(^(uint32_t anIndex, const LoadedImage& loadedImage, bool& stop) {
        launch_cache::Image img(loadedImage.image());
        if ( img.hasObjC() ) {
            mhs[imagesWithObjC]   = loadedImage.loadedAddress();
            paths[imagesWithObjC] = imagePath(loadedImage.image());
            ++imagesWithObjC;
        }
    });
    if ( imagesWithObjC != 0 ) {
        (*map)(imagesWithObjC, pathsBuffer, mhBuffer);
        if ( log_notifications("dyld: objc-mapped-notifier called with %d images:\n", imagesWithObjC) ) {
            for (uint32_t i=0; i < imagesWithObjC; ++i) {
                log_notifications("dyld: objc-mapped: %p %s\n", mhBuffer[i], pathsBuffer[i]);
            }
        }
    }
}
void AllImages::vmAccountingSetSuspended(bool suspend)
{
#if __arm__ || __arm64__
    // <rdar://problem/29099600> dyld should tell the kernel when it is doing fix-ups caused by roots
    log_fixups("vm.footprint_suspend=%d\n", suspend);
    int newValue = suspend ? 1 : 0;
    int oldValue = 0;
    size_t newlen = sizeof(newValue);
    size_t oldlen = sizeof(oldValue);
    sysctlbyname("vm.footprint_suspend", &oldValue, &oldlen, &newValue, newlen);
#endif
}
void AllImages::applyInterposingToDyldCache(const launch_cache::binary_format::Closure* closure, const dyld3::launch_cache::DynArray<loader::ImageInfo>& initialImages)
{
    launch_cache::Closure mainClosure(closure);
    launch_cache::ImageGroup mainGroup = mainClosure.group();
    DyldCacheParser cacheParser((DyldSharedCache*)_dyldCacheAddress, false);
    const launch_cache::binary_format::ImageGroup* dylibsGroupData = cacheParser.cachedDylibsGroup();
    launch_cache::ImageGroup dyldCacheDylibGroup(dylibsGroupData);
    __block bool suspendedAccounting = false;
    mainGroup.forEachDyldCacheSymbolOverride(^(uint32_t patchTableIndex, const launch_cache::binary_format::Image* imageData, uint32_t imageOffset, bool& stop) {
        bool foundInImages = false;
        for (int i=0; i < initialImages.count(); ++i) {
            if ( initialImages[i].imageData == imageData ) {
                foundInImages = true;
                uintptr_t replacement = (uintptr_t)(initialImages[i].loadAddress) + imageOffset;
                dyldCacheDylibGroup.forEachDyldCachePatchLocation(_dyldCacheAddress, patchTableIndex, ^(uintptr_t* locationToPatch, uintptr_t addend, bool& innerStop) {
                    if ( !suspendedAccounting ) {
                        vmAccountingSetSuspended(true);
                        suspendedAccounting = true;
                    }
                    log_fixups("dyld: cache fixup: *%p = %p\n", locationToPatch, (void*)replacement);
                    *locationToPatch = replacement + addend;
                });
            }
        }
        if ( !foundInImages ) {
            launch_cache::Image img(imageData);
            log_fixups("did not find loaded image to patch into cache: %s\n", img.path());
        }
    });
    if ( suspendedAccounting )
        vmAccountingSetSuspended(false);
}
void AllImages::runLibSystemInitializer(const mach_header* libSystemAddress, const launch_cache::binary_format::Image* libSystemBinImage)
{
    // run all initializers in image
    launch_cache::Image libSystemImage(libSystemBinImage);
    libSystemImage.forEachInitializer(libSystemAddress, ^(const void* func) {
        Initializer initFunc = (Initializer)func;
        dyld3::kdebug_trace_dyld_duration(DBG_DYLD_TIMING_STATIC_INITIALIZER, (uint64_t)func, 0, ^{
            initFunc(NXArgc, NXArgv, environ, appleParams, _programVars);
        });
        log_initializers("called initializer %p in %s\n", initFunc, libSystemImage.path());
    });

    // mark libSystem.dylib as inited, so a later recursive initialization pass won't re-run it
    sLoadedImages.forEachWithWriteLock(^(uint32_t anIndex, LoadedImage& loadedImage, bool& stop) {
        if ( loadedImage.loadedAddress() == libSystemAddress ) {
            loadedImage.setState(LoadedImage::State::inited);
            stop = true;
        }
    });
}
void AllImages::runInitialzersBottomUp(const mach_header* imageLoadAddress)
{
    launch_cache::Image topImage = findByLoadAddress(imageLoadAddress);
    if ( topImage.isInvalid() )
        return;

    // closure contains list of initializers to run in-order
    STACK_ALLOC_DYNARRAY(const launch_cache::BinaryImageGroupData*, currentGroupsCount(), currentGroupsList);
    copyCurrentGroups(currentGroupsList);
    topImage.forEachInitBefore(currentGroupsList, ^(launch_cache::Image imageToInit) {
        // find the LoadedImage entry for the image to initialize
        __block LoadedImage* foundEntry = nullptr;
        sLoadedImages.forEachWithReadLock(^(uint32_t index, const LoadedImage& entry, bool& stop) {
            if ( entry.image() == imageToInit.binaryData() ) {
                foundEntry = (LoadedImage*)&entry;
                stop = true;
            }
        });
        assert(foundEntry != nullptr);
        pthread_mutex_lock(&_initializerLock);
        // Note, due to the large lock in dlopen, we can't be waiting on another thread
        // here, but it's possible that we are in a dlopen which is initializing us again
        if ( foundEntry->state() == LoadedImage::State::beingInited ) {
            log_initializers("dyld: already initializing '%s'\n", imagePath(imageToInit.binaryData()));
        }
        // at this point, the image is either initialized or not
        // if not, initialize it on this thread
        if ( foundEntry->state() == LoadedImage::State::uninited ) {
            foundEntry->setState(LoadedImage::State::beingInited);
            // release initializer lock, so other threads can run initializers
            pthread_mutex_unlock(&_initializerLock);
            // tell objc to run any +load methods in image
            if ( (_objcNotifyInit != nullptr) && imageToInit.mayHavePlusLoads() ) {
                log_notifications("dyld: objc-init-notifier called with mh=%p, path=%s\n", foundEntry->loadedAddress(), imagePath(imageToInit.binaryData()));
                (*_objcNotifyInit)(imagePath(imageToInit.binaryData()), foundEntry->loadedAddress());
            }
            // run all initializers in image
            imageToInit.forEachInitializer(foundEntry->loadedAddress(), ^(const void* func) {
                Initializer initFunc = (Initializer)func;
                dyld3::kdebug_trace_dyld_duration(DBG_DYLD_TIMING_STATIC_INITIALIZER, (uint64_t)func, 0, ^{
                    initFunc(NXArgc, NXArgv, environ, appleParams, _programVars);
                });
                log_initializers("dyld: called initializer %p in %s\n", initFunc, imageToInit.path());
            });
            // reacquire initializer lock to switch state to inited
            pthread_mutex_lock(&_initializerLock);
            foundEntry->setState(LoadedImage::State::inited);
        }
        pthread_mutex_unlock(&_initializerLock);
    });
}
} // namespace dyld3