/*
 * Copyright (c) 2017 Apple Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
#include <sys/types.h>
#include <sys/sysctl.h>
#include <mach/mach_time.h> // mach_absolute_time()
#include <libkern/OSAtomic.h>

#include "AllImages.h"
#include "libdyldEntryVector.h"
#include "DyldSharedCache.h"
#include "PathOverrides.h"
#include "ClosureBuilder.h"
#include "ClosureFileSystemPhysical.h"
extern const char** appleParams;

// these should be in a header
struct __cxa_range_t {
    const void* addr;
    size_t      length;
};
extern "C" void __cxa_finalize_ranges(const __cxa_range_t ranges[], unsigned int count);

VIS_HIDDEN bool gUseDyld3 = false;


namespace dyld3 {

///////////////////// AllImages ////////////////////////////
void AllImages::init(const closure::LaunchClosure* closure, const DyldSharedCache* dyldCacheLoadAddress, const char* dyldCachePath,
                     const Array<LoadedImage>& initialImages)
{
    _mainClosure        = closure;
    _initialImages      = &initialImages;
    _dyldCacheAddress   = dyldCacheLoadAddress;
    _dyldCachePath      = dyldCachePath;

    if ( _dyldCacheAddress ) {
        const dyld_cache_mapping_info* const fileMappings = (dyld_cache_mapping_info*)((uint64_t)_dyldCacheAddress + _dyldCacheAddress->header.mappingOffset);
        _dyldCacheSlide = (uint64_t)dyldCacheLoadAddress - fileMappings[0].address;
        _imagesArrays.push_back(dyldCacheLoadAddress->cachedDylibsImageArray());
        if ( auto others = dyldCacheLoadAddress->otherOSImageArray() )
            _imagesArrays.push_back(others);
    }
    _imagesArrays.push_back(_mainClosure->images());

    // record first ImageNum to use for dlopen() calls
    _mainClosure->images()->forEachImage(^(const dyld3::closure::Image* image, bool& stop) {
        closure::ImageNum num = image->imageNum();
        if ( num >= _nextImageNum )
            _nextImageNum = num+1;
    });

    // make temporary old image array, so libSystem initializers can be debugged
    STACK_ALLOC_ARRAY(dyld_image_info, oldDyldInfo, initialImages.count());
    for (const LoadedImage& li : initialImages) {
        oldDyldInfo.push_back({li.loadedAddress(), li.image()->path(), 0});
    }
    _oldAllImageInfos->infoArray        = &oldDyldInfo[0];
    _oldAllImageInfos->infoArrayCount   = (uint32_t)oldDyldInfo.count();
    _oldAllImageInfos->notification(dyld_image_adding, _oldAllImageInfos->infoArrayCount, _oldAllImageInfos->infoArray);
    _oldAllImageInfos->infoArray        = nullptr;
    _oldAllImageInfos->infoArrayCount   = 0;

    _processDOFs = Loader::dtraceUserProbesEnabled();
}
void AllImages::setProgramVars(ProgramVars* vars)
{
    _programVars = vars;
    const dyld3::MachOFile* mf = (dyld3::MachOFile*)_programVars->mh;
    mf->forEachSupportedPlatform(^(dyld3::Platform platform, uint32_t minOS, uint32_t sdk) {
        _platform = (dyld_platform_t)platform;
        // FIXME: assert there is only one platform?
    });
}
void AllImages::setRestrictions(bool allowAtPaths, bool allowEnvPaths)
{
    _allowAtPaths  = allowAtPaths;
    _allowEnvPaths = allowEnvPaths;
}
void AllImages::applyInitialImages()
{
    addImages(*_initialImages);
    runImageNotifiers(*_initialImages);
    _initialImages = nullptr;  // this was stack allocated
}
void AllImages::withReadLock(void (^work)()) const
{
#ifdef OS_UNFAIR_RECURSIVE_LOCK_INIT
    os_unfair_recursive_lock_lock(&_loadImagesLock);
    work();
    os_unfair_recursive_lock_unlock(&_loadImagesLock);
#else
    pthread_mutex_lock(&_loadImagesLock);
    work();
    pthread_mutex_unlock(&_loadImagesLock);
#endif
}

void AllImages::withWriteLock(void (^work)())
{
#ifdef OS_UNFAIR_RECURSIVE_LOCK_INIT
    os_unfair_recursive_lock_lock(&_loadImagesLock);
    work();
    os_unfair_recursive_lock_unlock(&_loadImagesLock);
#else
    pthread_mutex_lock(&_loadImagesLock);
    work();
    pthread_mutex_unlock(&_loadImagesLock);
#endif
}

void AllImages::withNotifiersLock(void (^work)()) const
{
#ifdef OS_UNFAIR_RECURSIVE_LOCK_INIT
    os_unfair_recursive_lock_lock(&_notifiersLock);
    work();
    os_unfair_recursive_lock_unlock(&_notifiersLock);
#else
    pthread_mutex_lock(&_notifiersLock);
    work();
    pthread_mutex_unlock(&_notifiersLock);
#endif
}
void AllImages::mirrorToOldAllImageInfos()
{
    // set infoArray to NULL to denote it is in-use
    _oldAllImageInfos->infoArray = nullptr;

    // if array not large enough, re-alloc it
    uint32_t imageCount = (uint32_t)_loadedImages.count();
    if ( _oldArrayAllocCount < imageCount ) {
        uint32_t newAllocCount    = imageCount + 16;
        dyld_image_info* newArray = (dyld_image_info*)::malloc(sizeof(dyld_image_info)*newAllocCount);
        if ( _oldAllImageArray != nullptr ) {
            ::memcpy(newArray, _oldAllImageArray, sizeof(dyld_image_info)*_oldAllImageInfos->infoArrayCount);
            ::free(_oldAllImageArray);
        }
        _oldAllImageArray   = newArray;
        _oldArrayAllocCount = newAllocCount;
    }

    // fill out array to mirror current image list
    uint32_t index = 0;
    for (const LoadedImage& li : _loadedImages) {
        _oldAllImageArray[index].imageLoadAddress = li.loadedAddress();
        _oldAllImageArray[index].imageFilePath    = imagePath(li.image());
        _oldAllImageArray[index].imageFileModDate = 0;
        ++index;
    }

    // set infoArray back to base address of array (so other process can now read)
    _oldAllImageInfos->infoArrayCount           = imageCount;
    _oldAllImageInfos->infoArrayChangeTimestamp = mach_absolute_time();
    _oldAllImageInfos->infoArray                = _oldAllImageArray;

    // <rdar://problem/42668846> update UUID array if needed
    uint32_t nonCachedCount = 1; // always add dyld
    for (const LoadedImage& li : _loadedImages) {
        if ( !li.loadedAddress()->inDyldCache() )
            ++nonCachedCount;
    }
    if ( nonCachedCount != _oldAllImageInfos->uuidArrayCount ) {
        // set uuidArray to NULL to denote it is in-use
        _oldAllImageInfos->uuidArray = nullptr;
        // make sure allocation can hold all uuids
        if ( _oldUUIDAllocCount < nonCachedCount ) {
            uint32_t newAllocCount   = (nonCachedCount + 3) & (-4); // round up to multiple of 4
            dyld_uuid_info* newArray = (dyld_uuid_info*)::malloc(sizeof(dyld_uuid_info)*newAllocCount);
            if ( _oldUUIDArray != nullptr )
                ::free(_oldUUIDArray);
            _oldUUIDArray      = newArray;
            _oldUUIDAllocCount = newAllocCount;
        }
        // add dyld then all images not in dyld cache
        const MachOFile* dyldMF = (MachOFile*)_oldAllImageInfos->dyldImageLoadAddress;
        _oldUUIDArray[0].imageLoadAddress = dyldMF;
        dyldMF->getUuid(_oldUUIDArray[0].imageUUID);
        index = 1;
        for (const LoadedImage& li : _loadedImages) {
            if ( !li.loadedAddress()->inDyldCache() ) {
                _oldUUIDArray[index].imageLoadAddress = li.loadedAddress();
                li.loadedAddress()->getUuid(_oldUUIDArray[index].imageUUID);
                ++index;
            }
        }
        // set uuidArray back to base address of array (so kernel can now read)
        _oldAllImageInfos->uuidArray      = _oldUUIDArray;
        _oldAllImageInfos->uuidArrayCount = nonCachedCount;
    }
}
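// The infoArray = nullptr / restore sequence above is a simple handshake with
// out-of-process readers (debuggers reading the old dyld_all_image_infos):
// while the list is being rewritten the pointer is NULL, so a reader that sees
// NULL knows the array is mid-update. A hypothetical external reader (a sketch,
// not dyld code) would poll along these lines:
//
//     const dyld_all_image_infos* infos = ...; // located via task_info(TASK_DYLD_INFO)
//     while ( infos->infoArray == nullptr )
//         ;   // writer is mid-update; retry
//     // now snapshot infos->infoArrayCount entries of infos->infoArray
//
// task_info() with TASK_DYLD_INFO is the standard way another process finds
// this struct; the polling loop itself is illustrative only.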
void AllImages::addImages(const Array<LoadedImage>& newImages)
{
    // copy into _loadedImages
    withWriteLock(^(){
        _loadedImages.append(newImages);
        // if any image not in the shared cache added, recompute bounds
        for (const LoadedImage& li : newImages) {
            if ( !((MachOAnalyzer*)li.loadedAddress())->inDyldCache() ) {
                recomputeBounds();
                break;
            }
        }
    });
}
void AllImages::runImageNotifiers(const Array<LoadedImage>& newImages)
{
    uint32_t count = (uint32_t)newImages.count();

    if ( _oldAllImageInfos != nullptr ) {
        // sync to old all image infos struct
        mirrorToOldAllImageInfos();

        // tell debugger about new images
        dyld_image_info oldDyldInfo[count];
        for (uint32_t i=0; i < count; ++i) {
            oldDyldInfo[i].imageLoadAddress = newImages[i].loadedAddress();
            oldDyldInfo[i].imageFilePath    = imagePath(newImages[i].image());
            oldDyldInfo[i].imageFileModDate = 0;
        }
        _oldAllImageInfos->notification(dyld_image_adding, count, oldDyldInfo);
    }

    // log loads
    for (const LoadedImage& li : newImages) {
        log_loads("dyld: %s\n", imagePath(li.image()));
    }

#if !TARGET_IPHONE_SIMULATOR
    // call kdebug trace for each image
    if (kdebug_is_enabled(KDBG_CODE(DBG_DYLD, DBG_DYLD_UUID, DBG_DYLD_UUID_MAP_A))) {
        for (const LoadedImage& li : newImages) {
            const closure::Image* image = li.image();
            struct stat stat_buf;
            fsid_t fsid = {{ 0, 0 }};
            fsobj_id_t fsobjid = { 0, 0 };
            if ( !image->inDyldCache() && (stat(imagePath(image), &stat_buf) == 0) ) {
                fsobjid = *(fsobj_id_t*)&stat_buf.st_ino;
                fsid    = {{ stat_buf.st_dev, 0 }};
            }
            uuid_t uuid;
            image->getUuid(uuid);
            kdebug_trace_dyld_image(DBG_DYLD_UUID_MAP_A, &uuid, fsobjid, fsid, li.loadedAddress());
        }
    }
#endif

    // call each _dyld_register_func_for_add_image function with each image
    for (NotifyFunc func : _loadNotifiers) {
        for (const LoadedImage& li : newImages) {
            dyld3::ScopedTimer timer(DBG_DYLD_TIMING_FUNC_FOR_ADD_IMAGE, (uint64_t)li.loadedAddress(), (uint64_t)func, 0);
            log_notifications("dyld: add notifier %p called with mh=%p\n", func, li.loadedAddress());
            if ( li.image()->inDyldCache() )
                func(li.loadedAddress(), (uintptr_t)_dyldCacheSlide);
            else
                func(li.loadedAddress(), li.loadedAddress()->getSlide());
        }
    }
    for (LoadNotifyFunc func : _loadNotifiers2) {
        for (const LoadedImage& li : newImages) {
            dyld3::ScopedTimer timer(DBG_DYLD_TIMING_FUNC_FOR_ADD_IMAGE, (uint64_t)li.loadedAddress(), (uint64_t)func, 0);
            log_notifications("dyld: add notifier %p called with mh=%p\n", func, li.loadedAddress());
            if ( li.image()->inDyldCache() )
                func(li.loadedAddress(), li.image()->path(), false);
            else
                func(li.loadedAddress(), li.image()->path(), !li.image()->neverUnload());
        }
    }

    // call objc about images that use objc
    if ( _objcNotifyMapped != nullptr ) {
        const char* pathsBuffer[count];
        const mach_header* mhBuffer[count];
        uint32_t imagesWithObjC = 0;
        for (const LoadedImage& li : newImages) {
            const closure::Image* image = li.image();
            if ( image->hasObjC() ) {
                pathsBuffer[imagesWithObjC] = imagePath(image);
                mhBuffer[imagesWithObjC]    = li.loadedAddress();
                ++imagesWithObjC;
            }
        }
        if ( imagesWithObjC != 0 ) {
            dyld3::ScopedTimer timer(DBG_DYLD_TIMING_OBJC_MAP, 0, 0, 0);
            (*_objcNotifyMapped)(imagesWithObjC, pathsBuffer, mhBuffer);
            if ( log_notifications("dyld: objc-mapped-notifier called with %d images:\n", imagesWithObjC) ) {
                for (uint32_t i=0; i < imagesWithObjC; ++i) {
                    log_notifications("dyld: objc-mapped: %p %s\n", mhBuffer[i], pathsBuffer[i]);
                }
            }
        }
    }

    // notify any processes tracking loads in this process
    notifyMonitorLoads(newImages);
}
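// Client-side view: the _loadNotifiers called above are registered through the
// public API in <mach-o/dyld.h>. A minimal, illustrative client (not dyld code):
//
//     #include <mach-o/dyld.h>
//     #include <stdio.h>
//
//     static void onAddImage(const struct mach_header* mh, intptr_t slide)
//     {
//         // called once per already-loaded image at registration time,
//         // then again for every subsequent load
//         printf("image loaded at %p, slide=0x%lx\n", (void*)mh, (long)slide);
//     }
//
//     // somewhere early in the program:
//     //     _dyld_register_func_for_add_image(&onAddImage);
//
// Note the slide passed for images in the dyld cache is _dyldCacheSlide, not a
// per-image slide, matching the inDyldCache() branch above.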
void AllImages::removeImages(const Array<LoadedImage>& unloadImages)
{
    // call each _dyld_register_func_for_remove_image function with each image
    for (NotifyFunc func : _unloadNotifiers) {
        for (const LoadedImage& li : unloadImages) {
            dyld3::ScopedTimer timer(DBG_DYLD_TIMING_FUNC_FOR_REMOVE_IMAGE, (uint64_t)li.loadedAddress(), (uint64_t)func, 0);
            log_notifications("dyld: remove notifier %p called with mh=%p\n", func, li.loadedAddress());
            if ( li.image()->inDyldCache() )
                func(li.loadedAddress(), (uintptr_t)_dyldCacheSlide);
            else
                func(li.loadedAddress(), li.loadedAddress()->getSlide());
        }
    }

    // call objc about images going away
    if ( _objcNotifyUnmapped != nullptr ) {
        for (const LoadedImage& li : unloadImages) {
            if ( li.image()->hasObjC() ) {
                (*_objcNotifyUnmapped)(imagePath(li.image()), li.loadedAddress());
                log_notifications("dyld: objc-unmapped-notifier called with image %p %s\n", li.loadedAddress(), imagePath(li.image()));
            }
        }
    }

#if !TARGET_IPHONE_SIMULATOR
    // call kdebug trace for each image
    if (kdebug_is_enabled(KDBG_CODE(DBG_DYLD, DBG_DYLD_UUID, DBG_DYLD_UUID_MAP_A))) {
        for (const LoadedImage& li : unloadImages) {
            const closure::Image* image = li.image();
            struct stat stat_buf;
            fsid_t fsid = {{ 0, 0 }};
            fsobj_id_t fsobjid = { 0, 0 };
            if ( stat(imagePath(image), &stat_buf) == 0 ) {
                fsobjid = *(fsobj_id_t*)&stat_buf.st_ino;
                fsid    = {{ stat_buf.st_dev, 0 }};
            }
            uuid_t uuid;
            image->getUuid(uuid);
            kdebug_trace_dyld_image(DBG_DYLD_UUID_UNMAP_A, &uuid, fsobjid, fsid, li.loadedAddress());
        }
    }
#endif

    // remove each from _loadedImages
    for (const LoadedImage& uli : unloadImages) {
        for (LoadedImage& li : _loadedImages) {
            if ( uli.loadedAddress() == li.loadedAddress() ) {
                _loadedImages.erase(li);
                break;
            }
        }
    }

    // sync to old all image infos struct
    mirrorToOldAllImageInfos();

    // tell debugger about removed images
    STACK_ALLOC_ARRAY(dyld_image_info, oldDyldInfo, unloadImages.count());
    for (const LoadedImage& li : unloadImages) {
        oldDyldInfo.push_back({li.loadedAddress(), li.image()->path(), 0});
    }
    _oldAllImageInfos->notification(dyld_image_removing, (uint32_t)oldDyldInfo.count(), &oldDyldInfo[0]);

    // notify any processes tracking loads in this process
    notifyMonitorUnloads(unloadImages);

    // finally, unmap images
    for (const LoadedImage& li : unloadImages) {
        if ( li.leaveMapped() ) {
            log_loads("dyld: unloaded but left mmapped %s\n", imagePath(li.image()));
        }
        else {
            // unmapImage() modifies parameter, so use copy
            LoadedImage copy = li;
            Loader::unmapImage(copy);
            log_loads("dyld: unloaded %s\n", imagePath(li.image()));
        }
    }
}
// must be called with writeLock held
void AllImages::recomputeBounds()
{
    _lowestNonCached  = UINTPTR_MAX;
    _highestNonCached = 0;
    for (const LoadedImage& li : _loadedImages) {
        const MachOLoaded* ml = li.loadedAddress();
        uintptr_t start = (uintptr_t)ml;
        if ( !((MachOAnalyzer*)ml)->inDyldCache() ) {
            if ( start < _lowestNonCached )
                _lowestNonCached = start;
            uintptr_t end = start + (uintptr_t)(li.image()->vmSizeToMap());
            if ( end > _highestNonCached )
                _highestNonCached = end;
        }
    }
}
uint32_t AllImages::count() const
{
    return (uint32_t)_loadedImages.count();
}
bool AllImages::dyldCacheHasPath(const char* path) const
{
    uint32_t dyldCacheImageIndex;
    if ( _dyldCacheAddress != nullptr )
        return _dyldCacheAddress->hasImagePath(path, dyldCacheImageIndex);
    return false;
}
const char* AllImages::imagePathByIndex(uint32_t index) const
{
    if ( index < _loadedImages.count() )
        return imagePath(_loadedImages[index].image());
    return nullptr;
}

const mach_header* AllImages::imageLoadAddressByIndex(uint32_t index) const
{
    if ( index < _loadedImages.count() )
        return _loadedImages[index].loadedAddress();
    return nullptr;
}
bool AllImages::findImage(const mach_header* loadAddress, LoadedImage& foundImage) const
{
    __block bool result = false;
    withReadLock(^{
        for (const LoadedImage& li : _loadedImages) {
            if ( li.loadedAddress() == loadAddress ) {
                foundImage = li;
                result = true;
                break;
            }
        }
    });
    return result;
}

void AllImages::forEachImage(void (^handler)(const LoadedImage& loadedImage, bool& stop)) const
{
    withReadLock(^{
        bool stop = false;
        for (const LoadedImage& li : _loadedImages) {
            handler(li, stop);
            if ( stop )
                break;
        }
    });
}
const char* AllImages::pathForImageMappedAt(const void* addr) const
{
    if ( _initialImages != nullptr ) {
        // being called during libSystem initialization, so _loadedImages not allocated yet
        for (const LoadedImage& li : *_initialImages) {
            uint8_t permissions;
            if ( li.image()->containsAddress(addr, li.loadedAddress(), &permissions) ) {
                return li.image()->path();
            }
        }
        return nullptr;
    }

    // if address is in cache, do fast search of TEXT segments in cache
    __block const char* result = nullptr;
    if ( (_dyldCacheAddress != nullptr) && (addr > _dyldCacheAddress) ) {
        if ( addr < (void*)((uint8_t*)_dyldCacheAddress + _dyldCacheAddress->mappedSize()) ) {
            uint64_t cacheSlide       = (uint64_t)_dyldCacheAddress - _dyldCacheAddress->unslidLoadAddress();
            uint64_t unslidTargetAddr = (uint64_t)addr - cacheSlide;
            _dyldCacheAddress->forEachImageTextSegment(^(uint64_t loadAddressUnslid, uint64_t textSegmentSize, const unsigned char* dylibUUID, const char* installName, bool& stop) {
                if ( (loadAddressUnslid <= unslidTargetAddr) && (unslidTargetAddr < loadAddressUnslid + textSegmentSize) ) {
                    result = installName;
                    stop = true;
                }
            });
            if ( result != nullptr )
                return result;
        }
    }

    // slow path - search image list
    infoForImageMappedAt(addr, ^(const LoadedImage& foundImage, uint8_t permissions) {
        result = foundImage.image()->path();
    });

    return result;
}
void AllImages::infoForImageMappedAt(const void* addr, void (^handler)(const LoadedImage& foundImage, uint8_t permissions)) const
{
    __block uint8_t permissions;
    if ( _initialImages != nullptr ) {
        // being called during libSystem initialization, so _loadedImages not allocated yet
        for (const LoadedImage& li : *_initialImages) {
            if ( li.image()->containsAddress(addr, li.loadedAddress(), &permissions) ) {
                handler(li, permissions);
                break;
            }
        }
        return;
    }

    withReadLock(^{
        for (const LoadedImage& li : _loadedImages) {
            if ( li.image()->containsAddress(addr, li.loadedAddress(), &permissions) ) {
                handler(li, permissions);
                break;
            }
        }
    });
}
bool AllImages::infoForImageMappedAt(const void* addr, const MachOLoaded** ml, uint64_t* textSize, const char** path) const
{
    if ( _initialImages != nullptr ) {
        // being called during libSystem initialization, so _loadedImages not allocated yet
        for (const LoadedImage& li : *_initialImages) {
            uint8_t permissions;
            if ( li.image()->containsAddress(addr, li.loadedAddress(), &permissions) ) {
                if ( ml != nullptr )
                    *ml = li.loadedAddress();
                if ( path != nullptr )
                    *path = li.image()->path();
                if ( textSize != nullptr ) {
                    *textSize = li.image()->textSize();
                }
                return true;
            }
        }
        return false;
    }

    // if address is in cache, do fast search of TEXT segments in cache
    __block bool result = false;
    if ( (_dyldCacheAddress != nullptr) && (addr > _dyldCacheAddress) ) {
        if ( addr < (void*)((uint8_t*)_dyldCacheAddress + _dyldCacheAddress->mappedSize()) ) {
            uint64_t cacheSlide       = (uint64_t)_dyldCacheAddress - _dyldCacheAddress->unslidLoadAddress();
            uint64_t unslidTargetAddr = (uint64_t)addr - cacheSlide;
            _dyldCacheAddress->forEachImageTextSegment(^(uint64_t loadAddressUnslid, uint64_t textSegmentSize, const unsigned char* dylibUUID, const char* installName, bool& stop) {
                if ( (loadAddressUnslid <= unslidTargetAddr) && (unslidTargetAddr < loadAddressUnslid + textSegmentSize) ) {
                    if ( ml != nullptr )
                        *ml = (MachOLoaded*)(loadAddressUnslid + cacheSlide);
                    if ( path != nullptr )
                        *path = installName;
                    if ( textSize != nullptr )
                        *textSize = textSegmentSize;
                    result = true;
                    stop = true;
                }
            });
            if ( result )
                return result;
        }
    }

    // slow path - search image list
    infoForImageMappedAt(addr, ^(const LoadedImage& foundImage, uint8_t permissions) {
        if ( ml != nullptr )
            *ml = foundImage.loadedAddress();
        if ( path != nullptr )
            *path = foundImage.image()->path();
        if ( textSize != nullptr )
            *textSize = foundImage.image()->textSize();
        result = true;
    });

    return result;
}
// same as infoForImageMappedAt(), but only look at images not in the dyld cache
void AllImages::infoForNonCachedImageMappedAt(const void* addr, void (^handler)(const LoadedImage& foundImage, uint8_t permissions)) const
{
    __block uint8_t permissions;
    if ( _initialImages != nullptr ) {
        // being called during libSystem initialization, so _loadedImages not allocated yet
        for (const LoadedImage& li : *_initialImages) {
            if ( !((MachOAnalyzer*)li.loadedAddress())->inDyldCache() ) {
                if ( li.image()->containsAddress(addr, li.loadedAddress(), &permissions) ) {
                    handler(li, permissions);
                    break;
                }
            }
        }
        return;
    }

    withReadLock(^{
        for (const LoadedImage& li : _loadedImages) {
            if ( !((MachOAnalyzer*)li.loadedAddress())->inDyldCache() ) {
                if ( li.image()->containsAddress(addr, li.loadedAddress(), &permissions) ) {
                    handler(li, permissions);
                    break;
                }
            }
        }
    });
}
bool AllImages::immutableMemory(const void* addr, size_t length) const
{
    // quick check to see if in shared cache
    if ( _dyldCacheAddress != nullptr ) {
        bool readOnly;
        if ( _dyldCacheAddress->inCache(addr, length, readOnly) ) {
            return readOnly;
        }
    }

    __block bool result = false;
    withReadLock(^{
        // quick check to see if the range is outside all non-cached images loaded
        if ( ((uintptr_t)addr < _lowestNonCached) || ((uintptr_t)addr + length > _highestNonCached) ) {
            result = false;
            return;
        }
        // slow walk through all images, only look at images not in dyld cache
        for (const LoadedImage& li : _loadedImages) {
            if ( !((MachOAnalyzer*)li.loadedAddress())->inDyldCache() ) {
                uint8_t permissions;
                if ( li.image()->containsAddress(addr, li.loadedAddress(), &permissions) ) {
                    result = ((permissions & VM_PROT_WRITE) == 0) && li.image()->neverUnload();
                    break;
                }
            }
        }
    });
    return result;
}
void AllImages::infoForImageWithLoadAddress(const MachOLoaded* mh, void (^handler)(const LoadedImage& foundImage)) const
{
    withReadLock(^{
        for (const LoadedImage& li : _loadedImages) {
            if ( li.loadedAddress() == mh ) {
                handler(li);
                break;
            }
        }
    });
}
bool AllImages::findImageNum(closure::ImageNum imageNum, LoadedImage& foundImage) const
{
    if ( _initialImages != nullptr ) {
        // being called during libSystem initialization, so _loadedImages not allocated yet
        for (const LoadedImage& li : *_initialImages) {
            if ( li.image()->representsImageNum(imageNum) ) {
                foundImage = li;
                return true;
            }
        }
        return false;
    }

    for (const LoadedImage& li : _loadedImages) {
        if ( li.image()->representsImageNum(imageNum) ) {
            foundImage = li;
            return true;
        }
    }
    return false;
}
const MachOLoaded* AllImages::findDependent(const MachOLoaded* mh, uint32_t depIndex)
{
    __block const MachOLoaded* result = nullptr;
    withReadLock(^{
        for (const LoadedImage& li : _loadedImages) {
            if ( li.loadedAddress() == mh ) {
                closure::ImageNum depImageNum = li.image()->dependentImageNum(depIndex);
                LoadedImage depLi;
                if ( findImageNum(depImageNum, depLi) )
                    result = depLi.loadedAddress();
                break;
            }
        }
    });
    return result;
}
void AllImages::breadthFirstRecurseDependents(Array<closure::ImageNum>& visited, const LoadedImage& nodeLi, bool& stopped, void (^handler)(const LoadedImage& aLoadedImage, bool& stop)) const
{
    // call handler on all direct dependents (unless already visited)
    STACK_ALLOC_ARRAY(LoadedImage, dependentsToRecurse, 256);
    nodeLi.image()->forEachDependentImage(^(uint32_t depIndex, closure::Image::LinkKind kind, closure::ImageNum depImageNum, bool& depStop) {
        if ( kind == closure::Image::LinkKind::upward )
            return;
        if ( visited.contains(depImageNum) )
            return;
        LoadedImage depLi;
        if ( !findImageNum(depImageNum, depLi) )
            return;
        handler(depLi, depStop);
        visited.push_back(depImageNum);
        if ( depStop ) {
            stopped = true;
            return;
        }
        dependentsToRecurse.push_back(depLi);
    });
    if ( stopped )
        return;
    // recurse on all dependents just visited
    for (LoadedImage& depLi : dependentsToRecurse) {
        breadthFirstRecurseDependents(visited, depLi, stopped, handler);
        if ( stopped )
            break;
    }
}
void AllImages::visitDependentsTopDown(const LoadedImage& start, void (^handler)(const LoadedImage& aLoadedImage, bool& stop)) const
{
    withReadLock(^{
        STACK_ALLOC_ARRAY(closure::ImageNum, visited, count());
        bool stop = false;
        handler(start, stop);
        if ( stop )
            return;
        visited.push_back(start.image()->imageNum());
        breadthFirstRecurseDependents(visited, start, stop, handler);
    });
}
const MachOLoaded* AllImages::mainExecutable() const
{
    assert(_programVars != nullptr);
    return (const MachOLoaded*)_programVars->mh;
}

const closure::Image* AllImages::mainExecutableImage() const
{
    assert(_mainClosure != nullptr);
    return _mainClosure->images()->imageForNum(_mainClosure->topImage());
}
void AllImages::setMainPath(const char* path)
{
    _mainExeOverridePath = path;
}

const char* AllImages::imagePath(const closure::Image* image) const
{
#if __IPHONE_OS_VERSION_MIN_REQUIRED
    // on iOS and watchOS, apps may be moved on device after closure built
    if ( _mainExeOverridePath != nullptr ) {
        if ( image == mainExecutableImage() )
            return _mainExeOverridePath;
    }
#endif
    return image->path();
}
dyld_platform_t AllImages::platform() const {
    return _platform;
}
void AllImages::incRefCount(const mach_header* loadAddress)
{
    for (DlopenCount& entry : _dlopenRefCounts) {
        if ( entry.loadAddress == loadAddress ) {
            // found existing DlopenCount entry, bump counter
            entry.refCount += 1;
            return;
        }
    }
    // no existing DlopenCount, add new one
    _dlopenRefCounts.push_back({ loadAddress, 1 });
}
void AllImages::decRefCount(const mach_header* loadAddress)
{
    bool doCollect = false;
    for (DlopenCount& entry : _dlopenRefCounts) {
        if ( entry.loadAddress == loadAddress ) {
            // found existing DlopenCount entry, decrement counter
            entry.refCount -= 1;
            if ( entry.refCount == 0 ) {
                _dlopenRefCounts.erase(entry);
                doCollect = true;
            }
            break;
        }
    }
    if ( doCollect )
        garbageCollectImages();
}
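// Reference-count behavior sketch (illustrative, not dyld code): each dlopen()
// of the same non-cached image bumps one DlopenCount entry, and the image only
// becomes a garbage-collection candidate when the count returns to zero:
//
//     void* h1 = dlopen("libfoo.dylib", RTLD_NOW);   // refCount 0 -> 1
//     void* h2 = dlopen("libfoo.dylib", RTLD_NOW);   // refCount 1 -> 2
//     dlclose(h1);                                   // refCount 2 -> 1, no GC
//     dlclose(h2);                                   // refCount 1 -> 0, entry
//                                                    // erased, GC pass runs
//
// Images in the dyld shared cache are never ref-counted or unloaded (see the
// !inDyldCache() guards around incRefCount() in dlopen() below).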
#if __MAC_OS_X_VERSION_MIN_REQUIRED
NSObjectFileImage AllImages::addNSObjectFileImage(const OFIInfo& image)
{
    __block uint64_t imageNum = 0;
    withWriteLock(^{
        imageNum = ++_nextObjectFileImageNum;
        _objectFileImages.push_back(image);
        _objectFileImages.back().imageNum = imageNum;
    });
    return (NSObjectFileImage)imageNum;
}

bool AllImages::forNSObjectFileImage(NSObjectFileImage imageHandle,
                                     void (^handler)(OFIInfo& image)) {
    uint64_t imageNum = (uint64_t)imageHandle;
    bool __block foundImage = false;
    withReadLock(^{
        for (OFIInfo& ofi : _objectFileImages) {
            if ( ofi.imageNum == imageNum ) {
                handler(ofi);
                foundImage = true;
                break;
            }
        }
    });
    return foundImage;
}

void AllImages::removeNSObjectFileImage(NSObjectFileImage imageHandle)
{
    uint64_t imageNum = (uint64_t)imageHandle;
    withWriteLock(^{
        for (OFIInfo& ofi : _objectFileImages) {
            if ( ofi.imageNum == imageNum ) {
                _objectFileImages.erase(ofi);
                break;
            }
        }
    });
}
#endif
class VIS_HIDDEN Reaper
{
public:
    struct ImageAndUse
    {
        const LoadedImage* li;
        bool               inUse;
    };
    Reaper(Array<ImageAndUse>& unloadables, AllImages*);
    void garbageCollect();
    void finalizeDeadImages();

private:
    void markDirectlyDlopenedImagesAsUsed();
    void markDependentOfInUseImages();
    void markDependentsOf(const LoadedImage*);
    uint32_t inUseCount();
    void dump(const char* msg);

    Array<ImageAndUse>& _unloadables;
    AllImages*          _allImages;
    uint32_t            _deadCount;
};

Reaper::Reaper(Array<ImageAndUse>& unloadables, AllImages* all)
 : _unloadables(unloadables), _allImages(all), _deadCount(0)
{
}
void Reaper::markDirectlyDlopenedImagesAsUsed()
{
    for (AllImages::DlopenCount& entry : _allImages->_dlopenRefCounts) {
        if ( entry.refCount != 0 ) {
            for (ImageAndUse& iu : _unloadables) {
                if ( iu.li->loadedAddress() == entry.loadAddress ) {
                    iu.inUse = true;
                    break;
                }
            }
        }
    }
}
uint32_t Reaper::inUseCount()
{
    uint32_t count = 0;
    for (ImageAndUse& iu : _unloadables) {
        if ( iu.inUse )
            ++count;
    }
    return count;
}
void Reaper::markDependentsOf(const LoadedImage* li)
{
    li->image()->forEachDependentImage(^(uint32_t depIndex, closure::Image::LinkKind kind, closure::ImageNum depImageNum, bool& stop) {
        for (ImageAndUse& iu : _unloadables) {
            if ( !iu.inUse && iu.li->image()->representsImageNum(depImageNum) ) {
                iu.inUse = true;
                break;
            }
        }
    });
}
void Reaper::markDependentOfInUseImages()
{
    for (ImageAndUse& iu : _unloadables) {
        if ( iu.inUse )
            markDependentsOf(iu.li);
    }
}
void Reaper::dump(const char* msg)
{
    //for (ImageAndUse& iu : _unloadables) {
    //    log("  in-use=%d %s\n", iu.inUse, iu.li->image()->path());
    //}
}
void Reaper::garbageCollect()
{
    //dump("all unloadable images");

    // mark all dylibs directly dlopen'ed as in use
    markDirectlyDlopenedImagesAsUsed();

    //dump("directly dlopen()'ed marked");

    // iteratively mark dependents of in-use dylibs as in-use until in-use count stops changing
    uint32_t lastCount = inUseCount();
    bool countChanged = false;
    do {
        markDependentOfInUseImages();
        //dump("dependents marked");
        uint32_t newCount = inUseCount();
        countChanged = (newCount != lastCount);
        lastCount = newCount;
    } while (countChanged);

    _deadCount = (uint32_t)_unloadables.count() - inUseCount();
}
void Reaper::finalizeDeadImages()
{
    if ( _deadCount == 0 )
        return;
    __cxa_range_t ranges[_deadCount];
    __cxa_range_t* rangesArray = ranges;
    __block unsigned int rangesCount = 0;
    for (ImageAndUse& iu : _unloadables) {
        if ( iu.inUse )
            continue;
        iu.li->image()->forEachDiskSegment(^(uint32_t segIndex, uint32_t fileOffset, uint32_t fileSize, int64_t vmOffset, uint64_t vmSize, uint8_t permissions, bool& stop) {
            if ( permissions & VM_PROT_EXECUTE ) {
                rangesArray[rangesCount].addr   = (char*)(iu.li->loadedAddress()) + vmOffset;
                rangesArray[rangesCount].length = (size_t)vmSize;
                ++rangesCount;
            }
        });
    }
    __cxa_finalize_ranges(ranges, rangesCount);
}
// This function is called at the end of dlclose() when the reference count goes to zero.
// The dylib being unloaded may have brought in other dependent dylibs when it was loaded.
// Those dependent dylibs need to be unloaded, but only if they are not referenced by
// something else. We use a standard mark and sweep garbage collection.
//
// The tricky part is that when a dylib is unloaded it may have a termination function that
// can run and itself call dlclose() on yet another dylib. The problem is that this
// sort of garbage collection is not re-entrant. Instead a terminator's call to dlclose()
// which calls garbageCollectImages() will just set a flag to re-do the garbage collection
// when the current pass is done.
//
// Also note that this is done within the _loadedImages writer lock, so any dlopen/dlclose
// on other threads are blocked while this garbage collection runs.
void AllImages::garbageCollectImages()
{
    // if some other thread is currently GC'ing images, let other thread do the work
    int32_t newCount = OSAtomicIncrement32(&_gcCount);
    if ( newCount != 1 )
        return;

    do {
        STACK_ALLOC_ARRAY(Reaper::ImageAndUse, unloadables, _loadedImages.count());
        for (const LoadedImage& li : _loadedImages) {
            if ( !li.image()->neverUnload() /*&& !li.neverUnload()*/ ) {
                unloadables.push_back({&li, false});
                //fprintf(stderr, "unloadable[%lu] %p %s\n", unloadables.count(), li.loadedAddress(), li.image()->path());
            }
        }

        // make reaper object to do garbage collection and notifications
        Reaper reaper(unloadables, this);
        reaper.garbageCollect();

        // FIXME: we should sort dead images so higher level ones are terminated first

        // call cxa_finalize_ranges of dead images
        reaper.finalizeDeadImages();

        // FIXME: call static terminators of dead images
        // FIXME: DOF unregister

        //fprintf(stderr, "_loadedImages before GC removals:\n");
        //for (const LoadedImage& li : _loadedImages) {
        //    fprintf(stderr, "   loadAddr=%p, path=%s\n", li.loadedAddress(), li.image()->path());
        //}

        // make copy of LoadedImages we want to remove
        // because unloadables[] points into LoadedImage we are shrinking
        STACK_ALLOC_ARRAY(LoadedImage, unloadImages, _loadedImages.count());
        for (const Reaper::ImageAndUse& iu : unloadables) {
            if ( !iu.inUse )
                unloadImages.push_back(*iu.li);
        }
        // remove entries from _loadedImages
        if ( !unloadImages.empty() ) {
            removeImages(unloadImages);
        }

        //fprintf(stderr, "_loadedImages after GC removals:\n");
        //for (const LoadedImage& li : _loadedImages) {
        //    fprintf(stderr, "   loadAddr=%p, path=%s\n", li.loadedAddress(), li.image()->path());
        //}

        // if some other thread called GC during our work, redo GC on its behalf
        newCount = OSAtomicDecrement32(&_gcCount);
    } while (newCount > 0);
}
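// The _gcCount counter is what makes the non-re-entrant GC described above
// safe. The same pattern in isolation (a sketch, assuming a 32-bit atomic
// counter; names are hypothetical):
//
//     static volatile int32_t gGCCount = 0;
//
//     void requestGC()
//     {
//         if ( OSAtomicIncrement32(&gGCCount) != 1 )
//             return;     // a GC pass is already running; it will redo the work
//         do {
//             // ... one full mark/sweep pass ...
//         } while ( OSAtomicDecrement32(&gGCCount) > 0 );  // redo if requests arrived
//     }
//
// Any dlclose() that lands while a pass is running just bumps the counter, and
// the thread already inside the loop runs another pass on its behalf.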
void AllImages::addLoadNotifier(NotifyFunc func)
{
    // callback about already loaded images
    withReadLock(^{
        for (const LoadedImage& li : _loadedImages) {
            dyld3::ScopedTimer timer(DBG_DYLD_TIMING_FUNC_FOR_ADD_IMAGE, (uint64_t)li.loadedAddress(), (uint64_t)func, 0);
            log_notifications("dyld: add notifier %p called with mh=%p\n", func, li.loadedAddress());
            if ( li.image()->inDyldCache() )
                func(li.loadedAddress(), (uintptr_t)_dyldCacheSlide);
            else
                func(li.loadedAddress(), li.loadedAddress()->getSlide());
        }
    });

    // add to list of functions to call about future loads
    withNotifiersLock(^{
        _loadNotifiers.push_back(func);
    });
}
void AllImages::addUnloadNotifier(NotifyFunc func)
{
    // add to list of functions to call about future unloads
    withNotifiersLock(^{
        _unloadNotifiers.push_back(func);
    });
}
void AllImages::addLoadNotifier(LoadNotifyFunc func)
{
    // callback about already loaded images
    withReadLock(^{
        for (const LoadedImage& li : _loadedImages) {
            dyld3::ScopedTimer timer(DBG_DYLD_TIMING_FUNC_FOR_ADD_IMAGE, (uint64_t)li.loadedAddress(), (uint64_t)func, 0);
            log_notifications("dyld: add notifier %p called with mh=%p\n", func, li.loadedAddress());
            func(li.loadedAddress(), li.image()->path(), !li.image()->neverUnload());
        }
    });

    // add to list of functions to call about future loads
    withNotifiersLock(^{
        _loadNotifiers2.push_back(func);
    });
}
void AllImages::setObjCNotifiers(_dyld_objc_notify_mapped map, _dyld_objc_notify_init init, _dyld_objc_notify_unmapped unmap)
{
    _objcNotifyMapped   = map;
    _objcNotifyInit     = init;
    _objcNotifyUnmapped = unmap;

    // callback about already loaded images
    uint32_t maxCount = count();
    STACK_ALLOC_ARRAY(const mach_header*, mhs, maxCount);
    STACK_ALLOC_ARRAY(const char*, paths, maxCount);
    // don't need _mutex here because this is called when process is still single threaded
    for (const LoadedImage& li : _loadedImages) {
        if ( li.image()->hasObjC() ) {
            paths.push_back(imagePath(li.image()));
            mhs.push_back(li.loadedAddress());
        }
    }
    if ( !mhs.empty() ) {
        (*map)((uint32_t)mhs.count(), &paths[0], &mhs[0]);
        if ( log_notifications("dyld: objc-mapped-notifier called with %ld images:\n", mhs.count()) ) {
            for (uintptr_t i=0; i < mhs.count(); ++i) {
                log_notifications("dyld: objc-mapped: %p %s\n", mhs[i], paths[i]);
            }
        }
    }
}
void AllImages::applyInterposingToDyldCache(const closure::Closure* closure)
{
    dyld3::ScopedTimer timer(DBG_DYLD_TIMING_APPLY_INTERPOSING, 0, 0, 0);
    const uintptr_t cacheStart = (uintptr_t)_dyldCacheAddress;
    __block closure::ImageNum lastCachedDylibImageNum = 0;
    __block const closure::Image* lastCachedDylibImage = nullptr;
    __block bool suspendedAccounting = false;
    closure->forEachPatchEntry(^(const closure::Closure::PatchEntry& entry) {
        if ( entry.overriddenDylibInCache != lastCachedDylibImageNum ) {
            lastCachedDylibImage = closure::ImageArray::findImage(imagesArrays(), entry.overriddenDylibInCache);
            assert(lastCachedDylibImage != nullptr);
            lastCachedDylibImageNum = entry.overriddenDylibInCache;
        }
        if ( !suspendedAccounting ) {
            Loader::vmAccountingSetSuspended(true, log_fixups);
            suspendedAccounting = true;
        }
        uintptr_t newValue = 0;
        LoadedImage foundImage;
        switch ( entry.replacement.image.kind ) {
            case closure::Image::ResolvedSymbolTarget::kindImage:
                assert(findImageNum(entry.replacement.image.imageNum, foundImage));
                newValue = (uintptr_t)(foundImage.loadedAddress()) + (uintptr_t)entry.replacement.image.offset;
                break;
            case closure::Image::ResolvedSymbolTarget::kindSharedCache:
                newValue = (uintptr_t)_dyldCacheAddress + (uintptr_t)entry.replacement.sharedCache.offset;
                break;
            case closure::Image::ResolvedSymbolTarget::kindAbsolute:
                // this means the symbol was missing in the cache override dylib, so set any uses to NULL
                newValue = (uintptr_t)entry.replacement.absolute.value;
                break;
            default:
                assert(0 && "bad replacement kind");
        }
        lastCachedDylibImage->forEachPatchableUseOfExport(entry.exportCacheOffset, ^(closure::Image::PatchableExport::PatchLocation patchLocation) {
            uintptr_t* loc = (uintptr_t*)(cacheStart + patchLocation.cacheOffset);
#if __has_feature(ptrauth_calls)
            if ( patchLocation.authenticated ) {
                MachOLoaded::ChainedFixupPointerOnDisk fixupInfo;
                fixupInfo.authRebase.auth      = true;
                fixupInfo.authRebase.addrDiv   = patchLocation.usesAddressDiversity;
                fixupInfo.authRebase.diversity = patchLocation.discriminator;
                fixupInfo.authRebase.key       = patchLocation.key;
                *loc = fixupInfo.signPointer(loc, newValue + patchLocation.getAddend());
                log_fixups("dyld: cache fixup: *%p = %p (JOP: diversity 0x%04X, addr-div=%d, key=%s)\n",
                           loc, (void*)*loc, patchLocation.discriminator, patchLocation.usesAddressDiversity, patchLocation.keyName());
                return;
            }
#endif
            log_fixups("dyld: cache fixup: *%p = 0x%0lX (dyld cache patch)\n", loc, newValue + (uintptr_t)patchLocation.getAddend());
            *loc = newValue + (uintptr_t)patchLocation.getAddend();
        });
    });
    if ( suspendedAccounting )
        Loader::vmAccountingSetSuspended(false, log_fixups);
}
void AllImages::runStartupInitialzers()
{
    __block bool mainExecutableInitializerNeedsToRun = true;
    __block uint32_t imageIndex = 0;
    while ( mainExecutableInitializerNeedsToRun ) {
        __block const closure::Image* image = nullptr;
        withReadLock(^{
            image = _loadedImages[imageIndex].image();
            if ( _loadedImages[imageIndex].loadedAddress()->isMainExecutable() )
                mainExecutableInitializerNeedsToRun = false;
        });
        runInitialzersBottomUp(image);
        ++imageIndex;
    }
}
// Find image in _loadedImages which has ImageNum == num.
// Try indexHint first; if the hint is wrong, update it, so the next use is faster.
LoadedImage AllImages::findImageNum(closure::ImageNum num, uint32_t& indexHint)
{
    __block LoadedImage copy;
    withReadLock(^{
        if ( (indexHint >= _loadedImages.count()) || !_loadedImages[indexHint].image()->representsImageNum(num) ) {
            // hint was wrong, do exhaustive search
            for (indexHint=0; indexHint < _loadedImages.count(); ++indexHint) {
                if ( _loadedImages[indexHint].image()->representsImageNum(num) )
                    break;
            }
            assert(indexHint < _loadedImages.count());
        }
        copy = _loadedImages[indexHint];
    });
    return copy;
}
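// Hint usage sketch (hypothetical caller, not dyld code): callers that look up
// the same ImageNum repeatedly keep the hint variable across calls, so the
// common case is one bounds check plus one representsImageNum() test instead
// of a full scan:
//
//     uint32_t hint = 0;                          // persists across iterations
//     for (closure::ImageNum num : numsToVisit) { // numsToVisit is illustrative
//         LoadedImage li = gAllImages.findImageNum(num, hint);
//         // hint now indexes li within _loadedImages (until the array moves)
//     }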
// Change the state of the LoadedImage in _loadedImages which has ImageNum == num.
// Only change state if current state is expectedCurrentState (atomic swap).
bool AllImages::swapImageState(closure::ImageNum num, uint32_t& indexHint, LoadedImage::State expectedCurrentState, LoadedImage::State newState)
{
    __block bool result = false;
    withWriteLock(^{
        if ( (indexHint >= _loadedImages.count()) || !_loadedImages[indexHint].image()->representsImageNum(num) ) {
            // hint was wrong, do exhaustive search
            for (indexHint=0; indexHint < _loadedImages.count(); ++indexHint) {
                if ( _loadedImages[indexHint].image()->representsImageNum(num) )
                    break;
            }
            assert(indexHint < _loadedImages.count());
        }
        if ( _loadedImages[indexHint].state() == expectedCurrentState ) {
            _loadedImages[indexHint].setState(newState);
            result = true;
        }
    });
    return result;
}
// dyld3 pre-builds the order initializers need to be run (bottom up) in a list in the closure.
// This method uses that list to run all initializers.
// Because an initializer may call dlopen() and/or create threads, the _loadedImages array
// may move under us. So, never keep a pointer into it. Always reference images by ImageNum
// and use a hint to make that faster in the case where _loadedImages does not move.
void AllImages::runInitialzersBottomUp(const closure::Image* topImage)
{
    // walk closure specified initializer list, already ordered bottom up
    topImage->forEachImageToInitBefore(^(closure::ImageNum imageToInit, bool& stop) {
        // get copy of LoadedImage about imageToInit, but don't keep reference into _loadedImages, because it may move if initializers call dlopen()
        uint32_t indexHint = 0;
        LoadedImage loadedImageCopy = findImageNum(imageToInit, indexHint);
        // skip if the image is already inited, or in process of being inited (dependency cycle)
        if ( (loadedImageCopy.state() == LoadedImage::State::fixedUp) && swapImageState(imageToInit, indexHint, LoadedImage::State::fixedUp, LoadedImage::State::beingInited) ) {
            // tell objc to run any +load methods in image
            if ( (_objcNotifyInit != nullptr) && loadedImageCopy.image()->mayHavePlusLoads() ) {
                dyld3::ScopedTimer timer(DBG_DYLD_TIMING_OBJC_INIT, (uint64_t)loadedImageCopy.loadedAddress(), 0, 0);
                const char* path = imagePath(loadedImageCopy.image());
                log_notifications("dyld: objc-init-notifier called with mh=%p, path=%s\n", loadedImageCopy.loadedAddress(), path);
                (*_objcNotifyInit)(path, loadedImageCopy.loadedAddress());
            }

            // run all initializers in image
            runAllInitializersInImage(loadedImageCopy.image(), loadedImageCopy.loadedAddress());

            // advance state to inited
            swapImageState(imageToInit, indexHint, LoadedImage::State::beingInited, LoadedImage::State::inited);
        }
    });
}
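// The swapImageState() calls above form a small per-image state machine
// (states from LoadedImage::State; only the transitions used here are shown):
//
//     // sketch of the guard logic, not literal code:
//     if ( state == fixedUp && swap(fixedUp -> beingInited) ) {
//         runInitializers();
//         swap(beingInited -> inited);
//     }
//
// An image revisited through a dependency cycle is already in beingInited, so
// the compare-and-set from fixedUp fails and the image is skipped; that is
// what breaks the cycle.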
void AllImages::runLibSystemInitializer(const LoadedImage& libSystem)
{
    // run all initializers in libSystem.dylib
    runAllInitializersInImage(libSystem.image(), libSystem.loadedAddress());

    // Note: during libSystem's initialization, libdyld_initializer() is called, which copies _initialImages to _loadedImages

    // mark libSystem.dylib as being inited, so a later recursive-init will not re-run it
    for (LoadedImage& li : _loadedImages) {
        if ( li.loadedAddress() == libSystem.loadedAddress() ) {
            li.setState(LoadedImage::State::inited);
            break;
        }
    }
}
void AllImages::runAllInitializersInImage(const closure::Image* image, const MachOLoaded* ml)
{
    image->forEachInitializer(ml, ^(const void* func) {
        Initializer initFunc = (Initializer)func;
#if __has_feature(ptrauth_calls)
        initFunc = (Initializer)__builtin_ptrauth_sign_unauthenticated((void*)initFunc, 0, 0);
#endif
        {
            // note: timer must be a named local so it spans the initializer call
            // (an unnamed temporary would be destroyed before initFunc runs)
            ScopedTimer timer(DBG_DYLD_TIMING_STATIC_INITIALIZER, (uint64_t)ml, (uint64_t)func, 0);
            initFunc(NXArgc, NXArgv, environ, appleParams, _programVars);
        }
        log_initializers("dyld: called initializer %p in %s\n", initFunc, image->path());
    });
}
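// The five arguments passed to each initializer above match the historical
// dyld initializer signature, so an image-level constructor may declare any
// prefix of them. Illustrative client dylib code (not dyld code):
//
//     __attribute__((constructor))
//     static void myInit(int argc, const char* argv[], const char* envp[],
//                        const char* apple[], const struct ProgramVars* vars)
//     {
//         // argc/argv/envp/apple mirror what main() receives
//     }
//
// Most constructors take no arguments; the extra parameters are optional.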
const MachOLoaded* AllImages::dlopen(Diagnostics& diag, const char* path, bool rtldNoLoad, bool rtldLocal, bool rtldNoDelete, bool fromOFI, const void* callerAddress)
{
    // quick check if path is in shared cache and already loaded
    if ( _dyldCacheAddress != nullptr ) {
        uint32_t dyldCacheImageIndex;
        if ( _dyldCacheAddress->hasImagePath(path, dyldCacheImageIndex) ) {
            uint64_t mTime;
            uint64_t inode;
            const MachOLoaded* mh = (MachOLoaded*)_dyldCacheAddress->getIndexedImageEntry(dyldCacheImageIndex, mTime, inode);
            // Note: we do not need readLock because this is within global dlopen lock
            for (const LoadedImage& li : _loadedImages) {
                if ( li.loadedAddress() == mh ) {
                    return mh;
                }
            }
        }
    }

    __block closure::ImageNum callerImageNum = 0;
    STACK_ALLOC_ARRAY(LoadedImage, loadedList, 1024);
    for (const LoadedImage& li : _loadedImages) {
        loadedList.push_back(li);
        uint8_t permissions;
        if ( (callerImageNum == 0) && li.image()->containsAddress(callerAddress, li.loadedAddress(), &permissions) ) {
            callerImageNum = li.image()->imageNum();
        }
        //fprintf(stderr, "mh=%p, image=%p, imageNum=0x%04X, path=%s\n", li.loadedAddress(), li.image(), li.image()->imageNum(), li.image()->path());
    }
    uintptr_t alreadyLoadedCount = loadedList.count();

    closure::ImageNum topImageNum = 0;
    const closure::DlopenClosure* newClosure;

    // First try with closures from the shared cache permitted.
    // Then try again with forcing a new closure
    for (bool canUseSharedCacheClosure : { true, false }) {
        closure::FileSystemPhysical fileSystem;
        closure::ClosureBuilder::AtPath atPathHandling = (_allowAtPaths ? closure::ClosureBuilder::AtPath::all : closure::ClosureBuilder::AtPath::onlyInRPaths);
        closure::ClosureBuilder cb(_nextImageNum, fileSystem, _dyldCacheAddress, true, closure::gPathOverrides, atPathHandling);
        newClosure = cb.makeDlopenClosure(path, _mainClosure, loadedList, callerImageNum, rtldNoLoad, canUseSharedCacheClosure, &topImageNum);
        if ( newClosure == closure::ClosureBuilder::sRetryDlopenClosure ) {
            log_apis("   dlopen: closure builder needs to retry: %s\n", path);
            assert(canUseSharedCacheClosure);
            continue;
        }
        if ( (newClosure == nullptr) && (topImageNum == 0) ) {
            if ( cb.diagnostics().hasError())
                diag.error("%s", cb.diagnostics().errorMessage());
            else if ( !rtldNoLoad )
                diag.error("dlopen(): file not found: %s", path);
            return nullptr;
        }
        // save off next available ImageNum for use by next call to dlopen()
        _nextImageNum = cb.nextFreeImageNum();
        break;
    }

    if ( newClosure != nullptr ) {
        // if new closure contains an ImageArray, add it to list
        if ( const closure::ImageArray* newArray = newClosure->images() ) {
            appendToImagesArray(newArray);
        }
        log_apis("   dlopen: made closure: %p\n", newClosure);
    }

    // if already loaded, just bump refCount and return
    if ( (newClosure == nullptr) && (topImageNum != 0) ) {
        for (LoadedImage& li : _loadedImages) {
            if ( li.image()->imageNum() == topImageNum ) {
                // is already loaded
                const MachOLoaded* topLoadAddress = li.loadedAddress();
                if ( !li.image()->inDyldCache() )
                    incRefCount(topLoadAddress);
                log_apis("   dlopen: already loaded as '%s'\n", li.image()->path());
                // if previously opened with RTLD_LOCAL, but now opened with RTLD_GLOBAL, unhide it
                if ( !rtldLocal && li.hideFromFlatSearch() )
                    li.setHideFromFlatSearch(false);
                // if called with RTLD_NODELETE, mark it as never-unload
                if ( rtldNoDelete )
                    li.markLeaveMapped();
                return topLoadAddress;
            }
        }
    }

    // run loader to load all new images
    Loader loader(loadedList, _dyldCacheAddress, imagesArrays(), &dyld3::log_loads, &dyld3::log_segments, &dyld3::log_fixups, &dyld3::log_dofs);
    const closure::Image* topImage = closure::ImageArray::findImage(imagesArrays(), topImageNum);
    if ( newClosure == nullptr ) {
        if ( topImageNum < dyld3::closure::kLastDyldCacheImageNum )
            log_apis("   dlopen: using image in dyld shared cache %p\n", topImage);
        else
            log_apis("   dlopen: using pre-built dlopen closure %p\n", topImage);
    }
    uintptr_t topIndex = loadedList.count();
    LoadedImage topLoadedImage = LoadedImage::make(topImage);
    if ( rtldLocal && !topImage->inDyldCache() )
        topLoadedImage.setHideFromFlatSearch(true);
    if ( rtldNoDelete && !topImage->inDyldCache() )
        topLoadedImage.markLeaveMapped();
    loader.addImage(topLoadedImage);

    // recursively load all dependents and fill in allImages array
    loader.completeAllDependents(diag, topIndex);
    if ( diag.hasError() )
        return nullptr;
    loader.mapAndFixupAllImages(diag, _processDOFs, fromOFI, topIndex);
    if ( diag.hasError() )
        return nullptr;

    const MachOLoaded* topLoadAddress = loadedList[topIndex].loadedAddress();

    // bump dlopen refcount of image directly loaded
    if ( !topImage->inDyldCache() )
        incRefCount(topLoadAddress);

    // tell gAllImages about new images
    const uint32_t newImageCount = (uint32_t)(loadedList.count() - alreadyLoadedCount);
    addImages(loadedList.subArray(alreadyLoadedCount, newImageCount));

    // if closure adds images that override dyld cache, patch cache
    if ( newClosure != nullptr )
        applyInterposingToDyldCache(newClosure);

    runImageNotifiers(loadedList.subArray(alreadyLoadedCount, newImageCount));

    runInitialzersBottomUp(topImage);

    return topLoadAddress;
}
void AllImages::appendToImagesArray(const closure::ImageArray* newArray)
{
    _imagesArrays.push_back(newArray);
}

const Array<const closure::ImageArray*>& AllImages::imagesArrays()
{
    return _imagesArrays.array();
}

bool AllImages::isRestricted() const
{
    return !_allowEnvPaths;
}
} // namespace dyld3