/*
 * Copyright (c) 2017 Apple Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
#include <sys/types.h>
#include <sys/sysctl.h>
#include <mach/mach_time.h> // mach_absolute_time()
#include <libkern/OSAtomic.h>
#include <uuid/uuid.h>
#include <mach-o/dyld_images.h>
#include <libc_private.h>

#include "AllImages.h"
#include "libdyldEntryVector.h"
#include "DyldSharedCache.h"
#include "PathOverrides.h"
#include "ClosureBuilder.h"
#include "ClosureFileSystemPhysical.h"
#include "RootsChecker.h"

#include "objc-shared-cache.h"
extern const char** appleParams;

// should be a header for these
struct __cxa_range_t {
    const void* addr;   // members restored from their uses in Reaper::finalizeDeadImages()
    size_t      length;
};
extern "C" void __cxa_finalize_ranges(const __cxa_range_t ranges[], unsigned int count);
extern "C" int  __cxa_atexit(void (*func)(void*), void* arg, void* dso);
VIS_HIDDEN void* __ptrauth_dyld_address_auth gUseDyld3 = nullptr;
namespace dyld3 {

/////////////////////  AllImages  ////////////////////////////

AllImages gAllImages;
void AllImages::init(const closure::LaunchClosure* closure, const DyldSharedCache* dyldCacheLoadAddress, const char* dyldCachePath,
                     const Array<LoadedImage>& initialImages)
{
    _mainClosure        = closure;
    _initialImages      = &initialImages;
    _dyldCacheAddress   = dyldCacheLoadAddress;
    _dyldCachePath      = dyldCachePath;

    if ( _dyldCacheAddress ) {
        _dyldCacheSlide = (uint64_t)dyldCacheLoadAddress - dyldCacheLoadAddress->unslidLoadAddress();
        _imagesArrays.push_back(dyldCacheLoadAddress->cachedDylibsImageArray());
        if ( auto others = dyldCacheLoadAddress->otherOSImageArray() )
            _imagesArrays.push_back(others);
    }
    _imagesArrays.push_back(_mainClosure->images());

    // record first ImageNum to use for dlopen() calls
    _mainClosure->images()->forEachImage(^(const dyld3::closure::Image* image, bool& stop) {
        closure::ImageNum num = image->imageNum();
        if ( num >= _nextImageNum )
            _nextImageNum = num+1;
    });

    // Make temporary old image array, so libSystem initializers can be debugged
    STACK_ALLOC_ARRAY(dyld_image_info, oldDyldInfo, initialImages.count());
    for (const LoadedImage& li : initialImages) {
        oldDyldInfo.push_back({li.loadedAddress(), li.image()->path(), 0});
    }
    _oldAllImageInfos->infoArray      = &oldDyldInfo[0];
    _oldAllImageInfos->infoArrayCount = (uint32_t)oldDyldInfo.count();
    _oldAllImageInfos->notification(dyld_image_adding, _oldAllImageInfos->infoArrayCount, _oldAllImageInfos->infoArray);
    _oldAllImageInfos->infoArray      = nullptr;
    _oldAllImageInfos->infoArrayCount = 0;

    _processDOFs = Loader::dtraceUserProbesEnabled();
}
void AllImages::setProgramVars(ProgramVars* vars, bool keysOff, bool osBinariesOnly)
{
    _programVars = vars;
    _archs = &GradedArchs::forCurrentOS(keysOff, osBinariesOnly);
}
void AllImages::setLaunchMode(uint32_t flags)
{
    _launchMode = flags;
}
AllImages::MainFunc AllImages::getDriverkitMain()
{
    return _driverkitMain;
}

void AllImages::setDriverkitMain(MainFunc mainFunc)
{
    _driverkitMain = mainFunc;
}
void AllImages::setRestrictions(bool allowAtPaths, bool allowEnvPaths)
{
    _allowAtPaths  = allowAtPaths;
    _allowEnvPaths = allowEnvPaths;
}

void AllImages::setHasCacheOverrides(bool someCacheImageOverriden)
{
    _someImageOverridden = someCacheImageOverriden;
}

bool AllImages::hasCacheOverrides() const {
    return _someImageOverridden;
}
void AllImages::applyInitialImages()
{
    addImages(*_initialImages);
    runImageNotifiers(*_initialImages);
    runImageCallbacks(*_initialImages);
    _initialImages = nullptr; // this was stack allocated
}
void AllImages::withReadLock(void (^work)()) const
{
#ifdef OS_UNFAIR_RECURSIVE_LOCK_INIT
    os_unfair_recursive_lock_lock(&_globalLock);
    work();
    os_unfair_recursive_lock_unlock(&_globalLock);
#else
    pthread_mutex_lock(&_globalLock);
    work();
    pthread_mutex_unlock(&_globalLock);
#endif
}

void AllImages::withWriteLock(void (^work)())
{
#ifdef OS_UNFAIR_RECURSIVE_LOCK_INIT
    os_unfair_recursive_lock_lock(&_globalLock);
    work();
    os_unfair_recursive_lock_unlock(&_globalLock);
#else
    pthread_mutex_lock(&_globalLock);
    work();
    pthread_mutex_unlock(&_globalLock);
#endif
}

void AllImages::withNotifiersLock(void (^work)()) const
{
#ifdef OS_UNFAIR_RECURSIVE_LOCK_INIT
    os_unfair_recursive_lock_lock(&_globalLock);
    work();
    os_unfair_recursive_lock_unlock(&_globalLock);
#else
    pthread_mutex_lock(&_globalLock);
    work();
    pthread_mutex_unlock(&_globalLock);
#endif
}
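
// Note: the three locking helpers above all serialize on the same recursive
// _globalLock, so a block passed to withReadLock() may itself take the lock
// again (e.g. call withWriteLock()) on the same thread without deadlocking.
// Illustrative use (not a call site in this file):
//
//     gAllImages.withReadLock(^{
//         // safe to walk _loadedImages here; a nested withReadLock() or
//         // withWriteLock() on this thread just re-enters the recursive lock
//     });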
void AllImages::mirrorToOldAllImageInfos()
{
    withReadLock(^(){
        // set infoArray to NULL to denote it is in-use
        _oldAllImageInfos->infoArray = nullptr;

        // if array not large enough, re-alloc it
        uint32_t imageCount = (uint32_t)_loadedImages.count();
        if ( _oldArrayAllocCount < imageCount ) {
            uint32_t newAllocCount    = imageCount + 16;
            dyld_image_info* newArray = (dyld_image_info*)::malloc(sizeof(dyld_image_info)*newAllocCount);
            if ( _oldAllImageArray != nullptr ) {
                ::memcpy(newArray, _oldAllImageArray, sizeof(dyld_image_info)*_oldAllImageInfos->infoArrayCount);
                ::free(_oldAllImageArray);
            }
            _oldAllImageArray   = newArray;
            _oldArrayAllocCount = newAllocCount;
        }

        // fill out array to mirror current image list
        int index = 0;
        for (const LoadedImage& li : _loadedImages) {
            _oldAllImageArray[index].imageLoadAddress = li.loadedAddress();
            _oldAllImageArray[index].imageFilePath    = imagePath(li.image());
            _oldAllImageArray[index].imageFileModDate = 0;
            ++index;
        }

        // set infoArray back to base address of array (so other process can now read)
        _oldAllImageInfos->infoArrayCount           = imageCount;
        _oldAllImageInfos->infoArrayChangeTimestamp = mach_absolute_time();
        _oldAllImageInfos->infoArray                = _oldAllImageArray;

        // <rdar://problem/42668846> update UUID array if needed
        uint32_t nonCachedCount = 1; // always add dyld
        for (const LoadedImage& li : _loadedImages) {
            if ( _oldAllImageInfos->processDetachedFromSharedRegion || !li.loadedAddress()->inDyldCache())
                ++nonCachedCount;
        }
        if ( nonCachedCount != _oldAllImageInfos->uuidArrayCount ) {
            // set uuidArray to NULL to denote it is in-use
            _oldAllImageInfos->uuidArray = nullptr;
            // make sure allocation can hold all uuids
            if ( _oldUUIDAllocCount < nonCachedCount ) {
                uint32_t newAllocCount   = (nonCachedCount + 3) & (-4); // round up to multiple of 4 (e.g. 5 -> 8)
                dyld_uuid_info* newArray = (dyld_uuid_info*)::malloc(sizeof(dyld_uuid_info)*newAllocCount);
                if ( _oldUUIDArray != nullptr )
                    ::free(_oldUUIDArray);
                _oldUUIDArray      = newArray;
                _oldUUIDAllocCount = newAllocCount;
            }
            // add dyld then all images not in dyld cache
            const MachOFile* dyldMF = (MachOFile*)_oldAllImageInfos->dyldImageLoadAddress;
            _oldUUIDArray[0].imageLoadAddress = dyldMF;
            dyldMF->getUuid(_oldUUIDArray[0].imageUUID);
            index = 1;
            for (const LoadedImage& li : _loadedImages) {
                if ( _oldAllImageInfos->processDetachedFromSharedRegion || !li.loadedAddress()->inDyldCache() ) {
                    _oldUUIDArray[index].imageLoadAddress = li.loadedAddress();
                    li.loadedAddress()->getUuid(_oldUUIDArray[index].imageUUID);
                    ++index;
                }
            }
            // set uuidArray back to base address of array (so kernel can now read)
            _oldAllImageInfos->uuidArray      = _oldUUIDArray;
            _oldAllImageInfos->uuidArrayCount = nonCachedCount;
        }
    });
}
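
// Note: infoArray doubles as a busy flag for out-of-process readers (debuggers).
// The update protocol used above is:
//
//     _oldAllImageInfos->infoArray = nullptr;            // 1. mark list as in-use
//     /* grow and refill _oldAllImageArray */            // 2. mutate the private copy
//     _oldAllImageInfos->infoArray = _oldAllImageArray;  // 3. publish base address
//
// A reader that samples infoArray and sees NULL knows the list is mid-update and
// must re-sample rather than walk a half-written array. uuidArray works the same
// way for the kernel.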
void AllImages::addImages(const Array<LoadedImage>& newImages)
{
    // copy into _loadedImages
    withWriteLock(^(){
        _loadedImages.append(newImages);
    });
}
void AllImages::addImmutableRange(uintptr_t start, uintptr_t end)
{
    //fprintf(stderr, "AllImages::addImmutableRange(0x%09lX, 0x%09lX)\n", start, end);
    // first look in existing range buckets for empty slot
    ImmutableRanges* lastRange = nullptr;
    for (ImmutableRanges* ranges = &_immutableRanges; ranges != nullptr; ranges = ranges->next.load(std::memory_order_acquire)) {
        lastRange = ranges;
        for (uintptr_t i=0; i < ranges->arraySize; ++i) {
            if ( ranges->array[i].start.load(std::memory_order_acquire) == 0 ) {
                // set 'end' before 'start' so readers always see consistent state
                ranges->array[i].end.store(end, std::memory_order_release);
                ranges->array[i].start.store(start, std::memory_order_release);
                return; // success
            }
        }
    }
    // if we got here, there are no empty slots, so add new ImmutableRanges
    const uintptr_t newSize = 15; // allocation is 256 bytes on 64-bit processes
    ImmutableRanges* newRange = (ImmutableRanges*)calloc(offsetof(ImmutableRanges, array[newSize]), 1);
    newRange->arraySize = newSize;
    newRange->array[0].end.store(end, std::memory_order_release);
    newRange->array[0].start.store(start, std::memory_order_release);
    // tie into previous list last
    lastRange->next.store(newRange, std::memory_order_release);
}
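
// Note: a sketch of why addImmutableRange() stores 'end' before 'start'.
// Readers (see immutableMemory() below) acquire-load 'start' first and treat a
// zero 'start' as "slot empty", so publishing end-then-start with release stores
// means a reader can never pair a non-zero 'start' with a stale 'end':
//
//     writer:  array[i].end.store(e, release);  array[i].start.store(s, release);
//     reader:  s = array[i].start.load(acquire);
//              if ( s != 0 )  e = array[i].end.load(acquire);   // e already valid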
void AllImages::runImageNotifiers(const Array<LoadedImage>& newImages)
{
    uint32_t count = (uint32_t)newImages.count();
    assert(count != 0);

    if ( _oldAllImageInfos != nullptr ) {
        // sync to old all image infos struct
        mirrorToOldAllImageInfos();

        // tell debugger about new images
        dyld_image_info oldDyldInfo[count];
        for (uint32_t i=0; i < count; ++i) {
            oldDyldInfo[i].imageLoadAddress = newImages[i].loadedAddress();
            oldDyldInfo[i].imageFilePath    = imagePath(newImages[i].image());
            oldDyldInfo[i].imageFileModDate = 0;
        }
        _oldAllImageInfos->notification(dyld_image_adding, count, oldDyldInfo);
    }

    // if any image not in the shared cache added, recompute bounds
    for (const LoadedImage& li : newImages) {
        if ( !((MachOAnalyzer*)li.loadedAddress())->inDyldCache() ) {
            recomputeBounds();
            break;
        }
    }

    // update immutable ranges
    for (const LoadedImage& li : newImages) {
        if ( !li.image()->inDyldCache() && li.image()->neverUnload() ) {
            uintptr_t baseAddr = (uintptr_t)li.loadedAddress();
            li.image()->forEachDiskSegment(^(uint32_t segIndex, uint32_t fileOffset, uint32_t fileSize, int64_t vmOffset, uint64_t vmSize, uint8_t permissions, bool laterReadOnly, bool& stop) {
                if ( (permissions & (VM_PROT_READ|VM_PROT_WRITE)) == VM_PROT_READ ) {
                    addImmutableRange(baseAddr + (uintptr_t)vmOffset, (uintptr_t)(baseAddr + vmOffset + vmSize));
                }
            });
        }
    }

    for (const LoadedImage& li : newImages) {
        const char* path = imagePath(li.image());
        uuid_t imageUUID;
        if ( li.image()->getUuid(imageUUID)) {
            uuid_string_t imageUUIDStr;
            uuid_unparse_upper(imageUUID, imageUUIDStr);
            log_loads("dyld: <%s> %s\n", imageUUIDStr, path);
        }
        else {
            log_loads("dyld: %s\n", path);
        }
    }

    // call kdebug trace for each image
    if (kdebug_is_enabled(KDBG_CODE(DBG_DYLD, DBG_DYLD_UUID, DBG_DYLD_UUID_MAP_A))) {
        for (const LoadedImage& li : newImages) {
            const closure::Image* image = li.image();
            struct stat stat_buf;
            const char* path = imagePath(image);
            uuid_t uuid;
            image->getUuid(uuid);
            fsid_t fsid = {{ 0, 0 }};
            fsobj_id_t fsobjid = { 0, 0 };
            if ( !li.loadedAddress()->inDyldCache() && (dyld3::stat(path, &stat_buf) == 0) ) {
                fsobjid = *(fsobj_id_t*)&stat_buf.st_ino;
                fsid    = {{ stat_buf.st_dev, 0 }};
            }
            kdebug_trace_dyld_image(DBG_DYLD_UUID_MAP_A, path, &uuid, fsobjid, fsid, li.loadedAddress());
        }
    }
}
void AllImages::runImageCallbacks(const Array<LoadedImage>& newImages)
{
    uint32_t count = (uint32_t)newImages.count();
    assert(count != 0);

    // call each _dyld_register_func_for_add_image function with each image
    withNotifiersLock(^{
        for (NotifyFunc func : _loadNotifiers) {
            for (const LoadedImage& li : newImages) {
                dyld3::ScopedTimer timer(DBG_DYLD_TIMING_FUNC_FOR_ADD_IMAGE, (uint64_t)li.loadedAddress(), (uint64_t)func, 0);
                log_notifications("dyld: add notifier %p called with mh=%p\n", func, li.loadedAddress());
                if ( li.image()->inDyldCache() )
                    func(li.loadedAddress(), (uintptr_t)_dyldCacheSlide);
                else
                    func(li.loadedAddress(), li.loadedAddress()->getSlide());
            }
        }
        for (LoadNotifyFunc func : _loadNotifiers2) {
            for (const LoadedImage& li : newImages) {
                dyld3::ScopedTimer timer(DBG_DYLD_TIMING_FUNC_FOR_ADD_IMAGE, (uint64_t)li.loadedAddress(), (uint64_t)func, 0);
                log_notifications("dyld: add notifier %p called with mh=%p\n", func, li.loadedAddress());
                if ( li.image()->inDyldCache() )
                    func(li.loadedAddress(), li.image()->path(), false);
                else
                    func(li.loadedAddress(), li.image()->path(), !li.image()->neverUnload());
            }
        }
        for (BulkLoadNotifier func : _loadBulkNotifiers) {
            const mach_header* mhs[count];
            const char*        paths[count];
            for (unsigned i=0; i < count; ++i) {
                mhs[i]   = newImages[i].loadedAddress();
                paths[i] = newImages[i].image()->path();
            }
            dyld3::ScopedTimer timer(DBG_DYLD_TIMING_FUNC_FOR_ADD_IMAGE, (uint64_t)mhs[0], (uint64_t)func, 0);
            log_notifications("dyld: add notifier %p called with %d images\n", func, count);
            func(count, mhs, paths);
        }
    });

    // call objc about images that use objc
    if ( _objcNotifyMapped != nullptr ) {
        const char*        pathsBuffer[count];
        const mach_header* mhBuffer[count];
        uint32_t           imagesWithObjC = 0;
        for (const LoadedImage& li : newImages) {
            const closure::Image* image = li.image();
            if ( image->hasObjC() ) {
                pathsBuffer[imagesWithObjC] = imagePath(image);
                mhBuffer[imagesWithObjC]    = li.loadedAddress();
                ++imagesWithObjC;
            }
        }
        if ( imagesWithObjC != 0 ) {
            dyld3::ScopedTimer timer(DBG_DYLD_TIMING_OBJC_MAP, 0, 0, 0);
            (*_objcNotifyMapped)(imagesWithObjC, pathsBuffer, mhBuffer);
            if ( log_notifications("dyld: objc-mapped-notifier called with %d images:\n", imagesWithObjC) ) {
                for (uint32_t i=0; i < imagesWithObjC; ++i) {
                    log_notifications("dyld: objc-mapped: %p %s\n", mhBuffer[i], pathsBuffer[i]);
                }
            }
        }
    }

#if !TARGET_OS_DRIVERKIT
    // FIXME: This may make more sense in runImageNotifiers, but the present order
    // is after callbacks. Can we safely move it?
    // notify any processes tracking loads in this process
    notifyMonitorLoads(newImages);
#endif
}
void AllImages::removeImages(const Array<LoadedImage>& unloadImages)
{
    // call each _dyld_register_func_for_remove_image function with each image
    withNotifiersLock(^{
        for (NotifyFunc func : _unloadNotifiers) {
            for (const LoadedImage& li : unloadImages) {
                dyld3::ScopedTimer timer(DBG_DYLD_TIMING_FUNC_FOR_REMOVE_IMAGE, (uint64_t)li.loadedAddress(), (uint64_t)func, 0);
                log_notifications("dyld: remove notifier %p called with mh=%p\n", func, li.loadedAddress());
                if ( li.image()->inDyldCache() )
                    func(li.loadedAddress(), (uintptr_t)_dyldCacheSlide);
                else
                    func(li.loadedAddress(), li.loadedAddress()->getSlide());
            }
        }
    });

    // call objc about images going away
    if ( _objcNotifyUnmapped != nullptr ) {
        for (const LoadedImage& li : unloadImages) {
            if ( li.image()->hasObjC() ) {
                (*_objcNotifyUnmapped)(imagePath(li.image()), li.loadedAddress());
                log_notifications("dyld: objc-unmapped-notifier called with image %p %s\n", li.loadedAddress(), imagePath(li.image()));
            }
        }
    }

    // call kdebug trace for each image
    if (kdebug_is_enabled(KDBG_CODE(DBG_DYLD, DBG_DYLD_UUID, DBG_DYLD_UUID_MAP_A))) {
        for (const LoadedImage& li : unloadImages) {
            const closure::Image* image = li.image();
            struct stat stat_buf;
            const char* path = imagePath(image);
            uuid_t uuid;
            image->getUuid(uuid);
            fsid_t fsid = {{ 0, 0 }};
            fsobj_id_t fsobjid = { 0, 0 };
            if ( dyld3::stat(path, &stat_buf) == 0 ) {
                fsobjid = *(fsobj_id_t*)&stat_buf.st_ino;
                fsid    = {{ stat_buf.st_dev, 0 }};
            }
            kdebug_trace_dyld_image(DBG_DYLD_UUID_UNMAP_A, path, &uuid, fsobjid, fsid, li.loadedAddress());
        }
    }

    // remove each from _loadedImages
    withWriteLock(^(){
        for (const LoadedImage& uli : unloadImages) {
            for (LoadedImage& li : _loadedImages) {
                if ( uli.loadedAddress() == li.loadedAddress() ) {
                    _loadedImages.erase(li);
                    break;
                }
            }
        }
        recomputeBounds();
    });

    // sync to old all image infos struct
    mirrorToOldAllImageInfos();

    // tell debugger about removed images
    STACK_ALLOC_ARRAY(dyld_image_info, oldDyldInfo, unloadImages.count());
    for (const LoadedImage& li : unloadImages) {
        oldDyldInfo.push_back({li.loadedAddress(), li.image()->path(), 0});
    }
    _oldAllImageInfos->notification(dyld_image_removing, (uint32_t)oldDyldInfo.count(), &oldDyldInfo[0]);

    // notify any processes tracking loads in this process
    notifyMonitorUnloads(unloadImages);

    // finally, unmap images
    for (const LoadedImage& li : unloadImages) {
        if ( li.leaveMapped() ) {
            log_loads("dyld: unloaded but left mmapped %s\n", imagePath(li.image()));
        }
        else {
            // unmapImage() modifies parameter, so use copy
            LoadedImage copy = li;
            Loader::unmapImage(copy);
            log_loads("dyld: unloaded %s\n", imagePath(li.image()));
        }
    }
}
// must be called with writeLock held
void AllImages::recomputeBounds()
{
    _lowestNonCached  = UINTPTR_MAX;
    _highestNonCached = 0;
    for (const LoadedImage& li : _loadedImages) {
        const MachOLoaded* ml = li.loadedAddress();
        uintptr_t start = (uintptr_t)ml;
        if ( !((MachOAnalyzer*)ml)->inDyldCache() ) {
            if ( start < _lowestNonCached )
                _lowestNonCached = start;
            uintptr_t end = start + (uintptr_t)(li.image()->vmSizeToMap());
            if ( end > _highestNonCached )
                _highestNonCached = end;
        }
    }
}
uint32_t AllImages::count() const
{
    return (uint32_t)_loadedImages.count();
}
bool AllImages::dyldCacheHasPath(const char* path) const
{
    uint32_t dyldCacheImageIndex;
    if ( _dyldCacheAddress != nullptr )
        return _dyldCacheAddress->hasImagePath(path, dyldCacheImageIndex);
    return false;
}
const char* AllImages::imagePathByIndex(uint32_t index) const
{
    __block const char* result = nullptr;
    withReadLock(^{
        if ( index < _loadedImages.count() ) {
            result = imagePath(_loadedImages[index].image());
            return;
        }
    });
    return result;
}
const mach_header* AllImages::imageLoadAddressByIndex(uint32_t index) const
{
    __block const mach_header* result = nullptr;
    withReadLock(^{
        if ( index < _loadedImages.count() ) {
            result = _loadedImages[index].loadedAddress();
            return;
        }
    });
    return result;
}
bool AllImages::findImage(const mach_header* loadAddress, LoadedImage& foundImage) const
{
    __block bool result = false;
    withReadLock(^{
        for (const LoadedImage& li : _loadedImages) {
            if ( li.loadedAddress() == loadAddress ) {
                foundImage = li;
                result = true;
                break;
            }
        }
    });
    return result;
}
void AllImages::forEachImage(void (^handler)(const LoadedImage& loadedImage, bool& stop)) const
{
    if ( _initialImages != nullptr ) {
        // being called during libSystem initialization, so _loadedImages not allocated yet
        bool stop = false;
        for (const LoadedImage& li : *_initialImages) {
            handler(li, stop);
            if ( stop )
                break;
        }
        return;
    }

    withReadLock(^{
        bool stop = false;
        for (const LoadedImage& li : _loadedImages) {
            handler(li, stop);
            if ( stop )
                break;
        }
    });
}
const char* AllImages::pathForImageMappedAt(const void* addr) const
{
    if ( _initialImages != nullptr ) {
        // being called during libSystem initialization, so _loadedImages not allocated yet
        for (const LoadedImage& li : *_initialImages) {
            uint8_t permissions;
            if ( li.image()->containsAddress(addr, li.loadedAddress(), &permissions) ) {
                return li.image()->path();
            }
        }
        return nullptr;
    }

    // if address is in cache, do fast search of TEXT segments in cache
    __block const char* result = nullptr;
    if ( (_dyldCacheAddress != nullptr) && (addr > _dyldCacheAddress) ) {
        if ( addr < (void*)((uint8_t*)_dyldCacheAddress+_dyldCacheAddress->mappedSize()) ) {
            uint64_t cacheSlide       = (uint64_t)_dyldCacheAddress - _dyldCacheAddress->unslidLoadAddress();
            uint64_t unslidTargetAddr = (uint64_t)addr - cacheSlide;
            _dyldCacheAddress->forEachImageTextSegment(^(uint64_t loadAddressUnslid, uint64_t textSegmentSize, const unsigned char* dylibUUID, const char* installName, bool& stop) {
                if ( (loadAddressUnslid <= unslidTargetAddr) && (unslidTargetAddr < loadAddressUnslid+textSegmentSize) ) {
                    result = installName;
                    stop = true;
                }
            });
            if ( result != nullptr )
                return result;
        }
    }

    // slow path - search image list
    infoForImageMappedAt(addr, ^(const LoadedImage& foundImage, uint8_t permissions) {
        result = foundImage.image()->path();
    });

    return result;
}
void AllImages::infoForImageMappedAt(const void* addr, void (^handler)(const LoadedImage& foundImage, uint8_t permissions)) const
{
    __block uint8_t permissions;
    if ( _initialImages != nullptr ) {
        // being called during libSystem initialization, so _loadedImages not allocated yet
        for (const LoadedImage& li : *_initialImages) {
            if ( li.image()->containsAddress(addr, li.loadedAddress(), &permissions) ) {
                handler(li, permissions);
                break;
            }
        }
        return;
    }

    withReadLock(^{
        for (const LoadedImage& li : _loadedImages) {
            if ( li.image()->containsAddress(addr, li.loadedAddress(), &permissions) ) {
                handler(li, permissions);
                break;
            }
        }
    });
}
bool AllImages::infoForImageMappedAt(const void* addr, const MachOLoaded** ml, uint64_t* textSize, const char** path) const
{
    if ( _initialImages != nullptr ) {
        // being called during libSystem initialization, so _loadedImages not allocated yet
        for (const LoadedImage& li : *_initialImages) {
            uint8_t permissions;
            if ( li.image()->containsAddress(addr, li.loadedAddress(), &permissions) ) {
                if ( ml != nullptr )
                    *ml = li.loadedAddress();
                if ( path != nullptr )
                    *path = li.image()->path();
                if ( textSize != nullptr ) {
                    *textSize = li.image()->textSize();
                }
                return true;
            }
        }
        return false;
    }

    // if address is in cache, do fast search of TEXT segments in cache
    __block bool result = false;
    if ( (_dyldCacheAddress != nullptr) && (addr > _dyldCacheAddress) ) {
        if ( addr < (void*)((uint8_t*)_dyldCacheAddress+_dyldCacheAddress->mappedSize()) ) {
            uint64_t cacheSlide       = (uint64_t)_dyldCacheAddress - _dyldCacheAddress->unslidLoadAddress();
            uint64_t unslidTargetAddr = (uint64_t)addr - cacheSlide;
            _dyldCacheAddress->forEachImageTextSegment(^(uint64_t loadAddressUnslid, uint64_t textSegmentSize, const unsigned char* dylibUUID, const char* installName, bool& stop) {
                if ( (loadAddressUnslid <= unslidTargetAddr) && (unslidTargetAddr < loadAddressUnslid+textSegmentSize) ) {
                    if ( ml != nullptr )
                        *ml = (MachOLoaded*)(loadAddressUnslid + cacheSlide);
                    if ( path != nullptr )
                        *path = installName;
                    if ( textSize != nullptr )
                        *textSize = textSegmentSize;
                    stop = true;
                    result = true;
                }
            });
            if ( result )
                return true;
            // in shared cache, but not in a TEXT segment, do slow search of all loaded cache images
            withReadLock(^{
                for (const LoadedImage& li : _loadedImages) {
                    if ( ((MachOAnalyzer*)li.loadedAddress())->inDyldCache() ) {
                        uint8_t permissions;
                        if ( li.image()->containsAddress(addr, li.loadedAddress(), &permissions) ) {
                            if ( ml != nullptr )
                                *ml = li.loadedAddress();
                            if ( path != nullptr )
                                *path = li.image()->path();
                            if ( textSize != nullptr )
                                *textSize = li.image()->textSize();
                            result = true;
                            break;
                        }
                    }
                }
            });
            return result;
        }
    }

    // address not in dyld cache, check each non-cache image
    infoForNonCachedImageMappedAt(addr, ^(const LoadedImage& foundImage, uint8_t permissions) {
        if ( ml != nullptr )
            *ml = foundImage.loadedAddress();
        if ( path != nullptr )
            *path = foundImage.image()->path();
        if ( textSize != nullptr )
            *textSize = foundImage.image()->textSize();
        result = true;
    });

    return result;
}
// same as infoForImageMappedAt(), but only look at images not in the dyld cache
void AllImages::infoForNonCachedImageMappedAt(const void* addr, void (^handler)(const LoadedImage& foundImage, uint8_t permissions)) const
{
    __block uint8_t permissions;
    if ( _initialImages != nullptr ) {
        // being called during libSystem initialization, so _loadedImages not allocated yet
        for (const LoadedImage& li : *_initialImages) {
            if ( !((MachOAnalyzer*)li.loadedAddress())->inDyldCache() ) {
                if ( li.image()->containsAddress(addr, li.loadedAddress(), &permissions) ) {
                    handler(li, permissions);
                    break;
                }
            }
        }
        return;
    }

    withReadLock(^{
        for (const LoadedImage& li : _loadedImages) {
            if ( !((MachOAnalyzer*)li.loadedAddress())->inDyldCache() ) {
                if ( li.image()->containsAddress(addr, li.loadedAddress(), &permissions) ) {
                    handler(li, permissions);
                    break;
                }
            }
        }
    });
}
bool AllImages::immutableMemory(const void* addr, size_t length) const
{
    // check to see if in shared cache
    if ( _dyldCacheAddress != nullptr ) {
        bool readOnly;
        if ( _dyldCacheAddress->inCache(addr, length, readOnly) ) {
            return readOnly;
        }
    }

    // check to see if it is outside the range of any loaded image
    if ( ((uintptr_t)addr < _lowestNonCached) || ((uintptr_t)addr+length > _highestNonCached) ) {
        return false;
    }

    // check immutable ranges
    for (const ImmutableRanges* ranges = &_immutableRanges; ranges != nullptr; ranges = ranges->next.load(std::memory_order_acquire)) {
        for (uintptr_t i=0; i < ranges->arraySize; ++i) {
            if ( ranges->array[i].start.load(std::memory_order_acquire) == 0 )
                break; // no more entries in use
            if ( (ranges->array[i].start.load(std::memory_order_acquire) <= (uintptr_t)addr)
              && (ranges->array[i].end.load(std::memory_order_acquire) > ((uintptr_t)addr)+length) )
                return true;
        }
    }

    return false;
}
uintptr_t AllImages::resolveTarget(closure::Image::ResolvedSymbolTarget target) const
{
    switch ( target.sharedCache.kind ) {
        case closure::Image::ResolvedSymbolTarget::kindSharedCache:
            assert(_dyldCacheAddress != nullptr);
            return (uintptr_t)_dyldCacheAddress + (uintptr_t)target.sharedCache.offset;

        case closure::Image::ResolvedSymbolTarget::kindImage: {
            LoadedImage info;
            bool foundImage = findImageNum(target.image.imageNum, info);
            assert(foundImage);
            return (uintptr_t)(info.loadedAddress()) + (uintptr_t)target.image.offset;
        }

        case closure::Image::ResolvedSymbolTarget::kindAbsolute:
            if ( target.absolute.value & (1ULL << 62) )
                return (uintptr_t)(target.absolute.value | 0xC000000000000000ULL);
            else
                return (uintptr_t)target.absolute.value;
    }
    assert(0 && "malformed ResolvedSymbolTarget");
    return 0;
}
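
// Note: worked example of the kindAbsolute sign extension above. The absolute
// value is stored in a bitfield narrower than 64 bits whose top bit is bit 62,
// so OR-ing with 0xC000000000000000 replicates that sign bit into the high bits:
//
//     stored:   0x7FFFFFFFFFFFFFFF                        (bit 62 set => negative)
//     restored: 0x7FFFFFFFFFFFFFFF | 0xC000000000000000
//             = 0xFFFFFFFFFFFFFFFF                        == (uintptr_t)-1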
void* AllImages::interposeValue(void* value) const {
    if ( !_mainClosure->hasInterposings() )
        return value;

    __block void* replacementValue = nullptr;
    __block bool  foundReplacement = false;
    _mainClosure->forEachInterposingTuple(^(const closure::InterposingTuple& tuple, bool& stop) {
        void* stockPointer = (void*)resolveTarget(tuple.stockImplementation);
        if ( stockPointer == value ) {
            replacementValue = (void*)resolveTarget(tuple.newImplementation);
            foundReplacement = true;
            stop = true;
        }
    });

    if ( foundReplacement )
        return replacementValue;

    return value;
}
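
// Note: illustrative use of interposeValue(). If the launch closure recorded an
// interposing tuple whose stockImplementation resolves to the address of malloc
// and whose newImplementation resolves to a replacement, then
// interposeValue((void*)&malloc) returns the replacement's address; a pointer
// matching no tuple is returned unchanged.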
void AllImages::infoForImageWithLoadAddress(const MachOLoaded* mh, void (^handler)(const LoadedImage& foundImage)) const
{
    withReadLock(^{
        for (const LoadedImage& li : _loadedImages) {
            if ( li.loadedAddress() == mh ) {
                handler(li);
                break;
            }
        }
    });
}
bool AllImages::findImageNum(closure::ImageNum imageNum, LoadedImage& foundImage) const
{
    if ( _initialImages != nullptr ) {
        // being called during libSystem initialization, so _loadedImages not allocated yet
        for (const LoadedImage& li : *_initialImages) {
            if ( li.image()->representsImageNum(imageNum) ) {
                foundImage = li;
                return true;
            }
        }
        return false;
    }

    bool result = false;
    for (const LoadedImage& li : _loadedImages) {
        if ( li.image()->representsImageNum(imageNum) ) {
            foundImage = li;
            result = true;
            break;
        }
    }

    return result;
}
const MachOLoaded* AllImages::findDependent(const MachOLoaded* mh, uint32_t depIndex)
{
    __block const MachOLoaded* result = nullptr;
    withReadLock(^{
        for (const LoadedImage& li : _loadedImages) {
            if ( li.loadedAddress() == mh ) {
                closure::ImageNum depImageNum = li.image()->dependentImageNum(depIndex);
                LoadedImage depLi;
                if ( findImageNum(depImageNum, depLi) )
                    result = depLi.loadedAddress();
                break;
            }
        }
    });
    return result;
}
void AllImages::breadthFirstRecurseDependents(Array<closure::ImageNum>& visited, const LoadedImage& nodeLi, bool& stopped, void (^handler)(const LoadedImage& aLoadedImage, bool& stop)) const
{
    // call handler on all direct dependents (unless already visited)
    STACK_ALLOC_ARRAY(LoadedImage, dependentsToRecurse, 256);
    nodeLi.image()->forEachDependentImage(^(uint32_t depIndex, closure::Image::LinkKind kind, closure::ImageNum depImageNum, bool& depStop) {
        if ( kind == closure::Image::LinkKind::upward )
            return;
        if ( visited.contains(depImageNum) )
            return;
        LoadedImage depLi;
        if ( !findImageNum(depImageNum, depLi) )
            return;
        handler(depLi, depStop);
        // <rdar://58466613> if there is an override of some dyld cache dylib, we need to store the override ImageNum in the visited set
        if ( depImageNum != depLi.image()->imageNum() ) {
            depImageNum = depLi.image()->imageNum();
            if ( visited.contains(depImageNum) )
                return;
        }
        visited.push_back(depImageNum);
        if ( depStop ) {
            stopped = true;
            return;
        }
        dependentsToRecurse.push_back(depLi);
    });
    if ( stopped )
        return;
    // recurse on all dependents just visited
    for (LoadedImage& depLi : dependentsToRecurse) {
        breadthFirstRecurseDependents(visited, depLi, stopped, handler);
    }
}
void AllImages::visitDependentsTopDown(const LoadedImage& start, void (^handler)(const LoadedImage& aLoadedImage, bool& stop)) const
{
    withReadLock(^{
        STACK_ALLOC_ARRAY(closure::ImageNum, visited, count());
        bool stop = false;
        handler(start, stop);
        if ( stop )
            return;
        visited.push_back(start.image()->imageNum());
        breadthFirstRecurseDependents(visited, start, stop, handler);
    });
}
const MachOLoaded* AllImages::mainExecutable() const
{
    assert(_programVars != nullptr);
    return (const MachOLoaded*)_programVars->mh;
}

const closure::Image* AllImages::mainExecutableImage() const
{
    assert(_mainClosure != nullptr);
    return _mainClosure->images()->imageForNum(_mainClosure->topImageNum());
}

void AllImages::setMainPath(const char* path)
{
    _mainExeOverridePath = path;
}
const char* AllImages::imagePath(const closure::Image* image) const
{
    // on iOS and watchOS, apps may be moved on device after closure built
    if ( _mainExeOverridePath != nullptr ) {
        if ( image == mainExecutableImage() )
            return _mainExeOverridePath;
    }
    return image->path();
}
dyld_platform_t AllImages::platform() const {
    return (dyld_platform_t)oldAllImageInfo()->platform;
}

const GradedArchs& AllImages::archs() const
{
    return *_archs;
}
void AllImages::incRefCount(const mach_header* loadAddress)
{
    for (DlopenCount& entry : _dlopenRefCounts) {
        if ( entry.loadAddress == loadAddress ) {
            // found existing DlopenCount entry, bump counter
            entry.refCount += 1;
            return;
        }
    }

    // no existing DlopenCount, add new one
    _dlopenRefCounts.push_back({ loadAddress, 1 });
}
void AllImages::decRefCount(const mach_header* loadAddress)
{
    bool doCollect = false;
    for (DlopenCount& entry : _dlopenRefCounts) {
        if ( entry.loadAddress == loadAddress ) {
            // found existing DlopenCount entry, decrement counter
            entry.refCount -= 1;
            if ( entry.refCount == 0 ) {
                _dlopenRefCounts.erase(entry);
                doCollect = true;
                break;
            }
            return;
        }
    }

    if ( doCollect )
        garbageCollectImages();
}
NSObjectFileImage AllImages::addNSObjectFileImage(const OFIInfo& image)
{
    __block uint64_t imageNum = 0;
    withWriteLock(^{
        imageNum = ++_nextObjectFileImageNum;
        _objectFileImages.push_back(image);
        _objectFileImages.back().imageNum = imageNum;
    });
    return (NSObjectFileImage)imageNum;
}
bool AllImages::forNSObjectFileImage(NSObjectFileImage imageHandle,
                                     void (^handler)(OFIInfo& image)) {
    uint64_t imageNum = (uint64_t)imageHandle;
    bool __block foundImage = false;
    withWriteLock(^{
        for (OFIInfo& ofi : _objectFileImages) {
            if ( ofi.imageNum == imageNum ) {
                handler(ofi);
                foundImage = true;
                return;
            }
        }
    });
    return foundImage;
}
void AllImages::removeNSObjectFileImage(NSObjectFileImage imageHandle)
{
    uint64_t imageNum = (uint64_t)imageHandle;
    withWriteLock(^{
        for (OFIInfo& ofi : _objectFileImages) {
            if ( ofi.imageNum == imageNum ) {
                _objectFileImages.erase(ofi);
                return;
            }
        }
    });
}
class VIS_HIDDEN Reaper
{
public:
    struct ImageAndUse
    {
        const LoadedImage* li;
        bool               inUse;
    };
    Reaper(Array<ImageAndUse>& unloadables, AllImages*);
    void garbageCollect();
    void finalizeDeadImages();

    static void runTerminators(const LoadedImage& li);
private:
    void markDirectlyDlopenedImagesAsUsed();
    void markDependentOfInUseImages();
    void markDependentsOf(const LoadedImage*);
    uint32_t inUseCount();
    void dump(const char* msg);

    Array<ImageAndUse>& _unloadables;
    AllImages*          _allImages;
    uint32_t            _deadCount;
};
Reaper::Reaper(Array<ImageAndUse>& unloadables, AllImages* all)
 : _unloadables(unloadables), _allImages(all), _deadCount(0)
{
}
void Reaper::markDirectlyDlopenedImagesAsUsed()
{
    for (AllImages::DlopenCount& entry : _allImages->_dlopenRefCounts) {
        if ( entry.refCount != 0 ) {
            for (ImageAndUse& iu : _unloadables) {
                if ( iu.li->loadedAddress() == entry.loadAddress ) {
                    iu.inUse = true;
                    break;
                }
            }
        }
    }
}
uint32_t Reaper::inUseCount()
{
    uint32_t count = 0;
    for (ImageAndUse& iu : _unloadables) {
        if ( iu.inUse )
            ++count;
    }
    return count;
}
void Reaper::markDependentsOf(const LoadedImage* li)
{
    li->image()->forEachDependentImage(^(uint32_t depIndex, closure::Image::LinkKind kind, closure::ImageNum depImageNum, bool& stop) {
        for (ImageAndUse& iu : _unloadables) {
            if ( !iu.inUse && iu.li->image()->representsImageNum(depImageNum) ) {
                iu.inUse = true;
                break;
            }
        }
    });
}
void Reaper::markDependentOfInUseImages()
{
    for (ImageAndUse& iu : _unloadables) {
        if ( iu.inUse )
            markDependentsOf(iu.li);
    }
}
void Reaper::dump(const char* msg)
{
    //log("%s:\n", msg);
    //for (ImageAndUse& iu : _unloadables) {
    //    log("  in-used=%d %s\n", iu.inUse, iu.li->image()->path());
    //}
}
void Reaper::garbageCollect()
{
    //dump("all unloadable images");

    // mark all dylibs directly dlopen'ed as in use
    markDirectlyDlopenedImagesAsUsed();

    //dump("directly dlopen()'ed marked");

    // iteratively mark dependents of in-use dylibs as in-use until in-use count stops changing
    uint32_t lastCount = inUseCount();
    bool countChanged = false;
    do {
        markDependentOfInUseImages();
        //dump("dependents marked");
        uint32_t newCount = inUseCount();
        countChanged = (newCount != lastCount);
        lastCount = newCount;
    } while (countChanged);

    _deadCount = (uint32_t)_unloadables.count() - inUseCount();
}
void Reaper::finalizeDeadImages()
{
    if ( _deadCount == 0 )
        return;
    STACK_ALLOC_OVERFLOW_SAFE_ARRAY(__cxa_range_t, ranges, _deadCount);
    for (ImageAndUse& iu : _unloadables) {
        if ( iu.inUse )
            continue;
        runTerminators(*iu.li);
        iu.li->image()->forEachDiskSegment(^(uint32_t segIndex, uint32_t fileOffset, uint32_t fileSize, int64_t vmOffset, uint64_t vmSize, uint8_t permissions, bool laterReadOnly, bool& stop) {
            if ( permissions & VM_PROT_EXECUTE ) {
                __cxa_range_t range;
                range.addr   = (char*)(iu.li->loadedAddress()) + vmOffset;
                range.length = (size_t)vmSize;
                ranges.push_back(range);
            }
        });
    }
    __cxa_finalize_ranges(ranges.begin(), (uint32_t)ranges.count());
}
void Reaper::runTerminators(const LoadedImage& li)
{
    // <rdar://problem/71820555> Don't run static terminator for arm64e
    const MachOAnalyzer* ma = (MachOAnalyzer*)li.loadedAddress();
    if ( ma->isArch("arm64e") )
        return;

    if ( li.image()->hasTerminators() ) {
        typedef void (*Terminator)();
        li.image()->forEachTerminator(li.loadedAddress(), ^(const void* terminator) {
            Terminator termFunc = (Terminator)terminator;
            termFunc();
            log_initializers("dyld: called static terminator %p in %s\n", termFunc, li.image()->path());
        });
    }
}
void AllImages::runAllStaticTerminators()
{
    // We want to run terminators in reverse chronological order of initializing
    // Note: initialLoadCount may be larger than what was actually loaded
    const uint32_t currentCount     = (uint32_t)_loadedImages.count();
    const uint32_t initialLoadCount = std::min(_mainClosure->initialLoadCount(), currentCount);

    // first run static terminators of anything dlopen()ed
    for (uint32_t i=currentCount-1; i >= initialLoadCount; --i) {
        Reaper::runTerminators(_loadedImages[i]);
    }

    // next run terminators of statically loaded images, in loader order (they were initialized in reverse of this)
    for (uint32_t i=0; i < initialLoadCount; ++i) {
        Reaper::runTerminators(_loadedImages[i]);
    }
}
// This function is called at the end of dlclose() when the reference count goes to zero.
// The dylib being unloaded may have brought in other dependent dylibs when it was loaded.
// Those dependent dylibs need to be unloaded, but only if they are not referenced by
// something else. We use a standard mark and sweep garbage collection.
//
// The tricky part is that when a dylib is unloaded it may have a termination function that
// can run and itself call dlclose() on yet another dylib. The problem is that this
// sort of garbage collection is not re-entrant. Instead a terminator's call to dlclose()
// which calls garbageCollectImages() will just set a flag to re-do the garbage collection
// when the current pass is done.
//
// Also note that this is done within the _loadedImages writer lock, so any dlopen/dlclose
// on other threads are blocked while this garbage collection runs.
//
void AllImages::garbageCollectImages()
{
    // if some other thread is currently GC'ing images, let the other thread do the work
    int32_t newCount = OSAtomicIncrement32(&_gcCount);
    if ( newCount != 1 )
        return;

    do {
        STACK_ALLOC_ARRAY(Reaper::ImageAndUse, unloadables, _loadedImages.count());
        withReadLock(^{
            for (const LoadedImage& li : _loadedImages) {
                if ( !li.image()->neverUnload() /*&& !li.neverUnload()*/ ) {
                    unloadables.push_back({&li, false});
                    //fprintf(stderr, "unloadable[%lu] %p %s\n", unloadables.count(), li.loadedAddress(), li.image()->path());
                }
            }
        });
        // make reaper object to do garbage collection and notifications
        Reaper reaper(unloadables, this);
        reaper.garbageCollect();

        // FIXME: we should sort dead images so higher level ones are terminated first

        // call cxa_finalize_ranges and static terminators of dead images
        reaper.finalizeDeadImages();

        // FIXME: DOF unregister

        //fprintf(stderr, "_loadedImages before GC removals:\n");
        //for (const LoadedImage& li : _loadedImages) {
        //    fprintf(stderr, "   loadAddr=%p, path=%s\n", li.loadedAddress(), li.image()->path());
        //}

        // make copy of LoadedImages we want to remove
        // because unloadables[] points into LoadedImage we are shrinking
        STACK_ALLOC_ARRAY(LoadedImage, unloadImages, _loadedImages.count());
        for (const Reaper::ImageAndUse& iu : unloadables) {
            if ( !iu.inUse )
                unloadImages.push_back(*iu.li);
        }
        // remove entries from _loadedImages
        if ( !unloadImages.empty() ) {
            removeImages(unloadImages);

            //fprintf(stderr, "_loadedImages after GC removals:\n");
            //for (const LoadedImage& li : _loadedImages) {
            //    fprintf(stderr, "   loadAddr=%p, path=%s\n", li.loadedAddress(), li.image()->path());
            //}
        }

        // if some other thread called GC during our work, redo GC on its behalf
        newCount = OSAtomicDecrement32(&_gcCount);
    }
    while (newCount > 0);
}
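
// Note: worked example of the _gcCount hand-off above. Thread A calls
// garbageCollectImages(): its increment yields 1, so A runs the GC pass.
// Thread B calls it mid-pass: its increment yields 2, so B returns immediately.
// When A finishes, its decrement yields 1 (> 0), so the do/while loops and A
// runs another full pass on B's behalf before finally decrementing to 0.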
void AllImages::addLoadNotifier(NotifyFunc func)
{
    // callback about already loaded images
    withReadLock(^{
        for (const LoadedImage& li : _loadedImages) {
            dyld3::ScopedTimer timer(DBG_DYLD_TIMING_FUNC_FOR_ADD_IMAGE, (uint64_t)li.loadedAddress(), (uint64_t)func, 0);
            log_notifications("dyld: add notifier %p called with mh=%p\n", func, li.loadedAddress());
            if ( li.image()->inDyldCache() )
                func(li.loadedAddress(), (uintptr_t)_dyldCacheSlide);
            else
                func(li.loadedAddress(), li.loadedAddress()->getSlide());
        }
    });

    // add to list of functions to call about future loads
    withNotifiersLock(^{
        _loadNotifiers.push_back(func);
    });
}
void AllImages::addUnloadNotifier(NotifyFunc func)
{
    // add to list of functions to call about future unloads
    withNotifiersLock(^{
        _unloadNotifiers.push_back(func);
    });
}
void AllImages::addLoadNotifier(LoadNotifyFunc func)
{
    // callback about already loaded images
    withReadLock(^{
        for (const LoadedImage& li : _loadedImages) {
            dyld3::ScopedTimer timer(DBG_DYLD_TIMING_FUNC_FOR_ADD_IMAGE, (uint64_t)li.loadedAddress(), (uint64_t)func, 0);
            log_notifications("dyld: add notifier %p called with mh=%p\n", func, li.loadedAddress());
            func(li.loadedAddress(), li.image()->path(), !li.image()->neverUnload());
        }
    });

    // add to list of functions to call about future loads
    withNotifiersLock(^{
        _loadNotifiers2.push_back(func);
    });
}
void AllImages::addBulkLoadNotifier(BulkLoadNotifier func)
{
    // callback about already loaded images
    unsigned count = (unsigned)_loadedImages.count();
    const mach_header* mhs[count];
    const char*        paths[count];
    for (unsigned i=0; i < count; ++i) {
        mhs[i]   = _loadedImages[i].loadedAddress();
        paths[i] = _loadedImages[i].image()->path();
    }
    dyld3::ScopedTimer timer(DBG_DYLD_TIMING_FUNC_FOR_ADD_IMAGE, (uint64_t)mhs[0], (uint64_t)func, 0);
    log_notifications("dyld: add notifier %p called with %d images\n", func, count);
    func(count, mhs, paths);

    // add to list of functions to call about future loads
    withNotifiersLock(^{
        _loadBulkNotifiers.push_back(func);
    });
}
// Returns true if logs should be sent to stderr as well as syslog.
// Copied from objc which copied it from CFUtilities.c
static bool also_do_stderr(void)
{
    struct stat st;
    int ret = fstat(STDERR_FILENO, &st);
    if (ret < 0) return false;
    mode_t m = st.st_mode & S_IFMT;
    if (m == S_IFREG || m == S_IFSOCK || m == S_IFIFO || m == S_IFCHR) {
        return true;
    }
    return false;
}
// Print "message" to the console. Copied from objc.
static void _objc_syslog(const char* message)
{
    _simple_asl_log(ASL_LEVEL_ERR, NULL, message);

    if (also_do_stderr()) {
        write(STDERR_FILENO, message, strlen(message));
    }
}
void AllImages::setObjCNotifiers(_dyld_objc_notify_mapped map, _dyld_objc_notify_init init, _dyld_objc_notify_unmapped unmap)
{
    _objcNotifyMapped   = map;
    _objcNotifyInit     = init;
    _objcNotifyUnmapped = unmap;

    // We couldn't initialize the objc optimized closure data in init() as that needs malloc but runs before malloc initializes.
    // So let's grab the data now and set it up.

    // Pull out the objc selector hash table if we have one
    Array<closure::Image::ObjCSelectorImage> selectorImageNums;
    const closure::ObjCSelectorOpt*          selectorHashTable = nullptr;
    if (_mainClosure->selectorHashTable(selectorImageNums, selectorHashTable)) {
        _objcSelectorHashTable = selectorHashTable;
        for (closure::Image::ObjCSelectorImage selectorImage : selectorImageNums) {
            LoadedImage loadedImage;
            bool found = findImageNum(selectorImage.imageNum, loadedImage);
            assert(found);
            _objcSelectorHashTableImages.push_back( (uintptr_t)loadedImage.loadedAddress() + selectorImage.offset );
        }
    }

    // Pull out the objc class hash table if we have one
    Array<closure::Image::ObjCClassImage> classImageNums;
    const closure::ObjCClassOpt*          classHashTable    = nullptr;
    const closure::ObjCClassOpt*          protocolHashTable = nullptr;
    if (_mainClosure->classAndProtocolHashTables(classImageNums, classHashTable, protocolHashTable)) {
        _objcClassHashTable    = (const closure::ObjCClassOpt*)classHashTable;
        _objcProtocolHashTable = (const closure::ObjCClassOpt*)protocolHashTable;
        for (closure::Image::ObjCClassImage classImage : classImageNums) {
            LoadedImage loadedImage;
            bool found = findImageNum(classImage.imageNum, loadedImage);
            assert(found);
            uintptr_t loadAddress     = (uintptr_t)loadedImage.loadedAddress();
            uintptr_t nameBaseAddress = loadAddress + classImage.offsetOfClassNames;
            uintptr_t dataBaseAddress = loadAddress + classImage.offsetOfClasses;
            _objcClassHashTableImages.push_back({ nameBaseAddress, dataBaseAddress });
        }
    }

    _mainClosure->duplicateClassesHashTable(_objcClassDuplicatesHashTable);
    if ( _objcClassDuplicatesHashTable != nullptr ) {
        // If we have duplicates, then those need the objc opt pointer to find dupes
        _dyldCacheObjCOpt = _dyldCacheAddress->objcOpt();
    }

    // ObjC would have issued warnings on duplicate classes. We've recorded those too
    _mainClosure->forEachWarning(closure::Closure::Warning::duplicateObjCClass, ^(const char* warning, bool& stop) {
        Diagnostics diag;
        diag.error("objc[%d]: %s\n", getpid(), warning);
        _objc_syslog(diag.errorMessage());
    });

    // callback about already loaded images
    uint32_t maxCount = count();
    STACK_ALLOC_ARRAY(const mach_header*, mhs,   maxCount);
    STACK_ALLOC_ARRAY(const char*,        paths, maxCount);
    // don't need _mutex here because this is called when process is still single threaded
    for (const LoadedImage& li : _loadedImages) {
        if ( li.image()->hasObjC() ) {
            paths.push_back(imagePath(li.image()));
            mhs.push_back(li.loadedAddress());
        }
    }
    if ( !mhs.empty() ) {
        (*map)((uint32_t)mhs.count(), &paths[0], &mhs[0]);
        if ( log_notifications("dyld: objc-mapped-notifier called with %ld images:\n", mhs.count()) ) {
            for (uintptr_t i=0; i < mhs.count(); ++i) {
                log_notifications("dyld: objc-mapped: %p %s\n", mhs[i], paths[i]);
            }
        }
    }
}
void AllImages::applyInterposingToDyldCache(const closure::Closure* closure, mach_port_t mach_task_self)
{
    dyld3::ScopedTimer timer(DBG_DYLD_TIMING_APPLY_INTERPOSING, 0, 0, 0);
    const uintptr_t               cacheStart              = (uintptr_t)_dyldCacheAddress;
    __block closure::ImageNum     lastCachedDylibImageNum = 0;
    __block const closure::Image* lastCachedDylibImage    = nullptr;
    __block bool                  suspendedAccounting     = false;

    if ( closure->findAttributePayload(closure::TypedBytes::Type::cacheOverrides) == nullptr )
        return;

    // make the cache writable for this block
    DyldSharedCache::DataConstScopedWriter patcher(_dyldCacheAddress, mach_task_self, (DyldSharedCache::DataConstLogFunc)&log_segments);

    closure->forEachPatchEntry(^(const closure::Closure::PatchEntry& entry) {
        if ( entry.overriddenDylibInCache != lastCachedDylibImageNum ) {
            lastCachedDylibImage = closure::ImageArray::findImage(imagesArrays(), entry.overriddenDylibInCache);
            assert(lastCachedDylibImage != nullptr);
            lastCachedDylibImageNum = entry.overriddenDylibInCache;
        }
        if ( !suspendedAccounting ) {
            Loader::vmAccountingSetSuspended(true, log_fixups);
            suspendedAccounting = true;
        }
        uintptr_t newValue = 0;
        LoadedImage foundImage;
        switch ( entry.replacement.image.kind ) {
            case closure::Image::ResolvedSymbolTarget::kindImage:
                if ( !findImageNum(entry.replacement.image.imageNum, foundImage) ) {
                    abort_report_np("cannot find replacement imageNum=0x%04X when patching cache to override imageNum=0x%04X\n", entry.replacement.image.imageNum, entry.overriddenDylibInCache);
                }
                newValue = (uintptr_t)(foundImage.loadedAddress()) + (uintptr_t)entry.replacement.image.offset;
                break;
            case closure::Image::ResolvedSymbolTarget::kindSharedCache:
                newValue = (uintptr_t)_dyldCacheAddress + (uintptr_t)entry.replacement.sharedCache.offset;
                break;
            case closure::Image::ResolvedSymbolTarget::kindAbsolute:
                // this means the symbol was missing in the cache override dylib, so set any uses to NULL
                newValue = (uintptr_t)entry.replacement.absolute.value;
                break;
            default:
                assert(0 && "bad replacement kind");
        }
        uint32_t lastCachedDylibImageIndex = lastCachedDylibImageNum - (uint32_t)_dyldCacheAddress->cachedDylibsImageArray()->startImageNum();
        _dyldCacheAddress->forEachPatchableUseOfExport(lastCachedDylibImageIndex,
                                                       entry.exportCacheOffset, ^(dyld_cache_patchable_location patchLocation) {
            uintptr_t* loc = (uintptr_t*)(cacheStart+patchLocation.cacheOffset);
#if __has_feature(ptrauth_calls)
            if ( patchLocation.authenticated ) {
                MachOLoaded::ChainedFixupPointerOnDisk fixupInfo;
                fixupInfo.arm64e.authRebase.auth      = true;
                fixupInfo.arm64e.authRebase.addrDiv   = patchLocation.usesAddressDiversity;
                fixupInfo.arm64e.authRebase.diversity = patchLocation.discriminator;
                fixupInfo.arm64e.authRebase.key       = patchLocation.key;
                *loc = fixupInfo.arm64e.signPointer(loc, newValue + DyldSharedCache::getAddend(patchLocation));
                log_fixups("dyld: cache fixup: *%p = %p (JOP: diversity 0x%04X, addr-div=%d, key=%s)\n",
                           loc, (void*)*loc, patchLocation.discriminator, patchLocation.usesAddressDiversity, DyldSharedCache::keyName(patchLocation));
                return;
            }
#endif
            log_fixups("dyld: cache fixup: *%p = 0x%0lX (dyld cache patch)\n", loc, newValue + (uintptr_t)DyldSharedCache::getAddend(patchLocation));
            *loc = newValue + (uintptr_t)DyldSharedCache::getAddend(patchLocation);
        });
    });
    if ( suspendedAccounting )
        Loader::vmAccountingSetSuspended(false, log_fixups);
}
void AllImages::runStartupInitialzers()
{
    __block bool mainExecutableInitializerNeedsToRun = true;
    __block uint32_t imageIndex = 0;
    while ( mainExecutableInitializerNeedsToRun ) {
        __block const closure::Image* image = nullptr;
        withReadLock(^{
            image = _loadedImages[imageIndex].image();
            if ( _loadedImages[imageIndex].loadedAddress()->isMainExecutable() )
                mainExecutableInitializerNeedsToRun = false;
        });
        runInitialzersBottomUp(image);
        ++imageIndex;
    }
}
// Find image in _loadedImages which has ImageNum == num.
// Try indexHint first; if the hint is wrong, update it so the next use is faster.
LoadedImage AllImages::findImageNum(closure::ImageNum num, uint32_t& indexHint)
{
    __block LoadedImage copy;
    withReadLock(^{
        if ( (indexHint >= _loadedImages.count()) || !_loadedImages[indexHint].image()->representsImageNum(num) ) {
            for (indexHint=0; indexHint < _loadedImages.count(); ++indexHint) {
                if ( _loadedImages[indexHint].image()->representsImageNum(num) )
                    break;
            }
            assert(indexHint < _loadedImages.count());
        }
        copy = _loadedImages[indexHint];
    });
    return copy;
}
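
// Note: illustrative use of the indexHint pattern (not a call site in this file;
// 'expected' and 'next' are placeholder states):
//
//     uint32_t hint = 0;
//     LoadedImage li = gAllImages.findImageNum(num, hint);  // hint now caches the slot
//     /* initializers may run; _loadedImages may grow or move */
//     gAllImages.swapImageState(num, hint, expected, next); // fast when the slot is unchanged
//
// The LoadedImage is returned by value, so the copy stays valid even if
// _loadedImages reallocates while the caller holds it.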
// Change the state of the LoadedImage in _loadedImages which has ImageNum == num.
// Only change state if current state is expectedCurrentState (atomic swap).
bool AllImages::swapImageState(closure::ImageNum num, uint32_t& indexHint, LoadedImage::State expectedCurrentState, LoadedImage::State newState)
{
    __block bool result = false;
    withWriteLock(^{
        if ( (indexHint >= _loadedImages.count()) || !_loadedImages[indexHint].image()->representsImageNum(num) ) {
            for (indexHint=0; indexHint < _loadedImages.count(); ++indexHint) {
                if ( _loadedImages[indexHint].image()->representsImageNum(num) )
                    break;
            }
            assert(indexHint < _loadedImages.count());
        }
        if ( _loadedImages[indexHint].state() == expectedCurrentState ) {
            _loadedImages[indexHint].setState(newState);
            result = true;
        }
    });
    return result;
}
// dyld3 pre-builds the order initializers need to be run (bottom up) in a list in the closure.
// This method uses that list to run all initializers.
// Because an initializer may call dlopen() and/or create threads, the _loadedImages array
// may move under us. So, never keep a pointer into it. Always reference images by ImageNum
// and use the hint to make that faster in the case where _loadedImages does not move.
void AllImages::runInitialzersBottomUp(const closure::Image* topImage)
{
    // walk closure specified initializer list, already ordered bottom up
    topImage->forEachImageToInitBefore(^(closure::ImageNum imageToInit, bool& stop) {
        // get copy of LoadedImage for imageToInit, but don't keep a reference into _loadedImages, because it may move if initializers call dlopen()
        uint32_t indexHint = 0;
        LoadedImage loadedImageCopy = findImageNum(imageToInit, indexHint);
        // skip if the image is already inited, or in process of being inited (dependency cycle)
        if ( (loadedImageCopy.state() == LoadedImage::State::fixedUp) && swapImageState(imageToInit, indexHint, LoadedImage::State::fixedUp, LoadedImage::State::beingInited) ) {
            // tell objc to run any +load methods in image
            if ( (_objcNotifyInit != nullptr) && loadedImageCopy.image()->mayHavePlusLoads() ) {
                dyld3::ScopedTimer timer(DBG_DYLD_TIMING_OBJC_INIT, (uint64_t)loadedImageCopy.loadedAddress(), 0, 0);
                const char* path = imagePath(loadedImageCopy.image());
                log_notifications("dyld: objc-init-notifier called with mh=%p, path=%s\n", loadedImageCopy.loadedAddress(), path);
                (*_objcNotifyInit)(path, loadedImageCopy.loadedAddress());
            }

            // run all initializers in image
            runAllInitializersInImage(loadedImageCopy.image(), loadedImageCopy.loadedAddress());

            // advance state to inited
            swapImageState(imageToInit, indexHint, LoadedImage::State::beingInited, LoadedImage::State::inited);
        }
    });
}
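
// Note: the fixedUp -> beingInited -> inited state walk above also breaks
// dependency cycles: if image A's initializer dlopen()s B, and B depends back
// on A, the recursive walk finds A already in state beingInited, the swap
// fails, and A's initializers are not re-entered.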
void AllImages::runLibSystemInitializer(LoadedImage& libSystem)
{
    // First set the libSystem state to beingInited. This protects against accidentally trying
    // to run its initializers again if a dlopen happens inside libSystem_initializer().
    libSystem.setState(LoadedImage::State::beingInited);

    // run all initializers in libSystem.dylib
    // Note: during libSystem's initialization, libdyld_initializer() is called which copies _initialImages to _loadedImages
    runAllInitializersInImage(libSystem.image(), libSystem.loadedAddress());

    // update global flags that libsystem has been initialized (so debug tools know it is safe to inject threads)
    _oldAllImageInfos->libSystemInitialized = true;

    // mark libSystem.dylib as inited, so a later recursive-init would not re-run it
    for (LoadedImage& li : _loadedImages) {
        if ( li.loadedAddress() == libSystem.loadedAddress() ) {
            li.setState(LoadedImage::State::inited);
            break;
        }
    }

    // now that libSystem is up, register a callback that should be called at exit
    __cxa_atexit(&AllImages::runAllStaticTerminatorsHelper, nullptr, nullptr);
}
void AllImages::runAllStaticTerminatorsHelper(void*)
{
    gAllImages.runAllStaticTerminators();
}
void AllImages::runAllInitializersInImage(const closure::Image* image, const MachOLoaded* ml)
{
    image->forEachInitializer(ml, ^(const void* func) {
        Initializer initFunc = (Initializer)func;
#if __has_feature(ptrauth_calls)
        initFunc = (Initializer)__builtin_ptrauth_sign_unauthenticated((void*)initFunc, 0, 0);
#endif
        {
            // name the timer so it lives across the initFunc() call (an unnamed
            // temporary would be destroyed before the initializer runs)
            ScopedTimer timer(DBG_DYLD_TIMING_STATIC_INITIALIZER, (uint64_t)ml, (uint64_t)func, 0);
            initFunc(NXArgc, NXArgv, environ, appleParams, _programVars);
        }
        log_initializers("dyld: called initializer %p in %s\n", initFunc, image->path());
    });
}
1688 // Note this is noinline to avoid having too much stack used if loadImage has to call due to an invalid closure
1689 __attribute__((noinline
))
1690 const MachOLoaded
* AllImages::dlopen(Diagnostics
& diag
, const char* path
, bool rtldNoLoad
, bool rtldLocal
,
1691 bool rtldNoDelete
, bool rtldNow
, bool fromOFI
, const void* callerAddress
,
1692 bool canUsePrebuiltSharedCacheClosure
)
1694 bool sharedCacheFormatCompatible
= (_dyldCacheAddress
!= nullptr) && (_dyldCacheAddress
->header
.formatVersion
== dyld3::closure::kFormatVersion
);
1695 canUsePrebuiltSharedCacheClosure
&= sharedCacheFormatCompatible
;
1697 // quick check if path is in shared cache and already loaded
1698 if ( _dyldCacheAddress
!= nullptr ) {
1699 uint32_t dyldCacheImageIndex
;
1700 if ( _dyldCacheAddress
->hasImagePath(path
, dyldCacheImageIndex
) ) {
1703 const MachOLoaded
* mh
= (MachOLoaded
*)_dyldCacheAddress
->getIndexedImageEntry(dyldCacheImageIndex
, mTime
, inode
);
1704 // Note: we do not need readLock because this is within global dlopen lock
1705 for (const LoadedImage
& li
: _loadedImages
) {
1706 if ( li
.loadedAddress() == mh
) {
1711 // If this is a customer cache, and we have no overrides, then we know for sure the cache closure is valid
1712 // This assumes that a libdispatch root would have been loaded on launch, and that root path is not
1713 // supported with customer caches, which is the case today.
1714 if ( !rtldNoLoad
&& !hasInsertedOrInterposingLibraries() &&
1715 (_dyldCacheAddress
->header
.cacheType
== kDyldSharedCacheTypeProduction
) &&
1716 sharedCacheFormatCompatible
) {
1717 const dyld3::closure::ImageArray
* images
= _dyldCacheAddress
->cachedDylibsImageArray();
1718 const dyld3::closure::Image
* image
= images
->imageForNum(dyldCacheImageIndex
+1);
1719 return loadImage(diag
, path
, image
->imageNum(), nullptr, rtldLocal
, rtldNoDelete
, rtldNow
, fromOFI
, callerAddress
);
    __block closure::ImageNum callerImageNum = 0;
    for (const LoadedImage& li : _loadedImages) {
        uint8_t permissions;
        if ( (callerImageNum == 0) && li.image()->containsAddress(callerAddress, li.loadedAddress(), &permissions) ) {
            callerImageNum = li.image()->imageNum();
        }
        //fprintf(stderr, "mh=%p, image=%p, imageNum=0x%04X, path=%s\n", li.loadedAddress(), li.image(), li.image()->imageNum(), li.image()->path());
    }

    closure::ImageNum topImageNum = 0;
    const closure::DlopenClosure* newClosure = nullptr;

    // First try with closures from the shared cache permitted.
    // Then try again, forcing a new closure to be built.
    for (bool canUseSharedCacheClosure : { true, false }) {
        // We can only use a shared cache closure if the shared cache format is the same as libdyld's.
        canUseSharedCacheClosure &= canUsePrebuiltSharedCacheClosure;
        closure::FileSystemPhysical fileSystem(nullptr, nullptr, _allowEnvPaths);
        RootsChecker rootsChecker;
        closure::ClosureBuilder::AtPath atPathHanding = (_allowAtPaths ? closure::ClosureBuilder::AtPath::all : closure::ClosureBuilder::AtPath::onlyInRPaths);
        closure::ClosureBuilder cb(_nextImageNum, fileSystem, rootsChecker, _dyldCacheAddress, true, *_archs, closure::gPathOverrides,
                                   atPathHanding, true, nullptr, (dyld3::Platform)platform());
        newClosure = cb.makeDlopenClosure(path, _mainClosure, _loadedImages.array(), callerImageNum, rtldNoLoad, rtldNow, canUseSharedCacheClosure, &topImageNum);
        if ( newClosure == closure::ClosureBuilder::sRetryDlopenClosure ) {
            log_apis(" dlopen: closure builder needs to retry: %s\n", path);
            assert(canUseSharedCacheClosure);
            continue;
        }

        if ( (newClosure == nullptr) && (topImageNum == 0) ) {
            if ( cb.diagnostics().hasError() )
                diag.error("%s", cb.diagnostics().errorMessage());
            else if ( !rtldNoLoad )
                diag.error("dlopen(): file not found: %s", path);
            return nullptr;
        }
        // save off next available ImageNum for use by next call to dlopen()
        _nextImageNum = cb.nextFreeImageNum();
        break;
    }
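    // Illustrative note: the { true, false } loop above implements a two-attempt
    // protocol. sRetryDlopenClosure is a sentinel value (never dereferenced)
    // returned by the builder when a prebuilt shared-cache closure turned out to
    // be unusable; the assert() holds because a retry can only be requested on
    // the first pass, while canUseSharedCacheClosure is still true.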
    if ( newClosure != nullptr ) {
        // if the new closure contains an ImageArray, add it to the list
        if ( const closure::ImageArray* newArray = newClosure->images() ) {
            appendToImagesArray(newArray);
        }
        log_apis(" dlopen: made %s closure: %p\n", newClosure->topImage()->variantString(), newClosure);
    }

    // if already loaded, just bump refCount and return
    if ( (newClosure == nullptr) && (topImageNum != 0) ) {
        for (LoadedImage& li : _loadedImages) {
            if ( li.image()->imageNum() == topImageNum ) {
                // is already loaded
                const MachOLoaded* topLoadAddress = li.loadedAddress();
                if ( !li.image()->inDyldCache() )
                    incRefCount(topLoadAddress);
                log_apis(" dlopen: already loaded as '%s'\n", li.image()->path());
                // if previously opened with RTLD_LOCAL, but now opened with RTLD_GLOBAL, unhide it
                if ( !rtldLocal && li.hideFromFlatSearch() )
                    li.setHideFromFlatSearch(false);
                // if called with RTLD_NODELETE, mark it as never-unload
                if ( rtldNoDelete )
                    li.markLeaveMapped();

                // If we haven't run the initializers, then we must be inside a static initializer in a dlopen
                if ( li.state() != LoadedImage::State::inited ) {
                    // RTLD_NOLOAD means dlopen should fail unless the path is already loaded.
                    // Don't run initializers when RTLD_NOLOAD is set. This only matters if dlopen() is
                    // called from within an initializer, because it can cause initializers to run
                    // out of order. Most uses of RTLD_NOLOAD are "probes". If callers want initializers
                    // to run, they should not use RTLD_NOLOAD.
                    if ( !rtldNoLoad )
                        runInitialzersBottomUp(li.image());
                }

                return topLoadAddress;
            }
        }
    }
    return loadImage(diag, path, topImageNum, newClosure, rtldLocal, rtldNoDelete, rtldNow, fromOFI, callerAddress);
}
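// Illustrative note: libdyld's public dlopen() entry point is expected to decompose
// the mode bits before calling into AllImages::dlopen(). A sketch of that mapping,
// assuming the caller address is taken with __builtin_return_address (names here
// are illustrative, not this file's API):
//
//     bool rtldNoLoad   = (mode & RTLD_NOLOAD);
//     bool rtldLocal    = (mode & RTLD_LOCAL);
//     bool rtldNoDelete = (mode & RTLD_NODELETE);
//     bool rtldNow      = (mode & RTLD_NOW);
//     gAllImages.dlopen(diag, path, rtldNoLoad, rtldLocal, rtldNoDelete,
//                       rtldNow, false /*fromOFI*/, __builtin_return_address(0), true);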
// Note this is noinline to avoid using too much stack in the parent dlopen() frame
__attribute__((noinline))
const MachOLoaded* AllImages::loadImage(Diagnostics& diag, const char* path,
                                        closure::ImageNum topImageNum, const closure::DlopenClosure* newClosure,
                                        bool rtldLocal, bool rtldNoDelete, bool rtldNow, bool fromOFI,
                                        const void* callerAddress) {
    // Note this array is used as the storage for the Loader, so it needs to be at least
    // large enough to hold the total number of images this dlopen may load
    STACK_ALLOC_OVERFLOW_SAFE_ARRAY(LoadedImage, newImages, 1024);

    // Note we don't need pre-optimized Objective-C for dlopen closures, but use
    // variables here to make it easier to see what's going on.
    const dyld3::closure::ObjCSelectorOpt* selectorOpt = nullptr;
    dyld3::Array<dyld3::closure::Image::ObjCSelectorImage> selectorImages;

    // run loader to load all new images
    RootsChecker rootsChecker;
    Loader loader(_loadedImages.array(), newImages, _dyldCacheAddress, imagesArrays(),
                  selectorOpt, selectorImages, rootsChecker, (dyld3::Platform)platform(),
                  &dyld3::log_loads, &dyld3::log_segments, &dyld3::log_fixups, &dyld3::log_dofs, !rtldNow);
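    // Illustrative note: the final '!rtldNow' argument plausibly tells the Loader
    // whether lazy bindings may be left unresolved; under RTLD_NOW all fixups
    // would be bound eagerly during mapAndFixupAllImages() below. (This is an
    // inference from the call site, not from this file's headers.)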
    // find the Image* for the top image; look in the new closure first
    const closure::Image* topImage = nullptr;
    if ( newClosure != nullptr )
        topImage = newClosure->images()->imageForNum(topImageNum);
    if ( topImage == nullptr )
        topImage = closure::ImageArray::findImage(imagesArrays(), topImageNum);
    if ( newClosure == nullptr ) {
        if ( topImageNum < dyld3::closure::kLastDyldCacheImageNum )
            log_apis(" dlopen: using pre-built %s dlopen closure from dyld shared cache %p\n", topImage->variantString(), topImage);
        else
            log_apis(" dlopen: using pre-built %s dlopen closure %p\n", topImage->variantString(), topImage);
    }
    LoadedImage topLoadedImage = LoadedImage::make(topImage);
    if ( rtldLocal && !topImage->inDyldCache() )
        topLoadedImage.setHideFromFlatSearch(true);
    if ( rtldNoDelete && !topImage->inDyldCache() )
        topLoadedImage.markLeaveMapped();
    loader.addImage(topLoadedImage);

    // recursively load all dependents and fill in the allImages array
    bool someCacheImageOverridden = false;
    loader.completeAllDependents(diag, someCacheImageOverridden);
    if ( diag.hasError() )
        return nullptr;
    bool closureOutOfDate;
    bool recoverable;
    loader.mapAndFixupAllImages(diag, _processDOFs, fromOFI, &closureOutOfDate, &recoverable);
    if ( diag.hasError() ) {
        // If we used a pre-built shared cache closure and now found that it was out of date,
        // try again, rebuilding a new closure.
        // Note: newClosure is null in the case where we used a prebuilt closure.
        if ( closureOutOfDate && recoverable && (newClosure == nullptr) ) {
            diag.clearError();
            return dlopen(diag, path, false /* rtldNoLoad */, rtldLocal, rtldNoDelete, rtldNow, fromOFI, callerAddress, false);
        }
        return nullptr;
    }

    // record whether we had a root
    _someImageOverridden |= someCacheImageOverridden;

    const MachOLoaded* topLoadAddress = newImages.begin()->loadedAddress();

    // bump the dlopen refcount of the directly loaded image
    if ( !topImage->inDyldCache() )
        incRefCount(topLoadAddress);

    // tell gAllImages about the new images
    addImages(newImages);

    // Run notifiers before applyInterposingToDyldCache(), so that we have an
    // accurate image list before any calls to findImage().
    // TODO: Can we move this even earlier, e.g. after map images but before fixups?
    runImageNotifiers(newImages);

    // if the closure adds images that override the dyld cache, patch the cache
    if ( newClosure != nullptr )
        applyInterposingToDyldCache(newClosure, mach_task_self());

    runImageCallbacks(newImages);

    // run initializers bottom up
    runInitialzersBottomUp(topImage);

    return topLoadAddress;
}
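// Illustrative note: the closureOutOfDate retry above can recurse back into
// dlopen() at most once, because the retry passes
// canUsePrebuiltSharedCacheClosure = false, which forces a fresh closure to be
// built (newClosure != nullptr) so the retry branch cannot be taken a second time.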
void AllImages::appendToImagesArray(const closure::ImageArray* newArray)
{
    _imagesArrays.push_back(newArray);
}

const Array<const closure::ImageArray*>& AllImages::imagesArrays()
{
    return _imagesArrays.array();
}
bool AllImages::isRestricted() const
{
    return !_allowEnvPaths;
}
bool AllImages::hasInsertedOrInterposingLibraries() const
{
    return _mainClosure->hasInsertedLibraries() || _mainClosure->hasInterposings();
}
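// Illustrative note: inserted libraries come from DYLD_INSERT_LIBRARIES recorded
// in the launch closure, e.g. launching with
//
//     DYLD_INSERT_LIBRARIES=/tmp/libinterpose.dylib ./a.out
//
// makes this predicate true and disables the customer-cache fast path in dlopen()
// above (the /tmp path is shown only as an example).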
void AllImages::takeLockBeforeFork() {
#ifdef OS_UNFAIR_RECURSIVE_LOCK_INIT
    os_unfair_recursive_lock_lock(&_globalLock);
#endif
}

void AllImages::releaseLockInForkParent() {
#ifdef OS_UNFAIR_RECURSIVE_LOCK_INIT
    os_unfair_recursive_lock_unlock(&_globalLock);
#endif
}

void AllImages::resetLockInForkChild() {
#if TARGET_OS_SIMULATOR
    // There's no dyld3 on the simulator this year
#else
#ifdef OS_UNFAIR_RECURSIVE_LOCK_INIT
    os_unfair_recursive_lock_unlock_forked_child(&_globalLock);
#endif
#endif // TARGET_OS_SIMULATOR
}
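// Illustrative note: these three hooks follow the classic pthread_atfork()
// prepare/parent/child pattern. A sketch of how a client layer could wire them up
// (the real registration is performed elsewhere in the system libraries, not in
// this file):
//
//     pthread_atfork(
//         []{ gAllImages.takeLockBeforeFork(); },      // prepare: lock in parent before fork
//         []{ gAllImages.releaseLockInForkParent(); }, // parent: release after fork
//         []{ gAllImages.resetLockInForkChild(); });   // child: reset the forked lock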
const char* AllImages::getObjCSelector(const char* selName) const {
    if ( _objcSelectorHashTable == nullptr )
        return nullptr;
    return _objcSelectorHashTable->getString(selName, _objcSelectorHashTableImages.array());
}
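// Illustrative usage sketch (hypothetical caller), assuming libobjc reaches this
// method through the libdyld entry vector:
//
//     const char* uniqued = gAllImages.getObjCSelector("description");
//     // nullptr if no prebuilt selector table exists; otherwise a uniqued string
//     // valid for the lifetime of the process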
void AllImages::forEachObjCClass(const char* className,
                                 void (^callback)(void* classPtr, bool isLoaded, bool* stop)) const {
    if ( _objcClassHashTable == nullptr )
        return;
    // There may be a duplicate in the shared cache. If that is the case, return it first.
    if ( _objcClassDuplicatesHashTable != nullptr ) {
        void* classImpl = nullptr;
        if ( _objcClassDuplicatesHashTable->getClassLocation(className, _dyldCacheObjCOpt, classImpl) ) {
            bool stop = false;
            callback(classImpl, true, &stop);
            if ( stop )
                return;
        }
    }
    _objcClassHashTable->forEachClass(className, _objcClassHashTableImages.array(), callback);
}
void AllImages::forEachObjCProtocol(const char* protocolName,
                                    void (^callback)(void* protocolPtr, bool isLoaded, bool* stop)) const {
    if ( _objcProtocolHashTable == nullptr )
        return;
    // note: the protocol table reuses the class hash table implementation, hence forEachClass()
    _objcProtocolHashTable->forEachClass(protocolName, _objcClassHashTableImages.array(), callback);
}
} // namespace dyld3