/*
 * Copyright (c) 2017 Apple Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */

#include <sys/types.h>
#include <sys/sysctl.h>
#include <mach/mach_time.h> // mach_absolute_time()
#include <libkern/OSAtomic.h>

#include "AllImages.h"
#include "libdyldEntryVector.h"
#include "DyldSharedCache.h"
#include "PathOverrides.h"
#include "ClosureBuilder.h"
#include "ClosureFileSystemPhysical.h"

extern const char** appleParams;

// should be a header for these
struct __cxa_range_t {
    const void* addr;
    size_t      length;
};
extern "C" void __cxa_finalize_ranges(const __cxa_range_t ranges[], unsigned int count);
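// __cxa_finalize_ranges() runs C++ static destructors and atexit-style handlers that were
// registered by code located in the given address ranges. Reaper::finalizeDeadImages()
// below builds that range list from the executable segments of images being unloaded and
// calls it before the images are unmapped.
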
VIS_HIDDEN bool gUseDyld3 = false;

namespace dyld3 {

///////////////////// AllImages ////////////////////////////

void AllImages::init(const closure::LaunchClosure* closure, const DyldSharedCache* dyldCacheLoadAddress, const char* dyldCachePath,
                     const Array<LoadedImage>& initialImages)
{
    _mainClosure        = closure;
    _initialImages      = &initialImages;
    _dyldCacheAddress   = dyldCacheLoadAddress;
    _dyldCachePath      = dyldCachePath;

    if ( _dyldCacheAddress ) {
        const dyld_cache_mapping_info* const fileMappings = (dyld_cache_mapping_info*)((uint64_t)_dyldCacheAddress + _dyldCacheAddress->header.mappingOffset);
        _dyldCacheSlide = (uint64_t)dyldCacheLoadAddress - fileMappings[0].address;
        _imagesArrays.push_back(dyldCacheLoadAddress->cachedDylibsImageArray());
        if ( auto others = dyldCacheLoadAddress->otherOSImageArray() )
            _imagesArrays.push_back(others);
    }
    _imagesArrays.push_back(_mainClosure->images());

    // record first ImageNum to use for dlopen() calls
    _mainClosure->images()->forEachImage(^(const dyld3::closure::Image* image, bool& stop) {
        closure::ImageNum num = image->imageNum();
        if ( num >= _nextImageNum )
            _nextImageNum = num+1;
    });

    // Make temporary old image array, so libSystem initializers can be debugged
    STACK_ALLOC_ARRAY(dyld_image_info, oldDyldInfo, initialImages.count());
    for (const LoadedImage& li : initialImages) {
        oldDyldInfo.push_back({li.loadedAddress(), li.image()->path(), 0});
    }
    _oldAllImageInfos->infoArray      = &oldDyldInfo[0];
    _oldAllImageInfos->infoArrayCount = (uint32_t)oldDyldInfo.count();
    _oldAllImageInfos->notification(dyld_image_adding, _oldAllImageInfos->infoArrayCount, _oldAllImageInfos->infoArray);
    _oldAllImageInfos->infoArray      = nullptr;
    _oldAllImageInfos->infoArrayCount = 0;

    _processDOFs = Loader::dtraceUserProbesEnabled();
}

void AllImages::setProgramVars(ProgramVars* vars)
{
    _programVars = vars;
    const dyld3::MachOFile* mf = (dyld3::MachOFile*)_programVars->mh;
    mf->forEachSupportedPlatform(^(dyld3::Platform platform, uint32_t minOS, uint32_t sdk) {
        _platform = (dyld_platform_t)platform;
        //FIXME assert there is only one?
    });
}

void AllImages::setRestrictions(bool allowAtPaths, bool allowEnvPaths)
{
    _allowAtPaths  = allowAtPaths;
    _allowEnvPaths = allowEnvPaths;
}

void AllImages::applyInitialImages()
{
    addImages(*_initialImages);
    runImageNotifiers(*_initialImages);
    _initialImages = nullptr;  // this was stack allocated
}

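// The helpers below serialize access to the image list and the notifier lists.
// When the SDK provides OS_UNFAIR_RECURSIVE_LOCK_INIT they use an os_unfair_recursive_lock,
// otherwise they fall back to a pthread mutex.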
void AllImages::withReadLock(void (^work)()) const
{
#ifdef OS_UNFAIR_RECURSIVE_LOCK_INIT
    os_unfair_recursive_lock_lock(&_loadImagesLock);
    work();
    os_unfair_recursive_lock_unlock(&_loadImagesLock);
#else
    pthread_mutex_lock(&_loadImagesLock);
    work();
    pthread_mutex_unlock(&_loadImagesLock);
#endif
}

void AllImages::withWriteLock(void (^work)())
{
#ifdef OS_UNFAIR_RECURSIVE_LOCK_INIT
    os_unfair_recursive_lock_lock(&_loadImagesLock);
    work();
    os_unfair_recursive_lock_unlock(&_loadImagesLock);
#else
    pthread_mutex_lock(&_loadImagesLock);
    work();
    pthread_mutex_unlock(&_loadImagesLock);
#endif
}

void AllImages::withNotifiersLock(void (^work)()) const
{
#ifdef OS_UNFAIR_RECURSIVE_LOCK_INIT
    os_unfair_recursive_lock_lock(&_notifiersLock);
    work();
    os_unfair_recursive_lock_unlock(&_notifiersLock);
#else
    pthread_mutex_lock(&_notifiersLock);
    work();
    pthread_mutex_unlock(&_notifiersLock);
#endif
}

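// _oldAllImageInfos points at the legacy dyld_all_image_infos structure that debuggers and
// other processes read. mirrorToOldAllImageInfos() republishes the dyld3 image list there:
// infoArray is set to NULL while the array is rewritten, then restored along with a new
// change timestamp so an external reader can tell when the list is stable again.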
void AllImages::mirrorToOldAllImageInfos()
{
    // set infoArray to NULL to denote it is in-use
    _oldAllImageInfos->infoArray = nullptr;

    // if array not large enough, re-alloc it
    uint32_t imageCount = (uint32_t)_loadedImages.count();
    if ( _oldArrayAllocCount < imageCount ) {
        uint32_t newAllocCount    = imageCount + 16;
        dyld_image_info* newArray = (dyld_image_info*)::malloc(sizeof(dyld_image_info)*newAllocCount);
        if ( _oldAllImageArray != nullptr ) {
            ::memcpy(newArray, _oldAllImageArray, sizeof(dyld_image_info)*_oldAllImageInfos->infoArrayCount);
            ::free(_oldAllImageArray);
        }
        _oldAllImageArray   = newArray;
        _oldArrayAllocCount = newAllocCount;
    }

    // fill out array to mirror current image list
    uint32_t index = 0;
    for (const LoadedImage& li : _loadedImages) {
        _oldAllImageArray[index].imageLoadAddress = li.loadedAddress();
        _oldAllImageArray[index].imageFilePath    = imagePath(li.image());
        _oldAllImageArray[index].imageFileModDate = 0;
        ++index;
    }

    // set infoArray back to base address of array (so other process can now read)
    _oldAllImageInfos->infoArrayCount           = imageCount;
    _oldAllImageInfos->infoArrayChangeTimestamp = mach_absolute_time();
    _oldAllImageInfos->infoArray                = _oldAllImageArray;
}

void AllImages::addImages(const Array<LoadedImage>& newImages)
{
    // copy into _loadedImages
    withWriteLock(^(){
        _loadedImages.append(newImages);
        // if any image not in the shared cache added, recompute bounds
        for (const LoadedImage& li : newImages) {
            if ( !((MachOAnalyzer*)li.loadedAddress())->inDyldCache() ) {
                recomputeBounds();
                break;
            }
        }
    });
}

void AllImages::runImageNotifiers(const Array<LoadedImage>& newImages)
{
    uint32_t count = (uint32_t)newImages.count();

    if ( _oldAllImageInfos != nullptr ) {
        // sync to old all image infos struct
        mirrorToOldAllImageInfos();

        // tell debugger about new images
        dyld_image_info oldDyldInfo[count];
        for (uint32_t i=0; i < count; ++i) {
            oldDyldInfo[i].imageLoadAddress = newImages[i].loadedAddress();
            oldDyldInfo[i].imageFilePath    = imagePath(newImages[i].image());
            oldDyldInfo[i].imageFileModDate = 0;
        }
        _oldAllImageInfos->notification(dyld_image_adding, count, oldDyldInfo);
    }

    for (const LoadedImage& li : newImages) {
        log_loads("dyld: %s\n", imagePath(li.image()));
    }

#if !TARGET_IPHONE_SIMULATOR
    // call kdebug trace for each image
    if (kdebug_is_enabled(KDBG_CODE(DBG_DYLD, DBG_DYLD_UUID, DBG_DYLD_UUID_MAP_A))) {
        for (const LoadedImage& li : newImages) {
            const closure::Image* image = li.image();
            struct stat stat_buf;
            fsid_t fsid = {{ 0, 0 }};
            fsobj_id_t fsobjid = { 0, 0 };
            if ( !image->inDyldCache() && (stat(imagePath(image), &stat_buf) == 0) ) {
                fsobjid = *(fsobj_id_t*)&stat_buf.st_ino;
                fsid    = {{ stat_buf.st_dev, 0 }};
            }
            uuid_t uuid;
            image->getUuid(uuid);
            kdebug_trace_dyld_image(DBG_DYLD_UUID_MAP_A, &uuid, fsobjid, fsid, li.loadedAddress());
        }
    }
#endif

    // call each _dyld_register_func_for_add_image function with each image
    for (NotifyFunc func : _loadNotifiers) {
        for (const LoadedImage& li : newImages) {
            dyld3::ScopedTimer timer(DBG_DYLD_TIMING_FUNC_FOR_ADD_IMAGE, (uint64_t)li.loadedAddress(), (uint64_t)func, 0);
            log_notifications("dyld: add notifier %p called with mh=%p\n", func, li.loadedAddress());
            if ( li.image()->inDyldCache() )
                func(li.loadedAddress(), (uintptr_t)_dyldCacheSlide);
            else
                func(li.loadedAddress(), li.loadedAddress()->getSlide());
        }
    }
    for (LoadNotifyFunc func : _loadNotifiers2) {
        for (const LoadedImage& li : newImages) {
            dyld3::ScopedTimer timer(DBG_DYLD_TIMING_FUNC_FOR_ADD_IMAGE, (uint64_t)li.loadedAddress(), (uint64_t)func, 0);
            log_notifications("dyld: add notifier %p called with mh=%p\n", func, li.loadedAddress());
            if ( li.image()->inDyldCache() )
                func(li.loadedAddress(), li.image()->path(), false);
            else
                func(li.loadedAddress(), li.image()->path(), !li.image()->neverUnload());
        }
    }

    // call objc about images that use objc
    if ( _objcNotifyMapped != nullptr ) {
        const char*        pathsBuffer[count];
        const mach_header* mhBuffer[count];
        uint32_t           imagesWithObjC = 0;
        for (const LoadedImage& li : newImages) {
            const closure::Image* image = li.image();
            if ( image->hasObjC() ) {
                pathsBuffer[imagesWithObjC] = imagePath(image);
                mhBuffer[imagesWithObjC]    = li.loadedAddress();
                ++imagesWithObjC;
            }
        }
        if ( imagesWithObjC != 0 ) {
            dyld3::ScopedTimer timer(DBG_DYLD_TIMING_OBJC_MAP, 0, 0, 0);
            (*_objcNotifyMapped)(imagesWithObjC, pathsBuffer, mhBuffer);
            if ( log_notifications("dyld: objc-mapped-notifier called with %d images:\n", imagesWithObjC) ) {
                for (uint32_t i=0; i < imagesWithObjC; ++i) {
                    log_notifications("dyld: objc-mapped: %p %s\n", mhBuffer[i], pathsBuffer[i]);
                }
            }
        }
    }

    // notify any processes tracking loads in this process
    notifyMonitorLoads(newImages);
}
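// The add-image loops above back the public registration APIs in <mach-o/dyld.h>.
// Illustrative client usage only (not part of this file):
//
//     static void onAddImage(const struct mach_header* mh, intptr_t vmaddr_slide) {
//         // inspect the newly loaded image
//     }
//     _dyld_register_func_for_add_image(&onAddImage);
//
// Note that for images in the dyld shared cache the slide passed to such callbacks is
// the shared cache slide, as handled in the inDyldCache() branches above.
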
void AllImages::removeImages(const Array<LoadedImage>& unloadImages)
{
    // call each _dyld_register_func_for_remove_image function with each image
    for (NotifyFunc func : _unloadNotifiers) {
        for (const LoadedImage& li : unloadImages) {
            dyld3::ScopedTimer timer(DBG_DYLD_TIMING_FUNC_FOR_REMOVE_IMAGE, (uint64_t)li.loadedAddress(), (uint64_t)func, 0);
            log_notifications("dyld: remove notifier %p called with mh=%p\n", func, li.loadedAddress());
            if ( li.image()->inDyldCache() )
                func(li.loadedAddress(), (uintptr_t)_dyldCacheSlide);
            else
                func(li.loadedAddress(), li.loadedAddress()->getSlide());
        }
    }

    // call objc about images going away
    if ( _objcNotifyUnmapped != nullptr ) {
        for (const LoadedImage& li : unloadImages) {
            if ( li.image()->hasObjC() ) {
                (*_objcNotifyUnmapped)(imagePath(li.image()), li.loadedAddress());
                log_notifications("dyld: objc-unmapped-notifier called with image %p %s\n", li.loadedAddress(), imagePath(li.image()));
            }
        }
    }

#if !TARGET_IPHONE_SIMULATOR
    // call kdebug trace for each image
    if (kdebug_is_enabled(KDBG_CODE(DBG_DYLD, DBG_DYLD_UUID, DBG_DYLD_UUID_MAP_A))) {
        for (const LoadedImage& li : unloadImages) {
            const closure::Image* image = li.image();
            struct stat stat_buf;
            fsid_t fsid = {{ 0, 0 }};
            fsobj_id_t fsobjid = { 0, 0 };
            if ( stat(imagePath(image), &stat_buf) == 0 ) {
                fsobjid = *(fsobj_id_t*)&stat_buf.st_ino;
                fsid    = {{ stat_buf.st_dev, 0 }};
            }
            uuid_t uuid;
            image->getUuid(uuid);
            kdebug_trace_dyld_image(DBG_DYLD_UUID_UNMAP_A, &uuid, fsobjid, fsid, li.loadedAddress());
        }
    }
#endif

    // remove each from _loadedImages
    withWriteLock(^(){
        for (const LoadedImage& uli : unloadImages) {
            for (LoadedImage& li : _loadedImages) {
                if ( uli.loadedAddress() == li.loadedAddress() ) {
                    _loadedImages.erase(li);
                    break;
                }
            }
        }
    });

    // sync to old all image infos struct
    mirrorToOldAllImageInfos();

    // tell debugger about removed images
    STACK_ALLOC_ARRAY(dyld_image_info, oldDyldInfo, unloadImages.count());
    for (const LoadedImage& li : unloadImages) {
        oldDyldInfo.push_back({li.loadedAddress(), li.image()->path(), 0});
    }
    _oldAllImageInfos->notification(dyld_image_removing, (uint32_t)oldDyldInfo.count(), &oldDyldInfo[0]);

    // notify any processes tracking loads in this process
    notifyMonitorUnloads(unloadImages);

    // finally, unmap images
    for (const LoadedImage& li : unloadImages) {
        if ( li.leaveMapped() ) {
            log_loads("dyld: unloaded but left mmapped %s\n", imagePath(li.image()));
        }
        else {
            // unmapImage() modifies parameter, so use copy
            LoadedImage copy = li;
            Loader::unmapImage(copy);
            log_loads("dyld: unloaded %s\n", imagePath(li.image()));
        }
    }
}

// must be called with writeLock held
void AllImages::recomputeBounds()
{
    _lowestNonCached  = UINTPTR_MAX;
    _highestNonCached = 0;
    for (const LoadedImage& li : _loadedImages) {
        const MachOLoaded* ml = li.loadedAddress();
        uintptr_t start = (uintptr_t)ml;
        if ( !((MachOAnalyzer*)ml)->inDyldCache() ) {
            if ( start < _lowestNonCached )
                _lowestNonCached = start;
            uintptr_t end = start + (uintptr_t)(li.image()->vmSizeToMap());
            if ( end > _highestNonCached )
                _highestNonCached = end;
        }
    }
}
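// The bounds computed above give immutableMemory() below a cheap range filter: an address
// outside [_lowestNonCached, _highestNonCached) cannot belong to any image loaded outside
// the dyld shared cache, so the full image-list walk can be skipped.
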
uint32_t AllImages::count() const
{
    return (uint32_t)_loadedImages.count();
}

bool AllImages::dyldCacheHasPath(const char* path) const
{
    uint32_t dyldCacheImageIndex;
    if ( _dyldCacheAddress != nullptr )
        return _dyldCacheAddress->hasImagePath(path, dyldCacheImageIndex);
    return false;
}

const char* AllImages::imagePathByIndex(uint32_t index) const
{
    if ( index < _loadedImages.count() )
        return imagePath(_loadedImages[index].image());
    return nullptr;
}

const mach_header* AllImages::imageLoadAddressByIndex(uint32_t index) const
{
    if ( index < _loadedImages.count() )
        return _loadedImages[index].loadedAddress();
    return nullptr;
}

bool AllImages::findImage(const mach_header* loadAddress, LoadedImage& foundImage) const
{
    __block bool result = false;
    withReadLock(^{
        for (const LoadedImage& li : _loadedImages) {
            if ( li.loadedAddress() == loadAddress ) {
                foundImage = li;
                result = true;
                break;
            }
        }
    });
    return result;
}

void AllImages::forEachImage(void (^handler)(const LoadedImage& loadedImage, bool& stop)) const
{
    bool stop = false;
    for (const LoadedImage& li : _loadedImages) {
        handler(li, stop);
        if ( stop )
            break;
    }
}

const char* AllImages::pathForImageMappedAt(const void* addr) const
{
    if ( _initialImages != nullptr ) {
        // being called during libSystem initialization, so _loadedImages not allocated yet
        for (const LoadedImage& li : *_initialImages) {
            uint8_t permissions;
            if ( li.image()->containsAddress(addr, li.loadedAddress(), &permissions) ) {
                return li.image()->path();
            }
        }
        return nullptr;
    }

    // if address is in cache, do fast search of TEXT segments in cache
    __block const char* result = nullptr;
    if ( (_dyldCacheAddress != nullptr) && (addr > _dyldCacheAddress) ) {
        if ( addr < (void*)((uint8_t*)_dyldCacheAddress + _dyldCacheAddress->mappedSize()) ) {
            uint64_t cacheSlide       = (uint64_t)_dyldCacheAddress - _dyldCacheAddress->unslidLoadAddress();
            uint64_t unslidTargetAddr = (uint64_t)addr - cacheSlide;
            _dyldCacheAddress->forEachImageTextSegment(^(uint64_t loadAddressUnslid, uint64_t textSegmentSize, const unsigned char* dylibUUID, const char* installName, bool& stop) {
                if ( (loadAddressUnslid <= unslidTargetAddr) && (unslidTargetAddr < loadAddressUnslid+textSegmentSize) ) {
                    result = installName;
                    stop   = true;
                }
            });
            if ( result != nullptr )
                return result;
        }
    }

    // slow path - search image list
    infoForImageMappedAt(addr, ^(const LoadedImage& foundImage, uint8_t permissions) {
        result = foundImage.image()->path();
    });

    return result;
}

void AllImages::infoForImageMappedAt(const void* addr, void (^handler)(const LoadedImage& foundImage, uint8_t permissions)) const
{
    __block uint8_t permissions;
    if ( _initialImages != nullptr ) {
        // being called during libSystem initialization, so _loadedImages not allocated yet
        for (const LoadedImage& li : *_initialImages) {
            if ( li.image()->containsAddress(addr, li.loadedAddress(), &permissions) ) {
                handler(li, permissions);
                break;
            }
        }
        return;
    }

    withReadLock(^{
        for (const LoadedImage& li : _loadedImages) {
            if ( li.image()->containsAddress(addr, li.loadedAddress(), &permissions) ) {
                handler(li, permissions);
                break;
            }
        }
    });
}

bool AllImages::infoForImageMappedAt(const void* addr, const MachOLoaded** ml, uint64_t* textSize, const char** path) const
{
    if ( _initialImages != nullptr ) {
        // being called during libSystem initialization, so _loadedImages not allocated yet
        for (const LoadedImage& li : *_initialImages) {
            uint8_t permissions;
            if ( li.image()->containsAddress(addr, li.loadedAddress(), &permissions) ) {
                if ( ml != nullptr )
                    *ml = li.loadedAddress();
                if ( path != nullptr )
                    *path = li.image()->path();
                if ( textSize != nullptr ) {
                    *textSize = li.image()->textSize();
                }
                return true;
            }
        }
        return false;
    }

    // if address is in cache, do fast search of TEXT segments in cache
    __block bool result = false;
    if ( (_dyldCacheAddress != nullptr) && (addr > _dyldCacheAddress) ) {
        if ( addr < (void*)((uint8_t*)_dyldCacheAddress + _dyldCacheAddress->mappedSize()) ) {
            uint64_t cacheSlide       = (uint64_t)_dyldCacheAddress - _dyldCacheAddress->unslidLoadAddress();
            uint64_t unslidTargetAddr = (uint64_t)addr - cacheSlide;
            _dyldCacheAddress->forEachImageTextSegment(^(uint64_t loadAddressUnslid, uint64_t textSegmentSize, const unsigned char* dylibUUID, const char* installName, bool& stop) {
                if ( (loadAddressUnslid <= unslidTargetAddr) && (unslidTargetAddr < loadAddressUnslid+textSegmentSize) ) {
                    if ( ml != nullptr )
                        *ml = (MachOLoaded*)(loadAddressUnslid + cacheSlide);
                    if ( path != nullptr )
                        *path = installName;
                    if ( textSize != nullptr )
                        *textSize = textSegmentSize;
                    result = true;
                    stop   = true;
                }
            });
            if ( result )
                return result;
        }
    }

    // slow path - search image list
    infoForImageMappedAt(addr, ^(const LoadedImage& foundImage, uint8_t permissions) {
        if ( ml != nullptr )
            *ml = foundImage.loadedAddress();
        if ( path != nullptr )
            *path = foundImage.image()->path();
        if ( textSize != nullptr )
            *textSize = foundImage.image()->textSize();
        result = true;
    });

    return result;
}

// same as infoForImageMappedAt(), but only looks at images not in the dyld cache
void AllImages::infoForNonCachedImageMappedAt(const void* addr, void (^handler)(const LoadedImage& foundImage, uint8_t permissions)) const
{
    __block uint8_t permissions;
    if ( _initialImages != nullptr ) {
        // being called during libSystem initialization, so _loadedImages not allocated yet
        for (const LoadedImage& li : *_initialImages) {
            if ( !((MachOAnalyzer*)li.loadedAddress())->inDyldCache() ) {
                if ( li.image()->containsAddress(addr, li.loadedAddress(), &permissions) ) {
                    handler(li, permissions);
                    break;
                }
            }
        }
        return;
    }

    withReadLock(^{
        for (const LoadedImage& li : _loadedImages) {
            if ( !((MachOAnalyzer*)li.loadedAddress())->inDyldCache() ) {
                if ( li.image()->containsAddress(addr, li.loadedAddress(), &permissions) ) {
                    handler(li, permissions);
                    break;
                }
            }
        }
    });
}

bool AllImages::immutableMemory(const void* addr, size_t length) const
{
    // quick check to see if in shared cache
    if ( _dyldCacheAddress != nullptr ) {
        bool readOnly;
        if ( _dyldCacheAddress->inCache(addr, length, readOnly) ) {
            return readOnly;
        }
    }

    __block bool result = false;
    withReadLock(^() {
        // quick check to see if it is not in any non-cached image loaded
        if ( ((uintptr_t)addr < _lowestNonCached) || ((uintptr_t)addr+length > _highestNonCached) ) {
            result = false;
            return;
        }
        // slow walk through all images, only look at images not in dyld cache
        for (const LoadedImage& li : _loadedImages) {
            if ( !((MachOAnalyzer*)li.loadedAddress())->inDyldCache() ) {
                uint8_t permissions;
                if ( li.image()->containsAddress(addr, li.loadedAddress(), &permissions) ) {
                    result = ((permissions & VM_PROT_WRITE) == 0) && li.image()->neverUnload();
                    break;
                }
            }
        }
    });
    return result;
}

void AllImages::infoForImageWithLoadAddress(const MachOLoaded* mh, void (^handler)(const LoadedImage& foundImage)) const
{
    for (const LoadedImage& li : _loadedImages) {
        if ( li.loadedAddress() == mh ) {
            handler(li);
            break;
        }
    }
}

bool AllImages::findImageNum(closure::ImageNum imageNum, LoadedImage& foundImage) const
{
    if ( _initialImages != nullptr ) {
        // being called during libSystem initialization, so _loadedImages not allocated yet
        for (const LoadedImage& li : *_initialImages) {
            if ( li.image()->representsImageNum(imageNum) ) {
                foundImage = li;
                return true;
            }
        }
        return false;
    }

    for (const LoadedImage& li : _loadedImages) {
        if ( li.image()->representsImageNum(imageNum) ) {
            foundImage = li;
            return true;
        }
    }
    return false;
}

const MachOLoaded* AllImages::findDependent(const MachOLoaded* mh, uint32_t depIndex)
{
    __block const MachOLoaded* result = nullptr;
    withReadLock(^{
        for (const LoadedImage& li : _loadedImages) {
            if ( li.loadedAddress() == mh ) {
                closure::ImageNum depImageNum = li.image()->dependentImageNum(depIndex);
                LoadedImage depLi;
                if ( findImageNum(depImageNum, depLi) )
                    result = depLi.loadedAddress();
                break;
            }
        }
    });
    return result;
}

void AllImages::breadthFirstRecurseDependents(Array<closure::ImageNum>& visited, const LoadedImage& nodeLi, bool& stopped, void (^handler)(const LoadedImage& aLoadedImage, bool& stop)) const
{
    // call handler on all direct dependents (unless already visited)
    STACK_ALLOC_ARRAY(LoadedImage, dependentsToRecurse, 256);
    nodeLi.image()->forEachDependentImage(^(uint32_t depIndex, closure::Image::LinkKind kind, closure::ImageNum depImageNum, bool& depStop) {
        if ( kind == closure::Image::LinkKind::upward )
            return;
        if ( visited.contains(depImageNum) )
            return;
        LoadedImage depLi;
        if ( !findImageNum(depImageNum, depLi) )
            return;
        handler(depLi, depStop);
        visited.push_back(depImageNum);
        if ( depStop ) {
            stopped = true;
            return;
        }
        dependentsToRecurse.push_back(depLi);
    });
    if ( stopped )
        return;

    // recurse on all dependents just visited
    for (LoadedImage& depLi : dependentsToRecurse) {
        breadthFirstRecurseDependents(visited, depLi, stopped, handler);
        if ( stopped )
            break;
    }
}

void AllImages::visitDependentsTopDown(const LoadedImage& start, void (^handler)(const LoadedImage& aLoadedImage, bool& stop)) const
{
    STACK_ALLOC_ARRAY(closure::ImageNum, visited, count());
    bool stop = false;
    handler(start, stop);
    if ( stop )
        return;
    visited.push_back(start.image()->imageNum());
    breadthFirstRecurseDependents(visited, start, stop, handler);
}

const MachOLoaded* AllImages::mainExecutable() const
{
    assert(_programVars != nullptr);
    return (const MachOLoaded*)_programVars->mh;
}

const closure::Image* AllImages::mainExecutableImage() const
{
    assert(_mainClosure != nullptr);
    return _mainClosure->images()->imageForNum(_mainClosure->topImage());
}

void AllImages::setMainPath(const char* path)
{
    _mainExeOverridePath = path;
}

const char* AllImages::imagePath(const closure::Image* image) const
{
#if __IPHONE_OS_VERSION_MIN_REQUIRED
    // on iOS and watchOS, apps may be moved on device after closure built
    if ( _mainExeOverridePath != nullptr ) {
        if ( image == mainExecutableImage() )
            return _mainExeOverridePath;
    }
#endif
    return image->path();
}

dyld_platform_t AllImages::platform() const {
    return _platform;
}

void AllImages::incRefCount(const mach_header* loadAddress)
{
    for (DlopenCount& entry : _dlopenRefCounts) {
        if ( entry.loadAddress == loadAddress ) {
            // found existing DlopenCount entry, bump counter
            entry.refCount += 1;
            return;
        }
    }

    // no existing DlopenCount, add new one
    _dlopenRefCounts.push_back({ loadAddress, 1 });
}

void AllImages::decRefCount(const mach_header* loadAddress)
{
    bool doCollect = false;
    for (DlopenCount& entry : _dlopenRefCounts) {
        if ( entry.loadAddress == loadAddress ) {
            // found existing DlopenCount entry, decrement counter
            entry.refCount -= 1;
            if ( entry.refCount == 0 ) {
                _dlopenRefCounts.erase(entry);
                doCollect = true;
            }
            break;
        }
    }

    if ( doCollect )
        garbageCollectImages();
}

#if __MAC_OS_X_VERSION_MIN_REQUIRED
NSObjectFileImage AllImages::addNSObjectFileImage(const OFIInfo& image)
{
    __block uint64_t imageNum = 0;
    withWriteLock(^{
        imageNum = ++_nextObjectFileImageNum;
        _objectFileImages.push_back(image);
        _objectFileImages.back().imageNum = imageNum;
    });
    return (NSObjectFileImage)imageNum;
}

bool AllImages::forNSObjectFileImage(NSObjectFileImage imageHandle,
                                     void (^handler)(OFIInfo& image)) {
    uint64_t imageNum = (uint64_t)imageHandle;
    bool __block foundImage = false;
    withReadLock(^{
        for (OFIInfo& ofi : _objectFileImages) {
            if ( ofi.imageNum == imageNum ) {
                handler(ofi);
                foundImage = true;
                break;
            }
        }
    });
    return foundImage;
}

void AllImages::removeNSObjectFileImage(NSObjectFileImage imageHandle)
{
    uint64_t imageNum = (uint64_t)imageHandle;
    withWriteLock(^{
        for (OFIInfo& ofi : _objectFileImages) {
            if ( ofi.imageNum == imageNum ) {
                _objectFileImages.erase(ofi);
                return;
            }
        }
    });
}
#endif

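// Reaper implements the mark phase of the mark-and-sweep collection described ahead of
// garbageCollectImages() below: directly dlopen()ed images are the roots, their dependents
// are iteratively marked in-use, and whatever remains unmarked is dead and gets finalized.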
class VIS_HIDDEN Reaper
{
public:
    struct ImageAndUse
    {
        const LoadedImage* li;
        bool               inUse;
    };
                Reaper(Array<ImageAndUse>& unloadables, AllImages*);
    void        garbageCollect();
    void        finalizeDeadImages();

private:
    void        markDirectlyDlopenedImagesAsUsed();
    void        markDependentOfInUseImages();
    void        markDependentsOf(const LoadedImage*);
    uint32_t    inUseCount();
    void        dump(const char* msg);

    Array<ImageAndUse>& _unloadables;
    AllImages*          _allImages;
    uint32_t            _deadCount;
};

Reaper::Reaper(Array<ImageAndUse>& unloadables, AllImages* all)
 : _unloadables(unloadables), _allImages(all), _deadCount(0)
{
}

void Reaper::markDirectlyDlopenedImagesAsUsed()
{
    for (AllImages::DlopenCount& entry : _allImages->_dlopenRefCounts) {
        if ( entry.refCount != 0 ) {
            for (ImageAndUse& iu : _unloadables) {
                if ( iu.li->loadedAddress() == entry.loadAddress ) {
                    iu.inUse = true;
                    break;
                }
            }
        }
    }
}

uint32_t Reaper::inUseCount()
{
    uint32_t count = 0;
    for (ImageAndUse& iu : _unloadables) {
        if ( iu.inUse )
            ++count;
    }
    return count;
}

void Reaper::markDependentsOf(const LoadedImage* li)
{
    li->image()->forEachDependentImage(^(uint32_t depIndex, closure::Image::LinkKind kind, closure::ImageNum depImageNum, bool& stop) {
        for (ImageAndUse& iu : _unloadables) {
            if ( !iu.inUse && iu.li->image()->representsImageNum(depImageNum) ) {
                iu.inUse = true;
                break;
            }
        }
    });
}

void Reaper::markDependentOfInUseImages()
{
    for (ImageAndUse& iu : _unloadables) {
        if ( iu.inUse )
            markDependentsOf(iu.li);
    }
}

void Reaper::dump(const char* msg)
{
    //for (ImageAndUse& iu : _unloadables) {
    //    log("  in-used=%d  %s\n", iu.inUse, iu.li->image()->path());
    //}
}

void Reaper::garbageCollect()
{
    //dump("all unloadable images");

    // mark all dylibs directly dlopen'ed as in use
    markDirectlyDlopenedImagesAsUsed();

    //dump("directly dlopen()'ed marked");

    // iteratively mark dependents of in-use dylibs as in-use until in-use count stops changing
    uint32_t lastCount = inUseCount();
    bool countChanged = false;
    do {
        markDependentOfInUseImages();
        //dump("dependents marked");
        uint32_t newCount = inUseCount();
        countChanged = (newCount != lastCount);
        lastCount = newCount;
    } while (countChanged);

    _deadCount = (uint32_t)_unloadables.count() - inUseCount();
}

void Reaper::finalizeDeadImages()
{
    if ( _deadCount == 0 )
        return;
    __cxa_range_t ranges[_deadCount];
    __cxa_range_t* rangesArray = ranges;
    __block unsigned int rangesCount = 0;
    for (ImageAndUse& iu : _unloadables) {
        if ( iu.inUse )
            continue;
        iu.li->image()->forEachDiskSegment(^(uint32_t segIndex, uint32_t fileOffset, uint32_t fileSize, int64_t vmOffset, uint64_t vmSize, uint8_t permissions, bool& stop) {
            if ( permissions & VM_PROT_EXECUTE ) {
                rangesArray[rangesCount].addr   = (char*)(iu.li->loadedAddress()) + vmOffset;
                rangesArray[rangesCount].length = (size_t)vmSize;
                ++rangesCount;
            }
        });
    }
    __cxa_finalize_ranges(ranges, rangesCount);
}

// This function is called at the end of dlclose() when the reference count goes to zero.
// The dylib being unloaded may have brought in other dependent dylibs when it was loaded.
// Those dependent dylibs need to be unloaded, but only if they are not referenced by
// something else. We use a standard mark and sweep garbage collection.
//
// The tricky part is that when a dylib is unloaded it may have a termination function that
// can run and itself call dlclose() on yet another dylib. The problem is that this
// sort of garbage collection is not re-entrant. Instead a terminator's call to dlclose()
// which calls garbageCollectImages() will just set a flag to re-do the garbage collection
// when the current pass is done.
//
// Also note that this is done within the _loadedImages writer lock, so any dlopen/dlclose
// on other threads are blocked while this garbage collection runs.
//
void AllImages::garbageCollectImages()
{
    // if some other thread is currently GC'ing images, let other thread do the work
    int32_t newCount = OSAtomicIncrement32(&_gcCount);
    if ( newCount != 1 )
        return;

    do {
        STACK_ALLOC_ARRAY(Reaper::ImageAndUse, unloadables, _loadedImages.count());
        for (const LoadedImage& li : _loadedImages) {
            if ( !li.image()->neverUnload() /*&& !li.neverUnload()*/ ) {
                unloadables.push_back({&li, false});
                //fprintf(stderr, "unloadable[%lu] %p %s\n", unloadables.count(), li.loadedAddress(), li.image()->path());
            }
        }

        // make reaper object to do garbage collection and notifications
        Reaper reaper(unloadables, this);
        reaper.garbageCollect();

        // FIXME: we should sort dead images so higher level ones are terminated first

        // call cxa_finalize_ranges of dead images
        reaper.finalizeDeadImages();

        // FIXME: call static terminators of dead images
        // FIXME: DOF unregister

        //fprintf(stderr, "_loadedImages before GC removals:\n");
        //for (const LoadedImage& li : _loadedImages) {
        //    fprintf(stderr, "   loadAddr=%p, path=%s\n", li.loadedAddress(), li.image()->path());
        //}

        // make copy of LoadedImages we want to remove
        // because unloadables[] points into LoadedImage we are shrinking
        STACK_ALLOC_ARRAY(LoadedImage, unloadImages, _loadedImages.count());
        for (const Reaper::ImageAndUse& iu : unloadables) {
            if ( !iu.inUse )
                unloadImages.push_back(*iu.li);
        }
        // remove entries from _loadedImages
        if ( !unloadImages.empty() ) {
            removeImages(unloadImages);
        }

        //fprintf(stderr, "_loadedImages after GC removals:\n");
        //for (const LoadedImage& li : _loadedImages) {
        //    fprintf(stderr, "   loadAddr=%p, path=%s\n", li.loadedAddress(), li.image()->path());
        //}

        // if some other thread called GC during our work, redo GC on its behalf
        newCount = OSAtomicDecrement32(&_gcCount);
    } while (newCount > 0);
}

void AllImages::addLoadNotifier(NotifyFunc func)
{
    // callback about already loaded images
    for (const LoadedImage& li : _loadedImages) {
        dyld3::ScopedTimer timer(DBG_DYLD_TIMING_FUNC_FOR_ADD_IMAGE, (uint64_t)li.loadedAddress(), (uint64_t)func, 0);
        log_notifications("dyld: add notifier %p called with mh=%p\n", func, li.loadedAddress());
        if ( li.image()->inDyldCache() )
            func(li.loadedAddress(), (uintptr_t)_dyldCacheSlide);
        else
            func(li.loadedAddress(), li.loadedAddress()->getSlide());
    }

    // add to list of functions to call about future loads
    withNotifiersLock(^{
        _loadNotifiers.push_back(func);
    });
}

void AllImages::addUnloadNotifier(NotifyFunc func)
{
    // add to list of functions to call about future unloads
    withNotifiersLock(^{
        _unloadNotifiers.push_back(func);
    });
}

void AllImages::addLoadNotifier(LoadNotifyFunc func)
{
    // callback about already loaded images
    for (const LoadedImage& li : _loadedImages) {
        dyld3::ScopedTimer timer(DBG_DYLD_TIMING_FUNC_FOR_ADD_IMAGE, (uint64_t)li.loadedAddress(), (uint64_t)func, 0);
        log_notifications("dyld: add notifier %p called with mh=%p\n", func, li.loadedAddress());
        func(li.loadedAddress(), li.image()->path(), !li.image()->neverUnload());
    }

    // add to list of functions to call about future loads
    withNotifiersLock(^{
        _loadNotifiers2.push_back(func);
    });
}

void AllImages::setObjCNotifiers(_dyld_objc_notify_mapped map, _dyld_objc_notify_init init, _dyld_objc_notify_unmapped unmap)
{
    _objcNotifyMapped   = map;
    _objcNotifyInit     = init;
    _objcNotifyUnmapped = unmap;

    // callback about already loaded images
    uint32_t maxCount = count();
    STACK_ALLOC_ARRAY(const mach_header*, mhs,   maxCount);
    STACK_ALLOC_ARRAY(const char*,        paths, maxCount);
    // don't need _mutex here because this is called when process is still single threaded
    for (const LoadedImage& li : _loadedImages) {
        if ( li.image()->hasObjC() ) {
            paths.push_back(imagePath(li.image()));
            mhs.push_back(li.loadedAddress());
        }
    }
    if ( !mhs.empty() ) {
        (*map)((uint32_t)mhs.count(), &paths[0], &mhs[0]);
        if ( log_notifications("dyld: objc-mapped-notifier called with %ld images:\n", mhs.count()) ) {
            for (uintptr_t i=0; i < mhs.count(); ++i) {
                log_notifications("dyld: objc-mapped: %p %s\n", mhs[i], paths[i]);
            }
        }
    }
}
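// These three callbacks are the hooks the Objective-C runtime installs at startup
// (presumably reached through the _dyld_objc_notify_register() SPI). setObjCNotifiers()
// immediately replays the mapped notification for every already-loaded image that
// contains ObjC so the runtime does not miss images loaded before it registered.
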
void AllImages::applyInterposingToDyldCache(const closure::Closure* closure)
{
    dyld3::ScopedTimer timer(DBG_DYLD_TIMING_APPLY_INTERPOSING, 0, 0, 0);
    const uintptr_t                 cacheStart              = (uintptr_t)_dyldCacheAddress;
    __block closure::ImageNum       lastCachedDylibImageNum = 0;
    __block const closure::Image*   lastCachedDylibImage    = nullptr;
    __block bool                    suspendedAccounting     = false;
    closure->forEachPatchEntry(^(const closure::Closure::PatchEntry& entry) {
        if ( entry.overriddenDylibInCache != lastCachedDylibImageNum ) {
            lastCachedDylibImage    = closure::ImageArray::findImage(imagesArrays(), entry.overriddenDylibInCache);
            assert(lastCachedDylibImage != nullptr);
            lastCachedDylibImageNum = entry.overriddenDylibInCache;
        }
        if ( !suspendedAccounting ) {
            Loader::vmAccountingSetSuspended(true, log_fixups);
            suspendedAccounting = true;
        }
        uintptr_t newValue = 0;
        LoadedImage foundImage;
        switch ( entry.replacement.image.kind ) {
            case closure::Image::ResolvedSymbolTarget::kindImage:
                assert(findImageNum(entry.replacement.image.imageNum, foundImage));
                newValue = (uintptr_t)(foundImage.loadedAddress()) + (uintptr_t)entry.replacement.image.offset;
                break;
            case closure::Image::ResolvedSymbolTarget::kindSharedCache:
                newValue = (uintptr_t)_dyldCacheAddress + (uintptr_t)entry.replacement.sharedCache.offset;
                break;
            case closure::Image::ResolvedSymbolTarget::kindAbsolute:
                // this means the symbol was missing in the cache override dylib, so set any uses to NULL
                newValue = (uintptr_t)entry.replacement.absolute.value;
                break;
            default:
                assert(0 && "bad replacement kind");
        }
        lastCachedDylibImage->forEachPatchableUseOfExport(entry.exportCacheOffset, ^(closure::Image::PatchableExport::PatchLocation patchLocation) {
            uintptr_t* loc = (uintptr_t*)(cacheStart + patchLocation.cacheOffset);
#if __has_feature(ptrauth_calls)
            if ( patchLocation.authenticated ) {
                MachOLoaded::ChainedFixupPointerOnDisk fixupInfo;
                fixupInfo.authRebase.auth      = true;
                fixupInfo.authRebase.addrDiv   = patchLocation.usesAddressDiversity;
                fixupInfo.authRebase.diversity = patchLocation.discriminator;
                fixupInfo.authRebase.key       = patchLocation.key;
                *loc = fixupInfo.signPointer(loc, newValue + patchLocation.getAddend());
                log_fixups("dyld: cache fixup: *%p = %p (JOP: diversity 0x%04X, addr-div=%d, key=%s)\n",
                           loc, (void*)*loc, patchLocation.discriminator, patchLocation.usesAddressDiversity, patchLocation.keyName());
                return;
            }
#endif
            log_fixups("dyld: cache fixup: *%p = 0x%0lX (dyld cache patch)\n", loc, newValue + (uintptr_t)patchLocation.getAddend());
            *loc = newValue + (uintptr_t)patchLocation.getAddend();
        });
    });
    if ( suspendedAccounting )
        Loader::vmAccountingSetSuspended(false, log_fixups);
}

void AllImages::runStartupInitialzers()
{
    __block bool mainExecutableInitializerNeedsToRun = true;
    __block uint32_t imageIndex = 0;
    while ( mainExecutableInitializerNeedsToRun ) {
        __block const closure::Image* image = nullptr;
        withReadLock(^{
            image = _loadedImages[imageIndex].image();
            if ( _loadedImages[imageIndex].loadedAddress()->isMainExecutable() )
                mainExecutableInitializerNeedsToRun = false;
        });
        runInitialzersBottomUp(image);
        ++imageIndex;
    }
}

// Find image in _loadedImages which has ImageNum == num.
// Try indexHint first, if hint is wrong, update it, so next use is faster.
LoadedImage AllImages::findImageNum(closure::ImageNum num, uint32_t& indexHint)
{
    __block LoadedImage copy;
    withReadLock(^{
        if ( (indexHint >= _loadedImages.count()) || !_loadedImages[indexHint].image()->representsImageNum(num) ) {
            for (indexHint=0; indexHint < _loadedImages.count(); ++indexHint) {
                if ( _loadedImages[indexHint].image()->representsImageNum(num) )
                    break;
            }
            assert(indexHint < _loadedImages.count());
        }
        copy = _loadedImages[indexHint];
    });
    return copy;
}

// Change the state of the LoadedImage in _loadedImages which has ImageNum == num.
// Only change state if current state is expectedCurrentState (atomic swap).
bool AllImages::swapImageState(closure::ImageNum num, uint32_t& indexHint, LoadedImage::State expectedCurrentState, LoadedImage::State newState)
{
    __block bool result = false;
    withWriteLock(^{
        if ( (indexHint >= _loadedImages.count()) || !_loadedImages[indexHint].image()->representsImageNum(num) ) {
            for (indexHint=0; indexHint < _loadedImages.count(); ++indexHint) {
                if ( _loadedImages[indexHint].image()->representsImageNum(num) )
                    break;
            }
            assert(indexHint < _loadedImages.count());
        }
        if ( _loadedImages[indexHint].state() == expectedCurrentState ) {
            _loadedImages[indexHint].setState(newState);
            result = true;
        }
    });
    return result;
}

// dyld3 pre-builds the order initializers need to be run (bottom up) in a list in the closure.
// This method uses that list to run all initializers.
// Because an initializer may call dlopen() and/or create threads, the _loadedImages array
// may move under us. So, never keep a pointer into it. Always reference images by ImageNum
// and use the hint to make that faster in the case where the _loadedImages does not move.
void AllImages::runInitialzersBottomUp(const closure::Image* topImage)
{
    // walk closure specified initializer list, already ordered bottom up
    topImage->forEachImageToInitBefore(^(closure::ImageNum imageToInit, bool& stop) {
        // get copy of LoadedImage about imageToInit, but don't keep reference into _loadedImages, because it may move if initializers call dlopen()
        uint32_t indexHint = 0;
        LoadedImage loadedImageCopy = findImageNum(imageToInit, indexHint);
        // skip if the image is already inited, or in process of being inited (dependency cycle)
        if ( (loadedImageCopy.state() == LoadedImage::State::fixedUp) && swapImageState(imageToInit, indexHint, LoadedImage::State::fixedUp, LoadedImage::State::beingInited) ) {
            // tell objc to run any +load methods in image
            if ( (_objcNotifyInit != nullptr) && loadedImageCopy.image()->mayHavePlusLoads() ) {
                dyld3::ScopedTimer timer(DBG_DYLD_TIMING_OBJC_INIT, (uint64_t)loadedImageCopy.loadedAddress(), 0, 0);
                const char* path = imagePath(loadedImageCopy.image());
                log_notifications("dyld: objc-init-notifier called with mh=%p, path=%s\n", loadedImageCopy.loadedAddress(), path);
                (*_objcNotifyInit)(path, loadedImageCopy.loadedAddress());
            }

            // run all initializers in image
            runAllInitializersInImage(loadedImageCopy.image(), loadedImageCopy.loadedAddress());

            // advance state to inited
            swapImageState(imageToInit, indexHint, LoadedImage::State::beingInited, LoadedImage::State::inited);
        }
    });
}

void AllImages::runLibSystemInitializer(const LoadedImage& libSystem)
{
    // run all initializers in libSystem.dylib
    runAllInitializersInImage(libSystem.image(), libSystem.loadedAddress());

    // Note: during libSystem's initialization, libdyld_initializer() is called which copies _initialImages to _loadedImages

    // mark libSystem.dylib as being inited, so a later recursive-init will not re-run it
    for (LoadedImage& li : _loadedImages) {
        if ( li.loadedAddress() == libSystem.loadedAddress() ) {
            li.setState(LoadedImage::State::inited);
            break;
        }
    }
}

void AllImages::runAllInitializersInImage(const closure::Image* image, const MachOLoaded* ml)
{
    image->forEachInitializer(ml, ^(const void* func) {
        Initializer initFunc = (Initializer)func;
#if __has_feature(ptrauth_calls)
        initFunc = (Initializer)__builtin_ptrauth_sign_unauthenticated((void*)initFunc, 0, 0);
#endif
        {
            ScopedTimer(DBG_DYLD_TIMING_STATIC_INITIALIZER, (uint64_t)ml, (uint64_t)func, 0);
            initFunc(NXArgc, NXArgv, environ, appleParams, _programVars);
        }
        log_initializers("dyld: called initializer %p in %s\n", initFunc, image->path());
    });
}

const MachOLoaded* AllImages::dlopen(Diagnostics& diag, const char* path, bool rtldNoLoad, bool rtldLocal, bool rtldNoDelete, bool fromOFI, const void* callerAddress)
{
    // quick check if path is in shared cache and already loaded
    if ( _dyldCacheAddress != nullptr ) {
        uint32_t dyldCacheImageIndex;
        if ( _dyldCacheAddress->hasImagePath(path, dyldCacheImageIndex) ) {
            uint64_t mTime;
            uint64_t inode;
            const MachOLoaded* mh = (MachOLoaded*)_dyldCacheAddress->getIndexedImageEntry(dyldCacheImageIndex, mTime, inode);
            // Note: we do not need readLock because this is within global dlopen lock
            for (const LoadedImage& li : _loadedImages) {
                if ( li.loadedAddress() == mh ) {
                    return mh;
                }
            }
        }
    }

    __block closure::ImageNum callerImageNum = 0;
    STACK_ALLOC_ARRAY(LoadedImage, loadedList, 1024);
    for (const LoadedImage& li : _loadedImages) {
        loadedList.push_back(li);
        uint8_t permissions;
        if ( (callerImageNum == 0) && li.image()->containsAddress(callerAddress, li.loadedAddress(), &permissions) ) {
            callerImageNum = li.image()->imageNum();
        }
        //fprintf(stderr, "mh=%p, image=%p, imageNum=0x%04X, path=%s\n", li.loadedAddress(), li.image(), li.image()->imageNum(), li.image()->path());
    }
    uintptr_t alreadyLoadedCount = loadedList.count();

    closure::ImageNum topImageNum = 0;
    const closure::DlopenClosure* newClosure;

    // First try with closures from the shared cache permitted.
    // Then try again with forcing a new closure
    for (bool canUseSharedCacheClosure : { true, false }) {
        closure::FileSystemPhysical fileSystem;
        closure::ClosureBuilder::AtPath atPathHanding = (_allowAtPaths ? closure::ClosureBuilder::AtPath::all : closure::ClosureBuilder::AtPath::onlyInRPaths);
        closure::ClosureBuilder cb(_nextImageNum, fileSystem, _dyldCacheAddress, true, closure::gPathOverrides, atPathHanding);
        newClosure = cb.makeDlopenClosure(path, _mainClosure, loadedList, callerImageNum, rtldNoLoad, canUseSharedCacheClosure, &topImageNum);
        if ( newClosure == closure::ClosureBuilder::sRetryDlopenClosure ) {
            log_apis("   dlopen: closure builder needs to retry: %s\n", path);
            assert(canUseSharedCacheClosure);
            continue;
        }
        if ( (newClosure == nullptr) && (topImageNum == 0) ) {
            if ( cb.diagnostics().hasError())
                diag.error("%s", cb.diagnostics().errorMessage());
            else if ( !rtldNoLoad )
                diag.error("dlopen(): file not found: %s", path);
            return nullptr;
        }
        // save off next available ImageNum for use by next call to dlopen()
        _nextImageNum = cb.nextFreeImageNum();
        break;
    }

    if ( newClosure != nullptr ) {
        // if new closure contains an ImageArray, add it to list
        if ( const closure::ImageArray* newArray = newClosure->images() ) {
            appendToImagesArray(newArray);
        }
        log_apis("   dlopen: made closure: %p\n", newClosure);
    }

    // if already loaded, just bump refCount and return
    if ( (newClosure == nullptr) && (topImageNum != 0) ) {
        for (LoadedImage& li : _loadedImages) {
            if ( li.image()->imageNum() == topImageNum ) {
                // is already loaded
                const MachOLoaded* topLoadAddress = li.loadedAddress();
                if ( !li.image()->inDyldCache() )
                    incRefCount(topLoadAddress);
                log_apis("   dlopen: already loaded as '%s'\n", li.image()->path());
                // if previously opened with RTLD_LOCAL, but now opened with RTLD_GLOBAL, unhide it
                if ( !rtldLocal && li.hideFromFlatSearch() )
                    li.setHideFromFlatSearch(false);
                // if called with RTLD_NODELETE, mark it as never-unload
                if ( rtldNoDelete )
                    li.markLeaveMapped();
                return topLoadAddress;
            }
        }
    }

    // run loader to load all new images
    Loader loader(loadedList, _dyldCacheAddress, imagesArrays(), &dyld3::log_loads, &dyld3::log_segments, &dyld3::log_fixups, &dyld3::log_dofs);
    const closure::Image* topImage = closure::ImageArray::findImage(imagesArrays(), topImageNum);
    if ( newClosure == nullptr ) {
        if ( topImageNum < dyld3::closure::kLastDyldCacheImageNum )
            log_apis("   dlopen: using image in dyld shared cache %p\n", topImage);
        else
            log_apis("   dlopen: using pre-built dlopen closure %p\n", topImage);
    }
    uintptr_t topIndex = loadedList.count();
    LoadedImage topLoadedImage = LoadedImage::make(topImage);
    if ( rtldLocal && !topImage->inDyldCache() )
        topLoadedImage.setHideFromFlatSearch(true);
    if ( rtldNoDelete && !topImage->inDyldCache() )
        topLoadedImage.markLeaveMapped();
    loader.addImage(topLoadedImage);

    // recursively load all dependents and fill in allImages array
    loader.completeAllDependents(diag, topIndex);
    if ( diag.hasError() )
        return nullptr;
    loader.mapAndFixupAllImages(diag, _processDOFs, fromOFI, topIndex);
    if ( diag.hasError() )
        return nullptr;

    const MachOLoaded* topLoadAddress = loadedList[topIndex].loadedAddress();

    // bump dlopen refcount of image directly loaded
    if ( !topImage->inDyldCache() )
        incRefCount(topLoadAddress);

    // tell gAllImages about new images
    const uint32_t newImageCount = (uint32_t)(loadedList.count() - alreadyLoadedCount);
    addImages(loadedList.subArray(alreadyLoadedCount, newImageCount));

    // if closure adds images that override dyld cache, patch cache
    if ( newClosure != nullptr )
        applyInterposingToDyldCache(newClosure);

    runImageNotifiers(loadedList.subArray(alreadyLoadedCount, newImageCount));

    runInitialzersBottomUp(topImage);

    return topLoadAddress;
}

void AllImages::appendToImagesArray(const closure::ImageArray* newArray)
{
    _imagesArrays.push_back(newArray);
}

const Array<const closure::ImageArray*>& AllImages::imagesArrays()
{
    return _imagesArrays.array();
}

bool AllImages::isRestricted() const
{
    return !_allowEnvPaths;
}

} // namespace dyld3