// dyld3/AllImages.cpp (dyld-832.7.3)
/*
 * Copyright (c) 2017 Apple Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */


#include <stdint.h>
#include <fcntl.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/sysctl.h>
#include <mach/mach_time.h> // mach_absolute_time()
#include <libkern/OSAtomic.h>
#include <uuid/uuid.h>
#include <mach-o/dyld_images.h>
#include <libc_private.h>

#include <vector>
#include <algorithm>

#include "AllImages.h"
#include "libdyldEntryVector.h"
#include "Logging.h"
#include "Loading.h"
#include "Tracing.h"
#include "DyldSharedCache.h"
#include "PathOverrides.h"
#include "Closure.h"
#include "ClosureBuilder.h"
#include "ClosureFileSystemPhysical.h"
#include "RootsChecker.h"

#include "objc-shared-cache.h"

extern const char** appleParams;

// should be a header for these
struct __cxa_range_t {
    const void* addr;
    size_t      length;
};
extern "C" void __cxa_finalize_ranges(const __cxa_range_t ranges[], unsigned int count);

extern "C" int __cxa_atexit(void (*func)(void *), void* arg, void* dso);
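
// Illustrative sketch (not part of dyld): how the two entry points above relate.
// C++ static destructors and atexit-style cleanups are registered through
// __cxa_atexit(); when dyld unloads an image (see Reaper::finalizeDeadImages()
// below), it hands that image's executable address ranges to
// __cxa_finalize_ranges(), which runs just the handlers belonging to those ranges.
//
//     static void myCleanup(void* ctx) { /* release resources; hypothetical */ }
//     __cxa_atexit(&myCleanup, nullptr, /*dso*/ nullptr);
//
//     // at unload time, conceptually:
//     __cxa_range_t dead = { imageTextStart, imageTextSize };  // hypothetical values
//     __cxa_finalize_ranges(&dead, 1);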


VIS_HIDDEN bool gUseDyld3 = false;


namespace dyld3 {


/////////////////////  AllImages  ////////////////////////////


AllImages gAllImages;


void AllImages::init(const closure::LaunchClosure* closure, const DyldSharedCache* dyldCacheLoadAddress, const char* dyldCachePath,
                     const Array<LoadedImage>& initialImages)
{
    _mainClosure      = closure;
    _initialImages    = &initialImages;
    _dyldCacheAddress = dyldCacheLoadAddress;
    _dyldCachePath    = dyldCachePath;

    if ( _dyldCacheAddress ) {
        _dyldCacheSlide = (uint64_t)dyldCacheLoadAddress - dyldCacheLoadAddress->unslidLoadAddress();
        _imagesArrays.push_back(dyldCacheLoadAddress->cachedDylibsImageArray());
        if ( auto others = dyldCacheLoadAddress->otherOSImageArray() )
            _imagesArrays.push_back(others);
    }
    _imagesArrays.push_back(_mainClosure->images());

    // record first ImageNum to use for dlopen() calls
    _mainClosure->images()->forEachImage(^(const dyld3::closure::Image* image, bool& stop) {
        closure::ImageNum num = image->imageNum();
        if ( num >= _nextImageNum )
            _nextImageNum = num+1;
    });

    // Make temporary old image array, so libSystem initializers can be debugged
    STACK_ALLOC_ARRAY(dyld_image_info, oldDyldInfo, initialImages.count());
    for (const LoadedImage& li : initialImages) {
        oldDyldInfo.push_back({li.loadedAddress(), li.image()->path(), 0});
    }
    _oldAllImageInfos->infoArray      = &oldDyldInfo[0];
    _oldAllImageInfos->infoArrayCount = (uint32_t)oldDyldInfo.count();
    _oldAllImageInfos->notification(dyld_image_adding, _oldAllImageInfos->infoArrayCount, _oldAllImageInfos->infoArray);
    _oldAllImageInfos->infoArray      = nullptr;
    _oldAllImageInfos->infoArrayCount = 0;

    _processDOFs = Loader::dtraceUserProbesEnabled();
}

void AllImages::setProgramVars(ProgramVars* vars, bool keysOff, bool osBinariesOnly)
{
    _programVars = vars;
    _archs = &GradedArchs::forCurrentOS(keysOff, osBinariesOnly);
}

void AllImages::setLaunchMode(uint32_t flags)
{
    _launchMode = flags;
}

AllImages::MainFunc AllImages::getDriverkitMain()
{
    return _driverkitMain;
}

void AllImages::setDriverkitMain(MainFunc mainFunc)
{
    _driverkitMain = mainFunc;
}

void AllImages::setRestrictions(bool allowAtPaths, bool allowEnvPaths)
{
    _allowAtPaths  = allowAtPaths;
    _allowEnvPaths = allowEnvPaths;
}

void AllImages::setHasCacheOverrides(bool someCacheImageOverriden)
{
    _someImageOverridden = someCacheImageOverriden;
}

bool AllImages::hasCacheOverrides() const {
    return _someImageOverridden;
}

void AllImages::applyInitialImages()
{
    addImages(*_initialImages);
    runImageNotifiers(*_initialImages);
    runImageCallbacks(*_initialImages);
    _initialImages = nullptr;  // this was stack allocated
}

void AllImages::withReadLock(void (^work)()) const
{
#ifdef OS_UNFAIR_RECURSIVE_LOCK_INIT
    os_unfair_recursive_lock_lock(&_globalLock);
    work();
    os_unfair_recursive_lock_unlock(&_globalLock);
#else
    pthread_mutex_lock(&_globalLock);
    work();
    pthread_mutex_unlock(&_globalLock);
#endif
}

void AllImages::withWriteLock(void (^work)())
{
#ifdef OS_UNFAIR_RECURSIVE_LOCK_INIT
    os_unfair_recursive_lock_lock(&_globalLock);
    work();
    os_unfair_recursive_lock_unlock(&_globalLock);
#else
    pthread_mutex_lock(&_globalLock);
    work();
    pthread_mutex_unlock(&_globalLock);
#endif
}

void AllImages::withNotifiersLock(void (^work)()) const
{
#ifdef OS_UNFAIR_RECURSIVE_LOCK_INIT
    os_unfair_recursive_lock_lock(&_globalLock);
    work();
    os_unfair_recursive_lock_unlock(&_globalLock);
#else
    pthread_mutex_lock(&_globalLock);
    work();
    pthread_mutex_unlock(&_globalLock);
#endif
}
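
// A note on the lock choice above (informal, inferred from this file): all three
// helpers funnel through one recursive lock, so a callback running under
// withNotifiersLock() may re-enter dyld3 APIs that themselves take the lock.
// For example, a load notifier that calls gAllImages.imagePathByIndex(0), which
// uses withReadLock(), still makes progress; a non-recursive lock would deadlock:
//
//     gAllImages.withNotifiersLock(^{
//         const char* p = gAllImages.imagePathByIndex(0);  // re-acquires the same lock
//         (void)p;
//     });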

void AllImages::mirrorToOldAllImageInfos()
{
    withReadLock(^(){
        // set infoArray to NULL to denote it is in-use
        _oldAllImageInfos->infoArray = nullptr;

        // if array not large enough, re-alloc it
        uint32_t imageCount = (uint32_t)_loadedImages.count();
        if ( _oldArrayAllocCount < imageCount ) {
            uint32_t newAllocCount    = imageCount + 16;
            dyld_image_info* newArray = (dyld_image_info*)::malloc(sizeof(dyld_image_info)*newAllocCount);
            if ( _oldAllImageArray != nullptr ) {
                ::memcpy(newArray, _oldAllImageArray, sizeof(dyld_image_info)*_oldAllImageInfos->infoArrayCount);
                ::free(_oldAllImageArray);
            }
            _oldAllImageArray   = newArray;
            _oldArrayAllocCount = newAllocCount;
        }

        // fill out array to mirror current image list
        int index = 0;
        for (const LoadedImage& li : _loadedImages) {
            _oldAllImageArray[index].imageLoadAddress = li.loadedAddress();
            _oldAllImageArray[index].imageFilePath    = imagePath(li.image());
            _oldAllImageArray[index].imageFileModDate = 0;
            ++index;
        }

        // set infoArray back to base address of array (so other process can now read)
        _oldAllImageInfos->infoArrayCount           = imageCount;
        _oldAllImageInfos->infoArrayChangeTimestamp = mach_absolute_time();
        _oldAllImageInfos->infoArray                = _oldAllImageArray;

        // <rdar://problem/42668846> update UUID array if needed
        uint32_t nonCachedCount = 1; // always add dyld
        for (const LoadedImage& li : _loadedImages) {
            if ( _oldAllImageInfos->processDetachedFromSharedRegion || !li.loadedAddress()->inDyldCache())
                ++nonCachedCount;
        }
        if ( nonCachedCount != _oldAllImageInfos->uuidArrayCount ) {
            // set uuidArray to NULL to denote it is in-use
            _oldAllImageInfos->uuidArray = nullptr;
            // make sure allocation can hold all uuids
            if ( _oldUUIDAllocCount < nonCachedCount ) {
                uint32_t newAllocCount = (nonCachedCount + 3) & (-4); // round up to multiple of 4
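                // rounding sketch: adding 3 then masking with -4 (...11111100)
                // clears the low two bits, e.g. 5 -> 8, 8 -> 8, 9 -> 12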
                dyld_uuid_info* newArray = (dyld_uuid_info*)::malloc(sizeof(dyld_uuid_info)*newAllocCount);
                if ( _oldUUIDArray != nullptr )
                    ::free(_oldUUIDArray);
                _oldUUIDArray      = newArray;
                _oldUUIDAllocCount = newAllocCount;
            }
            // add dyld then all images not in dyld cache
            const MachOFile* dyldMF = (MachOFile*)_oldAllImageInfos->dyldImageLoadAddress;
            _oldUUIDArray[0].imageLoadAddress = dyldMF;
            dyldMF->getUuid(_oldUUIDArray[0].imageUUID);
            index = 1;
            for (const LoadedImage& li : _loadedImages) {
                if ( _oldAllImageInfos->processDetachedFromSharedRegion || !li.loadedAddress()->inDyldCache() ) {
                    _oldUUIDArray[index].imageLoadAddress = li.loadedAddress();
                    li.loadedAddress()->getUuid(_oldUUIDArray[index].imageUUID);
                    ++index;
                }
            }
            // set uuidArray back to base address of array (so kernel can now read)
            _oldAllImageInfos->uuidArray      = _oldUUIDArray;
            _oldAllImageInfos->uuidArrayCount = nonCachedCount;
        }
    });
}
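
// Sketch of the consumer side of the protocol above (not dyld code): external
// readers such as debuggers or sampling tools must treat a NULL infoArray as
// "update in progress" and retry, e.g.:
//
//     // 'infos' obtained from the target process; names here are hypothetical
//     while ( infos->infoArray == nullptr )
//         ;  // writer is mid-update; re-read until the pointer is republished
//     uint32_t n = infos->infoArrayCount;
//     // snapshot n entries of infos->infoArray before the next mutation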

void AllImages::addImages(const Array<LoadedImage>& newImages)
{
    // copy into _loadedImages
    withWriteLock(^(){
        _loadedImages.append(newImages);
    });
}

void AllImages::addImmutableRange(uintptr_t start, uintptr_t end)
{
    //fprintf(stderr, "AllImages::addImmutableRange(0x%09lX, 0x%09lX)\n", start, end);
    // first look in existing range buckets for empty slot
    ImmutableRanges* lastRange = nullptr;
    for (ImmutableRanges* ranges = &_immutableRanges; ranges != nullptr; ranges = ranges->next.load(std::memory_order_acquire)) {
        lastRange = ranges;
        for (uintptr_t i=0; i < ranges->arraySize; ++i) {
            if ( ranges->array[i].start.load(std::memory_order_acquire) == 0 ) {
                // set 'end' before 'start' so readers always see consistent state
                ranges->array[i].end.store(end, std::memory_order_release);
                ranges->array[i].start.store(start, std::memory_order_release);
                return;
            }
        }
    }
    // if we got here, there are no empty slots, so add new ImmutableRanges
    const uintptr_t newSize = 15;  // allocation is 256 bytes on 64-bit processes
    ImmutableRanges* newRange = (ImmutableRanges*)calloc(offsetof(ImmutableRanges, array[newSize]), 1);
    newRange->arraySize = newSize;
    newRange->array[0].end.store(end, std::memory_order_release);
    newRange->array[0].start.store(start, std::memory_order_release);
    // tie into previous list last
    lastRange->next.store(newRange, std::memory_order_release);
}
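
// Why 'end' is stored before 'start' above (a sketch of the reader side; the
// real reader is immutableMemory() below): start==0 marks a slot as unused, so
// a slot only becomes visible once 'start' is published, by which point 'end'
// is already valid. With release stores here and acquire loads in the reader,
// a concurrent reader sees either no entry or a complete one:
//
//     uintptr_t s = slot.start.load(std::memory_order_acquire);
//     if ( s != 0 ) {
//         uintptr_t e = slot.end.load(std::memory_order_acquire);  // already set
//         // [s, e) is a fully formed immutable range
//     }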

void AllImages::runImageNotifiers(const Array<LoadedImage>& newImages)
{
    uint32_t count = (uint32_t)newImages.count();
    assert(count != 0);

    if ( _oldAllImageInfos != nullptr ) {
        // sync to old all image infos struct
        mirrorToOldAllImageInfos();

        // tell debugger about new images
        dyld_image_info oldDyldInfo[count];
        for (uint32_t i=0; i < count; ++i) {
            oldDyldInfo[i].imageLoadAddress = newImages[i].loadedAddress();
            oldDyldInfo[i].imageFilePath    = imagePath(newImages[i].image());
            oldDyldInfo[i].imageFileModDate = 0;
        }
        _oldAllImageInfos->notification(dyld_image_adding, count, oldDyldInfo);
    }

    // if any image not in the shared cache was added, recompute bounds
    for (const LoadedImage& li : newImages) {
        if ( !((MachOAnalyzer*)li.loadedAddress())->inDyldCache() ) {
            recomputeBounds();
            break;
        }
    }

    // update immutable ranges
    for (const LoadedImage& li : newImages) {
        if ( !li.image()->inDyldCache() && li.image()->neverUnload() ) {
            uintptr_t baseAddr = (uintptr_t)li.loadedAddress();
            li.image()->forEachDiskSegment(^(uint32_t segIndex, uint32_t fileOffset, uint32_t fileSize, int64_t vmOffset, uint64_t vmSize, uint8_t permissions, bool laterReadOnly, bool& stop) {
                if ( (permissions & (VM_PROT_READ|VM_PROT_WRITE)) == VM_PROT_READ ) {
                    addImmutableRange(baseAddr + (uintptr_t)vmOffset, (uintptr_t)(baseAddr + vmOffset + vmSize));
                }
            });
        }
    }

    // log loads
    for (const LoadedImage& li : newImages) {
        const char* path = imagePath(li.image());
        uuid_t imageUUID;
        if ( li.image()->getUuid(imageUUID)) {
            uuid_string_t imageUUIDStr;
            uuid_unparse_upper(imageUUID, imageUUIDStr);
            log_loads("dyld: <%s> %s\n", imageUUIDStr, path);
        }
        else {
            log_loads("dyld: %s\n", path);
        }
    }

    // call kdebug trace for each image
    if (kdebug_is_enabled(KDBG_CODE(DBG_DYLD, DBG_DYLD_UUID, DBG_DYLD_UUID_MAP_A))) {
        for (const LoadedImage& li : newImages) {
            const closure::Image* image = li.image();
            struct stat stat_buf;
            const char* path = imagePath(image);
            uuid_t      uuid;
            image->getUuid(uuid);
            fsid_t      fsid    = {{ 0, 0 }};
            fsobj_id_t  fsobjid = { 0, 0 };
            if ( !li.loadedAddress()->inDyldCache() && (dyld3::stat(path, &stat_buf) == 0) ) {
                fsobjid = *(fsobj_id_t*)&stat_buf.st_ino;
                fsid    = {{ stat_buf.st_dev, 0 }};
            }
            kdebug_trace_dyld_image(DBG_DYLD_UUID_MAP_A, path, &uuid, fsobjid, fsid, li.loadedAddress());
        }
    }
}

void AllImages::runImageCallbacks(const Array<LoadedImage>& newImages)
{
    uint32_t count = (uint32_t)newImages.count();
    assert(count != 0);

    // call each _dyld_register_func_for_add_image function with each image
    withNotifiersLock(^{
        for (NotifyFunc func : _loadNotifiers) {
            for (const LoadedImage& li : newImages) {
                dyld3::ScopedTimer timer(DBG_DYLD_TIMING_FUNC_FOR_ADD_IMAGE, (uint64_t)li.loadedAddress(), (uint64_t)func, 0);
                log_notifications("dyld: add notifier %p called with mh=%p\n", func, li.loadedAddress());
                if ( li.image()->inDyldCache() )
                    func(li.loadedAddress(), (uintptr_t)_dyldCacheSlide);
                else
                    func(li.loadedAddress(), li.loadedAddress()->getSlide());
            }
        }
        for (LoadNotifyFunc func : _loadNotifiers2) {
            for (const LoadedImage& li : newImages) {
                dyld3::ScopedTimer timer(DBG_DYLD_TIMING_FUNC_FOR_ADD_IMAGE, (uint64_t)li.loadedAddress(), (uint64_t)func, 0);
                log_notifications("dyld: add notifier %p called with mh=%p\n", func, li.loadedAddress());
                if ( li.image()->inDyldCache() )
                    func(li.loadedAddress(), li.image()->path(), false);
                else
                    func(li.loadedAddress(), li.image()->path(), !li.image()->neverUnload());
            }
        }
        for (BulkLoadNotifier func : _loadBulkNotifiers) {
            const mach_header* mhs[count];
            const char*        paths[count];
            for (unsigned i=0; i < count; ++i) {
                mhs[i]   = newImages[i].loadedAddress();
                paths[i] = newImages[i].image()->path();
            }
            dyld3::ScopedTimer timer(DBG_DYLD_TIMING_FUNC_FOR_ADD_IMAGE, (uint64_t)mhs[0], (uint64_t)func, 0);
            log_notifications("dyld: add notifier %p called with %d images\n", func, count);
            func(count, mhs, paths);
        }
    });

    // call objc about images that use objc
    if ( _objcNotifyMapped != nullptr ) {
        const char*        pathsBuffer[count];
        const mach_header* mhBuffer[count];
        uint32_t           imagesWithObjC = 0;
        for (const LoadedImage& li : newImages) {
            const closure::Image* image = li.image();
            if ( image->hasObjC() ) {
                pathsBuffer[imagesWithObjC] = imagePath(image);
                mhBuffer[imagesWithObjC]    = li.loadedAddress();
                ++imagesWithObjC;
            }
        }
        if ( imagesWithObjC != 0 ) {
            dyld3::ScopedTimer timer(DBG_DYLD_TIMING_OBJC_MAP, 0, 0, 0);
            (*_objcNotifyMapped)(imagesWithObjC, pathsBuffer, mhBuffer);
            if ( log_notifications("dyld: objc-mapped-notifier called with %d images:\n", imagesWithObjC) ) {
                for (uint32_t i=0; i < imagesWithObjC; ++i) {
                    log_notifications("dyld: objc-mapped: %p %s\n", mhBuffer[i], pathsBuffer[i]);
                }
            }
        }
    }

#if !TARGET_OS_DRIVERKIT
    // FIXME: This may make more sense in runImageNotifiers, but the present order
    // is after callbacks. Can we safely move it?
    // notify any processes tracking loads in this process
    notifyMonitorLoads(newImages);
#endif
}
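
// For context, a sketch of the client side of the notifier lists used above,
// via the long-standing public API in <mach-o/dyld.h>:
//
//     static void onAddImage(const struct mach_header* mh, intptr_t slide) {
//         // called once per already-loaded image, then for each future load
//     }
//     // somewhere in client startup code:
//     _dyld_register_func_for_add_image(&onAddImage);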

void AllImages::removeImages(const Array<LoadedImage>& unloadImages)
{
    // call each _dyld_register_func_for_remove_image function with each image
    withNotifiersLock(^{
        for (NotifyFunc func : _unloadNotifiers) {
            for (const LoadedImage& li : unloadImages) {
                dyld3::ScopedTimer timer(DBG_DYLD_TIMING_FUNC_FOR_REMOVE_IMAGE, (uint64_t)li.loadedAddress(), (uint64_t)func, 0);
                log_notifications("dyld: remove notifier %p called with mh=%p\n", func, li.loadedAddress());
                if ( li.image()->inDyldCache() )
                    func(li.loadedAddress(), (uintptr_t)_dyldCacheSlide);
                else
                    func(li.loadedAddress(), li.loadedAddress()->getSlide());
            }
        }
    });

    // call objc about images going away
    if ( _objcNotifyUnmapped != nullptr ) {
        for (const LoadedImage& li : unloadImages) {
            if ( li.image()->hasObjC() ) {
                (*_objcNotifyUnmapped)(imagePath(li.image()), li.loadedAddress());
                log_notifications("dyld: objc-unmapped-notifier called with image %p %s\n", li.loadedAddress(), imagePath(li.image()));
            }
        }
    }

    // call kdebug trace for each image
    if (kdebug_is_enabled(KDBG_CODE(DBG_DYLD, DBG_DYLD_UUID, DBG_DYLD_UUID_MAP_A))) {
        for (const LoadedImage& li : unloadImages) {
            const closure::Image* image = li.image();
            struct stat stat_buf;
            const char* path = imagePath(image);
            uuid_t      uuid;
            image->getUuid(uuid);
            fsid_t      fsid    = {{ 0, 0 }};
            fsobj_id_t  fsobjid = { 0, 0 };
            if ( dyld3::stat(path, &stat_buf) == 0 ) {
                fsobjid = *(fsobj_id_t*)&stat_buf.st_ino;
                fsid    = {{ stat_buf.st_dev, 0 }};
            }
            kdebug_trace_dyld_image(DBG_DYLD_UUID_UNMAP_A, path, &uuid, fsobjid, fsid, li.loadedAddress());
        }
    }

    // remove each from _loadedImages
    withWriteLock(^(){
        for (const LoadedImage& uli : unloadImages) {
            for (LoadedImage& li : _loadedImages) {
                if ( uli.loadedAddress() == li.loadedAddress() ) {
                    _loadedImages.erase(li);
                    break;
                }
            }
        }
        recomputeBounds();
    });

    // sync to old all image infos struct
    mirrorToOldAllImageInfos();

    // tell debugger about removed images
    STACK_ALLOC_ARRAY(dyld_image_info, oldDyldInfo, unloadImages.count());
    for (const LoadedImage& li : unloadImages) {
        oldDyldInfo.push_back({li.loadedAddress(), li.image()->path(), 0});
    }
    _oldAllImageInfos->notification(dyld_image_removing, (uint32_t)oldDyldInfo.count(), &oldDyldInfo[0]);

    // notify any processes tracking loads in this process
    notifyMonitorUnloads(unloadImages);

    // finally, unmap images
    for (const LoadedImage& li : unloadImages) {
        if ( li.leaveMapped() ) {
            log_loads("dyld: unloaded but left mmapped %s\n", imagePath(li.image()));
        }
        else {
            // unmapImage() modifies parameter, so use copy
            LoadedImage copy = li;
            Loader::unmapImage(copy);
            log_loads("dyld: unloaded %s\n", imagePath(li.image()));
        }
    }
}

// must be called with writeLock held
void AllImages::recomputeBounds()
{
    _lowestNonCached  = UINTPTR_MAX;
    _highestNonCached = 0;
    for (const LoadedImage& li : _loadedImages) {
        const MachOLoaded* ml = li.loadedAddress();
        uintptr_t start = (uintptr_t)ml;
        if ( !((MachOAnalyzer*)ml)->inDyldCache() ) {
            if ( start < _lowestNonCached )
                _lowestNonCached = start;
            uintptr_t end = start + (uintptr_t)(li.image()->vmSizeToMap());
            if ( end > _highestNonCached )
                _highestNonCached = end;
        }
    }
}

uint32_t AllImages::count() const
{
    return (uint32_t)_loadedImages.count();
}

bool AllImages::dyldCacheHasPath(const char* path) const
{
    uint32_t dyldCacheImageIndex;
    if ( _dyldCacheAddress != nullptr )
        return _dyldCacheAddress->hasImagePath(path, dyldCacheImageIndex);
    return false;
}

const char* AllImages::imagePathByIndex(uint32_t index) const
{
    __block const char* result = nullptr;
    withReadLock(^{
        if ( index < _loadedImages.count() ) {
            result = imagePath(_loadedImages[index].image());
            return;
        }
    });
    return result;
}

const mach_header* AllImages::imageLoadAddressByIndex(uint32_t index) const
{
    __block const mach_header* result = nullptr;
    withReadLock(^{
        if ( index < _loadedImages.count() ) {
            result = _loadedImages[index].loadedAddress();
            return;
        }
    });
    return result;
}

bool AllImages::findImage(const mach_header* loadAddress, LoadedImage& foundImage) const
{
    __block bool result = false;
    withReadLock(^(){
        for (const LoadedImage& li : _loadedImages) {
            if ( li.loadedAddress() == loadAddress ) {
                foundImage = li;
                result = true;
                break;
            }
        }
    });
    return result;
}

void AllImages::forEachImage(void (^handler)(const LoadedImage& loadedImage, bool& stop)) const
{
    if ( _initialImages != nullptr ) {
        // being called during libSystem initialization, so _loadedImages not allocated yet
        bool stop = false;
        for (const LoadedImage& li : *_initialImages) {
            handler(li, stop);
            if ( stop )
                break;
        }
        return;
    }

    withReadLock(^{
        bool stop = false;
        for (const LoadedImage& li : _loadedImages) {
            handler(li, stop);
            if ( stop )
                break;
        }
    });
}


const char* AllImages::pathForImageMappedAt(const void* addr) const
{
    if ( _initialImages != nullptr ) {
        // being called during libSystem initialization, so _loadedImages not allocated yet
        for (const LoadedImage& li : *_initialImages) {
            uint8_t permissions;
            if ( li.image()->containsAddress(addr, li.loadedAddress(), &permissions) ) {
                return li.image()->path();
            }
        }
        return nullptr;
    }

    // if address is in cache, do fast search of TEXT segments in cache
    __block const char* result = nullptr;
    if ( (_dyldCacheAddress != nullptr) && (addr > _dyldCacheAddress) ) {
        if ( addr < (void*)((uint8_t*)_dyldCacheAddress+_dyldCacheAddress->mappedSize()) ) {
            uint64_t cacheSlide       = (uint64_t)_dyldCacheAddress - _dyldCacheAddress->unslidLoadAddress();
            uint64_t unslidTargetAddr = (uint64_t)addr - cacheSlide;
            _dyldCacheAddress->forEachImageTextSegment(^(uint64_t loadAddressUnslid, uint64_t textSegmentSize, const unsigned char* dylibUUID, const char* installName, bool& stop) {
                if ( (loadAddressUnslid <= unslidTargetAddr) && (unslidTargetAddr < loadAddressUnslid+textSegmentSize) ) {
                    result = installName;
                    stop = true;
                }
            });
            if ( result != nullptr )
                return result;
        }
    }

    // slow path - search image list
    infoForImageMappedAt(addr, ^(const LoadedImage& foundImage, uint8_t permissions) {
        result = foundImage.image()->path();
    });

    return result;
}

void AllImages::infoForImageMappedAt(const void* addr, void (^handler)(const LoadedImage& foundImage, uint8_t permissions)) const
{
    __block uint8_t permissions;
    if ( _initialImages != nullptr ) {
        // being called during libSystem initialization, so _loadedImages not allocated yet
        for (const LoadedImage& li : *_initialImages) {
            if ( li.image()->containsAddress(addr, li.loadedAddress(), &permissions) ) {
                handler(li, permissions);
                break;
            }
        }
        return;
    }

    withReadLock(^{
        for (const LoadedImage& li : _loadedImages) {
            if ( li.image()->containsAddress(addr, li.loadedAddress(), &permissions) ) {
                handler(li, permissions);
                break;
            }
        }
    });
}


bool AllImages::infoForImageMappedAt(const void* addr, const MachOLoaded** ml, uint64_t* textSize, const char** path) const
{
    if ( _initialImages != nullptr ) {
        // being called during libSystem initialization, so _loadedImages not allocated yet
        for (const LoadedImage& li : *_initialImages) {
            uint8_t permissions;
            if ( li.image()->containsAddress(addr, li.loadedAddress(), &permissions) ) {
                if ( ml != nullptr )
                    *ml = li.loadedAddress();
                if ( path != nullptr )
                    *path = li.image()->path();
                if ( textSize != nullptr ) {
                    *textSize = li.image()->textSize();
                }
                return true;
            }
        }
        return false;
    }

    // if address is in cache, do fast search of TEXT segments in cache
    __block bool result = false;
    if ( (_dyldCacheAddress != nullptr) && (addr > _dyldCacheAddress) ) {
        if ( addr < (void*)((uint8_t*)_dyldCacheAddress+_dyldCacheAddress->mappedSize()) ) {
            uint64_t cacheSlide       = (uint64_t)_dyldCacheAddress - _dyldCacheAddress->unslidLoadAddress();
            uint64_t unslidTargetAddr = (uint64_t)addr - cacheSlide;
            _dyldCacheAddress->forEachImageTextSegment(^(uint64_t loadAddressUnslid, uint64_t textSegmentSize, const unsigned char* dylibUUID, const char* installName, bool& stop) {
                if ( (loadAddressUnslid <= unslidTargetAddr) && (unslidTargetAddr < loadAddressUnslid+textSegmentSize) ) {
                    if ( ml != nullptr )
                        *ml = (MachOLoaded*)(loadAddressUnslid + cacheSlide);
                    if ( path != nullptr )
                        *path = installName;
                    if ( textSize != nullptr )
                        *textSize = textSegmentSize;
                    stop = true;
                    result = true;
                }
            });
            if ( result )
                return result;
            // in shared cache, but not in a TEXT segment, do slow search of all loaded cache images
            withReadLock(^{
                for (const LoadedImage& li : _loadedImages) {
                    if ( ((MachOAnalyzer*)li.loadedAddress())->inDyldCache() ) {
                        uint8_t permissions;
                        if ( li.image()->containsAddress(addr, li.loadedAddress(), &permissions) ) {
                            if ( ml != nullptr )
                                *ml = li.loadedAddress();
                            if ( path != nullptr )
                                *path = li.image()->path();
                            if ( textSize != nullptr )
                                *textSize = li.image()->textSize();
                            result = true;
                            break;
                        }
                    }
                }
            });
            return result;
        }
    }

    // address not in dyld cache, check each non-cache image
    infoForNonCachedImageMappedAt(addr, ^(const LoadedImage& foundImage, uint8_t permissions) {
        if ( ml != nullptr )
            *ml = foundImage.loadedAddress();
        if ( path != nullptr )
            *path = foundImage.image()->path();
        if ( textSize != nullptr )
            *textSize = foundImage.image()->textSize();
        result = true;
    });

    return result;
}

// same as infoForImageMappedAt(), but only looks at images not in the dyld cache
void AllImages::infoForNonCachedImageMappedAt(const void* addr, void (^handler)(const LoadedImage& foundImage, uint8_t permissions)) const
{
    __block uint8_t permissions;
    if ( _initialImages != nullptr ) {
        // being called during libSystem initialization, so _loadedImages not allocated yet
        for (const LoadedImage& li : *_initialImages) {
            if ( !((MachOAnalyzer*)li.loadedAddress())->inDyldCache() ) {
                if ( li.image()->containsAddress(addr, li.loadedAddress(), &permissions) ) {
                    handler(li, permissions);
                    break;
                }
            }
        }
        return;
    }

    withReadLock(^{
        for (const LoadedImage& li : _loadedImages) {
            if ( !((MachOAnalyzer*)li.loadedAddress())->inDyldCache() ) {
                if ( li.image()->containsAddress(addr, li.loadedAddress(), &permissions) ) {
                    handler(li, permissions);
                    break;
                }
            }
        }
    });
}

bool AllImages::immutableMemory(const void* addr, size_t length) const
{
    // check to see if in shared cache
    if ( _dyldCacheAddress != nullptr ) {
        bool readOnly;
        if ( _dyldCacheAddress->inCache(addr, length, readOnly) ) {
            return readOnly;
        }
    }

    // check to see if it is outside the range of any loaded image
    if ( ((uintptr_t)addr < _lowestNonCached) || ((uintptr_t)addr+length > _highestNonCached) ) {
        return false;
    }

    // check immutable ranges
    for (const ImmutableRanges* ranges = &_immutableRanges; ranges != nullptr; ranges = ranges->next.load(std::memory_order_acquire)) {
        for (uintptr_t i=0; i < ranges->arraySize; ++i) {
            if ( ranges->array[i].start.load(std::memory_order_acquire) == 0 )
                break; // no more entries in use
            if ( (ranges->array[i].start.load(std::memory_order_acquire) <= (uintptr_t)addr)
              && (ranges->array[i].end.load(std::memory_order_acquire) > ((uintptr_t)addr)+length) )
                return true;
        }
    }

    return false;
}
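
// Usage sketch (hypothetical caller): immutableMemory() lets dyld decide whether
// a pointer can be retained without copying, e.g. a string argument:
//
//     const char* keep(const char* s) {              // hypothetical helper
//         if ( gAllImages.immutableMemory(s, strlen(s)+1) )
//             return s;         // backed by a never-unloaded read-only mapping
//         return strdup(s);     // mutable or unloadable memory: must copy
//     }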


uintptr_t AllImages::resolveTarget(closure::Image::ResolvedSymbolTarget target) const
{
    switch ( target.sharedCache.kind ) {
        case closure::Image::ResolvedSymbolTarget::kindSharedCache:
            assert(_dyldCacheAddress != nullptr);
            return (uintptr_t)_dyldCacheAddress + (uintptr_t)target.sharedCache.offset;

        case closure::Image::ResolvedSymbolTarget::kindImage: {
            LoadedImage info;
            bool foundImage = findImageNum(target.image.imageNum, info);
            assert(foundImage);
            return (uintptr_t)(info.loadedAddress()) + (uintptr_t)target.image.offset;
        }

        case closure::Image::ResolvedSymbolTarget::kindAbsolute:
            if ( target.absolute.value & (1ULL << 62) )
                return (uintptr_t)(target.absolute.value | 0xC000000000000000ULL);
            else
                return (uintptr_t)target.absolute.value;
    }
    assert(0 && "malformed ResolvedSymbolTarget");
    return 0;
}
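
// Worked example for the kindAbsolute case above: absolute values are stored in
// a truncated bitfield, so negative values (e.g. (uintptr_t)-1 for a missing
// weak symbol) need their high bits restored on the way out. Bit 62 is the
// sign probe:
//
//     stored value: 0x7FFFFFFFFFFFFFFF   (bit 62 set)
//     OR 0xC000000000000000  ->  0xFFFFFFFFFFFFFFFF == (uintptr_t)-1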

void* AllImages::interposeValue(void* value) const {
    if ( !_mainClosure->hasInterposings() )
        return value;

    __block void* replacementValue = nullptr;
    __block bool  foundReplacement = false;
    _mainClosure->forEachInterposingTuple(^(const closure::InterposingTuple& tuple, bool& stop) {
        void* stockPointer = (void*)resolveTarget(tuple.stockImplementation);
        if ( stockPointer == value) {
            replacementValue = (void*)resolveTarget(tuple.newImplementation);
            foundReplacement = true;
            stop = true;
        }
    });

    if ( foundReplacement )
        return replacementValue;

    return value;
}

void AllImages::infoForImageWithLoadAddress(const MachOLoaded* mh, void (^handler)(const LoadedImage& foundImage)) const
{
    withReadLock(^{
        for (const LoadedImage& li : _loadedImages) {
            if ( li.loadedAddress() == mh ) {
                handler(li);
                break;
            }
        }
    });
}

bool AllImages::findImageNum(closure::ImageNum imageNum, LoadedImage& foundImage) const
{
    if ( _initialImages != nullptr ) {
        // being called during libSystem initialization, so _loadedImages not allocated yet
        for (const LoadedImage& li : *_initialImages) {
            if ( li.image()->representsImageNum(imageNum) ) {
                foundImage = li;
                return true;
            }
        }
        return false;
    }

    bool result = false;
    for (const LoadedImage& li : _loadedImages) {
        if ( li.image()->representsImageNum(imageNum) ) {
            foundImage = li;
            result = true;
            break;
        }
    }

    return result;
}

const MachOLoaded* AllImages::findDependent(const MachOLoaded* mh, uint32_t depIndex)
{
    __block const MachOLoaded* result = nullptr;
    withReadLock(^{
        for (const LoadedImage& li : _loadedImages) {
            if ( li.loadedAddress() == mh ) {
                closure::ImageNum depImageNum = li.image()->dependentImageNum(depIndex);
                LoadedImage depLi;
                if ( findImageNum(depImageNum, depLi) )
                    result = depLi.loadedAddress();
                break;
            }
        }
    });
    return result;
}


void AllImages::breadthFirstRecurseDependents(Array<closure::ImageNum>& visited, const LoadedImage& nodeLi, bool& stopped, void (^handler)(const LoadedImage& aLoadedImage, bool& stop)) const
{
    // call handler on all direct dependents (unless already visited)
    STACK_ALLOC_ARRAY(LoadedImage, dependentsToRecurse, 256);
    nodeLi.image()->forEachDependentImage(^(uint32_t depIndex, closure::Image::LinkKind kind, closure::ImageNum depImageNum, bool& depStop) {
        if ( kind == closure::Image::LinkKind::upward )
            return;
        if ( visited.contains(depImageNum) )
            return;
        LoadedImage depLi;
        if ( !findImageNum(depImageNum, depLi) )
            return;
        handler(depLi, depStop);
        // <rdar://58466613> if there is an override of some dyld cache dylib, we need to store the override ImageNum in the visited set
        if ( depImageNum != depLi.image()->imageNum() ) {
            depImageNum = depLi.image()->imageNum();
            if ( visited.contains(depImageNum) )
                return;
        }
        visited.push_back(depImageNum);
        if ( depStop ) {
            stopped = true;
            return;
        }
        dependentsToRecurse.push_back(depLi);
    });
    if ( stopped )
        return;
    // recurse on all dependents just visited
    for (LoadedImage& depLi : dependentsToRecurse) {
        breadthFirstRecurseDependents(visited, depLi, stopped, handler);
    }
}

void AllImages::visitDependentsTopDown(const LoadedImage& start, void (^handler)(const LoadedImage& aLoadedImage, bool& stop)) const
{
    withReadLock(^{
        STACK_ALLOC_ARRAY(closure::ImageNum, visited, count());
        bool stop = false;
        handler(start, stop);
        if ( stop )
            return;
        visited.push_back(start.image()->imageNum());
        breadthFirstRecurseDependents(visited, start, stop, handler);
    });
}

const MachOLoaded* AllImages::mainExecutable() const
{
    assert(_programVars != nullptr);
    return (const MachOLoaded*)_programVars->mh;
}

const closure::Image* AllImages::mainExecutableImage() const
{
    assert(_mainClosure != nullptr);
    return _mainClosure->images()->imageForNum(_mainClosure->topImageNum());
}

void AllImages::setMainPath(const char* path)
{
    _mainExeOverridePath = path;
}

const char* AllImages::imagePath(const closure::Image* image) const
{
#if TARGET_OS_IPHONE
    // on iOS and watchOS, apps may be moved on device after the closure was built
    if ( _mainExeOverridePath != nullptr ) {
        if ( image == mainExecutableImage() )
            return _mainExeOverridePath;
    }
#endif
    return image->path();
}

dyld_platform_t AllImages::platform() const {
    return (dyld_platform_t)oldAllImageInfo()->platform;
}

const GradedArchs& AllImages::archs() const
{
    return *_archs;
}

void AllImages::incRefCount(const mach_header* loadAddress)
{
    for (DlopenCount& entry : _dlopenRefCounts) {
        if ( entry.loadAddress == loadAddress ) {
            // found existing DlopenCount entry, bump counter
            entry.refCount += 1;
            return;
        }
    }

    // no existing DlopenCount, add new one
    _dlopenRefCounts.push_back({ loadAddress, 1 });
}

void AllImages::decRefCount(const mach_header* loadAddress)
{
    bool doCollect = false;
    for (DlopenCount& entry : _dlopenRefCounts) {
        if ( entry.loadAddress == loadAddress ) {
            // found existing DlopenCount entry, decrement counter
            entry.refCount -= 1;
            if ( entry.refCount == 0 ) {
                _dlopenRefCounts.erase(entry);
                doCollect = true;
                break;
            }
            return;
        }
    }
    if ( doCollect )
        garbageCollectImages();
}


#if TARGET_OS_OSX
NSObjectFileImage AllImages::addNSObjectFileImage(const OFIInfo& image)
{
    __block uint64_t imageNum = 0;
    withWriteLock(^{
        imageNum = ++_nextObjectFileImageNum;
        _objectFileImages.push_back(image);
        _objectFileImages.back().imageNum = imageNum;
    });
    return (NSObjectFileImage)imageNum;
}

bool AllImages::forNSObjectFileImage(NSObjectFileImage imageHandle,
                                     void (^handler)(OFIInfo& image)) {
    uint64_t imageNum = (uint64_t)imageHandle;
    bool __block foundImage = false;
    withReadLock(^{
        for (OFIInfo& ofi : _objectFileImages) {
            if ( ofi.imageNum == imageNum ) {
                handler(ofi);
                foundImage = true;
                return;
            }
        }
    });

    return foundImage;
}

void AllImages::removeNSObjectFileImage(NSObjectFileImage imageHandle)
{
    uint64_t imageNum = (uint64_t)imageHandle;
    withWriteLock(^{
        for (OFIInfo& ofi : _objectFileImages) {
            if ( ofi.imageNum == imageNum ) {
                _objectFileImages.erase(ofi);
                return;
            }
        }
    });
}
#endif


class VIS_HIDDEN Reaper
{
public:
    struct ImageAndUse
    {
        const LoadedImage* li;
        bool               inUse;
    };
    Reaper(Array<ImageAndUse>& unloadables, AllImages*);
    void garbageCollect();
    void finalizeDeadImages();

    static void runTerminators(const LoadedImage& li);
private:

    void     markDirectlyDlopenedImagesAsUsed();
    void     markDependentOfInUseImages();
    void     markDependentsOf(const LoadedImage*);
    uint32_t inUseCount();
    void     dump(const char* msg);

    Array<ImageAndUse>& _unloadables;
    AllImages*          _allImages;
    uint32_t            _deadCount;
};

Reaper::Reaper(Array<ImageAndUse>& unloadables, AllImages* all)
 : _unloadables(unloadables), _allImages(all), _deadCount(0)
{
}

void Reaper::markDirectlyDlopenedImagesAsUsed()
{
    for (AllImages::DlopenCount& entry : _allImages->_dlopenRefCounts) {
        if ( entry.refCount != 0 ) {
            for (ImageAndUse& iu : _unloadables) {
                if ( iu.li->loadedAddress() == entry.loadAddress ) {
                    iu.inUse = true;
                    break;
                }
            }
        }
    }
}

uint32_t Reaper::inUseCount()
{
    uint32_t count = 0;
    for (ImageAndUse& iu : _unloadables) {
        if ( iu.inUse )
            ++count;
    }
    return count;
}

void Reaper::markDependentsOf(const LoadedImage* li)
{
    li->image()->forEachDependentImage(^(uint32_t depIndex, closure::Image::LinkKind kind, closure::ImageNum depImageNum, bool& stop) {
        for (ImageAndUse& iu : _unloadables) {
            if ( !iu.inUse && iu.li->image()->representsImageNum(depImageNum) ) {
                iu.inUse = true;
                break;
            }
        }
    });
}

void Reaper::markDependentOfInUseImages()
{
    for (ImageAndUse& iu : _unloadables) {
        if ( iu.inUse )
            markDependentsOf(iu.li);
    }
}

void Reaper::dump(const char* msg)
{
    //log("%s:\n", msg);
    //for (ImageAndUse& iu : _unloadables) {
    //    log("  in-use=%d  %s\n", iu.inUse, iu.li->image()->path());
    //}
}

void Reaper::garbageCollect()
{
    //dump("all unloadable images");

    // mark all dylibs directly dlopen'ed as in use
    markDirectlyDlopenedImagesAsUsed();

    //dump("directly dlopen()'ed marked");

    // iteratively mark dependents of in-use dylibs as in-use until in-use count stops changing
    uint32_t lastCount    = inUseCount();
    bool     countChanged = false;
    do {
        markDependentOfInUseImages();
        //dump("dependents marked");
        uint32_t newCount = inUseCount();
        countChanged = (newCount != lastCount);
        lastCount = newCount;
    } while (countChanged);

    _deadCount = (uint32_t)_unloadables.count() - inUseCount();
}

void Reaper::finalizeDeadImages()
{
    if ( _deadCount == 0 )
        return;
    STACK_ALLOC_OVERFLOW_SAFE_ARRAY(__cxa_range_t, ranges, _deadCount);
    for (ImageAndUse& iu : _unloadables) {
        if ( iu.inUse )
            continue;
        runTerminators(*iu.li);
        iu.li->image()->forEachDiskSegment(^(uint32_t segIndex, uint32_t fileOffset, uint32_t fileSize, int64_t vmOffset, uint64_t vmSize, uint8_t permissions, bool laterReadOnly, bool& stop) {
            if ( permissions & VM_PROT_EXECUTE ) {
                __cxa_range_t range;
                range.addr   = (char*)(iu.li->loadedAddress()) + vmOffset;
                range.length = (size_t)vmSize;
                ranges.push_back(range);
            }
        });
    }
    __cxa_finalize_ranges(ranges.begin(), (uint32_t)ranges.count());
}

void Reaper::runTerminators(const LoadedImage& li)
{
    if ( li.image()->hasTerminators() ) {
        typedef void (*Terminator)();
        li.image()->forEachTerminator(li.loadedAddress(), ^(const void* terminator) {
            Terminator termFunc = (Terminator)terminator;
#if __has_feature(ptrauth_calls)
            termFunc = (Terminator)__builtin_ptrauth_sign_unauthenticated((void*)termFunc, 0, 0);
#endif
            termFunc();
            log_initializers("dyld: called static terminator %p in %s\n", termFunc, li.image()->path());
        });
    }
}

void AllImages::runAllStaticTerminators()
{
    // We want to run terminators in reverse chronological order of initializing
    // Note: initialLoadCount may be larger than what was actually loaded
    const uint32_t currentCount     = (uint32_t)_loadedImages.count();
    const uint32_t initialLoadCount = std::min(_mainClosure->initialLoadCount(), currentCount);

    // first run static terminators of anything dlopen()ed
    for (uint32_t i=currentCount-1; i >= initialLoadCount; --i) {
        Reaper::runTerminators(_loadedImages[i]);
    }

    // next run terminators of statically loaded images; they were initialized in
    // reverse of this loader order, so walking forward runs terminators in
    // reverse initialization order
    for (uint32_t i=0; i < initialLoadCount; ++i) {
        Reaper::runTerminators(_loadedImages[i]);
    }
}


// This function is called at the end of dlclose() when the reference count goes to zero.
// The dylib being unloaded may have brought in other dependent dylibs when it was loaded.
// Those dependent dylibs need to be unloaded, but only if they are not referenced by
// something else. We use a standard mark and sweep garbage collection.
//
// The tricky part is that when a dylib is unloaded it may have a termination function that
// can run and itself call dlclose() on yet another dylib. The problem is that this
// sort of garbage collection is not re-entrant. Instead a terminator's call to dlclose()
// which calls garbageCollectImages() will just set a flag to re-do the garbage collection
// when the current pass is done.
//
// Also note that this is done within the _loadedImages writer lock, so any dlopen/dlclose
// on other threads are blocked while this garbage collection runs.
//
void AllImages::garbageCollectImages()
{
    // if some other thread is currently GC'ing images, let other thread do the work
    int32_t newCount = OSAtomicIncrement32(&_gcCount);
    if ( newCount != 1 )
        return;

    do {
        STACK_ALLOC_ARRAY(Reaper::ImageAndUse, unloadables, _loadedImages.count());
        withReadLock(^{
            for (const LoadedImage& li : _loadedImages) {
                if ( !li.image()->neverUnload() /*&& !li.neverUnload()*/ ) {
                    unloadables.push_back({&li, false});
                    //fprintf(stderr, "unloadable[%lu] %p %s\n", unloadables.count(), li.loadedAddress(), li.image()->path());
                }
            }
        });
        // make reaper object to do garbage collection and notifications
        Reaper reaper(unloadables, this);
        reaper.garbageCollect();

        // FIXME: we should sort dead images so higher level ones are terminated first

        // call cxa_finalize_ranges and static terminators of dead images
        reaper.finalizeDeadImages();

        // FIXME: DOF unregister

        //fprintf(stderr, "_loadedImages before GC removals:\n");
        //for (const LoadedImage& li : _loadedImages) {
        //    fprintf(stderr, "   loadAddr=%p, path=%s\n", li.loadedAddress(), li.image()->path());
        //}

        // make copy of LoadedImages we want to remove
        // because unloadables[] points into LoadedImage we are shrinking
        STACK_ALLOC_ARRAY(LoadedImage, unloadImages, _loadedImages.count());
        for (const Reaper::ImageAndUse& iu : unloadables) {
            if ( !iu.inUse )
                unloadImages.push_back(*iu.li);
        }
        // remove entries from _loadedImages
        if ( !unloadImages.empty() ) {
            removeImages(unloadImages);

            //fprintf(stderr, "_loadedImages after GC removals:\n");
            //for (const LoadedImage& li : _loadedImages) {
            //    fprintf(stderr, "   loadAddr=%p, path=%s\n", li.loadedAddress(), li.image()->path());
            //}
        }

        // if some other thread called GC during our work, redo GC on its behalf
        newCount = OSAtomicDecrement32(&_gcCount);
    }
    while (newCount > 0);
}
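
// The _gcCount handshake above is a general pattern for coalescing re-entrant
// requests; a minimal standalone sketch (not dyld code):
//
//     static volatile int32_t pending = 0;
//     void requestGC() {
//         if ( OSAtomicIncrement32(&pending) != 1 )
//             return;               // a pass is running; it will loop for us
//         do {
//             runOnePass();         // hypothetical unit of work
//         } while ( OSAtomicDecrement32(&pending) > 0 );
//     }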


void AllImages::addLoadNotifier(NotifyFunc func)
{
    // callback about already loaded images
    withReadLock(^{
        for (const LoadedImage& li : _loadedImages) {
            dyld3::ScopedTimer timer(DBG_DYLD_TIMING_FUNC_FOR_ADD_IMAGE, (uint64_t)li.loadedAddress(), (uint64_t)func, 0);
            log_notifications("dyld: add notifier %p called with mh=%p\n", func, li.loadedAddress());
            if ( li.image()->inDyldCache() )
                func(li.loadedAddress(), (uintptr_t)_dyldCacheSlide);
            else
                func(li.loadedAddress(), li.loadedAddress()->getSlide());
        }
    });

    // add to list of functions to call about future loads
    withNotifiersLock(^{
        _loadNotifiers.push_back(func);
    });
}

void AllImages::addUnloadNotifier(NotifyFunc func)
{
    // add to list of functions to call about future unloads
    withNotifiersLock(^{
        _unloadNotifiers.push_back(func);
    });
}

void AllImages::addLoadNotifier(LoadNotifyFunc func)
{
    // callback about already loaded images
    withReadLock(^{
        for (const LoadedImage& li : _loadedImages) {
            dyld3::ScopedTimer timer(DBG_DYLD_TIMING_FUNC_FOR_ADD_IMAGE, (uint64_t)li.loadedAddress(), (uint64_t)func, 0);
            log_notifications("dyld: add notifier %p called with mh=%p\n", func, li.loadedAddress());
            func(li.loadedAddress(), li.image()->path(), !li.image()->neverUnload());
        }
    });

    // add to list of functions to call about future loads
    withNotifiersLock(^{
        _loadNotifiers2.push_back(func);
    });
}


void AllImages::addBulkLoadNotifier(BulkLoadNotifier func)
{
    // callback about already loaded images
    unsigned count = (unsigned)_loadedImages.count();
    const mach_header* mhs[count];
    const char*        paths[count];
    for (unsigned i=0; i < count; ++i) {
        mhs[i]   = _loadedImages[i].loadedAddress();
        paths[i] = _loadedImages[i].image()->path();
    }
    dyld3::ScopedTimer timer(DBG_DYLD_TIMING_FUNC_FOR_ADD_IMAGE, (uint64_t)mhs[0], (uint64_t)func, 0);
    log_notifications("dyld: add notifier %p called with %d images\n", func, count);
    func(count, mhs, paths);

    // add to list of functions to call about future loads
    withNotifiersLock(^{
        _loadBulkNotifiers.push_back(func);
    });
}

// Returns true if logs should be sent to stderr as well as syslog.
// Copied from objc which copied it from CFUtilities.c
static bool also_do_stderr(void)
{
    struct stat st;
    int ret = fstat(STDERR_FILENO, &st);
    if (ret < 0) return false;
    mode_t m = st.st_mode & S_IFMT;
    if (m == S_IFREG || m == S_IFSOCK || m == S_IFIFO || m == S_IFCHR) {
        return true;
    }
    return false;
}

// Print "message" to the console. Copied from objc.
static void _objc_syslog(const char* message)
{
    _simple_asl_log(ASL_LEVEL_ERR, NULL, message);

    if (also_do_stderr()) {
        write(STDERR_FILENO, message, strlen(message));
    }
}

void AllImages::setObjCNotifiers(_dyld_objc_notify_mapped map, _dyld_objc_notify_init init, _dyld_objc_notify_unmapped unmap)
{
    _objcNotifyMapped   = map;
    _objcNotifyInit     = init;
    _objcNotifyUnmapped = unmap;

    // We couldn't initialize the objc optimized closure data in init() as that needs malloc but runs before malloc initializes.
    // So let's grab the data now and set it up.

    // Pull out the objc selector hash table if we have one
    Array<closure::Image::ObjCSelectorImage> selectorImageNums;
    const closure::ObjCSelectorOpt*          selectorHashTable = nullptr;
    if (_mainClosure->selectorHashTable(selectorImageNums, selectorHashTable)) {
        _objcSelectorHashTable = selectorHashTable;
        for (closure::Image::ObjCSelectorImage selectorImage : selectorImageNums) {
            LoadedImage loadedImage;
            bool found = findImageNum(selectorImage.imageNum, loadedImage);
            assert(found);
            _objcSelectorHashTableImages.push_back( (uintptr_t)loadedImage.loadedAddress() + selectorImage.offset );
        }
    }

    // Pull out the objc class hash table if we have one
    Array<closure::Image::ObjCClassImage> classImageNums;
    const closure::ObjCClassOpt*          classHashTable    = nullptr;
    const closure::ObjCClassOpt*          protocolHashTable = nullptr;
    if (_mainClosure->classAndProtocolHashTables(classImageNums, classHashTable, protocolHashTable)) {
        _objcClassHashTable    = (const closure::ObjCClassOpt*)classHashTable;
        _objcProtocolHashTable = (const closure::ObjCClassOpt*)protocolHashTable;
        for (closure::Image::ObjCClassImage classImage : classImageNums) {
            LoadedImage loadedImage;
            bool found = findImageNum(classImage.imageNum, loadedImage);
            assert(found);
            uintptr_t loadAddress     = (uintptr_t)loadedImage.loadedAddress();
            uintptr_t nameBaseAddress = loadAddress + classImage.offsetOfClassNames;
            uintptr_t dataBaseAddress = loadAddress + classImage.offsetOfClasses;
            _objcClassHashTableImages.push_back({ nameBaseAddress, dataBaseAddress });
        }
    }

    _mainClosure->duplicateClassesHashTable(_objcClassDuplicatesHashTable);
    if ( _objcClassDuplicatesHashTable != nullptr ) {
        // If we have duplicates, those need the objc opt pointer to find dupes
        _dyldCacheObjCOpt = _dyldCacheAddress->objcOpt();
    }

    // ObjC would have issued warnings on duplicate classes. We've recorded those too.
    _mainClosure->forEachWarning(closure::Closure::Warning::duplicateObjCClass, ^(const char* warning, bool& stop) {
        Diagnostics diag;
        diag.error("objc[%d]: %s\n", getpid(), warning);
        _objc_syslog(diag.errorMessage());
    });

    // callback about already loaded images
    uint32_t maxCount = count();
    STACK_ALLOC_ARRAY(const mach_header*, mhs,   maxCount);
    STACK_ALLOC_ARRAY(const char*,        paths, maxCount);
    // don't need _mutex here because this is called when process is still single threaded
    for (const LoadedImage& li : _loadedImages) {
        if ( li.image()->hasObjC() ) {
            paths.push_back(imagePath(li.image()));
            mhs.push_back(li.loadedAddress());
        }
    }
    if ( !mhs.empty() ) {
        (*map)((uint32_t)mhs.count(), &paths[0], &mhs[0]);
        if ( log_notifications("dyld: objc-mapped-notifier called with %ld images:\n", mhs.count()) ) {
            for (uintptr_t i=0; i < mhs.count(); ++i) {
                log_notifications("dyld: objc-mapped: %p %s\n", mhs[i], paths[i]);
            }
        }
    }
}

void AllImages::applyInterposingToDyldCache(const closure::Closure* closure)
{
    dyld3::ScopedTimer timer(DBG_DYLD_TIMING_APPLY_INTERPOSING, 0, 0, 0);
    const uintptr_t               cacheStart              = (uintptr_t)_dyldCacheAddress;
    __block closure::ImageNum     lastCachedDylibImageNum = 0;
    __block const closure::Image* lastCachedDylibImage    = nullptr;
    __block bool                  suspendedAccounting     = false;
    closure->forEachPatchEntry(^(const closure::Closure::PatchEntry& entry) {
        if ( entry.overriddenDylibInCache != lastCachedDylibImageNum ) {
            lastCachedDylibImage = closure::ImageArray::findImage(imagesArrays(), entry.overriddenDylibInCache);
            assert(lastCachedDylibImage != nullptr);
            lastCachedDylibImageNum = entry.overriddenDylibInCache;
        }
        if ( !suspendedAccounting ) {
            Loader::vmAccountingSetSuspended(true, log_fixups);
            suspendedAccounting = true;
        }
        uintptr_t newValue = 0;
        LoadedImage foundImage;
        switch ( entry.replacement.image.kind ) {
            case closure::Image::ResolvedSymbolTarget::kindImage:
                if ( !findImageNum(entry.replacement.image.imageNum, foundImage) ) {
                    abort_report_np("cannot find replacement imageNum=0x%04X when patching cache to override imageNum=0x%04X\n", entry.replacement.image.imageNum, entry.overriddenDylibInCache);
                }
                newValue = (uintptr_t)(foundImage.loadedAddress()) + (uintptr_t)entry.replacement.image.offset;
                break;
            case closure::Image::ResolvedSymbolTarget::kindSharedCache:
                newValue = (uintptr_t)_dyldCacheAddress + (uintptr_t)entry.replacement.sharedCache.offset;
                break;
            case closure::Image::ResolvedSymbolTarget::kindAbsolute:
                // this means the symbol was missing in the cache override dylib, so set any uses to NULL
                newValue = (uintptr_t)entry.replacement.absolute.value;
                break;
            default:
                assert(0 && "bad replacement kind");
        }
        uint32_t lastCachedDylibImageIndex = lastCachedDylibImageNum - (uint32_t)_dyldCacheAddress->cachedDylibsImageArray()->startImageNum();
        _dyldCacheAddress->forEachPatchableUseOfExport(lastCachedDylibImageIndex,
                                                       entry.exportCacheOffset, ^(dyld_cache_patchable_location patchLocation) {
            uintptr_t* loc = (uintptr_t*)(cacheStart+patchLocation.cacheOffset);
#if __has_feature(ptrauth_calls)
            if ( patchLocation.authenticated ) {
                MachOLoaded::ChainedFixupPointerOnDisk fixupInfo;
                fixupInfo.arm64e.authRebase.auth      = true;
                fixupInfo.arm64e.authRebase.addrDiv   = patchLocation.usesAddressDiversity;
                fixupInfo.arm64e.authRebase.diversity = patchLocation.discriminator;
                fixupInfo.arm64e.authRebase.key       = patchLocation.key;
                *loc = fixupInfo.arm64e.signPointer(loc, newValue + DyldSharedCache::getAddend(patchLocation));
                log_fixups("dyld: cache fixup: *%p = %p (JOP: diversity 0x%04X, addr-div=%d, key=%s)\n",
                           loc, (void*)*loc, patchLocation.discriminator, patchLocation.usesAddressDiversity, DyldSharedCache::keyName(patchLocation));
                return;
            }
#endif
            log_fixups("dyld: cache fixup: *%p = 0x%0lX (dyld cache patch)\n", loc, newValue + (uintptr_t)DyldSharedCache::getAddend(patchLocation));
            *loc = newValue + (uintptr_t)DyldSharedCache::getAddend(patchLocation);
        });
    });
    if ( suspendedAccounting )
        Loader::vmAccountingSetSuspended(false, log_fixups);
}

void AllImages::runStartupInitialzers()
{
    __block bool     mainExecutableInitializerNeedsToRun = true;
    __block uint32_t imageIndex                          = 0;
    while ( mainExecutableInitializerNeedsToRun ) {
        __block const closure::Image* image = nullptr;
        withReadLock(^{
            image = _loadedImages[imageIndex].image();
            if ( _loadedImages[imageIndex].loadedAddress()->isMainExecutable() )
                mainExecutableInitializerNeedsToRun = false;
        });
        runInitialzersBottomUp(image);
        ++imageIndex;
    }
}


// Find image in _loadedImages which has ImageNum == num.
// Try indexHint first; if the hint is wrong, update it so the next use is faster.
LoadedImage AllImages::findImageNum(closure::ImageNum num, uint32_t& indexHint)
{
    __block LoadedImage copy;
    withReadLock(^{
        if ( (indexHint >= _loadedImages.count()) || !_loadedImages[indexHint].image()->representsImageNum(num) ) {
            indexHint = 0;
            for (indexHint=0; indexHint < _loadedImages.count(); ++indexHint) {
                if ( _loadedImages[indexHint].image()->representsImageNum(num) )
                    break;
            }
            assert(indexHint < _loadedImages.count());
        }
        copy = _loadedImages[indexHint];
    });
    return copy;
}


// Change the state of the LoadedImage in _loadedImages which has ImageNum == num.
// Only change state if current state is expectedCurrentState (atomic swap).
bool AllImages::swapImageState(closure::ImageNum num, uint32_t& indexHint, LoadedImage::State expectedCurrentState, LoadedImage::State newState)
{
    __block bool result = false;
    withWriteLock(^{
        if ( (indexHint >= _loadedImages.count()) || !_loadedImages[indexHint].image()->representsImageNum(num) ) {
            indexHint = 0;
            for (indexHint=0; indexHint < _loadedImages.count(); ++indexHint) {
                if ( _loadedImages[indexHint].image()->representsImageNum(num) )
                    break;
            }
            assert(indexHint < _loadedImages.count());
        }
        if ( _loadedImages[indexHint].state() == expectedCurrentState ) {
            _loadedImages[indexHint].setState(newState);
            result = true;
        }
    });
    return result;
}

// dyld3 pre-builds the order initializers need to be run (bottom up) in a list in the closure.
// This method uses that list to run all initializers.
// Because an initializer may call dlopen() and/or create threads, the _loadedImages array
// may move under us. So, never keep a pointer into it. Always reference images by ImageNum
// and use the hint to make that faster in the case where _loadedImages does not move.
void AllImages::runInitialzersBottomUp(const closure::Image* topImage)
{
    // walk closure-specified initializer list, already ordered bottom up
    topImage->forEachImageToInitBefore(^(closure::ImageNum imageToInit, bool& stop) {
        // get copy of LoadedImage about imageToInit, but don't keep reference into _loadedImages, because it may move if initializers call dlopen()
        uint32_t indexHint = 0;
        LoadedImage loadedImageCopy = findImageNum(imageToInit, indexHint);
        // skip if the image is already inited, or in process of being inited (dependency cycle)
        if ( (loadedImageCopy.state() == LoadedImage::State::fixedUp) && swapImageState(imageToInit, indexHint, LoadedImage::State::fixedUp, LoadedImage::State::beingInited) ) {
            // tell objc to run any +load methods in image
            if ( (_objcNotifyInit != nullptr) && loadedImageCopy.image()->mayHavePlusLoads() ) {
                dyld3::ScopedTimer timer(DBG_DYLD_TIMING_OBJC_INIT, (uint64_t)loadedImageCopy.loadedAddress(), 0, 0);
                const char* path = imagePath(loadedImageCopy.image());
                log_notifications("dyld: objc-init-notifier called with mh=%p, path=%s\n", loadedImageCopy.loadedAddress(), path);
                (*_objcNotifyInit)(path, loadedImageCopy.loadedAddress());
            }

            // run all initializers in image
            runAllInitializersInImage(loadedImageCopy.image(), loadedImageCopy.loadedAddress());

            // advance state to inited
            swapImageState(imageToInit, indexHint, LoadedImage::State::beingInited, LoadedImage::State::inited);
        }
    });
}

void AllImages::runLibSystemInitializer(LoadedImage& libSystem)
{
    // First set the libSystem state to beingInited. This protects against accidentally trying
    // to run its initializers again if a dlopen happens inside libSystem_initializer().
    libSystem.setState(LoadedImage::State::beingInited);

    // run all initializers in libSystem.dylib
    // Note: during libSystem's initialization, libdyld_initializer() is called, which copies _initialImages to _loadedImages
    runAllInitializersInImage(libSystem.image(), libSystem.loadedAddress());

    // update global flag that libSystem has been initialized (so debug tools know it is safe to inject threads)
    _oldAllImageInfos->libSystemInitialized = true;

    // mark libSystem.dylib as inited, so a later recursive init won't re-run its initializers
    for (LoadedImage& li : _loadedImages) {
        if ( li.loadedAddress() == libSystem.loadedAddress() ) {
            li.setState(LoadedImage::State::inited);
            break;
        }
    }
    // now that libSystem is up, register a callback to run static terminators at exit
    __cxa_atexit(&AllImages::runAllStaticTerminatorsHelper, nullptr, nullptr);
}

void AllImages::runAllStaticTerminatorsHelper(void*)
{
    gAllImages.runAllStaticTerminators();
}

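// Calls each initializer recorded in the closure for this image. On arm64e
// (ptrauth_calls), function pointers must carry a PAC signature before an indirect
// call, so the raw address from the closure is signed with key 0 (IA) and a zero
// discriminator. The initializer signature assumed below matches the arguments passed:
//     typedef void (*Initializer)(int argc, const char* argv[], const char* envp[],
//                                 const char* apple[], const ProgramVars* vars);
// (a sketch inferred from the call site; the authoritative typedef lives in AllImages.h)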
void AllImages::runAllInitializersInImage(const closure::Image* image, const MachOLoaded* ml)
{
    image->forEachInitializer(ml, ^(const void* func) {
        Initializer initFunc = (Initializer)func;
#if __has_feature(ptrauth_calls)
        initFunc = (Initializer)__builtin_ptrauth_sign_unauthenticated((void*)initFunc, 0, 0);
#endif
        {
            // name the timer so it lives for the whole block (an unnamed temporary would be destroyed immediately)
            ScopedTimer timer(DBG_DYLD_TIMING_STATIC_INITIALIZER, (uint64_t)ml, (uint64_t)func, 0);
            initFunc(NXArgc, NXArgv, environ, appleParams, _programVars);
        }
        log_initializers("dyld: called initializer %p in %s\n", initFunc, image->path());
    });
}

// Note: this is noinline to avoid using too much stack if loadImage() has to re-enter dlopen() due to an invalid closure
__attribute__((noinline))
const MachOLoaded* AllImages::dlopen(Diagnostics& diag, const char* path, bool rtldNoLoad, bool rtldLocal,
                                     bool rtldNoDelete, bool rtldNow, bool fromOFI, const void* callerAddress,
                                     bool canUsePrebuiltSharedCacheClosure)
{
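    // Prebuilt closures from the shared cache can only be used when this libdyld
    // was built with the same closure format version as the cache.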
    bool sharedCacheFormatCompatible = (_dyldCacheAddress != nullptr) && (_dyldCacheAddress->header.formatVersion == dyld3::closure::kFormatVersion);
    canUsePrebuiltSharedCacheClosure &= sharedCacheFormatCompatible;

    // quick check if path is in shared cache and already loaded
    if ( _dyldCacheAddress != nullptr ) {
        uint32_t dyldCacheImageIndex;
        if ( _dyldCacheAddress->hasImagePath(path, dyldCacheImageIndex) ) {
            uint64_t mTime;
            uint64_t inode;
            const MachOLoaded* mh = (MachOLoaded*)_dyldCacheAddress->getIndexedImageEntry(dyldCacheImageIndex, mTime, inode);
            // Note: we do not need the readLock because this is within the global dlopen lock
            for (const LoadedImage& li : _loadedImages) {
                if ( li.loadedAddress() == mh ) {
                    return mh;
                }
            }

            // If this is a customer cache, and we have no overrides, then we know for sure the cache closure is valid.
            // This assumes that a libdispatch root would have been loaded on launch, and that roots are not
            // supported with customer caches, which is the case today.
            if ( !rtldNoLoad && !hasInsertedOrInterposingLibraries() &&
                 (_dyldCacheAddress->header.cacheType == kDyldSharedCacheTypeProduction) &&
                 sharedCacheFormatCompatible ) {
                const dyld3::closure::ImageArray* images = _dyldCacheAddress->cachedDylibsImageArray();
                const dyld3::closure::Image* image = images->imageForNum(dyldCacheImageIndex+1);
                return loadImage(diag, path, image->imageNum(), nullptr, rtldLocal, rtldNoDelete, rtldNow, fromOFI, callerAddress);
            }
        }
    }

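    // find the image containing callerAddress, so @rpath/@loader_path in the opened
    // path can be resolved relative to the image that called dlopen()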
    __block closure::ImageNum callerImageNum = 0;
    for (const LoadedImage& li : _loadedImages) {
        uint8_t permissions;
        if ( (callerImageNum == 0) && li.image()->containsAddress(callerAddress, li.loadedAddress(), &permissions) ) {
            callerImageNum = li.image()->imageNum();
        }
        //fprintf(stderr, "mh=%p, image=%p, imageNum=0x%04X, path=%s\n", li.loadedAddress(), li.image(), li.image()->imageNum(), li.image()->path());
    }

    // make closure
    closure::ImageNum topImageNum = 0;
    const closure::DlopenClosure* newClosure = nullptr;

    // First try with closures from the shared cache permitted.
    // Then try again, forcing a new closure to be built.
    for (bool canUseSharedCacheClosure : { true, false }) {
        // We can only use a shared cache closure if the shared cache format is the same as libdyld's.
        canUseSharedCacheClosure &= canUsePrebuiltSharedCacheClosure;
        closure::FileSystemPhysical fileSystem(nullptr, nullptr, _allowEnvPaths);
        RootsChecker rootsChecker;
        closure::ClosureBuilder::AtPath atPathHandling = (_allowAtPaths ? closure::ClosureBuilder::AtPath::all : closure::ClosureBuilder::AtPath::onlyInRPaths);
        closure::ClosureBuilder cb(_nextImageNum, fileSystem, rootsChecker, _dyldCacheAddress, true, *_archs, closure::gPathOverrides, atPathHandling, true, nullptr, (dyld3::Platform)platform());
        newClosure = cb.makeDlopenClosure(path, _mainClosure, _loadedImages.array(), callerImageNum, rtldNoLoad, rtldNow, canUseSharedCacheClosure, &topImageNum);
        if ( newClosure == closure::ClosureBuilder::sRetryDlopenClosure ) {
            log_apis(" dlopen: closure builder needs to retry: %s\n", path);
            assert(canUseSharedCacheClosure);
            continue;
        }
        if ( (newClosure == nullptr) && (topImageNum == 0) ) {
            if ( cb.diagnostics().hasError())
                diag.error("%s", cb.diagnostics().errorMessage());
            else if ( !rtldNoLoad )
                diag.error("dlopen(): file not found: %s", path);
            return nullptr;
        }
        // save off next available ImageNum for use by the next call to dlopen()
        _nextImageNum = cb.nextFreeImageNum();
        break;
    }

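    // At this point either the builder produced a fresh closure (newClosure set, with
    // topImageNum identifying its top image), or newClosure is null and topImageNum
    // refers to an image that is already known (already loaded, or covered by a
    // prebuilt shared cache closure).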
    if ( newClosure != nullptr ) {
        // if the new closure contains an ImageArray, add it to the list
        if ( const closure::ImageArray* newArray = newClosure->images() ) {
            appendToImagesArray(newArray);
        }
        log_apis(" dlopen: made %s closure: %p\n", newClosure->topImage()->variantString(), newClosure);
    }

    // if already loaded, just bump refCount and return
    if ( (newClosure == nullptr) && (topImageNum != 0) ) {
        for (LoadedImage& li : _loadedImages) {
            if ( li.image()->imageNum() == topImageNum ) {
                // is already loaded
                const MachOLoaded* topLoadAddress = li.loadedAddress();
                if ( !li.image()->inDyldCache() )
                    incRefCount(topLoadAddress);
                log_apis(" dlopen: already loaded as '%s'\n", li.image()->path());
                // if previously opened with RTLD_LOCAL, but now opened with RTLD_GLOBAL, unhide it
                if ( !rtldLocal && li.hideFromFlatSearch() )
                    li.setHideFromFlatSearch(false);
                // if called with RTLD_NODELETE, mark it as never-unload
                if ( rtldNoDelete )
                    li.markLeaveMapped();

                // If we haven't run the initializers, then we must be inside a static initializer that called dlopen()
                if ( li.state() != LoadedImage::State::inited ) {
                    // RTLD_NOLOAD means dlopen should fail unless path is already loaded.
                    // Don't run initializers when RTLD_NOLOAD is set. This only matters if dlopen() is
                    // called from within an initializer, because it can cause initializers to run
                    // out of order. Most uses of RTLD_NOLOAD are "probes". If they want initializers
                    // to run, then don't use RTLD_NOLOAD.
                    if (!rtldNoLoad) {
                        runInitialzersBottomUp(li.image());
                    }
                }

                return topLoadAddress;
            }
        }
    }

    return loadImage(diag, path, topImageNum, newClosure, rtldLocal, rtldNoDelete, rtldNow, fromOFI, callerAddress);
}

// Note: this is noinline to avoid using too much stack in the parent dlopen() method
__attribute__((noinline))
const MachOLoaded* AllImages::loadImage(Diagnostics& diag, const char* path,
                                        closure::ImageNum topImageNum, const closure::DlopenClosure* newClosure,
                                        bool rtldLocal, bool rtldNoDelete, bool rtldNow, bool fromOFI,
                                        const void* callerAddress) {
    // Note: this array is used as the storage for the Loader, so it needs to be
    // large enough for the total number of images this dlopen may load
    STACK_ALLOC_OVERFLOW_SAFE_ARRAY(LoadedImage, newImages, 1024);
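    // (STACK_ALLOC_OVERFLOW_SAFE_ARRAY reserves stack space for the first 1024 entries
    // and is expected to fall back to dynamic allocation beyond that, so 1024 is a
    // fast-path size rather than a hard limit.)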

    // Note: we don't need pre-optimized Objective-C for dlopen closures, but use
    // variables here to make it easier to see what's going on.
    const dyld3::closure::ObjCSelectorOpt* selectorOpt = nullptr;
    dyld3::Array<dyld3::closure::Image::ObjCSelectorImage> selectorImages;

    // run loader to load all new images
    RootsChecker rootsChecker;
    Loader loader(_loadedImages.array(), newImages, _dyldCacheAddress, imagesArrays(),
                  selectorOpt, selectorImages, rootsChecker, (dyld3::Platform)platform(),
                  &dyld3::log_loads, &dyld3::log_segments, &dyld3::log_fixups, &dyld3::log_dofs, !rtldNow);

    // find the Image* for the top image, looking in the new closure first
    const closure::Image* topImage = nullptr;
    if ( newClosure != nullptr )
        topImage = newClosure->images()->imageForNum(topImageNum);
    if ( topImage == nullptr )
        topImage = closure::ImageArray::findImage(imagesArrays(), topImageNum);
    if ( newClosure == nullptr ) {
        if ( topImageNum < dyld3::closure::kLastDyldCacheImageNum )
            log_apis(" dlopen: using pre-built %s dlopen closure from dyld shared cache %p\n", topImage->variantString(), topImage);
        else
            log_apis(" dlopen: using pre-built %s dlopen closure %p\n", topImage->variantString(), topImage);
    }
    LoadedImage topLoadedImage = LoadedImage::make(topImage);
    if ( rtldLocal && !topImage->inDyldCache() )
        topLoadedImage.setHideFromFlatSearch(true);
    if ( rtldNoDelete && !topImage->inDyldCache() )
        topLoadedImage.markLeaveMapped();
    loader.addImage(topLoadedImage);

    // recursively load all dependents and fill in allImages array
    bool someCacheImageOverridden = false;
    loader.completeAllDependents(diag, someCacheImageOverridden);
    if ( diag.hasError() )
        return nullptr;
    bool closureOutOfDate;
    bool recoverable;
    loader.mapAndFixupAllImages(diag, _processDOFs, fromOFI, &closureOutOfDate, &recoverable);
    if ( diag.hasError() ) {
        // If we used a pre-built shared cache closure, and now found that it was out of date,
        // try again and rebuild a new closure.
        // Note: newClosure is null in the case where we used a prebuilt closure.
        if ( closureOutOfDate && recoverable && (newClosure == nullptr) ) {
            diag.clearError();
            return dlopen(diag, path, false /* rtldNoLoad */, rtldLocal, rtldNoDelete, rtldNow, fromOFI, callerAddress, false);
        }
        return nullptr;
    }

    // Record if we had a root
    _someImageOverridden |= someCacheImageOverridden;

    const MachOLoaded* topLoadAddress = newImages.begin()->loadedAddress();

    // bump dlopen refcount of the image directly loaded
    if ( !topImage->inDyldCache() )
        incRefCount(topLoadAddress);

    // tell gAllImages about the new images
    addImages(newImages);

    // Run notifiers before applyInterposingToDyldCache() so that we have an
    // accurate image list before any calls to findImage().
    // TODO: Can we move this even earlier, e.g. after map images but before fixups?
    runImageNotifiers(newImages);

    // if the closure adds images that override the dyld cache, patch the cache
    if ( newClosure != nullptr )
        applyInterposingToDyldCache(newClosure);

    runImageCallbacks(newImages);

    // run initializers
    runInitialzersBottomUp(topImage);

    return topLoadAddress;
}

void AllImages::appendToImagesArray(const closure::ImageArray* newArray)
{
    _imagesArrays.push_back(newArray);
}

const Array<const closure::ImageArray*>& AllImages::imagesArrays()
{
    return _imagesArrays.array();
}

bool AllImages::isRestricted() const
{
    return !_allowEnvPaths;
}

bool AllImages::hasInsertedOrInterposingLibraries() const
{
    return _mainClosure->hasInsertedLibraries() || _mainClosure->hasInterposings();
}

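// The next three methods keep _globalLock coherent across fork(). They are intended
// to be wired to libSystem's fork hooks: prepare takes the lock so no other thread
// holds it at fork time, the parent simply releases it, and the child resets it
// because only the forking thread survives in the child.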
void AllImages::takeLockBeforeFork() {
#ifdef OS_UNFAIR_RECURSIVE_LOCK_INIT
    os_unfair_recursive_lock_lock(&_globalLock);
#endif
}

void AllImages::releaseLockInForkParent() {
#ifdef OS_UNFAIR_RECURSIVE_LOCK_INIT
    os_unfair_recursive_lock_unlock(&_globalLock);
#endif
}

void AllImages::resetLockInForkChild() {
#if TARGET_OS_SIMULATOR

    // There's no dyld3 on the simulator this year
    assert(false);

#else

#ifdef OS_UNFAIR_RECURSIVE_LOCK_INIT
    os_unfair_recursive_lock_unlock_forked_child(&_globalLock);
#endif

#endif // TARGET_OS_SIMULATOR
}

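// Returns the pre-uniqued selector string from the closure's selector hash table,
// or nullptr if the selector is not in the table; libobjc can use this to avoid
// inserting a duplicate into its own runtime selector table.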
const char* AllImages::getObjCSelector(const char *selName) const {
    if ( _objcSelectorHashTable == nullptr )
        return nullptr;
    return _objcSelectorHashTable->getString(selName, _objcSelectorHashTableImages.array());
}

void AllImages::forEachObjCClass(const char* className,
                                 void (^callback)(void* classPtr, bool isLoaded, bool* stop)) const {
    if ( _objcClassHashTable == nullptr )
        return;
    // There may be a duplicate in the shared cache. If that is the case, return it first
    if ( _objcClassDuplicatesHashTable != nullptr ) {
        void* classImpl = nullptr;
        if ( _objcClassDuplicatesHashTable->getClassLocation(className, _dyldCacheObjCOpt, classImpl) ) {
            bool stop = false;
            callback(classImpl, true, &stop);
            if (stop)
                return;
        }
    }
    _objcClassHashTable->forEachClass(className, _objcClassHashTableImages.array(), callback);
}

void AllImages::forEachObjCProtocol(const char* protocolName,
                                    void (^callback)(void* protocolPtr, bool isLoaded, bool* stop)) const {
    if ( _objcProtocolHashTable == nullptr )
        return;
    _objcProtocolHashTable->forEachClass(protocolName, _objcClassHashTableImages.array(), callback);
}


} // namespace dyld3