dyld-851.27.tar.gz
[apple/dyld.git] / dyld3 / AllImages.cpp
1 /*
2 * Copyright (c) 2017 Apple Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. Please obtain a copy of the License at
10 * http://www.opensource.apple.com/apsl/ and read it before using this
11 * file.
12 *
13 * The Original Code and all software distributed under the License are
14 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
15 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
16 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
18 * Please see the License for the specific language governing rights and
19 * limitations under the License.
20 *
21 * @APPLE_LICENSE_HEADER_END@
22 */
23
24
25 #include <stdint.h>
26 #include <fcntl.h>
27 #include <sys/types.h>
28 #include <sys/stat.h>
29 #include <sys/sysctl.h>
30 #include <mach/mach_time.h> // mach_absolute_time()
31 #include <libkern/OSAtomic.h>
32 #include <uuid/uuid.h>
33 #include <mach-o/dyld_images.h>
34 #include <libc_private.h>
35
36 #include <vector>
37 #include <algorithm>
38
39 #include "AllImages.h"
40 #include "libdyldEntryVector.h"
41 #include "Logging.h"
42 #include "Loading.h"
43 #include "Tracing.h"
44 #include "DyldSharedCache.h"
45 #include "PathOverrides.h"
46 #include "Closure.h"
47 #include "ClosureBuilder.h"
48 #include "ClosureFileSystemPhysical.h"
49 #include "RootsChecker.h"
50
51 #include "objc-shared-cache.h"
52
// "apple" parameter vector passed to the process at launch (defined elsewhere).
extern const char** appleParams;

// should be a header for these
// Address range handed to __cxa_finalize_ranges() so libc++abi can run
// atexit/destructor registrations belonging to an image being unloaded.
struct __cxa_range_t {
    const void* addr;
    size_t      length;
};
extern "C" void __cxa_finalize_ranges(const __cxa_range_t ranges[], unsigned int count);

extern "C" int  __cxa_atexit(void (*func)(void *), void* arg, void* dso);


// Non-null when the process is running with dyld3 (the pointer is
// authenticated on targets where __ptrauth_dyld_address_auth expands to a
// ptrauth qualifier; elsewhere it is a plain pointer).
VIS_HIDDEN void* __ptrauth_dyld_address_auth gUseDyld3 = nullptr;
66
67
68 namespace dyld3 {
69
70
71
///////////////////// AllImages ////////////////////////////


// The single process-wide instance tracking every loaded mach-o image.
AllImages gAllImages;
77
78
// One-time setup performed early during launch, before libSystem is fully
// initialized.  Records the launch closure and shared-cache location, registers
// the closure's image arrays, and tells the debugger about the images dyld has
// already mapped.
// 'initialImages' is owned by the caller's stack frame (see applyInitialImages()).
void AllImages::init(const closure::LaunchClosure* closure, const DyldSharedCache* dyldCacheLoadAddress, const char* dyldCachePath,
                     const Array<LoadedImage>& initialImages)
{
    _mainClosure      = closure;
    _initialImages    = &initialImages;
    _dyldCacheAddress = dyldCacheLoadAddress;
    _dyldCachePath    = dyldCachePath;

    if ( _dyldCacheAddress ) {
        // slide = where the cache actually landed minus where it was built to load
        _dyldCacheSlide = (uint64_t)dyldCacheLoadAddress - dyldCacheLoadAddress->unslidLoadAddress();
        _imagesArrays.push_back(dyldCacheLoadAddress->cachedDylibsImageArray());
        if ( auto others = dyldCacheLoadAddress->otherOSImageArray() )
            _imagesArrays.push_back(others);
    }
    _imagesArrays.push_back(_mainClosure->images());

    // record first ImageNum to do use for dlopen() calls
    _mainClosure->images()->forEachImage(^(const dyld3::closure::Image* image, bool& stop) {
        closure::ImageNum num = image->imageNum();
        if ( num >= _nextImageNum )
            _nextImageNum = num+1;
    });

    // Make temporary old image array, so libSystem initializers can be debugged
    STACK_ALLOC_ARRAY(dyld_image_info, oldDyldInfo, initialImages.count());
    for (const LoadedImage& li : initialImages) {
        oldDyldInfo.push_back({li.loadedAddress(), li.image()->path(), 0});
    }
    // Point the legacy all_image_infos at the stack array just long enough to
    // deliver the dyld_image_adding notification, then detach it again since
    // the array goes away when this function returns.
    _oldAllImageInfos->infoArray      = &oldDyldInfo[0];
    _oldAllImageInfos->infoArrayCount = (uint32_t)oldDyldInfo.count();
    _oldAllImageInfos->notification(dyld_image_adding, _oldAllImageInfos->infoArrayCount, _oldAllImageInfos->infoArray);
    _oldAllImageInfos->infoArray      = nullptr;
    _oldAllImageInfos->infoArrayCount = 0;

    _processDOFs = Loader::dtraceUserProbesEnabled();
}
115
116 void AllImages::setProgramVars(ProgramVars* vars, bool keysOff, bool osBinariesOnly)
117 {
118 _programVars = vars;
119 _archs = &GradedArchs::forCurrentOS(keysOff, osBinariesOnly);
120 }
121
122 void AllImages::setLaunchMode(uint32_t flags)
123 {
124 _launchMode = flags;
125 }
126
127 AllImages::MainFunc AllImages::getDriverkitMain()
128 {
129 return _driverkitMain;
130 }
131
132 void AllImages::setDriverkitMain(MainFunc mainFunc)
133 {
134 _driverkitMain = mainFunc;
135 }
136
137 void AllImages::setRestrictions(bool allowAtPaths, bool allowEnvPaths)
138 {
139 _allowAtPaths = allowAtPaths;
140 _allowEnvPaths = allowEnvPaths;
141 }
142
143 void AllImages::setHasCacheOverrides(bool someCacheImageOverriden)
144 {
145 _someImageOverridden = someCacheImageOverriden;
146 }
147
148 bool AllImages::hasCacheOverrides() const {
149 return _someImageOverridden;
150 }
151
// Promotes the images recorded by init() into the real image list and fires
// all load notifications for them.  Order matters: the list must be populated
// before notifiers/callbacks run.  Afterwards _initialImages is cleared because
// the array lives on the caller's stack frame.
void AllImages::applyInitialImages()
{
    addImages(*_initialImages);
    runImageNotifiers(*_initialImages);
    runImageCallbacks(*_initialImages);
    _initialImages = nullptr; // this was stack allocated
}
159
// Runs 'work' while holding _globalLock.  Despite the name, this is the same
// exclusive lock used by withWriteLock()/withNotifiersLock() — there is no
// shared/reader mode.  With OS_UNFAIR_RECURSIVE_LOCK_INIT available the lock
// is recursive, so 'work' may call other lock-taking AllImages methods.
// NOTE(review): the pthread_mutex fallback's recursiveness depends on how
// _globalLock was initialized (not visible here) — confirm before relying on
// re-entrancy in that configuration.
void AllImages::withReadLock(void (^work)()) const
{
#ifdef OS_UNFAIR_RECURSIVE_LOCK_INIT
    os_unfair_recursive_lock_lock(&_globalLock);
    work();
    os_unfair_recursive_lock_unlock(&_globalLock);
#else
    pthread_mutex_lock(&_globalLock);
    work();
    pthread_mutex_unlock(&_globalLock);
#endif
}
172
// Runs 'work' while holding _globalLock for mutation of the image list.
// Identical locking to withReadLock(); the separate name documents intent.
void AllImages::withWriteLock(void (^work)())
{
#ifdef OS_UNFAIR_RECURSIVE_LOCK_INIT
    os_unfair_recursive_lock_lock(&_globalLock);
    work();
    os_unfair_recursive_lock_unlock(&_globalLock);
#else
    pthread_mutex_lock(&_globalLock);
    work();
    pthread_mutex_unlock(&_globalLock);
#endif
}
185
// Runs 'work' while holding _globalLock to serialize access to the notifier
// function lists.  Same underlying lock as withReadLock()/withWriteLock().
void AllImages::withNotifiersLock(void (^work)()) const
{
#ifdef OS_UNFAIR_RECURSIVE_LOCK_INIT
    os_unfair_recursive_lock_lock(&_globalLock);
    work();
    os_unfair_recursive_lock_unlock(&_globalLock);
#else
    pthread_mutex_lock(&_globalLock);
    work();
    pthread_mutex_unlock(&_globalLock);
#endif
}
198
// Copies the current image list into the legacy dyld_all_image_infos struct
// that debuggers and other processes read.  Protocol: infoArray/uuidArray are
// set to NULL while being rebuilt (signalling "in use" to readers) and only
// restored once the backing arrays are fully written.
void AllImages::mirrorToOldAllImageInfos()
{
    withReadLock(^(){
        // set infoArray to NULL to denote it is in-use
        _oldAllImageInfos->infoArray = nullptr;

        // if array not large enough, re-alloc it
        uint32_t imageCount = (uint32_t)_loadedImages.count();
        if ( _oldArrayAllocCount < imageCount ) {
            uint32_t newAllocCount    = imageCount + 16;   // grow with slack to amortize reallocs
            dyld_image_info* newArray = (dyld_image_info*)::malloc(sizeof(dyld_image_info)*newAllocCount);
            if ( _oldAllImageArray != nullptr ) {
                ::memcpy(newArray, _oldAllImageArray, sizeof(dyld_image_info)*_oldAllImageInfos->infoArrayCount);
                ::free(_oldAllImageArray);
            }
            _oldAllImageArray   = newArray;
            _oldArrayAllocCount = newAllocCount;
        }

        // fill out array to mirror current image list
        int index = 0;
        for (const LoadedImage& li : _loadedImages) {
            _oldAllImageArray[index].imageLoadAddress = li.loadedAddress();
            _oldAllImageArray[index].imageFilePath    = imagePath(li.image());
            _oldAllImageArray[index].imageFileModDate = 0;
            ++index;
        }

        // set infoArray back to base address of array (so other process can now read)
        _oldAllImageInfos->infoArrayCount           = imageCount;
        _oldAllImageInfos->infoArrayChangeTimestamp = mach_absolute_time();
        _oldAllImageInfos->infoArray                = _oldAllImageArray;

        // <radr://problem/42668846> update UUID array if needed
        // The uuidArray only lists dyld plus images outside the shared cache
        // (or all images if the process detached from the shared region).
        uint32_t nonCachedCount = 1; // always add dyld
        for (const LoadedImage& li : _loadedImages) {
            if ( _oldAllImageInfos->processDetachedFromSharedRegion || !li.loadedAddress()->inDyldCache())
                ++nonCachedCount;
        }
        if ( nonCachedCount != _oldAllImageInfos->uuidArrayCount ) {
            // set infoArray to NULL to denote it is in-use
            _oldAllImageInfos->uuidArray = nullptr;
            // make sure allocation can hold all uuids
            if ( _oldUUIDAllocCount < nonCachedCount ) {
                uint32_t newAllocCount   = (nonCachedCount + 3) & (-4); // round up to multiple of 4
                dyld_uuid_info* newArray = (dyld_uuid_info*)::malloc(sizeof(dyld_uuid_info)*newAllocCount);
                if ( _oldUUIDArray != nullptr )
                    ::free(_oldUUIDArray);
                _oldUUIDArray      = newArray;
                _oldUUIDAllocCount = newAllocCount;
            }
            // add dyld then all images not in dyld cache
            const MachOFile* dyldMF = (MachOFile*)_oldAllImageInfos->dyldImageLoadAddress;
            _oldUUIDArray[0].imageLoadAddress = dyldMF;
            dyldMF->getUuid(_oldUUIDArray[0].imageUUID);
            index = 1;
            for (const LoadedImage& li : _loadedImages) {
                if ( _oldAllImageInfos->processDetachedFromSharedRegion || !li.loadedAddress()->inDyldCache() ) {
                    _oldUUIDArray[index].imageLoadAddress = li.loadedAddress();
                    li.loadedAddress()->getUuid(_oldUUIDArray[index].imageUUID);
                    ++index;
                }
            }
            // set uuidArray back to base address of array (so kernel can now read)
            _oldAllImageInfos->uuidArray      = _oldUUIDArray;
            _oldAllImageInfos->uuidArrayCount = nonCachedCount;
        }
    });
}
268
// Appends newly loaded images to the global image list.  Notifications are
// delivered separately via runImageNotifiers()/runImageCallbacks().
void AllImages::addImages(const Array<LoadedImage>& newImages)
{
    // copy into _loadedImages
    withWriteLock(^(){
        _loadedImages.append(newImages);
    });
}
276
// Records [start,end) as an immutable (read-only, never-unloaded) address
// range, for use by immutableMemory().  Lock-free for readers: a slot is
// "free" while its start is 0, and 'end' is published before 'start' so a
// reader that observes a non-zero start is guaranteed to see the matching end.
void AllImages::addImmutableRange(uintptr_t start, uintptr_t end)
{
    //fprintf(stderr, "AllImages::addImmutableRange(0x%09lX, 0x%09lX)\n", start, end);
    // first look in existing range buckets for empty slot
    ImmutableRanges* lastRange = nullptr;
    for (ImmutableRanges* ranges = &_immutableRanges; ranges != nullptr; ranges = ranges->next.load(std::memory_order_acquire)) {
        lastRange = ranges;
        for (uintptr_t i=0; i < ranges->arraySize; ++i) {
            if ( ranges->array[i].start.load(std::memory_order_acquire) == 0 ) {
                // set 'end' before 'start' so readers always see consistent state
                ranges->array[i].end.store(end, std::memory_order_release);
                ranges->array[i].start.store(start, std::memory_order_release);
                return;
            }
        }
    }
    // if we got here, there are no empty slots, so add new ImmutableRanges
    const uintptr_t newSize = 15; // allocation is 256 bytes on 64-bit processes
    ImmutableRanges* newRange = (ImmutableRanges*)calloc(offsetof(ImmutableRanges,array[newSize]), 1);
    newRange->arraySize = newSize;
    newRange->array[0].end.store(end, std::memory_order_release);
    newRange->array[0].start.store(start, std::memory_order_release);
    // tie into previous list last
    // (the bucket is fully initialized before being linked in, so readers
    // never observe a partially built bucket)
    lastRange->next.store(newRange, std::memory_order_release);
}
302
// Internal bookkeeping and external (debugger/kdebug) notifications for a
// batch of newly loaded images: mirrors the legacy all_image_infos, updates
// cached address bounds and immutable ranges, and logs/traces each image.
// Registered user callbacks are run separately by runImageCallbacks().
void AllImages::runImageNotifiers(const Array<LoadedImage>& newImages)
{
    uint32_t count = (uint32_t)newImages.count();
    assert(count != 0);

    if ( _oldAllImageInfos != nullptr ) {
        // sync to old all image infos struct
        mirrorToOldAllImageInfos();

        // tell debugger about new images
        // (VLA sized by the batch; the debugger copies it during the call)
        dyld_image_info oldDyldInfo[count];
        for (uint32_t i=0; i < count; ++i) {
            oldDyldInfo[i].imageLoadAddress = newImages[i].loadedAddress();
            oldDyldInfo[i].imageFilePath    = imagePath(newImages[i].image());
            oldDyldInfo[i].imageFileModDate = 0;
        }
        _oldAllImageInfos->notification(dyld_image_adding, count, oldDyldInfo);
    }

    // if any image not in the shared cache added, recompute bounds
    // NOTE(review): recomputeBounds() is documented as requiring the write
    // lock, but none is visibly taken here — confirm callers hold it.
    for (const LoadedImage& li : newImages) {
        if ( !((MachOAnalyzer*)li.loadedAddress())->inDyldCache() ) {
            recomputeBounds();
            break;
        }
    }

    // update immutable ranges
    // (only read-only disk segments of never-unload images outside the cache)
    for (const LoadedImage& li : newImages) {
        if ( !li.image()->inDyldCache() && li.image()->neverUnload() ) {
            uintptr_t baseAddr = (uintptr_t)li.loadedAddress();
            li.image()->forEachDiskSegment(^(uint32_t segIndex, uint32_t fileOffset, uint32_t fileSize, int64_t vmOffset, uint64_t vmSize, uint8_t permissions, bool laterReadOnly, bool &stop) {
                if ( (permissions & (VM_PROT_READ|VM_PROT_WRITE)) == VM_PROT_READ ) {
                    addImmutableRange(baseAddr + (uintptr_t)vmOffset, (uintptr_t)(baseAddr + vmOffset + vmSize));
                }
            });
        }
    }

    // log loads
    for (const LoadedImage& li : newImages) {
        const char *path = imagePath(li.image());
        uuid_t imageUUID;
        if ( li.image()->getUuid(imageUUID)) {
            uuid_string_t imageUUIDStr;
            uuid_unparse_upper(imageUUID, imageUUIDStr);
            log_loads("dyld: <%s> %s\n", imageUUIDStr, path);
        }
        else {
            log_loads("dyld: %s\n", path);
        }
    }

    // call kdebug trace for each image
    if (kdebug_is_enabled(KDBG_CODE(DBG_DYLD, DBG_DYLD_UUID, DBG_DYLD_UUID_MAP_A))) {
        for (const LoadedImage& li : newImages) {
            const closure::Image* image = li.image();
            struct stat            stat_buf;
            const char *path = imagePath(image);
            uuid_t uuid;
            image->getUuid(uuid);
            fsid_t       fsid    = {{ 0, 0 }};
            fsobj_id_t   fsobjid = { 0, 0 };
            // stat() only for on-disk images; cache members keep zero fs ids
            if ( !li.loadedAddress()->inDyldCache() && (dyld3::stat(path, &stat_buf) == 0) ) {
                fsobjid = *(fsobj_id_t*)&stat_buf.st_ino;
                fsid    = {{ stat_buf.st_dev, 0 }};
            }
            kdebug_trace_dyld_image(DBG_DYLD_UUID_MAP_A, path, &uuid, fsobjid, fsid, li.loadedAddress());
        }
    }
}
374
// Delivers user-registered add-image callbacks and the objc "mapped" notifier
// for a batch of newly loaded images.  Notifier lists are walked under the
// notifiers lock; each callback style gets the slide/path form it expects.
void AllImages::runImageCallbacks(const Array<LoadedImage>& newImages)
{
    uint32_t count = (uint32_t)newImages.count();
    assert(count != 0);

    // call each _dyld_register_func_for_add_image function with each image
    withNotifiersLock(^{
        for (NotifyFunc func : _loadNotifiers) {
            for (const LoadedImage& li : newImages) {
                dyld3::ScopedTimer timer(DBG_DYLD_TIMING_FUNC_FOR_ADD_IMAGE, (uint64_t)li.loadedAddress(), (uint64_t)func, 0);
                log_notifications("dyld: add notifier %p called with mh=%p\n", func, li.loadedAddress());
                // cache members all share the cache slide; others have their own
                if ( li.image()->inDyldCache() )
                    func(li.loadedAddress(), (uintptr_t)_dyldCacheSlide);
                else
                    func(li.loadedAddress(), li.loadedAddress()->getSlide());
            }
        }
        for (LoadNotifyFunc func : _loadNotifiers2) {
            for (const LoadedImage& li : newImages) {
                dyld3::ScopedTimer timer(DBG_DYLD_TIMING_FUNC_FOR_ADD_IMAGE, (uint64_t)li.loadedAddress(), (uint64_t)func, 0);
                log_notifications("dyld: add notifier %p called with mh=%p\n", func, li.loadedAddress());
                // third argument tells the notifier whether the image can unload
                if ( li.image()->inDyldCache() )
                    func(li.loadedAddress(), li.image()->path(), false);
                else
                    func(li.loadedAddress(), li.image()->path(), !li.image()->neverUnload());
            }
        }
        for (BulkLoadNotifier func : _loadBulkNotifiers) {
            // bulk notifiers get the whole batch in one call (VLAs sized by count)
            const mach_header* mhs[count];
            const char*        paths[count];
            for (unsigned i=0; i < count; ++i) {
                mhs[i]   = newImages[i].loadedAddress();
                paths[i] = newImages[i].image()->path();
            }
            dyld3::ScopedTimer timer(DBG_DYLD_TIMING_FUNC_FOR_ADD_IMAGE, (uint64_t)mhs[0], (uint64_t)func, 0);
            log_notifications("dyld: add notifier %p called with %d images\n", func, count);
            func(count, mhs, paths);
        }
    });

    // call objc about images that use objc
    if ( _objcNotifyMapped != nullptr ) {
        const char*        pathsBuffer[count];
        const mach_header* mhBuffer[count];
        uint32_t           imagesWithObjC = 0;
        for (const LoadedImage& li : newImages) {
            const closure::Image* image = li.image();
            if ( image->hasObjC() ) {
                pathsBuffer[imagesWithObjC] = imagePath(image);
                mhBuffer[imagesWithObjC]    = li.loadedAddress();
                ++imagesWithObjC;
            }
        }
        if ( imagesWithObjC != 0 ) {
            dyld3::ScopedTimer timer(DBG_DYLD_TIMING_OBJC_MAP, 0, 0, 0);
            (*_objcNotifyMapped)(imagesWithObjC, pathsBuffer, mhBuffer);
            if ( log_notifications("dyld: objc-mapped-notifier called with %d images:\n", imagesWithObjC) ) {
                for (uint32_t i=0; i < imagesWithObjC; ++i) {
                    log_notifications("dyld: objc-mapped: %p %s\n", mhBuffer[i], pathsBuffer[i]);
                }
            }
        }
    }

#if !TARGET_OS_DRIVERKIT
    // FIXME: This may make more sense in runImageCallbacks, but the present order
    // is after callbacks. Can we safely move it?
    // notify any processes tracking loads in this process
    notifyMonitorLoads(newImages);
#endif
}
446
// Tears down a batch of images being unloaded, in the reverse order of load:
// remove-image callbacks, objc unmapped notifier, kdebug trace, removal from
// the image list, legacy debugger notification, monitor notification, and
// finally the actual unmap.
void AllImages::removeImages(const Array<LoadedImage>& unloadImages)
{
    // call each _dyld_register_func_for_remove_image function with each image
    withNotifiersLock(^{
        for (NotifyFunc func : _unloadNotifiers) {
            for (const LoadedImage& li : unloadImages) {
                dyld3::ScopedTimer timer(DBG_DYLD_TIMING_FUNC_FOR_REMOVE_IMAGE, (uint64_t)li.loadedAddress(), (uint64_t)func, 0);
                log_notifications("dyld: remove notifier %p called with mh=%p\n", func, li.loadedAddress());
                if ( li.image()->inDyldCache() )
                    func(li.loadedAddress(), (uintptr_t)_dyldCacheSlide);
                else
                    func(li.loadedAddress(), li.loadedAddress()->getSlide());
            }
        }
    });

    // call objc about images going away
    if ( _objcNotifyUnmapped != nullptr ) {
        for (const LoadedImage& li : unloadImages) {
            if ( li.image()->hasObjC() ) {
                (*_objcNotifyUnmapped)(imagePath(li.image()), li.loadedAddress());
                log_notifications("dyld: objc-unmapped-notifier called with image %p %s\n", li.loadedAddress(), imagePath(li.image()));
            }
        }
    }

    // call kdebug trace for each image
    if (kdebug_is_enabled(KDBG_CODE(DBG_DYLD, DBG_DYLD_UUID, DBG_DYLD_UUID_MAP_A))) {
        for (const LoadedImage& li : unloadImages) {
            const closure::Image* image = li.image();
            struct stat            stat_buf;
            const char *path = imagePath(image);
            uuid_t uuid;
            image->getUuid(uuid);
            fsid_t       fsid    = {{ 0, 0 }};
            fsobj_id_t   fsobjid = { 0, 0 };
            if ( dyld3::stat(path, &stat_buf) == 0 ) {
                fsobjid = *(fsobj_id_t*)&stat_buf.st_ino;
                fsid    = {{ stat_buf.st_dev, 0 }};
            }
            kdebug_trace_dyld_image(DBG_DYLD_UUID_UNMAP_A, path, &uuid, fsobjid, fsid, li.loadedAddress());
        }
    }

    // remove each from _loadedImages
    withWriteLock(^(){
        for (const LoadedImage& uli : unloadImages) {
            for (LoadedImage& li : _loadedImages) {
                if ( uli.loadedAddress() == li.loadedAddress() ) {
                    _loadedImages.erase(li);
                    break;
                }
            }
        }
        recomputeBounds();
    });

    // sync to old all image infos struct
    mirrorToOldAllImageInfos();

    // tell debugger about removed images
    STACK_ALLOC_ARRAY(dyld_image_info, oldDyldInfo, unloadImages.count());
    for (const LoadedImage& li : unloadImages) {
        oldDyldInfo.push_back({li.loadedAddress(), li.image()->path(), 0});
    }
    _oldAllImageInfos->notification(dyld_image_removing, (uint32_t)oldDyldInfo.count(), &oldDyldInfo[0]);

    // notify any processes tracking loads in this process
    notifyMonitorUnloads(unloadImages);

    // finally, unmap images
    for (const LoadedImage& li : unloadImages) {
        if ( li.leaveMapped() ) {
            log_loads("dyld: unloaded but left mmapped %s\n", imagePath(li.image()));
        }
        else {
            // unmapImage() modifies parameter, so use copy
            LoadedImage copy = li;
            Loader::unmapImage(copy);
            log_loads("dyld: unloaded %s\n", imagePath(li.image()));
        }
    }
}
530
531 // must be called with writeLock held
532 void AllImages::recomputeBounds()
533 {
534 _lowestNonCached = UINTPTR_MAX;
535 _highestNonCached = 0;
536 for (const LoadedImage& li : _loadedImages) {
537 const MachOLoaded* ml = li.loadedAddress();
538 uintptr_t start = (uintptr_t)ml;
539 if ( !((MachOAnalyzer*)ml)->inDyldCache() ) {
540 if ( start < _lowestNonCached )
541 _lowestNonCached = start;
542 uintptr_t end = start + (uintptr_t)(li.image()->vmSizeToMap());
543 if ( end > _highestNonCached )
544 _highestNonCached = end;
545 }
546 }
547 }
548
549 uint32_t AllImages::count() const
550 {
551 return (uint32_t)_loadedImages.count();
552 }
553
554 bool AllImages::dyldCacheHasPath(const char* path) const
555 {
556 uint32_t dyldCacheImageIndex;
557 if ( _dyldCacheAddress != nullptr )
558 return _dyldCacheAddress->hasImagePath(path, dyldCacheImageIndex);
559 return false;
560 }
561
562 const char* AllImages::imagePathByIndex(uint32_t index) const
563 {
564 __block const char* result = nullptr;
565 withReadLock(^{
566 if ( index < _loadedImages.count() ) {
567 result = imagePath(_loadedImages[index].image());
568 return;
569 }
570 });
571 return result;
572 }
573
574 const mach_header* AllImages::imageLoadAddressByIndex(uint32_t index) const
575 {
576 __block const mach_header* result = nullptr;
577 withReadLock(^{
578 if ( index < _loadedImages.count() ) {
579 result = _loadedImages[index].loadedAddress();
580 return;
581 }
582 });
583 return result;
584 }
585
586 bool AllImages::findImage(const mach_header* loadAddress, LoadedImage& foundImage) const
587 {
588 __block bool result = false;
589 withReadLock(^(){
590 for (const LoadedImage& li : _loadedImages) {
591 if ( li.loadedAddress() == loadAddress ) {
592 foundImage = li;
593 result = true;
594 break;
595 }
596 }
597 });
598 return result;
599 }
600
// Invokes 'handler' for each loaded image until it sets 'stop'.
// During libSystem initialization (_initialImages still set) the caller-owned
// initial array is walked without locking; afterwards the real list is walked
// under the read lock.
void AllImages::forEachImage(void (^handler)(const LoadedImage& loadedImage, bool& stop)) const
{
    if ( _initialImages != nullptr ) {
        // being called during libSystem initialization, so _loadedImages not allocated yet
        bool stop = false;
        for (const LoadedImage& li : *_initialImages) {
            handler(li, stop);
            if ( stop )
                break;
        }
        return;
    }

    withReadLock(^{
        bool stop = false;
        for (const LoadedImage& li : _loadedImages) {
            handler(li, stop);
            if ( stop )
                break;
        }
    });
}
623
624
// Returns the path of the image containing 'addr', or nullptr if no image
// contains it.  Fast path: when the address falls inside the shared cache's
// mapped range, search the cache's TEXT segment table; otherwise fall back to
// scanning the image list.
const char* AllImages::pathForImageMappedAt(const void* addr) const
{
    if ( _initialImages != nullptr ) {
        // being called during libSystem initialization, so _loadedImages not allocated yet
        for (const LoadedImage& li : *_initialImages) {
            uint8_t permissions;
            if ( li.image()->containsAddress(addr, li.loadedAddress(), &permissions) ) {
                return li.image()->path();
            }
        }
        return nullptr;
    }

    // if address is in cache, do fast search of TEXT segments in cache
    __block const char* result = nullptr;
    if ( (_dyldCacheAddress != nullptr) && (addr > _dyldCacheAddress) ) {
        if ( addr < (void*)((uint8_t*)_dyldCacheAddress+_dyldCacheAddress->mappedSize()) ) {
            uint64_t cacheSlide        = (uint64_t)_dyldCacheAddress - _dyldCacheAddress->unslidLoadAddress();
            uint64_t unslidTargetAddr  = (uint64_t)addr - cacheSlide;
            _dyldCacheAddress->forEachImageTextSegment(^(uint64_t loadAddressUnslid, uint64_t textSegmentSize, const unsigned char* dylibUUID, const char* installName, bool& stop) {
                if ( (loadAddressUnslid <= unslidTargetAddr) && (unslidTargetAddr < loadAddressUnslid+textSegmentSize) ) {
                    result = installName;
                    stop   = true;
                }
            });
            if ( result != nullptr )
                return result;
        }
    }

    // slow path - search image list
    infoForImageMappedAt(addr, ^(const LoadedImage& foundImage, uint8_t permissions) {
        result = foundImage.image()->path();
    });

    return result;
}
662
// Calls 'handler' with the first loaded image that contains 'addr' (along with
// the permissions of the containing segment).  'handler' is not called if no
// image contains the address.  Uses the caller-owned initial array (unlocked)
// during libSystem initialization, otherwise the image list under read lock.
void AllImages::infoForImageMappedAt(const void* addr, void (^handler)(const LoadedImage& foundImage, uint8_t permissions)) const
{
    __block uint8_t permissions;
    if ( _initialImages != nullptr ) {
        // being called during libSystem initialization, so _loadedImages not allocated yet
        for (const LoadedImage& li : *_initialImages) {
            if ( li.image()->containsAddress(addr, li.loadedAddress(), &permissions) ) {
                handler(li, permissions);
                break;
            }
        }
        return;
    }

    withReadLock(^{
        for (const LoadedImage& li : _loadedImages) {
            if ( li.image()->containsAddress(addr, li.loadedAddress(), &permissions) ) {
                handler(li, permissions);
                break;
            }
        }
    });
}
686
687
// Variant of infoForImageMappedAt() that fills out-parameters instead of
// calling a handler.  Any of 'ml', 'textSize', 'path' may be nullptr to skip.
// Returns true iff some image contains 'addr'.  For addresses inside the
// shared cache range, first tries the cache's TEXT table, then falls back to
// scanning loaded cache images; otherwise scans only non-cached images.
bool AllImages::infoForImageMappedAt(const void* addr, const MachOLoaded** ml, uint64_t* textSize, const char** path) const
{
    if ( _initialImages != nullptr ) {
        // being called during libSystem initialization, so _loadedImages not allocated yet
        for (const LoadedImage& li : *_initialImages) {
            uint8_t permissions;
            if ( li.image()->containsAddress(addr, li.loadedAddress(), &permissions) ) {
                if ( ml != nullptr )
                    *ml = li.loadedAddress();
                if ( path != nullptr )
                    *path = li.image()->path();
                if ( textSize != nullptr ) {
                    *textSize = li.image()->textSize();
                }
                return true;
            }
        }
        return false;
    }

    // if address is in cache, do fast search of TEXT segments in cache
    __block bool result = false;
    if ( (_dyldCacheAddress != nullptr) && (addr > _dyldCacheAddress) ) {
        if ( addr < (void*)((uint8_t*)_dyldCacheAddress+_dyldCacheAddress->mappedSize()) ) {
            uint64_t cacheSlide       = (uint64_t)_dyldCacheAddress - _dyldCacheAddress->unslidLoadAddress();
            uint64_t unslidTargetAddr = (uint64_t)addr - cacheSlide;
            _dyldCacheAddress->forEachImageTextSegment(^(uint64_t loadAddressUnslid, uint64_t textSegmentSize, const unsigned char* dylibUUID, const char* installName, bool& stop) {
                if ( (loadAddressUnslid <= unslidTargetAddr) && (unslidTargetAddr < loadAddressUnslid+textSegmentSize) ) {
                    if ( ml != nullptr )
                        *ml = (MachOLoaded*)(loadAddressUnslid + cacheSlide);
                    if ( path != nullptr )
                        *path = installName;
                    if ( textSize != nullptr )
                        *textSize = textSegmentSize;
                    stop   = true;
                    result = true;
                }
            });
            if ( result )
                return result;
            // in shared cache, but not in a TEXT segment, do slow search of all loaded cache images
            withReadLock(^{
                for (const LoadedImage& li : _loadedImages) {
                    if ( ((MachOAnalyzer*)li.loadedAddress())->inDyldCache() ) {
                        uint8_t permissions;
                        if ( li.image()->containsAddress(addr, li.loadedAddress(), &permissions) ) {
                            if ( ml != nullptr )
                                *ml = li.loadedAddress();
                            if ( path != nullptr )
                                *path = li.image()->path();
                            if ( textSize != nullptr )
                                *textSize = li.image()->textSize();
                            result = true;
                            break;
                        }
                    }
                }
            });
            return result;
        }
    }

    // address not in dyld cache, check each non-cache image
    infoForNonCachedImageMappedAt(addr, ^(const LoadedImage& foundImage, uint8_t permissions) {
        if ( ml != nullptr )
            *ml = foundImage.loadedAddress();
        if ( path != nullptr )
            *path = foundImage.image()->path();
        if ( textSize != nullptr )
            *textSize = foundImage.image()->textSize();
        result = true;
    });

    return result;
}
763
764 // same as infoForImageMappedAt(), but only look at images not in the dyld cache
// same as infoForImageMappedAt(), but only look at images not in the dyld cache
// Calls 'handler' at most once, with the first non-cache image containing
// 'addr' and the containing segment's permissions.
void AllImages::infoForNonCachedImageMappedAt(const void* addr, void (^handler)(const LoadedImage& foundImage, uint8_t permissions)) const
{
    __block uint8_t permissions;
    if ( _initialImages != nullptr ) {
        // being called during libSystem initialization, so _loadedImages not allocated yet
        for (const LoadedImage& li : *_initialImages) {
            if ( !((MachOAnalyzer*)li.loadedAddress())->inDyldCache() ) {
                if ( li.image()->containsAddress(addr, li.loadedAddress(), &permissions) ) {
                    handler(li, permissions);
                    break;
                }
            }
        }
        return;
    }

    withReadLock(^{
        for (const LoadedImage& li : _loadedImages) {
            if ( !((MachOAnalyzer*)li.loadedAddress())->inDyldCache() ) {
                if ( li.image()->containsAddress(addr, li.loadedAddress(), &permissions) ) {
                    handler(li, permissions);
                    break;
                }
            }
        }
    });
}
792
// Returns true if [addr, addr+length) is known to be immutable: either a
// read-only part of the dyld shared cache, or inside a recorded immutable
// range of a never-unload image.  A false return means "not provably
// immutable", not "mutable".
bool AllImages::immutableMemory(const void* addr, size_t length) const
{
    // check to see if in shared cache
    if ( _dyldCacheAddress != nullptr ) {
        bool readOnly;
        if ( _dyldCacheAddress->inCache(addr, length, readOnly) ) {
            return readOnly;
        }
    }

    // check to see if it is outside the range of any loaded image
    if ( ((uintptr_t)addr < _lowestNonCached) || ((uintptr_t)addr+length > _highestNonCached) ) {
        return false;
    }

    // check immutable ranges
    // (entries are published end-before-start by addImmutableRange(); a zero
    // start marks the first unused slot in a bucket)
    for (const ImmutableRanges* ranges = &_immutableRanges; ranges != nullptr; ranges = ranges->next.load(std::memory_order_acquire)) {
        for (uintptr_t i=0; i < ranges->arraySize; ++i) {
            if ( ranges->array[i].start.load(std::memory_order_acquire) == 0 )
                break; // no more entries in use
            // NOTE(review): the 'end >' comparison rejects a span ending
            // exactly at the range's end (strict rather than >=); the effect
            // is only a conservative false — confirm whether intentional.
            if ( (ranges->array[i].start.load(std::memory_order_acquire) <= (uintptr_t)addr)
              && (ranges->array[i].end.load(std::memory_order_acquire) > ((uintptr_t)addr)+length) )
                return true;
        }
    }

    return false;
}
821
822
// Converts a closure-encoded symbol target into a runtime address:
//   kindSharedCache - offset from the live shared cache base
//   kindImage       - offset from the load address of the image with that ImageNum
//   kindAbsolute    - a literal value; bit 62 set means the value was a
//                     sign-extended negative truncated to 62 bits, so the top
//                     two bits are restored before returning
uintptr_t AllImages::resolveTarget(closure::Image::ResolvedSymbolTarget target) const
{
    switch ( target.sharedCache.kind ) {
        case closure::Image::ResolvedSymbolTarget::kindSharedCache:
            assert(_dyldCacheAddress != nullptr);
            return (uintptr_t)_dyldCacheAddress + (uintptr_t)target.sharedCache.offset;

        case closure::Image::ResolvedSymbolTarget::kindImage: {
            LoadedImage info;
            bool foundImage = findImageNum(target.image.imageNum, info);
            assert(foundImage);
            return (uintptr_t)(info.loadedAddress()) + (uintptr_t)target.image.offset;
        }

        case closure::Image::ResolvedSymbolTarget::kindAbsolute:
            if ( target.absolute.value & (1ULL << 62) )
                return (uintptr_t)(target.absolute.value | 0xC000000000000000ULL);
            else
                return (uintptr_t)target.absolute.value;
    }
    assert(0 && "malformed ResolvedSymbolTarget");
    return 0;
}
846
847 void* AllImages::interposeValue(void *value) const {
848 if ( !_mainClosure->hasInterposings() )
849 return value;
850
851 __block void* replacementValue = nullptr;
852 __block bool foundReplacement = false;
853 _mainClosure->forEachInterposingTuple(^(const closure::InterposingTuple& tuple, bool& stop) {
854 void* stockPointer = (void*)resolveTarget(tuple.stockImplementation);
855 if ( stockPointer == value) {
856 replacementValue = (void*)resolveTarget(tuple.newImplementation);
857 foundReplacement = true;
858 stop = true;
859 }
860 });
861
862 if ( foundReplacement )
863 return replacementValue;
864
865 return value;
866 }
867
868 void AllImages::infoForImageWithLoadAddress(const MachOLoaded* mh, void (^handler)(const LoadedImage& foundImage)) const
869 {
870 withReadLock(^{
871 for (const LoadedImage& li : _loadedImages) {
872 if ( li.loadedAddress() == mh ) {
873 handler(li);
874 break;
875 }
876 }
877 });
878 }
879
// Looks up the LoadedImage representing closure ImageNum 'imageNum'.
// On success copies it into 'foundImage' and returns true.
// NOTE(review): unlike findImage(), the _loadedImages scan here takes no read
// lock — presumably because callers (e.g. findDependent()) already hold it;
// confirm all call sites do.
bool AllImages::findImageNum(closure::ImageNum imageNum, LoadedImage& foundImage) const
{
    if ( _initialImages != nullptr ) {
        // being called during libSystem initialization, so _loadedImages not allocated yet
        for (const LoadedImage& li : *_initialImages) {
            if ( li.image()->representsImageNum(imageNum) ) {
                foundImage = li;
                return true;
            }
        }
        return false;
    }

    bool result = false;
    for (const LoadedImage& li : _loadedImages) {
        if ( li.image()->representsImageNum(imageNum) ) {
            foundImage = li;
            result = true;
            break;
        }
    }

    return result;
}
904
// Returns the load address of the 'depIndex'-th dependent of the image whose
// mach_header is 'mh', or nullptr if 'mh' is not loaded or the dependent's
// ImageNum cannot be resolved.  Runs under the read lock (findImageNum() is
// called while the lock is held).
const MachOLoaded* AllImages::findDependent(const MachOLoaded* mh, uint32_t depIndex)
{
    __block const MachOLoaded* result = nullptr;
    withReadLock(^{
        for (const LoadedImage& li : _loadedImages) {
            if ( li.loadedAddress() == mh ) {
                closure::ImageNum depImageNum = li.image()->dependentImageNum(depIndex);
                LoadedImage depLi;
                if ( findImageNum(depImageNum, depLi) )
                    result = depLi.loadedAddress();
                break;
            }
        }
    });
    return result;
}
921
922
// Breadth-first walk of 'nodeLi's dependents: 'handler' is called on each
// direct dependent not yet in 'visited' (upward links are skipped), then the
// walk recurses one level deeper.  Setting 'stop' in the handler sets
// 'stopped' and aborts the whole traversal.
void AllImages::breadthFirstRecurseDependents(Array<closure::ImageNum>& visited, const LoadedImage& nodeLi, bool& stopped, void (^handler)(const LoadedImage& aLoadedImage, bool& stop)) const
{
    // call handler on all direct dependents (unless already visited)
    STACK_ALLOC_ARRAY(LoadedImage, dependentsToRecurse, 256);
    nodeLi.image()->forEachDependentImage(^(uint32_t depIndex, closure::Image::LinkKind kind, closure::ImageNum depImageNum, bool& depStop) {
        if ( kind == closure::Image::LinkKind::upward )
            return;
        if ( visited.contains(depImageNum) )
            return;
        LoadedImage depLi;
        if ( !findImageNum(depImageNum, depLi) )
            return;
        handler(depLi, depStop);
        // <rdar://58466613> if there is an override of some dyld cache dylib, we need to store the override ImageNum in the visited set
        if ( depImageNum != depLi.image()->imageNum() ) {
            depImageNum = depLi.image()->imageNum();
            if ( visited.contains(depImageNum) )
                return;
        }
        visited.push_back(depImageNum);
        if ( depStop ) {
            stopped = true;
            return;
        }
        dependentsToRecurse.push_back(depLi);
    });
    if ( stopped )
        return;
    // recurse on all dependents just visited
    for (LoadedImage& depLi : dependentsToRecurse) {
        breadthFirstRecurseDependents(visited, depLi, stopped, handler);
    }
}
956
// Calls 'handler' on 'start' and then on every image in its dependency
// graph, breadth-first, visiting each image at most once. The handler can
// set 'stop' to end the traversal early. Runs entirely under the read lock.
void AllImages::visitDependentsTopDown(const LoadedImage& start, void (^handler)(const LoadedImage& aLoadedImage, bool& stop)) const
{
    withReadLock(^{
        // visited set can never exceed the number of loaded images
        STACK_ALLOC_ARRAY(closure::ImageNum, visited, count());
        bool stop = false;
        handler(start, stop);
        if ( stop )
            return;
        visited.push_back(start.image()->imageNum());
        breadthFirstRecurseDependents(visited, start, stop, handler);
    });
}
969
// Returns the load address of the program's main executable, as recorded
// in _programVars during launch (must already be set when called).
const MachOLoaded* AllImages::mainExecutable() const
{
    assert(_programVars != nullptr);
    return (const MachOLoaded*)_programVars->mh;
}
975
// Returns the closure Image describing the main executable (the closure's
// top image). Requires the main closure to already be set.
const closure::Image* AllImages::mainExecutableImage() const
{
    assert(_mainClosure != nullptr);
    return _mainClosure->images()->imageForNum(_mainClosure->topImageNum());
}
981
// Records the actual runtime path of the main executable. Used by
// imagePath() when the app may have been moved after its closure was built.
void AllImages::setMainPath(const char* path )
{
    _mainExeOverridePath = path;
}
986
// Returns the path to use for 'image'. Normally this is the path recorded
// in the closure, but on embedded platforms the main executable may have
// been moved after the closure was built, in which case the override path
// from setMainPath() is returned instead.
const char* AllImages::imagePath(const closure::Image* image) const
{
#if TARGET_OS_IPHONE
    // on iOS and watchOS, apps may be moved on device after closure built
    if ( _mainExeOverridePath != nullptr ) {
        if ( image == mainExecutableImage() )
            return _mainExeOverridePath;
    }
#endif
    return image->path();
}
998
// Returns the platform recorded in the legacy all_image_infos structure.
dyld_platform_t AllImages::platform() const {
    return (dyld_platform_t)oldAllImageInfo()->platform;
}
1002
// Returns the graded architecture list this process loads images against.
const GradedArchs& AllImages::archs() const
{
    return *_archs;
}
1007
1008 void AllImages::incRefCount(const mach_header* loadAddress)
1009 {
1010 for (DlopenCount& entry : _dlopenRefCounts) {
1011 if ( entry.loadAddress == loadAddress ) {
1012 // found existing DlopenCount entry, bump counter
1013 entry.refCount += 1;
1014 return;
1015 }
1016 }
1017
1018 // no existing DlopenCount, add new one
1019 _dlopenRefCounts.push_back({ loadAddress, 1 });
1020 }
1021
// Record one dlclose() of the image at 'loadAddress'. When the count hits
// zero the entry is removed and a garbage collection pass is run (outside
// the loop, since GC may mutate the list being iterated).
void AllImages::decRefCount(const mach_header* loadAddress)
{
    bool doCollect = false;
    for (DlopenCount& entry : _dlopenRefCounts) {
        if ( entry.loadAddress == loadAddress ) {
            // found existing DlopenCount entry, decrement counter
            entry.refCount -= 1;
            if ( entry.refCount == 0 ) {
                // last reference dropped: remove entry, then GC below
                _dlopenRefCounts.erase(entry);
                doCollect = true;
                break;
            }
            return;
        }
    }
    if ( doCollect )
        garbageCollectImages();
}
1040
1041
1042 #if TARGET_OS_OSX
// Registers 'image' in the NSObjectFileImage table and returns an opaque
// handle for it. Handles are monotonically increasing integers (never
// reused), assigned under the write lock.
NSObjectFileImage AllImages::addNSObjectFileImage(const OFIInfo& image)
{
    __block uint64_t imageNum = 0;
    withWriteLock(^{
        imageNum = ++_nextObjectFileImageNum;
        _objectFileImages.push_back(image);
        _objectFileImages.back().imageNum = imageNum;
    });
    return (NSObjectFileImage)imageNum;
}
1053
// Looks up the OFIInfo registered under 'imageHandle' and, if found, calls
// 'handler' with it while the read lock is held. Returns whether the
// handle matched a registered entry.
bool AllImages::forNSObjectFileImage(NSObjectFileImage imageHandle,
                                     void (^handler)(OFIInfo& image)) {
    uint64_t imageNum = (uint64_t)imageHandle;
    bool __block foundImage = false;
    withReadLock(^{
        for (OFIInfo& ofi : _objectFileImages) {
            if ( ofi.imageNum == imageNum ) {
                handler(ofi);
                foundImage = true;
                return;
            }
        }
    });

    return foundImage;
}
1070
// Removes the OFIInfo registered under 'imageHandle', if any.
// A stale or unknown handle is silently ignored.
void AllImages::removeNSObjectFileImage(NSObjectFileImage imageHandle)
{
    uint64_t imageNum = (uint64_t)imageHandle;
    withWriteLock(^{
        for (OFIInfo& ofi : _objectFileImages) {
            if ( ofi.imageNum == imageNum ) {
                _objectFileImages.erase(ofi);
                return;
            }
        }
    });
}
1083 #endif
1084
1085
// Reaper implements the mark-and-sweep pass used by garbageCollectImages():
// given the candidate (unloadable) images, it marks those still reachable
// from a dlopen()ed image, then finalizes (runs terminators and
// __cxa_finalize_ranges for) the remaining dead ones.
class VIS_HIDDEN Reaper
{
public:
    // One unload candidate plus its mark bit for the mark-and-sweep.
    struct ImageAndUse
    {
        const LoadedImage* li;
        bool inUse;
    };
    Reaper(Array<ImageAndUse>& unloadables, AllImages*);
    void garbageCollect();
    void finalizeDeadImages();

    static void runTerminators(const LoadedImage& li);
private:

    void markDirectlyDlopenedImagesAsUsed();
    void markDependentOfInUseImages();
    void markDependentsOf(const LoadedImage*);
    uint32_t inUseCount();
    void dump(const char* msg);

    Array<ImageAndUse>& _unloadables; // candidates; mark bits mutated in place
    AllImages* _allImages;            // for access to _dlopenRefCounts
    uint32_t _deadCount;              // set by garbageCollect()
};
1111
// Holds (does not copy) the caller's candidate array; dead count starts at zero.
Reaper::Reaper(Array<ImageAndUse>& unloadables, AllImages* all)
 : _unloadables(unloadables), _allImages(all), _deadCount(0)
{
}
1116
// Mark phase, step 1: any candidate whose dlopen ref-count is non-zero is
// still directly referenced and must not be unloaded.
void Reaper::markDirectlyDlopenedImagesAsUsed()
{
    for (AllImages::DlopenCount& entry : _allImages->_dlopenRefCounts) {
        if ( entry.refCount != 0 ) {
            for (ImageAndUse& iu : _unloadables) {
                if ( iu.li->loadedAddress() == entry.loadAddress ) {
                    iu.inUse = true;
                    break;
                }
            }
        }
    }
}
1130
1131 uint32_t Reaper::inUseCount()
1132 {
1133 uint32_t count = 0;
1134 for (ImageAndUse& iu : _unloadables) {
1135 if ( iu.inUse )
1136 ++count;
1137 }
1138 return count;
1139 }
1140
// Marks every candidate that is a direct dependent of 'li' as in-use.
// Only previously-unmarked entries are flipped, so the caller's fixpoint
// iteration terminates once no new images get marked.
void Reaper::markDependentsOf(const LoadedImage* li)
{
    li->image()->forEachDependentImage(^(uint32_t depIndex, closure::Image::LinkKind kind, closure::ImageNum depImageNum, bool& stop) {
        for (ImageAndUse& iu : _unloadables) {
            if ( !iu.inUse && iu.li->image()->representsImageNum(depImageNum) ) {
                iu.inUse = true;
                break;
            }
        }
    });
}
1152
1153 void Reaper::markDependentOfInUseImages()
1154 {
1155 for (ImageAndUse& iu : _unloadables) {
1156 if ( iu.inUse )
1157 markDependentsOf(iu.li);
1158 }
1159 }
1160
// Debug helper: when the body is uncommented, prints the in-use flag and
// path of every candidate. Kept compiled (empty) so call sites need not change.
void Reaper::dump(const char* msg)
{
    //log("%s:\n", msg);
    //for (ImageAndUse& iu : _unloadables) {
    // log(" in-used=%d %s\n", iu.inUse, iu.li->image()->path());
    //}
}
1168
1169 void Reaper::garbageCollect()
1170 {
1171 //dump("all unloadable images");
1172
1173 // mark all dylibs directly dlopen'ed as in use
1174 markDirectlyDlopenedImagesAsUsed();
1175
1176 //dump("directly dlopen()'ed marked");
1177
1178 // iteratively mark dependents of in-use dylibs as in-use until in-use count stops changing
1179 uint32_t lastCount = inUseCount();
1180 bool countChanged = false;
1181 do {
1182 markDependentOfInUseImages();
1183 //dump("dependents marked");
1184 uint32_t newCount = inUseCount();
1185 countChanged = (newCount != lastCount);
1186 lastCount = newCount;
1187 } while (countChanged);
1188
1189 _deadCount = (uint32_t)_unloadables.count() - inUseCount();
1190 }
1191
// Sweep phase: for each candidate not marked in-use, run its static
// terminators, then pass the executable address ranges of all dead images
// to __cxa_finalize_ranges() so registered atexit/destructor entries whose
// code lives in those ranges run before the memory goes away.
void Reaper::finalizeDeadImages()
{
    if ( _deadCount == 0 )
        return;
    STACK_ALLOC_OVERFLOW_SAFE_ARRAY(__cxa_range_t, ranges, _deadCount);
    for (ImageAndUse& iu : _unloadables) {
        if ( iu.inUse )
            continue;
        runTerminators(*iu.li);
        iu.li->image()->forEachDiskSegment(^(uint32_t segIndex, uint32_t fileOffset, uint32_t fileSize, int64_t vmOffset, uint64_t vmSize, uint8_t permissions, bool laterReadOnly, bool &stop) {
            // only executable segments can contain code __cxa entries point at
            if ( permissions & VM_PROT_EXECUTE ) {
                __cxa_range_t range;
                range.addr = (char*)(iu.li->loadedAddress()) + vmOffset;
                range.length = (size_t)vmSize;
                ranges.push_back(range);
            }
        });
    }
    __cxa_finalize_ranges(ranges.begin(), (uint32_t)ranges.count());
}
1212
// Runs the static terminators recorded in the closure for 'li', logging
// each call. Skipped entirely on arm64e (see radar below).
void Reaper::runTerminators(const LoadedImage& li)
{
    // <rdar://problem/71820555> Don't run static terminator for arm64e
    const MachOAnalyzer* ma = (MachOAnalyzer*)li.loadedAddress();
    if ( ma->isArch("arm64e") )
        return;

    if ( li.image()->hasTerminators() ) {
        typedef void (*Terminator)();
        li.image()->forEachTerminator(li.loadedAddress(), ^(const void* terminator) {
            Terminator termFunc = (Terminator)terminator;
            termFunc();
            log_initializers("dyld: called static terminator %p in %s\n", termFunc, li.image()->path());
        });
    }
}
1229
1230 void AllImages::runAllStaticTerminators()
1231 {
1232 // We want to run terminators in reverse chronological order of initializing
1233 // Note: initialLoadCount may be larger than what was actually loaded
1234 const uint32_t currentCount = (uint32_t)_loadedImages.count();
1235 const uint32_t initialLoadCount = std::min(_mainClosure->initialLoadCount(), currentCount);
1236
1237 // first run static terminators of anything dlopen()ed
1238 for (uint32_t i=currentCount-1; i >= initialLoadCount; --i) {
1239 Reaper::runTerminators(_loadedImages[i]);
1240 }
1241
1242 // next run terminators of statically load images, in loader-order they were init in reverse of this
1243 for (uint32_t i=0; i < initialLoadCount; ++i) {
1244 Reaper::runTerminators(_loadedImages[i]);
1245 }
1246 }
1247
1248
1249 // This function is called at the end of dlclose() when the reference count goes to zero.
1250 // The dylib being unloaded may have brought in other dependent dylibs when it was loaded.
1251 // Those dependent dylibs need to be unloaded, but only if they are not referenced by
1252 // something else. We use a standard mark and sweep garbage collection.
1253 //
1254 // The tricky part is that when a dylib is unloaded it may have a termination function that
1255 // can run and itself call dlclose() on yet another dylib. The problem is that this
// sort of garbage collection is not re-entrant. Instead a terminator's call to dlclose()
1257 // which calls garbageCollectImages() will just set a flag to re-do the garbage collection
1258 // when the current pass is done.
1259 //
1260 // Also note that this is done within the _loadedImages writer lock, so any dlopen/dlclose
1261 // on other threads are blocked while this garbage collections runs
1262 //
// Mark-and-sweep unload of images no longer referenced after a dlclose().
// Re-entrancy: _gcCount is an atomic counter. The first caller does the
// work; any caller arriving while a GC is in progress just bumps the count
// and returns, and the working thread re-runs the whole pass on its behalf
// (see comment block above).
void AllImages::garbageCollectImages()
{
    // if some other thread is currently GC'ing images, let other thread do the work
    int32_t newCount = OSAtomicIncrement32(&_gcCount);
    if ( newCount != 1 )
        return;

    do {
        // gather all images the closure says may ever be unloaded
        STACK_ALLOC_ARRAY(Reaper::ImageAndUse, unloadables, _loadedImages.count());
        withReadLock(^{
            for (const LoadedImage& li : _loadedImages) {
                if ( !li.image()->neverUnload() /*&& !li.neverUnload()*/ ) {
                    unloadables.push_back({&li, false});
                    //fprintf(stderr, "unloadable[%lu] %p %s\n", unloadables.count(), li.loadedAddress(), li.image()->path());
                }
            }
        });
        // make reaper object to do garbage collection and notifications
        Reaper reaper(unloadables, this);
        reaper.garbageCollect();

        // FIXME: we should sort dead images so higher level ones are terminated first

        // call cxa_finalize_ranges and static terminators of dead images
        reaper.finalizeDeadImages();

        // FIXME: DOF unregister

        //fprintf(stderr, "_loadedImages before GC removals:\n");
        //for (const LoadedImage& li : _loadedImages) {
        // fprintf(stderr, " loadAddr=%p, path=%s\n", li.loadedAddress(), li.image()->path());
        //}

        // make copy of LoadedImages we want to remove
        // because unloadables[] points into LoadedImage we are shrinking
        STACK_ALLOC_ARRAY(LoadedImage, unloadImages, _loadedImages.count());
        for (const Reaper::ImageAndUse& iu : unloadables) {
            if ( !iu.inUse )
                unloadImages.push_back(*iu.li);
        }
        // remove entries from _loadedImages
        if ( !unloadImages.empty() ) {
            removeImages(unloadImages);

            //fprintf(stderr, "_loadedImages after GC removals:\n");
            //for (const LoadedImage& li : _loadedImages) {
            // fprintf(stderr, " loadAddr=%p, path=%s\n", li.loadedAddress(), li.image()->path());
            //}
        }

        // if some other thread called GC during our work, redo GC on its behalf
        newCount = OSAtomicDecrement32(&_gcCount);
    }
    while (newCount > 0);
}
1318
1319
1320
// Registers a (mh, slide) load notifier. The notifier is first called
// synchronously for every already-loaded image (under the read lock),
// then added to the list invoked for future loads.
void AllImages::addLoadNotifier(NotifyFunc func)
{
    // callback about already loaded images
    withReadLock(^{
        for (const LoadedImage& li : _loadedImages) {
            dyld3::ScopedTimer timer(DBG_DYLD_TIMING_FUNC_FOR_ADD_IMAGE, (uint64_t)li.loadedAddress(), (uint64_t)func, 0);
            log_notifications("dyld: add notifier %p called with mh=%p\n", func, li.loadedAddress());
            // dyld-cache images share one slide; others compute their own
            if ( li.image()->inDyldCache() )
                func(li.loadedAddress(), (uintptr_t)_dyldCacheSlide);
            else
                func(li.loadedAddress(), li.loadedAddress()->getSlide());
        }
    });

    // add to list of functions to call about future loads
    withNotifiersLock(^{
        _loadNotifiers.push_back(func);
    });
}
1340
// Registers an unload notifier. Unlike load notifiers, there is no
// catch-up callback for images already loaded.
void AllImages::addUnloadNotifier(NotifyFunc func)
{
    // add to list of functions to call about future unloads
    withNotifiersLock(^{
        _unloadNotifiers.push_back(func);
    });
}
1348
// Registers a (mh, path, unloadable) load notifier. The notifier is first
// called synchronously for every already-loaded image (under the read
// lock), then added to the list invoked for future loads.
void AllImages::addLoadNotifier(LoadNotifyFunc func)
{
    // callback about already loaded images
    withReadLock(^{
        for (const LoadedImage& li : _loadedImages) {
            dyld3::ScopedTimer timer(DBG_DYLD_TIMING_FUNC_FOR_ADD_IMAGE, (uint64_t)li.loadedAddress(), (uint64_t)func, 0);
            log_notifications("dyld: add notifier %p called with mh=%p\n", func, li.loadedAddress());
            func(li.loadedAddress(), li.image()->path(), !li.image()->neverUnload());
        }
    });

    // add to list of functions to call about future loads
    withNotifiersLock(^{
        _loadNotifiers2.push_back(func);
    });
}
1365
1366
1367 void AllImages::addBulkLoadNotifier(BulkLoadNotifier func)
1368 {
1369 // callback about already loaded images
1370 unsigned count = (unsigned)_loadedImages.count();
1371 const mach_header* mhs[count];
1372 const char* paths[count];
1373 for (unsigned i=0; i < count; ++i) {
1374 mhs[i] = _loadedImages[i].loadedAddress();
1375 paths[i] = _loadedImages[i].image()->path();
1376 }
1377 dyld3::ScopedTimer timer(DBG_DYLD_TIMING_FUNC_FOR_ADD_IMAGE, (uint64_t)mhs[0], (uint64_t)func, 0);
1378 log_notifications("dyld: add notifier %p called with %d images\n", func, count);
1379 func(count, mhs, paths);
1380
1381 // add to list of functions to call about future loads
1382 withNotifiersLock(^{
1383 _loadBulkNotifiers.push_back(func);
1384 });
1385 }
1386
1387 // Returns true if logs should be sent to stderr as well as syslog.
1388 // Copied from objc which copied it from CFUtilities.c
static bool also_do_stderr(void)
{
    // echo to stderr only when it is attached to something that will
    // actually show the message: regular file, socket, pipe, or tty
    struct stat sb;
    if ( fstat(STDERR_FILENO, &sb) < 0 )
        return false;
    switch ( sb.st_mode & S_IFMT ) {
        case S_IFREG:
        case S_IFSOCK:
        case S_IFIFO:
        case S_IFCHR:
            return true;
        default:
            return false;
    }
}
1400
1401 // Print "message" to the console. Copied from objc.
static void _objc_syslog(const char *message)
{
    // always send to the system log
    _simple_asl_log(ASL_LEVEL_ERR, NULL, message);

    // additionally echo to stderr when it is attached to something useful
    if (also_do_stderr()) {
        write(STDERR_FILENO, message, strlen(message));
    }
}
1410
// Called by libobjc to register its mapped/init/unmapped callbacks.
// Also finishes setting up the closure's precomputed objc data (selector,
// class, protocol, and duplicate-class hash tables) — this could not be
// done in init() because it needs malloc, which is not yet up then.
// Finally, replays the "mapped" notification for all already-loaded images
// that contain objc.
void AllImages::setObjCNotifiers(_dyld_objc_notify_mapped map, _dyld_objc_notify_init init, _dyld_objc_notify_unmapped unmap)
{
    _objcNotifyMapped = map;
    _objcNotifyInit = init;
    _objcNotifyUnmapped = unmap;

    // We couldn't initialize the objc optimized closure data in init() as that needs malloc but runs before malloc initializes.
    // So lets grab the data now and set it up

    // Pull out the objc selector hash table if we have one
    Array<closure::Image::ObjCSelectorImage> selectorImageNums;
    const closure::ObjCSelectorOpt* selectorHashTable = nullptr;
    if (_mainClosure->selectorHashTable(selectorImageNums, selectorHashTable)) {
        _objcSelectorHashTable = selectorHashTable;
        for (closure::Image::ObjCSelectorImage selectorImage : selectorImageNums) {
            // resolve each image's closure ImageNum to its runtime load address
            LoadedImage loadedImage;
            bool found = findImageNum(selectorImage.imageNum, loadedImage);
            assert(found);
            _objcSelectorHashTableImages.push_back( (uintptr_t)loadedImage.loadedAddress() + selectorImage.offset );
        }
    }

    // Pull out the objc class hash table if we have one
    Array<closure::Image::ObjCClassImage> classImageNums;
    const closure::ObjCClassOpt* classHashTable = nullptr;
    const closure::ObjCClassOpt* protocolHashTable = nullptr;
    if (_mainClosure->classAndProtocolHashTables(classImageNums, classHashTable, protocolHashTable)) {
        _objcClassHashTable = (const closure::ObjCClassOpt*)classHashTable;
        _objcProtocolHashTable = (const closure::ObjCClassOpt*)protocolHashTable;
        for (closure::Image::ObjCClassImage classImage : classImageNums) {
            LoadedImage loadedImage;
            bool found = findImageNum(classImage.imageNum, loadedImage);
            assert(found);
            // record runtime addresses of each image's class-name and class-data tables
            uintptr_t loadAddress = (uintptr_t)loadedImage.loadedAddress();
            uintptr_t nameBaseAddress = loadAddress + classImage.offsetOfClassNames;
            uintptr_t dataBaseAddress = loadAddress + classImage.offsetOfClasses;
            _objcClassHashTableImages.push_back({ nameBaseAddress, dataBaseAddress });
        }
    }

    _mainClosure->duplicateClassesHashTable(_objcClassDuplicatesHashTable);
    if ( _objcClassDuplicatesHashTable != nullptr ) {
        // If we have duplicates, those need the objc opt pointer to find dupes
        _dyldCacheObjCOpt = _dyldCacheAddress->objcOpt();
    }

    // ObjC would have issued warnings on duplicate classes. We've recorded those too
    _mainClosure->forEachWarning(closure::Closure::Warning::duplicateObjCClass, ^(const char *warning, bool &stop) {
        Diagnostics diag;
        diag.error("objc[%d]: %s\n", getpid(), warning);
        _objc_syslog(diag.errorMessage());
    });

    // callback about already loaded images
    uint32_t maxCount = count();
    STACK_ALLOC_ARRAY(const mach_header*, mhs, maxCount);
    STACK_ALLOC_ARRAY(const char*, paths, maxCount);
    // don't need _mutex here because this is called when process is still single threaded
    for (const LoadedImage& li : _loadedImages) {
        if ( li.image()->hasObjC() ) {
            paths.push_back(imagePath(li.image()));
            mhs.push_back(li.loadedAddress());
        }
    }
    if ( !mhs.empty() ) {
        (*map)((uint32_t)mhs.count(), &paths[0], &mhs[0]);
        if ( log_notifications("dyld: objc-mapped-notifier called with %ld images:\n", mhs.count()) ) {
            for (uintptr_t i=0; i < mhs.count(); ++i) {
                log_notifications("dyld: objc-mapped: %p %s\n", mhs[i], paths[i]);
            }
        }
    }
}
1484
// Applies the closure's cache-override patch entries to the dyld shared
// cache: every patchable use of an overridden export in the cache is
// rewritten to point at its replacement (an image, another cache location,
// or an absolute value). The cache's read-only data is made temporarily
// writable for the duration, and VM accounting is suspended while patching.
void AllImages::applyInterposingToDyldCache(const closure::Closure* closure, mach_port_t mach_task_self)
{
    dyld3::ScopedTimer timer(DBG_DYLD_TIMING_APPLY_INTERPOSING, 0, 0, 0);
    const uintptr_t cacheStart = (uintptr_t)_dyldCacheAddress;
    __block closure::ImageNum lastCachedDylibImageNum = 0;
    __block const closure::Image* lastCachedDylibImage = nullptr;
    __block bool suspendedAccounting = false;

    // nothing to do if the closure carries no cache overrides
    if ( closure->findAttributePayload(closure::TypedBytes::Type::cacheOverrides) == nullptr )
        return;

    // make the cache writable for this block
    DyldSharedCache::DataConstScopedWriter patcher(_dyldCacheAddress, mach_task_self, (DyldSharedCache::DataConstLogFunc)&log_segments);

    closure->forEachPatchEntry(^(const closure::Closure::PatchEntry& entry) {
        // entries are grouped by overridden dylib; cache the Image lookup
        if ( entry.overriddenDylibInCache != lastCachedDylibImageNum ) {
            lastCachedDylibImage = closure::ImageArray::findImage(imagesArrays(), entry.overriddenDylibInCache);
            assert(lastCachedDylibImage != nullptr);
            lastCachedDylibImageNum = entry.overriddenDylibInCache;
        }
        // suspend accounting lazily, only once patching actually happens
        if ( !suspendedAccounting ) {
            Loader::vmAccountingSetSuspended(true, log_fixups);
            suspendedAccounting = true;
        }
        // compute the replacement value for this export
        uintptr_t newValue = 0;
        LoadedImage foundImage;
        switch ( entry.replacement.image.kind ) {
            case closure::Image::ResolvedSymbolTarget::kindImage:
                if ( !findImageNum(entry.replacement.image.imageNum, foundImage) ) {
                    abort_report_np("cannot find replacement imageNum=0x%04X when patching cache to override imageNum=0x%04X\n", entry.replacement.image.imageNum, entry.overriddenDylibInCache);
                }
                newValue = (uintptr_t)(foundImage.loadedAddress()) + (uintptr_t)entry.replacement.image.offset;
                break;
            case closure::Image::ResolvedSymbolTarget::kindSharedCache:
                newValue = (uintptr_t)_dyldCacheAddress + (uintptr_t)entry.replacement.sharedCache.offset;
                break;
            case closure::Image::ResolvedSymbolTarget::kindAbsolute:
                // this means the symbol was missing in the cache override dylib, so set any uses to NULL
                newValue = (uintptr_t)entry.replacement.absolute.value;
                break;
            default:
                assert(0 && "bad replacement kind");
        }
        // patch every recorded use of the overridden export in the cache
        uint32_t lastCachedDylibImageIndex = lastCachedDylibImageNum - (uint32_t)_dyldCacheAddress->cachedDylibsImageArray()->startImageNum();
        _dyldCacheAddress->forEachPatchableUseOfExport(lastCachedDylibImageIndex,
                                                       entry.exportCacheOffset, ^(dyld_cache_patchable_location patchLocation) {
            uintptr_t* loc = (uintptr_t*)(cacheStart+patchLocation.cacheOffset);
#if __has_feature(ptrauth_calls)
            // authenticated (arm64e) locations must be re-signed with the
            // recorded key/diversity when stored
            if ( patchLocation.authenticated ) {
                MachOLoaded::ChainedFixupPointerOnDisk fixupInfo;
                fixupInfo.arm64e.authRebase.auth = true;
                fixupInfo.arm64e.authRebase.addrDiv = patchLocation.usesAddressDiversity;
                fixupInfo.arm64e.authRebase.diversity = patchLocation.discriminator;
                fixupInfo.arm64e.authRebase.key = patchLocation.key;
                *loc = fixupInfo.arm64e.signPointer(loc, newValue + DyldSharedCache::getAddend(patchLocation));
                log_fixups("dyld: cache fixup: *%p = %p (JOP: diversity 0x%04X, addr-div=%d, key=%s)\n",
                           loc, (void*)*loc, patchLocation.discriminator, patchLocation.usesAddressDiversity, DyldSharedCache::keyName(patchLocation));
                return;
            }
#endif
            log_fixups("dyld: cache fixup: *%p = 0x%0lX (dyld cache patch)\n", loc, newValue + (uintptr_t)DyldSharedCache::getAddend(patchLocation));
            *loc = newValue + (uintptr_t)DyldSharedCache::getAddend(patchLocation);
        });
    });
    if ( suspendedAccounting )
        Loader::vmAccountingSetSuspended(false, log_fixups);
}
1552
// Runs initializers for the images loaded at launch, in load order, up to
// and including the main executable. Each entry's bottom-up init list is
// run outside the lock, because initializers may themselves call dlopen().
void AllImages::runStartupInitialzers()
{
    __block bool mainExecutableInitializerNeedsToRun = true;
    __block uint32_t imageIndex = 0;
    while ( mainExecutableInitializerNeedsToRun ) {
        __block const closure::Image* image = nullptr;
        withReadLock(^{
            // hold the lock only long enough to read the entry
            image = _loadedImages[imageIndex].image();
            if ( _loadedImages[imageIndex].loadedAddress()->isMainExecutable() )
                mainExecutableInitializerNeedsToRun = false;
        });
        runInitialzersBottomUp(image);
        ++imageIndex;
    }
}
1568
1569
1570 // Find image in _loadedImages which has ImageNum == num.
// Try indexHint first; if hint is wrong, update it, so next use is faster.
1572 LoadedImage AllImages::findImageNum(closure::ImageNum num, uint32_t& indexHint)
1573 {
1574 __block LoadedImage copy;
1575 withReadLock(^{
1576 if ( (indexHint >= _loadedImages.count()) || !_loadedImages[indexHint].image()->representsImageNum(num) ) {
1577 indexHint = 0;
1578 for (indexHint=0; indexHint < _loadedImages.count(); ++indexHint) {
1579 if ( _loadedImages[indexHint].image()->representsImageNum(num) )
1580 break;
1581 }
1582 assert(indexHint < _loadedImages.count());
1583 }
1584 copy = _loadedImages[indexHint];
1585 });
1586 return copy;
1587 }
1588
1589
1590 // Change the state of the LoadedImage in _loadedImages which has ImageNum == num.
1591 // Only change state if current state is expectedCurrentState (atomic swap).
1592 bool AllImages::swapImageState(closure::ImageNum num, uint32_t& indexHint, LoadedImage::State expectedCurrentState, LoadedImage::State newState)
1593 {
1594 __block bool result = false;
1595 withWriteLock(^{
1596 if ( (indexHint >= _loadedImages.count()) || !_loadedImages[indexHint].image()->representsImageNum(num) ) {
1597 indexHint = 0;
1598 for (indexHint=0; indexHint < _loadedImages.count(); ++indexHint) {
1599 if ( _loadedImages[indexHint].image()->representsImageNum(num) )
1600 break;
1601 }
1602 assert(indexHint < _loadedImages.count());
1603 }
1604 if ( _loadedImages[indexHint].state() == expectedCurrentState ) {
1605 _loadedImages[indexHint].setState(newState);
1606 result = true;
1607 }
1608 });
1609 return result;
1610 }
1611
1612 // dyld3 pre-builds the order initializers need to be run (bottom up) in a list in the closure.
1613 // This method uses that list to run all initializers.
1614 // Because an initializer may call dlopen() and/or create threads, the _loadedImages array
1615 // may move under us. So, never keep a pointer into it. Always reference images by ImageNum
1616 // and use hint to make that faster in the case where the _loadedImages does not move.
// Runs the initializers for topImage and everything below it, in the
// bottom-up order precomputed in the closure. State transitions
// (fixedUp -> beingInited -> inited) guard against re-running an image's
// initializers and against dependency cycles.
void AllImages::runInitialzersBottomUp(const closure::Image* topImage)
{
    // walk closure specified initializer list, already ordered bottom up
    topImage->forEachImageToInitBefore(^(closure::ImageNum imageToInit, bool& stop) {
        // get copy of LoadedImage about imageToInit, but don't keep reference into _loadedImages, because it may move if initializers call dlopen()
        uint32_t indexHint = 0;
        LoadedImage loadedImageCopy = findImageNum(imageToInit, indexHint);
        // skip if the image is already inited, or in process of being inited (dependency cycle)
        if ( (loadedImageCopy.state() == LoadedImage::State::fixedUp) && swapImageState(imageToInit, indexHint, LoadedImage::State::fixedUp, LoadedImage::State::beingInited) ) {
            // tell objc to run any +load methods in image
            if ( (_objcNotifyInit != nullptr) && loadedImageCopy.image()->mayHavePlusLoads() ) {
                dyld3::ScopedTimer timer(DBG_DYLD_TIMING_OBJC_INIT, (uint64_t)loadedImageCopy.loadedAddress(), 0, 0);
                const char* path = imagePath(loadedImageCopy.image());
                log_notifications("dyld: objc-init-notifier called with mh=%p, path=%s\n", loadedImageCopy.loadedAddress(), path);
                (*_objcNotifyInit)(path, loadedImageCopy.loadedAddress());
            }

            // run all initializers in image
            runAllInitializersInImage(loadedImageCopy.image(), loadedImageCopy.loadedAddress());

            // advance state to inited
            swapImageState(imageToInit, indexHint, LoadedImage::State::beingInited, LoadedImage::State::inited);
        }
    });
}
1642
// Special-cased initialization of libSystem.dylib, which must run before
// any other image's initializers (libdyld itself is set up during it).
void AllImages::runLibSystemInitializer(LoadedImage& libSystem)
{
    // First set the libSystem state to beingInited. This protects against accidentally trying
    // to run its initializers again if a dlopen happens inside libSystem_initializer().
    libSystem.setState(LoadedImage::State::beingInited);

    // run all initializers in libSystem.dylib
    // Note: during libSystem's initialization, libdyld_initializer() is called which copies _initialImages to _loadedImages
    runAllInitializersInImage(libSystem.image(), libSystem.loadedAddress());

    // update global flags that libsystem has been initialized (so debug tools know it is safe to inject threads)
    _oldAllImageInfos->libSystemInitialized = true;

    // mark libSystem.dylib as inited, so later recursive initialization will not try to re-run it
    // (note: 'libSystem' parameter points into _initialImages; the live entry
    // is now in _loadedImages, so find it by load address)
    for (LoadedImage& li : _loadedImages) {
        if ( li.loadedAddress() == libSystem.loadedAddress() ) {
            li.setState(LoadedImage::State::inited);
            break;
        }
    }
    // now that libSystem is up, register a callback that should be called at exit
    __cxa_atexit(&AllImages::runAllStaticTerminatorsHelper, nullptr, nullptr);
}
1666
// Plain-function trampoline registered with __cxa_atexit(); forwards to
// the AllImages singleton to run all static terminators at exit.
void AllImages::runAllStaticTerminatorsHelper(void*)
{
    gAllImages.runAllStaticTerminators();
}
1671
1672 void AllImages::runAllInitializersInImage(const closure::Image* image, const MachOLoaded* ml)
1673 {
1674 image->forEachInitializer(ml, ^(const void* func) {
1675 Initializer initFunc = (Initializer)func;
1676 #if __has_feature(ptrauth_calls)
1677 initFunc = (Initializer)__builtin_ptrauth_sign_unauthenticated((void*)initFunc, 0, 0);
1678 #endif
1679 {
1680 ScopedTimer(DBG_DYLD_TIMING_STATIC_INITIALIZER, (uint64_t)ml, (uint64_t)func, 0);
1681 initFunc(NXArgc, NXArgv, environ, appleParams, _programVars);
1682
1683 }
1684 log_initializers("dyld: called initialzer %p in %s\n", initFunc, image->path());
1685 });
1686 }
1687
1688 // Note this is noinline to avoid having too much stack used if loadImage has to call due to an invalid closure
1689 __attribute__((noinline))
1690 const MachOLoaded* AllImages::dlopen(Diagnostics& diag, const char* path, bool rtldNoLoad, bool rtldLocal,
1691 bool rtldNoDelete, bool rtldNow, bool fromOFI, const void* callerAddress,
1692 bool canUsePrebuiltSharedCacheClosure)
1693 {
1694 bool sharedCacheFormatCompatible = (_dyldCacheAddress != nullptr) && (_dyldCacheAddress->header.formatVersion == dyld3::closure::kFormatVersion);
1695 canUsePrebuiltSharedCacheClosure &= sharedCacheFormatCompatible;
1696
1697 // quick check if path is in shared cache and already loaded
1698 if ( _dyldCacheAddress != nullptr ) {
1699 uint32_t dyldCacheImageIndex;
1700 if ( _dyldCacheAddress->hasImagePath(path, dyldCacheImageIndex) ) {
1701 uint64_t mTime;
1702 uint64_t inode;
1703 const MachOLoaded* mh = (MachOLoaded*)_dyldCacheAddress->getIndexedImageEntry(dyldCacheImageIndex, mTime, inode);
1704 // Note: we do not need readLock because this is within global dlopen lock
1705 for (const LoadedImage& li : _loadedImages) {
1706 if ( li.loadedAddress() == mh ) {
1707 return mh;
1708 }
1709 }
1710
1711 // If this is a customer cache, and we have no overrides, then we know for sure the cache closure is valid
1712 // This assumes that a libdispatch root would have been loaded on launch, and that root path is not
1713 // supported with customer caches, which is the case today.
1714 if ( !rtldNoLoad && !hasInsertedOrInterposingLibraries() &&
1715 (_dyldCacheAddress->header.cacheType == kDyldSharedCacheTypeProduction) &&
1716 sharedCacheFormatCompatible ) {
1717 const dyld3::closure::ImageArray* images = _dyldCacheAddress->cachedDylibsImageArray();
1718 const dyld3::closure::Image* image = images->imageForNum(dyldCacheImageIndex+1);
1719 return loadImage(diag, path, image->imageNum(), nullptr, rtldLocal, rtldNoDelete, rtldNow, fromOFI, callerAddress);
1720 }
1721 }
1722 }
1723
1724 __block closure::ImageNum callerImageNum = 0;
1725 for (const LoadedImage& li : _loadedImages) {
1726 uint8_t permissions;
1727 if ( (callerImageNum == 0) && li.image()->containsAddress(callerAddress, li.loadedAddress(), &permissions) ) {
1728 callerImageNum = li.image()->imageNum();
1729 }
1730 //fprintf(stderr, "mh=%p, image=%p, imageNum=0x%04X, path=%s\n", li.loadedAddress(), li.image(), li.image()->imageNum(), li.image()->path());
1731 }
1732
1733 // make closure
1734 closure::ImageNum topImageNum = 0;
1735 const closure::DlopenClosure* newClosure = nullptr;
1736
1737 // First try with closures from the shared cache permitted.
1738 // Then try again with forcing a new closure
1739 for (bool canUseSharedCacheClosure : { true, false }) {
1740 // We can only use a shared cache closure if the shared cache format is the same as libdyld.
1741 canUseSharedCacheClosure &= canUsePrebuiltSharedCacheClosure;
1742 closure::FileSystemPhysical fileSystem(nullptr, nullptr, _allowEnvPaths);
1743 RootsChecker rootsChecker;
1744 closure::ClosureBuilder::AtPath atPathHanding = (_allowAtPaths ? closure::ClosureBuilder::AtPath::all : closure::ClosureBuilder::AtPath::onlyInRPaths);
1745 closure::ClosureBuilder cb(_nextImageNum, fileSystem, rootsChecker, _dyldCacheAddress, true, *_archs, closure::gPathOverrides, atPathHanding, true, nullptr, (dyld3::Platform)platform());
1746 newClosure = cb.makeDlopenClosure(path, _mainClosure, _loadedImages.array(), callerImageNum, rtldNoLoad, rtldNow, canUseSharedCacheClosure, &topImageNum);
1747 if ( newClosure == closure::ClosureBuilder::sRetryDlopenClosure ) {
1748 log_apis(" dlopen: closure builder needs to retry: %s\n", path);
1749 assert(canUseSharedCacheClosure);
1750 continue;
1751 }
1752 if ( (newClosure == nullptr) && (topImageNum == 0) ) {
1753 if ( cb.diagnostics().hasError())
1754 diag.error("%s", cb.diagnostics().errorMessage());
1755 else if ( !rtldNoLoad )
1756 diag.error("dlopen(): file not found: %s", path);
1757 return nullptr;
1758 }
1759 // save off next available ImageNum for use by next call to dlopen()
1760 _nextImageNum = cb.nextFreeImageNum();
1761 break;
1762 }
1763
1764 if ( newClosure != nullptr ) {
1765 // if new closure contains an ImageArray, add it to list
1766 if ( const closure::ImageArray* newArray = newClosure->images() ) {
1767 appendToImagesArray(newArray);
1768 }
1769 log_apis(" dlopen: made %s closure: %p\n", newClosure->topImage()->variantString(), newClosure);
1770 }
1771
1772 // if already loaded, just bump refCount and return
1773 if ( (newClosure == nullptr) && (topImageNum != 0) ) {
1774 for (LoadedImage& li : _loadedImages) {
1775 if ( li.image()->imageNum() == topImageNum ) {
1776 // is already loaded
1777 const MachOLoaded* topLoadAddress = li.loadedAddress();
1778 if ( !li.image()->inDyldCache() )
1779 incRefCount(topLoadAddress);
1780 log_apis(" dlopen: already loaded as '%s'\n", li.image()->path());
1781 // if previously opened with RTLD_LOCAL, but now opened with RTLD_GLOBAL, unhide it
1782 if ( !rtldLocal && li.hideFromFlatSearch() )
1783 li.setHideFromFlatSearch(false);
1784 // if called with RTLD_NODELETE, mark it as never-unload
1785 if ( rtldNoDelete )
1786 li.markLeaveMapped();
1787
1788 // If we haven't run the initializers then we must be in a static init in a dlopen
1789 if ( li.state() != LoadedImage::State::inited ) {
1790 // RTLD_NOLOAD means dlopen should fail unless path is already loaded.
1791 // don't run initializers when RTLD_NOLOAD is set. This only matters if dlopen() is
1792 // called from within an initializer because it can cause initializers to run
1793 // out of order. Most uses of RTLD_NOLOAD are "probes". If they want initialzers
1794 // to run, then don't use RTLD_NOLOAD.
1795 if (!rtldNoLoad) {
1796 runInitialzersBottomUp(li.image());
1797 }
1798 }
1799
1800 return topLoadAddress;
1801 }
1802 }
1803 }
1804
1805 return loadImage(diag, path, topImageNum, newClosure, rtldLocal, rtldNoDelete, rtldNow, fromOFI, callerAddress);
1806 }
1807
// Note this is noinline to avoid having too much stack used in the parent
// dlopen method
//
// Loads the image identified by `topImageNum` (plus all of its not-yet-loaded
// dependents), maps and fixes them up, registers them with gAllImages, runs
// notifiers and initializers, and returns the load address of the top image.
// Returns nullptr (with `diag` set) on failure.  `newClosure` is nullptr when
// a pre-built (shared cache) closure is being used.
__attribute__((noinline))
const MachOLoaded* AllImages::loadImage(Diagnostics& diag, const char* path,
                                        closure::ImageNum topImageNum, const closure::DlopenClosure* newClosure,
                                        bool rtldLocal, bool rtldNoDelete, bool rtldNow, bool fromOFI,
                                        const void* callerAddress) {
    // Note this array is used as the storage to Loader so needs to be at least
    // large enough to handle whatever total number of images we need to do the dlopen
    STACK_ALLOC_OVERFLOW_SAFE_ARRAY(LoadedImage, newImages, 1024);

    // Note we don't need pre-optimized Objective-C for dlopen closures, but use
    // variables here to make it easier to see whats going on.
    const dyld3::closure::ObjCSelectorOpt* selectorOpt = nullptr;
    dyld3::Array<dyld3::closure::Image::ObjCSelectorImage> selectorImages;

    // run loader to load all new images
    // (!rtldNow: lazy binding is allowed unless the caller asked for RTLD_NOW)
    RootsChecker rootsChecker;
    Loader loader(_loadedImages.array(), newImages, _dyldCacheAddress, imagesArrays(),
                  selectorOpt, selectorImages, rootsChecker, (dyld3::Platform)platform(),
                  &dyld3::log_loads, &dyld3::log_segments, &dyld3::log_fixups, &dyld3::log_dofs, !rtldNow);

    // find Image* for top image, look in new closure first
    const closure::Image* topImage = nullptr;
    if ( newClosure != nullptr )
        topImage = newClosure->images()->imageForNum(topImageNum);
    if ( topImage == nullptr )
        topImage = closure::ImageArray::findImage(imagesArrays(), topImageNum);
    if ( newClosure == nullptr ) {
        // no new closure was built, so a pre-built one is being (re)used
        if ( topImageNum < dyld3::closure::kLastDyldCacheImageNum )
            log_apis(" dlopen: using pre-built %s dlopen closure from dyld shared cache %p\n", topImage->variantString(), topImage);
        else
            log_apis(" dlopen: using pre-built %s dlopen closure %p\n", topImage->variantString(), topImage);
    }
    // RTLD_LOCAL / RTLD_NODELETE only apply to images outside the dyld cache;
    // cache dylibs are always globally visible and never unloaded
    LoadedImage topLoadedImage = LoadedImage::make(topImage);
    if ( rtldLocal && !topImage->inDyldCache() )
        topLoadedImage.setHideFromFlatSearch(true);
    if ( rtldNoDelete && !topImage->inDyldCache() )
        topLoadedImage.markLeaveMapped();
    loader.addImage(topLoadedImage);


    // recursively load all dependents and fill in allImages array
    bool someCacheImageOverridden = false;
    loader.completeAllDependents(diag, someCacheImageOverridden);
    if ( diag.hasError() )
        return nullptr;
    bool closureOutOfDate;
    bool recoverable;
    loader.mapAndFixupAllImages(diag, _processDOFs, fromOFI, &closureOutOfDate, &recoverable);
    if ( diag.hasError() ) {
        // If we used a pre-built shared cache closure, and now found that it was out of date,
        // try again and rebuild a new closure
        // Note, newClosure is null in the case where we used a prebuilt closure
        if ( closureOutOfDate && recoverable && (newClosure == nullptr) ) {
            diag.clearError();
            // re-enter dlopen with canUsePrebuiltSharedCacheClosure=false to force a fresh closure
            return dlopen(diag, path, false /* rtldNoLoad */, rtldLocal, rtldNoDelete, rtldNow, fromOFI, callerAddress, false);
        }
        return nullptr;
    }

    // Record if we had a root
    _someImageOverridden |= someCacheImageOverridden;

    // loader.addImage() put the top image first, so newImages[0] is the dlopen'ed image
    const MachOLoaded* topLoadAddress = newImages.begin()->loadedAddress();

    // bump dlopen refcount of image directly loaded
    // (cache dylibs are not refcounted: they can never be dlclose'd)
    if ( !topImage->inDyldCache() )
        incRefCount(topLoadAddress);

    // tell gAllImages about new images
    addImages(newImages);

    // Run notifiers before applyInterposingToDyldCache() as then we have an
    // accurate image list before any calls to findImage().
    // TODO: Can we move this even earlier, eg, after map images but before fixups?
    runImageNotifiers(newImages);

    // if closure adds images that override dyld cache, patch cache
    if ( newClosure != nullptr )
        applyInterposingToDyldCache(newClosure, mach_task_self());

    runImageCallbacks(newImages);

    // run initializers
    runInitialzersBottomUp(topImage);

    return topLoadAddress;
}
1897
// Records a new ImageArray (e.g. from a freshly built dlopen closure) so that
// later image lookups via imagesArrays() can find its Images.
void AllImages::appendToImagesArray(const closure::ImageArray* newArray)
{
    _imagesArrays.push_back(newArray);
}
1902
// Returns the list of all known ImageArrays (main closure, dyld cache,
// and any dlopen closures appended so far).
const Array<const closure::ImageArray*>& AllImages::imagesArrays()
{
    return _imagesArrays.array();
}
1907
1908 bool AllImages::isRestricted() const
1909 {
1910 return !_allowEnvPaths;
1911 }
1912
1913 bool AllImages::hasInsertedOrInterposingLibraries() const
1914 {
1915 return _mainClosure->hasInsertedLibraries() || _mainClosure->hasInterposings();
1916 }
1917
// Called in the parent just before fork() so the child does not inherit the
// global image lock in a held-by-another-thread state.
// No-op when os_unfair_recursive_lock is unavailable in this SDK.
void AllImages::takeLockBeforeFork() {
#ifdef OS_UNFAIR_RECURSIVE_LOCK_INIT
    os_unfair_recursive_lock_lock(&_globalLock);
#endif
}
1923
// Called in the parent after fork() returns; pairs with takeLockBeforeFork().
void AllImages::releaseLockInForkParent() {
#ifdef OS_UNFAIR_RECURSIVE_LOCK_INIT
    os_unfair_recursive_lock_unlock(&_globalLock);
#endif
}
1929
// Called in the child after fork(): the lock was taken by the parent thread
// (see takeLockBeforeFork), so the child must reset ownership rather than
// plain-unlock, using the forked-child variant of the unlock API.
void AllImages::resetLockInForkChild() {
#if TARGET_OS_SIMULATOR

    // There's no dyld3 on the simulator this year
    assert(false);

#else

#ifdef OS_UNFAIR_RECURSIVE_LOCK_INIT
    os_unfair_recursive_lock_unlock_forked_child(&_globalLock);
#endif

#endif // TARGET_OS_SIMULATOR
}
1944
1945 const char* AllImages::getObjCSelector(const char *selName) const {
1946 if ( _objcSelectorHashTable == nullptr )
1947 return nullptr;
1948 return _objcSelectorHashTable->getString(selName, _objcSelectorHashTableImages.array());
1949 }
1950
1951 void AllImages::forEachObjCClass(const char* className,
1952 void (^callback)(void* classPtr, bool isLoaded, bool* stop)) const {
1953 if ( _objcClassHashTable == nullptr )
1954 return;
1955 // There may be a duplicate in the shared cache. If that is the case, return it first
1956 if ( _objcClassDuplicatesHashTable != nullptr ) {
1957 void* classImpl = nullptr;
1958 if ( _objcClassDuplicatesHashTable->getClassLocation(className, _dyldCacheObjCOpt, classImpl) ) {
1959 bool stop = false;
1960 callback(classImpl, true, &stop);
1961 if (stop)
1962 return;
1963 }
1964 }
1965 _objcClassHashTable->forEachClass(className, _objcClassHashTableImages.array(), callback);
1966 }
1967
// Invokes `callback` for each pre-optimized Objective-C protocol named
// `protocolName`. Does nothing when no protocol hash table was built into
// the closure.
void AllImages::forEachObjCProtocol(const char* protocolName,
                                    void (^callback)(void* protocolPtr, bool isLoaded, bool* stop)) const {
    if ( _objcProtocolHashTable == nullptr )
        return;
    // NOTE(review): this passes _objcClassHashTableImages (not a protocol-specific
    // images array) — presumably the class and protocol opts index the same image
    // list written by the closure builder; confirm against ClosureBuilder.
    _objcProtocolHashTable->forEachClass(protocolName, _objcClassHashTableImages.array(), callback);
}
1974
1975
1976 } // namespace dyld3
1977
1978
1979
1980
1981
1982