apple/dyld (dyld-733.8) dyld3/AllImages.cpp
1 /*
2 * Copyright (c) 2017 Apple Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. Please obtain a copy of the License at
10 * http://www.opensource.apple.com/apsl/ and read it before using this
11 * file.
12 *
13 * The Original Code and all software distributed under the License are
14 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
15 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
16 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
18 * Please see the License for the specific language governing rights and
19 * limitations under the License.
20 *
21 * @APPLE_LICENSE_HEADER_END@
22 */
23
24
25 #include <stdint.h>
26 #include <fcntl.h>
27 #include <sys/types.h>
28 #include <sys/stat.h>
29 #include <sys/sysctl.h>
30 #include <mach/mach_time.h> // mach_absolute_time()
31 #include <libkern/OSAtomic.h>
32 #include <uuid/uuid.h>
33 #include <mach-o/dyld_images.h>
34
35 #include <vector>
36 #include <algorithm>
37
38 #include "AllImages.h"
39 #include "libdyldEntryVector.h"
40 #include "Logging.h"
41 #include "Loading.h"
42 #include "Tracing.h"
43 #include "DyldSharedCache.h"
44 #include "PathOverrides.h"
45 #include "Closure.h"
46 #include "ClosureBuilder.h"
47 #include "ClosureFileSystemPhysical.h"
48
49 #include "objc-shared-cache.h"
50
51 extern const char** appleParams;
52
53 // should be a header for these
54 struct __cxa_range_t {
55 const void* addr;
56 size_t length;
57 };
58 extern "C" void __cxa_finalize_ranges(const __cxa_range_t ranges[], unsigned int count);
59
60 extern "C" int __cxa_atexit(void (*func)(void *), void* arg, void* dso);
61
62
63
64 VIS_HIDDEN bool gUseDyld3 = false;
65
66
67 namespace dyld3 {
68
69
70
71 ///////////////////// AllImages ////////////////////////////
72
73
74 AllImages gAllImages;
75
76
77
78 void AllImages::init(const closure::LaunchClosure* closure, const DyldSharedCache* dyldCacheLoadAddress, const char* dyldCachePath,
79 const Array<LoadedImage>& initialImages)
80 {
81 _mainClosure = closure;
82 _initialImages = &initialImages;
83 _dyldCacheAddress = dyldCacheLoadAddress;
84 _dyldCachePath = dyldCachePath;
85
86 if ( _dyldCacheAddress ) {
87 const dyld_cache_mapping_info* const fileMappings = (dyld_cache_mapping_info*)((uint64_t)_dyldCacheAddress + _dyldCacheAddress->header.mappingOffset);
88 _dyldCacheSlide = (uint64_t)dyldCacheLoadAddress - fileMappings[0].address;
89 _imagesArrays.push_back(dyldCacheLoadAddress->cachedDylibsImageArray());
90 if ( auto others = dyldCacheLoadAddress->otherOSImageArray() )
91 _imagesArrays.push_back(others);
92 }
93 _imagesArrays.push_back(_mainClosure->images());
94
95 // record first ImageNum to use for dlopen() calls
96 _mainClosure->images()->forEachImage(^(const dyld3::closure::Image* image, bool& stop) {
97 closure::ImageNum num = image->imageNum();
98 if ( num >= _nextImageNum )
99 _nextImageNum = num+1;
100 });
101
102 // Make temporary old image array, so libSystem initializers can be debugged
103 STACK_ALLOC_ARRAY(dyld_image_info, oldDyldInfo, initialImages.count());
104 for (const LoadedImage& li : initialImages) {
105 oldDyldInfo.push_back({li.loadedAddress(), li.image()->path(), 0});
106 }
107 _oldAllImageInfos->infoArray = &oldDyldInfo[0];
108 _oldAllImageInfos->infoArrayCount = (uint32_t)oldDyldInfo.count();
109 _oldAllImageInfos->notification(dyld_image_adding, _oldAllImageInfos->infoArrayCount, _oldAllImageInfos->infoArray);
110 _oldAllImageInfos->infoArray = nullptr;
111 _oldAllImageInfos->infoArrayCount = 0;
112
113 _processDOFs = Loader::dtraceUserProbesEnabled();
114 }
115
116 void AllImages::setProgramVars(ProgramVars* vars)
117 {
118 _programVars = vars;
119 const dyld3::MachOFile* mf = (dyld3::MachOFile*)_programVars->mh;
120 _archs = &GradedArchs::forCurrentOS(mf);
121 }
122
123 void AllImages::setRestrictions(bool allowAtPaths, bool allowEnvPaths)
124 {
125 _allowAtPaths = allowAtPaths;
126 _allowEnvPaths = allowEnvPaths;
127 }
128
129 void AllImages::setHasCacheOverrides(bool someCacheImageOverriden)
130 {
131 _someImageOverridden = someCacheImageOverriden;
132 }
133
134 bool AllImages::hasCacheOverrides() const {
135 return _someImageOverridden;
136 }
137
138 void AllImages::applyInitialImages()
139 {
140 addImages(*_initialImages);
141 runImageNotifiers(*_initialImages);
142 runImageCallbacks(*_initialImages);
143 _initialImages = nullptr; // this was stack allocated
144 }
145
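// Note: the read, write, and notifier lock helpers below all currently take the same _globalLock.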
146 void AllImages::withReadLock(void (^work)()) const
147 {
148 #ifdef OS_UNFAIR_RECURSIVE_LOCK_INIT
149 os_unfair_recursive_lock_lock(&_globalLock);
150 work();
151 os_unfair_recursive_lock_unlock(&_globalLock);
152 #else
153 pthread_mutex_lock(&_globalLock);
154 work();
155 pthread_mutex_unlock(&_globalLock);
156 #endif
157 }
158
159 void AllImages::withWriteLock(void (^work)())
160 {
161 #ifdef OS_UNFAIR_RECURSIVE_LOCK_INIT
162 os_unfair_recursive_lock_lock(&_globalLock);
163 work();
164 os_unfair_recursive_lock_unlock(&_globalLock);
165 #else
166 pthread_mutex_lock(&_globalLock);
167 work();
168 pthread_mutex_unlock(&_globalLock);
169 #endif
170 }
171
172 void AllImages::withNotifiersLock(void (^work)()) const
173 {
174 #ifdef OS_UNFAIR_RECURSIVE_LOCK_INIT
175 os_unfair_recursive_lock_lock(&_globalLock);
176 work();
177 os_unfair_recursive_lock_unlock(&_globalLock);
178 #else
179 pthread_mutex_lock(&_globalLock);
180 work();
181 pthread_mutex_unlock(&_globalLock);
182 #endif
183 }
184
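// Mirrors the current _loadedImages list (and the UUIDs of dyld plus all non-cache images) into the legacy
// dyld_all_image_infos structure, so debuggers and other processes see an up-to-date image list.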
185 void AllImages::mirrorToOldAllImageInfos()
186 {
187 withReadLock(^(){
188 // set infoArray to NULL to denote it is in-use
189 _oldAllImageInfos->infoArray = nullptr;
190
191 // if array not large enough, re-alloc it
192 uint32_t imageCount = (uint32_t)_loadedImages.count();
193 if ( _oldArrayAllocCount < imageCount ) {
194 uint32_t newAllocCount = imageCount + 16;
195 dyld_image_info* newArray = (dyld_image_info*)::malloc(sizeof(dyld_image_info)*newAllocCount);
196 if ( _oldAllImageArray != nullptr ) {
197 ::memcpy(newArray, _oldAllImageArray, sizeof(dyld_image_info)*_oldAllImageInfos->infoArrayCount);
198 ::free(_oldAllImageArray);
199 }
200 _oldAllImageArray = newArray;
201 _oldArrayAllocCount = newAllocCount;
202 }
203
204 // fill out array to mirror current image list
205 int index = 0;
206 for (const LoadedImage& li : _loadedImages) {
207 _oldAllImageArray[index].imageLoadAddress = li.loadedAddress();
208 _oldAllImageArray[index].imageFilePath = imagePath(li.image());
209 _oldAllImageArray[index].imageFileModDate = 0;
210 ++index;
211 }
212
213 // set infoArray back to base address of array (so other processes can now read)
214 _oldAllImageInfos->infoArrayCount = imageCount;
215 _oldAllImageInfos->infoArrayChangeTimestamp = mach_absolute_time();
216 _oldAllImageInfos->infoArray = _oldAllImageArray;
217
218 // <radr://problem/42668846> update UUID array if needed
219 uint32_t nonCachedCount = 1; // always add dyld
220 for (const LoadedImage& li : _loadedImages) {
221 if ( !li.loadedAddress()->inDyldCache() )
222 ++nonCachedCount;
223 }
224 if ( nonCachedCount != _oldAllImageInfos->uuidArrayCount ) {
225 // set infoArray to NULL to denote it is in-use
226 _oldAllImageInfos->uuidArray = nullptr;
227 // make sure allocation can hold all uuids
228 if ( _oldUUIDAllocCount < nonCachedCount ) {
229 uint32_t newAllocCount = (nonCachedCount + 3) & (-4); // round up to multiple of 4
230 dyld_uuid_info* newArray = (dyld_uuid_info*)::malloc(sizeof(dyld_uuid_info)*newAllocCount);
231 if ( _oldUUIDArray != nullptr )
232 ::free(_oldUUIDArray);
233 _oldUUIDArray = newArray;
234 _oldUUIDAllocCount = newAllocCount;
235 }
236 // add dyld then all images not in dyld cache
237 const MachOFile* dyldMF = (MachOFile*)_oldAllImageInfos->dyldImageLoadAddress;
238 _oldUUIDArray[0].imageLoadAddress = dyldMF;
239 dyldMF->getUuid(_oldUUIDArray[0].imageUUID);
240 index = 1;
241 for (const LoadedImage& li : _loadedImages) {
242 if ( !li.loadedAddress()->inDyldCache() ) {
243 _oldUUIDArray[index].imageLoadAddress = li.loadedAddress();
244 li.loadedAddress()->getUuid(_oldUUIDArray[index].imageUUID);
245 ++index;
246 }
247 }
248 // set uuidArray back to base address of array (so kernel can now read)
249 _oldAllImageInfos->uuidArray = _oldUUIDArray;
250 _oldAllImageInfos->uuidArrayCount = nonCachedCount;
251 }
252 });
253 }
254
255 void AllImages::addImages(const Array<LoadedImage>& newImages)
256 {
257 // copy into _loadedImages
258 withWriteLock(^(){
259 _loadedImages.append(newImages);
260 // if any image not in the shared cache added, recompute bounds
261 for (const LoadedImage& li : newImages) {
262 if ( !((MachOAnalyzer*)li.loadedAddress())->inDyldCache() ) {
263 recomputeBounds();
264 break;
265 }
266 }
267 });
268 }
269
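// Records [start, end) as an immutable address range. Ranges live in append-only buckets; within a slot,
// 'end' is stored before 'start' so concurrent readers never see a partially written entry.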
270 void AllImages::addImmutableRange(uintptr_t start, uintptr_t end)
271 {
272 //fprintf(stderr, "AllImages::addImmutableRange(0x%09lX, 0x%09lX)\n", start, end);
273 // first look in existing range buckets for empty slot
274 ImmutableRanges* lastRange = nullptr;
275 for (ImmutableRanges* ranges = &_immutableRanges; ranges != nullptr; ranges = ranges->next.load(std::memory_order_acquire)) {
276 lastRange = ranges;
277 for (uintptr_t i=0; i < ranges->arraySize; ++i) {
278 if ( ranges->array[i].start.load(std::memory_order_acquire) == 0 ) {
279 // set 'end' before 'start' so readers always see consistent state
280 ranges->array[i].end.store(end, std::memory_order_release);
281 ranges->array[i].start.store(start, std::memory_order_release);
282 return;
283 }
284 }
285 }
286 // if we got here, there are no empty slots, so add new ImmutableRanges
287 const uintptr_t newSize = 15; // allocation is 256 bytes on 64-bit processes
288 ImmutableRanges* newRange = (ImmutableRanges*)calloc(offsetof(ImmutableRanges,array[newSize]), 1);
289 newRange->arraySize = newSize;
290 newRange->array[0].end.store(end, std::memory_order_release);
291 newRange->array[0].start.store(start, std::memory_order_release);
292 // link onto the end of the list last, so readers never see a partially initialized block
293 lastRange->next.store(newRange, std::memory_order_release);
294 }
295
296 void AllImages::runImageNotifiers(const Array<LoadedImage>& newImages)
297 {
298 uint32_t count = (uint32_t)newImages.count();
299 assert(count != 0);
300
301 if ( _oldAllImageInfos != nullptr ) {
302 // sync to old all image infos struct
303 mirrorToOldAllImageInfos();
304
305 // tell debugger about new images
306 dyld_image_info oldDyldInfo[count];
307 for (uint32_t i=0; i < count; ++i) {
308 oldDyldInfo[i].imageLoadAddress = newImages[i].loadedAddress();
309 oldDyldInfo[i].imageFilePath = imagePath(newImages[i].image());
310 oldDyldInfo[i].imageFileModDate = 0;
311 }
312 _oldAllImageInfos->notification(dyld_image_adding, count, oldDyldInfo);
313 }
314
315
316 // update immutable ranges
317 for (const LoadedImage& li : newImages) {
318 if ( !li.image()->inDyldCache() && li.image()->neverUnload() ) {
319 uintptr_t baseAddr = (uintptr_t)li.loadedAddress();
320 li.image()->forEachDiskSegment(^(uint32_t segIndex, uint32_t fileOffset, uint32_t fileSize, int64_t vmOffset, uint64_t vmSize, uint8_t permissions, bool laterReadOnly, bool &stop) {
321 if ( (permissions & (VM_PROT_READ|VM_PROT_WRITE)) == VM_PROT_READ ) {
322 addImmutableRange(baseAddr + vmOffset, baseAddr + vmOffset + vmSize);
323 }
324 });
325 }
326 }
327
328 // log loads
329 for (const LoadedImage& li : newImages) {
330 const char *path = imagePath(li.image());
331 uuid_t imageUUID;
332 if ( li.image()->getUuid(imageUUID)) {
333 uuid_string_t imageUUIDStr;
334 uuid_unparse_upper(imageUUID, imageUUIDStr);
335 log_loads("dyld: <%s> %s\n", imageUUIDStr, path);
336 }
337 else {
338 log_loads("dyld: %s\n", path);
339 }
340 }
341
342 // call kdebug trace for each image
343 if (kdebug_is_enabled(KDBG_CODE(DBG_DYLD, DBG_DYLD_UUID, DBG_DYLD_UUID_MAP_A))) {
344 for (const LoadedImage& li : newImages) {
345 const closure::Image* image = li.image();
346 struct stat stat_buf;
347 const char *path = imagePath(image);
348 uuid_t uuid;
349 image->getUuid(uuid);
350 fsid_t fsid = {{ 0, 0 }};
351 fsobj_id_t fsobjid = { 0, 0 };
352 if ( !li.loadedAddress()->inDyldCache() && (stat(path, &stat_buf) == 0) ) {
353 fsobjid = *(fsobj_id_t*)&stat_buf.st_ino;
354 fsid = {{ stat_buf.st_dev, 0 }};
355 }
356 kdebug_trace_dyld_image(DBG_DYLD_UUID_MAP_A, path, &uuid, fsobjid, fsid, li.loadedAddress());
357 }
358 }
359 }
360
361 void AllImages::runImageCallbacks(const Array<LoadedImage>& newImages)
362 {
363 uint32_t count = (uint32_t)newImages.count();
364 assert(count != 0);
365
366 // call each _dyld_register_func_for_add_image function with each image
367 withNotifiersLock(^{
368 for (NotifyFunc func : _loadNotifiers) {
369 for (const LoadedImage& li : newImages) {
370 dyld3::ScopedTimer timer(DBG_DYLD_TIMING_FUNC_FOR_ADD_IMAGE, (uint64_t)li.loadedAddress(), (uint64_t)func, 0);
371 log_notifications("dyld: add notifier %p called with mh=%p\n", func, li.loadedAddress());
372 if ( li.image()->inDyldCache() )
373 func(li.loadedAddress(), (uintptr_t)_dyldCacheSlide);
374 else
375 func(li.loadedAddress(), li.loadedAddress()->getSlide());
376 }
377 }
378 for (LoadNotifyFunc func : _loadNotifiers2) {
379 for (const LoadedImage& li : newImages) {
380 dyld3::ScopedTimer timer(DBG_DYLD_TIMING_FUNC_FOR_ADD_IMAGE, (uint64_t)li.loadedAddress(), (uint64_t)func, 0);
381 log_notifications("dyld: add notifier %p called with mh=%p\n", func, li.loadedAddress());
382 if ( li.image()->inDyldCache() )
383 func(li.loadedAddress(), li.image()->path(), false);
384 else
385 func(li.loadedAddress(), li.image()->path(), !li.image()->neverUnload());
386 }
387 }
388 for (BulkLoadNotifier func : _loadBulkNotifiers) {
389 const mach_header* mhs[count];
390 const char* paths[count];
391 for (unsigned i=0; i < count; ++i) {
392 mhs[i] = newImages[i].loadedAddress();
393 paths[i] = newImages[i].image()->path();
394 }
395 dyld3::ScopedTimer timer(DBG_DYLD_TIMING_FUNC_FOR_ADD_IMAGE, (uint64_t)mhs[0], (uint64_t)func, 0);
396 log_notifications("dyld: add notifier %p called with %d images\n", func, count);
397 func(count, mhs, paths);
398 }
399 });
400
401 // call objc about images that use objc
402 if ( _objcNotifyMapped != nullptr ) {
403 const char* pathsBuffer[count];
404 const mach_header* mhBuffer[count];
405 uint32_t imagesWithObjC = 0;
406 for (const LoadedImage& li : newImages) {
407 const closure::Image* image = li.image();
408 if ( image->hasObjC() ) {
409 pathsBuffer[imagesWithObjC] = imagePath(image);
410 mhBuffer[imagesWithObjC] = li.loadedAddress();
411 ++imagesWithObjC;
412 }
413 }
414 if ( imagesWithObjC != 0 ) {
415 dyld3::ScopedTimer timer(DBG_DYLD_TIMING_OBJC_MAP, 0, 0, 0);
416 (*_objcNotifyMapped)(imagesWithObjC, pathsBuffer, mhBuffer);
417 if ( log_notifications("dyld: objc-mapped-notifier called with %d images:\n", imagesWithObjC) ) {
418 for (uint32_t i=0; i < imagesWithObjC; ++i) {
419 log_notifications("dyld: objc-mapped: %p %s\n", mhBuffer[i], pathsBuffer[i]);
420 }
421 }
422 }
423 }
424
425 #if !TARGET_OS_DRIVERKIT
426 // FIXME: This may make more sense in runImageCallbacks, but the present order
427 // is after callbacks. Can we safely move it?
428 // notify any processes tracking loads in this process
429 notifyMonitorLoads(newImages);
430 #endif
431 }
432
433 void AllImages::removeImages(const Array<LoadedImage>& unloadImages)
434 {
435 // call each _dyld_register_func_for_remove_image function with each image
436 withNotifiersLock(^{
437 for (NotifyFunc func : _unloadNotifiers) {
438 for (const LoadedImage& li : unloadImages) {
439 dyld3::ScopedTimer timer(DBG_DYLD_TIMING_FUNC_FOR_REMOVE_IMAGE, (uint64_t)li.loadedAddress(), (uint64_t)func, 0);
440 log_notifications("dyld: remove notifier %p called with mh=%p\n", func, li.loadedAddress());
441 if ( li.image()->inDyldCache() )
442 func(li.loadedAddress(), (uintptr_t)_dyldCacheSlide);
443 else
444 func(li.loadedAddress(), li.loadedAddress()->getSlide());
445 }
446 }
447 });
448
449 // call objc about images going away
450 if ( _objcNotifyUnmapped != nullptr ) {
451 for (const LoadedImage& li : unloadImages) {
452 if ( li.image()->hasObjC() ) {
453 (*_objcNotifyUnmapped)(imagePath(li.image()), li.loadedAddress());
454 log_notifications("dyld: objc-unmapped-notifier called with image %p %s\n", li.loadedAddress(), imagePath(li.image()));
455 }
456 }
457 }
458
459 // call kdebug trace for each image
460 if (kdebug_is_enabled(KDBG_CODE(DBG_DYLD, DBG_DYLD_UUID, DBG_DYLD_UUID_MAP_A))) {
461 for (const LoadedImage& li : unloadImages) {
462 const closure::Image* image = li.image();
463 struct stat stat_buf;
464 const char *path = imagePath(image);
465 uuid_t uuid;
466 image->getUuid(uuid);
467 fsid_t fsid = {{ 0, 0 }};
468 fsobj_id_t fsobjid = { 0, 0 };
469 if ( stat(path, &stat_buf) == 0 ) {
470 fsobjid = *(fsobj_id_t*)&stat_buf.st_ino;
471 fsid = {{ stat_buf.st_dev, 0 }};
472 }
473 kdebug_trace_dyld_image(DBG_DYLD_UUID_UNMAP_A, path, &uuid, fsobjid, fsid, li.loadedAddress());
474 }
475 }
476
477 // remove each from _loadedImages
478 withWriteLock(^(){
479 for (const LoadedImage& uli : unloadImages) {
480 for (LoadedImage& li : _loadedImages) {
481 if ( uli.loadedAddress() == li.loadedAddress() ) {
482 _loadedImages.erase(li);
483 break;
484 }
485 }
486 }
487 recomputeBounds();
488 });
489
490 // sync to old all image infos struct
491 mirrorToOldAllImageInfos();
492
493 // tell debugger about removed images
494 STACK_ALLOC_ARRAY(dyld_image_info, oldDyldInfo, unloadImages.count());
495 for (const LoadedImage& li : unloadImages) {
496 oldDyldInfo.push_back({li.loadedAddress(), li.image()->path(), 0});
497 }
498 _oldAllImageInfos->notification(dyld_image_removing, (uint32_t)oldDyldInfo.count(), &oldDyldInfo[0]);
499
500 // notify any processes tracking loads in this process
501 notifyMonitorUnloads(unloadImages);
502
503 // finally, unmap images
504 for (const LoadedImage& li : unloadImages) {
505 if ( li.leaveMapped() ) {
506 log_loads("dyld: unloaded but left mmapped %s\n", imagePath(li.image()));
507 }
508 else {
509 // unmapImage() modifies its parameter, so use a copy
510 LoadedImage copy = li;
511 Loader::unmapImage(copy);
512 log_loads("dyld: unloaded %s\n", imagePath(li.image()));
513 }
514 }
515 }
516
517 // must be called with writeLock held
518 void AllImages::recomputeBounds()
519 {
520 _lowestNonCached = UINTPTR_MAX;
521 _highestNonCached = 0;
522 for (const LoadedImage& li : _loadedImages) {
523 const MachOLoaded* ml = li.loadedAddress();
524 uintptr_t start = (uintptr_t)ml;
525 if ( !((MachOAnalyzer*)ml)->inDyldCache() ) {
526 if ( start < _lowestNonCached )
527 _lowestNonCached = start;
528 uintptr_t end = start + (uintptr_t)(li.image()->vmSizeToMap());
529 if ( end > _highestNonCached )
530 _highestNonCached = end;
531 }
532 }
533 }
534
535 uint32_t AllImages::count() const
536 {
537 return (uint32_t)_loadedImages.count();
538 }
539
540 bool AllImages::dyldCacheHasPath(const char* path) const
541 {
542 uint32_t dyldCacheImageIndex;
543 if ( _dyldCacheAddress != nullptr )
544 return _dyldCacheAddress->hasImagePath(path, dyldCacheImageIndex);
545 return false;
546 }
547
548 const char* AllImages::imagePathByIndex(uint32_t index) const
549 {
550 __block const char* result = nullptr;
551 withReadLock(^{
552 if ( index < _loadedImages.count() ) {
553 result = imagePath(_loadedImages[index].image());
554 return;
555 }
556 });
557 return result;
558 }
559
560 const mach_header* AllImages::imageLoadAddressByIndex(uint32_t index) const
561 {
562 __block const mach_header* result = nullptr;
563 withReadLock(^{
564 if ( index < _loadedImages.count() ) {
565 result = _loadedImages[index].loadedAddress();
566 return;
567 }
568 });
569 return result;
570 }
571
572 bool AllImages::findImage(const mach_header* loadAddress, LoadedImage& foundImage) const
573 {
574 __block bool result = false;
575 withReadLock(^(){
576 for (const LoadedImage& li : _loadedImages) {
577 if ( li.loadedAddress() == loadAddress ) {
578 foundImage = li;
579 result = true;
580 break;
581 }
582 }
583 });
584 return result;
585 }
586
587 void AllImages::forEachImage(void (^handler)(const LoadedImage& loadedImage, bool& stop)) const
588 {
589 if ( _initialImages != nullptr ) {
590 // being called during libSystem initialization, so _loadedImages not allocated yet
591 bool stop = false;
592 for (const LoadedImage& li : *_initialImages) {
593 handler(li, stop);
594 if ( stop )
595 break;
596 }
597 return;
598 }
599
600 withReadLock(^{
601 bool stop = false;
602 for (const LoadedImage& li : _loadedImages) {
603 handler(li, stop);
604 if ( stop )
605 break;
606 }
607 });
608 }
609
610
611 const char* AllImages::pathForImageMappedAt(const void* addr) const
612 {
613 if ( _initialImages != nullptr ) {
614 // being called during libSystem initialization, so _loadedImages not allocated yet
615 for (const LoadedImage& li : *_initialImages) {
616 uint8_t permissions;
617 if ( li.image()->containsAddress(addr, li.loadedAddress(), &permissions) ) {
618 return li.image()->path();
619 }
620 }
621 return nullptr;
622 }
623
624 // if address is in cache, do fast search of TEXT segments in cache
625 __block const char* result = nullptr;
626 if ( (_dyldCacheAddress != nullptr) && (addr > _dyldCacheAddress) ) {
627 if ( addr < (void*)((uint8_t*)_dyldCacheAddress+_dyldCacheAddress->mappedSize()) ) {
628 uint64_t cacheSlide = (uint64_t)_dyldCacheAddress - _dyldCacheAddress->unslidLoadAddress();
629 uint64_t unslidTargetAddr = (uint64_t)addr - cacheSlide;
630 _dyldCacheAddress->forEachImageTextSegment(^(uint64_t loadAddressUnslid, uint64_t textSegmentSize, const unsigned char* dylibUUID, const char* installName, bool& stop) {
631 if ( (loadAddressUnslid <= unslidTargetAddr) && (unslidTargetAddr < loadAddressUnslid+textSegmentSize) ) {
632 result = installName;
633 stop = true;
634 }
635 });
636 if ( result != nullptr )
637 return result;
638 }
639 }
640
641 // slow path - search image list
642 infoForImageMappedAt(addr, ^(const LoadedImage& foundImage, uint8_t permissions) {
643 result = foundImage.image()->path();
644 });
645
646 return result;
647 }
648
649 void AllImages::infoForImageMappedAt(const void* addr, void (^handler)(const LoadedImage& foundImage, uint8_t permissions)) const
650 {
651 __block uint8_t permissions;
652 if ( _initialImages != nullptr ) {
653 // being called during libSystem initialization, so _loadedImages not allocated yet
654 for (const LoadedImage& li : *_initialImages) {
655 if ( li.image()->containsAddress(addr, li.loadedAddress(), &permissions) ) {
656 handler(li, permissions);
657 break;
658 }
659 }
660 return;
661 }
662
663 withReadLock(^{
664 for (const LoadedImage& li : _loadedImages) {
665 if ( li.image()->containsAddress(addr, li.loadedAddress(), &permissions) ) {
666 handler(li, permissions);
667 break;
668 }
669 }
670 });
671 }
672
673
674 bool AllImages::infoForImageMappedAt(const void* addr, const MachOLoaded** ml, uint64_t* textSize, const char** path) const
675 {
676 if ( _initialImages != nullptr ) {
677 // being called during libSystem initialization, so _loadedImages not allocated yet
678 for (const LoadedImage& li : *_initialImages) {
679 uint8_t permissions;
680 if ( li.image()->containsAddress(addr, li.loadedAddress(), &permissions) ) {
681 if ( ml != nullptr )
682 *ml = li.loadedAddress();
683 if ( path != nullptr )
684 *path = li.image()->path();
685 if ( textSize != nullptr ) {
686 *textSize = li.image()->textSize();
687 }
688 return true;
689 }
690 }
691 return false;
692 }
693
694 // if address is in cache, do fast search of TEXT segments in cache
695 __block bool result = false;
696 if ( (_dyldCacheAddress != nullptr) && (addr > _dyldCacheAddress) ) {
697 if ( addr < (void*)((uint8_t*)_dyldCacheAddress+_dyldCacheAddress->mappedSize()) ) {
698 uint64_t cacheSlide = (uint64_t)_dyldCacheAddress - _dyldCacheAddress->unslidLoadAddress();
699 uint64_t unslidTargetAddr = (uint64_t)addr - cacheSlide;
700 _dyldCacheAddress->forEachImageTextSegment(^(uint64_t loadAddressUnslid, uint64_t textSegmentSize, const unsigned char* dylibUUID, const char* installName, bool& stop) {
701 if ( (loadAddressUnslid <= unslidTargetAddr) && (unslidTargetAddr < loadAddressUnslid+textSegmentSize) ) {
702 if ( ml != nullptr )
703 *ml = (MachOLoaded*)(loadAddressUnslid + cacheSlide);
704 if ( path != nullptr )
705 *path = installName;
706 if ( textSize != nullptr )
707 *textSize = textSegmentSize;
708 stop = true;
709 result = true;
710 }
711 });
712 if ( result )
713 return result;
714 // in shared cache, but not in a TEXT segment, do slow search of all loaded cache images
715 withReadLock(^{
716 for (const LoadedImage& li : _loadedImages) {
717 if ( ((MachOAnalyzer*)li.loadedAddress())->inDyldCache() ) {
718 uint8_t permissions;
719 if ( li.image()->containsAddress(addr, li.loadedAddress(), &permissions) ) {
720 if ( ml != nullptr )
721 *ml = li.loadedAddress();
722 if ( path != nullptr )
723 *path = li.image()->path();
724 if ( textSize != nullptr )
725 *textSize = li.image()->textSize();
726 result = true;
727 break;
728 }
729 }
730 }
731 });
732 return result;
733 }
734 }
735
736 // address not in dyld cache, check each non-cache image
737 infoForNonCachedImageMappedAt(addr, ^(const LoadedImage& foundImage, uint8_t permissions) {
738 if ( ml != nullptr )
739 *ml = foundImage.loadedAddress();
740 if ( path != nullptr )
741 *path = foundImage.image()->path();
742 if ( textSize != nullptr )
743 *textSize = foundImage.image()->textSize();
744 result = true;
745 });
746
747 return result;
748 }
749
750 // same as infoForImageMappedAt(), but only look at images not in the dyld cache
751 void AllImages::infoForNonCachedImageMappedAt(const void* addr, void (^handler)(const LoadedImage& foundImage, uint8_t permissions)) const
752 {
753 __block uint8_t permissions;
754 if ( _initialImages != nullptr ) {
755 // being called during libSystem initialization, so _loadedImages not allocated yet
756 for (const LoadedImage& li : *_initialImages) {
757 if ( !((MachOAnalyzer*)li.loadedAddress())->inDyldCache() ) {
758 if ( li.image()->containsAddress(addr, li.loadedAddress(), &permissions) ) {
759 handler(li, permissions);
760 break;
761 }
762 }
763 }
764 return;
765 }
766
767 withReadLock(^{
768 for (const LoadedImage& li : _loadedImages) {
769 if ( !((MachOAnalyzer*)li.loadedAddress())->inDyldCache() ) {
770 if ( li.image()->containsAddress(addr, li.loadedAddress(), &permissions) ) {
771 handler(li, permissions);
772 break;
773 }
774 }
775 }
776 });
777 }
778
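// Returns true if [addr, addr+length) is known to be immutable: either a read-only region of the dyld shared
// cache, or inside one of the immutable ranges recorded for never-unloadable images.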
779 bool AllImages::immutableMemory(const void* addr, size_t length) const
780 {
781 // check to see if in shared cache
782 if ( _dyldCacheAddress != nullptr ) {
783 bool readOnly;
784 if ( _dyldCacheAddress->inCache(addr, length, readOnly) ) {
785 return readOnly;
786 }
787 }
788
789 // check to see if it is outside the range of any loaded image
790 if ( ((uintptr_t)addr < _lowestNonCached) || ((uintptr_t)addr+length > _highestNonCached) ) {
791 return false;
792 }
793
794 // check immutable ranges
795 for (const ImmutableRanges* ranges = &_immutableRanges; ranges != nullptr; ranges = ranges->next.load(std::memory_order_acquire)) {
796 for (uintptr_t i=0; i < ranges->arraySize; ++i) {
797 if ( ranges->array[i].start.load(std::memory_order_acquire) == 0 )
798 break; // no more entries in use
799 if ( (ranges->array[i].start.load(std::memory_order_acquire) <= (uintptr_t)addr)
800 && (ranges->array[i].end.load(std::memory_order_acquire) > ((uintptr_t)addr)+length) )
801 return true;
802 }
803 }
804
805 return false;
806 }
807
808
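// Converts a closure ResolvedSymbolTarget into a runtime address: shared-cache targets are an offset from the
// cache base, image targets are an offset from that image's load address, and absolute targets have their high
// bits restored so negative values survive the truncated encoding.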
809 uintptr_t AllImages::resolveTarget(closure::Image::ResolvedSymbolTarget target) const
810 {
811 switch ( target.sharedCache.kind ) {
812 case closure::Image::ResolvedSymbolTarget::kindSharedCache:
813 assert(_dyldCacheAddress != nullptr);
814 return (uintptr_t)_dyldCacheAddress + (uintptr_t)target.sharedCache.offset;
815
816 case closure::Image::ResolvedSymbolTarget::kindImage: {
817 LoadedImage info;
818 bool foundImage = findImageNum(target.image.imageNum, info);
819 assert(foundImage);
820 return (uintptr_t)(info.loadedAddress()) + (uintptr_t)target.image.offset;
821 }
822
823 case closure::Image::ResolvedSymbolTarget::kindAbsolute:
824 if ( target.absolute.value & (1ULL << 62) )
825 return (uintptr_t)(target.absolute.value | 0xC000000000000000ULL);
826 else
827 return (uintptr_t)target.absolute.value;
828 }
829 assert(0 && "malformed ResolvedSymbolTarget");
830 return 0;
831 }
832
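// If the main closure has an interposing tuple whose original implementation matches 'value', returns the
// replacement implementation; otherwise returns 'value' unchanged.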
833 void* AllImages::interposeValue(void *value) const {
834 if ( !_mainClosure->hasInterposings() )
835 return value;
836
837 __block void* replacementValue = nullptr;
838 __block bool foundReplacement = false;
839 _mainClosure->forEachInterposingTuple(^(const closure::InterposingTuple& tuple, bool& stop) {
840 void* stockPointer = (void*)resolveTarget(tuple.stockImplementation);
841 if ( stockPointer == value) {
842 replacementValue = (void*)resolveTarget(tuple.newImplementation);
843 foundReplacement = true;
844 stop = true;
845 }
846 });
847
848 if ( foundReplacement )
849 return replacementValue;
850
851 return value;
852 }
853
854 void AllImages::infoForImageWithLoadAddress(const MachOLoaded* mh, void (^handler)(const LoadedImage& foundImage)) const
855 {
856 withReadLock(^{
857 for (const LoadedImage& li : _loadedImages) {
858 if ( li.loadedAddress() == mh ) {
859 handler(li);
860 break;
861 }
862 }
863 });
864 }
865
866 bool AllImages::findImageNum(closure::ImageNum imageNum, LoadedImage& foundImage) const
867 {
868 if ( _initialImages != nullptr ) {
869 // being called during libSystem initialization, so _loadedImages not allocated yet
870 for (const LoadedImage& li : *_initialImages) {
871 if ( li.image()->representsImageNum(imageNum) ) {
872 foundImage = li;
873 return true;
874 }
875 }
876 return false;
877 }
878
879 bool result = false;
880 for (const LoadedImage& li : _loadedImages) {
881 if ( li.image()->representsImageNum(imageNum) ) {
882 foundImage = li;
883 result = true;
884 break;
885 }
886 }
887
888 return result;
889 }
890
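// Returns the load address of the depIndex'th dependent of the image loaded at 'mh', or nullptr if either
// that image or its dependent cannot be found.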
891 const MachOLoaded* AllImages::findDependent(const MachOLoaded* mh, uint32_t depIndex)
892 {
893 __block const MachOLoaded* result = nullptr;
894 withReadLock(^{
895 for (const LoadedImage& li : _loadedImages) {
896 if ( li.loadedAddress() == mh ) {
897 closure::ImageNum depImageNum = li.image()->dependentImageNum(depIndex);
898 LoadedImage depLi;
899 if ( findImageNum(depImageNum, depLi) )
900 result = depLi.loadedAddress();
901 break;
902 }
903 }
904 });
905 return result;
906 }
907
908
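// Helper for visitDependentsTopDown(): calls 'handler' on each not-yet-visited, non-upward dependent of nodeLi,
// then recurses on those dependents. Sets 'stopped' if any handler asks to stop.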
909 void AllImages::breadthFirstRecurseDependents(Array<closure::ImageNum>& visited, const LoadedImage& nodeLi, bool& stopped, void (^handler)(const LoadedImage& aLoadedImage, bool& stop)) const
910 {
911 // call handler on all direct dependents (unless already visited)
912 STACK_ALLOC_ARRAY(LoadedImage, dependentsToRecurse, 256);
913 nodeLi.image()->forEachDependentImage(^(uint32_t depIndex, closure::Image::LinkKind kind, closure::ImageNum depImageNum, bool& depStop) {
914 if ( kind == closure::Image::LinkKind::upward )
915 return;
916 if ( visited.contains(depImageNum) )
917 return;
918 LoadedImage depLi;
919 if ( !findImageNum(depImageNum, depLi) )
920 return;
921 handler(depLi, depStop);
922 visited.push_back(depImageNum);
923 if ( depStop ) {
924 stopped = true;
925 return;
926 }
927 dependentsToRecurse.push_back(depLi);
928 });
929 if ( stopped )
930 return;
931 // recurse on all dependents just visited
932 for (LoadedImage& depLi : dependentsToRecurse) {
933 breadthFirstRecurseDependents(visited, depLi, stopped, handler);
934 }
935 }
936
937 void AllImages::visitDependentsTopDown(const LoadedImage& start, void (^handler)(const LoadedImage& aLoadedImage, bool& stop)) const
938 {
939 withReadLock(^{
940 STACK_ALLOC_ARRAY(closure::ImageNum, visited, count());
941 bool stop = false;
942 handler(start, stop);
943 if ( stop )
944 return;
945 visited.push_back(start.image()->imageNum());
946 breadthFirstRecurseDependents(visited, start, stop, handler);
947 });
948 }
949
950 const MachOLoaded* AllImages::mainExecutable() const
951 {
952 assert(_programVars != nullptr);
953 return (const MachOLoaded*)_programVars->mh;
954 }
955
956 const closure::Image* AllImages::mainExecutableImage() const
957 {
958 assert(_mainClosure != nullptr);
959 return _mainClosure->images()->imageForNum(_mainClosure->topImage());
960 }
961
962 void AllImages::setMainPath(const char* path )
963 {
964 _mainExeOverridePath = path;
965 }
966
967 const char* AllImages::imagePath(const closure::Image* image) const
968 {
969 #if __IPHONE_OS_VERSION_MIN_REQUIRED
970 // on iOS and watchOS, apps may be moved on device after closure built
971 if ( _mainExeOverridePath != nullptr ) {
972 if ( image == mainExecutableImage() )
973 return _mainExeOverridePath;
974 }
975 #endif
976 return image->path();
977 }
978
979 dyld_platform_t AllImages::platform() const {
980 if (oldAllImageInfo()->version >= 16) { return (dyld_platform_t)oldAllImageInfo()->platform; }
981
982 __block dyld_platform_t result;
983 // FIXME: Remove this once we only care about version 16 or greater all image infos
984 dyld_get_image_versions(mainExecutable(), ^(dyld_platform_t platform, uint32_t sdk_version, uint32_t min_version) {
985 result = platform;
986 });
987 return result;
988 }
989
990 const GradedArchs& AllImages::archs() const
991 {
992 return *_archs;
993 }
994
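// dlopen() reference counting: each directly dlopen()ed image gets a DlopenCount entry. When a count drops back
// to zero, the entry is removed and garbageCollectImages() decides whether the image and its dependents can be unloaded.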
995 void AllImages::incRefCount(const mach_header* loadAddress)
996 {
997 for (DlopenCount& entry : _dlopenRefCounts) {
998 if ( entry.loadAddress == loadAddress ) {
999 // found existing DlopenCount entry, bump counter
1000 entry.refCount += 1;
1001 return;
1002 }
1003 }
1004
1005 // no existing DlopenCount, add new one
1006 _dlopenRefCounts.push_back({ loadAddress, 1 });
1007 }
1008
1009 void AllImages::decRefCount(const mach_header* loadAddress)
1010 {
1011 bool doCollect = false;
1012 for (DlopenCount& entry : _dlopenRefCounts) {
1013 if ( entry.loadAddress == loadAddress ) {
1014 // found existing DlopenCount entry, decrement counter
1015 entry.refCount -= 1;
1016 if ( entry.refCount == 0 ) {
1017 _dlopenRefCounts.erase(entry);
1018 doCollect = true;
1019 break;
1020 }
1021 return;
1022 }
1023 }
1024 if ( doCollect )
1025 garbageCollectImages();
1026 }
1027
1028
1029 #if __MAC_OS_X_VERSION_MIN_REQUIRED
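// NSObjectFileImage handles are opaque, monotonically increasing integers used to look up entries in _objectFileImages.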
1030 NSObjectFileImage AllImages::addNSObjectFileImage(const OFIInfo& image)
1031 {
1032 __block uint64_t imageNum = 0;
1033 withWriteLock(^{
1034 imageNum = ++_nextObjectFileImageNum;
1035 _objectFileImages.push_back(image);
1036 _objectFileImages.back().imageNum = imageNum;
1037 });
1038 return (NSObjectFileImage)imageNum;
1039 }
1040
1041 bool AllImages::forNSObjectFileImage(NSObjectFileImage imageHandle,
1042 void (^handler)(OFIInfo& image)) {
1043 uint64_t imageNum = (uint64_t)imageHandle;
1044 bool __block foundImage = false;
1045 withReadLock(^{
1046 for (OFIInfo& ofi : _objectFileImages) {
1047 if ( ofi.imageNum == imageNum ) {
1048 handler(ofi);
1049 foundImage = true;
1050 return;
1051 }
1052 }
1053 });
1054
1055 return foundImage;
1056 }
1057
1058 void AllImages::removeNSObjectFileImage(NSObjectFileImage imageHandle)
1059 {
1060 uint64_t imageNum = (uint64_t)imageHandle;
1061 withWriteLock(^{
1062 for (OFIInfo& ofi : _objectFileImages) {
1063 if ( ofi.imageNum == imageNum ) {
1064 _objectFileImages.erase(ofi);
1065 return;
1066 }
1067 }
1068 });
1069 }
1070 #endif
1071
1072
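// Reaper implements the mark phase of image-unload garbage collection: images that are still directly dlopen()ed
// and everything they depend on are marked in-use; the remaining unloadables are dead, and finalizeDeadImages()
// runs their static terminators and __cxa_finalize_ranges() before the caller removes and unmaps them.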
1073 class VIS_HIDDEN Reaper
1074 {
1075 public:
1076 struct ImageAndUse
1077 {
1078 const LoadedImage* li;
1079 bool inUse;
1080 };
1081 Reaper(Array<ImageAndUse>& unloadables, AllImages*);
1082 void garbageCollect();
1083 void finalizeDeadImages();
1084
1085 static void runTerminators(const LoadedImage& li);
1086 private:
1087
1088 void markDirectlyDlopenedImagesAsUsed();
1089 void markDependentOfInUseImages();
1090 void markDependentsOf(const LoadedImage*);
1091 uint32_t inUseCount();
1092 void dump(const char* msg);
1093
1094 Array<ImageAndUse>& _unloadables;
1095 AllImages* _allImages;
1096 uint32_t _deadCount;
1097 };
1098
1099 Reaper::Reaper(Array<ImageAndUse>& unloadables, AllImages* all)
1100 : _unloadables(unloadables), _allImages(all), _deadCount(0)
1101 {
1102 }
1103
1104 void Reaper::markDirectlyDlopenedImagesAsUsed()
1105 {
1106 for (AllImages::DlopenCount& entry : _allImages->_dlopenRefCounts) {
1107 if ( entry.refCount != 0 ) {
1108 for (ImageAndUse& iu : _unloadables) {
1109 if ( iu.li->loadedAddress() == entry.loadAddress ) {
1110 iu.inUse = true;
1111 break;
1112 }
1113 }
1114 }
1115 }
1116 }
1117
1118 uint32_t Reaper::inUseCount()
1119 {
1120 uint32_t count = 0;
1121 for (ImageAndUse& iu : _unloadables) {
1122 if ( iu.inUse )
1123 ++count;
1124 }
1125 return count;
1126 }
1127
1128 void Reaper::markDependentsOf(const LoadedImage* li)
1129 {
1130 li->image()->forEachDependentImage(^(uint32_t depIndex, closure::Image::LinkKind kind, closure::ImageNum depImageNum, bool& stop) {
1131 for (ImageAndUse& iu : _unloadables) {
1132 if ( !iu.inUse && iu.li->image()->representsImageNum(depImageNum) ) {
1133 iu.inUse = true;
1134 break;
1135 }
1136 }
1137 });
1138 }
1139
1140 void Reaper::markDependentOfInUseImages()
1141 {
1142 for (ImageAndUse& iu : _unloadables) {
1143 if ( iu.inUse )
1144 markDependentsOf(iu.li);
1145 }
1146 }
1147
1148 void Reaper::dump(const char* msg)
1149 {
1150 //log("%s:\n", msg);
1151 //for (ImageAndUse& iu : _unloadables) {
1152 // log(" in-used=%d %s\n", iu.inUse, iu.li->image()->path());
1153 //}
1154 }
1155
1156 void Reaper::garbageCollect()
1157 {
1158 //dump("all unloadable images");
1159
1160 // mark all dylibs directly dlopen'ed as in use
1161 markDirectlyDlopenedImagesAsUsed();
1162
1163 //dump("directly dlopen()'ed marked");
1164
1165 // iteratively mark dependents of in-use dylibs as in-use until in-use count stops changing
1166 uint32_t lastCount = inUseCount();
1167 bool countChanged = false;
1168 do {
1169 markDependentOfInUseImages();
1170 //dump("dependents marked");
1171 uint32_t newCount = inUseCount();
1172 countChanged = (newCount != lastCount);
1173 lastCount = newCount;
1174 } while (countChanged);
1175
1176 _deadCount = (uint32_t)_unloadables.count() - inUseCount();
1177 }
1178
1179 void Reaper::finalizeDeadImages()
1180 {
1181 if ( _deadCount == 0 )
1182 return;
1183 STACK_ALLOC_OVERFLOW_SAFE_ARRAY(__cxa_range_t, ranges, _deadCount);
1184 for (ImageAndUse& iu : _unloadables) {
1185 if ( iu.inUse )
1186 continue;
1187 runTerminators(*iu.li);
1188 iu.li->image()->forEachDiskSegment(^(uint32_t segIndex, uint32_t fileOffset, uint32_t fileSize, int64_t vmOffset, uint64_t vmSize, uint8_t permissions, bool laterReadOnly, bool &stop) {
1189 if ( permissions & VM_PROT_EXECUTE ) {
1190 __cxa_range_t range;
1191 range.addr = (char*)(iu.li->loadedAddress()) + vmOffset;
1192 range.length = (size_t)vmSize;
1193 ranges.push_back(range);
1194 }
1195 });
1196 }
1197 __cxa_finalize_ranges(ranges.begin(), (uint32_t)ranges.count());
1198 }
1199
1200 void Reaper::runTerminators(const LoadedImage& li)
1201 {
1202 if ( li.image()->hasTerminators() ) {
1203 typedef void (*Terminator)();
1204 li.image()->forEachTerminator(li.loadedAddress(), ^(const void* terminator) {
1205 Terminator termFunc = (Terminator)terminator;
1206 #if __has_feature(ptrauth_calls)
1207 termFunc = (Terminator)__builtin_ptrauth_sign_unauthenticated((void*)termFunc, 0, 0);
1208 #endif
1209 termFunc();
1210 log_initializers("dyld: called static terminator %p in %s\n", termFunc, li.image()->path());
1211 });
1212 }
1213 }
1214
1215 void AllImages::runAllStaticTerminators()
1216 {
1217 // We want to run terminators in reverse chronological order of initializing
1218 // Note: initialLoadCount may be larger than what was actually loaded
1219 const uint32_t currentCount = (uint32_t)_loadedImages.count();
1220 const uint32_t initialLoadCount = std::min(_mainClosure->initialLoadCount(), currentCount);
1221
1222 // first run static terminators of anything dlopen()ed
1223 for (uint32_t i=currentCount-1; i >= initialLoadCount; --i) {
1224 Reaper::runTerminators(_loadedImages[i]);
1225 }
1226
1227 // next run terminators of statically loaded images in load order (they were initialized in the reverse of this order)
1228 for (uint32_t i=0; i < initialLoadCount; ++i) {
1229 Reaper::runTerminators(_loadedImages[i]);
1230 }
1231 }
1232
1233
1234 // This function is called at the end of dlclose() when the reference count goes to zero.
1235 // The dylib being unloaded may have brought in other dependent dylibs when it was loaded.
1236 // Those dependent dylibs need to be unloaded, but only if they are not referenced by
1237 // something else. We use a standard mark and sweep garbage collection.
1238 //
1239 // The tricky part is that when a dylib is unloaded it may have a termination function that
1240 // can run and itself call dlclose() on yet another dylib. The problem is that this
1241 // sort of garbage collection is not re-entrant. Instead, a terminator's call to dlclose()
1242 // which calls garbageCollectImages() will just set a flag to re-do the garbage collection
1243 // when the current pass is done.
1244 //
1245 // Also note that this is done within the _loadedImages writer lock, so any dlopen/dlclose
1246 // on other threads are blocked while this garbage collection runs
1247 //
1248 void AllImages::garbageCollectImages()
1249 {
1250 // if some other thread is currently GC'ing images, let other thread do the work
1251 int32_t newCount = OSAtomicIncrement32(&_gcCount);
1252 if ( newCount != 1 )
1253 return;
1254
1255 do {
1256 STACK_ALLOC_ARRAY(Reaper::ImageAndUse, unloadables, _loadedImages.count());
1257 withReadLock(^{
1258 for (const LoadedImage& li : _loadedImages) {
1259 if ( !li.image()->neverUnload() /*&& !li.neverUnload()*/ ) {
1260 unloadables.push_back({&li, false});
1261 //fprintf(stderr, "unloadable[%lu] %p %s\n", unloadables.count(), li.loadedAddress(), li.image()->path());
1262 }
1263 }
1264 });
1265 // make reaper object to do garbage collection and notifications
1266 Reaper reaper(unloadables, this);
1267 reaper.garbageCollect();
1268
1269 // FIXME: we should sort dead images so higher level ones are terminated first
1270
1271 // call cxa_finalize_ranges and static terminators of dead images
1272 reaper.finalizeDeadImages();
1273
1274 // FIXME: DOF unregister
1275
1276 //fprintf(stderr, "_loadedImages before GC removals:\n");
1277 //for (const LoadedImage& li : _loadedImages) {
1278 // fprintf(stderr, " loadAddr=%p, path=%s\n", li.loadedAddress(), li.image()->path());
1279 //}
1280
1281 // make a copy of the LoadedImages we want to remove,
1282 // because unloadables[] points into the _loadedImages array we are about to shrink
1283 STACK_ALLOC_ARRAY(LoadedImage, unloadImages, _loadedImages.count());
1284 for (const Reaper::ImageAndUse& iu : unloadables) {
1285 if ( !iu.inUse )
1286 unloadImages.push_back(*iu.li);
1287 }
1288 // remove entries from _loadedImages
1289 if ( !unloadImages.empty() ) {
1290 removeImages(unloadImages);
1291
1292 //fprintf(stderr, "_loadedImages after GC removals:\n");
1293 //for (const LoadedImage& li : _loadedImages) {
1294 // fprintf(stderr, " loadAddr=%p, path=%s\n", li.loadedAddress(), li.image()->path());
1295 //}
1296 }
1297
1298 // if some other thread called GC during our work, redo GC on its behalf
1299 newCount = OSAtomicDecrement32(&_gcCount);
1300 }
1301 while (newCount > 0);
1302 }
1303
1304
1305
1306 void AllImages::addLoadNotifier(NotifyFunc func)
1307 {
1308 // callback about already loaded images
1309 withReadLock(^{
1310 for (const LoadedImage& li : _loadedImages) {
1311 dyld3::ScopedTimer timer(DBG_DYLD_TIMING_FUNC_FOR_ADD_IMAGE, (uint64_t)li.loadedAddress(), (uint64_t)func, 0);
1312 log_notifications("dyld: add notifier %p called with mh=%p\n", func, li.loadedAddress());
1313 if ( li.image()->inDyldCache() )
1314 func(li.loadedAddress(), (uintptr_t)_dyldCacheSlide);
1315 else
1316 func(li.loadedAddress(), li.loadedAddress()->getSlide());
1317 }
1318 });
1319
1320 // add to list of functions to call about future loads
1321 withNotifiersLock(^{
1322 _loadNotifiers.push_back(func);
1323 });
1324 }
1325
1326 void AllImages::addUnloadNotifier(NotifyFunc func)
1327 {
1328 // add to list of functions to call about future unloads
1329 withNotifiersLock(^{
1330 _unloadNotifiers.push_back(func);
1331 });
1332 }
1333
1334 void AllImages::addLoadNotifier(LoadNotifyFunc func)
1335 {
1336 // callback about already loaded images
1337 withReadLock(^{
1338 for (const LoadedImage& li : _loadedImages) {
1339 dyld3::ScopedTimer timer(DBG_DYLD_TIMING_FUNC_FOR_ADD_IMAGE, (uint64_t)li.loadedAddress(), (uint64_t)func, 0);
1340 log_notifications("dyld: add notifier %p called with mh=%p\n", func, li.loadedAddress());
1341 func(li.loadedAddress(), li.image()->path(), !li.image()->neverUnload());
1342 }
1343 });
1344
1345 // add to list of functions to call about future loads
1346 withNotifiersLock(^{
1347 _loadNotifiers2.push_back(func);
1348 });
1349 }
1350
1351
1352 void AllImages::addBulkLoadNotifier(BulkLoadNotifier func)
1353 {
1354 // callback about already loaded images
1355 unsigned count = (unsigned)_loadedImages.count();
1356 const mach_header* mhs[count];
1357 const char* paths[count];
1358 for (unsigned i=0; i < count; ++i) {
1359 mhs[i] = _loadedImages[i].loadedAddress();
1360 paths[i] = _loadedImages[i].image()->path();
1361 }
1362 dyld3::ScopedTimer timer(DBG_DYLD_TIMING_FUNC_FOR_ADD_IMAGE, (uint64_t)mhs[0], (uint64_t)func, 0);
1363 log_notifications("dyld: add notifier %p called with %d images\n", func, count);
1364 func(count, mhs, paths);
1365
1366 // add to list of functions to call about future loads
1367 withNotifiersLock(^{
1368 _loadBulkNotifiers.push_back(func);
1369 });
1370 }
1371
1372 // Returns true if logs should be sent to stderr as well as syslog.
1373 // Copied from objc which copied it from CFUtilities.c
1374 static bool also_do_stderr(void)
1375 {
1376 struct stat st;
1377 int ret = fstat(STDERR_FILENO, &st);
1378 if (ret < 0) return false;
1379 mode_t m = st.st_mode & S_IFMT;
1380 if (m == S_IFREG || m == S_IFSOCK || m == S_IFIFO || m == S_IFCHR) {
1381 return true;
1382 }
1383 return false;
1384 }
1385
1386 // Print "message" to the console. Copied from objc.
1387 static void _objc_syslog(const char *message)
1388 {
1389 _simple_asl_log(ASL_LEVEL_ERR, NULL, message);
1390
1391 if (also_do_stderr()) {
1392 write(STDERR_FILENO, message, strlen(message));
1393 }
1394 }
1395
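// Records the objc runtime's mapped/init/unmapped callbacks, publishes the prebuilt objc selector, class, and
// protocol hash tables from the closure, and then immediately calls the mapped notifier for every already
// loaded image that contains objc.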
1396 void AllImages::setObjCNotifiers(_dyld_objc_notify_mapped map, _dyld_objc_notify_init init, _dyld_objc_notify_unmapped unmap)
1397 {
1398 _objcNotifyMapped = map;
1399 _objcNotifyInit = init;
1400 _objcNotifyUnmapped = unmap;
1401
1402 // We couldn't initialize the objc optimized closure data in init() as that needs malloc but runs before malloc initializes.
1403 // So let's grab the data now and set it up
1404
1405 // Pull out the objc selector hash table if we have one
1406 Array<closure::Image::ObjCSelectorImage> selectorImageNums;
1407 const closure::ObjCSelectorOpt* selectorHashTable = nullptr;
1408 if (_mainClosure->selectorHashTable(selectorImageNums, selectorHashTable)) {
1409 _objcSelectorHashTable = selectorHashTable;
1410 for (closure::Image::ObjCSelectorImage selectorImage : selectorImageNums) {
1411 LoadedImage loadedImage;
1412 bool found = findImageNum(selectorImage.imageNum, loadedImage);
1413 assert(found);
1414 _objcSelectorHashTableImages.push_back( (uintptr_t)loadedImage.loadedAddress() + selectorImage.offset );
1415 }
1416 }
1417
1418 // Pull out the objc class hash table if we have one
1419 Array<closure::Image::ObjCClassImage> classImageNums;
1420 const closure::ObjCClassOpt* classHashTable = nullptr;
1421 const closure::ObjCClassOpt* protocolHashTable = nullptr;
1422 if (_mainClosure->classAndProtocolHashTables(classImageNums, classHashTable, protocolHashTable)) {
1423 _objcClassHashTable = (const closure::ObjCClassOpt*)classHashTable;
1424 _objcProtocolHashTable = (const closure::ObjCClassOpt*)protocolHashTable;
1425 for (closure::Image::ObjCClassImage classImage : classImageNums) {
1426 LoadedImage loadedImage;
1427 bool found = findImageNum(classImage.imageNum, loadedImage);
1428 assert(found);
1429 uintptr_t loadAddress = (uintptr_t)loadedImage.loadedAddress();
1430 uintptr_t nameBaseAddress = loadAddress + classImage.offsetOfClassNames;
1431 uintptr_t dataBaseAddress = loadAddress + classImage.offsetOfClasses;
1432 _objcClassHashTableImages.push_back({ nameBaseAddress, dataBaseAddress });
1433 }
1434 }
1435
1436 _mainClosure->duplicateClassesHashTable(_objcClassDuplicatesHashTable);
1437 if ( _objcClassDuplicatesHashTable != nullptr ) {
1438 // If we have duplicates, those need the objc opt pointer to find dupes
1439 _dyldCacheObjCOpt = _dyldCacheAddress->objcOpt();
1440 }
1441
1442 // ObjC would have issued warnings on duplicate classes. We've recorded those too
1443 _mainClosure->forEachWarning(closure::Closure::Warning::duplicateObjCClass, ^(const char *warning, bool &stop) {
1444 Diagnostics diag;
1445 diag.error("objc[%d]: %s\n", getpid(), warning);
1446 _objc_syslog(diag.errorMessage());
1447 });
1448
1449 // callback about already loaded images
1450 uint32_t maxCount = count();
1451 STACK_ALLOC_ARRAY(const mach_header*, mhs, maxCount);
1452 STACK_ALLOC_ARRAY(const char*, paths, maxCount);
1453 // don't need the global lock here because this is called while the process is still single threaded
1454 for (const LoadedImage& li : _loadedImages) {
1455 if ( li.image()->hasObjC() ) {
1456 paths.push_back(imagePath(li.image()));
1457 mhs.push_back(li.loadedAddress());
1458 }
1459 }
1460 if ( !mhs.empty() ) {
1461 (*map)((uint32_t)mhs.count(), &paths[0], &mhs[0]);
1462 if ( log_notifications("dyld: objc-mapped-notifier called with %ld images:\n", mhs.count()) ) {
1463 for (uintptr_t i=0; i < mhs.count(); ++i) {
1464 log_notifications("dyld: objc-mapped: %p %s\n", mhs[i], paths[i]);
1465 }
1466 }
1467 }
1468 }
1469
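// Applies the closure's patch entries to the shared cache: every patchable use of an overridden cache export is
// rewritten to point at the replacement (or to an absolute value, typically NULL, when the symbol is missing from
// the override), re-signing pointers on arm64e and suspending vm accounting while cache pages are dirtied.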
1470 void AllImages::applyInterposingToDyldCache(const closure::Closure* closure)
1471 {
1472 dyld3::ScopedTimer timer(DBG_DYLD_TIMING_APPLY_INTERPOSING, 0, 0, 0);
1473 const uintptr_t cacheStart = (uintptr_t)_dyldCacheAddress;
1474 __block closure::ImageNum lastCachedDylibImageNum = 0;
1475 __block const closure::Image* lastCachedDylibImage = nullptr;
1476 __block bool suspendedAccounting = false;
1477 closure->forEachPatchEntry(^(const closure::Closure::PatchEntry& entry) {
1478 if ( entry.overriddenDylibInCache != lastCachedDylibImageNum ) {
1479 lastCachedDylibImage = closure::ImageArray::findImage(imagesArrays(), entry.overriddenDylibInCache);
1480 assert(lastCachedDylibImage != nullptr);
1481 lastCachedDylibImageNum = entry.overriddenDylibInCache;
1482 }
1483 if ( !suspendedAccounting ) {
1484 Loader::vmAccountingSetSuspended(true, log_fixups);
1485 suspendedAccounting = true;
1486 }
1487 uintptr_t newValue = 0;
1488 LoadedImage foundImage;
1489 switch ( entry.replacement.image.kind ) {
1490 case closure::Image::ResolvedSymbolTarget::kindImage:
1491 assert(findImageNum(entry.replacement.image.imageNum, foundImage));
1492 newValue = (uintptr_t)(foundImage.loadedAddress()) + (uintptr_t)entry.replacement.image.offset;
1493 break;
1494 case closure::Image::ResolvedSymbolTarget::kindSharedCache:
1495 newValue = (uintptr_t)_dyldCacheAddress + (uintptr_t)entry.replacement.sharedCache.offset;
1496 break;
1497 case closure::Image::ResolvedSymbolTarget::kindAbsolute:
1498 // this means the symbol was missing in the cache override dylib, so set any uses to NULL
1499 newValue = (uintptr_t)entry.replacement.absolute.value;
1500 break;
1501 default:
1502 assert(0 && "bad replacement kind");
1503 }
1504 uint32_t lastCachedDylibImageIndex = lastCachedDylibImageNum - (uint32_t)_dyldCacheAddress->cachedDylibsImageArray()->startImageNum();
1505 _dyldCacheAddress->forEachPatchableUseOfExport(lastCachedDylibImageIndex,
1506 entry.exportCacheOffset, ^(dyld_cache_patchable_location patchLocation) {
1507 uintptr_t* loc = (uintptr_t*)(cacheStart+patchLocation.cacheOffset);
1508 #if __has_feature(ptrauth_calls)
1509 if ( patchLocation.authenticated ) {
1510 MachOLoaded::ChainedFixupPointerOnDisk fixupInfo;
1511 fixupInfo.arm64e.authRebase.auth = true;
1512 fixupInfo.arm64e.authRebase.addrDiv = patchLocation.usesAddressDiversity;
1513 fixupInfo.arm64e.authRebase.diversity = patchLocation.discriminator;
1514 fixupInfo.arm64e.authRebase.key = patchLocation.key;
1515 *loc = fixupInfo.arm64e.signPointer(loc, newValue + DyldSharedCache::getAddend(patchLocation));
1516 log_fixups("dyld: cache fixup: *%p = %p (JOP: diversity 0x%04X, addr-div=%d, key=%s)\n",
1517 loc, (void*)*loc, patchLocation.discriminator, patchLocation.usesAddressDiversity, DyldSharedCache::keyName(patchLocation));
1518 return;
1519 }
1520 #endif
1521 log_fixups("dyld: cache fixup: *%p = 0x%0lX (dyld cache patch)\n", loc, newValue + (uintptr_t)DyldSharedCache::getAddend(patchLocation));
1522 *loc = newValue + (uintptr_t)DyldSharedCache::getAddend(patchLocation);
1523 });
1524 });
1525 if ( suspendedAccounting )
1526 Loader::vmAccountingSetSuspended(false, log_fixups);
1527 }
1528
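// Runs initializers for the images loaded at launch: walks _loadedImages in order, running each image's
// pre-computed bottom-up initializer list, and stops once the main executable itself has been processed.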
1529 void AllImages::runStartupInitialzers()
1530 {
1531 __block bool mainExecutableInitializerNeedsToRun = true;
1532 __block uint32_t imageIndex = 0;
1533 while ( mainExecutableInitializerNeedsToRun ) {
1534 __block const closure::Image* image = nullptr;
1535 withReadLock(^{
1536 image = _loadedImages[imageIndex].image();
1537 if ( _loadedImages[imageIndex].loadedAddress()->isMainExecutable() )
1538 mainExecutableInitializerNeedsToRun = false;
1539 });
1540 runInitialzersBottomUp(image);
1541 ++imageIndex;
1542 }
1543 }
1544
1545
1546 // Find image in _loadedImages which has ImageNum == num.
1547 // Try indexHint first; if the hint is wrong, update it so the next use is faster.
1548 LoadedImage AllImages::findImageNum(closure::ImageNum num, uint32_t& indexHint)
1549 {
1550 __block LoadedImage copy;
1551 withReadLock(^{
1552 if ( (indexHint >= _loadedImages.count()) || !_loadedImages[indexHint].image()->representsImageNum(num) ) {
1553 indexHint = 0;
1554 for (indexHint=0; indexHint < _loadedImages.count(); ++indexHint) {
1555 if ( _loadedImages[indexHint].image()->representsImageNum(num) )
1556 break;
1557 }
1558 assert(indexHint < _loadedImages.count());
1559 }
1560 copy = _loadedImages[indexHint];
1561 });
1562 return copy;
1563 }
1564
1565
1566 // Change the state of the LoadedImage in _loadedImages which has ImageNum == num.
1567 // Only change state if current state is expectedCurrentState (atomic swap).
1568 bool AllImages::swapImageState(closure::ImageNum num, uint32_t& indexHint, LoadedImage::State expectedCurrentState, LoadedImage::State newState)
1569 {
1570 __block bool result = false;
1571 withWriteLock(^{
1572 if ( (indexHint >= _loadedImages.count()) || !_loadedImages[indexHint].image()->representsImageNum(num) ) {
1573 indexHint = 0;
1574 for (indexHint=0; indexHint < _loadedImages.count(); ++indexHint) {
1575 if ( _loadedImages[indexHint].image()->representsImageNum(num) )
1576 break;
1577 }
1578 assert(indexHint < _loadedImages.count());
1579 }
1580 if ( _loadedImages[indexHint].state() == expectedCurrentState ) {
1581 _loadedImages[indexHint].setState(newState);
1582 result = true;
1583 }
1584 });
1585 return result;
1586 }
1587
1588 // dyld3 pre-builds the order initializers need to be run (bottom up) in a list in the closure.
1589 // This method uses that list to run all initializers.
1590 // Because an initializer may call dlopen() and/or create threads, the _loadedImages array
1591 // may move under us. So, never keep a pointer into it. Always reference images by ImageNum
1592 // and use hint to make that faster in the case where the _loadedImages does not move.
1593 void AllImages::runInitialzersBottomUp(const closure::Image* topImage)
1594 {
1595 // walk closure specified initializer list, already ordered bottom up
1596 topImage->forEachImageToInitBefore(^(closure::ImageNum imageToInit, bool& stop) {
1597 // get a copy of the LoadedImage for imageToInit, but don't keep a reference into _loadedImages, because it may move if initializers call dlopen()
1598 uint32_t indexHint = 0;
1599 LoadedImage loadedImageCopy = findImageNum(imageToInit, indexHint);
1600 // skip if the image is already inited, or in process of being inited (dependency cycle)
1601 if ( (loadedImageCopy.state() == LoadedImage::State::fixedUp) && swapImageState(imageToInit, indexHint, LoadedImage::State::fixedUp, LoadedImage::State::beingInited) ) {
1602 // tell objc to run any +load methods in image
1603 if ( (_objcNotifyInit != nullptr) && loadedImageCopy.image()->mayHavePlusLoads() ) {
1604 dyld3::ScopedTimer timer(DBG_DYLD_TIMING_OBJC_INIT, (uint64_t)loadedImageCopy.loadedAddress(), 0, 0);
1605 const char* path = imagePath(loadedImageCopy.image());
1606 log_notifications("dyld: objc-init-notifier called with mh=%p, path=%s\n", loadedImageCopy.loadedAddress(), path);
1607 (*_objcNotifyInit)(path, loadedImageCopy.loadedAddress());
1608 }
1609
1610 // run all initializers in image
1611 runAllInitializersInImage(loadedImageCopy.image(), loadedImageCopy.loadedAddress());
1612
1613 // advance state to inited
1614 swapImageState(imageToInit, indexHint, LoadedImage::State::beingInited, LoadedImage::State::inited);
1615 }
1616 });
1617 }
1618
1619 void AllImages::runLibSystemInitializer(LoadedImage& libSystem)
1620 {
1621 // First set the libSystem state to beingInited. This protects against accidentally trying
1622 // to run its initializers again if a dlopen happens inside libSystem_initializer().
1623 libSystem.setState(LoadedImage::State::beingInited);
1624
1625 // run all initializers in libSystem.dylib
1626 // Note: during libSystem's initialization, libdyld_initializer() is called, which copies _initialImages to _loadedImages
1627 runAllInitializersInImage(libSystem.image(), libSystem.loadedAddress());
1628
1629 // update global flags that libsystem has been initialized (so debug tools know it is safe to inject threads)
1630 _oldAllImageInfos->libSystemInitialized = true;
1631
1632 // mark libSystem.dylib as inited, so a later recursive init won't re-run it
1633 for (LoadedImage& li : _loadedImages) {
1634 if ( li.loadedAddress() == libSystem.loadedAddress() ) {
1635 li.setState(LoadedImage::State::inited);
1636 break;
1637 }
1638 }
1639 // now that libSystem is up, register a callback that should be called at exit
1640 __cxa_atexit(&AllImages::runAllStaticTerminatorsHelper, nullptr, nullptr);
1641 }
1642
1643 void AllImages::runAllStaticTerminatorsHelper(void*)
1644 {
1645 gAllImages.runAllStaticTerminators();
1646 }
1647
1648 void AllImages::runAllInitializersInImage(const closure::Image* image, const MachOLoaded* ml)
1649 {
1650 image->forEachInitializer(ml, ^(const void* func) {
1651 Initializer initFunc = (Initializer)func;
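// On arm64e (pointer authentication), the raw address from the closure is re-signed
// (key 0, zero discriminator) so the indirect call below passes authentication.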
1652 #if __has_feature(ptrauth_calls)
1653 initFunc = (Initializer)__builtin_ptrauth_sign_unauthenticated((void*)initFunc, 0, 0);
1654 #endif
1655 {
1656 ScopedTimer timer(DBG_DYLD_TIMING_STATIC_INITIALIZER, (uint64_t)ml, (uint64_t)func, 0); // named so the timer spans the initFunc() call below
1657 initFunc(NXArgc, NXArgv, environ, appleParams, _programVars);
1658
1659 }
1660 log_initializers("dyld: called initializer %p in %s\n", initFunc, image->path());
1661 });
1662 }
1663
1664 const MachOLoaded* AllImages::dlopen(Diagnostics& diag, const char* path, bool rtldNoLoad, bool rtldLocal, bool rtldNoDelete, bool rtldNow, bool fromOFI, const void* callerAddress)
1665 {
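// Prebuilt closures from the shared cache can only be reused when the cache was built
// with the same closure format version that this libdyld understands.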
1666 bool sharedCacheFormatCompatible = (_dyldCacheAddress != nullptr) && (_dyldCacheAddress->header.formatVersion == dyld3::closure::kFormatVersion);
1667
1668 // quick check if path is in shared cache and already loaded
1669 if ( _dyldCacheAddress != nullptr ) {
1670 uint32_t dyldCacheImageIndex;
1671 if ( _dyldCacheAddress->hasImagePath(path, dyldCacheImageIndex) ) {
1672 uint64_t mTime;
1673 uint64_t inode;
1674 const MachOLoaded* mh = (MachOLoaded*)_dyldCacheAddress->getIndexedImageEntry(dyldCacheImageIndex, mTime, inode);
1675 // Note: we do not need readLock because this is within global dlopen lock
1676 for (const LoadedImage& li : _loadedImages) {
1677 if ( li.loadedAddress() == mh ) {
1678 return mh;
1679 }
1680 }
1681
1682 // If this is a customer cache and we have no overrides, then we know for sure the cache closure is valid.
1683 // This assumes that a libdispatch root would have been loaded at launch, and that such roots are not
1684 // supported with customer caches, which is the case today.
1685 if ( !rtldNoLoad && !hasInsertedOrInterposingLibraries() &&
1686 (_dyldCacheAddress->header.cacheType == kDyldSharedCacheTypeProduction) &&
1687 sharedCacheFormatCompatible ) {
1688 const dyld3::closure::ImageArray* images = _dyldCacheAddress->cachedDylibsImageArray();
1689 const dyld3::closure::Image* image = images->imageForNum(dyldCacheImageIndex+1);
1690 return loadImage(diag, image->imageNum(), nullptr, rtldLocal, rtldNoDelete, rtldNow, fromOFI);
1691 }
1692 }
1693 }
1694
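// Find which loaded image contains the caller's address; the closure builder can use it,
// e.g., to resolve @loader_path and rpaths relative to the image that called dlopen().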
1695 __block closure::ImageNum callerImageNum = 0;
1696 for (const LoadedImage& li : _loadedImages) {
1697 uint8_t permissions;
1698 if ( (callerImageNum == 0) && li.image()->containsAddress(callerAddress, li.loadedAddress(), &permissions) ) {
1699 callerImageNum = li.image()->imageNum();
1700 }
1701 //fprintf(stderr, "mh=%p, image=%p, imageNum=0x%04X, path=%s\n", li.loadedAddress(), li.image(), li.image()->imageNum(), li.image()->path());
1702 }
1703
1704 // make closure
1705 closure::ImageNum topImageNum = 0;
1706 const closure::DlopenClosure* newClosure = nullptr;
1707
1708 // First try with closures from the shared cache permitted.
1709 // Then try again, forcing a new closure to be built.
1710 for (bool canUseSharedCacheClosure : { true, false }) {
1711 // We can only use a shared cache closure if the shared cache format is the same as libdyld.
1712 canUseSharedCacheClosure &= sharedCacheFormatCompatible;
1713 closure::FileSystemPhysical fileSystem(nullptr, nullptr, _allowEnvPaths);
1714 closure::ClosureBuilder::AtPath atPathHanding = (_allowAtPaths ? closure::ClosureBuilder::AtPath::all : closure::ClosureBuilder::AtPath::onlyInRPaths);
1715 closure::ClosureBuilder cb(_nextImageNum, fileSystem, _dyldCacheAddress, true, *_archs, closure::gPathOverrides, atPathHanding);
1716 newClosure = cb.makeDlopenClosure(path, _mainClosure, _loadedImages.array(), callerImageNum, rtldNoLoad, rtldNow, canUseSharedCacheClosure, &topImageNum);
1717 if ( newClosure == closure::ClosureBuilder::sRetryDlopenClosure ) {
1718 log_apis(" dlopen: closure builder needs to retry: %s\n", path);
1719 assert(canUseSharedCacheClosure);
1720 continue;
1721 }
1722 if ( (newClosure == nullptr) && (topImageNum == 0) ) {
1723 if ( cb.diagnostics().hasError())
1724 diag.error("%s", cb.diagnostics().errorMessage());
1725 else if ( !rtldNoLoad )
1726 diag.error("dlopen(): file not found: %s", path);
1727 return nullptr;
1728 }
1729 // save off next available ImageNum for use by next call to dlopen()
1730 _nextImageNum = cb.nextFreeImageNum();
1731 break;
1732 }
1733
1734 if ( newClosure != nullptr ) {
1735 // if new closure contains an ImageArray, add it to list
1736 if ( const closure::ImageArray* newArray = newClosure->images() ) {
1737 appendToImagesArray(newArray);
1738 }
1739 log_apis(" dlopen: made closure: %p\n", newClosure);
1740 }
1741
1742 // if already loaded, just bump refCount and return
1743 if ( (newClosure == nullptr) && (topImageNum != 0) ) {
1744 for (LoadedImage& li : _loadedImages) {
1745 if ( li.image()->imageNum() == topImageNum ) {
1746 // is already loaded
1747 const MachOLoaded* topLoadAddress = li.loadedAddress();
1748 if ( !li.image()->inDyldCache() )
1749 incRefCount(topLoadAddress);
1750 log_apis(" dlopen: already loaded as '%s'\n", li.image()->path());
1751 // if previously opened with RTLD_LOCAL, but now opened with RTLD_GLOBAL, unhide it
1752 if ( !rtldLocal && li.hideFromFlatSearch() )
1753 li.setHideFromFlatSearch(false);
1754 // if called with RTLD_NODELETE, mark it as never-unload
1755 if ( rtldNoDelete )
1756 li.markLeaveMapped();
1757
1758 // If we haven't run the initializers then we must be in a static init in a dlopen
1759 if ( li.state() != LoadedImage::State::inited ) {
1760 // RTLD_NOLOAD means dlopen should fail unless path is already loaded.
1761 // don't run initializers when RTLD_NOLOAD is set. This only matters if dlopen() is
1762 // called from within an initializer because it can cause initializers to run
1763 // out of order. Most uses of RTLD_NOLOAD are "probes". If they want initializers
1764 // to run, then don't use RTLD_NOLOAD.
1765 if (!rtldNoLoad) {
1766 runInitialzersBottomUp(li.image());
1767 }
1768 }
1769
1770 return topLoadAddress;
1771 }
1772 }
1773 }
1774
1775 return loadImage(diag, topImageNum, newClosure, rtldLocal, rtldNoDelete, rtldNow, fromOFI);
1776 }
1777
1778 // Note this is noinline to avoid having too much stack used in the parent
1779 // dlopen method
1780 __attribute__((noinline))
1781 const MachOLoaded* AllImages::loadImage(Diagnostics& diag, closure::ImageNum topImageNum, const closure::DlopenClosure* newClosure,
1782 bool rtldLocal, bool rtldNoDelete, bool rtldNow, bool fromOFI) {
1783 // Note this array is used as the storage for the Loader, so it needs to be at least
1784 // large enough to hold the total number of images needed for the dlopen
1785 STACK_ALLOC_OVERFLOW_SAFE_ARRAY(LoadedImage, newImages, 1024);
1786
1787 // Note we don't need pre-optimized Objective-C for dlopen closures, but use
1788 // variables here to make it easier to see what's going on.
1789 const dyld3::closure::ObjCSelectorOpt* selectorOpt = nullptr;
1790 dyld3::Array<dyld3::closure::Image::ObjCSelectorImage> selectorImages;
1791
1792 // run loader to load all new images
1793 Loader loader(_loadedImages.array(), newImages, _dyldCacheAddress, imagesArrays(),
1794 selectorOpt, selectorImages,
1795 &dyld3::log_loads, &dyld3::log_segments, &dyld3::log_fixups, &dyld3::log_dofs);
1796
1797 // find Image* for top image, look in new closure first
1798 const closure::Image* topImage = nullptr;
1799 if ( newClosure != nullptr )
1800 topImage = newClosure->images()->imageForNum(topImageNum);
1801 if ( topImage == nullptr )
1802 topImage = closure::ImageArray::findImage(imagesArrays(), topImageNum);
1803 if ( newClosure == nullptr ) {
1804 if ( topImageNum < dyld3::closure::kLastDyldCacheImageNum )
1805 log_apis(" dlopen: using image in dyld shared cache %p\n", topImage);
1806 else
1807 log_apis(" dlopen: using pre-built dlopen closure %p\n", topImage);
1808 }
1809 LoadedImage topLoadedImage = LoadedImage::make(topImage);
1810 if ( rtldLocal && !topImage->inDyldCache() )
1811 topLoadedImage.setHideFromFlatSearch(true);
1812 if ( rtldNoDelete && !topImage->inDyldCache() )
1813 topLoadedImage.markLeaveMapped();
1814 loader.addImage(topLoadedImage);
1815
1816
1817 // recursively load all dependents and fill in allImages array
1818 bool someCacheImageOverridden = false;
1819 loader.completeAllDependents(diag, someCacheImageOverridden);
1820 if ( diag.hasError() )
1821 return nullptr;
1822 loader.mapAndFixupAllImages(diag, _processDOFs, fromOFI);
1823 if ( diag.hasError() )
1824 return nullptr;
1825
1826 // Record if we had a root
1827 _someImageOverridden |= someCacheImageOverridden;
1828
1829 const MachOLoaded* topLoadAddress = newImages.begin()->loadedAddress();
1830
1831 // bump dlopen refcount of image directly loaded
1832 if ( !topImage->inDyldCache() )
1833 incRefCount(topLoadAddress);
1834
1835 // tell gAllImages about new images
1836 addImages(newImages);
1837
1838 // Run notifiers before applyInterposingToDyldCache() so that we have an
1839 // accurate image list before any calls to findImage().
1840 // TODO: Can we move this even earlier, eg, after map images but before fixups?
1841 runImageNotifiers(newImages);
1842
1843 // if closure adds images that override dyld cache, patch cache
1844 if ( newClosure != nullptr )
1845 applyInterposingToDyldCache(newClosure);
1846
1847 runImageCallbacks(newImages);
1848
1849 // run initializers
1850 runInitialzersBottomUp(topImage);
1851
1852 return topLoadAddress;
1853 }
1854
1855 void AllImages::appendToImagesArray(const closure::ImageArray* newArray)
1856 {
1857 _imagesArrays.push_back(newArray);
1858 }
1859
1860 const Array<const closure::ImageArray*>& AllImages::imagesArrays()
1861 {
1862 return _imagesArrays.array();
1863 }
1864
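// A process is "restricted" (e.g. setuid or otherwise hardened at launch) when DYLD_*
// environment paths are not honored; _allowEnvPaths records that decision.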
1865 bool AllImages::isRestricted() const
1866 {
1867 return !_allowEnvPaths;
1868 }
1869
1870 bool AllImages::hasInsertedOrInterposingLibraries() const
1871 {
1872 return _mainClosure->hasInsertedLibraries() || _mainClosure->hasInterposings();
1873 }
1874
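// fork() support (expected to be driven by libSystem's atfork handling): the parent takes the
// global lock before fork so the child never inherits it held mid-operation; the parent then
// releases it and the child resets the lock to its unlocked state.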
1875 void AllImages::takeLockBeforeFork() {
1876 #ifdef OS_UNFAIR_RECURSIVE_LOCK_INIT
1877 os_unfair_recursive_lock_lock(&_globalLock);
1878 #endif
1879 }
1880
1881 void AllImages::releaseLockInForkParent() {
1882 #ifdef OS_UNFAIR_RECURSIVE_LOCK_INIT
1883 os_unfair_recursive_lock_unlock(&_globalLock);
1884 #endif
1885 }
1886
1887 void AllImages::resetLockInForkChild() {
1888 #if TARGET_OS_SIMULATOR
1889
1890 // dyld3 is not currently used on the simulator, so this should be unreachable
1891 assert(false);
1892
1893 #else
1894
1895 #ifdef OS_UNFAIR_RECURSIVE_LOCK_INIT
1896 os_unfair_recursive_lock_unlock_forked_child(&_globalLock);
1897 #endif
1898
1899 #endif // TARGET_OS_SIMULATOR
1900 }
1901
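// ObjC pre-optimization: the launch closure may carry pre-built hash tables for selectors,
// classes, and protocols; libobjc consults these through dyld so it can avoid scanning each
// image's ObjC metadata at runtime.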
1902 const char* AllImages::getObjCSelector(const char *selName) const {
1903 if ( _objcSelectorHashTable == nullptr )
1904 return nullptr;
1905 return _objcSelectorHashTable->getString(selName, _objcSelectorHashTableImages.array());
1906 }
1907
1908 void AllImages::forEachObjCClass(const char* className,
1909 void (^callback)(void* classPtr, bool isLoaded, bool* stop)) const {
1910 if ( _objcClassHashTable == nullptr )
1911 return;
1912 // There may be a duplicate in the shared cache. If that is the case, return it first
1913 if ( _objcClassDuplicatesHashTable != nullptr ) {
1914 void* classImpl = nullptr;
1915 if ( _objcClassDuplicatesHashTable->getClassLocation(className, _dyldCacheObjCOpt, classImpl) ) {
1916 bool stop = false;
1917 callback(classImpl, true, &stop);
1918 if (stop)
1919 return;
1920 }
1921 }
1922 _objcClassHashTable->forEachClass(className, _objcClassHashTableImages.array(), callback);
1923 }
1924
1925 void AllImages::forEachObjCProtocol(const char* protocolName,
1926 void (^callback)(void* protocolPtr, bool isLoaded, bool* stop)) const {
1927 if ( _objcProtocolHashTable == nullptr )
1928 return;
1929 _objcProtocolHashTable->forEachClass(protocolName, _objcClassHashTableImages.array(), callback);
1930 }
1931
1932
1933 } // namespace dyld3
1934
1935
1936
1937
1938
1939