// dyld3/AllImages.cpp (dyld-625.13)
1 /*
2 * Copyright (c) 2017 Apple Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. Please obtain a copy of the License at
10 * http://www.opensource.apple.com/apsl/ and read it before using this
11 * file.
12 *
13 * The Original Code and all software distributed under the License are
14 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
15 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
16 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
18 * Please see the License for the specific language governing rights and
19 * limitations under the License.
20 *
21 * @APPLE_LICENSE_HEADER_END@
22 */
23
24
25 #include <stdint.h>
26 #include <fcntl.h>
27 #include <sys/types.h>
28 #include <sys/stat.h>
29 #include <sys/sysctl.h>
30 #include <mach/mach_time.h> // mach_absolute_time()
31 #include <libkern/OSAtomic.h>
32
33 #include <vector>
34 #include <algorithm>
35
36 #include "AllImages.h"
37 #include "libdyldEntryVector.h"
38 #include "Logging.h"
39 #include "Loading.h"
40 #include "Tracing.h"
41 #include "DyldSharedCache.h"
42 #include "PathOverrides.h"
43 #include "Closure.h"
44 #include "ClosureBuilder.h"
45 #include "ClosureFileSystemPhysical.h"
46
47 extern const char** appleParams;
48
49 // these should be declared in a header
50 struct __cxa_range_t {
51 const void* addr;
52 size_t length;
53 };
54 extern "C" void __cxa_finalize_ranges(const __cxa_range_t ranges[], unsigned int count);
55
56 VIS_HIDDEN bool gUseDyld3 = false;
57
58
59 namespace dyld3 {
60
61
62
63 ///////////////////// AllImages ////////////////////////////
64
65
66 AllImages gAllImages;
67
68
69
70 void AllImages::init(const closure::LaunchClosure* closure, const DyldSharedCache* dyldCacheLoadAddress, const char* dyldCachePath,
71 const Array<LoadedImage>& initialImages)
72 {
73 _mainClosure = closure;
74 _initialImages = &initialImages;
75 _dyldCacheAddress = dyldCacheLoadAddress;
76 _dyldCachePath = dyldCachePath;
77
78 if ( _dyldCacheAddress ) {
79 const dyld_cache_mapping_info* const fileMappings = (dyld_cache_mapping_info*)((uint64_t)_dyldCacheAddress + _dyldCacheAddress->header.mappingOffset);
80 _dyldCacheSlide = (uint64_t)dyldCacheLoadAddress - fileMappings[0].address;
81 _imagesArrays.push_back(dyldCacheLoadAddress->cachedDylibsImageArray());
82 if ( auto others = dyldCacheLoadAddress->otherOSImageArray() )
83 _imagesArrays.push_back(others);
84 }
85 _imagesArrays.push_back(_mainClosure->images());
86
87 // record first ImageNum to use for dlopen() calls
88 _mainClosure->images()->forEachImage(^(const dyld3::closure::Image* image, bool& stop) {
89 closure::ImageNum num = image->imageNum();
90 if ( num >= _nextImageNum )
91 _nextImageNum = num+1;
92 });
93
94 // Make temporary old image array, so libSystem initializers can be debugged
95 STACK_ALLOC_ARRAY(dyld_image_info, oldDyldInfo, initialImages.count());
96 for (const LoadedImage& li : initialImages) {
97 oldDyldInfo.push_back({li.loadedAddress(), li.image()->path(), 0});
98 }
99 _oldAllImageInfos->infoArray = &oldDyldInfo[0];
100 _oldAllImageInfos->infoArrayCount = (uint32_t)oldDyldInfo.count();
101 _oldAllImageInfos->notification(dyld_image_adding, _oldAllImageInfos->infoArrayCount, _oldAllImageInfos->infoArray);
102 _oldAllImageInfos->infoArray = nullptr;
103 _oldAllImageInfos->infoArrayCount = 0;
104
105 _processDOFs = Loader::dtraceUserProbesEnabled();
106 }
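// Illustration (added; a minimal sketch, not part of the original source):
// the slide computed in init() above is simply "actual load address minus
// preferred load address", where the preferred address comes from the cache's
// first mapping record. The helper name is hypothetical.
#if 0
static uint64_t exampleCacheSlide(const DyldSharedCache* cache)
{
    const dyld_cache_mapping_info* mappings =
        (const dyld_cache_mapping_info*)((uint64_t)cache + cache->header.mappingOffset);
    return (uint64_t)cache - mappings[0].address;  // 0 if loaded at its preferred base
}
#endif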
107
108 void AllImages::setProgramVars(ProgramVars* vars)
109 {
110 _programVars = vars;
111 const dyld3::MachOFile* mf = (dyld3::MachOFile*)_programVars->mh;
112 mf->forEachSupportedPlatform(^(dyld3::Platform platform, uint32_t minOS, uint32_t sdk) {
113 _platform = (dyld_platform_t)platform;
114 //FIXME assert there is only one?
115 });
116 }
117
118 void AllImages::setRestrictions(bool allowAtPaths, bool allowEnvPaths)
119 {
120 _allowAtPaths = allowAtPaths;
121 _allowEnvPaths = allowEnvPaths;
122 }
123
124 void AllImages::applyInitialImages()
125 {
126 addImages(*_initialImages);
127 runImageNotifiers(*_initialImages);
128 _initialImages = nullptr; // this was stack allocated
129 }
130
131 void AllImages::withReadLock(void (^work)()) const
132 {
133 #ifdef OS_UNFAIR_RECURSIVE_LOCK_INIT
134 os_unfair_recursive_lock_lock(&_loadImagesLock);
135 work();
136 os_unfair_recursive_lock_unlock(&_loadImagesLock);
137 #else
138 pthread_mutex_lock(&_loadImagesLock);
139 work();
140 pthread_mutex_unlock(&_loadImagesLock);
141 #endif
142 }
143
144 void AllImages::withWriteLock(void (^work)())
145 {
146 #ifdef OS_UNFAIR_RECURSIVE_LOCK_INIT
147 os_unfair_recursive_lock_lock(&_loadImagesLock);
148 work();
149 os_unfair_recursive_lock_unlock(&_loadImagesLock);
150 #else
151 pthread_mutex_lock(&_loadImagesLock);
152 work();
153 pthread_mutex_unlock(&_loadImagesLock);
154 #endif
155 }
156
157 void AllImages::withNotifiersLock(void (^work)()) const
158 {
159 #ifdef OS_UNFAIR_RECURSIVE_LOCK_INIT
160 os_unfair_recursive_lock_lock(&_notifiersLock);
161 work();
162 os_unfair_recursive_lock_unlock(&_notifiersLock);
163 #else
164 pthread_mutex_lock(&_notifiersLock);
165 work();
166 pthread_mutex_unlock(&_notifiersLock);
167 #endif
168 }
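// Usage sketch (added): these helpers take a block so that lock and unlock
// always pair, even on early return from `work`. The recursive lock variant
// matters because code run under the lock (e.g. a notifier) may re-enter
// AllImages on the same thread.
#if 0
gAllImages.withWriteLock(^{
    // mutate _loadedImages here; the lock is released when the block returns
});
#endif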
169
170 void AllImages::mirrorToOldAllImageInfos()
171 {
172 withReadLock(^(){
173 // set infoArray to NULL to denote it is in-use
174 _oldAllImageInfos->infoArray = nullptr;
175
176 // if array not large enough, re-alloc it
177 uint32_t imageCount = (uint32_t)_loadedImages.count();
178 if ( _oldArrayAllocCount < imageCount ) {
179 uint32_t newAllocCount = imageCount + 16;
180 dyld_image_info* newArray = (dyld_image_info*)::malloc(sizeof(dyld_image_info)*newAllocCount);
181 if ( _oldAllImageArray != nullptr ) {
182 ::memcpy(newArray, _oldAllImageArray, sizeof(dyld_image_info)*_oldAllImageInfos->infoArrayCount);
183 ::free(_oldAllImageArray);
184 }
185 _oldAllImageArray = newArray;
186 _oldArrayAllocCount = newAllocCount;
187 }
188
189 // fill out array to mirror current image list
190 int index = 0;
191 for (const LoadedImage& li : _loadedImages) {
192 _oldAllImageArray[index].imageLoadAddress = li.loadedAddress();
193 _oldAllImageArray[index].imageFilePath = imagePath(li.image());
194 _oldAllImageArray[index].imageFileModDate = 0;
195 ++index;
196 }
197
198 // set infoArray back to base address of array (so other processes can now read it)
199 _oldAllImageInfos->infoArrayCount = imageCount;
200 _oldAllImageInfos->infoArrayChangeTimestamp = mach_absolute_time();
201 _oldAllImageInfos->infoArray = _oldAllImageArray;
202
203 });
204 }
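// Reader-side sketch (added, hypothetical helper): mirrorToOldAllImageInfos()
// sets infoArray to NULL while updating, so a debugger or monitoring process
// inspecting the target's dyld_all_image_infos can detect a torn update:
#if 0
static bool readImageList(const dyld_all_image_infos* infos)
{
    if ( infos->infoArray == nullptr )
        return false;                 // writer is mid-update; retry later
    for (uint32_t i = 0; i < infos->infoArrayCount; ++i) {
        // stable: use infos->infoArray[i].imageLoadAddress / .imageFilePath
    }
    return true;
}
#endif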
205
206 void AllImages::addImages(const Array<LoadedImage>& newImages)
207 {
208 // copy into _loadedImages
209 withWriteLock(^(){
210 _loadedImages.append(newImages);
211 // if any added image is not in the shared cache, recompute bounds
212 for (const LoadedImage& li : newImages) {
213 if ( !((MachOAnalyzer*)li.loadedAddress())->inDyldCache() ) {
214 recomputeBounds();
215 break;
216 }
217 }
218 });
219 }
220
221 void AllImages::runImageNotifiers(const Array<LoadedImage>& newImages)
222 {
223 uint32_t count = (uint32_t)newImages.count();
224 assert(count != 0);
225
226 if ( _oldAllImageInfos != nullptr ) {
227 // sync to old all image infos struct
228 mirrorToOldAllImageInfos();
229
230 // tell debugger about new images
231 dyld_image_info oldDyldInfo[count];
232 for (uint32_t i=0; i < count; ++i) {
233 oldDyldInfo[i].imageLoadAddress = newImages[i].loadedAddress();
234 oldDyldInfo[i].imageFilePath = imagePath(newImages[i].image());
235 oldDyldInfo[i].imageFileModDate = 0;
236 }
237 _oldAllImageInfos->notification(dyld_image_adding, count, oldDyldInfo);
238 }
239
240 // log loads
241 for (const LoadedImage& li : newImages) {
242 log_loads("dyld: %s\n", imagePath(li.image()));
243 }
244
245 #if !TARGET_IPHONE_SIMULATOR
246 // call kdebug trace for each image
247 if (kdebug_is_enabled(KDBG_CODE(DBG_DYLD, DBG_DYLD_UUID, DBG_DYLD_UUID_MAP_A))) {
248 for (const LoadedImage& li : newImages) {
249 const closure::Image* image = li.image();
250 struct stat stat_buf;
251 fsid_t fsid = {{ 0, 0 }};
252 fsobj_id_t fsobjid = { 0, 0 };
253 if ( !image->inDyldCache() && (stat(imagePath(image), &stat_buf) == 0) ) {
254 fsobjid = *(fsobj_id_t*)&stat_buf.st_ino;
255 fsid = {{ stat_buf.st_dev, 0 }};
256 }
257 uuid_t uuid;
258 image->getUuid(uuid);
259 kdebug_trace_dyld_image(DBG_DYLD_UUID_MAP_A, &uuid, fsobjid, fsid, li.loadedAddress());
260 }
261 }
262 #endif
263 // call each _dyld_register_func_for_add_image function with each image
264 withNotifiersLock(^{
265 for (NotifyFunc func : _loadNotifiers) {
266 for (const LoadedImage& li : newImages) {
267 dyld3::ScopedTimer timer(DBG_DYLD_TIMING_FUNC_FOR_ADD_IMAGE, (uint64_t)li.loadedAddress(), (uint64_t)func, 0);
268 log_notifications("dyld: add notifier %p called with mh=%p\n", func, li.loadedAddress());
269 if ( li.image()->inDyldCache() )
270 func(li.loadedAddress(), (uintptr_t)_dyldCacheSlide);
271 else
272 func(li.loadedAddress(), li.loadedAddress()->getSlide());
273 }
274 }
275 for (LoadNotifyFunc func : _loadNotifiers2) {
276 for (const LoadedImage& li : newImages) {
277 dyld3::ScopedTimer timer(DBG_DYLD_TIMING_FUNC_FOR_ADD_IMAGE, (uint64_t)li.loadedAddress(), (uint64_t)func, 0);
278 log_notifications("dyld: add notifier %p called with mh=%p\n", func, li.loadedAddress());
279 if ( li.image()->inDyldCache() )
280 func(li.loadedAddress(), li.image()->path(), false);
281 else
282 func(li.loadedAddress(), li.image()->path(), !li.image()->neverUnload());
283 }
284 }
285 });
286
287 // call objc about images that use objc
288 if ( _objcNotifyMapped != nullptr ) {
289 const char* pathsBuffer[count];
290 const mach_header* mhBuffer[count];
291 uint32_t imagesWithObjC = 0;
292 for (const LoadedImage& li : newImages) {
293 const closure::Image* image = li.image();
294 if ( image->hasObjC() ) {
295 pathsBuffer[imagesWithObjC] = imagePath(image);
296 mhBuffer[imagesWithObjC] = li.loadedAddress();
297 ++imagesWithObjC;
298 }
299 }
300 if ( imagesWithObjC != 0 ) {
301 dyld3::ScopedTimer timer(DBG_DYLD_TIMING_OBJC_MAP, 0, 0, 0);
302 (*_objcNotifyMapped)(imagesWithObjC, pathsBuffer, mhBuffer);
303 if ( log_notifications("dyld: objc-mapped-notifier called with %d images:\n", imagesWithObjC) ) {
304 for (uint32_t i=0; i < imagesWithObjC; ++i) {
305 log_notifications("dyld: objc-mapped: %p %s\n", mhBuffer[i], pathsBuffer[i]);
306 }
307 }
308 }
309 }
310
311 // notify any processes tracking loads in this process
312 notifyMonitorLoads(newImages);
313 }
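// Client-side sketch (added): the _loadNotifiers loop above services callbacks
// registered through the public API in <mach-o/dyld.h>, and _loadNotifiers2
// services the _dyld_register_for_image_loads() SPI from <mach-o/dyld_priv.h>.
#if 0
static void onImageAdded(const struct mach_header* mh, intptr_t vmaddr_slide)
{
    // called for every already-loaded image at registration time,
    // then for each image loaded afterwards (the loop above)
}
// _dyld_register_func_for_add_image(&onImageAdded);
#endif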
314
315 void AllImages::removeImages(const Array<LoadedImage>& unloadImages)
316 {
317 // call each _dyld_register_func_for_remove_image function with each image
318 withNotifiersLock(^{
319 for (NotifyFunc func : _unloadNotifiers) {
320 for (const LoadedImage& li : unloadImages) {
321 dyld3::ScopedTimer timer(DBG_DYLD_TIMING_FUNC_FOR_REMOVE_IMAGE, (uint64_t)li.loadedAddress(), (uint64_t)func, 0);
322 log_notifications("dyld: remove notifier %p called with mh=%p\n", func, li.loadedAddress());
323 if ( li.image()->inDyldCache() )
324 func(li.loadedAddress(), (uintptr_t)_dyldCacheSlide);
325 else
326 func(li.loadedAddress(), li.loadedAddress()->getSlide());
327 }
328 }
329 });
330
331 // call objc about images going away
332 if ( _objcNotifyUnmapped != nullptr ) {
333 for (const LoadedImage& li : unloadImages) {
334 if ( li.image()->hasObjC() ) {
335 (*_objcNotifyUnmapped)(imagePath(li.image()), li.loadedAddress());
336 log_notifications("dyld: objc-unmapped-notifier called with image %p %s\n", li.loadedAddress(), imagePath(li.image()));
337 }
338 }
339 }
340
341 #if !TARGET_IPHONE_SIMULATOR
342 // call kdebug trace for each image
343 if (kdebug_is_enabled(KDBG_CODE(DBG_DYLD, DBG_DYLD_UUID, DBG_DYLD_UUID_MAP_A))) {
344 for (const LoadedImage& li : unloadImages) {
345 const closure::Image* image = li.image();
346 struct stat stat_buf;
347 fsid_t fsid = {{ 0, 0 }};
348 fsobj_id_t fsobjid = { 0, 0 };
349 if ( stat(imagePath(image), &stat_buf) == 0 ) {
350 fsobjid = *(fsobj_id_t*)&stat_buf.st_ino;
351 fsid = {{ stat_buf.st_dev, 0 }};
352 }
353 uuid_t uuid;
354 image->getUuid(uuid);
355 kdebug_trace_dyld_image(DBG_DYLD_UUID_UNMAP_A, &uuid, fsobjid, fsid, li.loadedAddress());
356 }
357 }
358 #endif
359
360 // remove each from _loadedImages
361 withWriteLock(^(){
362 for (const LoadedImage& uli : unloadImages) {
363 for (LoadedImage& li : _loadedImages) {
364 if ( uli.loadedAddress() == li.loadedAddress() ) {
365 _loadedImages.erase(li);
366 break;
367 }
368 }
369 }
370 recomputeBounds();
371 });
372
373 // sync to old all image infos struct
374 mirrorToOldAllImageInfos();
375
376 // tell debugger about removed images
377 STACK_ALLOC_ARRAY(dyld_image_info, oldDyldInfo, unloadImages.count());
378 for (const LoadedImage& li : unloadImages) {
379 oldDyldInfo.push_back({li.loadedAddress(), li.image()->path(), 0});
380 }
381 _oldAllImageInfos->notification(dyld_image_removing, (uint32_t)oldDyldInfo.count(), &oldDyldInfo[0]);
382
383 // notify any processes tracking loads in this process
384 notifyMonitorUnloads(unloadImages);
385
386 // finally, unmap images
387 for (const LoadedImage& li : unloadImages) {
388 if ( li.leaveMapped() ) {
389 log_loads("dyld: unloaded but left mmapped %s\n", imagePath(li.image()));
390 }
391 else {
392 // unmapImage() modifies its parameter, so pass a copy
393 LoadedImage copy = li;
394 Loader::unmapImage(copy);
395 log_loads("dyld: unloaded %s\n", imagePath(li.image()));
396 }
397 }
398 }
399
400 // must be called with writeLock held
401 void AllImages::recomputeBounds()
402 {
403 _lowestNonCached = UINTPTR_MAX;
404 _highestNonCached = 0;
405 for (const LoadedImage& li : _loadedImages) {
406 const MachOLoaded* ml = li.loadedAddress();
407 uintptr_t start = (uintptr_t)ml;
408 if ( !((MachOAnalyzer*)ml)->inDyldCache() ) {
409 if ( start < _lowestNonCached )
410 _lowestNonCached = start;
411 uintptr_t end = start + (uintptr_t)(li.image()->vmSizeToMap());
412 if ( end > _highestNonCached )
413 _highestNonCached = end;
414 }
415 }
416 }
417
418 uint32_t AllImages::count() const
419 {
420 return (uint32_t)_loadedImages.count();
421 }
422
423 bool AllImages::dyldCacheHasPath(const char* path) const
424 {
425 uint32_t dyldCacheImageIndex;
426 if ( _dyldCacheAddress != nullptr )
427 return _dyldCacheAddress->hasImagePath(path, dyldCacheImageIndex);
428 return false;
429 }
430
431 const char* AllImages::imagePathByIndex(uint32_t index) const
432 {
433 if ( index < _loadedImages.count() )
434 return imagePath(_loadedImages[index].image());
435 return nullptr;
436 }
437
438 const mach_header* AllImages::imageLoadAddressByIndex(uint32_t index) const
439 {
440 if ( index < _loadedImages.count() )
441 return _loadedImages[index].loadedAddress();
442 return nullptr;
443 }
444
445 bool AllImages::findImage(const mach_header* loadAddress, LoadedImage& foundImage) const
446 {
447 __block bool result = false;
448 withReadLock(^(){
449 for (const LoadedImage& li : _loadedImages) {
450 if ( li.loadedAddress() == loadAddress ) {
451 foundImage = li;
452 result = true;
453 break;
454 }
455 }
456 });
457 return result;
458 }
459
460 void AllImages::forEachImage(void (^handler)(const LoadedImage& loadedImage, bool& stop)) const
461 {
462 withReadLock(^{
463 bool stop = false;
464 for (const LoadedImage& li : _loadedImages) {
465 handler(li, stop);
466 if ( stop )
467 break;
468 }
469 });
470 }
471
472
473 const char* AllImages::pathForImageMappedAt(const void* addr) const
474 {
475 if ( _initialImages != nullptr ) {
476 // being called during libSystem initialization, so _loadedImages not allocated yet
477 for (const LoadedImage& li : *_initialImages) {
478 uint8_t permissions;
479 if ( li.image()->containsAddress(addr, li.loadedAddress(), &permissions) ) {
480 return li.image()->path();
481 }
482 }
483 return nullptr;
484 }
485
486 // if address is in cache, do fast search of TEXT segments in cache
487 __block const char* result = nullptr;
488 if ( (_dyldCacheAddress != nullptr) && (addr > _dyldCacheAddress) ) {
489 if ( addr < (void*)((uint8_t*)_dyldCacheAddress+_dyldCacheAddress->mappedSize()) ) {
490 uint64_t cacheSlide = (uint64_t)_dyldCacheAddress - _dyldCacheAddress->unslidLoadAddress();
491 uint64_t unslidTargetAddr = (uint64_t)addr - cacheSlide;
492 _dyldCacheAddress->forEachImageTextSegment(^(uint64_t loadAddressUnslid, uint64_t textSegmentSize, const unsigned char* dylibUUID, const char* installName, bool& stop) {
493 if ( (loadAddressUnslid <= unslidTargetAddr) && (unslidTargetAddr < loadAddressUnslid+textSegmentSize) ) {
494 result = installName;
495 stop = true;
496 }
497 });
498 if ( result != nullptr )
499 return result;
500 }
501 }
502
503 // slow path - search image list
504 infoForImageMappedAt(addr, ^(const LoadedImage& foundImage, uint8_t permissions) {
505 result = foundImage.image()->path();
506 });
507
508 return result;
509 }
510
511 void AllImages::infoForImageMappedAt(const void* addr, void (^handler)(const LoadedImage& foundImage, uint8_t permissions)) const
512 {
513 __block uint8_t permissions;
514 if ( _initialImages != nullptr ) {
515 // being called during libSystem initialization, so _loadedImages not allocated yet
516 for (const LoadedImage& li : *_initialImages) {
517 if ( li.image()->containsAddress(addr, li.loadedAddress(), &permissions) ) {
518 handler(li, permissions);
519 break;
520 }
521 }
522 return;
523 }
524
525 withReadLock(^{
526 for (const LoadedImage& li : _loadedImages) {
527 if ( li.image()->containsAddress(addr, li.loadedAddress(), &permissions) ) {
528 handler(li, permissions);
529 break;
530 }
531 }
532 });
533 }
534
535
536 bool AllImages::infoForImageMappedAt(const void* addr, const MachOLoaded** ml, uint64_t* textSize, const char** path) const
537 {
538 if ( _initialImages != nullptr ) {
539 // being called during libSystem initialization, so _loadedImages not allocated yet
540 for (const LoadedImage& li : *_initialImages) {
541 uint8_t permissions;
542 if ( li.image()->containsAddress(addr, li.loadedAddress(), &permissions) ) {
543 if ( ml != nullptr )
544 *ml = li.loadedAddress();
545 if ( path != nullptr )
546 *path = li.image()->path();
547 if ( textSize != nullptr ) {
548 *textSize = li.image()->textSize();
549 }
550 return true;
551 }
552 }
553 return false;
554 }
555
556 // if address is in cache, do fast search of TEXT segments in cache
557 __block bool result = false;
558 if ( (_dyldCacheAddress != nullptr) && (addr > _dyldCacheAddress) ) {
559 if ( addr < (void*)((uint8_t*)_dyldCacheAddress+_dyldCacheAddress->mappedSize()) ) {
560 uint64_t cacheSlide = (uint64_t)_dyldCacheAddress - _dyldCacheAddress->unslidLoadAddress();
561 uint64_t unslidTargetAddr = (uint64_t)addr - cacheSlide;
562 _dyldCacheAddress->forEachImageTextSegment(^(uint64_t loadAddressUnslid, uint64_t textSegmentSize, const unsigned char* dylibUUID, const char* installName, bool& stop) {
563 if ( (loadAddressUnslid <= unslidTargetAddr) && (unslidTargetAddr < loadAddressUnslid+textSegmentSize) ) {
564 if ( ml != nullptr )
565 *ml = (MachOLoaded*)(loadAddressUnslid + cacheSlide);
566 if ( path != nullptr )
567 *path = installName;
568 if ( textSize != nullptr )
569 *textSize = textSegmentSize;
570 stop = true;
571 result = true;
572 }
573 });
574 if ( result )
575 return result;
576 }
577 }
578
579 // slow path - search image list
580 infoForImageMappedAt(addr, ^(const LoadedImage& foundImage, uint8_t permissions) {
581 if ( ml != nullptr )
582 *ml = foundImage.loadedAddress();
583 if ( path != nullptr )
584 *path = foundImage.image()->path();
585 if ( textSize != nullptr )
586 *textSize = foundImage.image()->textSize();
587 result = true;
588 });
589
590 return result;
591 }
592
593 // same as infoForImageMappedAt(), but only looks at images not in the dyld cache
594 void AllImages::infoForNonCachedImageMappedAt(const void* addr, void (^handler)(const LoadedImage& foundImage, uint8_t permissions)) const
595 {
596 __block uint8_t permissions;
597 if ( _initialImages != nullptr ) {
598 // being called during libSystem initialization, so _loadedImages not allocated yet
599 for (const LoadedImage& li : *_initialImages) {
600 if ( !((MachOAnalyzer*)li.loadedAddress())->inDyldCache() ) {
601 if ( li.image()->containsAddress(addr, li.loadedAddress(), &permissions) ) {
602 handler(li, permissions);
603 break;
604 }
605 }
606 }
607 return;
608 }
609
610 withReadLock(^{
611 for (const LoadedImage& li : _loadedImages) {
612 if ( !((MachOAnalyzer*)li.loadedAddress())->inDyldCache() ) {
613 if ( li.image()->containsAddress(addr, li.loadedAddress(), &permissions) ) {
614 handler(li, permissions);
615 break;
616 }
617 }
618 }
619 });
620 }
621
622 bool AllImages::immutableMemory(const void* addr, size_t length) const
623 {
624 // quick check to see if in shared cache
625 if ( _dyldCacheAddress != nullptr ) {
626 bool readOnly;
627 if ( _dyldCacheAddress->inCache(addr, length, readOnly) ) {
628 return readOnly;
629 }
630 }
631
632 __block bool result = false;
633 withReadLock(^() {
634 // quick reject if the address is outside the range spanned by all non-cached images
635 if ( ((uintptr_t)addr < _lowestNonCached) || ((uintptr_t)addr+length > _highestNonCached) ) {
636 result = false;
637 return;
638 }
639 // slow walk through all images, only look at images not in dyld cache
640 for (const LoadedImage& li : _loadedImages) {
641 if ( !((MachOAnalyzer*)li.loadedAddress())->inDyldCache() ) {
642 uint8_t permissions;
643 if ( li.image()->containsAddress(addr, li.loadedAddress(), &permissions) ) {
644 result = ((permissions & VM_PROT_WRITE) == 0) && li.image()->neverUnload();
645 break;
646 }
647 }
648 }
649 });
650
651 return result;
652 }
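// Caller sketch (added): in this dyld version, immutableMemory() backs the
// _dyld_is_memory_immutable() SPI. A hypothetical use -- deciding whether a
// string can be returned by pointer or must be copied (needs <string.h> and
// <stdlib.h>):
#if 0
static const char* stableOrCopy(const char* s)
{
    if ( gAllImages.immutableMemory(s, strlen(s)+1) )
        return s;        // read-only and in a never-unload image
    return strdup(s);    // backing image could be unmapped later: copy it
}
#endif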
653
654 void AllImages::infoForImageWithLoadAddress(const MachOLoaded* mh, void (^handler)(const LoadedImage& foundImage)) const
655 {
656 withReadLock(^{
657 for (const LoadedImage& li : _loadedImages) {
658 if ( li.loadedAddress() == mh ) {
659 handler(li);
660 break;
661 }
662 }
663 });
664 }
665
666 bool AllImages::findImageNum(closure::ImageNum imageNum, LoadedImage& foundImage) const
667 {
668 if ( _initialImages != nullptr ) {
669 // being called during libSystem initialization, so _loadedImages not allocated yet
670 for (const LoadedImage& li : *_initialImages) {
671 if ( li.image()->representsImageNum(imageNum) ) {
672 foundImage = li;
673 return true;
674 }
675 }
676 return false;
677 }
678
679 bool result = false;
680 for (const LoadedImage& li : _loadedImages) {
681 if ( li.image()->representsImageNum(imageNum) ) {
682 foundImage = li;
683 result = true;
684 break;
685 }
686 }
687
688 return result;
689 }
690
691 const MachOLoaded* AllImages::findDependent(const MachOLoaded* mh, uint32_t depIndex)
692 {
693 __block const MachOLoaded* result = nullptr;
694 withReadLock(^{
695 for (const LoadedImage& li : _loadedImages) {
696 if ( li.loadedAddress() == mh ) {
697 closure::ImageNum depImageNum = li.image()->dependentImageNum(depIndex);
698 LoadedImage depLi;
699 if ( findImageNum(depImageNum, depLi) )
700 result = depLi.loadedAddress();
701 break;
702 }
703 }
704 });
705 return result;
706 }
707
708
709 void AllImages::breadthFirstRecurseDependents(Array<closure::ImageNum>& visited, const LoadedImage& nodeLi, bool& stopped, void (^handler)(const LoadedImage& aLoadedImage, bool& stop)) const
710 {
711 // call handler on all direct dependents (unless already visited)
712 STACK_ALLOC_ARRAY(LoadedImage, dependentsToRecurse, 256);
713 nodeLi.image()->forEachDependentImage(^(uint32_t depIndex, closure::Image::LinkKind kind, closure::ImageNum depImageNum, bool& depStop) {
714 if ( kind == closure::Image::LinkKind::upward )
715 return;
716 if ( visited.contains(depImageNum) )
717 return;
718 LoadedImage depLi;
719 if ( !findImageNum(depImageNum, depLi) )
720 return;
721 handler(depLi, depStop);
722 visited.push_back(depImageNum);
723 if ( depStop ) {
724 stopped = true;
725 return;
726 }
727 dependentsToRecurse.push_back(depLi);
728 });
729 if ( stopped )
730 return;
731 // recurse on all dependents just visited
732 for (LoadedImage& depLi : dependentsToRecurse) {
733 breadthFirstRecurseDependents(visited, depLi, stopped, handler);
734 }
735 }
736
737 void AllImages::visitDependentsTopDown(const LoadedImage& start, void (^handler)(const LoadedImage& aLoadedImage, bool& stop)) const
738 {
739 withReadLock(^{
740 STACK_ALLOC_ARRAY(closure::ImageNum, visited, count());
741 bool stop = false;
742 handler(start, stop);
743 if ( stop )
744 return;
745 visited.push_back(start.image()->imageNum());
746 breadthFirstRecurseDependents(visited, start, stop, handler);
747 });
748 }
749
750 const MachOLoaded* AllImages::mainExecutable() const
751 {
752 assert(_programVars != nullptr);
753 return (const MachOLoaded*)_programVars->mh;
754 }
755
756 const closure::Image* AllImages::mainExecutableImage() const
757 {
758 assert(_mainClosure != nullptr);
759 return _mainClosure->images()->imageForNum(_mainClosure->topImage());
760 }
761
762 void AllImages::setMainPath(const char* path)
763 {
764 _mainExeOverridePath = path;
765 }
766
767 const char* AllImages::imagePath(const closure::Image* image) const
768 {
769 #if __IPHONE_OS_VERSION_MIN_REQUIRED
770 // on iOS and watchOS, apps may be moved on device after their closure was built
771 if ( _mainExeOverridePath != nullptr ) {
772 if ( image == mainExecutableImage() )
773 return _mainExeOverridePath;
774 }
775 #endif
776 return image->path();
777 }
778
779 dyld_platform_t AllImages::platform() const {
780 return _platform;
781 }
782
783 void AllImages::incRefCount(const mach_header* loadAddress)
784 {
785 for (DlopenCount& entry : _dlopenRefCounts) {
786 if ( entry.loadAddress == loadAddress ) {
787 // found existing DlopenCount entry, bump counter
788 entry.refCount += 1;
789 return;
790 }
791 }
792
793 // no existing DlopenCount, add new one
794 _dlopenRefCounts.push_back({ loadAddress, 1 });
795 }
796
797 void AllImages::decRefCount(const mach_header* loadAddress)
798 {
799 bool doCollect = false;
800 for (DlopenCount& entry : _dlopenRefCounts) {
801 if ( entry.loadAddress == loadAddress ) {
802 // found existing DlopenCount entry, decrement counter
803 entry.refCount -= 1;
804 if ( entry.refCount == 0 ) {
805 _dlopenRefCounts.erase(entry);
806 doCollect = true;
807 break;
808 }
809 return;
810 }
811 }
812 if ( doCollect )
813 garbageCollectImages();
814 }
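// Behavior sketch (added), in terms of the public API; the path is
// hypothetical, and note that dylibs in the dyld cache never get a
// DlopenCount entry:
#if 0
void* h1 = dlopen("/usr/lib/libfoo.dylib", RTLD_NOW);  // new entry, refCount = 1
void* h2 = dlopen("/usr/lib/libfoo.dylib", RTLD_NOW);  // same image, refCount = 2
dlclose(h1);  // refCount = 1, image stays loaded
dlclose(h2);  // refCount = 0, entry erased, garbageCollectImages() runs
#endif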
815
816
817 #if __MAC_OS_X_VERSION_MIN_REQUIRED
818 NSObjectFileImage AllImages::addNSObjectFileImage(const OFIInfo& image)
819 {
820 __block uint64_t imageNum = 0;
821 withWriteLock(^{
822 imageNum = ++_nextObjectFileImageNum;
823 _objectFileImages.push_back(image);
824 _objectFileImages.back().imageNum = imageNum;
825 });
826 return (NSObjectFileImage)imageNum;
827 }
828
829 bool AllImages::forNSObjectFileImage(NSObjectFileImage imageHandle,
830 void (^handler)(OFIInfo& image)) {
831 uint64_t imageNum = (uint64_t)imageHandle;
832 bool __block foundImage = false;
833 withReadLock(^{
834 for (OFIInfo& ofi : _objectFileImages) {
835 if ( ofi.imageNum == imageNum ) {
836 handler(ofi);
837 foundImage = true;
838 return;
839 }
840 }
841 });
842
843 return foundImage;
844 }
845
846 void AllImages::removeNSObjectFileImage(NSObjectFileImage imageHandle)
847 {
848 uint64_t imageNum = (uint64_t)imageHandle;
849 withWriteLock(^{
850 for (OFIInfo& ofi : _objectFileImages) {
851 if ( ofi.imageNum == imageNum ) {
852 _objectFileImages.erase(ofi);
853 return;
854 }
855 }
856 });
857 }
858 #endif
859
860
861 class VIS_HIDDEN Reaper
862 {
863 public:
864 struct ImageAndUse
865 {
866 const LoadedImage* li;
867 bool inUse;
868 };
869 Reaper(Array<ImageAndUse>& unloadables, AllImages*);
870 void garbageCollect();
871 void finalizeDeadImages();
872 private:
873
874 void markDirectlyDlopenedImagesAsUsed();
875 void markDependentOfInUseImages();
876 void markDependentsOf(const LoadedImage*);
877 uint32_t inUseCount();
878 void dump(const char* msg);
879
880 Array<ImageAndUse>& _unloadables;
881 AllImages* _allImages;
882 uint32_t _deadCount;
883 };
884
885 Reaper::Reaper(Array<ImageAndUse>& unloadables, AllImages* all)
886 : _unloadables(unloadables), _allImages(all), _deadCount(0)
887 {
888 }
889
890 void Reaper::markDirectlyDlopenedImagesAsUsed()
891 {
892 for (AllImages::DlopenCount& entry : _allImages->_dlopenRefCounts) {
893 if ( entry.refCount != 0 ) {
894 for (ImageAndUse& iu : _unloadables) {
895 if ( iu.li->loadedAddress() == entry.loadAddress ) {
896 iu.inUse = true;
897 break;
898 }
899 }
900 }
901 }
902 }
903
904 uint32_t Reaper::inUseCount()
905 {
906 uint32_t count = 0;
907 for (ImageAndUse& iu : _unloadables) {
908 if ( iu.inUse )
909 ++count;
910 }
911 return count;
912 }
913
914 void Reaper::markDependentsOf(const LoadedImage* li)
915 {
916 li->image()->forEachDependentImage(^(uint32_t depIndex, closure::Image::LinkKind kind, closure::ImageNum depImageNum, bool& stop) {
917 for (ImageAndUse& iu : _unloadables) {
918 if ( !iu.inUse && iu.li->image()->representsImageNum(depImageNum) ) {
919 iu.inUse = true;
920 break;
921 }
922 }
923 });
924 }
925
926 void Reaper::markDependentOfInUseImages()
927 {
928 for (ImageAndUse& iu : _unloadables) {
929 if ( iu.inUse )
930 markDependentsOf(iu.li);
931 }
932 }
933
934 void Reaper::dump(const char* msg)
935 {
936 //log("%s:\n", msg);
937 //for (ImageAndUse& iu : _unloadables) {
938 // log(" in-used=%d %s\n", iu.inUse, iu.li->image()->path());
939 //}
940 }
941
942 void Reaper::garbageCollect()
943 {
944 //dump("all unloadable images");
945
946 // mark all dylibs directly dlopen'ed as in use
947 markDirectlyDlopenedImagesAsUsed();
948
949 //dump("directly dlopen()'ed marked");
950
951 // iteratively mark dependents of in-use dylibs as in-use until in-use count stops changing
952 uint32_t lastCount = inUseCount();
953 bool countChanged = false;
954 do {
955 markDependentOfInUseImages();
956 //dump("dependents marked");
957 uint32_t newCount = inUseCount();
958 countChanged = (newCount != lastCount);
959 lastCount = newCount;
960 } while (countChanged);
961
962 _deadCount = (uint32_t)_unloadables.count() - inUseCount();
963 }
964
965 void Reaper::finalizeDeadImages()
966 {
967 if ( _deadCount == 0 )
968 return;
969 __cxa_range_t ranges[_deadCount];
970 __cxa_range_t* rangesArray = ranges;
971 __block unsigned int rangesCount = 0;
972 for (ImageAndUse& iu : _unloadables) {
973 if ( iu.inUse )
974 continue;
975 iu.li->image()->forEachDiskSegment(^(uint32_t segIndex, uint32_t fileOffset, uint32_t fileSize, int64_t vmOffset, uint64_t vmSize, uint8_t permissions, bool &stop) {
976 if ( permissions & VM_PROT_EXECUTE ) {
977 rangesArray[rangesCount].addr = (char*)(iu.li->loadedAddress()) + vmOffset;
978 rangesArray[rangesCount].length = (size_t)vmSize;
979 ++rangesCount;
980 }
981 });
982 }
983 __cxa_finalize_ranges(ranges, rangesCount);
984 }
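// Context (added): __cxa_finalize_ranges() is the range-filtered form of
// __cxa_finalize(); it runs the registered __cxa_atexit/atexit handlers whose
// code falls inside the given address ranges -- here, the executable segments
// of the images about to be unmapped -- so their static destructors run
// before the code disappears.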
985
986
987 // This function is called at the end of dlclose() when the reference count goes to zero.
988 // The dylib being unloaded may have brought in other dependent dylibs when it was loaded.
989 // Those dependent dylibs need to be unloaded, but only if they are not referenced by
990 // something else. We use a standard mark and sweep garbage collection.
991 //
992 // The tricky part is that when a dylib is unloaded it may have a termination function that
993 // can run and itself call dlclose() on yet another dylib. The problem is that this
994 // sort of garbage collection is not re-entrant. Instead, a terminator's call to dlclose()
995 // which calls garbageCollectImages() will just set a flag to re-do the garbage collection
996 // when the current pass is done.
997 //
998 // Also note that this is done within the _loadedImages writer lock, so any dlopen/dlclose
999 // on other threads are blocked while this garbage collection runs
1000 //
1001 void AllImages::garbageCollectImages()
1002 {
1003 // if some other thread is currently GC'ing images, let that thread do the work
1004 int32_t newCount = OSAtomicIncrement32(&_gcCount);
1005 if ( newCount != 1 )
1006 return;
1007
1008 do {
1009 STACK_ALLOC_ARRAY(Reaper::ImageAndUse, unloadables, _loadedImages.count());
1010 withReadLock(^{
1011 for (const LoadedImage& li : _loadedImages) {
1012 if ( !li.image()->neverUnload() /*&& !li.neverUnload()*/ ) {
1013 unloadables.push_back({&li, false});
1014 //fprintf(stderr, "unloadable[%lu] %p %s\n", unloadables.count(), li.loadedAddress(), li.image()->path());
1015 }
1016 }
1017 });
1018 // make reaper object to do garbage collection and notifications
1019 Reaper reaper(unloadables, this);
1020 reaper.garbageCollect();
1021
1022 // FIXME: we should sort dead images so higher level ones are terminated first
1023
1024 // call cxa_finalize_ranges of dead images
1025 reaper.finalizeDeadImages();
1026
1027 // FIXME: call static terminators of dead images
1028
1029 // FIXME: DOF unregister
1030
1031 //fprintf(stderr, "_loadedImages before GC removals:\n");
1032 //for (const LoadedImage& li : _loadedImages) {
1033 // fprintf(stderr, " loadAddr=%p, path=%s\n", li.loadedAddress(), li.image()->path());
1034 //}
1035
1036 // make copy of LoadedImages we want to remove
1037 // because unloadables[] points into LoadedImage we are shrinking
1038 STACK_ALLOC_ARRAY(LoadedImage, unloadImages, _loadedImages.count());
1039 for (const Reaper::ImageAndUse& iu : unloadables) {
1040 if ( !iu.inUse )
1041 unloadImages.push_back(*iu.li);
1042 }
1043 // remove entries from _loadedImages
1044 if ( !unloadImages.empty() ) {
1045 removeImages(unloadImages);
1046
1047 //fprintf(stderr, "_loadedImages after GC removals:\n");
1048 //for (const LoadedImage& li : _loadedImages) {
1049 // fprintf(stderr, " loadAddr=%p, path=%s\n", li.loadedAddress(), li.image()->path());
1050 //}
1051 }
1052
1053 // if some other thread called GC during our work, redo GC on its behalf
1054 newCount = OSAtomicDecrement32(&_gcCount);
1055 }
1056 while (newCount > 0);
1057 }
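// Pattern sketch (added): the _gcCount atomics above collapse concurrent and
// re-entrant GC requests. Only the thread that raises the count 0 -> 1 does
// the work, repeating once per request that arrived while it was busy.
// Hypothetical names:
#if 0
static volatile int32_t gRequests = 0;
void requestGC() {
    if ( OSAtomicIncrement32(&gRequests) != 1 )
        return;                                      // a pass is already running
    do {
        runOnePass();                                // hypothetical worker
    } while ( OSAtomicDecrement32(&gRequests) > 0 ); // redo for queued requests
}
#endif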
1058
1059
1060
1061 void AllImages::addLoadNotifier(NotifyFunc func)
1062 {
1063 // callback about already loaded images
1064 withReadLock(^{
1065 for (const LoadedImage& li : _loadedImages) {
1066 dyld3::ScopedTimer timer(DBG_DYLD_TIMING_FUNC_FOR_ADD_IMAGE, (uint64_t)li.loadedAddress(), (uint64_t)func, 0);
1067 log_notifications("dyld: add notifier %p called with mh=%p\n", func, li.loadedAddress());
1068 if ( li.image()->inDyldCache() )
1069 func(li.loadedAddress(), (uintptr_t)_dyldCacheSlide);
1070 else
1071 func(li.loadedAddress(), li.loadedAddress()->getSlide());
1072 }
1073 });
1074
1075 // add to list of functions to call about future loads
1076 withNotifiersLock(^{
1077 _loadNotifiers.push_back(func);
1078 });
1079 }
1080
1081 void AllImages::addUnloadNotifier(NotifyFunc func)
1082 {
1083 // add to list of functions to call about future unloads
1084 withNotifiersLock(^{
1085 _unloadNotifiers.push_back(func);
1086 });
1087 }
1088
1089 void AllImages::addLoadNotifier(LoadNotifyFunc func)
1090 {
1091 // callback about already loaded images
1092 withReadLock(^{
1093 for (const LoadedImage& li : _loadedImages) {
1094 dyld3::ScopedTimer timer(DBG_DYLD_TIMING_FUNC_FOR_ADD_IMAGE, (uint64_t)li.loadedAddress(), (uint64_t)func, 0);
1095 log_notifications("dyld: add notifier %p called with mh=%p\n", func, li.loadedAddress());
1096 func(li.loadedAddress(), li.image()->path(), !li.image()->neverUnload());
1097 }
1098 });
1099
1100 // add to list of functions to call about future loads
1101 withNotifiersLock(^{
1102 _loadNotifiers2.push_back(func);
1103 });
1104 }
1105
1106
1107 void AllImages::setObjCNotifiers(_dyld_objc_notify_mapped map, _dyld_objc_notify_init init, _dyld_objc_notify_unmapped unmap)
1108 {
1109 _objcNotifyMapped = map;
1110 _objcNotifyInit = init;
1111 _objcNotifyUnmapped = unmap;
1112
1113 // callback about already loaded images
1114 uint32_t maxCount = count();
1115 STACK_ALLOC_ARRAY(const mach_header*, mhs, maxCount);
1116 STACK_ALLOC_ARRAY(const char*, paths, maxCount);
1117 // no lock needed here because this is called while the process is still single-threaded
1118 for (const LoadedImage& li : _loadedImages) {
1119 if ( li.image()->hasObjC() ) {
1120 paths.push_back(imagePath(li.image()));
1121 mhs.push_back(li.loadedAddress());
1122 }
1123 }
1124 if ( !mhs.empty() ) {
1125 (*map)((uint32_t)mhs.count(), &paths[0], &mhs[0]);
1126 if ( log_notifications("dyld: objc-mapped-notifier called with %ld images:\n", mhs.count()) ) {
1127 for (uintptr_t i=0; i < mhs.count(); ++i) {
1128 log_notifications("dyld: objc-mapped: %p %s\n", mhs[i], paths[i]);
1129 }
1130 }
1131 }
1132 }
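// Registration sketch (added): the objc runtime supplies these callbacks via
// the _dyld_objc_notify_register() SPI, which lands here. Stub names are
// hypothetical; the signatures follow the typedefs used above.
#if 0
static void map_images(unsigned count, const char* const paths[],
                       const struct mach_header* const mhs[]) { /* realize classes */ }
static void load_images(const char* path, const struct mach_header* mh) { /* run +load */ }
static void unmap_image(const char* path, const struct mach_header* mh) { /* teardown */ }
// _dyld_objc_notify_register(&map_images, &load_images, &unmap_image);
#endif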
1133
1134 void AllImages::applyInterposingToDyldCache(const closure::Closure* closure)
1135 {
1136 dyld3::ScopedTimer timer(DBG_DYLD_TIMING_APPLY_INTERPOSING, 0, 0, 0);
1137 const uintptr_t cacheStart = (uintptr_t)_dyldCacheAddress;
1138 __block closure::ImageNum lastCachedDylibImageNum = 0;
1139 __block const closure::Image* lastCachedDylibImage = nullptr;
1140 __block bool suspendedAccounting = false;
1141 closure->forEachPatchEntry(^(const closure::Closure::PatchEntry& entry) {
1142 if ( entry.overriddenDylibInCache != lastCachedDylibImageNum ) {
1143 lastCachedDylibImage = closure::ImageArray::findImage(imagesArrays(), entry.overriddenDylibInCache);
1144 assert(lastCachedDylibImage != nullptr);
1145 lastCachedDylibImageNum = entry.overriddenDylibInCache;
1146 }
1147 if ( !suspendedAccounting ) {
1148 Loader::vmAccountingSetSuspended(true, log_fixups);
1149 suspendedAccounting = true;
1150 }
1151 uintptr_t newValue = 0;
1152 LoadedImage foundImage;
1153 switch ( entry.replacement.image.kind ) {
1154 case closure::Image::ResolvedSymbolTarget::kindImage:
1155 assert(findImageNum(entry.replacement.image.imageNum, foundImage));
1156 newValue = (uintptr_t)(foundImage.loadedAddress()) + (uintptr_t)entry.replacement.image.offset;
1157 break;
1158 case closure::Image::ResolvedSymbolTarget::kindSharedCache:
1159 newValue = (uintptr_t)_dyldCacheAddress + (uintptr_t)entry.replacement.sharedCache.offset;
1160 break;
1161 case closure::Image::ResolvedSymbolTarget::kindAbsolute:
1162 // this means the symbol was missing in the cache override dylib, so set any uses to NULL
1163 newValue = (uintptr_t)entry.replacement.absolute.value;
1164 break;
1165 default:
1166 assert(0 && "bad replacement kind");
1167 }
1168 lastCachedDylibImage->forEachPatchableUseOfExport(entry.exportCacheOffset, ^(closure::Image::PatchableExport::PatchLocation patchLocation) {
1169 uintptr_t* loc = (uintptr_t*)(cacheStart+patchLocation.cacheOffset);
1170 #if __has_feature(ptrauth_calls)
1171 if ( patchLocation.authenticated ) {
1172 MachOLoaded::ChainedFixupPointerOnDisk fixupInfo;
1173 fixupInfo.authRebase.auth = true;
1174 fixupInfo.authRebase.addrDiv = patchLocation.usesAddressDiversity;
1175 fixupInfo.authRebase.diversity = patchLocation.discriminator;
1176 fixupInfo.authRebase.key = patchLocation.key;
1177 *loc = fixupInfo.signPointer(loc, newValue + patchLocation.getAddend());
1178 log_fixups("dyld: cache fixup: *%p = %p (JOP: diversity 0x%04X, addr-div=%d, key=%s)\n",
1179 loc, (void*)*loc, patchLocation.discriminator, patchLocation.usesAddressDiversity, patchLocation.keyName());
1180 return;
1181 }
1182 #endif
1183 log_fixups("dyld: cache fixup: *%p = 0x%0lX (dyld cache patch)\n", loc, newValue + (uintptr_t)patchLocation.getAddend());
1184 *loc = newValue + (uintptr_t)patchLocation.getAddend();
1185 });
1186 });
1187 if ( suspendedAccounting )
1188 Loader::vmAccountingSetSuspended(false, log_fixups);
1189 }
1190
1191 void AllImages::runStartupInitialzers()
1192 {
1193 __block bool mainExecutableInitializerNeedsToRun = true;
1194 __block uint32_t imageIndex = 0;
1195 while ( mainExecutableInitializerNeedsToRun ) {
1196 __block const closure::Image* image = nullptr;
1197 withReadLock(^{
1198 image = _loadedImages[imageIndex].image();
1199 if ( _loadedImages[imageIndex].loadedAddress()->isMainExecutable() )
1200 mainExecutableInitializerNeedsToRun = false;
1201 });
1202 runInitialzersBottomUp(image);
1203 ++imageIndex;
1204 }
1205 }
1206
1207
1208 // Find image in _loadedImages which has ImageNum == num.
1209 // Try indexHint first; if the hint is wrong, update it so the next use is faster.
1210 LoadedImage AllImages::findImageNum(closure::ImageNum num, uint32_t& indexHint)
1211 {
1212 __block LoadedImage copy;
1213 withReadLock(^{
1214 if ( (indexHint >= _loadedImages.count()) || !_loadedImages[indexHint].image()->representsImageNum(num) ) {
1215 indexHint = 0;
1216 for (indexHint=0; indexHint < _loadedImages.count(); ++indexHint) {
1217 if ( _loadedImages[indexHint].image()->representsImageNum(num) )
1218 break;
1219 }
1220 assert(indexHint < _loadedImages.count());
1221 }
1222 copy = _loadedImages[indexHint];
1223 });
1224 return copy;
1225 }
1226
1227
1228 // Change the state of the LoadedImage in _loadedImages which has ImageNum == num.
1229 // Only change state if current state is expectedCurrentState (atomic swap).
1230 bool AllImages::swapImageState(closure::ImageNum num, uint32_t& indexHint, LoadedImage::State expectedCurrentState, LoadedImage::State newState)
1231 {
1232 __block bool result = false;
1233 withWriteLock(^{
1234 if ( (indexHint >= _loadedImages.count()) || !_loadedImages[indexHint].image()->representsImageNum(num) ) {
1235 indexHint = 0;
1236 for (indexHint=0; indexHint < _loadedImages.count(); ++indexHint) {
1237 if ( _loadedImages[indexHint].image()->representsImageNum(num) )
1238 break;
1239 }
1240 assert(indexHint < _loadedImages.count());
1241 }
1242 if ( _loadedImages[indexHint].state() == expectedCurrentState ) {
1243 _loadedImages[indexHint].setState(newState);
1244 result = true;
1245 }
1246 });
1247 return result;
1248 }
1249
1250 // dyld3 pre-builds the order in which initializers need to be run (bottom up) as a list in the closure.
1251 // This method uses that list to run all initializers.
1252 // Because an initializer may call dlopen() and/or create threads, the _loadedImages array
1253 // may move under us. So, never keep a pointer into it. Always reference images by ImageNum
1254 // and use hint to make that faster in the case where the _loadedImages does not move.
1255 void AllImages::runInitialzersBottomUp(const closure::Image* topImage)
1256 {
1257 // walk closure specified initializer list, already ordered bottom up
1258 topImage->forEachImageToInitBefore(^(closure::ImageNum imageToInit, bool& stop) {
1259 // get a copy of the LoadedImage for imageToInit, but don't keep a reference into _loadedImages, because it may move if initializers call dlopen()
1260 uint32_t indexHint = 0;
1261 LoadedImage loadedImageCopy = findImageNum(imageToInit, indexHint);
1262 // skip if the image is already inited, or in process of being inited (dependency cycle)
1263 if ( (loadedImageCopy.state() == LoadedImage::State::fixedUp) && swapImageState(imageToInit, indexHint, LoadedImage::State::fixedUp, LoadedImage::State::beingInited) ) {
1264 // tell objc to run any +load methods in image
1265 if ( (_objcNotifyInit != nullptr) && loadedImageCopy.image()->mayHavePlusLoads() ) {
1266 dyld3::ScopedTimer timer(DBG_DYLD_TIMING_OBJC_INIT, (uint64_t)loadedImageCopy.loadedAddress(), 0, 0);
1267 const char* path = imagePath(loadedImageCopy.image());
1268 log_notifications("dyld: objc-init-notifier called with mh=%p, path=%s\n", loadedImageCopy.loadedAddress(), path);
1269 (*_objcNotifyInit)(path, loadedImageCopy.loadedAddress());
1270 }
1271
1272 // run all initializers in image
1273 runAllInitializersInImage(loadedImageCopy.image(), loadedImageCopy.loadedAddress());
1274
1275 // advance state to inited
1276 swapImageState(imageToInit, indexHint, LoadedImage::State::beingInited, LoadedImage::State::inited);
1277 }
1278 });
1279 }
1280
1281
1282 void AllImages::runLibSystemInitializer(const LoadedImage& libSystem)
1283 {
1284 // run all initializers in libSystem.dylib
1285 runAllInitializersInImage(libSystem.image(), libSystem.loadedAddress());
1286
1287 // Note: during libSystem's initialization, libdyld_initializer() is called which copies _initialImages to _loadedImages
1288
1289 // mark libSystem.dylib as inited, so a later recursive init will not re-run it
1290 for (LoadedImage& li : _loadedImages) {
1291 if ( li.loadedAddress() == libSystem.loadedAddress() ) {
1292 li.setState(LoadedImage::State::inited);
1293 break;
1294 }
1295 }
1296 }
1297
1298 void AllImages::runAllInitializersInImage(const closure::Image* image, const MachOLoaded* ml)
1299 {
1300 image->forEachInitializer(ml, ^(const void* func) {
1301 Initializer initFunc = (Initializer)func;
1302 #if __has_feature(ptrauth_calls)
1303 initFunc = (Initializer)__builtin_ptrauth_sign_unauthenticated((void*)initFunc, 0, 0);
1304 #endif
1305 {
1306 ScopedTimer timer(DBG_DYLD_TIMING_STATIC_INITIALIZER, (uint64_t)ml, (uint64_t)func, 0); // named, so the timer spans the call below
1307 initFunc(NXArgc, NXArgv, environ, appleParams, _programVars);
1308
1309 }
1310 log_initializers("dyld: called initializer %p in %s\n", initFunc, image->path());
1311 });
1312 }
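// Shape sketch (added): the pointers iterated above come from the image's
// initializer sections (e.g. __mod_init_func); on Apple platforms dyld passes
// the five arguments used in the call above, so an initializer may be written
// as (example only):
#if 0
__attribute__((constructor))
static void exampleInitializer(int argc, const char* argv[], const char* envp[],
                               const char* apple[], const struct ProgramVars* vars)
{
    // argc/argv/envp/apple mirror what main() receives
}
#endif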
1313
1314 const MachOLoaded* AllImages::dlopen(Diagnostics& diag, const char* path, bool rtldNoLoad, bool rtldLocal, bool rtldNoDelete, bool fromOFI, const void* callerAddress)
1315 {
1316 // quick check if path is in shared cache and already loaded
1317 if ( _dyldCacheAddress != nullptr ) {
1318 uint32_t dyldCacheImageIndex;
1319 if ( _dyldCacheAddress->hasImagePath(path, dyldCacheImageIndex) ) {
1320 uint64_t mTime;
1321 uint64_t inode;
1322 const MachOLoaded* mh = (MachOLoaded*)_dyldCacheAddress->getIndexedImageEntry(dyldCacheImageIndex, mTime, inode);
1323 // Note: we do not need the read lock because this is within the global dlopen lock
1324 for (const LoadedImage& li : _loadedImages) {
1325 if ( li.loadedAddress() == mh ) {
1326 return mh;
1327 }
1328 }
1329 }
1330 }
1331
1332 __block closure::ImageNum callerImageNum = 0;
1333 STACK_ALLOC_ARRAY(LoadedImage, loadedList, 1024);
1334 for (const LoadedImage& li : _loadedImages) {
1335 loadedList.push_back(li);
1336 uint8_t permissions;
1337 if ( (callerImageNum == 0) && li.image()->containsAddress(callerAddress, li.loadedAddress(), &permissions) ) {
1338 callerImageNum = li.image()->imageNum();
1339 }
1340 //fprintf(stderr, "mh=%p, image=%p, imageNum=0x%04X, path=%s\n", li.loadedAddress(), li.image(), li.image()->imageNum(), li.image()->path());
1341 }
1342 uintptr_t alreadyLoadedCount = loadedList.count();
1343
1344 // make closure
1345 closure::ImageNum topImageNum = 0;
1346 const closure::DlopenClosure* newClosure;
1347
1348 // First try with closures from the shared cache permitted.
1349 // Then try again with forcing a new closure
1350 for (bool canUseSharedCacheClosure : { true, false }) {
1351 closure::FileSystemPhysical fileSystem;
1352 closure::ClosureBuilder::AtPath atPathHandling = (_allowAtPaths ? closure::ClosureBuilder::AtPath::all : closure::ClosureBuilder::AtPath::onlyInRPaths);
1353 closure::ClosureBuilder cb(_nextImageNum, fileSystem, _dyldCacheAddress, true, closure::gPathOverrides, atPathHandling);
1354 newClosure = cb.makeDlopenClosure(path, _mainClosure, loadedList, callerImageNum, rtldNoLoad, canUseSharedCacheClosure, &topImageNum);
1355 if ( newClosure == closure::ClosureBuilder::sRetryDlopenClosure ) {
1356 log_apis(" dlopen: closure builder needs to retry: %s\n", path);
1357 assert(canUseSharedCacheClosure);
1358 continue;
1359 }
1360 if ( (newClosure == nullptr) && (topImageNum == 0) ) {
1361 if ( cb.diagnostics().hasError())
1362 diag.error("%s", cb.diagnostics().errorMessage());
1363 else if ( !rtldNoLoad )
1364 diag.error("dlopen(): file not found: %s", path);
1365 return nullptr;
1366 }
1367 // save off next available ImageNum for use by next call to dlopen()
1368 _nextImageNum = cb.nextFreeImageNum();
1369 break;
1370 }
1371
1372 if ( newClosure != nullptr ) {
1373 // if new closure contains an ImageArray, add it to list
1374 if ( const closure::ImageArray* newArray = newClosure->images() ) {
1375 appendToImagesArray(newArray);
1376 }
1377 log_apis(" dlopen: made closure: %p\n", newClosure);
1378 }
1379
1380 // if already loaded, just bump refCount and return
1381 if ( (newClosure == nullptr) && (topImageNum != 0) ) {
1382 for (LoadedImage& li : _loadedImages) {
1383 if ( li.image()->imageNum() == topImageNum ) {
1384 // is already loaded
1385 const MachOLoaded* topLoadAddress = li.loadedAddress();
1386 if ( !li.image()->inDyldCache() )
1387 incRefCount(topLoadAddress);
1388 log_apis(" dlopen: already loaded as '%s'\n", li.image()->path());
1389 // if previously opened with RTLD_LOCAL, but now opened with RTLD_GLOBAL, unhide it
1390 if ( !rtldLocal && li.hideFromFlatSearch() )
1391 li.setHideFromFlatSearch(false);
1392 // if called with RTLD_NODELETE, mark it as never-unload
1393 if ( rtldNoDelete )
1394 li.markLeaveMapped();
1395 return topLoadAddress;
1396 }
1397 }
1398 }
1399
1400 // run loader to load all new images
1401 Loader loader(loadedList, _dyldCacheAddress, imagesArrays(), &dyld3::log_loads, &dyld3::log_segments, &dyld3::log_fixups, &dyld3::log_dofs);
1402 const closure::Image* topImage = closure::ImageArray::findImage(imagesArrays(), topImageNum);
1403 if ( newClosure == nullptr ) {
1404 if ( topImageNum < dyld3::closure::kLastDyldCacheImageNum )
1405 log_apis(" dlopen: using image in dyld shared cache %p\n", topImage);
1406 else
1407 log_apis(" dlopen: using pre-built dlopen closure %p\n", topImage);
1408 }
1409 uintptr_t topIndex = loadedList.count();
1410 LoadedImage topLoadedImage = LoadedImage::make(topImage);
1411 if ( rtldLocal && !topImage->inDyldCache() )
1412 topLoadedImage.setHideFromFlatSearch(true);
1413 if ( rtldNoDelete && !topImage->inDyldCache() )
1414 topLoadedImage.markLeaveMapped();
1415 loader.addImage(topLoadedImage);
1416
1417
1418 // recursively load all dependents and fill in allImages array
1419 loader.completeAllDependents(diag, topIndex);
1420 if ( diag.hasError() )
1421 return nullptr;
1422 loader.mapAndFixupAllImages(diag, _processDOFs, fromOFI, topIndex);
1423 if ( diag.hasError() )
1424 return nullptr;
1425
1426 const MachOLoaded* topLoadAddress = loadedList[topIndex].loadedAddress();
1427
1428 // bump dlopen refcount of image directly loaded
1429 if ( !topImage->inDyldCache() )
1430 incRefCount(topLoadAddress);
1431
1432 // tell gAllImages about new images
1433 const uint32_t newImageCount = (uint32_t)(loadedList.count() - alreadyLoadedCount);
1434 addImages(loadedList.subArray(alreadyLoadedCount, newImageCount));
1435
1436 // if closure adds images that override dyld cache, patch cache
1437 if ( newClosure != nullptr )
1438 applyInterposingToDyldCache(newClosure);
1439
1440 runImageNotifiers(loadedList.subArray(alreadyLoadedCount, newImageCount));
1441
1442 // run initializers
1443 runInitialzersBottomUp(topImage);
1444
1445 return topLoadAddress;
1446 }
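// Flag-mapping sketch (added): libdyld's public dlopen() derives the booleans
// this method takes from the caller's mode bits, roughly:
//   rtldNoLoad   <- (mode & RTLD_NOLOAD)
//   rtldLocal    <- (mode & RTLD_LOCAL)      // setHideFromFlatSearch()
//   rtldNoDelete <- (mode & RTLD_NODELETE)   // markLeaveMapped()
#if 0
void* handle = dlopen("libfoo.dylib", RTLD_NOW | RTLD_NODELETE);  // hypothetical dylib
#endif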
1447
1448 void AllImages::appendToImagesArray(const closure::ImageArray* newArray)
1449 {
1450 _imagesArrays.push_back(newArray);
1451 }
1452
1453 const Array<const closure::ImageArray*>& AllImages::imagesArrays()
1454 {
1455 return _imagesArrays.array();
1456 }
1457
1458 bool AllImages::isRestricted() const
1459 {
1460 return !_allowEnvPaths;
1461 }
1462
1463
1464
1465
1466 } // namespace dyld3
1467
1468
1469
1470
1471
1472