// dyld-655.1: dyld3/AllImages.cpp
/*
 * Copyright (c) 2017 Apple Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */


#include <stdint.h>
#include <fcntl.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/sysctl.h>
#include <mach/mach_time.h> // mach_absolute_time()
#include <libkern/OSAtomic.h>

#include <vector>
#include <algorithm>

#include "AllImages.h"
#include "libdyldEntryVector.h"
#include "Logging.h"
#include "Loading.h"
#include "Tracing.h"
#include "DyldSharedCache.h"
#include "PathOverrides.h"
#include "Closure.h"
#include "ClosureBuilder.h"
#include "ClosureFileSystemPhysical.h"

extern const char** appleParams;

// TODO: these declarations should come from a header
struct __cxa_range_t {
    const void* addr;
    size_t      length;
};
extern "C" void __cxa_finalize_ranges(const __cxa_range_t ranges[], unsigned int count);

VIS_HIDDEN bool gUseDyld3 = false;


namespace dyld3 {



/////////////////////  AllImages  ////////////////////////////


AllImages gAllImages;



void AllImages::init(const closure::LaunchClosure* closure, const DyldSharedCache* dyldCacheLoadAddress, const char* dyldCachePath,
                     const Array<LoadedImage>& initialImages)
{
    _mainClosure      = closure;
    _initialImages    = &initialImages;
    _dyldCacheAddress = dyldCacheLoadAddress;
    _dyldCachePath    = dyldCachePath;

    if ( _dyldCacheAddress ) {
        const dyld_cache_mapping_info* const fileMappings = (dyld_cache_mapping_info*)((uint64_t)_dyldCacheAddress + _dyldCacheAddress->header.mappingOffset);
        _dyldCacheSlide = (uint64_t)dyldCacheLoadAddress - fileMappings[0].address;
        _imagesArrays.push_back(dyldCacheLoadAddress->cachedDylibsImageArray());
        if ( auto others = dyldCacheLoadAddress->otherOSImageArray() )
            _imagesArrays.push_back(others);
    }
    _imagesArrays.push_back(_mainClosure->images());

    // record first ImageNum to use for dlopen() calls
    _mainClosure->images()->forEachImage(^(const dyld3::closure::Image* image, bool& stop) {
        closure::ImageNum num = image->imageNum();
        if ( num >= _nextImageNum )
            _nextImageNum = num+1;
    });

    // Make temporary old image array, so libSystem initializers can be debugged
    STACK_ALLOC_ARRAY(dyld_image_info, oldDyldInfo, initialImages.count());
    for (const LoadedImage& li : initialImages) {
        oldDyldInfo.push_back({li.loadedAddress(), li.image()->path(), 0});
    }
    _oldAllImageInfos->infoArray      = &oldDyldInfo[0];
    _oldAllImageInfos->infoArrayCount = (uint32_t)oldDyldInfo.count();
    _oldAllImageInfos->notification(dyld_image_adding, _oldAllImageInfos->infoArrayCount, _oldAllImageInfos->infoArray);
    _oldAllImageInfos->infoArray      = nullptr;
    _oldAllImageInfos->infoArrayCount = 0;

    _processDOFs = Loader::dtraceUserProbesEnabled();
}

void AllImages::setProgramVars(ProgramVars* vars)
{
    _programVars = vars;
    const dyld3::MachOFile* mf = (dyld3::MachOFile*)_programVars->mh;
    mf->forEachSupportedPlatform(^(dyld3::Platform platform, uint32_t minOS, uint32_t sdk) {
        _platform = (dyld_platform_t)platform;
        //FIXME assert there is only one?
    });
}

void AllImages::setRestrictions(bool allowAtPaths, bool allowEnvPaths)
{
    _allowAtPaths  = allowAtPaths;
    _allowEnvPaths = allowEnvPaths;
}

void AllImages::applyInitialImages()
{
    addImages(*_initialImages);
    runImageNotifiers(*_initialImages);
    _initialImages = nullptr;  // this was stack allocated
}

void AllImages::withReadLock(void (^work)()) const
{
#ifdef OS_UNFAIR_RECURSIVE_LOCK_INIT
    os_unfair_recursive_lock_lock(&_loadImagesLock);
    work();
    os_unfair_recursive_lock_unlock(&_loadImagesLock);
#else
    pthread_mutex_lock(&_loadImagesLock);
    work();
    pthread_mutex_unlock(&_loadImagesLock);
#endif
}

void AllImages::withWriteLock(void (^work)())
{
#ifdef OS_UNFAIR_RECURSIVE_LOCK_INIT
    os_unfair_recursive_lock_lock(&_loadImagesLock);
    work();
    os_unfair_recursive_lock_unlock(&_loadImagesLock);
#else
    pthread_mutex_lock(&_loadImagesLock);
    work();
    pthread_mutex_unlock(&_loadImagesLock);
#endif
}

void AllImages::withNotifiersLock(void (^work)()) const
{
#ifdef OS_UNFAIR_RECURSIVE_LOCK_INIT
    os_unfair_recursive_lock_lock(&_notifiersLock);
    work();
    os_unfair_recursive_lock_unlock(&_notifiersLock);
#else
    pthread_mutex_lock(&_notifiersLock);
    work();
    pthread_mutex_unlock(&_notifiersLock);
#endif
}
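
// A usage sketch (not in the original source): these helpers take a block so the lock
// is always balanced, and on OSes where OS_UNFAIR_RECURSIVE_LOCK_INIT is defined the
// lock is recursive, so a callback that re-enters dyld3 on the same thread won't deadlock:
//
//     gAllImages.withReadLock(^{
//         // read-only walk of _loadedImages while no other thread can mutate it
//     });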

void AllImages::mirrorToOldAllImageInfos()
{
    withReadLock(^(){
        // set infoArray to NULL to denote it is in-use
        _oldAllImageInfos->infoArray = nullptr;

        // if array not large enough, re-alloc it
        uint32_t imageCount = (uint32_t)_loadedImages.count();
        if ( _oldArrayAllocCount < imageCount ) {
            uint32_t newAllocCount    = imageCount + 16;
            dyld_image_info* newArray = (dyld_image_info*)::malloc(sizeof(dyld_image_info)*newAllocCount);
            if ( _oldAllImageArray != nullptr ) {
                ::memcpy(newArray, _oldAllImageArray, sizeof(dyld_image_info)*_oldAllImageInfos->infoArrayCount);
                ::free(_oldAllImageArray);
            }
            _oldAllImageArray   = newArray;
            _oldArrayAllocCount = newAllocCount;
        }

        // fill out array to mirror current image list
        int index = 0;
        for (const LoadedImage& li : _loadedImages) {
            _oldAllImageArray[index].imageLoadAddress = li.loadedAddress();
            _oldAllImageArray[index].imageFilePath    = imagePath(li.image());
            _oldAllImageArray[index].imageFileModDate = 0;
            ++index;
        }

        // set infoArray back to base address of array (so other process can now read)
        _oldAllImageInfos->infoArrayCount           = imageCount;
        _oldAllImageInfos->infoArrayChangeTimestamp = mach_absolute_time();
        _oldAllImageInfos->infoArray                = _oldAllImageArray;

        // <rdar://problem/42668846> update UUID array if needed
        uint32_t nonCachedCount = 1;  // always add dyld
        for (const LoadedImage& li : _loadedImages) {
            if ( !li.loadedAddress()->inDyldCache() )
                ++nonCachedCount;
        }
        if ( nonCachedCount != _oldAllImageInfos->uuidArrayCount ) {
            // set uuidArray to NULL to denote it is in-use
            _oldAllImageInfos->uuidArray = nullptr;
            // make sure allocation can hold all uuids
            if ( _oldUUIDAllocCount < nonCachedCount ) {
                uint32_t newAllocCount = (nonCachedCount + 3) & (-4);  // round up to multiple of 4
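                // e.g. (5+3) & (-4) == 8 and (8+3) & (-4) == 8: adding 3 then masking
                // off the low two bits rounds up to the next multiple of 4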
                dyld_uuid_info* newArray = (dyld_uuid_info*)::malloc(sizeof(dyld_uuid_info)*newAllocCount);
                if ( _oldUUIDArray != nullptr )
                    ::free(_oldUUIDArray);
                _oldUUIDArray      = newArray;
                _oldUUIDAllocCount = newAllocCount;
            }
            // add dyld then all images not in dyld cache
            const MachOFile* dyldMF = (MachOFile*)_oldAllImageInfos->dyldImageLoadAddress;
            _oldUUIDArray[0].imageLoadAddress = dyldMF;
            dyldMF->getUuid(_oldUUIDArray[0].imageUUID);
            index = 1;
            for (const LoadedImage& li : _loadedImages) {
                if ( !li.loadedAddress()->inDyldCache() ) {
                    _oldUUIDArray[index].imageLoadAddress = li.loadedAddress();
                    li.loadedAddress()->getUuid(_oldUUIDArray[index].imageUUID);
                    ++index;
                }
            }
            // set uuidArray back to base address of array (so kernel can now read)
            _oldAllImageInfos->uuidArray      = _oldUUIDArray;
            _oldAllImageInfos->uuidArrayCount = nonCachedCount;
        }
    });
}
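
// A monitoring-process sketch (not in the original source): a debugger or profiler
// finds the dyld_all_image_infos mirrored above via task_info(); assuming the standard
// TASK_DYLD_INFO flavor from <mach/task_info.h>:
//
//     task_dyld_info_data_t dyldInfo;
//     mach_msg_type_number_t infoCount = TASK_DYLD_INFO_COUNT;
//     if ( task_info(task, TASK_DYLD_INFO, (task_info_t)&dyldInfo, &infoCount) == KERN_SUCCESS ) {
//         // dyldInfo.all_image_info_addr is the address of dyld_all_image_infos
//     }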

void AllImages::addImages(const Array<LoadedImage>& newImages)
{
    // copy into _loadedImages
    withWriteLock(^(){
        _loadedImages.append(newImages);
        // if any added image is not in the shared cache, recompute bounds
        for (const LoadedImage& li : newImages) {
            if ( !((MachOAnalyzer*)li.loadedAddress())->inDyldCache() ) {
                recomputeBounds();
                break;
            }
        }
    });
}

void AllImages::runImageNotifiers(const Array<LoadedImage>& newImages)
{
    uint32_t count = (uint32_t)newImages.count();
    assert(count != 0);

    if ( _oldAllImageInfos != nullptr ) {
        // sync to old all image infos struct
        mirrorToOldAllImageInfos();

        // tell debugger about new images
        dyld_image_info oldDyldInfo[count];
        for (uint32_t i=0; i < count; ++i) {
            oldDyldInfo[i].imageLoadAddress = newImages[i].loadedAddress();
            oldDyldInfo[i].imageFilePath    = imagePath(newImages[i].image());
            oldDyldInfo[i].imageFileModDate = 0;
        }
        _oldAllImageInfos->notification(dyld_image_adding, count, oldDyldInfo);
    }

    // log loads
    for (const LoadedImage& li : newImages) {
        log_loads("dyld: %s\n", imagePath(li.image()));
    }

#if !TARGET_IPHONE_SIMULATOR
    // call kdebug trace for each image
    if (kdebug_is_enabled(KDBG_CODE(DBG_DYLD, DBG_DYLD_UUID, DBG_DYLD_UUID_MAP_A))) {
        for (const LoadedImage& li : newImages) {
            const closure::Image* image = li.image();
            struct stat stat_buf;
            fsid_t      fsid    = {{ 0, 0 }};
            fsobj_id_t  fsobjid = { 0, 0 };
            if ( !image->inDyldCache() && (stat(imagePath(image), &stat_buf) == 0) ) {
                fsobjid = *(fsobj_id_t*)&stat_buf.st_ino;
                fsid    = {{ stat_buf.st_dev, 0 }};
            }
            uuid_t uuid;
            image->getUuid(uuid);
            kdebug_trace_dyld_image(DBG_DYLD_UUID_MAP_A, &uuid, fsobjid, fsid, li.loadedAddress());
        }
    }
#endif
    // call each _dyld_register_func_for_add_image function with each image
    withNotifiersLock(^{
        for (NotifyFunc func : _loadNotifiers) {
            for (const LoadedImage& li : newImages) {
                dyld3::ScopedTimer timer(DBG_DYLD_TIMING_FUNC_FOR_ADD_IMAGE, (uint64_t)li.loadedAddress(), (uint64_t)func, 0);
                log_notifications("dyld: add notifier %p called with mh=%p\n", func, li.loadedAddress());
                if ( li.image()->inDyldCache() )
                    func(li.loadedAddress(), (uintptr_t)_dyldCacheSlide);
                else
                    func(li.loadedAddress(), li.loadedAddress()->getSlide());
            }
        }
        for (LoadNotifyFunc func : _loadNotifiers2) {
            for (const LoadedImage& li : newImages) {
                dyld3::ScopedTimer timer(DBG_DYLD_TIMING_FUNC_FOR_ADD_IMAGE, (uint64_t)li.loadedAddress(), (uint64_t)func, 0);
                log_notifications("dyld: add notifier %p called with mh=%p\n", func, li.loadedAddress());
                if ( li.image()->inDyldCache() )
                    func(li.loadedAddress(), li.image()->path(), false);
                else
                    func(li.loadedAddress(), li.image()->path(), !li.image()->neverUnload());
            }
        }
    });

    // call objc about images that use objc
    if ( _objcNotifyMapped != nullptr ) {
        const char*        pathsBuffer[count];
        const mach_header* mhBuffer[count];
        uint32_t           imagesWithObjC = 0;
        for (const LoadedImage& li : newImages) {
            const closure::Image* image = li.image();
            if ( image->hasObjC() ) {
                pathsBuffer[imagesWithObjC] = imagePath(image);
                mhBuffer[imagesWithObjC]    = li.loadedAddress();
                ++imagesWithObjC;
            }
        }
        if ( imagesWithObjC != 0 ) {
            dyld3::ScopedTimer timer(DBG_DYLD_TIMING_OBJC_MAP, 0, 0, 0);
            (*_objcNotifyMapped)(imagesWithObjC, pathsBuffer, mhBuffer);
            if ( log_notifications("dyld: objc-mapped-notifier called with %d images:\n", imagesWithObjC) ) {
                for (uint32_t i=0; i < imagesWithObjC; ++i) {
                    log_notifications("dyld: objc-mapped: %p %s\n", mhBuffer[i], pathsBuffer[i]);
                }
            }
        }
    }

    // notify any processes tracking loads in this process
    notifyMonitorLoads(newImages);
}
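
// A client-side sketch (not in the original source): the _loadNotifiers walked above
// are registered through the public API in <mach-o/dyld.h>; the callback fires once
// for every image already loaded, then for each future load:
//
//     static void onAddImage(const struct mach_header* mh, intptr_t vmaddr_slide)
//     {
//         // inspect mh / record the slide
//     }
//     _dyld_register_func_for_add_image(&onAddImage);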

void AllImages::removeImages(const Array<LoadedImage>& unloadImages)
{
    // call each _dyld_register_func_for_remove_image function with each image
    withNotifiersLock(^{
        for (NotifyFunc func : _unloadNotifiers) {
            for (const LoadedImage& li : unloadImages) {
                dyld3::ScopedTimer timer(DBG_DYLD_TIMING_FUNC_FOR_REMOVE_IMAGE, (uint64_t)li.loadedAddress(), (uint64_t)func, 0);
                log_notifications("dyld: remove notifier %p called with mh=%p\n", func, li.loadedAddress());
                if ( li.image()->inDyldCache() )
                    func(li.loadedAddress(), (uintptr_t)_dyldCacheSlide);
                else
                    func(li.loadedAddress(), li.loadedAddress()->getSlide());
            }
        }
    });

    // call objc about images going away
    if ( _objcNotifyUnmapped != nullptr ) {
        for (const LoadedImage& li : unloadImages) {
            if ( li.image()->hasObjC() ) {
                (*_objcNotifyUnmapped)(imagePath(li.image()), li.loadedAddress());
                log_notifications("dyld: objc-unmapped-notifier called with image %p %s\n", li.loadedAddress(), imagePath(li.image()));
            }
        }
    }

#if !TARGET_IPHONE_SIMULATOR
    // call kdebug trace for each image
    if (kdebug_is_enabled(KDBG_CODE(DBG_DYLD, DBG_DYLD_UUID, DBG_DYLD_UUID_MAP_A))) {
        for (const LoadedImage& li : unloadImages) {
            const closure::Image* image = li.image();
            struct stat stat_buf;
            fsid_t      fsid    = {{ 0, 0 }};
            fsobj_id_t  fsobjid = { 0, 0 };
            if ( stat(imagePath(image), &stat_buf) == 0 ) {
                fsobjid = *(fsobj_id_t*)&stat_buf.st_ino;
                fsid    = {{ stat_buf.st_dev, 0 }};
            }
            uuid_t uuid;
            image->getUuid(uuid);
            kdebug_trace_dyld_image(DBG_DYLD_UUID_UNMAP_A, &uuid, fsobjid, fsid, li.loadedAddress());
        }
    }
#endif

    // remove each from _loadedImages
    withWriteLock(^(){
        for (const LoadedImage& uli : unloadImages) {
            for (LoadedImage& li : _loadedImages) {
                if ( uli.loadedAddress() == li.loadedAddress() ) {
                    _loadedImages.erase(li);
                    break;
                }
            }
        }
        recomputeBounds();
    });

    // sync to old all image infos struct
    mirrorToOldAllImageInfos();

    // tell debugger about removed images
    STACK_ALLOC_ARRAY(dyld_image_info, oldDyldInfo, unloadImages.count());
    for (const LoadedImage& li : unloadImages) {
        oldDyldInfo.push_back({li.loadedAddress(), li.image()->path(), 0});
    }
    _oldAllImageInfos->notification(dyld_image_removing, (uint32_t)oldDyldInfo.count(), &oldDyldInfo[0]);

    // notify any processes tracking loads in this process
    notifyMonitorUnloads(unloadImages);

    // finally, unmap images
    for (const LoadedImage& li : unloadImages) {
        if ( li.leaveMapped() ) {
            log_loads("dyld: unloaded but left mmapped %s\n", imagePath(li.image()));
        }
        else {
            // unmapImage() modifies its parameter, so use a copy
            LoadedImage copy = li;
            Loader::unmapImage(copy);
            log_loads("dyld: unloaded %s\n", imagePath(li.image()));
        }
    }
}

// must be called with writeLock held
void AllImages::recomputeBounds()
{
    _lowestNonCached  = UINTPTR_MAX;
    _highestNonCached = 0;
    for (const LoadedImage& li : _loadedImages) {
        const MachOLoaded* ml = li.loadedAddress();
        uintptr_t start = (uintptr_t)ml;
        if ( !((MachOAnalyzer*)ml)->inDyldCache() ) {
            if ( start < _lowestNonCached )
                _lowestNonCached = start;
            uintptr_t end = start + (uintptr_t)(li.image()->vmSizeToMap());
            if ( end > _highestNonCached )
                _highestNonCached = end;
        }
    }
}

uint32_t AllImages::count() const
{
    return (uint32_t)_loadedImages.count();
}

bool AllImages::dyldCacheHasPath(const char* path) const
{
    uint32_t dyldCacheImageIndex;
    if ( _dyldCacheAddress != nullptr )
        return _dyldCacheAddress->hasImagePath(path, dyldCacheImageIndex);
    return false;
}

const char* AllImages::imagePathByIndex(uint32_t index) const
{
    if ( index < _loadedImages.count() )
        return imagePath(_loadedImages[index].image());
    return nullptr;
}

const mach_header* AllImages::imageLoadAddressByIndex(uint32_t index) const
{
    if ( index < _loadedImages.count() )
        return _loadedImages[index].loadedAddress();
    return nullptr;
}

bool AllImages::findImage(const mach_header* loadAddress, LoadedImage& foundImage) const
{
    __block bool result = false;
    withReadLock(^(){
        for (const LoadedImage& li : _loadedImages) {
            if ( li.loadedAddress() == loadAddress ) {
                foundImage = li;
                result = true;
                break;
            }
        }
    });
    return result;
}

void AllImages::forEachImage(void (^handler)(const LoadedImage& loadedImage, bool& stop)) const
{
    withReadLock(^{
        bool stop = false;
        for (const LoadedImage& li : _loadedImages) {
            handler(li, stop);
            if ( stop )
                break;
        }
    });
}


const char* AllImages::pathForImageMappedAt(const void* addr) const
{
    if ( _initialImages != nullptr ) {
        // being called during libSystem initialization, so _loadedImages not allocated yet
        for (const LoadedImage& li : *_initialImages) {
            uint8_t permissions;
            if ( li.image()->containsAddress(addr, li.loadedAddress(), &permissions) ) {
                return li.image()->path();
            }
        }
        return nullptr;
    }

    // if address is in cache, do fast search of TEXT segments in cache
    __block const char* result = nullptr;
    if ( (_dyldCacheAddress != nullptr) && (addr > _dyldCacheAddress) ) {
        if ( addr < (void*)((uint8_t*)_dyldCacheAddress+_dyldCacheAddress->mappedSize()) ) {
            uint64_t cacheSlide       = (uint64_t)_dyldCacheAddress - _dyldCacheAddress->unslidLoadAddress();
            uint64_t unslidTargetAddr = (uint64_t)addr - cacheSlide;
            _dyldCacheAddress->forEachImageTextSegment(^(uint64_t loadAddressUnslid, uint64_t textSegmentSize, const unsigned char* dylibUUID, const char* installName, bool& stop) {
                if ( (loadAddressUnslid <= unslidTargetAddr) && (unslidTargetAddr < loadAddressUnslid+textSegmentSize) ) {
                    result = installName;
                    stop = true;
                }
            });
            if ( result != nullptr )
                return result;
        }
    }

    // slow path - search image list
    infoForImageMappedAt(addr, ^(const LoadedImage& foundImage, uint8_t permissions) {
        result = foundImage.image()->path();
    });

    return result;
}

void AllImages::infoForImageMappedAt(const void* addr, void (^handler)(const LoadedImage& foundImage, uint8_t permissions)) const
{
    __block uint8_t permissions;
    if ( _initialImages != nullptr ) {
        // being called during libSystem initialization, so _loadedImages not allocated yet
        for (const LoadedImage& li : *_initialImages) {
            if ( li.image()->containsAddress(addr, li.loadedAddress(), &permissions) ) {
                handler(li, permissions);
                break;
            }
        }
        return;
    }

    withReadLock(^{
        for (const LoadedImage& li : _loadedImages) {
            if ( li.image()->containsAddress(addr, li.loadedAddress(), &permissions) ) {
                handler(li, permissions);
                break;
            }
        }
    });
}


bool AllImages::infoForImageMappedAt(const void* addr, const MachOLoaded** ml, uint64_t* textSize, const char** path) const
{
    if ( _initialImages != nullptr ) {
        // being called during libSystem initialization, so _loadedImages not allocated yet
        for (const LoadedImage& li : *_initialImages) {
            uint8_t permissions;
            if ( li.image()->containsAddress(addr, li.loadedAddress(), &permissions) ) {
                if ( ml != nullptr )
                    *ml = li.loadedAddress();
                if ( path != nullptr )
                    *path = li.image()->path();
                if ( textSize != nullptr ) {
                    *textSize = li.image()->textSize();
                }
                return true;
            }
        }
        return false;
    }

    // if address is in cache, do fast search of TEXT segments in cache
    __block bool result = false;
    if ( (_dyldCacheAddress != nullptr) && (addr > _dyldCacheAddress) ) {
        if ( addr < (void*)((uint8_t*)_dyldCacheAddress+_dyldCacheAddress->mappedSize()) ) {
            uint64_t cacheSlide       = (uint64_t)_dyldCacheAddress - _dyldCacheAddress->unslidLoadAddress();
            uint64_t unslidTargetAddr = (uint64_t)addr - cacheSlide;
            _dyldCacheAddress->forEachImageTextSegment(^(uint64_t loadAddressUnslid, uint64_t textSegmentSize, const unsigned char* dylibUUID, const char* installName, bool& stop) {
                if ( (loadAddressUnslid <= unslidTargetAddr) && (unslidTargetAddr < loadAddressUnslid+textSegmentSize) ) {
                    if ( ml != nullptr )
                        *ml = (MachOLoaded*)(loadAddressUnslid + cacheSlide);
                    if ( path != nullptr )
                        *path = installName;
                    if ( textSize != nullptr )
                        *textSize = textSegmentSize;
                    stop = true;
                    result = true;
                }
            });
            if ( result )
                return result;
        }
    }

    // slow path - search image list
    infoForImageMappedAt(addr, ^(const LoadedImage& foundImage, uint8_t permissions) {
        if ( ml != nullptr )
            *ml = foundImage.loadedAddress();
        if ( path != nullptr )
            *path = foundImage.image()->path();
        if ( textSize != nullptr )
            *textSize = foundImage.image()->textSize();
        result = true;
    });

    return result;
}

// same as infoForImageMappedAt(), but only look at images not in the dyld cache
void AllImages::infoForNonCachedImageMappedAt(const void* addr, void (^handler)(const LoadedImage& foundImage, uint8_t permissions)) const
{
    __block uint8_t permissions;
    if ( _initialImages != nullptr ) {
        // being called during libSystem initialization, so _loadedImages not allocated yet
        for (const LoadedImage& li : *_initialImages) {
            if ( !((MachOAnalyzer*)li.loadedAddress())->inDyldCache() ) {
                if ( li.image()->containsAddress(addr, li.loadedAddress(), &permissions) ) {
                    handler(li, permissions);
                    break;
                }
            }
        }
        return;
    }

    withReadLock(^{
        for (const LoadedImage& li : _loadedImages) {
            if ( !((MachOAnalyzer*)li.loadedAddress())->inDyldCache() ) {
                if ( li.image()->containsAddress(addr, li.loadedAddress(), &permissions) ) {
                    handler(li, permissions);
                    break;
                }
            }
        }
    });
}

bool AllImages::immutableMemory(const void* addr, size_t length) const
{
    // quick check to see if in shared cache
    if ( _dyldCacheAddress != nullptr ) {
        bool readOnly;
        if ( _dyldCacheAddress->inCache(addr, length, readOnly) ) {
            return readOnly;
        }
    }

    __block bool result = false;
    withReadLock(^() {
        // quick reject: if the range lies outside all non-cached images, it cannot be immutable image memory
        if ( ((uintptr_t)addr < _lowestNonCached) || ((uintptr_t)addr+length > _highestNonCached) ) {
            result = false;
            return;
        }
        // slow walk through all images, only look at images not in dyld cache
        for (const LoadedImage& li : _loadedImages) {
            if ( !((MachOAnalyzer*)li.loadedAddress())->inDyldCache() ) {
                uint8_t permissions;
                if ( li.image()->containsAddress(addr, li.loadedAddress(), &permissions) ) {
                    result = ((permissions & VM_PROT_WRITE) == 0) && li.image()->neverUnload();
                    break;
                }
            }
        }
    });

    return result;
}
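
// A usage sketch (not in the original source): this predicate backs a dyld SPI that
// clients such as the objc runtime use to decide whether a pointer can be cached
// without copying; assuming the declaration in <mach-o/dyld_priv.h>:
//
//     if ( _dyld_is_memory_immutable(str, strlen(str) + 1) ) {
//         // str lives in read-only memory of a never-unload image; safe to keep
//     }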

void AllImages::infoForImageWithLoadAddress(const MachOLoaded* mh, void (^handler)(const LoadedImage& foundImage)) const
{
    withReadLock(^{
        for (const LoadedImage& li : _loadedImages) {
            if ( li.loadedAddress() == mh ) {
                handler(li);
                break;
            }
        }
    });
}

bool AllImages::findImageNum(closure::ImageNum imageNum, LoadedImage& foundImage) const
{
    if ( _initialImages != nullptr ) {
        // being called during libSystem initialization, so _loadedImages not allocated yet
        for (const LoadedImage& li : *_initialImages) {
            if ( li.image()->representsImageNum(imageNum) ) {
                foundImage = li;
                return true;
            }
        }
        return false;
    }

    bool result = false;
    for (const LoadedImage& li : _loadedImages) {
        if ( li.image()->representsImageNum(imageNum) ) {
            foundImage = li;
            result = true;
            break;
        }
    }

    return result;
}

const MachOLoaded* AllImages::findDependent(const MachOLoaded* mh, uint32_t depIndex)
{
    __block const MachOLoaded* result = nullptr;
    withReadLock(^{
        for (const LoadedImage& li : _loadedImages) {
            if ( li.loadedAddress() == mh ) {
                closure::ImageNum depImageNum = li.image()->dependentImageNum(depIndex);
                LoadedImage depLi;
                if ( findImageNum(depImageNum, depLi) )
                    result = depLi.loadedAddress();
                break;
            }
        }
    });
    return result;
}


void AllImages::breadthFirstRecurseDependents(Array<closure::ImageNum>& visited, const LoadedImage& nodeLi, bool& stopped, void (^handler)(const LoadedImage& aLoadedImage, bool& stop)) const
{
    // call handler on all direct dependents (unless already visited)
    STACK_ALLOC_ARRAY(LoadedImage, dependentsToRecurse, 256);
    nodeLi.image()->forEachDependentImage(^(uint32_t depIndex, closure::Image::LinkKind kind, closure::ImageNum depImageNum, bool& depStop) {
        if ( kind == closure::Image::LinkKind::upward )
            return;
        if ( visited.contains(depImageNum) )
            return;
        LoadedImage depLi;
        if ( !findImageNum(depImageNum, depLi) )
            return;
        handler(depLi, depStop);
        visited.push_back(depImageNum);
        if ( depStop ) {
            stopped = true;
            return;
        }
        dependentsToRecurse.push_back(depLi);
    });
    if ( stopped )
        return;
    // recurse on all dependents just visited
    for (LoadedImage& depLi : dependentsToRecurse) {
        breadthFirstRecurseDependents(visited, depLi, stopped, handler);
    }
}

void AllImages::visitDependentsTopDown(const LoadedImage& start, void (^handler)(const LoadedImage& aLoadedImage, bool& stop)) const
{
    withReadLock(^{
        STACK_ALLOC_ARRAY(closure::ImageNum, visited, count());
        bool stop = false;
        handler(start, stop);
        if ( stop )
            return;
        visited.push_back(start.image()->imageNum());
        breadthFirstRecurseDependents(visited, start, stop, handler);
    });
}

const MachOLoaded* AllImages::mainExecutable() const
{
    assert(_programVars != nullptr);
    return (const MachOLoaded*)_programVars->mh;
}

const closure::Image* AllImages::mainExecutableImage() const
{
    assert(_mainClosure != nullptr);
    return _mainClosure->images()->imageForNum(_mainClosure->topImage());
}

void AllImages::setMainPath(const char* path)
{
    _mainExeOverridePath = path;
}

const char* AllImages::imagePath(const closure::Image* image) const
{
#if __IPHONE_OS_VERSION_MIN_REQUIRED
    // on iOS and watchOS, apps may be moved on device after closure built
    if ( _mainExeOverridePath != nullptr ) {
        if ( image == mainExecutableImage() )
            return _mainExeOverridePath;
    }
#endif
    return image->path();
}

dyld_platform_t AllImages::platform() const {
    return _platform;
}

void AllImages::incRefCount(const mach_header* loadAddress)
{
    for (DlopenCount& entry : _dlopenRefCounts) {
        if ( entry.loadAddress == loadAddress ) {
            // found existing DlopenCount entry, bump counter
            entry.refCount += 1;
            return;
        }
    }

    // no existing DlopenCount, add new one
    _dlopenRefCounts.push_back({ loadAddress, 1 });
}

void AllImages::decRefCount(const mach_header* loadAddress)
{
    bool doCollect = false;
    for (DlopenCount& entry : _dlopenRefCounts) {
        if ( entry.loadAddress == loadAddress ) {
            // found existing DlopenCount entry, decrement counter
            entry.refCount -= 1;
            if ( entry.refCount == 0 ) {
                _dlopenRefCounts.erase(entry);
                doCollect = true;
                break;
            }
            return;
        }
    }
    if ( doCollect )
        garbageCollectImages();
}
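
// A usage sketch (not in the original source, hypothetical dylib path): the counters
// above pair dlopen()/dlclose() calls, so only the final dlclose() triggers collection:
//
//     void* h1 = dlopen("/tmp/libplugin.dylib", RTLD_NOW);  // refCount 0 -> 1
//     void* h2 = dlopen("/tmp/libplugin.dylib", RTLD_NOW);  // refCount 1 -> 2
//     dlclose(h1);                                          // refCount 2 -> 1, no GC
//     dlclose(h2);                                          // refCount 1 -> 0, garbageCollectImages()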


#if __MAC_OS_X_VERSION_MIN_REQUIRED
NSObjectFileImage AllImages::addNSObjectFileImage(const OFIInfo& image)
{
    __block uint64_t imageNum = 0;
    withWriteLock(^{
        imageNum = ++_nextObjectFileImageNum;
        _objectFileImages.push_back(image);
        _objectFileImages.back().imageNum = imageNum;
    });
    return (NSObjectFileImage)imageNum;
}

bool AllImages::forNSObjectFileImage(NSObjectFileImage imageHandle,
                                     void (^handler)(OFIInfo& image)) {
    uint64_t imageNum = (uint64_t)imageHandle;
    bool __block foundImage = false;
    withReadLock(^{
        for (OFIInfo& ofi : _objectFileImages) {
            if ( ofi.imageNum == imageNum ) {
                handler(ofi);
                foundImage = true;
                return;
            }
        }
    });

    return foundImage;
}

void AllImages::removeNSObjectFileImage(NSObjectFileImage imageHandle)
{
    uint64_t imageNum = (uint64_t)imageHandle;
    withWriteLock(^{
        for (OFIInfo& ofi : _objectFileImages) {
            if ( ofi.imageNum == imageNum ) {
                _objectFileImages.erase(ofi);
                return;
            }
        }
    });
}
#endif


class VIS_HIDDEN Reaper
{
public:
    struct ImageAndUse
    {
        const LoadedImage* li;
        bool               inUse;
    };
                        Reaper(Array<ImageAndUse>& unloadables, AllImages*);
    void                garbageCollect();
    void                finalizeDeadImages();
private:

    void                markDirectlyDlopenedImagesAsUsed();
    void                markDependentOfInUseImages();
    void                markDependentsOf(const LoadedImage*);
    uint32_t            inUseCount();
    void                dump(const char* msg);

    Array<ImageAndUse>& _unloadables;
    AllImages*          _allImages;
    uint32_t            _deadCount;
};

Reaper::Reaper(Array<ImageAndUse>& unloadables, AllImages* all)
 : _unloadables(unloadables), _allImages(all), _deadCount(0)
{
}

void Reaper::markDirectlyDlopenedImagesAsUsed()
{
    for (AllImages::DlopenCount& entry : _allImages->_dlopenRefCounts) {
        if ( entry.refCount != 0 ) {
            for (ImageAndUse& iu : _unloadables) {
                if ( iu.li->loadedAddress() == entry.loadAddress ) {
                    iu.inUse = true;
                    break;
                }
            }
        }
    }
}

uint32_t Reaper::inUseCount()
{
    uint32_t count = 0;
    for (ImageAndUse& iu : _unloadables) {
        if ( iu.inUse )
            ++count;
    }
    return count;
}

void Reaper::markDependentsOf(const LoadedImage* li)
{
    li->image()->forEachDependentImage(^(uint32_t depIndex, closure::Image::LinkKind kind, closure::ImageNum depImageNum, bool& stop) {
        for (ImageAndUse& iu : _unloadables) {
            if ( !iu.inUse && iu.li->image()->representsImageNum(depImageNum) ) {
                iu.inUse = true;
                break;
            }
        }
    });
}

void Reaper::markDependentOfInUseImages()
{
    for (ImageAndUse& iu : _unloadables) {
        if ( iu.inUse )
            markDependentsOf(iu.li);
    }
}

void Reaper::dump(const char* msg)
{
    //log("%s:\n", msg);
    //for (ImageAndUse& iu : _unloadables) {
    //    log("  in-used=%d  %s\n", iu.inUse, iu.li->image()->path());
    //}
}

void Reaper::garbageCollect()
{
    //dump("all unloadable images");

    // mark all dylibs directly dlopen'ed as in use
    markDirectlyDlopenedImagesAsUsed();

    //dump("directly dlopen()'ed marked");

    // iteratively mark dependents of in-use dylibs as in-use until in-use count stops changing
    uint32_t lastCount = inUseCount();
    bool countChanged = false;
    do {
        markDependentOfInUseImages();
        //dump("dependents marked");
        uint32_t newCount = inUseCount();
        countChanged = (newCount != lastCount);
        lastCount = newCount;
    } while (countChanged);

    _deadCount = (uint32_t)_unloadables.count() - inUseCount();
}

void Reaper::finalizeDeadImages()
{
    if ( _deadCount == 0 )
        return;
    __cxa_range_t ranges[_deadCount];
    __cxa_range_t* rangesArray = ranges;
    __block unsigned int rangesCount = 0;
    for (ImageAndUse& iu : _unloadables) {
        if ( iu.inUse )
            continue;
        iu.li->image()->forEachDiskSegment(^(uint32_t segIndex, uint32_t fileOffset, uint32_t fileSize, int64_t vmOffset, uint64_t vmSize, uint8_t permissions, bool& stop) {
            if ( permissions & VM_PROT_EXECUTE ) {
                rangesArray[rangesCount].addr   = (char*)(iu.li->loadedAddress()) + vmOffset;
                rangesArray[rangesCount].length = (size_t)vmSize;
                ++rangesCount;
            }
        });
    }
    __cxa_finalize_ranges(ranges, rangesCount);
}


// This function is called at the end of dlclose() when the reference count goes to zero.
// The dylib being unloaded may have brought in other dependent dylibs when it was loaded.
// Those dependent dylibs need to be unloaded, but only if they are not referenced by
// something else. We use a standard mark and sweep garbage collection.
//
// The tricky part is that when a dylib is unloaded it may have a termination function that
// can run and itself call dlclose() on yet another dylib. The problem is that this
// sort of garbage collection is not re-entrant. Instead, a terminator's call to dlclose()
// which calls garbageCollectImages() just bumps an atomic counter, and the garbage
// collection is re-done when the current pass is finished.
//
// Also note that this is done within the _loadedImages writer lock, so any dlopen/dlclose
// on other threads are blocked while this garbage collection runs.
//
void AllImages::garbageCollectImages()
{
    // if some other thread is currently GC'ing images, let other thread do the work
    int32_t newCount = OSAtomicIncrement32(&_gcCount);
    if ( newCount != 1 )
        return;

    do {
        STACK_ALLOC_ARRAY(Reaper::ImageAndUse, unloadables, _loadedImages.count());
        withReadLock(^{
            for (const LoadedImage& li : _loadedImages) {
                if ( !li.image()->neverUnload() /*&& !li.neverUnload()*/ ) {
                    unloadables.push_back({&li, false});
                    //fprintf(stderr, "unloadable[%lu] %p %s\n", unloadables.count(), li.loadedAddress(), li.image()->path());
                }
            }
        });
        // make reaper object to do garbage collection and notifications
        Reaper reaper(unloadables, this);
        reaper.garbageCollect();

        // FIXME: we should sort dead images so higher level ones are terminated first

        // call cxa_finalize_ranges of dead images
        reaper.finalizeDeadImages();

        // FIXME: call static terminators of dead images

        // FIXME: DOF unregister

        //fprintf(stderr, "_loadedImages before GC removals:\n");
        //for (const LoadedImage& li : _loadedImages) {
        //    fprintf(stderr, "   loadAddr=%p, path=%s\n", li.loadedAddress(), li.image()->path());
        //}

        // make a copy of the LoadedImages we want to remove,
        // because unloadables[] points into the _loadedImages array we are about to shrink
        STACK_ALLOC_ARRAY(LoadedImage, unloadImages, _loadedImages.count());
        for (const Reaper::ImageAndUse& iu : unloadables) {
            if ( !iu.inUse )
                unloadImages.push_back(*iu.li);
        }
        // remove entries from _loadedImages
        if ( !unloadImages.empty() ) {
            removeImages(unloadImages);

            //fprintf(stderr, "_loadedImages after GC removals:\n");
            //for (const LoadedImage& li : _loadedImages) {
            //    fprintf(stderr, "   loadAddr=%p, path=%s\n", li.loadedAddress(), li.image()->path());
            //}
        }

        // if some other thread called GC during our work, redo GC on its behalf
        newCount = OSAtomicDecrement32(&_gcCount);
    }
    while (newCount > 0);
}
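
// A re-entrancy sketch (not in the original source, hypothetical dylibs and handle):
// if libA.dylib's terminator calls dlclose() on libB.dylib, the inner call sees
// _gcCount != 1 and returns immediately; the outer loop observes the remaining count
// from OSAtomicDecrement32() and runs another mark-and-sweep pass on libB's behalf.
//
//     __attribute__((destructor)) static void libA_terminator()
//     {
//         dlclose(gLibB_handle);  // re-enters garbageCollectImages(); just bumps _gcCount
//     }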


void AllImages::addLoadNotifier(NotifyFunc func)
{
    // callback about already loaded images
    withReadLock(^{
        for (const LoadedImage& li : _loadedImages) {
            dyld3::ScopedTimer timer(DBG_DYLD_TIMING_FUNC_FOR_ADD_IMAGE, (uint64_t)li.loadedAddress(), (uint64_t)func, 0);
            log_notifications("dyld: add notifier %p called with mh=%p\n", func, li.loadedAddress());
            if ( li.image()->inDyldCache() )
                func(li.loadedAddress(), (uintptr_t)_dyldCacheSlide);
            else
                func(li.loadedAddress(), li.loadedAddress()->getSlide());
        }
    });

    // add to list of functions to call about future loads
    withNotifiersLock(^{
        _loadNotifiers.push_back(func);
    });
}

void AllImages::addUnloadNotifier(NotifyFunc func)
{
    // add to list of functions to call about future unloads
    withNotifiersLock(^{
        _unloadNotifiers.push_back(func);
    });
}

void AllImages::addLoadNotifier(LoadNotifyFunc func)
{
    // callback about already loaded images
    withReadLock(^{
        for (const LoadedImage& li : _loadedImages) {
            dyld3::ScopedTimer timer(DBG_DYLD_TIMING_FUNC_FOR_ADD_IMAGE, (uint64_t)li.loadedAddress(), (uint64_t)func, 0);
            log_notifications("dyld: add notifier %p called with mh=%p\n", func, li.loadedAddress());
            func(li.loadedAddress(), li.image()->path(), !li.image()->neverUnload());
        }
    });

    // add to list of functions to call about future loads
    withNotifiersLock(^{
        _loadNotifiers2.push_back(func);
    });
}


void AllImages::setObjCNotifiers(_dyld_objc_notify_mapped map, _dyld_objc_notify_init init, _dyld_objc_notify_unmapped unmap)
{
    _objcNotifyMapped   = map;
    _objcNotifyInit     = init;
    _objcNotifyUnmapped = unmap;

    // callback about already loaded images
    uint32_t maxCount = count();
    STACK_ALLOC_ARRAY(const mach_header*, mhs,   maxCount);
    STACK_ALLOC_ARRAY(const char*,        paths, maxCount);
    // don't need _mutex here because this is called when process is still single threaded
    for (const LoadedImage& li : _loadedImages) {
        if ( li.image()->hasObjC() ) {
            paths.push_back(imagePath(li.image()));
            mhs.push_back(li.loadedAddress());
        }
    }
    if ( !mhs.empty() ) {
        (*map)((uint32_t)mhs.count(), &paths[0], &mhs[0]);
        if ( log_notifications("dyld: objc-mapped-notifier called with %ld images:\n", mhs.count()) ) {
            for (uintptr_t i=0; i < mhs.count(); ++i) {
                log_notifications("dyld: objc-mapped: %p %s\n", mhs[i], paths[i]);
            }
        }
    }
}
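
// A registration sketch (not in the original source): the objc runtime installs these
// callbacks at startup through the private SPI declared in <mach-o/dyld_priv.h>, and
// the mapped notifier is then invoked synchronously above for every already-loaded
// image that contains objc metadata:
//
//     _dyld_objc_notify_register(&map_images, &load_images, &unmap_image);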

void AllImages::applyInterposingToDyldCache(const closure::Closure* closure)
{
    dyld3::ScopedTimer timer(DBG_DYLD_TIMING_APPLY_INTERPOSING, 0, 0, 0);
    const uintptr_t cacheStart = (uintptr_t)_dyldCacheAddress;
    __block closure::ImageNum lastCachedDylibImageNum = 0;
    __block const closure::Image* lastCachedDylibImage = nullptr;
    __block bool suspendedAccounting = false;
    closure->forEachPatchEntry(^(const closure::Closure::PatchEntry& entry) {
        if ( entry.overriddenDylibInCache != lastCachedDylibImageNum ) {
            lastCachedDylibImage    = closure::ImageArray::findImage(imagesArrays(), entry.overriddenDylibInCache);
            assert(lastCachedDylibImage != nullptr);
            lastCachedDylibImageNum = entry.overriddenDylibInCache;
        }
        if ( !suspendedAccounting ) {
            Loader::vmAccountingSetSuspended(true, log_fixups);
            suspendedAccounting = true;
        }
        uintptr_t newValue = 0;
        LoadedImage foundImage;
        switch ( entry.replacement.image.kind ) {
            case closure::Image::ResolvedSymbolTarget::kindImage:
                assert(findImageNum(entry.replacement.image.imageNum, foundImage));
                newValue = (uintptr_t)(foundImage.loadedAddress()) + (uintptr_t)entry.replacement.image.offset;
                break;
            case closure::Image::ResolvedSymbolTarget::kindSharedCache:
                newValue = (uintptr_t)_dyldCacheAddress + (uintptr_t)entry.replacement.sharedCache.offset;
                break;
            case closure::Image::ResolvedSymbolTarget::kindAbsolute:
                // this means the symbol was missing in the cache override dylib, so set any uses to NULL
                newValue = (uintptr_t)entry.replacement.absolute.value;
                break;
            default:
                assert(0 && "bad replacement kind");
        }
        lastCachedDylibImage->forEachPatchableUseOfExport(entry.exportCacheOffset, ^(closure::Image::PatchableExport::PatchLocation patchLocation) {
            uintptr_t* loc = (uintptr_t*)(cacheStart+patchLocation.cacheOffset);
#if __has_feature(ptrauth_calls)
            if ( patchLocation.authenticated ) {
                MachOLoaded::ChainedFixupPointerOnDisk fixupInfo;
                fixupInfo.authRebase.auth      = true;
                fixupInfo.authRebase.addrDiv   = patchLocation.usesAddressDiversity;
                fixupInfo.authRebase.diversity = patchLocation.discriminator;
                fixupInfo.authRebase.key       = patchLocation.key;
                *loc = fixupInfo.signPointer(loc, newValue + patchLocation.getAddend());
                log_fixups("dyld: cache fixup: *%p = %p (JOP: diversity 0x%04X, addr-div=%d, key=%s)\n",
                           loc, (void*)*loc, patchLocation.discriminator, patchLocation.usesAddressDiversity, patchLocation.keyName());
                return;
            }
#endif
            log_fixups("dyld: cache fixup: *%p = 0x%0lX (dyld cache patch)\n", loc, newValue + (uintptr_t)patchLocation.getAddend());
            *loc = newValue + (uintptr_t)patchLocation.getAddend();
        });
    });
    if ( suspendedAccounting )
        Loader::vmAccountingSetSuspended(false, log_fixups);
}

void AllImages::runStartupInitialzers()
{
    __block bool mainExecutableInitializerNeedsToRun = true;
    __block uint32_t imageIndex = 0;
    while ( mainExecutableInitializerNeedsToRun ) {
        __block const closure::Image* image = nullptr;
        withReadLock(^{
            image = _loadedImages[imageIndex].image();
            if ( _loadedImages[imageIndex].loadedAddress()->isMainExecutable() )
                mainExecutableInitializerNeedsToRun = false;
        });
        runInitialzersBottomUp(image);
        ++imageIndex;
    }
}


// Find the image in _loadedImages which has ImageNum == num.
// Try indexHint first; if the hint is wrong, update it so the next use is faster.
LoadedImage AllImages::findImageNum(closure::ImageNum num, uint32_t& indexHint)
{
    __block LoadedImage copy;
    withReadLock(^{
        if ( (indexHint >= _loadedImages.count()) || !_loadedImages[indexHint].image()->representsImageNum(num) ) {
            indexHint = 0;
            for (indexHint=0; indexHint < _loadedImages.count(); ++indexHint) {
                if ( _loadedImages[indexHint].image()->representsImageNum(num) )
                    break;
            }
            assert(indexHint < _loadedImages.count());
        }
        copy = _loadedImages[indexHint];
    });
    return copy;
}


// Change the state of the LoadedImage in _loadedImages which has ImageNum == num.
// Only change state if current state is expectedCurrentState (atomic swap).
bool AllImages::swapImageState(closure::ImageNum num, uint32_t& indexHint, LoadedImage::State expectedCurrentState, LoadedImage::State newState)
{
    __block bool result = false;
    withWriteLock(^{
        if ( (indexHint >= _loadedImages.count()) || !_loadedImages[indexHint].image()->representsImageNum(num) ) {
            indexHint = 0;
            for (indexHint=0; indexHint < _loadedImages.count(); ++indexHint) {
                if ( _loadedImages[indexHint].image()->representsImageNum(num) )
                    break;
            }
            assert(indexHint < _loadedImages.count());
        }
        if ( _loadedImages[indexHint].state() == expectedCurrentState ) {
            _loadedImages[indexHint].setState(newState);
            result = true;
        }
    });
    return result;
}

// dyld3 pre-builds the order initializers need to be run (bottom up) in a list in the closure.
// This method uses that list to run all initializers.
// Because an initializer may call dlopen() and/or create threads, the _loadedImages array
// may move under us. So, never keep a pointer into it. Always reference images by ImageNum
// and use the hint to make that faster in the case where _loadedImages does not move.
void AllImages::runInitialzersBottomUp(const closure::Image* topImage)
{
    // walk closure specified initializer list, already ordered bottom up
    topImage->forEachImageToInitBefore(^(closure::ImageNum imageToInit, bool& stop) {
        // get copy of LoadedImage about imageToInit, but don't keep a reference into _loadedImages, because it may move if initializers call dlopen()
        uint32_t indexHint = 0;
        LoadedImage loadedImageCopy = findImageNum(imageToInit, indexHint);
        // skip if the image is already inited, or in the process of being inited (dependency cycle)
        if ( (loadedImageCopy.state() == LoadedImage::State::fixedUp) && swapImageState(imageToInit, indexHint, LoadedImage::State::fixedUp, LoadedImage::State::beingInited) ) {
            // tell objc to run any +load methods in image
            if ( (_objcNotifyInit != nullptr) && loadedImageCopy.image()->mayHavePlusLoads() ) {
                dyld3::ScopedTimer timer(DBG_DYLD_TIMING_OBJC_INIT, (uint64_t)loadedImageCopy.loadedAddress(), 0, 0);
                const char* path = imagePath(loadedImageCopy.image());
                log_notifications("dyld: objc-init-notifier called with mh=%p, path=%s\n", loadedImageCopy.loadedAddress(), path);
                (*_objcNotifyInit)(path, loadedImageCopy.loadedAddress());
            }

            // run all initializers in image
            runAllInitializersInImage(loadedImageCopy.image(), loadedImageCopy.loadedAddress());

            // advance state to inited
            swapImageState(imageToInit, indexHint, LoadedImage::State::beingInited, LoadedImage::State::inited);
        }
    });
}
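
// An ordering sketch (not in the original source, hypothetical images): if main links
// libFoo.dylib and libFoo links libBar.dylib, the closure's init-before list visits
// libBar, then libFoo, then main, so each image's initializers run only after every
// image below it has reached State::inited:
//
//     runInitialzersBottomUp(mainImage);  // inits libBar, then libFoo, then main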


void AllImages::runLibSystemInitializer(const LoadedImage& libSystem)
{
    // run all initializers in libSystem.dylib
    runAllInitializersInImage(libSystem.image(), libSystem.loadedAddress());

    // Note: during libSystem's initialization, libdyld_initializer() is called, which copies _initialImages to _loadedImages

    // mark libSystem.dylib as being inited, so a later recursive init will not re-run it
    for (LoadedImage& li : _loadedImages) {
        if ( li.loadedAddress() == libSystem.loadedAddress() ) {
            li.setState(LoadedImage::State::inited);
            break;
        }
    }
}

void AllImages::runAllInitializersInImage(const closure::Image* image, const MachOLoaded* ml)
{
    image->forEachInitializer(ml, ^(const void* func) {
        Initializer initFunc = (Initializer)func;
#if __has_feature(ptrauth_calls)
        initFunc = (Initializer)__builtin_ptrauth_sign_unauthenticated((void*)initFunc, 0, 0);
#endif
        {
            // name the timer variable so it is not destroyed until the initializer returns
            ScopedTimer timer(DBG_DYLD_TIMING_STATIC_INITIALIZER, (uint64_t)ml, (uint64_t)func, 0);
            initFunc(NXArgc, NXArgv, environ, appleParams, _programVars);
        }
        log_initializers("dyld: called initializer %p in %s\n", initFunc, image->path());
    });
}

const MachOLoaded* AllImages::dlopen(Diagnostics& diag, const char* path, bool rtldNoLoad, bool rtldLocal, bool rtldNoDelete, bool fromOFI, const void* callerAddress)
{
    // quick check if path is in shared cache and already loaded
    if ( _dyldCacheAddress != nullptr ) {
        uint32_t dyldCacheImageIndex;
        if ( _dyldCacheAddress->hasImagePath(path, dyldCacheImageIndex) ) {
            uint64_t mTime;
            uint64_t inode;
            const MachOLoaded* mh = (MachOLoaded*)_dyldCacheAddress->getIndexedImageEntry(dyldCacheImageIndex, mTime, inode);
            // Note: we do not need readLock because this is within global dlopen lock
            for (const LoadedImage& li : _loadedImages) {
                if ( li.loadedAddress() == mh ) {
                    return mh;
                }
            }
        }
    }

    __block closure::ImageNum callerImageNum = 0;
    STACK_ALLOC_ARRAY(LoadedImage, loadedList, 1024);
    for (const LoadedImage& li : _loadedImages) {
        loadedList.push_back(li);
        uint8_t permissions;
        if ( (callerImageNum == 0) && li.image()->containsAddress(callerAddress, li.loadedAddress(), &permissions) ) {
            callerImageNum = li.image()->imageNum();
        }
        //fprintf(stderr, "mh=%p, image=%p, imageNum=0x%04X, path=%s\n", li.loadedAddress(), li.image(), li.image()->imageNum(), li.image()->path());
    }
    uintptr_t alreadyLoadedCount = loadedList.count();

    // make closure
    closure::ImageNum topImageNum = 0;
    const closure::DlopenClosure* newClosure;

    // First try with closures from the shared cache permitted.
    // Then try again, forcing a new closure to be built.
    for (bool canUseSharedCacheClosure : { true, false }) {
        closure::FileSystemPhysical fileSystem;
        closure::ClosureBuilder::AtPath atPathHandling = (_allowAtPaths ? closure::ClosureBuilder::AtPath::all : closure::ClosureBuilder::AtPath::onlyInRPaths);
        closure::ClosureBuilder cb(_nextImageNum, fileSystem, _dyldCacheAddress, true, closure::gPathOverrides, atPathHandling);
        newClosure = cb.makeDlopenClosure(path, _mainClosure, loadedList, callerImageNum, rtldNoLoad, canUseSharedCacheClosure, &topImageNum);
        if ( newClosure == closure::ClosureBuilder::sRetryDlopenClosure ) {
            log_apis("   dlopen: closure builder needs to retry: %s\n", path);
            assert(canUseSharedCacheClosure);
            continue;
        }
        if ( (newClosure == nullptr) && (topImageNum == 0) ) {
            if ( cb.diagnostics().hasError())
                diag.error("%s", cb.diagnostics().errorMessage());
            else if ( !rtldNoLoad )
                diag.error("dlopen(): file not found: %s", path);
            return nullptr;
        }
        // save off next available ImageNum for use by next call to dlopen()
        _nextImageNum = cb.nextFreeImageNum();
        break;
    }

    if ( newClosure != nullptr ) {
        // if new closure contains an ImageArray, add it to list
        if ( const closure::ImageArray* newArray = newClosure->images() ) {
            appendToImagesArray(newArray);
        }
        log_apis("   dlopen: made closure: %p\n", newClosure);
    }

    // if already loaded, just bump refCount and return
    if ( (newClosure == nullptr) && (topImageNum != 0) ) {
        for (LoadedImage& li : _loadedImages) {
            if ( li.image()->imageNum() == topImageNum ) {
                // is already loaded
                const MachOLoaded* topLoadAddress = li.loadedAddress();
                if ( !li.image()->inDyldCache() )
                    incRefCount(topLoadAddress);
                log_apis("   dlopen: already loaded as '%s'\n", li.image()->path());
                // if previously opened with RTLD_LOCAL, but now opened with RTLD_GLOBAL, unhide it
                if ( !rtldLocal && li.hideFromFlatSearch() )
                    li.setHideFromFlatSearch(false);
                // if called with RTLD_NODELETE, mark it as never-unload
                if ( rtldNoDelete )
                    li.markLeaveMapped();
                return topLoadAddress;
            }
        }
    }

    // run loader to load all new images
    Loader loader(loadedList, _dyldCacheAddress, imagesArrays(), &dyld3::log_loads, &dyld3::log_segments, &dyld3::log_fixups, &dyld3::log_dofs);
    const closure::Image* topImage = closure::ImageArray::findImage(imagesArrays(), topImageNum);
    if ( newClosure == nullptr ) {
        if ( topImageNum < dyld3::closure::kLastDyldCacheImageNum )
            log_apis("   dlopen: using image in dyld shared cache %p\n", topImage);
        else
            log_apis("   dlopen: using pre-built dlopen closure %p\n", topImage);
    }
    uintptr_t topIndex = loadedList.count();
    LoadedImage topLoadedImage = LoadedImage::make(topImage);
    if ( rtldLocal && !topImage->inDyldCache() )
        topLoadedImage.setHideFromFlatSearch(true);
    if ( rtldNoDelete && !topImage->inDyldCache() )
        topLoadedImage.markLeaveMapped();
    loader.addImage(topLoadedImage);

    // recursively load all dependents and fill in allImages array
    loader.completeAllDependents(diag, topIndex);
    if ( diag.hasError() )
        return nullptr;
    loader.mapAndFixupAllImages(diag, _processDOFs, fromOFI, topIndex);
    if ( diag.hasError() )
        return nullptr;

    const MachOLoaded* topLoadAddress = loadedList[topIndex].loadedAddress();

    // bump dlopen refcount of image directly loaded
    if ( !topImage->inDyldCache() )
        incRefCount(topLoadAddress);

    // tell gAllImages about new images
    const uint32_t newImageCount = (uint32_t)(loadedList.count() - alreadyLoadedCount);
    addImages(loadedList.subArray(alreadyLoadedCount, newImageCount));

    // if closure adds images that override dyld cache, patch cache
    if ( newClosure != nullptr )
        applyInterposingToDyldCache(newClosure);

    runImageNotifiers(loadedList.subArray(alreadyLoadedCount, newImageCount));

    // run initializers
    runInitialzersBottomUp(topImage);

    return topLoadAddress;
}
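
// A caller-side sketch (not in the original source, hypothetical dylib path): this is
// the dyld3 backend reached from the public dlopen() in libdyld, so the closure-building
// path above is exercised by an ordinary call such as:
//
//     void* handle = dlopen("/tmp/libplugin.dylib", RTLD_NOW | RTLD_LOCAL);
//     if ( handle == nullptr )
//         fprintf(stderr, "dlopen failed: %s\n", dlerror());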

void AllImages::appendToImagesArray(const closure::ImageArray* newArray)
{
    _imagesArrays.push_back(newArray);
}

const Array<const closure::ImageArray*>& AllImages::imagesArrays()
{
    return _imagesArrays.array();
}

bool AllImages::isRestricted() const
{
    return !_allowEnvPaths;
}




} // namespace dyld3