/*
 * Copyright (c) 2017 Apple Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */


#include <stdint.h>
#include <sys/stat.h>
#include <sys/sysctl.h>
#include <mach/mach_time.h> // mach_absolute_time()
#include <pthread/pthread.h>
#include <libkern/OSAtomic.h>

#include <vector>
#include <algorithm>

#include "AllImages.h"
#include "MachOParser.h"
#include "libdyldEntryVector.h"
#include "Logging.h"
#include "Loading.h"
#include "Tracing.h"
#include "LaunchCache.h"
#include "DyldSharedCache.h"
#include "PathOverrides.h"
#include "DyldCacheParser.h"

extern const char** appleParams;

// should be a header for these
struct __cxa_range_t {
    const void* addr;
    size_t      length;
};
extern "C" void __cxa_finalize_ranges(const __cxa_range_t ranges[], unsigned int count);
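
// Note on __cxa_finalize_ranges() (a reading of how it is used here, not a formal
// API contract): it is provided by libSystem and runs the registered
// atexit/__cxa_atexit handlers whose handler code falls inside the given address
// ranges. Reaper::finalizeDeadImages() below passes it the executable segments of
// images about to be unmapped so their static destructors run first.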
54
55 VIS_HIDDEN bool gUseDyld3 = false;
56
57
58 namespace dyld3 {
59
60 class VIS_HIDDEN LoadedImage {
61 public:
62 enum class State { uninited=3, beingInited=2, inited=0 };
63 typedef launch_cache::binary_format::Image BinaryImage;
64
65 LoadedImage(const mach_header* mh, const BinaryImage* bi);
66 bool operator==(const LoadedImage& rhs) const;
67 void init(const mach_header* mh, const BinaryImage* bi);
68 const mach_header* loadedAddress() const { return (mach_header*)((uintptr_t)_loadAddress & ~0x7ULL); }
69 State state() const { return (State)((uintptr_t)_loadAddress & 0x3ULL); }
70 const BinaryImage* image() const { return _image; }
71 bool neverUnload() const { return ((uintptr_t)_loadAddress & 0x4ULL); }
72 void setState(State s) { _loadAddress = (mach_header*)((((uintptr_t)_loadAddress) & ~0x3ULL) | (uintptr_t)s); }
73 void setNeverUnload() { _loadAddress = (mach_header*)(((uintptr_t)_loadAddress) | 0x4ULL); }
74
75 private:
76 const mach_header* _loadAddress; // low bits: bit2=neverUnload, bit1/bit0 contain State
77 const BinaryImage* _image;
78 };
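
// Worked example of the pointer tagging above (illustrative only): images are
// mapped on page boundaries, so the low three bits of _loadAddress are always zero
// and are free to carry flags. For a dylib loaded at 0x100000000 that is in
// State::beingInited (2) with its never-unload bit (0x4) set, _loadAddress holds
// 0x100000000 | 0x4 | 0x2 = 0x100000006. loadedAddress() masks with ~0x7 to recover
// 0x100000000, state() masks with 0x3 to recover 2, and neverUnload() tests bit 0x4.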


bool LoadedImage::operator==(const LoadedImage& rhs) const
{
    return (_image == rhs._image) && (loadedAddress() == rhs.loadedAddress());
}



struct VIS_HIDDEN DlopenCount {
    bool                operator==(const DlopenCount& rhs) const;
    const mach_header*  loadAddress;
    uintptr_t           refCount;
};

bool DlopenCount::operator==(const DlopenCount& rhs) const
{
    return (loadAddress == rhs.loadAddress) && (refCount == rhs.refCount);
}

LoadedImage::LoadedImage(const mach_header* mh, const BinaryImage* bi)
    : _loadAddress(mh), _image(bi)
{
    assert(loadedAddress() == mh);
    setState(State::uninited);
}

void LoadedImage::init(const mach_header* mh, const BinaryImage* bi)
{
    _loadAddress = mh;
    _image = bi;
    assert(loadedAddress() == mh);
    setState(State::uninited);
}

// forward reference
template <typename T, int C> class ReaderWriterChunkedVector;

template <typename T, int C>
class VIS_HIDDEN ChunkedVector {
public:
    static ChunkedVector<T,C>*  make(uint32_t count);

    void        forEach(uint32_t& startIndex, bool& outerStop, void (^callback)(uint32_t index, const T& value, bool& stop)) const;
    void        forEach(uint32_t& startIndex, bool& outerStop, void (^callback)(uint32_t index, T& value, bool& stop));
    T*          add(const T& value);
    T*          add(uint32_t count, const T values[]);
    void        remove(uint32_t index);
    uint32_t    count() const       { return _inUseCount; }
    uint32_t    freeCount() const   { return _allocCount - _inUseCount; }
private:
    T&          element(uint32_t index)         { return ((T*)_elements)[index]; }
    const T&    element(uint32_t index) const   { return ((T*)_elements)[index]; }

    friend class ReaderWriterChunkedVector<T,C>;

    ChunkedVector<T,C>*  _next       = nullptr;
    uint32_t             _allocCount = C;
    uint32_t             _inUseCount = 0;
    uint8_t              _elements[C*sizeof(T)] = { 0 };
};

template <typename T, int C>
class VIS_HIDDEN ReaderWriterChunkedVector {
public:
    T*          add(uint32_t count, const T values[]);
    T*          add(const T& value)         { return add(1, &value); }
    T*          addNoLock(uint32_t count, const T values[]);
    T*          addNoLock(const T& value)   { return addNoLock(1, &value); }
    void        remove(const T& value);
    uint32_t    count() const;
    void        forEachWithReadLock(void (^callback)(uint32_t index, const T& value, bool& stop)) const;
    void        forEachWithWriteLock(void (^callback)(uint32_t index, T& value, bool& stop));
    void        forEachNoLock(void (^callback)(uint32_t index, const T& value, bool& stop)) const;
    T&          operator[](size_t index);
    uint32_t    countNoLock() const;

    void        withReadLock(void (^withLock)()) const;
    void        withWriteLock(void (^withLock)()) const;
    void        acquireWriteLock();
    void        releaseWriteLock();
    void        dump(void (^callback)(const T& value)) const;

private:
    mutable pthread_rwlock_t    _lock = PTHREAD_RWLOCK_INITIALIZER;
    ChunkedVector<T,C>          _firstChunk;
};
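
// Usage sketch (illustrative only, not part of the original source): a
// ReaderWriterChunkedVector stores its elements in a linked list of fixed-size
// chunks and serializes access with a pthread rwlock. For example:
//
//     static ReaderWriterChunkedVector<int, 4> sValues;
//     sValues.add(42);                                   // takes the write lock
//     sValues.forEachWithReadLock(^(uint32_t index, const int& value, bool& stop) {
//         if ( value == 42 )
//             stop = true;                               // early exit
//     });
//
// The *NoLock variants exist for callers that already hold the lock (or run before
// threading starts), and acquireWriteLock()/releaseWriteLock() let a caller bracket
// several operations in one critical section.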


typedef void (*NotifyFunc)(const mach_header* mh, intptr_t slide);

static ReaderWriterChunkedVector<NotifyFunc, 4>                                 sLoadNotifiers;
static ReaderWriterChunkedVector<NotifyFunc, 4>                                 sUnloadNotifiers;
static ReaderWriterChunkedVector<LoadedImage, 4>                                sLoadedImages;
static ReaderWriterChunkedVector<DlopenCount, 4>                                sDlopenRefCounts;
static ReaderWriterChunkedVector<const launch_cache::BinaryImageGroupData*, 4>  sKnownGroups;
#if __MAC_OS_X_VERSION_MIN_REQUIRED
static ReaderWriterChunkedVector<__NSObjectFileImage, 2>                        sNSObjectFileImages;
#endif


///////////////////// ChunkedVector ////////////////////////////

template <typename T, int C>
ChunkedVector<T,C>* ChunkedVector<T,C>::make(uint32_t count)
{
    size_t size = sizeof(ChunkedVector) + sizeof(T) * (count-C);
    ChunkedVector<T,C>* result = (ChunkedVector<T,C>*)malloc(size);
    result->_next       = nullptr;
    result->_allocCount = count;
    result->_inUseCount = 0;
    return result;
}
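
// Worked sizing example (illustrative only): sizeof(ChunkedVector) already includes
// the inline _elements[C*sizeof(T)] array, so make() only adds space for the extra
// (count-C) elements. With C=4 and count=10, the malloc is
// sizeof(ChunkedVector) + 6*sizeof(T) bytes, giving the trailing array room for 10
// elements even though it is declared with 4.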

template <typename T, int C>
void ChunkedVector<T,C>::forEach(uint32_t& outerIndex, bool& outerStop, void (^callback)(uint32_t index, const T& value, bool& stop)) const
{
    for (uint32_t i=0; i < _inUseCount; ++i) {
        callback(outerIndex, element(i), outerStop);
        ++outerIndex;
        if ( outerStop )
            break;
    }
}

template <typename T, int C>
void ChunkedVector<T,C>::forEach(uint32_t& outerIndex, bool& outerStop, void (^callback)(uint32_t index, T& value, bool& stop))
{
    for (uint32_t i=0; i < _inUseCount; ++i) {
        callback(outerIndex, element(i), outerStop);
        ++outerIndex;
        if ( outerStop )
            break;
    }
}

template <typename T, int C>
T* ChunkedVector<T,C>::add(const T& value)
{
    return add(1, &value);
}

template <typename T, int C>
T* ChunkedVector<T,C>::add(uint32_t count, const T values[])
{
    assert(count <= (_allocCount - _inUseCount));
    T* result = &element(_inUseCount);
    memmove(result, values, sizeof(T)*count);
    _inUseCount += count;
    return result;
}

template <typename T, int C>
void ChunkedVector<T,C>::remove(uint32_t index)
{
    assert(index < _inUseCount);
    int moveCount = _inUseCount - index - 1;
    if ( moveCount >= 1 ) {
        memmove(&element(index), &element(index+1), sizeof(T)*moveCount);
    }
    _inUseCount--;
}


///////////////////// ReaderWriterChunkedVector ////////////////////////////


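// Note: the pthread_rwlock calls below are wrapped in assert(), so the locking
// relies on this file being built with asserts enabled; under NDEBUG the lock and
// unlock calls would be compiled away along with the checks.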
template <typename T, int C>
void ReaderWriterChunkedVector<T,C>::withReadLock(void (^work)()) const
{
    assert(pthread_rwlock_rdlock(&_lock) == 0);
    work();
    assert(pthread_rwlock_unlock(&_lock) == 0);
}

template <typename T, int C>
void ReaderWriterChunkedVector<T,C>::withWriteLock(void (^work)()) const
{
    assert(pthread_rwlock_wrlock(&_lock) == 0);
    work();
    assert(pthread_rwlock_unlock(&_lock) == 0);
}

template <typename T, int C>
void ReaderWriterChunkedVector<T,C>::acquireWriteLock()
{
    assert(pthread_rwlock_wrlock(&_lock) == 0);
}

template <typename T, int C>
void ReaderWriterChunkedVector<T,C>::releaseWriteLock()
{
    assert(pthread_rwlock_unlock(&_lock) == 0);
}

template <typename T, int C>
uint32_t ReaderWriterChunkedVector<T,C>::count() const
{
    __block uint32_t result = 0;
    withReadLock(^() {
        for (const ChunkedVector<T,C>* chunk = &_firstChunk; chunk != nullptr; chunk = chunk->_next) {
            result += chunk->count();
        }
    });
    return result;
}

template <typename T, int C>
uint32_t ReaderWriterChunkedVector<T,C>::countNoLock() const
{
    uint32_t result = 0;
    for (const ChunkedVector<T,C>* chunk = &_firstChunk; chunk != nullptr; chunk = chunk->_next) {
        result += chunk->count();
    }
    return result;
}

template <typename T, int C>
T* ReaderWriterChunkedVector<T,C>::addNoLock(uint32_t count, const T values[])
{
    T* result = nullptr;
    ChunkedVector<T,C>* lastChunk = &_firstChunk;
    while ( lastChunk->_next != nullptr )
        lastChunk = lastChunk->_next;

    if ( lastChunk->freeCount() >= count ) {
        // append to last chunk
        result = lastChunk->add(count, values);
    }
    else {
        // append new chunk
        uint32_t allocCount = count;
        uint32_t remainder = count % C;
        if ( remainder != 0 )
            allocCount = count + C - remainder;
        ChunkedVector<T,C>* newChunk = ChunkedVector<T,C>::make(allocCount);
        result = newChunk->add(count, values);
        lastChunk->_next = newChunk;
    }

    return result;
}
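
// Worked example of the rounding above (illustrative only): appending 6 values when
// the last chunk has no free space, with C=4: remainder = 6 % 4 = 2, so
// allocCount = 6 + 4 - 2 = 8, i.e. the new chunk is sized up to the next multiple of C.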

template <typename T, int C>
T* ReaderWriterChunkedVector<T,C>::add(uint32_t count, const T values[])
{
    __block T* result = nullptr;
    withWriteLock(^() {
        result = addNoLock(count, values);
    });
    return result;
}

template <typename T, int C>
void ReaderWriterChunkedVector<T,C>::remove(const T& valueToRemove)
{
    __block bool stopStorage = false;
    withWriteLock(^() {
        ChunkedVector<T,C>* chunkNowEmpty = nullptr;
        __block uint32_t indexStorage = 0;
        __block bool found = false;
        for (ChunkedVector<T,C>* chunk = &_firstChunk; chunk != nullptr; chunk = chunk->_next) {
            uint32_t chunkStartIndex = indexStorage;
            __block uint32_t foundIndex = 0;
            chunk->forEach(indexStorage, stopStorage, ^(uint32_t index, const T& value, bool& stop) {
                if ( value == valueToRemove ) {
                    foundIndex = index - chunkStartIndex;
                    found = true;
                    stop = true;
                }
            });
            if ( found ) {
                chunk->remove(foundIndex);
                found = false;
                if ( chunk->count() == 0 )
                    chunkNowEmpty = chunk;
            }
        }
        // if chunk is now empty, remove from linked list and free
        if ( chunkNowEmpty ) {
            for (ChunkedVector<T,C>* chunk = &_firstChunk; chunk != nullptr; chunk = chunk->_next) {
                if ( chunk->_next == chunkNowEmpty ) {
                    chunk->_next = chunkNowEmpty->_next;
                    if ( chunkNowEmpty != &_firstChunk )
                        free(chunkNowEmpty);
                    break;
                }
            }
        }
    });
}

template <typename T, int C>
void ReaderWriterChunkedVector<T,C>::forEachWithReadLock(void (^callback)(uint32_t index, const T& value, bool& stop)) const
{
    __block uint32_t index = 0;
    __block bool stop = false;
    withReadLock(^() {
        for (const ChunkedVector<T,C>* chunk = &_firstChunk; chunk != nullptr; chunk = chunk->_next) {
            chunk->forEach(index, stop, callback);
            if ( stop )
                break;
        }
    });
}
template <typename T, int C>
void ReaderWriterChunkedVector<T,C>::forEachWithWriteLock(void (^callback)(uint32_t index, T& value, bool& stop))
{
    __block uint32_t index = 0;
    __block bool stop = false;
    withWriteLock(^() {
        for (ChunkedVector<T,C>* chunk = &_firstChunk; chunk != nullptr; chunk = chunk->_next) {
            chunk->forEach(index, stop, callback);
            if ( stop )
                break;
        }
    });
}

template <typename T, int C>
void ReaderWriterChunkedVector<T,C>::forEachNoLock(void (^callback)(uint32_t index, const T& value, bool& stop)) const
{
    uint32_t index = 0;
    bool stop = false;
    for (const ChunkedVector<T,C>* chunk = &_firstChunk; chunk != nullptr; chunk = chunk->_next) {
        chunk->forEach(index, stop, callback);
        if ( stop )
            break;
    }
}

template <typename T, int C>
T& ReaderWriterChunkedVector<T,C>::operator[](size_t targetIndex)
{
    __block T* result = nullptr;
    forEachNoLock(^(uint32_t index, T const& value, bool& stop) {
        if ( index == targetIndex ) {
            result = (T*)&value;
            stop = true;
        }
    });
    return *result;
}

template <typename T, int C>
void ReaderWriterChunkedVector<T,C>::dump(void (^callback)(const T& value)) const
{
    log("dump ReaderWriterChunkedVector at %p\n", this);
    __block uint32_t index = 0;
    __block bool stop = false;
    withReadLock(^() {
        for (const ChunkedVector<T,C>* chunk = &_firstChunk; chunk != nullptr; chunk = chunk->_next) {
            log("  chunk at %p\n", chunk);
            chunk->forEach(index, stop, ^(uint32_t i, const T& value, bool& s) {
                callback(value);
            });
        }
    });
}



///////////////////// AllImages ////////////////////////////


AllImages gAllImages;



void AllImages::init(const BinaryClosure* closure, const void* dyldCacheLoadAddress, const char* dyldCachePath,
                     const dyld3::launch_cache::DynArray<loader::ImageInfo>& initialImages)
{
    _mainClosure      = closure;
    _initialImages    = &initialImages;
    _dyldCacheAddress = dyldCacheLoadAddress;
    _dyldCachePath    = dyldCachePath;

    if ( _dyldCacheAddress ) {
        const DyldSharedCache* cache = (DyldSharedCache*)_dyldCacheAddress;
        const dyld_cache_mapping_info* const fileMappings = (dyld_cache_mapping_info*)((uint64_t)_dyldCacheAddress + cache->header.mappingOffset);
        _dyldCacheSlide = (uint64_t)dyldCacheLoadAddress - fileMappings[0].address;
    }

    // Make temporary old image array, so libSystem initializers can be debugged
    uint32_t count = (uint32_t)initialImages.count();
    dyld_image_info oldDyldInfo[count];
    for (int i=0; i < count; ++i) {
        launch_cache::Image img(initialImages[i].imageData);
        oldDyldInfo[i].imageLoadAddress = initialImages[i].loadAddress;
        oldDyldInfo[i].imageFilePath    = img.path();
        oldDyldInfo[i].imageFileModDate = 0;
    }
    _oldAllImageInfos->infoArray        = oldDyldInfo;
    _oldAllImageInfos->infoArrayCount   = count;
    _oldAllImageInfos->notification(dyld_image_adding, count, oldDyldInfo);
    _oldAllImageInfos->infoArray        = nullptr;
    _oldAllImageInfos->infoArrayCount   = 0;
}

void AllImages::setProgramVars(ProgramVars* vars)
{
    _programVars = vars;
}

void AllImages::applyInitialImages()
{
    addImages(*_initialImages);
    _initialImages = nullptr;  // this was stack allocated
}

void AllImages::mirrorToOldAllImageInfos()
{
    // set infoArray to NULL to denote it is in-use
    _oldAllImageInfos->infoArray = nullptr;

    // if array not large enough, re-alloc it
    uint32_t imageCount = sLoadedImages.countNoLock();
    if ( _oldArrayAllocCount < imageCount ) {
        uint32_t newAllocCount = imageCount + 16;
        dyld_image_info* newArray = (dyld_image_info*)malloc(sizeof(dyld_image_info)*newAllocCount);
        if ( _oldAllImageArray != nullptr ) {
            memcpy(newArray, _oldAllImageArray, sizeof(dyld_image_info)*_oldAllImageInfos->infoArrayCount);
            free(_oldAllImageArray);
        }
        _oldAllImageArray   = newArray;
        _oldArrayAllocCount = newAllocCount;
    }

    // fill out array to mirror current image list
    sLoadedImages.forEachNoLock(^(uint32_t index, const LoadedImage& loadedImage, bool& stop) {
        launch_cache::Image img(loadedImage.image());
        _oldAllImageArray[index].imageLoadAddress = loadedImage.loadedAddress();
        _oldAllImageArray[index].imageFilePath    = imagePath(loadedImage.image());
        _oldAllImageArray[index].imageFileModDate = 0;
    });

    // set infoArray back to base address of array (so other processes can now read it)
    _oldAllImageInfos->infoArrayCount           = imageCount;
    _oldAllImageInfos->infoArrayChangeTimestamp = mach_absolute_time();
    _oldAllImageInfos->infoArray                = _oldAllImageArray;
}

void AllImages::addImages(const launch_cache::DynArray<loader::ImageInfo>& newImages)
{
    uint32_t count = (uint32_t)newImages.count();
    assert(count != 0);

    // build stack array of LoadedImage to copy into sLoadedImages
    STACK_ALLOC_DYNARRAY(LoadedImage, count, loadedImagesArray);
    for (uint32_t i=0; i < count; ++i) {
        loadedImagesArray[i].init(newImages[i].loadAddress, newImages[i].imageData);
        if (newImages[i].neverUnload)
            loadedImagesArray[i].setNeverUnload();
    }
    sLoadedImages.add(count, &loadedImagesArray[0]);

    if ( _oldAllImageInfos != nullptr ) {
        // sync to old all image infos struct
        if ( _initialImages != nullptr ) {
            // libSystem not initialized yet, don't use locks
            mirrorToOldAllImageInfos();
        }
        else {
            sLoadedImages.withReadLock(^{
                mirrorToOldAllImageInfos();
            });
        }

        // tell debugger about new images
        dyld_image_info oldDyldInfo[count];
        for (int i=0; i < count; ++i) {
            launch_cache::Image img(newImages[i].imageData);
            oldDyldInfo[i].imageLoadAddress = newImages[i].loadAddress;
            oldDyldInfo[i].imageFilePath    = imagePath(newImages[i].imageData);
            oldDyldInfo[i].imageFileModDate = 0;
        }
        _oldAllImageInfos->notification(dyld_image_adding, count, oldDyldInfo);
    }

    // log loads
    for (int i=0; i < count; ++i) {
        launch_cache::Image img(newImages[i].imageData);
        log_loads("dyld: %s\n", imagePath(newImages[i].imageData));
    }

#if !TARGET_IPHONE_SIMULATOR
    // call kdebug trace for each image
    if (kdebug_is_enabled(KDBG_CODE(DBG_DYLD, DBG_DYLD_UUID, DBG_DYLD_UUID_MAP_A))) {
        for (uint32_t i=0; i < count; ++i) {
            launch_cache::Image img(newImages[i].imageData);
            struct stat stat_buf;
            fsid_t fsid = {{ 0, 0 }};
            fsobj_id_t fsobjid = { 0, 0 };
            if (img.isDiskImage() && stat(imagePath(newImages[i].imageData), &stat_buf) == 0 ) {
                fsobjid = *(fsobj_id_t*)&stat_buf.st_ino;
                fsid = {{ stat_buf.st_dev, 0 }};
            }
            kdebug_trace_dyld_image(DBG_DYLD_UUID_MAP_A, img.uuid(), fsobjid, fsid, newImages[i].loadAddress);
        }
    }
#endif
    // call each _dyld_register_func_for_add_image function with each image
    const uint32_t existingNotifierCount = sLoadNotifiers.count();
    NotifyFunc existingNotifiers[existingNotifierCount];
    NotifyFunc* existingNotifierArray = existingNotifiers;
    sLoadNotifiers.forEachWithReadLock(^(uint32_t index, const NotifyFunc& func, bool& stop) {
        if ( index < existingNotifierCount )
            existingNotifierArray[index] = func;
    });
    // we don't want to hold lock while calling out, so prebuild array (with lock) then do calls on that array (without lock)
    for (uint32_t j=0; j < existingNotifierCount; ++j) {
        NotifyFunc func = existingNotifierArray[j];
        for (uint32_t i=0; i < count; ++i) {
            log_notifications("dyld: add notifier %p called with mh=%p\n", func, newImages[i].loadAddress);
            if (newImages[i].justUsedFromDyldCache) {
                func(newImages[i].loadAddress, _dyldCacheSlide);
            } else {
                MachOParser parser(newImages[i].loadAddress);
                func(newImages[i].loadAddress, parser.getSlide());
            }
        }
    }

    // call objc about images that use objc
    if ( _objcNotifyMapped != nullptr ) {
        const char* pathsBuffer[count];
        const mach_header* mhBuffer[count];
        uint32_t imagesWithObjC = 0;
        for (uint32_t i=0; i < count; ++i) {
            launch_cache::Image img(newImages[i].imageData);
            if ( img.hasObjC() ) {
                pathsBuffer[imagesWithObjC] = imagePath(newImages[i].imageData);
                mhBuffer[imagesWithObjC]    = newImages[i].loadAddress;
                ++imagesWithObjC;
            }
        }
        if ( imagesWithObjC != 0 ) {
            (*_objcNotifyMapped)(imagesWithObjC, pathsBuffer, mhBuffer);
            if ( log_notifications("dyld: objc-mapped-notifier called with %d images:\n", imagesWithObjC) ) {
                for (uint32_t i=0; i < imagesWithObjC; ++i) {
                    log_notifications("dyld: objc-mapped: %p %s\n", mhBuffer[i], pathsBuffer[i]);
                }
            }
        }
    }

    // notify any processes tracking loads in this process
    notifyMonitorLoads(newImages);
}

void AllImages::removeImages(const launch_cache::DynArray<loader::ImageInfo>& unloadImages)
{
    uint32_t count = (uint32_t)unloadImages.count();
    assert(count != 0);

    // call each _dyld_register_func_for_remove_image function with each image
    // do this before removing image from internal data structures so that the callback can query dyld about the image
    const uint32_t existingNotifierCount = sUnloadNotifiers.count();
    NotifyFunc existingNotifiers[existingNotifierCount];
    NotifyFunc* existingNotifierArray = existingNotifiers;
    sUnloadNotifiers.forEachWithReadLock(^(uint32_t index, const NotifyFunc& func, bool& stop) {
        if ( index < existingNotifierCount )
            existingNotifierArray[index] = func;
    });
    // we don't want to hold lock while calling out, so prebuild array (with lock) then do calls on that array (without lock)
    for (uint32_t j=0; j < existingNotifierCount; ++j) {
        NotifyFunc func = existingNotifierArray[j];
        for (uint32_t i=0; i < count; ++i) {
            MachOParser parser(unloadImages[i].loadAddress);
            log_notifications("dyld: remove notifier %p called with mh=%p\n", func, unloadImages[i].loadAddress);
            func(unloadImages[i].loadAddress, parser.getSlide());
        }
    }

    // call objc about images going away
    if ( _objcNotifyUnmapped != nullptr ) {
        for (uint32_t i=0; i < count; ++i) {
            launch_cache::Image img(unloadImages[i].imageData);
            if ( img.hasObjC() ) {
                (*_objcNotifyUnmapped)(imagePath(unloadImages[i].imageData), unloadImages[i].loadAddress);
                log_notifications("dyld: objc-unmapped-notifier called with image %p %s\n", unloadImages[i].loadAddress, imagePath(unloadImages[i].imageData));
            }
        }
    }

#if !TARGET_IPHONE_SIMULATOR
    // call kdebug trace for each image
    if (kdebug_is_enabled(KDBG_CODE(DBG_DYLD, DBG_DYLD_UUID, DBG_DYLD_UUID_MAP_A))) {
        for (uint32_t i=0; i < count; ++i) {
            launch_cache::Image img(unloadImages[i].imageData);
            struct stat stat_buf;
            fsid_t fsid = {{ 0, 0 }};
            fsobj_id_t fsobjid = { 0, 0 };
            if (stat(imagePath(unloadImages[i].imageData), &stat_buf) == 0 ) {
                fsobjid = *(fsobj_id_t*)&stat_buf.st_ino;
                fsid = {{ stat_buf.st_dev, 0 }};
            }
            kdebug_trace_dyld_image(DBG_DYLD_UUID_UNMAP_A, img.uuid(), fsobjid, fsid, unloadImages[i].loadAddress);
        }
    }
#endif

    // remove each from sLoadedImages
    for (uint32_t i=0; i < count; ++i) {
        LoadedImage info(unloadImages[i].loadAddress, unloadImages[i].imageData);
        sLoadedImages.remove(info);
    }

    // sync to old all image infos struct
    sLoadedImages.withReadLock(^{
        mirrorToOldAllImageInfos();
    });

    // tell debugger about removed images
    dyld_image_info oldDyldInfo[count];
    for (int i=0; i < count; ++i) {
        launch_cache::Image img(unloadImages[i].imageData);
        oldDyldInfo[i].imageLoadAddress = unloadImages[i].loadAddress;
        oldDyldInfo[i].imageFilePath    = imagePath(unloadImages[i].imageData);
        oldDyldInfo[i].imageFileModDate = 0;
    }
    _oldAllImageInfos->notification(dyld_image_removing, count, oldDyldInfo);

    // unmap images
    for (int i=0; i < count; ++i) {
        launch_cache::Image img(unloadImages[i].imageData);
        loader::unmapImage(unloadImages[i].imageData, unloadImages[i].loadAddress);
        log_loads("dyld: unloaded %s\n", imagePath(unloadImages[i].imageData));
    }

    // notify any processes tracking loads in this process
    notifyMonitorUnloads(unloadImages);
}

void AllImages::setNeverUnload(const loader::ImageInfo& existingImage)
{
    sLoadedImages.forEachWithWriteLock(^(uint32_t index, dyld3::LoadedImage& value, bool& stop) {
        if ( value.image() == existingImage.imageData ) {
            value.setNeverUnload();
            stop = true;
        }
    });
}

uint32_t AllImages::count() const
{
    return sLoadedImages.count();
}


launch_cache::Image AllImages::findByLoadOrder(uint32_t index, const mach_header** loadAddress) const
{
    __block const BinaryImage* foundImage = nullptr;
    sLoadedImages.forEachWithReadLock(^(uint32_t anIndex, const LoadedImage& loadedImage, bool& stop) {
        if ( anIndex == index ) {
            foundImage   = loadedImage.image();
            *loadAddress = loadedImage.loadedAddress();
            stop = true;
        }
    });
    return launch_cache::Image(foundImage);
}

launch_cache::Image AllImages::findByLoadAddress(const mach_header* loadAddress) const
{
    __block const BinaryImage* foundImage = nullptr;
    sLoadedImages.forEachWithReadLock(^(uint32_t anIndex, const LoadedImage& loadedImage, bool& stop) {
        if ( loadedImage.loadedAddress() == loadAddress ) {
            foundImage = loadedImage.image();
            stop = true;
        }
    });
    return launch_cache::Image(foundImage);
}

bool AllImages::findIndexForLoadAddress(const mach_header* loadAddress, uint32_t& index)
{
    __block bool result = false;
    sLoadedImages.forEachWithReadLock(^(uint32_t anIndex, const LoadedImage& loadedImage, bool& stop) {
        if ( loadedImage.loadedAddress() == loadAddress ) {
            index  = anIndex;
            result = true;
            stop   = true;
        }
    });
    return result;
}

void AllImages::forEachImage(void (^handler)(uint32_t imageIndex, const mach_header* loadAddress, const launch_cache::Image image, bool& stop)) const
{
    sLoadedImages.forEachWithReadLock(^(uint32_t imageIndex, const LoadedImage& loadedImage, bool& stop) {
        handler(imageIndex, loadedImage.loadedAddress(), launch_cache::Image(loadedImage.image()), stop);
    });
}

launch_cache::Image AllImages::findByOwnedAddress(const void* addr, const mach_header** loadAddress, uint8_t* permissions) const
{
    if ( _initialImages != nullptr ) {
        // being called during libSystem initialization, so sLoadedImages not allocated yet
        for (int i=0; i < _initialImages->count(); ++i) {
            const loader::ImageInfo& entry = (*_initialImages)[i];
            launch_cache::Image anImage(entry.imageData);
            if ( anImage.containsAddress(addr, entry.loadAddress, permissions) ) {
                *loadAddress = entry.loadAddress;
                return entry.imageData;
            }
        }
        return launch_cache::Image(nullptr);
    }

    // if address is in cache, do fast search of cache
    if ( (_dyldCacheAddress != nullptr) && (addr > _dyldCacheAddress) ) {
        const DyldSharedCache* dyldCache = (DyldSharedCache*)_dyldCacheAddress;
        if ( addr < (void*)((uint8_t*)_dyldCacheAddress+dyldCache->mappedSize()) ) {
            size_t cacheVmOffset = ((uint8_t*)addr - (uint8_t*)_dyldCacheAddress);
            DyldCacheParser cacheParser(dyldCache, false);
            launch_cache::ImageGroup cachedDylibsGroup(cacheParser.cachedDylibsGroup());
            uint32_t mhCacheOffset;
            uint8_t foundPermissions;
            launch_cache::Image image(cachedDylibsGroup.findImageByCacheOffset(cacheVmOffset, mhCacheOffset, foundPermissions));
            if ( image.valid() ) {
                *loadAddress = (mach_header*)((uint8_t*)_dyldCacheAddress + mhCacheOffset);
                if ( permissions != nullptr )
                    *permissions = foundPermissions;
                return image;
            }
        }
    }

    __block const BinaryImage* foundImage = nullptr;
    sLoadedImages.forEachWithReadLock(^(uint32_t anIndex, const LoadedImage& loadedImage, bool& stop) {
        launch_cache::Image anImage(loadedImage.image());
        if ( anImage.containsAddress(addr, loadedImage.loadedAddress(), permissions) ) {
            *loadAddress = loadedImage.loadedAddress();
            foundImage   = loadedImage.image();
            stop = true;
        }
    });
    return launch_cache::Image(foundImage);
}

const mach_header* AllImages::findLoadAddressByImage(const BinaryImage* targetImage) const
{
    __block const mach_header* foundAddress = nullptr;
    sLoadedImages.forEachWithReadLock(^(uint32_t anIndex, const LoadedImage& loadedImage, bool& stop) {
        if ( targetImage == loadedImage.image() ) {
            foundAddress = loadedImage.loadedAddress();
            stop = true;
        }
    });
    return foundAddress;
}

const mach_header* AllImages::mainExecutable() const
{
    assert(_programVars != nullptr);
    return (const mach_header*)_programVars->mh;
}

launch_cache::Image AllImages::mainExecutableImage() const
{
    assert(_mainClosure != nullptr);
    const launch_cache::Closure mainClosure(_mainClosure);
    const dyld3::launch_cache::ImageGroup mainGroup = mainClosure.group();
    const uint32_t mainExecutableIndex = mainClosure.mainExecutableImageIndex();
    const dyld3::launch_cache::Image mainImage = mainGroup.image(mainExecutableIndex);
    return mainImage;
}

void AllImages::setMainPath(const char* path)
{
    _mainExeOverridePath = path;
}

const char* AllImages::imagePath(const BinaryImage* binImage) const
{
#if __IPHONE_OS_VERSION_MIN_REQUIRED
    // on iOS and watchOS, apps may be moved on device after the closure was built
    if ( _mainExeOverridePath != nullptr ) {
        if ( binImage == mainExecutableImage().binaryData() )
            return _mainExeOverridePath;
    }
#endif
    launch_cache::Image image(binImage);
    return image.path();
}

void AllImages::setInitialGroups()
{
    DyldCacheParser cacheParser((DyldSharedCache*)_dyldCacheAddress, false);
    sKnownGroups.addNoLock(cacheParser.cachedDylibsGroup());
    sKnownGroups.addNoLock(cacheParser.otherDylibsGroup());
    launch_cache::Closure closure(_mainClosure);
    sKnownGroups.addNoLock(closure.group().binaryData());
}

const launch_cache::binary_format::ImageGroup* AllImages::cachedDylibsGroup()
{
    return sKnownGroups[0];
}

const launch_cache::binary_format::ImageGroup* AllImages::otherDylibsGroup()
{
    return sKnownGroups[1];
}

const AllImages::BinaryImageGroup* AllImages::mainClosureGroup()
{
    return sKnownGroups[2];
}

uint32_t AllImages::currentGroupsCount() const
{
    return sKnownGroups.count();
}

void AllImages::copyCurrentGroups(ImageGroupList& groups) const
{
    sKnownGroups.forEachWithReadLock(^(uint32_t index, const dyld3::launch_cache::binary_format::ImageGroup* const& grpData, bool& stop) {
        if ( index < groups.count() )
            groups[index] = grpData;
    });
}

void AllImages::copyCurrentGroupsNoLock(ImageGroupList& groups) const
{
    sKnownGroups.forEachNoLock(^(uint32_t index, const dyld3::launch_cache::binary_format::ImageGroup* const& grpData, bool& stop) {
        if ( index < groups.count() )
            groups[index] = grpData;
    });
}

const mach_header* AllImages::alreadyLoaded(uint64_t inode, uint64_t mtime, bool bumpRefCount)
{
    __block const mach_header* result = nullptr;
    sLoadedImages.forEachWithReadLock(^(uint32_t anIndex, const LoadedImage& loadedImage, bool& stop) {
        launch_cache::Image img(loadedImage.image());
        if ( img.validateUsingModTimeAndInode() ) {
            if ( (img.fileINode() == inode) && (img.fileModTime() == mtime) ) {
                result = loadedImage.loadedAddress();
                if ( bumpRefCount && !loadedImage.neverUnload() )
                    incRefCount(loadedImage.loadedAddress());
                stop = true;
            }
        }
    });
    return result;
}

const mach_header* AllImages::alreadyLoaded(const char* path, bool bumpRefCount)
{
    __block const mach_header* result = nullptr;
    uint32_t targetHash = launch_cache::ImageGroup::hashFunction(path);
    sLoadedImages.forEachWithReadLock(^(uint32_t anIndex, const LoadedImage& loadedImage, bool& stop) {
        launch_cache::Image img(loadedImage.image());
        if ( (img.pathHash() == targetHash) && (strcmp(path, imagePath(loadedImage.image())) == 0) ) {
            result = loadedImage.loadedAddress();
            if ( bumpRefCount && !loadedImage.neverUnload() )
                incRefCount(loadedImage.loadedAddress());
            stop = true;
        }
    });
    if ( result == nullptr ) {
        // perhaps there was an image override
        launch_cache::ImageGroup mainGroup(mainClosureGroup());
        STACK_ALLOC_DYNARRAY(const launch_cache::BinaryImageGroupData*, currentGroupsCount(), currentGroupsList);
        copyCurrentGroups(currentGroupsList);
        mainGroup.forEachImageRefOverride(currentGroupsList, ^(launch_cache::Image standardDylib, launch_cache::Image overrideDylib, bool& stop) {
            if ( strcmp(standardDylib.path(), path) == 0 ) {
                result = alreadyLoaded(overrideDylib.path(), bumpRefCount);
                stop = true;
            }
        });
    }
    return result;
}

const mach_header* AllImages::alreadyLoaded(const BinaryImage* binImage, bool bumpRefCount)
{
    const mach_header* result = findLoadAddressByImage(binImage);
    if ( result != nullptr ) {
        launch_cache::Image loadedImage(binImage);
        if ( bumpRefCount && !loadedImage.neverUnload() )
            incRefCount(result);
    }
    return result;
}

void AllImages::incRefCount(const mach_header* loadAddress)
{
    __block bool found = false;
    sDlopenRefCounts.forEachWithWriteLock(^(uint32_t index, DlopenCount& entry, bool& stop) {
        if ( entry.loadAddress == loadAddress ) {
            found = true;
            entry.refCount += 1;
            stop = true;
        }
    });
    if ( !found ) {
        DlopenCount newEntry = { loadAddress, 1 };
        sDlopenRefCounts.add(newEntry);
    }
}

void AllImages::decRefCount(const mach_header* loadAddress)
{
    __block bool refCountNowZero = false;
    sDlopenRefCounts.forEachWithWriteLock(^(uint32_t index, DlopenCount& entry, bool& stop) {
        if ( entry.loadAddress == loadAddress ) {
            entry.refCount -= 1;
            stop = true;
            if ( entry.refCount == 0 )
                refCountNowZero = true;
        }
    });
    if ( refCountNowZero ) {
        DlopenCount deadEntry = { loadAddress, 0 };
        sDlopenRefCounts.remove(deadEntry);
        garbageCollectImages();
    }
}
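
// Sketch of the dlopen reference counting above (illustrative only): a successful
// dlopen() of an unloadable image ends up in incRefCount(mh), and the matching
// dlclose() ends up in decRefCount(mh). When the count drops to zero, the entry is
// removed from sDlopenRefCounts and garbageCollectImages() runs a mark-and-sweep
// pass (see below) to decide which images, including dependents that were pulled
// in, can actually be unmapped.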


#if __MAC_OS_X_VERSION_MIN_REQUIRED
__NSObjectFileImage* AllImages::addNSObjectFileImage()
{
    // look for empty slot first
    __block __NSObjectFileImage* result = nullptr;
    sNSObjectFileImages.forEachWithWriteLock(^(uint32_t index, __NSObjectFileImage& value, bool& stop) {
        if ( (value.path == nullptr) && (value.memSource == nullptr) ) {
            result = &value;
            stop = true;
        }
    });
    if ( result != nullptr )
        return result;

    // otherwise allocate new slot
    __NSObjectFileImage empty;
    return sNSObjectFileImages.add(empty);
}

bool AllImages::hasNSObjectFileImage(__NSObjectFileImage* ofi)
{
    __block bool result = false;
    sNSObjectFileImages.forEachNoLock(^(uint32_t index, const __NSObjectFileImage& value, bool& stop) {
        if ( &value == ofi ) {
            result = ((value.memSource != nullptr) || (value.path != nullptr));
            stop = true;
        }
    });
    return result;
}

void AllImages::removeNSObjectFileImage(__NSObjectFileImage* ofi)
{
    sNSObjectFileImages.forEachWithWriteLock(^(uint32_t index, __NSObjectFileImage& value, bool& stop) {
        if ( &value == ofi ) {
            // mark slot as empty
            ofi->path        = nullptr;
            ofi->memSource   = nullptr;
            ofi->memLength   = 0;
            ofi->loadAddress = nullptr;
            ofi->binImage    = nullptr;
            stop = true;
        }
    });
}
#endif


class VIS_HIDDEN Reaper
{
public:
                Reaper(uint32_t count, const LoadedImage** unloadables, bool* inUseArray);
    void        garbageCollect();
    void        finalizeDeadImages();

private:
    typedef launch_cache::binary_format::Image BinaryImage;

    void        markDirectlyDlopenedImagesAsUsed();
    void        markDependentOfInUseImages();
    void        markDependentsOf(const LoadedImage*);
    bool        loadAddressIsUnloadable(const mach_header* loadAddr, uint32_t& index);
    bool        imageIsUnloadable(const BinaryImage* binImage, uint32_t& foundIndex);
    uint32_t    inUseCount();
    void        dump(const char* msg);

    const LoadedImage**  _unloadablesArray;
    bool*                _inUseArray;
    uint32_t             _arrayCount;
    uint32_t             _deadCount;
};

Reaper::Reaper(uint32_t count, const LoadedImage** unloadables, bool* inUseArray)
    : _unloadablesArray(unloadables), _inUseArray(inUseArray), _arrayCount(count)
{
}


bool Reaper::loadAddressIsUnloadable(const mach_header* loadAddr, uint32_t& foundIndex)
{
    for (uint32_t i=0; i < _arrayCount; ++i) {
        if ( _unloadablesArray[i]->loadedAddress() == loadAddr ) {
            foundIndex = i;
            return true;
        }
    }
    return false;
}

bool Reaper::imageIsUnloadable(const BinaryImage* binImage, uint32_t& foundIndex)
{
    for (uint32_t i=0; i < _arrayCount; ++i) {
        if ( _unloadablesArray[i]->image() == binImage ) {
            foundIndex = i;
            return true;
        }
    }
    return false;
}

void Reaper::markDirectlyDlopenedImagesAsUsed()
{
    sDlopenRefCounts.forEachWithReadLock(^(uint32_t refCountIndex, const dyld3::DlopenCount& dlEntry, bool& stop) {
        if ( dlEntry.refCount != 0 ) {
            uint32_t foundIndex;
            if ( loadAddressIsUnloadable(dlEntry.loadAddress, foundIndex) ) {
                _inUseArray[foundIndex] = true;
            }
        }
    });
}

uint32_t Reaper::inUseCount()
{
    uint32_t count = 0;
    for (uint32_t i=0; i < _arrayCount; ++i) {
        if ( _inUseArray[i] )
            ++count;
    }
    return count;
}

void Reaper::markDependentsOf(const LoadedImage* entry)
{
    const launch_cache::Image image(entry->image());
    STACK_ALLOC_DYNARRAY(const launch_cache::BinaryImageGroupData*, gAllImages.currentGroupsCount(), currentGroupsList);
    gAllImages.copyCurrentGroups(currentGroupsList);
    image.forEachDependentImage(currentGroupsList, ^(uint32_t depIndex, dyld3::launch_cache::Image depImage, dyld3::launch_cache::Image::LinkKind kind, bool& stop) {
        uint32_t foundIndex;
        if ( !depImage.neverUnload() && imageIsUnloadable(depImage.binaryData(), foundIndex) ) {
            _inUseArray[foundIndex] = true;
        }
    });
}

void Reaper::markDependentOfInUseImages()
{
    for (uint32_t i=0; i < _arrayCount; ++i) {
        if ( _inUseArray[i] )
            markDependentsOf(_unloadablesArray[i]);
    }
}

void Reaper::dump(const char* msg)
{
    //log("%s:\n", msg);
    for (uint32_t i=0; i < _arrayCount; ++i) {
        dyld3::launch_cache::Image image(_unloadablesArray[i]->image());
        //log("  in-used=%d  %s\n", _inUseArray[i], image.path());
    }
}

void Reaper::garbageCollect()
{
    //dump("all unloadable images");

    // mark all dylibs directly dlopen'ed as in use
    markDirectlyDlopenedImagesAsUsed();

    //dump("directly dlopen()'ed marked");

    // iteratively mark dependents of in-use dylibs as in-use until in-use count stops changing
    uint32_t lastCount = inUseCount();
    bool countChanged = false;
    do {
        markDependentOfInUseImages();
        //dump("dependents marked");
        uint32_t newCount = inUseCount();
        countChanged = (newCount != lastCount);
        lastCount = newCount;
    } while (countChanged);

    _deadCount = _arrayCount - inUseCount();
}

void Reaper::finalizeDeadImages()
{
    if ( _deadCount == 0 )
        return;
    __cxa_range_t ranges[_deadCount];
    __cxa_range_t* rangesArray = ranges;
    __block unsigned int rangesCount = 0;
    for (uint32_t i=0; i < _arrayCount; ++i) {
        if ( _inUseArray[i] )
            continue;
        dyld3::launch_cache::Image image(_unloadablesArray[i]->image());
        image.forEachDiskSegment(^(uint32_t segIndex, uint32_t fileOffset, uint32_t fileSize, int64_t vmOffset, uint64_t vmSize, uint8_t permissions, bool& stop) {
            if ( permissions & VM_PROT_EXECUTE ) {
                rangesArray[rangesCount].addr   = (char*)(_unloadablesArray[i]->loadedAddress()) + vmOffset;
                rangesArray[rangesCount].length = (size_t)vmSize;
                ++rangesCount;
            }
        });
    }
    __cxa_finalize_ranges(ranges, rangesCount);
}


// This function is called at the end of dlclose() when the reference count goes to zero.
// The dylib being unloaded may have brought in other dependent dylibs when it was loaded.
// Those dependent dylibs need to be unloaded, but only if they are not referenced by
// something else. We use a standard mark and sweep garbage collection.
//
// The tricky part is that when a dylib is unloaded it may have a termination function that
// can run and itself call dlclose() on yet another dylib. The problem is that this
// sort of garbage collection is not re-entrant. Instead, a terminator's call to dlclose(),
// which calls garbageCollectImages(), will just set a flag to re-do the garbage collection
// when the current pass is done.
//
// Also note that this is done within the sLoadedImages writer lock, so any dlopen/dlclose
// calls on other threads are blocked while this garbage collection runs.
//
void AllImages::garbageCollectImages()
{
    // if some other thread is currently GC'ing images, let other thread do the work
    int32_t newCount = OSAtomicIncrement32(&_gcCount);
    if ( newCount != 1 )
        return;

    do {
        const uint32_t      loadedImageCount = sLoadedImages.count();
        const LoadedImage*  unloadables[loadedImageCount];
        bool                unloadableInUse[loadedImageCount];
        const LoadedImage** unloadablesArray = unloadables;
        bool*               unloadableInUseArray = unloadableInUse;
        __block uint32_t    unloadableCount = 0;
        // do GC with lock, so no other images can be added during GC
        sLoadedImages.withReadLock(^() {
            sLoadedImages.forEachNoLock(^(uint32_t index, const LoadedImage& entry, bool& stop) {
                const launch_cache::Image image(entry.image());
                if ( !image.neverUnload() && !entry.neverUnload() ) {
                    unloadablesArray[unloadableCount] = &entry;
                    unloadableInUseArray[unloadableCount] = false;
                    //log("unloadable[%d] %p %s\n", unloadableCount, entry.loadedAddress(), image.path());
                    ++unloadableCount;
                }
            });
            // make reaper object to do garbage collection and notifications
            Reaper reaper(unloadableCount, unloadablesArray, unloadableInUseArray);
            reaper.garbageCollect();

            // FIXME: we should sort dead images so higher level ones are terminated first

            // call cxa_finalize_ranges of dead images
            reaper.finalizeDeadImages();

            // FIXME: call static terminators of dead images

            // FIXME: DOF unregister
        });

        //log("sLoadedImages before GC removals:\n");
        //sLoadedImages.dump(^(const LoadedImage& entry) {
        //    const launch_cache::Image image(entry.image());
        //    log("  loadAddr=%p, path=%s\n", entry.loadedAddress(), image.path());
        //});

        // make copy of LoadedImages we want to remove,
        // because unloadables[] points into the ChunkedVector we are shrinking
        uint32_t removalCount = 0;
        for (uint32_t i=0; i < unloadableCount; ++i) {
            if ( !unloadableInUse[i] )
                ++removalCount;
        }
        if ( removalCount > 0 ) {
            STACK_ALLOC_DYNARRAY(loader::ImageInfo, removalCount, unloadImages);
            uint32_t removalIndex = 0;
            for (uint32_t i=0; i < unloadableCount; ++i) {
                if ( !unloadableInUse[i] ) {
                    unloadImages[removalIndex].loadAddress = unloadables[i]->loadedAddress();
                    unloadImages[removalIndex].imageData   = unloadables[i]->image();
                    ++removalIndex;
                }
            }
            // remove entries from sLoadedImages
            removeImages(unloadImages);

            //log("sLoadedImages after GC removals:\n");
            //sLoadedImages.dump(^(const LoadedImage& entry) {
            //    const launch_cache::Image image(entry.image());
            //    log("  loadAddr=%p, path=%s\n", entry.loadedAddress(), image.path());
            //});
        }

        // if some other thread called GC during our work, redo GC on its behalf
        newCount = OSAtomicDecrement32(&_gcCount);
    }
    while (newCount > 0);
}



VIS_HIDDEN
const launch_cache::binary_format::Image* AllImages::messageClosured(const char* path, const char* apiName, const char* closuredErrorMessages[3], int& closuredErrorMessagesCount)
{
    __block const launch_cache::binary_format::Image* result = nullptr;
    sKnownGroups.withWriteLock(^() {
        ClosureBuffer::CacheIdent cacheIdent;
        bzero(&cacheIdent, sizeof(cacheIdent));
        if ( _dyldCacheAddress != nullptr ) {
            const DyldSharedCache* dyldCache = (DyldSharedCache*)_dyldCacheAddress;
            dyldCache->getUUID(cacheIdent.cacheUUID);
            cacheIdent.cacheAddress    = (unsigned long)_dyldCacheAddress;
            cacheIdent.cacheMappedSize = dyldCache->mappedSize();
        }
        gPathOverrides.forEachPathVariant(path, ^(const char* possiblePath, bool& stopVariants) {
            struct stat statBuf;
            if ( stat(possiblePath, &statBuf) == 0 ) {
                if ( S_ISDIR(statBuf.st_mode) ) {
                    log_apis("   %s: path is directory: %s\n", apiName, possiblePath);
                    if ( closuredErrorMessagesCount < 3 )
                        closuredErrorMessages[closuredErrorMessagesCount++] = strdup("not a file");
                }
                else {
                    // file exists, ask closured to build info for it
                    STACK_ALLOC_DYNARRAY(const launch_cache::BinaryImageGroupData*, sKnownGroups.countNoLock(), currentGroupsList);
                    gAllImages.copyCurrentGroupsNoLock(currentGroupsList);
                    dyld3::launch_cache::DynArray<const dyld3::launch_cache::binary_format::ImageGroup*> nonCacheGroupList(currentGroupsList.count()-2, &currentGroupsList[2]);
                    const dyld3::launch_cache::binary_format::ImageGroup* closuredCreatedGroupData = nullptr;
                    ClosureBuffer closureBuilderInput(cacheIdent, path, nonCacheGroupList, gPathOverrides);
                    ClosureBuffer closureBuilderOutput = dyld3::closured_CreateImageGroup(closureBuilderInput);
                    if ( !closureBuilderOutput.isError() ) {
                        vm_protect(mach_task_self(), closureBuilderOutput.vmBuffer(), closureBuilderOutput.vmBufferSize(), false, VM_PROT_READ);
                        closuredCreatedGroupData = closureBuilderOutput.imageGroup();
                        log_apis("   %s: closured built ImageGroup for path: %s\n", apiName, possiblePath);
                        sKnownGroups.addNoLock(closuredCreatedGroupData);
                        launch_cache::ImageGroup group(closuredCreatedGroupData);
                        result = group.imageBinary(0);
                        stopVariants = true;
                    }
                    else {
                        log_apis("   %s: closured failed for path: %s, error: %s\n", apiName, possiblePath, closureBuilderOutput.errorMessage());
                        if ( closuredErrorMessagesCount < 3 ) {
                            closuredErrorMessages[closuredErrorMessagesCount++] = strdup(closureBuilderOutput.errorMessage());
                        }
                        closureBuilderOutput.free();
                    }
                }
            }
            else {
                log_apis("   %s: file does not exist for path: %s\n", apiName, possiblePath);
            }
        });
    });

    return result;
}

const AllImages::BinaryImage* AllImages::findImageInKnownGroups(const char* path)
{
    __block const AllImages::BinaryImage* result = nullptr;
    sKnownGroups.forEachWithReadLock(^(uint32_t index, const dyld3::launch_cache::binary_format::ImageGroup* const& grpData, bool& stop) {
        launch_cache::ImageGroup group(grpData);
        uint32_t ignore;
        if ( const AllImages::BinaryImage* binImage = group.findImageByPath(path, ignore) ) {
            result = binImage;
            stop = true;
        }
    });
    return result;
}

bool AllImages::imageUnloadable(const launch_cache::Image& image, const mach_header* loadAddress) const
{
    // check if statically determined in closure that this can never be unloaded
    if ( image.neverUnload() )
        return false;

    // check if some runtime decision made this be never-unloadable
    __block bool foundAsNeverUnload = false;
    sLoadedImages.forEachWithReadLock(^(uint32_t anIndex, const LoadedImage& loadedImage, bool& stop) {
        if ( loadedImage.loadedAddress() == loadAddress ) {
            stop = true;
            if ( loadedImage.neverUnload() )
                foundAsNeverUnload = true;
        }
    });
    if ( foundAsNeverUnload )
        return false;

    return true;
}

void AllImages::addLoadNotifier(NotifyFunc func)
{
    // callback about already loaded images
    const uint32_t existingCount = sLoadedImages.count();
    const mach_header* existingMHs[existingCount];
    const mach_header** existingArray = existingMHs;
    sLoadedImages.forEachWithReadLock(^(uint32_t anIndex, const LoadedImage& loadedImage, bool& stop) {
        if ( anIndex < existingCount )
            existingArray[anIndex] = loadedImage.loadedAddress();
    });
    // we don't want to hold lock while calling out, so prebuild array (with lock) then do calls on that array (without lock)
    for (uint32_t i=0; i < existingCount; i++) {
        MachOParser parser(existingArray[i]);
        log_notifications("dyld: add notifier %p called with mh=%p\n", func, existingArray[i]);
        func(existingArray[i], parser.getSlide());
    }

    // add to list of functions to call about future loads
    sLoadNotifiers.add(func);
}

void AllImages::addUnloadNotifier(NotifyFunc func)
{
    // add to list of functions to call about future unloads
    sUnloadNotifiers.add(func);
}

void AllImages::setObjCNotifiers(_dyld_objc_notify_mapped map, _dyld_objc_notify_init init, _dyld_objc_notify_unmapped unmap)
{
    _objcNotifyMapped   = map;
    _objcNotifyInit     = init;
    _objcNotifyUnmapped = unmap;

    // callback about already loaded images
    uint32_t maxCount = count();
    const char* pathsBuffer[maxCount];
    const mach_header* mhBuffer[maxCount];
    __block const char** paths = pathsBuffer;
    __block const mach_header** mhs = mhBuffer;
    __block uint32_t imagesWithObjC = 0;
    sLoadedImages.forEachWithReadLock(^(uint32_t anIndex, const LoadedImage& loadedImage, bool& stop) {
        launch_cache::Image img(loadedImage.image());
        if ( img.hasObjC() ) {
            mhs[imagesWithObjC]   = loadedImage.loadedAddress();
            paths[imagesWithObjC] = imagePath(loadedImage.image());
            ++imagesWithObjC;
        }
    });
    if ( imagesWithObjC != 0 ) {
        (*map)(imagesWithObjC, pathsBuffer, mhBuffer);
        if ( log_notifications("dyld: objc-mapped-notifier called with %d images:\n", imagesWithObjC) ) {
            for (uint32_t i=0; i < imagesWithObjC; ++i) {
                log_notifications("dyld: objc-mapped: %p %s\n", mhBuffer[i], pathsBuffer[i]);
            }
        }
    }
}

void AllImages::vmAccountingSetSuspended(bool suspend)
{
#if __arm__ || __arm64__
    // <rdar://problem/29099600> dyld should tell the kernel when it is doing fix-ups caused by roots
    log_fixups("vm.footprint_suspend=%d\n", suspend);
    int newValue = suspend ? 1 : 0;
    int oldValue = 0;
    size_t newlen = sizeof(newValue);
    size_t oldlen = sizeof(oldValue);
    sysctlbyname("vm.footprint_suspend", &oldValue, &oldlen, &newValue, newlen);
#endif
}

void AllImages::applyInterposingToDyldCache(const launch_cache::binary_format::Closure* closure, const dyld3::launch_cache::DynArray<loader::ImageInfo>& initialImages)
{
    launch_cache::Closure mainClosure(closure);
    launch_cache::ImageGroup mainGroup = mainClosure.group();
    DyldCacheParser cacheParser((DyldSharedCache*)_dyldCacheAddress, false);
    const launch_cache::binary_format::ImageGroup* dylibsGroupData = cacheParser.cachedDylibsGroup();
    launch_cache::ImageGroup dyldCacheDylibGroup(dylibsGroupData);
    __block bool suspendedAccounting = false;
    mainGroup.forEachDyldCacheSymbolOverride(^(uint32_t patchTableIndex, const launch_cache::binary_format::Image* imageData, uint32_t imageOffset, bool& stop) {
        bool foundInImages = false;
        for (int i=0; i < initialImages.count(); ++i) {
            if ( initialImages[i].imageData == imageData ) {
                foundInImages = true;
                uintptr_t replacement = (uintptr_t)(initialImages[i].loadAddress) + imageOffset;
                dyldCacheDylibGroup.forEachDyldCachePatchLocation(_dyldCacheAddress, patchTableIndex, ^(uintptr_t* locationToPatch, uintptr_t addend, bool& innerStop) {
                    if ( !suspendedAccounting ) {
                        vmAccountingSetSuspended(true);
                        suspendedAccounting = true;
                    }
                    log_fixups("dyld: cache fixup: *%p = %p\n", locationToPatch, (void*)replacement);
                    *locationToPatch = replacement + addend;
                });
                break;
            }
        }
        if ( !foundInImages ) {
            launch_cache::Image img(imageData);
            log_fixups("did not find loaded image to patch into cache: %s\n", img.path());
        }
    });
    if ( suspendedAccounting )
        vmAccountingSetSuspended(false);
}

void AllImages::runLibSystemInitializer(const mach_header* libSystemAddress, const launch_cache::binary_format::Image* libSystemBinImage)
{
    // run all initializers in image
    launch_cache::Image libSystemImage(libSystemBinImage);
    libSystemImage.forEachInitializer(libSystemAddress, ^(const void* func) {
        Initializer initFunc = (Initializer)func;
        dyld3::kdebug_trace_dyld_duration(DBG_DYLD_TIMING_STATIC_INITIALIZER, (uint64_t)func, 0, ^{
            initFunc(NXArgc, NXArgv, environ, appleParams, _programVars);
        });
        log_initializers("called initializer %p in %s\n", initFunc, libSystemImage.path());
    });

    // mark libSystem.dylib as inited, so a later recursive initialization won't re-run it
    sLoadedImages.forEachWithWriteLock(^(uint32_t anIndex, LoadedImage& loadedImage, bool& stop) {
        if ( loadedImage.loadedAddress() == libSystemAddress ) {
            loadedImage.setState(LoadedImage::State::inited);
            stop = true;
        }
    });
}

void AllImages::runInitialzersBottomUp(const mach_header* imageLoadAddress)
{
    launch_cache::Image topImage = findByLoadAddress(imageLoadAddress);
    if ( topImage.isInvalid() )
        return;

    // closure contains list of initializers to run in-order
    STACK_ALLOC_DYNARRAY(const launch_cache::BinaryImageGroupData*, currentGroupsCount(), currentGroupsList);
    copyCurrentGroups(currentGroupsList);
    topImage.forEachInitBefore(currentGroupsList, ^(launch_cache::Image imageToInit) {
        // find entry
        __block LoadedImage* foundEntry = nullptr;
        sLoadedImages.forEachWithReadLock(^(uint32_t index, const LoadedImage& entry, bool& stop) {
            if ( entry.image() == imageToInit.binaryData() ) {
                foundEntry = (LoadedImage*)&entry;
                stop = true;
            }
        });
        assert(foundEntry != nullptr);
        pthread_mutex_lock(&_initializerLock);
        // Note: due to the large lock in dlopen, we can't be waiting on another thread
        // here, but it is possible that we are in a dlopen which is initializing us again
        if ( foundEntry->state() == LoadedImage::State::beingInited ) {
            log_initializers("dyld: already initializing '%s'\n", imagePath(imageToInit.binaryData()));
        }
        // at this point, the image is either initialized or not
        // if not, initialize it on this thread
        if ( foundEntry->state() == LoadedImage::State::uninited ) {
            foundEntry->setState(LoadedImage::State::beingInited);
            // release initializer lock, so other threads can run initializers
            pthread_mutex_unlock(&_initializerLock);
            // tell objc to run any +load methods in image
            if ( (_objcNotifyInit != nullptr) && imageToInit.mayHavePlusLoads() ) {
                log_notifications("dyld: objc-init-notifier called with mh=%p, path=%s\n", foundEntry->loadedAddress(), imagePath(imageToInit.binaryData()));
                (*_objcNotifyInit)(imagePath(imageToInit.binaryData()), foundEntry->loadedAddress());
            }
            // run all initializers in image
            imageToInit.forEachInitializer(foundEntry->loadedAddress(), ^(const void* func) {
                Initializer initFunc = (Initializer)func;
                dyld3::kdebug_trace_dyld_duration(DBG_DYLD_TIMING_STATIC_INITIALIZER, (uint64_t)func, 0, ^{
                    initFunc(NXArgc, NXArgv, environ, appleParams, _programVars);
                });
                log_initializers("dyld: called initializer %p in %s\n", initFunc, imageToInit.path());
            });
            // reacquire initializer lock to switch state to inited
            pthread_mutex_lock(&_initializerLock);
            foundEntry->setState(LoadedImage::State::inited);
        }
        pthread_mutex_unlock(&_initializerLock);
    });
}


} // namespace dyld3