/*
 * Copyright (c) 2017 Apple Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */


#include <stdint.h>
#include <sys/stat.h>
#include <sys/sysctl.h>
#include <mach/mach_time.h> // mach_absolute_time()
#include <pthread/pthread.h>
#include <libkern/OSAtomic.h>

#include <vector>
#include <algorithm>

#include "AllImages.h"
#include "MachOParser.h"
#include "libdyldEntryVector.h"
#include "Logging.h"
#include "Loading.h"
#include "Tracing.h"
#include "LaunchCache.h"
#include "DyldSharedCache.h"
#include "PathOverrides.h"
#include "DyldCacheParser.h"

extern const char** appleParams;

// should be a header for these
struct __cxa_range_t {
    const void* addr;
    size_t length;
};
extern "C" void __cxa_finalize_ranges(const __cxa_range_t ranges[], unsigned int count);

VIS_HIDDEN bool gUseDyld3 = false;


namespace dyld3 {

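// A LoadedImage pairs a mach_header* with its launch-cache Image data.
// Since mach-o load addresses are at least 8-byte aligned, the low three bits
// of _loadAddress are free and are borrowed as flags: bit2 = never-unload,
// bits 0-1 = initializer State.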
class VIS_HIDDEN LoadedImage {
public:
    enum class State { uninited=3, beingInited=2, inited=0 };
    typedef launch_cache::binary_format::Image BinaryImage;

    LoadedImage(const mach_header* mh, const BinaryImage* bi);
    bool operator==(const LoadedImage& rhs) const;
    void init(const mach_header* mh, const BinaryImage* bi);
    const mach_header* loadedAddress() const { return (mach_header*)((uintptr_t)_loadAddress & ~0x7ULL); }
    State state() const { return (State)((uintptr_t)_loadAddress & 0x3ULL); }
    const BinaryImage* image() const { return _image; }
    bool neverUnload() const { return ((uintptr_t)_loadAddress & 0x4ULL); }
    void setState(State s) { _loadAddress = (mach_header*)((((uintptr_t)_loadAddress) & ~0x3ULL) | (uintptr_t)s); }
    void setNeverUnload() { _loadAddress = (mach_header*)(((uintptr_t)_loadAddress) | 0x4ULL); }

private:
    const mach_header* _loadAddress; // low bits: bit2=neverUnload, bit1/bit0 contain State
    const BinaryImage* _image;
};


bool LoadedImage::operator==(const LoadedImage& rhs) const
{
    return (_image == rhs._image) && (loadedAddress() == rhs.loadedAddress());
}


struct VIS_HIDDEN DlopenCount {
    bool operator==(const DlopenCount& rhs) const;
    const mach_header* loadAddress;
    uintptr_t refCount;
};

bool DlopenCount::operator==(const DlopenCount& rhs) const
{
    return (loadAddress == rhs.loadAddress) && (refCount == rhs.refCount);
}

LoadedImage::LoadedImage(const mach_header* mh, const BinaryImage* bi)
    : _loadAddress(mh), _image(bi)
{
    assert(loadedAddress() == mh);
    setState(State::uninited);
}

void LoadedImage::init(const mach_header* mh, const BinaryImage* bi)
{
    _loadAddress = mh;
    _image = bi;
    assert(loadedAddress() == mh);
    setState(State::uninited);
}

// forward reference
template <typename T, int C> class ReaderWriterChunkedVector;

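// ChunkedVector stores elements in fixed-capacity chunks linked into a list,
// with the first C elements held inline. Chunks are never reallocated, so
// pointers to elements remain stable while elements are added.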
template <typename T, int C>
class VIS_HIDDEN ChunkedVector {
public:
    static ChunkedVector<T,C>* make(uint32_t count);

    void forEach(uint32_t& startIndex, bool& outerStop, void (^callback)(uint32_t index, const T& value, bool& stop)) const;
    void forEach(uint32_t& startIndex, bool& outerStop, void (^callback)(uint32_t index, T& value, bool& stop));
    T* add(const T& value);
    T* add(uint32_t count, const T values[]);
    void remove(uint32_t index);
    uint32_t count() const { return _inUseCount; }
    uint32_t freeCount() const { return _allocCount - _inUseCount; }
private:
    T& element(uint32_t index) { return ((T*)_elements)[index]; }
    const T& element(uint32_t index) const { return ((T*)_elements)[index]; }

    friend class ReaderWriterChunkedVector<T,C>;

    ChunkedVector<T,C>* _next = nullptr;
    uint32_t _allocCount = C;
    uint32_t _inUseCount = 0;
    uint8_t _elements[C*sizeof(T)] = { 0 };
};

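// ReaderWriterChunkedVector wraps a ChunkedVector with a pthread reader/writer
// lock. Multiple readers may iterate concurrently; additions and removals take
// the write lock. The *NoLock variants are for use before libSystem (and thus
// the lock machinery) is fully up, or when the caller already holds the lock.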
template <typename T, int C>
class VIS_HIDDEN ReaderWriterChunkedVector {
public:
    T* add(uint32_t count, const T values[]);
    T* add(const T& value) { return add(1, &value); }
    T* addNoLock(uint32_t count, const T values[]);
    T* addNoLock(const T& value) { return addNoLock(1, &value); }
    void remove(const T& value);
    uint32_t count() const;
    void forEachWithReadLock(void (^callback)(uint32_t index, const T& value, bool& stop)) const;
    void forEachWithWriteLock(void (^callback)(uint32_t index, T& value, bool& stop));
    void forEachNoLock(void (^callback)(uint32_t index, const T& value, bool& stop)) const;
    T& operator[](size_t index);
    uint32_t countNoLock() const;

    void withReadLock(void (^withLock)()) const;
    void withWriteLock(void (^withLock)()) const;
    void acquireWriteLock();
    void releaseWriteLock();
    void dump(void (^callback)(const T& value)) const;

private:
    mutable pthread_rwlock_t _lock = PTHREAD_RWLOCK_INITIALIZER;
    ChunkedVector<T,C> _firstChunk;
};


typedef void (*NotifyFunc)(const mach_header* mh, intptr_t slide);

static ReaderWriterChunkedVector<NotifyFunc, 4> sLoadNotifiers;
static ReaderWriterChunkedVector<NotifyFunc, 4> sUnloadNotifiers;
static ReaderWriterChunkedVector<LoadedImage, 4> sLoadedImages;
static ReaderWriterChunkedVector<DlopenCount, 4> sDlopenRefCounts;
static ReaderWriterChunkedVector<const launch_cache::BinaryImageGroupData*, 4> sKnownGroups;
#if __MAC_OS_X_VERSION_MIN_REQUIRED
static ReaderWriterChunkedVector<__NSObjectFileImage, 2> sNSObjectFileImages;
#endif


///////////////////// ChunkedVector ////////////////////////////

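// The struct already embeds C elements inline; for a larger chunk, make()
// over-allocates by (count-C) extra elements so the additional storage
// directly follows _elements in memory.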
template <typename T, int C>
ChunkedVector<T,C>* ChunkedVector<T,C>::make(uint32_t count)
{
    size_t size = sizeof(ChunkedVector) + sizeof(T) * (count-C);
    ChunkedVector<T,C>* result = (ChunkedVector<T,C>*)malloc(size);
    result->_next = nullptr;
    result->_allocCount = count;
    result->_inUseCount = 0;
    return result;
}

template <typename T, int C>
void ChunkedVector<T,C>::forEach(uint32_t& outerIndex, bool& outerStop, void (^callback)(uint32_t index, const T& value, bool& stop)) const
{
    for (uint32_t i=0; i < _inUseCount; ++i) {
        callback(outerIndex, element(i), outerStop);
        ++outerIndex;
        if ( outerStop )
            break;
    }
}

template <typename T, int C>
void ChunkedVector<T,C>::forEach(uint32_t& outerIndex, bool& outerStop, void (^callback)(uint32_t index, T& value, bool& stop))
{
    for (uint32_t i=0; i < _inUseCount; ++i) {
        callback(outerIndex, element(i), outerStop);
        ++outerIndex;
        if ( outerStop )
            break;
    }
}

template <typename T, int C>
T* ChunkedVector<T,C>::add(const T& value)
{
    return add(1, &value);
}

template <typename T, int C>
T* ChunkedVector<T,C>::add(uint32_t count, const T values[])
{
    assert(count <= (_allocCount - _inUseCount));
    T* result = &element(_inUseCount);
    memmove(result, values, sizeof(T)*count);
    _inUseCount += count;
    return result;
}

template <typename T, int C>
void ChunkedVector<T,C>::remove(uint32_t index)
{
    assert(index < _inUseCount);
    int moveCount = _inUseCount - index - 1;
    if ( moveCount >= 1 ) {
        memmove(&element(index), &element(index+1), sizeof(T)*moveCount);
    }
    _inUseCount--;
}

///////////////////// ReaderWriterChunkedVector ////////////////////////////


template <typename T, int C>
void ReaderWriterChunkedVector<T,C>::withReadLock(void (^work)()) const
{
    int result = pthread_rwlock_rdlock(&_lock);
    assert(result == 0);
    work();
    result = pthread_rwlock_unlock(&_lock);
    assert(result == 0);
    (void)result;
}

template <typename T, int C>
void ReaderWriterChunkedVector<T,C>::withWriteLock(void (^work)()) const
{
    int result = pthread_rwlock_wrlock(&_lock);
    assert(result == 0);
    work();
    result = pthread_rwlock_unlock(&_lock);
    assert(result == 0);
    (void)result;
}

template <typename T, int C>
void ReaderWriterChunkedVector<T,C>::acquireWriteLock()
{
    int result = pthread_rwlock_wrlock(&_lock);
    assert(result == 0);
    (void)result;
}

template <typename T, int C>
void ReaderWriterChunkedVector<T,C>::releaseWriteLock()
{
    int result = pthread_rwlock_unlock(&_lock);
    assert(result == 0);
    (void)result;
}

template <typename T, int C>
uint32_t ReaderWriterChunkedVector<T,C>::count() const
{
    __block uint32_t result = 0;
    withReadLock(^() {
        for (const ChunkedVector<T,C>* chunk = &_firstChunk; chunk != nullptr; chunk = chunk->_next) {
            result += chunk->count();
        }
    });
    return result;
}

template <typename T, int C>
uint32_t ReaderWriterChunkedVector<T,C>::countNoLock() const
{
    uint32_t result = 0;
    for (const ChunkedVector<T,C>* chunk = &_firstChunk; chunk != nullptr; chunk = chunk->_next) {
        result += chunk->count();
    }
    return result;
}

template <typename T, int C>
T* ReaderWriterChunkedVector<T,C>::addNoLock(uint32_t count, const T values[])
{
    T* result = nullptr;
    ChunkedVector<T,C>* lastChunk = &_firstChunk;
    while ( lastChunk->_next != nullptr )
        lastChunk = lastChunk->_next;

    if ( lastChunk->freeCount() >= count ) {
        // append to last chunk
        result = lastChunk->add(count, values);
    }
    else {
        // append new chunk
        uint32_t allocCount = count;
        uint32_t remainder = count % C;
        if ( remainder != 0 )
            allocCount = count + C - remainder;
        ChunkedVector<T,C>* newChunk = ChunkedVector<T,C>::make(allocCount);
        result = newChunk->add(count, values);
        lastChunk->_next = newChunk;
    }

    return result;
}

template <typename T, int C>
T* ReaderWriterChunkedVector<T,C>::add(uint32_t count, const T values[])
{
    __block T* result = nullptr;
    withWriteLock(^() {
        result = addNoLock(count, values);
    });
    return result;
}

template <typename T, int C>
void ReaderWriterChunkedVector<T,C>::remove(const T& valueToRemove)
{
    __block bool stopStorage = false;
    withWriteLock(^() {
        ChunkedVector<T,C>* chunkNowEmpty = nullptr;
        __block uint32_t indexStorage = 0;
        __block bool found = false;
        for (ChunkedVector<T,C>* chunk = &_firstChunk; chunk != nullptr; chunk = chunk->_next) {
            uint32_t chunkStartIndex = indexStorage;
            __block uint32_t foundIndex = 0;
            chunk->forEach(indexStorage, stopStorage, ^(uint32_t index, const T& value, bool& stop) {
                if ( value == valueToRemove ) {
                    foundIndex = index - chunkStartIndex;
                    found = true;
                    stop = true;
                }
            });
            if ( found ) {
                chunk->remove(foundIndex);
                found = false;
                if ( chunk->count() == 0 )
                    chunkNowEmpty = chunk;
            }
        }
        // if chunk is now empty, remove from linked list and free
        if ( chunkNowEmpty ) {
            for (ChunkedVector<T,C>* chunk = &_firstChunk; chunk != nullptr; chunk = chunk->_next) {
                if ( chunk->_next == chunkNowEmpty ) {
                    chunk->_next = chunkNowEmpty->_next;
                    if ( chunkNowEmpty != &_firstChunk )
                        free(chunkNowEmpty);
                    break;
                }
            }
        }
    });
}

template <typename T, int C>
void ReaderWriterChunkedVector<T,C>::forEachWithReadLock(void (^callback)(uint32_t index, const T& value, bool& stop)) const
{
    __block uint32_t index = 0;
    __block bool stop = false;
    withReadLock(^() {
        for (const ChunkedVector<T,C>* chunk = &_firstChunk; chunk != nullptr; chunk = chunk->_next) {
            chunk->forEach(index, stop, callback);
            if ( stop )
                break;
        }
    });
}

template <typename T, int C>
void ReaderWriterChunkedVector<T,C>::forEachWithWriteLock(void (^callback)(uint32_t index, T& value, bool& stop))
{
    __block uint32_t index = 0;
    __block bool stop = false;
    withWriteLock(^() {
        for (ChunkedVector<T,C>* chunk = &_firstChunk; chunk != nullptr; chunk = chunk->_next) {
            chunk->forEach(index, stop, callback);
            if ( stop )
                break;
        }
    });
}

template <typename T, int C>
void ReaderWriterChunkedVector<T,C>::forEachNoLock(void (^callback)(uint32_t index, const T& value, bool& stop)) const
{
    uint32_t index = 0;
    bool stop = false;
    for (const ChunkedVector<T,C>* chunk = &_firstChunk; chunk != nullptr; chunk = chunk->_next) {
        chunk->forEach(index, stop, callback);
        if ( stop )
            break;
    }
}

template <typename T, int C>
T& ReaderWriterChunkedVector<T,C>::operator[](size_t targetIndex)
{
    __block T* result = nullptr;
    forEachNoLock(^(uint32_t index, T const& value, bool& stop) {
        if ( index == targetIndex ) {
            result = (T*)&value;
            stop = true;
        }
    });
    assert(result != nullptr);
    return *result;
}

template <typename T, int C>
void ReaderWriterChunkedVector<T,C>::dump(void (^callback)(const T& value)) const
{
    log("dump ReaderWriterChunkedVector at %p\n", this);
    __block uint32_t index = 0;
    __block bool stop = false;
    withReadLock(^() {
        for (const ChunkedVector<T,C>* chunk = &_firstChunk; chunk != nullptr; chunk = chunk->_next) {
            log(" chunk at %p\n", chunk);
            chunk->forEach(index, stop, ^(uint32_t i, const T& value, bool& s) {
                callback(value);
            });
        }
    });
}



///////////////////// AllImages ////////////////////////////


AllImages gAllImages;


void AllImages::init(const BinaryClosure* closure, const void* dyldCacheLoadAddress, const char* dyldCachePath,
                     const dyld3::launch_cache::DynArray<loader::ImageInfo>& initialImages)
{
    _mainClosure = closure;
    _initialImages = &initialImages;
    _dyldCacheAddress = dyldCacheLoadAddress;
    _dyldCachePath = dyldCachePath;

    // Make temporary old image array, so libSystem initializers can be debugged
    uint32_t count = (uint32_t)initialImages.count();
    dyld_image_info oldDyldInfo[count];
    for (uint32_t i=0; i < count; ++i) {
        launch_cache::Image img(initialImages[i].imageData);
        oldDyldInfo[i].imageLoadAddress = initialImages[i].loadAddress;
        oldDyldInfo[i].imageFilePath = img.path();
        oldDyldInfo[i].imageFileModDate = 0;
    }
    _oldAllImageInfos->infoArray = oldDyldInfo;
    _oldAllImageInfos->infoArrayCount = count;
    _oldAllImageInfos->notification(dyld_image_adding, count, oldDyldInfo);
    _oldAllImageInfos->infoArray = nullptr;
    _oldAllImageInfos->infoArrayCount = 0;
}

void AllImages::setProgramVars(ProgramVars* vars)
{
    _programVars = vars;
}

void AllImages::applyInitialImages()
{
    addImages(*_initialImages);
    _initialImages = nullptr; // this was stack allocated
}

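// Mirror the dyld3 image list into the legacy dyld_all_image_infos structure,
// so debuggers and older introspection APIs continue to see every loaded image.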
void AllImages::mirrorToOldAllImageInfos()
{
    // set infoArray to NULL to denote it is in-use
    _oldAllImageInfos->infoArray = nullptr;

    // if array is not large enough, re-alloc it
    uint32_t imageCount = sLoadedImages.countNoLock();
    if ( _oldArrayAllocCount < imageCount ) {
        uint32_t newAllocCount = imageCount + 16;
        dyld_image_info* newArray = (dyld_image_info*)malloc(sizeof(dyld_image_info)*newAllocCount);
        if ( _oldAllImageArray != nullptr ) {
            memcpy(newArray, _oldAllImageArray, sizeof(dyld_image_info)*_oldAllImageInfos->infoArrayCount);
            free(_oldAllImageArray);
        }
        _oldAllImageArray = newArray;
        _oldArrayAllocCount = newAllocCount;
    }

    // fill out array to mirror current image list
    sLoadedImages.forEachNoLock(^(uint32_t index, const LoadedImage& loadedImage, bool& stop) {
        launch_cache::Image img(loadedImage.image());
        _oldAllImageArray[index].imageLoadAddress = loadedImage.loadedAddress();
        _oldAllImageArray[index].imageFilePath = imagePath(loadedImage.image());
        _oldAllImageArray[index].imageFileModDate = 0;
    });

    // set infoArray back to base address of array (so other processes can now read it)
    _oldAllImageInfos->infoArrayCount = imageCount;
    _oldAllImageInfos->infoArrayChangeTimestamp = mach_absolute_time();
    _oldAllImageInfos->infoArray = _oldAllImageArray;
}

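// addImages records newly loaded images, then runs the notification sequence:
// mirror to the legacy all_image_infos, tell the debugger, emit kdebug traces,
// invoke _dyld_register_func_for_add_image callbacks, notify objc, and finally
// notify any external processes monitoring this one.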
void AllImages::addImages(const launch_cache::DynArray<loader::ImageInfo>& newImages)
{
    uint32_t count = (uint32_t)newImages.count();
    assert(count != 0);

    // build stack array of LoadedImage to copy into sLoadedImages
    STACK_ALLOC_DYNARRAY(LoadedImage, count, loadedImagesArray);
    for (uint32_t i=0; i < count; ++i) {
        loadedImagesArray[i].init(newImages[i].loadAddress, newImages[i].imageData);
        if (newImages[i].neverUnload)
            loadedImagesArray[i].setNeverUnload();
    }
    sLoadedImages.add(count, &loadedImagesArray[0]);

    if ( _oldAllImageInfos != nullptr ) {
        // sync to old all image infos struct
        if ( _initialImages != nullptr ) {
            // libSystem not initialized yet, don't use locks
            mirrorToOldAllImageInfos();
        }
        else {
            sLoadedImages.withReadLock(^{
                mirrorToOldAllImageInfos();
            });
        }

        // tell debugger about new images
        dyld_image_info oldDyldInfo[count];
        for (uint32_t i=0; i < count; ++i) {
            launch_cache::Image img(newImages[i].imageData);
            oldDyldInfo[i].imageLoadAddress = newImages[i].loadAddress;
            oldDyldInfo[i].imageFilePath = imagePath(newImages[i].imageData);
            oldDyldInfo[i].imageFileModDate = 0;
        }
        _oldAllImageInfos->notification(dyld_image_adding, count, oldDyldInfo);
    }

    // log loads
    for (uint32_t i=0; i < count; ++i) {
        launch_cache::Image img(newImages[i].imageData);
        log_loads("dyld: %s\n", imagePath(newImages[i].imageData));
    }

#if !TARGET_IPHONE_SIMULATOR
    // call kdebug trace for each image
    if (kdebug_is_enabled(KDBG_CODE(DBG_DYLD, DBG_DYLD_UUID, DBG_DYLD_UUID_MAP_A))) {
        for (uint32_t i=0; i < count; ++i) {
            launch_cache::Image img(newImages[i].imageData);
            struct stat stat_buf;
            fsid_t fsid = {{ 0, 0 }};
            fsobj_id_t fsobjid = { 0, 0 };
            if (img.isDiskImage() && stat(imagePath(newImages[i].imageData), &stat_buf) == 0 ) {
                fsobjid = *(fsobj_id_t*)&stat_buf.st_ino;
                fsid = {{ stat_buf.st_dev, 0 }};
            }
            kdebug_trace_dyld_image(DBG_DYLD_UUID_MAP_A, img.uuid(), fsobjid, fsid, newImages[i].loadAddress);
        }
    }
#endif
    // call each _dyld_register_func_for_add_image function with each image
    const uint32_t existingNotifierCount = sLoadNotifiers.count();
    NotifyFunc existingNotifiers[existingNotifierCount];
    NotifyFunc* existingNotifierArray = existingNotifiers;
    sLoadNotifiers.forEachWithReadLock(^(uint32_t index, const NotifyFunc& func, bool& stop) {
        if ( index < existingNotifierCount )
            existingNotifierArray[index] = func;
    });
    // we don't want to hold the lock while calling out, so prebuild the array (with lock) then do the calls on that array (without lock)
    for (uint32_t j=0; j < existingNotifierCount; ++j) {
        NotifyFunc func = existingNotifierArray[j];
        for (uint32_t i=0; i < count; ++i) {
            MachOParser parser(newImages[i].loadAddress);
            log_notifications("dyld: add notifier %p called with mh=%p\n", func, newImages[i].loadAddress);
            func(newImages[i].loadAddress, parser.getSlide());
        }
    }

    // call objc about images that use objc
    if ( _objcNotifyMapped != nullptr ) {
        const char* pathsBuffer[count];
        const mach_header* mhBuffer[count];
        uint32_t imagesWithObjC = 0;
        for (uint32_t i=0; i < count; ++i) {
            launch_cache::Image img(newImages[i].imageData);
            if ( img.hasObjC() ) {
                pathsBuffer[imagesWithObjC] = imagePath(newImages[i].imageData);
                mhBuffer[imagesWithObjC] = newImages[i].loadAddress;
                ++imagesWithObjC;
            }
        }
        if ( imagesWithObjC != 0 ) {
            (*_objcNotifyMapped)(imagesWithObjC, pathsBuffer, mhBuffer);
            if ( log_notifications("dyld: objc-mapped-notifier called with %d images:\n", imagesWithObjC) ) {
                for (uint32_t i=0; i < imagesWithObjC; ++i) {
                    log_notifications("dyld: objc-mapped: %p %s\n", mhBuffer[i], pathsBuffer[i]);
                }
            }
        }
    }

    // notify any processes tracking loads in this process
    notifyMonitorLoads(newImages);
}

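// removeImages runs the add sequence in reverse: remove-image callbacks and
// objc unmap notifications fire first (while the image can still be queried),
// then the images are dropped from the bookkeeping, the debugger is told, and
// the memory is finally unmapped.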
void AllImages::removeImages(const launch_cache::DynArray<loader::ImageInfo>& unloadImages)
{
    uint32_t count = (uint32_t)unloadImages.count();
    assert(count != 0);

    // call each _dyld_register_func_for_remove_image function with each image
    // do this before removing the image from internal data structures so that the callback can query dyld about the image
    const uint32_t existingNotifierCount = sUnloadNotifiers.count();
    NotifyFunc existingNotifiers[existingNotifierCount];
    NotifyFunc* existingNotifierArray = existingNotifiers;
    sUnloadNotifiers.forEachWithReadLock(^(uint32_t index, const NotifyFunc& func, bool& stop) {
        if ( index < existingNotifierCount )
            existingNotifierArray[index] = func;
    });
    // we don't want to hold the lock while calling out, so prebuild the array (with lock) then do the calls on that array (without lock)
    for (uint32_t j=0; j < existingNotifierCount; ++j) {
        NotifyFunc func = existingNotifierArray[j];
        for (uint32_t i=0; i < count; ++i) {
            MachOParser parser(unloadImages[i].loadAddress);
            log_notifications("dyld: remove notifier %p called with mh=%p\n", func, unloadImages[i].loadAddress);
            func(unloadImages[i].loadAddress, parser.getSlide());
        }
    }

    // call objc about images going away
    if ( _objcNotifyUnmapped != nullptr ) {
        for (uint32_t i=0; i < count; ++i) {
            launch_cache::Image img(unloadImages[i].imageData);
            if ( img.hasObjC() ) {
                (*_objcNotifyUnmapped)(imagePath(unloadImages[i].imageData), unloadImages[i].loadAddress);
                log_notifications("dyld: objc-unmapped-notifier called with image %p %s\n", unloadImages[i].loadAddress, imagePath(unloadImages[i].imageData));
            }
        }
    }

#if !TARGET_IPHONE_SIMULATOR
    // call kdebug trace for each image
    if (kdebug_is_enabled(KDBG_CODE(DBG_DYLD, DBG_DYLD_UUID, DBG_DYLD_UUID_MAP_A))) {
        for (uint32_t i=0; i < count; ++i) {
            launch_cache::Image img(unloadImages[i].imageData);
            struct stat stat_buf;
            fsid_t fsid = {{ 0, 0 }};
            fsobj_id_t fsobjid = { 0, 0 };
            if (stat(imagePath(unloadImages[i].imageData), &stat_buf) == 0 ) {
                fsobjid = *(fsobj_id_t*)&stat_buf.st_ino;
                fsid = {{ stat_buf.st_dev, 0 }};
            }
            kdebug_trace_dyld_image(DBG_DYLD_UUID_UNMAP_A, img.uuid(), fsobjid, fsid, unloadImages[i].loadAddress);
        }
    }
#endif

    // remove each from sLoadedImages
    for (uint32_t i=0; i < count; ++i) {
        LoadedImage info(unloadImages[i].loadAddress, unloadImages[i].imageData);
        sLoadedImages.remove(info);
    }

    // sync to old all image infos struct
    sLoadedImages.withReadLock(^{
        mirrorToOldAllImageInfos();
    });

    // tell debugger about removed images
    dyld_image_info oldDyldInfo[count];
    for (uint32_t i=0; i < count; ++i) {
        launch_cache::Image img(unloadImages[i].imageData);
        oldDyldInfo[i].imageLoadAddress = unloadImages[i].loadAddress;
        oldDyldInfo[i].imageFilePath = imagePath(unloadImages[i].imageData);
        oldDyldInfo[i].imageFileModDate = 0;
    }
    _oldAllImageInfos->notification(dyld_image_removing, count, oldDyldInfo);

    // unmap images
    for (uint32_t i=0; i < count; ++i) {
        launch_cache::Image img(unloadImages[i].imageData);
        loader::unmapImage(unloadImages[i].imageData, unloadImages[i].loadAddress);
        log_loads("dyld: unloaded %s\n", imagePath(unloadImages[i].imageData));
    }

    // notify any processes tracking loads in this process
    notifyMonitorUnloads(unloadImages);
}

void AllImages::setNeverUnload(const loader::ImageInfo& existingImage)
{
    sLoadedImages.forEachWithWriteLock(^(uint32_t index, dyld3::LoadedImage& value, bool& stop) {
        if (value.image() == existingImage.imageData) {
            value.setNeverUnload();
            stop = true;
        }
    });
}

uint32_t AllImages::count() const
{
    return sLoadedImages.count();
}


launch_cache::Image AllImages::findByLoadOrder(uint32_t index, const mach_header** loadAddress) const
{
    __block const BinaryImage* foundImage = nullptr;
    sLoadedImages.forEachWithReadLock(^(uint32_t anIndex, const LoadedImage& loadedImage, bool& stop) {
        if ( anIndex == index ) {
            foundImage = loadedImage.image();
            *loadAddress = loadedImage.loadedAddress();
            stop = true;
        }
    });
    return launch_cache::Image(foundImage);
}

launch_cache::Image AllImages::findByLoadAddress(const mach_header* loadAddress) const
{
    __block const BinaryImage* foundImage = nullptr;
    sLoadedImages.forEachWithReadLock(^(uint32_t anIndex, const LoadedImage& loadedImage, bool& stop) {
        if ( loadedImage.loadedAddress() == loadAddress ) {
            foundImage = loadedImage.image();
            stop = true;
        }
    });
    return launch_cache::Image(foundImage);
}

bool AllImages::findIndexForLoadAddress(const mach_header* loadAddress, uint32_t& index)
{
    __block bool result = false;
    sLoadedImages.forEachWithReadLock(^(uint32_t anIndex, const LoadedImage& loadedImage, bool& stop) {
        if ( loadedImage.loadedAddress() == loadAddress ) {
            index = anIndex;
            result = true;
            stop = true;
        }
    });
    return result;
}

void AllImages::forEachImage(void (^handler)(uint32_t imageIndex, const mach_header* loadAddress, const launch_cache::Image image, bool& stop)) const
{
    sLoadedImages.forEachWithReadLock(^(uint32_t imageIndex, const LoadedImage& loadedImage, bool& stop) {
        handler(imageIndex, loadedImage.loadedAddress(), launch_cache::Image(loadedImage.image()), stop);
    });
}

launch_cache::Image AllImages::findByOwnedAddress(const void* addr, const mach_header** loadAddress, uint8_t* permissions) const
{
    if ( _initialImages != nullptr ) {
        // being called during libSystem initialization, so sLoadedImages is not allocated yet
        for (uintptr_t i=0; i < _initialImages->count(); ++i) {
            const loader::ImageInfo& entry = (*_initialImages)[i];
            launch_cache::Image anImage(entry.imageData);
            if ( anImage.containsAddress(addr, entry.loadAddress, permissions) ) {
                *loadAddress = entry.loadAddress;
                return entry.imageData;
            }
        }
        return launch_cache::Image(nullptr);
    }

    // if address is in cache, do fast search of cache
    if ( (_dyldCacheAddress != nullptr) && (addr > _dyldCacheAddress) ) {
        const DyldSharedCache* dyldCache = (DyldSharedCache*)_dyldCacheAddress;
        if ( addr < (void*)((uint8_t*)_dyldCacheAddress+dyldCache->mappedSize()) ) {
            size_t cacheVmOffset = ((uint8_t*)addr - (uint8_t*)_dyldCacheAddress);
            DyldCacheParser cacheParser(dyldCache, false);
            launch_cache::ImageGroup cachedDylibsGroup(cacheParser.cachedDylibsGroup());
            uint32_t mhCacheOffset;
            uint8_t foundPermissions;
            launch_cache::Image image(cachedDylibsGroup.findImageByCacheOffset(cacheVmOffset, mhCacheOffset, foundPermissions));
            if ( image.valid() ) {
                *loadAddress = (mach_header*)((uint8_t*)_dyldCacheAddress + mhCacheOffset);
                if ( permissions != nullptr )
                    *permissions = foundPermissions;
                return image;
            }
        }
    }

    __block const BinaryImage* foundImage = nullptr;
    sLoadedImages.forEachWithReadLock(^(uint32_t anIndex, const LoadedImage& loadedImage, bool& stop) {
        launch_cache::Image anImage(loadedImage.image());
        if ( anImage.containsAddress(addr, loadedImage.loadedAddress(), permissions) ) {
            *loadAddress = loadedImage.loadedAddress();
            foundImage = loadedImage.image();
            stop = true;
        }
    });
    return launch_cache::Image(foundImage);
}

const mach_header* AllImages::findLoadAddressByImage(const BinaryImage* targetImage) const
{
    __block const mach_header* foundAddress = nullptr;
    sLoadedImages.forEachWithReadLock(^(uint32_t anIndex, const LoadedImage& loadedImage, bool& stop) {
        if ( targetImage == loadedImage.image() ) {
            foundAddress = loadedImage.loadedAddress();
            stop = true;
        }
    });
    return foundAddress;
}

const mach_header* AllImages::mainExecutable() const
{
    assert(_programVars != nullptr);
    return (const mach_header*)_programVars->mh;
}

launch_cache::Image AllImages::mainExecutableImage() const
{
    assert(_mainClosure != nullptr);
    const launch_cache::Closure mainClosure(_mainClosure);
    const dyld3::launch_cache::ImageGroup mainGroup = mainClosure.group();
    const uint32_t mainExecutableIndex = mainClosure.mainExecutableImageIndex();
    const dyld3::launch_cache::Image mainImage = mainGroup.image(mainExecutableIndex);
    return mainImage;
}

void AllImages::setMainPath(const char* path)
{
    _mainExeOverridePath = path;
}

const char* AllImages::imagePath(const BinaryImage* binImage) const
{
#if __IPHONE_OS_VERSION_MIN_REQUIRED
    // on iOS and watchOS, apps may be moved on device after the closure was built
    if ( _mainExeOverridePath != nullptr ) {
        if ( binImage == mainExecutableImage().binaryData() )
            return _mainExeOverridePath;
    }
#endif
    launch_cache::Image image(binImage);
    return image.path();
}

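// The first three entries of sKnownGroups are fixed: [0] is the dyld cache's
// dylib group, [1] is the "other dylibs" group, and [2] is the main closure's
// group. The accessors below rely on this ordering.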
void AllImages::setInitialGroups()
{
    DyldCacheParser cacheParser((DyldSharedCache*)_dyldCacheAddress, false);
    sKnownGroups.addNoLock(cacheParser.cachedDylibsGroup());
    sKnownGroups.addNoLock(cacheParser.otherDylibsGroup());
    launch_cache::Closure closure(_mainClosure);
    sKnownGroups.addNoLock(closure.group().binaryData());
}

const launch_cache::binary_format::ImageGroup* AllImages::cachedDylibsGroup()
{
    return sKnownGroups[0];
}

const launch_cache::binary_format::ImageGroup* AllImages::otherDylibsGroup()
{
    return sKnownGroups[1];
}

const AllImages::BinaryImageGroup* AllImages::mainClosureGroup()
{
    return sKnownGroups[2];
}

uint32_t AllImages::currentGroupsCount() const
{
    return sKnownGroups.count();
}

void AllImages::copyCurrentGroups(ImageGroupList& groups) const
{
    sKnownGroups.forEachWithReadLock(^(uint32_t index, const dyld3::launch_cache::binary_format::ImageGroup* const& grpData, bool& stop) {
        if ( index < groups.count() )
            groups[index] = grpData;
    });
}

void AllImages::copyCurrentGroupsNoLock(ImageGroupList& groups) const
{
    sKnownGroups.forEachNoLock(^(uint32_t index, const dyld3::launch_cache::binary_format::ImageGroup* const& grpData, bool& stop) {
        if ( index < groups.count() )
            groups[index] = grpData;
    });
}

const mach_header* AllImages::alreadyLoaded(uint64_t inode, uint64_t mtime, bool bumpRefCount)
{
    __block const mach_header* result = nullptr;
    sLoadedImages.forEachWithReadLock(^(uint32_t anIndex, const LoadedImage& loadedImage, bool& stop) {
        launch_cache::Image img(loadedImage.image());
        if ( img.validateUsingModTimeAndInode() ) {
            if ( (img.fileINode() == inode) && (img.fileModTime() == mtime) ) {
                result = loadedImage.loadedAddress();
                if ( bumpRefCount && !loadedImage.neverUnload() )
                    incRefCount(loadedImage.loadedAddress());
                stop = true;
            }
        }
    });
    return result;
}

const mach_header* AllImages::alreadyLoaded(const char* path, bool bumpRefCount)
{
    __block const mach_header* result = nullptr;
    uint32_t targetHash = launch_cache::ImageGroup::hashFunction(path);
    sLoadedImages.forEachWithReadLock(^(uint32_t anIndex, const LoadedImage& loadedImage, bool& stop) {
        launch_cache::Image img(loadedImage.image());
        if ( (img.pathHash() == targetHash) && (strcmp(path, imagePath(loadedImage.image())) == 0) ) {
            result = loadedImage.loadedAddress();
            if ( bumpRefCount && !loadedImage.neverUnload() )
                incRefCount(loadedImage.loadedAddress());
            stop = true;
        }
    });
    if ( result == nullptr ) {
        // perhaps there was an image override
        launch_cache::ImageGroup mainGroup(mainClosureGroup());
        STACK_ALLOC_DYNARRAY(const launch_cache::BinaryImageGroupData*, currentGroupsCount(), currentGroupsList);
        copyCurrentGroups(currentGroupsList);
        mainGroup.forEachImageRefOverride(currentGroupsList, ^(launch_cache::Image standardDylib, launch_cache::Image overrideDylib, bool& stop) {
            if ( strcmp(standardDylib.path(), path) == 0 ) {
                result = alreadyLoaded(overrideDylib.path(), bumpRefCount);
                stop = true;
            }
        });
    }
    return result;
}

const mach_header* AllImages::alreadyLoaded(const BinaryImage* binImage, bool bumpRefCount)
{
    const mach_header* result = findLoadAddressByImage(binImage);
    if ( result != nullptr ) {
        launch_cache::Image loadedImage(binImage);
        if ( bumpRefCount && !loadedImage.neverUnload() )
            incRefCount(result);
    }
    return result;
}

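// dlopen reference counts are only tracked for images that can be unloaded;
// never-unload images skip the count entirely. When a count drops to zero the
// entry is removed and a garbage collection pass looks for dead images.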
void AllImages::incRefCount(const mach_header* loadAddress)
{
    __block bool found = false;
    sDlopenRefCounts.forEachWithWriteLock(^(uint32_t index, DlopenCount& entry, bool& stop) {
        if ( entry.loadAddress == loadAddress ) {
            found = true;
            entry.refCount += 1;
            stop = true;
        }
    });
    if ( !found ) {
        DlopenCount newEntry = { loadAddress, 1 };
        sDlopenRefCounts.add(newEntry);
    }
}

void AllImages::decRefCount(const mach_header* loadAddress)
{
    __block bool refCountNowZero = false;
    sDlopenRefCounts.forEachWithWriteLock(^(uint32_t index, DlopenCount& entry, bool& stop) {
        if ( entry.loadAddress == loadAddress ) {
            entry.refCount -= 1;
            stop = true;
            if ( entry.refCount == 0 )
                refCountNowZero = true;
        }
    });
    if ( refCountNowZero ) {
        DlopenCount delEntry = { loadAddress, 0 };
        sDlopenRefCounts.remove(delEntry);
        garbageCollectImages();
    }
}


#if __MAC_OS_X_VERSION_MIN_REQUIRED
__NSObjectFileImage* AllImages::addNSObjectFileImage()
{
    // look for empty slot first
    __block __NSObjectFileImage* result = nullptr;
    sNSObjectFileImages.forEachWithWriteLock(^(uint32_t index, __NSObjectFileImage& value, bool& stop) {
        if ( (value.path == nullptr) && (value.memSource == nullptr) ) {
            result = &value;
            stop = true;
        }
    });
    if ( result != nullptr )
        return result;

    // otherwise allocate new slot
    __NSObjectFileImage empty;
    return sNSObjectFileImages.add(empty);
}

bool AllImages::hasNSObjectFileImage(__NSObjectFileImage* ofi)
{
    __block bool result = false;
    sNSObjectFileImages.forEachNoLock(^(uint32_t index, const __NSObjectFileImage& value, bool& stop) {
        if ( &value == ofi ) {
            result = ((value.memSource != nullptr) || (value.path != nullptr));
            stop = true;
        }
    });
    return result;
}

void AllImages::removeNSObjectFileImage(__NSObjectFileImage* ofi)
{
    sNSObjectFileImages.forEachWithWriteLock(^(uint32_t index, __NSObjectFileImage& value, bool& stop) {
        if ( &value == ofi ) {
            // mark slot as empty
            ofi->path = nullptr;
            ofi->memSource = nullptr;
            ofi->memLength = 0;
            ofi->loadAddress = nullptr;
            ofi->binImage = nullptr;
            stop = true;
        }
    });
}
#endif


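// Reaper implements the mark phase of the mark-and-sweep unload collector.
// The caller passes parallel arrays: the candidate unloadable images and one
// bool per candidate. garbageCollect() marks every candidate that is still
// reachable; finalizeDeadImages() then runs C++ finalizers over the executable
// segments of the unmarked (dead) images.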
class VIS_HIDDEN Reaper
{
public:
    Reaper(uint32_t count, const LoadedImage** unloadables, bool* inUseArray);
    void garbageCollect();
    void finalizeDeadImages();

private:
    typedef launch_cache::binary_format::Image BinaryImage;

    void markDirectlyDlopenedImagesAsUsed();
    void markDependentOfInUseImages();
    void markDependentsOf(const LoadedImage*);
    bool loadAddressIsUnloadable(const mach_header* loadAddr, uint32_t& index);
    bool imageIsUnloadable(const BinaryImage* binImage, uint32_t& foundIndex);
    uint32_t inUseCount();
    void dump(const char* msg);

    const LoadedImage** _unloadablesArray;
    bool* _inUseArray;
    uint32_t _arrayCount;
    uint32_t _deadCount;
};

Reaper::Reaper(uint32_t count, const LoadedImage** unloadables, bool* inUseArray)
    : _unloadablesArray(unloadables), _inUseArray(inUseArray), _arrayCount(count), _deadCount(0)
{
}


bool Reaper::loadAddressIsUnloadable(const mach_header* loadAddr, uint32_t& foundIndex)
{
    for (uint32_t i=0; i < _arrayCount; ++i) {
        if ( _unloadablesArray[i]->loadedAddress() == loadAddr ) {
            foundIndex = i;
            return true;
        }
    }
    return false;
}

bool Reaper::imageIsUnloadable(const BinaryImage* binImage, uint32_t& foundIndex)
{
    for (uint32_t i=0; i < _arrayCount; ++i) {
        if ( _unloadablesArray[i]->image() == binImage ) {
            foundIndex = i;
            return true;
        }
    }
    return false;
}

void Reaper::markDirectlyDlopenedImagesAsUsed()
{
    sDlopenRefCounts.forEachWithReadLock(^(uint32_t refCountIndex, const dyld3::DlopenCount& dlEntry, bool& stop) {
        if ( dlEntry.refCount != 0 ) {
            uint32_t foundIndex;
            if ( loadAddressIsUnloadable(dlEntry.loadAddress, foundIndex) ) {
                _inUseArray[foundIndex] = true;
            }
        }
    });
}

uint32_t Reaper::inUseCount()
{
    uint32_t count = 0;
    for (uint32_t i=0; i < _arrayCount; ++i) {
        if ( _inUseArray[i] )
            ++count;
    }
    return count;
}

void Reaper::markDependentsOf(const LoadedImage* entry)
{
    const launch_cache::Image image(entry->image());
    STACK_ALLOC_DYNARRAY(const launch_cache::BinaryImageGroupData*, gAllImages.currentGroupsCount(), currentGroupsList);
    gAllImages.copyCurrentGroups(currentGroupsList);
    image.forEachDependentImage(currentGroupsList, ^(uint32_t depIndex, dyld3::launch_cache::Image depImage, dyld3::launch_cache::Image::LinkKind kind, bool& stop) {
        uint32_t foundIndex;
        if ( !depImage.neverUnload() && imageIsUnloadable(depImage.binaryData(), foundIndex) ) {
            _inUseArray[foundIndex] = true;
        }
    });
}

void Reaper::markDependentOfInUseImages()
{
    for (uint32_t i=0; i < _arrayCount; ++i) {
        if ( _inUseArray[i] )
            markDependentsOf(_unloadablesArray[i]);
    }
}

void Reaper::dump(const char* msg)
{
    //log("%s:\n", msg);
    for (uint32_t i=0; i < _arrayCount; ++i) {
        dyld3::launch_cache::Image image(_unloadablesArray[i]->image());
        //log(" in-use=%d %s\n", _inUseArray[i], image.path());
    }
}

void Reaper::garbageCollect()
{
    //dump("all unloadable images");

    // mark all dylibs directly dlopen'ed as in use
    markDirectlyDlopenedImagesAsUsed();

    //dump("directly dlopen()'ed marked");

    // iteratively mark dependents of in-use dylibs as in-use until the in-use count stops changing
    uint32_t lastCount = inUseCount();
    bool countChanged = false;
    do {
        markDependentOfInUseImages();
        //dump("dependents marked");
        uint32_t newCount = inUseCount();
        countChanged = (newCount != lastCount);
        lastCount = newCount;
    } while (countChanged);

    _deadCount = _arrayCount - inUseCount();
}

void Reaper::finalizeDeadImages()
{
    if ( _deadCount == 0 )
        return;
    __cxa_range_t ranges[_deadCount];
    __cxa_range_t* rangesArray = ranges;
    __block unsigned int rangesCount = 0;
    for (uint32_t i=0; i < _arrayCount; ++i) {
        if ( _inUseArray[i] )
            continue;
        dyld3::launch_cache::Image image(_unloadablesArray[i]->image());
        image.forEachDiskSegment(^(uint32_t segIndex, uint32_t fileOffset, uint32_t fileSize, int64_t vmOffset, uint64_t vmSize, uint8_t permissions, bool& stop) {
            if ( permissions & VM_PROT_EXECUTE ) {
                rangesArray[rangesCount].addr = (char*)(_unloadablesArray[i]->loadedAddress()) + vmOffset;
                rangesArray[rangesCount].length = (size_t)vmSize;
                ++rangesCount;
            }
        });
    }
    __cxa_finalize_ranges(ranges, rangesCount);
}


// This function is called at the end of dlclose() when the reference count goes to zero.
// The dylib being unloaded may have brought in other dependent dylibs when it was loaded.
// Those dependent dylibs need to be unloaded, but only if they are not referenced by
// something else. We use a standard mark and sweep garbage collection.
//
// The tricky part is that when a dylib is unloaded it may have a termination function that
// can run and itself call dlclose() on yet another dylib. The problem is that this
// sort of garbage collection is not re-entrant. Instead, a terminator's call to dlclose()
// which calls garbageCollectImages() will just set a flag to re-do the garbage collection
// when the current pass is done.
//
// Also note that this is done while holding the sLoadedImages read lock, so any
// dlopen/dlclose on other threads is blocked while this garbage collection runs.
//
void AllImages::garbageCollectImages()
{
    // if some other thread is currently GC'ing images, let the other thread do the work
    int32_t newCount = OSAtomicIncrement32(&_gcCount);
    if ( newCount != 1 )
        return;

    do {
        const uint32_t loadedImageCount = sLoadedImages.count();
        const LoadedImage* unloadables[loadedImageCount];
        bool unloadableInUse[loadedImageCount];
        const LoadedImage** unloadablesArray = unloadables;
        bool* unloadableInUseArray = unloadableInUse;
        __block uint32_t unloadableCount = 0;
        // do GC with lock, so no other images can be added during GC
        sLoadedImages.withReadLock(^() {
            sLoadedImages.forEachNoLock(^(uint32_t index, const LoadedImage& entry, bool& stop) {
                const launch_cache::Image image(entry.image());
                if ( !image.neverUnload() && !entry.neverUnload() ) {
                    unloadablesArray[unloadableCount] = &entry;
                    unloadableInUseArray[unloadableCount] = false;
                    //log("unloadable[%d] %p %s\n", unloadableCount, entry.loadedAddress(), image.path());
                    ++unloadableCount;
                }
            });
            // make reaper object to do garbage collection and notifications
            Reaper reaper(unloadableCount, unloadablesArray, unloadableInUseArray);
            reaper.garbageCollect();

            // FIXME: we should sort dead images so higher level ones are terminated first

            // call cxa_finalize_ranges of dead images
            reaper.finalizeDeadImages();

            // FIXME: call static terminators of dead images

            // FIXME: DOF unregister
        });

        //log("sLoadedImages before GC removals:\n");
        //sLoadedImages.dump(^(const LoadedImage& entry) {
        //    const launch_cache::Image image(entry.image());
        //    log(" loadAddr=%p, path=%s\n", entry.loadedAddress(), image.path());
        //});

        // make a copy of the LoadedImages we want to remove,
        // because unloadables[] points into the ChunkedVector we are shrinking
        uint32_t removalCount = 0;
        for (uint32_t i=0; i < unloadableCount; ++i) {
            if ( !unloadableInUse[i] )
                ++removalCount;
        }
        if ( removalCount > 0 ) {
            STACK_ALLOC_DYNARRAY(loader::ImageInfo, removalCount, unloadImages);
            uint32_t removalIndex = 0;
            for (uint32_t i=0; i < unloadableCount; ++i) {
                if ( !unloadableInUse[i] ) {
                    unloadImages[removalIndex].loadAddress = unloadables[i]->loadedAddress();
                    unloadImages[removalIndex].imageData = unloadables[i]->image();
                    ++removalIndex;
                }
            }
            // remove entries from sLoadedImages
            removeImages(unloadImages);

            //log("sLoadedImages after GC removals:\n");
            //sLoadedImages.dump(^(const LoadedImage& entry) {
            //    const launch_cache::Image image(entry.image());
            //    log(" loadAddr=%p, path=%s\n", entry.loadedAddress(), image.path());
            //});
        }

        // if some other thread called GC during our work, redo GC on its behalf
        newCount = OSAtomicDecrement32(&_gcCount);
    }
    while (newCount > 0);
}


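// messageClosured is the slow path for loading a path not present in any known
// ImageGroup: it has closured (via closured_CreateImageGroup) build a new
// ImageGroup for the file, makes the returned buffer read-only, and caches the
// group in sKnownGroups so later lookups find it.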
VIS_HIDDEN
const launch_cache::binary_format::Image* AllImages::messageClosured(const char* path, const char* apiName, const char* closuredErrorMessages[3], int& closuredErrorMessagesCount)
{
    __block const launch_cache::binary_format::Image* result = nullptr;
    sKnownGroups.withWriteLock(^() {
        ClosureBuffer::CacheIdent cacheIdent;
        bzero(&cacheIdent, sizeof(cacheIdent));
        if ( _dyldCacheAddress != nullptr ) {
            const DyldSharedCache* dyldCache = (DyldSharedCache*)_dyldCacheAddress;
            dyldCache->getUUID(cacheIdent.cacheUUID);
            cacheIdent.cacheAddress = (unsigned long)_dyldCacheAddress;
            cacheIdent.cacheMappedSize = dyldCache->mappedSize();
        }
        gPathOverrides.forEachPathVariant(path, ^(const char* possiblePath, bool& stopVariants) {
            struct stat statBuf;
            if ( stat(possiblePath, &statBuf) == 0 ) {
                if ( S_ISDIR(statBuf.st_mode) ) {
                    log_apis(" %s: path is directory: %s\n", apiName, possiblePath);
                    if ( closuredErrorMessagesCount < 3 )
                        closuredErrorMessages[closuredErrorMessagesCount++] = strdup("not a file");
                }
                else {
                    // file exists, ask closured to build info for it
                    STACK_ALLOC_DYNARRAY(const launch_cache::BinaryImageGroupData*, sKnownGroups.countNoLock(), currentGroupsList);
                    gAllImages.copyCurrentGroupsNoLock(currentGroupsList);
                    dyld3::launch_cache::DynArray<const dyld3::launch_cache::binary_format::ImageGroup*> nonCacheGroupList(currentGroupsList.count()-2, &currentGroupsList[2]);
                    const dyld3::launch_cache::binary_format::ImageGroup* closuredCreatedGroupData = nullptr;
                    ClosureBuffer closureBuilderInput(cacheIdent, path, nonCacheGroupList, gPathOverrides);
                    ClosureBuffer closureBuilderOutput = dyld3::closured_CreateImageGroup(closureBuilderInput);
                    if ( !closureBuilderOutput.isError() ) {
                        vm_protect(mach_task_self(), closureBuilderOutput.vmBuffer(), closureBuilderOutput.vmBufferSize(), false, VM_PROT_READ);
                        closuredCreatedGroupData = closureBuilderOutput.imageGroup();
                        log_apis(" %s: closured built ImageGroup for path: %s\n", apiName, possiblePath);
                        sKnownGroups.addNoLock(closuredCreatedGroupData);
                        launch_cache::ImageGroup group(closuredCreatedGroupData);
                        result = group.imageBinary(0);
                        stopVariants = true;
                    }
                    else {
                        log_apis(" %s: closured failed for path: %s, error: %s\n", apiName, possiblePath, closureBuilderOutput.errorMessage());
                        if ( closuredErrorMessagesCount < 3 ) {
                            closuredErrorMessages[closuredErrorMessagesCount++] = strdup(closureBuilderOutput.errorMessage());
                        }
                        closureBuilderOutput.free();
                    }
                }
            }
            else {
                log_apis(" %s: file does not exist for path: %s\n", apiName, possiblePath);
            }
        });
    });

    return result;
}

const AllImages::BinaryImage* AllImages::findImageInKnownGroups(const char* path)
{
    __block const AllImages::BinaryImage* result = nullptr;
    sKnownGroups.forEachWithReadLock(^(uint32_t index, const dyld3::launch_cache::binary_format::ImageGroup* const& grpData, bool& stop) {
        launch_cache::ImageGroup group(grpData);
        uint32_t ignore;
        if ( const AllImages::BinaryImage* binImage = group.findImageByPath(path, ignore) ) {
            result = binImage;
            stop = true;
        }
    });
    return result;
}

bool AllImages::imageUnloadable(const launch_cache::Image& image, const mach_header* loadAddress) const
{
    // check if it was statically determined in the closure that this image can never be unloaded
    if ( image.neverUnload() )
        return false;

    // check if some runtime decision made this image never-unloadable
    __block bool foundAsNeverUnload = false;
    sLoadedImages.forEachWithReadLock(^(uint32_t anIndex, const LoadedImage& loadedImage, bool& stop) {
        if ( loadedImage.loadedAddress() == loadAddress ) {
            stop = true;
            if ( loadedImage.neverUnload() )
                foundAsNeverUnload = true;
        }
    });
    if ( foundAsNeverUnload )
        return false;

    return true;
}

void AllImages::addLoadNotifier(NotifyFunc func)
{
    // callback about already loaded images
    const uint32_t existingCount = sLoadedImages.count();
    const mach_header* existingMHs[existingCount];
    const mach_header** existingArray = existingMHs;
    sLoadedImages.forEachWithReadLock(^(uint32_t anIndex, const LoadedImage& loadedImage, bool& stop) {
        if ( anIndex < existingCount )
            existingArray[anIndex] = loadedImage.loadedAddress();
    });
    // we don't want to hold the lock while calling out, so prebuild the array (with lock) then do the calls on that array (without lock)
    for (uint32_t i=0; i < existingCount; i++) {
        MachOParser parser(existingArray[i]);
        log_notifications("dyld: add notifier %p called with mh=%p\n", func, existingArray[i]);
        func(existingArray[i], parser.getSlide());
    }

    // add to list of functions to call about future loads
    sLoadNotifiers.add(func);
}

void AllImages::addUnloadNotifier(NotifyFunc func)
{
    // add to list of functions to call about future unloads
    sUnloadNotifiers.add(func);
}

void AllImages::setObjCNotifiers(_dyld_objc_notify_mapped map, _dyld_objc_notify_init init, _dyld_objc_notify_unmapped unmap)
{
    _objcNotifyMapped = map;
    _objcNotifyInit = init;
    _objcNotifyUnmapped = unmap;

    // callback about already loaded images
    uint32_t maxCount = count();
    const char* pathsBuffer[maxCount];
    const mach_header* mhBuffer[maxCount];
    __block const char** paths = pathsBuffer;
    __block const mach_header** mhs = mhBuffer;
    __block uint32_t imagesWithObjC = 0;
    sLoadedImages.forEachWithReadLock(^(uint32_t anIndex, const LoadedImage& loadedImage, bool& stop) {
        launch_cache::Image img(loadedImage.image());
        if ( img.hasObjC() ) {
            mhs[imagesWithObjC] = loadedImage.loadedAddress();
            paths[imagesWithObjC] = imagePath(loadedImage.image());
            ++imagesWithObjC;
        }
    });
    if ( imagesWithObjC != 0 ) {
        (*map)(imagesWithObjC, pathsBuffer, mhBuffer);
        if ( log_notifications("dyld: objc-mapped-notifier called with %d images:\n", imagesWithObjC) ) {
            for (uint32_t i=0; i < imagesWithObjC; ++i) {
                log_notifications("dyld: objc-mapped: %p %s\n", mhBuffer[i], pathsBuffer[i]);
            }
        }
    }
}

void AllImages::vmAccountingSetSuspended(bool suspend)
{
#if __arm__ || __arm64__
    // <rdar://problem/29099600> dyld should tell the kernel when it is doing fix-ups caused by roots
    log_fixups("vm.footprint_suspend=%d\n", suspend);
    int newValue = suspend ? 1 : 0;
    int oldValue = 0;
    size_t newlen = sizeof(newValue);
    size_t oldlen = sizeof(oldValue);
    sysctlbyname("vm.footprint_suspend", &oldValue, &oldlen, &newValue, newlen);
#endif
}

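// When the main closure overrides symbols that live in the dyld shared cache
// (e.g. an interposed or rooted dylib), rewrite every recorded patch location
// in the cache to point at the replacement implementation, suspending vm
// footprint accounting around the writes (see vmAccountingSetSuspended above).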
void AllImages::applyInterposingToDyldCache(const launch_cache::binary_format::Closure* closure, const dyld3::launch_cache::DynArray<loader::ImageInfo>& initialImages)
{
    launch_cache::Closure mainClosure(closure);
    launch_cache::ImageGroup mainGroup = mainClosure.group();
    DyldCacheParser cacheParser((DyldSharedCache*)_dyldCacheAddress, false);
    const launch_cache::binary_format::ImageGroup* dylibsGroupData = cacheParser.cachedDylibsGroup();
    launch_cache::ImageGroup dyldCacheDylibGroup(dylibsGroupData);
    __block bool suspendedAccounting = false;
    mainGroup.forEachDyldCacheSymbolOverride(^(uint32_t patchTableIndex, const launch_cache::binary_format::Image* imageData, uint32_t imageOffset, bool& stop) {
        bool foundInImages = false;
        for (uintptr_t i=0; i < initialImages.count(); ++i) {
            if ( initialImages[i].imageData == imageData ) {
                foundInImages = true;
                uintptr_t replacement = (uintptr_t)(initialImages[i].loadAddress) + imageOffset;
                dyldCacheDylibGroup.forEachDyldCachePatchLocation(_dyldCacheAddress, patchTableIndex, ^(uintptr_t* locationToPatch, uintptr_t addend, bool& innerStop) {
                    if ( !suspendedAccounting ) {
                        vmAccountingSetSuspended(true);
                        suspendedAccounting = true;
                    }
                    log_fixups("dyld: cache fixup: *%p = %p\n", locationToPatch, (void*)replacement);
                    *locationToPatch = replacement + addend;
                });
                break;
            }
        }
        if ( !foundInImages ) {
            launch_cache::Image img(imageData);
            log_fixups("did not find loaded image to patch into cache: %s\n", img.path());
        }
    });
    if ( suspendedAccounting )
        vmAccountingSetSuspended(false);
}

void AllImages::runLibSystemInitializer(const mach_header* libSystemAddress, const launch_cache::binary_format::Image* libSystemBinImage)
{
    // run all initializers in image
    launch_cache::Image libSystemImage(libSystemBinImage);
    libSystemImage.forEachInitializer(libSystemAddress, ^(const void* func) {
        Initializer initFunc = (Initializer)func;
        dyld3::kdebug_trace_dyld_duration(DBG_DYLD_TIMING_STATIC_INITIALIZER, (uint64_t)func, 0, ^{
            initFunc(NXArgc, NXArgv, environ, appleParams, _programVars);
        });
        log_initializers("called initializer %p in %s\n", initFunc, libSystemImage.path());
    });

    // mark libSystem.dylib as inited, so a later recursive-init will not re-run its initializers
    sLoadedImages.forEachWithWriteLock(^(uint32_t anIndex, LoadedImage& loadedImage, bool& stop) {
        if ( loadedImage.loadedAddress() == libSystemAddress ) {
            loadedImage.setState(LoadedImage::State::inited);
            stop = true;
        }
    });
}

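// Run initializers for an image and everything below it in dependency order.
// The closure precomputes the bottom-up "init before" list, so this just walks
// that list, using _initializerLock to transition each image from uninited to
// beingInited to inited exactly once, even when dlopen re-enters on the same thread.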
void AllImages::runInitialzersBottomUp(const mach_header* imageLoadAddress)
{
    launch_cache::Image topImage = findByLoadAddress(imageLoadAddress);
    if ( topImage.isInvalid() )
        return;

    // closure contains the list of initializers to run in order
    STACK_ALLOC_DYNARRAY(const launch_cache::BinaryImageGroupData*, currentGroupsCount(), currentGroupsList);
    copyCurrentGroups(currentGroupsList);
    topImage.forEachInitBefore(currentGroupsList, ^(launch_cache::Image imageToInit) {
        // find entry
        __block LoadedImage* foundEntry = nullptr;
        sLoadedImages.forEachWithReadLock(^(uint32_t index, const LoadedImage& entry, bool& stop) {
            if ( entry.image() == imageToInit.binaryData() ) {
                foundEntry = (LoadedImage*)&entry;
                stop = true;
            }
        });
        assert(foundEntry != nullptr);
        pthread_mutex_lock(&_initializerLock);
        // Note: due to the large lock in dlopen, we can't be waiting on another thread
        // here, but it is possible that we are in a dlopen which is initializing us again
        if ( foundEntry->state() == LoadedImage::State::beingInited ) {
            log_initializers("dyld: already initializing '%s'\n", imagePath(imageToInit.binaryData()));
        }
        // at this point, the image is either initialized or not
        // if not, initialize it on this thread
        if ( foundEntry->state() == LoadedImage::State::uninited ) {
            foundEntry->setState(LoadedImage::State::beingInited);
            // release initializer lock, so other threads can run initializers
            pthread_mutex_unlock(&_initializerLock);
            // tell objc to run any +load methods in image
            if ( (_objcNotifyInit != nullptr) && imageToInit.mayHavePlusLoads() ) {
                log_notifications("dyld: objc-init-notifier called with mh=%p, path=%s\n", foundEntry->loadedAddress(), imagePath(imageToInit.binaryData()));
                (*_objcNotifyInit)(imagePath(imageToInit.binaryData()), foundEntry->loadedAddress());
            }
            // run all initializers in image
            imageToInit.forEachInitializer(foundEntry->loadedAddress(), ^(const void* func) {
                Initializer initFunc = (Initializer)func;
                dyld3::kdebug_trace_dyld_duration(DBG_DYLD_TIMING_STATIC_INITIALIZER, (uint64_t)func, 0, ^{
                    initFunc(NXArgc, NXArgv, environ, appleParams, _programVars);
                });
                log_initializers("dyld: called initializer %p in %s\n", initFunc, imageToInit.path());
            });
            // reacquire initializer lock to switch state to inited
            pthread_mutex_lock(&_initializerLock);
            foundEntry->setState(LoadedImage::State::inited);
        }
        pthread_mutex_unlock(&_initializerLock);
    });
}

} // namespace dyld3