dyld-832.7.1.tar.gz
[apple/dyld.git] / dyld3 / Loading.cpp
1 /*
2 * Copyright (c) 2017 Apple Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. Please obtain a copy of the License at
10 * http://www.opensource.apple.com/apsl/ and read it before using this
11 * file.
12 *
13 * The Original Code and all software distributed under the License are
14 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
15 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
16 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
18 * Please see the License for the specific language governing rights and
19 * limitations under the License.
20 *
21 * @APPLE_LICENSE_HEADER_END@
22 */
23
24
25 #include <bitset>
26
27 #include <stdint.h>
28 #include <string.h>
29 #include <assert.h>
30 #include <uuid/uuid.h>
31 #include <mach/mach.h>
32 #include <sys/stat.h>
33 #include <sys/types.h>
34 #include <sys/sysctl.h>
35 #include <fcntl.h>
36 #include <sys/dtrace.h>
37 #include <sys/errno.h>
38 #include <unistd.h>
39 #include <System/sys/mman.h>
40 #include <System/sys/csr.h>
41 #include <System/machine/cpu_capabilities.h>
42 #if !TARGET_OS_SIMULATOR && !TARGET_OS_DRIVERKIT
43 #include <sandbox.h>
44 #include <sandbox/private.h>
45 #endif
46 //#include <dispatch/dispatch.h>
47 #include <mach/vm_page_size.h>
48
49 #include "ClosureFileSystemPhysical.h"
50 #include "MachOFile.h"
51 #include "MachOLoaded.h"
52 #include "MachOAnalyzer.h"
53 #include "Logging.h"
54 #include "Loading.h"
55 #include "RootsChecker.h"
56 #include "Tracing.h"
57 #include "dyld2.h"
58 #include "dyld_cache_format.h"
59 #include "libdyldEntryVector.h"
60
61 #include "objc-shared-cache.h"
62
63 namespace dyld {
64 void log(const char* m, ...);
65 }
66
67
68 namespace {
69
70 // utility to track a set of ImageNum's in use
71 class VIS_HIDDEN ImageNumSet
72 {
73 public:
74 void add(dyld3::closure::ImageNum num);
75 bool contains(dyld3::closure::ImageNum num) const;
76
77 private:
78 std::bitset<5120> _bitmap;
79 dyld3::OverflowSafeArray<dyld3::closure::ImageNum> _overflowArray;
80 };
81
82 void ImageNumSet::add(dyld3::closure::ImageNum num)
83 {
84 if ( num < 5120 )
85 _bitmap.set(num);
86 else
87 _overflowArray.push_back(num);
88 }
89
90 bool ImageNumSet::contains(dyld3::closure::ImageNum num) const
91 {
92 if ( num < 5120 )
93 return _bitmap.test(num);
94
95 for (dyld3::closure::ImageNum existingNum : _overflowArray) {
96 if ( existingNum == num )
97 return true;
98 }
99 return false;
100 }
101 } // namespace anonymous
102
103
104 namespace dyld3 {
105
// Construct a Loader that will load the images described by a launch or dlopen closure.
// 'existingImages' are images already loaded in the process; newly loaded images are
// appended to 'newImagesStorage', a caller-pre-allocated array.
// 'cacheAddress' is the base of the dyld shared cache (may be nullptr).
Loader::Loader(const Array<LoadedImage>& existingImages, Array<LoadedImage>& newImagesStorage,
               const void* cacheAddress, const Array<const dyld3::closure::ImageArray*>& imagesArrays,
               const closure::ObjCSelectorOpt* selOpt, const Array<closure::Image::ObjCSelectorImage>& selImages,
               const RootsChecker& rootsChecker, dyld3::Platform platform,
               LogFunc logLoads, LogFunc logSegments, LogFunc logFixups, LogFunc logDof,
               bool allowMissingLazies, dyld3::LaunchErrorInfo* launchErrorInfo)
    : _existingImages(existingImages), _newImages(newImagesStorage),
      _imagesArrays(imagesArrays), _dyldCacheAddress(cacheAddress), _dyldCacheSelectorOpt(nullptr),
      _closureSelectorOpt(selOpt), _closureSelectorImages(selImages),
      _rootsChecker(rootsChecker), _allowMissingLazies(allowMissingLazies), _platform(platform),
      _logLoads(logLoads), _logSegments(logSegments), _logFixups(logFixups), _logDofs(logDof), _launchErrorInfo(launchErrorInfo)
{
#if BUILDING_DYLD
    // This is only needed for dyld and the launch closure, not the dlopen closures
    if ( _dyldCacheAddress != nullptr ) {
        _dyldCacheSelectorOpt = ((const DyldSharedCache*)_dyldCacheAddress)->objcOpt()->selopt();
    }
#endif
}
126
// Append a newly discovered image to the list of images this Loader will map and fix up.
void Loader::addImage(const LoadedImage& li)
{
    _newImages.push_back(li);
}
131
// Find the LoadedImage entry for 'targetImageNum', or nullptr if it is not loaded.
LoadedImage* Loader::findImage(closure::ImageNum targetImageNum) const
{
#if BUILDING_DYLD
    // The launch images are different in dyld vs libdyld. In dyld, the new images are
    // the launch images, while in libdyld, the existing images are the launch images
    if (LoadedImage* info = _launchImagesCache.findImage(targetImageNum, _newImages)) {
        return info;
    }

    // cache miss: linear search of images being loaded
    for (uintptr_t index = 0; index != _newImages.count(); ++index) {
        LoadedImage& info = _newImages[index];
        if ( info.image()->representsImageNum(targetImageNum) ) {
            // Try cache this entry for next time
            _launchImagesCache.tryAddImage(targetImageNum, index);
            return &info;
        }
    }
#elif BUILDING_LIBDYLD
    // search already-loaded images first, then the images currently being loaded
    for (const LoadedImage& info : _existingImages) {
        if ( info.image()->representsImageNum(targetImageNum) )
            return (LoadedImage*)&info;
    }
    for (LoadedImage& info : _newImages) {
        if ( info.image()->representsImageNum(targetImageNum) )
            return &info;
    }
#else
#error Must be building dyld or libdyld
#endif
    return nullptr;
}
163
// Convert a ResolvedSymbolTarget (an encoded location from the closure) into a
// runtime address. Aborts (assert) on a malformed target.
uintptr_t Loader::resolveTarget(closure::Image::ResolvedSymbolTarget target)
{
    const LoadedImage* info;
    switch ( target.sharedCache.kind ) {
        case closure::Image::ResolvedSymbolTarget::kindSharedCache:
            // target is an offset from the start of the dyld shared cache
            assert(_dyldCacheAddress != nullptr);
            return (uintptr_t)_dyldCacheAddress + (uintptr_t)target.sharedCache.offset;

        case closure::Image::ResolvedSymbolTarget::kindImage:
            // target is an offset into some loaded image, identified by imageNum
            info = findImage(target.image.imageNum);
            assert(info != nullptr);
            return (uintptr_t)(info->loadedAddress()) + (uintptr_t)target.image.offset;

        case closure::Image::ResolvedSymbolTarget::kindAbsolute:
            // the absolute value is stored in a 62-bit field; if bit 62 is set the
            // value was negative, so set the top two bits to sign-extend it
            if ( target.absolute.value & (1ULL << 62) )
                return (uintptr_t)(target.absolute.value | 0xC000000000000000ULL);
            else
                return (uintptr_t)target.absolute.value;
    }
    assert(0 && "malformed ResolvedSymbolTarget");
    return 0;
}
186
187
// Starting from the images already in _newImages, recursively add every dependent
// dylib (resolved via the closure's ImageArrays) until _newImages is transitively
// closed. Sets 'someCacheImageOverridden' if any image overrides a dyld-cache dylib.
// On failure (missing dependent, or _newImages storage exhausted) 'diag' holds the error.
void Loader::completeAllDependents(Diagnostics& diag, bool& someCacheImageOverridden)
{
    bool iOSonMac = (_platform == Platform::iOSMac);
#if (TARGET_OS_OSX && TARGET_CPU_ARM64)
    // on Apple Silicon macs, iOS binaries also get the unzippered-twin treatment
    if ( _platform == Platform::iOS )
        iOSonMac = true;
#endif
    // accumulate all image overrides (512 is placeholder for max unzippered twins in dyld cache)
    STACK_ALLOC_ARRAY(ImageOverride, overrides, _existingImages.maxCount() + _newImages.maxCount() + 512);
    for (const auto anArray : _imagesArrays) {
        // ignore prebuilt Image* in dyld cache, except for MacCatalyst apps where unzipped twins can override each other
        if ( (anArray->startImageNum() < dyld3::closure::kFirstLaunchClosureImageNum) && !iOSonMac )
            continue;
        anArray->forEachImage(^(const dyld3::closure::Image* image, bool& stop) {
            ImageOverride overrideEntry;
            if ( image->isOverrideOfDyldCacheImage(overrideEntry.inCache) ) {
                someCacheImageOverridden = true;
                overrideEntry.replacement = image->imageNum();
                overrides.push_back(overrideEntry);
            }
        });
    }

    // make cache for fast lookup of already loaded images
    __block ImageNumSet alreadyLoaded;
    for (const LoadedImage& info : _existingImages) {
        alreadyLoaded.add(info.image()->imageNum());
    }
    // the first entry of _newImages (the image whose dependents we are completing) counts as loaded
    alreadyLoaded.add(_newImages.begin()->image()->imageNum());

    // for each image in _newImages, starting at the top image, make sure its dependents are in _allImages
    // note: _newImages grows while we iterate, which is how the transitive closure is walked
    uintptr_t index = 0;
    while ( (index < _newImages.count()) && diag.noError() ) {
        const closure::Image* image = _newImages[index].image();
        //dyld::log("completeAllDependents(): looking at dependents of %s\n", image->path());
        image->forEachDependentImage(^(uint32_t depIndex, closure::Image::LinkKind kind, closure::ImageNum depImageNum, bool& stop) {
            // check if imageNum needs to be changed to an override
            for (const ImageOverride& entry : overrides) {
                if ( entry.inCache == depImageNum ) {
                    depImageNum = entry.replacement;
                    break;
                }
            }
            // check if this dependent is already loaded
            if ( !alreadyLoaded.contains(depImageNum) ) {
                // if not, look in imagesArrays
                const closure::Image* depImage = closure::ImageArray::findImage(_imagesArrays, depImageNum);
                if ( depImage != nullptr ) {
                    //dyld::log("  load imageNum=0x%05X, image path=%s\n", depImageNum, depImage->path());
                    if ( _newImages.freeCount() == 0 ) {
                        // _newImages was pre-allocated by the caller and cannot grow here
                        diag.error("too many initial images");
                        stop = true;
                    }
                    else {
                        _newImages.push_back(LoadedImage::make(depImage));
                    }
                    alreadyLoaded.add(depImageNum);
                }
                else {
                    diag.error("unable to locate imageNum=0x%04X, depIndex=%d of %s", depImageNum, depIndex, image->path());
                    stop = true;
                }
            }
        });
        ++index;
    }
}
255
// Map (or locate in the dyld cache) every image in _newImages, then apply fixups.
// Dylibs are fixed up before the main executable so that a dylib failure leaves the
// main executable untouched and the launch can be retried with a rebuilt closure.
// On failure, any image just mapped is unmapped; *recoverable is cleared only once
// the main executable itself has started being fixed up. *closureOutOfDate is set
// when the failure suggests the closure no longer matches the on-disk files.
void Loader::mapAndFixupAllImages(Diagnostics& diag, bool processDOFs, bool fromOFI, bool* closureOutOfDate, bool* recoverable)
{
    *closureOutOfDate = false;
    *recoverable = true;

    // scan array and map images not already loaded
    for (LoadedImage& info : _newImages) {
        if ( info.loadedAddress() != nullptr ) {
            // log main executable's segments
            if ( (info.loadedAddress()->filetype == MH_EXECUTE) && (info.state() == LoadedImage::State::mapped) ) {
                // note: the LogFunc returns true when this log category is enabled
                if ( _logSegments("dyld: mapped by kernel %s\n", info.image()->path()) ) {
                    info.image()->forEachDiskSegment(^(uint32_t segIndex, uint32_t fileOffset, uint32_t fileSize, int64_t vmOffset, uint64_t vmSize, uint8_t permissions, bool laterReadOnly, bool& stop) {
                        uint64_t start = (long)info.loadedAddress() + vmOffset;
                        uint64_t end   = start+vmSize-1;
                        if ( (segIndex == 0) && (permissions == 0) ) {
                            // first segment with no permissions (e.g. __PAGEZERO) is logged from address 0
                            start = 0;
                        }
                        _logSegments("%14s (%c%c%c) 0x%012llX->0x%012llX \n", info.loadedAddress()->segmentName(segIndex),
                                     (permissions & PROT_READ) ? 'r' : '.', (permissions & PROT_WRITE) ? 'w' : '.', (permissions & PROT_EXEC) ? 'x' : '.' ,
                                     start, end);
                    });
                }
            }
            // skip over ones already loaded
            continue;
        }
        if ( info.image()->inDyldCache() ) {
            if ( info.image()->overridableDylib() ) {
                // check for a "root": an on-disk file overriding this cached dylib, which invalidates the closure
                struct stat statBuf;
                if ( dyld3::stat(info.image()->path(), &statBuf) == 0 ) {
                    dyld3::closure::FileSystemPhysical fileSystem;
                    if ( _rootsChecker.onDiskFileIsRoot(info.image()->path(), (const DyldSharedCache*)_dyldCacheAddress, info.image(),
                                                        &fileSystem, statBuf.st_ino, statBuf.st_mtime) ) {
                        if ( ((const DyldSharedCache*)_dyldCacheAddress)->header.dylibsExpectedOnDisk ) {
                            diag.error("dylib file mtime/inode changed since closure was built for '%s'", info.image()->path());
                        } else {
                            diag.error("dylib file not expected on disk, must be a root '%s'", info.image()->path());
                        }
                        *closureOutOfDate = true;
                    }
                }
                else if ( (_dyldCacheAddress != nullptr) && ((dyld_cache_header*)_dyldCacheAddress)->dylibsExpectedOnDisk ) {
                    diag.error("dylib file missing, was in dyld shared cache '%s'", info.image()->path());
                    *closureOutOfDate = true;
                }
            }
            if ( diag.noError() ) {
                // cached dylibs are already mapped and fixed up as part of the shared cache
                info.setLoadedAddress((MachOLoaded*)((uintptr_t)_dyldCacheAddress + info.image()->cacheOffset()));
                info.setState(LoadedImage::State::fixedUp);
                if ( _logSegments("dyld: Using from dyld cache %s\n", info.image()->path()) ) {
                    info.image()->forEachCacheSegment(^(uint32_t segIndex, uint64_t vmOffset, uint64_t vmSize, uint8_t permissions, bool &stop) {
                        _logSegments("%14s (%c%c%c) 0x%012lX->0x%012lX \n", info.loadedAddress()->segmentName(segIndex),
                                     (permissions & PROT_READ) ? 'r' : '.', (permissions & PROT_WRITE) ? 'w' : '.', (permissions & PROT_EXEC) ? 'x' : '.' ,
                                     (long)info.loadedAddress()+(long)vmOffset, (long)info.loadedAddress()+(long)vmOffset+(long)vmSize-1);
                    });
                }
            }
        }
        else {
            // not in the dyld cache: mmap() the image from disk
            mapImage(diag, info, fromOFI, closureOutOfDate);
            if ( diag.hasError() )
                break; // out of for loop
        }

    }
    if ( diag.hasError() ) {
        // need to clean up by unmapping any images just mapped
        unmapAllImages();
        return;
    }

    // apply fixups to all but main executable
    LoadedImage* mainInfo = nullptr;
    for (LoadedImage& info : _newImages) {
        // images in shared cache do not need fixups applied
        if ( info.image()->inDyldCache() )
            continue;
        if ( info.loadedAddress()->filetype == MH_EXECUTE ) {
            // defer the main executable until all dylibs have succeeded
            mainInfo = &info;
            continue;
        }
        // previously loaded images were previously fixed up
        if ( info.state() < LoadedImage::State::fixedUp ) {
            applyFixupsToImage(diag, info);
            if ( diag.hasError() )
                break;
            info.setState(LoadedImage::State::fixedUp);
        }
    }
    if ( diag.hasError() ) {
        // need to clean up by unmapping any images just mapped
        unmapAllImages();
        return;
    }

    if ( mainInfo != nullptr ) {
        // now apply fixups to main executable
        // we do it in this order so that if there is a problem with the dylibs in the closure
        // the main executable is left untouched so the closure can be rebuilt
        applyFixupsToImage(diag, *mainInfo);
        if ( diag.hasError() ) {
            // need to clean up by unmapping any images just mapped
            unmapAllImages();
            // we have already started fixing up the main executable, so we cannot retry the launch again
            *recoverable = false;
            return;
        }
        mainInfo->setState(LoadedImage::State::fixedUp);
    }

    // find and register dtrace DOFs
    if ( processDOFs ) {
        STACK_ALLOC_OVERFLOW_SAFE_ARRAY(DOFInfo, dofImages, _newImages.count());
        for (LoadedImage& info : _newImages) {
            info.image()->forEachDOF(info.loadedAddress(), ^(const void* section) {
                DOFInfo dofInfo;
                dofInfo.dof = section;
                dofInfo.imageHeader = info.loadedAddress();
                dofInfo.imageShortName = info.image()->leafName();
                dofImages.push_back(dofInfo);
            });
        }
        registerDOFs(dofImages);
    }
}
381
382 void Loader::unmapAllImages()
383 {
384 for (LoadedImage& info : _newImages) {
385 if ( !info.image()->inDyldCache() && !info.leaveMapped() ) {
386 if ( (info.state() == LoadedImage::State::mapped) || (info.state() == LoadedImage::State::fixedUp) ) {
387 _logSegments("dyld: unmapping %s\n", info.image()->path());
388 unmapImage(info);
389 }
390 }
391 }
392 }
393
// Returns true if the process's sandbox profile denies operation 'kind' on 'path'.
// Used to produce better diagnostics when open()/stat()/mmap() fail with EPERM.
bool Loader::sandboxBlocked(const char* path, const char* kind)
{
#if TARGET_OS_SIMULATOR || TARGET_OS_DRIVERKIT
    // sandbox calls not yet supported in dyld_sim
    return false;
#else
    // NO_REPORT: this is a query, don't log a sandbox violation
    sandbox_filter_type filter = (sandbox_filter_type)(SANDBOX_FILTER_PATH | SANDBOX_CHECK_NO_REPORT);
    return ( sandbox_check(getpid(), kind, filter, path) > 0 );
#endif
}
404
// Returns true if the sandbox blocks mmap()ing 'path' as executable.
bool Loader::sandboxBlockedMmap(const char* path)
{
    return sandboxBlocked(path, "file-map-executable");
}
409
// Returns true if the sandbox blocks reading the contents of 'path'.
bool Loader::sandboxBlockedOpen(const char* path)
{
    return sandboxBlocked(path, "file-read-data");
}
414
// Returns true if the sandbox blocks reading the metadata (stat) of 'path'.
bool Loader::sandboxBlockedStat(const char* path)
{
    return sandboxBlocked(path, "file-read-metadata");
}
419
// Map one image (not in the dyld cache) into memory from disk:
//   open + stat, verify mtime/inode against the closure, register the code
//   signature with the kernel, vm_allocate the full VM range, mmap each segment,
//   verify the CD hash still matches the closure, and register fairplay regions.
// On success, info.loadedAddress() is set and state becomes State::mapped.
// On failure, 'diag' has the error and *closureOutOfDate is set when rebuilding
// the closure may resolve it. 'fromOFI' is unused in the code visible here —
// presumably relevant on other build configurations; confirm against callers.
void Loader::mapImage(Diagnostics& diag, LoadedImage& info, bool fromOFI, bool* closureOutOfDate)
{
    dyld3::ScopedTimer timer(DBG_DYLD_TIMING_MAP_IMAGE, info.image()->path(), 0, 0);

    const closure::Image* image          = info.image();
    uint64_t              sliceOffset    = image->sliceOffsetInFile();
    const uint64_t        totalVMSize    = image->vmSizeToMap();
    uint32_t              codeSignFileOffset;
    uint32_t              codeSignFileSize;
    bool                  isCodeSigned   = image->hasCodeSignature(codeSignFileOffset, codeSignFileSize);

    // open file
    int fd = dyld3::open(info.image()->path(), O_RDONLY, 0);
    if ( fd == -1 ) {
        int openErr = errno;
        if ( (openErr == EPERM) && sandboxBlockedOpen(image->path()) )
            diag.error("file system sandbox blocked open(\"%s\", O_RDONLY)", image->path());
        else
            diag.error("open(\"%s\", O_RDONLY) failed with errno=%d", image->path(), openErr);
        return;
    }

    // get file info
    struct stat statBuf;
#if TARGET_OS_SIMULATOR
    if ( dyld3::stat(image->path(), &statBuf) != 0 ) {
#else
    if ( fstat(fd, &statBuf) != 0 ) {
#endif
        int statErr = errno;
        if ( (statErr == EPERM) && sandboxBlockedStat(image->path()) )
            diag.error("file system sandbox blocked stat(\"%s\")", image->path());
        else
            diag.error("stat(\"%s\") failed with errno=%d", image->path(), statErr);
        close(fd);
        return;
    }

    // verify file has not changed since closure was built
    uint64_t inode;
    uint64_t mtime;
    if ( image->hasFileModTimeAndInode(inode, mtime) ) {
        if ( (statBuf.st_mtime != mtime) || (statBuf.st_ino != inode) ) {
            diag.error("file mtime/inode changed since closure was built for '%s'", image->path());
            *closureOutOfDate = true;
            close(fd);
            return;
        }
    }

    // handle case on iOS where sliceOffset in closure is wrong because file was thinned after cache was built
    if ( (_dyldCacheAddress != nullptr) && !(((dyld_cache_header*)_dyldCacheAddress)->dylibsExpectedOnDisk) ) {
        if ( sliceOffset != 0 ) {
            if ( round_page_kernel(codeSignFileOffset+codeSignFileSize) == round_page_kernel(statBuf.st_size) ) {
                // file is now thin
                sliceOffset = 0;
            }
        }
    }

    // for thin files, the code signature must be the last thing in the file (page aligned)
    if ( isCodeSigned && (sliceOffset == 0) ) {
        uint64_t expectedFileSize = round_page_kernel(codeSignFileOffset+codeSignFileSize);
        uint64_t actualFileSize   = round_page_kernel(statBuf.st_size);
        if ( actualFileSize < expectedFileSize ) {
            diag.error("File size too small for code signature");
            *closureOutOfDate = true;
            close(fd);
            return;
        }
        if ( actualFileSize != expectedFileSize ) {
            diag.error("File size doesn't match code signature");
            *closureOutOfDate = true;
            close(fd);
            return;
        }
    }

    // register code signature
    uint64_t coveredCodeLength = UINT64_MAX;
    if ( isCodeSigned ) {
        auto sigTimer = ScopedTimer(DBG_DYLD_TIMING_ATTACH_CODESIGNATURE, 0, 0, 0);
        fsignatures_t siginfo;
        siginfo.fs_file_start = sliceOffset;                             // start of mach-o slice in fat file
        siginfo.fs_blob_start = (void*)(long)(codeSignFileOffset);      // start of CD in mach-o file
        siginfo.fs_blob_size  = codeSignFileSize;                        // size of CD
        int result = fcntl(fd, F_ADDFILESIGS_RETURN, &siginfo);
        if ( result == -1 ) {
            int errnoCopy = errno;
            if ( (errnoCopy == EPERM) || (errnoCopy == EBADEXEC) ) {
                diag.error("code signature invalid (errno=%d) sliceOffset=0x%08llX, codeBlobOffset=0x%08X, codeBlobSize=0x%08X for '%s'",
                           errnoCopy, sliceOffset, codeSignFileOffset, codeSignFileSize, image->path());
#if BUILDING_LIBDYLD
                if ( errnoCopy == EBADEXEC ) {
                    // dlopen closures may be prebuilt into the shared cache with a code signature, but the dylib is replaced
                    // with one without a code signature. In that case, lets build a new closure
                    *closureOutOfDate = true;
                }
#endif
            }
            else {
                diag.error("fcntl(fd, F_ADDFILESIGS_RETURN) failed with errno=%d, sliceOffset=0x%08llX, codeBlobOffset=0x%08X, codeBlobSize=0x%08X for '%s'",
                           errnoCopy, sliceOffset, codeSignFileOffset, codeSignFileSize, image->path());
            }
            close(fd);
            return;
        }
        // on return, fs_file_start holds how far into the file the signature covers
        coveredCodeLength = siginfo.fs_file_start;
        if ( coveredCodeLength < codeSignFileOffset ) {
            diag.error("code signature does not cover entire file up to signature");
            close(fd);
            return;
        }
    }

    // <rdar://problem/41015217> dyld should use F_CHECK_LV even on unsigned binaries
    {
        // <rdar://problem/32684903> always call F_CHECK_LV to preflight
        fchecklv checkInfo;
        char  messageBuffer[512];
        messageBuffer[0] = '\0';
        checkInfo.lv_file_start         = sliceOffset;
        checkInfo.lv_error_message_size = sizeof(messageBuffer);
        checkInfo.lv_error_message      = messageBuffer;
        int res = fcntl(fd, F_CHECK_LV, &checkInfo);
        if ( res == -1 ) {
            // kernel filled messageBuffer with a human-readable reason
            diag.error("code signature in (%s) not valid for use in process: %s", image->path(), messageBuffer);
            close(fd);
            return;
        }
    }

    // reserve address range
    vm_address_t loadAddress = 0;
    kern_return_t r = vm_allocate(mach_task_self(), &loadAddress, (vm_size_t)totalVMSize, VM_FLAGS_ANYWHERE);
    if ( r != KERN_SUCCESS ) {
        diag.error("vm_allocate(size=0x%0llX) failed with result=%d", totalVMSize, r);
        close(fd);
        return;
    }

    if ( sliceOffset != 0 )
        _logSegments("dyld: Mapping %s (slice offset=%llu)\n", image->path(), sliceOffset);
    else
        _logSegments("dyld: Mapping %s\n", image->path());

    // map each segment over the reserved range
    __block bool           mmapFailure               = false;
    __block const uint8_t* codeSignatureStartAddress = nullptr;
    __block const uint8_t* linkeditEndAddress        = nullptr;
    __block bool           mappedFirstSegment        = false;
    __block uint64_t       maxFileOffset             = 0;
    image->forEachDiskSegment(^(uint32_t segIndex, uint32_t fileOffset, uint32_t fileSize, int64_t vmOffset, uint64_t vmSize, uint8_t permissions, bool laterReadOnly, bool& stop) {
        // <rdar://problem/32363581> Mapping zero filled segments fails with mmap of size 0
        if ( fileSize == 0 )
            return;
        void* segAddress = mmap((void*)(loadAddress+vmOffset), fileSize, permissions, MAP_FIXED | MAP_PRIVATE, fd, sliceOffset+fileOffset);
        int mmapErr = errno;
        if ( segAddress == MAP_FAILED ) {
            if ( mmapErr == EPERM ) {
                if ( sandboxBlockedMmap(image->path()) )
                    diag.error("file system sandbox blocked mmap() of '%s'", image->path());
                else
                    diag.error("code signing blocked mmap() of '%s'", image->path());
            }
            else {
                diag.error("mmap(addr=0x%0llX, size=0x%08X) failed with errno=%d for %s", loadAddress+vmOffset, fileSize, mmapErr, image->path());
            }
            mmapFailure = true;
            stop = true;
        }
        else if ( codeSignFileOffset > fileOffset ) {
            // remember where the code signature blob landed for the CD hash check below
            codeSignatureStartAddress = (uint8_t*)segAddress + (codeSignFileOffset-fileOffset);
            linkeditEndAddress = (uint8_t*)segAddress + vmSize;
        }
        // sanity check first segment is mach-o header
        if ( (segAddress != MAP_FAILED) && !mappedFirstSegment ) {
            mappedFirstSegment = true;
            const MachOFile* mf = (MachOFile*)segAddress;
            if ( !mf->isMachO(diag, fileSize) ) {
                mmapFailure = true;
                stop = true;
            }
        }
        if ( !mmapFailure ) {
            const MachOLoaded* lmo = (MachOLoaded*)loadAddress;
            _logSegments("%14s (%c%c%c) 0x%012lX->0x%012lX \n", lmo->segmentName(segIndex),
                         (permissions & PROT_READ) ? 'r' : '.', (permissions & PROT_WRITE) ? 'w' : '.', (permissions & PROT_EXEC) ? 'x' : '.' ,
                         (long)segAddress, (long)segAddress+(long)vmSize-1);
        }
        maxFileOffset = fileOffset + fileSize;
    });
    if ( mmapFailure ) {
        // release the reserved range; partially mapped segments go with it
        ::vm_deallocate(mach_task_self(), loadAddress, (vm_size_t)totalVMSize);
        ::close(fd);
        return;
    }

    // <rdar://problem/47163421> speculatively read whole slice
    fspecread_t specread = {} ;
    specread.fsr_offset = sliceOffset;
    specread.fsr_length = maxFileOffset;
    specread.fsr_flags  = 0;
    fcntl(fd, F_SPECULATIVE_READ, &specread);
    _logSegments("dyld: Speculatively read offset=0x%08llX, len=0x%08llX, path=%s\n", sliceOffset, maxFileOffset, image->path());

    // close file
    close(fd);

#if BUILDING_LIBDYLD
    // verify file has not changed since closure was built by checking code signature has not changed
    struct CDHashWrapper {
        uint8_t cdHash[20];    // one SHA-1-sized code directory hash
    };

    // Get all the hashes for the image
    STACK_ALLOC_OVERFLOW_SAFE_ARRAY(CDHashWrapper, expectedCDHashes, 1);
    image->forEachCDHash(^(const uint8_t *cdHash, bool &stop) {
        CDHashWrapper cdHashWrapper;
        memcpy(cdHashWrapper.cdHash, cdHash, sizeof(CDHashWrapper::cdHash));
        expectedCDHashes.push_back(cdHashWrapper);
    });

    if (!expectedCDHashes.empty()) {
        if (expectedCDHashes.count() != 1) {
            // We should only see a single hash for dylibs
            diag.error("code signature count invalid");
        } else if ( codeSignatureStartAddress == nullptr ) {
            diag.error("code signature missing");
        }
        else if ( codeSignatureStartAddress+codeSignFileSize > linkeditEndAddress ) {
            diag.error("code signature extends beyond end of __LINKEDIT");
        }
        else {
            // Get all the cd hashes for the macho
            STACK_ALLOC_OVERFLOW_SAFE_ARRAY(CDHashWrapper, foundCDHashes, 1);
            const MachOLoaded* lmo = (MachOLoaded*)loadAddress;
            lmo->forEachCDHashOfCodeSignature(codeSignatureStartAddress, codeSignFileSize,
                                              ^(const uint8_t *cdHash) {
                CDHashWrapper cdHashWrapper;
                memcpy(cdHashWrapper.cdHash, cdHash, sizeof(CDHashWrapper::cdHash));
                foundCDHashes.push_back(cdHashWrapper);
            });

            if (foundCDHashes.empty()) {
                diag.error("code signature format invalid");
            } else if (expectedCDHashes.count() != foundCDHashes.count()) {
                diag.error("code signature count invalid");
            } else {
                // We found a hash, so make sure its equal.
                if ( ::memcmp(foundCDHashes[0].cdHash, expectedCDHashes[0].cdHash, 20) != 0 )
                    diag.error("code signature changed since closure was built");
            }
        }
        if ( diag.hasError() ) {
            *closureOutOfDate = true;
            ::vm_deallocate(mach_task_self(), loadAddress, (vm_size_t)totalVMSize);
            return;
        }
    }

#endif

#if (__arm__ || __arm64__) && !TARGET_OS_SIMULATOR
    // tell kernel about fairplay encrypted regions
    uint32_t fpTextOffset;
    uint32_t fpSize;
    if ( image->isFairPlayEncrypted(fpTextOffset, fpSize) ) {
        const mach_header* mh = (mach_header*)loadAddress;
        int result = ::mremap_encrypted(((uint8_t*)mh) + fpTextOffset, fpSize, 1, mh->cputype, mh->cpusubtype);
        if ( result != 0 ) {
            diag.error("could not register fairplay decryption, mremap_encrypted() => %d", result);
            ::vm_deallocate(mach_task_self(), loadAddress, (vm_size_t)totalVMSize);
            return;
        }
    }
#endif

    _logLoads("dyld: load %s\n", image->path());

    timer.setData4((uint64_t)loadAddress);
    info.setLoadedAddress((MachOLoaded*)loadAddress);
    info.setState(LoadedImage::State::mapped);
}
703
704 void Loader::unmapImage(LoadedImage& info)
705 {
706 assert(info.loadedAddress() != nullptr);
707 ::vm_deallocate(mach_task_self(), (vm_address_t)info.loadedAddress(), (vm_size_t)(info.image()->vmSizeToMap()));
708 info.setLoadedAddress(nullptr);
709 }
710
// Register all dtrace DOF sections found in the newly loaded images with the
// kernel's dtrace helper device. Failures are logged, never fatal.
void Loader::registerDOFs(const Array<DOFInfo>& dofs)
{
    if ( dofs.empty() )
        return;

    int fd = ::open("/dev/" DTRACEMNR_HELPER, O_RDWR);
    if ( fd < 0 ) {
        _logDofs("can't open /dev/" DTRACEMNR_HELPER " to register dtrace DOF sections\n");
    }
    else {
        // allocate a buffer on the stack for the variable length dof_ioctl_data_t type
        uint8_t buffer[sizeof(dof_ioctl_data_t) + dofs.count()*sizeof(dof_helper_t)];
        dof_ioctl_data_t* ioctlData = (dof_ioctl_data_t*)buffer;

        // fill in buffer with one dof_helper_t per DOF section
        ioctlData->dofiod_count = dofs.count();
        for (unsigned int i=0; i < dofs.count(); ++i) {
            strlcpy(ioctlData->dofiod_helpers[i].dofhp_mod, dofs[i].imageShortName, DTRACE_MODNAMELEN);
            ioctlData->dofiod_helpers[i].dofhp_dof = (uintptr_t)(dofs[i].dof);
            ioctlData->dofiod_helpers[i].dofhp_addr = (uintptr_t)(dofs[i].dof);
        }

        // tell kernel about all DOF sections en masse
        // pass pointer to ioctlData because ioctl() only copies a fixed size amount of data into kernel
        user_addr_t val = (user_addr_t)(unsigned long)ioctlData;
        if ( ioctl(fd, DTRACEHIOC_ADDDOF, &val) != -1 ) {
            // kernel returns a unique identifier for each section in the dofiod_helpers[].dofhp_dof field.
            // Note, the closure marked the image as being never unload, so we don't need to keep the ID around
            // or support unregistering it later.
            for (unsigned int i=0; i < dofs.count(); ++i) {
                _logDofs("dyld: registering DOF section %p in %s with dtrace, ID=0x%08X\n",
                         dofs[i].dof, dofs[i].imageShortName, (int)(ioctlData->dofiod_helpers[i].dofhp_dof));
            }
        }
        else {
            _logDofs("dyld: ioctl to register dtrace DOF section failed\n");
        }
        close(fd);
    }
}
751
// Returns true if the kernel has dtrace user probes (DOF) enabled,
// as published via the comm page flag byte (low bit = enabled).
bool Loader::dtraceUserProbesEnabled()
{
#if !TARGET_OS_SIMULATOR
    uint8_t dofEnabled = *((uint8_t*)_COMM_PAGE_DTRACE_DOF_ENABLED);
    return ( (dofEnabled & 1) );
#else
    // the simulator has no comm-page flag to consult; treat as disabled
    return false;
#endif
}
761
762
// Suspend or resume the kernel's VM footprint accounting (embedded devices only)
// while dyld dirties pages applying fixups caused by roots.
void Loader::vmAccountingSetSuspended(bool suspend, LogFunc logger)
{
#if TARGET_OS_IPHONE && !TARGET_OS_SIMULATOR
    // <rdar://problem/29099600> dyld should tell the kernel when it is doing fix-ups caused by roots
    logger("vm.footprint_suspend=%d\n", suspend);
    int newValue = suspend ? 1 : 0;
    int oldValue = 0;
    size_t newlen = sizeof(newValue);
    size_t oldlen = sizeof(oldValue);
    // best effort: the sysctl result is intentionally ignored
    sysctlbyname("vm.footprint_suspend", &oldValue, &oldlen, &newValue, newlen);
#endif
}
775
776 static const char* targetString(const MachOAnalyzerSet::FixupTarget& target)
777 {
778 switch (target.kind ) {
779 case MachOAnalyzerSet::FixupTarget::Kind::rebase:
780 return "rebase";
781 case MachOAnalyzerSet::FixupTarget::Kind::bindAbsolute:
782 return "abolute";
783 case MachOAnalyzerSet::FixupTarget::Kind::bindToImage:
784 return target.foundSymbolName;
785 case MachOAnalyzerSet::FixupTarget::Kind::bindMissingSymbol:
786 return "missing";
787 }
788 return "";
789 }
790
791 void Loader::applyFixupsToImage(Diagnostics& diag, LoadedImage& info)
792 {
793 dyld3::ScopedTimer timer(DBG_DYLD_TIMING_APPLY_FIXUPS, (uint64_t)info.loadedAddress(), 0, 0);
794 closure::ImageNum cacheImageNum;
795 const char* leafName = info.image()->leafName();
796 const closure::Image* image = info.image();
797 const uint8_t* imageLoadAddress = (uint8_t*)info.loadedAddress();
798 uintptr_t slide = info.loadedAddress()->getSlide();
799 bool overrideOfCache = info.image()->isOverrideOfDyldCacheImage(cacheImageNum);
800
801 if ( overrideOfCache )
802 vmAccountingSetSuspended(true, _logFixups);
803 if ( image->fixupsNotEncoded() ) {
804 WrappedMachO wmo((MachOAnalyzer*)info.loadedAddress(), this, (void*)info.image());
805 wmo.forEachFixup(diag,
806 ^(uint64_t fixupLocRuntimeOffset, PointerMetaData pmd, const FixupTarget& target, bool& stop) {
807 uintptr_t* fixUpLoc = (uintptr_t*)(imageLoadAddress + fixupLocRuntimeOffset);
808 uintptr_t value;
809 switch ( target.kind ) {
810 case MachOAnalyzerSet::FixupTarget::Kind::rebase:
811 case MachOAnalyzerSet::FixupTarget::Kind::bindToImage:
812 value = (uintptr_t)(target.foundInImage._mh) + target.offsetInImage;
813 break;
814 case MachOAnalyzerSet::FixupTarget::Kind::bindAbsolute:
815 value = (uintptr_t)target.offsetInImage;
816 break;
817 case MachOAnalyzerSet::FixupTarget::Kind::bindMissingSymbol:
818 if ( _launchErrorInfo ) {
819 _launchErrorInfo->kind = DYLD_EXIT_REASON_SYMBOL_MISSING;
820 _launchErrorInfo->clientOfDylibPath = info.image()->path();
821 _launchErrorInfo->targetDylibPath = target.foundInImage.path();
822 _launchErrorInfo->symbol = target.requestedSymbolName;
823 }
824 // we have no value to set, and forEachFixup() is about to finish
825 return;
826 }
827 #if __has_feature(ptrauth_calls)
828 if ( pmd.authenticated )
829 value = MachOLoaded::ChainedFixupPointerOnDisk::Arm64e::signPointer(value, fixUpLoc, pmd.usesAddrDiversity, pmd.diversity, pmd.key);
830 #endif
831 if ( pmd.high8 )
832 value |= ((uint64_t)pmd.high8 << 56);
833 _logFixups("dyld: fixup: %s:%p = %p (%s)\n", leafName, fixUpLoc, (void*)value, targetString(target));
834 *fixUpLoc = value;
835 },
836 ^(uint32_t cachedDylibIndex, uint32_t exportCacheOffset, const FixupTarget& target) {
837 #if BUILDING_LIBDYLD && __x86_64__
838 // Full dlopen closures don't patch weak defs. Bail out early if we are libdyld to match this behaviour
839 return;
840 #endif
841 ((const DyldSharedCache*)_dyldCacheAddress)->forEachPatchableUseOfExport(cachedDylibIndex, exportCacheOffset, ^(dyld_cache_patchable_location patchLoc) {
842 uintptr_t* loc = (uintptr_t*)(((uint8_t*)_dyldCacheAddress)+patchLoc.cacheOffset);
843 uintptr_t newImpl = (uintptr_t)(target.foundInImage._mh) + target.offsetInImage + DyldSharedCache::getAddend(patchLoc);
844 #if __has_feature(ptrauth_calls)
845 if ( patchLoc.authenticated )
846 newImpl = MachOLoaded::ChainedFixupPointerOnDisk::Arm64e::signPointer(newImpl, loc, patchLoc.usesAddressDiversity, patchLoc.discriminator, patchLoc.key);
847 #endif
848 // ignore duplicate patch entries
849 if ( *loc != newImpl ) {
850 _logFixups("dyld: cache patch: %p = 0x%0lX\n", loc, newImpl);
851 *loc = newImpl;
852 }
853 });
854 });
855 #if BUILDING_LIBDYLD && TARGET_OS_OSX
856 // <rdar://problem/59265987> support old licenseware plugins on macOS using minimal closures
857 __block bool oldBinary = true;
858 info.loadedAddress()->forEachSupportedPlatform(^(Platform platform, uint32_t minOS, uint32_t sdk) {
859 if ( (platform == Platform::macOS) && (sdk >= 0x000A0F00) )
860 oldBinary = false;
861 });
862 if ( oldBinary ) {
863 // look for __DATA,__dyld section
864 info.loadedAddress()->forEachSection(^(const MachOAnalyzer::SectionInfo& sectInfo, bool malformedSectionRange, bool& stop) {
865 if ( (strcmp(sectInfo.sectName, "__dyld") == 0) && (strcmp(sectInfo.segInfo.segName, "__DATA") == 0) ) {
866 // dyld_func_lookup is second pointer in __dyld section
867 uintptr_t* dyldSection = (uintptr_t*)(sectInfo.sectAddr + (uintptr_t)info.loadedAddress());
868 _logFixups("dyld: __dyld section: %p = %p\n", &dyldSection[1], &dyld3::compatFuncLookup);
869 dyldSection[1] = (uintptr_t)&dyld3::compatFuncLookup;
870 }
871 });
872 }
873 #endif
874 }
875 else {
876 if ( image->rebasesNotEncoded() ) {
877 // <rdar://problem/56172089> some apps have so many rebases the closure file is too big, instead we go back to rebase opcodes
878 ((MachOAnalyzer*)imageLoadAddress)->forEachRebase(diag, true, ^(uint64_t imageOffsetToRebase, bool& stop) {
879 // this is a rebase, add slide
880 uintptr_t* fixUpLoc = (uintptr_t*)(imageLoadAddress + imageOffsetToRebase);
881 *fixUpLoc += slide;
882 _logFixups("dyld: fixup: %s:%p += %p\n", leafName, fixUpLoc, (void*)slide);
883 });
884 }
885 image->forEachFixup(^(uint64_t imageOffsetToRebase, bool& stop) {
886 // this is a rebase, add slide
887 uintptr_t* fixUpLoc = (uintptr_t*)(imageLoadAddress + imageOffsetToRebase);
888 *fixUpLoc += slide;
889 _logFixups("dyld: fixup: %s:%p += %p\n", leafName, fixUpLoc, (void*)slide);
890 },
891 ^(uint64_t imageOffsetToBind, closure::Image::ResolvedSymbolTarget bindTarget, bool& stop) {
892 // this is a bind, set to target
893 uintptr_t* fixUpLoc = (uintptr_t*)(imageLoadAddress + imageOffsetToBind);
894 uintptr_t value = resolveTarget(bindTarget);
895 _logFixups("dyld: fixup: %s:%p = %p\n", leafName, fixUpLoc, (void*)value);
896 *fixUpLoc = value;
897 },
898 ^(uint64_t imageOffsetToStartsInfo, const Array<closure::Image::ResolvedSymbolTarget>& targets, bool& stop) {
899 // this is a chain of fixups, fix up all
900 STACK_ALLOC_OVERFLOW_SAFE_ARRAY(const void*, targetAddrs, 128);
901 targetAddrs.reserve(targets.count());
902 for (uint32_t i=0; i < targets.count(); ++i)
903 targetAddrs.push_back((void*)resolveTarget(targets[i]));
904 ((dyld3::MachOAnalyzer*)(info.loadedAddress()))->withChainStarts(diag, imageOffsetToStartsInfo, ^(const dyld_chained_starts_in_image* starts) {
905 info.loadedAddress()->fixupAllChainedFixups(diag, starts, slide, targetAddrs, ^(void* loc, void* newValue) {
906 _logFixups("dyld: fixup: %s:%p = %p\n", leafName, loc, newValue);
907 });
908 });
909 },
910 ^(uint64_t imageOffsetToFixup) {
911 uintptr_t* fixUpLoc = (uintptr_t*)(imageLoadAddress + imageOffsetToFixup);
912 _logFixups("dyld: fixup objc image info: %s Setting objc image info for precomputed objc\n", leafName);
913
914 MachOAnalyzer::ObjCImageInfo *imageInfo = (MachOAnalyzer::ObjCImageInfo *)fixUpLoc;
915 ((MachOAnalyzer::ObjCImageInfo *)imageInfo)->flags |= MachOAnalyzer::ObjCImageInfo::dyldPreoptimized;
916 },
917 ^(uint64_t imageOffsetToBind, closure::Image::ResolvedSymbolTarget bindTarget, bool& stop) {
918 // this is a bind, set to target
919 uintptr_t* fixUpLoc = (uintptr_t*)(imageLoadAddress + imageOffsetToBind);
920 uintptr_t value = resolveTarget(bindTarget);
921 #if __has_feature(ptrauth_calls)
922 // Sign the ISA on arm64e.
923 // Unfortunately a hard coded value here is not ideal, but this is ABI so we aren't going to change it
924 // This matches the value in libobjc __objc_opt_ptrs: .quad x@AUTH(da, 27361, addr)
925 value = MachOLoaded::ChainedFixupPointerOnDisk::Arm64e::signPointer(value, fixUpLoc, true, 27361, 2);
926 #endif
927 _logFixups("dyld: fixup objc protocol: %s:%p = %p\n", leafName, fixUpLoc, (void*)value);
928 *fixUpLoc = value;
929 },
930 ^(uint64_t imageOffsetToFixup, uint32_t selectorIndex, bool inSharedCache, bool &stop) {
931 // fixupObjCSelRefs
932 closure::Image::ResolvedSymbolTarget fixupTarget;
933 if ( inSharedCache ) {
934 const char* selectorString = _dyldCacheSelectorOpt->getEntryForIndex(selectorIndex);
935 fixupTarget.sharedCache.kind = closure::Image::ResolvedSymbolTarget::kindSharedCache;
936 fixupTarget.sharedCache.offset = (uint64_t)selectorString - (uint64_t)_dyldCacheAddress;
937 } else {
938 closure::ImageNum imageNum;
939 uint64_t vmOffset;
940 bool gotLocation = _closureSelectorOpt->getStringLocation(selectorIndex, _closureSelectorImages, imageNum, vmOffset);
941 assert(gotLocation);
942 fixupTarget.image.kind = closure::Image::ResolvedSymbolTarget::kindImage;
943 fixupTarget.image.imageNum = imageNum;
944 fixupTarget.image.offset = vmOffset;
945 }
946 uintptr_t* fixUpLoc = (uintptr_t*)(imageLoadAddress + imageOffsetToFixup);
947 uintptr_t value = resolveTarget(fixupTarget);
948 _logFixups("dyld: fixup objc selector: %s:%p(was '%s') = %p(now '%s')\n", leafName, fixUpLoc, (const char*)*fixUpLoc, (void*)value, (const char*)value);
949 *fixUpLoc = value;
950 }, ^(uint64_t imageOffsetToFixup, bool &stop) {
951 // fixupObjCStableSwift
952 // Class really is stable Swift, pretending to be pre-stable.
953 // Fix its lie.
954 uintptr_t* fixUpLoc = (uintptr_t*)(imageLoadAddress + imageOffsetToFixup);
955 uintptr_t value = ((*fixUpLoc) | MachOAnalyzer::ObjCClassInfo::FAST_IS_SWIFT_STABLE) & ~MachOAnalyzer::ObjCClassInfo::FAST_IS_SWIFT_LEGACY;
956 _logFixups("dyld: fixup objc stable Swift: %s:%p = %p\n", leafName, fixUpLoc, (void*)value);
957 *fixUpLoc = value;
958 }, ^(uint64_t imageOffsetToFixup, bool &stop) {
959 // fixupObjCMethodList
960 // Set the method list to have the uniqued bit set
961 uint32_t* fixUpLoc = (uint32_t*)(imageLoadAddress + imageOffsetToFixup);
962 uint32_t value = (*fixUpLoc) | MachOAnalyzer::ObjCMethodList::methodListIsUniqued;
963 _logFixups("dyld: fixup objc method list: %s:%p = 0x%08x\n", leafName, fixUpLoc, value);
964 *fixUpLoc = value;
965 });
966
967 #if __i386__
968 __block bool segmentsMadeWritable = false;
969 image->forEachTextReloc(^(uint32_t imageOffsetToRebase, bool& stop) {
970 if ( !segmentsMadeWritable )
971 setSegmentProtects(info, true);
972 uintptr_t* fixUpLoc = (uintptr_t*)(imageLoadAddress + imageOffsetToRebase);
973 *fixUpLoc += slide;
974 _logFixups("dyld: fixup: %s:%p += %p\n", leafName, fixUpLoc, (void*)slide);
975 },
976 ^(uint32_t imageOffsetToBind, closure::Image::ResolvedSymbolTarget bindTarget, bool& stop) {
977 // FIXME
978 });
979 if ( segmentsMadeWritable )
980 setSegmentProtects(info, false);
981 #endif
982 }
983
984 // make any read-only data segments read-only
985 if ( image->hasReadOnlyData() && !image->inDyldCache() ) {
986 image->forEachDiskSegment(^(uint32_t segIndex, uint32_t fileOffset, uint32_t fileSize, int64_t vmOffset, uint64_t vmSize, uint8_t permissions, bool laterReadOnly, bool& segStop) {
987 if ( laterReadOnly ) {
988 ::mprotect((void*)(imageLoadAddress+vmOffset), (size_t)vmSize, VM_PROT_READ);
989 }
990 });
991 }
992
993 if ( overrideOfCache )
994 vmAccountingSetSuspended(false, _logFixups);
995 }
996
#if __i386__
// Toggle VM protections on an image's non-writable segments.
// With write=true, each read-only segment is remapped read-write (so text
// relocations can be applied); with write=false, each is restored to its
// original protections.  Segments that are already writable are left alone.
void Loader::setSegmentProtects(const LoadedImage& info, bool write)
{
    info.image()->forEachDiskSegment(^(uint32_t segIndex, uint32_t fileOffset, uint32_t fileSize, int64_t vmOffset, uint64_t vmSize, uint8_t protections, bool laterReadOnly, bool& segStop) {
        // already-writable segments never need adjusting
        if ( protections & VM_PROT_WRITE )
            return;
        uint32_t newProt = write ? (VM_PROT_WRITE | VM_PROT_READ) : (uint32_t)protections;
        uintptr_t segStart = ((uintptr_t)info.loadedAddress()) + (uintptr_t)vmOffset;
        kern_return_t result = vm_protect(mach_task_self(), segStart, (uintptr_t)vmSize, false, newProt);
        assert( result == KERN_SUCCESS );
    });
}
#endif
1011
1012
1013 void Loader::forEachImage(void (^handler)(const LoadedImage& li, bool& stop)) const
1014 {
1015 bool stop = false;
1016 for (const LoadedImage& li : _existingImages) {
1017 handler(li, stop);
1018 if ( stop )
1019 return;
1020 }
1021 for (const LoadedImage& li : _newImages) {
1022 handler(li, stop);
1023 if ( stop )
1024 return;
1025 }
1026 }
1027
1028 void Loader::mas_forEachImage(void (^handler)(const WrappedMachO& wmo, bool hidden, bool& stop)) const
1029 {
1030 forEachImage(^(const LoadedImage& li, bool& stop) {
1031 WrappedMachO wmo((MachOAnalyzer*)li.loadedAddress(), this, (void*)li.image());
1032 handler(wmo, li.hideFromFlatSearch(), stop);
1033 });
1034 }
1035
1036
1037 bool Loader::wmo_missingSymbolResolver(const WrappedMachO* fromWmo, bool weakImport, bool lazyBind, const char* symbolName, const char* expectedInDylibPath, const char* clientPath, FixupTarget& target) const
1038 {
1039 if ( weakImport ) {
1040 target.offsetInImage = 0;
1041 target.kind = FixupTarget::Kind::bindAbsolute;
1042 return true;
1043 }
1044
1045 if ( lazyBind && _allowMissingLazies ) {
1046 __block bool result = false;
1047 forEachImage(^(const LoadedImage& li, bool& stop) {
1048 if ( li.loadedAddress()->isDylib() && (strcmp(li.loadedAddress()->installName(), "/usr/lib/system/libdyld.dylib") == 0) ) {
1049 WrappedMachO libdyldWmo((MachOAnalyzer*)li.loadedAddress(), this, (void*)li.image());
1050 Diagnostics diag;
1051 if ( libdyldWmo.findSymbolIn(diag, "__dyld_missing_symbol_abort", 0, target) ) {
1052 // <rdar://problem/44315944> closures should bind missing lazy-bind symbols to a missing symbol handler in libdyld in flat namespace
1053 result = true;
1054 }
1055 stop = true;
1056 }
1057 });
1058 return result;
1059 }
1060
1061 // FIXME
1062 return false;
1063 }
1064
1065
1066 void Loader::mas_mainExecutable(WrappedMachO& mainWmo) const
1067 {
1068 forEachImage(^(const LoadedImage& li, bool& stop) {
1069 if ( li.loadedAddress()->isMainExecutable() ) {
1070 WrappedMachO wmo((MachOAnalyzer*)li.loadedAddress(), this, (void*)li.image());
1071 mainWmo = wmo;
1072 stop = true;
1073 }
1074 });
1075 }
1076
// MachOAnalyzerSet interface: base address of the dyld shared cache in this
// process.  Cast drops const to satisfy the interface's return type.
void* Loader::mas_dyldCache() const
{
    return (void*)_dyldCacheAddress;
}
1081
1082
1083 bool Loader::wmo_dependent(const WrappedMachO* wmo, uint32_t depIndex, WrappedMachO& childWmo, bool& missingWeakDylib) const
1084 {
1085 const closure::Image* image = (closure::Image*)(wmo->_other);
1086 closure::ImageNum depImageNum = image->dependentImageNum(depIndex);
1087 if ( depImageNum == closure::kMissingWeakLinkedImage ) {
1088 missingWeakDylib = true;
1089 return true;
1090 }
1091 else {
1092 if ( LoadedImage* li = findImage(depImageNum) ) {
1093 WrappedMachO foundWmo((MachOAnalyzer*)li->loadedAddress(), this, (void*)li->image());
1094 missingWeakDylib = false;
1095 childWmo = foundWmo;
1096 return true;
1097 }
1098 }
1099 return false;
1100 }
1101
1102
1103 const char* Loader::wmo_path(const WrappedMachO* wmo) const
1104 {
1105 const closure::Image* image = (closure::Image*)(wmo->_other);
1106 return image->path();
1107 }
1108
1109
1110
1111 #if BUILDING_DYLD
1112 LoadedImage* Loader::LaunchImagesCache::findImage(closure::ImageNum imageNum,
1113 Array<LoadedImage>& images) const {
1114 if ( (imageNum < _firstImageNum) || (imageNum >= _lastImageNum) )
1115 return nullptr;
1116
1117 unsigned int cacheIndex = imageNum - _firstImageNum;
1118 uint32_t imagesIndex = _imageIndices[cacheIndex];
1119 if ( imagesIndex == 0 )
1120 return nullptr;
1121
1122 // Note the index is offset by 1 so that 0's are not yet set
1123 return &images[imagesIndex - 1];
1124 }
1125
1126 void Loader::LaunchImagesCache::tryAddImage(closure::ImageNum imageNum, uint64_t allImagesIndex) const {
1127 if ( (imageNum < _firstImageNum) || (imageNum >= _lastImageNum) )
1128 return;
1129
1130 unsigned int cacheIndex = imageNum - _firstImageNum;
1131
1132 // Note the index is offset by 1 so that 0's are not yet set
1133 _imageIndices[cacheIndex] = (uint32_t)allImagesIndex + 1;
1134 }
1135 #endif
1136
1137
1138 void forEachLineInFile(const char* buffer, size_t bufferLen, void (^lineHandler)(const char* line, bool& stop))
1139 {
1140 bool stop = false;
1141 const char* const eof = &buffer[bufferLen];
1142 for (const char* s = buffer; s < eof; ++s) {
1143 char lineBuffer[MAXPATHLEN];
1144 char* t = lineBuffer;
1145 char* tEnd = &lineBuffer[MAXPATHLEN];
1146 while ( (s < eof) && (t != tEnd) ) {
1147 if ( *s == '\n' )
1148 break;
1149 *t++ = *s++;
1150 }
1151 *t = '\0';
1152 lineHandler(lineBuffer, stop);
1153 if ( stop )
1154 break;
1155 }
1156 }
1157
1158 void forEachLineInFile(const char* path, void (^lineHandler)(const char* line, bool& stop))
1159 {
1160 int fd = dyld3::open(path, O_RDONLY, 0);
1161 if ( fd != -1 ) {
1162 struct stat statBuf;
1163 if ( fstat(fd, &statBuf) == 0 ) {
1164 const char* lines = (const char*)mmap(nullptr, (size_t)statBuf.st_size, PROT_READ, MAP_PRIVATE, fd, 0);
1165 if ( lines != MAP_FAILED ) {
1166 forEachLineInFile(lines, (size_t)statBuf.st_size, lineHandler);
1167 munmap((void*)lines, (size_t)statBuf.st_size);
1168 }
1169 }
1170 close(fd);
1171 }
1172 }
1173
1174
#if (BUILDING_LIBDYLD || BUILDING_DYLD)
// Returns true when running on an Apple-internal install, which callers use
// to enable internal-only behavior.
bool internalInstall()
{
#if TARGET_OS_SIMULATOR
    // simulator builds are never treated as internal installs
    return false;
#elif TARGET_OS_IPHONE
    // read the device-firmware flags published on the comm page; the low bit
    // presumably marks an internal/development device — see cpu_capabilities.h
    uint32_t devFlags = *((uint32_t*)_COMM_PAGE_DEV_FIRM);
    return ( (devFlags & 1) == 1 );
#else
    // on macOS, internal == CSR (SIP) allows the AppleInternal capability
    return ( csr_check(CSR_ALLOW_APPLE_INTERNAL) == 0 );
#endif
}
#endif
1188
#if BUILDING_LIBDYLD
// hack because libdyld.dylib should not link with libc++.dylib
// __cxa_pure_virtual is normally supplied by the C++ runtime and is invoked
// when a pure virtual function is called through a partially-constructed or
// partially-destroyed object.  Supplying a hidden local copy avoids the
// libc++ dependency; such a call is always a program error, so abort.
extern "C" void __cxa_pure_virtual() __attribute__((visibility("hidden")));
void __cxa_pure_virtual()
{
    abort();
}
#endif
1197
1198 } // namespace dyld3
1199
1200
1201
1202
1203