dyld-732.8 / dyld3/Loading.cpp
/*
 * Copyright (c) 2017 Apple Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */


#include <bitset>

#include <stdint.h>
#include <string.h>
#include <assert.h>
#include <uuid/uuid.h>
#include <mach/mach.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <sys/sysctl.h>
#include <fcntl.h>
#include <sys/dtrace.h>
#include <sys/errno.h>
#include <unistd.h>
#include <System/sys/mman.h>
#include <System/sys/csr.h>
#include <System/machine/cpu_capabilities.h>
#if !TARGET_OS_SIMULATOR && !TARGET_OS_DRIVERKIT
    #include <sandbox.h>
    #include <sandbox/private.h>
#endif
//#include <dispatch/dispatch.h>
#include <mach/vm_page_size.h>

#include "MachOFile.h"
#include "MachOLoaded.h"
#include "MachOAnalyzer.h"
#include "Logging.h"
#include "Loading.h"
#include "Tracing.h"
#include "dyld2.h"
#include "dyld_cache_format.h"

#include "objc-shared-cache.h"

namespace dyld {
    void log(const char* m, ...);
}


namespace {

// utility to track a set of ImageNum's in use
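// ImageNums below 5120 are recorded in a fixed-size bitset; the (rare) larger
// numbers spill into a growable overflow array that is searched linearly.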
class VIS_HIDDEN ImageNumSet
{
public:
    void add(dyld3::closure::ImageNum num);
    bool contains(dyld3::closure::ImageNum num) const;

private:
    std::bitset<5120>                                   _bitmap;
    dyld3::OverflowSafeArray<dyld3::closure::ImageNum>  _overflowArray;
};

void ImageNumSet::add(dyld3::closure::ImageNum num)
{
    if ( num < 5120 )
        _bitmap.set(num);
    else
        _overflowArray.push_back(num);
}

bool ImageNumSet::contains(dyld3::closure::ImageNum num) const
{
    if ( num < 5120 )
        return _bitmap.test(num);

    for (dyld3::closure::ImageNum existingNum : _overflowArray) {
        if ( existingNum == num )
            return true;
    }
    return false;
}
} // namespace anonymous


namespace dyld3 {

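// Loader carries the state needed to bring a closure's images into the process:
// the images already loaded (_existingImages), storage for the images this load
// will add (_newImages), the mapped dyld shared cache, the closure's ImageArrays,
// the ObjC selector optimization tables, and the logging hooks.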
Loader::Loader(const Array<LoadedImage>& existingImages, Array<LoadedImage>& newImagesStorage,
               const void* cacheAddress, const Array<const dyld3::closure::ImageArray*>& imagesArrays,
               const closure::ObjCSelectorOpt* selOpt, const Array<closure::Image::ObjCSelectorImage>& selImages,
               LogFunc logLoads, LogFunc logSegments, LogFunc logFixups, LogFunc logDofs)
    : _existingImages(existingImages), _newImages(newImagesStorage),
      _imagesArrays(imagesArrays), _dyldCacheAddress(cacheAddress), _dyldCacheSelectorOpt(nullptr),
      _closureSelectorOpt(selOpt), _closureSelectorImages(selImages),
      _logLoads(logLoads), _logSegments(logSegments), _logFixups(logFixups), _logDofs(logDofs)
{
#if BUILDING_DYLD
    // This is only needed for dyld and the launch closure, not the dlopen closures
    if ( _dyldCacheAddress != nullptr ) {
        _dyldCacheSelectorOpt = ((const DyldSharedCache*)_dyldCacheAddress)->objcOpt()->selopt();
    }
#endif
}

void Loader::addImage(const LoadedImage& li)
{
    _newImages.push_back(li);
}

LoadedImage* Loader::findImage(closure::ImageNum targetImageNum)
{
#if BUILDING_DYLD
    // The launch images are different in dyld vs libdyld. In dyld, the new images are
    // the launch images, while in libdyld, the existing images are the launch images
    if (LoadedImage* info = _launchImagesCache.findImage(targetImageNum, _newImages)) {
        return info;
    }

    for (uint64_t index = 0; index != _newImages.count(); ++index) {
        LoadedImage& info = _newImages[index];
        if ( info.image()->representsImageNum(targetImageNum) ) {
            // Try to cache this entry for next time
            _launchImagesCache.tryAddImage(targetImageNum, index);
            return &info;
        }
    }
#elif BUILDING_LIBDYLD
    for (const LoadedImage& info : _existingImages) {
        if ( info.image()->representsImageNum(targetImageNum) )
            return (LoadedImage*)&info;
    }
    for (LoadedImage& info : _newImages) {
        if ( info.image()->representsImageNum(targetImageNum) )
            return &info;
    }
#else
#error Must be building dyld or libdyld
#endif
    return nullptr;
}

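// Converts a ResolvedSymbolTarget recorded in the closure into a runtime address:
// kindSharedCache targets are offsets from the shared cache base, kindImage targets
// are offsets from the named image's load address, and kindAbsolute targets hold the
// value directly (if bit 62 is set, the top bits are restored to sign-extend a
// negative value that was stored in the narrower bitfield).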
uintptr_t Loader::resolveTarget(closure::Image::ResolvedSymbolTarget target)
{
    const LoadedImage* info;
    switch ( target.sharedCache.kind ) {
        case closure::Image::ResolvedSymbolTarget::kindSharedCache:
            assert(_dyldCacheAddress != nullptr);
            return (uintptr_t)_dyldCacheAddress + (uintptr_t)target.sharedCache.offset;

        case closure::Image::ResolvedSymbolTarget::kindImage:
            info = findImage(target.image.imageNum);
            assert(info != nullptr);
            return (uintptr_t)(info->loadedAddress()) + (uintptr_t)target.image.offset;

        case closure::Image::ResolvedSymbolTarget::kindAbsolute:
            if ( target.absolute.value & (1ULL << 62) )
                return (uintptr_t)(target.absolute.value | 0xC000000000000000ULL);
            else
                return (uintptr_t)target.absolute.value;
    }
    assert(0 && "malformed ResolvedSymbolTarget");
    return 0;
}


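// Starting from the image(s) already pushed into _newImages (normally just the top
// image of the closure), walks each image's dependents, redirects any dependent that
// is overridden by a root, and appends every image not already loaded until the
// transitive closure is complete or an error is recorded in diag.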
void Loader::completeAllDependents(Diagnostics& diag, bool& someCacheImageOverridden)
{
    // accumulate all image overrides
    STACK_ALLOC_ARRAY(ImageOverride, overrides, _existingImages.maxCount() + _newImages.maxCount());
    for (const auto anArray : _imagesArrays) {
        // ignore prebuilt Image* in dyld cache
        if ( anArray->startImageNum() < dyld3::closure::kFirstLaunchClosureImageNum )
            continue;
        anArray->forEachImage(^(const dyld3::closure::Image* image, bool& stop) {
            ImageOverride overrideEntry;
            if ( image->isOverrideOfDyldCacheImage(overrideEntry.inCache) ) {
                someCacheImageOverridden = true;
                overrideEntry.replacement = image->imageNum();
                overrides.push_back(overrideEntry);
            }
        });
    }

    // make cache for fast lookup of already loaded images
    __block ImageNumSet alreadyLoaded;
    for (const LoadedImage& info : _existingImages) {
        alreadyLoaded.add(info.image()->imageNum());
    }
    alreadyLoaded.add(_newImages.begin()->image()->imageNum());

    // for each image in _newImages, starting at the top image, make sure its dependents are in _newImages
    uintptr_t index = 0;
    while ( (index < _newImages.count()) && diag.noError() ) {
        const closure::Image* image = _newImages[index].image();
        //fprintf(stderr, "completeAllDependents(): looking at dependents of %s\n", image->path());
        image->forEachDependentImage(^(uint32_t depIndex, closure::Image::LinkKind kind, closure::ImageNum depImageNum, bool& stop) {
            // check if imageNum needs to be changed to an override
            for (const ImageOverride& entry : overrides) {
                if ( entry.inCache == depImageNum ) {
                    depImageNum = entry.replacement;
                    break;
                }
            }
            // check if this dependent is already loaded
            if ( !alreadyLoaded.contains(depImageNum) ) {
                // if not, look in imagesArrays
                const closure::Image* depImage = closure::ImageArray::findImage(_imagesArrays, depImageNum);
                if ( depImage != nullptr ) {
                    //dyld::log("   load imageNum=0x%05X, image path=%s\n", depImageNum, depImage->path());
                    if ( _newImages.freeCount() == 0 ) {
                        diag.error("too many initial images");
                        stop = true;
                    }
                    else {
                        _newImages.push_back(LoadedImage::make(depImage));
                    }
                    alreadyLoaded.add(depImageNum);
                }
                else {
                    diag.error("unable to locate imageNum=0x%04X, depIndex=%d of %s", depImageNum, depIndex, image->path());
                    stop = true;
                }
            }
        });
        ++index;
    }
}

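// For every image in _newImages that is not already loaded, either points it at its
// copy inside the dyld shared cache or mmap()s it from disk, unmapping everything
// just mapped if any image fails; then applies fixups to the images that need them
// and, if requested, registers their dtrace DOF sections.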
void Loader::mapAndFixupAllImages(Diagnostics& diag, bool processDOFs, bool fromOFI)
{
    // scan array and map images not already loaded
    for (LoadedImage& info : _newImages) {
        if ( info.loadedAddress() != nullptr ) {
            // log main executable's segments
            if ( (info.loadedAddress()->filetype == MH_EXECUTE) && (info.state() == LoadedImage::State::mapped) ) {
                if ( _logSegments("dyld: mapped by kernel %s\n", info.image()->path()) ) {
                    info.image()->forEachDiskSegment(^(uint32_t segIndex, uint32_t fileOffset, uint32_t fileSize, int64_t vmOffset, uint64_t vmSize, uint8_t permissions, bool laterReadOnly, bool& stop) {
                        uint64_t start = (long)info.loadedAddress() + vmOffset;
                        uint64_t end = start+vmSize-1;
                        if ( (segIndex == 0) && (permissions == 0) ) {
                            start = 0;
                        }
                        _logSegments("%14s (%c%c%c) 0x%012llX->0x%012llX \n", info.loadedAddress()->segmentName(segIndex),
                                     (permissions & PROT_READ) ? 'r' : '.', (permissions & PROT_WRITE) ? 'w' : '.', (permissions & PROT_EXEC) ? 'x' : '.' ,
                                     start, end);
                    });
                }
            }
            // skip over ones already loaded
            continue;
        }
        if ( info.image()->inDyldCache() ) {
            if ( info.image()->overridableDylib() ) {
                struct stat statBuf;
                if ( stat(info.image()->path(), &statBuf) == 0 ) {
                    // verify file has not changed since closure was built
                    uint64_t inode;
                    uint64_t mtime;
                    if ( info.image()->hasFileModTimeAndInode(inode, mtime) ) {
                        if ( (statBuf.st_mtime != mtime) || (statBuf.st_ino != inode) ) {
                            diag.error("dylib file mtime/inode changed since closure was built for '%s'", info.image()->path());
                        }
                    }
                    else {
                        diag.error("dylib file not expected on disk, must be a root '%s'", info.image()->path());
                    }
                }
                else if ( (_dyldCacheAddress != nullptr) && ((dyld_cache_header*)_dyldCacheAddress)->dylibsExpectedOnDisk ) {
                    diag.error("dylib file missing, was in dyld shared cache '%s'", info.image()->path());
                }
            }
            if ( diag.noError() ) {
                info.setLoadedAddress((MachOLoaded*)((uintptr_t)_dyldCacheAddress + info.image()->cacheOffset()));
                info.setState(LoadedImage::State::fixedUp);
                if ( _logSegments("dyld: Using from dyld cache %s\n", info.image()->path()) ) {
                    info.image()->forEachCacheSegment(^(uint32_t segIndex, uint64_t vmOffset, uint64_t vmSize, uint8_t permissions, bool &stop) {
                        _logSegments("%14s (%c%c%c) 0x%012lX->0x%012lX \n", info.loadedAddress()->segmentName(segIndex),
                                     (permissions & PROT_READ) ? 'r' : '.', (permissions & PROT_WRITE) ? 'w' : '.', (permissions & PROT_EXEC) ? 'x' : '.' ,
                                     (long)info.loadedAddress()+(long)vmOffset, (long)info.loadedAddress()+(long)vmOffset+(long)vmSize-1);
                    });
                }
            }
        }
        else {
            mapImage(diag, info, fromOFI);
            if ( diag.hasError() )
                break; // out of for loop
        }

    }
    if ( diag.hasError() ) {
        // bummer, need to clean up by unmapping any images just mapped
        for (LoadedImage& info : _newImages) {
            if ( (info.state() == LoadedImage::State::mapped) && !info.image()->inDyldCache() && !info.leaveMapped() ) {
                _logSegments("dyld: unmapping %s\n", info.image()->path());
                unmapImage(info);
            }
        }
        return;
    }

    // apply fixups
    for (LoadedImage& info : _newImages) {
        // images in shared cache do not need fixups applied
        if ( info.image()->inDyldCache() )
            continue;
        // previously loaded images were previously fixed up
        if ( info.state() < LoadedImage::State::fixedUp ) {
            applyFixupsToImage(diag, info);
            if ( diag.hasError() )
                break;
            info.setState(LoadedImage::State::fixedUp);
        }
    }

    // find and register dtrace DOFs
    if ( processDOFs ) {
        STACK_ALLOC_OVERFLOW_SAFE_ARRAY(DOFInfo, dofImages, _newImages.count());
        for (LoadedImage& info : _newImages) {
            info.image()->forEachDOF(info.loadedAddress(), ^(const void* section) {
                DOFInfo dofInfo;
                dofInfo.dof = section;
                dofInfo.imageHeader = info.loadedAddress();
                dofInfo.imageShortName = info.image()->leafName();
                dofImages.push_back(dofInfo);
            });
        }
        registerDOFs(dofImages);
    }
}

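// Wrappers around sandbox_check() used to turn an EPERM from open()/stat()/mmap()
// into a more specific "file system sandbox blocked ..." diagnostic. The checks are
// compiled out for dyld_sim and DriverKit, where the sandbox SPI is not available.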
bool Loader::sandboxBlocked(const char* path, const char* kind)
{
#if TARGET_OS_SIMULATOR || TARGET_OS_DRIVERKIT
    // sandbox calls not yet supported in dyld_sim
    return false;
#else
    sandbox_filter_type filter = (sandbox_filter_type)(SANDBOX_FILTER_PATH | SANDBOX_CHECK_NO_REPORT);
    return ( sandbox_check(getpid(), kind, filter, path) > 0 );
#endif
}

bool Loader::sandboxBlockedMmap(const char* path)
{
    return sandboxBlocked(path, "file-map-executable");
}

bool Loader::sandboxBlockedOpen(const char* path)
{
    return sandboxBlocked(path, "file-read-data");
}

bool Loader::sandboxBlockedStat(const char* path)
{
    return sandboxBlocked(path, "file-read-metadata");
}

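// Maps one image from disk: opens and stats the file, verifies it still matches the
// mtime/inode recorded in the closure, registers its code signature and preflights it
// with F_CHECK_LV, reserves the whole VM range, mmap()s each segment, asks the kernel
// to speculatively read the rest of the slice, re-verifies the CDHash when running in
// libdyld, and finally registers any FairPlay-encrypted region before marking the
// image as mapped.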
void Loader::mapImage(Diagnostics& diag, LoadedImage& info, bool fromOFI)
{
    dyld3::ScopedTimer timer(DBG_DYLD_TIMING_MAP_IMAGE, info.image()->path(), 0, 0);

    const closure::Image* image = info.image();
    uint64_t sliceOffset = image->sliceOffsetInFile();
    const uint64_t totalVMSize = image->vmSizeToMap();
    uint32_t codeSignFileOffset;
    uint32_t codeSignFileSize;
    bool isCodeSigned = image->hasCodeSignature(codeSignFileOffset, codeSignFileSize);

    // open file
#if BUILDING_DYLD
    int fd = dyld::my_open(info.image()->path(), O_RDONLY, 0);
#else
    int fd = ::open(info.image()->path(), O_RDONLY, 0);
#endif
    if ( fd == -1 ) {
        int openErr = errno;
        if ( (openErr == EPERM) && sandboxBlockedOpen(image->path()) )
            diag.error("file system sandbox blocked open(\"%s\", O_RDONLY)", image->path());
        else
            diag.error("open(\"%s\", O_RDONLY) failed with errno=%d", image->path(), openErr);
        return;
    }

    // get file info
    struct stat statBuf;
#if TARGET_OS_SIMULATOR
    if ( stat(image->path(), &statBuf) != 0 ) {
#else
    if ( fstat(fd, &statBuf) != 0 ) {
#endif
        int statErr = errno;
        if ( (statErr == EPERM) && sandboxBlockedStat(image->path()) )
            diag.error("file system sandbox blocked stat(\"%s\")", image->path());
        else
            diag.error("stat(\"%s\") failed with errno=%d", image->path(), statErr);
        close(fd);
        return;
    }

    // verify file has not changed since closure was built
    uint64_t inode;
    uint64_t mtime;
    if ( image->hasFileModTimeAndInode(inode, mtime) ) {
        if ( (statBuf.st_mtime != mtime) || (statBuf.st_ino != inode) ) {
            diag.error("file mtime/inode changed since closure was built for '%s'", image->path());
            close(fd);
            return;
        }
    }

    // handle case on iOS where sliceOffset in closure is wrong because file was thinned after cache was built
    if ( (_dyldCacheAddress != nullptr) && !(((dyld_cache_header*)_dyldCacheAddress)->dylibsExpectedOnDisk) ) {
        if ( sliceOffset != 0 ) {
            if ( round_page_kernel(codeSignFileOffset+codeSignFileSize) == round_page_kernel(statBuf.st_size) ) {
                // file is now thin
                sliceOffset = 0;
            }
        }
    }

    // register code signature
    uint64_t coveredCodeLength = UINT64_MAX;
    if ( isCodeSigned ) {
        auto sigTimer = ScopedTimer(DBG_DYLD_TIMING_ATTACH_CODESIGNATURE, 0, 0, 0);
        fsignatures_t siginfo;
        siginfo.fs_file_start = sliceOffset;                        // start of mach-o slice in fat file
        siginfo.fs_blob_start = (void*)(long)(codeSignFileOffset);  // start of CD in mach-o file
        siginfo.fs_blob_size = codeSignFileSize;                    // size of CD
        int result = fcntl(fd, F_ADDFILESIGS_RETURN, &siginfo);
        if ( result == -1 ) {
            int errnoCopy = errno;
            if ( (errnoCopy == EPERM) || (errnoCopy == EBADEXEC) ) {
                diag.error("code signature invalid (errno=%d) sliceOffset=0x%08llX, codeBlobOffset=0x%08X, codeBlobSize=0x%08X for '%s'",
                           errnoCopy, sliceOffset, codeSignFileOffset, codeSignFileSize, image->path());
            }
            else {
                diag.error("fcntl(fd, F_ADDFILESIGS_RETURN) failed with errno=%d, sliceOffset=0x%08llX, codeBlobOffset=0x%08X, codeBlobSize=0x%08X for '%s'",
                           errnoCopy, sliceOffset, codeSignFileOffset, codeSignFileSize, image->path());
            }
            close(fd);
            return;
        }
        coveredCodeLength = siginfo.fs_file_start;
        if ( coveredCodeLength < codeSignFileOffset ) {
            diag.error("code signature does not cover entire file up to signature");
            close(fd);
            return;
        }
    }

    // <rdar://problem/41015217> dyld should use F_CHECK_LV even on unsigned binaries
    {
        // <rdar://problem/32684903> always call F_CHECK_LV to preflight
        fchecklv checkInfo;
        char messageBuffer[512];
        messageBuffer[0] = '\0';
        checkInfo.lv_file_start = sliceOffset;
        checkInfo.lv_error_message_size = sizeof(messageBuffer);
        checkInfo.lv_error_message = messageBuffer;
        int res = fcntl(fd, F_CHECK_LV, &checkInfo);
        if ( res == -1 ) {
            diag.error("code signature in (%s) not valid for use in process: %s", image->path(), messageBuffer);
            close(fd);
            return;
        }
    }

    // reserve address range
    vm_address_t loadAddress = 0;
    kern_return_t r = vm_allocate(mach_task_self(), &loadAddress, (vm_size_t)totalVMSize, VM_FLAGS_ANYWHERE);
    if ( r != KERN_SUCCESS ) {
        diag.error("vm_allocate(size=0x%0llX) failed with result=%d", totalVMSize, r);
        close(fd);
        return;
    }

    if ( sliceOffset != 0 )
        _logSegments("dyld: Mapping %s (slice offset=%llu)\n", image->path(), sliceOffset);
    else
        _logSegments("dyld: Mapping %s\n", image->path());

    // map each segment
    __block bool mmapFailure = false;
    __block const uint8_t* codeSignatureStartAddress = nullptr;
    __block const uint8_t* linkeditEndAddress = nullptr;
    __block bool mappedFirstSegment = false;
    __block uint64_t maxFileOffset = 0;
    image->forEachDiskSegment(^(uint32_t segIndex, uint32_t fileOffset, uint32_t fileSize, int64_t vmOffset, uint64_t vmSize, uint8_t permissions, bool laterReadOnly, bool& stop) {
        // <rdar://problem/32363581> Mapping zero filled segments fails with mmap of size 0
        if ( fileSize == 0 )
            return;
        void* segAddress = mmap((void*)(loadAddress+vmOffset), fileSize, permissions, MAP_FIXED | MAP_PRIVATE, fd, sliceOffset+fileOffset);
        int mmapErr = errno;
        if ( segAddress == MAP_FAILED ) {
            if ( mmapErr == EPERM ) {
                if ( sandboxBlockedMmap(image->path()) )
                    diag.error("file system sandbox blocked mmap() of '%s'", image->path());
                else
                    diag.error("code signing blocked mmap() of '%s'", image->path());
            }
            else {
                diag.error("mmap(addr=0x%0llX, size=0x%08X) failed with errno=%d for %s", loadAddress+vmOffset, fileSize, mmapErr, image->path());
            }
            mmapFailure = true;
            stop = true;
        }
        else if ( codeSignFileOffset > fileOffset ) {
            // remember where the code signature was mapped (re-verified below when building libdyld)
            codeSignatureStartAddress = (uint8_t*)segAddress + (codeSignFileOffset-fileOffset);
            linkeditEndAddress = (uint8_t*)segAddress + vmSize;
        }
        // sanity check that the first segment maps a valid mach-o header
        if ( (segAddress != MAP_FAILED) && !mappedFirstSegment ) {
            mappedFirstSegment = true;
            const MachOFile* mf = (MachOFile*)segAddress;
            if ( !mf->isMachO(diag, fileSize) ) {
                mmapFailure = true;
                stop = true;
            }
        }
        if ( !mmapFailure ) {
            const MachOLoaded* lmo = (MachOLoaded*)loadAddress;
            _logSegments("%14s (%c%c%c) 0x%012lX->0x%012lX \n", lmo->segmentName(segIndex),
                         (permissions & PROT_READ) ? 'r' : '.', (permissions & PROT_WRITE) ? 'w' : '.', (permissions & PROT_EXEC) ? 'x' : '.' ,
                         (long)segAddress, (long)segAddress+(long)vmSize-1);
        }
        maxFileOffset = fileOffset + fileSize;
    });
    if ( mmapFailure ) {
        ::vm_deallocate(mach_task_self(), loadAddress, (vm_size_t)totalVMSize);
        ::close(fd);
        return;
    }

    // <rdar://problem/47163421> speculatively read whole slice
    fspecread_t specread = {};
    specread.fsr_offset = sliceOffset;
    specread.fsr_length = maxFileOffset;
    specread.fsr_flags = 0;
    fcntl(fd, F_SPECULATIVE_READ, &specread);
    _logSegments("dyld: Speculatively read offset=0x%08llX, len=0x%08llX, path=%s\n", sliceOffset, maxFileOffset, image->path());

    // close file
    close(fd);

#if BUILDING_LIBDYLD
    // verify file has not changed since closure was built by checking code signature has not changed
    struct CDHashWrapper {
        uint8_t cdHash[20];
    };

    // Get all the hashes for the image
    STACK_ALLOC_OVERFLOW_SAFE_ARRAY(CDHashWrapper, expectedCDHashes, 1);
    image->forEachCDHash(^(const uint8_t *cdHash, bool &stop) {
        CDHashWrapper cdHashWrapper;
        memcpy(cdHashWrapper.cdHash, cdHash, sizeof(CDHashWrapper::cdHash));
        expectedCDHashes.push_back(cdHashWrapper);
    });

    if (!expectedCDHashes.empty()) {
        if (expectedCDHashes.count() != 1) {
            // We should only see a single hash for dylibs
            diag.error("code signature count invalid");
        } else if ( codeSignatureStartAddress == nullptr ) {
            diag.error("code signature missing");
        }
        else if ( codeSignatureStartAddress+codeSignFileSize > linkeditEndAddress ) {
            diag.error("code signature extends beyond end of __LINKEDIT");
        }
        else {
            // Get all the cd hashes for the macho
            STACK_ALLOC_OVERFLOW_SAFE_ARRAY(CDHashWrapper, foundCDHashes, 1);
            const MachOLoaded* lmo = (MachOLoaded*)loadAddress;
            lmo->forEachCDHashOfCodeSignature(codeSignatureStartAddress, codeSignFileSize,
                                              ^(const uint8_t *cdHash) {
                CDHashWrapper cdHashWrapper;
                memcpy(cdHashWrapper.cdHash, cdHash, sizeof(CDHashWrapper::cdHash));
                foundCDHashes.push_back(cdHashWrapper);
            });

            if (foundCDHashes.empty()) {
                diag.error("code signature format invalid");
            } else if (expectedCDHashes.count() != foundCDHashes.count()) {
                diag.error("code signature count invalid");
            } else {
                // We found a hash, so make sure it matches.
                if ( ::memcmp(foundCDHashes[0].cdHash, expectedCDHashes[0].cdHash, 20) != 0 )
                    diag.error("code signature changed since closure was built");
            }
        }
        if ( diag.hasError() ) {
            ::vm_deallocate(mach_task_self(), loadAddress, (vm_size_t)totalVMSize);
            return;
        }
    }

#endif

#if __IPHONE_OS_VERSION_MIN_REQUIRED && !TARGET_OS_SIMULATOR
    // tell kernel about fairplay encrypted regions
    uint32_t fpTextOffset;
    uint32_t fpSize;
    if ( image->isFairPlayEncrypted(fpTextOffset, fpSize) ) {
        const mach_header* mh = (mach_header*)loadAddress;
        int result = ::mremap_encrypted(((uint8_t*)mh) + fpTextOffset, fpSize, 1, mh->cputype, mh->cpusubtype);
        if ( result != 0 ) {
            diag.error("could not register fairplay decryption, mremap_encrypted() => %d", result);
            ::vm_deallocate(mach_task_self(), loadAddress, (vm_size_t)totalVMSize);
            return;
        }
    }
#endif

    _logLoads("dyld: load %s\n", image->path());

    timer.setData4((uint64_t)loadAddress);
    info.setLoadedAddress((MachOLoaded*)loadAddress);
    info.setState(LoadedImage::State::mapped);
}

void Loader::unmapImage(LoadedImage& info)
{
    assert(info.loadedAddress() != nullptr);
    ::vm_deallocate(mach_task_self(), (vm_address_t)info.loadedAddress(), (vm_size_t)(info.image()->vmSizeToMap()));
    info.setLoadedAddress(nullptr);
}

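// Registers dtrace DOF sections with the kernel in one ioctl(DTRACEHIOC_ADDDOF) call
// on the dtrace helper device. The images involved are marked never-unload by the
// closure, so the registration IDs the kernel hands back do not need to be retained.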
void Loader::registerDOFs(const Array<DOFInfo>& dofs)
{
    if ( dofs.empty() )
        return;

    int fd = open("/dev/" DTRACEMNR_HELPER, O_RDWR);
    if ( fd < 0 ) {
        _logDofs("can't open /dev/" DTRACEMNR_HELPER " to register dtrace DOF sections\n");
    }
    else {
        // allocate a buffer on the stack for the variable length dof_ioctl_data_t type
        uint8_t buffer[sizeof(dof_ioctl_data_t) + dofs.count()*sizeof(dof_helper_t)];
        dof_ioctl_data_t* ioctlData = (dof_ioctl_data_t*)buffer;

        // fill in buffer with one dof_helper_t per DOF section
        ioctlData->dofiod_count = dofs.count();
        for (unsigned int i=0; i < dofs.count(); ++i) {
            strlcpy(ioctlData->dofiod_helpers[i].dofhp_mod, dofs[i].imageShortName, DTRACE_MODNAMELEN);
            ioctlData->dofiod_helpers[i].dofhp_dof = (uintptr_t)(dofs[i].dof);
            ioctlData->dofiod_helpers[i].dofhp_addr = (uintptr_t)(dofs[i].dof);
        }

        // tell kernel about all DOF sections en masse
        // pass pointer to ioctlData because ioctl() only copies a fixed-size amount of data into kernel
        user_addr_t val = (user_addr_t)(unsigned long)ioctlData;
        if ( ioctl(fd, DTRACEHIOC_ADDDOF, &val) != -1 ) {
            // kernel returns a unique identifier for each section in the dofiod_helpers[].dofhp_dof field.
            // Note, the closure marked the image as being never unload, so we don't need to keep the ID around
            // or support unregistering it later.
            for (unsigned int i=0; i < dofs.count(); ++i) {
                _logDofs("dyld: registering DOF section %p in %s with dtrace, ID=0x%08X\n",
                         dofs[i].dof, dofs[i].imageShortName, (int)(ioctlData->dofiod_helpers[i].dofhp_dof));
            }
        }
        else {
            _logDofs("dyld: ioctl to register dtrace DOF section failed\n");
        }
        close(fd);
    }
}

bool Loader::dtraceUserProbesEnabled()
{
    uint8_t dofEnabled = *((uint8_t*)_COMM_PAGE_DTRACE_DOF_ENABLED);
    return ( (dofEnabled & 1) );
}


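// Toggles the vm.footprint_suspend sysctl (arm/arm64 only) so that, while dyld is
// applying fixups caused by a root that overrides a shared-cache dylib, the pages it
// dirties are presumably not charged against the process's memory footprint.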
void Loader::vmAccountingSetSuspended(bool suspend, LogFunc logger)
{
#if __arm__ || __arm64__
    // <rdar://problem/29099600> dyld should tell the kernel when it is doing fix-ups caused by roots
    logger("vm.footprint_suspend=%d\n", suspend);
    int newValue = suspend ? 1 : 0;
    int oldValue = 0;
    size_t newlen = sizeof(newValue);
    size_t oldlen = sizeof(oldValue);
    sysctlbyname("vm.footprint_suspend", &oldValue, &oldlen, &newValue, newlen);
#endif
}

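// Applies the fixups recorded in the closure to one mapped image: rebases get the
// slide added, binds are set to the resolved target, chained fixups are walked via
// withChainStarts(), and the ObjC callbacks patch the image-info flags, protocol and
// selector references, and stable-Swift class bits precomputed by the closure builder.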
void Loader::applyFixupsToImage(Diagnostics& diag, LoadedImage& info)
{
    dyld3::ScopedTimer timer(DBG_DYLD_TIMING_APPLY_FIXUPS, (uint64_t)info.loadedAddress(), 0, 0);
    closure::ImageNum cacheImageNum;
    const char* leafName = info.image()->leafName();
    const closure::Image* image = info.image();
    const uint8_t* imageLoadAddress = (uint8_t*)info.loadedAddress();
    uintptr_t slide = info.loadedAddress()->getSlide();
    bool overrideOfCache = info.image()->isOverrideOfDyldCacheImage(cacheImageNum);

    if ( overrideOfCache )
        vmAccountingSetSuspended(true, _logFixups);
    image->forEachFixup(^(uint64_t imageOffsetToRebase, bool& stop) {
        // this is a rebase, add slide
        uintptr_t* fixUpLoc = (uintptr_t*)(imageLoadAddress + imageOffsetToRebase);
        *fixUpLoc += slide;
        _logFixups("dyld: fixup: %s:%p += %p\n", leafName, fixUpLoc, (void*)slide);
    },
    ^(uint64_t imageOffsetToBind, closure::Image::ResolvedSymbolTarget bindTarget, bool& stop) {
        // this is a bind, set to target
        uintptr_t* fixUpLoc = (uintptr_t*)(imageLoadAddress + imageOffsetToBind);
        uintptr_t value = resolveTarget(bindTarget);
        _logFixups("dyld: fixup: %s:%p = %p\n", leafName, fixUpLoc, (void*)value);
        *fixUpLoc = value;
    },
    ^(uint64_t imageOffsetToStartsInfo, const Array<closure::Image::ResolvedSymbolTarget>& targets, bool& stop) {
        // this is a chain of fixups, fix up all
        STACK_ALLOC_OVERFLOW_SAFE_ARRAY(const void*, targetAddrs, 128);
        targetAddrs.reserve(targets.count());
        for (uint32_t i=0; i < targets.count(); ++i)
            targetAddrs.push_back((void*)resolveTarget(targets[i]));
        ((dyld3::MachOAnalyzer*)(info.loadedAddress()))->withChainStarts(diag, imageOffsetToStartsInfo, ^(const dyld_chained_starts_in_image* starts) {
            info.loadedAddress()->fixupAllChainedFixups(diag, starts, slide, targetAddrs, ^(void* loc, void* newValue) {
                _logFixups("dyld: fixup: %s:%p = %p\n", leafName, loc, newValue);
            });
        });
    },
    ^(uint64_t imageOffsetToFixup) {
        uintptr_t* fixUpLoc = (uintptr_t*)(imageLoadAddress + imageOffsetToFixup);
        _logFixups("dyld: fixup objc image info: %s Setting objc image info for precomputed objc\n", leafName);

        MachOAnalyzer::ObjCImageInfo* imageInfo = (MachOAnalyzer::ObjCImageInfo*)fixUpLoc;
        imageInfo->flags |= MachOAnalyzer::ObjCImageInfo::dyldPreoptimized;
    },
    ^(uint64_t imageOffsetToBind, closure::Image::ResolvedSymbolTarget bindTarget, bool& stop) {
        // this is a bind, set to target
        uintptr_t* fixUpLoc = (uintptr_t*)(imageLoadAddress + imageOffsetToBind);
        uintptr_t value = resolveTarget(bindTarget);
        _logFixups("dyld: fixup objc protocol: %s:%p = %p\n", leafName, fixUpLoc, (void*)value);
        *fixUpLoc = value;
    },
    ^(uint64_t imageOffsetToFixup, uint32_t selectorIndex, bool inSharedCache, bool &stop) {
        // fixupObjCSelRefs
        closure::Image::ResolvedSymbolTarget fixupTarget;
        if ( inSharedCache ) {
            const char* selectorString = _dyldCacheSelectorOpt->getEntryForIndex(selectorIndex);
            fixupTarget.sharedCache.kind = closure::Image::ResolvedSymbolTarget::kindSharedCache;
            fixupTarget.sharedCache.offset = (uint64_t)selectorString - (uint64_t)_dyldCacheAddress;
        } else {
            closure::ImageNum imageNum;
            uint64_t vmOffset;
            bool gotLocation = _closureSelectorOpt->getStringLocation(selectorIndex, _closureSelectorImages, imageNum, vmOffset);
            assert(gotLocation);
            fixupTarget.image.kind = closure::Image::ResolvedSymbolTarget::kindImage;
            fixupTarget.image.imageNum = imageNum;
            fixupTarget.image.offset = vmOffset;
        }

        uintptr_t* fixUpLoc = (uintptr_t*)(imageLoadAddress + imageOffsetToFixup);
        uintptr_t value = resolveTarget(fixupTarget);
        _logFixups("dyld: fixup objc selector: %s:%p(was '%s') = %p(now '%s')\n", leafName, fixUpLoc, (const char*)*fixUpLoc, (void*)value, (const char*)value);
        *fixUpLoc = value;
    }, ^(uint64_t imageOffsetToFixup, bool &stop) {
        // fixupObjCStableSwift
        // Class really is stable Swift, pretending to be pre-stable.
        // Fix its lie.
        uintptr_t* fixUpLoc = (uintptr_t*)(imageLoadAddress + imageOffsetToFixup);
        uintptr_t value = ((*fixUpLoc) | MachOAnalyzer::ObjCClassInfo::FAST_IS_SWIFT_STABLE) & ~MachOAnalyzer::ObjCClassInfo::FAST_IS_SWIFT_LEGACY;
        _logFixups("dyld: fixup objc stable Swift: %s:%p = %p\n", leafName, fixUpLoc, (void*)value);
        *fixUpLoc = value;
    }, ^(uint64_t imageOffsetToFixup, bool &stop) {
        // TODO: Implement this
    });

#if __i386__
    __block bool segmentsMadeWritable = false;
    image->forEachTextReloc(^(uint32_t imageOffsetToRebase, bool& stop) {
        if ( !segmentsMadeWritable ) {
            setSegmentProtects(info, true);
            segmentsMadeWritable = true;
        }
        uintptr_t* fixUpLoc = (uintptr_t*)(imageLoadAddress + imageOffsetToRebase);
        *fixUpLoc += slide;
        _logFixups("dyld: fixup: %s:%p += %p\n", leafName, fixUpLoc, (void*)slide);
    },
    ^(uint32_t imageOffsetToBind, closure::Image::ResolvedSymbolTarget bindTarget, bool& stop) {
        // FIXME
    });
    if ( segmentsMadeWritable )
        setSegmentProtects(info, false);
#endif

    // make any read-only data segments read-only
    if ( image->hasReadOnlyData() && !image->inDyldCache() ) {
        image->forEachDiskSegment(^(uint32_t segIndex, uint32_t fileOffset, uint32_t fileSize, int64_t vmOffset, uint64_t vmSize, uint8_t permissions, bool laterReadOnly, bool& segStop) {
            if ( laterReadOnly ) {
                ::mprotect((void*)(imageLoadAddress+vmOffset), (size_t)vmSize, VM_PROT_READ);
            }
        });
    }

    if ( overrideOfCache )
        vmAccountingSetSuspended(false, _logFixups);
}

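// (i386 only) Text relocations require writing into segments that are normally mapped
// without write permission; this temporarily adds VM_PROT_WRITE to those segments and
// restores their recorded protections once the relocations have been applied.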
#if __i386__
void Loader::setSegmentProtects(const LoadedImage& info, bool write)
{
    info.image()->forEachDiskSegment(^(uint32_t segIndex, uint32_t fileOffset, uint32_t fileSize, int64_t vmOffset, uint64_t vmSize, uint8_t protections, bool laterReadOnly, bool& segStop) {
        if ( protections & VM_PROT_WRITE )
            return;
        uint32_t regionProt = protections;
        if ( write )
            regionProt = VM_PROT_WRITE | VM_PROT_READ;
        kern_return_t r = vm_protect(mach_task_self(), ((uintptr_t)info.loadedAddress())+(uintptr_t)vmOffset, (uintptr_t)vmSize, false, regionProt);
        assert( r == KERN_SUCCESS );
    });
}
#endif

#if BUILDING_DYLD
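// LaunchImagesCache maps ImageNums in a fixed window (_firstImageNum to _lastImageNum)
// to their index in the launch images array, so lookups made while running a launch
// closure avoid a linear scan. Entries are stored biased by one so that a zero slot
// means "not cached yet".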
LoadedImage* Loader::LaunchImagesCache::findImage(closure::ImageNum imageNum,
                                                  Array<LoadedImage>& images) const {
    if ( (imageNum < _firstImageNum) || (imageNum >= _lastImageNum) )
        return nullptr;

    uint64_t cacheIndex = imageNum - _firstImageNum;
    uint32_t imagesIndex = _imageIndices[cacheIndex];
    if ( imagesIndex == 0 )
        return nullptr;

    // Note the index is offset by 1 so that a 0 entry means "not set yet"
    return &images[imagesIndex - 1];
}

void Loader::LaunchImagesCache::tryAddImage(closure::ImageNum imageNum,
                                            uint64_t allImagesIndex) {
    if ( (imageNum < _firstImageNum) || (imageNum >= _lastImageNum) )
        return;

    uint64_t cacheIndex = imageNum - _firstImageNum;

    // Note the index is offset by 1 so that a 0 entry means "not set yet"
    _imageIndices[cacheIndex] = (uint32_t)allImagesIndex + 1;
}

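// Splits a buffer into '\n'-terminated lines (each copied into a MAXPATHLEN-sized
// stack buffer) and calls lineHandler for each one; the path-based overload below
// mmap()s a file and runs the same loop over its contents. A minimal usage sketch
// (the path and handler body here are hypothetical, not from dyld):
//
//   forEachLineInFile("/tmp/paths.txt", ^(const char* line, bool& stop) {
//       if ( strcmp(line, "done") == 0 )
//           stop = true;    // stop iterating early
//   });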
void forEachLineInFile(const char* buffer, size_t bufferLen, void (^lineHandler)(const char* line, bool& stop))
{
    bool stop = false;
    const char* const eof = &buffer[bufferLen];
    for (const char* s = buffer; s < eof; ++s) {
        char lineBuffer[MAXPATHLEN];
        char* t = lineBuffer;
        char* tEnd = &lineBuffer[MAXPATHLEN-1];
        while ( (s < eof) && (t != tEnd) ) {
            if ( *s == '\n' )
                break;
            *t++ = *s++;
        }
        *t = '\0';
        lineHandler(lineBuffer, stop);
        if ( stop )
            break;
    }
}

void forEachLineInFile(const char* path, void (^lineHandler)(const char* line, bool& stop))
{
    int fd = dyld::my_open(path, O_RDONLY, 0);
    if ( fd != -1 ) {
        struct stat statBuf;
        if ( fstat(fd, &statBuf) == 0 ) {
            const char* lines = (const char*)mmap(nullptr, (size_t)statBuf.st_size, PROT_READ, MAP_PRIVATE, fd, 0);
            if ( lines != MAP_FAILED ) {
                forEachLineInFile(lines, (size_t)statBuf.st_size, lineHandler);
                munmap((void*)lines, (size_t)statBuf.st_size);
            }
        }
        close(fd);
    }
}

#endif

#if (BUILDING_LIBDYLD || BUILDING_DYLD)
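// Returns true on Apple-internal installs: never in the simulator, via the
// _COMM_PAGE_DEV_FIRM flag on embedded platforms, and via
// csr_check(CSR_ALLOW_APPLE_INTERNAL) elsewhere.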
bool internalInstall()
{
#if TARGET_OS_SIMULATOR
    return false;
#elif __IPHONE_OS_VERSION_MIN_REQUIRED
    uint32_t devFlags = *((uint32_t*)_COMM_PAGE_DEV_FIRM);
    return ( (devFlags & 1) == 1 );
#else
    return ( csr_check(CSR_ALLOW_APPLE_INTERNAL) == 0 );
#endif
}
#endif

#if BUILDING_LIBDYLD
// hack because libdyld.dylib should not link with libc++.dylib
extern "C" void __cxa_pure_virtual() __attribute__((visibility("hidden")));
void __cxa_pure_virtual()
{
    abort();
}
#endif

} // namespace dyld3