/*
 * Copyright (c) 2017 Apple Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */


#include <stdint.h>
#include <string.h>
#include <assert.h>
#include <uuid/uuid.h>
#include <mach/mach.h>
#include <fcntl.h>
#include <errno.h>
#include <unistd.h>
#include <sys/uio.h>
#include <sys/param.h>
#include <sys/sysctl.h>
#include <sys/resource.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/mman.h>
#include <dirent.h>

#include <string>
#include <vector>
#include <map>
#include <list>
#include <algorithm>
#include <unordered_set>
#include <unordered_map>

#include "LaunchCacheFormat.h"
#include "LaunchCacheWriter.h"
#include "shared-cache/dyld_cache_format.h"
#include "shared-cache/DyldSharedCache.h"
#include "shared-cache/FileUtils.h"

namespace std
{
    template <>
    struct hash<dyld3::launch_cache::binary_format::ImageRef>
    {
        std::size_t operator()(const dyld3::launch_cache::binary_format::ImageRef& value) const {
            return std::hash<uint16_t>()(value.value());
        }
    };
}


namespace dyld3 {
namespace launch_cache {


static uintptr_t align(uintptr_t value, uintptr_t align)
{
    return (value+align-1) & (-align);
}
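// For example: align(0x1003, 8) == 0x1008 and align(0x1000, 8) == 0x1000.
// The 'align' argument must be a power of two for the mask trick above to work.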

//////////////////////////// ImageGroupWriter ////////////////////////////////////////

ImageGroupWriter::ImageGroupWriter(uint32_t groupNum, bool pages16KB, bool is64, bool dylibsExpectedOnDisk, bool mtimeAndInodeAreValid)
    : _isDiskImage(groupNum != 0), _is64(is64), _groupNum(groupNum), _pageSize(pages16KB ? 0x4000 : 0x1000),
      _dylibsExpectedOnDisk(dylibsExpectedOnDisk), _imageFileInfoIsCdHash(!mtimeAndInodeAreValid)
{
}


uint32_t ImageGroupWriter::size() const
{
    binary_format::ImageGroup tempGroup;
    layoutBinary(&tempGroup);
    return tempGroup.stringsPoolOffset + tempGroup.stringsPoolSize;
}

void ImageGroupWriter::layoutBinary(binary_format::ImageGroup* grp) const
{
    grp->imagesEntrySize       = _isDiskImage ? sizeof(binary_format::DiskImage) : sizeof(binary_format::CachedImage);
    grp->groupNum              = _groupNum;
    grp->dylibsExpectedOnDisk  = _dylibsExpectedOnDisk;
    grp->imageFileInfoIsCdHash = _imageFileInfoIsCdHash;
    grp->padding               = 0;

    grp->imagesPoolCount  = imageCount();
    grp->imagesPoolOffset = sizeof(binary_format::ImageGroup);
    uint32_t imagesPoolSize = grp->imagesEntrySize * grp->imagesPoolCount;

    grp->imageAliasCount  = (uint32_t)_aliases.size();
    grp->imageAliasOffset = grp->imagesPoolOffset + imagesPoolSize;
    uint32_t imageAliasSize = grp->imageAliasCount * sizeof(binary_format::AliasEntry);

    grp->segmentsPoolCount  = (uint32_t)_segmentPool.size();
    grp->segmentsPoolOffset = (uint32_t)align(grp->imageAliasOffset + imageAliasSize, 8);
    uint32_t segmentsPoolSize = grp->segmentsPoolCount * sizeof(uint64_t);

    grp->dependentsPoolCount  = (uint32_t)_dependentsPool.size();
    grp->dependentsPoolOffset = grp->segmentsPoolOffset + segmentsPoolSize;
    uint32_t dependentsPoolSize = grp->dependentsPoolCount * sizeof(binary_format::ImageRef);

    grp->intializerOffsetPoolCount  = (uint32_t)_initializerOffsets.size();
    grp->intializerOffsetPoolOffset = (uint32_t)align(grp->dependentsPoolOffset + dependentsPoolSize, 4);
    uint32_t intializerOffsetSize = grp->intializerOffsetPoolCount * sizeof(uint32_t);

    grp->intializerListPoolCount  = (uint32_t)_initializerBeforeLists.size();
    grp->intializerListPoolOffset = grp->intializerOffsetPoolOffset + intializerOffsetSize;
    uint32_t intializerListPoolSize = grp->intializerListPoolCount * sizeof(binary_format::ImageRef);

    grp->targetsPoolCount = (uint32_t)_targetsPool.size();
    grp->targetsOffset    = (uint32_t)align(grp->intializerListPoolOffset + intializerListPoolSize, 8);
    uint32_t targetsSize = grp->targetsPoolCount * sizeof(TargetSymbolValue);

    grp->fixupsPoolSize = (uint32_t)_fixupsPool.size();
    grp->fixupsOffset   = (uint32_t)align(grp->targetsOffset + targetsSize, 4);

    grp->cachePatchTableCount  = (uint32_t)_patchPool.size();
    grp->cachePatchTableOffset = (uint32_t)align(grp->fixupsOffset + grp->fixupsPoolSize, 4);
    uint32_t patchTableSize = grp->cachePatchTableCount * sizeof(binary_format::PatchTable);

    grp->cachePatchOffsetsCount  = (uint32_t)_patchLocationPool.size();
    grp->cachePatchOffsetsOffset = grp->cachePatchTableOffset + patchTableSize;
    uint32_t patchOffsetsSize = grp->cachePatchOffsetsCount * sizeof(binary_format::PatchOffset);

    grp->symbolOverrideTableCount  = (uint32_t)_dyldCacheSymbolOverridePool.size();
    grp->symbolOverrideTableOffset = grp->cachePatchOffsetsOffset + patchOffsetsSize;
    uint32_t symbolOverrideSize = grp->symbolOverrideTableCount * sizeof(binary_format::DyldCacheOverride);

    grp->imageOverrideTableCount  = (uint32_t)_imageOverridePool.size();
    grp->imageOverrideTableOffset = grp->symbolOverrideTableOffset + symbolOverrideSize;
    uint32_t imageOverrideSize = grp->imageOverrideTableCount * sizeof(binary_format::ImageRefOverride);

    grp->dofOffsetPoolCount  = (uint32_t)_dofOffsets.size();
    grp->dofOffsetPoolOffset = grp->imageOverrideTableOffset + imageOverrideSize;
    uint32_t dofOffsetSize = grp->dofOffsetPoolCount * sizeof(uint32_t);

    grp->indirectGroupNumPoolCount  = (uint32_t)_indirectGroupNumPool.size();
    grp->indirectGroupNumPoolOffset = grp->dofOffsetPoolOffset + dofOffsetSize;
    uint32_t indirectGroupNumSize = grp->indirectGroupNumPoolCount * sizeof(uint32_t);

    grp->stringsPoolSize   = (uint32_t)_stringPool.size();
    grp->stringsPoolOffset = grp->indirectGroupNumPoolOffset + indirectGroupNumSize;
}
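// Resulting layout of a finalized ImageGroup, in the order computed above
// (alignment padding noted on the left):
//
//              ImageGroup header
//              images pool            (DiskImage or CachedImage entries)
//              alias entries          (AliasEntry)
//   <pad to 8> segments pool          (uint64_t encodings)
//              dependents pool        (ImageRef)
//   <pad to 4> initializer offsets    (uint32_t)
//              initializer lists      (ImageRef)
//   <pad to 8> targets pool           (TargetSymbolValue)
//   <pad to 4> fixups pool            (opcode streams)
//   <pad to 4> cache patch table      (PatchTable)
//              cache patch offsets    (PatchOffset)
//              symbol override table  (DyldCacheOverride)
//              image override table   (ImageRefOverride)
//              DOF offsets            (uint32_t)
//              indirect group numbers (uint32_t)
//              strings pool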


void ImageGroupWriter::finalizeTo(Diagnostics& diag, const std::vector<const BinaryImageGroupData*>& curGroups, binary_format::ImageGroup* grp) const
{
    layoutBinary(grp);
    uint8_t* buffer = (uint8_t*)grp;
    if ( imageCount() > 0 ) {
        uint32_t pad1Size = grp->segmentsPoolOffset - (grp->imageAliasOffset + grp->imageAliasCount * sizeof(binary_format::AliasEntry));
        uint32_t pad2Size = grp->targetsOffset - (grp->intializerListPoolOffset + grp->intializerListPoolCount * sizeof(binary_format::ImageRef));
        memcpy(&buffer[grp->imagesPoolOffset],           &imageByIndex(0),                 grp->imagesEntrySize * grp->imagesPoolCount);
        memcpy(&buffer[grp->imageAliasOffset],           &_aliases[0],                     grp->imageAliasCount * sizeof(binary_format::AliasEntry));
        bzero( &buffer[grp->segmentsPoolOffset-pad1Size], pad1Size);
        memcpy(&buffer[grp->segmentsPoolOffset],         &_segmentPool[0],                 grp->segmentsPoolCount * sizeof(uint64_t));
        memcpy(&buffer[grp->dependentsPoolOffset],       &_dependentsPool[0],              grp->dependentsPoolCount * sizeof(binary_format::ImageRef));
        memcpy(&buffer[grp->intializerListPoolOffset],   &_initializerBeforeLists[0],      grp->intializerListPoolCount * sizeof(binary_format::ImageRef));
        memcpy(&buffer[grp->intializerOffsetPoolOffset], &_initializerOffsets[0],          grp->intializerOffsetPoolCount * sizeof(uint32_t));
        bzero( &buffer[grp->targetsOffset-pad2Size],      pad2Size);
        memcpy(&buffer[grp->targetsOffset],              &_targetsPool[0],                 grp->targetsPoolCount * sizeof(TargetSymbolValue));
        memcpy(&buffer[grp->fixupsOffset],               _fixupsPool.start(),              grp->fixupsPoolSize);
        memcpy(&buffer[grp->cachePatchTableOffset],      &_patchPool[0],                   grp->cachePatchTableCount * sizeof(binary_format::PatchTable));
        memcpy(&buffer[grp->cachePatchOffsetsOffset],    &_patchLocationPool[0],           grp->cachePatchOffsetsCount * sizeof(binary_format::PatchOffset));
        memcpy(&buffer[grp->symbolOverrideTableOffset],  &_dyldCacheSymbolOverridePool[0], grp->symbolOverrideTableCount * sizeof(binary_format::DyldCacheOverride));
        memcpy(&buffer[grp->imageOverrideTableOffset],   &_imageOverridePool[0],           grp->imageOverrideTableCount * sizeof(binary_format::ImageRefOverride));
        memcpy(&buffer[grp->dofOffsetPoolOffset],        &_dofOffsets[0],                  grp->dofOffsetPoolCount * sizeof(uint32_t));
        memcpy(&buffer[grp->indirectGroupNumPoolOffset], &_indirectGroupNumPool[0],        grp->indirectGroupNumPoolCount * sizeof(uint32_t));
        memcpy(&buffer[grp->stringsPoolOffset],          &_stringPool[0],                  grp->stringsPoolSize);
    }

    // now that we have a real ImageGroup, we can analyze it to find max load counts for each image
    ImageGroup imGroup(grp);
    std::unordered_set<const BinaryImageData*> allDependents;
    STACK_ALLOC_DYNARRAY(const binary_format::ImageGroup*, curGroups.size()+1, newGroupList);
    for (int i=0; i < curGroups.size(); ++i)
        newGroupList[i] = curGroups[i];
    newGroupList[newGroupList.count()-1] = grp;
    for (uint32_t i=0; i < grp->imagesPoolCount; ++i) {
        Image image = imGroup.image(i);
        if ( image.isInvalid() )
            continue;
        allDependents.clear();
        allDependents.insert(image.binaryData());
        BinaryImageData* imageData = (BinaryImageData*)(buffer + grp->imagesPoolOffset + (i * grp->imagesEntrySize));
        if ( !image.recurseAllDependentImages(newGroupList, allDependents) ) {
            //diag.warning("%s depends on an invalid dylib", image.path());
            imageData->isInvalid = true;
        }
        imageData->maxLoadCount = (uint32_t)allDependents.size();
    }
}

uint32_t ImageGroupWriter::maxLoadCount(Diagnostics& diag, const std::vector<const BinaryImageGroupData*>& curGroups, binary_format::ImageGroup* grp) const
{
    ImageGroup imGroup(grp);
    std::unordered_set<const BinaryImageData*> allDependents;
    std::vector<const BinaryImageGroupData*> allGroups = curGroups;
    if ( grp->groupNum == 2 )
        allGroups.push_back(grp);
    DynArray<const binary_format::ImageGroup*> groupList(allGroups);
    for (uint32_t i=0; i < grp->imagesPoolCount; ++i) {
        Image image = imGroup.image(i);
        if ( image.isInvalid() )
            continue;
        allDependents.insert(image.binaryData());
        BinaryImageData* imageData = (BinaryImageData*)((char*)grp + grp->imagesPoolOffset + (i * grp->imagesEntrySize));
        if ( !image.recurseAllDependentImages(groupList, allDependents) ) {
            //diag.warning("%s depends on an invalid dylib", image.path());
            imageData->isInvalid = true;
        }
    }
    return (uint32_t)allDependents.size();
}

void ImageGroupWriter::setImageCount(uint32_t count)
{
    if ( _isDiskImage ) {
        _diskImages.resize(count);
        bzero(&_diskImages[0], count*sizeof(binary_format::DiskImage));
    }
    else {
        _images.resize(count);
        bzero(&_images[0], count*sizeof(binary_format::CachedImage));
    }

    int32_t offset = 0 - (int32_t)sizeof(binary_format::ImageGroup);
    for (uint32_t i=0; i < count; ++i) {
        binary_format::Image& img = imageByIndex(i);
        img.isDiskImage  = _isDiskImage;
        img.has16KBpages = (_pageSize == 0x4000);
        img.groupOffset  = offset;
        if ( _isDiskImage )
            offset -= sizeof(binary_format::DiskImage);
        else
            offset -= sizeof(binary_format::CachedImage);
    }
}

uint32_t ImageGroupWriter::imageCount() const
{
    if ( _isDiskImage )
        return (uint32_t)_diskImages.size();
    else
        return (uint32_t)_images.size();
}

binary_format::Image& ImageGroupWriter::imageByIndex(uint32_t imageIndex)
{
    assert(imageIndex < imageCount());
    if ( _isDiskImage )
        return _diskImages[imageIndex];
    else
        return _images[imageIndex];
}

const binary_format::Image& ImageGroupWriter::imageByIndex(uint32_t imageIndex) const
{
    assert(imageIndex < imageCount());
    if ( _isDiskImage )
        return _diskImages[imageIndex];
    else
        return _images[imageIndex];
}

bool ImageGroupWriter::isInvalid(uint32_t imageIndex) const
{
    return imageByIndex(imageIndex).isInvalid;
}

void ImageGroupWriter::setImageInvalid(uint32_t imageIndex)
{
    imageByIndex(imageIndex).isInvalid = true;
}

uint32_t ImageGroupWriter::addIndirectGroupNum(uint32_t groupNum)
{
    auto pos = _indirectGroupNumPoolExisting.find(groupNum);
    if ( pos != _indirectGroupNumPoolExisting.end() )
        return pos->second;
    uint32_t startOffset = (uint32_t)_indirectGroupNumPool.size();
    _indirectGroupNumPool.push_back(groupNum);
    _indirectGroupNumPoolExisting[groupNum] = startOffset;   // memoize keyed by group number so the find() above can hit
    return startOffset;
}

uint32_t ImageGroupWriter::addString(const char* str)
{
    auto pos = _stringPoolExisting.find(str);
    if ( pos != _stringPoolExisting.end() )
        return pos->second;
    uint32_t startOffset = (uint32_t)_stringPool.size();
    size_t size = strlen(str) + 1;
    _stringPool.insert(_stringPool.end(), str, &str[size]);
    _stringPoolExisting[str] = startOffset;
    return startOffset;
}
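// The string pool is deduplicated: the first addString() of, e.g., "liba.dylib"
// appends the string (with its trailing NUL) and returns its offset; any later
// call with an equal string returns the same offset without growing the pool.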

void ImageGroupWriter::alignStringPool()
{
    while ( (_stringPool.size() % 4) != 0 )
        _stringPool.push_back('\0');
}

void ImageGroupWriter::setImagePath(uint32_t imageIndex, const char* path)
{
    binary_format::Image& image = imageByIndex(imageIndex);
    image.pathPoolOffset = addString(path);
    image.pathHash       = ImageGroup::hashFunction(path);
}

void ImageGroupWriter::addImageAliasPath(uint32_t imageIndex, const char* anAlias)
{
    binary_format::AliasEntry entry;
    entry.aliasHash               = ImageGroup::hashFunction(anAlias);
    entry.imageIndexInGroup       = imageIndex;
    entry.aliasOffsetInStringPool = addString(anAlias);
    _aliases.push_back(entry);
}

void ImageGroupWriter::setImageUUID(uint32_t imageIndex, const uuid_t uuid)
{
    memcpy(imageByIndex(imageIndex).uuid, uuid, sizeof(uuid_t));
}

void ImageGroupWriter::setImageHasObjC(uint32_t imageIndex, bool value)
{
    imageByIndex(imageIndex).hasObjC = value;
}

void ImageGroupWriter::setImageIsBundle(uint32_t imageIndex, bool value)
{
    imageByIndex(imageIndex).isBundle = value;
}

void ImageGroupWriter::setImageHasWeakDefs(uint32_t imageIndex, bool value)
{
    imageByIndex(imageIndex).hasWeakDefs = value;
}

void ImageGroupWriter::setImageMayHavePlusLoads(uint32_t imageIndex, bool value)
{
    imageByIndex(imageIndex).mayHavePlusLoads = value;
}

void ImageGroupWriter::setImageNeverUnload(uint32_t imageIndex, bool value)
{
    imageByIndex(imageIndex).neverUnload = value;
}

void ImageGroupWriter::setImageMustBeThisDir(uint32_t imageIndex, bool value)
{
    imageByIndex(imageIndex).cwdSameAsThis = value;
}

void ImageGroupWriter::setImageIsPlatformBinary(uint32_t imageIndex, bool value)
{
    imageByIndex(imageIndex).isPlatformBinary = value;
}

void ImageGroupWriter::setImageOverridableDylib(uint32_t imageIndex, bool value)
{
    imageByIndex(imageIndex).overridableDylib = value;
}

void ImageGroupWriter::setImageFileMtimeAndInode(uint32_t imageIndex, uint64_t mTime, uint64_t inode)
{
    imageByIndex(imageIndex).fileInfo.statInfo.mtime = mTime;
    imageByIndex(imageIndex).fileInfo.statInfo.inode = inode;
    assert(!_imageFileInfoIsCdHash);
}

void ImageGroupWriter::setImageCdHash(uint32_t imageIndex, uint8_t cdHash[20])
{
    // only the first 16 bytes of the 20-byte code directory hash are recorded
    memcpy(imageByIndex(imageIndex).fileInfo.cdHash16.bytes, cdHash, 16);
    assert(_imageFileInfoIsCdHash);
}

void ImageGroupWriter::setImageIsEncrypted(uint32_t imageIndex, bool value)
{
    imageByIndex(imageIndex).isEncrypted = value;
}

void ImageGroupWriter::setImageMaxLoadCount(uint32_t imageIndex, uint32_t count)
{
    imageByIndex(imageIndex).maxLoadCount = count;
}

void ImageGroupWriter::setImageFairPlayRange(uint32_t imageIndex, uint32_t offset, uint32_t size)
{
    assert(imageIndex < imageCount());
    assert(_isDiskImage);
    binary_format::DiskImage& image = _diskImages[imageIndex];
    if ( image.has16KBpages ) {
        assert((offset & 0x3FFF) == 0);
        assert((size & 0x3FFF) == 0);
    }
    else {
        assert((offset & 0xFFF) == 0);
        assert((size & 0xFFF) == 0);
    }
    assert(offset < (_pageSize*16));
    image.fairPlayTextStartPage = offset / _pageSize;
    image.fairPlayTextPageCount = size / _pageSize;
}

void ImageGroupWriter::setImageInitializerOffsets(uint32_t imageIndex, const std::vector<uint32_t>& offsets)
{
    binary_format::Image& image = imageByIndex(imageIndex);
    image.initOffsetsArrayStartIndex = _initializerOffsets.size();
    image.initOffsetsArrayCount      = offsets.size();
    _initializerOffsets.insert(_initializerOffsets.end(), offsets.begin(), offsets.end());
}

void ImageGroupWriter::setImageDOFOffsets(uint32_t imageIndex, const std::vector<uint32_t>& offsets)
{
    binary_format::Image& image = imageByIndex(imageIndex);
    image.dofOffsetsArrayStartIndex = _dofOffsets.size();
    image.dofOffsetsArrayCount      = offsets.size();
    _dofOffsets.insert(_dofOffsets.end(), offsets.begin(), offsets.end());
}

uint32_t ImageGroupWriter::addUniqueInitList(const std::vector<binary_format::ImageRef>& initBefore)
{
    // see if this initBefore list already exists in pool
    if ( _initializerBeforeLists.size() > initBefore.size() ) {
        size_t cmpLen = initBefore.size()*sizeof(binary_format::ImageRef);
        size_t end    = _initializerBeforeLists.size() - initBefore.size();
        for (uint32_t i=0; i < end; ++i) {
            if ( memcmp(&initBefore[0], &_initializerBeforeLists[i], cmpLen) == 0 ) {
                return i;
            }
        }
    }
    uint32_t result = (uint32_t)_initializerBeforeLists.size();
    _initializerBeforeLists.insert(_initializerBeforeLists.end(), initBefore.begin(), initBefore.end());
    return result;
}
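// Init-before lists are stored concatenated in one pool; the scan above lets a
// new list reuse any matching window of already-stored entries, so identical
// (and overlapping) lists share storage.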

void ImageGroupWriter::setImageInitBefore(uint32_t imageIndex, const std::vector<binary_format::ImageRef>& initBefore)
{
    binary_format::Image& image = imageByIndex(imageIndex);
    image.initBeforeArrayStartIndex = addUniqueInitList(initBefore);
    image.initBeforeArrayCount      = initBefore.size();
}

void ImageGroupWriter::setImageSliceOffset(uint32_t imageIndex, uint64_t fileOffset)
{
    assert(imageIndex < imageCount());
    assert(_isDiskImage);
    binary_format::DiskImage& image = _diskImages[imageIndex];
    image.sliceOffsetIn4K = (uint32_t)(fileOffset / 4096);
}

void ImageGroupWriter::setImageCodeSignatureLocation(uint32_t imageIndex, uint32_t fileOffset, uint32_t size)
{
    assert(imageIndex < imageCount());
    assert(_isDiskImage);
    binary_format::DiskImage& image = _diskImages[imageIndex];
    image.codeSignFileOffset = fileOffset;
    image.codeSignFileSize   = size;
}

void ImageGroupWriter::setImageDependentsCount(uint32_t imageIndex, uint32_t count)
{
    binary_format::Image& image = imageByIndex(imageIndex);
    image.dependentsArrayStartIndex = _dependentsPool.size();
    image.dependentsArrayCount      = count;
    _dependentsPool.resize(_dependentsPool.size() + count);
}

void ImageGroupWriter::setImageDependent(uint32_t imageIndex, uint32_t depIndex, binary_format::ImageRef dependent)
{
    binary_format::Image& image = imageByIndex(imageIndex);
    assert(depIndex < image.dependentsArrayCount);
    _dependentsPool[image.dependentsArrayStartIndex + depIndex] = dependent;
}

uint32_t ImageGroupWriter::imageDependentsCount(uint32_t imageIndex) const
{
    return imageByIndex(imageIndex).dependentsArrayCount;
}

binary_format::ImageRef ImageGroupWriter::imageDependent(uint32_t imageIndex, uint32_t depIndex) const
{
    const binary_format::Image& image = imageByIndex(imageIndex);
    assert(depIndex < image.dependentsArrayCount);
    return _dependentsPool[image.dependentsArrayStartIndex + depIndex];
}

void ImageGroupWriter::setImageSegments(uint32_t imageIndex, MachOParser& imageParser, uint64_t cacheUnslideBaseAddress)
{
    if ( _isDiskImage ) {
        __block uint32_t totalPageCount    = 0;
        __block uint32_t lastFileOffsetEnd = 0;
        __block uint64_t lastVmAddrEnd     = 0;
        __block std::vector<binary_format::DiskSegment> diskSegments;
        diskSegments.reserve(8);
        imageParser.forEachSegment(^(const char* segName, uint32_t fileOffset, uint32_t fileSize, uint64_t vmAddr, uint64_t vmSize, uint8_t protections, bool& stop) {
            if ( (fileOffset != 0) && (fileOffset != lastFileOffsetEnd) ) {
                binary_format::DiskSegment filePadding;
                filePadding.filePageCount = (fileOffset - lastFileOffsetEnd)/_pageSize;
                filePadding.vmPageCount   = 0;
                filePadding.permissions   = 0;
                filePadding.paddingNotSeg = 1;
                diskSegments.push_back(filePadding);
            }
            if ( (lastVmAddrEnd != 0) && (vmAddr != lastVmAddrEnd) ) {
                binary_format::DiskSegment vmPadding;
                vmPadding.filePageCount = 0;
                vmPadding.vmPageCount   = (vmAddr - lastVmAddrEnd)/_pageSize;
                vmPadding.permissions   = 0;
                vmPadding.paddingNotSeg = 1;
                diskSegments.push_back(vmPadding);
                totalPageCount += vmPadding.vmPageCount;
            }
            {
                binary_format::DiskSegment segInfo;
                segInfo.filePageCount = (fileSize+_pageSize-1)/_pageSize;
                segInfo.vmPageCount   = (vmSize+_pageSize-1)/_pageSize;
                segInfo.permissions   = protections & 7;
                segInfo.paddingNotSeg = 0;
                diskSegments.push_back(segInfo);
                totalPageCount += segInfo.vmPageCount;
                if ( fileSize != 0 )
                    lastFileOffsetEnd = fileOffset + fileSize;
                if ( vmSize != 0 )
                    lastVmAddrEnd = vmAddr + vmSize;
            }
        });
        binary_format::Image& image = imageByIndex(imageIndex);
        image.segmentsArrayStartIndex = _segmentPool.size();
        image.segmentsArrayCount      = diskSegments.size();
        _segmentPool.insert(_segmentPool.end(), (uint64_t*)&diskSegments[0], (uint64_t*)&diskSegments[image.segmentsArrayCount]);
        _diskImages[imageIndex].totalVmPages = totalPageCount;
    }
    else {
        binary_format::Image& image = imageByIndex(imageIndex);
        image.segmentsArrayStartIndex = _segmentPool.size();
        image.segmentsArrayCount      = imageParser.segmentCount();
        _segmentPool.resize(_segmentPool.size() + image.segmentsArrayCount);
        __block uint32_t segIndex = 0;
        imageParser.forEachSegment(^(const char* segName, uint32_t fileOffset, uint32_t fileSize, uint64_t vmAddr, uint64_t vmSize, uint8_t protections, bool& stop) {
            binary_format::DyldCacheSegment seg = { (uint32_t)(vmAddr-cacheUnslideBaseAddress), (uint32_t)vmSize, protections };
            _segmentPool[image.segmentsArrayStartIndex + segIndex] = *((uint64_t*)&seg);
            ++segIndex;
        });
    }
}
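// Note: the segments pool is a flat array of uint64_t; each entry is the raw
// 64-bit encoding of either a binary_format::DiskSegment (disk images) or a
// binary_format::DyldCacheSegment (cached dylibs), as written above.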

void ImageGroupWriter::setImagePatchLocations(uint32_t imageIndex, uint32_t funcVmOffset, const std::unordered_set<uint32_t>& patchLocations)
{
    assert(imageIndex < imageCount());
    binary_format::CachedImage& image = _images[imageIndex];
    if ( image.patchStartIndex == 0 ) {
        image.patchStartIndex = (uint32_t)_patchPool.size();
        image.patchCount      = 0;
    }
    else {
        assert(image.patchStartIndex + image.patchCount == _patchPool.size());
    }

    binary_format::PatchTable entry = { funcVmOffset, (uint32_t)_patchLocationPool.size() };
    for (uint32_t loc : patchLocations) {
        _patchLocationPool.push_back(*((binary_format::PatchOffset*)&loc));
    }
    _patchLocationPool.back().last = true;
    _patchPool.push_back(entry);
    _images[imageIndex].patchCount++;
}
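// Each PatchTable entry pairs a function's vm offset in the dyld cache with the
// index of its first PatchOffset; the run of offsets for that function continues
// until the entry whose 'last' bit is set, as marked above.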

void ImageGroupWriter::setGroupCacheOverrides(const std::vector<binary_format::DyldCacheOverride>& cacheOverrides)
{
    _dyldCacheSymbolOverridePool = cacheOverrides;
}

void ImageGroupWriter::addImageIsOverride(binary_format::ImageRef standardDylibRef, binary_format::ImageRef overrideDylibRef)
{
    _imageOverridePool.push_back({standardDylibRef, overrideDylibRef});
}


class SegmentFixUpBuilder
{
public:
                    SegmentFixUpBuilder(uint32_t segIndex, uint32_t dataSegPageCount, uint32_t pageSize, bool is64,
                                        const std::vector<ImageGroupWriter::FixUp>& fixups,
                                        std::vector<TargetSymbolValue>& targetsForImage, bool log);

    bool            hasFixups() { return _hasFixups; }
    uint32_t        segIndex()  { return _segIndex; }
    void            appendSegmentFixUpMap(ContentBuffer&);

private:
    struct TmpOpcode {
        binary_format::FixUpOpcode  op;
        uint8_t                     repeatOpcodeCount;
        uint16_t                    count;

        bool operator!=(const TmpOpcode& rhs) const {
            return ((op != rhs.op) || (count != rhs.count) || (repeatOpcodeCount != rhs.repeatOpcodeCount));
        }
    };


    ContentBuffer   makeFixupOpcodesForPage(uint32_t pageStartSegmentOffset, const ImageGroupWriter::FixUp* start,
                                            const ImageGroupWriter::FixUp* end);
    uint32_t        getOrdinalForTarget(TargetSymbolValue);
    void            expandOpcodes(const std::vector<TmpOpcode>& opcodes, uint8_t page[0x4000], uint32_t& offset, uint32_t& ordinal);
    void            expandOpcodes(const std::vector<TmpOpcode>& opcodes, uint8_t page[0x4000]);
    bool            samePageContent(const uint8_t page1[], const uint8_t page2[]);
    void            printOpcodes(const char* prefix, const std::vector<TmpOpcode> opcodes);
    void            printOpcodes(const char* prefix, bool printOffset, const TmpOpcode opcodes[], size_t opcodesLen, uint32_t& offset);
    uint32_t        opcodeEncodingSize(const std::vector<TmpOpcode>& opcodes);

    const bool                          _is64;
    const bool                          _log;
    bool                                _hasFixups;
    const uint32_t                      _segIndex;
    const uint32_t                      _dataSegPageCount;
    const uint32_t                      _pageSize;
    std::vector<TargetSymbolValue>&     _targets;
    std::vector<ContentBuffer>          _opcodesByPage;
};




SegmentFixUpBuilder::SegmentFixUpBuilder(uint32_t segIndex, uint32_t segPageCount, uint32_t pageSize, bool is64,
                                         const std::vector<ImageGroupWriter::FixUp>& fixups,
                                         std::vector<TargetSymbolValue>& targetsForImage, bool log)
    : _is64(is64), _log(log), _hasFixups(false), _segIndex(segIndex), _dataSegPageCount(segPageCount), _pageSize(pageSize), _targets(targetsForImage)
{
    //fprintf(stderr, "SegmentFixUpBuilder(segIndex=%d, segPageCount=%d)\n", segIndex, segPageCount);
    _targets.push_back(TargetSymbolValue::makeInvalid());      // ordinal zero reserved to mean "add slide"
    _opcodesByPage.resize(segPageCount);
    size_t startFixupIndex = 0;
    for (uint32_t pageIndex=0; pageIndex < segPageCount; ++pageIndex) {
        uint32_t pageStartOffset = pageIndex*_pageSize;
        uint32_t pageEndOffset   = pageStartOffset+_pageSize;
        // find first index in this page
        while ( (startFixupIndex < fixups.size()) && ((fixups[startFixupIndex].segIndex != segIndex) || (fixups[startFixupIndex].segOffset < pageStartOffset)) )
            ++startFixupIndex;
        // find first index beyond this page
        size_t endFixupIndex = startFixupIndex;
        while ( (endFixupIndex < fixups.size()) && (fixups[endFixupIndex].segIndex == segIndex) && (fixups[endFixupIndex].segOffset < pageEndOffset) )
            ++endFixupIndex;
        // create opcodes for fixups on this page
        _opcodesByPage[pageIndex] = makeFixupOpcodesForPage(pageStartOffset, &fixups[startFixupIndex], &fixups[endFixupIndex]);
        startFixupIndex = endFixupIndex;
    }
}


uint32_t SegmentFixUpBuilder::getOrdinalForTarget(TargetSymbolValue target)
{
    uint32_t ordinal = 0;
    for (const TargetSymbolValue& entry : _targets) {
        if ( entry == target )
            return ordinal;
        ++ordinal;
    }
    _targets.push_back(target);
    return ordinal;
}

void SegmentFixUpBuilder::appendSegmentFixUpMap(ContentBuffer& buffer)
{
    std::vector<uint32_t> offsets;
    uint32_t curOffset = sizeof(binary_format::SegmentFixupsByPage)-4 + _dataSegPageCount*4;
    for (auto& opcodes : _opcodesByPage) {
        if ( opcodes.size() == 0 )
            offsets.push_back(0);
        else
            offsets.push_back(curOffset);
        curOffset += opcodes.size();
    }
    uint32_t totalSize = curOffset;

    // write header
    buffer.append_uint32(totalSize);         // SegmentFixupsByPage.size
    buffer.append_uint32(_pageSize);         // SegmentFixupsByPage.pageSize
    buffer.append_uint32(_dataSegPageCount); // SegmentFixupsByPage.pageCount
    for (uint32_t i=0; i < _dataSegPageCount; ++i) {
        buffer.append_uint32(offsets[i]);    // SegmentFixupsByPage.pageInfoOffsets[i]
    }
    // write each page's opcode stream
    for (uint32_t i=0; i < offsets.size(); ++i) {
        buffer.append_buffer(_opcodesByPage[i]);
    }
}
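// Buffer layout produced above (all offsets relative to the start of the
// SegmentFixupsByPage header):
//   uint32_t size;                        // total bytes, including opcode streams
//   uint32_t pageSize;
//   uint32_t pageCount;
//   uint32_t pageInfoOffsets[pageCount];  // 0 means "no fixups on this page"
//   ...per-page opcode streams...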

void SegmentFixUpBuilder::expandOpcodes(const std::vector<TmpOpcode>& opcodes, uint8_t page[])
{
    uint32_t offset  = 0;
    uint32_t ordinal = 0;
    bzero(page, _pageSize);
    expandOpcodes(opcodes, page, offset, ordinal);
}

void SegmentFixUpBuilder::expandOpcodes(const std::vector<TmpOpcode>& opcodes, uint8_t page[], uint32_t& offset, uint32_t& ordinal)
{
    for (int i=0; i < opcodes.size(); ++i) {
        assert(offset < _pageSize);
        TmpOpcode tmp = opcodes[i];
        switch ( tmp.op ) {
            case binary_format::FixUpOpcode::bind64:
                *(uint64_t*)(&page[offset]) = ordinal;
                offset += 8;
                break;
            case binary_format::FixUpOpcode::bind32:
                *(uint32_t*)(&page[offset]) = ordinal;
                offset += 4;
                break;
            case binary_format::FixUpOpcode::rebase64:
                *(uint64_t*)(&page[offset]) = 0x1122334455667788;
                offset += 8;
                break;
            case binary_format::FixUpOpcode::rebase32:
                *(uint32_t*)(&page[offset]) = 0x23452345;
                offset += 4;
                break;
            case binary_format::FixUpOpcode::rebaseText32:
                *(uint32_t*)(&page[offset]) = 0x56785678;
                offset += 4;
                break;
            case binary_format::FixUpOpcode::bindText32:
                *(uint32_t*)(&page[offset]) = 0x98769876;
                offset += 4;
                break;
            case binary_format::FixUpOpcode::bindTextRel32:
                *(uint32_t*)(&page[offset]) = 0x34563456;
                offset += 4;
                break;
            case binary_format::FixUpOpcode::bindImportJmp32:
                *(uint32_t*)(&page[offset]) = 0x44556677;
                offset += 4;
                break;
            case binary_format::FixUpOpcode::done:
                break;
            case binary_format::FixUpOpcode::setPageOffset:
                offset = tmp.count;
                break;
            case binary_format::FixUpOpcode::incPageOffset:
                offset += (tmp.count*4);
                break;
            case binary_format::FixUpOpcode::setOrdinal:
                ordinal = tmp.count;
                break;
            case binary_format::FixUpOpcode::incOrdinal:
                ++ordinal;
                break;
            case binary_format::FixUpOpcode::repeat: {
                    std::vector<TmpOpcode> pattern;
                    for (int j=0; j < tmp.repeatOpcodeCount; ++j) {
                        pattern.push_back(opcodes[i+j+1]);
                    }
                    for (int j=0; j < tmp.count; ++j) {
                        expandOpcodes(pattern, page, offset, ordinal);
                    }
                    i += tmp.repeatOpcodeCount;
                }
                break;
        }
    }
}



uint32_t SegmentFixUpBuilder::opcodeEncodingSize(const std::vector<TmpOpcode>& opcodes)
{
    uint32_t size = 0;
    for (int i=0; i < opcodes.size(); ++i) {
        switch ( opcodes[i].op ) {
            case binary_format::FixUpOpcode::bind64:
            case binary_format::FixUpOpcode::bind32:
            case binary_format::FixUpOpcode::rebase64:
            case binary_format::FixUpOpcode::rebase32:
            case binary_format::FixUpOpcode::rebaseText32:
            case binary_format::FixUpOpcode::bindText32:
            case binary_format::FixUpOpcode::bindTextRel32:
            case binary_format::FixUpOpcode::bindImportJmp32:
            case binary_format::FixUpOpcode::done:
                ++size;
                break;
            case binary_format::FixUpOpcode::setPageOffset:
            case binary_format::FixUpOpcode::incPageOffset:
            case binary_format::FixUpOpcode::setOrdinal:
            case binary_format::FixUpOpcode::incOrdinal:
                ++size;
                // counts of 0 or >= 16 are emitted as an op byte followed by a uleb128
                // (matching the byte emission in makeFixupOpcodesForPage)
                if ( (opcodes[i].count == 0) || (opcodes[i].count >= 16) )
                    size += ContentBuffer::uleb128_size(opcodes[i].count);
                break;
            case binary_format::FixUpOpcode::repeat: {
                    ++size;
                    size += ContentBuffer::uleb128_size(opcodes[i].count);
                    std::vector<TmpOpcode> pattern;
                    for (int j=0; j < opcodes[i].repeatOpcodeCount; ++j) {
                        pattern.push_back(opcodes[++i]);
                    }
                    size += opcodeEncodingSize(pattern);
                }
                break;
        }
    }
    return size;
}
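// Encoding size example (matching the byte emission in makeFixupOpcodesForPage):
// an incPageOffset with count 3 packs into a single byte (op | 3), while a count
// of 100 needs the op byte plus a one-byte uleb128, i.e. two bytes; a repeat of
// one single-byte opcode costs the repeat byte, the uleb128 run count, and the
// pattern byte.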


bool SegmentFixUpBuilder::samePageContent(const uint8_t page1[], const uint8_t page2[])
{
    bool result = true;
    if (memcmp(page1, page2, _pageSize) != 0) {
        if ( _is64 ) {
            const uint64_t* p1 = (uint64_t*)page1;
            const uint64_t* p2 = (uint64_t*)page2;
            for (int i=0; i < _pageSize/8; ++i) {
                if ( p1[i] != p2[i] ) {
                    fprintf(stderr, "page1[0x%03X] = 0x%016llX, page2[0x%03X] = 0x%016llX\n", i*8, p1[i], i*8, p2[i]);
                    result = false;
                }
            }
        }
        else {
            const uint32_t* p1 = (uint32_t*)page1;
            const uint32_t* p2 = (uint32_t*)page2;
            for (int i=0; i < _pageSize/4; ++i) {
                if ( p1[i] != p2[i] ) {
                    fprintf(stderr, "page1[0x%03X] = 0x%08X, page2[0x%03X] = 0x%08X\n", i*4, p1[i], i*4, p2[i]);
                    result = false;
                }
            }
        }
    }
    return result;
}

void SegmentFixUpBuilder::printOpcodes(const char* prefix, const std::vector<TmpOpcode> opcodes)
{
    uint32_t offset = 0;
    printOpcodes(prefix, true, &opcodes[0], opcodes.size(), offset);
}

void SegmentFixUpBuilder::printOpcodes(const char* prefix, bool printOffset, const TmpOpcode opcodes[], size_t opcodesLen, uint32_t& offset)
{
    for (int i=0; i < opcodesLen; ++i) {
        TmpOpcode tmp = opcodes[i];
        if ( printOffset )
            fprintf(stderr, "%s offset=0x%04X: ", prefix, offset);
        else
            fprintf(stderr, "%s ", prefix);
        switch ( tmp.op ) {
            case binary_format::FixUpOpcode::bind64:
                fprintf(stderr, "bind64\n");
                offset += 8;
                break;
            case binary_format::FixUpOpcode::bind32:
                fprintf(stderr, "bind32\n");
                offset += 4;
                break;
            case binary_format::FixUpOpcode::rebase64:
                fprintf(stderr, "rebase64\n");
                offset += 8;
                break;
            case binary_format::FixUpOpcode::rebase32:
                fprintf(stderr, "rebase32\n");
                offset += 4;
                break;
            case binary_format::FixUpOpcode::rebaseText32:
                fprintf(stderr, "rebaseText32\n");
                offset += 4;
                break;
            case binary_format::FixUpOpcode::bindText32:
                fprintf(stderr, "bindText32\n");
                offset += 4;
                break;
            case binary_format::FixUpOpcode::bindTextRel32:
                fprintf(stderr, "bindTextRel32\n");
                offset += 4;
                break;
            case binary_format::FixUpOpcode::bindImportJmp32:
                fprintf(stderr, "bindJmpRel32\n");
                offset += 4;
                break;
            case binary_format::FixUpOpcode::done:
                fprintf(stderr, "done\n");
                break;
            case binary_format::FixUpOpcode::setPageOffset:
                fprintf(stderr, "setPageOffset(%d)\n", tmp.count);
                offset = tmp.count;
                break;
            case binary_format::FixUpOpcode::incPageOffset:
                fprintf(stderr, "incPageOffset(%d)\n", tmp.count);
                offset += (tmp.count*4);
                break;
            case binary_format::FixUpOpcode::setOrdinal:
                fprintf(stderr, "setOrdinal(%d)\n", tmp.count);
                break;
            case binary_format::FixUpOpcode::incOrdinal:
                fprintf(stderr, "incOrdinal(%d)\n", tmp.count);
                break;
            case binary_format::FixUpOpcode::repeat: {
                    char morePrefix[128];
                    strcpy(morePrefix, prefix);
                    strcat(morePrefix, "    ");
                    uint32_t prevOffset = offset;
                    fprintf(stderr, "repeat(%d times, next %d opcodes)\n", tmp.count, tmp.repeatOpcodeCount);
                    printOpcodes(morePrefix, false, &opcodes[i+1], tmp.repeatOpcodeCount, offset);
                    i += tmp.repeatOpcodeCount;
                    uint32_t repeatDelta = (offset-prevOffset)*(tmp.count-1);
                    offset += repeatDelta;
                }
                break;
        }
    }
}

ContentBuffer SegmentFixUpBuilder::makeFixupOpcodesForPage(uint32_t pageStartSegmentOffset, const ImageGroupWriter::FixUp* start, const ImageGroupWriter::FixUp* end)
{
    //fprintf(stderr, "  makeFixupOpcodesForPage(segOffset=0x%06X, startFixup=%p, endFixup=%p)\n", pageStartSegmentOffset, start, end);
    std::vector<TmpOpcode> tmpOpcodes;
    const uint32_t pointerSize = (_is64 ? 8 : 4);
    uint32_t offset  = pageStartSegmentOffset;
    uint32_t ordinal = 0;
    const ImageGroupWriter::FixUp* lastFixup = nullptr;
    for (const ImageGroupWriter::FixUp* f=start; f < end; ++f) {
        // ignore double bind at same address (ld64 bug)
        if ( lastFixup && (lastFixup->segOffset == f->segOffset) )
            continue;
        // add opcode to adjust current offset if needed
        if ( f->segOffset != offset ) {
            if ( ((f->segOffset % 4) != 0) || ((offset % 4) != 0) ) {
                // misaligned pointers use the bigger setPageOffset opcode
                tmpOpcodes.push_back({binary_format::FixUpOpcode::setPageOffset, 0, (uint16_t)(f->segOffset-pageStartSegmentOffset)});
            }
            else {
                uint32_t delta4 = (uint32_t)(f->segOffset - offset)/4;
                assert(delta4*4 < _pageSize);
                tmpOpcodes.push_back({binary_format::FixUpOpcode::incPageOffset, 0, (uint16_t)delta4});
            }
            offset = (uint32_t)f->segOffset;
        }
        uint32_t nextOrd = 0;
        switch ( f->type ) {
            case ImageGroupWriter::FixupType::rebase:
                tmpOpcodes.push_back({_is64 ? binary_format::FixUpOpcode::rebase64 : binary_format::FixUpOpcode::rebase32, 0, 0});
                offset += pointerSize;
                _hasFixups = true;
                break;
            case ImageGroupWriter::FixupType::pointerLazyBind:
            case ImageGroupWriter::FixupType::pointerBind:
                //assert(f->target.imageIndex == binary_format::OrdinalEntry::kImageIndexDyldSharedCache);
                nextOrd = getOrdinalForTarget(f->target);
                if ( nextOrd != ordinal ) {
                    if ( (nextOrd > ordinal) && (nextOrd < (ordinal+31)) ) {
                        tmpOpcodes.push_back({binary_format::FixUpOpcode::incOrdinal, 0, (uint16_t)(nextOrd-ordinal)});
                    }
                    else {
                        tmpOpcodes.push_back({binary_format::FixUpOpcode::setOrdinal, 0, (uint16_t)nextOrd});
                    }
                    ordinal = nextOrd;
                }
                tmpOpcodes.push_back({_is64 ? binary_format::FixUpOpcode::bind64 : binary_format::FixUpOpcode::bind32, 0, 0});
                offset += pointerSize;
                _hasFixups = true;
                break;
            case ImageGroupWriter::FixupType::rebaseText:
                assert(!_is64);
                tmpOpcodes.push_back({binary_format::FixUpOpcode::rebaseText32, 0, 0});
                offset += pointerSize;
                _hasFixups = true;
                break;
            case ImageGroupWriter::FixupType::bindText:
                assert(!_is64);
                nextOrd = getOrdinalForTarget(f->target);
                if ( nextOrd != ordinal ) {
                    if ( (nextOrd > ordinal) && (nextOrd < (ordinal+31)) ) {
                        tmpOpcodes.push_back({binary_format::FixUpOpcode::incOrdinal, 0, (uint16_t)(nextOrd-ordinal)});
                    }
                    else {
                        tmpOpcodes.push_back({binary_format::FixUpOpcode::setOrdinal, 0, (uint16_t)nextOrd});
                    }
                    ordinal = nextOrd;
                }
                tmpOpcodes.push_back({binary_format::FixUpOpcode::bindText32, 0, 0});
                offset += pointerSize;
                _hasFixups = true;
                break;
            case ImageGroupWriter::FixupType::bindTextRel:
                assert(!_is64);
                nextOrd = getOrdinalForTarget(f->target);
                if ( nextOrd != ordinal ) {
                    if ( (nextOrd > ordinal) && (nextOrd < (ordinal+31)) ) {
                        tmpOpcodes.push_back({binary_format::FixUpOpcode::incOrdinal, 0, (uint16_t)(nextOrd-ordinal)});
                    }
                    else {
                        tmpOpcodes.push_back({binary_format::FixUpOpcode::setOrdinal, 0, (uint16_t)nextOrd});
                    }
                    ordinal = nextOrd;
                }
                tmpOpcodes.push_back({binary_format::FixUpOpcode::bindTextRel32, 0, 0});
                offset += pointerSize;
                _hasFixups = true;
                break;
            case ImageGroupWriter::FixupType::bindImportJmpRel:
                assert(!_is64);
                nextOrd = getOrdinalForTarget(f->target);
                if ( nextOrd != ordinal ) {
                    if ( (nextOrd > ordinal) && (nextOrd < (ordinal+31)) ) {
                        tmpOpcodes.push_back({binary_format::FixUpOpcode::incOrdinal, 0, (uint16_t)(nextOrd-ordinal)});
                    }
                    else {
                        tmpOpcodes.push_back({binary_format::FixUpOpcode::setOrdinal, 0, (uint16_t)nextOrd});
                    }
                    ordinal = nextOrd;
                }
                tmpOpcodes.push_back({binary_format::FixUpOpcode::bindImportJmp32, 0, 0});
                offset += pointerSize;
                _hasFixups = true;
                break;
            case ImageGroupWriter::FixupType::ignore:
                assert(0 && "ignore fixup types should have been removed");
                break;
        }
        lastFixup = f;
    }

    uint8_t firstExpansion[0x4010]; // larger than 16KB to handle unaligned pointers
    expandOpcodes(tmpOpcodes, firstExpansion);

    if (_log) printOpcodes("start", tmpOpcodes);


    for (int stride=1; stride < 6; ++stride) {
        for (int i=0; i < tmpOpcodes.size(); ++i) {
            int j;
            for (j=i+stride; j < tmpOpcodes.size(); j += stride) {
                bool strideMatch = true;
                for (int k=0; k < stride; ++k) {
                    if ( (j+k >= tmpOpcodes.size()) || (tmpOpcodes[j+k] != tmpOpcodes[i+k]) ) {
                        strideMatch = false;
                        break;
                    }
                    if ( (tmpOpcodes[j+k].op == binary_format::FixUpOpcode::repeat) && (tmpOpcodes[j+k].repeatOpcodeCount+k >= stride) ) {
                        strideMatch = false;
                        break;
                    }
                }
                if ( !strideMatch )
                    break;
            }
            // see if same opcode sequence repeats four or more times
            int repeats = (j-i)/stride;
            if ( repeats > 3 ) {
                // replace run with repeat opcode
                tmpOpcodes[i].op                = binary_format::FixUpOpcode::repeat;
                tmpOpcodes[i].repeatOpcodeCount = stride;
                tmpOpcodes[i].count             = repeats;
                tmpOpcodes.erase(tmpOpcodes.begin()+i+1, tmpOpcodes.begin()+j-stride);
                i += stride;
            }
            else {
                // don't look for matches inside a repeat loop
                if ( tmpOpcodes[i].op == binary_format::FixUpOpcode::repeat )
                    i += tmpOpcodes[i].repeatOpcodeCount;
            }
        }
        if (_log) {
            char tmp[32];
            sprintf(tmp, "stride %d", stride);
            printOpcodes(tmp, tmpOpcodes);
        }
        uint8_t secondExpansion[0x4010];
        expandOpcodes(tmpOpcodes, secondExpansion);
        if ( !samePageContent(firstExpansion, secondExpansion) )
            printOpcodes("opt", tmpOpcodes);
    }

    // convert temp opcodes to real opcodes
    bool wroteDone = false;
    ContentBuffer opcodes;
    for (const TmpOpcode& tmp : tmpOpcodes) {
        switch ( tmp.op ) {
            case binary_format::FixUpOpcode::bind64:
            case binary_format::FixUpOpcode::bind32:
            case binary_format::FixUpOpcode::rebase64:
            case binary_format::FixUpOpcode::rebase32:
            case binary_format::FixUpOpcode::rebaseText32:
            case binary_format::FixUpOpcode::bindText32:
            case binary_format::FixUpOpcode::bindTextRel32:
            case binary_format::FixUpOpcode::bindImportJmp32:
                opcodes.append_byte((uint8_t)tmp.op);
                break;
            case binary_format::FixUpOpcode::done:
                opcodes.append_byte((uint8_t)tmp.op);
                wroteDone = true;
                break;
            case binary_format::FixUpOpcode::setPageOffset:
            case binary_format::FixUpOpcode::incPageOffset:
            case binary_format::FixUpOpcode::setOrdinal:
            case binary_format::FixUpOpcode::incOrdinal:
                if ( (tmp.count > 0) && (tmp.count < 16) ) {
                    opcodes.append_byte((uint8_t)tmp.op | tmp.count);
                }
                else {
                    opcodes.append_byte((uint8_t)tmp.op);
                    opcodes.append_uleb128(tmp.count);
                }
                break;
            case binary_format::FixUpOpcode::repeat: {
                    const TmpOpcode* nextOpcodes = &tmp;
                    ++nextOpcodes;
                    std::vector<TmpOpcode> pattern;
                    for (int i=0; i < tmp.repeatOpcodeCount; ++i) {
                        pattern.push_back(nextOpcodes[i]);
                    }
                    uint32_t repeatBytes = opcodeEncodingSize(pattern);
                    assert(repeatBytes < 15);
                    opcodes.append_byte((uint8_t)tmp.op | repeatBytes);
                    opcodes.append_uleb128(tmp.count);
                }
                break;
        }
    }

    if ( (opcodes.size() == 0) || !wroteDone )
        opcodes.append_byte((uint8_t)binary_format::FixUpOpcode::done);

    // make opcodes streams 4-byte aligned
    opcodes.pad_to_size(4);

    //fprintf(stderr, "  makeFixupOpcodesForPage(pageStartSegmentOffset=0x%0X) result=%lu bytes\n", pageStartSegmentOffset, opcodes.size());

    return opcodes;
}
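// Worked example: a page holding eight consecutive 64-bit rebases at its start
// first becomes eight rebase64 temp opcodes; the stride-1 pass folds them into
// repeat(count=8, 1 opcode) followed by rebase64, so the final stream is just
// four bytes: (repeat|1), uleb128(8), rebase64, done.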




void ImageGroupWriter::setImageFixups(Diagnostics& diag, uint32_t imageIndex, std::vector<FixUp>& fixups, bool hasTextRelocs)
{
    // only applicable for ImageGroup in a closure (not group of images in dyld cache)
    assert(_isDiskImage);

    // sort all rebases and binds by address
    std::sort(fixups.begin(), fixups.end(), [](FixUp& lhs, FixUp& rhs) -> bool {
        if ( &lhs == &rhs )
            return false;
        // sort by segIndex
        if ( lhs.segIndex < rhs.segIndex )
            return true;
        if ( lhs.segIndex > rhs.segIndex )
            return false;
        // then sort by segOffset
        if ( lhs.segOffset < rhs.segOffset )
            return true;
        if ( lhs.segOffset > rhs.segOffset )
            return false;
        // two fixups at same location

        // if the same (linker bug), ignore one
        if ( lhs.type == rhs.type ) {
            rhs.type = FixupType::ignore;
        }
        // if one is rebase for lazy pointer, ignore rebase because dyld3 does not lazy bind
        else if ( (lhs.type == FixupType::pointerLazyBind) && (rhs.type == FixupType::rebase) ) {
            // lazy pointers have rebase and (lazy) bind at same location. since dyld3 does not do lazy binding, we mark the rebase to be ignored later
            rhs.type = FixupType::ignore;
        }
        else if ( (rhs.type == FixupType::pointerLazyBind) && (lhs.type == FixupType::rebase) ) {
            // lazy pointers have rebase and (lazy) bind at same location. since dyld3 does not do lazy binding, we mark the rebase to be ignored later
            lhs.type = FixupType::ignore;
        }
        return (lhs.type < rhs.type);
    });

    // remove ignorable fixups
    fixups.erase(std::remove_if(fixups.begin(), fixups.end(),
                                [&](const FixUp& a) {
                                    return (a.type == FixupType::ignore);
                                }), fixups.end());

    // look for overlapping fixups
    const uint32_t pointerSize = (_is64 ? 8 : 4);
    const FixUp* lastFixup = nullptr;
    for (const FixUp& fixup : fixups) {
        if ( lastFixup != nullptr ) {
            if ( lastFixup->segIndex == fixup.segIndex ) {
                uint64_t increment = fixup.segOffset - lastFixup->segOffset;
                if ( increment < pointerSize ) {
                    if ( (increment == 0) && ((lastFixup->type == FixupType::ignore) || (fixup.type == FixupType::ignore)) ) {
                        // allow rebase to local lazy helper and lazy bind to same location
                    }
                    else {
                        diag.error("segment %d has overlapping fixups at offset 0x%0llX and 0x%0llX", fixup.segIndex, lastFixup->segOffset, fixup.segOffset);
                        setImageInvalid(imageIndex);
                        return;
                    }
                }
            }
        }
        lastFixup = &fixup;
    }

    if ( hasTextRelocs )
        _diskImages[imageIndex].hasTextRelocs = true;

    // there is one ordinal table per image, shared by all segments with fixups in that image
    std::vector<TargetSymbolValue> targetsForImage;

    const bool opcodeLogging = false;
    // calculate SegmentFixupsByPage for each segment
    std::vector<SegmentFixUpBuilder*> builders;
    for (uint32_t segIndex=0, onDiskSegIndex=0; segIndex < _diskImages[imageIndex].segmentsArrayCount; ++segIndex) {
        const binary_format::DiskSegment* diskSeg = (const binary_format::DiskSegment*)&(_segmentPool[_diskImages[imageIndex].segmentsArrayStartIndex+segIndex]);
        SegmentFixUpBuilder* builder = nullptr;
        if ( diskSeg->paddingNotSeg )
            continue;
        if ( diskSeg->filePageCount == 0 ) {
            ++onDiskSegIndex;
            continue;
        }
        if ( diskSeg->permissions & VM_PROT_WRITE ) {
            builder = new SegmentFixUpBuilder(onDiskSegIndex, diskSeg->filePageCount, _pageSize, _is64, fixups, targetsForImage, opcodeLogging);
        }
        else if ( hasTextRelocs && (diskSeg->permissions == (VM_PROT_READ|VM_PROT_EXECUTE)) ) {
            builder = new SegmentFixUpBuilder(onDiskSegIndex, diskSeg->filePageCount, _pageSize, _is64, fixups, targetsForImage, opcodeLogging);
        }
        if ( builder != nullptr ) {
            if ( builder->hasFixups() )
                builders.push_back(builder);
            else
                delete builder;
        }
        ++onDiskSegIndex;
    }

    // build AllFixupsBySegment for image
    _fixupsPool.pad_to_size(4);
    uint32_t startOfFixupsOffset = (uint32_t)_fixupsPool.size();
    size_t headerSize = builders.size() * sizeof(binary_format::AllFixupsBySegment);
    size_t offsetOfSegmentHeaderInBuffer = _fixupsPool.size();
    for (int i=0; i < headerSize; ++i) {
        _fixupsPool.append_byte(0);
    }
    uint32_t entryIndex = 0;
    for (SegmentFixUpBuilder* builder : builders) {
        binary_format::AllFixupsBySegment* entries = (binary_format::AllFixupsBySegment*)(_fixupsPool.start()+offsetOfSegmentHeaderInBuffer);
        entries[entryIndex].segIndex = builder->segIndex();
        entries[entryIndex].offset   = (uint32_t)_fixupsPool.size() - startOfFixupsOffset;
        builder->appendSegmentFixUpMap(_fixupsPool);
        delete builder;
        ++entryIndex;
    }
    _diskImages[imageIndex].fixupsPoolOffset   = (uint32_t)offsetOfSegmentHeaderInBuffer;
    _diskImages[imageIndex].fixupsPoolSegCount = entryIndex;

    // append targetsForImage into group
    size_t start = _targetsPool.size();
    size_t count = targetsForImage.size();
    _diskImages[imageIndex].targetsArrayStartIndex = (uint32_t)start;
    _diskImages[imageIndex].targetsArrayCount      = (uint32_t)count;
    assert(_diskImages[imageIndex].targetsArrayStartIndex == start);
    assert(_diskImages[imageIndex].targetsArrayCount == count);
    _targetsPool.insert(_targetsPool.end(), targetsForImage.begin(), targetsForImage.end());
}


} // namespace launch_cache
} // namespace dyld3