/*
 * Copyright (c) 2017 Apple Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
#include <uuid/uuid.h>
#include <mach/mach.h>
#include <sys/param.h>
#include <sys/sysctl.h>
#include <sys/resource.h>
#include <sys/types.h>
#include <assert.h>
#include <stdio.h>
#include <string.h>
#include <strings.h>

#include <vector>
#include <algorithm>
#include <unordered_set>
#include <unordered_map>

#include "LaunchCacheFormat.h"
#include "LaunchCacheWriter.h"
#include "shared-cache/dyld_cache_format.h"
#include "shared-cache/DyldSharedCache.h"
#include "shared-cache/FileUtils.h"
namespace std {
    template <>
    struct hash<dyld3::launch_cache::binary_format::ImageRef> {
        std::size_t operator()(const dyld3::launch_cache::binary_format::ImageRef& value) const {
            return std::hash<uint16_t>()(value.value());
        }
    };
}
namespace dyld3 {
namespace launch_cache {

static uintptr_t align(uintptr_t value, uintptr_t align)
{
    return (value+align-1) & (-align);
}
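// Example: align(0x1001, 8) == 0x1008 and align(0x1000, 8) == 0x1000.
// Adding (align-1) then masking with -align (the two's complement of a
// power of two) rounds up without a branch; this only works when 'align'
// is a power of two, which is true for every caller in this file.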
//////////////////////////// ImageGroupWriter ////////////////////////////////////////
ImageGroupWriter::ImageGroupWriter(uint32_t groupNum, bool pages16KB, bool is64, bool dylibsExpectedOnDisk, bool mtimeAndInodeAreValid)
    : _isDiskImage(groupNum != 0), _is64(is64), _groupNum(groupNum), _pageSize(pages16KB ? 0x4000 : 0x1000),
      _dylibsExpectedOnDisk(dylibsExpectedOnDisk), _imageFileInfoIsCdHash(!mtimeAndInodeAreValid)
{
}
uint32_t ImageGroupWriter::size() const
{
    binary_format::ImageGroup tempGroup;
    layoutBinary(&tempGroup);
    return tempGroup.stringsPoolOffset + tempGroup.stringsPoolSize;
}
void ImageGroupWriter::layoutBinary(binary_format::ImageGroup* grp) const
{
    grp->imagesEntrySize       = _isDiskImage ? sizeof(binary_format::DiskImage) : sizeof(binary_format::CachedImage);
    grp->groupNum              = _groupNum;
    grp->dylibsExpectedOnDisk  = _dylibsExpectedOnDisk;
    grp->imageFileInfoIsCdHash = _imageFileInfoIsCdHash;

    grp->imagesPoolCount    = imageCount();
    grp->imagesPoolOffset   = sizeof(binary_format::ImageGroup);
    uint32_t imagesPoolSize = grp->imagesEntrySize * grp->imagesPoolCount;

    grp->imageAliasCount    = (uint32_t)_aliases.size();
    grp->imageAliasOffset   = grp->imagesPoolOffset + imagesPoolSize;
    uint32_t imageAliasSize = grp->imageAliasCount * sizeof(binary_format::AliasEntry);

    grp->segmentsPoolCount    = (uint32_t)_segmentPool.size();
    grp->segmentsPoolOffset   = (uint32_t)align(grp->imageAliasOffset + imageAliasSize, 8);
    uint32_t segmentsPoolSize = grp->segmentsPoolCount * sizeof(uint64_t);

    grp->dependentsPoolCount    = (uint32_t)_dependentsPool.size();
    grp->dependentsPoolOffset   = grp->segmentsPoolOffset + segmentsPoolSize;
    uint32_t dependentsPoolSize = grp->dependentsPoolCount * sizeof(binary_format::ImageRef);

    grp->intializerOffsetPoolCount  = (uint32_t)_initializerOffsets.size();
    grp->intializerOffsetPoolOffset = (uint32_t)align(grp->dependentsPoolOffset + dependentsPoolSize, 4);
    uint32_t intializerOffsetSize   = grp->intializerOffsetPoolCount * sizeof(uint32_t);

    grp->intializerListPoolCount    = (uint32_t)_initializerBeforeLists.size();
    grp->intializerListPoolOffset   = grp->intializerOffsetPoolOffset + intializerOffsetSize;
    uint32_t intializerListPoolSize = grp->intializerListPoolCount * sizeof(binary_format::ImageRef);

    grp->targetsPoolCount = (uint32_t)_targetsPool.size();
    grp->targetsOffset    = (uint32_t)align(grp->intializerListPoolOffset + intializerListPoolSize, 8);
    uint32_t targetsSize  = grp->targetsPoolCount * sizeof(TargetSymbolValue);

    grp->fixupsPoolSize = (uint32_t)_fixupsPool.size();
    grp->fixupsOffset   = (uint32_t)align(grp->targetsOffset + targetsSize, 4);

    grp->cachePatchTableCount  = (uint32_t)_patchPool.size();
    grp->cachePatchTableOffset = (uint32_t)align(grp->fixupsOffset + grp->fixupsPoolSize, 4);
    uint32_t patchTableSize    = grp->cachePatchTableCount * sizeof(binary_format::PatchTable);

    grp->cachePatchOffsetsCount  = (uint32_t)_patchLocationPool.size();
    grp->cachePatchOffsetsOffset = grp->cachePatchTableOffset + patchTableSize;
    uint32_t patchOffsetsSize    = grp->cachePatchOffsetsCount * sizeof(binary_format::PatchOffset);

    grp->symbolOverrideTableCount  = (uint32_t)_dyldCacheSymbolOverridePool.size();
    grp->symbolOverrideTableOffset = grp->cachePatchOffsetsOffset + patchOffsetsSize;
    uint32_t symbolOverrideSize    = grp->symbolOverrideTableCount * sizeof(binary_format::DyldCacheOverride);

    grp->imageOverrideTableCount  = (uint32_t)_imageOverridePool.size();
    grp->imageOverrideTableOffset = grp->symbolOverrideTableOffset + symbolOverrideSize;
    uint32_t imageOverrideSize    = grp->imageOverrideTableCount * sizeof(binary_format::ImageRefOverride);

    grp->dofOffsetPoolCount  = (uint32_t)_dofOffsets.size();
    grp->dofOffsetPoolOffset = grp->imageOverrideTableOffset + imageOverrideSize;
    uint32_t dofOffsetSize   = grp->dofOffsetPoolCount * sizeof(uint32_t);

    grp->indirectGroupNumPoolCount  = (uint32_t)_indirectGroupNumPool.size();
    grp->indirectGroupNumPoolOffset = grp->dofOffsetPoolOffset + dofOffsetSize;
    uint32_t indirectGroupNumSize   = grp->indirectGroupNumPoolCount * sizeof(uint32_t);

    grp->stringsPoolSize   = (uint32_t)_stringPool.size();
    grp->stringsPoolOffset = grp->indirectGroupNumPoolOffset + indirectGroupNumSize;
}
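// The group is laid out as one contiguous blob, in this order: ImageGroup
// header, images pool, alias entries, segments pool (8-byte aligned),
// dependents pool, initializer offsets (4-byte aligned), initializer
// lists, targets (8-byte aligned), fixups (4-byte aligned), patch table
// (4-byte aligned), patch offsets, symbol overrides, image overrides,
// DOF offsets, indirect group numbers, and finally the string pool.
// size() above relies on the string pool being the last thing laid out.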
void ImageGroupWriter::finalizeTo(Diagnostics& diag, const std::vector<const BinaryImageGroupData*>& curGroups, binary_format::ImageGroup* grp) const
{
    layoutBinary(grp);
    uint8_t* buffer = (uint8_t*)grp;
    if ( imageCount() > 0 ) {
        uint32_t pad1Size = grp->segmentsPoolOffset - (grp->imageAliasOffset + grp->imageAliasCount * sizeof(binary_format::AliasEntry));
        uint32_t pad2Size = grp->targetsOffset - (grp->intializerListPoolOffset + grp->intializerListPoolCount * sizeof(binary_format::ImageRef));
        memcpy(&buffer[grp->imagesPoolOffset],           &imageByIndex(0),                 grp->imagesEntrySize * grp->imagesPoolCount);
        memcpy(&buffer[grp->imageAliasOffset],           &_aliases[0],                     grp->imageAliasCount * sizeof(binary_format::AliasEntry));
        bzero( &buffer[grp->segmentsPoolOffset-pad1Size], pad1Size);
        memcpy(&buffer[grp->segmentsPoolOffset],         &_segmentPool[0],                 grp->segmentsPoolCount * sizeof(uint64_t));
        memcpy(&buffer[grp->dependentsPoolOffset],       &_dependentsPool[0],              grp->dependentsPoolCount * sizeof(binary_format::ImageRef));
        memcpy(&buffer[grp->intializerListPoolOffset],   &_initializerBeforeLists[0],      grp->intializerListPoolCount * sizeof(binary_format::ImageRef));
        memcpy(&buffer[grp->intializerOffsetPoolOffset], &_initializerOffsets[0],          grp->intializerOffsetPoolCount * sizeof(uint32_t));
        bzero( &buffer[grp->targetsOffset-pad2Size], pad2Size);
        memcpy(&buffer[grp->targetsOffset],              &_targetsPool[0],                 grp->targetsPoolCount * sizeof(TargetSymbolValue));
        memcpy(&buffer[grp->fixupsOffset],               _fixupsPool.start(),              grp->fixupsPoolSize);
        memcpy(&buffer[grp->cachePatchTableOffset],      &_patchPool[0],                   grp->cachePatchTableCount * sizeof(binary_format::PatchTable));
        memcpy(&buffer[grp->cachePatchOffsetsOffset],    &_patchLocationPool[0],           grp->cachePatchOffsetsCount * sizeof(binary_format::PatchOffset));
        memcpy(&buffer[grp->symbolOverrideTableOffset],  &_dyldCacheSymbolOverridePool[0], grp->symbolOverrideTableCount * sizeof(binary_format::DyldCacheOverride));
        memcpy(&buffer[grp->imageOverrideTableOffset],   &_imageOverridePool[0],           grp->imageOverrideTableCount * sizeof(binary_format::ImageRefOverride));
        memcpy(&buffer[grp->dofOffsetPoolOffset],        &_dofOffsets[0],                  grp->dofOffsetPoolCount * sizeof(uint32_t));
        memcpy(&buffer[grp->indirectGroupNumPoolOffset], &_indirectGroupNumPool[0],        grp->indirectGroupNumPoolCount * sizeof(uint32_t));
        memcpy(&buffer[grp->stringsPoolOffset],          &_stringPool[0],                  grp->stringsPoolSize);
    }

    // now that we have a real ImageGroup, we can analyze it to find max load counts for each image
    ImageGroup imGroup(grp);
    std::unordered_set<const BinaryImageData*> allDependents;
    STACK_ALLOC_DYNARRAY(const binary_format::ImageGroup*, curGroups.size()+1, newGroupList);
    for (int i=0; i < curGroups.size(); ++i)
        newGroupList[i] = curGroups[i];
    newGroupList[newGroupList.count()-1] = grp;
    for (uint32_t i=0; i < grp->imagesPoolCount; ++i) {
        Image image = imGroup.image(i);
        if ( image.isInvalid() )
            continue;
        allDependents.clear();
        allDependents.insert(image.binaryData());
        BinaryImageData* imageData = (BinaryImageData*)(buffer + grp->imagesPoolOffset + (i * grp->imagesEntrySize));
        if ( !image.recurseAllDependentImages(newGroupList, allDependents) ) {
            //diag.warning("%s dependents on an invalid dylib", image.path());
            imageData->isInvalid = true;
        }
        imageData->maxLoadCount = (uint32_t)allDependents.size();
    }
}
uint32_t ImageGroupWriter::maxLoadCount(Diagnostics& diag, const std::vector<const BinaryImageGroupData*>& curGroups, binary_format::ImageGroup* grp) const
{
    ImageGroup imGroup(grp);
    std::unordered_set<const BinaryImageData*> allDependents;
    std::vector<const BinaryImageGroupData*> allGroups = curGroups;
    if ( grp->groupNum == 2 )
        allGroups.push_back(grp);
    DynArray<const binary_format::ImageGroup*> groupList(allGroups);
    for (uint32_t i=0; i < grp->imagesPoolCount; ++i) {
        Image image = imGroup.image(i);
        if ( image.isInvalid() )
            continue;
        allDependents.insert(image.binaryData());
        BinaryImageData* imageData = (BinaryImageData*)((char*)grp + grp->imagesPoolOffset + (i * grp->imagesEntrySize));
        if ( !image.recurseAllDependentImages(groupList, allDependents) ) {
            //diag.warning("%s dependents on an invalid dylib", image.path());
            imageData->isInvalid = true;
        }
    }
    return (uint32_t)allDependents.size();
}
void ImageGroupWriter::setImageCount(uint32_t count)
{
    if ( _isDiskImage ) {
        _diskImages.resize(count);
        bzero(&_diskImages[0], count*sizeof(binary_format::DiskImage));
    }
    else {
        _images.resize(count);
        bzero(&_images[0], count*sizeof(binary_format::CachedImage));
    }

    int32_t offset = 0 - (int32_t)sizeof(binary_format::ImageGroup);
    for (uint32_t i=0; i < count; ++i) {
        binary_format::Image& img = imageByIndex(i);
        img.isDiskImage  = _isDiskImage;
        img.has16KBpages = (_pageSize == 0x4000);
        img.groupOffset  = offset;
        if ( _isDiskImage )
            offset -= sizeof(binary_format::DiskImage);
        else
            offset -= sizeof(binary_format::CachedImage);
    }
}
uint32_t ImageGroupWriter::imageCount() const
{
    if ( _isDiskImage )
        return (uint32_t)_diskImages.size();
    else
        return (uint32_t)_images.size();
}
binary_format::Image& ImageGroupWriter::imageByIndex(uint32_t imageIndex)
{
    assert(imageIndex < imageCount());
    if ( _isDiskImage )
        return _diskImages[imageIndex];
    else
        return _images[imageIndex];
}

const binary_format::Image& ImageGroupWriter::imageByIndex(uint32_t imageIndex) const
{
    assert(imageIndex < imageCount());
    if ( _isDiskImage )
        return _diskImages[imageIndex];
    else
        return _images[imageIndex];
}
bool ImageGroupWriter::isInvalid(uint32_t imageIndex) const
{
    return imageByIndex(imageIndex).isInvalid;
}

void ImageGroupWriter::setImageInvalid(uint32_t imageIndex)
{
    imageByIndex(imageIndex).isInvalid = true;
}
uint32_t ImageGroupWriter::addIndirectGroupNum(uint32_t groupNum)
{
    auto pos = _indirectGroupNumPoolExisting.find(groupNum);
    if ( pos != _indirectGroupNumPoolExisting.end() )
        return pos->second;
    uint32_t startOffset = (uint32_t)_indirectGroupNumPool.size();
    _indirectGroupNumPool.push_back(groupNum);
    _indirectGroupNumPoolExisting[groupNum] = startOffset;  // key by groupNum so the find() above can dedup
    return startOffset;
}
uint32_t ImageGroupWriter::addString(const char* str)
{
    auto pos = _stringPoolExisting.find(str);
    if ( pos != _stringPoolExisting.end() )
        return pos->second;
    uint32_t startOffset = (uint32_t)_stringPool.size();
    size_t size = strlen(str) + 1;
    _stringPool.insert(_stringPool.end(), str, &str[size]);
    _stringPoolExisting[str] = startOffset;
    return startOffset;
}
void ImageGroupWriter::alignStringPool()
{
    while ( (_stringPool.size() % 4) != 0 )
        _stringPool.push_back('\0');
}
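// addString() interns strings: adding the same path twice yields the same
// pool offset. alignStringPool() pads with NULs so the pool ends on a
// 4-byte boundary, keeping anything appended after it aligned.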
void ImageGroupWriter::setImagePath(uint32_t imageIndex, const char* path)
{
    binary_format::Image& image = imageByIndex(imageIndex);
    image.pathPoolOffset = addString(path);
    image.pathHash       = ImageGroup::hashFunction(path);
}
void ImageGroupWriter::addImageAliasPath(uint32_t imageIndex, const char* anAlias)
{
    binary_format::AliasEntry entry;
    entry.aliasHash               = ImageGroup::hashFunction(anAlias);
    entry.imageIndexInGroup       = imageIndex;
    entry.aliasOffsetInStringPool = addString(anAlias);
    _aliases.push_back(entry);
}
void ImageGroupWriter::setImageUUID(uint32_t imageIndex, const uuid_t uuid)
{
    memcpy(imageByIndex(imageIndex).uuid, uuid, sizeof(uuid_t));
}
void ImageGroupWriter::setImageHasObjC(uint32_t imageIndex, bool value)
{
    imageByIndex(imageIndex).hasObjC = value;
}

void ImageGroupWriter::setImageIsBundle(uint32_t imageIndex, bool value)
{
    imageByIndex(imageIndex).isBundle = value;
}

void ImageGroupWriter::setImageHasWeakDefs(uint32_t imageIndex, bool value)
{
    imageByIndex(imageIndex).hasWeakDefs = value;
}

void ImageGroupWriter::setImageMayHavePlusLoads(uint32_t imageIndex, bool value)
{
    imageByIndex(imageIndex).mayHavePlusLoads = value;
}

void ImageGroupWriter::setImageNeverUnload(uint32_t imageIndex, bool value)
{
    imageByIndex(imageIndex).neverUnload = value;
}

void ImageGroupWriter::setImageMustBeThisDir(uint32_t imageIndex, bool value)
{
    imageByIndex(imageIndex).cwdSameAsThis = value;
}

void ImageGroupWriter::setImageIsPlatformBinary(uint32_t imageIndex, bool value)
{
    imageByIndex(imageIndex).isPlatformBinary = value;
}

void ImageGroupWriter::setImageOverridableDylib(uint32_t imageIndex, bool value)
{
    imageByIndex(imageIndex).overridableDylib = value;
}
void ImageGroupWriter::setImageFileMtimeAndInode(uint32_t imageIndex, uint64_t mTime, uint64_t inode)
{
    imageByIndex(imageIndex).fileInfo.statInfo.mtime = mTime;
    imageByIndex(imageIndex).fileInfo.statInfo.inode = inode;
    assert(!_imageFileInfoIsCdHash);
}
void ImageGroupWriter::setImageCdHash(uint32_t imageIndex, uint8_t cdHash[20])
{
    // only the first 16 bytes of the code-directory hash are stored
    memcpy(imageByIndex(imageIndex).fileInfo.cdHash16.bytes, cdHash, 16);
    assert(_imageFileInfoIsCdHash);
}
void ImageGroupWriter::setImageIsEncrypted(uint32_t imageIndex, bool value)
{
    imageByIndex(imageIndex).isEncrypted = value;
}

void ImageGroupWriter::setImageMaxLoadCount(uint32_t imageIndex, uint32_t count)
{
    imageByIndex(imageIndex).maxLoadCount = count;
}
void ImageGroupWriter::setImageFairPlayRange(uint32_t imageIndex, uint32_t offset, uint32_t size)
{
    assert(imageIndex < imageCount());
    assert(_isDiskImage);
    binary_format::DiskImage& image = _diskImages[imageIndex];
    if ( image.has16KBpages ) {
        assert((offset & 0x3FFF) == 0);
        assert((size & 0x3FFF) == 0);
    }
    else {
        assert((offset & 0xFFF) == 0);
        assert((size & 0xFFF) == 0);
    }
    assert(offset < (_pageSize*16));
    image.fairPlayTextStartPage = offset / _pageSize;
    image.fairPlayTextPageCount = size / _pageSize;
}
void ImageGroupWriter::setImageInitializerOffsets(uint32_t imageIndex, const std::vector<uint32_t>& offsets)
{
    binary_format::Image& image = imageByIndex(imageIndex);
    image.initOffsetsArrayStartIndex = _initializerOffsets.size();
    image.initOffsetsArrayCount      = offsets.size();
    _initializerOffsets.insert(_initializerOffsets.end(), offsets.begin(), offsets.end());
}
void ImageGroupWriter::setImageDOFOffsets(uint32_t imageIndex, const std::vector<uint32_t>& offsets)
{
    binary_format::Image& image = imageByIndex(imageIndex);
    image.dofOffsetsArrayStartIndex = _dofOffsets.size();
    image.dofOffsetsArrayCount      = offsets.size();
    _dofOffsets.insert(_dofOffsets.end(), offsets.begin(), offsets.end());
}
uint32_t ImageGroupWriter::addUniqueInitList(const std::vector<binary_format::ImageRef>& initBefore)
{
    // see if this initBefore list already exists in pool
    if ( _initializerBeforeLists.size() > initBefore.size() ) {
        size_t cmpLen = initBefore.size()*sizeof(binary_format::ImageRef);
        size_t end    = _initializerBeforeLists.size() - initBefore.size();
        for (uint32_t i=0; i < end; ++i) {
            if ( memcmp(&initBefore[0], &_initializerBeforeLists[i], cmpLen) == 0 )
                return i;
        }
    }
    uint32_t result = (uint32_t)_initializerBeforeLists.size();
    _initializerBeforeLists.insert(_initializerBeforeLists.end(), initBefore.begin(), initBefore.end());
    return result;
}
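// The search above treats _initializerBeforeLists as one flat array and
// memcmp's the new list against every starting index, so a list that
// already appears anywhere in the pool (including as a sub-sequence of a
// longer list) is shared rather than appended again.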
void ImageGroupWriter::setImageInitBefore(uint32_t imageIndex, const std::vector<binary_format::ImageRef>& initBefore)
{
    binary_format::Image& image = imageByIndex(imageIndex);
    image.initBeforeArrayStartIndex = addUniqueInitList(initBefore);
    image.initBeforeArrayCount      = initBefore.size();
}
void ImageGroupWriter::setImageSliceOffset(uint32_t imageIndex, uint64_t fileOffset)
{
    assert(imageIndex < imageCount());
    assert(_isDiskImage);
    binary_format::DiskImage& image = _diskImages[imageIndex];
    image.sliceOffsetIn4K = (uint32_t)(fileOffset / 4096);
}
void ImageGroupWriter::setImageCodeSignatureLocation(uint32_t imageIndex, uint32_t fileOffset, uint32_t size)
{
    assert(imageIndex < imageCount());
    assert(_isDiskImage);
    binary_format::DiskImage& image = _diskImages[imageIndex];
    image.codeSignFileOffset = fileOffset;
    image.codeSignFileSize   = size;
}
void ImageGroupWriter::setImageDependentsCount(uint32_t imageIndex, uint32_t count)
{
    binary_format::Image& image = imageByIndex(imageIndex);
    image.dependentsArrayStartIndex = _dependentsPool.size();
    image.dependentsArrayCount      = count;
    _dependentsPool.resize(_dependentsPool.size() + count);
}
void ImageGroupWriter::setImageDependent(uint32_t imageIndex, uint32_t depIndex, binary_format::ImageRef dependent)
{
    binary_format::Image& image = imageByIndex(imageIndex);
    assert(depIndex < image.dependentsArrayCount);
    _dependentsPool[image.dependentsArrayStartIndex + depIndex] = dependent;
}
uint32_t ImageGroupWriter::imageDependentsCount(uint32_t imageIndex) const
{
    return imageByIndex(imageIndex).dependentsArrayCount;
}

binary_format::ImageRef ImageGroupWriter::imageDependent(uint32_t imageIndex, uint32_t depIndex) const
{
    const binary_format::Image& image = imageByIndex(imageIndex);
    assert(depIndex < image.dependentsArrayCount);
    return _dependentsPool[image.dependentsArrayStartIndex + depIndex];
}
void ImageGroupWriter::setImageSegments(uint32_t imageIndex, MachOParser& imageParser, uint64_t cacheUnslideBaseAddress)
{
    if ( _isDiskImage ) {
        __block uint32_t totalPageCount    = 0;
        __block uint32_t lastFileOffsetEnd = 0;
        __block uint64_t lastVmAddrEnd     = 0;
        __block std::vector<binary_format::DiskSegment> diskSegments;
        diskSegments.reserve(8);
        imageParser.forEachSegment(^(const char* segName, uint32_t fileOffset, uint32_t fileSize, uint64_t vmAddr, uint64_t vmSize, uint8_t protections, bool& stop) {
            if ( (fileOffset != 0) && (fileOffset != lastFileOffsetEnd) ) {
                binary_format::DiskSegment filePadding;
                filePadding.filePageCount = (fileOffset - lastFileOffsetEnd)/_pageSize;
                filePadding.vmPageCount   = 0;
                filePadding.permissions   = 0;
                filePadding.paddingNotSeg = 1;
                diskSegments.push_back(filePadding);
            }
            if ( (lastVmAddrEnd != 0) && (vmAddr != lastVmAddrEnd) ) {
                binary_format::DiskSegment vmPadding;
                vmPadding.filePageCount = 0;
                vmPadding.vmPageCount   = (vmAddr - lastVmAddrEnd)/_pageSize;
                vmPadding.permissions   = 0;
                vmPadding.paddingNotSeg = 1;
                diskSegments.push_back(vmPadding);
                totalPageCount += vmPadding.vmPageCount;
            }
            binary_format::DiskSegment segInfo;
            segInfo.filePageCount = (fileSize+_pageSize-1)/_pageSize;
            segInfo.vmPageCount   = (vmSize+_pageSize-1)/_pageSize;
            segInfo.permissions   = protections & 7;
            segInfo.paddingNotSeg = 0;
            diskSegments.push_back(segInfo);
            totalPageCount += segInfo.vmPageCount;
            if ( fileSize != 0 )
                lastFileOffsetEnd = fileOffset + fileSize;
            if ( vmSize != 0 )
                lastVmAddrEnd = vmAddr + vmSize;
        });
        binary_format::Image& image = imageByIndex(imageIndex);
        image.segmentsArrayStartIndex = _segmentPool.size();
        image.segmentsArrayCount      = diskSegments.size();
        _segmentPool.insert(_segmentPool.end(), (uint64_t*)&diskSegments[0], (uint64_t*)&diskSegments[image.segmentsArrayCount]);
        _diskImages[imageIndex].totalVmPages = totalPageCount;
    }
    else {
        binary_format::Image& image = imageByIndex(imageIndex);
        image.segmentsArrayStartIndex = _segmentPool.size();
        image.segmentsArrayCount      = imageParser.segmentCount();
        _segmentPool.resize(_segmentPool.size() + image.segmentsArrayCount);
        __block uint32_t segIndex = 0;
        imageParser.forEachSegment(^(const char* segName, uint32_t fileOffset, uint32_t fileSize, uint64_t vmAddr, uint64_t vmSize, uint8_t protections, bool& stop) {
            binary_format::DyldCacheSegment seg = { (uint32_t)(vmAddr-cacheUnslideBaseAddress), (uint32_t)vmSize, protections };
            _segmentPool[image.segmentsArrayStartIndex + segIndex] = *((uint64_t*)&seg);
            ++segIndex;
        });
    }
}
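// Example of the disk-image encoding above, assuming 16KB pages: a segment
// with fileSize=0x6800 and vmSize=0x8000 becomes one DiskSegment with
// filePageCount=2 and vmPageCount=2 (both sizes rounded up to whole
// pages). Gaps between segments become DiskSegments with paddingNotSeg=1,
// which keeps file offsets and vm addresses reconstructible from page
// counts alone.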
void ImageGroupWriter::setImagePatchLocations(uint32_t imageIndex, uint32_t funcVmOffset, const std::unordered_set<uint32_t>& patchLocations)
{
    assert(imageIndex < imageCount());
    binary_format::CachedImage& image = _images[imageIndex];
    if ( image.patchStartIndex == 0 ) {
        image.patchStartIndex = (uint32_t)_patchPool.size();
        image.patchCount      = 0;
    }
    else {
        assert(image.patchStartIndex + image.patchCount == _patchPool.size());
    }

    binary_format::PatchTable entry = { funcVmOffset, (uint32_t)_patchLocationPool.size() };
    for (uint32_t loc : patchLocations) {
        _patchLocationPool.push_back(*((binary_format::PatchOffset*)&loc));
    }
    _patchLocationPool.back().last = true;
    _patchPool.push_back(entry);
    _images[imageIndex].patchCount++;
}
void ImageGroupWriter::setGroupCacheOverrides(const std::vector<binary_format::DyldCacheOverride>& cacheOverrides)
{
    _dyldCacheSymbolOverridePool = cacheOverrides;
}
void ImageGroupWriter::addImageIsOverride(binary_format::ImageRef standardDylibRef, binary_format::ImageRef overrideDylibRef)
{
    _imageOverridePool.push_back({standardDylibRef, overrideDylibRef});
}
class SegmentFixUpBuilder
{
public:
                    SegmentFixUpBuilder(uint32_t segIndex, uint32_t dataSegPageCount, uint32_t pageSize, bool is64,
                                        const std::vector<ImageGroupWriter::FixUp>& fixups,
                                        std::vector<TargetSymbolValue>& targetsForImage, bool log);

    bool            hasFixups() { return _hasFixups; }
    uint32_t        segIndex()  { return _segIndex; }
    void            appendSegmentFixUpMap(ContentBuffer&);

private:
    struct TmpOpcode {
        binary_format::FixUpOpcode  op;
        uint8_t                     repeatOpcodeCount;
        uint16_t                    count;

        bool operator!=(const TmpOpcode& rhs) const {
            return ((op != rhs.op) || (count != rhs.count) || (repeatOpcodeCount != rhs.repeatOpcodeCount));
        }
    };

    ContentBuffer   makeFixupOpcodesForPage(uint32_t pageStartSegmentOffset, const ImageGroupWriter::FixUp* start,
                                            const ImageGroupWriter::FixUp* end);
    uint32_t        getOrdinalForTarget(TargetSymbolValue);
    void            expandOpcodes(const std::vector<TmpOpcode>& opcodes, uint8_t page[0x4000], uint32_t& offset, uint32_t& ordinal);
    void            expandOpcodes(const std::vector<TmpOpcode>& opcodes, uint8_t page[0x4000]);
    bool            samePageContent(const uint8_t page1[], const uint8_t page2[]);
    void            printOpcodes(const char* prefix, const std::vector<TmpOpcode> opcodes);
    void            printOpcodes(const char* prefix, bool printOffset, const TmpOpcode opcodes[], size_t opcodesLen, uint32_t& offset);
    uint32_t        opcodeEncodingSize(const std::vector<TmpOpcode>& opcodes);

    const bool                        _is64;
    const bool                        _log;
    bool                              _hasFixups;
    const uint32_t                    _segIndex;
    const uint32_t                    _dataSegPageCount;
    const uint32_t                    _pageSize;
    std::vector<TargetSymbolValue>&   _targets;
    std::vector<ContentBuffer>        _opcodesByPage;
};
SegmentFixUpBuilder::SegmentFixUpBuilder(uint32_t segIndex, uint32_t segPageCount, uint32_t pageSize, bool is64,
                                         const std::vector<ImageGroupWriter::FixUp>& fixups,
                                         std::vector<TargetSymbolValue>& targetsForImage, bool log)
    : _is64(is64), _log(log), _hasFixups(false), _segIndex(segIndex), _dataSegPageCount(segPageCount), _pageSize(pageSize), _targets(targetsForImage)
{
    //fprintf(stderr, "SegmentFixUpBuilder(segIndex=%d, segPageCount=%d)\n", segIndex, segPageCount);
    _targets.push_back(TargetSymbolValue::makeInvalid());   // ordinal zero reserved to mean "add slide"
    _opcodesByPage.resize(segPageCount);

    size_t startFixupIndex = 0;
    for (uint32_t pageIndex=0; pageIndex < segPageCount; ++pageIndex) {
        uint32_t pageStartOffset = pageIndex*_pageSize;
        uint32_t pageEndOffset   = pageStartOffset+_pageSize;
        // find first index in this page
        while ( (startFixupIndex < fixups.size()) && ((fixups[startFixupIndex].segIndex != segIndex) || (fixups[startFixupIndex].segOffset < pageStartOffset)) )
            ++startFixupIndex;
        // find first index beyond this page
        size_t endFixupIndex = startFixupIndex;
        while ( (endFixupIndex < fixups.size()) && (fixups[endFixupIndex].segIndex == segIndex) && (fixups[endFixupIndex].segOffset < pageEndOffset) )
            ++endFixupIndex;
        // create opcodes for fixups on this page
        _opcodesByPage[pageIndex] = makeFixupOpcodesForPage(pageStartOffset, &fixups[startFixupIndex], &fixups[endFixupIndex]);
        startFixupIndex = endFixupIndex;
    }
}
uint32_t SegmentFixUpBuilder::getOrdinalForTarget(TargetSymbolValue target)
{
    uint32_t ordinal = 0;
    for (const TargetSymbolValue& entry : _targets) {
        if ( entry == target )
            return ordinal;
        ++ordinal;
    }
    _targets.push_back(target);
    return ordinal;
}
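// Ordinal 0 is the TargetSymbolValue::makeInvalid() entry pushed in the
// constructor, so real bind targets always get ordinals >= 1. The linear
// search here is acceptable because the per-image target table is
// expected to stay small.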
void SegmentFixUpBuilder::appendSegmentFixUpMap(ContentBuffer& buffer)
{
    std::vector<uint32_t> offsets;
    uint32_t curOffset = sizeof(binary_format::SegmentFixupsByPage)-4 + _dataSegPageCount*4;
    for (auto& opcodes : _opcodesByPage) {
        if ( opcodes.size() == 0 ) {
            offsets.push_back(0);
        }
        else {
            offsets.push_back(curOffset);
            curOffset += opcodes.size();
        }
    }
    uint32_t totalSize = curOffset;

    // write header
    buffer.append_uint32(totalSize);          // SegmentFixupsByPage.size
    buffer.append_uint32(_pageSize);          // SegmentFixupsByPage.pageSize
    buffer.append_uint32(_dataSegPageCount);  // SegmentFixupsByPage.pageCount
    for (uint32_t i=0; i < _dataSegPageCount; ++i) {
        buffer.append_uint32(offsets[i]);     // SegmentFixupsByPage.pageInfoOffsets[i]
    }
    // write each page's opcode stream
    for (uint32_t i=0; i < offsets.size(); ++i) {
        buffer.append_buffer(_opcodesByPage[i]);
    }
}
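// Wire format written above (SegmentFixupsByPage):
//   uint32_t size;                       // total bytes, including header
//   uint32_t pageSize;                   // 0x1000 or 0x4000
//   uint32_t pageCount;
//   uint32_t pageInfoOffsets[pageCount]; // 0 => page has no fixups
//   ...per-page opcode streams at the recorded offsets...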
void SegmentFixUpBuilder::expandOpcodes(const std::vector<TmpOpcode>& opcodes, uint8_t page[])
{
    uint32_t offset  = 0;
    uint32_t ordinal = 0;
    bzero(page, _pageSize);
    expandOpcodes(opcodes, page, offset, ordinal);
}
void SegmentFixUpBuilder::expandOpcodes(const std::vector<TmpOpcode>& opcodes, uint8_t page[], uint32_t& offset, uint32_t& ordinal)
{
    for (int i=0; i < opcodes.size(); ++i) {
        assert(offset < _pageSize);
        TmpOpcode tmp = opcodes[i];
        switch ( tmp.op ) {
            case binary_format::FixUpOpcode::bind64:
                *(uint64_t*)(&page[offset]) = ordinal;
                offset += 8;
                break;
            case binary_format::FixUpOpcode::bind32:
                *(uint32_t*)(&page[offset]) = ordinal;
                offset += 4;
                break;
            case binary_format::FixUpOpcode::rebase64:
                *(uint64_t*)(&page[offset]) = 0x1122334455667788;
                offset += 8;
                break;
            case binary_format::FixUpOpcode::rebase32:
                *(uint32_t*)(&page[offset]) = 0x23452345;
                offset += 4;
                break;
            case binary_format::FixUpOpcode::rebaseText32:
                *(uint32_t*)(&page[offset]) = 0x56785678;
                offset += 4;
                break;
            case binary_format::FixUpOpcode::bindText32:
                *(uint32_t*)(&page[offset]) = 0x98769876;
                offset += 4;
                break;
            case binary_format::FixUpOpcode::bindTextRel32:
                *(uint32_t*)(&page[offset]) = 0x34563456;
                offset += 4;
                break;
            case binary_format::FixUpOpcode::bindImportJmp32:
                *(uint32_t*)(&page[offset]) = 0x44556677;
                offset += 4;
                break;
            case binary_format::FixUpOpcode::done:
                break;
            case binary_format::FixUpOpcode::setPageOffset:
                offset = tmp.count;
                break;
            case binary_format::FixUpOpcode::incPageOffset:
                offset += (tmp.count*4);
                break;
            case binary_format::FixUpOpcode::setOrdinal:
                ordinal = tmp.count;
                break;
            case binary_format::FixUpOpcode::incOrdinal:
                ordinal += tmp.count;
                break;
            case binary_format::FixUpOpcode::repeat: {
                std::vector<TmpOpcode> pattern;
                for (int j=0; j < tmp.repeatOpcodeCount; ++j) {
                    pattern.push_back(opcodes[i+j+1]);
                }
                for (int j=0; j < tmp.count; ++j) {
                    expandOpcodes(pattern, page, offset, ordinal);
                }
                i += tmp.repeatOpcodeCount;
            } break;
        }
    }
}
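// expandOpcodes() is only used for verification: it "runs" an opcode
// stream against a scratch page, writing recognizable sentinel values
// (e.g. 0x1122334455667788 for rebase64, the bind ordinal for binds) so
// that two opcode streams can be compared by comparing the page content
// they each produce.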
uint32_t SegmentFixUpBuilder::opcodeEncodingSize(const std::vector<TmpOpcode>& opcodes)
{
    uint32_t size = 0;
    for (int i=0; i < opcodes.size(); ++i) {
        switch ( opcodes[i].op ) {
            case binary_format::FixUpOpcode::bind64:
            case binary_format::FixUpOpcode::bind32:
            case binary_format::FixUpOpcode::rebase64:
            case binary_format::FixUpOpcode::rebase32:
            case binary_format::FixUpOpcode::rebaseText32:
            case binary_format::FixUpOpcode::bindText32:
            case binary_format::FixUpOpcode::bindTextRel32:
            case binary_format::FixUpOpcode::bindImportJmp32:
            case binary_format::FixUpOpcode::done:
                ++size;
                break;
            case binary_format::FixUpOpcode::setPageOffset:
            case binary_format::FixUpOpcode::incPageOffset:
            case binary_format::FixUpOpcode::setOrdinal:
            case binary_format::FixUpOpcode::incOrdinal:
                ++size;
                if ( opcodes[i].count >= 16 )
                    size += ContentBuffer::uleb128_size(opcodes[i].count);
                break;
            case binary_format::FixUpOpcode::repeat: {
                ++size;
                size += ContentBuffer::uleb128_size(opcodes[i].count);
                // hoist the count so advancing i below does not change the loop bound
                const int repeatOpcodeCount = opcodes[i].repeatOpcodeCount;
                std::vector<TmpOpcode> pattern;
                for (int j=0; j < repeatOpcodeCount; ++j) {
                    pattern.push_back(opcodes[++i]);
                }
                size += opcodeEncodingSize(pattern);
            } break;
        }
    }
    return size;
}
bool SegmentFixUpBuilder::samePageContent(const uint8_t page1[], const uint8_t page2[])
{
    bool result = true;
    if (memcmp(page1, page2, _pageSize) != 0) {
        if ( _is64 ) {
            const uint64_t* p1 = (uint64_t*)page1;
            const uint64_t* p2 = (uint64_t*)page2;
            for (int i=0; i < _pageSize/8; ++i) {
                if ( p1[i] != p2[i] ) {
                    fprintf(stderr, "page1[0x%03X] = 0x%016llX, page2[0x%03X] = 0x%016llX\n", i*8, p1[i], i*8, p2[i]);
                    result = false;
                }
            }
        }
        else {
            const uint32_t* p1 = (uint32_t*)page1;
            const uint32_t* p2 = (uint32_t*)page2;
            for (int i=0; i < _pageSize/4; ++i) {
                if ( p1[i] != p2[i] ) {
                    fprintf(stderr, "page1[0x%03X] = 0x%08X, page2[0x%03X] = 0x%08X\n", i*4, p1[i], i*4, p2[i]);
                    result = false;
                }
            }
        }
    }
    return result;
}
void SegmentFixUpBuilder::printOpcodes(const char* prefix, const std::vector<TmpOpcode> opcodes)
{
    uint32_t offset = 0;
    printOpcodes(prefix, true, &opcodes[0], opcodes.size(), offset);
}
void SegmentFixUpBuilder::printOpcodes(const char* prefix, bool printOffset, const TmpOpcode opcodes[], size_t opcodesLen, uint32_t& offset)
{
    for (int i=0; i < opcodesLen; ++i) {
        TmpOpcode tmp = opcodes[i];
        if ( printOffset )
            fprintf(stderr, "%s offset=0x%04X: ", prefix, offset);
        else
            fprintf(stderr, "%s ", prefix);
        switch ( tmp.op ) {
            case binary_format::FixUpOpcode::bind64:
                fprintf(stderr, "bind64\n");
                offset += 8;
                break;
            case binary_format::FixUpOpcode::bind32:
                fprintf(stderr, "bind32\n");
                offset += 4;
                break;
            case binary_format::FixUpOpcode::rebase64:
                fprintf(stderr, "rebase64\n");
                offset += 8;
                break;
            case binary_format::FixUpOpcode::rebase32:
                fprintf(stderr, "rebase32\n");
                offset += 4;
                break;
            case binary_format::FixUpOpcode::rebaseText32:
                fprintf(stderr, "rebaseText32\n");
                offset += 4;
                break;
            case binary_format::FixUpOpcode::bindText32:
                fprintf(stderr, "bindText32\n");
                offset += 4;
                break;
            case binary_format::FixUpOpcode::bindTextRel32:
                fprintf(stderr, "bindTextRel32\n");
                offset += 4;
                break;
            case binary_format::FixUpOpcode::bindImportJmp32:
                fprintf(stderr, "bindJmpRel32\n");
                offset += 4;
                break;
            case binary_format::FixUpOpcode::done:
                fprintf(stderr, "done\n");
                break;
            case binary_format::FixUpOpcode::setPageOffset:
                fprintf(stderr, "setPageOffset(%d)\n", tmp.count);
                offset = tmp.count;
                break;
            case binary_format::FixUpOpcode::incPageOffset:
                fprintf(stderr, "incPageOffset(%d)\n", tmp.count);
                offset += (tmp.count*4);
                break;
            case binary_format::FixUpOpcode::setOrdinal:
                fprintf(stderr, "setOrdinal(%d)\n", tmp.count);
                break;
            case binary_format::FixUpOpcode::incOrdinal:
                fprintf(stderr, "incOrdinal(%d)\n", tmp.count);
                break;
            case binary_format::FixUpOpcode::repeat: {
                char morePrefix[128];
                strcpy(morePrefix, prefix);
                strcat(morePrefix, " ");
                uint32_t prevOffset = offset;
                fprintf(stderr, "repeat(%d times, next %d opcodes)\n", tmp.count, tmp.repeatOpcodeCount);
                printOpcodes(morePrefix, false, &opcodes[i+1], tmp.repeatOpcodeCount, offset);
                i += tmp.repeatOpcodeCount;
                uint32_t repeatDelta = (offset-prevOffset)*(tmp.count-1);
                offset += repeatDelta;
            } break;
        }
    }
}
ContentBuffer SegmentFixUpBuilder::makeFixupOpcodesForPage(uint32_t pageStartSegmentOffset, const ImageGroupWriter::FixUp* start, const ImageGroupWriter::FixUp* end)
{
    //fprintf(stderr, "  makeFixupOpcodesForPage(segOffset=0x%06X, startFixup=%p, endFixup=%p)\n", pageStartSegmentOffset, start, end);
    std::vector<TmpOpcode> tmpOpcodes;
    const uint32_t pointerSize = (_is64 ? 8 : 4);
    uint32_t offset  = pageStartSegmentOffset;
    uint32_t ordinal = 0;
    if ( start != end )
        _hasFixups = true;      // any fixups on this page mean this segment needs a fixup map
    const ImageGroupWriter::FixUp* lastFixup = nullptr;
    for (const ImageGroupWriter::FixUp* f=start; f < end; ++f) {
        // ignore double bind at same address (ld64 bug)
        if ( lastFixup && (lastFixup->segOffset == f->segOffset) )
            continue;
        // add opcode to adjust current offset if needed
        if ( f->segOffset != offset ) {
            if ( ((f->segOffset % 4) != 0) || ((offset % 4) != 0) ) {
                // misaligned pointers use the bigger setPageOffset opcode
                tmpOpcodes.push_back({binary_format::FixUpOpcode::setPageOffset, 0, (uint16_t)(f->segOffset-pageStartSegmentOffset)});
            }
            else {
                uint32_t delta4 = (uint32_t)(f->segOffset - offset)/4;
                assert(delta4*4 < _pageSize);
                tmpOpcodes.push_back({binary_format::FixUpOpcode::incPageOffset, 0, (uint16_t)delta4});
            }
            offset = (uint32_t)f->segOffset;
        }
        uint32_t nextOrd = 0;
        switch ( f->type ) {
            case ImageGroupWriter::FixupType::rebase:
                tmpOpcodes.push_back({_is64 ? binary_format::FixUpOpcode::rebase64 : binary_format::FixUpOpcode::rebase32, 0, 0});
                offset += pointerSize;
                break;
            case ImageGroupWriter::FixupType::pointerLazyBind:
            case ImageGroupWriter::FixupType::pointerBind:
                //assert(f->target.imageIndex == binary_format::OrdinalEntry::kImageIndexDyldSharedCache);
                nextOrd = getOrdinalForTarget(f->target);
                if ( nextOrd != ordinal ) {
                    if ( (nextOrd > ordinal) && (nextOrd < (ordinal+31)) ) {
                        tmpOpcodes.push_back({binary_format::FixUpOpcode::incOrdinal, 0, (uint16_t)(nextOrd-ordinal)});
                    }
                    else {
                        tmpOpcodes.push_back({binary_format::FixUpOpcode::setOrdinal, 0, (uint16_t)nextOrd});
                    }
                    ordinal = nextOrd;
                }
                tmpOpcodes.push_back({_is64 ? binary_format::FixUpOpcode::bind64 : binary_format::FixUpOpcode::bind32, 0, 0});
                offset += pointerSize;
                break;
            case ImageGroupWriter::FixupType::rebaseText:
                tmpOpcodes.push_back({binary_format::FixUpOpcode::rebaseText32, 0, 0});
                offset += pointerSize;
                break;
            case ImageGroupWriter::FixupType::bindText:
                nextOrd = getOrdinalForTarget(f->target);
                if ( nextOrd != ordinal ) {
                    if ( (nextOrd > ordinal) && (nextOrd < (ordinal+31)) ) {
                        tmpOpcodes.push_back({binary_format::FixUpOpcode::incOrdinal, 0, (uint16_t)(nextOrd-ordinal)});
                    }
                    else {
                        tmpOpcodes.push_back({binary_format::FixUpOpcode::setOrdinal, 0, (uint16_t)nextOrd});
                    }
                    ordinal = nextOrd;
                }
                tmpOpcodes.push_back({binary_format::FixUpOpcode::bindText32, 0, 0});
                offset += pointerSize;
                break;
            case ImageGroupWriter::FixupType::bindTextRel:
                nextOrd = getOrdinalForTarget(f->target);
                if ( nextOrd != ordinal ) {
                    if ( (nextOrd > ordinal) && (nextOrd < (ordinal+31)) ) {
                        tmpOpcodes.push_back({binary_format::FixUpOpcode::incOrdinal, 0, (uint16_t)(nextOrd-ordinal)});
                    }
                    else {
                        tmpOpcodes.push_back({binary_format::FixUpOpcode::setOrdinal, 0, (uint16_t)nextOrd});
                    }
                    ordinal = nextOrd;
                }
                tmpOpcodes.push_back({binary_format::FixUpOpcode::bindTextRel32, 0, 0});
                offset += pointerSize;
                break;
            case ImageGroupWriter::FixupType::bindImportJmpRel:
                nextOrd = getOrdinalForTarget(f->target);
                if ( nextOrd != ordinal ) {
                    if ( (nextOrd > ordinal) && (nextOrd < (ordinal+31)) ) {
                        tmpOpcodes.push_back({binary_format::FixUpOpcode::incOrdinal, 0, (uint16_t)(nextOrd-ordinal)});
                    }
                    else {
                        tmpOpcodes.push_back({binary_format::FixUpOpcode::setOrdinal, 0, (uint16_t)nextOrd});
                    }
                    ordinal = nextOrd;
                }
                tmpOpcodes.push_back({binary_format::FixUpOpcode::bindImportJmp32, 0, 0});
                offset += pointerSize;
                break;
            case ImageGroupWriter::FixupType::ignore:
                assert(0 && "ignore fixup types should have been removed");
                break;
        }
        lastFixup = f;
    }

    // expand opcodes to capture the page content they produce
    uint8_t firstExpansion[0x4010]; // larger than 16KB to handle unaligned pointers
    expandOpcodes(tmpOpcodes, firstExpansion);

    if (_log) printOpcodes("start", tmpOpcodes);

    // compress runs of opcodes into repeat opcodes, trying pattern strides 1 through 5
    for (int stride=1; stride < 6; ++stride) {
        for (int i=0; i < tmpOpcodes.size(); ++i) {
            int j;
            for (j=i+stride; j < tmpOpcodes.size(); j += stride) {
                bool strideMatch = true;
                for (int k=0; k < stride; ++k) {
                    if ( (j+k >= tmpOpcodes.size()) || (tmpOpcodes[j+k] != tmpOpcodes[i+k]) ) {
                        strideMatch = false;
                        break;
                    }
                    if ( (tmpOpcodes[j+k].op == binary_format::FixUpOpcode::repeat) && (tmpOpcodes[j+k].repeatOpcodeCount+k >= stride) ) {
                        strideMatch = false;
                        break;
                    }
                }
                if ( !strideMatch )
                    break;
            }
            // see if same opcode repeated three or more times
            int repeats = (j-i)/stride;
            if ( repeats > 3 ) {
                // replace run with repeat opcode
                tmpOpcodes[i].op                = binary_format::FixUpOpcode::repeat;
                tmpOpcodes[i].repeatOpcodeCount = stride;
                tmpOpcodes[i].count             = repeats;
                tmpOpcodes.erase(tmpOpcodes.begin()+i+1, tmpOpcodes.begin()+j-stride);
            }
            // don't look for matches inside a repeat loop
            if ( tmpOpcodes[i].op == binary_format::FixUpOpcode::repeat )
                i += tmpOpcodes[i].repeatOpcodeCount;
        }
        if ( _log ) {
            char tmp[16];
            sprintf(tmp, "stride %d", stride);
            printOpcodes(tmp, tmpOpcodes);
        }
    }

    // verify the optimized opcodes still produce the same page content
    uint8_t secondExpansion[0x4010];
    expandOpcodes(tmpOpcodes, secondExpansion);
    if ( !samePageContent(firstExpansion, secondExpansion) )
        printOpcodes("opt", tmpOpcodes);

    // convert temp opcodes to real opcodes
    bool wroteDone = false;
    ContentBuffer opcodes;
    for (const TmpOpcode& tmp : tmpOpcodes) {
        switch ( tmp.op ) {
            case binary_format::FixUpOpcode::bind64:
            case binary_format::FixUpOpcode::bind32:
            case binary_format::FixUpOpcode::rebase64:
            case binary_format::FixUpOpcode::rebase32:
            case binary_format::FixUpOpcode::rebaseText32:
            case binary_format::FixUpOpcode::bindText32:
            case binary_format::FixUpOpcode::bindTextRel32:
            case binary_format::FixUpOpcode::bindImportJmp32:
                opcodes.append_byte((uint8_t)tmp.op);
                break;
            case binary_format::FixUpOpcode::done:
                opcodes.append_byte((uint8_t)tmp.op);
                wroteDone = true;
                break;
            case binary_format::FixUpOpcode::setPageOffset:
            case binary_format::FixUpOpcode::incPageOffset:
            case binary_format::FixUpOpcode::setOrdinal:
            case binary_format::FixUpOpcode::incOrdinal:
                if ( (tmp.count > 0) && (tmp.count < 16) ) {
                    opcodes.append_byte((uint8_t)tmp.op | tmp.count);
                }
                else {
                    opcodes.append_byte((uint8_t)tmp.op);
                    opcodes.append_uleb128(tmp.count);
                }
                break;
            case binary_format::FixUpOpcode::repeat: {
                const TmpOpcode* nextOpcodes = &tmp;
                ++nextOpcodes;      // pattern opcodes follow the repeat opcode in the stream
                std::vector<TmpOpcode> pattern;
                for (int i=0; i < tmp.repeatOpcodeCount; ++i) {
                    pattern.push_back(nextOpcodes[i]);
                }
                uint32_t repeatBytes = opcodeEncodingSize(pattern);
                assert(repeatBytes < 15);
                opcodes.append_byte((uint8_t)tmp.op | repeatBytes);
                opcodes.append_uleb128(tmp.count);
            } break;
        }
    }
    if ( (opcodes.size() == 0) || !wroteDone )
        opcodes.append_byte((uint8_t)binary_format::FixUpOpcode::done);

    // make opcodes streams 4-byte aligned
    opcodes.pad_to_size(4);

    //fprintf(stderr, "  makeFixupOpcodesForPage(pageStartSegmentOffset=0x%0X) result=%lu bytes\n", pageStartSegmentOffset, opcodes.size());
    return opcodes;
}
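// Encoding example for the conversion loop above: incPageOffset with
// count 3 fits the low nibble and encodes as the single byte (op | 3);
// with count 100 it encodes as the op byte followed by uleb128(100).
// A repeat opcode stores the byte length of its pattern in its low
// nibble and its iteration count as a uleb128; the pattern opcodes
// themselves follow in the stream and are encoded by later iterations
// of the same loop.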
void ImageGroupWriter::setImageFixups(Diagnostics& diag, uint32_t imageIndex, std::vector<FixUp>& fixups, bool hasTextRelocs)
{
    // only applicable for ImageGroup in a closure (not group of images in dyld cache)
    assert(_isDiskImage);

    // sort all rebases and binds by address
    std::sort(fixups.begin(), fixups.end(), [](FixUp& lhs, FixUp& rhs) -> bool {
        // sort by segIndex
        if ( lhs.segIndex < rhs.segIndex )
            return true;
        if ( lhs.segIndex > rhs.segIndex )
            return false;
        // then sort by segOffset
        if ( lhs.segOffset < rhs.segOffset )
            return true;
        if ( lhs.segOffset > rhs.segOffset )
            return false;
        // two fixups at same location
        // if the same (linker bug), ignore one
        if ( lhs.type == rhs.type ) {
            rhs.type = FixupType::ignore;
        }
        // if one is rebase for lazy pointer, ignore rebase because dyld3 does not lazy bind
        else if ( (lhs.type == FixupType::pointerLazyBind) && (rhs.type == FixupType::rebase) ) {
            // lazy pointers have rebase and (lazy) bind at same location. since dyld3 does not do lazy binding, we mark the rebase to be ignored later
            rhs.type = FixupType::ignore;
        }
        else if ( (rhs.type == FixupType::pointerLazyBind) && (lhs.type == FixupType::rebase) ) {
            // lazy pointers have rebase and (lazy) bind at same location. since dyld3 does not do lazy binding, we mark the rebase to be ignored later
            lhs.type = FixupType::ignore;
        }
        return (lhs.type < rhs.type);
    });

    // remove ignoreable fixups
    fixups.erase(std::remove_if(fixups.begin(), fixups.end(),
                                [&](const FixUp& a) {
                                    return (a.type == FixupType::ignore);
                                }), fixups.end());

    // look for overlapping fixups
    const uint32_t pointerSize = (_is64 ? 8 : 4);
    const FixUp* lastFixup = nullptr;
    for (const FixUp& fixup : fixups) {
        if ( lastFixup != nullptr ) {
            if ( lastFixup->segIndex == fixup.segIndex ) {
                uint64_t increment = fixup.segOffset - lastFixup->segOffset;
                if ( increment < pointerSize ) {
                    if ( (increment == 0) && ((lastFixup->type == FixupType::ignore) || (fixup.type == FixupType::ignore)) ) {
                        // allow rebase to local lazy helper and lazy bind to same location
                    }
                    else {
                        diag.error("segment %d has overlapping fixups at offset 0x%0llX and 0x%0llX", fixup.segIndex, lastFixup->segOffset, fixup.segOffset);
                        setImageInvalid(imageIndex);
                        return;
                    }
                }
            }
        }
        lastFixup = &fixup;
    }

    if ( hasTextRelocs )
        _diskImages[imageIndex].hasTextRelocs = true;

    // there is one ordinal table per image, shared by all segments with fixups in that image
    std::vector<TargetSymbolValue> targetsForImage;

    const bool opcodeLogging = false;
    // calculate SegmentFixupsByPage for each segment
    std::vector<SegmentFixUpBuilder*> builders;
    for (uint32_t segIndex=0, onDiskSegIndex=0; segIndex < _diskImages[imageIndex].segmentsArrayCount; ++segIndex) {
        const binary_format::DiskSegment* diskSeg = (const binary_format::DiskSegment*)&(_segmentPool[_diskImages[imageIndex].segmentsArrayStartIndex+segIndex]);
        SegmentFixUpBuilder* builder = nullptr;
        if ( diskSeg->paddingNotSeg )
            continue;
        if ( diskSeg->filePageCount == 0 ) {
            ++onDiskSegIndex;
            continue;
        }
        if ( diskSeg->permissions & VM_PROT_WRITE ) {
            builder = new SegmentFixUpBuilder(onDiskSegIndex, diskSeg->filePageCount, _pageSize, _is64, fixups, targetsForImage, opcodeLogging);
        }
        else if ( hasTextRelocs && (diskSeg->permissions == (VM_PROT_READ|VM_PROT_EXECUTE)) ) {
            builder = new SegmentFixUpBuilder(onDiskSegIndex, diskSeg->filePageCount, _pageSize, _is64, fixups, targetsForImage, opcodeLogging);
        }
        if ( builder != nullptr ) {
            if ( builder->hasFixups() )
                builders.push_back(builder);
            else
                delete builder;
        }
        ++onDiskSegIndex;
    }

    // build AllFixupsBySegment for image
    _fixupsPool.pad_to_size(4);
    uint32_t startOfFixupsOffset = (uint32_t)_fixupsPool.size();
    size_t headerSize = builders.size() * sizeof(binary_format::AllFixupsBySegment);
    size_t offsetOfSegmentHeaderInBuffer = _fixupsPool.size();
    for (int i=0; i < headerSize; ++i) {
        _fixupsPool.append_byte(0);
    }
    uint32_t entryIndex = 0;
    for (SegmentFixUpBuilder* builder : builders) {
        binary_format::AllFixupsBySegment* entries = (binary_format::AllFixupsBySegment*)(_fixupsPool.start()+offsetOfSegmentHeaderInBuffer);
        entries[entryIndex].segIndex = builder->segIndex();
        entries[entryIndex].offset   = (uint32_t)_fixupsPool.size() - startOfFixupsOffset;
        builder->appendSegmentFixUpMap(_fixupsPool);
        delete builder;
        ++entryIndex;
    }
    _diskImages[imageIndex].fixupsPoolOffset   = (uint32_t)offsetOfSegmentHeaderInBuffer;
    _diskImages[imageIndex].fixupsPoolSegCount = entryIndex;

    // append targetsForImage into group
    size_t start = _targetsPool.size();
    size_t count = targetsForImage.size();
    _diskImages[imageIndex].targetsArrayStartIndex = (uint32_t)start;
    _diskImages[imageIndex].targetsArrayCount      = (uint32_t)count;
    assert(_diskImages[imageIndex].targetsArrayStartIndex == start);
    assert(_diskImages[imageIndex].targetsArrayCount == count);
    _targetsPool.insert(_targetsPool.end(), targetsForImage.begin(), targetsForImage.end());
}