/*
 * Copyright (c) 2019 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * This file contains static dyld helper functions for
 * exclusive use in platform startup code.
 */
#include <mach-o/fixup-chains.h>
#include <mach-o/loader.h>
#include <mach-o/nlist.h>
#if defined(HAS_APPLE_PAC)
#include <ptrauth.h>
#endif /* defined(HAS_APPLE_PAC) */
#if defined(__x86_64__)
#define dyldLogFunc(msg, ...) kprintf(msg, ## __VA_ARGS__)
#else
/*
 * Very early arm64 startup cannot print; as a debug stand-in, spin on
 * "yield" until a debugger clears _wait (the arguments stay inspectable).
 */
#define dyldLogFunc(msg, ...) ({int _wait = 0; do { asm volatile ("yield" : "+r"(_wait) : ); } while(!_wait); })
#endif
// cannot safely call out to functions like strcmp before initial fixup
static inline __attribute__((__always_inline__)) bool
strings_are_equal(const char* a, const char* b)
{
    while (*a && *b) {
        if (*a++ != *b++) {
            return false;
        }
    }
    return *a == *b;
}
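/*
 * For the same reason, the helpers used while sliding are forced inline:
 * until kernel_collection_slide() has applied the chained fixups, data
 * reached through fixed-up pointers (and, on arm64e, signed function
 * pointers) may still hold their unslid on-disk values, so this code
 * avoids relying on out-of-line calls.
 */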
/*
 * Functions from dyld to rebase, fixup and sign the contents of MH_FILESET
 * kernel collections.
 */
union ChainedFixupPointerOnDisk {
    uint64_t raw64;
    struct dyld_chained_ptr_64_kernel_cache_rebase fixup64;
};
static uint64_t __unused
sign_pointer(struct dyld_chained_ptr_64_kernel_cache_rebase pointer __unused,
    void* loc __unused,
    uint64_t target __unused)
{
#if HAS_APPLE_PAC
    uint64_t discriminator = pointer.diversity;
    if (pointer.addrDiv) {
        if (discriminator) {
            discriminator = __builtin_ptrauth_blend_discriminator(loc, discriminator);
        } else {
            discriminator = (uint64_t)(uintptr_t)loc;
        }
    }
    switch (pointer.key) {
    case 0: // IA
        return (uint64_t)__builtin_ptrauth_sign_unauthenticated((void*)target, 0, discriminator);
    case 1: // IB
        return (uint64_t)__builtin_ptrauth_sign_unauthenticated((void*)target, 1, discriminator);
    case 2: // DA
        return (uint64_t)__builtin_ptrauth_sign_unauthenticated((void*)target, 2, discriminator);
    case 3: // DB
        return (uint64_t)__builtin_ptrauth_sign_unauthenticated((void*)target, 3, discriminator);
    }
#endif /* HAS_APPLE_PAC */
    return target;
}
static inline __attribute__((__always_inline__)) void
fixup_value(union ChainedFixupPointerOnDisk* fixupLoc __unused,
    const struct dyld_chained_starts_in_segment* segInfo,
    uintptr_t slide __unused,
    const void* basePointers[KCNumKinds] __unused,
    int* stop)
{
    dyldLogFunc("[LOG] kernel-fixups: fixup_value %p\n", fixupLoc);
    switch (segInfo->pointer_format) {
    case DYLD_CHAINED_PTR_64_KERNEL_CACHE:
    case DYLD_CHAINED_PTR_X86_64_KERNEL_CACHE: {
        const void* baseAddress = basePointers[fixupLoc->fixup64.cacheLevel];
        if (baseAddress == 0) {
            dyldLogFunc("Invalid cache level: %d\n", fixupLoc->fixup64.cacheLevel);
            *stop = 1;
            return;
        }
        uintptr_t slidValue = (uintptr_t)baseAddress + fixupLoc->fixup64.target;
        dyldLogFunc("[LOG] kernel-fixups: slidValue %p (base=%p, target=%p)\n", (void*)slidValue,
            (const void *)baseAddress, (void *)(uintptr_t)fixupLoc->fixup64.target);
#if HAS_APPLE_PAC
        if (fixupLoc->fixup64.isAuth) {
            slidValue = sign_pointer(fixupLoc->fixup64, fixupLoc, slidValue);
        }
#else
        if (fixupLoc->fixup64.isAuth) {
            dyldLogFunc("Unexpected authenticated fixup\n");
            *stop = 1;
            return;
        }
#endif // HAS_APPLE_PAC
        fixupLoc->raw64 = slidValue;
        break;
    }
    default:
        dyldLogFunc("unsupported pointer chain format: 0x%04X", segInfo->pointer_format);
        *stop = 1;
        break;
    }
}
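/*
 * A fixup target is an offset from the mach header of one of the loaded
 * kernel collections; fixup64.cacheLevel picks which one, and the caller's
 * basePointers[] array (indexed 0 .. KCNumKinds - 1) supplies the matching
 * base address, so the rebased value is simply base + target.
 */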
static inline __attribute__((__always_inline__)) int
walk_chain(const struct mach_header_64* mh,
    const struct dyld_chained_starts_in_segment* segInfo,
    uint32_t pageIndex,
    uint16_t offsetInPage,
    uintptr_t slide __unused,
    const void* basePointers[KCNumKinds])
{
    int stop = 0;
    int chainEnd = 0;

    dyldLogFunc("[LOG] kernel-fixups: walk_chain page[%d]\n", pageIndex);

    uintptr_t pageContentStart = (uintptr_t)mh + (uintptr_t)segInfo->segment_offset
        + (pageIndex * segInfo->page_size);
    union ChainedFixupPointerOnDisk* chain = (union ChainedFixupPointerOnDisk*)(pageContentStart + offsetInPage);

    dyldLogFunc("[LOG] kernel-fixups: segInfo->segment_offset 0x%llx\n", segInfo->segment_offset);
    dyldLogFunc("[LOG] kernel-fixups: segInfo->segment_pagesize %d\n", segInfo->page_size);
    dyldLogFunc("[LOG] kernel-fixups: segInfo pointer format %d\n", segInfo->pointer_format);

    while (!stop && !chainEnd) {
        // copy chain content, in case handler modifies location to final value
        dyldLogFunc("[LOG] kernel-fixups: value of chain %p", chain);
        union ChainedFixupPointerOnDisk chainContent __unused = *chain;
        fixup_value(chain, segInfo, slide, basePointers, &stop);
        if (stop) {
            break;
        }
        switch (segInfo->pointer_format) {
        case DYLD_CHAINED_PTR_64_KERNEL_CACHE:
            if (chainContent.fixup64.next == 0) {
                chainEnd = 1;
            } else {
                dyldLogFunc("[LOG] kernel-fixups: chainContent fixup 64.next %d\n", chainContent.fixup64.next);
                chain = (union ChainedFixupPointerOnDisk*)((uintptr_t)chain + chainContent.fixup64.next * 4);
            }
            break;
        case DYLD_CHAINED_PTR_X86_64_KERNEL_CACHE:
            if (chainContent.fixup64.next == 0) {
                chainEnd = 1;
            } else {
                dyldLogFunc("[LOG] kernel-fixups: chainContent fixup x86 64.next %d\n", chainContent.fixup64.next);
                chain = (union ChainedFixupPointerOnDisk*)((uintptr_t)chain + chainContent.fixup64.next);
            }
            break;
        default:
            dyldLogFunc("unknown pointer format 0x%04X", segInfo->pointer_format);
            stop = 1;
            break;
        }
    }
    return stop;
}
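/*
 * The stride applied to fixup64.next above comes from the chained-fixup
 * format: DYLD_CHAINED_PTR_64_KERNEL_CACHE links are counted in 4-byte
 * units (hence the "* 4"), while DYLD_CHAINED_PTR_X86_64_KERNEL_CACHE
 * links are plain byte offsets (stride 1). A next field of 0 terminates
 * the chain for the page.
 */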
static inline __attribute__((__always_inline__)) int
kernel_collection_slide(const struct mach_header_64* mh, const void* basePointers[KCNumKinds])
{
    // First find the slide and chained fixups load command
    uint64_t textVMAddr = 0;
    const struct linkedit_data_command* chainedFixups = 0;
    uint64_t linkeditVMAddr = 0;
    uint64_t linkeditFileOffset = 0;

    dyldLogFunc("[LOG] kernel-fixups: parsing load commands\n");

    const struct load_command* startCmds = 0;
    if (mh->magic == MH_MAGIC_64) {
        startCmds = (struct load_command*)((uintptr_t)mh + sizeof(struct mach_header_64));
    } else if (mh->magic == MH_MAGIC) {
        startCmds = (struct load_command*)((uintptr_t)mh + sizeof(struct mach_header));
    } else {
        //const uint32_t* h = (uint32_t*)mh;
        //diag.error("file does not start with MH_MAGIC[_64]: 0x%08X 0x%08X", h[0], h[1]);
        return 1; // not a mach-o file
    }
    const struct load_command* const cmdsEnd = (struct load_command*)((uintptr_t)startCmds + mh->sizeofcmds);
    const struct load_command* cmd = startCmds;
    for (uint32_t i = 0; i < mh->ncmds; ++i) {
        dyldLogFunc("[LOG] kernel-fixups: parsing load command %d with cmd=0x%x\n", i, cmd->cmd);
        const struct load_command* nextCmd = (struct load_command*)((uintptr_t)cmd + cmd->cmdsize);
        if (cmd->cmdsize < 8) {
            //diag.error("malformed load command #%d of %d at %p with mh=%p, size (0x%X) too small", i, this->ncmds, cmd, this, cmd->cmdsize);
            return 1;
        }
        if ((nextCmd > cmdsEnd) || (nextCmd < startCmds)) {
            //diag.error("malformed load command #%d of %d at %p with mh=%p, size (0x%X) is too large, load commands end at %p", i, this->ncmds, cmd, this, cmd->cmdsize, cmdsEnd);
            return 1;
        }
        if (cmd->cmd == LC_DYLD_CHAINED_FIXUPS) {
            chainedFixups = (const struct linkedit_data_command*)cmd;
        } else if (cmd->cmd == LC_SEGMENT_64) {
            const struct segment_command_64* seg = (const struct segment_command_64*)(uintptr_t)cmd;
            dyldLogFunc("[LOG] kernel-fixups: segment name vm start and size: %s 0x%llx 0x%llx\n",
                seg->segname, seg->vmaddr, seg->vmsize);
            if (strings_are_equal(seg->segname, "__TEXT")) {
                textVMAddr = seg->vmaddr;
            } else if (strings_are_equal(seg->segname, "__LINKEDIT")) {
                linkeditVMAddr = seg->vmaddr;
                linkeditFileOffset = seg->fileoff;
            }
        }
        cmd = nextCmd;
    }

    uintptr_t slide = (uintptr_t)mh - (uintptr_t)textVMAddr;

    dyldLogFunc("[LOG] kernel-fixups: slide %lx\n", slide);

    if (chainedFixups == 0) {
        return 0;
    }

    dyldLogFunc("[LOG] kernel-fixups: found chained fixups %p\n", chainedFixups);
    dyldLogFunc("[LOG] kernel-fixups: found linkeditVMAddr %p\n", (void*)linkeditVMAddr);
    dyldLogFunc("[LOG] kernel-fixups: found linkeditFileOffset %p\n", (void*)linkeditFileOffset);

    // Now we have the chained fixups, walk it to apply all the rebases
    uint64_t offsetInLinkedit = chainedFixups->dataoff - linkeditFileOffset;
    uintptr_t linkeditStartAddr = (uintptr_t)linkeditVMAddr + slide;

    dyldLogFunc("[LOG] kernel-fixups: offsetInLinkedit %llx\n", offsetInLinkedit);
    dyldLogFunc("[LOG] kernel-fixups: linkeditStartAddr %p\n", (void*)linkeditStartAddr);

    const struct dyld_chained_fixups_header* fixupsHeader = (const struct dyld_chained_fixups_header*)(linkeditStartAddr + offsetInLinkedit);
    const struct dyld_chained_starts_in_image* fixupStarts = (const struct dyld_chained_starts_in_image*)((uintptr_t)fixupsHeader + fixupsHeader->starts_offset);

    dyldLogFunc("[LOG] kernel-fixups: fixupsHeader %p\n", fixupsHeader);
    dyldLogFunc("[LOG] kernel-fixups: fixupStarts %p\n", fixupStarts);

    bool stopped = false;
    for (uint32_t segIndex = 0; segIndex < fixupStarts->seg_count && !stopped; ++segIndex) {
        dyldLogFunc("[LOG] kernel-fixups: segment %d\n", segIndex);
        if (fixupStarts->seg_info_offset[segIndex] == 0) {
            continue;
        }
        const struct dyld_chained_starts_in_segment* segInfo = (const struct dyld_chained_starts_in_segment*)((uintptr_t)fixupStarts + fixupStarts->seg_info_offset[segIndex]);
        for (uint32_t pageIndex = 0; pageIndex < segInfo->page_count && !stopped; ++pageIndex) {
            uint16_t offsetInPage = segInfo->page_start[pageIndex];
            if (offsetInPage == DYLD_CHAINED_PTR_START_NONE) {
                continue;
            }
            if (offsetInPage & DYLD_CHAINED_PTR_START_MULTI) {
                // FIXME: Implement this
                return 1;
            }
            // one chain per page
            if (walk_chain(mh, segInfo, pageIndex, offsetInPage, slide, basePointers)) {
                stopped = true;
            }
        }
    }
    return stopped;
}
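/*
 * Sketch of intended use (illustrative only; the variable names here are
 * hypothetical): startup code fills in the base address of each collection
 * it has loaded, then slides the fileset in place:
 *
 *   const void *bases[KCNumKinds] = {0};
 *   bases[0] = (const void *)primary_kc_mh;    // hypothetical base table
 *   if (kernel_collection_slide(primary_kc_mh, bases) != 0) {
 *       // bad magic, malformed load command, or unsupported chain format
 *   }
 */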
/*
 * Utility functions to adjust the load command vmaddrs in constituent MachO's
 * of an MH_FILESET kernel collection.
 */
static inline __attribute__((__always_inline__)) void
kernel_collection_adjust_fileset_entry_addrs(struct mach_header_64 *mh, uintptr_t adj)
{
    struct load_command *lc;
    struct segment_command_64 *seg, *linkedit_cmd = NULL;
    struct symtab_command *symtab_cmd = NULL;
    struct section_64 *sec;
    uint32_t i, j;

    lc = (struct load_command *)((uintptr_t)mh + sizeof(*mh));
    for (i = 0; i < mh->ncmds; i++,
        lc = (struct load_command *)((uintptr_t)lc + lc->cmdsize)) {
        if (lc->cmd == LC_SYMTAB) {
            symtab_cmd = (struct symtab_command *)lc;
            continue;
        }
        if (lc->cmd != LC_SEGMENT_64) {
            continue;
        }
        if (strcmp(((struct segment_command_64 *)(uintptr_t)lc)->segname, SEG_LINKEDIT) == 0) {
            linkedit_cmd = ((struct segment_command_64 *)(uintptr_t)lc);
        }
        /* slide/adjust the segment command */
        seg = (struct segment_command_64 *)(uintptr_t)lc;
        seg->vmaddr += adj;

        /* slide/adjust every section in the segment */
        sec = (struct section_64 *)((uintptr_t)seg + sizeof(*seg));
        for (j = 0; j < seg->nsects; j++, sec++) {
            sec->addr += adj;
        }
    }

    if (symtab_cmd != NULL && linkedit_cmd != NULL) {
        struct nlist_64 *sym;
        uint32_t cnt = 0;

        dyldLogFunc("[LOG] Symbols:\n");
        dyldLogFunc("[LOG] nsyms: %d, symoff: 0x%x\n", symtab_cmd->nsyms, symtab_cmd->symoff);

        if (symtab_cmd->nsyms == 0) {
            dyldLogFunc("[LOG] No symbols to relocate\n");
        }

        sym = (struct nlist_64 *)(linkedit_cmd->vmaddr + symtab_cmd->symoff - linkedit_cmd->fileoff);

        for (i = 0; i < symtab_cmd->nsyms; i++) {
            if (sym[i].n_type & N_STAB) {
                continue;
            }
            sym[i].n_value += adj;
            cnt++;
        }
        dyldLogFunc("[LOG] KASLR: Relocated %d symbols\n", cnt);
    }
}
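/*
 * The symbol-table pointer computed above turns a file offset into a VM
 * address: symoff is relative to the start of the file, and the symtab
 * bytes live inside __LINKEDIT, so vmaddr + (symoff - fileoff) is where
 * the nlist_64 array sits in memory once the segment commands have been
 * adjusted.
 */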
static inline __attribute__((__always_inline__)) void
kernel_collection_adjust_mh_addrs(struct mach_header_64 *kc_mh, uintptr_t adj,
    bool pageable, uintptr_t *kc_lowest_vmaddr, uintptr_t *kc_highest_vmaddr,
    uintptr_t *kc_lowest_ro_vmaddr, uintptr_t *kc_highest_ro_vmaddr,
    uintptr_t *kc_lowest_rx_vmaddr, uintptr_t *kc_highest_rx_vmaddr,
    uintptr_t *kc_highest_nle_vmaddr)
{
    assert(kc_mh->filetype == MH_FILESET);

    struct load_command *lc;
    struct fileset_entry_command *fse;
    struct segment_command_64 *seg;
    struct section_64 *sec;
    struct mach_header_64 *mh;
    uintptr_t lowest_vmaddr = UINTPTR_MAX, highest_vmaddr = 0, highest_nle_vmaddr = 0;
    uintptr_t lowest_ro_vmaddr = UINTPTR_MAX, highest_ro_vmaddr = 0;
    uintptr_t lowest_rx_vmaddr = UINTPTR_MAX, highest_rx_vmaddr = 0;
    uint32_t i, j;
    bool is_linkedit;

    /*
     * Slide (offset/adjust) every segment/section of every kext contained
     * in this MH_FILESET mach-o.
     */
    lc = (struct load_command *)((uintptr_t)kc_mh + sizeof(*kc_mh));
    for (i = 0; i < kc_mh->ncmds; i++,
        lc = (struct load_command *)((uintptr_t)lc + lc->cmdsize)) {
        if (lc->cmd == LC_FILESET_ENTRY) {
            fse = (struct fileset_entry_command *)(uintptr_t)lc;
            /*
             * The fileset_entry contains a pointer to the mach-o
             * of a kext (or the kernel). Slide/adjust this command, and
             * then slide/adjust all the sub-commands in the mach-o.
             */
            dyldLogFunc("[MH] sliding %s", (char *)((uintptr_t)fse +
                (uintptr_t)(fse->entry_id.offset)));
            mh = (struct mach_header_64 *)((uintptr_t)fse->vmaddr + adj);
            if (!pageable) {
                /*
                 * Do not adjust mach headers of entries in pageable KC as that
                 * would pull those pages in prematurely
                 */
                kernel_collection_adjust_fileset_entry_addrs(mh, adj);
            }
            fse->vmaddr += adj;
        } else if (lc->cmd == LC_SEGMENT_64) {
            /*
             * Slide/adjust all LC_SEGMENT_64 commands in the fileset
             * (and any sections in those segments)
             */
            seg = (struct segment_command_64 *)(uintptr_t)lc;
            seg->vmaddr += adj;
            sec = (struct section_64 *)((uintptr_t)seg + sizeof(*seg));
            for (j = 0; j < seg->nsects; j++, sec++) {
                sec->addr += adj;
            }
            if (seg->vmsize == 0) {
                continue;
            }
            /*
             * Record vmaddr range covered by all non-empty segments in the
             * kernel collection.
             */
            if (seg->vmaddr < lowest_vmaddr) {
                lowest_vmaddr = (uintptr_t)seg->vmaddr;
            }

            is_linkedit = strings_are_equal(seg->segname, "__LINKEDIT");

            if (seg->vmaddr + seg->vmsize > highest_vmaddr) {
                highest_vmaddr = (uintptr_t)seg->vmaddr + (uintptr_t)seg->vmsize;
                if (!is_linkedit) {
                    highest_nle_vmaddr = highest_vmaddr;
                }
            }

            if ((seg->maxprot & VM_PROT_WRITE) || is_linkedit) {
                continue;
            }
            /*
             * Record vmaddr range covered by non-empty read-only segments
             * in the kernel collection (excluding LINKEDIT).
             */
            if (seg->vmaddr < lowest_ro_vmaddr) {
                lowest_ro_vmaddr = (uintptr_t)seg->vmaddr;
            }
            if (seg->vmaddr + seg->vmsize > highest_ro_vmaddr) {
                highest_ro_vmaddr = (uintptr_t)seg->vmaddr + (uintptr_t)seg->vmsize;
            }

            if (!(seg->maxprot & VM_PROT_EXECUTE)) {
                continue;
            }
            /*
             * Record vmaddr range covered by contiguous execute segments
             * in the kernel collection.
             */
            if (seg->vmaddr < lowest_rx_vmaddr && (lowest_rx_vmaddr <= seg->vmaddr + seg->vmsize || lowest_rx_vmaddr == UINTPTR_MAX)) {
                lowest_rx_vmaddr = (uintptr_t)seg->vmaddr;
            }
            if (seg->vmaddr + seg->vmsize > highest_rx_vmaddr && (highest_rx_vmaddr >= seg->vmaddr || highest_rx_vmaddr == 0)) {
                highest_rx_vmaddr = (uintptr_t)seg->vmaddr + (uintptr_t)seg->vmsize;
            }
        }
    }
    if (kc_lowest_vmaddr) {
        *kc_lowest_vmaddr = lowest_vmaddr;
    }
    if (kc_highest_vmaddr) {
        *kc_highest_vmaddr = highest_vmaddr;
    }
    if (kc_lowest_ro_vmaddr) {
        *kc_lowest_ro_vmaddr = lowest_ro_vmaddr;
    }
    if (kc_highest_ro_vmaddr) {
        *kc_highest_ro_vmaddr = highest_ro_vmaddr;
    }
    if (kc_lowest_rx_vmaddr) {
        *kc_lowest_rx_vmaddr = lowest_rx_vmaddr;
    }
    if (kc_highest_rx_vmaddr) {
        *kc_highest_rx_vmaddr = highest_rx_vmaddr;
    }
    if (kc_highest_nle_vmaddr) {
        *kc_highest_nle_vmaddr = highest_nle_vmaddr;
    }
}
/*
 * Rebaser functions for the traditional arm64e static kernelcache with
 * threaded rebase.
 */
static void __unused
rebase_chain(uintptr_t chainStartAddress, uint64_t stepMultiplier, uintptr_t baseAddress __unused, uint64_t slide)
{
    uint64_t delta = 0;
    uintptr_t address = chainStartAddress;
    do {
        uint64_t value = *(uint64_t*)address;

#if HAS_APPLE_PAC
        uint16_t diversity = (uint16_t)(value >> 32);
        bool hasAddressDiversity = (value & (1ULL << 48)) != 0;
        ptrauth_key key = (ptrauth_key)((value >> 49) & 0x3);
#endif /* HAS_APPLE_PAC */
        bool isAuthenticated = (value & (1ULL << 63)) != 0;
        bool isRebase = (value & (1ULL << 62)) == 0;
        if (isRebase) {
            if (isAuthenticated) {
#if HAS_APPLE_PAC
                // The new value for a rebase is the low 32-bits of the threaded value plus the slide.
                uint64_t newValue = (value & 0xFFFFFFFF) + slide;
                // Add in the offset from the mach_header
                newValue += baseAddress;

                // We have bits to merge in to the discriminator
                uintptr_t discriminator = diversity;
                if (hasAddressDiversity) {
                    // First calculate a new discriminator using the address of where we are trying to store the value
                    // Only blend if we have a discriminator
                    if (discriminator) {
                        discriminator = __builtin_ptrauth_blend_discriminator((void*)address, discriminator);
                    } else {
                        discriminator = address;
                    }
                }
                switch (key) {
                case ptrauth_key_asia:
                    newValue = (uintptr_t)__builtin_ptrauth_sign_unauthenticated((void*)newValue, ptrauth_key_asia, discriminator);
                    break;
                case ptrauth_key_asib:
                    newValue = (uintptr_t)__builtin_ptrauth_sign_unauthenticated((void*)newValue, ptrauth_key_asib, discriminator);
                    break;
                case ptrauth_key_asda:
                    newValue = (uintptr_t)__builtin_ptrauth_sign_unauthenticated((void*)newValue, ptrauth_key_asda, discriminator);
                    break;
                case ptrauth_key_asdb:
                    newValue = (uintptr_t)__builtin_ptrauth_sign_unauthenticated((void*)newValue, ptrauth_key_asdb, discriminator);
                    break;
                }
                *(uint64_t*)address = newValue;
#endif /* HAS_APPLE_PAC */
            } else {
                // Regular pointer which needs to fit in 51-bits of value.
                // C++ RTTI uses the top bit, so we'll allow the whole top-byte
                // and the bottom 43-bits to be fit in to 51-bits.
                uint64_t top8Bits = value & 0x0007F80000000000ULL;
                uint64_t bottom43Bits = value & 0x000007FFFFFFFFFFULL;
                uint64_t targetValue = (top8Bits << 13) | (((intptr_t)(bottom43Bits << 21) >> 21) & 0x00FFFFFFFFFFFFFF);
                targetValue = targetValue + slide;
                *(uint64_t*)address = targetValue;
            }
        }

        // The delta is bits [51..61]
        // And bit 62 is to tell us if we are a rebase (0) or bind (1)
        value &= ~(1ULL << 62);
        delta = (value & 0x3FF8000000000000) >> 51;
        address += delta * stepMultiplier;
    } while (delta != 0);
}
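/*
 * Packing recap for the non-authenticated case above: the on-disk value
 * keeps the pointer's top byte in bits [43..50] and its low 43 bits in
 * [0..42]. "top8Bits << 13" moves the top byte back up to bits [56..63],
 * while "(bottom43Bits << 21) >> 21" (as a signed shift) sign-extends
 * bit 42 through the middle bits; together they reconstitute the original
 * unslid vmaddr, to which the slide is then added.
 */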
static bool __unused
rebase_threaded_starts(uint32_t *threadArrayStart, uint32_t *threadArrayEnd,
    uintptr_t macho_header_addr, uintptr_t macho_header_vmaddr, size_t slide)
{
    uint32_t threadStartsHeader = *threadArrayStart;
    uint64_t stepMultiplier = (threadStartsHeader & 1) == 1 ? 8 : 4;
    for (uint32_t* threadOffset = threadArrayStart + 1; threadOffset != threadArrayEnd; ++threadOffset) {
        if (*threadOffset == 0xFFFFFFFF) {
            break;
        }
        rebase_chain(macho_header_addr + *threadOffset, stepMultiplier, macho_header_vmaddr, slide);
    }
    return true;
}
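/*
 * Sketch of intended use (illustrative; locating the section is outside
 * this file): the thread-starts array normally comes from the
 * "__thread_starts" section in __TEXT. Its first uint32_t selects the step
 * multiplier (bit 0 set means 8-byte strides), and each following entry is
 * an offset from the mach header to the first fixup of a chain:
 *
 *   uint32_t *starts = (uint32_t *)section_contents;   // hypothetical
 *   uint32_t *end = starts + section_size / sizeof(uint32_t);
 *   rebase_threaded_starts(starts, end, mh_addr, mh_vmaddr, slide);
 */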