/*
 * Copyright (c) 2008 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <mach/vm_prot.h>
#include <mach-o/loader.h>
#include <sys/types.h>

#include <mach/vm_param.h>

#include <mach/mach_init.h>

#define DEBUG_ASSERT_COMPONENT_NAME_STRING "kxld"
#include <AssertMacros.h>

#include "kxld_reloc.h"
#include "kxld_sect.h"
#include "kxld_seg.h"
#include "kxld_symtab.h"
#include "kxld_util.h"

#define TEXT_SEG_PROT (VM_PROT_READ | VM_PROT_EXECUTE)
#define DATA_SEG_PROT (VM_PROT_READ | VM_PROT_WRITE)
extern boolean_t isSplitKext;
extern boolean_t isOldInterface;
#if KXLD_USER_OR_OBJECT
static kern_return_t reorder_sections(KXLDSeg *seg, KXLDArray *section_order);
static void reorder_section(KXLDArray *sects, u_int *sect_reorder_index,
    KXLDSect **reorder_buffer, u_int reorder_buffer_index);
#endif /* KXLD_USER_OR_OBJECT */

static KXLDSeg * get_segment_by_name(KXLDArray *segarray, const char *name);
#if KXLD_USER_OR_ILP32
static kern_return_t seg_export_macho_header_32(const KXLDSeg *seg, u_char *buf,
    u_long *header_offset, u_long header_size, u_long data_offset);
#endif /* KXLD_USER_OR_ILP32 */

#if KXLD_USER_OR_LP64
static kern_return_t seg_export_macho_header_64(const KXLDSeg *seg, u_char *buf,
    u_long *header_offset, u_long header_size, u_long data_offset);
#endif /* KXLD_USER_OR_LP64 */

static KXLDSect * get_sect_by_index(const KXLDSeg *seg, u_int idx);
#if KXLD_USER_OR_ILP32
/*******************************************************************************
*******************************************************************************/
kern_return_t
kxld_seg_init_from_macho_32(KXLDSeg *seg, struct segment_command *src)
{
    kern_return_t rval = KERN_FAILURE;

    check(seg);
    check(src);

    strlcpy(seg->segname, src->segname, sizeof(seg->segname));
    seg->base_addr = src->vmaddr;
    seg->link_addr = src->vmaddr;
    seg->vmsize = src->vmsize;
    seg->fileoff = src->fileoff;
    seg->maxprot = src->maxprot;
    seg->initprot = src->initprot;
    seg->flags = src->flags;

    rval = kxld_array_init(&seg->sects, sizeof(KXLDSect *), src->nsects);
    require_noerr(rval, finish);

    rval = KERN_SUCCESS;

finish:
    return rval;
}
#endif /* KXLD_USER_OR_ILP32 */
#if KXLD_USER_OR_LP64
/*******************************************************************************
*******************************************************************************/
kern_return_t
kxld_seg_init_from_macho_64(KXLDSeg *seg, struct segment_command_64 *src)
{
    kern_return_t rval = KERN_FAILURE;

    check(seg);
    check(src);

    strlcpy(seg->segname, src->segname, sizeof(seg->segname));
    seg->base_addr = src->vmaddr;
    seg->link_addr = src->vmaddr;
    seg->vmsize = src->vmsize;
    seg->fileoff = src->fileoff;
    seg->maxprot = src->maxprot;
    seg->initprot = src->initprot;
    seg->flags = src->flags;

    rval = kxld_array_init(&seg->sects, sizeof(KXLDSect *), src->nsects);
    require_noerr(rval, finish);

    rval = KERN_SUCCESS;

finish:
    return rval;
}
#endif /* KXLD_USER_OR_LP64 */
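
/*
 * Illustrative sketch, not part of the original file: one way a hypothetical
 * caller might walk a 64-bit Mach-O header and hand each LC_SEGMENT_64 load
 * command to kxld_seg_init_from_macho_64().  The names macho_buf, segs,
 * seg_index, and the surrounding error handling are assumptions made for
 * illustration only.
 *
 *   struct mach_header_64 *mach = (struct mach_header_64 *) macho_buf;
 *   struct load_command *lc = (struct load_command *) (mach + 1);
 *   uint32_t cmd_index;
 *
 *   for (cmd_index = 0; cmd_index < mach->ncmds; ++cmd_index) {
 *       if (lc->cmd == LC_SEGMENT_64) {
 *           KXLDSeg *seg = kxld_array_get_item(segs, seg_index++);
 *           rval = kxld_seg_init_from_macho_64(seg,
 *               (struct segment_command_64 *) lc);
 *           require_noerr(rval, finish);
 *       }
 *       lc = (struct load_command *) ((u_char *) lc + lc->cmdsize);
 *   }
 */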
#if KXLD_USER_OR_OBJECT
/*******************************************************************************
*******************************************************************************/
kern_return_t
kxld_seg_create_seg_from_sections(KXLDArray *segarray, KXLDArray *sectarray)
{
    kern_return_t rval = KERN_FAILURE;
    KXLDSeg *seg = NULL;
    KXLDSect *sect = NULL;
    KXLDSect **sectp = NULL;
    u_int i = 0;

    /* Initialize the segment array to one segment */

    rval = kxld_array_init(segarray, sizeof(KXLDSeg), 1);
    require_noerr(rval, finish);

    /* Initialize the segment */

    seg = kxld_array_get_item(segarray, 0);
    seg->initprot = VM_PROT_ALL;
    seg->maxprot = VM_PROT_ALL;

    /* Add the sections to the segment */

    rval = kxld_array_init(&seg->sects, sizeof(KXLDSect *), sectarray->nitems);
    require_noerr(rval, finish);

    for (i = 0; i < sectarray->nitems; ++i) {
        sect = kxld_array_get_item(sectarray, i);
        sectp = kxld_array_get_item(&seg->sects, i);

        *sectp = sect;
    }

    rval = KERN_SUCCESS;

finish:
    return rval;
}
/*******************************************************************************
*******************************************************************************/
kern_return_t
kxld_seg_finalize_object_segment(KXLDArray *segarray, KXLDArray *section_order,
    u_long hdrsize)
{
    kern_return_t rval = KERN_FAILURE;
    KXLDSeg *seg = NULL;
    KXLDSect *sect = NULL;
    u_long sect_offset = 0;
    u_int i = 0;

    check(segarray);
    check(section_order);
    require_action(segarray->nitems == 1, finish, rval = KERN_FAILURE);

    seg = kxld_array_get_item(segarray, 0);

    /* Reorder the sections */

    rval = reorder_sections(seg, section_order);
    require_noerr(rval, finish);

    /* Set the initial link address at the end of the header pages */

    seg->link_addr = kxld_round_page_cross_safe(hdrsize);

    /* Fix up all of the section addresses */

    sect_offset = (u_long) seg->link_addr;
    for (i = 0; i < seg->sects.nitems; ++i) {
        sect = *(KXLDSect **)kxld_array_get_item(&seg->sects, i);

        sect->link_addr = kxld_sect_align_address(sect, sect_offset);
        sect_offset = (u_long) (sect->link_addr + sect->size);
    }

    /* Finish initializing the segment */

    seg->vmsize = kxld_round_page_cross_safe(sect_offset) - seg->link_addr;

    rval = KERN_SUCCESS;

finish:
    return rval;
}
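
/*
 * Worked example for the address fixup above (illustrative; assumes a 4K page
 * size and the section sizes/alignments shown).  With hdrsize 0x58c, the
 * segment's link_addr rounds up to 0x1000.  A 0x620-byte section with 16-byte
 * alignment then lands at 0x1000 and the running offset becomes 0x1620; a
 * 0x40-byte section with 8-byte alignment stays at 0x1620 and the offset ends
 * at 0x1660.  The segment's vmsize becomes round_page(0x1660) - 0x1000 =
 * 0x1000.
 */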
/*******************************************************************************
* The legacy section ordering used by kld was based on the order of sections
* in the kernel file.  To achieve the same layout, we save the kernel's
* section ordering as an array of section names when the kernel file itself
* is linked.  Then, when kexts are linked with the KXLD_LEGACY_LAYOUT flag,
* we refer to the kernel's section layout to order the kext's sections.
*
* The algorithm below is as follows.  We iterate through all of the kernel's
* sections grouped by segment name, so that we are processing all of the __TEXT
* sections, then all of the __DATA sections, etc.  We then iterate through the
* kext's sections with a similar grouping, looking for sections that match
* the current kernel's section.  In this way, we order all of the matching
* kext sections in the order in which they appear in the kernel, and then place
* all remaining kext sections at the end of the current segment grouping in
* the order in which they originally appeared.  Sections that only appear in
* the kernel are not created.  Segments that only appear in the kext are
* left in their original ordering.
*
* Reordered kext sections (example): given kernel __TEXT sections
* { __text, __const } and kext __TEXT sections { __const, __text, __cstring },
* the kext's __TEXT group becomes { __text, __const, __cstring }: the sections
* named by the kernel come first, in kernel order, followed by the remaining
* __cstring in its original relative order.
*
* In the implementation below, we use a reorder buffer to hold pointers to the
* sections of the current working segment.  We scan this buffer looking for
* matching sections, placing them in the segment's section index as we find them.
* If this function must exit early, the segment's section index is left in an
* inconsistent state.
*******************************************************************************/
static kern_return_t
reorder_sections(KXLDSeg *seg, KXLDArray *section_order)
{
    kern_return_t rval = KERN_FAILURE;
    KXLDSect *sect = NULL;
    KXLDSect **reorder_buffer = NULL;
    KXLDSectionName *section_name = NULL;
    const char *segname = NULL;
    u_int sect_index = 0, legacy_index = 0, sect_reorder_index = 0;
    u_int i = 0, j = 0;
    u_int sect_start = 0, sect_end = 0, legacy_start = 0, legacy_end = 0;
    u_int nsects = 0;

    check(seg);
    check(section_order);

    /* Allocate the reorder buffer with enough space to hold all of the
     * segment's sections.
     */

    reorder_buffer = kxld_alloc(
        seg->sects.nitems * sizeof(*reorder_buffer));
    require_action(reorder_buffer, finish, rval = KERN_RESOURCE_SHORTAGE);

    while (legacy_index < section_order->nitems) {
        /* Find the next group of sections with a common segment in the
         * section_order array.
         */

        legacy_start = legacy_index++;
        legacy_end = legacy_index;

        section_name = kxld_array_get_item(section_order, legacy_start);
        segname = section_name->segname;
        while (legacy_index < section_order->nitems) {
            section_name = kxld_array_get_item(section_order, legacy_index);
            if (!streq_safe(segname, section_name->segname,
                sizeof(section_name->segname))) {
                break;
            }

            ++legacy_end;
            ++legacy_index;
        }

        /* Find a group of sections in the kext that match the current
         * section_order segment.
         */

        sect_start = sect_index;
        sect_end = sect_index;

        while (sect_index < seg->sects.nitems) {
            sect = *(KXLDSect **) kxld_array_get_item(&seg->sects, sect_index);
            if (!streq_safe(segname, sect->segname, sizeof(sect->segname))) {
                break;
            }

            ++sect_end;
            ++sect_index;
        }

        nsects = sect_end - sect_start;
        if (!nsects) {
            continue;
        }

        /* Populate the reorder buffer with the current group of kext sections */

        for (i = sect_start; i < sect_end; ++i) {
            reorder_buffer[i - sect_start] =
                *(KXLDSect **) kxld_array_get_item(&seg->sects, i);
        }

        /* For each section_order section, scan the reorder buffer for a matching
         * kext section.  If one is found, copy it into the next slot in the
         * segment's section index.
         */

        sect_reorder_index = sect_start;
        for (i = legacy_start; i < legacy_end; ++i) {
            section_name = kxld_array_get_item(section_order, i);
            sect = NULL;

            for (j = 0; j < nsects; ++j) {
                sect = reorder_buffer[j];
                if (!sect) {
                    continue;
                }

                if (streq_safe(section_name->sectname, sect->sectname,
                    sizeof(section_name->sectname))) {
                    break;
                }

                sect = NULL;
            }

            if (sect) {
                (void) reorder_section(&seg->sects, &sect_reorder_index,
                    reorder_buffer, j);
            }
        }

        /* If any sections remain in the reorder buffer, they are not specified
         * in the section_order array, so append them to the section index in
         * the order they are found.
         */

        for (i = 0; i < nsects; ++i) {
            if (!reorder_buffer[i]) {
                continue;
            }
            reorder_section(&seg->sects, &sect_reorder_index, reorder_buffer, i);
        }
    }

    rval = KERN_SUCCESS;

finish:
    if (reorder_buffer) {
        kxld_free(reorder_buffer, seg->sects.nitems * sizeof(*reorder_buffer));
        reorder_buffer = NULL;
    }

    return rval;
}
/*******************************************************************************
*******************************************************************************/
static void
reorder_section(KXLDArray *sects, u_int *sect_reorder_index,
    KXLDSect **reorder_buffer, u_int reorder_buffer_index)
{
    KXLDSect **tmp = NULL;

    tmp = kxld_array_get_item(sects, *sect_reorder_index);

    *tmp = reorder_buffer[reorder_buffer_index];
    reorder_buffer[reorder_buffer_index]->sectnum = *sect_reorder_index;
    reorder_buffer[reorder_buffer_index] = NULL;

    ++(*sect_reorder_index);
}
/*******************************************************************************
*******************************************************************************/
kern_return_t
kxld_seg_init_linkedit(KXLDArray *segs)
{
    kern_return_t rval = KERN_FAILURE;
    KXLDSeg *seg = NULL;
    KXLDSeg *le = NULL;

    rval = kxld_array_resize(segs, 2);
    require_noerr(rval, finish);

    seg = kxld_array_get_item(segs, 0);
    le = kxld_array_get_item(segs, 1);

    strlcpy(le->segname, SEG_LINKEDIT, sizeof(le->segname));
    le->link_addr = kxld_round_page_cross_safe(seg->link_addr + seg->vmsize);
    le->maxprot = VM_PROT_ALL;
    le->initprot = VM_PROT_DEFAULT;

    rval = KERN_SUCCESS;

finish:
    return rval;
}
#endif /* KXLD_USER_OR_OBJECT */
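
/*
 * Illustrative example (values assumed, 4K page size): if the object segment
 * links at 0x1000 with a vmsize of 0x3200, kxld_seg_init_linkedit() above
 * places __LINKEDIT at kxld_round_page_cross_safe(0x1000 + 0x3200) = 0x5000,
 * with initprot VM_PROT_DEFAULT (read/write) and maxprot VM_PROT_ALL.
 */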
/*******************************************************************************
*******************************************************************************/
void
kxld_seg_clear(KXLDSeg *seg)
{
    check(seg);

    bzero(seg->segname, sizeof(seg->segname));

    /* Don't clear the individual sections here because kxld_kext.c will take
     * care of that.
     */
    kxld_array_clear(&seg->sects);
}
/*******************************************************************************
*******************************************************************************/
void
kxld_seg_deinit(KXLDSeg *seg)
{
    check(seg);

    kxld_array_deinit(&seg->sects);
    bzero(seg, sizeof(*seg));
}
/*******************************************************************************
*******************************************************************************/
kxld_size_t
kxld_seg_get_vmsize(const KXLDSeg *seg)
{
    check(seg);

    return seg->vmsize;
}
/*******************************************************************************
*******************************************************************************/
u_long
kxld_seg_get_macho_header_size(const KXLDSeg *seg, boolean_t is_32_bit)
{
    u_long size = 0;

    check(seg);

    if (is_32_bit) {
        size += sizeof(struct segment_command);
    } else {
        size += sizeof(struct segment_command_64);
    }
    size += seg->sects.nitems * kxld_sect_get_macho_header_size(is_32_bit);

    return size;
}
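
/*
 * Quick size check (illustrative; assumes kxld_sect_get_macho_header_size()
 * returns sizeof(struct section) or sizeof(struct section_64)): a 64-bit
 * segment with 3 sections needs sizeof(struct segment_command_64) +
 * 3 * sizeof(struct section_64) = 72 + 3 * 80 = 312 bytes of load-command
 * space.
 */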
/*******************************************************************************
*******************************************************************************/
/* This is no longer used, but may be useful some day... */
u_long
kxld_seg_get_macho_data_size(const KXLDSeg *seg)
{
    u_long size = 0;
    u_int i = 0;
    KXLDSect *sect = NULL;

    check(seg);

    for (i = 0; i < seg->sects.nitems; ++i) {
        sect = get_sect_by_index(seg, i);
        size = (u_long) kxld_sect_align_address(sect, size);
        size += kxld_sect_get_macho_data_size(sect);
    }

    return kxld_round_page_cross_safe(size);
}
/*******************************************************************************
*******************************************************************************/
static KXLDSect *
get_sect_by_index(const KXLDSeg *seg, u_int idx)
{
    check(seg);

    return *(KXLDSect **) kxld_array_get_item(&seg->sects, idx);
}
/*******************************************************************************
*******************************************************************************/
kern_return_t
kxld_seg_export_macho_to_file_buffer(const KXLDSeg *seg, u_char *buf,
    u_long *header_offset, u_long header_size,
    u_long *data_offset, u_long data_size,
    boolean_t is_32_bit)
{
    kern_return_t rval = KERN_FAILURE;
    KXLDSect *sect = NULL;
    u_long base_data_offset = *data_offset;
    u_int i = 0;
    struct segment_command *hdr32 =
        (struct segment_command *) ((void *) (buf + *header_offset));
    struct segment_command_64 *hdr64 =
        (struct segment_command_64 *) ((void *) (buf + *header_offset));

    check(seg);
    check(buf);
    check(header_offset);
    check(data_offset);

    /* Write out the header */

    KXLD_3264_FUNC(is_32_bit, rval,
        seg_export_macho_header_32, seg_export_macho_header_64,
        seg, buf, header_offset, header_size, *data_offset);
    require_noerr(rval, finish);

    /* Write out each section */

    for (i = 0; i < seg->sects.nitems; ++i) {
        sect = get_sect_by_index(seg, i);

        rval = kxld_sect_export_macho_to_file_buffer(sect, buf, header_offset,
            header_size, data_offset, data_size, is_32_bit);
        require_noerr(rval, finish);
    }

    /* Update the filesize */

    if (is_32_bit) {
        hdr32->filesize = (uint32_t) (*data_offset - base_data_offset);
    } else {
        hdr64->filesize = (uint64_t) (*data_offset - base_data_offset);
    }

    *data_offset = (u_long)kxld_round_page_cross_safe(*data_offset);

    rval = KERN_SUCCESS;

finish:
    return rval;
}
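
/*
 * Illustrative sketch, not from the original source: a hypothetical caller
 * exporting every segment of a kext into a flat file buffer.  The names segs,
 * buf, total_header_size, and total_data_size are assumptions; only the
 * kxld_seg_export_macho_to_file_buffer() signature comes from this file.
 *
 *   u_long header_offset = sizeof(struct mach_header_64);
 *   u_long data_offset = total_header_size;
 *   u_int i;
 *
 *   for (i = 0; i < segs->nitems; ++i) {
 *       KXLDSeg *seg = kxld_array_get_item(segs, i);
 *       rval = kxld_seg_export_macho_to_file_buffer(seg, buf,
 *           &header_offset, total_header_size,
 *           &data_offset, total_data_size, FALSE);   // FALSE selects 64-bit layout
 *       require_noerr(rval, finish);
 *   }
 */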
/*******************************************************************************
*******************************************************************************/
kern_return_t
kxld_seg_export_macho_to_vm(const KXLDSeg *seg,
    u_char *buf,
    u_long *header_offset,
    u_long header_size,
    u_long data_size,
    kxld_addr_t file_link_addr,
    boolean_t is_32_bit)
{
    kern_return_t rval = KERN_FAILURE;
    KXLDSect *sect = NULL;
    u_long data_offset;
    u_int i = 0;

    // data_offset is used to set the fileoff field in the segment header
    check(seg);
    check(buf);
    check(header_offset);

    data_offset = (u_long) (seg->link_addr - file_link_addr);

    /* Write out the header */

    KXLD_3264_FUNC(is_32_bit, rval,
        seg_export_macho_header_32, seg_export_macho_header_64,
        seg, buf,
        header_offset, header_size, data_offset);
    require_noerr(rval, finish);

    /* Write out each section */

    for (i = 0; i < seg->sects.nitems; ++i) {
        sect = get_sect_by_index(seg, i);

        rval = kxld_sect_export_macho_to_vm(sect, buf, header_offset,
            header_size, file_link_addr, data_size, is_32_bit);
        require_noerr(rval, finish);
    }

    rval = KERN_SUCCESS;

finish:
    return rval;
}
#if KXLD_USER_OR_ILP32
/*******************************************************************************
*******************************************************************************/
static kern_return_t
seg_export_macho_header_32(const KXLDSeg *seg, u_char *buf,
    u_long *header_offset, u_long header_size, u_long data_offset)
{
    kern_return_t rval = KERN_FAILURE;
    struct segment_command *seghdr = NULL;

    check(seg);
    check(buf);
    check(header_offset);

    require_action(sizeof(*seghdr) <= header_size - *header_offset, finish,
        rval = KERN_FAILURE);
    seghdr = (struct segment_command *) ((void *) (buf + *header_offset));
    *header_offset += sizeof(*seghdr);

    seghdr->cmd = LC_SEGMENT;
    seghdr->cmdsize = (uint32_t) sizeof(*seghdr);
    seghdr->cmdsize +=
        (uint32_t) (seg->sects.nitems * kxld_sect_get_macho_header_size(TRUE));
    strlcpy(seghdr->segname, seg->segname, sizeof(seghdr->segname));
    seghdr->vmaddr = (uint32_t) seg->link_addr;
    seghdr->vmsize = (uint32_t) seg->vmsize;
    seghdr->fileoff = (uint32_t) data_offset;
    seghdr->filesize = (uint32_t) seg->vmsize;
    seghdr->maxprot = seg->maxprot;
    seghdr->initprot = seg->initprot;
    seghdr->nsects = seg->sects.nitems;

#if SPLIT_KEXTS_DEBUG
    kxld_log(kKxldLogLinking, kKxldLogErr,
        "segname %s seghdr %p vmaddr %p vmsize 0x%02X %u fileoff 0x%02X %u <%s>",
        seg->segname[0] ? seg->segname : "none",
        (void *) seghdr,
        (void *) ((uint64_t)seghdr->vmaddr),
        seghdr->vmsize, seghdr->vmsize,
        seghdr->fileoff, seghdr->fileoff,
        __func__);
#endif

    rval = KERN_SUCCESS;

finish:
    return rval;
}
#endif /* KXLD_USER_OR_ILP32 */
#if KXLD_USER_OR_LP64
/*******************************************************************************
*******************************************************************************/
static kern_return_t
seg_export_macho_header_64(const KXLDSeg *seg, u_char *buf,
    u_long *header_offset, u_long header_size, u_long data_offset)
{
    kern_return_t rval = KERN_FAILURE;
    struct segment_command_64 *seghdr = NULL;

    check(seg);
    check(buf);
    check(header_offset);

    require_action(sizeof(*seghdr) <= header_size - *header_offset, finish,
        rval = KERN_FAILURE);

#if SPLIT_KEXTS_DEBUG
    {
        struct mach_header_64 *mach;

        mach = (struct mach_header_64 *) ((void *) buf);

        if (mach->magic != MH_MAGIC_64) {
            kxld_log(kKxldLogLinking, kKxldLogErr,
                "bad macho header at %p <%s>",
                (void *) mach, __func__);
        }
    }
#endif

    seghdr = (struct segment_command_64 *) ((void *) (buf + *header_offset));
    *header_offset += sizeof(*seghdr);

    seghdr->cmd = LC_SEGMENT_64;
    seghdr->cmdsize = (uint32_t) sizeof(*seghdr);
    seghdr->cmdsize +=
        (uint32_t) (seg->sects.nitems * kxld_sect_get_macho_header_size(FALSE));
    strlcpy(seghdr->segname, seg->segname, sizeof(seghdr->segname));
    seghdr->vmaddr = (uint64_t) seg->link_addr;
    seghdr->vmsize = (uint64_t) seg->vmsize;
    seghdr->fileoff = (uint64_t) data_offset;
    seghdr->filesize = (uint64_t) seg->vmsize;
    seghdr->maxprot = seg->maxprot;
    seghdr->initprot = seg->initprot;
    seghdr->nsects = seg->sects.nitems;

#if SPLIT_KEXTS_DEBUG
    {
        kxld_log(kKxldLogLinking, kKxldLogErr,
            "%p >>> Start of %s seghdr (size %lu) <%s>",
            (void *) seghdr,
            seg->segname[0] ? seg->segname : "none",
            sizeof(*seghdr),
            __func__);
        kxld_log(kKxldLogLinking, kKxldLogErr,
            "%p <<< End of %s seghdr <%s>",
            (void *) ((u_char *)seghdr + sizeof(*seghdr)),
            seg->segname[0] ? seg->segname : "none",
            __func__);

        kxld_log(kKxldLogLinking, kKxldLogErr,
            "%s seghdr, cmdsize %d vmaddr %p vmsize %p %llu fileoff %p %llu <%s>",
            seg->segname[0] ? seg->segname : "none",
            seghdr->cmdsize,
            (void *) seghdr->vmaddr,
            (void *) seghdr->vmsize,
            seghdr->vmsize,
            (void *) seghdr->fileoff,
            seghdr->fileoff,
            __func__);
    }
#endif

    rval = KERN_SUCCESS;

finish:
    return rval;
}
#endif /* KXLD_USER_OR_LP64 */
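
/*
 * Illustrative example (values assumed; also assumes
 * kxld_sect_get_macho_header_size(FALSE) == sizeof(struct section_64)): for a
 * __DATA segment with two sections, link_addr 0xffffff7f80a06000, vmsize
 * 0x3000, and data_offset 0x6000, seg_export_macho_header_64() emits an
 * LC_SEGMENT_64 command with cmdsize 72 + 2 * 80 = 232, vmaddr
 * 0xffffff7f80a06000, vmsize and filesize 0x3000, fileoff 0x6000, and
 * nsects 2.  The filesize written here is provisional;
 * kxld_seg_export_macho_to_file_buffer() overwrites it with the number of
 * bytes actually emitted.
 */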
/*******************************************************************************
*******************************************************************************/
kern_return_t
kxld_seg_add_section(KXLDSeg *seg, KXLDSect *sect)
{
    kern_return_t rval = KERN_FAILURE;
    KXLDSect **sectp = NULL;
    u_int i = 0;

    check(seg);
    check(sect);
    require_action(streq_safe(seg->segname, sect->segname, sizeof(seg->segname)),
        finish, rval = KERN_FAILURE);

    /* Add the section into the section index */

    for (i = 0; i < seg->sects.nitems; ++i) {
        sectp = kxld_array_get_item(&seg->sects, i);
        if (NULL == *sectp) {
            *sectp = sect;
            break;
        }
    }

    /* If no free slot was found, the section index is full */
    require_action(i < seg->sects.nitems, finish, rval = KERN_FAILURE);

    rval = KERN_SUCCESS;

finish:
    return rval;
}
/*******************************************************************************
*******************************************************************************/
kern_return_t
kxld_seg_finish_init(KXLDSeg *seg)
{
    kern_return_t rval = KERN_FAILURE;
    u_int i = 0;
    KXLDSect *sect = NULL;
    kxld_addr_t maxaddr = 0;
    kxld_size_t maxsize = 0;

    /* If we already have a size for this segment (e.g. from the mach-o load
     * command) then don't recalculate the segment size.  This is safer since
     * when we recalculate we are making assumptions about the page alignment
     * and padding that the kext mach-o file was built with.  Better to trust
     * the mach-o info, if we have it.  If we don't (i.e. vmsize == 0), then
     * add up the section sizes and take a best guess at page padding.
     */
    if ((seg->vmsize == 0) && (seg->sects.nitems)) {
        for (i = 0; i < seg->sects.nitems; ++i) {
            sect = get_sect_by_index(seg, i);
            require_action(sect, finish, rval = KERN_FAILURE);
            if (sect->base_addr > maxaddr) {
                maxaddr = sect->base_addr;
                maxsize = sect->size;
            }
        }

        seg->vmsize = kxld_round_page_cross_safe(maxaddr +
            maxsize - seg->base_addr);
    }

    rval = KERN_SUCCESS;

finish:
    return rval;
}
/*******************************************************************************
*******************************************************************************/
void
kxld_seg_set_vm_protections(KXLDSeg *seg, boolean_t strict_protections)
{
    if (strict_protections) {
        if (!strncmp(seg->segname, SEG_TEXT, sizeof(seg->segname))) {
            seg->initprot = TEXT_SEG_PROT;
            seg->maxprot = TEXT_SEG_PROT;
        } else {
            seg->initprot = DATA_SEG_PROT;
            seg->maxprot = DATA_SEG_PROT;
        }
    } else {
        seg->initprot = VM_PROT_ALL;
        seg->maxprot = VM_PROT_ALL;
    }
}
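
/*
 * Net effect, per the TEXT_SEG_PROT/DATA_SEG_PROT macros at the top of this
 * file: with strict protections a __TEXT segment ends up r-x (VM_PROT_READ |
 * VM_PROT_EXECUTE) and every other segment rw- (VM_PROT_READ | VM_PROT_WRITE);
 * without strict protections both initprot and maxprot stay at VM_PROT_ALL.
 */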
/*******************************************************************************
*******************************************************************************/
void
kxld_seg_relocate(KXLDSeg *seg, kxld_addr_t link_addr)
{
    KXLDSect *sect = NULL;
    u_int i = 0;
    splitKextLinkInfo * link_info = (splitKextLinkInfo *) link_addr;
    kxld_addr_t my_link_addr;

    if (isOldInterface) {
        seg->link_addr += link_addr;
    } else {
        if (isSplitKext) {
            // we have a split kext
            if (kxld_seg_is_text_seg(seg)) {
                // assumes this is the beginning of the kext
                my_link_addr = link_info->vmaddr_TEXT;
                seg->link_addr = my_link_addr;
            } else if (kxld_seg_is_text_exec_seg(seg)) {
                my_link_addr = link_info->vmaddr_TEXT_EXEC;
                seg->link_addr = my_link_addr;
                // vmaddr_TEXT_EXEC is the actual vmaddr for this segment, so we need
                // to adjust for kxld_sect_relocate assuming the link addr is
                // the address of the kext (macho header in __TEXT)
                my_link_addr -= seg->base_addr;
            } else if (kxld_seg_is_data_seg(seg)) {
                my_link_addr = link_info->vmaddr_DATA;
                seg->link_addr = my_link_addr;
                // vmaddr_DATA is the actual vmaddr for this segment, so we need
                // to adjust for kxld_sect_relocate assuming the link addr is
                // the address of the kext (macho header in __TEXT)
                my_link_addr -= seg->base_addr;
            } else if (kxld_seg_is_data_const_seg(seg)) {
                my_link_addr = link_info->vmaddr_DATA_CONST;
                seg->link_addr = my_link_addr;
                // vmaddr_DATA_CONST is the actual vmaddr for this segment, so we need
                // to adjust for kxld_sect_relocate assuming the link addr is
                // the address of the kext (macho header in __TEXT)
                my_link_addr -= seg->base_addr;
            } else if (kxld_seg_is_llvm_cov_seg(seg)) {
                my_link_addr = link_info->vmaddr_LLVM_COV;
                seg->link_addr = my_link_addr;
                // vmaddr_LLVM_COV is the actual vmaddr for this segment, so we need
                // to adjust for kxld_sect_relocate assuming the link addr is
                // the address of the kext (macho header in __TEXT)
                my_link_addr -= seg->base_addr;
            } else if (kxld_seg_is_linkedit_seg(seg)) {
                my_link_addr = link_info->vmaddr_LINKEDIT;
                seg->link_addr = my_link_addr;
                // vmaddr_LINKEDIT is the actual vmaddr for this segment, so we need
                // to adjust for kxld_sect_relocate assuming the link addr is
                // the address of the kext (macho header in __TEXT)
                my_link_addr -= seg->base_addr;
            } else {
                kxld_log(kKxldLogLinking, kKxldLogErr,
                    " not expecting this segment %s!!! <%s>",
                    seg->segname[0] ? seg->segname : "none",
                    __func__);
                my_link_addr = link_info->vmaddr_TEXT;
                seg->link_addr += my_link_addr;
            }
        } else {
            my_link_addr = link_info->vmaddr_TEXT;
            seg->link_addr += my_link_addr;
        }

#if SPLIT_KEXTS_DEBUG
        {
            kxld_log(kKxldLogLinking, kKxldLogErr,
                "%p >>> Start of %s segment (vmsize %llu) <%s>",
                (void *) seg->link_addr,
                seg->segname[0] ? seg->segname : "none",
                seg->vmsize,
                __func__);
            kxld_log(kKxldLogLinking, kKxldLogErr,
                "%p <<< End of %s segment <%s>",
                (void *) (seg->link_addr + seg->vmsize),
                seg->segname[0] ? seg->segname : "none",
                __func__);
        }
#endif
    }

    for (i = 0; i < seg->sects.nitems; ++i) {
        sect = get_sect_by_index(seg, i);
        if (isOldInterface) {
            kxld_sect_relocate(sect, link_addr);
        } else {
            kxld_sect_relocate(sect, my_link_addr);
        }
    }
}
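
/*
 * Illustrative example (addresses assumed): with the old interface, link_addr
 * is a simple slide, so a segment whose link_addr is 0x4000 relocated with
 * link_addr 0xffffff8000600000 ends up at 0xffffff8000604000, and its sections
 * slide by the same amount.  With the split-kext interface, link_addr is
 * really a splitKextLinkInfo pointer: a __TEXT_EXEC segment takes
 * vmaddr_TEXT_EXEC as its own link_addr, but its sections are relocated with
 * (vmaddr_TEXT_EXEC - base_addr) so that section addresses still behave as if
 * the slide were measured from the kext's Mach-O header in __TEXT.
 */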
/*******************************************************************************
*******************************************************************************/
void
kxld_seg_populate_linkedit(KXLDSeg *seg, const KXLDSymtab *symtab, boolean_t is_32_bit
#if KXLD_PIC_KEXTS
    , const KXLDArray *locrelocs
    , const KXLDArray *extrelocs
    , boolean_t target_supports_slideable_kexts
#endif  /* KXLD_PIC_KEXTS */
    , uint32_t splitinfolc_size
    )
{
    u_long size = 0;

    size += kxld_symtab_get_macho_data_size(symtab, is_32_bit);

#if KXLD_PIC_KEXTS
    if (target_supports_slideable_kexts) {
        size += kxld_reloc_get_macho_data_size(locrelocs, extrelocs);
    }
#endif  /* KXLD_PIC_KEXTS */

    // 0 unless this is a split kext
    size += splitinfolc_size;

    seg->vmsize = kxld_round_page_cross_safe(size);
}
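
/*
 * Worked example (illustrative; assumes a 4K page size): a symbol table
 * needing 0x1840 bytes of Mach-O data, 0x220 bytes of relocation entries for
 * a slideable kext, and a 0x18-byte split-info payload give
 * size = 0x1840 + 0x220 + 0x18 = 0x1a78, so __LINKEDIT's vmsize rounds up to
 * 0x2000.
 */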
/*******************************************************************************
*******************************************************************************/
boolean_t
kxld_seg_is_split_seg(const KXLDSeg *seg)
{
    boolean_t result = FALSE;

    check(seg);
    if (isSplitKext) {
        if (kxld_seg_is_data_seg(seg) || kxld_seg_is_linkedit_seg(seg) ||
            kxld_seg_is_text_exec_seg(seg) || kxld_seg_is_data_const_seg(seg) ||
            kxld_seg_is_llvm_cov_seg(seg)) {
            result = TRUE;
        }
    }

    return result;
}

/*******************************************************************************
*******************************************************************************/
boolean_t
kxld_seg_is_text_seg(const KXLDSeg *seg)
{
    boolean_t result = FALSE;

    check(seg);
    result = !strncmp(seg->segname, SEG_TEXT, sizeof(seg->segname));

    return result;
}

/*******************************************************************************
*******************************************************************************/
boolean_t
kxld_seg_is_text_exec_seg(const KXLDSeg *seg)
{
    boolean_t result = FALSE;

    check(seg);
    result = !strncmp(seg->segname, "__TEXT_EXEC", sizeof(seg->segname));

    return result;
}

/*******************************************************************************
*******************************************************************************/
boolean_t
kxld_seg_is_data_seg(const KXLDSeg *seg)
{
    boolean_t result = FALSE;

    check(seg);
    result = !strncmp(seg->segname, SEG_DATA, sizeof(seg->segname));

    return result;
}

/*******************************************************************************
*******************************************************************************/
boolean_t
kxld_seg_is_data_const_seg(const KXLDSeg *seg)
{
    boolean_t result = FALSE;

    check(seg);
    result = !strncmp(seg->segname, "__DATA_CONST", sizeof(seg->segname));

    return result;
}

/*******************************************************************************
*******************************************************************************/
boolean_t
kxld_seg_is_linkedit_seg(const KXLDSeg *seg)
{
    boolean_t result = FALSE;

    check(seg);
    result = !strncmp(seg->segname, SEG_LINKEDIT, sizeof(seg->segname));

    return result;
}

/*******************************************************************************
*******************************************************************************/
boolean_t
kxld_seg_is_llvm_cov_seg(const KXLDSeg *seg)
{
    boolean_t result = FALSE;

    check(seg);
    result = !strncmp(seg->segname, "__LLVM_COV", sizeof(seg->segname));

    return result;
}