/*
 * Copyright (c) 2008 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <mach/vm_prot.h>
#include <mach-o/loader.h>
#include <sys/types.h>

#include <mach/vm_param.h>
#include <mach/mach_init.h>

#define DEBUG_ASSERT_COMPONENT_NAME_STRING "kxld"
#include <AssertMacros.h>

#include "kxld_reloc.h"
#include "kxld_sect.h"
#include "kxld_seg.h"
#include "kxld_symtab.h"
#include "kxld_util.h"
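/* Default VM protections applied by kxld_seg_set_vm_protections() below:
 * __TEXT segments are mapped read/execute, and writable data segments are
 * mapped read/write.
 */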
#define TEXT_SEG_PROT (VM_PROT_READ | VM_PROT_EXECUTE)
#define DATA_SEG_PROT (VM_PROT_READ | VM_PROT_WRITE)
#if KXLD_USER_OR_OBJECT
static kern_return_t reorder_sections(KXLDSeg *seg, KXLDArray *section_order);
static void reorder_section(KXLDArray *sects, u_int *sect_reorder_index,
    KXLDSect **reorder_buffer, u_int reorder_buffer_index);
#endif /* KXLD_USER_OR_OBJECT */

static KXLDSeg * get_segment_by_name(KXLDArray *segarray, const char *name);
#if KXLD_USER_OR_ILP32
static kern_return_t seg_export_macho_header_32(const KXLDSeg *seg, u_char *buf,
    u_long *header_offset, u_long header_size, u_long data_offset);
#endif /* KXLD_USER_OR_ILP32 */

#if KXLD_USER_OR_LP64
static kern_return_t seg_export_macho_header_64(const KXLDSeg *seg, u_char *buf,
    u_long *header_offset, u_long header_size, u_long data_offset);
#endif /* KXLD_USER_OR_LP64 */

static KXLDSect * get_sect_by_index(const KXLDSeg *seg, u_int idx);
#if KXLD_USER_OR_ILP32
/*******************************************************************************
* Initialize a segment from a 32-bit Mach-O segment_command.
*******************************************************************************/
kern_return_t
kxld_seg_init_from_macho_32(KXLDSeg *seg, struct segment_command *src)
{
    kern_return_t rval = KERN_FAILURE;

    strlcpy(seg->segname, src->segname, sizeof(seg->segname));
    seg->base_addr = src->vmaddr;
    seg->link_addr = src->vmaddr;
    seg->vmsize = src->vmsize;
    seg->fileoff = src->fileoff;
    seg->maxprot = src->maxprot;
    seg->initprot = src->initprot;
    seg->flags = src->flags;

    rval = kxld_array_init(&seg->sects, sizeof(KXLDSect *), src->nsects);
    require_noerr(rval, finish);

    rval = KERN_SUCCESS;
finish:
    return rval;
}
#endif /* KXLD_USER_OR_ILP32 */
#if KXLD_USER_OR_LP64
/*******************************************************************************
* Initialize a segment from a 64-bit Mach-O segment_command_64.
*******************************************************************************/
kern_return_t
kxld_seg_init_from_macho_64(KXLDSeg *seg, struct segment_command_64 *src)
{
    kern_return_t rval = KERN_FAILURE;

    strlcpy(seg->segname, src->segname, sizeof(seg->segname));
    seg->base_addr = src->vmaddr;
    seg->link_addr = src->vmaddr;
    seg->vmsize = src->vmsize;
    seg->fileoff = src->fileoff;
    seg->maxprot = src->maxprot;
    seg->initprot = src->initprot;
    seg->flags = src->flags;

    rval = kxld_array_init(&seg->sects, sizeof(KXLDSect *), src->nsects);
    require_noerr(rval, finish);

    rval = KERN_SUCCESS;
finish:
    return rval;
}
#endif /* KXLD_USER_OR_LP64 */
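/* The 32-bit and 64-bit initializers above mirror each other: they copy the
 * same segment_command fields and differ only in the width of the source
 * structure.
 */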
#if KXLD_USER_OR_OBJECT
/*******************************************************************************
* Create a single segment that spans every section in the section array.
*******************************************************************************/
kern_return_t
kxld_seg_create_seg_from_sections(KXLDArray *segarray, KXLDArray *sectarray)
{
    kern_return_t rval = KERN_FAILURE;
    KXLDSeg *seg = NULL;
    KXLDSect *sect = NULL;
    KXLDSect **sectp = NULL;
    u_int i = 0;

    /* Initialize the segment array to one segment */

    rval = kxld_array_init(segarray, sizeof(KXLDSeg), 1);
    require_noerr(rval, finish);

    /* Initialize the segment */

    seg = kxld_array_get_item(segarray, 0);
    seg->initprot = VM_PROT_ALL;
    seg->maxprot = VM_PROT_ALL;

    /* Add the sections to the segment */

    rval = kxld_array_init(&seg->sects, sizeof(KXLDSect *), sectarray->nitems);
    require_noerr(rval, finish);

    for (i = 0; i < sectarray->nitems; ++i) {
        sect = kxld_array_get_item(sectarray, i);
        sectp = kxld_array_get_item(&seg->sects, i);
        *sectp = sect;
    }

    rval = KERN_SUCCESS;
finish:
    return rval;
}
/*******************************************************************************
* Lay out the single object-file segment: reorder its sections, place them
* after the Mach-O header pages, and compute the segment's vmsize.
*******************************************************************************/
kern_return_t
kxld_seg_finalize_object_segment(KXLDArray *segarray, KXLDArray *section_order,
    u_long hdrsize)
{
    kern_return_t rval = KERN_FAILURE;
    KXLDSeg *seg = NULL;
    KXLDSect *sect = NULL;
    u_long sect_offset = 0;
    u_int i = 0;

    check(section_order);
    require_action(segarray->nitems == 1, finish, rval=KERN_FAILURE);

    seg = kxld_array_get_item(segarray, 0);

    /* Reorder the sections */

    rval = reorder_sections(seg, section_order);
    require_noerr(rval, finish);

    /* Set the initial link address at the end of the header pages */

    seg->link_addr = round_page(hdrsize);

    /* Fix up all of the section addresses */

    sect_offset = (u_long) seg->link_addr;
    for (i = 0; i < seg->sects.nitems; ++i) {
        sect = *(KXLDSect **) kxld_array_get_item(&seg->sects, i);

        sect->link_addr = kxld_sect_align_address(sect, sect_offset);
        sect_offset = (u_long) (sect->link_addr + sect->size);
    }

    /* Finish initializing the segment */

    seg->vmsize = round_page(sect_offset) - seg->link_addr;

    rval = KERN_SUCCESS;
finish:
    return rval;
}
/*******************************************************************************
* The legacy section ordering used by kld was based on the order of sections
* in the kernel file. To achieve the same layout, we save the kernel's
* section ordering as an array of section names when the kernel file itself
* is linked. Then, when kexts are linked with the KXLD_LEGACY_LAYOUT flag,
* we refer to the kernel's section layout to order the kext's sections.
*
* The algorithm below is as follows. We iterate through all of the kernel's
* sections grouped by segment name, so that we are processing all of the __TEXT
* sections, then all of the __DATA sections, etc. We then iterate through the
* kext's sections with a similar grouping, looking for sections that match
* the current kernel section. In this way, we order all of the matching
* kext sections in the order in which they appear in the kernel, and then place
* all remaining kext sections at the end of the current segment grouping in
* the order in which they originally appeared. Sections that appear only in
* the kernel are not created; segments that appear only in the kext are
* left in their original ordering.
*
* In the implementation below, we use a reorder buffer to hold pointers to the
* sections of the current working segment. We scan this buffer looking for
* matching sections, placing them in the segment's section index as we find
* them. If this function must exit early, the segment's section index is left
* in an inconsistent state.
*******************************************************************************/
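/* Illustrative example (hypothetical section names, not drawn from any real
 * kernel): if the saved kernel order lists
 *     __TEXT,__text   __TEXT,__const
 * and the kext's __TEXT group arrives as
 *     __TEXT,__const  __TEXT,__cstring  __TEXT,__text
 * then the reordered kext group becomes
 *     __TEXT,__text   __TEXT,__const    __TEXT,__cstring
 * -- matching sections take the kernel's order, and __cstring, which the
 * kernel order does not mention, is appended after them in its original
 * relative position.
 */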
static kern_return_t
reorder_sections(KXLDSeg *seg, KXLDArray *section_order)
{
    kern_return_t rval = KERN_FAILURE;
    KXLDSect *sect = NULL;
    KXLDSect **reorder_buffer = NULL;
    KXLDSectionName *section_name = NULL;
    const char *segname = NULL;
    u_int sect_index = 0, legacy_index = 0, sect_reorder_index = 0;
    u_int i = 0, j = 0, nsects = 0;
    u_int sect_start = 0, sect_end = 0, legacy_start = 0, legacy_end = 0;

    check(section_order);

    /* Allocate the reorder buffer with enough space to hold all of the
     * segment's sections.
     */
    reorder_buffer = kxld_alloc(
        seg->sects.nitems * sizeof(*reorder_buffer));
    require_action(reorder_buffer, finish, rval=KERN_RESOURCE_SHORTAGE);

    while (legacy_index < section_order->nitems) {

        /* Find the next group of sections with a common segment in the
         * section_order array.
         */
        legacy_start = legacy_index++;
        legacy_end = legacy_index;

        section_name = kxld_array_get_item(section_order, legacy_start);
        segname = section_name->segname;
        while (legacy_index < section_order->nitems) {
            section_name = kxld_array_get_item(section_order, legacy_index);
            if (!streq_safe(segname, section_name->segname,
                    sizeof(section_name->segname)))
            {
                break;
            }

            ++legacy_index;
            ++legacy_end;
        }

        /* Find a group of sections in the kext that match the current
         * section_order segment.
         */
        sect_start = sect_index;
        sect_end = sect_index;

        while (sect_index < seg->sects.nitems) {
            sect = *(KXLDSect **) kxld_array_get_item(&seg->sects, sect_index);
            if (!streq_safe(segname, sect->segname, sizeof(sect->segname))) {
                break;
            }

            ++sect_index;
            ++sect_end;
        }

        nsects = sect_end - sect_start;

        if (!nsects) continue;

        /* Populate the reorder buffer with the current group of kext sections */
        for (i = sect_start; i < sect_end; ++i) {
            reorder_buffer[i - sect_start] =
                *(KXLDSect **) kxld_array_get_item(&seg->sects, i);
        }

        /* For each section_order section, scan the reorder buffer for a matching
         * kext section. If one is found, copy it into the next slot in the
         * segment's section index.
         */
        sect_reorder_index = sect_start;
        for (i = legacy_start; i < legacy_end; ++i) {
            section_name = kxld_array_get_item(section_order, i);
            sect = NULL;

            for (j = 0; j < nsects; ++j) {
                sect = reorder_buffer[j];

                /* Entries already moved into the section index are NULL */
                if (!sect) continue;

                if (streq_safe(section_name->sectname, sect->sectname,
                        sizeof(section_name->sectname)))
                {
                    break;
                }

                sect = NULL;
            }

            if (!sect) continue;

            (void) reorder_section(&seg->sects, &sect_reorder_index,
                reorder_buffer, j);
        }

        /* If any sections remain in the reorder buffer, they are not specified
         * in the section_order array, so append them to the section index in
         * the order they are found.
         */
        for (i = 0; i < nsects; ++i) {
            if (!reorder_buffer[i]) continue;
            reorder_section(&seg->sects, &sect_reorder_index, reorder_buffer, i);
        }
    }

    rval = KERN_SUCCESS;

finish:
    if (reorder_buffer) {
        kxld_free(reorder_buffer, seg->sects.nitems * sizeof(*reorder_buffer));
        reorder_buffer = NULL;
    }

    return rval;
}
/*******************************************************************************
* Move the section at reorder_buffer[reorder_buffer_index] into the next open
* slot of the segment's section index and clear its reorder buffer entry.
*******************************************************************************/
static void
reorder_section(KXLDArray *sects, u_int *sect_reorder_index,
    KXLDSect **reorder_buffer, u_int reorder_buffer_index)
{
    KXLDSect **tmp = NULL;

    tmp = kxld_array_get_item(sects, *sect_reorder_index);

    *tmp = reorder_buffer[reorder_buffer_index];
    reorder_buffer[reorder_buffer_index]->sectnum = *sect_reorder_index;
    reorder_buffer[reorder_buffer_index] = NULL;

    ++(*sect_reorder_index);
}
/*******************************************************************************
* Add a __LINKEDIT segment immediately after the object segment.
*******************************************************************************/
kern_return_t
kxld_seg_init_linkedit(KXLDArray *segs)
{
    kern_return_t rval = KERN_FAILURE;
    KXLDSeg *seg = NULL;
    KXLDSeg *le = NULL;

    rval = kxld_array_resize(segs, 2);
    require_noerr(rval, finish);

    seg = kxld_array_get_item(segs, 0);
    le = kxld_array_get_item(segs, 1);

    strlcpy(le->segname, SEG_LINKEDIT, sizeof(le->segname));
    le->link_addr = round_page(seg->link_addr + seg->vmsize);
    le->maxprot = VM_PROT_ALL;
    le->initprot = VM_PROT_DEFAULT;

    rval = KERN_SUCCESS;
finish:
    return rval;
}

#endif /* KXLD_USER_OR_OBJECT */
/*******************************************************************************
*******************************************************************************/
void
kxld_seg_clear(KXLDSeg *seg)
{
    bzero(seg->segname, sizeof(seg->segname));

    seg->base_addr = 0;
    seg->link_addr = 0;
    seg->vmsize = 0;
    seg->fileoff = 0;
    seg->maxprot = 0;
    seg->initprot = 0;
    seg->flags = 0;

    /* Don't clear the individual sections here because kxld_kext.c will take
     * care of that.
     */
    kxld_array_clear(&seg->sects);
}
/*******************************************************************************
*******************************************************************************/
void
kxld_seg_deinit(KXLDSeg *seg)
{
    kxld_array_deinit(&seg->sects);
    bzero(seg, sizeof(*seg));
}
/*******************************************************************************
*******************************************************************************/
kxld_size_t
kxld_seg_get_vmsize(const KXLDSeg *seg)
{
    return seg->vmsize;
}
/*******************************************************************************
*******************************************************************************/
u_long
kxld_seg_get_macho_header_size(const KXLDSeg *seg, boolean_t is_32_bit)
{
    u_long size = 0;

    if (is_32_bit) {
        size += sizeof(struct segment_command);
    } else {
        size += sizeof(struct segment_command_64);
    }
    size += seg->sects.nitems * kxld_sect_get_macho_header_size(is_32_bit);

    return size;
}
/*******************************************************************************
*******************************************************************************/
/* This is no longer used, but may be useful some day... */
u_long
kxld_seg_get_macho_data_size(const KXLDSeg *seg)
{
    u_long size = 0;
    u_int i = 0;
    KXLDSect *sect = NULL;

    for (i = 0; i < seg->sects.nitems; ++i) {
        sect = get_sect_by_index(seg, i);
        size = (u_long) kxld_sect_align_address(sect, size);
        size += kxld_sect_get_macho_data_size(sect);
    }

    return round_page(size);
}
/*******************************************************************************
*******************************************************************************/
static KXLDSect *
get_sect_by_index(const KXLDSeg *seg, u_int idx)
{
    return *(KXLDSect **) kxld_array_get_item(&seg->sects, idx);
}
/*******************************************************************************
*******************************************************************************/
kern_return_t
kxld_seg_export_macho_to_file_buffer(const KXLDSeg *seg, u_char *buf,
    u_long *header_offset, u_long header_size,
    u_long *data_offset, u_long data_size,
    boolean_t is_32_bit)
{
    kern_return_t rval = KERN_FAILURE;
    KXLDSect *sect = NULL;
    u_long base_data_offset = *data_offset;
    u_int i = 0;
    struct segment_command *hdr32 =
        (struct segment_command *) ((void *) (buf + *header_offset));
    struct segment_command_64 *hdr64 =
        (struct segment_command_64 *) ((void *) (buf + *header_offset));

    check(header_offset);

    /* Write out the header */

    KXLD_3264_FUNC(is_32_bit, rval,
        seg_export_macho_header_32, seg_export_macho_header_64,
        seg, buf, header_offset, header_size, *data_offset);
    require_noerr(rval, finish);

    /* Write out each section */

    for (i = 0; i < seg->sects.nitems; ++i) {
        sect = get_sect_by_index(seg, i);

        rval = kxld_sect_export_macho_to_file_buffer(sect, buf, header_offset,
            header_size, data_offset, data_size, is_32_bit);
        require_noerr(rval, finish);
    }

    /* Update the filesize */

    if (is_32_bit) {
        hdr32->filesize = (uint32_t) (*data_offset - base_data_offset);
    } else {
        hdr64->filesize = (uint64_t) (*data_offset - base_data_offset);
    }

    *data_offset = round_page(*data_offset);

    rval = KERN_SUCCESS;
finish:
    return rval;
}
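/* Hypothetical caller sketch (not part of this file): a kext writer would
 * typically export every segment in turn, letting header_offset and
 * data_offset advance as each segment command and its section data are
 * written, e.g.:
 *
 *     for (i = 0; i < segs->nitems; ++i) {
 *         seg = kxld_array_get_item(segs, i);
 *         rval = kxld_seg_export_macho_to_file_buffer(seg, buf,
 *             &header_offset, header_size, &data_offset, data_size,
 *             is_32_bit);
 *         require_noerr(rval, finish);
 *     }
 */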
/*******************************************************************************
*******************************************************************************/
kern_return_t
kxld_seg_export_macho_to_vm(const KXLDSeg *seg, u_char *buf,
    u_long *header_offset, u_long header_size,
    u_long data_size, kxld_addr_t file_link_addr,
    boolean_t is_32_bit)
{
    kern_return_t rval = KERN_FAILURE;
    KXLDSect *sect = NULL;
    u_long data_offset = (u_long) (seg->link_addr - file_link_addr);
    u_int i = 0;

    check(header_offset);

    /* Write out the header */

    KXLD_3264_FUNC(is_32_bit, rval,
        seg_export_macho_header_32, seg_export_macho_header_64,
        seg, buf, header_offset, header_size, data_offset);
    require_noerr(rval, finish);

    /* Write out each section */

    for (i = 0; i < seg->sects.nitems; ++i) {
        sect = get_sect_by_index(seg, i);

        rval = kxld_sect_export_macho_to_vm(sect, buf, header_offset,
            header_size, file_link_addr, data_size, is_32_bit);
        require_noerr(rval, finish);
    }

    rval = KERN_SUCCESS;
finish:
    return rval;
}
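/* Note the difference between the two export paths above: the file-buffer
 * variant advances a caller-supplied data_offset cursor and rounds it to a
 * page boundary once the segment is written, while the VM variant derives the
 * segment's data offset directly from its relocated link address relative to
 * file_link_addr.
 */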
#if KXLD_USER_OR_ILP32
/*******************************************************************************
*******************************************************************************/
static kern_return_t
seg_export_macho_header_32(const KXLDSeg *seg, u_char *buf,
    u_long *header_offset, u_long header_size, u_long data_offset)
{
    kern_return_t rval = KERN_FAILURE;
    struct segment_command *seghdr = NULL;

    check(header_offset);

    require_action(sizeof(*seghdr) <= header_size - *header_offset, finish,
        rval=KERN_FAILURE);
    seghdr = (struct segment_command *) ((void *) (buf + *header_offset));
    *header_offset += sizeof(*seghdr);

    seghdr->cmd = LC_SEGMENT;
    seghdr->cmdsize = (uint32_t) sizeof(*seghdr);
    seghdr->cmdsize +=
        (uint32_t) (seg->sects.nitems * kxld_sect_get_macho_header_size(TRUE));
    strlcpy(seghdr->segname, seg->segname, sizeof(seghdr->segname));
    seghdr->vmaddr = (uint32_t) seg->link_addr;
    seghdr->vmsize = (uint32_t) seg->vmsize;
    seghdr->fileoff = (uint32_t) data_offset;
    seghdr->filesize = (uint32_t) seg->vmsize;
    seghdr->maxprot = seg->maxprot;
    seghdr->initprot = seg->initprot;
    seghdr->nsects = seg->sects.nitems;

    rval = KERN_SUCCESS;
finish:
    return rval;
}
#endif /* KXLD_USER_OR_ILP32 */
#if KXLD_USER_OR_LP64
/*******************************************************************************
*******************************************************************************/
static kern_return_t
seg_export_macho_header_64(const KXLDSeg *seg, u_char *buf,
    u_long *header_offset, u_long header_size, u_long data_offset)
{
    kern_return_t rval = KERN_FAILURE;
    struct segment_command_64 *seghdr = NULL;

    check(header_offset);

    require_action(sizeof(*seghdr) <= header_size - *header_offset, finish,
        rval=KERN_FAILURE);
    seghdr = (struct segment_command_64 *) ((void *) (buf + *header_offset));
    *header_offset += sizeof(*seghdr);

    seghdr->cmd = LC_SEGMENT_64;
    seghdr->cmdsize = (uint32_t) sizeof(*seghdr);
    seghdr->cmdsize +=
        (uint32_t) (seg->sects.nitems * kxld_sect_get_macho_header_size(FALSE));
    strlcpy(seghdr->segname, seg->segname, sizeof(seghdr->segname));
    seghdr->vmaddr = (uint64_t) seg->link_addr;
    seghdr->vmsize = (uint64_t) seg->vmsize;
    seghdr->fileoff = (uint64_t) data_offset;
    seghdr->filesize = (uint64_t) seg->vmsize;
    seghdr->maxprot = seg->maxprot;
    seghdr->initprot = seg->initprot;
    seghdr->nsects = seg->sects.nitems;

    rval = KERN_SUCCESS;
finish:
    return rval;
}
#endif /* KXLD_USER_OR_LP64 */
/*******************************************************************************
* Record a section in the first empty slot of the segment's section index.
* The section's segment name must match the segment.
*******************************************************************************/
kern_return_t
kxld_seg_add_section(KXLDSeg *seg, KXLDSect *sect)
{
    kern_return_t rval = KERN_FAILURE;
    KXLDSect **sectp = NULL;
    u_int i = 0;

    require_action(streq_safe(seg->segname, sect->segname, sizeof(seg->segname)),
        finish, rval=KERN_FAILURE);

    /* Add the section into the section index */

    for (i = 0; i < seg->sects.nitems; ++i) {
        sectp = kxld_array_get_item(&seg->sects, i);
        if (NULL == *sectp) {
            *sectp = sect;
            break;
        }
    }
    require_action(i < seg->sects.nitems, finish, rval=KERN_FAILURE);

    rval = KERN_SUCCESS;
finish:
    return rval;
}
/*******************************************************************************
* Set the segment's vmsize based on the highest section it contains.
*******************************************************************************/
kern_return_t
kxld_seg_finish_init(KXLDSeg *seg)
{
    kern_return_t rval = KERN_FAILURE;
    u_int i = 0;
    KXLDSect *sect = NULL;
    kxld_addr_t maxaddr = 0;
    kxld_size_t maxsize = 0;

    if (seg->sects.nitems) {
        for (i = 0; i < seg->sects.nitems; ++i) {
            sect = get_sect_by_index(seg, i);
            require_action(sect, finish, rval=KERN_FAILURE);
            if (sect->base_addr > maxaddr) {
                maxaddr = sect->base_addr;
                maxsize = sect->size;
            }
        }

        /* XXX Cross architecture linking will fail if the page size ever differs
         * from 4096. (As of this writing, we're fine on i386, x86_64, and arm.)
         */
        seg->vmsize = round_page(maxaddr + maxsize - seg->base_addr);
    }

    rval = KERN_SUCCESS;
finish:
    return rval;
}
/*******************************************************************************
*******************************************************************************/
void
kxld_seg_set_vm_protections(KXLDSeg *seg, boolean_t strict_protections)
{
    /* This is unnecessary except to make the clang analyzer happy. When
     * the analyzer no longer ignores nonnull attributes for if statements,
     * we can remove this line.
     */
    if (!seg) return;

    if (strict_protections) {
        if (streq_safe(seg->segname, SEG_TEXT, const_strlen(SEG_TEXT))) {
            seg->initprot = TEXT_SEG_PROT;
            seg->maxprot = VM_PROT_ALL;
        } else {
            seg->initprot = DATA_SEG_PROT;
            seg->maxprot = DATA_SEG_PROT;
        }
    } else {
        seg->initprot = VM_PROT_ALL;
        seg->maxprot = VM_PROT_ALL;
    }
}
/*******************************************************************************
*******************************************************************************/
void
kxld_seg_relocate(KXLDSeg *seg, kxld_addr_t link_addr)
{
    KXLDSect *sect = NULL;
    u_int i = 0;

    seg->link_addr += link_addr;
    for (i = 0; i < seg->sects.nitems; ++i) {
        sect = get_sect_by_index(seg, i);
        kxld_sect_relocate(sect, link_addr);
    }
}
/*******************************************************************************
* Size the __LINKEDIT segment to hold the symbol table and, when the target
* supports slidable kexts, the relocation entries.
*******************************************************************************/
void
kxld_seg_populate_linkedit(KXLDSeg *seg, const KXLDSymtab *symtab, boolean_t is_32_bit
#if KXLD_PIC_KEXTS
    , const KXLDArray *locrelocs
    , const KXLDArray *extrelocs
    , boolean_t target_supports_slideable_kexts
#endif /* KXLD_PIC_KEXTS */
    )
{
    u_long size = 0;

    size += kxld_symtab_get_macho_data_size(symtab, is_32_bit);

#if KXLD_PIC_KEXTS
    if (target_supports_slideable_kexts) {
        size += kxld_reloc_get_macho_data_size(locrelocs, extrelocs);
    }
#endif /* KXLD_PIC_KEXTS */

    seg->vmsize = round_page(size);
}