/*
 * Copyright (c) 2008 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <mach/vm_prot.h>
#include <mach-o/loader.h>
#include <sys/types.h>

#include <mach/vm_param.h>
#include <mach/mach_init.h>

#define DEBUG_ASSERT_COMPONENT_NAME_STRING "kxld"
#include <AssertMacros.h>

#include "kxld_sect.h"
#include "kxld_seg.h"
#include "kxld_symtab.h"
#include "kxld_util.h"

#define TEXT_SEG_PROT (VM_PROT_READ | VM_PROT_EXECUTE)
#define DATA_SEG_PROT (VM_PROT_READ | VM_PROT_WRITE)
#if KXLD_USER_OR_OBJECT
static kern_return_t reorder_sections(KXLDSeg *seg, KXLDArray *section_order);
static void reorder_section(KXLDArray *sects, u_int *sect_reorder_index,
    KXLDSect **reorder_buffer, u_int reorder_buffer_index);
#endif /* KXLD_USER_OR_OBJECT */
static KXLDSeg * get_segment_by_name(KXLDArray *segarray, const char *name);
#if KXLD_USER_OR_ILP32
static kern_return_t seg_export_macho_header_32(const KXLDSeg *seg, u_char *buf,
    u_long *header_offset, u_long header_size, u_long data_offset);
#endif /* KXLD_USER_OR_ILP32 */
#if KXLD_USER_OR_LP64
static kern_return_t seg_export_macho_header_64(const KXLDSeg *seg, u_char *buf,
    u_long *header_offset, u_long header_size, u_long data_offset);
#endif /* KXLD_USER_OR_LP64 */
static KXLDSect * get_sect_by_index(const KXLDSeg *seg, u_int idx);
#if KXLD_USER_OR_ILP32
/*******************************************************************************
*******************************************************************************/
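/* Copy the fields of a 32-bit LC_SEGMENT command into the KXLDSeg and
 * allocate an array to hold pointers to the segment's sections.
 */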
kern_return_t
kxld_seg_init_from_macho_32(KXLDSeg *seg, struct segment_command *src)
{
    kern_return_t rval = KERN_FAILURE;

    strlcpy(seg->segname, src->segname, sizeof(seg->segname));
    seg->base_addr = src->vmaddr;
    seg->link_addr = src->vmaddr;
    seg->vmsize = src->vmsize;
    seg->fileoff = src->fileoff;
    seg->maxprot = src->maxprot;
    seg->initprot = src->initprot;
    seg->flags = src->flags;

    rval = kxld_array_init(&seg->sects, sizeof(KXLDSect *), src->nsects);
    require_noerr(rval, finish);

    rval = KERN_SUCCESS;

finish:
    return rval;
}
#endif /* KXLD_USER_OR_ILP32 */
#if KXLD_USER_OR_LP64
/*******************************************************************************
*******************************************************************************/
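/* 64-bit counterpart of the above: copy the fields of an LC_SEGMENT_64
 * command into the KXLDSeg and allocate its section pointer array.
 */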
kern_return_t
kxld_seg_init_from_macho_64(KXLDSeg *seg, struct segment_command_64 *src)
{
    kern_return_t rval = KERN_FAILURE;

    strlcpy(seg->segname, src->segname, sizeof(seg->segname));
    seg->base_addr = src->vmaddr;
    seg->link_addr = src->vmaddr;
    seg->vmsize = src->vmsize;
    seg->fileoff = src->fileoff;
    seg->maxprot = src->maxprot;
    seg->initprot = src->initprot;
    seg->flags = src->flags;

    rval = kxld_array_init(&seg->sects, sizeof(KXLDSect *), src->nsects);
    require_noerr(rval, finish);

    rval = KERN_SUCCESS;

finish:
    return rval;
}
#endif /* KXLD_USER_OR_LP64 */
#if KXLD_USER_OR_OBJECT
/*******************************************************************************
*******************************************************************************/
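/* Collapse the section array into a single segment that points at every
 * section, used when linking relocatable object files.
 */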
kern_return_t
kxld_seg_create_seg_from_sections(KXLDArray *segarray, KXLDArray *sectarray)
{
    kern_return_t rval = KERN_FAILURE;
    KXLDSeg *seg = NULL;
    KXLDSect *sect = NULL;
    KXLDSect **sectp = NULL;
    u_int i = 0;

    /* Initialize the segment array to one segment */

    rval = kxld_array_init(segarray, sizeof(KXLDSeg), 1);
    require_noerr(rval, finish);

    /* Initialize the segment */

    seg = kxld_array_get_item(segarray, 0);
    seg->initprot = VM_PROT_ALL;
    seg->maxprot = VM_PROT_ALL;

    /* Add the sections to the segment */

    rval = kxld_array_init(&seg->sects, sizeof(KXLDSect *), sectarray->nitems);
    require_noerr(rval, finish);

    for (i = 0; i < sectarray->nitems; ++i) {
        sect = kxld_array_get_item(sectarray, i);
        sectp = kxld_array_get_item(&seg->sects, i);
        *sectp = sect;
    }

    rval = KERN_SUCCESS;

finish:
    return rval;
}
/*******************************************************************************
*******************************************************************************/
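/* For an object-file link: reorder the single segment's sections to match the
 * saved kernel layout, lay them out after the header pages, and compute the
 * segment's vmsize.
 */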
kern_return_t
kxld_seg_finalize_object_segment(KXLDArray *segarray, KXLDArray *section_order,
    u_long hdrsize)
{
    kern_return_t rval = KERN_FAILURE;
    KXLDSeg *seg = NULL;
    KXLDSect *sect = NULL;
    u_long sect_offset = 0;
    u_int i = 0;

    check(section_order);
    require_action(segarray->nitems == 1, finish, rval=KERN_FAILURE);

    seg = kxld_array_get_item(segarray, 0);

    /* Reorder the sections */

    rval = reorder_sections(seg, section_order);
    require_noerr(rval, finish);

    /* Set the initial link address at the end of the header pages */

    seg->link_addr = round_page(hdrsize);

    /* Fix up all of the section addresses */

    sect_offset = (u_long) seg->link_addr;
    for (i = 0; i < seg->sects.nitems; ++i) {
        sect = *(KXLDSect **)kxld_array_get_item(&seg->sects, i);

        sect->link_addr = kxld_sect_align_address(sect, sect_offset);
        sect_offset = (u_long) (sect->link_addr + sect->size);
    }

    /* Finish initializing the segment */

    seg->vmsize = round_page(sect_offset) - seg->link_addr;

    rval = KERN_SUCCESS;

finish:
    return rval;
}
/*******************************************************************************
* The legacy section ordering used by kld was based on the order of sections
* in the kernel file. To achieve the same layout, we save the kernel's
* section ordering as an array of section names when the kernel file itself
* is linked. Then, when kexts are linked with the KXLD_LEGACY_LAYOUT flag,
* we refer to the kernel's section layout to order the kext's sections.
*
* The algorithm below is as follows. We iterate through all of the kernel's
* sections grouped by segment name, so that we are processing all of the __TEXT
* sections, then all of the __DATA sections, etc. We then iterate through the
* kext's sections with a similar grouping, looking for sections that match
* the current kernel section. In this way, we order all of the matching
* kext sections in the order in which they appear in the kernel, and then place
* all remaining kext sections at the end of the current segment grouping in
* the order in which they originally appeared. Sections that only appear in
* the kernel are not created. Segments that only appear in the kext are
* left in their original ordering.
*
* In the implementation below, we use a reorder buffer to hold pointers to the
* sections of the current working segment. We scan this buffer looking for
* matching sections, placing them in the segment's section index as we find them.
* If this function must exit early, the segment's section index is left in an
* unusable state.
*******************************************************************************/
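/* Hypothetical example (not from the original comment): if the kernel orders
 * its __TEXT sections as __text, __const and a kext contains __const, __text,
 * __cstring, the kext's __TEXT group becomes __text, __const, __cstring --
 * the two matching sections take the kernel's order, and the unmatched
 * __cstring is appended at the end of the group.
 */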
static kern_return_t
reorder_sections(KXLDSeg *seg, KXLDArray *section_order)
{
    kern_return_t rval = KERN_FAILURE;
    KXLDSect *sect = NULL;
    KXLDSect **reorder_buffer = NULL;
    KXLDSectionName *section_name = NULL;
    const char *segname = NULL;
    u_int sect_index = 0, legacy_index = 0, sect_reorder_index = 0;
    u_int i = 0, j = 0;
    u_int sect_start = 0, sect_end = 0, legacy_start = 0, legacy_end = 0;
    u_int nsects = 0;

    check(section_order);

    /* Allocate the reorder buffer with enough space to hold all of the
     * sections.
     */

    reorder_buffer = kxld_alloc(
        seg->sects.nitems * sizeof(*reorder_buffer));
    require_action(reorder_buffer, finish, rval=KERN_RESOURCE_SHORTAGE);

    while (legacy_index < section_order->nitems) {

        /* Find the next group of sections with a common segment in the
         * section_order array.
         */

        legacy_start = legacy_index++;
        legacy_end = legacy_index;

        section_name = kxld_array_get_item(section_order, legacy_start);
        segname = section_name->segname;
        while (legacy_index < section_order->nitems) {
            section_name = kxld_array_get_item(section_order, legacy_index);
            if (!streq_safe(segname, section_name->segname,
                    sizeof(section_name->segname)))
            {
                break;
            }

            ++legacy_end;
            ++legacy_index;
        }

        /* Find a group of sections in the kext that match the current
         * section_order segment.
         */

        sect_start = sect_index;
        sect_end = sect_index;

        while (sect_index < seg->sects.nitems) {
            sect = *(KXLDSect **) kxld_array_get_item(&seg->sects, sect_index);
            if (!streq_safe(segname, sect->segname, sizeof(sect->segname))) {
                break;
            }

            ++sect_end;
            ++sect_index;
        }

        nsects = sect_end - sect_start;
        if (!nsects) continue;

        /* Populate the reorder buffer with the current group of kext sections */

        for (i = sect_start; i < sect_end; ++i) {
            reorder_buffer[i - sect_start] =
                *(KXLDSect **) kxld_array_get_item(&seg->sects, i);
        }

        /* For each section_order section, scan the reorder buffer for a matching
         * kext section.  If one is found, copy it into the next slot in the
         * segment's section index.
         */

        sect_reorder_index = sect_start;
        for (i = legacy_start; i < legacy_end; ++i) {
            section_name = kxld_array_get_item(section_order, i);
            sect = NULL;

            for (j = 0; j < nsects; ++j) {
                sect = reorder_buffer[j];
                if (!sect) continue;

                if (streq_safe(section_name->sectname, sect->sectname,
                        sizeof(section_name->sectname)))
                {
                    break;
                }

                sect = NULL;
            }

            if (sect) {
                (void) reorder_section(&seg->sects, &sect_reorder_index,
                    reorder_buffer, j);
            }
        }

        /* If any sections remain in the reorder buffer, they are not specified
         * in the section_order array, so append them to the section index in
         * the order they are found.
         */

        for (i = 0; i < nsects; ++i) {
            if (!reorder_buffer[i]) continue;
            reorder_section(&seg->sects, &sect_reorder_index, reorder_buffer, i);
        }
    }

    rval = KERN_SUCCESS;

finish:
    if (reorder_buffer) {
        kxld_free(reorder_buffer, seg->sects.nitems * sizeof(*reorder_buffer));
        reorder_buffer = NULL;
    }

    return rval;
}
/*******************************************************************************
*******************************************************************************/
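/* Move one section pointer out of the reorder buffer into the next open slot
 * of the segment's section index and renumber it accordingly.
 */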
static void
reorder_section(KXLDArray *sects, u_int *sect_reorder_index,
    KXLDSect **reorder_buffer, u_int reorder_buffer_index)
{
    KXLDSect **tmp = NULL;

    tmp = kxld_array_get_item(sects, *sect_reorder_index);

    *tmp = reorder_buffer[reorder_buffer_index];
    reorder_buffer[reorder_buffer_index]->sectnum = *sect_reorder_index;
    reorder_buffer[reorder_buffer_index] = NULL;

    ++(*sect_reorder_index);
}
/*******************************************************************************
*******************************************************************************/
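/* Grow the segment array to two entries and initialize the second as a
 * __LINKEDIT segment placed on the page following the first segment.
 */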
kern_return_t
kxld_seg_init_linkedit(KXLDArray *segs)
{
    kern_return_t rval = KERN_FAILURE;
    KXLDSeg *seg = NULL;
    KXLDSeg *le = NULL;

    rval = kxld_array_resize(segs, 2);
    require_noerr(rval, finish);

    seg = kxld_array_get_item(segs, 0);
    le = kxld_array_get_item(segs, 1);

    strlcpy(le->segname, SEG_LINKEDIT, sizeof(le->segname));
    le->link_addr = round_page(seg->link_addr + seg->vmsize);
    le->maxprot = VM_PROT_ALL;
    le->initprot = VM_PROT_DEFAULT;

    rval = KERN_SUCCESS;

finish:
    return rval;
}
#endif /* KXLD_USER_OR_OBJECT */
/*******************************************************************************
*******************************************************************************/
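/* Reset the segment's fields while leaving its section array allocated. */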
void
kxld_seg_clear(KXLDSeg *seg)
{
    bzero(seg->segname, sizeof(seg->segname));

    /* Don't clear the individual sections here because kxld_kext.c will take
     * care of that.
     */
    kxld_array_clear(&seg->sects);
}
/*******************************************************************************
*******************************************************************************/
void
kxld_seg_deinit(KXLDSeg *seg)
{
    kxld_array_deinit(&seg->sects);
    bzero(seg, sizeof(*seg));
}
/*******************************************************************************
*******************************************************************************/
kxld_size_t
kxld_seg_get_vmsize(const KXLDSeg *seg)
{
    return seg->vmsize;
}
/*******************************************************************************
*******************************************************************************/
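/* Size of this segment's load command plus the headers of all of its
 * sections, for either Mach-O word size.
 */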
u_long
kxld_seg_get_macho_header_size(const KXLDSeg *seg, boolean_t is_32_bit)
{
    u_long size = 0;

    if (is_32_bit) {
        size += sizeof(struct segment_command);
    } else {
        size += sizeof(struct segment_command_64);
    }
    size += seg->sects.nitems * kxld_sect_get_macho_header_size(is_32_bit);

    return size;
}
/*******************************************************************************
*******************************************************************************/
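/* Total file-data size of the segment: each section's size at that section's
 * alignment, with the final sum rounded up to a page boundary.
 */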
u_long
kxld_seg_get_macho_data_size(const KXLDSeg *seg)
{
    u_long size = 0;
    u_int i = 0;
    KXLDSect *sect = NULL;

    for (i = 0; i < seg->sects.nitems; ++i) {
        sect = get_sect_by_index(seg, i);
        size = (u_long) kxld_sect_align_address(sect, size);
        size += kxld_sect_get_macho_data_size(sect);
    }

    return round_page(size);
}
/*******************************************************************************
*******************************************************************************/
static KXLDSect *
get_sect_by_index(const KXLDSeg *seg, u_int idx)
{
    return *(KXLDSect **) kxld_array_get_item(&seg->sects, idx);
}
/*******************************************************************************
*******************************************************************************/
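/* Write the segment's load command and each of its sections into a Mach-O
 * file buffer, then record the resulting file size in the segment header and
 * page-align the data offset.
 */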
kern_return_t
kxld_seg_export_macho_to_file_buffer(const KXLDSeg *seg, u_char *buf,
    u_long *header_offset, u_long header_size,
    u_long *data_offset, u_long data_size,
    boolean_t is_32_bit)
{
    kern_return_t rval = KERN_FAILURE;
    KXLDSect *sect = NULL;
    u_long base_data_offset = *data_offset;
    u_int i = 0;
    struct segment_command *hdr32 =
        (struct segment_command *) (buf + *header_offset);
    struct segment_command_64 *hdr64 =
        (struct segment_command_64 *) (buf + *header_offset);

    check(header_offset);

    /* Write out the header */

    KXLD_3264_FUNC(is_32_bit, rval,
        seg_export_macho_header_32, seg_export_macho_header_64,
        seg, buf, header_offset, header_size, *data_offset);
    require_noerr(rval, finish);

    /* Write out each section */

    for (i = 0; i < seg->sects.nitems; ++i) {
        sect = get_sect_by_index(seg, i);

        rval = kxld_sect_export_macho_to_file_buffer(sect, buf, header_offset,
            header_size, data_offset, data_size, is_32_bit);
        require_noerr(rval, finish);
    }

    /* Update the filesize */

    if (is_32_bit) {
        hdr32->filesize = (uint32_t) (*data_offset - base_data_offset);
    } else {
        hdr64->filesize = (uint64_t) (*data_offset - base_data_offset);
    }

    *data_offset = round_page(*data_offset);

    rval = KERN_SUCCESS;

finish:
    return rval;
}
/*******************************************************************************
*******************************************************************************/
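/* Write the segment's load command and sections at their linked (VM)
 * addresses relative to file_link_addr rather than at packed file offsets.
 */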
kern_return_t
kxld_seg_export_macho_to_vm(const KXLDSeg *seg, u_char *buf,
    u_long *header_offset, u_long header_size,
    u_long data_size, kxld_addr_t file_link_addr,
    boolean_t is_32_bit)
{
    kern_return_t rval = KERN_FAILURE;
    KXLDSect *sect = NULL;
    u_long data_offset = (u_long) (seg->link_addr - file_link_addr);
    u_int i = 0;

    check(header_offset);

    /* Write out the header */

    KXLD_3264_FUNC(is_32_bit, rval,
        seg_export_macho_header_32, seg_export_macho_header_64,
        seg, buf, header_offset, header_size, data_offset);
    require_noerr(rval, finish);

    /* Write out each section */

    for (i = 0; i < seg->sects.nitems; ++i) {
        sect = get_sect_by_index(seg, i);

        rval = kxld_sect_export_macho_to_vm(sect, buf, header_offset,
            header_size, file_link_addr, data_size, is_32_bit);
        require_noerr(rval, finish);
    }

    rval = KERN_SUCCESS;

finish:
    return rval;
}
#if KXLD_USER_OR_ILP32
/*******************************************************************************
*******************************************************************************/
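/* Fill in a 32-bit segment_command at the current header offset and advance
 * the offset past it.
 */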
static kern_return_t
seg_export_macho_header_32(const KXLDSeg *seg, u_char *buf,
    u_long *header_offset, u_long header_size, u_long data_offset)
{
    kern_return_t rval = KERN_FAILURE;
    struct segment_command *seghdr = NULL;

    check(header_offset);

    require_action(sizeof(*seghdr) <= header_size - *header_offset, finish,
        rval=KERN_FAILURE);
    seghdr = (struct segment_command *) (buf + *header_offset);
    *header_offset += sizeof(*seghdr);

    seghdr->cmd = LC_SEGMENT;
    seghdr->cmdsize = (uint32_t) sizeof(*seghdr);
    seghdr->cmdsize +=
        (uint32_t) (seg->sects.nitems * kxld_sect_get_macho_header_size(TRUE));
    strlcpy(seghdr->segname, seg->segname, sizeof(seghdr->segname));
    seghdr->vmaddr = (uint32_t) seg->link_addr;
    seghdr->vmsize = (uint32_t) seg->vmsize;
    seghdr->fileoff = (uint32_t) data_offset;
    seghdr->filesize = (uint32_t) seg->vmsize;
    seghdr->maxprot = seg->maxprot;
    seghdr->initprot = seg->initprot;
    seghdr->nsects = seg->sects.nitems;

    rval = KERN_SUCCESS;

finish:
    return rval;
}
#endif /* KXLD_USER_OR_ILP32 */
#if KXLD_USER_OR_LP64
/*******************************************************************************
*******************************************************************************/
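/* 64-bit counterpart of the above: fill in a segment_command_64 and advance
 * the header offset past it.
 */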
static kern_return_t
seg_export_macho_header_64(const KXLDSeg *seg, u_char *buf,
    u_long *header_offset, u_long header_size, u_long data_offset)
{
    kern_return_t rval = KERN_FAILURE;
    struct segment_command_64 *seghdr = NULL;

    check(header_offset);

    require_action(sizeof(*seghdr) <= header_size - *header_offset, finish,
        rval=KERN_FAILURE);
    seghdr = (struct segment_command_64 *) (buf + *header_offset);
    *header_offset += sizeof(*seghdr);

    seghdr->cmd = LC_SEGMENT_64;
    seghdr->cmdsize = (uint32_t) sizeof(*seghdr);
    seghdr->cmdsize +=
        (uint32_t) (seg->sects.nitems * kxld_sect_get_macho_header_size(FALSE));
    strlcpy(seghdr->segname, seg->segname, sizeof(seghdr->segname));
    seghdr->vmaddr = (uint64_t) seg->link_addr;
    seghdr->vmsize = (uint64_t) seg->vmsize;
    seghdr->fileoff = (uint64_t) data_offset;
    seghdr->filesize = (uint64_t) seg->vmsize;
    seghdr->maxprot = seg->maxprot;
    seghdr->initprot = seg->initprot;
    seghdr->nsects = seg->sects.nitems;

    rval = KERN_SUCCESS;

finish:
    return rval;
}
#endif /* KXLD_USER_OR_LP64 */
/*******************************************************************************
*******************************************************************************/
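/* Place a section into the first empty slot of the segment's section index;
 * the section's segment name must match the segment's.
 */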
kern_return_t
kxld_seg_add_section(KXLDSeg *seg, KXLDSect *sect)
{
    kern_return_t rval = KERN_FAILURE;
    KXLDSect **sectp = NULL;
    u_int i = 0;

    require_action(streq_safe(seg->segname, sect->segname, sizeof(seg->segname)),
        finish, rval=KERN_FAILURE);

    /* Add the section into the section index */

    for (i = 0; i < seg->sects.nitems; ++i) {
        sectp = kxld_array_get_item(&seg->sects, i);
        if (NULL == *sectp) {
            *sectp = sect;
            break;
        }
    }
    require_action(i < seg->sects.nitems, finish, rval=KERN_FAILURE);

    rval = KERN_SUCCESS;

finish:
    return rval;
}
/*******************************************************************************
*******************************************************************************/
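/* Once all sections are in place, size the segment to cover its
 * highest-addressed section, rounded up to a page boundary.
 */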
kern_return_t
kxld_seg_finish_init(KXLDSeg *seg)
{
    kern_return_t rval = KERN_FAILURE;
    u_int i = 0;
    KXLDSect *sect = NULL;
    kxld_addr_t maxaddr = 0;
    kxld_size_t maxsize = 0;

    if (seg->sects.nitems) {
        for (i = 0; i < seg->sects.nitems; ++i) {
            sect = get_sect_by_index(seg, i);
            require_action(sect, finish, rval=KERN_FAILURE);
            if (sect->base_addr > maxaddr) {
                maxaddr = sect->base_addr;
                maxsize = sect->size;
            }
        }

        /* XXX Cross architecture linking will fail if the page size ever differs
         * from 4096.  (As of this writing, we're fine on ppc, i386, x86_64, and
         * arm.)
         */
        seg->vmsize = round_page(maxaddr + maxsize - seg->base_addr);
    }

    rval = KERN_SUCCESS;

finish:
    return rval;
}
/*******************************************************************************
*******************************************************************************/
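/* Choose VM protections for the segment: read/execute for __TEXT and
 * read/write for everything else under strict protections, otherwise
 * VM_PROT_ALL.
 */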
void
kxld_seg_set_vm_protections(KXLDSeg *seg, boolean_t strict_protections)
{
    /* This is unnecessary except to make the clang analyzer happy.  When
     * the analyzer no longer ignores nonnull attributes for if statements,
     * we can remove this line.
     */
    if (!seg) return;

    if (strict_protections) {
        if (streq_safe(seg->segname, SEG_TEXT, const_strlen(SEG_TEXT))) {
            seg->initprot = TEXT_SEG_PROT;
            seg->maxprot = VM_PROT_ALL;
        } else {
            seg->initprot = DATA_SEG_PROT;
            seg->maxprot = DATA_SEG_PROT;
        }
    } else {
        seg->initprot = VM_PROT_ALL;
        seg->maxprot = VM_PROT_ALL;
    }
}
/*******************************************************************************
*******************************************************************************/
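/* Slide the segment and all of its sections by link_addr. */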
void
kxld_seg_relocate(KXLDSeg *seg, kxld_addr_t link_addr)
{
    KXLDSect *sect = NULL;
    u_int i = 0;

    seg->link_addr += link_addr;
    for (i = 0; i < seg->sects.nitems; ++i) {
        sect = get_sect_by_index(seg, i);
        kxld_sect_relocate(sect, link_addr);
    }
}
/*******************************************************************************
*******************************************************************************/
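/* Size the __LINKEDIT segment to hold the symbol table's Mach-O data. */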
void
kxld_seg_populate_linkedit(KXLDSeg *seg,
    const KXLDSymtab *symtab, boolean_t is_32_bit)
{
    seg->vmsize = round_page(kxld_symtab_get_macho_data_size(symtab, is_32_bit));
}