/*
 * Copyright (c) 2008 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <mach/vm_prot.h>
#include <mach-o/loader.h>
#include <sys/types.h>

#include <mach/vm_param.h>
#include <mach/mach_init.h>

#define DEBUG_ASSERT_COMPONENT_NAME_STRING "kxld"
#include <AssertMacros.h>

#include "kxld_reloc.h"
#include "kxld_sect.h"
#include "kxld_seg.h"
#include "kxld_symtab.h"
#include "kxld_util.h"

#define TEXT_SEG_PROT (VM_PROT_READ | VM_PROT_EXECUTE)
#define DATA_SEG_PROT (VM_PROT_READ | VM_PROT_WRITE)
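
/* Note: when strict protections are requested, kxld_seg_set_vm_protections()
 * below applies TEXT_SEG_PROT to __TEXT and DATA_SEG_PROT to every other
 * segment; otherwise segments are left fully permissive (VM_PROT_ALL).
 */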

#if KXLD_USER_OR_OBJECT
static kern_return_t reorder_sections(KXLDSeg *seg, KXLDArray *section_order);
static void reorder_section(KXLDArray *sects, u_int *sect_reorder_index,
    KXLDSect **reorder_buffer, u_int reorder_buffer_index);
#endif /* KXLD_USER_OR_OBJECT */

static KXLDSeg * get_segment_by_name(KXLDArray *segarray, const char *name);

#if KXLD_USER_OR_ILP32
static kern_return_t seg_export_macho_header_32(const KXLDSeg *seg, u_char *buf,
    u_long *header_offset, u_long header_size, u_long data_offset);
#endif /* KXLD_USER_OR_ILP32 */
#if KXLD_USER_OR_LP64
static kern_return_t seg_export_macho_header_64(const KXLDSeg *seg, u_char *buf,
    u_long *header_offset, u_long header_size, u_long data_offset);
#endif /* KXLD_USER_OR_LP64 */

static KXLDSect * get_sect_by_index(const KXLDSeg *seg, u_int idx);

#if KXLD_USER_OR_ILP32
/*******************************************************************************
*******************************************************************************/
kern_return_t
kxld_seg_init_from_macho_32(KXLDSeg *seg, struct segment_command *src)
{
    kern_return_t rval = KERN_FAILURE;

    strlcpy(seg->segname, src->segname, sizeof(seg->segname));
    seg->base_addr = src->vmaddr;
    seg->link_addr = src->vmaddr;
    seg->vmsize = src->vmsize;
    seg->fileoff = src->fileoff;
    seg->maxprot = src->maxprot;
    seg->initprot = src->initprot;
    seg->flags = src->flags;

    rval = kxld_array_init(&seg->sects, sizeof(KXLDSect *), src->nsects);
    require_noerr(rval, finish);

    rval = KERN_SUCCESS;
finish:
    return rval;
}
#endif /* KXLD_USER_OR_ILP32 */

#if KXLD_USER_OR_LP64
/*******************************************************************************
*******************************************************************************/
kern_return_t
kxld_seg_init_from_macho_64(KXLDSeg *seg, struct segment_command_64 *src)
{
    kern_return_t rval = KERN_FAILURE;

    strlcpy(seg->segname, src->segname, sizeof(seg->segname));
    seg->base_addr = src->vmaddr;
    seg->link_addr = src->vmaddr;
    seg->vmsize = src->vmsize;
    seg->fileoff = src->fileoff;
    seg->maxprot = src->maxprot;
    seg->initprot = src->initprot;
    seg->flags = src->flags;

    rval = kxld_array_init(&seg->sects, sizeof(KXLDSect *), src->nsects);
    require_noerr(rval, finish);

    rval = KERN_SUCCESS;
finish:
    return rval;
}
#endif /* KXLD_USER_OR_LP64 */
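
/* Usage sketch (illustrative only, not part of kxld): a caller walking the
 * load commands of a 64-bit Mach-O header would typically call the
 * initializer above once per segment load command, e.g.
 *
 *     struct segment_command_64 *lc = (struct segment_command_64 *) cmd;
 *     if (lc->cmd == LC_SEGMENT_64) {
 *         rval = kxld_seg_init_from_macho_64(seg, lc);
 *     }
 *
 * where cmd and seg are supplied by the caller.
 */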

#if KXLD_USER_OR_OBJECT
/*******************************************************************************
*******************************************************************************/
kern_return_t
kxld_seg_create_seg_from_sections(KXLDArray *segarray, KXLDArray *sectarray)
{
    kern_return_t rval = KERN_FAILURE;
    KXLDSeg *seg = NULL;
    KXLDSect *sect = NULL;
    KXLDSect **sectp = NULL;
    u_int i = 0;

    /* Initialize the segment array to one segment */

    rval = kxld_array_init(segarray, sizeof(KXLDSeg), 1);
    require_noerr(rval, finish);

    /* Initialize the segment */

    seg = kxld_array_get_item(segarray, 0);
    seg->initprot = VM_PROT_ALL;
    seg->maxprot = VM_PROT_ALL;

    /* Add the sections to the segment */

    rval = kxld_array_init(&seg->sects, sizeof(KXLDSect *), sectarray->nitems);
    require_noerr(rval, finish);

    for (i = 0; i < sectarray->nitems; ++i) {
        sect = kxld_array_get_item(sectarray, i);
        sectp = kxld_array_get_item(&seg->sects, i);
        *sectp = sect;
    }

    rval = KERN_SUCCESS;
finish:
    return rval;
}

/*******************************************************************************
*******************************************************************************/
kern_return_t
kxld_seg_finalize_object_segment(KXLDArray *segarray, KXLDArray *section_order,
    u_long hdrsize)
{
    kern_return_t rval = KERN_FAILURE;
    KXLDSeg *seg = NULL;
    KXLDSect *sect = NULL;
    u_long sect_offset = 0;
    u_int i = 0;

    check(section_order);
    require_action(segarray->nitems == 1, finish, rval=KERN_FAILURE);

    seg = kxld_array_get_item(segarray, 0);

    /* Reorder the sections */

    rval = reorder_sections(seg, section_order);
    require_noerr(rval, finish);

    /* Set the initial link address at the end of the header pages */

    seg->link_addr = round_page(hdrsize);

    /* Fix up all of the section addresses */

    sect_offset = (u_long) seg->link_addr;
    for (i = 0; i < seg->sects.nitems; ++i) {
        sect = *(KXLDSect **) kxld_array_get_item(&seg->sects, i);

        sect->link_addr = kxld_sect_align_address(sect, sect_offset);
        sect_offset = (u_long) (sect->link_addr + sect->size);
    }

    /* Finish initializing the segment */

    seg->vmsize = round_page(sect_offset) - seg->link_addr;

    rval = KERN_SUCCESS;
finish:
    return rval;
}

/*******************************************************************************
* The legacy section ordering used by kld was based on the order of sections
* in the kernel file. To achieve the same layout, we save the kernel's
* section ordering as an array of section names when the kernel file itself
* is linked. Then, when kexts are linked with the KXLD_LEGACY_LAYOUT flag,
* we refer to the kernel's section layout to order the kext's sections.
*
* The algorithm below is as follows. We iterate through all of the kernel's
* sections grouped by segment name, so that we are processing all of the __TEXT
* sections, then all of the __DATA sections, etc. We then iterate through the
* kext's sections with a similar grouping, looking for sections that match
* the current kernel's section. In this way, we order all of the matching
* kext sections in the order in which they appear in the kernel, and then place
* all remaining kext sections at the end of the current segment grouping in
* the order in which they originally appeared. Sections that only appear in
* the kernel are not created. Segments that only appear in the kext are
* left in their original ordering.
*
* In the implementation below, we use a reorder buffer to hold pointers to the
* sections of the current working segment. We scan this buffer looking for
* matching sections, placing them in the segment's section index as we find them.
* If this function must exit early, the segment's section index is left in an
* unusable state.
*******************************************************************************/
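/* Illustrative example (hypothetical section names, not taken from an actual
 * kernel link): if the saved kernel ordering for __TEXT is
 *     __text, __const
 * and the kext's __TEXT group contains
 *     __const, __text, __cstring
 * the kext's __TEXT group is reordered to
 *     __text, __const, __cstring
 * with __cstring, which the kernel ordering does not mention, appended after
 * the matched sections.
 */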
static kern_return_t
reorder_sections(KXLDSeg *seg, KXLDArray *section_order)
{
    kern_return_t rval = KERN_FAILURE;
    KXLDSect *sect = NULL;
    KXLDSect **reorder_buffer = NULL;
    KXLDSectionName *section_name = NULL;
    const char *segname = NULL;
    u_int sect_index = 0, legacy_index = 0, sect_reorder_index = 0;
    u_int i = 0, j = 0;
    u_int sect_start = 0, sect_end = 0, legacy_start = 0, legacy_end = 0;
    u_int nsects = 0;

    check(section_order);

    /* Allocate the reorder buffer with enough space to hold all of the
     * segment's sections.
     */

    reorder_buffer = kxld_alloc(
        seg->sects.nitems * sizeof(*reorder_buffer));
    require_action(reorder_buffer, finish, rval=KERN_RESOURCE_SHORTAGE);

    while (legacy_index < section_order->nitems) {

        /* Find the next group of sections with a common segment in the
         * section_order array.
         */

        legacy_start = legacy_index++;
        legacy_end = legacy_index;

        section_name = kxld_array_get_item(section_order, legacy_start);
        segname = section_name->segname;
        while (legacy_index < section_order->nitems) {
            section_name = kxld_array_get_item(section_order, legacy_index);
            if (!streq_safe(segname, section_name->segname,
                    sizeof(section_name->segname)))
            {
                break;
            }

            ++legacy_index;
            ++legacy_end;
        }

        /* Find a group of sections in the kext that match the current
         * section_order segment.
         */

        sect_start = sect_index;
        sect_end = sect_index;

        while (sect_index < seg->sects.nitems) {
            sect = *(KXLDSect **) kxld_array_get_item(&seg->sects, sect_index);
            if (!streq_safe(segname, sect->segname, sizeof(sect->segname))) {
                break;
            }

            ++sect_index;
            ++sect_end;
        }

        nsects = sect_end - sect_start;

        if (!nsects) continue;

        /* Populate the reorder buffer with the current group of kext sections */

        for (i = sect_start; i < sect_end; ++i) {
            reorder_buffer[i - sect_start] =
                *(KXLDSect **) kxld_array_get_item(&seg->sects, i);
        }

        /* For each section_order section, scan the reorder buffer for a matching
         * kext section. If one is found, copy it into the next slot in the
         * segment's section index.
         */

        sect_reorder_index = sect_start;
        for (i = legacy_start; i < legacy_end; ++i) {
            section_name = kxld_array_get_item(section_order, i);
            sect = NULL;

            for (j = 0; j < nsects; ++j) {
                sect = reorder_buffer[j];
                if (!sect) continue;

                if (streq_safe(section_name->sectname, sect->sectname,
                        sizeof(section_name->sectname)))
                {
                    break;
                }

                sect = NULL;
            }

            if (sect) {
                (void) reorder_section(&seg->sects, &sect_reorder_index,
                    reorder_buffer, j);
            }
        }

        /* If any sections remain in the reorder buffer, they are not specified
         * in the section_order array, so append them to the section index in
         * the order they are found.
         */

        for (i = 0; i < nsects; ++i) {
            if (!reorder_buffer[i]) continue;
            reorder_section(&seg->sects, &sect_reorder_index, reorder_buffer, i);
        }
    }

    rval = KERN_SUCCESS;

finish:
    if (reorder_buffer) {
        kxld_free(reorder_buffer,
            seg->sects.nitems * sizeof(*reorder_buffer));
        reorder_buffer = NULL;
    }

    return rval;
}

/*******************************************************************************
*******************************************************************************/
static void
reorder_section(KXLDArray *sects, u_int *sect_reorder_index,
    KXLDSect **reorder_buffer, u_int reorder_buffer_index)
{
    KXLDSect **tmp = NULL;

    tmp = kxld_array_get_item(sects, *sect_reorder_index);

    *tmp = reorder_buffer[reorder_buffer_index];
    reorder_buffer[reorder_buffer_index]->sectnum = *sect_reorder_index;
    reorder_buffer[reorder_buffer_index] = NULL;

    ++(*sect_reorder_index);
}

/*******************************************************************************
*******************************************************************************/
kern_return_t
kxld_seg_init_linkedit(KXLDArray *segs)
{
    kern_return_t rval = KERN_FAILURE;
    KXLDSeg *seg = NULL;
    KXLDSeg *le = NULL;

    rval = kxld_array_resize(segs, 2);
    require_noerr(rval, finish);

    seg = kxld_array_get_item(segs, 0);
    le = kxld_array_get_item(segs, 1);

    strlcpy(le->segname, SEG_LINKEDIT, sizeof(le->segname));
    le->link_addr = round_page(seg->link_addr + seg->vmsize);
    le->maxprot = VM_PROT_ALL;
    le->initprot = VM_PROT_DEFAULT;

    rval = KERN_SUCCESS;
finish:
    return rval;
}
#endif /* KXLD_USER_OR_OBJECT */

/*******************************************************************************
*******************************************************************************/
void
kxld_seg_clear(KXLDSeg *seg)
{
    bzero(seg->segname, sizeof(seg->segname));
    seg->base_addr = 0;
    seg->link_addr = 0;
    seg->vmsize = 0;
    seg->fileoff = 0;
    seg->maxprot = 0;
    seg->initprot = 0;
    seg->flags = 0;

    /* Don't clear the individual sections here because kxld_kext.c will take
     * care of that.
     */
    kxld_array_clear(&seg->sects);
}

/*******************************************************************************
*******************************************************************************/
void
kxld_seg_deinit(KXLDSeg *seg)
{
    kxld_array_deinit(&seg->sects);
    bzero(seg, sizeof(*seg));
}

/*******************************************************************************
*******************************************************************************/
kxld_size_t
kxld_seg_get_vmsize(const KXLDSeg *seg)
{
    return seg->vmsize;
}

/*******************************************************************************
*******************************************************************************/
u_long
kxld_seg_get_macho_header_size(const KXLDSeg *seg, boolean_t is_32_bit)
{
    u_long size = 0;

    if (is_32_bit) {
        size += sizeof(struct segment_command);
    } else {
        size += sizeof(struct segment_command_64);
    }
    size += seg->sects.nitems * kxld_sect_get_macho_header_size(is_32_bit);

    return size;
}

/*******************************************************************************
*******************************************************************************/
/* This is no longer used, but may be useful some day... */
u_long
kxld_seg_get_macho_data_size(const KXLDSeg *seg)
{
    u_long size = 0;
    u_int i = 0;
    KXLDSect *sect = NULL;

    for (i = 0; i < seg->sects.nitems; ++i) {
        sect = get_sect_by_index(seg, i);
        size = (u_long) kxld_sect_align_address(sect, size);
        size += kxld_sect_get_macho_data_size(sect);
    }

    return round_page(size);
}

/*******************************************************************************
*******************************************************************************/
static KXLDSect *
get_sect_by_index(const KXLDSeg *seg, u_int idx)
{
    return *(KXLDSect **) kxld_array_get_item(&seg->sects, idx);
}

/*******************************************************************************
*******************************************************************************/
kern_return_t
kxld_seg_export_macho_to_file_buffer(const KXLDSeg *seg, u_char *buf,
    u_long *header_offset, u_long header_size,
    u_long *data_offset, u_long data_size,
    boolean_t is_32_bit)
{
    kern_return_t rval = KERN_FAILURE;
    KXLDSect *sect = NULL;
    u_long base_data_offset = *data_offset;
    u_int i = 0;
    struct segment_command *hdr32 =
        (struct segment_command *) ((void *) (buf + *header_offset));
    struct segment_command_64 *hdr64 =
        (struct segment_command_64 *) ((void *) (buf + *header_offset));

    check(header_offset);

    /* Write out the header */

    KXLD_3264_FUNC(is_32_bit, rval,
        seg_export_macho_header_32, seg_export_macho_header_64,
        seg, buf, header_offset, header_size, *data_offset);
    require_noerr(rval, finish);

    /* Write out each section */

    for (i = 0; i < seg->sects.nitems; ++i) {
        sect = get_sect_by_index(seg, i);

        rval = kxld_sect_export_macho_to_file_buffer(sect, buf, header_offset,
            header_size, data_offset, data_size, is_32_bit);
        require_noerr(rval, finish);
    }

    /* Update the filesize */

    if (is_32_bit) {
        hdr32->filesize = (uint32_t) (*data_offset - base_data_offset);
    } else {
        hdr64->filesize = (uint64_t) (*data_offset - base_data_offset);
    }

    *data_offset = round_page(*data_offset);

    rval = KERN_SUCCESS;
finish:
    return rval;
}

/*******************************************************************************
*******************************************************************************/
kern_return_t
kxld_seg_export_macho_to_vm(const KXLDSeg *seg, u_char *buf,
    u_long *header_offset, u_long header_size,
    u_long data_size, kxld_addr_t file_link_addr,
    boolean_t is_32_bit)
{
    kern_return_t rval = KERN_FAILURE;
    KXLDSect *sect = NULL;
    u_long data_offset = (u_long) (seg->link_addr - file_link_addr);
    u_int i = 0;

    check(header_offset);

    /* Write out the header */

    KXLD_3264_FUNC(is_32_bit, rval,
        seg_export_macho_header_32, seg_export_macho_header_64,
        seg, buf, header_offset, header_size, data_offset);
    require_noerr(rval, finish);

    /* Write out each section */

    for (i = 0; i < seg->sects.nitems; ++i) {
        sect = get_sect_by_index(seg, i);

        rval = kxld_sect_export_macho_to_vm(sect, buf, header_offset,
            header_size, file_link_addr, data_size, is_32_bit);
        require_noerr(rval, finish);
    }

    rval = KERN_SUCCESS;
finish:
    return rval;
}

#if KXLD_USER_OR_ILP32
/*******************************************************************************
*******************************************************************************/
static kern_return_t
seg_export_macho_header_32(const KXLDSeg *seg, u_char *buf,
    u_long *header_offset, u_long header_size, u_long data_offset)
{
    kern_return_t rval = KERN_FAILURE;
    struct segment_command *seghdr = NULL;

    check(header_offset);

    require_action(sizeof(*seghdr) <= header_size - *header_offset, finish,
        rval=KERN_FAILURE);
    seghdr = (struct segment_command *) ((void *) (buf + *header_offset));
    *header_offset += sizeof(*seghdr);

    seghdr->cmd = LC_SEGMENT;
    seghdr->cmdsize = (uint32_t) sizeof(*seghdr);
    seghdr->cmdsize +=
        (uint32_t) (seg->sects.nitems * kxld_sect_get_macho_header_size(TRUE));
    strlcpy(seghdr->segname, seg->segname, sizeof(seghdr->segname));
    seghdr->vmaddr = (uint32_t) seg->link_addr;
    seghdr->vmsize = (uint32_t) seg->vmsize;
    seghdr->fileoff = (uint32_t) data_offset;
    seghdr->filesize = (uint32_t) seg->vmsize;
    seghdr->maxprot = seg->maxprot;
    seghdr->initprot = seg->initprot;
    seghdr->nsects = seg->sects.nitems;
    seghdr->flags = 0;

    rval = KERN_SUCCESS;
finish:
    return rval;
}
#endif /* KXLD_USER_OR_ILP32 */

#if KXLD_USER_OR_LP64
/*******************************************************************************
*******************************************************************************/
static kern_return_t
seg_export_macho_header_64(const KXLDSeg *seg, u_char *buf,
    u_long *header_offset, u_long header_size, u_long data_offset)
{
    kern_return_t rval = KERN_FAILURE;
    struct segment_command_64 *seghdr = NULL;

    check(header_offset);

    require_action(sizeof(*seghdr) <= header_size - *header_offset, finish,
        rval=KERN_FAILURE);
    seghdr = (struct segment_command_64 *) ((void *) (buf + *header_offset));
    *header_offset += sizeof(*seghdr);

    seghdr->cmd = LC_SEGMENT_64;
    seghdr->cmdsize = (uint32_t) sizeof(*seghdr);
    seghdr->cmdsize +=
        (uint32_t) (seg->sects.nitems * kxld_sect_get_macho_header_size(FALSE));
    strlcpy(seghdr->segname, seg->segname, sizeof(seghdr->segname));
    seghdr->vmaddr = (uint64_t) seg->link_addr;
    seghdr->vmsize = (uint64_t) seg->vmsize;
    seghdr->fileoff = (uint64_t) data_offset;
    seghdr->filesize = (uint64_t) seg->vmsize;
    seghdr->maxprot = seg->maxprot;
    seghdr->initprot = seg->initprot;
    seghdr->nsects = seg->sects.nitems;
    seghdr->flags = 0;

    rval = KERN_SUCCESS;
finish:
    return rval;
}
#endif /* KXLD_USER_OR_LP64 */

/*******************************************************************************
*******************************************************************************/
kern_return_t
kxld_seg_add_section(KXLDSeg *seg, KXLDSect *sect)
{
    kern_return_t rval = KERN_FAILURE;
    KXLDSect **sectp = NULL;
    u_int i = 0;

    require_action(streq_safe(seg->segname, sect->segname, sizeof(seg->segname)),
        finish, rval=KERN_FAILURE);

    /* Add the section into the section index */

    for (i = 0; i < seg->sects.nitems; ++i) {
        sectp = kxld_array_get_item(&seg->sects, i);
        if (NULL == *sectp) {
            *sectp = sect;
            break;
        }
    }
    require_action(i < seg->sects.nitems, finish, rval=KERN_FAILURE);

    rval = KERN_SUCCESS;
finish:
    return rval;
}

/*******************************************************************************
*******************************************************************************/
kern_return_t
kxld_seg_finish_init(KXLDSeg *seg)
{
    kern_return_t rval = KERN_FAILURE;
    u_int i = 0;
    KXLDSect *sect = NULL;
    kxld_addr_t maxaddr = 0;
    kxld_size_t maxsize = 0;

    if (seg->sects.nitems) {
        for (i = 0; i < seg->sects.nitems; ++i) {
            sect = get_sect_by_index(seg, i);
            require_action(sect, finish, rval=KERN_FAILURE);
            if (sect->base_addr > maxaddr) {
                maxaddr = sect->base_addr;
                maxsize = sect->size;
            }
        }

        seg->vmsize = round_page(maxaddr + maxsize - seg->base_addr);
    }

    rval = KERN_SUCCESS;
finish:
    return rval;
}

/*******************************************************************************
*******************************************************************************/
void
kxld_seg_set_vm_protections(KXLDSeg *seg, boolean_t strict_protections)
{
    /* This is unnecessary except to make the clang analyzer happy. When
     * the analyzer no longer ignores nonnull attributes for if statements,
     * we can remove this line.
     */
    check(seg);

    if (strict_protections) {
        if (streq_safe(seg->segname, SEG_TEXT, const_strlen(SEG_TEXT))) {
            seg->initprot = TEXT_SEG_PROT;
            seg->maxprot = TEXT_SEG_PROT;
        } else {
            seg->initprot = DATA_SEG_PROT;
            seg->maxprot = DATA_SEG_PROT;
        }
    } else {
        seg->initprot = VM_PROT_ALL;
        seg->maxprot = VM_PROT_ALL;
    }
}

/*******************************************************************************
*******************************************************************************/
void
kxld_seg_relocate(KXLDSeg *seg, kxld_addr_t link_addr)
{
    KXLDSect *sect = NULL;
    u_int i = 0;

    seg->link_addr += link_addr;
    for (i = 0; i < seg->sects.nitems; ++i) {
        sect = get_sect_by_index(seg, i);
        kxld_sect_relocate(sect, link_addr);
    }
}

/*******************************************************************************
*******************************************************************************/
void
kxld_seg_populate_linkedit(KXLDSeg *seg, const KXLDSymtab *symtab, boolean_t is_32_bit
#if KXLD_PIC_KEXTS
    , const KXLDArray *locrelocs
    , const KXLDArray *extrelocs
    , boolean_t target_supports_slideable_kexts
#endif  /* KXLD_PIC_KEXTS */
    )
{
    u_long size = 0;

    size += kxld_symtab_get_macho_data_size(symtab, is_32_bit);

#if KXLD_PIC_KEXTS
    if (target_supports_slideable_kexts) {
        size += kxld_reloc_get_macho_data_size(locrelocs, extrelocs);
    }
#endif  /* KXLD_PIC_KEXTS */

    seg->vmsize = round_page(size);
}