/*
 * Copyright (c) 2008 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <mach/vm_prot.h>
#include <mach-o/loader.h>
#include <sys/types.h>

#include <mach/vm_param.h>

#include <mach/mach_init.h>

#define DEBUG_ASSERT_COMPONENT_NAME_STRING "kxld"
#include <AssertMacros.h>

#include "kxld_sect.h"
#include "kxld_seg.h"
#include "kxld_util.h"
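
/* Initial segment protections applied by kxld_seg_set_vm_protections() when
 * strict protections are requested: __TEXT gets read/execute, every other
 * segment gets read/write.
 */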
#define TEXT_SEG_PROT (VM_PROT_READ | VM_PROT_EXECUTE)
#define DATA_SEG_PROT (VM_PROT_READ | VM_PROT_WRITE)
#if KXLD_USER_OR_OBJECT
static kern_return_t reorder_sections(KXLDSeg *seg, KXLDArray *section_order);
static void reorder_section(KXLDArray *sects, u_int *sect_reorder_index,
    KXLDSect **reorder_buffer, u_int reorder_buffer_index);
#endif /* KXLD_USER_OR_OBJECT */
static KXLDSeg * get_segment_by_name(KXLDArray *segarray, const char *name);
#if KXLD_USER_OR_ILP32
static kern_return_t seg_export_macho_header_32(const KXLDSeg *seg, u_char *buf,
    u_long *header_offset, u_long header_size, u_long data_offset);
#endif /* KXLD_USER_OR_ILP32 */

#if KXLD_USER_OR_LP64
static kern_return_t seg_export_macho_header_64(const KXLDSeg *seg, u_char *buf,
    u_long *header_offset, u_long header_size, u_long data_offset);
#endif /* KXLD_USER_OR_LP64 */
static KXLDSect * get_sect_by_index(const KXLDSeg *seg, u_int idx);
#if KXLD_USER_OR_ILP32
/*******************************************************************************
*******************************************************************************/
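/* Populates a KXLDSeg from a 32-bit Mach-O segment_command and sizes the
 * segment's section pointer array to the number of sections in the command.
 */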
kern_return_t
kxld_seg_init_from_macho_32(KXLDSeg *seg, struct segment_command *src)
{
    kern_return_t rval = KERN_FAILURE;

    strlcpy(seg->segname, src->segname, sizeof(seg->segname));
    seg->base_addr = src->vmaddr;
    seg->link_addr = src->vmaddr;
    seg->vmsize = src->vmsize;
    seg->fileoff = src->fileoff;
    seg->maxprot = src->maxprot;
    seg->initprot = src->initprot;
    seg->flags = src->flags;

    rval = kxld_array_init(&seg->sects, sizeof(KXLDSect *), src->nsects);
    require_noerr(rval, finish);

    rval = KERN_SUCCESS;

finish:
    return rval;
}
#endif /* KXLD_USER_OR_ILP32 */

#if KXLD_USER_OR_LP64
/*******************************************************************************
*******************************************************************************/
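/* 64-bit counterpart of the routine above: populates a KXLDSeg from a Mach-O
 * segment_command_64 and sizes the segment's section pointer array.
 */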
kern_return_t
kxld_seg_init_from_macho_64(KXLDSeg *seg, struct segment_command_64 *src)
{
    kern_return_t rval = KERN_FAILURE;

    strlcpy(seg->segname, src->segname, sizeof(seg->segname));
    seg->base_addr = src->vmaddr;
    seg->link_addr = src->vmaddr;
    seg->vmsize = src->vmsize;
    seg->fileoff = src->fileoff;
    seg->maxprot = src->maxprot;
    seg->initprot = src->initprot;
    seg->flags = src->flags;

    rval = kxld_array_init(&seg->sects, sizeof(KXLDSect *), src->nsects);
    require_noerr(rval, finish);

    rval = KERN_SUCCESS;

finish:
    return rval;
}
#endif /* KXLD_USER_OR_LP64 */

#if KXLD_USER_OR_OBJECT
/*******************************************************************************
*******************************************************************************/
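/* Initializes segarray to a single segment with VM_PROT_ALL protections and
 * fills that segment's section index with every section in sectarray.
 */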
kern_return_t
kxld_seg_create_seg_from_sections(KXLDArray *segarray, KXLDArray *sectarray)
{
    kern_return_t rval = KERN_FAILURE;
    KXLDSeg *seg = NULL;
    KXLDSect *sect = NULL;
    KXLDSect **sectp = NULL;
    u_int i = 0;

    /* Initialize the segment array to one segment */

    rval = kxld_array_init(segarray, sizeof(KXLDSeg), 1);
    require_noerr(rval, finish);

    /* Initialize the segment */

    seg = kxld_array_get_item(segarray, 0);
    seg->initprot = VM_PROT_ALL;
    seg->maxprot = VM_PROT_ALL;

    /* Add the sections to the segment */

    rval = kxld_array_init(&seg->sects, sizeof(KXLDSect *), sectarray->nitems);
    require_noerr(rval, finish);

    for (i = 0; i < sectarray->nitems; ++i) {
        sect = kxld_array_get_item(sectarray, i);
        sectp = kxld_array_get_item(&seg->sects, i);
        *sectp = sect;
    }

    rval = KERN_SUCCESS;

finish:
    return rval;
}
/*******************************************************************************
*******************************************************************************/
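/* Places the object segment immediately after the (page-rounded) Mach-O
 * header, reorders its sections to match the kernel's section_order, assigns
 * each section an aligned link address, and sets the segment's vmsize to
 * cover the laid-out sections.
 */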
kern_return_t
kxld_seg_finalize_object_segment(KXLDArray *segarray, KXLDArray *section_order,
    u_long hdrsize)
{
    kern_return_t rval = KERN_FAILURE;
    KXLDSeg *seg = NULL;
    KXLDSect *sect = NULL;
    u_long sect_offset = 0;
    u_int i = 0;

    check(section_order);
    require_action(segarray->nitems == 1, finish, rval=KERN_FAILURE);

    seg = kxld_array_get_item(segarray, 0);

    /* Reorder the sections */

    rval = reorder_sections(seg, section_order);
    require_noerr(rval, finish);

    /* Set the initial link address at the end of the header pages */

    seg->link_addr = round_page(hdrsize);

    /* Fix up all of the section addresses */

    sect_offset = (u_long) seg->link_addr;
    for (i = 0; i < seg->sects.nitems; ++i) {
        sect = *(KXLDSect **)kxld_array_get_item(&seg->sects, i);

        sect->link_addr = kxld_sect_align_address(sect, sect_offset);
        sect_offset = (u_long) (sect->link_addr + sect->size);
    }

    /* Finish initializing the segment */

    seg->vmsize = round_page(sect_offset) - seg->link_addr;

    rval = KERN_SUCCESS;

finish:
    return rval;
}
/*******************************************************************************
* The legacy section ordering used by kld was based on the order of sections
* in the kernel file.  To achieve the same layout, we save the kernel's
* section ordering as an array of section names when the kernel file itself
* is linked.  Then, when kexts are linked with the KXLD_LEGACY_LAYOUT flag,
* we refer to the kernel's section layout to order the kext's sections.
*
* The algorithm below is as follows.  We iterate through all of the kernel's
* sections grouped by segment name, so that we are processing all of the __TEXT
* sections, then all of the __DATA sections, etc.  We then iterate through the
* kext's sections with a similar grouping, looking for sections that match
* the current kernel's section.  In this way, we order all of the matching
* kext sections in the order in which they appear in the kernel, and then place
* all remaining kext sections at the end of the current segment grouping in
* the order in which they originally appeared.  Sections that only appear in
* the kernel are not created.  Segments that only appear in the kext are
* left in their original ordering.
*
* Reordered kext sections:
*
* In the implementation below, we use a reorder buffer to hold pointers to the
* sections of the current working segment.  We scan this buffer looking for
* matching sections, placing them in the segment's section index as we find them.
* If this function must exit early, the segment's section index is left in an
* unusable state.
*******************************************************************************/
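/* Hypothetical illustration of the reordering (section names chosen only for
 * the example): if the kernel orders its __TEXT group as __text, __const and a
 * kext's __TEXT group contains __const, __text, __cstring, the reordered kext
 * group comes out as __text, __const, __cstring: matching sections first, in
 * kernel order, followed by the unmatched sections in their original order.
 */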
static kern_return_t
reorder_sections(KXLDSeg *seg, KXLDArray *section_order)
{
    kern_return_t rval = KERN_FAILURE;
    KXLDSect *sect = NULL;
    KXLDSect **reorder_buffer = NULL;
    KXLDSectionName *section_name = NULL;
    const char *segname = NULL;
    u_int sect_index = 0, legacy_index = 0, sect_reorder_index = 0;
    u_int sect_start = 0, sect_end = 0, legacy_start = 0, legacy_end = 0;
    u_int nsects = 0;
    u_int i = 0, j = 0;

    check(section_order);

    /* Allocate the reorder buffer with enough space to hold all of the
     * segment's sections.
     */

    reorder_buffer = kxld_alloc(
        seg->sects.nitems * sizeof(*reorder_buffer));
    require_action(reorder_buffer, finish, rval=KERN_RESOURCE_SHORTAGE);

    while (legacy_index < section_order->nitems) {

        /* Find the next group of sections with a common segment in the
         * section_order array.
         */

        legacy_start = legacy_index++;
        legacy_end = legacy_index;

        section_name = kxld_array_get_item(section_order, legacy_start);
        segname = section_name->segname;
        while (legacy_index < section_order->nitems) {
            section_name = kxld_array_get_item(section_order, legacy_index);
            if (!streq_safe(segname, section_name->segname,
                    sizeof(section_name->segname)))
            {
                break;
            }

            ++legacy_index;
            ++legacy_end;
        }

        /* Find a group of sections in the kext that match the current
         * section_order segment.
         */

        sect_start = sect_index;
        sect_end = sect_index;

        while (sect_index < seg->sects.nitems) {
            sect = *(KXLDSect **) kxld_array_get_item(&seg->sects, sect_index);
            if (!streq_safe(segname, sect->segname, sizeof(sect->segname))) {
                break;
            }

            ++sect_index;
            ++sect_end;
        }
        nsects = sect_end - sect_start;

        if (!nsects) continue;

        /* Populate the reorder buffer with the current group of kext sections */

        for (i = sect_start; i < sect_end; ++i) {
            reorder_buffer[i - sect_start] =
                *(KXLDSect **) kxld_array_get_item(&seg->sects, i);
        }

        /* For each section_order section, scan the reorder buffer for a matching
         * kext section.  If one is found, copy it into the next slot in the
         * segment's section index.
         */

        sect_reorder_index = sect_start;
        for (i = legacy_start; i < legacy_end; ++i) {
            section_name = kxld_array_get_item(section_order, i);

            for (j = 0; j < nsects; ++j) {
                sect = reorder_buffer[j];
                if (!sect) continue;

                if (streq_safe(section_name->sectname, sect->sectname,
                        sizeof(section_name->sectname)))
                {
                    break;
                }
            }

            /* Skip section_order entries that have no match in the kext */
            if (j == nsects) continue;

            (void) reorder_section(&seg->sects, &sect_reorder_index,
                reorder_buffer, j);
        }

        /* If any sections remain in the reorder buffer, they are not specified
         * in the section_order array, so append them to the section index in
         * the order they are found.
         */

        for (i = 0; i < nsects; ++i) {
            if (!reorder_buffer[i]) continue;
            reorder_section(&seg->sects, &sect_reorder_index, reorder_buffer, i);
        }
    }

    rval = KERN_SUCCESS;

finish:
    if (reorder_buffer) {
        kxld_free(reorder_buffer, seg->sects.nitems * sizeof(*reorder_buffer));
        reorder_buffer = NULL;
    }

    return rval;
}
/*******************************************************************************
*******************************************************************************/
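/* Moves the reorder buffer entry at reorder_buffer_index into the next open
 * slot of the section index, renumbers the section to match its new position,
 * and clears the buffer slot so the section is not appended again later.
 */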
static void
reorder_section(KXLDArray *sects, u_int *sect_reorder_index,
    KXLDSect **reorder_buffer, u_int reorder_buffer_index)
{
    KXLDSect **tmp = NULL;

    tmp = kxld_array_get_item(sects, *sect_reorder_index);

    *tmp = reorder_buffer[reorder_buffer_index];
    reorder_buffer[reorder_buffer_index]->sectnum = *sect_reorder_index;
    reorder_buffer[reorder_buffer_index] = NULL;

    ++(*sect_reorder_index);
}
#endif /* KXLD_USER_OR_OBJECT */

/*******************************************************************************
*******************************************************************************/
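/* Resets a segment's fields for reuse.  The individual sections are left for
 * kxld_kext.c to clean up; only the segment's section index is cleared here.
 */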
void
kxld_seg_clear(KXLDSeg *seg)
{
    bzero(seg->segname, sizeof(seg->segname));
    seg->base_addr = 0;
    seg->link_addr = 0;
    seg->vmsize = 0;
    seg->fileoff = 0;
    seg->maxprot = 0;
    seg->initprot = 0;
    seg->flags = 0;

    /* Don't clear the individual sections here because kxld_kext.c will take
     * care of that.
     */
    kxld_array_clear(&seg->sects);
}
/*******************************************************************************
*******************************************************************************/
void
kxld_seg_deinit(KXLDSeg *seg)
{
    kxld_array_deinit(&seg->sects);
    bzero(seg, sizeof(*seg));
}
/*******************************************************************************
*******************************************************************************/
kxld_size_t
kxld_seg_get_vmsize(const KXLDSeg *seg)
{
    return seg->vmsize;
}
/*******************************************************************************
*******************************************************************************/
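/* Returns the number of bytes this segment contributes to the Mach-O header:
 * one segment command (32- or 64-bit) plus one section header per section.
 */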
u_long
kxld_seg_get_macho_header_size(const KXLDSeg *seg, boolean_t is_32_bit)
{
    u_long size = 0;

    if (is_32_bit) {
        size += sizeof(struct segment_command);
    } else {
        size += sizeof(struct segment_command_64);
    }
    size += seg->sects.nitems * kxld_sect_get_macho_header_size(is_32_bit);

    return size;
}
/*******************************************************************************
*******************************************************************************/
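/* Returns the page-rounded number of data bytes needed to lay out all of the
 * segment's sections back to back at their required alignments.
 */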
u_long
kxld_seg_get_macho_data_size(const KXLDSeg *seg)
{
    u_long size = 0;
    u_int i = 0;
    KXLDSect *sect = NULL;

    for (i = 0; i < seg->sects.nitems; ++i) {
        sect = get_sect_by_index(seg, i);
        size = (u_long) kxld_sect_align_address(sect, size);
        size += kxld_sect_get_macho_data_size(sect);
    }

    return round_page(size);
}
/*******************************************************************************
*******************************************************************************/
static KXLDSect *
get_sect_by_index(const KXLDSeg *seg, u_int idx)
{
    return *(KXLDSect **) kxld_array_get_item(&seg->sects, idx);
}
/*******************************************************************************
*******************************************************************************/
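/* Writes the segment's load command and each of its sections into a
 * file-ordered buffer, records the number of section data bytes written in the
 * command's filesize field, and page-rounds the running data offset.
 */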
kern_return_t
kxld_seg_export_macho_to_file_buffer(const KXLDSeg *seg, u_char *buf,
    u_long *header_offset, u_long header_size,
    u_long *data_offset, u_long data_size,
    boolean_t is_32_bit)
{
    kern_return_t rval = KERN_FAILURE;
    KXLDSect *sect = NULL;
    u_long base_data_offset = *data_offset;
    u_int i = 0;
    struct segment_command *hdr32 =
        (struct segment_command *) (buf + *header_offset);
    struct segment_command_64 *hdr64 =
        (struct segment_command_64 *) (buf + *header_offset);

    check(header_offset);

    /* Write out the header */

    KXLD_3264_FUNC(is_32_bit, rval,
        seg_export_macho_header_32, seg_export_macho_header_64,
        seg, buf, header_offset, header_size, *data_offset);
    require_noerr(rval, finish);

    /* Write out each section */

    for (i = 0; i < seg->sects.nitems; ++i) {
        sect = get_sect_by_index(seg, i);

        rval = kxld_sect_export_macho_to_file_buffer(sect, buf, header_offset,
            header_size, data_offset, data_size, is_32_bit);
        require_noerr(rval, finish);
    }

    /* Update the filesize */

    if (is_32_bit) {
        hdr32->filesize = (uint32_t) (*data_offset - base_data_offset);
    } else {
        hdr64->filesize = (uint64_t) (*data_offset - base_data_offset);
    }

    *data_offset = round_page(*data_offset);

    rval = KERN_SUCCESS;

finish:
    return rval;
}
/*******************************************************************************
*******************************************************************************/
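/* Writes the segment's load command and each of its sections into a buffer
 * laid out at virtual-memory addresses; the data offset is derived from the
 * segment's link address relative to file_link_addr.
 */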
kern_return_t
kxld_seg_export_macho_to_vm(const KXLDSeg *seg, u_char *buf,
    u_long *header_offset, u_long header_size,
    u_long data_size, kxld_addr_t file_link_addr,
    boolean_t is_32_bit)
{
    kern_return_t rval = KERN_FAILURE;
    KXLDSect *sect = NULL;
    u_long data_offset = (u_long) (seg->link_addr - file_link_addr);
    u_int i = 0;

    check(header_offset);

    /* Write out the header */

    KXLD_3264_FUNC(is_32_bit, rval,
        seg_export_macho_header_32, seg_export_macho_header_64,
        seg, buf, header_offset, header_size, data_offset);
    require_noerr(rval, finish);

    /* Write out each section */

    for (i = 0; i < seg->sects.nitems; ++i) {
        sect = get_sect_by_index(seg, i);

        rval = kxld_sect_export_macho_to_vm(sect, buf, header_offset,
            header_size, file_link_addr, data_size, is_32_bit);
        require_noerr(rval, finish);
    }

    rval = KERN_SUCCESS;

finish:
    return rval;
}
#if KXLD_USER_OR_ILP32
/*******************************************************************************
*******************************************************************************/
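/* Fills in a 32-bit segment_command at the current header offset, casting the
 * segment's addresses and sizes down to 32 bits.
 */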
static kern_return_t
seg_export_macho_header_32(const KXLDSeg *seg, u_char *buf,
    u_long *header_offset, u_long header_size, u_long data_offset)
{
    kern_return_t rval = KERN_FAILURE;
    struct segment_command *seghdr = NULL;

    check(header_offset);

    require_action(sizeof(*seghdr) <= header_size - *header_offset, finish,
        rval=KERN_FAILURE);
    seghdr = (struct segment_command *) (buf + *header_offset);
    *header_offset += sizeof(*seghdr);

    seghdr->cmd = LC_SEGMENT;
    seghdr->cmdsize = (uint32_t) sizeof(*seghdr);
    seghdr->cmdsize +=
        (uint32_t) (seg->sects.nitems * kxld_sect_get_macho_header_size(TRUE));
    strlcpy(seghdr->segname, seg->segname, sizeof(seghdr->segname));
    seghdr->vmaddr = (uint32_t) seg->link_addr;
    seghdr->vmsize = (uint32_t) seg->vmsize;
    seghdr->fileoff = (uint32_t) data_offset;
    seghdr->filesize = (uint32_t) seg->vmsize;
    seghdr->maxprot = seg->maxprot;
    seghdr->initprot = seg->initprot;
    seghdr->nsects = seg->sects.nitems;

    rval = KERN_SUCCESS;

finish:
    return rval;
}
#endif /* KXLD_USER_OR_ILP32 */

#if KXLD_USER_OR_LP64
/*******************************************************************************
*******************************************************************************/
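/* Fills in a 64-bit segment_command_64 at the current header offset; the
 * layout mirrors the 32-bit variant above.
 */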
static kern_return_t
seg_export_macho_header_64(const KXLDSeg *seg, u_char *buf,
    u_long *header_offset, u_long header_size, u_long data_offset)
{
    kern_return_t rval = KERN_FAILURE;
    struct segment_command_64 *seghdr = NULL;

    check(header_offset);

    require_action(sizeof(*seghdr) <= header_size - *header_offset, finish,
        rval=KERN_FAILURE);
    seghdr = (struct segment_command_64 *) (buf + *header_offset);
    *header_offset += sizeof(*seghdr);

    seghdr->cmd = LC_SEGMENT_64;
    seghdr->cmdsize = (uint32_t) sizeof(*seghdr);
    seghdr->cmdsize +=
        (uint32_t) (seg->sects.nitems * kxld_sect_get_macho_header_size(FALSE));
    strlcpy(seghdr->segname, seg->segname, sizeof(seghdr->segname));
    seghdr->vmaddr = (uint64_t) seg->link_addr;
    seghdr->vmsize = (uint64_t) seg->vmsize;
    seghdr->fileoff = (uint64_t) data_offset;
    seghdr->filesize = (uint64_t) seg->vmsize;
    seghdr->maxprot = seg->maxprot;
    seghdr->initprot = seg->initprot;
    seghdr->nsects = seg->sects.nitems;

    rval = KERN_SUCCESS;

finish:
    return rval;
}
#endif /* KXLD_USER_OR_LP64 */

/*******************************************************************************
*******************************************************************************/
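/* Places a section into the first empty slot of the segment's section index.
 * The section's segment name must match this segment, and the index must have
 * a free slot; otherwise KERN_FAILURE is returned.
 */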
kern_return_t
kxld_seg_add_section(KXLDSeg *seg, KXLDSect *sect)
{
    kern_return_t rval = KERN_FAILURE;
    KXLDSect **sectp = NULL;
    u_int i = 0;

    require_action(streq_safe(seg->segname, sect->segname, sizeof(seg->segname)),
        finish, rval=KERN_FAILURE);

    /* Add the section into the section index */

    for (i = 0; i < seg->sects.nitems; ++i) {
        sectp = kxld_array_get_item(&seg->sects, i);
        if (NULL == *sectp) {
            *sectp = sect;
            break;
        }
    }
    require_action(i < seg->sects.nitems, finish, rval=KERN_FAILURE);

    rval = KERN_SUCCESS;

finish:
    return rval;
}
/*******************************************************************************
*******************************************************************************/
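/* Recomputes the segment's vmsize as the page-rounded span from the segment's
 * base address to the end of its highest section.
 */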
kern_return_t
kxld_seg_finish_init(KXLDSeg *seg)
{
    kern_return_t rval = KERN_FAILURE;
    u_int i = 0;
    KXLDSect *sect = NULL;
    kxld_addr_t maxaddr = 0;
    kxld_size_t maxsize = 0;

    if (seg->sects.nitems) {
        for (i = 0; i < seg->sects.nitems; ++i) {
            sect = get_sect_by_index(seg, i);
            require_action(sect, finish, rval=KERN_FAILURE);
            if (sect->base_addr > maxaddr) {
                maxaddr = sect->base_addr;
                maxsize = sect->size;
            }
        }

        /* XXX Cross architecture linking will fail if the page size ever differs
         * from 4096.  (As of this writing, we're fine on ppc, i386, x86_64, and
         * arm.)
         */
        seg->vmsize = round_page(maxaddr + maxsize - seg->base_addr);
    }

    rval = KERN_SUCCESS;

finish:
    return rval;
}
/*******************************************************************************
*******************************************************************************/
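/* Applies VM protections to the segment.  With strict protections, __TEXT gets
 * read/execute (max VM_PROT_ALL) and every other segment gets read/write;
 * otherwise everything gets VM_PROT_ALL.
 */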
void
kxld_seg_set_vm_protections(KXLDSeg *seg, boolean_t strict_protections)
{
    if (strict_protections) {
        if (streq_safe(seg->segname, SEG_TEXT, sizeof(SEG_TEXT))) {
            seg->initprot = TEXT_SEG_PROT;
            seg->maxprot = VM_PROT_ALL;
        } else {
            seg->initprot = DATA_SEG_PROT;
            seg->maxprot = DATA_SEG_PROT;
        }
    } else {
        seg->initprot = VM_PROT_ALL;
        seg->maxprot = VM_PROT_ALL;
    }
}
/*******************************************************************************
*******************************************************************************/
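/* Slides the segment and each of its sections by link_addr. */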
void
kxld_seg_relocate(KXLDSeg *seg, kxld_addr_t link_addr)
{
    KXLDSect *sect = NULL;
    u_int i = 0;

    seg->link_addr += link_addr;
    for (i = 0; i < seg->sects.nitems; ++i) {
        sect = get_sect_by_index(seg, i);
        kxld_sect_relocate(sect, link_addr);
    }
}