/*
 * Copyright (c) 2008 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <mach/machine.h>
#include <mach/vm_param.h>
#include <mach/vm_types.h>
#include <mach/kmod.h>
#include <mach-o/loader.h>
#include <mach-o/nlist.h>
#include <mach-o/reloc.h>
#include <sys/types.h>

#if KERNEL
    #include <libkern/kernel_mach_header.h>
    #include <libkern/OSKextLib.h>
    #include <libkern/OSKextLibPrivate.h>
    #include <mach/vm_param.h>
    #include <mach-o/fat.h>
#else /* !KERNEL */
    #include <architecture/byte_order.h>
    #include <mach/mach_init.h>
    #include <mach-o/arch.h>
    #include <mach-o/swap.h>
#endif /* KERNEL */

#define DEBUG_ASSERT_COMPONENT_NAME_STRING "kxld"
#include <AssertMacros.h>

#include "kxld_demangle.h"
#include "kxld_dict.h"
#include "kxld_kext.h"
#include "kxld_reloc.h"
#include "kxld_sect.h"
#include "kxld_seg.h"
#include "kxld_state.h"
#include "kxld_symtab.h"
#include "kxld_util.h"
#include "kxld_uuid.h"
#include "kxld_vtable.h"
struct symtab_command;

enum kxld_link_type {
    KXLD_LINK_KERNEL,
    KXLD_LINK_KEXT,
    KXLD_LINK_PSEUDO_KEXT,
    KXLD_LINK_UNKNOWN
};

typedef enum kxld_link_type KXLDLinkType;

struct kxld_kext {
    u_char *file;
    u_long size;
    KXLDArray segs;
    KXLDArray sects;
    KXLDArray extrelocs;
    KXLDArray locrelocs;
    KXLDArray vtables;
    KXLDDict vtable_index;
    KXLDRelocator relocator;
    KXLDuuid uuid;
    KXLDSymtab *symtab;
    kxld_addr_t link_addr;
    kmod_info_t *kmod_info;
    kxld_addr_t kmod_link_addr;
    cpu_type_t cputype;
    cpu_subtype_t cpusubtype;
    u_int filetype;
    KXLDLinkType link_type;
    boolean_t is_final_image;
    boolean_t got_is_created;
    struct dysymtab_command *dysymtab_hdr;
#if KXLD_USER_OR_OBJECT
    KXLDArray *section_order;
#endif
#if !KERNEL
    enum NXByteOrder host_order;
    enum NXByteOrder target_order;
#endif
};
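/* A KXLDKext accumulates all per-link state for one kext: the raw Mach-O file,
 * its segments, sections, relocation entries, symbol table, and the vtables
 * built for C++ patching.  The byte-order fields at the end are only used by
 * user-space builds, which may have to byte-swap a cross-endian Mach-O; the
 * in-kernel linker always targets the running kernel's byte order.
 */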
/*******************************************************************************
*******************************************************************************/

static kern_return_t get_target_machine_info(KXLDKext *kext, cpu_type_t cputype,
    cpu_subtype_t cpusubtype);
static kern_return_t get_file_for_arch(KXLDKext *kext, u_char *file, u_long size);

static u_long get_macho_header_size(const KXLDKext *kext);
static u_long get_macho_data_size(const KXLDKext *kext);
static kern_return_t export_macho_header(const KXLDKext *kext, u_char *buf,
    u_int ncmds, u_long *header_offset, u_long header_size);

static kern_return_t init_from_execute(KXLDKext *kext);
static kern_return_t init_from_final_linked_image(KXLDKext *kext, u_int *filetype_out,
    struct symtab_command **symtab_hdr_out);

static boolean_t target_supports_protected_segments(const KXLDKext *kext)
    __attribute__((pure));

#if KXLD_USER_OR_OBJECT
static boolean_t target_supports_object(const KXLDKext *kext) __attribute((pure));
static kern_return_t init_from_object(KXLDKext *kext);
static kern_return_t process_relocs_from_sections(KXLDKext *kext);
#endif /* KXLD_USER_OR_OBJECT */

#if KXLD_USER_OR_BUNDLE
static boolean_t target_supports_bundle(const KXLDKext *kext) __attribute((pure));
static kern_return_t init_from_bundle(KXLDKext *kext);
static kern_return_t process_relocs_from_tables(KXLDKext *kext);
static kern_return_t process_symbol_pointers(KXLDKext *kext);
static void add_to_ptr(u_char *symptr, kxld_addr_t val, boolean_t is_32_bit);
#endif /* KXLD_USER_OR_BUNDLE */

static kern_return_t get_metaclass_symbol_from_super_meta_class_pointer_symbol(
    KXLDKext *kext, KXLDSym *super_metaclass_pointer_sym, KXLDSym **meta_class);

static kern_return_t resolve_symbols(KXLDKext *kext, KXLDDict *defined_symbols,
    KXLDDict *obsolete_symbols);
static kern_return_t patch_vtables(KXLDKext *kext, KXLDDict *patched_vtables,
    KXLDDict *defined_symbols);
static kern_return_t validate_symbols(KXLDKext *kext);
static kern_return_t populate_kmod_info(KXLDKext *kext);
static kern_return_t copy_vtables(KXLDKext *kext, const KXLDDict *patched_vtables);
static kern_return_t create_vtables(KXLDKext *kext);
static void restrict_private_symbols(KXLDKext *kext);

#if KXLD_USER_OR_GOT || KXLD_USER_OR_COMMON
static kern_return_t add_section(KXLDKext *kext, KXLDSect **sect);
#endif /* KXLD_USER_OR_GOT || KXLD_USER_OR_COMMON */

#if KXLD_USER_OR_GOT
static boolean_t target_has_got(const KXLDKext *kext) __attribute__((pure));
static kern_return_t create_got(KXLDKext *kext);
static kern_return_t populate_got(KXLDKext *kext);
#endif /* KXLD_USER_OR_GOT */

static boolean_t target_supports_common(const KXLDKext *kext) __attribute((pure));
#if KXLD_USER_OR_COMMON
static kern_return_t resolve_common_symbols(KXLDKext *kext);
#endif /* KXLD_USER_OR_COMMON */

static boolean_t target_supports_strict_patching(KXLDKext *kext)
    __attribute__((pure));

#if KXLD_USER_OR_ILP32
static u_long get_macho_cmd_data_32(u_char *file, u_long offset,
    u_int *filetype, u_int *ncmds);
static kern_return_t export_macho_header_32(const KXLDKext *kext, u_char *buf,
    u_int ncmds, u_long *header_offset, u_long header_size);
#endif /* KXLD_USER_OR_ILP32 */

#if KXLD_USER_OR_LP64
static u_long get_macho_cmd_data_64(u_char *file, u_long offset,
    u_int *filetype, u_int *ncmds);
static kern_return_t export_macho_header_64(const KXLDKext *kext, u_char *buf,
    u_int ncmds, u_long *header_offset, u_long header_size);
#endif /* KXLD_USER_OR_LP64 */
/*******************************************************************************
*******************************************************************************/
size_t
kxld_kext_sizeof(void)
{
    return sizeof(KXLDKext);
}
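/* The kext object is opaque to callers; they ask for its size here and
 * allocate the storage themselves (for example with kxld_alloc()) before
 * handing the buffer to kxld_kext_init().
 */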
/*******************************************************************************
*******************************************************************************/
kern_return_t
kxld_kext_init(KXLDKext *kext, u_char *file, u_long size,
    const char *name, KXLDFlags flags, boolean_t is_kernel,
    KXLDArray *section_order __unused,
    cpu_type_t cputype, cpu_subtype_t cpusubtype)
{
    kern_return_t rval = KERN_FAILURE;
    KXLDSeg *seg = NULL;
    u_int i = 0;

#if KXLD_USER_OR_OBJECT
    kext->section_order = section_order;
#endif

    /* Find the local architecture */

    rval = get_target_machine_info(kext, cputype, cpusubtype);
    require_noerr(rval, finish);

    /* Find the Mach-O file for the target architecture */

    rval = get_file_for_arch(kext, file, size);
    require_noerr(rval, finish);

    /* Build the relocator */

    rval = kxld_relocator_init(&kext->relocator, kext->cputype,
        kext->cpusubtype, kxld_kext_target_needs_swap(kext));
    require_noerr(rval, finish);

    /* Allocate the symbol table */

    kext->symtab = kxld_alloc(kxld_symtab_sizeof());
    require_action(kext->symtab, finish, rval=KERN_RESOURCE_SHORTAGE);
    bzero(kext->symtab, kxld_symtab_sizeof());

    if (is_kernel) {
        kext->link_type = KXLD_LINK_KERNEL;
    } else {
        kext->link_type = KXLD_LINK_UNKNOWN;
    }

    /* There are four types of Mach-O files that we can support:
     *   1) 32-bit MH_OBJECT      - All pre-SnowLeopard systems
     *   2) 32-bit MH_KEXT_BUNDLE - Not supported
     *   3) 64-bit MH_OBJECT      - Needed for K64 bringup
     *   4) 64-bit MH_KEXT_BUNDLE - The likely 64-bit kext filetype
     */

    if (kxld_kext_is_32_bit(kext)) {
        struct mach_header *mach_hdr = (struct mach_header *) kext->file;
        kext->filetype = mach_hdr->filetype;
    } else {
        struct mach_header_64 *mach_hdr = (struct mach_header_64 *) kext->file;
        kext->filetype = mach_hdr->filetype;
    }

    switch (kext->filetype) {
#if KXLD_USER_OR_OBJECT
    case MH_OBJECT:
        rval = init_from_object(kext);
        require_noerr(rval, finish);
        break;
#endif /* KXLD_USER_OR_OBJECT */
#if KXLD_USER_OR_BUNDLE
    case MH_KEXT_BUNDLE:
        rval = init_from_bundle(kext);
        require_noerr(rval, finish);
        break;
#endif /* KXLD_USER_OR_BUNDLE */
    case MH_EXECUTE:
        rval = init_from_execute(kext);
        require_noerr(rval, finish);
        break;
    default:
        rval = KERN_FAILURE;
        kxld_log(kKxldLogLinking, kKxldLogErr,
            kKxldLogFiletypeNotSupported, kext->filetype);
        goto finish;
    }

    for (i = 0; i < kext->segs.nitems; ++i) {
        seg = kxld_array_get_item(&kext->segs, i);
        kxld_seg_set_vm_protections(seg, target_supports_protected_segments(kext));
    }

    switch (kext->link_type) {
    case KXLD_LINK_KEXT:
        (void) restrict_private_symbols(kext);
        /* Fallthrough */
    case KXLD_LINK_KERNEL:
        rval = create_vtables(kext);
        require_noerr(rval, finish);
        break;
    default:
        break;
    }

    rval = KERN_SUCCESS;

finish:
    return rval;
}
/*******************************************************************************
*******************************************************************************/
static kern_return_t
get_target_machine_info(KXLDKext *kext, cpu_type_t cputype __unused,
    cpu_subtype_t cpusubtype __unused)
{
#if KERNEL

    /* Because the kernel can only link for its own architecture, we know what
     * the host and target architectures are at compile time, so we can use
     * a vastly simplified version of this function.
     */

    check(kext);

#if defined(__i386__)
    kext->cputype = CPU_TYPE_I386;
    kext->cpusubtype = CPU_SUBTYPE_I386_ALL;
    return KERN_SUCCESS;
#elif defined(__ppc__)
    kext->cputype = CPU_TYPE_POWERPC;
    kext->cpusubtype = CPU_SUBTYPE_POWERPC_ALL;
    return KERN_SUCCESS;
#elif defined(__x86_64__)
    kext->cputype = CPU_TYPE_X86_64;
    kext->cpusubtype = CPU_SUBTYPE_X86_64_ALL;
    return KERN_SUCCESS;
#else
    kxld_log(kKxldLogLinking, kKxldLogErr,
        kKxldLogArchNotSupported, _mh_execute_header->cputype);
    return KERN_NOT_SUPPORTED;
#endif /* Supported architecture defines */
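
    /* Note that this in-kernel variant deliberately ignores the requested
     * cputype and cpusubtype (both marked __unused): kernel-side linking can
     * only ever target the architecture the kernel itself was built for.
     */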
#else /* !KERNEL */

    /* User-space must look up the architecture it's running on and the target
     * architecture at run-time.
     */

    kern_return_t rval = KERN_FAILURE;
    const NXArchInfo *host_arch = NULL;

    check(kext);

    host_arch = NXGetLocalArchInfo();
    require_action(host_arch, finish, rval=KERN_FAILURE);

    kext->host_order = host_arch->byteorder;

    /* If the user did not specify a cputype, use the local architecture.
     */

    if (cputype) {
        kext->cputype = cputype;
        kext->cpusubtype = cpusubtype;
    } else {
        kext->cputype = host_arch->cputype;
        kext->target_order = kext->host_order;

        switch (kext->cputype) {
        case CPU_TYPE_I386:
            kext->cpusubtype = CPU_SUBTYPE_I386_ALL;
            break;
        case CPU_TYPE_POWERPC:
            kext->cpusubtype = CPU_SUBTYPE_POWERPC_ALL;
            break;
        case CPU_TYPE_X86_64:
            kext->cpusubtype = CPU_SUBTYPE_X86_64_ALL;
            break;
        case CPU_TYPE_ARM:
            kext->cpusubtype = CPU_SUBTYPE_ARM_ALL;
            break;
        default:
            kext->cpusubtype = 0;
            break;
        }
    }

    /* Validate that we support the target architecture and record its
     * byte order.
     */

    switch(kext->cputype) {
    case CPU_TYPE_ARM:
    case CPU_TYPE_I386:
    case CPU_TYPE_X86_64:
        kext->target_order = NX_LittleEndian;
        break;
    case CPU_TYPE_POWERPC:
        kext->target_order = NX_BigEndian;
        break;
    default:
        rval = KERN_NOT_SUPPORTED;
        kxld_log(kKxldLogLinking, kKxldLogErr,
            kKxldLogArchNotSupported, kext->cputype);
        goto finish;
    }

    rval = KERN_SUCCESS;

finish:
    return rval;
#endif /* KERNEL */
}
/*******************************************************************************
*******************************************************************************/
static kern_return_t
get_file_for_arch(KXLDKext *kext, u_char *file, u_long size)
{
    kern_return_t rval = KERN_FAILURE;
    struct mach_header *mach_hdr = NULL;
#if !KERNEL
    struct fat_header *fat = (struct fat_header *) file;
    struct fat_arch *archs = (struct fat_arch *) &fat[1];
    boolean_t swap = FALSE;
#endif /* !KERNEL */

    check(kext);
    check(file);
    check(size);

    kext->file = file;
    kext->size = size;

    /* We are assuming that we will never receive a fat file in the kernel */

#if !KERNEL
    require_action(size >= sizeof(*fat), finish,
        rval=KERN_FAILURE;
        kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogTruncatedMachO));

    /* The fat header is always big endian, so swap if necessary */
    if (fat->magic == FAT_CIGAM) {
        (void) swap_fat_header(fat, kext->host_order);
        swap = TRUE;
    }

    if (fat->magic == FAT_MAGIC) {
        struct fat_arch *arch = NULL;

        require_action(size >= (sizeof(*fat) + (fat->nfat_arch * sizeof(*archs))),
            finish,
            rval=KERN_FAILURE;
            kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogTruncatedMachO));

        /* Swap the fat_arch structures if necessary */
        if (swap) {
            (void) swap_fat_arch(archs, fat->nfat_arch, kext->host_order);
        }

        /* Locate the Mach-O for the requested architecture */

        arch = NXFindBestFatArch(kext->cputype, kext->cpusubtype, archs,
            fat->nfat_arch);
        require_action(arch, finish, rval=KERN_FAILURE;
            kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogArchNotFound));
        require_action(size >= arch->offset + arch->size, finish,
            rval=KERN_FAILURE;
            kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogTruncatedMachO));

        kext->file = file + arch->offset;
        kext->size = arch->size;
    }
#endif /* !KERNEL */

    /* Swap the Mach-O's headers to this architecture if necessary */
    if (kxld_kext_is_32_bit(kext)) {
        rval = validate_and_swap_macho_32(kext->file, kext->size
#if !KERNEL
            , kext->host_order
#endif /* !KERNEL */
            );
    } else {
        rval = validate_and_swap_macho_64(kext->file, kext->size
#if !KERNEL
            , kext->host_order
#endif /* !KERNEL */
            );
    }
    require_noerr(rval, finish);

    mach_hdr = (struct mach_header *) kext->file;
    require_action(kext->cputype == mach_hdr->cputype, finish,
        rval=KERN_FAILURE;
        kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogTruncatedMachO));

    rval = KERN_SUCCESS;

finish:
    return rval;
}
/*******************************************************************************
*******************************************************************************/
boolean_t
kxld_kext_is_32_bit(const KXLDKext *kext)
{
    check(kext);

    return kxld_is_32_bit(kext->cputype);
}
/*******************************************************************************
*******************************************************************************/
void
kxld_kext_get_cputype(const KXLDKext *kext, cpu_type_t *cputype,
    cpu_subtype_t *cpusubtype)
{
    check(kext);
    check(cputype);
    check(cpusubtype);

    *cputype = kext->cputype;
    *cpusubtype = kext->cpusubtype;
}
/*******************************************************************************
*******************************************************************************/
kern_return_t
kxld_kext_validate_cputype(const KXLDKext *kext, cpu_type_t cputype,
    cpu_subtype_t cpusubtype __unused)
{
    if (kext->cputype != cputype) return KERN_FAILURE;

    return KERN_SUCCESS;
}
/*******************************************************************************
*******************************************************************************/
static boolean_t
target_supports_protected_segments(const KXLDKext *kext)
{
    return (kext->is_final_image &&
            kext->cputype == CPU_TYPE_X86_64);
}
#if KXLD_USER_OR_OBJECT
/*******************************************************************************
*******************************************************************************/
static boolean_t
target_supports_object(const KXLDKext *kext)
{
    return (kext->cputype == CPU_TYPE_POWERPC ||
            kext->cputype == CPU_TYPE_I386    ||
            kext->cputype == CPU_TYPE_ARM);
}
/*******************************************************************************
*******************************************************************************/
static kern_return_t
init_from_object(KXLDKext *kext)
{
    kern_return_t rval = KERN_FAILURE;
    struct load_command *cmd_hdr = NULL;
    struct symtab_command *symtab_hdr = NULL;
    struct uuid_command *uuid_hdr = NULL;
    KXLDSect *sect = NULL;
    u_long offset = 0;
    u_long sect_offset = 0;
    u_int filetype = 0;
    u_int ncmds = 0;
    u_int nsects = 0;
    u_int i = 0;
    boolean_t has_segment = FALSE;

    check(kext);

    require_action(target_supports_object(kext),
        finish, rval=KERN_FAILURE;
        kxld_log(kKxldLogLinking, kKxldLogErr,
            kKxldLogFiletypeNotSupported, MH_OBJECT));

    KXLD_3264_FUNC(kxld_kext_is_32_bit(kext), offset,
        get_macho_cmd_data_32, get_macho_cmd_data_64,
        kext->file, offset, &filetype, &ncmds);

    require_action(filetype == MH_OBJECT, finish, rval=KERN_FAILURE);

    /* MH_OBJECTs use one unnamed segment to contain all of the sections.  We
     * loop over all of the load commands to initialize the structures we
     * expect.  Then, we'll use the unnamed segment to get to all of the
     * sections, and then use those sections to create the actual segments.
     */

    for (; i < ncmds; ++i, offset += cmd_hdr->cmdsize) {
        cmd_hdr = (struct load_command *) (kext->file + offset);

        switch(cmd_hdr->cmd) {
#if KXLD_USER_OR_ILP32
        case LC_SEGMENT:
            {
                struct segment_command *seg_hdr =
                    (struct segment_command *) cmd_hdr;

                /* Ignore segments with no vm size */
                if (!seg_hdr->vmsize) continue;

                require_action(kxld_kext_is_32_bit(kext), finish, rval=KERN_FAILURE;
                    kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogMalformedMachO
                    "LC_SEGMENT in 64-bit kext."));
                require_action(!has_segment, finish, rval=KERN_FAILURE;
                    kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogMalformedMachO
                    "Multiple segments in an MH_OBJECT kext."));

                nsects = seg_hdr->nsects;
                sect_offset = offset + sizeof(*seg_hdr);
                has_segment = TRUE;
            }
            break;
#endif /* KXLD_USER_OR_ILP32 */
#if KXLD_USER_OR_LP64
        case LC_SEGMENT_64:
            {
                struct segment_command_64 *seg_hdr =
                    (struct segment_command_64 *) cmd_hdr;

                /* Ignore segments with no vm size */
                if (!seg_hdr->vmsize) continue;

                require_action(!kxld_kext_is_32_bit(kext), finish, rval=KERN_FAILURE;
                    kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogMalformedMachO
                    "LC_SEGMENT_64 in a 32-bit kext."));
                require_action(!has_segment, finish, rval=KERN_FAILURE;
                    kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogMalformedMachO
                    "Multiple segments in an MH_OBJECT kext."));

                nsects = seg_hdr->nsects;
                sect_offset = offset + sizeof(*seg_hdr);
                has_segment = TRUE;
            }
            break;
#endif /* KXLD_USER_OR_LP64 */
        case LC_SYMTAB:
            symtab_hdr = (struct symtab_command *) cmd_hdr;

            KXLD_3264_FUNC(kxld_kext_is_32_bit(kext), rval,
                kxld_symtab_init_from_macho_32, kxld_symtab_init_from_macho_64,
                kext->symtab, kext->file, symtab_hdr, 0);
            require_noerr(rval, finish);
            break;
        case LC_UUID:
            uuid_hdr = (struct uuid_command *) cmd_hdr;
            kxld_uuid_init_from_macho(&kext->uuid, uuid_hdr);
            break;
        case LC_UNIXTHREAD:
            /* Don't need to do anything with UNIXTHREAD */
            break;
        default:
            rval = KERN_FAILURE;
            kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogMalformedMachO
                "Invalid segment type in MH_OBJECT kext: %u.", cmd_hdr->cmd);
            goto finish;
        }
    }

    if (has_segment) {

        /* Get the number of sections from the segment and build the section index */

        rval = kxld_array_init(&kext->sects, sizeof(KXLDSect), nsects);
        require_noerr(rval, finish);

        /* Loop over all of the sections to initialize the section index */

        for (i = 0; i < nsects; ++i) {
            sect = kxld_array_get_item(&kext->sects, i);
            KXLD_3264_FUNC(kxld_kext_is_32_bit(kext), rval,
                kxld_sect_init_from_macho_32, kxld_sect_init_from_macho_64,
                sect, kext->file, &sect_offset, i, &kext->relocator);
            require_noerr(rval, finish);
        }

        /* Create special sections */

#if KXLD_USER_OR_GOT
        rval = create_got(kext);
        require_noerr(rval, finish);
#endif /* KXLD_USER_OR_GOT */

#if KXLD_USER_OR_COMMON
        rval = resolve_common_symbols(kext);
        require_noerr(rval, finish);
#endif /* KXLD_USER_OR_COMMON */

        /* Create the segments from the section index */

        rval = kxld_seg_create_seg_from_sections(&kext->segs, &kext->sects);
        require_noerr(rval, finish);

        rval = kxld_seg_finalize_object_segment(&kext->segs,
            kext->section_order, get_macho_header_size(kext));
        require_noerr(rval, finish);

        kext->link_type = KXLD_LINK_KEXT;
    } else {
        kext->link_type = KXLD_LINK_PSEUDO_KEXT;
    }

    rval = KERN_SUCCESS;

finish:
    return rval;
}
#endif /* KXLD_USER_OR_OBJECT */
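/* An MH_OBJECT file that carries no segment load command contains only a
 * symbol table; init_from_object() marks such a file KXLD_LINK_PSEUDO_KEXT
 * (a symbol set) rather than KXLD_LINK_KEXT, and later stages treat it
 * accordingly.
 */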
/*******************************************************************************
*******************************************************************************/
static kern_return_t
init_from_final_linked_image(KXLDKext *kext, u_int *filetype_out,
    struct symtab_command **symtab_hdr_out)
{
    kern_return_t rval = KERN_FAILURE;
    KXLDSeg *seg = NULL;
    KXLDSect *sect = NULL;
    struct load_command *cmd_hdr = NULL;
    struct symtab_command *symtab_hdr = NULL;
    struct uuid_command *uuid_hdr = NULL;
    u_long base_offset = 0;
    u_long offset = 0;
    u_long sect_offset = 0;
    u_int filetype = 0;
    u_int ncmds = 0;
    u_int nsegs = 0;
    u_int nsects = 0;
    u_int segi = 0;
    u_int secti = 0;
    u_int i = 0;
    u_int j = 0;

    KXLD_3264_FUNC(kxld_kext_is_32_bit(kext), base_offset,
        get_macho_cmd_data_32, get_macho_cmd_data_64,
        kext->file, offset, &filetype, &ncmds);

    /* First pass to count segments and sections */

    offset = base_offset;
    for (i = 0; i < ncmds; ++i, offset += cmd_hdr->cmdsize) {
        cmd_hdr = (struct load_command *) (kext->file + offset);

        switch(cmd_hdr->cmd) {
#if KXLD_USER_OR_ILP32
        case LC_SEGMENT:
            {
                struct segment_command *seg_hdr =
                    (struct segment_command *) cmd_hdr;

                /* Ignore segments with no vm size */
                if (!seg_hdr->vmsize) continue;

                ++nsegs;
                nsects += seg_hdr->nsects;
            }
            break;
#endif /* KXLD_USER_OR_ILP32 */
#if KXLD_USER_OR_LP64
        case LC_SEGMENT_64:
            {
                struct segment_command_64 *seg_hdr =
                    (struct segment_command_64 *) cmd_hdr;

                /* Ignore segments with no vm size */
                if (!seg_hdr->vmsize) continue;

                ++nsegs;
                nsects += seg_hdr->nsects;
            }
            break;
#endif /* KXLD_USER_OR_LP64 */
        default:
            break;
        }
    }

    /* Allocate the segments and sections */

    rval = kxld_array_init(&kext->segs, sizeof(KXLDSeg), nsegs);
    require_noerr(rval, finish);

    rval = kxld_array_init(&kext->sects, sizeof(KXLDSect), nsects);
    require_noerr(rval, finish);

    /* Initialize the segments and sections */

    offset = base_offset;
    for (i = 0; i < ncmds; ++i, offset += cmd_hdr->cmdsize) {
        cmd_hdr = (struct load_command *) (kext->file + offset);
        seg = NULL;

        switch(cmd_hdr->cmd) {
#if KXLD_USER_OR_ILP32
        case LC_SEGMENT:
            {
                struct segment_command *seg_hdr =
                    (struct segment_command *) cmd_hdr;

                /* Ignore segments with no vm size */
                if (!seg_hdr->vmsize) continue;

                seg = kxld_array_get_item(&kext->segs, segi++);

                rval = kxld_seg_init_from_macho_32(seg, seg_hdr);
                require_noerr(rval, finish);

                sect_offset = offset + sizeof(*seg_hdr);
            }
            break;
#endif /* KXLD_USER_OR_ILP32 */
#if KXLD_USER_OR_LP64
        case LC_SEGMENT_64:
            {
                struct segment_command_64 *seg_hdr =
                    (struct segment_command_64 *) cmd_hdr;

                /* Ignore segments with no vm size */
                if (!seg_hdr->vmsize) continue;

                seg = kxld_array_get_item(&kext->segs, segi++);

                rval = kxld_seg_init_from_macho_64(seg, seg_hdr);
                require_noerr(rval, finish);

                sect_offset = offset + sizeof(*seg_hdr);
            }
            break;
#endif /* KXLD_USER_OR_LP64 */
        case LC_SYMTAB:
            symtab_hdr = (struct symtab_command *) cmd_hdr;
            break;
        case LC_UUID:
            uuid_hdr = (struct uuid_command *) cmd_hdr;
            kxld_uuid_init_from_macho(&kext->uuid, uuid_hdr);
            break;
        case LC_DYSYMTAB:
            kext->dysymtab_hdr = (struct dysymtab_command *) cmd_hdr;

            rval = kxld_reloc_create_macho(&kext->extrelocs, &kext->relocator,
                (struct relocation_info *) (kext->file + kext->dysymtab_hdr->extreloff),
                kext->dysymtab_hdr->nextrel);
            require_noerr(rval, finish);

            rval = kxld_reloc_create_macho(&kext->locrelocs, &kext->relocator,
                (struct relocation_info *) (kext->file + kext->dysymtab_hdr->locreloff),
                kext->dysymtab_hdr->nlocrel);
            require_noerr(rval, finish);
            break;
        case LC_UNIXTHREAD:
            /* Don't need to do anything with UNIXTHREAD for the kernel */
            require_action(kext->link_type == KXLD_LINK_KERNEL, finish,
                rval=KERN_FAILURE;
                kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogMalformedMachO
                "LC_UNIXTHREAD segment is not valid in a kext."));
            break;
        default:
            rval = KERN_FAILURE;
            kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogMalformedMachO
                "Invalid segment type in MH_KEXT_BUNDLE kext: %u.", cmd_hdr->cmd);
            goto finish;
        }

        if (seg) {

            /* Initialize the sections */
            for (j = 0; j < seg->sects.nitems; ++j, ++secti) {
                sect = kxld_array_get_item(&kext->sects, secti);
                KXLD_3264_FUNC(kxld_kext_is_32_bit(kext), rval,
                    kxld_sect_init_from_macho_32, kxld_sect_init_from_macho_64,
                    sect, kext->file, &sect_offset, secti, &kext->relocator);
                require_noerr(rval, finish);

                /* Add the section to the segment.  This will also make sure
                 * that the sections and segments have the same segname.
                 */
                rval = kxld_seg_add_section(seg, sect);
                require_noerr(rval, finish);
            }

            rval = kxld_seg_finish_init(seg);
            require_noerr(rval, finish);
        }
    }

    if (filetype_out) *filetype_out = filetype;
    if (symtab_hdr_out) *symtab_hdr_out = symtab_hdr;
    kext->is_final_image = TRUE;
    rval = KERN_SUCCESS;

finish:
    return rval;
}
/*******************************************************************************
*******************************************************************************/
static kern_return_t
init_from_execute(KXLDKext *kext)
{
    kern_return_t rval = KERN_FAILURE;
    struct symtab_command *symtab_hdr = NULL;
    kxld_addr_t linkedit_offset = 0;
    u_int filetype = 0;
    KXLDSeg *textseg = NULL;
    KXLDSeg *linkeditseg = NULL;
#if KXLD_USER_OR_OBJECT
    KXLDSeg *seg = NULL;
    KXLDSect *sect = NULL;
    KXLDSectionName *sname = NULL;
    u_int i = 0, j = 0, k = 0;
#endif /* KXLD_USER_OR_OBJECT */

    check(kext);

    require_action(kext->link_type == KXLD_LINK_KERNEL, finish,
        rval=KERN_FAILURE);

    rval = init_from_final_linked_image(kext, &filetype, &symtab_hdr);
    require_noerr(rval, finish);

    require_action(filetype == MH_EXECUTE, finish, rval=KERN_FAILURE;
        kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogMalformedMachO
        "The kernel file is not of type MH_EXECUTE."));

    /* When we're in the kernel, the symbol table can no longer be found by the
     * symtab_command alone because the command specifies offsets for the file
     * on disk, not the file mapped into memory.  We can find the additional
     * offset necessary by finding the difference between the linkedit segment's
     * vm address and the text segment's vm address.
     */

    textseg = kxld_kext_get_seg_by_name(kext, SEG_TEXT);
    require_action(textseg, finish, rval=KERN_FAILURE;
        kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogMalformedMachO));

    linkeditseg = kxld_kext_get_seg_by_name(kext, SEG_LINKEDIT);
    require_action(linkeditseg, finish, rval=KERN_FAILURE;
        kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogMalformedMachO));

    linkedit_offset = linkeditseg->base_addr - textseg->base_addr -
        linkeditseg->fileoff;
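
    /* Worked example (illustrative numbers only): if __TEXT has vmaddr
     * 0xffffff8000200000 and __LINKEDIT has vmaddr 0xffffff8000800000 with
     * file offset 0x600000, then linkedit_offset is
     * 0x800000 - 0x200000 - 0x600000 = 0, and the symtab_command's file
     * offsets can be used directly.  If __LINKEDIT were instead mapped at
     * 0xffffff8000900000, the extra 0x100000 is the amount that must be added
     * to every symbol-table file offset to reach it in memory.
     */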
    /* Initialize the symbol table */

    KXLD_3264_FUNC(kxld_kext_is_32_bit(kext), rval,
        kxld_symtab_init_from_macho_32, kxld_symtab_init_from_macho_64,
        kext->symtab, kext->file, symtab_hdr, linkedit_offset);
    require_noerr(rval, finish);

#if KXLD_USER_OR_OBJECT
    /* Save off the order of section names so that we can lay out kext
     * sections for MH_OBJECT-based systems.
     */
    if (target_supports_object(kext)) {

        rval = kxld_array_init(kext->section_order, sizeof(KXLDSectionName),
            kext->sects.nitems);
        require_noerr(rval, finish);

        /* Copy the section names into the section_order array for future kext
         * section ordering.
         */
        for (i = 0, k = 0; i < kext->segs.nitems; ++i) {
            seg = kxld_array_get_item(&kext->segs, i);

            for (j = 0; j < seg->sects.nitems; ++j, ++k) {
                sect = *(KXLDSect **) kxld_array_get_item(&seg->sects, j);
                sname = kxld_array_get_item(kext->section_order, k);

                strlcpy(sname->segname, sect->segname, sizeof(sname->segname));
                strlcpy(sname->sectname, sect->sectname, sizeof(sname->sectname));
            }
        }
    }
#endif /* KXLD_USER_OR_OBJECT */

    rval = KERN_SUCCESS;

finish:
    return rval;
}
#if KXLD_USER_OR_BUNDLE
/*******************************************************************************
*******************************************************************************/
static boolean_t
target_supports_bundle(const KXLDKext *kext)
{
    return (kext->cputype == CPU_TYPE_X86_64);
}
998 *******************************************************************************/
1000 init_from_bundle(KXLDKext
*kext
)
1002 kern_return_t rval
= KERN_FAILURE
;
1003 KXLDSeg
*seg
= NULL
;
1004 struct symtab_command
*symtab_hdr
= NULL
;
1010 require_action(target_supports_bundle(kext
), finish
,
1012 kxld_log(kKxldLogLinking
, kKxldLogErr
,
1013 kKxldLogFiletypeNotSupported
, MH_KEXT_BUNDLE
));
1015 rval
= init_from_final_linked_image(kext
, &filetype
, &symtab_hdr
);
1016 require_noerr(rval
, finish
);
1018 require_action(filetype
== MH_KEXT_BUNDLE
, finish
,
1021 KXLD_3264_FUNC(kxld_kext_is_32_bit(kext
), rval
,
1022 kxld_symtab_init_from_macho_32
, kxld_symtab_init_from_macho_64
,
1023 kext
->symtab
, kext
->file
, symtab_hdr
, /* linkedit offset */ 0);
1024 require_noerr(rval
, finish
);
1026 if (kext
->segs
.nitems
) {
1027 /* Remove the __LINKEDIT segment, since we never keep the symbol
1028 * table around in memory for kexts.
1030 seg
= kxld_kext_get_seg_by_name(kext
, SEG_LINKEDIT
);
1032 rval
= kxld_array_get_index(&kext
->segs
, seg
, &idx
);
1033 require_noerr(rval
, finish
);
1035 kxld_seg_deinit(seg
);
1037 rval
= kxld_array_remove(&kext
->segs
, idx
);
1038 require_noerr(rval
, finish
);
1041 kext
->link_type
= KXLD_LINK_KEXT
;
1043 kext
->link_type
= KXLD_LINK_PSEUDO_KEXT
;
1046 rval
= KERN_SUCCESS
;
1050 #endif /* KXLD_USER_OR_BUNDLE */
#if KXLD_USER_OR_ILP32
/*******************************************************************************
*******************************************************************************/
static u_long
get_macho_cmd_data_32(u_char *file, u_long offset, u_int *filetype, u_int *ncmds)
{
    struct mach_header *mach_hdr = (struct mach_header *) (file + offset);

    if (filetype) *filetype = mach_hdr->filetype;
    if (ncmds) *ncmds = mach_hdr->ncmds;

    return sizeof(*mach_hdr);
}
#endif /* KXLD_USER_OR_ILP32 */
#if KXLD_USER_OR_LP64
/*******************************************************************************
*******************************************************************************/
static u_long
get_macho_cmd_data_64(u_char *file, u_long offset, u_int *filetype, u_int *ncmds)
{
    struct mach_header_64 *mach_hdr = (struct mach_header_64 *) (file + offset);

    if (filetype) *filetype = mach_hdr->filetype;
    if (ncmds) *ncmds = mach_hdr->ncmds;

    return sizeof(*mach_hdr);
}
#endif /* KXLD_USER_OR_LP64 */
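
/* Each helper above returns the size of the corresponding mach_header, which
 * callers use as the offset of the first load command.  Call sites pick
 * between the two through the KXLD_3264_FUNC() dispatch macro, which invokes
 * the 32-bit or 64-bit function based on its boolean first argument and stores
 * the result in its second argument.
 */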
/*******************************************************************************
*******************************************************************************/
static kern_return_t
create_vtables(KXLDKext *kext)
{
    kern_return_t rval = KERN_FAILURE;
    KXLDSymtabIterator iter;
    KXLDSym *sym = NULL;
    KXLDSym *vtable_sym = NULL;
    KXLDSym *meta_vtable_sym = NULL;
    KXLDSect *vtable_sect = NULL;
    KXLDSect *meta_vtable_sect = NULL;
    KXLDVTable *vtable = NULL;
    KXLDVTable *meta_vtable = NULL;
    char class_name[KXLD_MAX_NAME_LEN];
    char vtable_name[KXLD_MAX_NAME_LEN];
    char meta_vtable_name[KXLD_MAX_NAME_LEN];
    char *demangled_name1 = NULL;
    char *demangled_name2 = NULL;
    size_t demangled_length1 = 0;
    size_t demangled_length2 = 0;
    u_int nvtables = 0;
    u_int i = 0;

    if (kext->link_type == KXLD_LINK_KERNEL) {
        /* Create a vtable object for every vtable symbol */
        kxld_symtab_iterator_init(&iter, kext->symtab, kxld_sym_is_vtable, FALSE);
        nvtables = kxld_symtab_iterator_get_num_remaining(&iter);
    } else {
        /* We walk over the super metaclass pointer symbols, because classes
         * with them are the only ones that need patching.  Then we double the
         * number of vtables we're expecting, because every pointer will have a
         * class vtable and a MetaClass vtable.
         */
        kxld_symtab_iterator_init(&iter, kext->symtab,
            kxld_sym_is_super_metaclass_pointer, FALSE);
        nvtables = kxld_symtab_iterator_get_num_remaining(&iter) * 2;
    }
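
    /* For example (hypothetical kext): a kext that defines a single class Foo
     * has one super metaclass pointer symbol, and two vtables are expected for
     * it -- the class vtable (__ZTV3Foo) and the vtable of its nested
     * Foo::MetaClass (__ZTVN3Foo9MetaClassE), following the standard Itanium
     * C++ name mangling.
     */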
    /* Allocate the array of vtable objects.
     */
    rval = kxld_array_init(&kext->vtables, sizeof(KXLDVTable), nvtables);
    require_noerr(rval, finish);

    /* Initialize from each vtable symbol */
    while ((sym = kxld_symtab_iterator_get_next(&iter))) {

        if (kext->link_type == KXLD_LINK_KERNEL) {
            vtable_sym = sym;
        } else {
            /* Get the class name from the smc pointer */
            rval = kxld_sym_get_class_name_from_super_metaclass_pointer(
                sym, class_name, sizeof(class_name));
            require_noerr(rval, finish);

            /* Get the vtable name from the class name */
            rval = kxld_sym_get_vtable_name_from_class_name(class_name,
                vtable_name, sizeof(vtable_name));
            require_noerr(rval, finish);

            /* Get the vtable symbol */
            vtable_sym = kxld_symtab_get_symbol_by_name(kext->symtab, vtable_name);
            require_action(vtable_sym, finish, rval=KERN_FAILURE;
                kxld_log(kKxldLogPatching, kKxldLogErr, kKxldLogMissingVtable,
                    vtable_name, class_name));

            /* Get the meta vtable name from the class name */
            rval = kxld_sym_get_meta_vtable_name_from_class_name(class_name,
                meta_vtable_name, sizeof(meta_vtable_name));
            require_noerr(rval, finish);

            /* Get the meta vtable symbol */
            meta_vtable_sym = kxld_symtab_get_symbol_by_name(kext->symtab,
                meta_vtable_name);
            if (!meta_vtable_sym) {
                /* If we don't support strict patching and we can't find the vtable,
                 * log a warning and reduce the expected number of vtables by 1.
                 */
                if (target_supports_strict_patching(kext)) {
                    kxld_log(kKxldLogPatching, kKxldLogErr, kKxldLogMissingVtable,
                        meta_vtable_name, class_name);
                    rval = KERN_FAILURE;
                    goto finish;
                } else {
                    kxld_log(kKxldLogPatching, kKxldLogErr,
                        "Warning: " kKxldLogMissingVtable,
                        kxld_demangle(meta_vtable_name, &demangled_name1,
                            &demangled_length1),
                        kxld_demangle(class_name, &demangled_name2,
                            &demangled_length2));
                    kxld_array_resize(&kext->vtables, --nvtables);
                }
            }
        }

        /* Get the vtable's section */
        vtable_sect = kxld_array_get_item(&kext->sects, vtable_sym->sectnum);
        require_action(vtable_sect, finish, rval=KERN_FAILURE);

        vtable = kxld_array_get_item(&kext->vtables, i++);

        if (kext->link_type == KXLD_LINK_KERNEL) {
            /* Initialize the kernel vtable */
            rval = kxld_vtable_init_from_kernel_macho(vtable, vtable_sym,
                vtable_sect, kext->symtab, &kext->relocator);
            require_noerr(rval, finish);
        } else {
            /* Initialize the class vtable */
            if (kext->is_final_image) {
                rval = kxld_vtable_init_from_final_macho(vtable, vtable_sym,
                    vtable_sect, kext->symtab, &kext->relocator, &kext->extrelocs);
                require_noerr(rval, finish);
            } else {
                rval = kxld_vtable_init_from_object_macho(vtable, vtable_sym,
                    vtable_sect, kext->symtab, &kext->relocator);
                require_noerr(rval, finish);
            }

            /* meta_vtable_sym will be null when we don't support strict patching
             * and can't find the metaclass vtable.
             */
            if (meta_vtable_sym) {
                /* Get the vtable's section */
                meta_vtable_sect = kxld_array_get_item(&kext->sects,
                    meta_vtable_sym->sectnum);
                require_action(vtable_sect, finish, rval=KERN_FAILURE);

                meta_vtable = kxld_array_get_item(&kext->vtables, i++);

                /* Initialize the metaclass vtable */
                if (kext->is_final_image) {
                    rval = kxld_vtable_init_from_final_macho(meta_vtable, meta_vtable_sym,
                        meta_vtable_sect, kext->symtab, &kext->relocator, &kext->extrelocs);
                    require_noerr(rval, finish);
                } else {
                    rval = kxld_vtable_init_from_object_macho(meta_vtable, meta_vtable_sym,
                        meta_vtable_sect, kext->symtab, &kext->relocator);
                    require_noerr(rval, finish);
                }
            }
        }
    }
    require_action(i == kext->vtables.nitems, finish,
        rval=KERN_FAILURE);

    /* Map vtable names to the vtable structures */
    rval = kxld_dict_init(&kext->vtable_index, kxld_dict_string_hash,
        kxld_dict_string_cmp, kext->vtables.nitems);
    require_noerr(rval, finish);

    for (i = 0; i < kext->vtables.nitems; ++i) {
        vtable = kxld_array_get_item(&kext->vtables, i);
        rval = kxld_dict_insert(&kext->vtable_index, vtable->name, vtable);
        require_noerr(rval, finish);
    }

    rval = KERN_SUCCESS;

finish:
    if (demangled_name1) kxld_free(demangled_name1, demangled_length1);
    if (demangled_name2) kxld_free(demangled_name2, demangled_length2);

    return rval;
}
/*******************************************************************************
* Temporary workaround for PR-6668105
* new, new[], delete, and delete[] may be overridden globally in a kext.
* We should do this with some sort of weak symbols, but we'll use a whitelist
* for now to minimize risk.
*******************************************************************************/
static void
restrict_private_symbols(KXLDKext *kext)
{
    const char *private_symbols[] = {
        KXLD_KMOD_INFO_SYMBOL,
        KXLD_OPERATOR_NEW_SYMBOL,
        KXLD_OPERATOR_NEW_ARRAY_SYMBOL,
        KXLD_OPERATOR_DELETE_SYMBOL,
        KXLD_OPERATOR_DELETE_ARRAY_SYMBOL
    };
    KXLDSymtabIterator iter;
    KXLDSym *sym = NULL;
    const char *name = NULL;
    u_int i = 0;

    kxld_symtab_iterator_init(&iter, kext->symtab, kxld_sym_is_exported, FALSE);
    while ((sym = kxld_symtab_iterator_get_next(&iter))) {
        for (i = 0; i < const_array_len(private_symbols); ++i) {
            name = private_symbols[i];
            if (!streq(sym->name, name)) {
                continue;
            }

            kxld_sym_mark_private(sym);
        }
    }
}
/*******************************************************************************
*******************************************************************************/
void
kxld_kext_clear(KXLDKext *kext)
{
    KXLDSeg *seg = NULL;
    KXLDSect *sect = NULL;
    KXLDVTable *vtable = NULL;
    u_int i;

    check(kext);

#if !KERNEL
    if (kext->link_type == KXLD_LINK_KERNEL) {
        unswap_macho(kext->file, kext->host_order, kext->target_order);
    }
#endif /* !KERNEL */

    for (i = 0; i < kext->segs.nitems; ++i) {
        seg = kxld_array_get_item(&kext->segs, i);
        kxld_seg_clear(seg);
    }
    kxld_array_reset(&kext->segs);

    for (i = 0; i < kext->sects.nitems; ++i) {
        sect = kxld_array_get_item(&kext->sects, i);
        kxld_sect_clear(sect);
    }
    kxld_array_reset(&kext->sects);

    for (i = 0; i < kext->vtables.nitems; ++i) {
        vtable = kxld_array_get_item(&kext->vtables, i);
        kxld_vtable_clear(vtable);
    }
    kxld_array_reset(&kext->vtables);

    kxld_array_reset(&kext->extrelocs);
    kxld_array_reset(&kext->locrelocs);
    kxld_dict_clear(&kext->vtable_index);
    kxld_relocator_clear(&kext->relocator);
    kxld_uuid_clear(&kext->uuid);

    if (kext->symtab) kxld_symtab_clear(kext->symtab);

    kext->link_addr = 0;
    kext->kmod_link_addr = 0;
    kext->cputype = 0;
    kext->cpusubtype = 0;
    kext->link_type = KXLD_LINK_UNKNOWN;
    kext->is_final_image = FALSE;
    kext->got_is_created = FALSE;
}
/*******************************************************************************
*******************************************************************************/
void
kxld_kext_deinit(KXLDKext *kext)
{
    KXLDSeg *seg = NULL;
    KXLDSect *sect = NULL;
    KXLDVTable *vtable = NULL;
    u_int i;

    check(kext);

#if !KERNEL
    if (kext->link_type == KXLD_LINK_KERNEL) {
        unswap_macho(kext->file, kext->host_order, kext->target_order);
    }
#endif /* !KERNEL */

    for (i = 0; i < kext->segs.maxitems; ++i) {
        seg = kxld_array_get_slot(&kext->segs, i);
        kxld_seg_deinit(seg);
    }
    kxld_array_deinit(&kext->segs);

    for (i = 0; i < kext->sects.maxitems; ++i) {
        sect = kxld_array_get_slot(&kext->sects, i);
        kxld_sect_deinit(sect);
    }
    kxld_array_deinit(&kext->sects);

    for (i = 0; i < kext->vtables.maxitems; ++i) {
        vtable = kxld_array_get_slot(&kext->vtables, i);
        kxld_vtable_deinit(vtable);
    }
    kxld_array_deinit(&kext->vtables);

    kxld_array_deinit(&kext->extrelocs);
    kxld_array_deinit(&kext->locrelocs);
    kxld_dict_deinit(&kext->vtable_index);

    if (kext->symtab) {
        kxld_symtab_deinit(kext->symtab);
        kxld_free(kext->symtab, kxld_symtab_sizeof());
    }

    bzero(kext, sizeof(*kext));
}
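
/* kxld_kext_clear() above resets a kext object so it can be reused for another
 * link, while kxld_kext_deinit() releases everything, including the separately
 * allocated symbol table, and zeroes the structure.
 */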
/*******************************************************************************
*******************************************************************************/
boolean_t
kxld_kext_is_true_kext(const KXLDKext *kext)
{
    return (kext->link_type == KXLD_LINK_KEXT);
}
/*******************************************************************************
*******************************************************************************/
void
kxld_kext_get_vmsize(const KXLDKext *kext, u_long *header_size, u_long *vmsize)
{
    check(kext);
    check(header_size);
    check(vmsize);

    /* vmsize is the padded header page(s) + segment vmsizes */

    *header_size = (kext->is_final_image) ?
        0 : round_page(get_macho_header_size(kext));
    *vmsize = *header_size + get_macho_data_size(kext);
}
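
/* Example (illustrative numbers, assuming 4 KB pages): a non-final MH_OBJECT
 * kext whose generated Mach-O header is 0x3c8 bytes and whose segments total
 * 0x5000 bytes of vmsize reports header_size = 0x1000 and vmsize = 0x6000.
 * A final linked image already carries its header inside its own segments, so
 * header_size is reported as 0 and vmsize is just the segment total.
 */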
/*******************************************************************************
*******************************************************************************/
const struct kxld_symtab *
kxld_kext_get_symtab(const KXLDKext *kext)
{
    check(kext);

    return kext->symtab;
}
/*******************************************************************************
*******************************************************************************/
u_int
kxld_kext_get_num_symbols(const KXLDKext *kext)
{
    check(kext);

    return kxld_symtab_get_num_symbols(kext->symtab);
}
/*******************************************************************************
*******************************************************************************/
void
kxld_kext_get_vtables(KXLDKext *kext, const KXLDArray **vtables)
{
    check(kext);
    check(vtables);

    *vtables = &kext->vtables;
}
/*******************************************************************************
*******************************************************************************/
u_int
kxld_kext_get_num_vtables(const KXLDKext *kext)
{
    check(kext);

    return kext->vtables.nitems;
}
/*******************************************************************************
*******************************************************************************/
KXLDSeg *
kxld_kext_get_seg_by_name(const KXLDKext *kext, const char *segname)
{
    KXLDSeg *seg = NULL;
    u_int i = 0;

    for (i = 0; i < kext->segs.nitems; ++i) {
        seg = kxld_array_get_item(&kext->segs, i);

        if (streq(segname, seg->segname)) break;

        seg = NULL;
    }

    return seg;
}
/*******************************************************************************
*******************************************************************************/
KXLDSect *
kxld_kext_get_sect_by_name(const KXLDKext *kext, const char *segname,
    const char *sectname)
{
    KXLDSect *sect = NULL;
    u_int i = 0;

    for (i = 0; i < kext->sects.nitems; ++i) {
        sect = kxld_array_get_item(&kext->sects, i);

        if (streq(segname, sect->segname) && streq(sectname, sect->sectname)) {
            break;
        }

        sect = NULL;
    }

    return sect;
}
/*******************************************************************************
*******************************************************************************/
u_int
kxld_kext_get_sectnum_for_sect(const KXLDKext *kext, const KXLDSect *sect)
{
    kern_return_t rval = KERN_FAILURE;
    u_int idx = 0;

    rval = kxld_array_get_index(&kext->sects, sect, &idx);

    return idx;
}
/*******************************************************************************
*******************************************************************************/
const KXLDArray *
kxld_kext_get_section_order(const KXLDKext *kext __unused)
{
#if KXLD_USER_OR_OBJECT
    if (kext->link_type == KXLD_LINK_KERNEL && target_supports_object(kext)) {
        return kext->section_order;
    }
#endif /* KXLD_USER_OR_OBJECT */

    return NULL;
}
/*******************************************************************************
*******************************************************************************/
static u_long
get_macho_header_size(const KXLDKext *kext)
{
    KXLDSeg *seg = NULL;
    u_long header_size = 0;
    u_int i = 0;

    check(kext);

    /* Mach, segment, and UUID headers */

    if (kxld_kext_is_32_bit(kext)) {
        header_size += sizeof(struct mach_header);
    } else {
        header_size += sizeof(struct mach_header_64);
    }

    for (i = 0; i < kext->segs.nitems; ++i) {
        seg = kxld_array_get_item(&kext->segs, i);
        header_size += kxld_seg_get_macho_header_size(seg, kxld_kext_is_32_bit(kext));
    }

    if (kext->uuid.has_uuid) {
        header_size += kxld_uuid_get_macho_header_size();
    }

    return header_size;
}
/*******************************************************************************
*******************************************************************************/
static u_long
get_macho_data_size(const KXLDKext *kext)
{
    KXLDSeg *seg = NULL;
    u_long data_size = 0;
    u_int i = 0;

    check(kext);

    for (i = 0; i < kext->segs.nitems; ++i) {
        seg = kxld_array_get_item(&kext->segs, i);
        data_size += (u_long) kxld_seg_get_vmsize(seg);
    }

    return data_size;
}
/*******************************************************************************
*******************************************************************************/
kern_return_t
kxld_kext_export_linked_object(const KXLDKext *kext,
    u_char *linked_object, kxld_addr_t *kmod_info_kern)
{
    kern_return_t rval = KERN_FAILURE;
    KXLDSeg *seg = NULL;
    u_long size = 0;
    u_long header_size = 0;
    u_long header_offset = 0;
    u_long data_offset = 0;
    u_int ncmds = 0;
    u_int i = 0;

    check(kext);
    check(linked_object);
    check(kmod_info_kern);
    *kmod_info_kern = 0;

    /* Calculate the size of the headers and data */

    header_size = get_macho_header_size(kext);
    data_offset = (kext->is_final_image) ? header_size : round_page(header_size);
    size = data_offset + get_macho_data_size(kext);

    /* Copy data to the file */

    ncmds = kext->segs.nitems + (kext->uuid.has_uuid == TRUE);

    rval = export_macho_header(kext, linked_object, ncmds,
        &header_offset, header_size);
    require_noerr(rval, finish);

    for (i = 0; i < kext->segs.nitems; ++i) {
        seg = kxld_array_get_item(&kext->segs, i);

        rval = kxld_seg_export_macho_to_vm(seg, linked_object, &header_offset,
            header_size, size, kext->link_addr, kxld_kext_is_32_bit(kext));
        require_noerr(rval, finish);
    }

    if (kext->uuid.has_uuid) {
        rval = kxld_uuid_export_macho(&kext->uuid, linked_object,
            &header_offset, header_size);
        require_noerr(rval, finish);
    }

    *kmod_info_kern = kext->kmod_link_addr;

#if !KERNEL
    unswap_macho(linked_object, kext->host_order, kext->target_order);
#endif /* !KERNEL */

    rval = KERN_SUCCESS;

finish:
    return rval;
}
/*******************************************************************************
*******************************************************************************/
kern_return_t
kxld_kext_export_symbol_file(const KXLDKext *kext,
    u_char **_symbol_file, u_long *_filesize)
{
    kern_return_t rval = KERN_FAILURE;
    KXLDSeg *seg = NULL;
    u_char *file = NULL;
    u_long size = 0;
    u_long header_size = 0;
    u_long header_offset = 0;
    u_long data_offset = 0;
    u_int ncmds = 0;
    u_int i = 0;

    check(kext);
    check(_symbol_file);
    *_symbol_file = NULL;

    /* Calculate the size of the file */

    if (kxld_kext_is_32_bit(kext)) {
        header_size += sizeof(struct mach_header);
    } else {
        header_size += sizeof(struct mach_header_64);
    }

    for (i = 0; i < kext->segs.nitems; ++i) {
        seg = kxld_array_get_item(&kext->segs, i);
        header_size += kxld_seg_get_macho_header_size(seg, kxld_kext_is_32_bit(kext));
        size += kxld_seg_get_macho_data_size(seg);
    }

    header_size += kxld_symtab_get_macho_header_size();
    size += kxld_symtab_get_macho_data_size(kext->symtab, FALSE,
        kxld_kext_is_32_bit(kext));

    if (kext->uuid.has_uuid) {
        header_size += kxld_uuid_get_macho_header_size();
    }

    data_offset = round_page(header_size);
    size += data_offset;

    /* Allocate the symbol file */

    file = kxld_page_alloc_untracked(size);
    require_action(file, finish, rval=KERN_RESOURCE_SHORTAGE);

    /* Copy data to the file */

    ncmds = kext->segs.nitems + (kext->uuid.has_uuid == TRUE) + 1; /* +1 for symtab */
    rval = export_macho_header(kext, file, ncmds, &header_offset, header_size);
    require_noerr(rval, finish);

    for (i = 0; i < kext->segs.nitems; ++i) {
        seg = kxld_array_get_item(&kext->segs, i);
        rval = kxld_seg_export_macho_to_file_buffer(seg, file, &header_offset,
            header_size, &data_offset, size, kxld_kext_is_32_bit(kext));
        require_noerr(rval, finish);
    }

    rval = kxld_symtab_export_macho(kext->symtab, file, &header_offset,
        header_size, &data_offset, size, FALSE, kxld_kext_is_32_bit(kext));
    require_noerr(rval, finish);

    if (kext->uuid.has_uuid) {
        rval = kxld_uuid_export_macho(&kext->uuid, file, &header_offset,
            header_size);
        require_noerr(rval, finish);
    }

    header_offset = header_size;

#if !KERNEL
    unswap_macho(file, kext->host_order, kext->target_order);
#endif /* !KERNEL */

    *_symbol_file = file;
    if (_filesize) *_filesize = size;
    file = NULL;

    rval = KERN_SUCCESS;

finish:
    if (file) {
        kxld_page_free_untracked(file, size);
    }

    check((!rval) ^ (!*_symbol_file));

    return rval;
}
/*******************************************************************************
*******************************************************************************/
boolean_t
kxld_kext_target_needs_swap(const KXLDKext *kext __unused)
{
#if KERNEL
    return FALSE;
#else
    return (kext->target_order != kext->host_order);
#endif /* KERNEL */
}
/*******************************************************************************
*******************************************************************************/
static kern_return_t
export_macho_header(const KXLDKext *kext, u_char *buf, u_int ncmds,
    u_long *header_offset, u_long header_size)
{
    kern_return_t rval = KERN_FAILURE;

    check(kext);
    check(buf);
    check(header_offset);

    KXLD_3264_FUNC(kxld_kext_is_32_bit(kext), rval,
        export_macho_header_32, export_macho_header_64,
        kext, buf, ncmds, header_offset, header_size);
    require_noerr(rval, finish);

    rval = KERN_SUCCESS;

finish:
    return rval;
}
#if KXLD_USER_OR_ILP32
/*******************************************************************************
*******************************************************************************/
static kern_return_t
export_macho_header_32(const KXLDKext *kext, u_char *buf, u_int ncmds,
    u_long *header_offset, u_long header_size)
{
    kern_return_t rval = KERN_FAILURE;
    struct mach_header *mach = NULL;

    check(kext);
    check(buf);
    check(header_offset);

    require_action(sizeof(*mach) <= header_size - *header_offset, finish,
        rval=KERN_FAILURE);
    mach = (struct mach_header *) (buf + *header_offset);

    mach->magic = MH_MAGIC;
    mach->cputype = kext->cputype;
    mach->filetype = kext->filetype;
    mach->ncmds = ncmds;
    mach->sizeofcmds = (uint32_t) (header_size - sizeof(*mach));
    mach->flags = MH_NOUNDEFS;

    *header_offset += sizeof(*mach);

    rval = KERN_SUCCESS;

finish:
    return rval;
}
#endif /* KXLD_USER_OR_ILP32 */
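
/* In this and the 64-bit exporter below, sizeofcmds counts only the load
 * commands that follow the mach_header itself; together with the header they
 * fill exactly header_size bytes as computed by get_macho_header_size().
 */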
#if KXLD_USER_OR_LP64
/*******************************************************************************
*******************************************************************************/
static kern_return_t
export_macho_header_64(const KXLDKext *kext, u_char *buf, u_int ncmds,
    u_long *header_offset, u_long header_size)
{
    kern_return_t rval = KERN_FAILURE;
    struct mach_header_64 *mach = NULL;

    check(kext);
    check(buf);
    check(header_offset);

    require_action(sizeof(*mach) <= header_size - *header_offset, finish,
        rval=KERN_FAILURE);
    mach = (struct mach_header_64 *) (buf + *header_offset);

    mach->magic = MH_MAGIC_64;
    mach->cputype = kext->cputype;
    mach->cpusubtype = kext->cpusubtype;
    mach->filetype = kext->filetype;
    mach->ncmds = ncmds;
    mach->sizeofcmds = (uint32_t) (header_size - sizeof(*mach));
    mach->flags = MH_NOUNDEFS;

    *header_offset += sizeof(*mach);

    rval = KERN_SUCCESS;

finish:
    return rval;
}
#endif /* KXLD_USER_OR_LP64 */
/*******************************************************************************
*******************************************************************************/
kern_return_t
kxld_kext_resolve(KXLDKext *kext, struct kxld_dict *patched_vtables,
    struct kxld_dict *defined_symbols)
{
    kern_return_t rval = KERN_FAILURE;

    require_action(kext->link_type == KXLD_LINK_PSEUDO_KEXT, finish,
        rval=KERN_FAILURE);

    /* Resolve symbols */
    rval = resolve_symbols(kext, defined_symbols, NULL);
    require_noerr(rval, finish);

    /* Validate symbols */
    rval = validate_symbols(kext);
    require_noerr(rval, finish);

    /* Pseudokexts re-export their dependencies' vtables */
    rval = copy_vtables(kext, patched_vtables);
    require_noerr(rval, finish);

    rval = KERN_SUCCESS;

finish:
    return rval;
}
/*******************************************************************************
*******************************************************************************/
kern_return_t
kxld_kext_relocate(KXLDKext *kext, kxld_addr_t link_address,
    KXLDDict *patched_vtables, KXLDDict *defined_symbols,
    KXLDDict *obsolete_symbols)
{
    kern_return_t rval = KERN_FAILURE;
    KXLDSeg *seg = NULL;
    u_int i = 0;

    check(kext);
    check(patched_vtables);
    check(defined_symbols);

    require_action(kext->link_type == KXLD_LINK_KEXT, finish, rval=KERN_FAILURE);

    kext->link_addr = link_address;

    /* Relocate segments (which relocates the sections) */
    for (i = 0; i < kext->segs.nitems; ++i) {
        seg = kxld_array_get_item(&kext->segs, i);
        kxld_seg_relocate(seg, link_address);
    }

    /* Relocate symbols */
    rval = kxld_symtab_relocate(kext->symtab, &kext->sects);
    require_noerr(rval, finish);

    /* Populate kmod info structure */
    rval = populate_kmod_info(kext);
    require_noerr(rval, finish);

    /* Resolve symbols */
    rval = resolve_symbols(kext, defined_symbols, obsolete_symbols);
    require_noerr(rval, finish);

    /* Patch vtables */
    rval = patch_vtables(kext, patched_vtables, defined_symbols);
    require_noerr(rval, finish);

    /* Validate symbols */
    rval = validate_symbols(kext);
    require_noerr(rval, finish);

    /* Process relocation entries and populate the global offset table.
     *
     * For final linked images: the relocation entries are contained in a couple
     * of tables hanging off the end of the symbol table.  The GOT has its own
     * section created by the linker; we simply need to fill it.
     *
     * For object files: the relocation entries are bound to each section.
     * The GOT, if it exists for the target architecture, is created by kxld,
     * and we must populate it according to our internal structures.
     */
    if (kext->is_final_image) {
#if KXLD_USER_OR_BUNDLE
        rval = process_symbol_pointers(kext);
        require_noerr(rval, finish);

        rval = process_relocs_from_tables(kext);
        require_noerr(rval, finish);
#else
        require_action(FALSE, finish, rval=KERN_FAILURE);
#endif /* KXLD_USER_OR_BUNDLE */
    } else {
#if KXLD_USER_OR_GOT
        rval = populate_got(kext);
        require_noerr(rval, finish);
#endif /* KXLD_USER_OR_GOT */
#if KXLD_USER_OR_OBJECT
        rval = process_relocs_from_sections(kext);
        require_noerr(rval, finish);
#else
        require_action(FALSE, finish, rval=KERN_FAILURE);
#endif /* KXLD_USER_OR_OBJECT */
    }

    rval = KERN_SUCCESS;

finish:
    return rval;
}
/*******************************************************************************
*******************************************************************************/
static kern_return_t
resolve_symbols(KXLDKext *kext, KXLDDict *defined_symbols,
    KXLDDict *obsolete_symbols)
{
    kern_return_t rval = KERN_FAILURE;
    KXLDSymtabIterator iter;
    KXLDSym *sym = NULL;
    void *addrp = NULL;
    kxld_addr_t addr = 0;
    const char *name = NULL;
    boolean_t tests_for_weak = FALSE;
    boolean_t error = FALSE;
    boolean_t warning = FALSE;
    char *demangled_name = NULL;
    size_t demangled_length = 0;

    check(kext);
    check(defined_symbols);

    /* Check if the kext tests for weak symbols */
    sym = kxld_symtab_get_symbol_by_name(kext->symtab, KXLD_WEAK_TEST_SYMBOL);
    tests_for_weak = (sym != NULL);

    /* Check for duplicate symbols */
    kxld_symtab_iterator_init(&iter, kext->symtab, kxld_sym_is_exported, FALSE);
    while ((sym = kxld_symtab_iterator_get_next(&iter))) {
        addrp = kxld_dict_find(defined_symbols, sym->name);
        if (addrp) {

            /* Convert to a kxld_addr_t */
            if (kxld_kext_is_32_bit(kext)) {
                addr = (kxld_addr_t) (*(uint32_t*)addrp);
            } else {
                addr = (kxld_addr_t) (*(uint64_t*)addrp);
            }

            /* Not a problem if the symbols have the same address */
            if (addr == sym->link_addr) {
                continue;
            }

            if (!error) {
                error = TRUE;
                kxld_log(kKxldLogLinking, kKxldLogErr,
                    "The following symbols were defined more than once:");
            }

            kxld_log(kKxldLogLinking, kKxldLogErr, "\t%s: %p - %p",
                kxld_demangle(sym->name, &demangled_name, &demangled_length),
                (void *) (uintptr_t) sym->link_addr,
                (void *) (uintptr_t) addr);
        }
    }
    require_noerr_action(error, finish, rval=KERN_FAILURE);

    /* Resolve undefined and indirect symbols */

    /* Iterate over all unresolved symbols */
    kxld_symtab_iterator_init(&iter, kext->symtab,
        kxld_sym_is_unresolved, FALSE);
    while ((sym = kxld_symtab_iterator_get_next(&iter))) {

        /* Common symbols are not supported */
        if (kxld_sym_is_common(sym)) {

            if (!error) {
                error = TRUE;
                if (target_supports_common(kext)) {
                    kxld_log(kKxldLogLinking, kKxldLogErr,
                        "The following common symbols were not resolved:");
                } else {
                    kxld_log(kKxldLogLinking, kKxldLogErr,
                        "Common symbols are not supported in kernel extensions. "
                        "Use -fno-common to build your kext. "
                        "The following are common symbols:");
                }
            }

            kxld_log(kKxldLogLinking, kKxldLogErr, "\t%s",
                kxld_demangle(sym->name, &demangled_name, &demangled_length));

        } else {

            /* Find the address of the defined symbol */
            if (kxld_sym_is_undefined(sym)) {
                name = sym->name;
            } else {
                name = sym->alias;
            }
            addrp = kxld_dict_find(defined_symbols, name);

            /* Resolve the symbol.  If a definition cannot be found, then:
             * 1) Psuedokexts log a warning and proceed
             * 2) Actual kexts delay the error until validation in case vtable
             *    patching replaces the undefined symbol.
             */

            if (addrp) {

                /* Convert to a kxld_addr_t */
                if (kxld_kext_is_32_bit(kext)) {
                    addr = (kxld_addr_t) (*(uint32_t*)addrp);
                } else {
                    addr = (kxld_addr_t) (*(uint64_t*)addrp);
                }

                boolean_t is_exported = (kext->link_type == KXLD_LINK_PSEUDO_KEXT);

                rval = kxld_sym_resolve(sym, addr, is_exported);
                require_noerr(rval, finish);

                if (obsolete_symbols && kxld_dict_find(obsolete_symbols, name)) {
                    kxld_log(kKxldLogLinking, kKxldLogWarn,
                        "This kext uses obsolete symbol %s.",
                        kxld_demangle(name, &demangled_name, &demangled_length));
                }

            } else if (kext->link_type == KXLD_LINK_PSEUDO_KEXT) {
                /* Pseudokexts ignore undefined symbols, because any actual
                 * kexts that need those symbols will fail to link anyway, so
                 * there's no need to block well-behaved kexts.
                 */
                if (!warning) {
                    warning = TRUE;
                    kxld_log(kKxldLogLinking, kKxldLogWarn,
                        "This symbol set has the following unresolved symbols:");
                }

                kxld_log(kKxldLogLinking, kKxldLogErr, "\t%s",
                    kxld_demangle(sym->name, &demangled_name, &demangled_length));
                kxld_sym_delete(sym);

            } else if (kxld_sym_is_weak(sym)) {
                /* Make sure that the kext has referenced gOSKextUnresolved.
                 */
                require_action(tests_for_weak, finish,
                    rval=KERN_FAILURE;
                    kxld_log(kKxldLogLinking, kKxldLogErr,
                        "This kext has weak references but does not test for "
                        "them. Test for weak references with "
                        "OSKextIsSymbolResolved()."));

#if KERNEL
                /* Get the address of the default weak address.
                 */
                addr = (kxld_addr_t) &kext_weak_symbol_referenced;
#else
                /* This is run during symbol generation only, so we only
                 * need a filler value here.
                 */
                addr = kext->link_addr;
#endif /* KERNEL */

                rval = kxld_sym_resolve(sym, addr, /* exported */ FALSE);
                require_noerr(rval, finish);
            }
        }
    }
    require_noerr_action(error, finish, rval=KERN_FAILURE);

    rval = KERN_SUCCESS;

finish:
    if (demangled_name) kxld_free(demangled_name, demangled_length);

    return rval;
}
/*******************************************************************************
*******************************************************************************/
static boolean_t
target_supports_strict_patching(KXLDKext *kext)
{
    check(kext);

    return (kext->cputype != CPU_TYPE_I386 &&
            kext->cputype != CPU_TYPE_POWERPC);
}
/*******************************************************************************
* We must patch vtables to ensure binary compatibility, and to perform that
* patching, we have to determine the vtables' inheritance relationships.  The
* MetaClass system gives us a way to do that:
* 1) Iterate over all of the super MetaClass pointer symbols.  Every class
*    that inherits from OSObject will have a pointer in its MetaClass that
*    points to the MetaClass's super MetaClass.
* 2) Derive the name of the class from the super MetaClass pointer.
* 3) Derive the name of the class's vtable from the name of the class.
* 4) Follow the super MetaClass pointer to get the address of the super
*    MetaClass's symbol.
* 5) Look up the super MetaClass symbol by address.
* 6) Derive the super class's name from the super MetaClass name.
* 7) Derive the super class's vtable from the super class's name.
* This procedure will allow us to find all of the OSObject-derived classes and
* their super classes, and thus patch all of the vtables.
*
* We also have to take care to patch up the MetaClasses' vtables.  The
* MetaClasses follow a parallel hierarchy to the classes, so once we have the
* class name and super class name, we can also derive the MetaClass name and
* the super MetaClass name, and thus find and patch their vtables as well.
*******************************************************************************/

#define kOSMetaClassVTableName "__ZTV11OSMetaClass"
static kern_return_t
patch_vtables(KXLDKext *kext, KXLDDict *patched_vtables,
    KXLDDict *defined_symbols)
{
    kern_return_t rval = KERN_FAILURE;
    KXLDSymtabIterator iter;
    KXLDSym *metaclass = NULL;
    KXLDSym *super_metaclass_pointer = NULL;
    KXLDSym *final_sym = NULL;
    KXLDVTable *vtable = NULL;
    KXLDVTable *super_vtable = NULL;
    char class_name[KXLD_MAX_NAME_LEN];
    char super_class_name[KXLD_MAX_NAME_LEN];
    char vtable_name[KXLD_MAX_NAME_LEN];
    char super_vtable_name[KXLD_MAX_NAME_LEN];
    char final_sym_name[KXLD_MAX_NAME_LEN];
    char *demangled_name1 = NULL;
    char *demangled_name2 = NULL;
    size_t demangled_length1 = 0;
    size_t demangled_length2 = 0;
    size_t len = 0;
    u_int nvtables = 0;
    u_int npatched = 0;
    u_int nprogress = 0;
    boolean_t failure = FALSE;

    check(kext);
    check(patched_vtables);

    /* Find each super meta class pointer symbol */

    kxld_symtab_iterator_init(&iter, kext->symtab,
        kxld_sym_is_super_metaclass_pointer, FALSE);
    nvtables = kxld_symtab_iterator_get_num_remaining(&iter);

    while (npatched < nvtables) {
        npatched = 0;
        nprogress = 0;
        kxld_symtab_iterator_reset(&iter);
        while((super_metaclass_pointer = kxld_symtab_iterator_get_next(&iter)))
        {
            /* Get the class name from the smc pointer */
            rval = kxld_sym_get_class_name_from_super_metaclass_pointer(
                super_metaclass_pointer, class_name, sizeof(class_name));
            require_noerr(rval, finish);

            /* Get the vtable name from the class name */
            rval = kxld_sym_get_vtable_name_from_class_name(class_name,
                vtable_name, sizeof(vtable_name));
            require_noerr(rval, finish);

            /* Get the vtable and make sure it hasn't been patched */
            vtable = kxld_dict_find(&kext->vtable_index, vtable_name);
            require_action(vtable, finish, rval=KERN_FAILURE;
                kxld_log(kKxldLogPatching, kKxldLogErr, kKxldLogMissingVtable,
                    vtable_name, class_name));

            if (!vtable->is_patched) {

                /* Find the SMCP's meta class symbol */
                rval = get_metaclass_symbol_from_super_meta_class_pointer_symbol(
                    kext, super_metaclass_pointer, &metaclass);
                require_noerr(rval, finish);

                /* Get the super class name from the super metaclass */
                rval = kxld_sym_get_class_name_from_metaclass(metaclass,
                    super_class_name, sizeof(super_class_name));
                require_noerr(rval, finish);

                /* Get the super vtable name from the class name */
                rval = kxld_sym_get_vtable_name_from_class_name(super_class_name,
                    super_vtable_name, sizeof(super_vtable_name));
                require_noerr(rval, finish);

                if (failure) {
                    kxld_log(kKxldLogPatching, kKxldLogErr,
                        "\t'%s' (super vtable '%s')",
                        kxld_demangle(vtable_name, &demangled_name1,
                            &demangled_length1),
                        kxld_demangle(super_vtable_name, &demangled_name2,
                            &demangled_length2));
                    continue;
                }

                /* Get the super vtable if it's been patched */
                super_vtable = kxld_dict_find(patched_vtables, super_vtable_name);
                if (!super_vtable) continue;

                /* Get the final symbol's name from the super vtable */
                rval = kxld_sym_get_final_sym_name_from_class_name(super_class_name,
                    final_sym_name, sizeof(final_sym_name));
                require_noerr(rval, finish);

                /* Verify that the final symbol does not exist.  First check
                 * all the externally defined symbols, then check locally.
                 */
                final_sym = kxld_dict_find(defined_symbols, final_sym_name);
                if (!final_sym) {
                    final_sym = kxld_symtab_get_symbol_by_name(kext->symtab,
                        final_sym_name);
                }
                require_action(!final_sym, finish,
                    rval=KERN_FAILURE;
                    kxld_log(kKxldLogPatching, kKxldLogErr,
                        "Class '%s' is a subclass of final class '%s'.",
                        kxld_demangle(class_name, &demangled_name1,
                            &demangled_length1),
                        kxld_demangle(super_class_name, &demangled_name2,
                            &demangled_length2)));

                /* Patch the class's vtable */
                rval = kxld_vtable_patch(vtable, super_vtable, kext->symtab,
                    target_supports_strict_patching(kext));
                require_noerr(rval, finish);

                /* Add the class's vtable to the set of patched vtables */
                rval = kxld_dict_insert(patched_vtables, vtable->name, vtable);
                require_noerr(rval, finish);

                /* Get the meta vtable name from the class name */
                rval = kxld_sym_get_meta_vtable_name_from_class_name(class_name,
                    vtable_name, sizeof(vtable_name));
                require_noerr(rval, finish);

                /* Get the meta vtable.  Whether or not it should exist has already
                 * been tested in create_vtables(), so if it doesn't exist and we're
                 * still running, we can safely skip it.
                 */
                vtable = kxld_dict_find(&kext->vtable_index, vtable_name);
                if (!vtable) {
                    ++nprogress;
                    ++npatched;
                    continue;
                }
                require_action(!vtable->is_patched, finish, rval=KERN_FAILURE);

                /* There is no way to look up a metaclass vtable at runtime, but
                 * we know that every class's metaclass inherits directly from
                 * OSMetaClass, so we just hardcode that vtable name here.
                 */
                len = strlcpy(super_vtable_name, kOSMetaClassVTableName,
                    sizeof(super_vtable_name));
                require_action(len == const_strlen(kOSMetaClassVTableName),
                    finish, rval=KERN_FAILURE);

                /* Get the super meta vtable */
                super_vtable = kxld_dict_find(patched_vtables, super_vtable_name);
                require_action(super_vtable && super_vtable->is_patched,
                    finish, rval=KERN_FAILURE);

                /* Patch the meta class's vtable */
                rval = kxld_vtable_patch(vtable, super_vtable,
                    kext->symtab, target_supports_strict_patching(kext));
                require_noerr(rval, finish);

                /* Add the MetaClass's vtable to the set of patched vtables */
                rval = kxld_dict_insert(patched_vtables, vtable->name, vtable);
                require_noerr(rval, finish);

                ++nprogress;
            }

            ++npatched;
        }

        require_action(!failure, finish, rval=KERN_FAILURE);

        if (!nprogress) {
            failure = TRUE;
            kxld_log(kKxldLogPatching, kKxldLogErr,
                "The following vtables were unpatchable because each one's "
                "parent vtable either was not found or also was not patchable:");
        }
    }

    rval = KERN_SUCCESS;

finish:
    if (demangled_name1) kxld_free(demangled_name1, demangled_length1);
    if (demangled_name2) kxld_free(demangled_name2, demangled_length2);

    return rval;
}
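/* Illustrative sketch (not compiled): step 3 of the procedure described above
 * derives a vtable symbol name from a class name.  Under the Itanium C++ ABI
 * name mangling used here, "vtable for Foo" is _ZTV<len><name>, and Mach-O
 * symbols carry an extra leading underscore, giving e.g. "__ZTV8OSObject" for
 * OSObject (compare kOSMetaClassVTableName above).  The helper below is
 * hypothetical and only shows the string construction; the real derivation
 * lives in kxld_sym_get_vtable_name_from_class_name().
 */
#if 0
#include <stdio.h>
#include <string.h>

static int
example_vtable_name(const char *class_name, char *buf, size_t bufsize)
{
    /* "__ZTV" + decimal length of the class name + the class name itself */
    int n = snprintf(buf, bufsize, "__ZTV%zu%s", strlen(class_name), class_name);
    return (n > 0 && (size_t) n < bufsize) ? 0 : -1;
}
#endif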
/*******************************************************************************
*******************************************************************************/
static kern_return_t
validate_symbols(KXLDKext *kext)
{
    kern_return_t rval = KERN_FAILURE;
    KXLDSymtabIterator iter;
    KXLDSym *sym = NULL;
    u_int error = FALSE;
    char *demangled_name = NULL;
    size_t demangled_length = 0;

    /* Check for any unresolved symbols */
    kxld_symtab_iterator_init(&iter, kext->symtab, kxld_sym_is_unresolved, FALSE);
    while ((sym = kxld_symtab_iterator_get_next(&iter))) {
        if (!error) {
            error = TRUE;
            kxld_log(kKxldLogLinking, kKxldLogErr,
                "The following symbols are unresolved for this kext:");
        }
        kxld_log(kKxldLogLinking, kKxldLogErr, "\t%s",
            kxld_demangle(sym->name, &demangled_name, &demangled_length));
    }
    require_noerr_action(error, finish, rval=KERN_FAILURE);

    rval = KERN_SUCCESS;

finish:
    if (demangled_name) kxld_free(demangled_name, demangled_length);

    return rval;
}
#if KXLD_USER_OR_GOT || KXLD_USER_OR_COMMON
/*******************************************************************************
*******************************************************************************/
static kern_return_t
add_section(KXLDKext *kext, KXLDSect **sect)
{
    kern_return_t rval = KERN_FAILURE;
    u_int nsects = kext->sects.nitems;

    rval = kxld_array_resize(&kext->sects, nsects + 1);
    require_noerr(rval, finish);

    *sect = kxld_array_get_item(&kext->sects, nsects);

    rval = KERN_SUCCESS;

finish:
    return rval;
}
#endif /* KXLD_USER_OR_GOT || KXLD_USER_OR_COMMON */
#if KXLD_USER_OR_GOT
/*******************************************************************************
*******************************************************************************/
static boolean_t
target_has_got(const KXLDKext *kext)
{
    /* The GOT only exists for targets that use GOT-based relocations */
    return (kext->cputype == CPU_TYPE_X86_64);
}
/*******************************************************************************
* Create and initialize the Global Offset Table
*******************************************************************************/
static kern_return_t
create_got(KXLDKext *kext)
{
    kern_return_t rval = KERN_FAILURE;
    KXLDSect *sect = NULL;
    u_int ngots = 0;
    u_int i = 0;

    if (!target_has_got(kext)) {
        rval = KERN_SUCCESS;
        goto finish;
    }

    for (i = 0; i < kext->sects.nitems; ++i) {
        sect = kxld_array_get_item(&kext->sects, i);
        ngots += kxld_sect_get_ngots(sect, &kext->relocator,
            kext->symtab);
    }

    rval = add_section(kext, &sect);
    require_noerr(rval, finish);

    rval = kxld_sect_init_got(sect, ngots);
    require_noerr(rval, finish);

    kext->got_is_created = TRUE;
    rval = KERN_SUCCESS;

finish:
    return rval;
}

/*******************************************************************************
*******************************************************************************/
static kern_return_t
populate_got(KXLDKext *kext)
{
    kern_return_t rval = KERN_FAILURE;
    KXLDSect *sect = NULL;
    u_int i = 0;

    if (!target_has_got(kext) || !kext->got_is_created) {
        rval = KERN_SUCCESS;
        goto finish;
    }

    for (i = 0; i < kext->sects.nitems; ++i) {
        sect = kxld_array_get_item(&kext->sects, i);
        if (streq_safe(sect->segname, KXLD_SEG_GOT, sizeof(KXLD_SEG_GOT)) &&
            streq_safe(sect->sectname, KXLD_SECT_GOT, sizeof(KXLD_SECT_GOT)))
        {
            kxld_sect_populate_got(sect, kext->symtab,
                kxld_kext_target_needs_swap(kext));
            break;
        }
    }

    require_action(i < kext->sects.nitems, finish, rval=KXLD_MISSING_GOT);

    rval = KERN_SUCCESS;

finish:
    return rval;
}
#endif /* KXLD_USER_OR_GOT */
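/* Illustrative sketch (not compiled): what populate_got() above ultimately
 * writes.  A GOT entry is just a pointer-sized slot holding a symbol's final
 * linked address, byte-swapped when the link is performed for a target whose
 * byte order differs from the host's.  OSSwapInt64() is from
 * <libkern/OSByteOrder.h>; the helper and its arguments are hypothetical.
 */
#if 0
#include <stdint.h>
#include <libkern/OSByteOrder.h>

static void
example_write_got_entry(uint64_t *got_slot, uint64_t symbol_link_addr,
    int target_needs_swap)
{
    /* Store the symbol's absolute linked address in the slot */
    *got_slot = target_needs_swap ? OSSwapInt64(symbol_link_addr)
                                  : symbol_link_addr;
}
#endif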
/*******************************************************************************
*******************************************************************************/
static boolean_t
target_supports_common(const KXLDKext *kext)
{
    check(kext);

    return (kext->cputype == CPU_TYPE_I386 ||
            kext->cputype == CPU_TYPE_POWERPC);
}
#if KXLD_USER_OR_COMMON
/*******************************************************************************
* If there are common symbols, calculate how much space they'll need
* and create/grow the __DATA __common section to accommodate them.
* Then, resolve them against that section.
*******************************************************************************/
static kern_return_t
resolve_common_symbols(KXLDKext *kext)
{
    kern_return_t rval = KERN_FAILURE;
    KXLDSymtabIterator iter;
    KXLDSym *sym = NULL;
    KXLDSect *sect = NULL;
    kxld_addr_t base_addr = 0;
    kxld_size_t size = 0;
    kxld_size_t total_size = 0;
    u_int align = 0;
    u_int max_align = 0;
    u_int sectnum = 0;

    if (!target_supports_common(kext)) {
        rval = KERN_SUCCESS;
        goto finish;
    }

    /* Iterate over the common symbols to calculate their total aligned size */
    kxld_symtab_iterator_init(&iter, kext->symtab, kxld_sym_is_common, FALSE);
    while ((sym = kxld_symtab_iterator_get_next(&iter))) {
        align = kxld_sym_get_common_align(sym);
        size = kxld_sym_get_common_size(sym);

        if (align > max_align) max_align = align;

        total_size = kxld_align_address(total_size, align) + size;
    }

    /* If there are common symbols, grow or create the __DATA __common section
     * to accommodate them.
     */
    if (total_size) {
        sect = kxld_kext_get_sect_by_name(kext, SEG_DATA, SECT_COMMON);
        if (sect) {
            base_addr = sect->base_addr + sect->size;

            kxld_sect_grow(sect, total_size, max_align);
        } else {
            base_addr = 0;

            rval = add_section(kext, &sect);
            require_noerr(rval, finish);

            kxld_sect_init_zerofill(sect, SEG_DATA, SECT_COMMON,
                total_size, max_align);
        }

        /* Resolve the common symbols against the new section */
        rval = kxld_array_get_index(&kext->sects, sect, &sectnum);
        require_noerr(rval, finish);

        kxld_symtab_iterator_reset(&iter);
        while ((sym = kxld_symtab_iterator_get_next(&iter))) {
            align = kxld_sym_get_common_align(sym);
            size = kxld_sym_get_common_size(sym);

            base_addr = kxld_align_address(base_addr, align);
            kxld_sym_resolve_common(sym, sectnum, base_addr);

            base_addr += size;
        }
    }

    rval = KERN_SUCCESS;

finish:
    return rval;
}
#endif /* KXLD_USER_OR_COMMON */
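/* Illustrative sketch (not compiled): the size accounting used by
 * resolve_common_symbols() above.  Each common symbol is placed at the next
 * boundary of its own alignment, so the section's total size is the running
 * sum of align-then-add.  The alignment here is assumed to be a power-of-two
 * exponent, as Mach-O records it for common symbols; both helpers are
 * hypothetical stand-ins for kxld_align_address() and the loop above.
 */
#if 0
#include <stdint.h>

static uint64_t
example_align_up(uint64_t addr, unsigned align_exp)
{
    uint64_t mask = ((uint64_t) 1 << align_exp) - 1;
    return (addr + mask) & ~mask;
}

static uint64_t
example_total_common_size(const uint64_t *sizes, const unsigned *align_exps,
    unsigned nsyms)
{
    uint64_t total = 0;
    unsigned i;

    for (i = 0; i < nsyms; ++i) {
        /* Round up to this symbol's alignment, then reserve its size */
        total = example_align_up(total, align_exps[i]) + sizes[i];
    }
    return total;
}
#endif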
/*******************************************************************************
*******************************************************************************/
static kern_return_t
get_metaclass_symbol_from_super_meta_class_pointer_symbol(KXLDKext *kext,
    KXLDSym *super_metaclass_pointer_sym, KXLDSym **metaclass)
{
    kern_return_t rval = KERN_FAILURE;
    KXLDSect *sect = NULL;
    KXLDReloc *reloc = NULL;
    uint32_t offset = 0;

    check(kext);
    check(super_metaclass_pointer_sym);
    check(metaclass);

    sect = kxld_array_get_item(&kext->sects, super_metaclass_pointer_sym->sectnum);
    require_action(sect, finish, rval=KERN_FAILURE);

    /* Find the relocation entry for the super metaclass pointer and get the
     * symbol associated with that relocation entry
     */

    if (kext->is_final_image) {
        /* The relocation entry could be in either the external or local
         * relocation entries.  kxld_reloc_get_symbol() can handle either
         * type.
         */
        reloc = kxld_reloc_get_reloc_by_offset(&kext->extrelocs,
            super_metaclass_pointer_sym->base_addr);
        if (!reloc) {
            reloc = kxld_reloc_get_reloc_by_offset(&kext->locrelocs,
                super_metaclass_pointer_sym->base_addr);
        }
        require_action(reloc, finish, rval=KERN_FAILURE);

        *metaclass = kxld_reloc_get_symbol(&kext->relocator, reloc, kext->file,
            kext->symtab);
    } else {
        offset = kxld_sym_get_section_offset(super_metaclass_pointer_sym, sect);

        reloc = kxld_reloc_get_reloc_by_offset(&sect->relocs, offset);
        require_action(reloc, finish, rval=KERN_FAILURE);

        *metaclass = kxld_reloc_get_symbol(&kext->relocator, reloc, sect->data,
            kext->symtab);
    }
    require_action(*metaclass, finish, rval=KERN_FAILURE);

    rval = KERN_SUCCESS;

finish:
    return rval;
}
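/* Illustrative sketch (not compiled): the idea behind
 * get_metaclass_symbol_from_super_meta_class_pointer_symbol() above.  The
 * super MetaClass pointer is a data word the compiler leaves for the linker
 * to fill, so the relocation entry at that word's offset names (via
 * r_symbolnum, when r_extern is set) the super MetaClass symbol we are after.
 * Field names are from <mach-o/reloc.h>; the helper is hypothetical and
 * ignores scattered relocations and local (non-extern) entries.
 */
#if 0
#include <mach-o/reloc.h>
#include <stdint.h>

static int32_t
example_symbol_index_for_offset(const struct relocation_info *relocs,
    uint32_t nrelocs, int32_t offset)
{
    uint32_t i;

    for (i = 0; i < nrelocs; ++i) {
        if (relocs[i].r_address == offset && relocs[i].r_extern) {
            return (int32_t) relocs[i].r_symbolnum;  /* symbol table index */
        }
    }
    return -1;  /* no external relocation at this offset */
}
#endif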
/*******************************************************************************
*******************************************************************************/
static kern_return_t
copy_vtables(KXLDKext *kext, const KXLDDict *patched_vtables)
{
    kern_return_t rval = KERN_FAILURE;
    KXLDSymtabIterator iter;
    KXLDSym *sym = NULL;
    KXLDVTable *vtable = NULL, *src = NULL;
    u_int i = 0;
    u_int nvtables = 0;
    char class_name[KXLD_MAX_NAME_LEN];
    char meta_vtable_name[KXLD_MAX_NAME_LEN];

    kxld_symtab_iterator_init(&iter, kext->symtab,
        kxld_sym_is_class_vtable, FALSE);

    /* The iterator tracks all the class vtables, so we double the number of
     * vtables we're expecting because we use the class vtables to find the
     * MetaClass vtables.
     */
    nvtables = kxld_symtab_iterator_get_num_remaining(&iter) * 2;
    rval = kxld_array_init(&kext->vtables, sizeof(KXLDVTable), nvtables);
    require_noerr(rval, finish);

    while ((sym = kxld_symtab_iterator_get_next(&iter))) {
        src = kxld_dict_find(patched_vtables, sym->name);
        require_action(src, finish, rval=KERN_FAILURE);

        vtable = kxld_array_get_item(&kext->vtables, i++);
        rval = kxld_vtable_copy(vtable, src);
        require_noerr(rval, finish);

        rval = kxld_sym_get_class_name_from_vtable(sym,
            class_name, sizeof(class_name));
        require_noerr(rval, finish);

        rval = kxld_sym_get_meta_vtable_name_from_class_name(class_name,
            meta_vtable_name, sizeof(meta_vtable_name));
        require_noerr(rval, finish);

        /* Some classes don't have a MetaClass, so when we run across one
         * of those, we shrink the vtable array by 1.
         */
        src = kxld_dict_find(patched_vtables, meta_vtable_name);
        if (src) {
            vtable = kxld_array_get_item(&kext->vtables, i++);
            rval = kxld_vtable_copy(vtable, src);
            require_noerr(rval, finish);
        } else {
            kxld_array_resize(&kext->vtables, kext->vtables.nitems - 1);
        }
    }

    rval = KERN_SUCCESS;

finish:
    return rval;
}
#if KXLD_USER_OR_OBJECT
/*******************************************************************************
*******************************************************************************/
static kern_return_t
process_relocs_from_sections(KXLDKext *kext)
{
    kern_return_t rval = KERN_FAILURE;
    KXLDSect *sect = NULL;
    u_int i = 0;

    for (i = 0; i < kext->sects.nitems; ++i) {
        sect = kxld_array_get_item(&kext->sects, i);
        rval = kxld_sect_process_relocs(sect, &kext->relocator,
            &kext->sects, kext->symtab);
        require_noerr_action(rval, finish,
            kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogInvalidSectReloc,
                i, sect->segname, sect->sectname));
    }

    rval = KERN_SUCCESS;

finish:
    return rval;
}
#endif /* KXLD_USER_OR_OBJECT */
#if KXLD_USER_OR_BUNDLE
/*******************************************************************************
*******************************************************************************/
static kern_return_t
process_relocs_from_tables(KXLDKext *kext)
{
    kern_return_t rval = KERN_FAILURE;
    KXLDReloc *reloc = NULL;
    KXLDSeg *seg = NULL;
    u_int i = 0;

    /* Offsets for relocations in relocation tables are based on the vm
     * address of the first segment.
     */
    seg = kxld_array_get_item(&kext->segs, 0);

    /* Process external relocations */
    for (i = 0; i < kext->extrelocs.nitems; ++i) {
        reloc = kxld_array_get_item(&kext->extrelocs, i);

        rval = kxld_relocator_process_table_reloc(&kext->relocator, reloc, seg,
            kext->file, &kext->sects, kext->symtab);
        require_noerr_action(rval, finish,
            kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogInvalidExtReloc, i));
    }

    /* Process local relocations */
    for (i = 0; i < kext->locrelocs.nitems; ++i) {
        reloc = kxld_array_get_item(&kext->locrelocs, i);

        rval = kxld_relocator_process_table_reloc(&kext->relocator, reloc, seg,
            kext->file, &kext->sects, kext->symtab);
        require_noerr_action(rval, finish,
            kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogInvalidIntReloc, i));
    }

    rval = KERN_SUCCESS;

finish:
    return rval;
}

/*******************************************************************************
*******************************************************************************/
static void
add_to_ptr(u_char *symptr, kxld_addr_t val, boolean_t is_32_bit)
{
    if (is_32_bit) {
        uint32_t *ptr = (uint32_t *) symptr;
        *ptr += (uint32_t) val;
    } else {
        uint64_t *ptr = (uint64_t *) symptr;
        *ptr += (uint64_t) val;
    }
}

#define SECT_SYM_PTRS "__nl_symbol_ptr"
/*******************************************************************************
* Final linked images create an __nl_symbol_ptr section for the global offset
* table and for symbol pointer lookups in general.  Rather than use relocation
* entries, the linker creates an "indirect symbol table" which stores indexes
* into the symbol table corresponding to the entries of this section.  This
* function populates the section with the relocated addresses of those symbols.
*******************************************************************************/
static kern_return_t
process_symbol_pointers(KXLDKext *kext)
{
    kern_return_t rval = KERN_FAILURE;
    KXLDSect *sect = NULL;
    KXLDSym *sym = NULL;
    int32_t *symidx = NULL;
    u_char *symptr = NULL;
    u_long symptrsize = 0;
    u_int nsyms = 0;
    u_int firstsym = 0;
    u_int i = 0;

    check(kext);

    require_action(kext->is_final_image && kext->dysymtab_hdr,
        finish, rval=KERN_FAILURE);

    /* Get the __DATA,__nl_symbol_ptr section.  If it doesn't exist, we have
     * nothing to do.
     */

    sect = kxld_kext_get_sect_by_name(kext, SEG_DATA, SECT_SYM_PTRS);
    if (!sect) {
        rval = KERN_SUCCESS;
        goto finish;
    }

    require_action(sect->flags & S_NON_LAZY_SYMBOL_POINTERS,
        finish, rval=KERN_FAILURE;
        kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogMalformedMachO
            "Section %s,%s does not have S_NON_LAZY_SYMBOL_POINTERS flag.",
            SEG_DATA, SECT_SYM_PTRS));

    /* Calculate the table offset and number of entries in the section */

    if (kxld_kext_is_32_bit(kext)) {
        symptrsize = sizeof(uint32_t);
    } else {
        symptrsize = sizeof(uint64_t);
    }

    nsyms = (u_int) (sect->size / symptrsize);
    firstsym = sect->reserved1;

    require_action(firstsym + nsyms <= kext->dysymtab_hdr->nindirectsyms,
        finish, rval=KERN_FAILURE;
        kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogMalformedMachO));

    /* Iterate through the indirect symbol table and fill in the section of
     * symbol pointers.  There are three cases:
     *   1) A normal symbol - put its value directly in the table
     *   2) An INDIRECT_SYMBOL_LOCAL - symbols that are local and already have
     *      their offset from the start of the file in the section.  Simply
     *      add the file's link address to fill this entry.
     *   3) An INDIRECT_SYMBOL_ABS - prepopulated absolute symbols.  No
     *      action is required.
     */

    symidx = (int32_t *) (kext->file + kext->dysymtab_hdr->indirectsymoff);
    symidx += firstsym;
    symptr = sect->data;
    for (i = 0; i < nsyms; ++i, ++symidx, symptr+=symptrsize) {
        if (*symidx & INDIRECT_SYMBOL_LOCAL) {
            if (*symidx & INDIRECT_SYMBOL_ABS) continue;

            add_to_ptr(symptr, kext->link_addr, kxld_kext_is_32_bit(kext));
        } else {
            sym = kxld_symtab_get_symbol_by_index(kext->symtab, *symidx);
            require_action(sym, finish, rval=KERN_FAILURE);

            add_to_ptr(symptr, sym->link_addr, kxld_kext_is_32_bit(kext));
        }
    }

    rval = KERN_SUCCESS;

finish:
    return rval;
}
#endif /* KXLD_USER_OR_BUNDLE */
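/* Illustrative sketch (not compiled): how process_symbol_pointers() above maps
 * a non-lazy symbol pointer section onto the indirect symbol table.  Entry i
 * of the section corresponds to indirect-table entry (reserved1 + i), and the
 * indirect table itself is an array of 32-bit symbol-table indexes located at
 * dysymtab_command.indirectsymoff.  Field names are the standard Mach-O ones
 * (struct section shown is the 32-bit header; struct section_64 carries the
 * same reserved1 field); the helper is hypothetical.
 */
#if 0
#include <mach-o/loader.h>
#include <stdint.h>

static uint32_t
example_indirect_symbol_index(const unsigned char *file,
    const struct dysymtab_command *dst, const struct section *sect, uint32_t i)
{
    const uint32_t *indirect = (const uint32_t *) (file + dst->indirectsymoff);

    /* reserved1 holds this section's starting index into the indirect table */
    return indirect[sect->reserved1 + i];
}
#endif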
/*******************************************************************************
*******************************************************************************/
static kern_return_t
populate_kmod_info(KXLDKext *kext)
{
    kern_return_t rval = KERN_FAILURE;
    KXLDSect *kmodsect = NULL;
    KXLDSym *kmodsym = NULL;
    u_long kmod_offset = 0;
    u_long header_size = 0;
    u_long size = 0;

    if (kext->link_type != KXLD_LINK_KEXT) {
        rval = KERN_SUCCESS;
        goto finish;
    }

    kxld_kext_get_vmsize(kext, &header_size, &size);

    kmodsym = kxld_symtab_get_symbol_by_name(kext->symtab, KXLD_KMOD_INFO_SYMBOL);
    require_action(kmodsym, finish, rval=KERN_FAILURE;
        kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogNoKmodInfo));

    kmodsect = kxld_array_get_item(&kext->sects, kmodsym->sectnum);
    kmod_offset = (u_long) (kmodsym->base_addr - kmodsect->base_addr);

    kext->kmod_info = (kmod_info_t *) (kmodsect->data + kmod_offset);
    kext->kmod_link_addr = kmodsym->link_addr;

    if (kxld_kext_is_32_bit(kext)) {
        kmod_info_32_v1_t *kmod = (kmod_info_32_v1_t *) (kext->kmod_info);
        kmod->address = (uint32_t) kext->link_addr;
        kmod->size = (uint32_t) size;
        kmod->hdr_size = (uint32_t) header_size;

#if !KERNEL
        if (kxld_kext_target_needs_swap(kext)) {
            kmod->address = OSSwapInt32(kmod->address);
            kmod->size = OSSwapInt32(kmod->size);
            kmod->hdr_size = OSSwapInt32(kmod->hdr_size);
        }
#endif /* !KERNEL */
    } else {
        kmod_info_64_v1_t *kmod = (kmod_info_64_v1_t *) (kext->kmod_info);
        kmod->address = kext->link_addr;
        kmod->size = size;
        kmod->hdr_size = header_size;

#if !KERNEL
        if (kxld_kext_target_needs_swap(kext)) {
            kmod->address = OSSwapInt64(kmod->address);
            kmod->size = OSSwapInt64(kmod->size);
            kmod->hdr_size = OSSwapInt64(kmod->hdr_size);
        }
#endif /* !KERNEL */
    }

    rval = KERN_SUCCESS;