/* libkern/kxld/kxld_kext.c — from xnu-1504.7.4 */
1 /*
2 * Copyright (c) 2008 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 #include <string.h>
29 #include <mach/machine.h>
30 #include <mach/vm_param.h>
31 #include <mach/vm_types.h>
32 #include <mach/kmod.h>
33 #include <mach-o/loader.h>
34 #include <mach-o/nlist.h>
35 #include <mach-o/reloc.h>
36 #include <sys/types.h>
37
38 #if KERNEL
39 #include <libkern/kernel_mach_header.h>
40 #include <libkern/OSKextLib.h>
41 #include <libkern/OSKextLibPrivate.h>
42 #include <mach/vm_param.h>
43 #include <mach-o/fat.h>
44 #else /* !KERNEL */
45 #include <architecture/byte_order.h>
46 #include <mach/mach_init.h>
47 #include <mach-o/arch.h>
48 #include <mach-o/swap.h>
49 #endif /* KERNEL */
50
51 #define DEBUG_ASSERT_COMPONENT_NAME_STRING "kxld"
52 #include <AssertMacros.h>
53
54 #include "kxld_demangle.h"
55 #include "kxld_dict.h"
56 #include "kxld_kext.h"
57 #include "kxld_reloc.h"
58 #include "kxld_sect.h"
59 #include "kxld_seg.h"
60 #include "kxld_state.h"
61 #include "kxld_symtab.h"
62 #include "kxld_util.h"
63 #include "kxld_uuid.h"
64 #include "kxld_vtable.h"
65
struct symtab_command;

/* Classifies what kind of image a KXLDKext represents; set during
 * initialization and used to select linking behavior.
 */
enum kxld_link_type {
    KXLD_LINK_KERNEL,       /* the kernel itself (caller passed is_kernel) */
    KXLD_LINK_PSEUDO_KEXT,  /* symbol-only kext: no segments in the file */
    KXLD_LINK_KEXT,         /* an ordinary loadable kext */
    KXLD_LINK_UNKNOWN       /* not yet determined */
};

typedef enum kxld_link_type KXLDLinkType;
/* Per-kext linker state.  Populated by kxld_kext_init() from a raw Mach-O
 * file and consumed by the subsequent link phases.
 */
struct kxld_kext {
    u_char *file;              /* Mach-O slice for the target architecture */
    u_long size;               /* size in bytes of the data at 'file' */
    const char *name;          /* kext name; owned by the caller */
    uint32_t filetype;         /* Mach-O filetype (MH_OBJECT, MH_EXECUTE, ...) */
    KXLDArray segs;            /* array of KXLDSeg */
    KXLDArray sects;           /* array of KXLDSect */
    KXLDArray vtables;         /* array of vtables (see create_vtables) */
    KXLDArray extrelocs;       /* external relocs from LC_DYSYMTAB */
    KXLDArray locrelocs;       /* local relocs from LC_DYSYMTAB */
    KXLDDict vtable_index;     /* vtable lookup index */
    KXLDRelocator relocator;   /* arch-specific relocation engine */
    KXLDuuid uuid;             /* LC_UUID contents, if present */
    KXLDSymtab *symtab;        /* heap-allocated symbol table */
    kxld_addr_t link_addr;     /* load address the kext links against */
    kmod_info_t *kmod_info;    /* kmod_info in the image (see populate_kmod_info) */
    kxld_addr_t kmod_link_addr;/* linked address of the kmod_info */
    cpu_type_t cputype;        /* target CPU type */
    cpu_subtype_t cpusubtype;  /* target CPU subtype */
    KXLDLinkType link_type;    /* kernel / kext / pseudo-kext / unknown */
    KXLDFlags flags;           /* flags passed to kxld_kext_init */
    boolean_t is_final_image;  /* TRUE for MH_EXECUTE / MH_KEXT_BUNDLE input */
    boolean_t got_is_created;  /* TRUE once a GOT section has been created */
    struct dysymtab_command *dysymtab_hdr; /* LC_DYSYMTAB, final images only */
#if KXLD_USER_OR_OBJECT
    KXLDArray *section_order;  /* kernel's section-name order, for laying out
                                * MH_OBJECT kexts (see init_from_execute) */
#endif
#if !KERNEL
    enum NXByteOrder host_order;    /* byte order of the linking host */
    enum NXByteOrder target_order;  /* byte order of the target arch */
#endif
};
109
110 /*******************************************************************************
111 * Prototypes
112 *******************************************************************************/
113
114 static kern_return_t get_target_machine_info(KXLDKext *kext, cpu_type_t cputype,
115 cpu_subtype_t cpusubtype);
116 static kern_return_t get_file_for_arch(KXLDKext *kext, u_char *file, u_long size);
117
118 static u_long get_macho_header_size(const KXLDKext *kext);
119 static u_long get_macho_data_size(const KXLDKext *kext);
120 static kern_return_t export_macho_header(const KXLDKext *kext, u_char *buf,
121 u_int ncmds, u_long *header_offset, u_long header_size);
122
123 static kern_return_t init_from_execute(KXLDKext *kext);
124 static kern_return_t init_from_final_linked_image(KXLDKext *kext, u_int *filetype_out,
125 struct symtab_command **symtab_hdr_out);
126
127 static boolean_t target_supports_protected_segments(const KXLDKext *kext)
128 __attribute__((pure));
129
130 #if KXLD_USER_OR_OBJECT
131 static boolean_t target_supports_object(const KXLDKext *kext) __attribute((pure));
132 static kern_return_t init_from_object(KXLDKext *kext);
133 static kern_return_t process_relocs_from_sections(KXLDKext *kext);
134 #endif /* KXLD_USER_OR_OBJECT */
135
136 #if KXLD_USER_OR_BUNDLE
137 static boolean_t target_supports_bundle(const KXLDKext *kext) __attribute((pure));
138 static kern_return_t init_from_bundle(KXLDKext *kext);
139 static kern_return_t process_relocs_from_tables(KXLDKext *kext);
140 static kern_return_t process_symbol_pointers(KXLDKext *kext);
141 static void add_to_ptr(u_char *symptr, kxld_addr_t val, boolean_t is_32_bit);
142 #endif /* KXLD_USER_OR_BUNDLE */
143
144 static kern_return_t get_metaclass_symbol_from_super_meta_class_pointer_symbol(
145 KXLDKext *kext, KXLDSym *super_metaclass_pointer_sym, KXLDSym **meta_class);
146
147 static kern_return_t resolve_symbols(KXLDKext *kext, KXLDDict *defined_symbols,
148 KXLDDict *obsolete_symbols);
149 static kern_return_t patch_vtables(KXLDKext *kext, KXLDDict *patched_vtables,
150 KXLDDict *defined_symbols);
151 static kern_return_t validate_symbols(KXLDKext *kext);
152 static kern_return_t populate_kmod_info(KXLDKext *kext);
153 static kern_return_t copy_vtables(KXLDKext *kext, const KXLDDict *patched_vtables);
154 static kern_return_t create_vtables(KXLDKext *kext);
155 static void restrict_private_symbols(KXLDKext *kext);
156
157 #if KXLD_USER_OR_GOT || KXLD_USER_OR_COMMON
158 static kern_return_t add_section(KXLDKext *kext, KXLDSect **sect);
159 #endif /* KXLD_USER_OR_GOT || KXLD_USER_OR_COMMON */
160
161 #if KXLD_USER_OR_GOT
162 static boolean_t target_has_got(const KXLDKext *kext) __attribute__((pure));
163 static kern_return_t create_got(KXLDKext *kext);
164 static kern_return_t populate_got(KXLDKext *kext);
165 #endif /* KXLD_USER_OR_GOT */
166
167 static boolean_t target_supports_common(const KXLDKext *kext) __attribute((pure));
168 #if KXLD_USER_OR_COMMON
169 static kern_return_t resolve_common_symbols(KXLDKext *kext);
170 #endif /* KXLD_USER_OR_COMMON */
171
172 static boolean_t target_supports_strict_patching(KXLDKext *kext)
173 __attribute__((pure));
174
175 #if KXLD_USER_OR_ILP32
176 static u_long get_macho_cmd_data_32(u_char *file, u_long offset,
177 u_int *filetype, u_int *ncmds);
178 static kern_return_t export_macho_header_32(const KXLDKext *kext, u_char *buf,
179 u_int ncmds, u_long *header_offset, u_long header_size);
180 #endif /* KXLD_USER_OR_ILP32 */
181 #if KXLD_USER_OR_LP64
182 static u_long get_macho_cmd_data_64(u_char *file, u_long offset,
183 u_int *filetype, u_int *ncmds);
184 static kern_return_t export_macho_header_64(const KXLDKext *kext, u_char *buf,
185 u_int ncmds, u_long *header_offset, u_long header_size);
186 #endif /* KXLD_USER_OR_LP64 */
187
188 /*******************************************************************************
189 *******************************************************************************/
190 size_t
191 kxld_kext_sizeof(void)
192 {
193 return sizeof(KXLDKext);
194 }
195
196 /*******************************************************************************
197 *******************************************************************************/
/* Initializes a KXLDKext from a raw Mach-O (possibly fat, in user space)
 * file: resolves the target architecture, locates and byte-swaps the right
 * slice, builds the relocator and symbol table, then dispatches to the
 * filetype-specific initializer.  Returns KERN_SUCCESS or an error code.
 */
kern_return_t
kxld_kext_init(KXLDKext *kext, u_char *file, u_long size,
    const char *name, KXLDFlags flags, boolean_t is_kernel,
    KXLDArray *section_order __unused,
    cpu_type_t cputype, cpu_subtype_t cpusubtype)
{
    kern_return_t rval = KERN_FAILURE;
    KXLDSeg *seg = NULL;
    u_int i = 0;

    check(kext);
    check(file);
    check(size);

    kext->name = name;
    kext->flags = flags;
#if KXLD_USER_OR_OBJECT
    kext->section_order = section_order;
#endif

    /* Find the local architecture */

    rval = get_target_machine_info(kext, cputype, cpusubtype);
    require_noerr(rval, finish);

    /* Find the Mach-O file for the target architecture */

    rval = get_file_for_arch(kext, file, size);
    require_noerr(rval, finish);

    /* Build the relocator */

    rval = kxld_relocator_init(&kext->relocator, kext->cputype,
        kext->cpusubtype, kxld_kext_target_needs_swap(kext));
    require_noerr(rval, finish);

    /* Allocate the symbol table lazily; a caller may reuse a kext object
     * that already owns one.
     */

    if (!kext->symtab) {
        kext->symtab = kxld_alloc(kxld_symtab_sizeof());
        require_action(kext->symtab, finish, rval=KERN_RESOURCE_SHORTAGE);
        bzero(kext->symtab, kxld_symtab_sizeof());
    }

    if (is_kernel) {
        kext->link_type = KXLD_LINK_KERNEL;
    } else {
        /* Refined to KEXT or PSEUDO_KEXT by the init_from_* routines */
        kext->link_type = KXLD_LINK_UNKNOWN;
    }

    /* There are four types of Mach-O files that we can support:
     *   1) 32-bit MH_OBJECT      - All pre-SnowLeopard systems
     *   2) 32-bit MH_KEXT_BUNDLE - Not supported
     *   3) 64-bit MH_OBJECT      - Needed for K64 bringup
     *   4) 64-bit MH_KEXT_BUNDLE - The likely 64-bit kext filetype
     */

    if (kxld_kext_is_32_bit(kext)) {
        struct mach_header *mach_hdr = (struct mach_header *) kext->file;
        kext->filetype = mach_hdr->filetype;
    } else {
        struct mach_header_64 *mach_hdr = (struct mach_header_64 *) kext->file;
        kext->filetype = mach_hdr->filetype;
    }

    /* Dispatch to the filetype-specific initializer */

    switch (kext->filetype) {
#if KXLD_USER_OR_OBJECT
    case MH_OBJECT:
        rval = init_from_object(kext);
        require_noerr(rval, finish);
        break;
#endif /* KXLD_USER_OR_OBJECT */
#if KXLD_USER_OR_BUNDLE
    case MH_KEXT_BUNDLE:
        rval = init_from_bundle(kext);
        require_noerr(rval, finish);
        break;
#endif /* KXLD_USER_OR_BUNDLE */
    case MH_EXECUTE:
        rval = init_from_execute(kext);
        require_noerr(rval, finish);
        break;
    default:
        rval = KERN_FAILURE;
        kxld_log(kKxldLogLinking, kKxldLogErr,
            kKxldLogFiletypeNotSupported, kext->filetype);
        goto finish;
    }

    /* Apply VM protections to each segment when the target supports them */

    for (i = 0; i < kext->segs.nitems; ++i) {
        seg = kxld_array_get_item(&kext->segs, i);
        kxld_seg_set_vm_protections(seg, target_supports_protected_segments(kext));
    }

    switch (kext->link_type) {
    case KXLD_LINK_KEXT:
        (void) restrict_private_symbols(kext);
        /* Fallthrough */
    case KXLD_LINK_KERNEL:
        rval = create_vtables(kext);
        require_noerr(rval, finish);
        break;
    default:
        break;
    }

    rval = KERN_SUCCESS;
finish:
    return rval;
}
308
309 /*******************************************************************************
310 *******************************************************************************/
311 kern_return_t
312 get_target_machine_info(KXLDKext *kext, cpu_type_t cputype __unused,
313 cpu_subtype_t cpusubtype __unused)
314 {
315 #if KERNEL
316
317 /* Because the kernel can only link for its own architecture, we know what
318 * the host and target architectures are at compile time, so we can use
319 * a vastly simplified version of this function.
320 */
321
322 check(kext);
323
324 #if defined(__i386__)
325 kext->cputype = CPU_TYPE_I386;
326 kext->cpusubtype = CPU_SUBTYPE_I386_ALL;
327 return KERN_SUCCESS;
328 #elif defined(__ppc__)
329 kext->cputype = CPU_TYPE_POWERPC;
330 kext->cpusubtype = CPU_SUBTYPE_POWERPC_ALL;
331 return KERN_SUCCESS;
332 #elif defined(__x86_64__)
333 kext->cputype = CPU_TYPE_X86_64;
334 kext->cpusubtype = CPU_SUBTYPE_X86_64_ALL;
335 return KERN_SUCCESS;
336 #else
337 kxld_log(kKxldLogLinking, kKxldLogErr,
338 kKxldLogArchNotSupported, _mh_execute_header->cputype);
339 return KERN_NOT_SUPPORTED;
340 #endif /* Supported architecture defines */
341
342
343 #else /* !KERNEL */
344
345 /* User-space must look up the architecture it's running on and the target
346 * architecture at run-time.
347 */
348
349 kern_return_t rval = KERN_FAILURE;
350 const NXArchInfo *host_arch = NULL;
351
352 check(kext);
353
354 host_arch = NXGetLocalArchInfo();
355 require_action(host_arch, finish, rval=KERN_FAILURE);
356
357 kext->host_order = host_arch->byteorder;
358
359 /* If the user did not specify a cputype, use the local architecture.
360 */
361
362 if (cputype) {
363 kext->cputype = cputype;
364 kext->cpusubtype = cpusubtype;
365 } else {
366 kext->cputype = host_arch->cputype;
367 kext->target_order = kext->host_order;
368
369 switch (kext->cputype) {
370 case CPU_TYPE_I386:
371 kext->cpusubtype = CPU_SUBTYPE_I386_ALL;
372 break;
373 case CPU_TYPE_POWERPC:
374 kext->cpusubtype = CPU_SUBTYPE_POWERPC_ALL;
375 break;
376 case CPU_TYPE_X86_64:
377 kext->cpusubtype = CPU_SUBTYPE_X86_64_ALL;
378 break;
379 case CPU_TYPE_ARM:
380 kext->cpusubtype = CPU_SUBTYPE_ARM_ALL;
381 break;
382 default:
383 kext->cpusubtype = 0;
384 }
385 }
386
387 /* Validate that we support the target architecture and record its
388 * endianness.
389 */
390
391 switch(kext->cputype) {
392 case CPU_TYPE_ARM:
393 case CPU_TYPE_I386:
394 case CPU_TYPE_X86_64:
395 kext->target_order = NX_LittleEndian;
396 break;
397 case CPU_TYPE_POWERPC:
398 kext->target_order = NX_BigEndian;
399 break;
400 default:
401 rval = KERN_NOT_SUPPORTED;
402 kxld_log(kKxldLogLinking, kKxldLogErr,
403 kKxldLogArchNotSupported, kext->cputype);
404 goto finish;
405 }
406
407 rval = KERN_SUCCESS;
408
409 finish:
410 return rval;
411 #endif /* KERNEL */
412 }
413
414 /*******************************************************************************
415 *******************************************************************************/
/*******************************************************************************
* Locates the Mach-O slice for the target architecture inside 'file' (which
* may be a fat file in user space), byte-swaps its headers to host order if
* necessary, and records the slice in kext->file / kext->size.
*******************************************************************************/
static kern_return_t
get_file_for_arch(KXLDKext *kext, u_char *file, u_long size)
{
    kern_return_t rval = KERN_FAILURE;
    struct mach_header *mach_hdr = NULL;
#if !KERNEL
    struct fat_header *fat = (struct fat_header *) file;
    struct fat_arch *archs = (struct fat_arch *) &fat[1];
    boolean_t swap = FALSE;
#endif /* !KERNEL */

    check(kext);
    check(file);
    check(size);

    kext->file = file;
    kext->size = size;

    /* We are assuming that we will never receive a fat file in the kernel */

#if !KERNEL
    require_action(size >= sizeof(*fat), finish,
        rval=KERN_FAILURE;
        kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogTruncatedMachO));

    /* The fat header is always big endian, so swap if necessary */
    if (fat->magic == FAT_CIGAM) {
        (void) swap_fat_header(fat, kext->host_order);
        swap = TRUE;
    }

    if (fat->magic == FAT_MAGIC) {
        struct fat_arch *arch = NULL;

        /* Make sure the advertised arch table actually fits in the buffer */
        require_action(size >= (sizeof(*fat) + (fat->nfat_arch * sizeof(*archs))),
            finish,
            rval=KERN_FAILURE;
            kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogTruncatedMachO));

        /* Swap the fat_arch structures if necessary */
        if (swap) {
            (void) swap_fat_arch(archs, fat->nfat_arch, kext->host_order);
        }

        /* Locate the Mach-O for the requested architecture */

        arch = NXFindBestFatArch(kext->cputype, kext->cpusubtype, archs,
            fat->nfat_arch);
        require_action(arch, finish, rval=KERN_FAILURE;
            kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogArchNotFound));
        require_action(size >= arch->offset + arch->size, finish,
            rval=KERN_FAILURE;
            kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogTruncatedMachO));

        /* Narrow the kext's view to the chosen slice */
        kext->file = file + arch->offset;
        kext->size = arch->size;
    }
#endif /* !KERNEL */

    /* Swap the Mach-O's headers to this architecture if necessary */
    if (kxld_kext_is_32_bit(kext)) {
        rval = validate_and_swap_macho_32(kext->file, kext->size
#if !KERNEL
            , kext->host_order
#endif /* !KERNEL */
            );
    } else {
        rval = validate_and_swap_macho_64(kext->file, kext->size
#if !KERNEL
            , kext->host_order
#endif /* !KERNEL */
            );
    }
    require_noerr(rval, finish);

    /* Sanity check: the slice's cputype must match the requested target */
    mach_hdr = (struct mach_header *) kext->file;
    require_action(kext->cputype == mach_hdr->cputype, finish,
        rval=KERN_FAILURE;
        kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogTruncatedMachO));

    rval = KERN_SUCCESS;
finish:
    return rval;
}
500
501 /*******************************************************************************
502 *******************************************************************************/
503 boolean_t
504 kxld_kext_is_32_bit(const KXLDKext *kext)
505 {
506 check(kext);
507
508 return kxld_is_32_bit(kext->cputype);
509 }
510
511 /*******************************************************************************
512 *******************************************************************************/
513 void
514 kxld_kext_get_cputype(const KXLDKext *kext, cpu_type_t *cputype,
515 cpu_subtype_t *cpusubtype)
516 {
517 check(kext);
518 check(cputype);
519 check(cpusubtype);
520
521 *cputype = kext->cputype;
522 *cpusubtype = kext->cpusubtype;
523 }
524
525 /*******************************************************************************
526 *******************************************************************************/
527 kern_return_t
528 kxld_kext_validate_cputype(const KXLDKext *kext, cpu_type_t cputype,
529 cpu_subtype_t cpusubtype __unused)
530 {
531 if (kext->cputype != cputype) return KERN_FAILURE;
532 return KERN_SUCCESS;
533 }
534
535 /*******************************************************************************
536 *******************************************************************************/
537 static boolean_t
538 target_supports_protected_segments(const KXLDKext *kext)
539 {
540 return (kext->is_final_image &&
541 kext->cputype == CPU_TYPE_X86_64);
542 }
543
544 #if KXLD_USER_OR_OBJECT
545 /*******************************************************************************
546 *******************************************************************************/
547 static boolean_t target_supports_object(const KXLDKext *kext)
548 {
549 return (kext->cputype == CPU_TYPE_POWERPC ||
550 kext->cputype == CPU_TYPE_I386 ||
551 kext->cputype == CPU_TYPE_ARM);
552 }
553
554 /*******************************************************************************
555 *******************************************************************************/
/*******************************************************************************
* Initializes a kext from an MH_OBJECT file.  Walks the load commands to
* find the single unnamed segment, symbol table, and UUID; then builds the
* section index, creates special sections (GOT, common), and synthesizes
* real segments from the sections.  A file with no segment becomes a
* pseudo-kext (symbols only).
*******************************************************************************/
static kern_return_t
init_from_object(KXLDKext *kext)
{
    kern_return_t rval = KERN_FAILURE;
    struct load_command *cmd_hdr = NULL;
    struct symtab_command *symtab_hdr = NULL;
    struct uuid_command *uuid_hdr = NULL;
    KXLDSect *sect = NULL;
    u_long offset = 0;
    u_long sect_offset = 0;
    u_int filetype = 0;
    u_int ncmds = 0;
    u_int nsects = 0;
    u_int i = 0;
    boolean_t has_segment = FALSE;

    check(kext);

    require_action(target_supports_object(kext),
        finish, rval=KERN_FAILURE;
        kxld_log(kKxldLogLinking, kKxldLogErr,
            kKxldLogFiletypeNotSupported, MH_OBJECT));

    /* Read the Mach-O header; 'offset' becomes the load-command offset */
    KXLD_3264_FUNC(kxld_kext_is_32_bit(kext), offset,
        get_macho_cmd_data_32, get_macho_cmd_data_64,
        kext->file, offset, &filetype, &ncmds);

    require_action(filetype == MH_OBJECT, finish, rval=KERN_FAILURE);

    /* MH_OBJECTs use one unnamed segment to contain all of the sections.  We
     * loop over all of the load commands to initialize the structures we
     * expect.  Then, we'll use the unnamed segment to get to all of the
     * sections, and then use those sections to create the actual segments.
     */

    for (; i < ncmds; ++i, offset += cmd_hdr->cmdsize) {
        cmd_hdr = (struct load_command *) (kext->file + offset);

        switch(cmd_hdr->cmd) {
#if KXLD_USER_OR_ILP32
        case LC_SEGMENT:
            {
                struct segment_command *seg_hdr =
                    (struct segment_command *) cmd_hdr;

                /* Ignore segments with no vm size */
                if (!seg_hdr->vmsize) continue;

                /* Word size of the command must match the file's word size,
                 * and an MH_OBJECT may carry at most one segment.
                 */
                require_action(kxld_kext_is_32_bit(kext), finish, rval=KERN_FAILURE;
                    kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogMalformedMachO
                        "LC_SEGMENT in 64-bit kext."));
                require_action(!has_segment, finish, rval=KERN_FAILURE;
                    kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogMalformedMachO
                        "Multiple segments in an MH_OBJECT kext."));

                nsects = seg_hdr->nsects;
                sect_offset = offset + sizeof(*seg_hdr);
                has_segment = TRUE;
            }
            break;
#endif /* KXLD_USER_OR_ILP32 */
#if KXLD_USER_OR_LP64
        case LC_SEGMENT_64:
            {
                struct segment_command_64 *seg_hdr =
                    (struct segment_command_64 *) cmd_hdr;

                /* Ignore segments with no vm size */
                if (!seg_hdr->vmsize) continue;

                require_action(!kxld_kext_is_32_bit(kext), finish, rval=KERN_FAILURE;
                    kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogMalformedMachO
                        "LC_SEGMENT_64 in a 32-bit kext."));
                require_action(!has_segment, finish, rval=KERN_FAILURE;
                    kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogMalformedMachO
                        "Multiple segments in an MH_OBJECT kext."));

                nsects = seg_hdr->nsects;
                sect_offset = offset + sizeof(*seg_hdr);
                has_segment = TRUE;
            }
            break;
#endif /* KXLD_USER_OR_LP64 */
        case LC_SYMTAB:
            symtab_hdr = (struct symtab_command *) cmd_hdr;

            /* MH_OBJECT symbols need no extra linkedit offset (last arg 0) */
            KXLD_3264_FUNC(kxld_kext_is_32_bit(kext), rval,
                kxld_symtab_init_from_macho_32, kxld_symtab_init_from_macho_64,
                kext->symtab, kext->file, symtab_hdr, 0);
            require_noerr(rval, finish);
            break;
        case LC_UUID:
            uuid_hdr = (struct uuid_command *) cmd_hdr;
            kxld_uuid_init_from_macho(&kext->uuid, uuid_hdr);
            break;
        case LC_UNIXTHREAD:
            /* Don't need to do anything with UNIXTHREAD */
            break;
        default:
            rval = KERN_FAILURE;
            kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogMalformedMachO
                "Invalid segment type in MH_OBJECT kext: %u.", cmd_hdr->cmd);
            goto finish;
        }
    }

    if (has_segment) {

        /* Get the number of sections from the segment and build the section index */

        rval = kxld_array_init(&kext->sects, sizeof(KXLDSect), nsects);
        require_noerr(rval, finish);

        /* Loop over all of the sections to initialize the section index */

        for (i = 0; i < nsects; ++i) {
            sect = kxld_array_get_item(&kext->sects, i);
            KXLD_3264_FUNC(kxld_kext_is_32_bit(kext), rval,
                kxld_sect_init_from_macho_32, kxld_sect_init_from_macho_64,
                sect, kext->file, &sect_offset, i, &kext->relocator);
            require_noerr(rval, finish);
        }

        /* Create special sections */

#if KXLD_USER_OR_GOT
        rval = create_got(kext);
        require_noerr(rval, finish);
#endif /* KXLD_USER_OR_GOT */

#if KXLD_USER_OR_COMMON
        rval = resolve_common_symbols(kext);
        require_noerr(rval, finish);
#endif /* KXLD_USER_OR_COMMON */

        /* Create the segments from the section index */

        rval = kxld_seg_create_seg_from_sections(&kext->segs, &kext->sects);
        require_noerr(rval, finish);

        rval = kxld_seg_finalize_object_segment(&kext->segs,
            kext->section_order, get_macho_header_size(kext));
        require_noerr(rval, finish);

        kext->link_type = KXLD_LINK_KEXT;
    } else {
        /* No segment at all: a symbol-only (pseudo) kext */
        kext->link_type = KXLD_LINK_PSEUDO_KEXT;
    }

    rval = KERN_SUCCESS;
finish:
    return rval;
}
709 #endif /* KXLD_USER_OR_OBJECT */
710
711 /*******************************************************************************
712 *******************************************************************************/
/*******************************************************************************
* Shared initializer for final-linked images (MH_EXECUTE, MH_KEXT_BUNDLE).
* Makes two passes over the load commands: the first counts segments and
* sections so the arrays can be sized exactly; the second initializes the
* segments, their sections, the UUID, and the dysymtab relocation tables.
* On success, reports the filetype and LC_SYMTAB header to the caller via
* the out-parameters (symbol-table parsing differs per caller).
*******************************************************************************/
static kern_return_t
init_from_final_linked_image(KXLDKext *kext, u_int *filetype_out,
    struct symtab_command **symtab_hdr_out)
{
    kern_return_t rval = KERN_FAILURE;
    KXLDSeg *seg = NULL;
    KXLDSect *sect = NULL;
    struct load_command *cmd_hdr = NULL;
    struct symtab_command *symtab_hdr = NULL;
    struct uuid_command *uuid_hdr = NULL;
    u_long base_offset = 0;
    u_long offset = 0;
    u_long sect_offset = 0;
    u_int filetype = 0;
    u_int i = 0;
    u_int j = 0;
    u_int segi = 0;
    u_int secti = 0;
    u_int nsegs = 0;
    u_int nsects = 0;
    u_int ncmds = 0;

    /* Read the Mach-O header; base_offset is where the load commands start */
    KXLD_3264_FUNC(kxld_kext_is_32_bit(kext), base_offset,
        get_macho_cmd_data_32, get_macho_cmd_data_64,
        kext->file, offset, &filetype, &ncmds);

    /* First pass to count segments and sections */

    offset = base_offset;
    for (i = 0; i < ncmds; ++i, offset += cmd_hdr->cmdsize) {
        cmd_hdr = (struct load_command *) (kext->file + offset);

        switch(cmd_hdr->cmd) {
#if KXLD_USER_OR_ILP32
        case LC_SEGMENT:
            {
                struct segment_command *seg_hdr =
                    (struct segment_command *) cmd_hdr;

                /* Ignore segments with no vm size */
                if (!seg_hdr->vmsize) continue;

                ++nsegs;
                nsects += seg_hdr->nsects;
            }
            break;
#endif /* KXLD_USER_OR_ILP32 */
#if KXLD_USER_OR_LP64
        case LC_SEGMENT_64:
            {
                struct segment_command_64 *seg_hdr =
                    (struct segment_command_64 *) cmd_hdr;

                /* Ignore segments with no vm size */
                if (!seg_hdr->vmsize) continue;

                ++nsegs;
                nsects += seg_hdr->nsects;
            }
            break;
#endif /* KXLD_USER_OR_LP64 */
        default:
            continue;
        }
    }

    /* Allocate the segments and sections */

    if (nsegs) {
        rval = kxld_array_init(&kext->segs, sizeof(KXLDSeg), nsegs);
        require_noerr(rval, finish);

        rval = kxld_array_init(&kext->sects, sizeof(KXLDSect), nsects);
        require_noerr(rval, finish);
    }

    /* Initialize the segments and sections */

    offset = base_offset;
    for (i = 0; i < ncmds; ++i, offset += cmd_hdr->cmdsize) {
        cmd_hdr = (struct load_command *) (kext->file + offset);
        seg = NULL; /* non-NULL after this switch only for segment commands */

        switch(cmd_hdr->cmd) {
#if KXLD_USER_OR_ILP32
        case LC_SEGMENT:
            {
                struct segment_command *seg_hdr =
                    (struct segment_command *) cmd_hdr;

                /* Ignore segments with no vm size */
                if (!seg_hdr->vmsize) continue;

                seg = kxld_array_get_item(&kext->segs, segi++);

                rval = kxld_seg_init_from_macho_32(seg, seg_hdr);
                require_noerr(rval, finish);

                /* Section headers immediately follow the segment command */
                sect_offset = offset + sizeof(*seg_hdr);
            }
            break;
#endif /* KXLD_USER_OR_ILP32 */
#if KXLD_USER_OR_LP64
        case LC_SEGMENT_64:
            {
                struct segment_command_64 *seg_hdr =
                    (struct segment_command_64 *) cmd_hdr;

                /* Ignore segments with no vm size */
                if (!seg_hdr->vmsize) continue;

                seg = kxld_array_get_item(&kext->segs, segi++);

                rval = kxld_seg_init_from_macho_64(seg, seg_hdr);
                require_noerr(rval, finish);

                sect_offset = offset + sizeof(*seg_hdr);
            }
            break;
#endif /* KXLD_USER_OR_LP64 */
        case LC_SYMTAB:
            /* Remember the header; the caller parses the symbol table */
            symtab_hdr = (struct symtab_command *) cmd_hdr;
            break;
        case LC_UUID:
            uuid_hdr = (struct uuid_command *) cmd_hdr;
            kxld_uuid_init_from_macho(&kext->uuid, uuid_hdr);
            break;
        case LC_DYSYMTAB:
            kext->dysymtab_hdr = (struct dysymtab_command *) cmd_hdr;

            /* Build the external and local relocation arrays from the
             * dysymtab's relocation tables.
             */
            rval = kxld_reloc_create_macho(&kext->extrelocs, &kext->relocator,
                (struct relocation_info *) (kext->file + kext->dysymtab_hdr->extreloff),
                kext->dysymtab_hdr->nextrel);
            require_noerr(rval, finish);

            rval = kxld_reloc_create_macho(&kext->locrelocs, &kext->relocator,
                (struct relocation_info *) (kext->file + kext->dysymtab_hdr->locreloff),
                kext->dysymtab_hdr->nlocrel);
            require_noerr(rval, finish);

            break;
        case LC_UNIXTHREAD:
            /* Don't need to do anything with UNIXTHREAD for the kernel */
            require_action(kext->link_type == KXLD_LINK_KERNEL, finish,
                rval=KERN_FAILURE;
                kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogMalformedMachO
                    "LC_UNIXTHREAD segment is not valid in a kext."));
            break;
        default:
            rval=KERN_FAILURE;
            kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogMalformedMachO
                "Invalid segment type in MH_KEXT_BUNDLE kext: %u.", cmd_hdr->cmd);
            goto finish;
        }

        if (seg) {

            /* Initialize the sections */
            for (j = 0; j < seg->sects.nitems; ++j, ++secti) {
                sect = kxld_array_get_item(&kext->sects, secti);
                KXLD_3264_FUNC(kxld_kext_is_32_bit(kext), rval,
                    kxld_sect_init_from_macho_32, kxld_sect_init_from_macho_64,
                    sect, kext->file, &sect_offset, secti, &kext->relocator);
                require_noerr(rval, finish);

                /* Add the section to the segment.  This will also make sure
                 * that the sections and segments have the same segname.
                 */
                rval = kxld_seg_add_section(seg, sect);
                require_noerr(rval, finish);
            }
            rval = kxld_seg_finish_init(seg);
            require_noerr(rval, finish);
        }
    }

    if (filetype_out) *filetype_out = filetype;
    if (symtab_hdr_out) *symtab_hdr_out = symtab_hdr;
    kext->is_final_image = TRUE;
    rval = KERN_SUCCESS;
finish:
    return rval;
}
896
897 /*******************************************************************************
898 *******************************************************************************/
/*******************************************************************************
* Initializes the kext object representing the kernel itself (MH_EXECUTE).
* Builds on init_from_final_linked_image(), then parses the kernel's symbol
* table (accounting, in-kernel, for the linkedit segment's in-memory offset)
* and records the kernel's section-name ordering for MH_OBJECT-based targets.
*******************************************************************************/
static kern_return_t
init_from_execute(KXLDKext *kext)
{
    kern_return_t rval = KERN_FAILURE;
    struct symtab_command *symtab_hdr = NULL;
    kxld_addr_t linkedit_offset = 0;
    u_int filetype = 0;
#if KERNEL
    KXLDSeg *textseg = NULL;
    KXLDSeg *linkeditseg = NULL;
#endif /*KERNEL */
#if KXLD_USER_OR_OBJECT
    KXLDSeg *seg = NULL;
    KXLDSect *sect = NULL;
    KXLDSectionName *sname = NULL;
    u_int i = 0, j = 0, k = 0;
#endif /* KXLD_USER_OR_OBJECT */

    check(kext);

    /* Only the kernel may be an MH_EXECUTE image */
    require_action(kext->link_type == KXLD_LINK_KERNEL, finish,
        rval=KERN_FAILURE);

    rval = init_from_final_linked_image(kext, &filetype, &symtab_hdr);
    require_noerr(rval, finish);

    require_action(filetype == MH_EXECUTE, finish, rval=KERN_FAILURE;
        kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogMalformedMachO
            "The kernel file is not of type MH_EXECUTE."));

#if KERNEL
    /* When we're in the kernel, the symbol table can no longer be found by the
     * symtab_command alone because the command specifies offsets for the file
     * on disk, not the file mapped into memory.  We can find the additional
     * offset necessary by finding the difference between the linkedit segment's
     * vm address and the text segment's vm address.
     */

    textseg = kxld_kext_get_seg_by_name(kext, SEG_TEXT);
    require_action(textseg, finish, rval=KERN_FAILURE;
        kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogMalformedMachO));

    linkeditseg = kxld_kext_get_seg_by_name(kext, SEG_LINKEDIT);
    require_action(linkeditseg, finish, rval=KERN_FAILURE;
        kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogMalformedMachO));

    linkedit_offset = linkeditseg->base_addr - textseg->base_addr -
        linkeditseg->fileoff;
#endif /* KERNEL */

    /* Initialize the symbol table */

    KXLD_3264_FUNC(kxld_kext_is_32_bit(kext), rval,
        kxld_symtab_init_from_macho_32, kxld_symtab_init_from_macho_64,
        kext->symtab, kext->file, symtab_hdr, linkedit_offset);
    require_noerr(rval, finish);

#if KXLD_USER_OR_OBJECT
    /* Save off the order of section names so that we can lay out kext
     * sections for MH_OBJECT-based systems.
     */
    if (target_supports_object(kext)) {

        rval = kxld_array_init(kext->section_order, sizeof(KXLDSectionName),
            kext->sects.nitems);
        require_noerr(rval, finish);

        /* Copy the section names into the section_order array for future kext
         * section ordering.
         */
        for (i = 0, k = 0; i < kext->segs.nitems; ++i) {
            seg = kxld_array_get_item(&kext->segs, i);

            for (j = 0; j < seg->sects.nitems; ++j, ++k) {
                sect = *(KXLDSect **) kxld_array_get_item(&seg->sects, j);
                sname = kxld_array_get_item(kext->section_order, k);

                strlcpy(sname->segname, sect->segname, sizeof(sname->segname));
                strlcpy(sname->sectname, sect->sectname, sizeof(sname->sectname));
            }
        }
    }
#endif /* KXLD_USER_OR_OBJECT */

    rval = KERN_SUCCESS;
finish:
    return rval;
}
987
988 #if KXLD_USER_OR_BUNDLE
989 /*******************************************************************************
990 *******************************************************************************/
991 static boolean_t
992 target_supports_bundle(const KXLDKext *kext)
993 {
994 return (kext->cputype == CPU_TYPE_X86_64);
995 }
996
997 /*******************************************************************************
998 *******************************************************************************/
/* Initializes a kext object from an MH_KEXT_BUNDLE file. Parses the
 * load commands, builds the symbol table, and drops the __LINKEDIT
 * segment (the symbol table is never kept in memory for kexts).
 * Returns KERN_SUCCESS, or KERN_FAILURE on any malformed/unsupported
 * input.
 */
static kern_return_t
init_from_bundle(KXLDKext *kext)
{
    kern_return_t rval = KERN_FAILURE;
    KXLDSeg *seg = NULL;
    struct symtab_command *symtab_hdr = NULL;
    u_int filetype = 0;
    u_int idx = 0;

    check(kext);

    /* Bundles are only supported on targets that implement them (see
     * target_supports_bundle).
     */
    require_action(target_supports_bundle(kext), finish,
        rval=KERN_FAILURE;
        kxld_log(kKxldLogLinking, kKxldLogErr,
            kKxldLogFiletypeNotSupported, MH_KEXT_BUNDLE));

    /* Parse the mach header and load commands shared by all final
     * linked images; this yields the file type and LC_SYMTAB header.
     */
    rval = init_from_final_linked_image(kext, &filetype, &symtab_hdr);
    require_noerr(rval, finish);

    require_action(filetype == MH_KEXT_BUNDLE, finish,
        rval=KERN_FAILURE);

    /* Build the symbol table. Bundles are laid out as on disk, so no
     * extra linkedit offset is needed (contrast with the kernel path).
     */
    KXLD_3264_FUNC(kxld_kext_is_32_bit(kext), rval,
        kxld_symtab_init_from_macho_32, kxld_symtab_init_from_macho_64,
        kext->symtab, kext->file, symtab_hdr, /* linkedit offset */ 0);
    require_noerr(rval, finish);

    if (kext->segs.nitems) {
        /* Remove the __LINKEDIT segment, since we never keep the symbol
         * table around in memory for kexts.
         */
        seg = kxld_kext_get_seg_by_name(kext, SEG_LINKEDIT);
        if (seg) {
            rval = kxld_array_get_index(&kext->segs, seg, &idx);
            require_noerr(rval, finish);

            kxld_seg_deinit(seg);

            rval = kxld_array_remove(&kext->segs, idx);
            require_noerr(rval, finish);
        }

        kext->link_type = KXLD_LINK_KEXT;
    } else {
        /* A bundle with no segments carries symbols only: a pseudokext. */
        kext->link_type = KXLD_LINK_PSEUDO_KEXT;
    }

    rval = KERN_SUCCESS;
finish:
    return rval;
}
1050 #endif /* KXLD_USER_OR_BUNDLE */
1051
1052 #if KXLD_USER_OR_ILP32
1053 /*******************************************************************************
1054 *******************************************************************************/
1055 static u_long
1056 get_macho_cmd_data_32(u_char *file, u_long offset, u_int *filetype, u_int *ncmds)
1057 {
1058 struct mach_header *mach_hdr = (struct mach_header *) (file + offset);
1059
1060 if (filetype) *filetype = mach_hdr->filetype;
1061 if (ncmds) *ncmds = mach_hdr->ncmds;
1062
1063 return sizeof(*mach_hdr);
1064 }
1065
1066 #endif /* KXLD_USER_OR_ILP32 */
1067
1068 #if KXLD_USER_OR_LP64
1069 /*******************************************************************************
1070 *******************************************************************************/
1071 static u_long
1072 get_macho_cmd_data_64(u_char *file, u_long offset, u_int *filetype, u_int *ncmds)
1073 {
1074 struct mach_header_64 *mach_hdr = (struct mach_header_64 *) (file + offset);
1075
1076 if (filetype) *filetype = mach_hdr->filetype;
1077 if (ncmds) *ncmds = mach_hdr->ncmds;
1078
1079 return sizeof(*mach_hdr);
1080 }
1081 #endif /* KXLD_USER_OR_LP64 */
1082
1083 /*******************************************************************************
1084 *******************************************************************************/
1085 static kern_return_t
1086 create_vtables(KXLDKext *kext)
1087 {
1088 kern_return_t rval = KERN_FAILURE;
1089 KXLDSymtabIterator iter;
1090 KXLDSym *sym = NULL;
1091 KXLDSym *vtable_sym = NULL;
1092 KXLDSym *meta_vtable_sym = NULL;
1093 KXLDSect *vtable_sect = NULL;
1094 KXLDSect *meta_vtable_sect = NULL;
1095 KXLDVTable *vtable = NULL;
1096 KXLDVTable *meta_vtable = NULL;
1097 char class_name[KXLD_MAX_NAME_LEN];
1098 char vtable_name[KXLD_MAX_NAME_LEN];
1099 char meta_vtable_name[KXLD_MAX_NAME_LEN];
1100 char *demangled_name1 = NULL;
1101 char *demangled_name2 = NULL;
1102 size_t demangled_length1 = 0;
1103 size_t demangled_length2 = 0;
1104 u_int i = 0;
1105 u_int nvtables = 0;
1106
1107 if (kext->link_type == KXLD_LINK_KERNEL) {
1108 /* Create a vtable object for every vtable symbol */
1109 kxld_symtab_iterator_init(&iter, kext->symtab, kxld_sym_is_vtable, FALSE);
1110 nvtables = kxld_symtab_iterator_get_num_remaining(&iter);
1111 } else {
1112 /* We walk over the super metaclass pointer symbols, because classes
1113 * with them are the only ones that need patching. Then we double the
1114 * number of vtables we're expecting, because every pointer will have a
1115 * class vtable and a MetaClass vtable.
1116 */
1117 kxld_symtab_iterator_init(&iter, kext->symtab,
1118 kxld_sym_is_super_metaclass_pointer, FALSE);
1119 nvtables = kxld_symtab_iterator_get_num_remaining(&iter) * 2;
1120 }
1121
1122 /* Allocate the array of vtable objects.
1123 */
1124 rval = kxld_array_init(&kext->vtables, sizeof(KXLDVTable), nvtables);
1125 require_noerr(rval, finish);
1126
1127 /* Initialize from each vtable symbol */
1128 while ((sym = kxld_symtab_iterator_get_next(&iter))) {
1129
1130 if (kext->link_type == KXLD_LINK_KERNEL) {
1131 vtable_sym = sym;
1132 } else {
1133 /* Get the class name from the smc pointer */
1134 rval = kxld_sym_get_class_name_from_super_metaclass_pointer(
1135 sym, class_name, sizeof(class_name));
1136 require_noerr(rval, finish);
1137
1138 /* Get the vtable name from the class name */
1139 rval = kxld_sym_get_vtable_name_from_class_name(class_name,
1140 vtable_name, sizeof(vtable_name));
1141 require_noerr(rval, finish);
1142
1143 /* Get the vtable symbol */
1144 vtable_sym = kxld_symtab_get_symbol_by_name(kext->symtab, vtable_name);
1145 require_action(vtable_sym, finish, rval=KERN_FAILURE;
1146 kxld_log(kKxldLogPatching, kKxldLogErr, kKxldLogMissingVtable,
1147 vtable_name, class_name));
1148
1149 /* Get the meta vtable name from the class name */
1150 rval = kxld_sym_get_meta_vtable_name_from_class_name(class_name,
1151 meta_vtable_name, sizeof(meta_vtable_name));
1152 require_noerr(rval, finish);
1153
1154 /* Get the meta vtable symbol */
1155 meta_vtable_sym = kxld_symtab_get_symbol_by_name(kext->symtab,
1156 meta_vtable_name);
1157 if (!meta_vtable_sym) {
1158 /* If we don't support strict patching and we can't find the vtable,
1159 * log a warning and reduce the expected number of vtables by 1.
1160 */
1161 if (target_supports_strict_patching(kext)) {
1162 kxld_log(kKxldLogPatching, kKxldLogErr, kKxldLogMissingVtable,
1163 meta_vtable_name, class_name);
1164 rval = KERN_FAILURE;
1165 goto finish;
1166 } else {
1167 kxld_log(kKxldLogPatching, kKxldLogErr,
1168 "Warning: " kKxldLogMissingVtable,
1169 kxld_demangle(meta_vtable_name, &demangled_name1,
1170 &demangled_length1),
1171 kxld_demangle(class_name, &demangled_name2,
1172 &demangled_length2));
1173 kxld_array_resize(&kext->vtables, --nvtables);
1174 }
1175 }
1176 }
1177
1178 /* Get the vtable's section */
1179 vtable_sect = kxld_array_get_item(&kext->sects, vtable_sym->sectnum);
1180 require_action(vtable_sect, finish, rval=KERN_FAILURE);
1181
1182 vtable = kxld_array_get_item(&kext->vtables, i++);
1183
1184 if (kext->link_type == KXLD_LINK_KERNEL) {
1185 /* Initialize the kernel vtable */
1186 rval = kxld_vtable_init_from_kernel_macho(vtable, vtable_sym,
1187 vtable_sect, kext->symtab, &kext->relocator);
1188 require_noerr(rval, finish);
1189 } else {
1190 /* Initialize the class vtable */
1191 if (kext->is_final_image) {
1192 rval = kxld_vtable_init_from_final_macho(vtable, vtable_sym,
1193 vtable_sect, kext->symtab, &kext->relocator, &kext->extrelocs);
1194 require_noerr(rval, finish);
1195 } else {
1196 rval = kxld_vtable_init_from_object_macho(vtable, vtable_sym,
1197 vtable_sect, kext->symtab, &kext->relocator);
1198 require_noerr(rval, finish);
1199 }
1200
1201 /* meta_vtable_sym will be null when we don't support strict patching
1202 * and can't find the metaclass vtable.
1203 */
1204 if (meta_vtable_sym) {
1205 /* Get the vtable's section */
1206 meta_vtable_sect = kxld_array_get_item(&kext->sects,
1207 meta_vtable_sym->sectnum);
1208 require_action(vtable_sect, finish, rval=KERN_FAILURE);
1209
1210 meta_vtable = kxld_array_get_item(&kext->vtables, i++);
1211
1212 /* Initialize the metaclass vtable */
1213 if (kext->is_final_image) {
1214 rval = kxld_vtable_init_from_final_macho(meta_vtable, meta_vtable_sym,
1215 meta_vtable_sect, kext->symtab, &kext->relocator, &kext->extrelocs);
1216 require_noerr(rval, finish);
1217 } else {
1218 rval = kxld_vtable_init_from_object_macho(meta_vtable, meta_vtable_sym,
1219 meta_vtable_sect, kext->symtab, &kext->relocator);
1220 require_noerr(rval, finish);
1221 }
1222 }
1223 }
1224 }
1225 require_action(i == kext->vtables.nitems, finish,
1226 rval=KERN_FAILURE);
1227
1228 /* Map vtable names to the vtable structures */
1229 rval = kxld_dict_init(&kext->vtable_index, kxld_dict_string_hash,
1230 kxld_dict_string_cmp, kext->vtables.nitems);
1231 require_noerr(rval, finish);
1232
1233 for (i = 0; i < kext->vtables.nitems; ++i) {
1234 vtable = kxld_array_get_item(&kext->vtables, i);
1235 rval = kxld_dict_insert(&kext->vtable_index, vtable->name, vtable);
1236 require_noerr(rval, finish);
1237 }
1238
1239 rval = KERN_SUCCESS;
1240
1241 finish:
1242
1243 if (demangled_name1) kxld_free(demangled_name1, demangled_length1);
1244 if (demangled_name2) kxld_free(demangled_name2, demangled_length2);
1245
1246 return rval;
1247 }
1248
1249 /*******************************************************************************
1250 * Temporary workaround for PR-6668105
1251 * new, new[], delete, and delete[] may be overridden globally in a kext.
1252 * We should do this with some sort of weak symbols, but we'll use a whitelist
1253 * for now to minimize risk.
1254 *******************************************************************************/
1255 static void
1256 restrict_private_symbols(KXLDKext *kext)
1257 {
1258 const char *private_symbols[] = {
1259 KXLD_KMOD_INFO_SYMBOL,
1260 KXLD_OPERATOR_NEW_SYMBOL,
1261 KXLD_OPERATOR_NEW_ARRAY_SYMBOL,
1262 KXLD_OPERATOR_DELETE_SYMBOL,
1263 KXLD_OPERATOR_DELETE_ARRAY_SYMBOL
1264 };
1265 KXLDSymtabIterator iter;
1266 KXLDSym *sym = NULL;
1267 const char *name = NULL;
1268 u_int i = 0;
1269
1270 kxld_symtab_iterator_init(&iter, kext->symtab, kxld_sym_is_exported, FALSE);
1271 while ((sym = kxld_symtab_iterator_get_next(&iter))) {
1272 for (i = 0; i < const_array_len(private_symbols); ++i) {
1273 name = private_symbols[i];
1274 if (!streq(sym->name, name)) {
1275 continue;
1276 }
1277
1278 kxld_sym_mark_private(sym);
1279 }
1280 }
1281 }
1282
1283 /*******************************************************************************
1284 *******************************************************************************/
1285 void
1286 kxld_kext_clear(KXLDKext *kext)
1287 {
1288 KXLDSeg *seg = NULL;
1289 KXLDSect *sect = NULL;
1290 KXLDVTable *vtable = NULL;
1291 u_int i;
1292
1293 check(kext);
1294
1295 #if !KERNEL
1296 if (kext->link_type == KXLD_LINK_KERNEL) {
1297 unswap_macho(kext->file, kext->host_order, kext->target_order);
1298 }
1299 #endif /* !KERNEL */
1300
1301 for (i = 0; i < kext->segs.nitems; ++i) {
1302 seg = kxld_array_get_item(&kext->segs, i);
1303 kxld_seg_clear(seg);
1304 }
1305 kxld_array_reset(&kext->segs);
1306
1307 for (i = 0; i < kext->sects.nitems; ++i) {
1308 sect = kxld_array_get_item(&kext->sects, i);
1309 kxld_sect_clear(sect);
1310 }
1311 kxld_array_reset(&kext->sects);
1312
1313 for (i = 0; i < kext->vtables.nitems; ++i) {
1314 vtable = kxld_array_get_item(&kext->vtables, i);
1315 kxld_vtable_clear(vtable);
1316 }
1317 kxld_array_reset(&kext->vtables);
1318
1319 kxld_array_reset(&kext->extrelocs);
1320 kxld_array_reset(&kext->locrelocs);
1321 kxld_dict_clear(&kext->vtable_index);
1322 kxld_relocator_clear(&kext->relocator);
1323 kxld_uuid_clear(&kext->uuid);
1324
1325 if (kext->symtab) kxld_symtab_clear(kext->symtab);
1326
1327 kext->link_addr = 0;
1328 kext->kmod_link_addr = 0;
1329 kext->cputype = 0;
1330 kext->cpusubtype = 0;
1331 kext->link_type = KXLD_LINK_UNKNOWN;
1332 kext->is_final_image = FALSE;
1333 kext->got_is_created = FALSE;
1334 }
1335
1336
1337
1338 /*******************************************************************************
1339 *******************************************************************************/
1340 void
1341 kxld_kext_deinit(KXLDKext *kext)
1342 {
1343 KXLDSeg *seg = NULL;
1344 KXLDSect *sect = NULL;
1345 KXLDVTable *vtable = NULL;
1346 u_int i;
1347
1348 check(kext);
1349
1350 #if !KERNEL
1351 if (kext->link_type == KXLD_LINK_KERNEL) {
1352 unswap_macho(kext->file, kext->host_order, kext->target_order);
1353 }
1354 #endif /* !KERNEL */
1355
1356 for (i = 0; i < kext->segs.maxitems; ++i) {
1357 seg = kxld_array_get_slot(&kext->segs, i);
1358 kxld_seg_deinit(seg);
1359 }
1360 kxld_array_deinit(&kext->segs);
1361
1362 for (i = 0; i < kext->sects.maxitems; ++i) {
1363 sect = kxld_array_get_slot(&kext->sects, i);
1364 kxld_sect_deinit(sect);
1365 }
1366 kxld_array_deinit(&kext->sects);
1367
1368 for (i = 0; i < kext->vtables.maxitems; ++i) {
1369 vtable = kxld_array_get_slot(&kext->vtables, i);
1370 kxld_vtable_deinit(vtable);
1371 }
1372 kxld_array_deinit(&kext->vtables);
1373
1374 kxld_array_deinit(&kext->extrelocs);
1375 kxld_array_deinit(&kext->locrelocs);
1376 kxld_dict_deinit(&kext->vtable_index);
1377
1378 if (kext->symtab) {
1379 kxld_symtab_deinit(kext->symtab);
1380 kxld_free(kext->symtab, kxld_symtab_sizeof());
1381 }
1382
1383 bzero(kext, sizeof(*kext));
1384 }
1385
1386 /*******************************************************************************
1387 *******************************************************************************/
1388 boolean_t
1389 kxld_kext_is_true_kext(const KXLDKext *kext)
1390 {
1391 return (kext->link_type == KXLD_LINK_KEXT);
1392 }
1393
1394 /*******************************************************************************
1395 *******************************************************************************/
1396 void
1397 kxld_kext_get_vmsize(const KXLDKext *kext, u_long *header_size, u_long *vmsize)
1398 {
1399 check(kext);
1400 check(header_size);
1401 check(vmsize);
1402 *header_size = 0;
1403 *vmsize = 0;
1404
1405 /* vmsize is the padded header page(s) + segment vmsizes */
1406
1407 *header_size = (kext->is_final_image) ?
1408 0 : round_page(get_macho_header_size(kext));
1409 *vmsize = *header_size + get_macho_data_size(kext);
1410
1411 }
1412
1413 /*******************************************************************************
1414 *******************************************************************************/
1415 const struct kxld_symtab *
1416 kxld_kext_get_symtab(const KXLDKext *kext)
1417 {
1418 check(kext);
1419
1420 return kext->symtab;
1421 }
1422
1423 /*******************************************************************************
1424 *******************************************************************************/
1425 u_int
1426 kxld_kext_get_num_symbols(const KXLDKext *kext)
1427 {
1428 check(kext);
1429
1430 return kxld_symtab_get_num_symbols(kext->symtab);
1431 }
1432
1433 /*******************************************************************************
1434 *******************************************************************************/
1435 void
1436 kxld_kext_get_vtables(KXLDKext *kext, const KXLDArray **vtables)
1437 {
1438 check(kext);
1439 check(vtables);
1440
1441 *vtables = &kext->vtables;
1442 }
1443
1444 /*******************************************************************************
1445 *******************************************************************************/
1446 u_int
1447 kxld_kext_get_num_vtables(const KXLDKext *kext)
1448 {
1449 check(kext);
1450
1451 return kext->vtables.nitems;
1452 }
1453
1454 /*******************************************************************************
1455 *******************************************************************************/
1456 KXLDSeg *
1457 kxld_kext_get_seg_by_name(const KXLDKext *kext, const char *segname)
1458 {
1459 KXLDSeg *seg = NULL;
1460 u_int i = 0;
1461
1462 for (i = 0; i < kext->segs.nitems; ++i) {
1463 seg = kxld_array_get_item(&kext->segs, i);
1464
1465 if (streq(segname, seg->segname)) break;
1466
1467 seg = NULL;
1468 }
1469
1470 return seg;
1471 }
1472
1473 /*******************************************************************************
1474 *******************************************************************************/
1475 KXLDSect *
1476 kxld_kext_get_sect_by_name(const KXLDKext *kext, const char *segname,
1477 const char *sectname)
1478 {
1479 KXLDSect *sect = NULL;
1480 u_int i = 0;
1481
1482 for (i = 0; i < kext->sects.nitems; ++i) {
1483 sect = kxld_array_get_item(&kext->sects, i);
1484
1485 if (streq(segname, sect->segname) && streq(sectname, sect->sectname)) {
1486 break;
1487 }
1488
1489 sect = NULL;
1490 }
1491
1492 return sect;
1493 }
1494
1495 /*******************************************************************************
1496 *******************************************************************************/
1497 int
1498 kxld_kext_get_sectnum_for_sect(const KXLDKext *kext, const KXLDSect *sect)
1499 {
1500 kern_return_t rval = KERN_FAILURE;
1501 u_int idx = -1;
1502
1503 rval = kxld_array_get_index(&kext->sects, sect, &idx);
1504 if (rval) idx = -1;
1505
1506 return idx;
1507 }
1508
1509 /*******************************************************************************
1510 *******************************************************************************/
/* Returns the array of section names recorded from the kernel image so
 * kext sections can be laid out in matching order on MH_OBJECT-based
 * targets, or NULL when section ordering does not apply.
 *
 * Note: the parameter is marked __unused only for configurations where
 * KXLD_USER_OR_OBJECT is not defined; otherwise it is used below.
 */
const KXLDArray *
kxld_kext_get_section_order(const KXLDKext *kext __unused)
{
#if KXLD_USER_OR_OBJECT
    /* Only the kernel image records an ordering, and only on targets
     * linked from object files.
     */
    if (kext->link_type == KXLD_LINK_KERNEL && target_supports_object(kext)) {
        return kext->section_order;
    }
#endif /* KXLD_USER_OR_OBJECT */

    return NULL;
}
1522
1523 /*******************************************************************************
1524 *******************************************************************************/
1525 static u_long
1526 get_macho_header_size(const KXLDKext *kext)
1527 {
1528 KXLDSeg *seg = NULL;
1529 u_long header_size = 0;
1530 u_int i = 0;
1531
1532 check(kext);
1533
1534 /* Mach, segment, and UUID headers */
1535
1536 if (kxld_kext_is_32_bit(kext)) {
1537 header_size += sizeof(struct mach_header);
1538 } else {
1539 header_size += sizeof(struct mach_header_64);
1540 }
1541
1542 for (i = 0; i < kext->segs.nitems; ++i) {
1543 seg = kxld_array_get_item(&kext->segs, i);
1544 header_size += kxld_seg_get_macho_header_size(seg, kxld_kext_is_32_bit(kext));
1545 }
1546
1547 if (kext->uuid.has_uuid) {
1548 header_size += kxld_uuid_get_macho_header_size();
1549 }
1550
1551 return header_size;
1552 }
1553
1554 /*******************************************************************************
1555 *******************************************************************************/
1556 static u_long
1557 get_macho_data_size(const KXLDKext *kext)
1558 {
1559 KXLDSeg *seg = NULL;
1560 u_long data_size = 0;
1561 u_int i = 0;
1562
1563 check(kext);
1564
1565 for (i = 0; i < kext->segs.nitems; ++i) {
1566 seg = kxld_array_get_item(&kext->segs, i);
1567 data_size += (u_long) kxld_seg_get_vmsize(seg);
1568 }
1569
1570 return data_size;
1571 }
1572
1573 /*******************************************************************************
1574 *******************************************************************************/
/* Writes the fully linked kext image into the caller-allocated buffer
 * linked_object (sized per kxld_kext_get_vmsize) and returns the linked
 * address of the kmod_info structure through kmod_info_kern.
 */
kern_return_t kxld_kext_export_linked_object(const KXLDKext *kext,
    u_char *linked_object, kxld_addr_t *kmod_info_kern)
{
    kern_return_t rval = KERN_FAILURE;
    KXLDSeg *seg = NULL;
    u_long size = 0;
    u_long header_size = 0;
    u_long header_offset = 0;
    u_long data_offset = 0;
    u_int ncmds = 0;
    u_int i = 0;

    check(kext);
    check(linked_object);
    check(kmod_info_kern);
    *kmod_info_kern = 0;

    /* Calculate the size of the headers and data */

    header_size = get_macho_header_size(kext);
    /* Final images carry headers inside their first segment; otherwise
     * the headers occupy their own padded page(s) before the data.
     */
    data_offset = (kext->is_final_image) ? header_size : round_page(header_size);
    size = data_offset + get_macho_data_size(kext);

    /* Copy data to the file */

    /* One LC_SEGMENT* per segment, plus LC_UUID if present. */
    ncmds = kext->segs.nitems + (kext->uuid.has_uuid == TRUE);

    rval = export_macho_header(kext, linked_object, ncmds,
        &header_offset, header_size);
    require_noerr(rval, finish);

    for (i = 0; i < kext->segs.nitems; ++i) {
        seg = kxld_array_get_item(&kext->segs, i);

        rval = kxld_seg_export_macho_to_vm(seg, linked_object, &header_offset,
            header_size, size, kext->link_addr, kxld_kext_is_32_bit(kext));
        require_noerr(rval, finish);
    }

    if (kext->uuid.has_uuid) {
        rval = kxld_uuid_export_macho(&kext->uuid, linked_object,
            &header_offset, header_size);
        require_noerr(rval, finish);
    }

    *kmod_info_kern = kext->kmod_link_addr;

#if !KERNEL
    /* Userspace linking may target the opposite byte order; swap the
     * finished image back to the target's order before returning it.
     */
    unswap_macho(linked_object, kext->host_order, kext->target_order);
#endif /* !KERNEL */

    rval = KERN_SUCCESS;

finish:
    return rval;
}
1631
1632 #if !KERNEL
1633 /*******************************************************************************
1634 *******************************************************************************/
/* Builds a standalone symbol file: a mach-o image containing the kext's
 * segment load commands, symbol table, and UUID. On success the caller
 * owns *_symbol_file (allocated with kxld_page_alloc_untracked,
 * *_filesize bytes) and must free it with kxld_page_free_untracked.
 */
kern_return_t
kxld_kext_export_symbol_file(const KXLDKext *kext,
    u_char **_symbol_file, u_long *_filesize)
{
    kern_return_t rval = KERN_FAILURE;
    KXLDSeg *seg = NULL;
    u_char *file = NULL;
    u_long size = 0;
    u_long header_size = 0;
    u_long header_offset = 0;
    u_long data_offset = 0;
    u_int ncmds = 0;
    u_int i = 0;

    check(kext);
    check(_symbol_file);
    *_symbol_file = NULL;

    /* Calculate the size of the file */

    if (kxld_kext_is_32_bit(kext)) {
        header_size += sizeof(struct mach_header);
    } else {
        header_size += sizeof(struct mach_header_64);
    }

    /* Segment load commands in the header; segment payloads in the data. */
    for (i = 0; i < kext->segs.nitems; ++i) {
        seg = kxld_array_get_item(&kext->segs, i);
        header_size += kxld_seg_get_macho_header_size(seg, kxld_kext_is_32_bit(kext));
        size += kxld_seg_get_macho_data_size(seg);
    }

    header_size += kxld_symtab_get_macho_header_size();
    size += kxld_symtab_get_macho_data_size(kext->symtab, FALSE,
        kxld_kext_is_32_bit(kext));

    if (kext->uuid.has_uuid) {
        header_size += kxld_uuid_get_macho_header_size();
    }

    /* The data region starts on a page boundary after the headers. */
    data_offset = round_page(header_size);
    size += data_offset;

    /* Allocate the symbol file */

    file = kxld_page_alloc_untracked(size);
    require_action(file, finish, rval=KERN_RESOURCE_SHORTAGE);
    bzero(file, size);

    /* Copy data to the file */

    ncmds = kext->segs.nitems + (kext->uuid.has_uuid == TRUE) + 1; /* +1 for symtab */
    rval = export_macho_header(kext, file, ncmds, &header_offset, header_size);
    require_noerr(rval, finish);

    /* header_offset and data_offset advance in lockstep: each exporter
     * writes its load command at header_offset and its payload at
     * data_offset.
     */
    for (i = 0; i < kext->segs.nitems; ++i) {
        seg = kxld_array_get_item(&kext->segs, i);
        rval = kxld_seg_export_macho_to_file_buffer(seg, file, &header_offset,
            header_size, &data_offset, size, kxld_kext_is_32_bit(kext));
        require_noerr(rval, finish);
    }

    rval = kxld_symtab_export_macho(kext->symtab, file, &header_offset,
        header_size, &data_offset, size, FALSE, kxld_kext_is_32_bit(kext));
    require_noerr(rval, finish);

    if (kext->uuid.has_uuid) {
        rval = kxld_uuid_export_macho(&kext->uuid, file, &header_offset,
            header_size);
        require_noerr(rval, finish);
    }

    header_offset = header_size;

    /* Commit */

    /* Symbol files are written in the target's byte order. */
    unswap_macho(file, kext->host_order, kext->target_order);

    *_filesize = size;
    *_symbol_file = file;
    file = NULL;   /* ownership transferred to the caller */
    rval = KERN_SUCCESS;

finish:

    if (file) {
        kxld_page_free_untracked(file, size);
        file = NULL;
    }

    check(!file);
    check((!rval) ^ (!*_symbol_file));

    return rval;
}
1730 #endif
1731
1732 /*******************************************************************************
1733 *******************************************************************************/
/* Returns TRUE when the link target's byte order differs from the
 * host's. In the kernel, kxld only links for the running architecture,
 * so no swapping is ever required there.
 */
boolean_t
kxld_kext_target_needs_swap(const KXLDKext *kext __unused)
{
#if KERNEL
    return FALSE;
#else
    return (kext->target_order != kext->host_order);
#endif /* KERNEL */
}
1743
1744 /*******************************************************************************
1745 *******************************************************************************/
/* Writes the mach header for the kext into buf at *header_offset,
 * dispatching to the 32- or 64-bit exporter based on the kext's target
 * architecture. Advances *header_offset past the header on success.
 */
static kern_return_t
export_macho_header(const KXLDKext *kext, u_char *buf, u_int ncmds,
    u_long *header_offset, u_long header_size)
{
    kern_return_t rval = KERN_FAILURE;

    check(kext);
    check(buf);
    check(header_offset);

    KXLD_3264_FUNC(kxld_kext_is_32_bit(kext), rval,
        export_macho_header_32, export_macho_header_64,
        kext, buf, ncmds, header_offset, header_size);
    require_noerr(rval, finish);

    rval = KERN_SUCCESS;

finish:
    return rval;
}
1766
1767 #if KXLD_USER_OR_ILP32
1768 /*******************************************************************************
1769 *******************************************************************************/
1770 static kern_return_t
1771 export_macho_header_32(const KXLDKext *kext, u_char *buf, u_int ncmds,
1772 u_long *header_offset, u_long header_size)
1773 {
1774 kern_return_t rval = KERN_FAILURE;
1775 struct mach_header *mach = NULL;
1776
1777 check(kext);
1778 check(buf);
1779 check(header_offset);
1780
1781 require_action(sizeof(*mach) <= header_size - *header_offset, finish,
1782 rval=KERN_FAILURE);
1783 mach = (struct mach_header *) (buf + *header_offset);
1784
1785 mach->magic = MH_MAGIC;
1786 mach->cputype = kext->cputype;
1787 mach->filetype = kext->filetype;
1788 mach->ncmds = ncmds;
1789 mach->sizeofcmds = (uint32_t) (header_size - sizeof(*mach));
1790 mach->flags = MH_NOUNDEFS;
1791
1792 *header_offset += sizeof(*mach);
1793
1794 rval = KERN_SUCCESS;
1795
1796 finish:
1797 return rval;
1798 }
1799 #endif /* KXLD_USER_OR_ILP32 */
1800
1801 #if KXLD_USER_OR_LP64
1802 /*******************************************************************************
1803 *******************************************************************************/
1804 static kern_return_t
1805 export_macho_header_64(const KXLDKext *kext, u_char *buf, u_int ncmds,
1806 u_long *header_offset, u_long header_size)
1807 {
1808 kern_return_t rval = KERN_FAILURE;
1809 struct mach_header_64 *mach = NULL;
1810
1811 check(kext);
1812 check(buf);
1813 check(header_offset);
1814
1815 require_action(sizeof(*mach) <= header_size - *header_offset, finish,
1816 rval=KERN_FAILURE);
1817 mach = (struct mach_header_64 *) (buf + *header_offset);
1818
1819 mach->magic = MH_MAGIC_64;
1820 mach->cputype = kext->cputype;
1821 mach->cpusubtype = kext->cpusubtype;
1822 mach->filetype = kext->filetype;
1823 mach->ncmds = ncmds;
1824 mach->sizeofcmds = (uint32_t) (header_size - sizeof(*mach));
1825 mach->flags = MH_NOUNDEFS;
1826
1827 *header_offset += sizeof(*mach);
1828
1829 rval = KERN_SUCCESS;
1830
1831 finish:
1832 return rval;
1833 }
1834 #endif /* KXLD_USER_OR_LP64 */
1835
1836 /*******************************************************************************
1837 *******************************************************************************/
/* "Links" a pseudokext: resolves and validates its symbols against the
 * dependencies' defined symbols and re-exports the dependencies' patched
 * vtables. Pseudokexts have no segments, so no relocation is performed
 * (real kexts go through kxld_kext_relocate instead).
 */
kern_return_t
kxld_kext_resolve(KXLDKext *kext, struct kxld_dict *patched_vtables,
    struct kxld_dict *defined_symbols)
{
    kern_return_t rval = KERN_FAILURE;

    /* Only pseudokexts take this path. */
    require_action(kext->link_type == KXLD_LINK_PSEUDO_KEXT, finish,
        rval=KERN_FAILURE);

    /* Resolve symbols */
    rval = resolve_symbols(kext, defined_symbols, NULL);
    require_noerr(rval, finish);

    /* Validate symbols */
    rval = validate_symbols(kext);
    require_noerr(rval, finish);

    /* Pseudokexts re-export their dependencies' vtables */
    rval = copy_vtables(kext, patched_vtables);
    require_noerr(rval, finish);

    rval = KERN_SUCCESS;

finish:
    return rval;
}
1864
1865 /*******************************************************************************
1866 *******************************************************************************/
/* Performs the full link step for a true kext: relocates its segments,
 * sections, and symbols to link_address, fills in the kmod_info
 * structure, resolves symbols against the dependencies, patches C++
 * vtables, validates, and finally processes relocation entries (and the
 * GOT, where the target has one).
 */
kern_return_t
kxld_kext_relocate(KXLDKext *kext, kxld_addr_t link_address,
    KXLDDict *patched_vtables, KXLDDict *defined_symbols,
    KXLDDict *obsolete_symbols)
{
    kern_return_t rval = KERN_FAILURE;
    KXLDSeg *seg = NULL;
    u_int i = 0;

    check(kext);
    check(patched_vtables);
    check(defined_symbols);

    /* Pseudokexts are handled by kxld_kext_resolve; only true kexts may
     * be relocated.
     */
    require_action(kext->link_type == KXLD_LINK_KEXT, finish, rval=KERN_FAILURE);

    kext->link_addr = link_address;

    /* Relocate segments (which relocates the sections) */
    for (i = 0; i < kext->segs.nitems; ++i) {
        seg = kxld_array_get_item(&kext->segs, i);
        kxld_seg_relocate(seg, link_address);
    }

    /* Relocate symbols */
    rval = kxld_symtab_relocate(kext->symtab, &kext->sects);
    require_noerr(rval, finish);

    /* Populate kmod info structure */
    rval = populate_kmod_info(kext);
    require_noerr(rval, finish);

    /* Resolve symbols */
    rval = resolve_symbols(kext, defined_symbols, obsolete_symbols);
    require_noerr(rval, finish);

    /* Patch vtables */
    rval = patch_vtables(kext, patched_vtables, defined_symbols);
    require_noerr(rval, finish);

    /* Validate symbols */
    rval = validate_symbols(kext);
    require_noerr(rval, finish);

    /* Process relocation entries and populate the global offset table.
     *
     * For final linked images: the relocation entries are contained in a couple
     * of tables hanging off the end of the symbol table. The GOT has its own
     * section created by the linker; we simply need to fill it.
     *
     * For object files: the relocation entries are bound to each section.
     * The GOT, if it exists for the target architecture, is created by kxld,
     * and we must populate it according to our internal structures.
     */
    if (kext->is_final_image) {
#if KXLD_USER_OR_BUNDLE
        rval = process_symbol_pointers(kext);
        require_noerr(rval, finish);

        rval = process_relocs_from_tables(kext);
        require_noerr(rval, finish);
#else
        /* A final image on a build without bundle support is unreachable
         * by construction; fail hard if it happens.
         */
        require_action(FALSE, finish, rval=KERN_FAILURE);
#endif /* KXLD_USER_OR_BUNDLE */
    } else {
#if KXLD_USER_OR_GOT
        /* Populate GOT */
        rval = populate_got(kext);
        require_noerr(rval, finish);
#endif /* KXLD_USER_OR_GOT */
#if KXLD_USER_OR_OBJECT
        rval = process_relocs_from_sections(kext);
        require_noerr(rval, finish);
#else
        require_action(FALSE, finish, rval=KERN_FAILURE);
#endif /* KXLD_USER_OR_OBJECT */
    }

    rval = KERN_SUCCESS;

finish:
    return rval;
}
1949
1950 /*******************************************************************************
1951 *******************************************************************************/
static kern_return_t
resolve_symbols(KXLDKext *kext, KXLDDict *defined_symbols,
    KXLDDict *obsolete_symbols)
{
    kern_return_t rval = KERN_FAILURE;
    KXLDSymtabIterator iter;
    KXLDSym *sym = NULL;
    void *addrp = NULL;
    kxld_addr_t addr = 0;
    const char *name = NULL;
    boolean_t tests_for_weak = FALSE;
    boolean_t error = FALSE;
    boolean_t warning = FALSE;
    char *demangled_name = NULL;
    size_t demangled_length = 0;

    check(kext);
    check(defined_symbols);

    /* Check if the kext tests for weak symbols */
    sym = kxld_symtab_get_symbol_by_name(kext->symtab, KXLD_WEAK_TEST_SYMBOL);
    tests_for_weak = (sym != NULL);

    /* Check for duplicate symbols.  Walk every exported symbol of this kext
     * and compare its link address against any same-named entry already in
     * defined_symbols; a mismatch is reported (all duplicates are logged
     * before the function fails below).
     */
    kxld_symtab_iterator_init(&iter, kext->symtab, kxld_sym_is_exported, FALSE);
    while ((sym = kxld_symtab_iterator_get_next(&iter))) {
        addrp = kxld_dict_find(defined_symbols, sym->name);
        if (addrp) {
            /* Convert to a kxld_addr_t.  The dictionary stores raw 32- or
             * 64-bit values depending on the target word size.
             */
            if (kxld_kext_is_32_bit(kext)) {
                addr = (kxld_addr_t) (*(uint32_t*)addrp);
            } else {
                addr = (kxld_addr_t) (*(uint64_t*)addrp);
            }

            /* Not a problem if the symbols have the same address */
            if (addr == sym->link_addr) {
                continue;
            }

            /* Log the header line lazily so it appears only when at least
             * one real duplicate exists.
             */
            if (!error) {
                error = TRUE;
                kxld_log(kKxldLogLinking, kKxldLogErr,
                    "The following symbols were defined more than once:");
            }

            kxld_log(kKxldLogLinking, kKxldLogErr, "\t%s: %p - %p",
                kxld_demangle(sym->name, &demangled_name, &demangled_length),
                (void *) (uintptr_t) sym->link_addr,
                (void *) (uintptr_t) addr);
        }
    }
    require_noerr_action(error, finish, rval=KERN_FAILURE);

    /* Resolve undefined and indirect symbols */

    /* Iterate over all unresolved symbols */
    kxld_symtab_iterator_init(&iter, kext->symtab,
        kxld_sym_is_unresolved, FALSE);
    while ((sym = kxld_symtab_iterator_get_next(&iter))) {

        /* Common symbols are not supported */
        if (kxld_sym_is_common(sym)) {

            /* Header line is logged once; the message differs depending on
             * whether the target architecture supports common symbols at all.
             */
            if (!error) {
                error = TRUE;
                if (target_supports_common(kext)) {
                    kxld_log(kKxldLogLinking, kKxldLogErr,
                        "The following common symbols were not resolved:");
                } else {
                    kxld_log(kKxldLogLinking, kKxldLogErr,
                        "Common symbols are not supported in kernel extensions. "
                        "Use -fno-common to build your kext. "
                        "The following are common symbols:");
                }
            }
            kxld_log(kKxldLogLinking, kKxldLogErr, "\t%s",
                kxld_demangle(sym->name, &demangled_name, &demangled_length));

        } else {

            /* Find the address of the defined symbol.  Indirect symbols are
             * looked up by their alias rather than their own name.
             */
            if (kxld_sym_is_undefined(sym)) {
                name = sym->name;
            } else {
                name = sym->alias;
            }
            addrp = kxld_dict_find(defined_symbols, name);

            /* Resolve the symbol. If a definition cannot be found, then:
             * 1) Pseudokexts log a warning and proceed
             * 2) Actual kexts delay the error until validation in case vtable
             *    patching replaces the undefined symbol.
             */

            if (addrp) {

                /* Convert to a kxld_addr_t */
                if (kxld_kext_is_32_bit(kext)) {
                    addr = (kxld_addr_t) (*(uint32_t*)addrp);
                } else {
                    addr = (kxld_addr_t) (*(uint64_t*)addrp);
                }

                boolean_t is_exported = (kext->link_type == KXLD_LINK_PSEUDO_KEXT);

                rval = kxld_sym_resolve(sym, addr, is_exported);
                require_noerr(rval, finish);

                /* Resolution against an obsolete symbol is allowed but
                 * warned about.
                 */
                if (obsolete_symbols && kxld_dict_find(obsolete_symbols, name)) {
                    kxld_log(kKxldLogLinking, kKxldLogWarn,
                        "This kext uses obsolete symbol %s.",
                        kxld_demangle(name, &demangled_name, &demangled_length));
                }

            } else if (kext->link_type == KXLD_LINK_PSEUDO_KEXT) {
                /* Pseudokexts ignore undefined symbols, because any actual
                 * kexts that need those symbols will fail to link anyway, so
                 * there's no need to block well-behaved kexts.
                 */
                if (!warning) {
                    kxld_log(kKxldLogLinking, kKxldLogWarn,
                        "This symbol set has the following unresolved symbols:");
                    warning = TRUE;
                }
                kxld_log(kKxldLogLinking, kKxldLogErr, "\t%s",
                    kxld_demangle(sym->name, &demangled_name, &demangled_length));
                kxld_sym_delete(sym);

            } else if (kxld_sym_is_weak(sym)) {
                /* Make sure that the kext has referenced gOSKextUnresolved.
                 */
                require_action(tests_for_weak, finish,
                    rval=KERN_FAILURE;
                    kxld_log(kKxldLogLinking, kKxldLogErr,
                        "This kext has weak references but does not test for "
                        "them. Test for weak references with "
                        "OSKextIsSymbolResolved()."));

#if KERNEL
                /* Get the address of the default weak address.
                 */
                addr = (kxld_addr_t) &kext_weak_symbol_referenced;
#else
                /* This is run during symbol generation only, so we only
                 * need a filler value here.
                 */
                addr = kext->link_addr;
#endif /* KERNEL */

                rval = kxld_sym_resolve(sym, addr, /* exported */ FALSE);
                require_noerr(rval, finish);
            }
        }
    }
    require_noerr_action(error, finish, rval=KERN_FAILURE);

    rval = KERN_SUCCESS;

finish:
    if (demangled_name) kxld_free(demangled_name, demangled_length);

    return rval;
}
2116
2117 /*******************************************************************************
2118 *******************************************************************************/
2119 static boolean_t
2120 target_supports_strict_patching(KXLDKext *kext)
2121 {
2122 check(kext);
2123
2124 return (kext->cputype != CPU_TYPE_I386 &&
2125 kext->cputype != CPU_TYPE_POWERPC);
2126 }
2127
2128 /*******************************************************************************
2129 * We must patch vtables to ensure binary compatibility, and to perform that
2130 * patching, we have to determine the vtables' inheritance relationships. The
2131 * MetaClass system gives us a way to do that:
2132 * 1) Iterate over all of the super MetaClass pointer symbols. Every class
2133 * that inherits from OSObject will have a pointer in its MetaClass that
2134 * points to the MetaClass's super MetaClass.
2135 * 2) Derive the name of the class from the super MetaClass pointer.
2136 * 3) Derive the name of the class's vtable from the name of the class
2137 * 4) Follow the super MetaClass pointer to get the address of the super
2138 * MetaClass's symbol
2139 * 5) Look up the super MetaClass symbol by address
2140 * 6) Derive the super class's name from the super MetaClass name
2141 * 7) Derive the super class's vtable from the super class's name
2142 * This procedure will allow us to find all of the OSObject-derived classes and
2143 * their super classes, and thus patch all of the vtables.
2144 *
2145 * We also have to take care to patch up the MetaClass's vtables. The
2146 * MetaClasses follow a parallel hierarchy to the classes, so once we have the
2147 * class name and super class name, we can also derive the MetaClass name and
2148 * the super MetaClass name, and thus find and patch their vtables as well.
2149 *******************************************************************************/
2150
2151 #define kOSMetaClassVTableName "__ZTV11OSMetaClass"
2152
2153 static kern_return_t
2154 patch_vtables(KXLDKext *kext, KXLDDict *patched_vtables,
2155 KXLDDict *defined_symbols)
2156 {
2157 kern_return_t rval = KERN_FAILURE;
2158 KXLDSymtabIterator iter;
2159 KXLDSym *metaclass = NULL;
2160 KXLDSym *super_metaclass_pointer = NULL;
2161 KXLDSym *final_sym = NULL;
2162 KXLDVTable *vtable = NULL;
2163 KXLDVTable *super_vtable = NULL;
2164 char class_name[KXLD_MAX_NAME_LEN];
2165 char super_class_name[KXLD_MAX_NAME_LEN];
2166 char vtable_name[KXLD_MAX_NAME_LEN];
2167 char super_vtable_name[KXLD_MAX_NAME_LEN];
2168 char final_sym_name[KXLD_MAX_NAME_LEN];
2169 char *demangled_name1 = NULL;
2170 char *demangled_name2 = NULL;
2171 size_t demangled_length1 = 0;;
2172 size_t demangled_length2 = 0;
2173 size_t len = 0;
2174 u_int nvtables = 0;
2175 u_int npatched = 0;
2176 u_int nprogress = 0;
2177 boolean_t failure = FALSE;
2178
2179 check(kext);
2180 check(patched_vtables);
2181
2182 /* Find each super meta class pointer symbol */
2183
2184 kxld_symtab_iterator_init(&iter, kext->symtab,
2185 kxld_sym_is_super_metaclass_pointer, FALSE);
2186 nvtables = kxld_symtab_iterator_get_num_remaining(&iter);
2187
2188 while (npatched < nvtables) {
2189 npatched = 0;
2190 nprogress = 0;
2191 kxld_symtab_iterator_reset(&iter);
2192 while((super_metaclass_pointer = kxld_symtab_iterator_get_next(&iter)))
2193 {
2194 /* Get the class name from the smc pointer */
2195 rval = kxld_sym_get_class_name_from_super_metaclass_pointer(
2196 super_metaclass_pointer, class_name, sizeof(class_name));
2197 require_noerr(rval, finish);
2198
2199 /* Get the vtable name from the class name */
2200 rval = kxld_sym_get_vtable_name_from_class_name(class_name,
2201 vtable_name, sizeof(vtable_name));
2202 require_noerr(rval, finish);
2203
2204 /* Get the vtable and make sure it hasn't been patched */
2205 vtable = kxld_dict_find(&kext->vtable_index, vtable_name);
2206 require_action(vtable, finish, rval=KERN_FAILURE;
2207 kxld_log(kKxldLogPatching, kKxldLogErr, kKxldLogMissingVtable,
2208 vtable_name, class_name));
2209
2210 if (!vtable->is_patched) {
2211
2212 /* Find the SMCP's meta class symbol */
2213 rval = get_metaclass_symbol_from_super_meta_class_pointer_symbol(
2214 kext, super_metaclass_pointer, &metaclass);
2215 require_noerr(rval, finish);
2216
2217 /* Get the super class name from the super metaclass */
2218 rval = kxld_sym_get_class_name_from_metaclass(metaclass,
2219 super_class_name, sizeof(super_class_name));
2220 require_noerr(rval, finish);
2221
2222 /* Get the super vtable name from the class name */
2223 rval = kxld_sym_get_vtable_name_from_class_name(super_class_name,
2224 super_vtable_name, sizeof(super_vtable_name));
2225 require_noerr(rval, finish);
2226
2227 if (failure) {
2228 kxld_log(kKxldLogPatching, kKxldLogErr,
2229 "\t'%s' (super vtable '%s')",
2230 kxld_demangle(vtable_name, &demangled_name1,
2231 &demangled_length1),
2232 kxld_demangle(super_vtable_name, &demangled_name2,
2233 &demangled_length2));
2234 continue;
2235 }
2236
2237 /* Get the super vtable if it's been patched */
2238 super_vtable = kxld_dict_find(patched_vtables, super_vtable_name);
2239 if (!super_vtable) continue;
2240
2241 /* Get the final symbol's name from the super vtable */
2242 rval = kxld_sym_get_final_sym_name_from_class_name(super_class_name,
2243 final_sym_name, sizeof(final_sym_name));
2244 require_noerr(rval, finish);
2245
2246 /* Verify that the final symbol does not exist. First check
2247 * all the externally defined symbols, then check locally.
2248 */
2249 final_sym = kxld_dict_find(defined_symbols, final_sym_name);
2250 if (!final_sym) {
2251 final_sym = kxld_symtab_get_symbol_by_name(kext->symtab,
2252 final_sym_name);
2253 }
2254 require_action(!final_sym, finish,
2255 rval=KERN_FAILURE;
2256 kxld_log(kKxldLogPatching, kKxldLogErr,
2257 "Class '%s' is a subclass of final class '%s'.",
2258 kxld_demangle(class_name, &demangled_name1,
2259 &demangled_length1),
2260 kxld_demangle(super_class_name, &demangled_name2,
2261 &demangled_length2)));
2262
2263 /* Patch the class's vtable */
2264 rval = kxld_vtable_patch(vtable, super_vtable, kext->symtab,
2265 target_supports_strict_patching(kext));
2266 require_noerr(rval, finish);
2267
2268 /* Add the class's vtable to the set of patched vtables */
2269 rval = kxld_dict_insert(patched_vtables, vtable->name, vtable);
2270 require_noerr(rval, finish);
2271
2272 /* Get the meta vtable name from the class name */
2273 rval = kxld_sym_get_meta_vtable_name_from_class_name(class_name,
2274 vtable_name, sizeof(vtable_name));
2275 require_noerr(rval, finish);
2276
2277 /* Get the meta vtable. Whether or not it should exist has already
2278 * been tested in create_vtables(), so if it doesn't exist and we're
2279 * still running, we can safely skip it.
2280 */
2281 vtable = kxld_dict_find(&kext->vtable_index, vtable_name);
2282 if (!vtable) {
2283 ++nprogress;
2284 ++npatched;
2285 continue;
2286 }
2287 require_action(!vtable->is_patched, finish, rval=KERN_FAILURE);
2288
2289 /* There is no way to look up a metaclass vtable at runtime, but
2290 * we know that every class's metaclass inherits directly from
2291 * OSMetaClass, so we just hardcode that vtable name here.
2292 */
2293 len = strlcpy(super_vtable_name, kOSMetaClassVTableName,
2294 sizeof(super_vtable_name));
2295 require_action(len == const_strlen(kOSMetaClassVTableName),
2296 finish, rval=KERN_FAILURE);
2297
2298 /* Get the super meta vtable */
2299 super_vtable = kxld_dict_find(patched_vtables, super_vtable_name);
2300 require_action(super_vtable && super_vtable->is_patched,
2301 finish, rval=KERN_FAILURE);
2302
2303 /* Patch the meta class's vtable */
2304 rval = kxld_vtable_patch(vtable, super_vtable,
2305 kext->symtab, target_supports_strict_patching(kext));
2306 require_noerr(rval, finish);
2307
2308 /* Add the MetaClass's vtable to the set of patched vtables */
2309 rval = kxld_dict_insert(patched_vtables, vtable->name, vtable);
2310 require_noerr(rval, finish);
2311
2312 ++nprogress;
2313 }
2314
2315 ++npatched;
2316 }
2317
2318 require_action(!failure, finish, rval=KERN_FAILURE);
2319 if (!nprogress) {
2320 failure = TRUE;
2321 kxld_log(kKxldLogPatching, kKxldLogErr,
2322 "The following vtables were unpatchable because each one's "
2323 "parent vtable either was not found or also was not patchable:");
2324 }
2325 }
2326
2327 rval = KERN_SUCCESS;
2328 finish:
2329 if (demangled_name1) kxld_free(demangled_name1, demangled_length1);
2330 if (demangled_name2) kxld_free(demangled_name2, demangled_length2);
2331
2332 return rval;
2333 }
2334
2335 /*******************************************************************************
2336 *******************************************************************************/
2337 static kern_return_t
2338 validate_symbols(KXLDKext *kext)
2339 {
2340 kern_return_t rval = KERN_FAILURE;
2341 KXLDSymtabIterator iter;
2342 KXLDSym *sym = NULL;
2343 u_int error = FALSE;
2344 char *demangled_name = NULL;
2345 size_t demangled_length = 0;
2346
2347 /* Check for any unresolved symbols */
2348 kxld_symtab_iterator_init(&iter, kext->symtab, kxld_sym_is_unresolved, FALSE);
2349 while ((sym = kxld_symtab_iterator_get_next(&iter))) {
2350 if (!error) {
2351 error = TRUE;
2352 kxld_log(kKxldLogLinking, kKxldLogErr,
2353 "The following symbols are unresolved for this kext:");
2354 }
2355 kxld_log(kKxldLogLinking, kKxldLogErr, "\t%s",
2356 kxld_demangle(sym->name, &demangled_name, &demangled_length));
2357 }
2358 require_noerr_action(error, finish, rval=KERN_FAILURE);
2359
2360 rval = KERN_SUCCESS;
2361
2362 finish:
2363 if (demangled_name) kxld_free(demangled_name, demangled_length);
2364 return rval;
2365 }
2366
2367 #if KXLD_USER_OR_GOT || KXLD_USER_OR_COMMON
2368 /*******************************************************************************
2369 *******************************************************************************/
2370 static kern_return_t
2371 add_section(KXLDKext *kext, KXLDSect **sect)
2372 {
2373 kern_return_t rval = KERN_FAILURE;
2374 u_int nsects = kext->sects.nitems;
2375
2376 rval = kxld_array_resize(&kext->sects, nsects + 1);
2377 require_noerr(rval, finish);
2378
2379 *sect = kxld_array_get_item(&kext->sects, nsects);
2380
2381 rval = KERN_SUCCESS;
2382
2383 finish:
2384 return rval;
2385 }
2386 #endif /* KXLD_USER_OR_GOT || KXLD_USER_OR_COMMON */
2387
2388 #if KXLD_USER_OR_GOT
2389 /*******************************************************************************
2390 *******************************************************************************/
2391 static boolean_t
2392 target_has_got(const KXLDKext *kext)
2393 {
2394 return FALSE:
2395 }
2396
2397 /*******************************************************************************
2398 * Create and initialize the Global Offset Table
2399 *******************************************************************************/
2400 static kern_return_t
2401 create_got(KXLDKext *kext)
2402 {
2403 kern_return_t rval = KERN_FAILURE;
2404 KXLDSect *sect = NULL;
2405 u_int ngots = 0;
2406 u_int i = 0;
2407
2408 if (!target_has_got(kext)) {
2409 rval = KERN_SUCCESS;
2410 goto finish;
2411 }
2412
2413 for (i = 0; i < kext->sects.nitems; ++i) {
2414 sect = kxld_array_get_item(&kext->sects, i);
2415 ngots += kxld_sect_get_ngots(sect, &kext->relocator,
2416 kext->symtab);
2417 }
2418
2419 rval = add_section(kext, &sect);
2420 require_noerr(rval, finish);
2421
2422 rval = kxld_sect_init_got(sect, ngots);
2423 require_noerr(rval, finish);
2424
2425 kext->got_is_created = TRUE;
2426 rval = KERN_SUCCESS;
2427
2428 finish:
2429 return rval;
2430 }
2431
2432 /*******************************************************************************
2433 *******************************************************************************/
2434 static kern_return_t
2435 populate_got(KXLDKext *kext)
2436 {
2437 kern_return_t rval = KERN_FAILURE;
2438 KXLDSect *sect = NULL;
2439 u_int i = 0;
2440
2441 if (!target_has_got(kext) || !kext->got_is_created) {
2442 rval = KERN_SUCCESS;
2443 goto finish;
2444 }
2445
2446 for (i = 0; i < kext->sects.nitems; ++i) {
2447 sect = kxld_array_get_item(&kext->sects, i);
2448 if (streq_safe(sect->segname, KXLD_SEG_GOT, sizeof(KXLD_SEG_GOT)) &&
2449 streq_safe(sect->sectname, KXLD_SECT_GOT, sizeof(KXLD_SECT_GOT)))
2450 {
2451 kxld_sect_populate_got(sect, kext->symtab,
2452 kxld_kext_target_needs_swap(kext));
2453 break;
2454 }
2455 }
2456
2457 require_action(i < kext->sects.nitems, finish, rval=KXLD_MISSING_GOT);
2458
2459 rval = KERN_SUCCESS;
2460
2461 finish:
2462 return rval;
2463 }
2464 #endif /* KXLD_USER_OR_GOT */
2465
2466 /*******************************************************************************
2467 *******************************************************************************/
2468 static boolean_t
2469 target_supports_common(const KXLDKext *kext)
2470 {
2471 check(kext);
2472 return (kext->cputype == CPU_TYPE_I386 ||
2473 kext->cputype == CPU_TYPE_POWERPC);
2474 }
2475
2476 #if KXLD_USER_OR_COMMON
2477 /*******************************************************************************
2478 * If there are common symbols, calculate how much space they'll need
2479 * and create/grow the __DATA __common section to accommodate them.
2480 * Then, resolve them against that section.
2481 *******************************************************************************/
static kern_return_t
resolve_common_symbols(KXLDKext *kext)
{
    kern_return_t rval = KERN_FAILURE;
    KXLDSymtabIterator iter;
    KXLDSym *sym = NULL;
    KXLDSect *sect = NULL;
    kxld_addr_t base_addr = 0;
    kxld_size_t size = 0;
    kxld_size_t total_size = 0;
    u_int align = 0;
    u_int max_align = 0;
    u_int sectnum = 0;

    /* Targets without common-symbol support have nothing to resolve here;
     * resolve_symbols() reports their common symbols as errors instead.
     */
    if (!target_supports_common(kext)) {
        rval = KERN_SUCCESS;
        goto finish;
    }

    /* Iterate over the common symbols to calculate their total aligned size */
    kxld_symtab_iterator_init(&iter, kext->symtab, kxld_sym_is_common, FALSE);
    while ((sym = kxld_symtab_iterator_get_next(&iter))) {
        align = kxld_sym_get_common_align(sym);
        size = kxld_sym_get_common_size(sym);

        if (align > max_align) max_align = align;

        /* Align the running total before adding each symbol, mirroring the
         * placement loop below so the computed size is an upper bound.
         */
        total_size = kxld_align_address(total_size, align) + size;
    }

    /* If there are common symbols, grow or create the __DATA __common section
     * to hold them.
     */
    if (total_size) {
        sect = kxld_kext_get_sect_by_name(kext, SEG_DATA, SECT_COMMON);
        if (sect) {
            /* Existing section: common data is appended after its current
             * contents.
             */
            base_addr = sect->base_addr + sect->size;

            /* NOTE(review): kxld_sect_grow's return value is ignored here —
             * presumably it cannot fail in this configuration; confirm.
             */
            kxld_sect_grow(sect, total_size, max_align);
        } else {
            /* New zerofill section: addresses are section-relative from 0. */
            base_addr = 0;

            rval = add_section(kext, &sect);
            require_noerr(rval, finish);

            kxld_sect_init_zerofill(sect, SEG_DATA, SECT_COMMON,
                total_size, max_align);
        }

        /* Resolve the common symbols against the new section */
        rval = kxld_array_get_index(&kext->sects, sect, &sectnum);
        require_noerr(rval, finish);

        /* Second pass: assign each common symbol an aligned slot within the
         * section, advancing base_addr as we go.
         */
        kxld_symtab_iterator_reset(&iter);
        while ((sym = kxld_symtab_iterator_get_next(&iter))) {
            align = kxld_sym_get_common_align(sym);
            size = kxld_sym_get_common_size(sym);

            base_addr = kxld_align_address(base_addr, align);
            kxld_sym_resolve_common(sym, sectnum, base_addr);

            base_addr += size;
        }
    }

    rval = KERN_SUCCESS;

finish:
    return rval;
}
2552 #endif /* KXLD_USER_OR_COMMON */
2553
2554 /*******************************************************************************
2555 *******************************************************************************/
static kern_return_t
get_metaclass_symbol_from_super_meta_class_pointer_symbol(KXLDKext *kext,
    KXLDSym *super_metaclass_pointer_sym, KXLDSym **metaclass)
{
    kern_return_t rval = KERN_FAILURE;
    KXLDSect *sect = NULL;
    KXLDReloc *reloc = NULL;
    uint32_t offset = 0;

    check(kext);
    check(super_metaclass_pointer_sym);
    check(metaclass);
    *metaclass = NULL;

    /* The SMCP symbol lives in some section; we need that section either for
     * its relocation entries (object files) or its data (final images).
     */
    sect = kxld_array_get_item(&kext->sects, super_metaclass_pointer_sym->sectnum);
    require_action(sect, finish, rval=KERN_FAILURE);

    /* Find the relocation entry for the super metaclass pointer and get the
     * symbol associated with that relocation entry
     */

    if (kext->is_final_image) {
        /* The relocation entry could be in either the external or local
         * relocation entries.  kxld_reloc_get_symbol() can handle either
         * type.  Final images index relocations by the symbol's base
         * address rather than a section offset.
         */
        reloc = kxld_reloc_get_reloc_by_offset(&kext->extrelocs,
            super_metaclass_pointer_sym->base_addr);
        if (!reloc) {
            reloc = kxld_reloc_get_reloc_by_offset(&kext->locrelocs,
                super_metaclass_pointer_sym->base_addr);
        }
        require_action(reloc, finish, rval=KERN_FAILURE);

        *metaclass = kxld_reloc_get_symbol(&kext->relocator, reloc, kext->file,
            kext->symtab);
    } else {
        /* Object files keep per-section relocation entries keyed by the
         * symbol's offset within its section.
         */
        offset = kxld_sym_get_section_offset(super_metaclass_pointer_sym, sect);

        reloc = kxld_reloc_get_reloc_by_offset(&sect->relocs, offset);
        require_action(reloc, finish, rval=KERN_FAILURE);

        *metaclass = kxld_reloc_get_symbol(&kext->relocator, reloc, sect->data,
            kext->symtab);
    }
    require_action(*metaclass, finish, rval=KERN_FAILURE);

    rval = KERN_SUCCESS;

finish:
    return rval;
}
2608
2609 /*******************************************************************************
2610 *******************************************************************************/
static kern_return_t
copy_vtables(KXLDKext *kext, const KXLDDict *patched_vtables)
{
    kern_return_t rval = KERN_FAILURE;
    KXLDSymtabIterator iter;
    KXLDSym *sym = NULL;
    KXLDVTable *vtable = NULL, *src = NULL;
    u_int i = 0;
    u_int nvtables = 0;
    char class_name[KXLD_MAX_NAME_LEN];
    char meta_vtable_name[KXLD_MAX_NAME_LEN];

    kxld_symtab_iterator_init(&iter, kext->symtab,
        kxld_sym_is_class_vtable, FALSE);

    /* The iterator tracks all the class vtables, so we double the number of
     * vtables we're expecting because we use the class vtables to find the
     * MetaClass vtables.
     */
    nvtables = kxld_symtab_iterator_get_num_remaining(&iter) * 2;
    rval = kxld_array_init(&kext->vtables, sizeof(KXLDVTable), nvtables);
    require_noerr(rval, finish);

    while ((sym = kxld_symtab_iterator_get_next(&iter))) {
        /* Every class vtable symbol must have a patched vtable by now. */
        src = kxld_dict_find(patched_vtables, sym->name);
        require_action(src, finish, rval=KERN_FAILURE);

        /* i indexes the next free slot in kext->vtables. */
        vtable = kxld_array_get_item(&kext->vtables, i++);
        rval = kxld_vtable_copy(vtable, src);
        require_noerr(rval, finish);

        /* Derive the MetaClass vtable's name from this class vtable's name:
         * vtable symbol -> class name -> meta vtable name.
         */
        rval = kxld_sym_get_class_name_from_vtable(sym,
            class_name, sizeof(class_name));
        require_noerr(rval, finish);

        rval = kxld_sym_get_meta_vtable_name_from_class_name(class_name,
            meta_vtable_name, sizeof(meta_vtable_name));
        require_noerr(rval, finish);

        /* Some classes don't have a MetaClass, so when we run across one
         * of those, we shrink the vtable array by 1.
         */
        src = kxld_dict_find(patched_vtables, meta_vtable_name);
        if (src) {
            vtable = kxld_array_get_item(&kext->vtables, i++);
            rval = kxld_vtable_copy(vtable, src);
            require_noerr(rval, finish);
        } else {
            /* NOTE(review): this resize only ever shrinks the array, and its
             * return value is ignored — presumably shrinking cannot fail;
             * confirm against kxld_array_resize.
             */
            kxld_array_resize(&kext->vtables, kext->vtables.nitems - 1);
        }
    }

    rval = KERN_SUCCESS;

finish:
    return rval;
}
2668
2669 #if KXLD_USER_OR_OBJECT
2670 /*******************************************************************************
2671 *******************************************************************************/
2672 static kern_return_t
2673 process_relocs_from_sections(KXLDKext *kext)
2674 {
2675 kern_return_t rval = KERN_FAILURE;
2676 KXLDSect *sect = NULL;
2677 u_int i = 0;
2678
2679 for (i = 0; i < kext->sects.nitems; ++i) {
2680 sect = kxld_array_get_item(&kext->sects, i);
2681 rval = kxld_sect_process_relocs(sect, &kext->relocator,
2682 &kext->sects, kext->symtab);
2683 require_noerr_action(rval, finish,
2684 kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogInvalidSectReloc,
2685 i, sect->segname, sect->sectname));
2686 }
2687
2688 rval = KERN_SUCCESS;
2689
2690 finish:
2691 return rval;
2692 }
2693 #endif /* KXLD_USER_OR_OBJECT */
2694
2695 #if KXLD_USER_OR_BUNDLE
2696 /*******************************************************************************
2697 *******************************************************************************/
2698 static kern_return_t
2699 process_relocs_from_tables(KXLDKext *kext)
2700 {
2701 kern_return_t rval = KERN_FAILURE;
2702 KXLDReloc *reloc = NULL;
2703 KXLDSeg *seg = NULL;
2704 u_int i = 0;
2705
2706 /* Offsets for relocations in relocation tables are based on the vm
2707 * address of the first segment.
2708 */
2709 seg = kxld_array_get_item(&kext->segs, 0);
2710
2711 /* Process external relocations */
2712 for (i = 0; i < kext->extrelocs.nitems; ++i) {
2713 reloc = kxld_array_get_item(&kext->extrelocs, i);
2714
2715 rval = kxld_relocator_process_table_reloc(&kext->relocator, reloc, seg,
2716 kext->file, &kext->sects, kext->symtab);
2717 require_noerr_action(rval, finish,
2718 kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogInvalidExtReloc, i));
2719 }
2720
2721 /* Process local relocations */
2722 for (i = 0; i < kext->locrelocs.nitems; ++i) {
2723 reloc = kxld_array_get_item(&kext->locrelocs, i);
2724
2725 rval = kxld_relocator_process_table_reloc(&kext->relocator, reloc, seg,
2726 kext->file, &kext->sects, kext->symtab);
2727 require_noerr_action(rval, finish,
2728 kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogInvalidIntReloc, i));
2729 }
2730
2731 rval = KERN_SUCCESS;
2732
2733 finish:
2734 return rval;
2735 }
2736
2737 /*******************************************************************************
2738 *******************************************************************************/
2739 static void
2740 add_to_ptr(u_char *symptr, kxld_addr_t val, boolean_t is_32_bit)
2741 {
2742 if (is_32_bit) {
2743 uint32_t *ptr = (uint32_t *) symptr;
2744 *ptr += (uint32_t) val;
2745 } else {
2746 uint64_t *ptr = (uint64_t *) symptr;
2747 *ptr += (uint64_t) val;
2748 }
2749 }
2750
2751 #define SECT_SYM_PTRS "__nl_symbol_ptr"
2752
2753 /*******************************************************************************
2754 * Final linked images create an __nl_symbol_ptr section for the global offset
2755 * table and for symbol pointer lookups in general. Rather than use relocation
2756 * entries, the linker creates an "indirect symbol table" which stores indexes
2757 * into the symbol table corresponding to the entries of this section. This
2758 * function populates the section with the relocated addresses of those symbols.
2759 *******************************************************************************/
static kern_return_t
process_symbol_pointers(KXLDKext *kext)
{
    kern_return_t rval = KERN_FAILURE;
    KXLDSect *sect = NULL;
    KXLDSym *sym = NULL;
    int32_t *symidx = NULL;
    u_char *symptr = NULL;
    u_long symptrsize = 0;
    u_int nsyms = 0;
    u_int firstsym = 0;
    u_int i = 0;

    check(kext);

    /* Only final linked images have a dynamic symbol table to drive this. */
    require_action(kext->is_final_image && kext->dysymtab_hdr,
        finish, rval=KERN_FAILURE);

    /* Get the __DATA,__nl_symbol_ptr section.  If it doesn't exist, we have
     * nothing to do.
     */

    sect = kxld_kext_get_sect_by_name(kext, SEG_DATA, SECT_SYM_PTRS);
    if (!sect) {
        rval = KERN_SUCCESS;
        goto finish;
    }

    require_action(sect->flags & S_NON_LAZY_SYMBOL_POINTERS,
        finish, rval=KERN_FAILURE;
        kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogMalformedMachO
            "Section %s,%s does not have S_NON_LAZY_SYMBOL_POINTERS flag.",
            SEG_DATA, SECT_SYM_PTRS));

    /* Calculate the table offset and number of entries in the section */

    if (kxld_kext_is_32_bit(kext)) {
        symptrsize = sizeof(uint32_t);
    } else {
        symptrsize = sizeof(uint64_t);
    }

    nsyms = (u_int) (sect->size / symptrsize);
    /* reserved1 of a symbol-pointer section holds its starting index into
     * the indirect symbol table.
     */
    firstsym = sect->reserved1;

    /* The slice [firstsym, firstsym+nsyms) must fit within the indirect
     * symbol table, or the Mach-O is malformed.
     */
    require_action(firstsym + nsyms <= kext->dysymtab_hdr->nindirectsyms,
        finish, rval=KERN_FAILURE;
        kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogMalformedMachO));

    /* Iterate through the indirect symbol table and fill in the section of
     * symbol pointers.  There are three cases:
     * 1) A normal symbol - put its value directly in the table
     * 2) An INDIRECT_SYMBOL_LOCAL - symbols that are local and already have
     *    their offset from the start of the file in the section.  Simply
     *    add the file's link address to fill this entry.
     * 3) An INDIRECT_SYMBOL_ABS - prepopulated absolute symbols.  No
     *    action is required.
     */

    symidx = (int32_t *) (kext->file + kext->dysymtab_hdr->indirectsymoff);
    symidx += firstsym;
    symptr = sect->data;
    for (i = 0; i < nsyms; ++i, ++symidx, symptr+=symptrsize) {
        if (*symidx & INDIRECT_SYMBOL_LOCAL) {
            /* Absolute entries need no adjustment (case 3 above). */
            if (*symidx & INDIRECT_SYMBOL_ABS) continue;

            add_to_ptr(symptr, kext->link_addr, kxld_kext_is_32_bit(kext));
        } else {
            /* Normal entry: *symidx is an index into the symbol table. */
            sym = kxld_symtab_get_symbol_by_index(kext->symtab, *symidx);
            require_action(sym, finish, rval=KERN_FAILURE);

            add_to_ptr(symptr, sym->link_addr, kxld_kext_is_32_bit(kext));
        }
    }

    rval = KERN_SUCCESS;
finish:
    return rval;
}
2839 #endif /* KXLD_USER_OR_BUNDLE */
2840
2841 /*******************************************************************************
2842 *******************************************************************************/
static kern_return_t
populate_kmod_info(KXLDKext *kext)
{
    kern_return_t rval = KERN_FAILURE;
    KXLDSect *kmodsect = NULL;
    KXLDSym *kmodsym = NULL;
    u_long kmod_offset = 0;
    u_long header_size;
    u_long size;

    /* Only real kexts carry a kmod_info structure to fill in. */
    if (kext->link_type != KXLD_LINK_KEXT) {
        rval = KERN_SUCCESS;
        goto finish;
    }

    kxld_kext_get_vmsize(kext, &header_size, &size);

    /* A kext without the kmod_info symbol is an error. */
    kmodsym = kxld_symtab_get_symbol_by_name(kext->symtab, KXLD_KMOD_INFO_SYMBOL);
    require_action(kmodsym, finish, rval=KERN_FAILURE;
        kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogNoKmodInfo));

    /* Locate the kmod_info structure inside its section's data via the
     * symbol's offset from the section base.
     */
    kmodsect = kxld_array_get_item(&kext->sects, kmodsym->sectnum);
    kmod_offset = (u_long) (kmodsym->base_addr - kmodsect->base_addr);

    kext->kmod_info = (kmod_info_t *) (kmodsect->data + kmod_offset);
    kext->kmod_link_addr = kmodsym->link_addr;

    /* Fill in address/size/hdr_size using the layout matching the target's
     * word size; when cross-linking (user space only), byte-swap the fields
     * to the target's endianness.
     */
    if (kxld_kext_is_32_bit(kext)) {
        kmod_info_32_v1_t *kmod = (kmod_info_32_v1_t *) (kext->kmod_info);
        kmod->address = (uint32_t) kext->link_addr;
        kmod->size = (uint32_t) size;
        kmod->hdr_size = (uint32_t) header_size;

#if !KERNEL
        if (kxld_kext_target_needs_swap(kext)) {
            kmod->address = OSSwapInt32(kmod->address);
            kmod->size = OSSwapInt32(kmod->size);
            kmod->hdr_size = OSSwapInt32(kmod->hdr_size);
        }
#endif /* !KERNEL */
    } else {
        kmod_info_64_v1_t *kmod = (kmod_info_64_v1_t *) (kext->kmod_info);
        kmod->address = kext->link_addr;
        kmod->size = size;
        kmod->hdr_size = header_size;

#if !KERNEL
        if (kxld_kext_target_needs_swap(kext)) {
            kmod->address = OSSwapInt64(kmod->address);
            kmod->size = OSSwapInt64(kmod->size);
            kmod->hdr_size = OSSwapInt64(kmod->hdr_size);
        }
#endif /* !KERNEL */
    }


    rval = KERN_SUCCESS;

finish:
    return rval;
}
2904