/*
 * Copyright (c) 2008 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <string.h>
#include <mach/machine.h>
#include <mach/vm_param.h>
#include <mach/vm_types.h>
#include <mach/kmod.h>
#include <mach-o/loader.h>
#include <mach-o/nlist.h>
#include <mach-o/reloc.h>
#include <sys/types.h>

#if KERNEL
#include <libkern/kernel_mach_header.h>
#include <libkern/OSKextLib.h>
#include <libkern/OSKextLibPrivate.h>
#include <mach/vm_param.h>
#include <mach-o/fat.h>
#else /* !KERNEL */
#include <architecture/byte_order.h>
#include <mach/mach_init.h>
#include <mach-o/arch.h>
#include <mach-o/swap.h>
#endif /* KERNEL */

#define DEBUG_ASSERT_COMPONENT_NAME_STRING "kxld"
#include <AssertMacros.h>

#include "kxld_dict.h"
#include "kxld_kext.h"
#include "kxld_reloc.h"
#include "kxld_sect.h"
#include "kxld_seg.h"
#include "kxld_state.h"
#include "kxld_symtab.h"
#include "kxld_util.h"
#include "kxld_uuid.h"
#include "kxld_vtable.h"

struct symtab_command;

enum kxld_link_type {
    KXLD_LINK_KERNEL,
    KXLD_LINK_PSEUDO_KEXT,
    KXLD_LINK_KEXT,
    KXLD_LINK_UNKNOWN
};

typedef enum kxld_link_type KXLDLinkType;
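/* Rough meaning of the link types above, as they are used in this file:
 * KXLD_LINK_KERNEL is the running kernel image itself, KXLD_LINK_PSEUDO_KEXT
 * is a symbol-only kext with no segments to load, KXLD_LINK_KEXT is a normal
 * loadable kext, and KXLD_LINK_UNKNOWN is the value used before the type has
 * been determined.
 */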

struct kxld_kext {
    u_char *file;
    u_long size;
    const char *name;
    uint32_t filetype;
    KXLDArray segs;
    KXLDArray sects;
    KXLDArray vtables;
    KXLDArray extrelocs;
    KXLDArray locrelocs;
    KXLDDict vtable_index;
    KXLDRelocator relocator;
    KXLDuuid uuid;
    KXLDSymtab *symtab;
    kxld_addr_t link_addr;
    kmod_info_t *kmod_info;
    kxld_addr_t kmod_link_addr;
    cpu_type_t cputype;
    cpu_subtype_t cpusubtype;
    KXLDLinkType link_type;
    KXLDFlags flags;
    boolean_t is_final_image;
    boolean_t got_is_created;
    struct dysymtab_command *dysymtab_hdr;
#if KXLD_USER_OR_OBJECT
    KXLDArray *section_order;
#endif
#if !KERNEL
    enum NXByteOrder host_order;
    enum NXByteOrder target_order;
#endif
};
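/* A KXLDKext wraps a single Mach-O image: the raw file and its size, the
 * parsed segments, sections, and relocation entries, the symbol table, the
 * UUID, and the vtables built for C++ patching. These fields are populated by
 * kxld_kext_init() below and consumed by the linking and export routines
 * later in this file.
 */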

/*******************************************************************************
* Prototypes
*******************************************************************************/

static kern_return_t get_target_machine_info(KXLDKext *kext, cpu_type_t cputype,
    cpu_subtype_t cpusubtype);
static kern_return_t get_file_for_arch(KXLDKext *kext, u_char *file, u_long size);

static u_long get_macho_header_size(const KXLDKext *kext);
static u_long get_macho_data_size(const KXLDKext *kext);
static kern_return_t export_macho_header(const KXLDKext *kext, u_char *buf,
    u_int ncmds, u_long *header_offset, u_long header_size);

static kern_return_t init_from_execute(KXLDKext *kext);
static kern_return_t init_from_final_linked_image(KXLDKext *kext, u_int *filetype_out,
    struct symtab_command **symtab_hdr_out);

static boolean_t target_supports_protected_segments(const KXLDKext *kext)
    __attribute__((pure));

#if KXLD_USER_OR_OBJECT
static boolean_t target_supports_object(const KXLDKext *kext) __attribute((pure));
static kern_return_t init_from_object(KXLDKext *kext);
static kern_return_t process_relocs_from_sections(KXLDKext *kext);
#endif /* KXLD_USER_OR_OBJECT */

#if KXLD_USER_OR_BUNDLE
static boolean_t target_supports_bundle(const KXLDKext *kext) __attribute((pure));
static kern_return_t init_from_bundle(KXLDKext *kext);
static kern_return_t process_relocs_from_tables(KXLDKext *kext);
static kern_return_t process_symbol_pointers(KXLDKext *kext);
static void add_to_ptr(u_char *symptr, kxld_addr_t val, boolean_t is_32_bit);
#endif /* KXLD_USER_OR_BUNDLE */

static kern_return_t get_metaclass_symbol_from_super_meta_class_pointer_symbol(
    KXLDKext *kext, KXLDSym *super_metaclass_pointer_sym, KXLDSym **meta_class);

static kern_return_t resolve_symbols(KXLDKext *kext, KXLDDict *defined_symbols,
    KXLDDict *obsolete_symbols);
static kern_return_t patch_vtables(KXLDKext *kext, KXLDDict *patched_vtables,
    KXLDDict *defined_symbols);
static kern_return_t validate_symbols(KXLDKext *kext);
static kern_return_t populate_kmod_info(KXLDKext *kext);
static kern_return_t copy_vtables(KXLDKext *kext, const KXLDDict *patched_vtables);
static kern_return_t create_vtables(KXLDKext *kext);
static void restrict_private_symbols(KXLDKext *kext);

#if KXLD_USER_OR_GOT || KXLD_USER_OR_COMMON
static kern_return_t add_section(KXLDKext *kext, KXLDSect **sect);
#endif /* KXLD_USER_OR_GOT || KXLD_USER_OR_COMMON */

#if KXLD_USER_OR_GOT
static boolean_t target_has_got(const KXLDKext *kext) __attribute__((pure));
static kern_return_t create_got(KXLDKext *kext);
static kern_return_t populate_got(KXLDKext *kext);
#endif /* KXLD_USER_OR_GOT */

static boolean_t target_supports_common(const KXLDKext *kext) __attribute((pure));
#if KXLD_USER_OR_COMMON
static kern_return_t resolve_common_symbols(KXLDKext *kext);
#endif /* KXLD_USER_OR_COMMON */

static boolean_t target_supports_strict_patching(KXLDKext *kext)
    __attribute__((pure));

#if KXLD_USER_OR_ILP32
static u_long get_macho_cmd_data_32(u_char *file, u_long offset,
    u_int *filetype, u_int *ncmds);
static kern_return_t export_macho_header_32(const KXLDKext *kext, u_char *buf,
    u_int ncmds, u_long *header_offset, u_long header_size);
#endif /* KXLD_USER_OR_ILP32 */
#if KXLD_USER_OR_LP64
static u_long get_macho_cmd_data_64(u_char *file, u_long offset,
    u_int *filetype, u_int *ncmds);
static kern_return_t export_macho_header_64(const KXLDKext *kext, u_char *buf,
    u_int ncmds, u_long *header_offset, u_long header_size);
#endif /* KXLD_USER_OR_LP64 */

/*******************************************************************************
*******************************************************************************/
size_t
kxld_kext_sizeof(void)
{
    return sizeof(KXLDKext);
}

/*******************************************************************************
*******************************************************************************/
kern_return_t
kxld_kext_init(KXLDKext *kext, u_char *file, u_long size,
    const char *name, KXLDFlags flags, boolean_t is_kernel,
    KXLDArray *section_order __unused,
    cpu_type_t cputype, cpu_subtype_t cpusubtype)
{
    kern_return_t rval = KERN_FAILURE;
    KXLDSeg *seg = NULL;
    u_int i = 0;

    check(kext);
    check(file);
    check(size);

    kext->name = name;
    kext->flags = flags;
#if KXLD_USER_OR_OBJECT
    kext->section_order = section_order;
#endif

    /* Find the local architecture */

    rval = get_target_machine_info(kext, cputype, cpusubtype);
    require_noerr(rval, finish);

    /* Find the Mach-O file for the target architecture */

    rval = get_file_for_arch(kext, file, size);
    require_noerr(rval, finish);

    /* Build the relocator */

    rval = kxld_relocator_init(&kext->relocator, kext->cputype,
        kext->cpusubtype, kxld_kext_target_needs_swap(kext));
    require_noerr(rval, finish);

    /* Allocate the symbol table */

    if (!kext->symtab) {
        kext->symtab = kxld_alloc(kxld_symtab_sizeof());
        require_action(kext->symtab, finish, rval=KERN_RESOURCE_SHORTAGE);
        bzero(kext->symtab, kxld_symtab_sizeof());
    }

    if (is_kernel) {
        kext->link_type = KXLD_LINK_KERNEL;
    } else {
        kext->link_type = KXLD_LINK_UNKNOWN;
    }

    /* There are four types of Mach-O files that we can support:
     *  1) 32-bit MH_OBJECT      - All pre-SnowLeopard systems
     *  2) 32-bit MH_KEXT_BUNDLE - Not supported
     *  3) 64-bit MH_OBJECT      - Needed for K64 bringup
     *  4) 64-bit MH_KEXT_BUNDLE - The likely 64-bit kext filetype
     */
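    /* Concretely, MH_OBJECT files are handled by init_from_object(),
     * MH_KEXT_BUNDLE files by init_from_bundle(), and MH_EXECUTE (the kernel
     * itself) by init_from_execute(). A 32-bit MH_KEXT_BUNDLE is rejected
     * because no 32-bit target reports bundle support; see
     * target_supports_bundle(), which is x86_64-only.
     */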

    if (kxld_kext_is_32_bit(kext)) {
        struct mach_header *mach_hdr = (struct mach_header *) kext->file;
        kext->filetype = mach_hdr->filetype;
    } else {
        struct mach_header_64 *mach_hdr = (struct mach_header_64 *) kext->file;
        kext->filetype = mach_hdr->filetype;
    }

    switch (kext->filetype) {
#if KXLD_USER_OR_OBJECT
    case MH_OBJECT:
        rval = init_from_object(kext);
        require_noerr(rval, finish);
        break;
#endif /* KXLD_USER_OR_OBJECT */
#if KXLD_USER_OR_BUNDLE
    case MH_KEXT_BUNDLE:
        rval = init_from_bundle(kext);
        require_noerr(rval, finish);
        break;
#endif /* KXLD_USER_OR_BUNDLE */
    case MH_EXECUTE:
        rval = init_from_execute(kext);
        require_noerr(rval, finish);
        break;
    default:
        rval = KERN_FAILURE;
        kxld_log(kKxldLogLinking, kKxldLogErr,
            kKxldLogFiletypeNotSupported, kext->filetype);
        goto finish;
    }

    for (i = 0; i < kext->segs.nitems; ++i) {
        seg = kxld_array_get_item(&kext->segs, i);
        kxld_seg_set_vm_protections(seg, target_supports_protected_segments(kext));
    }

    switch (kext->link_type) {
    case KXLD_LINK_KEXT:
        (void) restrict_private_symbols(kext);
        /* Fallthrough */
    case KXLD_LINK_KERNEL:
        rval = create_vtables(kext);
        require_noerr(rval, finish);
        break;
    default:
        break;
    }

    rval = KERN_SUCCESS;
finish:
    return rval;
}

/*******************************************************************************
*******************************************************************************/
kern_return_t
get_target_machine_info(KXLDKext *kext, cpu_type_t cputype __unused,
    cpu_subtype_t cpusubtype __unused)
{
#if KERNEL

    /* Because the kernel can only link for its own architecture, we know what
     * the host and target architectures are at compile time, so we can use
     * a vastly simplified version of this function.
     */

    check(kext);

#if defined(__i386__)
    kext->cputype = CPU_TYPE_I386;
    kext->cpusubtype = CPU_SUBTYPE_I386_ALL;
    return KERN_SUCCESS;
#elif defined(__ppc__)
    kext->cputype = CPU_TYPE_POWERPC;
    kext->cpusubtype = CPU_SUBTYPE_POWERPC_ALL;
    return KERN_SUCCESS;
#elif defined(__x86_64__)
    kext->cputype = CPU_TYPE_X86_64;
    kext->cpusubtype = CPU_SUBTYPE_X86_64_ALL;
    return KERN_SUCCESS;
#else
    kxld_log(kKxldLogLinking, kKxldLogErr,
        kKxldLogArchNotSupported, _mh_execute_header->cputype);
    return KERN_NOT_SUPPORTED;
#endif /* Supported architecture defines */


#else /* !KERNEL */

    /* User-space must look up the architecture it's running on and the target
     * architecture at run-time.
     */

    kern_return_t rval = KERN_FAILURE;
    const NXArchInfo *host_arch = NULL;

    check(kext);

    host_arch = NXGetLocalArchInfo();
    require_action(host_arch, finish, rval=KERN_FAILURE);

    kext->host_order = host_arch->byteorder;

    /* If the user did not specify a cputype, use the local architecture.
     */

    if (cputype) {
        kext->cputype = cputype;
        kext->cpusubtype = cpusubtype;
    } else {
        kext->cputype = host_arch->cputype;
        kext->target_order = kext->host_order;

        switch (kext->cputype) {
        case CPU_TYPE_I386:
            kext->cpusubtype = CPU_SUBTYPE_I386_ALL;
            break;
        case CPU_TYPE_POWERPC:
            kext->cpusubtype = CPU_SUBTYPE_POWERPC_ALL;
            break;
        case CPU_TYPE_X86_64:
            kext->cpusubtype = CPU_SUBTYPE_X86_64_ALL;
            break;
        case CPU_TYPE_ARM:
            kext->cpusubtype = CPU_SUBTYPE_ARM_ALL;
            break;
        default:
            kext->cpusubtype = 0;
        }
    }

    /* Validate that we support the target architecture and record its
     * endianness.
     */

    switch(kext->cputype) {
    case CPU_TYPE_ARM:
    case CPU_TYPE_I386:
    case CPU_TYPE_X86_64:
        kext->target_order = NX_LittleEndian;
        break;
    case CPU_TYPE_POWERPC:
        kext->target_order = NX_BigEndian;
        break;
    default:
        rval = KERN_NOT_SUPPORTED;
        kxld_log(kKxldLogLinking, kKxldLogErr,
            kKxldLogArchNotSupported, kext->cputype);
        goto finish;
    }

    rval = KERN_SUCCESS;

finish:
    return rval;
#endif /* KERNEL */
}

/*******************************************************************************
*******************************************************************************/
static kern_return_t
get_file_for_arch(KXLDKext *kext, u_char *file, u_long size)
{
    kern_return_t rval = KERN_FAILURE;
    struct mach_header *mach_hdr = NULL;
#if !KERNEL
    struct fat_header *fat = (struct fat_header *) file;
    struct fat_arch *archs = (struct fat_arch *) &fat[1];
    boolean_t swap = FALSE;
#endif /* KERNEL */

    check(kext);
    check(file);
    check(size);

    kext->file = file;
    kext->size = size;

    /* We are assuming that we will never receive a fat file in the kernel */

#if !KERNEL
    require_action(size >= sizeof(*fat), finish,
        rval=KERN_FAILURE;
        kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogTruncatedMachO));

    /* The fat header is always big endian, so swap if necessary */
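    /* For reference, a fat (universal) file starts with a fat_header
     * (magic, nfat_arch) followed by nfat_arch fat_arch records, each giving
     * a cputype/cpusubtype and the offset and size of the corresponding thin
     * Mach-O slice within the file (see <mach-o/fat.h>).
     */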
    if (fat->magic == FAT_CIGAM) {
        (void) swap_fat_header(fat, kext->host_order);
        swap = TRUE;
    }

    if (fat->magic == FAT_MAGIC) {
        struct fat_arch *arch = NULL;

        require_action(size >= (sizeof(*fat) + (fat->nfat_arch * sizeof(*archs))),
            finish,
            rval=KERN_FAILURE;
            kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogTruncatedMachO));

        /* Swap the fat_arch structures if necessary */
        if (swap) {
            (void) swap_fat_arch(archs, fat->nfat_arch, kext->host_order);
        }

        /* Locate the Mach-O for the requested architecture */

        arch = NXFindBestFatArch(kext->cputype, kext->cpusubtype, archs,
            fat->nfat_arch);
        require_action(arch, finish, rval=KERN_FAILURE;
            kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogArchNotFound));
        require_action(size >= arch->offset + arch->size, finish,
            rval=KERN_FAILURE;
            kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogTruncatedMachO));

        kext->file = file + arch->offset;
        kext->size = arch->size;
    }
#endif /* !KERNEL */

    /* Swap the Mach-O's headers to this architecture if necessary */
    if (kxld_kext_is_32_bit(kext)) {
        rval = validate_and_swap_macho_32(kext->file, kext->size
#if !KERNEL
            , kext->host_order
#endif /* !KERNEL */
            );
    } else {
        rval = validate_and_swap_macho_64(kext->file, kext->size
#if !KERNEL
            , kext->host_order
#endif /* !KERNEL */
            );
    }
    require_noerr(rval, finish);

    mach_hdr = (struct mach_header *) kext->file;
    require_action(kext->cputype == mach_hdr->cputype, finish,
        rval=KERN_FAILURE;
        kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogTruncatedMachO));

    rval = KERN_SUCCESS;
finish:
    return rval;
}

/*******************************************************************************
*******************************************************************************/
boolean_t
kxld_kext_is_32_bit(const KXLDKext *kext)
{
    check(kext);

    return kxld_is_32_bit(kext->cputype);
}

/*******************************************************************************
*******************************************************************************/
void
kxld_kext_get_cputype(const KXLDKext *kext, cpu_type_t *cputype,
    cpu_subtype_t *cpusubtype)
{
    check(kext);
    check(cputype);
    check(cpusubtype);

    *cputype = kext->cputype;
    *cpusubtype = kext->cpusubtype;
}

/*******************************************************************************
*******************************************************************************/
kern_return_t
kxld_kext_validate_cputype(const KXLDKext *kext, cpu_type_t cputype,
    cpu_subtype_t cpusubtype __unused)
{
    if (kext->cputype != cputype) return KERN_FAILURE;
    return KERN_SUCCESS;
}

/*******************************************************************************
*******************************************************************************/
static boolean_t
target_supports_protected_segments(const KXLDKext *kext)
{
    return (kext->is_final_image &&
            kext->cputype == CPU_TYPE_X86_64);
}

#if KXLD_USER_OR_OBJECT
/*******************************************************************************
*******************************************************************************/
static boolean_t target_supports_object(const KXLDKext *kext)
{
    return (kext->cputype == CPU_TYPE_POWERPC ||
            kext->cputype == CPU_TYPE_I386 ||
            kext->cputype == CPU_TYPE_ARM);
}

/*******************************************************************************
*******************************************************************************/
static kern_return_t
init_from_object(KXLDKext *kext)
{
    kern_return_t rval = KERN_FAILURE;
    struct load_command *cmd_hdr = NULL;
    struct symtab_command *symtab_hdr = NULL;
    struct uuid_command *uuid_hdr = NULL;
    KXLDSect *sect = NULL;
    u_long offset = 0;
    u_long sect_offset = 0;
    u_int filetype = 0;
    u_int ncmds = 0;
    u_int nsects = 0;
    u_int i = 0;
    boolean_t has_segment = FALSE;

    check(kext);

    require_action(target_supports_object(kext),
        finish, rval=KERN_FAILURE;
        kxld_log(kKxldLogLinking, kKxldLogErr,
            kKxldLogFiletypeNotSupported, MH_OBJECT));

    KXLD_3264_FUNC(kxld_kext_is_32_bit(kext), offset,
        get_macho_cmd_data_32, get_macho_cmd_data_64,
        kext->file, offset, &filetype, &ncmds);

    require_action(filetype == MH_OBJECT, finish, rval=KERN_FAILURE);

    /* MH_OBJECTs use one unnamed segment to contain all of the sections. We
     * loop over all of the load commands to initialize the structures we
     * expect. Then, we'll use the unnamed segment to get to all of the
     * sections, and then use those sections to create the actual segments.
     */
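    /* Concretely, the loop below records the single LC_SEGMENT(_64) that holds
     * the sections, initializes the symbol table from LC_SYMTAB, and captures
     * the UUID from LC_UUID; afterwards the section list is walked to build
     * the KXLDSect array, and real segments are then synthesized from those
     * sections by kxld_seg_create_seg_from_sections().
     */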

    for (; i < ncmds; ++i, offset += cmd_hdr->cmdsize) {
        cmd_hdr = (struct load_command *) (kext->file + offset);

        switch(cmd_hdr->cmd) {
#if KXLD_USER_OR_ILP32
        case LC_SEGMENT:
            {
                struct segment_command *seg_hdr =
                    (struct segment_command *) cmd_hdr;

                /* Ignore segments with no vm size */
                if (!seg_hdr->vmsize) continue;

                require_action(kxld_kext_is_32_bit(kext), finish, rval=KERN_FAILURE;
                    kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogMalformedMachO
                    "LC_SEGMENT in 64-bit kext."));
                require_action(!has_segment, finish, rval=KERN_FAILURE;
                    kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogMalformedMachO
                    "Multiple segments in an MH_OBJECT kext."));

                nsects = seg_hdr->nsects;
                sect_offset = offset + sizeof(*seg_hdr);
                has_segment = TRUE;
            }
            break;
#endif /* KXLD_USER_OR_ILP32 */
#if KXLD_USER_OR_LP64
        case LC_SEGMENT_64:
            {
                struct segment_command_64 *seg_hdr =
                    (struct segment_command_64 *) cmd_hdr;

                /* Ignore segments with no vm size */
                if (!seg_hdr->vmsize) continue;

                require_action(!kxld_kext_is_32_bit(kext), finish, rval=KERN_FAILURE;
                    kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogMalformedMachO
                    "LC_SEGMENT_64 in a 32-bit kext."));
                require_action(!has_segment, finish, rval=KERN_FAILURE;
                    kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogMalformedMachO
                    "Multiple segments in an MH_OBJECT kext."));

                nsects = seg_hdr->nsects;
                sect_offset = offset + sizeof(*seg_hdr);
                has_segment = TRUE;
            }
            break;
#endif /* KXLD_USER_OR_LP64 */
        case LC_SYMTAB:
            symtab_hdr = (struct symtab_command *) cmd_hdr;

            KXLD_3264_FUNC(kxld_kext_is_32_bit(kext), rval,
                kxld_symtab_init_from_macho_32, kxld_symtab_init_from_macho_64,
                kext->symtab, kext->file, symtab_hdr, 0);
            require_noerr(rval, finish);
            break;
        case LC_UUID:
            uuid_hdr = (struct uuid_command *) cmd_hdr;
            kxld_uuid_init_from_macho(&kext->uuid, uuid_hdr);
            break;
        case LC_UNIXTHREAD:
            /* Don't need to do anything with UNIXTHREAD */
            break;
        default:
            rval = KERN_FAILURE;
            kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogMalformedMachO
                "Invalid segment type in MH_OBJECT kext: %u.", cmd_hdr->cmd);
            goto finish;
        }
    }

    if (has_segment) {

        /* Get the number of sections from the segment and build the section index */

        rval = kxld_array_init(&kext->sects, sizeof(KXLDSect), nsects);
        require_noerr(rval, finish);

        /* Loop over all of the sections to initialize the section index */

        for (i = 0; i < nsects; ++i) {
            sect = kxld_array_get_item(&kext->sects, i);
            KXLD_3264_FUNC(kxld_kext_is_32_bit(kext), rval,
                kxld_sect_init_from_macho_32, kxld_sect_init_from_macho_64,
                sect, kext->file, &sect_offset, i, &kext->relocator);
            require_noerr(rval, finish);
        }

        /* Create special sections */

#if KXLD_USER_OR_GOT
        rval = create_got(kext);
        require_noerr(rval, finish);
#endif /* KXLD_USER_OR_GOT */

#if KXLD_USER_OR_COMMON
        rval = resolve_common_symbols(kext);
        require_noerr(rval, finish);
#endif /* KXLD_USER_OR_COMMON */

        /* Create the segments from the section index */

        rval = kxld_seg_create_seg_from_sections(&kext->segs, &kext->sects);
        require_noerr(rval, finish);

        rval = kxld_seg_finalize_object_segment(&kext->segs,
            kext->section_order, get_macho_header_size(kext));
        require_noerr(rval, finish);

        kext->link_type = KXLD_LINK_KEXT;
    } else {
        kext->link_type = KXLD_LINK_PSEUDO_KEXT;
    }

    rval = KERN_SUCCESS;
finish:
    return rval;
}
#endif /* KXLD_USER_OR_OBJECT */

/*******************************************************************************
*******************************************************************************/
static kern_return_t
init_from_final_linked_image(KXLDKext *kext, u_int *filetype_out,
    struct symtab_command **symtab_hdr_out)
{
    kern_return_t rval = KERN_FAILURE;
    KXLDSeg *seg = NULL;
    KXLDSect *sect = NULL;
    struct load_command *cmd_hdr = NULL;
    struct symtab_command *symtab_hdr = NULL;
    struct uuid_command *uuid_hdr = NULL;
    u_long base_offset = 0;
    u_long offset = 0;
    u_long sect_offset = 0;
    u_int filetype = 0;
    u_int i = 0;
    u_int j = 0;
    u_int segi = 0;
    u_int secti = 0;
    u_int nsegs = 0;
    u_int nsects = 0;
    u_int ncmds = 0;

    KXLD_3264_FUNC(kxld_kext_is_32_bit(kext), base_offset,
        get_macho_cmd_data_32, get_macho_cmd_data_64,
        kext->file, offset, &filetype, &ncmds);

    /* First pass to count segments and sections */

    offset = base_offset;
    for (i = 0; i < ncmds; ++i, offset += cmd_hdr->cmdsize) {
        cmd_hdr = (struct load_command *) (kext->file + offset);

        switch(cmd_hdr->cmd) {
#if KXLD_USER_OR_ILP32
        case LC_SEGMENT:
            {
                struct segment_command *seg_hdr =
                    (struct segment_command *) cmd_hdr;

                /* Ignore segments with no vm size */
                if (!seg_hdr->vmsize) continue;

                ++nsegs;
                nsects += seg_hdr->nsects;
            }
            break;
#endif /* KXLD_USER_OR_ILP32 */
#if KXLD_USER_OR_LP64
        case LC_SEGMENT_64:
            {
                struct segment_command_64 *seg_hdr =
                    (struct segment_command_64 *) cmd_hdr;

                /* Ignore segments with no vm size */
                if (!seg_hdr->vmsize) continue;

                ++nsegs;
                nsects += seg_hdr->nsects;
            }
            break;
#endif /* KXLD_USER_OR_LP64 */
        default:
            continue;
        }
    }

    /* Allocate the segments and sections */

    if (nsegs) {
        rval = kxld_array_init(&kext->segs, sizeof(KXLDSeg), nsegs);
        require_noerr(rval, finish);

        rval = kxld_array_init(&kext->sects, sizeof(KXLDSect), nsects);
        require_noerr(rval, finish);
    }

    /* Initialize the segments and sections */

    offset = base_offset;
    for (i = 0; i < ncmds; ++i, offset += cmd_hdr->cmdsize) {
        cmd_hdr = (struct load_command *) (kext->file + offset);
        seg = NULL;

        switch(cmd_hdr->cmd) {
#if KXLD_USER_OR_ILP32
        case LC_SEGMENT:
            {
                struct segment_command *seg_hdr =
                    (struct segment_command *) cmd_hdr;

                /* Ignore segments with no vm size */
                if (!seg_hdr->vmsize) continue;

                seg = kxld_array_get_item(&kext->segs, segi++);

                rval = kxld_seg_init_from_macho_32(seg, seg_hdr);
                require_noerr(rval, finish);

                sect_offset = offset + sizeof(*seg_hdr);
            }
            break;
#endif /* KXLD_USER_OR_ILP32 */
#if KXLD_USER_OR_LP64
        case LC_SEGMENT_64:
            {
                struct segment_command_64 *seg_hdr =
                    (struct segment_command_64 *) cmd_hdr;

                /* Ignore segments with no vm size */
                if (!seg_hdr->vmsize) continue;

                seg = kxld_array_get_item(&kext->segs, segi++);

                rval = kxld_seg_init_from_macho_64(seg, seg_hdr);
                require_noerr(rval, finish);

                sect_offset = offset + sizeof(*seg_hdr);
            }
            break;
#endif /* KXLD_USER_OR_LP64 */
        case LC_SYMTAB:
            symtab_hdr = (struct symtab_command *) cmd_hdr;
            break;
        case LC_UUID:
            uuid_hdr = (struct uuid_command *) cmd_hdr;
            kxld_uuid_init_from_macho(&kext->uuid, uuid_hdr);
            break;
        case LC_DYSYMTAB:
            kext->dysymtab_hdr = (struct dysymtab_command *) cmd_hdr;

            rval = kxld_reloc_create_macho(&kext->extrelocs, &kext->relocator,
                (struct relocation_info *) (kext->file + kext->dysymtab_hdr->extreloff),
                kext->dysymtab_hdr->nextrel);
            require_noerr(rval, finish);

            rval = kxld_reloc_create_macho(&kext->locrelocs, &kext->relocator,
                (struct relocation_info *) (kext->file + kext->dysymtab_hdr->locreloff),
                kext->dysymtab_hdr->nlocrel);
            require_noerr(rval, finish);

            break;
        case LC_UNIXTHREAD:
            /* Don't need to do anything with UNIXTHREAD for the kernel */
            require_action(kext->link_type == KXLD_LINK_KERNEL, finish,
                rval=KERN_FAILURE;
                kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogMalformedMachO
                "LC_UNIXTHREAD segment is not valid in a kext."));
            break;
        default:
            rval=KERN_FAILURE;
            kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogMalformedMachO
                "Invalid segment type in MH_KEXT_BUNDLE kext: %u.", cmd_hdr->cmd);
            goto finish;
        }

        if (seg) {

            /* Initialize the sections */
            for (j = 0; j < seg->sects.nitems; ++j, ++secti) {
                sect = kxld_array_get_item(&kext->sects, secti);
                KXLD_3264_FUNC(kxld_kext_is_32_bit(kext), rval,
                    kxld_sect_init_from_macho_32, kxld_sect_init_from_macho_64,
                    sect, kext->file, &sect_offset, secti, &kext->relocator);
                require_noerr(rval, finish);

                /* Add the section to the segment. This will also make sure
                 * that the sections and segments have the same segname.
                 */
                rval = kxld_seg_add_section(seg, sect);
                require_noerr(rval, finish);
            }
            rval = kxld_seg_finish_init(seg);
            require_noerr(rval, finish);
        }
    }

    if (filetype_out) *filetype_out = filetype;
    if (symtab_hdr_out) *symtab_hdr_out = symtab_hdr;
    kext->is_final_image = TRUE;
    rval = KERN_SUCCESS;
finish:
    return rval;
}

/*******************************************************************************
*******************************************************************************/
static kern_return_t
init_from_execute(KXLDKext *kext)
{
    kern_return_t rval = KERN_FAILURE;
    struct symtab_command *symtab_hdr = NULL;
    kxld_addr_t linkedit_offset = 0;
    u_int filetype = 0;
#if KERNEL
    KXLDSeg *textseg = NULL;
    KXLDSeg *linkeditseg = NULL;
#endif /* KERNEL */
#if KXLD_USER_OR_OBJECT
    KXLDSeg *seg = NULL;
    KXLDSect *sect = NULL;
    KXLDSectionName *sname = NULL;
    u_int i = 0, j = 0, k = 0;
#endif /* KXLD_USER_OR_OBJECT */

    check(kext);

    require_action(kext->link_type == KXLD_LINK_KERNEL, finish,
        rval=KERN_FAILURE);

    rval = init_from_final_linked_image(kext, &filetype, &symtab_hdr);
    require_noerr(rval, finish);

    require_action(filetype == MH_EXECUTE, finish, rval=KERN_FAILURE;
        kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogMalformedMachO
        "The kernel file is not of type MH_EXECUTE."));

#if KERNEL
    /* When we're in the kernel, the symbol table can no longer be found by the
     * symtab_command alone because the command specifies offsets for the file
     * on disk, not the file mapped into memory. We can find the additional
     * offset necessary by finding the difference between the linkedit segment's
     * vm address and the text segment's vm address.
     */

    textseg = kxld_kext_get_seg_by_name(kext, SEG_TEXT);
    require_action(textseg, finish, rval=KERN_FAILURE;
        kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogMalformedMachO));

    linkeditseg = kxld_kext_get_seg_by_name(kext, SEG_LINKEDIT);
    require_action(linkeditseg, finish, rval=KERN_FAILURE;
        kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogMalformedMachO));

    linkedit_offset = linkeditseg->base_addr - textseg->base_addr -
        linkeditseg->fileoff;
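    /* The net effect: for a file offset N recorded in the symtab_command, the
     * corresponding bytes live in memory at kext->file + N + linkedit_offset,
     * i.e. at their position within the mapped __LINKEDIT segment rather than
     * their position in the on-disk file.
     */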
#endif /* KERNEL */

    /* Initialize the symbol table */

    KXLD_3264_FUNC(kxld_kext_is_32_bit(kext), rval,
        kxld_symtab_init_from_macho_32, kxld_symtab_init_from_macho_64,
        kext->symtab, kext->file, symtab_hdr, linkedit_offset);
    require_noerr(rval, finish);

#if KXLD_USER_OR_OBJECT
    /* Save off the order of section names so that we can lay out kext
     * sections for MH_OBJECT-based systems.
     */
    if (target_supports_object(kext)) {

        rval = kxld_array_init(kext->section_order, sizeof(KXLDSectionName),
            kext->sects.nitems);
        require_noerr(rval, finish);

        /* Copy the section names into the section_order array for future kext
         * section ordering.
         */
        for (i = 0, k = 0; i < kext->segs.nitems; ++i) {
            seg = kxld_array_get_item(&kext->segs, i);

            for (j = 0; j < seg->sects.nitems; ++j, ++k) {
                sect = *(KXLDSect **) kxld_array_get_item(&seg->sects, j);
                sname = kxld_array_get_item(kext->section_order, k);

                strlcpy(sname->segname, sect->segname, sizeof(sname->segname));
                strlcpy(sname->sectname, sect->sectname, sizeof(sname->sectname));
            }
        }
    }
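    /* The recorded kernel section order is later handed to
     * kxld_seg_finalize_object_segment() (see init_from_object() above) so
     * that a kext built as an MH_OBJECT can have its sections laid out in the
     * same order as the kernel's.
     */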
#endif /* KXLD_USER_OR_OBJECT */

    rval = KERN_SUCCESS;
finish:
    return rval;
}

#if KXLD_USER_OR_BUNDLE
/*******************************************************************************
*******************************************************************************/
static boolean_t
target_supports_bundle(const KXLDKext *kext)
{
    return (kext->cputype == CPU_TYPE_X86_64);
}

/*******************************************************************************
*******************************************************************************/
static kern_return_t
init_from_bundle(KXLDKext *kext)
{
    kern_return_t rval = KERN_FAILURE;
    KXLDSeg *seg = NULL;
    struct symtab_command *symtab_hdr = NULL;
    u_int filetype = 0;
    u_int idx = 0;

    check(kext);

    require_action(target_supports_bundle(kext), finish,
        rval=KERN_FAILURE;
        kxld_log(kKxldLogLinking, kKxldLogErr,
            kKxldLogFiletypeNotSupported, MH_KEXT_BUNDLE));

    rval = init_from_final_linked_image(kext, &filetype, &symtab_hdr);
    require_noerr(rval, finish);

    require_action(filetype == MH_KEXT_BUNDLE, finish,
        rval=KERN_FAILURE);

    KXLD_3264_FUNC(kxld_kext_is_32_bit(kext), rval,
        kxld_symtab_init_from_macho_32, kxld_symtab_init_from_macho_64,
        kext->symtab, kext->file, symtab_hdr, /* linkedit offset */ 0);
    require_noerr(rval, finish);

    if (kext->segs.nitems) {
        /* Remove the __LINKEDIT segment, since we never keep the symbol
         * table around in memory for kexts.
         */
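        /* Dropping the segment here only removes it from the segment list used
         * to lay out and export the linked kext; the symbol information was
         * already captured from the symtab_command just above.
         */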
        seg = kxld_kext_get_seg_by_name(kext, SEG_LINKEDIT);
        if (seg) {
            rval = kxld_array_get_index(&kext->segs, seg, &idx);
            require_noerr(rval, finish);

            kxld_seg_deinit(seg);

            rval = kxld_array_remove(&kext->segs, idx);
            require_noerr(rval, finish);
        }

        kext->link_type = KXLD_LINK_KEXT;
    } else {
        kext->link_type = KXLD_LINK_PSEUDO_KEXT;
    }

    rval = KERN_SUCCESS;
finish:
    return rval;
}
#endif /* KXLD_USER_OR_BUNDLE */

#if KXLD_USER_OR_ILP32
/*******************************************************************************
*******************************************************************************/
static u_long
get_macho_cmd_data_32(u_char *file, u_long offset, u_int *filetype, u_int *ncmds)
{
    struct mach_header *mach_hdr = (struct mach_header *) (file + offset);

    if (filetype) *filetype = mach_hdr->filetype;
    if (ncmds) *ncmds = mach_hdr->ncmds;

    return sizeof(*mach_hdr);
}

#endif /* KXLD_USER_OR_ILP32 */

#if KXLD_USER_OR_LP64
/*******************************************************************************
*******************************************************************************/
static u_long
get_macho_cmd_data_64(u_char *file, u_long offset, u_int *filetype, u_int *ncmds)
{
    struct mach_header_64 *mach_hdr = (struct mach_header_64 *) (file + offset);

    if (filetype) *filetype = mach_hdr->filetype;
    if (ncmds) *ncmds = mach_hdr->ncmds;

    return sizeof(*mach_hdr);
}
#endif /* KXLD_USER_OR_LP64 */

/*******************************************************************************
*******************************************************************************/
static kern_return_t
create_vtables(KXLDKext *kext)
{
    kern_return_t rval = KERN_FAILURE;
    KXLDSymtabIterator iter;
    KXLDSym *sym = NULL;
    KXLDSym *vtable_sym = NULL;
    KXLDSym *meta_vtable_sym = NULL;
    KXLDSect *vtable_sect = NULL;
    KXLDSect *meta_vtable_sect = NULL;
    KXLDVTable *vtable = NULL;
    KXLDVTable *meta_vtable = NULL;
    char class_name[KXLD_MAX_NAME_LEN];
    char vtable_name[KXLD_MAX_NAME_LEN];
    char meta_vtable_name[KXLD_MAX_NAME_LEN];
    u_int i = 0;
    u_int nvtables = 0;

    if (kext->link_type == KXLD_LINK_KERNEL) {
        /* Create a vtable object for every vtable symbol */
        kxld_symtab_iterator_init(&iter, kext->symtab, kxld_sym_is_vtable, FALSE);
        nvtables = kxld_symtab_iterator_get_num_remaining(&iter);
    } else {
        /* We walk over the super metaclass pointer symbols, because classes
         * with them are the only ones that need patching. Then we double the
         * number of vtables we're expecting, because every pointer will have a
         * class vtable and a MetaClass vtable.
         */
        kxld_symtab_iterator_init(&iter, kext->symtab,
            kxld_sym_is_super_metaclass_pointer, FALSE);
        nvtables = kxld_symtab_iterator_get_num_remaining(&iter) * 2;
    }
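    /* For illustration: an OSObject subclass named Foo is expected to
     * contribute one super metaclass pointer symbol, and the loop below then
     * looks up both its class vtable and its MetaClass vtable by name (the
     * exact symbol names are derived by the kxld_sym_get_*_name_* helpers),
     * yielding two KXLDVTable entries for that single class.
     */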

    /* Allocate the array of vtable objects.
     */
    rval = kxld_array_init(&kext->vtables, sizeof(KXLDVTable), nvtables);
    require_noerr(rval, finish);

    /* Initialize from each vtable symbol */
    while ((sym = kxld_symtab_iterator_get_next(&iter))) {

        if (kext->link_type == KXLD_LINK_KERNEL) {
            vtable_sym = sym;
        } else {
            /* Get the class name from the smc pointer */
            rval = kxld_sym_get_class_name_from_super_metaclass_pointer(
                sym, class_name, sizeof(class_name));
            require_noerr(rval, finish);

            /* Get the vtable name from the class name */
            rval = kxld_sym_get_vtable_name_from_class_name(class_name,
                vtable_name, sizeof(vtable_name));
            require_noerr(rval, finish);

            /* Get the vtable symbol */
            vtable_sym = kxld_symtab_get_symbol_by_name(kext->symtab, vtable_name);
            require_action(vtable_sym, finish, rval=KERN_FAILURE;
                kxld_log(kKxldLogPatching, kKxldLogErr, kKxldLogMissingVtable,
                    vtable_name, class_name));

            /* Get the meta vtable name from the class name */
            rval = kxld_sym_get_meta_vtable_name_from_class_name(class_name,
                meta_vtable_name, sizeof(meta_vtable_name));
            require_noerr(rval, finish);

            /* Get the meta vtable symbol */
            meta_vtable_sym = kxld_symtab_get_symbol_by_name(kext->symtab,
                meta_vtable_name);
            if (!meta_vtable_sym) {
                /* If we don't support strict patching and we can't find the vtable,
                 * log a warning and reduce the expected number of vtables by 1.
                 */
                if (target_supports_strict_patching(kext)) {
                    kxld_log(kKxldLogPatching, kKxldLogErr, kKxldLogMissingVtable,
                        meta_vtable_name, class_name);
                    rval = KERN_FAILURE;
                    goto finish;
                } else {
                    kxld_log(kKxldLogPatching, kKxldLogErr,
                        "Warning: " kKxldLogMissingVtable,
                        meta_vtable_name, class_name);
                    kxld_array_resize(&kext->vtables, --nvtables);
                }
            }
        }

        /* Get the vtable's section */
        vtable_sect = kxld_array_get_item(&kext->sects, vtable_sym->sectnum);
        require_action(vtable_sect, finish, rval=KERN_FAILURE);

        vtable = kxld_array_get_item(&kext->vtables, i++);

        if (kext->link_type == KXLD_LINK_KERNEL) {
            /* Initialize the kernel vtable */
            rval = kxld_vtable_init_from_kernel_macho(vtable, vtable_sym,
                vtable_sect, kext->symtab, &kext->relocator);
            require_noerr(rval, finish);
        } else {
            /* Initialize the class vtable */
            if (kext->is_final_image) {
                rval = kxld_vtable_init_from_final_macho(vtable, vtable_sym,
                    vtable_sect, kext->symtab, &kext->relocator, &kext->extrelocs);
                require_noerr(rval, finish);
            } else {
                rval = kxld_vtable_init_from_object_macho(vtable, vtable_sym,
                    vtable_sect, kext->symtab, &kext->relocator);
                require_noerr(rval, finish);
            }

            /* meta_vtable_sym will be null when we don't support strict patching
             * and can't find the metaclass vtable.
             */
            if (meta_vtable_sym) {
                /* Get the vtable's section */
                meta_vtable_sect = kxld_array_get_item(&kext->sects,
                    meta_vtable_sym->sectnum);
                require_action(meta_vtable_sect, finish, rval=KERN_FAILURE);

                meta_vtable = kxld_array_get_item(&kext->vtables, i++);

                /* Initialize the metaclass vtable */
                if (kext->is_final_image) {
                    rval = kxld_vtable_init_from_final_macho(meta_vtable, meta_vtable_sym,
                        meta_vtable_sect, kext->symtab, &kext->relocator, &kext->extrelocs);
                    require_noerr(rval, finish);
                } else {
                    rval = kxld_vtable_init_from_object_macho(meta_vtable, meta_vtable_sym,
                        meta_vtable_sect, kext->symtab, &kext->relocator);
                    require_noerr(rval, finish);
                }
            }
        }
    }
    require_action(i == kext->vtables.nitems, finish,
        rval=KERN_FAILURE);

    /* Map vtable names to the vtable structures */
    rval = kxld_dict_init(&kext->vtable_index, kxld_dict_string_hash,
        kxld_dict_string_cmp, kext->vtables.nitems);
    require_noerr(rval, finish);

    for (i = 0; i < kext->vtables.nitems; ++i) {
        vtable = kxld_array_get_item(&kext->vtables, i);
        rval = kxld_dict_insert(&kext->vtable_index, vtable->name, vtable);
        require_noerr(rval, finish);
    }

    rval = KERN_SUCCESS;

finish:
    return rval;
}

/*******************************************************************************
* Temporary workaround for PR-6668105
* new, new[], delete, and delete[] may be overridden globally in a kext.
* We should do this with some sort of weak symbols, but we'll use a whitelist
* for now to minimize risk.
*******************************************************************************/
static void
restrict_private_symbols(KXLDKext *kext)
{
    const char *private_symbols[] = {
        KXLD_KMOD_INFO_SYMBOL,
        KXLD_OPERATOR_NEW_SYMBOL,
        KXLD_OPERATOR_NEW_ARRAY_SYMBOL,
        KXLD_OPERATOR_DELETE_SYMBOL,
        KXLD_OPERATOR_DELETE_ARRAY_SYMBOL
    };
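    /* Any exported symbol whose name matches this whitelist is marked private
     * below; the intent (per the comment above) is to keep a kext's own
     * overrides of operator new/delete and its kmod_info symbol from being
     * treated as exports.
     */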
    KXLDSymtabIterator iter;
    KXLDSym *sym = NULL;
    const char *name = NULL;
    u_int i = 0;

    kxld_symtab_iterator_init(&iter, kext->symtab, kxld_sym_is_exported, FALSE);
    while ((sym = kxld_symtab_iterator_get_next(&iter))) {
        for (i = 0; i < const_array_len(private_symbols); ++i) {
            name = private_symbols[i];
            if (!streq(sym->name, name)) {
                continue;
            }

            kxld_sym_mark_private(sym);
        }
    }
}

/*******************************************************************************
*******************************************************************************/
void
kxld_kext_clear(KXLDKext *kext)
{
    KXLDSeg *seg = NULL;
    KXLDSect *sect = NULL;
    KXLDVTable *vtable = NULL;
    u_int i;

    check(kext);

#if !KERNEL
    if (kext->link_type == KXLD_LINK_KERNEL) {
        unswap_macho(kext->file, kext->host_order, kext->target_order);
    }
#endif /* !KERNEL */

    for (i = 0; i < kext->segs.nitems; ++i) {
        seg = kxld_array_get_item(&kext->segs, i);
        kxld_seg_clear(seg);
    }
    kxld_array_reset(&kext->segs);

    for (i = 0; i < kext->sects.nitems; ++i) {
        sect = kxld_array_get_item(&kext->sects, i);
        kxld_sect_clear(sect);
    }
    kxld_array_reset(&kext->sects);

    for (i = 0; i < kext->vtables.nitems; ++i) {
        vtable = kxld_array_get_item(&kext->vtables, i);
        kxld_vtable_clear(vtable);
    }
    kxld_array_reset(&kext->vtables);

    kxld_array_reset(&kext->extrelocs);
    kxld_array_reset(&kext->locrelocs);
    kxld_dict_clear(&kext->vtable_index);
    kxld_relocator_clear(&kext->relocator);
    kxld_uuid_clear(&kext->uuid);

    if (kext->symtab) kxld_symtab_clear(kext->symtab);

    kext->link_addr = 0;
    kext->kmod_link_addr = 0;
    kext->cputype = 0;
    kext->cpusubtype = 0;
    kext->link_type = KXLD_LINK_UNKNOWN;
    kext->is_final_image = FALSE;
    kext->got_is_created = FALSE;
}


/*******************************************************************************
*******************************************************************************/
void
kxld_kext_deinit(KXLDKext *kext)
{
    KXLDSeg *seg = NULL;
    KXLDSect *sect = NULL;
    KXLDVTable *vtable = NULL;
    u_int i;

    check(kext);

#if !KERNEL
    if (kext->link_type == KXLD_LINK_KERNEL) {
        unswap_macho(kext->file, kext->host_order, kext->target_order);
    }
#endif /* !KERNEL */

    for (i = 0; i < kext->segs.maxitems; ++i) {
        seg = kxld_array_get_slot(&kext->segs, i);
        kxld_seg_deinit(seg);
    }
    kxld_array_deinit(&kext->segs);

    for (i = 0; i < kext->sects.maxitems; ++i) {
        sect = kxld_array_get_slot(&kext->sects, i);
        kxld_sect_deinit(sect);
    }
    kxld_array_deinit(&kext->sects);

    for (i = 0; i < kext->vtables.maxitems; ++i) {
        vtable = kxld_array_get_slot(&kext->vtables, i);
        kxld_vtable_deinit(vtable);
    }
    kxld_array_deinit(&kext->vtables);

    kxld_array_deinit(&kext->extrelocs);
    kxld_array_deinit(&kext->locrelocs);
    kxld_dict_deinit(&kext->vtable_index);

    if (kext->symtab) {
        kxld_symtab_deinit(kext->symtab);
        kxld_free(kext->symtab, kxld_symtab_sizeof());
    }

    bzero(kext, sizeof(*kext));
}

/*******************************************************************************
*******************************************************************************/
boolean_t
kxld_kext_is_true_kext(const KXLDKext *kext)
{
    return (kext->link_type == KXLD_LINK_KEXT);
}

/*******************************************************************************
*******************************************************************************/
void
kxld_kext_get_vmsize(const KXLDKext *kext, u_long *header_size, u_long *vmsize)
{
    check(kext);
    check(header_size);
    check(vmsize);
    *header_size = 0;
    *vmsize = 0;

    /* vmsize is the padded header page(s) + segment vmsizes */
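    /* For example (hypothetical sizes): a non-final kext whose Mach-O header
     * plus load commands come to 0x2c8 bytes gets one rounded header page
     * (0x1000 with 4K pages, an assumption for illustration), and that page
     * is added to the sum of the segments' vmsizes. For a final image,
     * header_size is simply 0.
     */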
1394 | ||
1395 | *header_size = (kext->is_final_image) ? | |
1396 | 0 : round_page(get_macho_header_size(kext)); | |
1397 | *vmsize = *header_size + get_macho_data_size(kext); | |
1398 | ||
1399 | } | |
1400 | ||
1401 | /******************************************************************************* | |
1402 | *******************************************************************************/ | |
1403 | const struct kxld_symtab * | |
1404 | kxld_kext_get_symtab(const KXLDKext *kext) | |
1405 | { | |
1406 | check(kext); | |
1407 | ||
1408 | return kext->symtab; | |
1409 | } | |
1410 | ||
1411 | /******************************************************************************* | |
1412 | *******************************************************************************/ | |
1413 | u_int | |
1414 | kxld_kext_get_num_symbols(const KXLDKext *kext) | |
1415 | { | |
1416 | check(kext); | |
1417 | ||
1418 | return kxld_symtab_get_num_symbols(kext->symtab); | |
1419 | } | |
1420 | ||
1421 | /******************************************************************************* | |
1422 | *******************************************************************************/ | |
1423 | void | |
1424 | kxld_kext_get_vtables(KXLDKext *kext, const KXLDArray **vtables) | |
1425 | { | |
1426 | check(kext); | |
1427 | check(vtables); | |
1428 | ||
1429 | *vtables = &kext->vtables; | |
1430 | } | |
1431 | ||
1432 | /******************************************************************************* | |
1433 | *******************************************************************************/ | |
1434 | u_int | |
1435 | kxld_kext_get_num_vtables(const KXLDKext *kext) | |
1436 | { | |
1437 | check(kext); | |
1438 | ||
1439 | return kext->vtables.nitems; | |
1440 | } | |
1441 | ||
1442 | /******************************************************************************* | |
1443 | *******************************************************************************/ | |
1444 | KXLDSeg * | |
1445 | kxld_kext_get_seg_by_name(const KXLDKext *kext, const char *segname) | |
1446 | { | |
1447 | KXLDSeg *seg = NULL; | |
1448 | u_int i = 0; | |
1449 | ||
1450 | for (i = 0; i < kext->segs.nitems; ++i) { | |
1451 | seg = kxld_array_get_item(&kext->segs, i); | |
1452 | ||
1453 | if (streq(segname, seg->segname)) break; | |
1454 | ||
1455 | seg = NULL; | |
1456 | } | |
1457 | ||
1458 | return seg; | |
1459 | } | |
1460 | ||
1461 | /******************************************************************************* | |
1462 | *******************************************************************************/ | |
1463 | KXLDSect * | |
1464 | kxld_kext_get_sect_by_name(const KXLDKext *kext, const char *segname, | |
1465 | const char *sectname) | |
1466 | { | |
1467 | KXLDSect *sect = NULL; | |
1468 | u_int i = 0; | |
1469 | ||
1470 | for (i = 0; i < kext->sects.nitems; ++i) { | |
1471 | sect = kxld_array_get_item(&kext->sects, i); | |
1472 | ||
1473 | if (streq(segname, sect->segname) && streq(sectname, sect->sectname)) { | |
1474 | break; | |
1475 | } | |
1476 | ||
1477 | sect = NULL; | |
1478 | } | |
1479 | ||
1480 | return sect; | |
1481 | } | |
1482 | ||
1483 | /******************************************************************************* | |
1484 | *******************************************************************************/ | |
1485 | int | |
1486 | kxld_kext_get_sectnum_for_sect(const KXLDKext *kext, const KXLDSect *sect) | |
1487 | { | |
1488 | kern_return_t rval = KERN_FAILURE; | |
1489 | u_int idx = -1; | |
1490 | ||
1491 | rval = kxld_array_get_index(&kext->sects, sect, &idx); | |
1492 | if (rval) idx = -1; | |
1493 | ||
1494 | return idx; | |
1495 | } | |
1496 | ||
1497 | /******************************************************************************* | |
1498 | *******************************************************************************/ | |
1499 | const KXLDArray * | |
1500 | kxld_kext_get_section_order(const KXLDKext *kext __unused) | |
1501 | { | |
1502 | #if KXLD_USER_OR_OBJECT | |
1503 | if (kext->link_type == KXLD_LINK_KERNEL && target_supports_object(kext)) { | |
1504 | return kext->section_order; | |
1505 | } | |
1506 | #endif /* KXLD_USER_OR_OBJECT */ | |
1507 | ||
1508 | return NULL; | |
1509 | } | |
1510 | ||
1511 | /******************************************************************************* | |
1512 | *******************************************************************************/ | |
1513 | static u_long | |
1514 | get_macho_header_size(const KXLDKext *kext) | |
1515 | { | |
1516 | KXLDSeg *seg = NULL; | |
1517 | u_long header_size = 0; | |
1518 | u_int i = 0; | |
1519 | ||
1520 | check(kext); | |
1521 | ||
1522 | /* Mach, segment, and UUID headers */ | |
1523 | ||
1524 | if (kxld_kext_is_32_bit(kext)) { | |
1525 | header_size += sizeof(struct mach_header); | |
1526 | } else { | |
1527 | header_size += sizeof(struct mach_header_64); | |
1528 | } | |
1529 | ||
1530 | for (i = 0; i < kext->segs.nitems; ++i) { | |
1531 | seg = kxld_array_get_item(&kext->segs, i); | |
1532 | header_size += kxld_seg_get_macho_header_size(seg, kxld_kext_is_32_bit(kext)); | |
1533 | } | |
1534 | ||
1535 | if (kext->uuid.has_uuid) { | |
1536 | header_size += kxld_uuid_get_macho_header_size(); | |
1537 | } | |
1538 | ||
1539 | return header_size; | |
1540 | } | |
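/*
 * Worked example (illustrative; uses the standard Mach-O structure sizes and
 * assumes each segment's contribution is its load command plus one section
 * header per section): a 64-bit kext with 3 segments, 6 sections in total,
 * and a UUID needs
 *
 *       sizeof(struct mach_header_64)                 32
 *     + 3 * sizeof(struct segment_command_64)        216
 *     + 6 * sizeof(struct section_64)                480
 *     + sizeof(struct uuid_command)                   24
 *                                                    ---
 *                                                    752 bytes,
 *
 * which callers round up to a full page when laying out the linked image.
 */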
1541 | ||
1542 | /******************************************************************************* | |
1543 | *******************************************************************************/ | |
1544 | static u_long | |
1545 | get_macho_data_size(const KXLDKext *kext) | |
1546 | { | |
1547 | KXLDSeg *seg = NULL; | |
1548 | u_long data_size = 0; | |
1549 | u_int i = 0; | |
1550 | ||
1551 | check(kext); | |
1552 | ||
1553 | for (i = 0; i < kext->segs.nitems; ++i) { | |
1554 | seg = kxld_array_get_item(&kext->segs, i); | |
1555 | data_size += (u_long) kxld_seg_get_vmsize(seg); | |
1556 | } | |
1557 | ||
1558 | return data_size; | |
1559 | } | |
1560 | ||
1561 | /******************************************************************************* | |
1562 | *******************************************************************************/ | |
1563 | kern_return_t kxld_kext_export_linked_object(const KXLDKext *kext, | |
1564 | u_char *linked_object, kxld_addr_t *kmod_info_kern) | |
1565 | { | |
1566 | kern_return_t rval = KERN_FAILURE; | |
1567 | KXLDSeg *seg = NULL; | |
1568 | u_long size = 0; | |
1569 | u_long header_size = 0; | |
1570 | u_long header_offset = 0; | |
1571 | u_long data_offset = 0; | |
1572 | u_int ncmds = 0; | |
1573 | u_int i = 0; | |
1574 | ||
1575 | check(kext); | |
1576 | check(linked_object); | |
1577 | check(kmod_info_kern); | |
1578 | *kmod_info_kern = 0; | |
1579 | ||
1580 | /* Calculate the size of the headers and data */ | |
1581 | ||
1582 | header_size = get_macho_header_size(kext); | |
1583 | data_offset = (kext->is_final_image) ? header_size : round_page(header_size); | |
1584 | size = data_offset + get_macho_data_size(kext); | |
1585 | ||
1586 | /* Copy data to the file */ | |
1587 | ||
1588 | ncmds = kext->segs.nitems + (kext->uuid.has_uuid == TRUE); | |
1589 | ||
1590 | rval = export_macho_header(kext, linked_object, ncmds, | |
1591 | &header_offset, header_size); | |
1592 | require_noerr(rval, finish); | |
1593 | ||
1594 | for (i = 0; i < kext->segs.nitems; ++i) { | |
1595 | seg = kxld_array_get_item(&kext->segs, i); | |
1596 | ||
1597 | rval = kxld_seg_export_macho_to_vm(seg, linked_object, &header_offset, | |
1598 | header_size, size, kext->link_addr, kxld_kext_is_32_bit(kext)); | |
1599 | require_noerr(rval, finish); | |
1600 | } | |
1601 | ||
1602 | if (kext->uuid.has_uuid) { | |
1603 | rval = kxld_uuid_export_macho(&kext->uuid, linked_object, | |
1604 | &header_offset, header_size); | |
1605 | require_noerr(rval, finish); | |
1606 | } | |
1607 | ||
1608 | *kmod_info_kern = kext->kmod_link_addr; | |
1609 | ||
1610 | #if !KERNEL | |
1611 | unswap_macho(linked_object, kext->host_order, kext->target_order); | |
1612 | #endif /* !KERNEL */ | 
1613 | ||
1614 | rval = KERN_SUCCESS; | |
1615 | ||
1616 | finish: | |
1617 | return rval; | |
1618 | } | |
1619 | ||
1620 | #if !KERNEL | |
1621 | /******************************************************************************* | |
1622 | *******************************************************************************/ | |
1623 | kern_return_t | |
1624 | kxld_kext_export_symbol_file(const KXLDKext *kext, | |
1625 | u_char **_symbol_file, u_long *_filesize) | |
1626 | { | |
1627 | kern_return_t rval = KERN_FAILURE; | |
1628 | KXLDSeg *seg = NULL; | |
1629 | u_char *file = NULL; | |
1630 | u_long size = 0; | |
1631 | u_long header_size = 0; | |
1632 | u_long header_offset = 0; | |
1633 | u_long data_offset = 0; | |
1634 | u_int ncmds = 0; | |
1635 | u_int i = 0; | |
1636 | ||
1637 | check(kext); | |
1638 | check(_symbol_file); | |
1639 | *_symbol_file = NULL; | |
1640 | ||
1641 | /* Calculate the size of the file */ | |
1642 | ||
1643 | if (kxld_kext_is_32_bit(kext)) { | |
1644 | header_size += sizeof(struct mach_header); | |
1645 | } else { | |
1646 | header_size += sizeof(struct mach_header_64); | |
1647 | } | |
1648 | ||
1649 | for (i = 0; i < kext->segs.nitems; ++i) { | |
1650 | seg = kxld_array_get_item(&kext->segs, i); | |
1651 | header_size += kxld_seg_get_macho_header_size(seg, kxld_kext_is_32_bit(kext)); | |
1652 | size += kxld_seg_get_macho_data_size(seg); | |
1653 | } | |
1654 | ||
1655 | header_size += kxld_symtab_get_macho_header_size(); | |
1656 | size += kxld_symtab_get_macho_data_size(kext->symtab, FALSE, | |
1657 | kxld_kext_is_32_bit(kext)); | |
1658 | ||
1659 | if (kext->uuid.has_uuid) { | |
1660 | header_size += kxld_uuid_get_macho_header_size(); | |
1661 | } | |
1662 | ||
1663 | data_offset = round_page(header_size); | |
1664 | size += data_offset; | |
1665 | ||
1666 | /* Allocate the symbol file */ | |
1667 | ||
1668 | file = kxld_page_alloc_untracked(size); | |
1669 | require_action(file, finish, rval=KERN_RESOURCE_SHORTAGE); | |
1670 | bzero(file, size); | |
1671 | ||
1672 | /* Copy data to the file */ | |
1673 | ||
1674 | ncmds = kext->segs.nitems + (kext->uuid.has_uuid == TRUE) + 1; /* +1 for symtab */ | |
1675 | rval = export_macho_header(kext, file, ncmds, &header_offset, header_size); | |
1676 | require_noerr(rval, finish); | |
1677 | ||
1678 | for (i = 0; i < kext->segs.nitems; ++i) { | |
1679 | seg = kxld_array_get_item(&kext->segs, i); | |
1680 | rval = kxld_seg_export_macho_to_file_buffer(seg, file, &header_offset, | |
1681 | header_size, &data_offset, size, kxld_kext_is_32_bit(kext)); | |
1682 | require_noerr(rval, finish); | |
1683 | } | |
1684 | ||
1685 | rval = kxld_symtab_export_macho(kext->symtab, file, &header_offset, | |
1686 | header_size, &data_offset, size, FALSE, kxld_kext_is_32_bit(kext)); | |
1687 | require_noerr(rval, finish); | |
1688 | ||
1689 | if (kext->uuid.has_uuid) { | |
1690 | rval = kxld_uuid_export_macho(&kext->uuid, file, &header_offset, | |
1691 | header_size); | |
1692 | require_noerr(rval, finish); | |
1693 | } | |
1694 | ||
1695 | header_offset = header_size; | |
1696 | ||
1697 | /* Commit */ | |
1698 | ||
1699 | unswap_macho(file, kext->host_order, kext->target_order); | |
1700 | ||
1701 | *_filesize = size; | |
1702 | *_symbol_file = file; | |
1703 | file = NULL; | |
1704 | rval = KERN_SUCCESS; | |
1705 | ||
1706 | finish: | |
1707 | ||
1708 | if (file) { | |
1709 | kxld_page_free_untracked(file, size); | |
1710 | file = NULL; | |
1711 | } | |
1712 | ||
1713 | check(!file); | |
1714 | check((!rval) ^ (!*_symbol_file)); | |
1715 | ||
1716 | return rval; | |
1717 | } | |
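/*
 * Resulting layout of the exported symbol file (illustrative):
 *
 *     [ mach header | segment load commands | LC_SYMTAB | LC_UUID ]
 *     [ zero padding up to round_page(header_size)                ]
 *     [ segment data, in segment order                            ]
 *     [ symbol table entries followed by the string table         ]
 */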
1718 | #endif | |
1719 | ||
1720 | /******************************************************************************* | |
1721 | *******************************************************************************/ | |
1722 | boolean_t | |
1723 | kxld_kext_target_needs_swap(const KXLDKext *kext __unused) | |
1724 | { | |
1725 | #if KERNEL | |
1726 | return FALSE; | |
1727 | #else | |
1728 | return (kext->target_order != kext->host_order); | |
1729 | #endif /* KERNEL */ | |
1730 | } | |
1731 | ||
1732 | /******************************************************************************* | |
1733 | *******************************************************************************/ | |
1734 | static kern_return_t | |
1735 | export_macho_header(const KXLDKext *kext, u_char *buf, u_int ncmds, | |
1736 | u_long *header_offset, u_long header_size) | |
1737 | { | |
1738 | kern_return_t rval = KERN_FAILURE; | |
1739 | ||
1740 | check(kext); | |
1741 | check(buf); | |
1742 | check(header_offset); | |
1743 | ||
1744 | KXLD_3264_FUNC(kxld_kext_is_32_bit(kext), rval, | |
1745 | export_macho_header_32, export_macho_header_64, | |
1746 | kext, buf, ncmds, header_offset, header_size); | |
1747 | require_noerr(rval, finish); | |
1748 | ||
1749 | rval = KERN_SUCCESS; | |
1750 | ||
1751 | finish: | |
1752 | return rval; | |
1753 | } | |
1754 | ||
1755 | #if KXLD_USER_OR_ILP32 | |
1756 | /******************************************************************************* | |
1757 | *******************************************************************************/ | |
1758 | static kern_return_t | |
1759 | export_macho_header_32(const KXLDKext *kext, u_char *buf, u_int ncmds, | |
1760 | u_long *header_offset, u_long header_size) | |
1761 | { | |
1762 | kern_return_t rval = KERN_FAILURE; | |
1763 | struct mach_header *mach = NULL; | |
1764 | ||
1765 | check(kext); | |
1766 | check(buf); | |
1767 | check(header_offset); | |
1768 | ||
1769 | require_action(sizeof(*mach) <= header_size - *header_offset, finish, | |
1770 | rval=KERN_FAILURE); | |
1771 | mach = (struct mach_header *) (buf + *header_offset); | |
1772 | ||
1773 | mach->magic = MH_MAGIC; | |
1774 | mach->cputype = kext->cputype; | 
| mach->cpusubtype = kext->cpusubtype; | 
1775 | mach->filetype = kext->filetype; | |
1776 | mach->ncmds = ncmds; | |
1777 | mach->sizeofcmds = (uint32_t) (header_size - sizeof(*mach)); | |
1778 | mach->flags = MH_NOUNDEFS; | |
1779 | ||
1780 | *header_offset += sizeof(*mach); | |
1781 | ||
1782 | rval = KERN_SUCCESS; | |
1783 | ||
1784 | finish: | |
1785 | return rval; | |
1786 | } | |
1787 | #endif /* KXLD_USER_OR_ILP32 */ | |
1788 | ||
1789 | #if KXLD_USER_OR_LP64 | |
1790 | /******************************************************************************* | |
1791 | *******************************************************************************/ | |
1792 | static kern_return_t | |
1793 | export_macho_header_64(const KXLDKext *kext, u_char *buf, u_int ncmds, | |
1794 | u_long *header_offset, u_long header_size) | |
1795 | { | |
1796 | kern_return_t rval = KERN_FAILURE; | |
1797 | struct mach_header_64 *mach = NULL; | |
1798 | ||
1799 | check(kext); | |
1800 | check(buf); | |
1801 | check(header_offset); | |
1802 | ||
1803 | require_action(sizeof(*mach) <= header_size - *header_offset, finish, | |
1804 | rval=KERN_FAILURE); | |
1805 | mach = (struct mach_header_64 *) (buf + *header_offset); | |
1806 | ||
1807 | mach->magic = MH_MAGIC_64; | |
1808 | mach->cputype = kext->cputype; | |
1809 | mach->cpusubtype = kext->cpusubtype; | |
1810 | mach->filetype = kext->filetype; | |
1811 | mach->ncmds = ncmds; | |
1812 | mach->sizeofcmds = (uint32_t) (header_size - sizeof(*mach)); | |
1813 | mach->flags = MH_NOUNDEFS; | |
1814 | ||
1815 | *header_offset += sizeof(*mach); | |
1816 | ||
1817 | rval = KERN_SUCCESS; | |
1818 | ||
1819 | finish: | |
1820 | return rval; | |
1821 | } | |
1822 | #endif /* KXLD_USER_OR_LP64 */ | |
1823 | ||
1824 | /******************************************************************************* | |
1825 | *******************************************************************************/ | |
1826 | kern_return_t | |
1827 | kxld_kext_resolve(KXLDKext *kext, struct kxld_dict *patched_vtables, | |
1828 | struct kxld_dict *defined_symbols) | |
1829 | { | |
1830 | kern_return_t rval = KERN_FAILURE; | |
1831 | ||
1832 | require_action(kext->link_type == KXLD_LINK_PSEUDO_KEXT, finish, | |
1833 | rval=KERN_FAILURE); | |
1834 | ||
1835 | /* Resolve symbols */ | |
1836 | rval = resolve_symbols(kext, defined_symbols, NULL); | |
1837 | require_noerr(rval, finish); | |
1838 | ||
1839 | /* Validate symbols */ | |
1840 | rval = validate_symbols(kext); | |
1841 | require_noerr(rval, finish); | |
1842 | ||
1843 | /* Pseudokexts re-export their dependencies' vtables */ | |
1844 | rval = copy_vtables(kext, patched_vtables); | |
1845 | require_noerr(rval, finish); | |
1846 | ||
1847 | rval = KERN_SUCCESS; | |
1848 | ||
1849 | finish: | |
1850 | return rval; | |
1851 | } | |
1852 | ||
1853 | /******************************************************************************* | |
1854 | *******************************************************************************/ | |
1855 | kern_return_t | |
1856 | kxld_kext_relocate(KXLDKext *kext, kxld_addr_t link_address, | |
1857 | KXLDDict *patched_vtables, KXLDDict *defined_symbols, | |
1858 | KXLDDict *obsolete_symbols) | |
1859 | { | |
1860 | kern_return_t rval = KERN_FAILURE; | |
1861 | KXLDSeg *seg = NULL; | |
1862 | u_int i = 0; | |
1863 | ||
1864 | check(kext); | |
1865 | check(patched_vtables); | |
1866 | check(defined_symbols); | |
1867 | ||
1868 | require_action(kext->link_type == KXLD_LINK_KEXT, finish, rval=KERN_FAILURE); | |
1869 | ||
1870 | kext->link_addr = link_address; | |
1871 | ||
1872 | /* Relocate segments (which relocates the sections) */ | |
1873 | for (i = 0; i < kext->segs.nitems; ++i) { | |
1874 | seg = kxld_array_get_item(&kext->segs, i); | |
1875 | kxld_seg_relocate(seg, link_address); | |
1876 | } | |
1877 | ||
1878 | /* Relocate symbols */ | |
1879 | rval = kxld_symtab_relocate(kext->symtab, &kext->sects); | |
1880 | require_noerr(rval, finish); | |
1881 | ||
1882 | /* Populate kmod info structure */ | |
1883 | rval = populate_kmod_info(kext); | |
1884 | require_noerr(rval, finish); | |
1885 | ||
1886 | /* Resolve symbols */ | |
1887 | rval = resolve_symbols(kext, defined_symbols, obsolete_symbols); | |
1888 | require_noerr(rval, finish); | |
1889 | ||
1890 | /* Patch vtables */ | |
1891 | rval = patch_vtables(kext, patched_vtables, defined_symbols); | |
1892 | require_noerr(rval, finish); | |
1893 | ||
1894 | /* Validate symbols */ | |
1895 | rval = validate_symbols(kext); | |
1896 | require_noerr(rval, finish); | |
1897 | ||
1898 | /* Process relocation entries and populate the global offset table. | |
1899 | * | |
1900 | * For final linked images: the relocation entries live in the external and local | 
1901 | * relocation tables referenced by the LC_DYSYMTAB load command. The GOT has its own | 
1902 | * section created by the linker; we simply need to fill it. | |
1903 | * | |
1904 | * For object files: the relocation entries are bound to each section. | |
1905 | * The GOT, if it exists for the target architecture, is created by kxld, | |
1906 | * and we must populate it according to our internal structures. | |
1907 | */ | |
1908 | if (kext->is_final_image) { | |
1909 | #if KXLD_USER_OR_BUNDLE | |
1910 | rval = process_symbol_pointers(kext); | |
1911 | require_noerr(rval, finish); | |
1912 | ||
1913 | rval = process_relocs_from_tables(kext); | |
1914 | require_noerr(rval, finish); | |
1915 | #else | |
1916 | require_action(FALSE, finish, rval=KERN_FAILURE); | |
1917 | #endif /* KXLD_USER_OR_BUNDLE */ | |
1918 | } else { | |
1919 | #if KXLD_USER_OR_GOT | |
1920 | /* Populate GOT */ | |
1921 | rval = populate_got(kext); | |
1922 | require_noerr(rval, finish); | |
1923 | #endif /* KXLD_USER_OR_GOT */ | |
1924 | #if KXLD_USER_OR_OBJECT | |
1925 | rval = process_relocs_from_sections(kext); | |
1926 | require_noerr(rval, finish); | |
1927 | #else | |
1928 | require_action(FALSE, finish, rval=KERN_FAILURE); | |
1929 | #endif /* KXLD_USER_OR_OBJECT */ | |
1930 | } | |
1931 | ||
1932 | rval = KERN_SUCCESS; | |
1933 | ||
1934 | finish: | |
1935 | return rval; | |
1936 | } | |
1937 | ||
1938 | /******************************************************************************* | |
1939 | *******************************************************************************/ | |
1940 | static kern_return_t | |
1941 | resolve_symbols(KXLDKext *kext, KXLDDict *defined_symbols, | |
1942 | KXLDDict *obsolete_symbols) | |
1943 | { | |
1944 | kern_return_t rval = KERN_FAILURE; | |
1945 | KXLDSymtabIterator iter; | |
1946 | KXLDSym *sym = NULL; | |
1947 | void *addrp = NULL; | |
1948 | kxld_addr_t addr = 0; | |
1949 | const char *name = NULL; | |
1950 | boolean_t tests_for_weak = FALSE; | |
1951 | boolean_t error = FALSE; | |
1952 | boolean_t warning = FALSE; | |
1953 | ||
1954 | check(kext); | |
1955 | check(defined_symbols); | |
1956 | ||
1957 | /* Check if the kext tests for weak symbols */ | |
1958 | sym = kxld_symtab_get_symbol_by_name(kext->symtab, KXLD_WEAK_TEST_SYMBOL); | |
1959 | tests_for_weak = (sym != NULL); | |
1960 | ||
1961 | /* Check for duplicate symbols */ | |
1962 | kxld_symtab_iterator_init(&iter, kext->symtab, kxld_sym_is_exported, FALSE); | |
1963 | while ((sym = kxld_symtab_iterator_get_next(&iter))) { | |
1964 | addrp = kxld_dict_find(defined_symbols, sym->name); | |
1965 | if (addrp) { | |
1966 | /* Convert to a kxld_addr_t */ | |
1967 | if (kxld_kext_is_32_bit(kext)) { | |
1968 | addr = (kxld_addr_t) (*(uint32_t*)addrp); | |
1969 | } else { | |
1970 | addr = (kxld_addr_t) (*(uint64_t*)addrp); | |
1971 | } | |
1972 | ||
1973 | /* Not a problem if the symbols have the same address */ | |
1974 | if (addr == sym->link_addr) { | |
1975 | continue; | |
1976 | } | |
1977 | ||
1978 | if (!error) { | |
1979 | error = TRUE; | |
1980 | kxld_log(kKxldLogLinking, kKxldLogErr, | |
1981 | "The following symbols were defined more than once:"); | |
1982 | } | |
1983 | ||
1984 | kxld_log(kKxldLogLinking, kKxldLogErr, | |
1985 | "\t%s: %p - %p", sym->name, | |
1986 | (void *) (uintptr_t) sym->link_addr, | |
1987 | (void *) (uintptr_t) addr); | |
1988 | } | |
1989 | } | |
1990 | require_noerr_action(error, finish, rval=KERN_FAILURE); | |
1991 | ||
1992 | /* Resolve undefined and indirect symbols */ | |
1993 | ||
1994 | /* Iterate over all unresolved symbols */ | |
1995 | kxld_symtab_iterator_init(&iter, kext->symtab, | |
1996 | kxld_sym_is_unresolved, FALSE); | |
1997 | while ((sym = kxld_symtab_iterator_get_next(&iter))) { | |
1998 | ||
1999 | /* Common symbols are not supported */ | |
2000 | if (kxld_sym_is_common(sym)) { | |
2001 | ||
2002 | if (!error) { | |
2003 | error = TRUE; | |
2004 | if (target_supports_common(kext)) { | |
2005 | kxld_log(kKxldLogLinking, kKxldLogErr, | |
2006 | "The following common symbols were not resolved:"); | |
2007 | } else { | |
2008 | kxld_log(kKxldLogLinking, kKxldLogErr, | |
2009 | "Common symbols are not supported in kernel extensions. " | |
2010 | "Use -fno-common to build your kext. " | |
2011 | "The following are common symbols:"); | |
2012 | } | |
2013 | } | |
2014 | kxld_log(kKxldLogLinking, kKxldLogErr, "\t%s", sym->name); | |
2015 | ||
2016 | } else { | |
2017 | ||
2018 | /* Find the address of the defined symbol */ | |
2019 | if (kxld_sym_is_undefined(sym)) { | |
2020 | name = sym->name; | |
2021 | } else { | |
2022 | name = sym->alias; | |
2023 | } | |
2024 | addrp = kxld_dict_find(defined_symbols, name); | |
2025 | ||
2026 | /* Resolve the symbol. If a definition cannot be found, then: | |
2027 | * 1) Pseudokexts log a warning and proceed | 
2028 | * 2) Actual kexts delay the error until validation in case vtable | |
2029 | * patching replaces the undefined symbol. | |
2030 | */ | |
2031 | ||
2032 | if (addrp) { | |
2033 | ||
2034 | /* Convert to a kxld_addr_t */ | |
2035 | if (kxld_kext_is_32_bit(kext)) { | |
2036 | addr = (kxld_addr_t) (*(uint32_t*)addrp); | |
2037 | } else { | |
2038 | addr = (kxld_addr_t) (*(uint64_t*)addrp); | |
2039 | } | |
2040 | ||
2041 | boolean_t is_exported = (kext->link_type == KXLD_LINK_PSEUDO_KEXT); | |
2042 | ||
2043 | rval = kxld_sym_resolve(sym, addr, is_exported); | |
2044 | require_noerr(rval, finish); | |
2045 | ||
2046 | if (obsolete_symbols && kxld_dict_find(obsolete_symbols, name)) { | |
2047 | kxld_log(kKxldLogLinking, kKxldLogWarn, | |
2048 | "This kext uses obsolete symbol %s.", name); | |
2049 | } | |
2050 | ||
2051 | } else if (kext->link_type == KXLD_LINK_PSEUDO_KEXT) { | |
2052 | /* Pseudokexts ignore undefined symbols, because any actual | |
2053 | * kexts that need those symbols will fail to link anyway, so | |
2054 | * there's no need to block well-behaved kexts. | |
2055 | */ | |
2056 | if (!warning) { | |
2057 | kxld_log(kKxldLogLinking, kKxldLogWarn, | |
2058 | "This symbol set has the following unresolved symbols:"); | |
2059 | warning = TRUE; | |
2060 | } | |
2061 | kxld_log(kKxldLogLinking, kKxldLogErr, "\t%s", sym->name); | |
2062 | kxld_sym_delete(sym); | |
2063 | ||
2064 | } else if (kxld_sym_is_weak(sym)) { | |
2065 | /* Make sure that the kext has referenced gOSKextUnresolved. | |
2066 | */ | |
2067 | require_action(tests_for_weak, finish, | |
2068 | rval=KERN_FAILURE; | |
2069 | kxld_log(kKxldLogLinking, kKxldLogErr, | |
2070 | "This kext has weak references but does not test for " | |
2071 | "them. Test for weak references with " | |
2072 | "OSKextIsSymbolResolved().")); | |
2073 | ||
2074 | #if KERNEL | |
2075 | /* Resolve the weak reference to the kernel's default handler. | 
2076 | */ | |
2077 | addr = (kxld_addr_t) &kext_weak_symbol_referenced; | |
2078 | #else | |
2079 | /* This is run during symbol generation only, so we only | |
2080 | * need a filler value here. | |
2081 | */ | |
2082 | addr = kext->link_addr; | |
2083 | #endif /* KERNEL */ | |
2084 | ||
2085 | rval = kxld_sym_resolve(sym, addr, /* exported */ FALSE); | |
2086 | require_noerr(rval, finish); | |
2087 | } | |
2088 | } | |
2089 | } | |
2090 | require_noerr_action(error, finish, rval=KERN_FAILURE); | |
2091 | ||
2092 | rval = KERN_SUCCESS; | |
2093 | ||
2094 | finish: | |
2095 | ||
2096 | return rval; | |
2097 | } | |
2098 | ||
2099 | /******************************************************************************* | |
2100 | *******************************************************************************/ | |
2101 | static boolean_t | |
2102 | target_supports_strict_patching(KXLDKext *kext) | |
2103 | { | |
2104 | check(kext); | |
2105 | ||
2106 | return (kext->cputype != CPU_TYPE_I386 && | |
2107 | kext->cputype != CPU_TYPE_POWERPC); | |
2108 | } | |
2109 | ||
2110 | /******************************************************************************* | |
2111 | * We must patch vtables to ensure binary compatibility, and to perform that | |
2112 | * patching, we have to determine the vtables' inheritance relationships. The | |
2113 | * MetaClass system gives us a way to do that: | |
2114 | * 1) Iterate over all of the super MetaClass pointer symbols. Every class | |
2115 | * that inherits from OSObject will have a pointer in its MetaClass that | |
2116 | * points to the MetaClass's super MetaClass. | |
2117 | * 2) Derive the name of the class from the super MetaClass pointer. | |
2118 | * 3) Derive the name of the class's vtable from the name of the class | |
2119 | * 4) Follow the super MetaClass pointer to get the address of the super | |
2120 | * MetaClass's symbol | |
2121 | * 5) Look up the super MetaClass symbol by address | |
2122 | * 6) Derive the super class's name from the super MetaClass name | |
2123 | * 7) Derive the super class's vtable from the super class's name | |
2124 | * This procedure will allow us to find all of the OSObject-derived classes and | |
2125 | * their super classes, and thus patch all of the vtables. | |
2126 | * | |
2127 | * We also have to take care to patch up the MetaClass's vtables. The | |
2128 | * MetaClasses follow a parallel hierarchy to the classes, so once we have the | |
2129 | * class name and super class name, we can also derive the MetaClass name and | |
2130 | * the super MetaClass name, and thus find and patch their vtables as well. | |
2131 | *******************************************************************************/ | |
2132 | ||
2133 | #define kOSMetaClassVTableName "__ZTV11OSMetaClass" | |
2134 | ||
2135 | static kern_return_t | |
2136 | patch_vtables(KXLDKext *kext, KXLDDict *patched_vtables, | |
2137 | KXLDDict *defined_symbols) | |
2138 | { | |
2139 | kern_return_t rval = KERN_FAILURE; | |
2140 | KXLDSymtabIterator iter; | |
2141 | KXLDSym *metaclass = NULL; | |
2142 | KXLDSym *super_metaclass_pointer = NULL; | |
2143 | KXLDSym *final_sym = NULL; | |
2144 | KXLDVTable *vtable = NULL; | |
2145 | KXLDVTable *super_vtable = NULL; | |
2146 | char class_name[KXLD_MAX_NAME_LEN]; | |
2147 | char super_class_name[KXLD_MAX_NAME_LEN]; | |
2148 | char vtable_name[KXLD_MAX_NAME_LEN]; | |
2149 | char super_vtable_name[KXLD_MAX_NAME_LEN]; | |
2150 | char final_sym_name[KXLD_MAX_NAME_LEN]; | |
2151 | size_t len = 0; | |
2152 | u_int nvtables = 0; | |
2153 | u_int npatched = 0; | |
2154 | u_int nprogress = 0; | |
2155 | boolean_t failure = FALSE; | |
2156 | ||
2157 | check(kext); | |
2158 | check(patched_vtables); | |
2159 | ||
2160 | /* Find each super meta class pointer symbol */ | |
2161 | ||
2162 | kxld_symtab_iterator_init(&iter, kext->symtab, | |
2163 | kxld_sym_is_super_metaclass_pointer, FALSE); | |
2164 | nvtables = kxld_symtab_iterator_get_num_remaining(&iter); | |
2165 | ||
2166 | while (npatched < nvtables) { | |
2167 | npatched = 0; | |
2168 | nprogress = 0; | |
2169 | kxld_symtab_iterator_reset(&iter); | |
2170 | while((super_metaclass_pointer = kxld_symtab_iterator_get_next(&iter))) | |
2171 | { | |
2172 | /* Get the class name from the smc pointer */ | |
2173 | rval = kxld_sym_get_class_name_from_super_metaclass_pointer( | |
2174 | super_metaclass_pointer, class_name, sizeof(class_name)); | |
2175 | require_noerr(rval, finish); | |
2176 | ||
2177 | /* Get the vtable name from the class name */ | |
2178 | rval = kxld_sym_get_vtable_name_from_class_name(class_name, | |
2179 | vtable_name, sizeof(vtable_name)); | |
2180 | require_noerr(rval, finish); | |
2181 | ||
2182 | /* Get the vtable and make sure it hasn't been patched */ | |
2183 | vtable = kxld_dict_find(&kext->vtable_index, vtable_name); | |
2184 | require_action(vtable, finish, rval=KERN_FAILURE; | |
2185 | kxld_log(kKxldLogPatching, kKxldLogErr, kKxldLogMissingVtable, | |
2186 | vtable_name, class_name)); | |
2187 | ||
2188 | if (!vtable->is_patched) { | |
2189 | ||
2190 | /* Find the SMCP's meta class symbol */ | |
2191 | rval = get_metaclass_symbol_from_super_meta_class_pointer_symbol( | |
2192 | kext, super_metaclass_pointer, &metaclass); | |
2193 | require_noerr(rval, finish); | |
2194 | ||
2195 | /* Get the super class name from the super metaclass */ | |
2196 | rval = kxld_sym_get_class_name_from_metaclass(metaclass, | |
2197 | super_class_name, sizeof(super_class_name)); | |
2198 | require_noerr(rval, finish); | |
2199 | ||
2200 | /* Get the super vtable name from the class name */ | |
2201 | rval = kxld_sym_get_vtable_name_from_class_name(super_class_name, | |
2202 | super_vtable_name, sizeof(super_vtable_name)); | |
2203 | require_noerr(rval, finish); | |
2204 | ||
2205 | if (failure) { | |
2206 | kxld_log(kKxldLogPatching, kKxldLogErr, | |
2207 | "\t%s (super vtable %s)", vtable_name, super_vtable_name); | |
2208 | continue; | |
2209 | } | |
2210 | ||
2211 | /* Get the super vtable if it's been patched */ | |
2212 | super_vtable = kxld_dict_find(patched_vtables, super_vtable_name); | |
2213 | if (!super_vtable) continue; | |
2214 | ||
2215 | /* Get the final symbol's name from the super vtable */ | |
2216 | rval = kxld_sym_get_final_sym_name_from_class_name(super_class_name, | |
2217 | final_sym_name, sizeof(final_sym_name)); | |
2218 | require_noerr(rval, finish); | |
2219 | ||
2220 | /* Verify that the final symbol does not exist. First check | |
2221 | * all the externally defined symbols, then check locally. | |
2222 | */ | |
2223 | final_sym = kxld_dict_find(defined_symbols, final_sym_name); | |
2224 | if (!final_sym) { | |
2225 | final_sym = kxld_symtab_get_symbol_by_name(kext->symtab, | |
2226 | final_sym_name); | |
2227 | } | |
2228 | require_action(!final_sym, finish, | |
2229 | rval=KERN_FAILURE; | |
2230 | kxld_log(kKxldLogPatching, kKxldLogErr, | |
2231 | "Class %s is a subclass of final class %s.", | |
2232 | class_name, super_class_name)); | |
2233 | ||
2234 | /* Patch the class's vtable */ | |
2235 | rval = kxld_vtable_patch(vtable, super_vtable, kext->symtab, | |
2236 | target_supports_strict_patching(kext)); | |
2237 | require_noerr(rval, finish); | |
2238 | ||
2239 | /* Add the class's vtable to the set of patched vtables */ | |
2240 | rval = kxld_dict_insert(patched_vtables, vtable->name, vtable); | |
2241 | require_noerr(rval, finish); | |
2242 | ||
2243 | /* Get the meta vtable name from the class name */ | |
2244 | rval = kxld_sym_get_meta_vtable_name_from_class_name(class_name, | |
2245 | vtable_name, sizeof(vtable_name)); | |
2246 | require_noerr(rval, finish); | |
2247 | ||
2248 | /* Get the meta vtable. Whether or not it should exist has already | |
2249 | * been tested in create_vtables(), so if it doesn't exist and we're | |
2250 | * still running, we can safely skip it. | |
2251 | */ | |
2252 | vtable = kxld_dict_find(&kext->vtable_index, vtable_name); | |
2253 | if (!vtable) { | |
2254 | ++nprogress; | |
2255 | ++npatched; | |
2256 | continue; | |
2257 | } | |
2258 | require_action(!vtable->is_patched, finish, rval=KERN_FAILURE); | |
2259 | ||
2260 | /* There is no way to look up a metaclass vtable at runtime, but | |
2261 | * we know that every class's metaclass inherits directly from | |
2262 | * OSMetaClass, so we just hardcode that vtable name here. | |
2263 | */ | |
2264 | len = strlcpy(super_vtable_name, kOSMetaClassVTableName, | |
2265 | sizeof(super_vtable_name)); | |
2266 | require_action(len == const_strlen(kOSMetaClassVTableName), | |
2267 | finish, rval=KERN_FAILURE); | |
2268 | ||
2269 | /* Get the super meta vtable */ | |
2270 | super_vtable = kxld_dict_find(patched_vtables, super_vtable_name); | |
2271 | require_action(super_vtable && super_vtable->is_patched, | |
2272 | finish, rval=KERN_FAILURE); | |
2273 | ||
2274 | /* Patch the meta class's vtable */ | |
2275 | rval = kxld_vtable_patch(vtable, super_vtable, | |
2276 | kext->symtab, target_supports_strict_patching(kext)); | |
2277 | require_noerr(rval, finish); | |
2278 | ||
2279 | /* Add the MetaClass's vtable to the set of patched vtables */ | |
2280 | rval = kxld_dict_insert(patched_vtables, vtable->name, vtable); | |
2281 | require_noerr(rval, finish); | |
2282 | ||
2283 | ++nprogress; | |
2284 | } | |
2285 | ||
2286 | ++npatched; | |
2287 | } | |
2288 | ||
2289 | require_action(!failure, finish, rval=KERN_FAILURE); | |
2290 | if (!nprogress) { | |
2291 | failure = TRUE; | |
2292 | kxld_log(kKxldLogPatching, kKxldLogErr, | |
2293 | "The following vtables were unpatchable because each one's " | |
2294 | "parent vtable either was not found or also was not patchable:"); | |
2295 | } | |
2296 | } | |
2297 | ||
2298 | rval = KERN_SUCCESS; | |
2299 | finish: | |
2300 | return rval; | |
2301 | } | |
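/*
 * Illustrative example of the symbol-name derivations performed by the loop
 * above, for a hypothetical class MyDriver (Itanium C++ ABI mangling, as
 * used by the OSMetaClass symbols handled here):
 *
 *     super MetaClass pointer symbol    __ZN8MyDriver10superClassE
 *       -> class name                   MyDriver
 *       -> class vtable                 __ZTV8MyDriver
 *       -> MetaClass vtable             __ZTVN8MyDriver9MetaClassE
 *
 * Every MetaClass inherits directly from OSMetaClass, so each MetaClass
 * vtable is patched against kOSMetaClassVTableName ("__ZTV11OSMetaClass").
 */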
2302 | ||
2303 | /******************************************************************************* | |
2304 | *******************************************************************************/ | |
2305 | static kern_return_t | |
2306 | validate_symbols(KXLDKext *kext) | |
2307 | { | |
2308 | kern_return_t rval = KERN_FAILURE; | |
2309 | KXLDSymtabIterator iter; | |
2310 | KXLDSym *sym = NULL; | |
2311 | u_int error = FALSE; | |
2312 | ||
2313 | /* Check for any unresolved symbols */ | |
2314 | kxld_symtab_iterator_init(&iter, kext->symtab, kxld_sym_is_unresolved, FALSE); | |
2315 | while ((sym = kxld_symtab_iterator_get_next(&iter))) { | |
2316 | if (!error) { | |
2317 | error = TRUE; | |
2318 | kxld_log(kKxldLogLinking, kKxldLogErr, | |
2319 | "The following symbols are unresolved for this kext:"); | |
2320 | } | |
2321 | kxld_log(kKxldLogLinking, kKxldLogErr, "\t%s", sym->name); | |
2322 | } | |
2323 | require_noerr_action(error, finish, rval=KERN_FAILURE); | |
2324 | ||
2325 | rval = KERN_SUCCESS; | |
2326 | ||
2327 | finish: | |
2328 | return rval; | |
2329 | } | |
2330 | ||
2331 | #if KXLD_USER_OR_GOT || KXLD_USER_OR_COMMON | |
2332 | /******************************************************************************* | |
2333 | *******************************************************************************/ | |
2334 | static kern_return_t | |
2335 | add_section(KXLDKext *kext, KXLDSect **sect) | |
2336 | { | |
2337 | kern_return_t rval = KERN_FAILURE; | |
2338 | u_int nsects = kext->sects.nitems; | |
2339 | ||
2340 | rval = kxld_array_resize(&kext->sects, nsects + 1); | |
2341 | require_noerr(rval, finish); | |
2342 | ||
2343 | *sect = kxld_array_get_item(&kext->sects, nsects); | |
2344 | ||
2345 | rval = KERN_SUCCESS; | |
2346 | ||
2347 | finish: | |
2348 | return rval; | |
2349 | } | |
2350 | #endif /* KXLD_USER_OR_GOT || KXLD_USER_OR_COMMON */ | |
2351 | ||
2352 | #if KXLD_USER_OR_GOT | |
2353 | /******************************************************************************* | |
2354 | *******************************************************************************/ | |
2355 | static boolean_t | |
2356 | target_has_got(const KXLDKext *kext) | |
2357 | { | |
2358 | return FALSE; | 
2359 | } | |
2360 | ||
2361 | /******************************************************************************* | |
2362 | * Create and initialize the Global Offset Table | |
2363 | *******************************************************************************/ | |
2364 | static kern_return_t | |
2365 | create_got(KXLDKext *kext) | |
2366 | { | |
2367 | kern_return_t rval = KERN_FAILURE; | |
2368 | KXLDSect *sect = NULL; | |
2369 | u_int ngots = 0; | |
2370 | u_int i = 0; | |
2371 | ||
2372 | if (!target_has_got(kext)) { | |
2373 | rval = KERN_SUCCESS; | |
2374 | goto finish; | |
2375 | } | |
2376 | ||
2377 | for (i = 0; i < kext->sects.nitems; ++i) { | |
2378 | sect = kxld_array_get_item(&kext->sects, i); | |
2379 | ngots += kxld_sect_get_ngots(sect, &kext->relocator, | |
2380 | kext->symtab); | |
2381 | } | |
2382 | ||
2383 | rval = add_section(kext, §); | |
2384 | require_noerr(rval, finish); | |
2385 | ||
2386 | rval = kxld_sect_init_got(sect, ngots); | |
2387 | require_noerr(rval, finish); | |
2388 | ||
2389 | kext->got_is_created = TRUE; | |
2390 | rval = KERN_SUCCESS; | |
2391 | ||
2392 | finish: | |
2393 | return rval; | |
2394 | } | |
2395 | ||
2396 | /******************************************************************************* | |
2397 | *******************************************************************************/ | |
2398 | static kern_return_t | |
2399 | populate_got(KXLDKext *kext) | |
2400 | { | |
2401 | kern_return_t rval = KERN_FAILURE; | |
2402 | KXLDSect *sect = NULL; | |
2403 | u_int i = 0; | |
2404 | ||
2405 | if (!target_has_got(kext) || !kext->got_is_created) { | |
2406 | rval = KERN_SUCCESS; | |
2407 | goto finish; | |
2408 | } | |
2409 | ||
2410 | for (i = 0; i < kext->sects.nitems; ++i) { | |
2411 | sect = kxld_array_get_item(&kext->sects, i); | |
2412 | if (streq_safe(sect->segname, KXLD_SEG_GOT, sizeof(KXLD_SEG_GOT)) && | |
2413 | streq_safe(sect->sectname, KXLD_SECT_GOT, sizeof(KXLD_SECT_GOT))) | |
2414 | { | |
2415 | kxld_sect_populate_got(sect, kext->symtab, | |
2416 | kxld_kext_target_needs_swap(kext)); | |
2417 | break; | |
2418 | } | |
2419 | } | |
2420 | ||
2421 | require_action(i < kext->sects.nitems, finish, rval=KXLD_MISSING_GOT); | |
2422 | ||
2423 | rval = KERN_SUCCESS; | |
2424 | ||
2425 | finish: | |
2426 | return rval; | |
2427 | } | |
2428 | #endif /* KXLD_USER_OR_GOT */ | |
2429 | ||
2430 | /******************************************************************************* | |
2431 | *******************************************************************************/ | |
2432 | static boolean_t | |
2433 | target_supports_common(const KXLDKext *kext) | |
2434 | { | |
2435 | check(kext); | |
2436 | return (kext->cputype == CPU_TYPE_I386 || | |
2437 | kext->cputype == CPU_TYPE_POWERPC); | |
2438 | } | |
2439 | ||
2440 | #if KXLD_USER_OR_COMMON | |
2441 | /******************************************************************************* | |
2442 | * If there are common symbols, calculate how much space they'll need | |
2443 | * and create/grow the __DATA __common section to accommodate them. | |
2444 | * Then, resolve them against that section. | |
2445 | *******************************************************************************/ | |
2446 | static kern_return_t | |
2447 | resolve_common_symbols(KXLDKext *kext) | |
2448 | { | |
2449 | kern_return_t rval = KERN_FAILURE; | |
2450 | KXLDSymtabIterator iter; | |
2451 | KXLDSym *sym = NULL; | |
2452 | KXLDSect *sect = NULL; | |
2453 | kxld_addr_t base_addr = 0; | |
2454 | kxld_size_t size = 0; | |
2455 | kxld_size_t total_size = 0; | |
2456 | u_int align = 0; | |
2457 | u_int max_align = 0; | |
2458 | u_int sectnum = 0; | |
2459 | ||
2460 | if (!target_supports_common(kext)) { | |
2461 | rval = KERN_SUCCESS; | |
2462 | goto finish; | |
2463 | } | |
2464 | ||
2465 | /* Iterate over the common symbols to calculate their total aligned size */ | |
2466 | kxld_symtab_iterator_init(&iter, kext->symtab, kxld_sym_is_common, FALSE); | |
2467 | while ((sym = kxld_symtab_iterator_get_next(&iter))) { | |
2468 | align = kxld_sym_get_common_align(sym); | |
2469 | size = kxld_sym_get_common_size(sym); | |
2470 | ||
2471 | if (align > max_align) max_align = align; | |
2472 | ||
2473 | total_size = kxld_align_address(total_size, align) + size; | |
2474 | } | |
2475 | ||
2476 | /* If there are common symbols, grow or create the __DATA __common section | |
2477 | * to hold them. | |
2478 | */ | |
2479 | if (total_size) { | |
2480 | sect = kxld_kext_get_sect_by_name(kext, SEG_DATA, SECT_COMMON); | |
2481 | if (sect) { | |
2482 | base_addr = sect->base_addr + sect->size; | |
2483 | ||
2484 | kxld_sect_grow(sect, total_size, max_align); | |
2485 | } else { | |
2486 | base_addr = 0; | |
2487 | ||
2488 | rval = add_section(kext, §); | |
2489 | require_noerr(rval, finish); | |
2490 | ||
2491 | kxld_sect_init_zerofill(sect, SEG_DATA, SECT_COMMON, | |
2492 | total_size, max_align); | |
2493 | } | |
2494 | ||
2495 | /* Resolve the common symbols against the new section */ | |
2496 | rval = kxld_array_get_index(&kext->sects, sect, §num); | |
2497 | require_noerr(rval, finish); | |
2498 | ||
2499 | kxld_symtab_iterator_reset(&iter); | |
2500 | while ((sym = kxld_symtab_iterator_get_next(&iter))) { | |
2501 | align = kxld_sym_get_common_align(sym); | |
2502 | size = kxld_sym_get_common_size(sym); | |
2503 | ||
2504 | base_addr = kxld_align_address(base_addr, align); | |
2505 | kxld_sym_resolve_common(sym, sectnum, base_addr); | |
2506 | ||
2507 | base_addr += size; | |
2508 | } | |
2509 | } | |
2510 | ||
2511 | rval = KERN_SUCCESS; | |
2512 | ||
2513 | finish: | |
2514 | return rval; | |
2515 | } | |
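/*
 * Worked example (illustrative) of the size accumulation above: common
 * symbols of (size, byte alignment) = (4, 4), (16, 16) and (2, 2) pack as
 *
 *     offset  0: 4-byte symbol           -> running size  4
 *     offset 16: 16-byte symbol (padded) -> running size 32
 *     offset 32: 2-byte symbol           -> running size 34
 *
 * so total_size is 34 and max_align records the 16-byte requirement.  Note
 * that kxld_sym_get_common_align() may express alignment as a power-of-two
 * exponent rather than a byte count; the packing arithmetic is the same
 * either way.
 */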
2516 | #endif /* KXLD_USER_OR_COMMON */ | |
2517 | ||
2518 | /******************************************************************************* | |
2519 | *******************************************************************************/ | |
2520 | static kern_return_t | |
2521 | get_metaclass_symbol_from_super_meta_class_pointer_symbol(KXLDKext *kext, | |
2522 | KXLDSym *super_metaclass_pointer_sym, KXLDSym **metaclass) | |
2523 | { | |
2524 | kern_return_t rval = KERN_FAILURE; | |
2525 | KXLDSect *sect = NULL; | |
2526 | KXLDReloc *reloc = NULL; | |
2527 | uint32_t offset = 0; | |
2528 | ||
2529 | check(kext); | |
2530 | check(super_metaclass_pointer_sym); | |
2531 | check(metaclass); | |
2532 | *metaclass = NULL; | |
2533 | ||
2534 | sect = kxld_array_get_item(&kext->sects, super_metaclass_pointer_sym->sectnum); | |
2535 | require_action(sect, finish, rval=KERN_FAILURE); | |
2536 | ||
2537 | /* Find the relocation entry for the super metaclass pointer and get the | |
2538 | * symbol associated with that relocation entry | |
2539 | */ | |
2540 | ||
2541 | if (kext->is_final_image) { | |
2542 | /* The relocation entry could be in either the external or local | |
2543 | * relocation entries. kxld_reloc_get_symbol() can handle either | |
2544 | * type. | |
2545 | */ | |
2546 | reloc = kxld_reloc_get_reloc_by_offset(&kext->extrelocs, | |
2547 | super_metaclass_pointer_sym->base_addr); | |
2548 | if (!reloc) { | |
2549 | reloc = kxld_reloc_get_reloc_by_offset(&kext->locrelocs, | |
2550 | super_metaclass_pointer_sym->base_addr); | |
2551 | } | |
2552 | require_action(reloc, finish, rval=KERN_FAILURE); | |
2553 | ||
2554 | *metaclass = kxld_reloc_get_symbol(&kext->relocator, reloc, kext->file, | |
2555 | kext->symtab); | |
2556 | } else { | |
2557 | offset = kxld_sym_get_section_offset(super_metaclass_pointer_sym, sect); | |
2558 | ||
2559 | reloc = kxld_reloc_get_reloc_by_offset(§->relocs, offset); | |
2560 | require_action(reloc, finish, rval=KERN_FAILURE); | |
2561 | ||
2562 | *metaclass = kxld_reloc_get_symbol(&kext->relocator, reloc, sect->data, | |
2563 | kext->symtab); | |
2564 | } | |
2565 | require_action(*metaclass, finish, rval=KERN_FAILURE); | |
2566 | ||
2567 | rval = KERN_SUCCESS; | |
2568 | ||
2569 | finish: | |
2570 | return rval; | |
2571 | } | |
2572 | ||
2573 | /******************************************************************************* | |
2574 | *******************************************************************************/ | |
2575 | static kern_return_t | |
2576 | copy_vtables(KXLDKext *kext, const KXLDDict *patched_vtables) | |
2577 | { | |
2578 | kern_return_t rval = KERN_FAILURE; | |
2579 | KXLDSymtabIterator iter; | |
2580 | KXLDSym *sym = NULL; | |
2581 | KXLDVTable *vtable = NULL, *src = NULL; | |
2582 | u_int i = 0; | |
2583 | u_int nvtables = 0; | |
2584 | char class_name[KXLD_MAX_NAME_LEN]; | |
2585 | char meta_vtable_name[KXLD_MAX_NAME_LEN]; | |
2586 | ||
2587 | kxld_symtab_iterator_init(&iter, kext->symtab, | |
2588 | kxld_sym_is_class_vtable, FALSE); | |
2589 | ||
2590 | /* The iterator tracks all the class vtables, so we double the number of | |
2591 | * vtables we're expecting because we use the class vtables to find the | |
2592 | * MetaClass vtables. | |
2593 | */ | |
2594 | nvtables = kxld_symtab_iterator_get_num_remaining(&iter) * 2; | |
2595 | rval = kxld_array_init(&kext->vtables, sizeof(KXLDVTable), nvtables); | |
2596 | require_noerr(rval, finish); | |
2597 | ||
2598 | while ((sym = kxld_symtab_iterator_get_next(&iter))) { | |
2599 | src = kxld_dict_find(patched_vtables, sym->name); | |
2600 | require_action(src, finish, rval=KERN_FAILURE); | |
2601 | ||
2602 | vtable = kxld_array_get_item(&kext->vtables, i++); | |
2603 | rval = kxld_vtable_copy(vtable, src); | |
2604 | require_noerr(rval, finish); | |
2605 | ||
2606 | rval = kxld_sym_get_class_name_from_vtable(sym, | |
2607 | class_name, sizeof(class_name)); | |
2608 | require_noerr(rval, finish); | |
2609 | ||
2610 | rval = kxld_sym_get_meta_vtable_name_from_class_name(class_name, | |
2611 | meta_vtable_name, sizeof(meta_vtable_name)); | |
2612 | require_noerr(rval, finish); | |
2613 | ||
2614 | /* Some classes don't have a MetaClass, so when we run across one | |
2615 | * of those, we shrink the vtable array by 1. | |
2616 | */ | |
2617 | src = kxld_dict_find(patched_vtables, meta_vtable_name); | |
2618 | if (src) { | |
2619 | vtable = kxld_array_get_item(&kext->vtables, i++); | |
2620 | rval = kxld_vtable_copy(vtable, src); | |
2621 | require_noerr(rval, finish); | |
2622 | } else { | |
2623 | kxld_array_resize(&kext->vtables, kext->vtables.nitems - 1); | |
2624 | } | |
2625 | } | |
2626 | ||
2627 | rval = KERN_SUCCESS; | |
2628 | ||
2629 | finish: | |
2630 | return rval; | |
2631 | } | |
2632 | ||
2633 | #if KXLD_USER_OR_OBJECT | |
2634 | /******************************************************************************* | |
2635 | *******************************************************************************/ | |
2636 | static kern_return_t | |
2637 | process_relocs_from_sections(KXLDKext *kext) | |
2638 | { | |
2639 | kern_return_t rval = KERN_FAILURE; | |
2640 | KXLDSect *sect = NULL; | |
2641 | u_int i = 0; | |
2642 | ||
2643 | for (i = 0; i < kext->sects.nitems; ++i) { | |
2644 | sect = kxld_array_get_item(&kext->sects, i); | |
2645 | rval = kxld_sect_process_relocs(sect, &kext->relocator, | |
2646 | &kext->sects, kext->symtab); | |
2647 | require_noerr_action(rval, finish, | |
2648 | kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogInvalidSectReloc, | |
2649 | i, sect->segname, sect->sectname)); | |
2650 | } | |
2651 | ||
2652 | rval = KERN_SUCCESS; | |
2653 | ||
2654 | finish: | |
2655 | return rval; | |
2656 | } | |
2657 | #endif /* KXLD_USER_OR_OBJECT */ | |
2658 | ||
2659 | #if KXLD_USER_OR_BUNDLE | |
2660 | /******************************************************************************* | |
2661 | *******************************************************************************/ | |
2662 | static kern_return_t | |
2663 | process_relocs_from_tables(KXLDKext *kext) | |
2664 | { | |
2665 | kern_return_t rval = KERN_FAILURE; | |
2666 | KXLDReloc *reloc = NULL; | |
2667 | KXLDSeg *seg = NULL; | |
2668 | u_int i = 0; | |
2669 | ||
2670 | /* Offsets for relocations in relocation tables are based on the vm | |
2671 | * address of the first segment. | |
2672 | */ | |
2673 | seg = kxld_array_get_item(&kext->segs, 0); | |
2674 | ||
2675 | /* Process external relocations */ | |
2676 | for (i = 0; i < kext->extrelocs.nitems; ++i) { | |
2677 | reloc = kxld_array_get_item(&kext->extrelocs, i); | |
2678 | ||
2679 | rval = kxld_relocator_process_table_reloc(&kext->relocator, reloc, seg, | |
2680 | kext->file, &kext->sects, kext->symtab); | |
2681 | require_noerr_action(rval, finish, | |
2682 | kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogInvalidExtReloc, i)); | |
2683 | } | |
2684 | ||
2685 | /* Process local relocations */ | |
2686 | for (i = 0; i < kext->locrelocs.nitems; ++i) { | |
2687 | reloc = kxld_array_get_item(&kext->locrelocs, i); | |
2688 | ||
2689 | rval = kxld_relocator_process_table_reloc(&kext->relocator, reloc, seg, | |
2690 | kext->file, &kext->sects, kext->symtab); | |
2691 | require_noerr_action(rval, finish, | |
2692 | kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogInvalidIntReloc, i)); | |
2693 | } | |
2694 | ||
2695 | rval = KERN_SUCCESS; | |
2696 | ||
2697 | finish: | |
2698 | return rval; | |
2699 | } | |
2700 | ||
2701 | /******************************************************************************* | |
2702 | *******************************************************************************/ | |
2703 | static void | |
2704 | add_to_ptr(u_char *symptr, kxld_addr_t val, boolean_t is_32_bit) | |
2705 | { | |
2706 | if (is_32_bit) { | |
2707 | uint32_t *ptr = (uint32_t *) symptr; | |
2708 | *ptr += (uint32_t) val; | |
2709 | } else { | |
2710 | uint64_t *ptr = (uint64_t *) symptr; | |
2711 | *ptr += (uint64_t) val; | |
2712 | } | |
2713 | } | |
2714 | ||
2715 | #define SECT_SYM_PTRS "__nl_symbol_ptr" | |
2716 | ||
2717 | /******************************************************************************* | |
2718 | * Final linked images create an __nl_symbol_ptr section for the global offset | |
2719 | * table and for symbol pointer lookups in general. Rather than use relocation | |
2720 | * entries, the linker creates an "indirect symbol table" which stores indexes | |
2721 | * into the symbol table corresponding to the entries of this section. This | |
2722 | * function populates the section with the relocated addresses of those symbols. | |
2723 | *******************************************************************************/ | |
2724 | static kern_return_t | |
2725 | process_symbol_pointers(KXLDKext *kext) | |
2726 | { | |
2727 | kern_return_t rval = KERN_FAILURE; | |
2728 | KXLDSect *sect = NULL; | |
2729 | KXLDSym *sym = NULL; | |
2730 | int32_t *symidx = NULL; | |
2731 | u_char *symptr = NULL; | |
2732 | u_long symptrsize = 0; | |
2733 | u_int nsyms = 0; | |
2734 | u_int firstsym = 0; | |
2735 | u_int i = 0; | |
2736 | ||
2737 | check(kext); | |
2738 | ||
2739 | require_action(kext->is_final_image && kext->dysymtab_hdr, | |
2740 | finish, rval=KERN_FAILURE); | |
2741 | ||
2742 | /* Get the __DATA,__nl_symbol_ptr section. If it doesn't exist, we have | |
2743 | * nothing to do. | |
2744 | */ | |
2745 | ||
2746 | sect = kxld_kext_get_sect_by_name(kext, SEG_DATA, SECT_SYM_PTRS); | |
2747 | if (!sect) { | |
2748 | rval = KERN_SUCCESS; | |
2749 | goto finish; | |
2750 | } | |
2751 | ||
2752 | require_action(sect->flags & S_NON_LAZY_SYMBOL_POINTERS, | |
2753 | finish, rval=KERN_FAILURE; | |
2754 | kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogMalformedMachO | |
2755 | "Section %s,%s does not have S_NON_LAZY_SYMBOL_POINTERS flag.", | |
2756 | SEG_DATA, SECT_SYM_PTRS)); | |
2757 | ||
2758 | /* Calculate the table offset and number of entries in the section */ | |
2759 | ||
2760 | if (kxld_kext_is_32_bit(kext)) { | |
2761 | symptrsize = sizeof(uint32_t); | |
2762 | } else { | |
2763 | symptrsize = sizeof(uint64_t); | |
2764 | } | |
2765 | ||
2766 | nsyms = (u_int) (sect->size / symptrsize); | |
2767 | firstsym = sect->reserved1; | |
2768 | ||
2769 | require_action(firstsym + nsyms <= kext->dysymtab_hdr->nindirectsyms, | |
2770 | finish, rval=KERN_FAILURE; | |
2771 | kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogMalformedMachO)); | |
2772 | ||
2773 | /* Iterate through the indirect symbol table and fill in the section of | |
2774 | * symbol pointers. There are three cases: | |
2775 | * 1) A normal symbol - put its value directly in the table | |
2776 | * 2) An INDIRECT_SYMBOL_LOCAL - symbols that are local and already have | |
2777 | * their offset from the start of the file in the section. Simply | |
2778 | * add the file's link address to fill this entry. | |
2779 | * 3) An INDIRECT_SYMBOL_ABS - prepopulated absolute symbols. No | |
2780 | * action is required. | |
2781 | */ | |
2782 | ||
2783 | symidx = (int32_t *) (kext->file + kext->dysymtab_hdr->indirectsymoff); | |
2784 | symidx += firstsym; | |
2785 | symptr = sect->data; | |
2786 | for (i = 0; i < nsyms; ++i, ++symidx, symptr+=symptrsize) { | |
2787 | if (*symidx & INDIRECT_SYMBOL_LOCAL) { | |
2788 | if (*symidx & INDIRECT_SYMBOL_ABS) continue; | |
2789 | ||
2790 | add_to_ptr(symptr, kext->link_addr, kxld_kext_is_32_bit(kext)); | |
2791 | } else { | |
2792 | sym = kxld_symtab_get_symbol_by_index(kext->symtab, *symidx); | |
2793 | require_action(sym, finish, rval=KERN_FAILURE); | |
2794 | ||
2795 | add_to_ptr(symptr, sym->link_addr, kxld_kext_is_32_bit(kext)); | |
2796 | } | |
2797 | } | |
2798 | ||
2799 | rval = KERN_SUCCESS; | |
2800 | finish: | |
2801 | return rval; | |
2802 | } | |
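/*
 * Minimal sketch of how the indirect symbol table consulted above is laid
 * out.  The helper below is hypothetical and is not called by kxld; it only
 * illustrates that LC_DYSYMTAB's indirectsymoff points at an array of
 * uint32_t symbol-table indexes, and that a symbol-pointer section's
 * reserved1 field is that section's first index into the array.
 */
static const uint32_t *
sketch_indirect_entries_64(const u_char *macho_file,
    const struct dysymtab_command *dysymtab, const struct section_64 *sect)
{
    /* The indirect symbol table lives at indirectsymoff in the file... */
    const uint32_t *indirect_table =
        (const uint32_t *) (macho_file + dysymtab->indirectsymoff);

    /* ...and entries [reserved1, reserved1 + nptrs) belong to this section,
     * where nptrs is the section size divided by the pointer size. */
    return indirect_table + sect->reserved1;
}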
2803 | #endif /* KXLD_USER_OR_BUNDLE */ | |
2804 | ||
2805 | /******************************************************************************* | |
2806 | *******************************************************************************/ | |
2807 | static kern_return_t | |
2808 | populate_kmod_info(KXLDKext *kext) | |
2809 | { | |
2810 | kern_return_t rval = KERN_FAILURE; | |
2811 | KXLDSect *kmodsect = NULL; | |
2812 | KXLDSym *kmodsym = NULL; | |
2813 | u_long kmod_offset = 0; | |
2814 | u_long header_size; | |
2815 | u_long size; | |
2816 | ||
2817 | if (kext->link_type != KXLD_LINK_KEXT) { | |
2818 | rval = KERN_SUCCESS; | |
2819 | goto finish; | |
2820 | } | |
2821 | ||
2822 | kxld_kext_get_vmsize(kext, &header_size, &size); | |
2823 | ||
2824 | kmodsym = kxld_symtab_get_symbol_by_name(kext->symtab, KXLD_KMOD_INFO_SYMBOL); | |
2825 | require_action(kmodsym, finish, rval=KERN_FAILURE; | |
2826 | kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogNoKmodInfo)); | |
2827 | ||
2828 | kmodsect = kxld_array_get_item(&kext->sects, kmodsym->sectnum); | |
2829 | kmod_offset = (u_long) (kmodsym->base_addr - kmodsect->base_addr); | |
2830 | ||
2831 | kext->kmod_info = (kmod_info_t *) (kmodsect->data + kmod_offset); | |
2832 | kext->kmod_link_addr = kmodsym->link_addr; | |
2833 | ||
2834 | if (kxld_kext_is_32_bit(kext)) { | |
2835 | kmod_info_32_v1_t *kmod = (kmod_info_32_v1_t *) (kext->kmod_info); | |
2836 | kmod->address = (uint32_t) kext->link_addr; | |
2837 | kmod->size = (uint32_t) size; | |
2838 | kmod->hdr_size = (uint32_t) header_size; | |
2839 | ||
2840 | #if !KERNEL | |
2841 | if (kxld_kext_target_needs_swap(kext)) { | |
2842 | kmod->address = OSSwapInt32(kmod->address); | |
2843 | kmod->size = OSSwapInt32(kmod->size); | |
2844 | kmod->hdr_size = OSSwapInt32(kmod->hdr_size); | |
2845 | } | |
2846 | #endif /* !KERNEL */ | |
2847 | } else { | |
2848 | kmod_info_64_v1_t *kmod = (kmod_info_64_v1_t *) (kext->kmod_info); | |
2849 | kmod->address = kext->link_addr; | |
2850 | kmod->size = size; | |
2851 | kmod->hdr_size = header_size; | |
2852 | ||
2853 | #if !KERNEL | |
2854 | if (kxld_kext_target_needs_swap(kext)) { | |
2855 | kmod->address = OSSwapInt64(kmod->address); | |
2856 | kmod->size = OSSwapInt64(kmod->size); | |
2857 | kmod->hdr_size = OSSwapInt64(kmod->hdr_size); | |
2858 | } | |
2859 | #endif /* !KERNEL */ | |
2860 | } | |
2861 | ||
2862 | ||
2863 | rval = KERN_SUCCESS; | |
2864 | ||
2865 | finish: | |
2866 | return rval; | |
2867 | } | |
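/*
 * Worked example (illustrative): with link_addr 0xffffff7f80a00000 and
 * kxld_kext_get_vmsize() reporting header_size 0x1000 and vmsize 0x7000,
 * the 64-bit branch above stores address = 0xffffff7f80a00000,
 * size = 0x7000 and hdr_size = 0x1000 in the kext's kmod_info, byte-swapping
 * the fields at user level when host and target byte order differ.
 */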
2868 |