/*
 * Copyright (c) 2008 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <string.h>
#include <mach-o/loader.h>
#include <sys/types.h>

#include <kern/assert.h>

#define DEBUG_ASSERT_COMPONENT_NAME_STRING "kxld"
#include <AssertMacros.h>

#include "kxld_demangle.h"
#include "kxld_dict.h"
#include "kxld_object.h"
#include "kxld_reloc.h"
#include "kxld_sect.h"
#include "kxld_sym.h"
#include "kxld_symtab.h"
#include "kxld_util.h"
#include "kxld_vtable.h"
#define VTABLE_ENTRY_SIZE_32  4
#define VTABLE_HEADER_LEN_32  2
#define VTABLE_HEADER_SIZE_32 (VTABLE_HEADER_LEN_32 * VTABLE_ENTRY_SIZE_32)

#define VTABLE_ENTRY_SIZE_64  8
#define VTABLE_HEADER_LEN_64  2
#define VTABLE_HEADER_SIZE_64 (VTABLE_HEADER_LEN_64 * VTABLE_ENTRY_SIZE_64)
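
/* The vtable data handled below is a short header followed by one
 * pointer-sized slot per virtual method: with the constants above, a 64-bit
 * vtable has a 16-byte header (two 8-byte slots) and entry i lives at byte
 * offset VTABLE_HEADER_SIZE_64 + i * VTABLE_ENTRY_SIZE_64; the 32-bit layout
 * is the same with 4-byte slots.
 */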
static void get_vtable_base_sizes(boolean_t is_32_bit, u_int *vtable_entry_size,
    u_int *vtable_header_size);

static kern_return_t init_by_relocs(KXLDVTable *vtable, const KXLDSym *vtable_sym,
    const KXLDSect *sect, const KXLDRelocator *relocator);

static kern_return_t init_by_entries_and_relocs(KXLDVTable *vtable,
    const KXLDSym *vtable_sym, const KXLDRelocator *relocator,
    const KXLDArray *relocs, const KXLDDict *defined_cxx_symbols);

static kern_return_t init_by_entries(KXLDVTable *vtable,
    const KXLDRelocator *relocator, const KXLDDict *defined_cxx_symbols);
/*******************************************************************************
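* Creates a vtable object from a vtable symbol and the section that holds its
* data.  The initialization strategy depends on the object's link state: an
* already-linked object is initialized directly from its entry values, a final
* linked image also consults its external relocation entries, and an unlinked
* object is initialized purely from its section relocations.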
*******************************************************************************/
kern_return_t
kxld_vtable_init(KXLDVTable *vtable, const KXLDSym *vtable_sym,
    const KXLDObject *object, const KXLDDict *defined_cxx_symbols)
{
    kern_return_t rval = KERN_FAILURE;
    const KXLDArray *extrelocs = NULL;
    const KXLDRelocator *relocator = NULL;
    const KXLDSect *vtable_sect = NULL;
    char *demangled_name = NULL;
    size_t demangled_length = 0;

    check(vtable);
    check(vtable_sym);
    check(object);

    relocator = kxld_object_get_relocator(object);

    vtable_sect = kxld_object_get_section_by_index(object,
        vtable_sym->sectnum);
    require_action(vtable_sect, finish, rval = KERN_FAILURE);

    vtable->name = vtable_sym->name;
    vtable->vtable = vtable_sect->data +
        kxld_sym_get_section_offset(vtable_sym, vtable_sect);

    if (kxld_object_is_linked(object)) {
        /* A fully linked object already carries resolved vtable entries. */
        rval = init_by_entries(vtable, relocator, defined_cxx_symbols);
        require_noerr(rval, finish);

        vtable->is_patched = TRUE;
    } else {
        if (kxld_object_is_final_image(object)) {
            /* A final linked image keeps its unresolved entries in its
             * external relocation table.
             */
            extrelocs = kxld_object_get_extrelocs(object);
            require_action(extrelocs, finish,
                rval = KERN_FAILURE;
                kxld_log(kKxldLogPatching, kKxldLogErr,
                    kKxldLogMalformedVTable,
                    kxld_demangle(vtable->name,
                        &demangled_name, &demangled_length)));

            rval = init_by_entries_and_relocs(vtable, vtable_sym,
                relocator, extrelocs, defined_cxx_symbols);
            require_noerr(rval, finish);
        } else {
            /* An unlinked object must describe every vtable entry with a
             * section relocation.
             */
            require_action(kxld_sect_get_num_relocs(vtable_sect) > 0, finish,
                rval = KERN_FAILURE;
                kxld_log(kKxldLogPatching, kKxldLogErr,
                    kKxldLogMalformedVTable,
                    kxld_demangle(vtable->name,
                        &demangled_name, &demangled_length)));

            rval = init_by_relocs(vtable, vtable_sym, vtable_sect, relocator);
            require_noerr(rval, finish);
        }

        vtable->is_patched = FALSE;
    }

    rval = KERN_SUCCESS;
finish:
    if (demangled_name) {
        kxld_free(demangled_name, demangled_length);
    }

    return rval;
}
/*******************************************************************************
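* Returns the vtable entry size and vtable header size for the target word
* size (32-bit or 64-bit).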
*******************************************************************************/
static void
get_vtable_base_sizes(boolean_t is_32_bit, u_int *vtable_entry_size,
    u_int *vtable_header_size)
{
    check(vtable_entry_size);
    check(vtable_header_size);

    if (is_32_bit) {
        *vtable_entry_size = VTABLE_ENTRY_SIZE_32;
        *vtable_header_size = VTABLE_HEADER_SIZE_32;
    } else {
        *vtable_entry_size = VTABLE_ENTRY_SIZE_64;
        *vtable_header_size = VTABLE_HEADER_SIZE_64;
    }
}
/*******************************************************************************
* Initializes a vtable object by matching up relocation entries to the vtable's
* entries and finding the corresponding symbols.
*******************************************************************************/
static kern_return_t
init_by_relocs(KXLDVTable *vtable, const KXLDSym *vtable_sym,
    const KXLDSect *sect, const KXLDRelocator *relocator)
{
    kern_return_t rval = KERN_FAILURE;
    KXLDReloc *reloc = NULL;
    KXLDVTableEntry *entry = NULL;
    KXLDSym *sym = NULL;
    kxld_addr_t vtable_base_offset = 0;
    kxld_addr_t entry_offset = 0;
    u_int nentries = 0;
    u_int i = 0;
    u_int vtable_entry_size = 0;
    u_int vtable_header_size = 0;
    u_int base_reloc_index = 0;
    u_int reloc_index = 0;

    check(vtable);
    check(vtable_sym);
    check(sect);
    check(relocator);

    /* Find the first entry past the vtable padding */

    (void) get_vtable_base_sizes(relocator->is_32_bit,
        &vtable_entry_size, &vtable_header_size);

    vtable_base_offset = kxld_sym_get_section_offset(vtable_sym, sect) +
        vtable_header_size;

    /* Find the relocation entry at the start of the vtable */

    rval = kxld_reloc_get_reloc_index_by_offset(&sect->relocs,
        vtable_base_offset, &base_reloc_index);
    require_noerr(rval, finish);

    /* Count the number of consecutive relocation entries to find the number of
     * vtable entries.  For some reason, the __TEXT,__const relocations are
     * sorted in descending order, so we have to walk backwards.  Also, make
     * sure we don't run off the end of the section's relocs.
     */
    reloc_index = base_reloc_index;
    entry_offset = vtable_base_offset;
    reloc = kxld_array_get_item(&sect->relocs, reloc_index);
    while (reloc->address == entry_offset) {
        ++nentries;
        if (!reloc_index) {
            break;
        }

        --reloc_index;
        reloc = kxld_array_get_item(&sect->relocs, reloc_index);
        entry_offset += vtable_entry_size;
    }

    /* Allocate the symbol index */

    rval = kxld_array_init(&vtable->entries, sizeof(KXLDVTableEntry), nentries);
    require_noerr(rval, finish);

    /* Find the symbols for each vtable entry */

    for (i = 0; i < vtable->entries.nitems; ++i) {
        reloc = kxld_array_get_item(&sect->relocs, base_reloc_index - i);
        entry = kxld_array_get_item(&vtable->entries, i);

        /* If we can't find a symbol, it means it is a locally-defined,
         * non-external symbol that has been stripped.  We don't patch over
         * locally-defined symbols, so we leave the symbol as NULL and just
         * skip it.  We won't be able to patch subclasses with this symbol,
         * but there isn't much we can do about that.
         */
        sym = kxld_reloc_get_symbol(relocator, reloc, sect->data);

        entry->unpatched.sym = sym;
        entry->unpatched.reloc = reloc;
    }

    rval = KERN_SUCCESS;
finish:
    return rval;
}
/*******************************************************************************
* Initializes a vtable object by reading the symbol values out of the vtable
* entries and performing reverse symbol lookups on those values.
*******************************************************************************/
static kern_return_t
init_by_entries(KXLDVTable *vtable, const KXLDRelocator *relocator,
    const KXLDDict *defined_cxx_symbols)
{
    kern_return_t rval = KERN_FAILURE;
    KXLDVTableEntry *tmpentry = NULL;
    KXLDSym *sym = NULL;
    kxld_addr_t entry_value = 0;
    u_long entry_offset = 0;
    u_int vtable_entry_size = 0;
    u_int vtable_header_size = 0;
    u_int nentries = 0;
    u_int i = 0;

    check(vtable);
    check(relocator);

    (void) get_vtable_base_sizes(relocator->is_32_bit,
        &vtable_entry_size, &vtable_header_size);

    /* Count the number of entries (the vtable is null-terminated) */

    entry_offset = vtable_header_size;
    while (1) {
        entry_value = kxld_relocator_get_pointer_at_addr(relocator,
            vtable->vtable, entry_offset);
        if (!entry_value) {
            break;
        }

        ++nentries;
        entry_offset += vtable_entry_size;
    }

    /* Allocate the symbol index */

    rval = kxld_array_init(&vtable->entries, sizeof(KXLDVTableEntry), nentries);
    require_noerr(rval, finish);

    /* Look up the symbols for each entry */

    for (i = 0, entry_offset = vtable_header_size;
        i < vtable->entries.nitems;
        ++i, entry_offset += vtable_entry_size) {
        entry_value = kxld_relocator_get_pointer_at_addr(relocator,
            vtable->vtable, entry_offset);

        /* If we can't find the symbol, it means that the virtual function was
         * defined inline.  There's not much I can do about this; it just means
         * I can't patch this function.
         */
        tmpentry = kxld_array_get_item(&vtable->entries, i);
        sym = kxld_dict_find(defined_cxx_symbols, &entry_value);

        if (sym) {
            tmpentry->patched.name = sym->name;
            tmpentry->patched.addr = sym->link_addr;
        } else {
            tmpentry->patched.name = NULL;
            tmpentry->patched.addr = 0;
        }
    }

    rval = KERN_SUCCESS;
finish:
    return rval;
}
/*******************************************************************************
* Initializes vtables by performing a reverse lookup on symbol values when
* they exist in the vtable entry, and by looking through a matching relocation
* entry when the vtable entry is NULL.
*
* Final linked images require this hybrid vtable initialization approach
* because they are already internally resolved.  This means that the vtables
* contain valid entries to local symbols, but still have relocation entries for
* external symbols.
*******************************************************************************/
static kern_return_t
init_by_entries_and_relocs(KXLDVTable *vtable, const KXLDSym *vtable_sym,
    const KXLDRelocator *relocator, const KXLDArray *relocs,
    const KXLDDict *defined_cxx_symbols)
{
    kern_return_t rval = KERN_FAILURE;
    KXLDReloc *reloc = NULL;
    KXLDVTableEntry *tmpentry = NULL;
    KXLDSym *sym = NULL;
    u_int vtable_entry_size = 0;
    u_int vtable_header_size = 0;
    kxld_addr_t entry_value = 0;
    u_long entry_offset = 0;
    u_int nentries = 0;
    u_int i = 0;
    char *demangled_name1 = NULL;
    size_t demangled_length1 = 0;

    check(vtable);
    check(vtable_sym);
    check(relocator);
    check(relocs);

    /* Find the first entry and its offset past the vtable padding */

    (void) get_vtable_base_sizes(relocator->is_32_bit,
        &vtable_entry_size, &vtable_header_size);

    /* In a final linked image, a vtable slot is valid if it is nonzero
     * (meaning the userspace linker has already resolved it) or if it has
     * a relocation entry.  We'll know the end of the vtable when we find a
     * slot that meets neither of these conditions.
     */
    entry_offset = vtable_header_size;
    while (1) {
        entry_value = kxld_relocator_get_pointer_at_addr(relocator,
            vtable->vtable, entry_offset);

        reloc = kxld_reloc_get_reloc_by_offset(relocs,
            vtable_sym->base_addr + entry_offset);

        if (!entry_value && !reloc) {
            break;
        }

        ++nentries;
        entry_offset += vtable_entry_size;
    }

    /* Allocate the symbol index */

    rval = kxld_array_init(&vtable->entries, sizeof(KXLDVTableEntry), nentries);
    require_noerr(rval, finish);

    /* Find the symbols for each vtable entry */

    for (i = 0, entry_offset = vtable_header_size;
        i < vtable->entries.nitems;
        ++i, entry_offset += vtable_entry_size) {
        entry_value = kxld_relocator_get_pointer_at_addr(relocator,
            vtable->vtable, entry_offset);

        /* If we can't find a symbol, it means it is a locally-defined,
         * non-external symbol that has been stripped.  We don't patch over
         * locally-defined symbols, so we leave the symbol as NULL and just
         * skip it.  We won't be able to patch subclasses with this symbol,
         * but there isn't much we can do about that.
         */
        if (entry_value) {
            reloc = NULL;
            sym = kxld_dict_find(defined_cxx_symbols, &entry_value);
        } else {
            reloc = kxld_reloc_get_reloc_by_offset(relocs,
                vtable_sym->base_addr + entry_offset);
            require_action(reloc, finish,
                rval = KERN_FAILURE;
                kxld_log(kKxldLogPatching, kKxldLogErr,
                    kKxldLogMalformedVTable,
                    kxld_demangle(vtable->name, &demangled_name1,
                        &demangled_length1)));

            sym = kxld_reloc_get_symbol(relocator, reloc, /* data */ NULL);
        }

        tmpentry = kxld_array_get_item(&vtable->entries, i);
        tmpentry->unpatched.reloc = reloc;
        tmpentry->unpatched.sym = sym;
    }

    rval = KERN_SUCCESS;
finish:
    if (demangled_name1) {
        kxld_free(demangled_name1, demangled_length1);
    }

    return rval;
}
/*******************************************************************************
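* Resets a vtable for reuse: the vtable data pointer and patched flag are
* cleared and the entry array is emptied.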
*******************************************************************************/
void
kxld_vtable_clear(KXLDVTable *vtable)
{
    check(vtable);

    vtable->vtable = NULL;
    vtable->is_patched = FALSE;
    kxld_array_clear(&vtable->entries);
}
/*******************************************************************************
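* Tears a vtable down completely: the entry array is deinitialized and the
* structure is zeroed.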
*******************************************************************************/
void
kxld_vtable_deinit(KXLDVTable *vtable)
{
    check(vtable);

    kxld_array_deinit(&vtable->entries);
    bzero(vtable, sizeof(*vtable));
}
/*******************************************************************************
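* Maps a byte offset within the vtable data to the corresponding vtable entry,
* or returns NULL when the offset is not a multiple of the entry size.  For
* example, with 64-bit entries an offset of 32 bytes resolves to entry
* (32 - 16) / 8 = 2.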
*******************************************************************************/
KXLDVTableEntry *
kxld_vtable_get_entry_for_offset(const KXLDVTable *vtable, u_long offset,
    boolean_t is_32_bit)
{
    KXLDVTableEntry *rval = NULL;
    u_int vtable_entry_size = 0;
    u_int vtable_header_size = 0;
    u_int vtable_entry_idx = 0;

    (void) get_vtable_base_sizes(is_32_bit,
        &vtable_entry_size, &vtable_header_size);

    if (offset % vtable_entry_size) {
        goto finish;
    }

    vtable_entry_idx = (u_int) ((offset - vtable_header_size) / vtable_entry_size);
    rval = kxld_array_get_item(&vtable->entries, vtable_entry_idx);
finish:
    return rval;
}
/*******************************************************************************
* Patching vtables allows us to preserve binary compatibility across releases.
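*
* A child entry is patched to its parent's symbol only when the child symbol
* is still unresolved: entries are left alone when the child symbol is locally
* defined, pure virtual, or already identical to the parent's, and patching
* fails outright when the parent entry is a pad slot that the child does not
* match (see the numbered cases in the body).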
*******************************************************************************/
kern_return_t
kxld_vtable_patch(KXLDVTable *vtable, const KXLDVTable *super_vtable,
    KXLDObject *object)
{
    kern_return_t rval = KERN_FAILURE;
    const KXLDSymtab *symtab = NULL;
    const KXLDSym *sym = NULL;
    KXLDVTableEntry *child_entry = NULL;
    KXLDVTableEntry *parent_entry = NULL;
    u_int symindex = 0;
    u_int i = 0;
    char *demangled_name1 = NULL;
    char *demangled_name2 = NULL;
    char *demangled_name3 = NULL;
    size_t demangled_length1 = 0;
    size_t demangled_length2 = 0;
    size_t demangled_length3 = 0;
    boolean_t failure = FALSE;

    check(vtable);
    check(super_vtable);

    symtab = kxld_object_get_symtab(object);

    require_action(!vtable->is_patched, finish, rval = KERN_SUCCESS);
    require_action(super_vtable->is_patched, finish, rval = KERN_FAILURE);
    require_action(vtable->entries.nitems >= super_vtable->entries.nitems, finish,
        rval = KERN_FAILURE;
        kxld_log(kKxldLogPatching, kKxldLogErr, kKxldLogMalformedVTable,
            kxld_demangle(vtable->name, &demangled_name1, &demangled_length1)));

    for (i = 0; i < super_vtable->entries.nitems; ++i) {
        child_entry = kxld_array_get_item(&vtable->entries, i);
        parent_entry = kxld_array_get_item(&super_vtable->entries, i);

        /* The child entry can be NULL when a locally-defined, non-external
         * symbol is stripped.  We wouldn't patch this entry anyway, so we
         * just skip it.
         */
        if (!child_entry->unpatched.sym) {
            continue;
        }

        /* It's possible for the patched parent entry not to have a symbol
         * (e.g. when the definition is inlined).  We can't patch this entry no
         * matter what, so we'll just skip it and die later if it's a problem
         * (which is not likely).
         */
        if (!parent_entry->patched.name) {
            continue;
        }

        /* 1) If the symbol is defined locally, do not patch */
        if (kxld_sym_is_defined_locally(child_entry->unpatched.sym)) {
            continue;
        }

        /* 2) If the child is a pure virtual function, do not patch.
         * In general, we want to proceed with patching when the symbol is
         * externally defined because pad slots fall into this category.
         * The pure virtual function symbol is a special case, as the pure
         * virtual property itself overrides the parent's implementation.
         */
        if (kxld_sym_is_pure_virtual(child_entry->unpatched.sym)) {
            continue;
        }

        /* 3) If the symbols are the same, do not patch */
        if (streq(child_entry->unpatched.sym->name,
                parent_entry->patched.name)) {
            continue;
        }

        /* 4) If the parent vtable entry is a pad slot, and the child does not
         * match it, then the child was built against a newer version of the
         * libraries, so it is binary-incompatible.
         */
        require_action(!kxld_sym_name_is_padslot(parent_entry->patched.name),
            finish, rval = KERN_FAILURE;
            kxld_log(kKxldLogPatching, kKxldLogErr,
                kKxldLogParentOutOfDate,
                kxld_demangle(super_vtable->name, &demangled_name1,
                    &demangled_length1),
                kxld_demangle(vtable->name, &demangled_name2,
                    &demangled_length2)));

#if KXLD_USER_OR_STRICT_PATCHING
        /* 5) If we are doing strict patching, we prevent kexts from declaring
         * virtual functions and not implementing them.  We can tell if a
         * virtual function is declared but not implemented because we resolve
         * symbols before patching; an unimplemented function will still be
         * undefined at this point.  We then look at whether the symbol has
         * the same class prefix as the vtable.  If it does, the symbol was
         * declared as part of the class and not inherited, which means we
         * should not patch it.
         */
        if (kxld_object_target_supports_strict_patching(object) &&
            !kxld_sym_is_defined(child_entry->unpatched.sym)) {
            char class_name[KXLD_MAX_NAME_LEN];
            char function_prefix[KXLD_MAX_NAME_LEN];
            u_long function_prefix_len = 0;

            rval = kxld_sym_get_class_name_from_vtable_name(vtable->name,
                class_name, sizeof(class_name));
            require_noerr(rval, finish);

            function_prefix_len =
                kxld_sym_get_function_prefix_from_class_name(class_name,
                    function_prefix, sizeof(function_prefix));
            require(function_prefix_len, finish);

            if (!strncmp(child_entry->unpatched.sym->name,
                    function_prefix, function_prefix_len)) {
                failure = TRUE;
                kxld_log(kKxldLogPatching, kKxldLogErr,
                    "The %s is unpatchable because its class declares the "
                    "method '%s' without providing an implementation.",
                    kxld_demangle(vtable->name,
                        &demangled_name1, &demangled_length1),
                    kxld_demangle(child_entry->unpatched.sym->name,
                        &demangled_name2, &demangled_length2));
                continue;
            }
        }
#endif /* KXLD_USER_OR_STRICT_PATCHING */

        /* 6) The child symbol is unresolved and different from its parent, so
         * we need to patch it up.  We do this by modifying the relocation
         * entry of the vtable entry to point to the symbol of the parent
         * vtable entry.  If that symbol does not exist (i.e. we got the data
         * from a link state object's vtable representation), then we create a
         * new symbol in the symbol table and point the relocation entry to
         * the new symbol.
         */
        sym = kxld_symtab_get_locally_defined_symbol_by_name(symtab,
            parent_entry->patched.name);
        if (!sym) {
            rval = kxld_object_add_symbol(object, parent_entry->patched.name,
                parent_entry->patched.addr, &sym);
            require_noerr(rval, finish);
        }
        require_action(sym, finish, rval = KERN_FAILURE);

        rval = kxld_symtab_get_sym_index(symtab, sym, &symindex);
        require_noerr(rval, finish);

        rval = kxld_reloc_update_symindex(child_entry->unpatched.reloc, symindex);
        require_noerr(rval, finish);

        kxld_log(kKxldLogPatching, kKxldLogDetail,
            "In vtable '%s', patching '%s' with '%s'.",
            kxld_demangle(vtable->name, &demangled_name1, &demangled_length1),
            kxld_demangle(child_entry->unpatched.sym->name,
                &demangled_name2, &demangled_length2),
            kxld_demangle(sym->name, &demangled_name3, &demangled_length3));

        rval = kxld_object_patch_symbol(object, child_entry->unpatched.sym);
        require_noerr(rval, finish);

        child_entry->unpatched.sym = sym;

        /*
         * The C++ ABI requires that functions be aligned on a 2-byte boundary:
         * http://www.codesourcery.com/public/cxx-abi/abi.html#member-pointers
         * If the LSB of any virtual function's link address is 1, then the
         * compiler has violated that part of the ABI, and we're going to panic
         * in _ptmf2ptf() (in OSMetaClass.h).  Better to panic here with some
         * context.
         */
        assert(kxld_sym_is_pure_virtual(sym) || !(sym->link_addr & 1));
    }

    require_action(!failure, finish, rval = KERN_FAILURE);

    /* Change the vtable representation from the unpatched layout to the
     * patched layout.
     */
    for (i = 0; i < vtable->entries.nitems; ++i) {
        char *name = NULL;
        kxld_addr_t addr = 0;

        child_entry = kxld_array_get_item(&vtable->entries, i);
        if (child_entry->unpatched.sym) {
            name = child_entry->unpatched.sym->name;
            addr = child_entry->unpatched.sym->link_addr;
        }

        child_entry->patched.name = name;
        child_entry->patched.addr = addr;
    }

    vtable->is_patched = TRUE;
    rval = KERN_SUCCESS;

finish:
    if (demangled_name1) {
        kxld_free(demangled_name1, demangled_length1);
    }
    if (demangled_name2) {
        kxld_free(demangled_name2, demangled_length2);
    }
    if (demangled_name3) {
        kxld_free(demangled_name3, demangled_length3);
    }

    return rval;
}