2 * Copyright (c) 2008 Apple Inc. All rights reserved.
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
29 #include <mach-o/loader.h>
30 #include <sys/types.h>
32 #define DEBUG_ASSERT_COMPONENT_NAME_STRING "kxld"
33 #include <AssertMacros.h>
35 #include "kxld_demangle.h"
36 #include "kxld_reloc.h"
37 #include "kxld_sect.h"
38 #include "kxld_state.h"
40 #include "kxld_symtab.h"
41 #include "kxld_util.h"
42 #include "kxld_vtable.h"
/* Size of one vtable slot and of the (2-slot) vtable header, per word size.
 * C++ vtables begin with two header entries (offset-to-top and RTTI pointer)
 * before the first virtual-function slot.
 */
#define VTABLE_ENTRY_SIZE_32  4
#define VTABLE_HEADER_LEN_32  2
#define VTABLE_HEADER_SIZE_32 (VTABLE_HEADER_LEN_32 * VTABLE_ENTRY_SIZE_32)

#define VTABLE_ENTRY_SIZE_64  8
#define VTABLE_HEADER_LEN_64  2
#define VTABLE_HEADER_SIZE_64 (VTABLE_HEADER_LEN_64 * VTABLE_ENTRY_SIZE_64)
52 static kern_return_t
init_by_relocs(KXLDVTable
*vtable
, const KXLDSym
*sym
,
53 const KXLDSect
*sect
, const KXLDSymtab
*symtab
,
54 const KXLDRelocator
*relocator
);
56 static kern_return_t
init_by_entries_and_relocs(KXLDVTable
*vtable
,
57 const KXLDSym
*sym
, const KXLDSymtab
*symtab
,
58 const KXLDRelocator
*relocator
, const KXLDArray
*relocs
);
60 static kxld_addr_t
get_entry_value(u_char
*entry
, const KXLDRelocator
*relocator
)
61 __attribute__((pure
));
63 static kxld_addr_t
swap_entry_value(kxld_addr_t entry_value
,
64 const KXLDRelocator
*relocator
) __attribute__((const));
66 static kern_return_t
init_by_entries(KXLDVTable
*vtable
, const KXLDSymtab
*symtab
,
67 const KXLDRelocator
*relocator
);
69 /*******************************************************************************
70 *******************************************************************************/
72 kxld_vtable_init_from_kernel_macho(KXLDVTable
*vtable
, const KXLDSym
*sym
,
73 const KXLDSect
*sect
, const KXLDSymtab
*symtab
,
74 const KXLDRelocator
*relocator
)
76 kern_return_t rval
= KERN_FAILURE
;
77 char *demangled_name
= NULL
;
78 size_t demangled_length
= 0;
85 vtable
->name
= sym
->name
;
86 vtable
->vtable
= sect
->data
+ kxld_sym_get_section_offset(sym
, sect
);
87 vtable
->is_patched
= FALSE
;
89 require_action(kxld_sect_get_num_relocs(sect
) == 0, finish
,
91 kxld_log(kKxldLogPatching
, kKxldLogErr
,
92 kKxldLogMalformedVTable
,
93 kxld_demangle(vtable
->name
, &demangled_name
, &demangled_length
)));
95 rval
= init_by_entries(vtable
, symtab
, relocator
);
96 require_noerr(rval
, finish
);
98 vtable
->is_patched
= TRUE
;
103 if (rval
) kxld_vtable_deinit(vtable
);
104 if (demangled_name
) kxld_free(demangled_name
, demangled_length
);
109 /*******************************************************************************
110 *******************************************************************************/
112 kxld_vtable_init_from_object_macho(KXLDVTable
*vtable
, const KXLDSym
*sym
,
113 const KXLDSect
*sect
, const KXLDSymtab
*symtab
,
114 const KXLDRelocator
*relocator
)
116 kern_return_t rval
= KERN_FAILURE
;
117 char *demangled_name
= NULL
;
118 size_t demangled_length
= 0;
125 vtable
->name
= sym
->name
;
126 vtable
->vtable
= sect
->data
+ kxld_sym_get_section_offset(sym
, sect
);
127 vtable
->is_patched
= FALSE
;
129 require_action(kxld_sect_get_num_relocs(sect
) > 0, finish
,
131 kxld_log(kKxldLogPatching
, kKxldLogErr
,
132 kKxldLogMalformedVTable
,
133 kxld_demangle(vtable
->name
, &demangled_name
, &demangled_length
)));
135 rval
= init_by_relocs(vtable
, sym
, sect
, symtab
, relocator
);
136 require_noerr(rval
, finish
);
141 if (rval
) kxld_vtable_deinit(vtable
);
142 if (demangled_name
) kxld_free(demangled_name
, demangled_length
);
147 /*******************************************************************************
148 *******************************************************************************/
150 kxld_vtable_init_from_final_macho(KXLDVTable
*vtable
, const KXLDSym
*sym
,
151 const KXLDSect
*sect
, const KXLDSymtab
*symtab
,
152 const KXLDRelocator
*relocator
, const KXLDArray
*relocs
)
154 kern_return_t rval
= KERN_FAILURE
;
155 char *demangled_name
= NULL
;
156 size_t demangled_length
= 0;
163 vtable
->name
= sym
->name
;
164 vtable
->vtable
= sect
->data
+ kxld_sym_get_section_offset(sym
, sect
);
165 vtable
->is_patched
= FALSE
;
167 require_action(kxld_sect_get_num_relocs(sect
) == 0, finish
,
169 kxld_log(kKxldLogPatching
, kKxldLogErr
,
170 kKxldLogMalformedVTable
,
171 kxld_demangle(vtable
->name
, &demangled_name
, &demangled_length
)));
173 rval
= init_by_entries_and_relocs(vtable
, sym
, symtab
,
175 require_noerr(rval
, finish
);
180 if (rval
) kxld_vtable_deinit(vtable
);
181 if (demangled_name
) kxld_free(demangled_name
, demangled_length
);
#if KXLD_USER_OR_ILP32
/*******************************************************************************
* Initializes a vtable from a 32-bit link-state file.  Link-state vtables are
* serialized in already-patched form: each KXLDSymEntry32 provides a name
* offset (into the file) and a resolved address.
* NOTE(review): the third parameter and the entry count passed to
* kxld_array_init were reconstructed from context — confirm against the
* KXLDVTableHdr declaration.
*******************************************************************************/
kern_return_t
kxld_vtable_init_from_link_state_32(KXLDVTable *vtable, u_char *file,
    KXLDVTableHdr *hdr)
{
    kern_return_t rval = KERN_FAILURE;
    KXLDSymEntry32 *sym = NULL;
    KXLDVTableEntry *entry = NULL;
    u_int i = 0;

    check(vtable);
    check(file);
    check(hdr);

    vtable->name = (char *) (file + hdr->nameoff);
    vtable->is_patched = TRUE;

    rval = kxld_array_init(&vtable->entries, sizeof(KXLDVTableEntry),
        hdr->nentries);
    require_noerr(rval, finish);

    /* Walk the serialized entries in parallel with the vtable array. */
    sym = (KXLDSymEntry32 *) (file + hdr->vtableoff);
    for (i = 0; i < vtable->entries.nitems; ++i, ++sym) {
        entry = kxld_array_get_item(&vtable->entries, i);
        entry->patched.name = (char *) (file + sym->nameoff);
        entry->patched.addr = sym->addr;
    }

    rval = KERN_SUCCESS;

finish:
    return rval;
}
#endif /* KXLD_USER_OR_ILP32 */
#if KXLD_USER_OR_LP64
/*******************************************************************************
* Initializes a vtable from a 64-bit link-state file.  Identical to the 32-bit
* variant except for the serialized entry type (KXLDSymEntry64).
* NOTE(review): the third parameter and the entry count passed to
* kxld_array_init were reconstructed from context — confirm against the
* KXLDVTableHdr declaration.
*******************************************************************************/
kern_return_t
kxld_vtable_init_from_link_state_64(KXLDVTable *vtable, u_char *file,
    KXLDVTableHdr *hdr)
{
    kern_return_t rval = KERN_FAILURE;
    KXLDSymEntry64 *sym = NULL;
    KXLDVTableEntry *entry = NULL;
    u_int i = 0;

    check(vtable);
    check(file);
    check(hdr);

    vtable->name = (char *) (file + hdr->nameoff);
    vtable->is_patched = TRUE;

    rval = kxld_array_init(&vtable->entries, sizeof(KXLDVTableEntry),
        hdr->nentries);
    require_noerr(rval, finish);

    /* Walk the serialized entries in parallel with the vtable array. */
    sym = (KXLDSymEntry64 *) (file + hdr->vtableoff);
    for (i = 0; i < vtable->entries.nitems; ++i, ++sym) {
        entry = kxld_array_get_item(&vtable->entries, i);
        entry->patched.name = (char *) (file + sym->nameoff);
        entry->patched.addr = sym->addr;
    }

    rval = KERN_SUCCESS;

finish:
    return rval;
}
#endif /* KXLD_USER_OR_LP64 */
260 /*******************************************************************************
261 *******************************************************************************/
263 kxld_vtable_copy(KXLDVTable
*vtable
, const KXLDVTable
*src
)
265 kern_return_t rval
= KERN_FAILURE
;
270 vtable
->vtable
= src
->vtable
;
271 vtable
->name
= src
->name
;
272 vtable
->is_patched
= src
->is_patched
;
274 rval
= kxld_array_copy(&vtable
->entries
, &src
->entries
);
275 require_noerr(rval
, finish
);
283 /*******************************************************************************
284 * Initializes a vtable object by matching up relocation entries to the vtable's
285 * entries and finding the corresponding symbols.
286 *******************************************************************************/
288 init_by_relocs(KXLDVTable
*vtable
, const KXLDSym
*sym
, const KXLDSect
*sect
,
289 const KXLDSymtab
*symtab
, const KXLDRelocator
*relocator
)
291 kern_return_t rval
= KERN_FAILURE
;
292 KXLDReloc
*reloc
= NULL
;
293 KXLDVTableEntry
*entry
= NULL
;
294 KXLDSym
*tmpsym
= NULL
;
295 kxld_addr_t vtable_base_offset
= 0;
296 kxld_addr_t entry_offset
= 0;
299 u_int vtable_entry_size
= 0;
300 u_int base_reloc_index
= 0;
301 u_int reloc_index
= 0;
309 /* Find the first entry past the vtable padding */
311 vtable_base_offset
= kxld_sym_get_section_offset(sym
, sect
);
312 if (relocator
->is_32_bit
) {
313 vtable_entry_size
= VTABLE_ENTRY_SIZE_32
;
314 vtable_base_offset
+= VTABLE_HEADER_SIZE_32
;
316 vtable_entry_size
= VTABLE_ENTRY_SIZE_64
;
317 vtable_base_offset
+= VTABLE_HEADER_SIZE_64
;
320 /* Find the relocation entry at the start of the vtable */
322 rval
= kxld_reloc_get_reloc_index_by_offset(§
->relocs
,
323 vtable_base_offset
, &base_reloc_index
);
324 require_noerr(rval
, finish
);
326 /* Count the number of consecutive relocation entries to find the number of
327 * vtable entries. For some reason, the __TEXT,__const relocations are
328 * sorted in descending order, so we have to walk backwards. Also, make
329 * sure we don't run off the end of the section's relocs.
332 reloc_index
= base_reloc_index
;
333 entry_offset
= vtable_base_offset
;
334 reloc
= kxld_array_get_item(§
->relocs
, reloc_index
);
335 while (reloc
->address
== entry_offset
) {
337 if (!reloc_index
) break;
341 reloc
= kxld_array_get_item(§
->relocs
, reloc_index
);
342 entry_offset
+= vtable_entry_size
;
345 /* Allocate the symbol index */
347 rval
= kxld_array_init(&vtable
->entries
, sizeof(KXLDVTableEntry
), nentries
);
348 require_noerr(rval
, finish
);
350 /* Find the symbols for each vtable entry */
352 for (i
= 0; i
< vtable
->entries
.nitems
; ++i
) {
353 reloc
= kxld_array_get_item(§
->relocs
, base_reloc_index
- i
);
354 entry
= kxld_array_get_item(&vtable
->entries
, i
);
356 /* If we can't find a symbol, it means it is a locally-defined,
357 * non-external symbol that has been stripped. We don't patch over
358 * locally-defined symbols, so we leave the symbol as NULL and just
359 * skip it. We won't be able to patch subclasses with this symbol,
360 * but there isn't much we can do about that.
362 tmpsym
= kxld_reloc_get_symbol(relocator
, reloc
, sect
->data
, symtab
);
364 entry
->unpatched
.sym
= tmpsym
;
365 entry
->unpatched
.reloc
= reloc
;
373 /*******************************************************************************
374 *******************************************************************************/
376 get_entry_value(u_char
*entry
, const KXLDRelocator
*relocator
)
378 kxld_addr_t entry_value
;
380 if (relocator
->is_32_bit
) {
381 entry_value
= *(uint32_t *)entry
;
383 entry_value
= *(uint64_t *)entry
;
390 /*******************************************************************************
391 *******************************************************************************/
393 swap_entry_value(kxld_addr_t entry_value
, const KXLDRelocator
*relocator
)
395 if (relocator
->is_32_bit
) {
396 entry_value
= OSSwapInt32((uint32_t) entry_value
);
398 entry_value
= OSSwapInt64((uint64_t) entry_value
);
405 /*******************************************************************************
406 * Initializes a vtable object by reading the symbol values out of the vtable
407 * entries and performing reverse symbol lookups on those values.
408 *******************************************************************************/
410 init_by_entries(KXLDVTable
*vtable
, const KXLDSymtab
*symtab
,
411 const KXLDRelocator
*relocator
)
413 kern_return_t rval
= KERN_FAILURE
;
414 KXLDVTableEntry
*tmpentry
= NULL
;
416 u_char
*base_entry
= NULL
;
417 u_char
*entry
= NULL
;
418 kxld_addr_t entry_value
= 0;
419 u_int vtable_entry_size
= 0;
420 u_int vtable_header_size
= 0;
424 if (relocator
->is_32_bit
) {
425 vtable_entry_size
= VTABLE_ENTRY_SIZE_32
;
426 vtable_header_size
= VTABLE_HEADER_SIZE_32
;
428 vtable_entry_size
= VTABLE_ENTRY_SIZE_64
;
429 vtable_header_size
= VTABLE_HEADER_SIZE_64
;
432 base_entry
= vtable
->vtable
+ vtable_header_size
;
434 /* Count the number of entries (the vtable is null-terminated) */
437 entry_value
= get_entry_value(entry
, relocator
);
438 while (entry_value
) {
440 entry
+= vtable_entry_size
;
441 entry_value
= get_entry_value(entry
, relocator
);
444 /* Allocate the symbol index */
446 rval
= kxld_array_init(&vtable
->entries
, sizeof(KXLDVTableEntry
), nentries
);
447 require_noerr(rval
, finish
);
449 /* Look up the symbols for each entry */
453 for (i
= 0; i
< vtable
->entries
.nitems
; ++i
) {
454 entry
= base_entry
+ (i
* vtable_entry_size
);
455 entry_value
= get_entry_value(entry
, relocator
);
458 if (relocator
->swap
) {
459 entry_value
= swap_entry_value(entry_value
, relocator
);
463 /* If we can't find the symbol, it means that the virtual function was
464 * defined inline. There's not much I can do about this; it just means
465 * I can't patch this function.
467 tmpentry
= kxld_array_get_item(&vtable
->entries
, i
);
468 sym
= kxld_symtab_get_cxx_symbol_by_value(symtab
, entry_value
);
471 tmpentry
->patched
.name
= sym
->name
;
472 tmpentry
->patched
.addr
= sym
->link_addr
;
474 tmpentry
->patched
.name
= NULL
;
475 tmpentry
->patched
.addr
= 0;
485 /*******************************************************************************
486 * Initializes vtables by performing a reverse lookup on symbol values when
487 * they exist in the vtable entry, and by looking through a matching relocation
488 * entry when the vtable entry is NULL.
490 * Final linked images require this hybrid vtable initialization approach
491 * because they are already internally resolved. This means that the vtables
492 * contain valid entries to local symbols, but still have relocation entries for
494 *******************************************************************************/
496 init_by_entries_and_relocs(KXLDVTable
*vtable
, const KXLDSym
*sym
,
497 const KXLDSymtab
*symtab
, const KXLDRelocator
*relocator
,
498 const KXLDArray
*relocs
)
500 kern_return_t rval
= KERN_FAILURE
;
501 KXLDReloc
*reloc
= NULL
;
502 KXLDVTableEntry
*tmpentry
= NULL
;
503 KXLDSym
*tmpsym
= NULL
;
504 u_int vtable_entry_size
= 0;
505 u_int vtable_header_size
= 0;
506 u_char
*base_entry
= NULL
;
507 u_char
*entry
= NULL
;
508 kxld_addr_t entry_value
= 0;
509 kxld_addr_t base_entry_offset
= 0;
510 kxld_addr_t entry_offset
= 0;
513 char *demangled_name1
= NULL
;
514 size_t demangled_length1
= 0;
521 /* Find the first entry and its offset past the vtable padding */
523 if (relocator
->is_32_bit
) {
524 vtable_entry_size
= VTABLE_ENTRY_SIZE_32
;
525 vtable_header_size
= VTABLE_HEADER_SIZE_32
;
527 vtable_entry_size
= VTABLE_ENTRY_SIZE_64
;
528 vtable_header_size
= VTABLE_HEADER_SIZE_64
;
531 base_entry
= vtable
->vtable
+ vtable_header_size
;
533 base_entry_offset
= sym
->base_addr
;
534 base_entry_offset
+= vtable_header_size
;
536 /* In a final linked image, a vtable slot is valid if it is nonzero
537 * (meaning the userspace linker has already resolved it, or if it has
538 * a relocation entry. We'll know the end of the vtable when we find a
539 * slot that meets neither of these conditions.
542 entry_value
= get_entry_value(entry
, relocator
);
543 entry_offset
= base_entry_offset
;
545 entry_value
= get_entry_value(entry
, relocator
);
547 reloc
= kxld_reloc_get_reloc_by_offset(relocs
, entry_offset
);
552 entry
+= vtable_entry_size
;
553 entry_offset
+= vtable_entry_size
;
556 /* Allocate the symbol index */
558 rval
= kxld_array_init(&vtable
->entries
, sizeof(KXLDVTableEntry
), nentries
);
559 require_noerr(rval
, finish
);
561 /* Find the symbols for each vtable entry */
564 entry_value
= get_entry_value(entry
, relocator
);
565 entry_offset
= base_entry_offset
;
566 for (i
= 0; i
< vtable
->entries
.nitems
; ++i
) {
567 entry_value
= get_entry_value(entry
, relocator
);
569 /* If we can't find a symbol, it means it is a locally-defined,
570 * non-external symbol that has been stripped. We don't patch over
571 * locally-defined symbols, so we leave the symbol as NULL and just
572 * skip it. We won't be able to patch subclasses with this symbol,
573 * but there isn't much we can do about that.
577 if (relocator
->swap
) {
578 entry_value
= swap_entry_value(entry_value
, relocator
);
583 tmpsym
= kxld_symtab_get_cxx_symbol_by_value(symtab
, entry_value
);
585 reloc
= kxld_reloc_get_reloc_by_offset(relocs
, entry_offset
);
586 require_action(reloc
, finish
,
588 kxld_log(kKxldLogPatching
, kKxldLogErr
,
589 kKxldLogMalformedVTable
,
590 kxld_demangle(vtable
->name
, &demangled_name1
,
591 &demangled_length1
)));
593 tmpsym
= kxld_reloc_get_symbol(relocator
, reloc
,
594 /* data */ NULL
, symtab
);
597 tmpentry
= kxld_array_get_item(&vtable
->entries
, i
);
598 tmpentry
->unpatched
.reloc
= reloc
;
599 tmpentry
->unpatched
.sym
= tmpsym
;
601 entry
+= vtable_entry_size
;
602 entry_offset
+= vtable_entry_size
;
611 /*******************************************************************************
612 *******************************************************************************/
614 kxld_vtable_clear(KXLDVTable
*vtable
)
618 vtable
->vtable
= NULL
;
620 vtable
->is_patched
= FALSE
;
621 kxld_array_clear(&vtable
->entries
);
624 /*******************************************************************************
625 *******************************************************************************/
627 kxld_vtable_deinit(KXLDVTable
*vtable
)
631 kxld_array_deinit(&vtable
->entries
);
632 bzero(vtable
, sizeof(*vtable
));
635 /*******************************************************************************
636 * Patching vtables allows us to preserve binary compatibility across releases.
637 *******************************************************************************/
639 kxld_vtable_patch(KXLDVTable
*vtable
, const KXLDVTable
*super_vtable
,
640 KXLDSymtab
*symtab
, boolean_t strict_patching __unused
)
642 kern_return_t rval
= KERN_FAILURE
;
643 KXLDVTableEntry
*child_entry
= NULL
;
644 KXLDVTableEntry
*parent_entry
= NULL
;
648 char *demangled_name1
= NULL
;
649 char *demangled_name2
= NULL
;
650 char *demangled_name3
= NULL
;
651 size_t demangled_length1
= 0;
652 size_t demangled_length2
= 0;
653 size_t demangled_length3
= 0;
658 require_action(!vtable
->is_patched
, finish
, rval
=KERN_SUCCESS
);
659 require_action(vtable
->entries
.nitems
>= super_vtable
->entries
.nitems
, finish
,
661 kxld_log(kKxldLogPatching
, kKxldLogErr
, kKxldLogMalformedVTable
,
662 kxld_demangle(vtable
->name
, &demangled_name1
, &demangled_length1
)));
664 for (i
= 0; i
< super_vtable
->entries
.nitems
; ++i
) {
665 child_entry
= kxld_array_get_item(&vtable
->entries
, i
);
666 parent_entry
= kxld_array_get_item(&super_vtable
->entries
, i
);
668 /* The child entry can be NULL when a locally-defined, non-external
669 * symbol is stripped. We wouldn't patch this entry anyway, so we
673 if (!child_entry
->unpatched
.sym
) continue;
675 /* It's possible for the patched parent entry not to have a symbol
676 * (e.g. when the definition is inlined). We can't patch this entry no
677 * matter what, so we'll just skip it and die later if it's a problem
678 * (which is not likely).
681 if (!parent_entry
->patched
.name
) continue;
683 /* 1) If the symbol is defined locally, do not patch */
685 if (kxld_sym_is_defined_locally(child_entry
->unpatched
.sym
)) continue;
687 /* 2) If the child is a pure virtual function, do not patch.
688 * In general, we want to proceed with patching when the symbol is
689 * externally defined because pad slots fall into this category.
690 * The pure virtual function symbol is special case, as the pure
691 * virtual property itself overrides the parent's implementation.
694 if (kxld_sym_is_pure_virtual(child_entry
->unpatched
.sym
)) continue;
696 /* 3) If the symbols are the same, do not patch */
698 if (streq(child_entry
->unpatched
.sym
->name
,
699 parent_entry
->patched
.name
))
704 /* 4) If the parent vtable entry is a pad slot, and the child does not
705 * match it, then the child was built against a newer version of the
706 * libraries, so it is binary-incompatible.
709 require_action(!kxld_sym_name_is_padslot(parent_entry
->patched
.name
),
710 finish
, rval
=KERN_FAILURE
;
711 kxld_log(kKxldLogPatching
, kKxldLogErr
,
712 kKxldLogParentOutOfDate
,
713 kxld_demangle(super_vtable
->name
, &demangled_name1
,
715 kxld_demangle(vtable
->name
, &demangled_name2
,
716 &demangled_length2
)));
718 #if KXLD_USER_OR_STRICT_PATCHING
719 /* 5) If we are doing strict patching, we prevent kexts from declaring
720 * virtual functions and not implementing them. We can tell if a
721 * virtual function is declared but not implemented because we resolve
722 * symbols before patching; an unimplemented function will still be
723 * undefined at this point. We then look at whether the symbol has
724 * the same class prefix as the vtable. If it does, the symbol was
725 * declared as part of the class and not inherited, which means we
726 * should not patch it.
729 if (strict_patching
&& !kxld_sym_is_defined(child_entry
->unpatched
.sym
))
731 char class_name
[KXLD_MAX_NAME_LEN
];
732 char function_prefix
[KXLD_MAX_NAME_LEN
];
733 u_long function_prefix_len
= 0;
735 rval
= kxld_sym_get_class_name_from_vtable_name(vtable
->name
,
736 class_name
, sizeof(class_name
));
737 require_noerr(rval
, finish
);
739 function_prefix_len
=
740 kxld_sym_get_function_prefix_from_class_name(class_name
,
741 function_prefix
, sizeof(function_prefix
));
742 require(function_prefix_len
, finish
);
744 if (!strncmp(child_entry
->unpatched
.sym
->name
,
745 function_prefix
, function_prefix_len
))
750 #endif /* KXLD_USER_OR_STRICT_PATCHING */
752 /* 6) The child symbol is unresolved and different from its parent, so
753 * we need to patch it up. We do this by modifying the relocation
754 * entry of the vtable entry to point to the symbol of the parent
755 * vtable entry. If that symbol does not exist (i.e. we got the data
756 * from a link state object's vtable representation), then we create a
757 * new symbol in the symbol table and point the relocation entry to
761 sym
= kxld_symtab_get_symbol_by_name(symtab
, parent_entry
->patched
.name
);
763 rval
= kxld_symtab_add_symbol(symtab
, parent_entry
->patched
.name
,
764 parent_entry
->patched
.addr
, &sym
);
765 require_noerr(rval
, finish
);
767 require_action(sym
, finish
, rval
=KERN_FAILURE
);
769 rval
= kxld_symtab_get_sym_index(symtab
, sym
, &symindex
);
770 require_noerr(rval
, finish
);
772 rval
= kxld_reloc_update_symindex(child_entry
->unpatched
.reloc
, symindex
);
773 require_noerr(rval
, finish
);
775 kxld_log(kKxldLogPatching
, kKxldLogDetail
,
776 "In vtable '%s', patching '%s' with '%s'.",
777 kxld_demangle(vtable
->name
, &demangled_name1
, &demangled_length1
),
778 kxld_demangle(child_entry
->unpatched
.sym
->name
,
779 &demangled_name2
, &demangled_length2
),
780 kxld_demangle(sym
->name
, &demangled_name3
, &demangled_length3
));
782 kxld_sym_patch(child_entry
->unpatched
.sym
);
783 child_entry
->unpatched
.sym
= sym
;
786 /* Change the vtable representation from the unpatched layout to the
789 for (i
= 0; i
< vtable
->entries
.nitems
; ++i
) {
793 child_entry
= kxld_array_get_item(&vtable
->entries
, i
);
794 if (child_entry
->unpatched
.sym
) {
795 name
= child_entry
->unpatched
.sym
->name
;
796 addr
= child_entry
->unpatched
.sym
->link_addr
;
802 child_entry
->patched
.name
= name
;
803 child_entry
->patched
.addr
= addr
;
806 vtable
->is_patched
= TRUE
;
810 if (demangled_name1
) kxld_free(demangled_name1
, demangled_length1
);
811 if (demangled_name2
) kxld_free(demangled_name2
, demangled_length2
);
812 if (demangled_name3
) kxld_free(demangled_name3
, demangled_length3
);