/*
 * Copyright (c) 2008 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
29 #include <mach-o/loader.h>
30 #include <sys/types.h>
32 #define DEBUG_ASSERT_COMPONENT_NAME_STRING "kxld"
33 #include <AssertMacros.h>
35 #include "kxld_reloc.h"
36 #include "kxld_sect.h"
37 #include "kxld_state.h"
39 #include "kxld_symtab.h"
40 #include "kxld_util.h"
41 #include "kxld_vtable.h"
43 #define VTABLE_ENTRY_SIZE_32 4
44 #define VTABLE_HEADER_LEN_32 2
45 #define VTABLE_HEADER_SIZE_32 (VTABLE_HEADER_LEN_32 * VTABLE_ENTRY_SIZE_32)
47 #define VTABLE_ENTRY_SIZE_64 8
48 #define VTABLE_HEADER_LEN_64 2
49 #define VTABLE_HEADER_SIZE_64 (VTABLE_HEADER_LEN_64 * VTABLE_ENTRY_SIZE_64)
51 static kern_return_t
init_by_relocs(KXLDVTable
*vtable
, const KXLDSym
*sym
,
52 const KXLDSect
*sect
, const KXLDSymtab
*symtab
,
53 const KXLDRelocator
*relocator
);
55 static kern_return_t
init_by_entries_and_relocs(KXLDVTable
*vtable
,
56 const KXLDSym
*sym
, const KXLDSymtab
*symtab
,
57 const KXLDRelocator
*relocator
, const KXLDArray
*relocs
);
59 static kxld_addr_t
get_entry_value(u_char
*entry
, const KXLDRelocator
*relocator
)
60 __attribute__((pure
));
62 static kxld_addr_t
swap_entry_value(kxld_addr_t entry_value
,
63 const KXLDRelocator
*relocator
) __attribute__((const));
65 static kern_return_t
init_by_entries(KXLDVTable
*vtable
, const KXLDSymtab
*symtab
,
66 const KXLDRelocator
*relocator
);
68 /*******************************************************************************
69 *******************************************************************************/
71 kxld_vtable_init_from_kernel_macho(KXLDVTable
*vtable
, const KXLDSym
*sym
,
72 const KXLDSect
*sect
, const KXLDSymtab
*symtab
,
73 const KXLDRelocator
*relocator
)
75 kern_return_t rval
= KERN_FAILURE
;
82 vtable
->name
= sym
->name
;
83 vtable
->vtable
= sect
->data
+ kxld_sym_get_section_offset(sym
, sect
);
84 vtable
->is_patched
= FALSE
;
86 require_action(kxld_sect_get_num_relocs(sect
) == 0, finish
,
88 kxld_log(kKxldLogPatching
, kKxldLogErr
,
89 kKxldLogMalformedVTable
, vtable
->name
));
91 rval
= init_by_entries(vtable
, symtab
, relocator
);
92 require_noerr(rval
, finish
);
94 vtable
->is_patched
= TRUE
;
100 if (rval
) kxld_vtable_deinit(vtable
);
105 /*******************************************************************************
106 *******************************************************************************/
108 kxld_vtable_init_from_object_macho(KXLDVTable
*vtable
, const KXLDSym
*sym
,
109 const KXLDSect
*sect
, const KXLDSymtab
*symtab
,
110 const KXLDRelocator
*relocator
)
112 kern_return_t rval
= KERN_FAILURE
;
119 vtable
->name
= sym
->name
;
120 vtable
->vtable
= sect
->data
+ kxld_sym_get_section_offset(sym
, sect
);
121 vtable
->is_patched
= FALSE
;
123 require_action(kxld_sect_get_num_relocs(sect
) > 0, finish
,
125 kxld_log(kKxldLogPatching
, kKxldLogErr
,
126 kKxldLogMalformedVTable
, vtable
->name
));
128 rval
= init_by_relocs(vtable
, sym
, sect
, symtab
, relocator
);
129 require_noerr(rval
, finish
);
135 if (rval
) kxld_vtable_deinit(vtable
);
140 /*******************************************************************************
141 *******************************************************************************/
143 kxld_vtable_init_from_final_macho(KXLDVTable
*vtable
, const KXLDSym
*sym
,
144 const KXLDSect
*sect
, const KXLDSymtab
*symtab
,
145 const KXLDRelocator
*relocator
, const KXLDArray
*relocs
)
147 kern_return_t rval
= KERN_FAILURE
;
154 vtable
->name
= sym
->name
;
155 vtable
->vtable
= sect
->data
+ kxld_sym_get_section_offset(sym
, sect
);
156 vtable
->is_patched
= FALSE
;
158 require_action(kxld_sect_get_num_relocs(sect
) == 0, finish
,
160 kxld_log(kKxldLogPatching
, kKxldLogErr
,
161 kKxldLogMalformedVTable
, vtable
->name
));
163 rval
= init_by_entries_and_relocs(vtable
, sym
, symtab
,
165 require_noerr(rval
, finish
);
170 if (rval
) kxld_vtable_deinit(vtable
);
175 #if KXLD_USER_OR_ILP32
176 /*******************************************************************************
177 *******************************************************************************/
179 kxld_vtable_init_from_link_state_32(KXLDVTable
*vtable
, u_char
*file
,
182 kern_return_t rval
= KERN_FAILURE
;
183 KXLDSymEntry32
*sym
= NULL
;
184 KXLDVTableEntry
*entry
= NULL
;
191 vtable
->name
= (char *) (file
+ hdr
->nameoff
);
192 vtable
->is_patched
= TRUE
;
194 rval
= kxld_array_init(&vtable
->entries
, sizeof(KXLDVTableEntry
),
196 require_noerr(rval
, finish
);
198 sym
= (KXLDSymEntry32
*) (file
+ hdr
->vtableoff
);
199 for (i
= 0; i
< vtable
->entries
.nitems
; ++i
, ++sym
) {
200 entry
= kxld_array_get_item(&vtable
->entries
, i
);
201 entry
->patched
.name
= (char *) (file
+ sym
->nameoff
);
202 entry
->patched
.addr
= sym
->addr
;
210 #endif /* KXLD_USER_OR_ILP32 */
212 #if KXLD_USER_OR_LP64
213 /*******************************************************************************
214 *******************************************************************************/
216 kxld_vtable_init_from_link_state_64(KXLDVTable
*vtable
, u_char
*file
,
219 kern_return_t rval
= KERN_FAILURE
;
220 KXLDSymEntry64
*sym
= NULL
;
221 KXLDVTableEntry
*entry
= NULL
;
228 vtable
->name
= (char *) (file
+ hdr
->nameoff
);
229 vtable
->is_patched
= TRUE
;
231 rval
= kxld_array_init(&vtable
->entries
, sizeof(KXLDVTableEntry
),
233 require_noerr(rval
, finish
);
235 sym
= (KXLDSymEntry64
*) (file
+ hdr
->vtableoff
);
236 for (i
= 0; i
< vtable
->entries
.nitems
; ++i
, ++sym
) {
237 entry
= kxld_array_get_item(&vtable
->entries
, i
);
238 entry
->patched
.name
= (char *) (file
+ sym
->nameoff
);
239 entry
->patched
.addr
= sym
->addr
;
247 #endif /* KXLD_USER_OR_LP64 */
249 /*******************************************************************************
250 *******************************************************************************/
252 kxld_vtable_copy(KXLDVTable
*vtable
, const KXLDVTable
*src
)
254 kern_return_t rval
= KERN_FAILURE
;
259 vtable
->vtable
= src
->vtable
;
260 vtable
->name
= src
->name
;
261 vtable
->is_patched
= src
->is_patched
;
263 rval
= kxld_array_copy(&vtable
->entries
, &src
->entries
);
264 require_noerr(rval
, finish
);
/*******************************************************************************
* Initializes a vtable object by matching up relocation entries to the vtable's
* entries and finding the corresponding symbols.
*******************************************************************************/
277 init_by_relocs(KXLDVTable
*vtable
, const KXLDSym
*sym
, const KXLDSect
*sect
,
278 const KXLDSymtab
*symtab
, const KXLDRelocator
*relocator
)
280 kern_return_t rval
= KERN_FAILURE
;
281 KXLDReloc
*reloc
= NULL
;
282 KXLDVTableEntry
*entry
= NULL
;
283 KXLDSym
*tmpsym
= NULL
;
284 kxld_addr_t vtable_base_offset
= 0;
285 kxld_addr_t entry_offset
= 0;
288 u_int vtable_entry_size
= 0;
289 u_int base_reloc_index
= 0;
290 u_int reloc_index
= 0;
298 /* Find the first entry past the vtable padding */
300 vtable_base_offset
= kxld_sym_get_section_offset(sym
, sect
);
301 if (relocator
->is_32_bit
) {
302 vtable_entry_size
= VTABLE_ENTRY_SIZE_32
;
303 vtable_base_offset
+= VTABLE_HEADER_SIZE_32
;
305 vtable_entry_size
= VTABLE_ENTRY_SIZE_64
;
306 vtable_base_offset
+= VTABLE_HEADER_SIZE_64
;
309 /* Find the relocation entry at the start of the vtable */
311 rval
= kxld_reloc_get_reloc_index_by_offset(§
->relocs
,
312 vtable_base_offset
, &base_reloc_index
);
313 require_noerr(rval
, finish
);
315 /* Count the number of consecutive relocation entries to find the number of
316 * vtable entries. For some reason, the __TEXT,__const relocations are
317 * sorted in descending order, so we have to walk backwards. Also, make
318 * sure we don't run off the end of the section's relocs.
321 reloc_index
= base_reloc_index
;
322 entry_offset
= vtable_base_offset
;
323 reloc
= kxld_array_get_item(§
->relocs
, reloc_index
);
324 while (reloc
->address
== entry_offset
) {
326 if (!reloc_index
) break;
330 reloc
= kxld_array_get_item(§
->relocs
, reloc_index
);
331 entry_offset
+= vtable_entry_size
;
334 /* Allocate the symbol index */
336 rval
= kxld_array_init(&vtable
->entries
, sizeof(KXLDVTableEntry
), nentries
);
337 require_noerr(rval
, finish
);
339 /* Find the symbols for each vtable entry */
341 for (i
= 0; i
< vtable
->entries
.nitems
; ++i
) {
342 reloc
= kxld_array_get_item(§
->relocs
, base_reloc_index
- i
);
343 entry
= kxld_array_get_item(&vtable
->entries
, i
);
345 /* If we can't find a symbol, it means it is a locally-defined,
346 * non-external symbol that has been stripped. We don't patch over
347 * locally-defined symbols, so we leave the symbol as NULL and just
348 * skip it. We won't be able to patch subclasses with this symbol,
349 * but there isn't much we can do about that.
351 tmpsym
= kxld_reloc_get_symbol(relocator
, reloc
, sect
->data
, symtab
);
353 entry
->unpatched
.sym
= tmpsym
;
354 entry
->unpatched
.reloc
= reloc
;
362 /*******************************************************************************
363 *******************************************************************************/
365 get_entry_value(u_char
*entry
, const KXLDRelocator
*relocator
)
367 kxld_addr_t entry_value
;
369 if (relocator
->is_32_bit
) {
370 entry_value
= *(uint32_t *)entry
;
372 entry_value
= *(uint64_t *)entry
;
379 /*******************************************************************************
380 *******************************************************************************/
382 swap_entry_value(kxld_addr_t entry_value
, const KXLDRelocator
*relocator
)
384 if (relocator
->is_32_bit
) {
385 entry_value
= OSSwapInt32((uint32_t) entry_value
);
387 entry_value
= OSSwapInt64((uint64_t) entry_value
);
394 /*******************************************************************************
395 * Initializes a vtable object by reading the symbol values out of the vtable
396 * entries and performing reverse symbol lookups on those values.
397 *******************************************************************************/
399 init_by_entries(KXLDVTable
*vtable
, const KXLDSymtab
*symtab
,
400 const KXLDRelocator
*relocator
)
402 kern_return_t rval
= KERN_FAILURE
;
403 KXLDVTableEntry
*tmpentry
= NULL
;
405 u_char
*base_entry
= NULL
;
406 u_char
*entry
= NULL
;
407 kxld_addr_t entry_value
= 0;
408 u_int vtable_entry_size
= 0;
409 u_int vtable_header_size
= 0;
413 if (relocator
->is_32_bit
) {
414 vtable_entry_size
= VTABLE_ENTRY_SIZE_32
;
415 vtable_header_size
= VTABLE_HEADER_SIZE_32
;
417 vtable_entry_size
= VTABLE_ENTRY_SIZE_64
;
418 vtable_header_size
= VTABLE_HEADER_SIZE_64
;
421 base_entry
= vtable
->vtable
+ vtable_header_size
;
423 /* Count the number of entries (the vtable is null-terminated) */
426 entry_value
= get_entry_value(entry
, relocator
);
427 while (entry_value
) {
429 entry
+= vtable_entry_size
;
430 entry_value
= get_entry_value(entry
, relocator
);
433 /* Allocate the symbol index */
435 rval
= kxld_array_init(&vtable
->entries
, sizeof(KXLDVTableEntry
), nentries
);
436 require_noerr(rval
, finish
);
438 /* Look up the symbols for each entry */
442 for (i
= 0; i
< vtable
->entries
.nitems
; ++i
) {
443 entry
= base_entry
+ (i
* vtable_entry_size
);
444 entry_value
= get_entry_value(entry
, relocator
);
447 if (relocator
->swap
) {
448 entry_value
= swap_entry_value(entry_value
, relocator
);
452 /* If we can't find the symbol, it means that the virtual function was
453 * defined inline. There's not much I can do about this; it just means
454 * I can't patch this function.
456 tmpentry
= kxld_array_get_item(&vtable
->entries
, i
);
457 sym
= kxld_symtab_get_cxx_symbol_by_value(symtab
, entry_value
);
460 tmpentry
->patched
.name
= sym
->name
;
461 tmpentry
->patched
.addr
= sym
->link_addr
;
463 tmpentry
->patched
.name
= NULL
;
464 tmpentry
->patched
.addr
= 0;
/*******************************************************************************
* Initializes vtables by performing a reverse lookup on symbol values when
* they exist in the vtable entry, and by looking through a matching relocation
* entry when the vtable entry is NULL.
*
* Final linked images require this hybrid vtable initialization approach
* because they are already internally resolved.  This means that the vtables
* contain valid entries to local symbols, but still have relocation entries for
* external symbols.
*******************************************************************************/
485 init_by_entries_and_relocs(KXLDVTable
*vtable
, const KXLDSym
*sym
,
486 const KXLDSymtab
*symtab
, const KXLDRelocator
*relocator
,
487 const KXLDArray
*relocs
)
489 kern_return_t rval
= KERN_FAILURE
;
490 KXLDReloc
*reloc
= NULL
;
491 KXLDVTableEntry
*tmpentry
= NULL
;
492 KXLDSym
*tmpsym
= NULL
;
493 u_int vtable_entry_size
= 0;
494 u_int vtable_header_size
= 0;
495 u_char
*base_entry
= NULL
;
496 u_char
*entry
= NULL
;
497 kxld_addr_t entry_value
= 0;
498 kxld_addr_t base_entry_offset
= 0;
499 kxld_addr_t entry_offset
= 0;
508 /* Find the first entry and its offset past the vtable padding */
510 if (relocator
->is_32_bit
) {
511 vtable_entry_size
= VTABLE_ENTRY_SIZE_32
;
512 vtable_header_size
= VTABLE_HEADER_SIZE_32
;
514 vtable_entry_size
= VTABLE_ENTRY_SIZE_64
;
515 vtable_header_size
= VTABLE_HEADER_SIZE_64
;
518 base_entry
= vtable
->vtable
+ vtable_header_size
;
520 base_entry_offset
= sym
->base_addr
;
521 base_entry_offset
+= vtable_header_size
;
523 /* In a final linked image, a vtable slot is valid if it is nonzero
524 * (meaning the userspace linker has already resolved it, or if it has
525 * a relocation entry. We'll know the end of the vtable when we find a
526 * slot that meets neither of these conditions.
529 entry_value
= get_entry_value(entry
, relocator
);
530 entry_offset
= base_entry_offset
;
532 entry_value
= get_entry_value(entry
, relocator
);
534 reloc
= kxld_reloc_get_reloc_by_offset(relocs
, entry_offset
);
539 entry
+= vtable_entry_size
;
540 entry_offset
+= vtable_entry_size
;
543 /* Allocate the symbol index */
545 rval
= kxld_array_init(&vtable
->entries
, sizeof(KXLDVTableEntry
), nentries
);
546 require_noerr(rval
, finish
);
548 /* Find the symbols for each vtable entry */
551 entry_value
= get_entry_value(entry
, relocator
);
552 entry_offset
= base_entry_offset
;
553 for (i
= 0; i
< vtable
->entries
.nitems
; ++i
) {
554 entry_value
= get_entry_value(entry
, relocator
);
556 /* If we can't find a symbol, it means it is a locally-defined,
557 * non-external symbol that has been stripped. We don't patch over
558 * locally-defined symbols, so we leave the symbol as NULL and just
559 * skip it. We won't be able to patch subclasses with this symbol,
560 * but there isn't much we can do about that.
564 if (relocator
->swap
) {
565 entry_value
= swap_entry_value(entry_value
, relocator
);
570 tmpsym
= kxld_symtab_get_cxx_symbol_by_value(symtab
, entry_value
);
572 reloc
= kxld_reloc_get_reloc_by_offset(relocs
, entry_offset
);
573 require_action(reloc
, finish
,
575 kxld_log(kKxldLogPatching
, kKxldLogErr
,
576 kKxldLogMalformedVTable
, vtable
->name
));
578 tmpsym
= kxld_reloc_get_symbol(relocator
, reloc
,
579 /* data */ NULL
, symtab
);
582 tmpentry
= kxld_array_get_item(&vtable
->entries
, i
);
583 tmpentry
->unpatched
.reloc
= reloc
;
584 tmpentry
->unpatched
.sym
= tmpsym
;
586 entry
+= vtable_entry_size
;
587 entry_offset
+= vtable_entry_size
;
596 /*******************************************************************************
597 *******************************************************************************/
599 kxld_vtable_clear(KXLDVTable
*vtable
)
603 vtable
->vtable
= NULL
;
605 vtable
->is_patched
= FALSE
;
606 kxld_array_clear(&vtable
->entries
);
609 /*******************************************************************************
610 *******************************************************************************/
612 kxld_vtable_deinit(KXLDVTable
*vtable
)
616 kxld_array_deinit(&vtable
->entries
);
617 bzero(vtable
, sizeof(*vtable
));
620 /*******************************************************************************
621 * Patching vtables allows us to preserve binary compatibility across releases.
622 *******************************************************************************/
624 kxld_vtable_patch(KXLDVTable
*vtable
, const KXLDVTable
*super_vtable
,
625 KXLDSymtab
*symtab
, boolean_t strict_patching __unused
)
627 kern_return_t rval
= KERN_FAILURE
;
628 KXLDVTableEntry
*child_entry
= NULL
;
629 KXLDVTableEntry
*parent_entry
= NULL
;
637 require_action(!vtable
->is_patched
, finish
, rval
=KERN_SUCCESS
);
638 require_action(vtable
->entries
.nitems
>= super_vtable
->entries
.nitems
, finish
,
640 kxld_log(kKxldLogPatching
, kKxldLogErr
,
641 kKxldLogMalformedVTable
, vtable
->name
));
643 for (i
= 0; i
< super_vtable
->entries
.nitems
; ++i
) {
644 child_entry
= kxld_array_get_item(&vtable
->entries
, i
);
645 parent_entry
= kxld_array_get_item(&super_vtable
->entries
, i
);
647 /* The child entry can be NULL when a locally-defined, non-external
648 * symbol is stripped. We wouldn't patch this entry anyway, so we
652 if (!child_entry
->unpatched
.sym
) continue;
654 /* It's possible for the patched parent entry not to have a symbol
655 * (e.g. when the definition is inlined). We can't patch this entry no
656 * matter what, so we'll just skip it and die later if it's a problem
657 * (which is not likely).
660 if (!parent_entry
->patched
.name
) continue;
662 /* 1) If the symbol is defined locally, do not patch */
664 if (kxld_sym_is_defined_locally(child_entry
->unpatched
.sym
)) continue;
666 /* 2) If the child is a pure virtual function, do not patch.
667 * In general, we want to proceed with patching when the symbol is
668 * externally defined because pad slots fall into this category.
669 * The pure virtual function symbol is special case, as the pure
670 * virtual property itself overrides the parent's implementation.
673 if (kxld_sym_is_pure_virtual(child_entry
->unpatched
.sym
)) continue;
675 /* 3) If the symbols are the same, do not patch */
677 if (streq(child_entry
->unpatched
.sym
->name
,
678 parent_entry
->patched
.name
))
683 /* 4) If the parent vtable entry is a pad slot, and the child does not
684 * match it, then the child was built against a newer version of the
685 * libraries, so it is binary-incompatible.
688 require_action(!kxld_sym_name_is_padslot(parent_entry
->patched
.name
),
689 finish
, rval
=KERN_FAILURE
;
690 kxld_log(kKxldLogPatching
, kKxldLogErr
,
691 kKxldLogParentOutOfDate
, super_vtable
->name
, vtable
->name
));
693 #if KXLD_USER_OR_STRICT_PATCHING
694 /* 5) If we are doing strict patching, we prevent kexts from declaring
695 * virtual functions and not implementing them. We can tell if a
696 * virtual function is declared but not implemented because we resolve
697 * symbols before patching; an unimplemented function will still be
698 * undefined at this point. We then look at whether the symbol has
699 * the same class prefix as the vtable. If it does, the symbol was
700 * declared as part of the class and not inherited, which means we
701 * should not patch it.
704 if (strict_patching
&& !kxld_sym_is_defined(child_entry
->unpatched
.sym
))
706 char class_name
[KXLD_MAX_NAME_LEN
];
707 char function_prefix
[KXLD_MAX_NAME_LEN
];
708 u_long function_prefix_len
= 0;
710 rval
= kxld_sym_get_class_name_from_vtable_name(vtable
->name
,
711 class_name
, sizeof(class_name
));
712 require_noerr(rval
, finish
);
714 function_prefix_len
=
715 kxld_sym_get_function_prefix_from_class_name(class_name
,
716 function_prefix
, sizeof(function_prefix
));
717 require(function_prefix_len
, finish
);
719 if (!strncmp(child_entry
->unpatched
.sym
->name
,
720 function_prefix
, function_prefix_len
))
725 #endif /* KXLD_USER_OR_STRICT_PATCHING */
727 /* 6) The child symbol is unresolved and different from its parent, so
728 * we need to patch it up. We do this by modifying the relocation
729 * entry of the vtable entry to point to the symbol of the parent
730 * vtable entry. If that symbol does not exist (i.e. we got the data
731 * from a link state object's vtable representation), then we create a
732 * new symbol in the symbol table and point the relocation entry to
736 sym
= kxld_symtab_get_symbol_by_name(symtab
, parent_entry
->patched
.name
);
738 rval
= kxld_symtab_add_symbol(symtab
, parent_entry
->patched
.name
,
739 parent_entry
->patched
.addr
, &sym
);
740 require_noerr(rval
, finish
);
742 require_action(sym
, finish
, rval
=KERN_FAILURE
);
744 rval
= kxld_symtab_get_sym_index(symtab
, sym
, &symindex
);
745 require_noerr(rval
, finish
);
747 rval
= kxld_reloc_update_symindex(child_entry
->unpatched
.reloc
, symindex
);
748 require_noerr(rval
, finish
);
750 kxld_log(kKxldLogPatching
, kKxldLogDetail
,
751 "In vtable %s, patching %s with %s.",
752 vtable
->name
, child_entry
->unpatched
.sym
->name
, sym
->name
);
754 kxld_sym_patch(child_entry
->unpatched
.sym
);
755 child_entry
->unpatched
.sym
= sym
;
758 /* Change the vtable representation from the unpatched layout to the
761 for (i
= 0; i
< vtable
->entries
.nitems
; ++i
) {
765 child_entry
= kxld_array_get_item(&vtable
->entries
, i
);
766 if (child_entry
->unpatched
.sym
) {
767 name
= child_entry
->unpatched
.sym
->name
;
768 addr
= child_entry
->unpatched
.sym
->link_addr
;
774 child_entry
->patched
.name
= name
;
775 child_entry
->patched
.addr
= addr
;
778 vtable
->is_patched
= TRUE
;