]> git.saurik.com Git - apple/xnu.git/blame_incremental - libkern/kxld/kxld_vtable.c
xnu-7195.101.1.tar.gz
[apple/xnu.git] / libkern / kxld / kxld_vtable.c
... / ...
CommitLineData
1/*
2 * Copyright (c) 2008 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28#include <string.h>
29#include <mach-o/loader.h>
30#include <sys/types.h>
31
32#if KERNEL
33 #ifdef MACH_ASSERT
34 #undef MACH_ASSERT
35 #endif
36 #define MACH_ASSERT 1
37 #include <kern/assert.h>
38#else
39 #include <assert.h>
40#endif
41
42#define DEBUG_ASSERT_COMPONENT_NAME_STRING "kxld"
43#include <AssertMacros.h>
44
45#include "kxld_demangle.h"
46#include "kxld_dict.h"
47#include "kxld_object.h"
48#include "kxld_reloc.h"
49#include "kxld_sect.h"
50#include "kxld_sym.h"
51#include "kxld_symtab.h"
52#include "kxld_util.h"
53#include "kxld_vtable.h"
54
55#define VTABLE_ENTRY_SIZE_32 4
56#define VTABLE_HEADER_LEN_32 2
57#define VTABLE_HEADER_SIZE_32 (VTABLE_HEADER_LEN_32 * VTABLE_ENTRY_SIZE_32)
58
59#define VTABLE_ENTRY_SIZE_64 8
60#define VTABLE_HEADER_LEN_64 2
61#define VTABLE_HEADER_SIZE_64 (VTABLE_HEADER_LEN_64 * VTABLE_ENTRY_SIZE_64)
62
63static void get_vtable_base_sizes(boolean_t is_32_bit, u_int *vtable_entry_size,
64 u_int *vtable_header_size);
65
66static kern_return_t init_by_relocs(KXLDVTable *vtable, const KXLDSym *vtable_sym,
67 const KXLDSect *sect, const KXLDRelocator *relocator);
68
69static kern_return_t init_by_entries_and_relocs(KXLDVTable *vtable,
70 const KXLDSym *vtable_sym, const KXLDRelocator *relocator,
71 const KXLDArray *relocs, const KXLDDict *defined_cxx_symbols);
72
73static kern_return_t init_by_entries(KXLDVTable *vtable,
74 const KXLDRelocator *relocator, const KXLDDict *defined_cxx_symbols);
75
/*******************************************************************************
* Builds the in-memory representation of a vtable from its symbol.  The
* initialization strategy depends on the state of the containing object:
*   - already linked:      read resolved entry values directly and reverse-map
*                          them to symbols (init_by_entries); vtable is patched.
*   - final linked image:  hybrid of resolved entries and external relocation
*                          entries (init_by_entries_and_relocs).
*   - relocatable object:  walk the section's relocation entries
*                          (init_by_relocs).
* Returns KERN_SUCCESS on success; on failure the vtable may be partially
* initialized and rval carries the error.
*******************************************************************************/
kern_return_t
kxld_vtable_init(KXLDVTable *vtable, const KXLDSym *vtable_sym,
    const KXLDObject *object, const KXLDDict *defined_cxx_symbols)
{
	kern_return_t rval = KERN_FAILURE;
	const KXLDArray *extrelocs = NULL;
	const KXLDRelocator *relocator = NULL;
	const KXLDSect *vtable_sect = NULL;
	char *demangled_name = NULL;
	size_t demangled_length = 0;

	check(vtable);
	check(vtable_sym);
	check(object);

	relocator = kxld_object_get_relocator(object);

	/* Locate the section that contains the vtable's data */
	vtable_sect = kxld_object_get_section_by_index(object,
	    vtable_sym->sectnum);
	require_action(vtable_sect, finish, rval = KERN_FAILURE);

	vtable->name = vtable_sym->name;
	vtable->vtable = vtable_sect->data +
	    kxld_sym_get_section_offset(vtable_sym, vtable_sect);

	if (kxld_object_is_linked(object)) {
		/* Fully linked: entries already hold final addresses */
		rval = init_by_entries(vtable, relocator, defined_cxx_symbols);
		require_noerr(rval, finish);

		vtable->is_patched = TRUE;
	} else {
		if (kxld_object_is_final_image(object)) {
			/* Final image: internal refs resolved, external refs
			 * still carried as external relocation entries */
			extrelocs = kxld_object_get_extrelocs(object);

			require_action(extrelocs, finish,
			    rval = KERN_FAILURE;
			    kxld_log(kKxldLogPatching, kKxldLogErr,
			    kKxldLogMalformedVTable,
			    kxld_demangle(vtable->name,
			    &demangled_name, &demangled_length)));

			rval = init_by_entries_and_relocs(vtable, vtable_sym,
			    relocator, extrelocs, defined_cxx_symbols);
			require_noerr(rval, finish);
		} else {
			/* Relocatable object: the vtable's section must carry
			 * relocation entries we can walk */
			require_action(kxld_sect_get_num_relocs(vtable_sect) > 0, finish,
			    rval = KERN_FAILURE;
			    kxld_log(kKxldLogPatching, kKxldLogErr,
			    kKxldLogMalformedVTable,
			    kxld_demangle(vtable->name,
			    &demangled_name, &demangled_length)));

			rval = init_by_relocs(vtable, vtable_sym, vtable_sect, relocator);
			require_noerr(rval, finish);
		}

		vtable->is_patched = FALSE;
	}

	rval = KERN_SUCCESS;
finish:

	if (demangled_name) {
		kxld_free(demangled_name, demangled_length);
	}

	return rval;
}
146
147/*******************************************************************************
148*******************************************************************************/
149static void
150get_vtable_base_sizes(boolean_t is_32_bit, u_int *vtable_entry_size,
151 u_int *vtable_header_size)
152{
153 check(vtable_entry_size);
154 check(vtable_header_size);
155
156 if (is_32_bit) {
157 *vtable_entry_size = VTABLE_ENTRY_SIZE_32;
158 *vtable_header_size = VTABLE_HEADER_SIZE_32;
159 } else {
160 *vtable_entry_size = VTABLE_ENTRY_SIZE_64;
161 *vtable_header_size = VTABLE_HEADER_SIZE_64;
162 }
163}
164
/*******************************************************************************
* Initializes a vtable object by matching up relocation entries to the vtable's
* entries and finding the corresponding symbols.
*
* Used for relocatable objects, where every vtable slot still has a relocation
* entry in the vtable's own section.
*******************************************************************************/
static kern_return_t
init_by_relocs(KXLDVTable *vtable, const KXLDSym *vtable_sym,
    const KXLDSect *sect, const KXLDRelocator *relocator)
{
	kern_return_t rval = KERN_FAILURE;
	KXLDReloc *reloc = NULL;
	KXLDVTableEntry *entry = NULL;
	KXLDSym *sym = NULL;
	kxld_addr_t vtable_base_offset = 0;
	kxld_addr_t entry_offset = 0;
	u_int i = 0;
	u_int nentries = 0;
	u_int vtable_entry_size = 0;
	u_int vtable_header_size = 0;
	u_int base_reloc_index = 0;
	u_int reloc_index = 0;

	check(vtable);
	check(vtable_sym);
	check(sect);
	check(relocator);

	/* Find the first entry past the vtable padding */

	(void) get_vtable_base_sizes(relocator->is_32_bit,
	    &vtable_entry_size, &vtable_header_size);

	vtable_base_offset = kxld_sym_get_section_offset(vtable_sym, sect) +
	    vtable_header_size;

	/* Find the relocation entry at the start of the vtable */

	rval = kxld_reloc_get_reloc_index_by_offset(&sect->relocs,
	    vtable_base_offset, &base_reloc_index);
	require_noerr(rval, finish);

	/* Count the number of consecutive relocation entries to find the number of
	 * vtable entries. For some reason, the __TEXT,__const relocations are
	 * sorted in descending order, so we have to walk backwards. Also, make
	 * sure we don't run off the end of the section's relocs.
	 */

	reloc_index = base_reloc_index;
	entry_offset = vtable_base_offset;
	reloc = kxld_array_get_item(&sect->relocs, reloc_index);
	while (reloc->address == entry_offset) {
		++nentries;
		/* reloc_index counts down; stop before wrapping below zero */
		if (!reloc_index) {
			break;
		}

		--reloc_index;

		reloc = kxld_array_get_item(&sect->relocs, reloc_index);
		entry_offset += vtable_entry_size;
	}

	/* Allocate the symbol index */

	rval = kxld_array_init(&vtable->entries, sizeof(KXLDVTableEntry), nentries);
	require_noerr(rval, finish);

	/* Find the symbols for each vtable entry.  Because the relocs are in
	 * descending order, entry i lives at reloc (base_reloc_index - i). */

	for (i = 0; i < vtable->entries.nitems; ++i) {
		reloc = kxld_array_get_item(&sect->relocs, base_reloc_index - i);
		entry = kxld_array_get_item(&vtable->entries, i);

		/* If we can't find a symbol, it means it is a locally-defined,
		 * non-external symbol that has been stripped. We don't patch over
		 * locally-defined symbols, so we leave the symbol as NULL and just
		 * skip it. We won't be able to patch subclasses with this symbol,
		 * but there isn't much we can do about that.
		 */
		sym = kxld_reloc_get_symbol(relocator, reloc, sect->data);

		entry->unpatched.sym = sym;
		entry->unpatched.reloc = reloc;
	}

	rval = KERN_SUCCESS;
finish:
	return rval;
}
253
254/*******************************************************************************
255* Initializes a vtable object by reading the symbol values out of the vtable
256* entries and performing reverse symbol lookups on those values.
257*******************************************************************************/
258static kern_return_t
259init_by_entries(KXLDVTable *vtable, const KXLDRelocator *relocator,
260 const KXLDDict *defined_cxx_symbols)
261{
262 kern_return_t rval = KERN_FAILURE;
263 KXLDVTableEntry *tmpentry = NULL;
264 KXLDSym *sym = NULL;
265 kxld_addr_t entry_value = 0;
266 u_long entry_offset;
267 u_int vtable_entry_size = 0;
268 u_int vtable_header_size = 0;
269 u_int nentries = 0;
270 u_int i = 0;
271
272 check(vtable);
273 check(relocator);
274
275 (void) get_vtable_base_sizes(relocator->is_32_bit,
276 &vtable_entry_size, &vtable_header_size);
277
278 /* Count the number of entries (the vtable is null-terminated) */
279
280 entry_offset = vtable_header_size;
281 while (1) {
282 entry_value = kxld_relocator_get_pointer_at_addr(relocator,
283 vtable->vtable, entry_offset);
284 if (!entry_value) {
285 break;
286 }
287
288 entry_offset += vtable_entry_size;
289 ++nentries;
290 }
291
292 /* Allocate the symbol index */
293
294 rval = kxld_array_init(&vtable->entries, sizeof(KXLDVTableEntry), nentries);
295 require_noerr(rval, finish);
296
297 /* Look up the symbols for each entry */
298
299 for (i = 0, entry_offset = vtable_header_size;
300 i < vtable->entries.nitems;
301 ++i, entry_offset += vtable_entry_size) {
302 entry_value = kxld_relocator_get_pointer_at_addr(relocator,
303 vtable->vtable, entry_offset);
304
305 /* If we can't find the symbol, it means that the virtual function was
306 * defined inline. There's not much I can do about this; it just means
307 * I can't patch this function.
308 */
309 tmpentry = kxld_array_get_item(&vtable->entries, i);
310 sym = kxld_dict_find(defined_cxx_symbols, &entry_value);
311
312 if (sym) {
313 tmpentry->patched.name = sym->name;
314 tmpentry->patched.addr = sym->link_addr;
315 } else {
316 tmpentry->patched.name = NULL;
317 tmpentry->patched.addr = 0;
318 }
319 }
320
321 rval = KERN_SUCCESS;
322finish:
323 return rval;
324}
325
/*******************************************************************************
* Initializes vtables by performing a reverse lookup on symbol values when
* they exist in the vtable entry, and by looking through a matching relocation
* entry when the vtable entry is NULL.
*
* Final linked images require this hybrid vtable initialization approach
* because they are already internally resolved. This means that the vtables
* contain valid entries to local symbols, but still have relocation entries for
* external symbols.
*******************************************************************************/
static kern_return_t
init_by_entries_and_relocs(KXLDVTable *vtable, const KXLDSym *vtable_sym,
    const KXLDRelocator *relocator, const KXLDArray *relocs,
    const KXLDDict *defined_cxx_symbols)
{
	kern_return_t rval = KERN_FAILURE;
	KXLDReloc *reloc = NULL;
	KXLDVTableEntry *tmpentry = NULL;
	KXLDSym *sym = NULL;
	u_int vtable_entry_size = 0;
	u_int vtable_header_size = 0;
	kxld_addr_t entry_value = 0;
	u_long entry_offset = 0;
	u_int nentries = 0;
	u_int i = 0;
	char *demangled_name1 = NULL;
	size_t demangled_length1 = 0;

	check(vtable);
	check(vtable_sym);
	check(relocator);
	check(relocs);

	/* Find the first entry and its offset past the vtable padding */

	(void) get_vtable_base_sizes(relocator->is_32_bit,
	    &vtable_entry_size, &vtable_header_size);

	/* In a final linked image, a vtable slot is valid if it is nonzero
	 * (meaning the userspace linker has already resolved it) or if it has
	 * a relocation entry. We'll know the end of the vtable when we find a
	 * slot that meets neither of these conditions.
	 */
	entry_offset = vtable_header_size;
	while (1) {
		entry_value = kxld_relocator_get_pointer_at_addr(relocator,
		    vtable->vtable, entry_offset);
		if (!entry_value) {
			/* Zero slot: still counts as an entry if an external
			 * reloc will fill it in; otherwise it's the terminator */
			reloc = kxld_reloc_get_reloc_by_offset(relocs,
			    vtable_sym->base_addr + entry_offset);
			if (!reloc) {
				break;
			}
		}

		++nentries;
		entry_offset += vtable_entry_size;
	}

	/* Allocate the symbol index */

	rval = kxld_array_init(&vtable->entries, sizeof(KXLDVTableEntry), nentries);
	require_noerr(rval, finish);

	/* Find the symbols for each vtable entry */

	for (i = 0, entry_offset = vtable_header_size;
	    i < vtable->entries.nitems;
	    ++i, entry_offset += vtable_entry_size) {
		entry_value = kxld_relocator_get_pointer_at_addr(relocator,
		    vtable->vtable, entry_offset);

		/* If we can't find a symbol, it means it is a locally-defined,
		 * non-external symbol that has been stripped. We don't patch over
		 * locally-defined symbols, so we leave the symbol as NULL and just
		 * skip it. We won't be able to patch subclasses with this symbol,
		 * but there isn't much we can do about that.
		 */
		if (entry_value) {
			/* Resolved slot: reverse-map the value to a symbol */
			reloc = NULL;
			sym = kxld_dict_find(defined_cxx_symbols, &entry_value);
		} else {
			/* Unresolved slot: the reloc found during counting must
			 * still exist here, or the vtable is malformed */
			reloc = kxld_reloc_get_reloc_by_offset(relocs,
			    vtable_sym->base_addr + entry_offset);

			require_action(reloc, finish,
			    rval = KERN_FAILURE;
			    kxld_log(kKxldLogPatching, kKxldLogErr,
			    kKxldLogMalformedVTable,
			    kxld_demangle(vtable->name, &demangled_name1,
			    &demangled_length1)));

			sym = kxld_reloc_get_symbol(relocator, reloc, /* data */ NULL);
		}

		tmpentry = kxld_array_get_item(&vtable->entries, i);
		tmpentry->unpatched.reloc = reloc;
		tmpentry->unpatched.sym = sym;
	}

	rval = KERN_SUCCESS;
finish:
	return rval;
}
430
431/*******************************************************************************
432*******************************************************************************/
433void
434kxld_vtable_clear(KXLDVTable *vtable)
435{
436 check(vtable);
437
438 vtable->vtable = NULL;
439 vtable->name = NULL;
440 vtable->is_patched = FALSE;
441 kxld_array_clear(&vtable->entries);
442}
443
444/*******************************************************************************
445*******************************************************************************/
446void
447kxld_vtable_deinit(KXLDVTable *vtable)
448{
449 check(vtable);
450
451 kxld_array_deinit(&vtable->entries);
452 bzero(vtable, sizeof(*vtable));
453}
454
455/*******************************************************************************
456*******************************************************************************/
457KXLDVTableEntry *
458kxld_vtable_get_entry_for_offset(const KXLDVTable *vtable, u_long offset,
459 boolean_t is_32_bit)
460{
461 KXLDVTableEntry *rval = NULL;
462 u_int vtable_entry_size = 0;
463 u_int vtable_header_size = 0;
464 u_int vtable_entry_idx = 0;
465
466 (void) get_vtable_base_sizes(is_32_bit,
467 &vtable_entry_size, &vtable_header_size);
468
469 if (offset % vtable_entry_size) {
470 goto finish;
471 }
472
473 vtable_entry_idx = (u_int) ((offset - vtable_header_size) / vtable_entry_size);
474 rval = kxld_array_get_item(&vtable->entries, vtable_entry_idx);
475finish:
476 return rval;
477}
478
/*******************************************************************************
* Patching vtables allows us to preserve binary compatibility across releases.
*
* Walks the child vtable in lockstep with its (already patched) superclass
* vtable; any child slot whose symbol is unresolved and differs from the
* parent's is redirected to the parent's implementation by rewriting the
* slot's relocation entry.  On success the vtable is converted from the
* "unpatched" (sym/reloc) representation to the "patched" (name/addr) one.
* Fails if the parent vtable is unpatched, the child is shorter than the
* parent, or (under strict patching) a declared method has no implementation.
*******************************************************************************/
kern_return_t
kxld_vtable_patch(KXLDVTable *vtable, const KXLDVTable *super_vtable,
    KXLDObject *object)
{
	kern_return_t rval = KERN_FAILURE;
	const KXLDSymtab *symtab = NULL;
	const KXLDSym *sym = NULL;
	KXLDVTableEntry *child_entry = NULL;
	KXLDVTableEntry *parent_entry = NULL;
	u_int symindex = 0;
	u_int i = 0;
	char *demangled_name1 = NULL;
	char *demangled_name2 = NULL;
	char *demangled_name3 = NULL;
	size_t demangled_length1 = 0;
	size_t demangled_length2 = 0;
	size_t demangled_length3 = 0;
	boolean_t failure = FALSE;

	check(vtable);
	check(super_vtable);

	symtab = kxld_object_get_symtab(object);

	/* Already patched is a no-op success; an unpatched parent, or a child
	 * with fewer entries than its parent, is a hard failure. */
	require_action(!vtable->is_patched, finish, rval = KERN_SUCCESS);
	require_action(super_vtable->is_patched, finish, rval = KERN_FAILURE);
	require_action(vtable->entries.nitems >= super_vtable->entries.nitems, finish,
	    rval = KERN_FAILURE;
	    kxld_log(kKxldLogPatching, kKxldLogErr, kKxldLogMalformedVTable,
	    kxld_demangle(vtable->name, &demangled_name1, &demangled_length1)));

	for (i = 0; i < super_vtable->entries.nitems; ++i) {
		child_entry = kxld_array_get_item(&vtable->entries, i);
		parent_entry = kxld_array_get_item(&super_vtable->entries, i);

		/* The child entry can be NULL when a locally-defined, non-external
		 * symbol is stripped. We wouldn't patch this entry anyway, so we
		 * just skip it.
		 */

		if (!child_entry->unpatched.sym) {
			continue;
		}

		/* It's possible for the patched parent entry not to have a symbol
		 * (e.g. when the definition is inlined). We can't patch this entry no
		 * matter what, so we'll just skip it and die later if it's a problem
		 * (which is not likely).
		 */

		if (!parent_entry->patched.name) {
			continue;
		}

		/* 1) If the symbol is defined locally, do not patch */

		if (kxld_sym_is_defined_locally(child_entry->unpatched.sym)) {
			continue;
		}

		/* 2) If the child is a pure virtual function, do not patch.
		 * In general, we want to proceed with patching when the symbol is
		 * externally defined because pad slots fall into this category.
		 * The pure virtual function symbol is special case, as the pure
		 * virtual property itself overrides the parent's implementation.
		 */

		if (kxld_sym_is_pure_virtual(child_entry->unpatched.sym)) {
			continue;
		}

		/* 3) If the symbols are the same, do not patch */

		if (streq(child_entry->unpatched.sym->name,
		    parent_entry->patched.name)) {
			continue;
		}

		/* 4) If the parent vtable entry is a pad slot, and the child does not
		 * match it, then the child was built against a newer version of the
		 * libraries, so it is binary-incompatible.
		 */

		require_action(!kxld_sym_name_is_padslot(parent_entry->patched.name),
		    finish, rval = KERN_FAILURE;
		    kxld_log(kKxldLogPatching, kKxldLogErr,
		    kKxldLogParentOutOfDate,
		    kxld_demangle(super_vtable->name, &demangled_name1,
		    &demangled_length1),
		    kxld_demangle(vtable->name, &demangled_name2,
		    &demangled_length2)));

#if KXLD_USER_OR_STRICT_PATCHING
		/* 5) If we are doing strict patching, we prevent kexts from declaring
		 * virtual functions and not implementing them. We can tell if a
		 * virtual function is declared but not implemented because we resolve
		 * symbols before patching; an unimplemented function will still be
		 * undefined at this point. We then look at whether the symbol has
		 * the same class prefix as the vtable. If it does, the symbol was
		 * declared as part of the class and not inherited, which means we
		 * should not patch it.
		 */

		if (kxld_object_target_supports_strict_patching(object) &&
		    !kxld_sym_is_defined(child_entry->unpatched.sym)) {
			char class_name[KXLD_MAX_NAME_LEN];
			char function_prefix[KXLD_MAX_NAME_LEN];
			u_long function_prefix_len = 0;

			rval = kxld_sym_get_class_name_from_vtable_name(vtable->name,
			    class_name, sizeof(class_name));
			require_noerr(rval, finish);

			function_prefix_len =
			    kxld_sym_get_function_prefix_from_class_name(class_name,
			    function_prefix, sizeof(function_prefix));
			require(function_prefix_len, finish);

			if (!strncmp(child_entry->unpatched.sym->name,
			    function_prefix, function_prefix_len)) {
				/* Record the failure but keep scanning so every
				 * unpatchable method gets logged before we bail */
				failure = TRUE;
				kxld_log(kKxldLogPatching, kKxldLogErr,
				    "The %s is unpatchable because its class declares the "
				    "method '%s' without providing an implementation.",
				    kxld_demangle(vtable->name,
				    &demangled_name1, &demangled_length1),
				    kxld_demangle(child_entry->unpatched.sym->name,
				    &demangled_name2, &demangled_length2));
				continue;
			}
		}
#endif /* KXLD_USER_OR_STRICT_PATCHING */

		/* 6) The child symbol is unresolved and different from its parent, so
		 * we need to patch it up. We do this by modifying the relocation
		 * entry of the vtable entry to point to the symbol of the parent
		 * vtable entry. If that symbol does not exist (i.e. we got the data
		 * from a link state object's vtable representation), then we create a
		 * new symbol in the symbol table and point the relocation entry to
		 * that.
		 */

		sym = kxld_symtab_get_locally_defined_symbol_by_name(symtab,
		    parent_entry->patched.name);
		if (!sym) {
			rval = kxld_object_add_symbol(object, parent_entry->patched.name,
			    parent_entry->patched.addr, &sym);
			require_noerr(rval, finish);
		}
		require_action(sym, finish, rval = KERN_FAILURE);

		rval = kxld_symtab_get_sym_index(symtab, sym, &symindex);
		require_noerr(rval, finish);

		rval = kxld_reloc_update_symindex(child_entry->unpatched.reloc, symindex);
		require_noerr(rval, finish);

		kxld_log(kKxldLogPatching, kKxldLogDetail,
		    "In vtable '%s', patching '%s' with '%s'.",
		    kxld_demangle(vtable->name, &demangled_name1, &demangled_length1),
		    kxld_demangle(child_entry->unpatched.sym->name,
		    &demangled_name2, &demangled_length2),
		    kxld_demangle(sym->name, &demangled_name3, &demangled_length3));

		rval = kxld_object_patch_symbol(object, child_entry->unpatched.sym);
		require_noerr(rval, finish);

		child_entry->unpatched.sym = sym;

		/*
		 * The C++ ABI requires that functions be aligned on a 2-byte boundary:
		 * http://www.codesourcery.com/public/cxx-abi/abi.html#member-pointers
		 * If the LSB of any virtual function's link address is 1, then the
		 * compiler has violated that part of the ABI, and we're going to panic
		 * in _ptmf2ptf() (in OSMetaClass.h). Better to panic here with some
		 * context.
		 */
		assert(kxld_sym_is_pure_virtual(sym) || !(sym->link_addr & 1));
	}

	/* Any strict-patching violation recorded above fails the whole vtable */
	require_action(!failure, finish, rval = KERN_FAILURE);

	/* Change the vtable representation from the unpatched layout to the
	 * patched layout.
	 */

	for (i = 0; i < vtable->entries.nitems; ++i) {
		char *name;
		kxld_addr_t addr;

		child_entry = kxld_array_get_item(&vtable->entries, i);
		if (child_entry->unpatched.sym) {
			name = child_entry->unpatched.sym->name;
			addr = child_entry->unpatched.sym->link_addr;
		} else {
			name = NULL;
			addr = 0;
		}

		child_entry->patched.name = name;
		child_entry->patched.addr = addr;
	}

	vtable->is_patched = TRUE;
	rval = KERN_SUCCESS;

finish:
	if (demangled_name1) {
		kxld_free(demangled_name1, demangled_length1);
	}
	if (demangled_name2) {
		kxld_free(demangled_name2, demangled_length2);
	}
	if (demangled_name3) {
		kxld_free(demangled_name3, demangled_length3);
	}

	return rval;
}