/*
 * Copyright (c) 2008 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <string.h>
#include <mach-o/loader.h>
#include <sys/types.h>

#if KERNEL
    #ifdef MACH_ASSERT
    #undef MACH_ASSERT
    #endif
    #define MACH_ASSERT 1
    #include <kern/assert.h>
#else
    #include <assert.h>
#endif

#define DEBUG_ASSERT_COMPONENT_NAME_STRING "kxld"
#include <AssertMacros.h>

#include "kxld_demangle.h"
#include "kxld_dict.h"
#include "kxld_object.h"
#include "kxld_reloc.h"
#include "kxld_sect.h"
#include "kxld_sym.h"
#include "kxld_symtab.h"
#include "kxld_util.h"
#include "kxld_vtable.h"

#define VTABLE_ENTRY_SIZE_32 4
#define VTABLE_HEADER_LEN_32 2
#define VTABLE_HEADER_SIZE_32 (VTABLE_HEADER_LEN_32 * VTABLE_ENTRY_SIZE_32)

#define VTABLE_ENTRY_SIZE_64 8
#define VTABLE_HEADER_LEN_64 2
#define VTABLE_HEADER_SIZE_64 (VTABLE_HEADER_LEN_64 * VTABLE_ENTRY_SIZE_64)

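/* Layout sketch (illustrative, assuming the Itanium C++ ABI layout used by
 * Apple's compilers): a vtable begins with two pointer-sized header entries --
 * the offset-to-top value and the typeinfo/RTTI pointer (typically NULL in
 * kernel code, which builds without RTTI) -- followed by one slot per virtual
 * function.  For a 64-bit vtable:
 *
 *     +0x00  offset-to-top       \__ VTABLE_HEADER_SIZE_64 (2 * 8 bytes)
 *     +0x08  typeinfo pointer    /
 *     +0x10  &Class::firstVirtualMethod    <- first entry kxld inspects
 *     +0x18  &Class::secondVirtualMethod
 *     ...
 */
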
static void get_vtable_base_sizes(boolean_t is_32_bit, u_int *vtable_entry_size,
    u_int *vtable_header_size);

static kern_return_t init_by_relocs(KXLDVTable *vtable, const KXLDSym *vtable_sym,
    const KXLDSect *sect, const KXLDRelocator *relocator);

static kern_return_t init_by_entries_and_relocs(KXLDVTable *vtable,
    const KXLDSym *vtable_sym, const KXLDRelocator *relocator,
    const KXLDArray *relocs, const KXLDDict *defined_cxx_symbols);

static kern_return_t init_by_entries(KXLDVTable *vtable,
    const KXLDRelocator *relocator, const KXLDDict *defined_cxx_symbols);

/*******************************************************************************
*******************************************************************************/
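/* Overview of the dispatch below: an object that is already linked is
 * initialized with init_by_entries() and its vtable is marked patched right
 * away; an unlinked final image walks its external relocation entries via
 * init_by_entries_and_relocs(); any other object falls back to
 * init_by_relocs() using the vtable section's own relocation entries.
 */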
kern_return_t
kxld_vtable_init(KXLDVTable *vtable, const KXLDSym *vtable_sym,
    const KXLDObject *object, const KXLDDict *defined_cxx_symbols)
{
    kern_return_t rval = KERN_FAILURE;
    const KXLDArray *extrelocs = NULL;
    const KXLDRelocator *relocator = NULL;
    const KXLDSect *vtable_sect = NULL;
    char *demangled_name = NULL;
    size_t demangled_length = 0;

    check(vtable);
    check(vtable_sym);
    check(object);

    relocator = kxld_object_get_relocator(object);

    vtable_sect = kxld_object_get_section_by_index(object,
        vtable_sym->sectnum);
    require_action(vtable_sect, finish, rval=KERN_FAILURE);

    vtable->name = vtable_sym->name;
    vtable->vtable = vtable_sect->data +
        kxld_sym_get_section_offset(vtable_sym, vtable_sect);

    if (kxld_object_is_linked(object)) {
        rval = init_by_entries(vtable, relocator, defined_cxx_symbols);
        require_noerr(rval, finish);

        vtable->is_patched = TRUE;
    } else {
        if (kxld_object_is_final_image(object)) {
            extrelocs = kxld_object_get_extrelocs(object);

            require_action(extrelocs, finish,
                rval=KERN_FAILURE;
                kxld_log(kKxldLogPatching, kKxldLogErr,
                    kKxldLogMalformedVTable,
                    kxld_demangle(vtable->name,
                        &demangled_name, &demangled_length)));

            rval = init_by_entries_and_relocs(vtable, vtable_sym,
                relocator, extrelocs, defined_cxx_symbols);
            require_noerr(rval, finish);
        } else {

            require_action(kxld_sect_get_num_relocs(vtable_sect) > 0, finish,
                rval=KERN_FAILURE;
                kxld_log(kKxldLogPatching, kKxldLogErr,
                    kKxldLogMalformedVTable,
                    kxld_demangle(vtable->name,
                        &demangled_name, &demangled_length)));

            rval = init_by_relocs(vtable, vtable_sym, vtable_sect, relocator);
            require_noerr(rval, finish);
        }

        vtable->is_patched = FALSE;
    }

    rval = KERN_SUCCESS;
finish:

    if (demangled_name) kxld_free(demangled_name, demangled_length);

    return rval;
}

/*******************************************************************************
*******************************************************************************/
static void
get_vtable_base_sizes(boolean_t is_32_bit, u_int *vtable_entry_size,
    u_int *vtable_header_size)
{
    check(vtable_entry_size);
    check(vtable_header_size);

    if (is_32_bit) {
        *vtable_entry_size = VTABLE_ENTRY_SIZE_32;
        *vtable_header_size = VTABLE_HEADER_SIZE_32;
    } else {
        *vtable_entry_size = VTABLE_ENTRY_SIZE_64;
        *vtable_header_size = VTABLE_HEADER_SIZE_64;
    }
}

/*******************************************************************************
* Initializes a vtable object by matching up relocation entries to the vtable's
* entries and finding the corresponding symbols.
*******************************************************************************/
static kern_return_t
init_by_relocs(KXLDVTable *vtable, const KXLDSym *vtable_sym,
    const KXLDSect *sect, const KXLDRelocator *relocator)
{
    kern_return_t rval = KERN_FAILURE;
    KXLDReloc *reloc = NULL;
    KXLDVTableEntry *entry = NULL;
    KXLDSym *sym = NULL;
    kxld_addr_t vtable_base_offset = 0;
    kxld_addr_t entry_offset = 0;
    u_int i = 0;
    u_int nentries = 0;
    u_int vtable_entry_size = 0;
    u_int vtable_header_size = 0;
    u_int base_reloc_index = 0;
    u_int reloc_index = 0;

    check(vtable);
    check(vtable_sym);
    check(sect);
    check(relocator);

    /* Find the first entry past the vtable padding */

    (void) get_vtable_base_sizes(relocator->is_32_bit,
        &vtable_entry_size, &vtable_header_size);

    vtable_base_offset = kxld_sym_get_section_offset(vtable_sym, sect) +
        vtable_header_size;

    /* Find the relocation entry at the start of the vtable */

    rval = kxld_reloc_get_reloc_index_by_offset(&sect->relocs,
        vtable_base_offset, &base_reloc_index);
    require_noerr(rval, finish);

    /* Count the number of consecutive relocation entries to find the number of
     * vtable entries. For some reason, the __TEXT,__const relocations are
     * sorted in descending order, so we have to walk backwards. Also, make
     * sure we don't run off the end of the section's relocs.
     */
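    /* Illustrative walk (hypothetical offsets): if the first vtable entry sits
     * at section offset 0x10 on a 64-bit target, the descending-sorted array
     * might hold
     *
     *     relocs[base_reloc_index]     -> address 0x10   (first entry)
     *     relocs[base_reloc_index - 1] -> address 0x18
     *     relocs[base_reloc_index - 2] -> address 0x20
     *
     * so the loop below decrements the index while advancing the expected
     * entry offset.
     */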

    reloc_index = base_reloc_index;
    entry_offset = vtable_base_offset;
    reloc = kxld_array_get_item(&sect->relocs, reloc_index);
    while (reloc->address == entry_offset) {
        ++nentries;
        if (!reloc_index) break;

        --reloc_index;

        reloc = kxld_array_get_item(&sect->relocs, reloc_index);
        entry_offset += vtable_entry_size;
    }

    /* Allocate the symbol index */

    rval = kxld_array_init(&vtable->entries, sizeof(KXLDVTableEntry), nentries);
    require_noerr(rval, finish);

    /* Find the symbols for each vtable entry */

    for (i = 0; i < vtable->entries.nitems; ++i) {
        reloc = kxld_array_get_item(&sect->relocs, base_reloc_index - i);
        entry = kxld_array_get_item(&vtable->entries, i);

        /* If we can't find a symbol, it means it is a locally-defined,
         * non-external symbol that has been stripped. We don't patch over
         * locally-defined symbols, so we leave the symbol as NULL and just
         * skip it. We won't be able to patch subclasses with this symbol,
         * but there isn't much we can do about that.
         */
        sym = kxld_reloc_get_symbol(relocator, reloc, sect->data);

        entry->unpatched.sym = sym;
        entry->unpatched.reloc = reloc;
    }

    rval = KERN_SUCCESS;
finish:
    return rval;
}

/*******************************************************************************
* Initializes a vtable object by reading the symbol values out of the vtable
* entries and performing reverse symbol lookups on those values.
*******************************************************************************/
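/* Sketch of the reverse lookup used here (addresses are made up): a slot in an
 * already-linked vtable holds a resolved address, and defined_cxx_symbols is
 * used as an address-keyed dictionary, so
 *
 *     slot value 0xffffff8000a1c2d0
 *         -> kxld_dict_find(defined_cxx_symbols, &value)
 *         -> KXLDSym { name "__ZN8OSObject4freeEv", link_addr 0xffffff8000a1c2d0 }
 *
 * Values with no dictionary entry (e.g. inlined definitions) simply yield a
 * NULL name for that slot.
 */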
static kern_return_t
init_by_entries(KXLDVTable *vtable, const KXLDRelocator *relocator,
    const KXLDDict *defined_cxx_symbols)
{
    kern_return_t rval = KERN_FAILURE;
    KXLDVTableEntry *tmpentry = NULL;
    KXLDSym *sym = NULL;
    kxld_addr_t entry_value = 0;
    u_long entry_offset;
    u_int vtable_entry_size = 0;
    u_int vtable_header_size = 0;
    u_int nentries = 0;
    u_int i = 0;

    check(vtable);
    check(relocator);

    (void) get_vtable_base_sizes(relocator->is_32_bit,
        &vtable_entry_size, &vtable_header_size);

    /* Count the number of entries (the vtable is null-terminated) */

    entry_offset = vtable_header_size;
    while (1) {
        entry_value = kxld_relocator_get_pointer_at_addr(relocator,
            vtable->vtable, entry_offset);
        if (!entry_value) break;

        entry_offset += vtable_entry_size;
        ++nentries;
    }

    /* Allocate the symbol index */

    rval = kxld_array_init(&vtable->entries, sizeof(KXLDVTableEntry), nentries);
    require_noerr(rval, finish);

    /* Look up the symbols for each entry */

    for (i = 0, entry_offset = vtable_header_size;
        i < vtable->entries.nitems;
        ++i, entry_offset += vtable_entry_size)
    {
        entry_value = kxld_relocator_get_pointer_at_addr(relocator,
            vtable->vtable, entry_offset);

        /* If we can't find the symbol, it means that the virtual function was
         * defined inline. There's not much I can do about this; it just means
         * I can't patch this function.
         */
        tmpentry = kxld_array_get_item(&vtable->entries, i);
        sym = kxld_dict_find(defined_cxx_symbols, &entry_value);

        if (sym) {
            tmpentry->patched.name = sym->name;
            tmpentry->patched.addr = sym->link_addr;
        } else {
            tmpentry->patched.name = NULL;
            tmpentry->patched.addr = 0;
        }
    }

    rval = KERN_SUCCESS;
finish:
    return rval;
}

/*******************************************************************************
* Initializes vtables by performing a reverse lookup on symbol values when
* they exist in the vtable entry, and by looking through a matching relocation
* entry when the vtable entry is NULL.
*
* Final linked images require this hybrid vtable initialization approach
* because they are already internally resolved. This means that the vtables
* contain valid entries to local symbols, but still have relocation entries for
* external symbols.
*******************************************************************************/
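/* Condensed view of the scan performed below:
 *
 *     slot value != 0  -> already resolved internally; reverse-look it up in
 *                         defined_cxx_symbols (as init_by_entries does)
 *     slot value == 0  -> look for an external relocation entry at
 *                         vtable_sym->base_addr + entry_offset
 *     neither          -> treat it as the end of the vtable
 */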
static kern_return_t
init_by_entries_and_relocs(KXLDVTable *vtable, const KXLDSym *vtable_sym,
    const KXLDRelocator *relocator, const KXLDArray *relocs,
    const KXLDDict *defined_cxx_symbols)
{
    kern_return_t rval = KERN_FAILURE;
    KXLDReloc *reloc = NULL;
    KXLDVTableEntry *tmpentry = NULL;
    KXLDSym *sym = NULL;
    u_int vtable_entry_size = 0;
    u_int vtable_header_size = 0;
    kxld_addr_t entry_value = 0;
    u_long entry_offset = 0;
    u_int nentries = 0;
    u_int i = 0;
    char *demangled_name1 = NULL;
    size_t demangled_length1 = 0;

    check(vtable);
    check(vtable_sym);
    check(relocator);
    check(relocs);

    /* Find the first entry and its offset past the vtable padding */

    (void) get_vtable_base_sizes(relocator->is_32_bit,
        &vtable_entry_size, &vtable_header_size);

    /* In a final linked image, a vtable slot is valid if it is nonzero
     * (meaning the userspace linker has already resolved it) or if it has
     * a relocation entry. We'll know the end of the vtable when we find a
     * slot that meets neither of these conditions.
     */
    entry_offset = vtable_header_size;
    while (1) {
        entry_value = kxld_relocator_get_pointer_at_addr(relocator,
            vtable->vtable, entry_offset);
        if (!entry_value) {
            reloc = kxld_reloc_get_reloc_by_offset(relocs,
                vtable_sym->base_addr + entry_offset);
            if (!reloc) break;
        }

        ++nentries;
        entry_offset += vtable_entry_size;
    }

    /* Allocate the symbol index */

    rval = kxld_array_init(&vtable->entries, sizeof(KXLDVTableEntry), nentries);
    require_noerr(rval, finish);

    /* Find the symbols for each vtable entry */

    for (i = 0, entry_offset = vtable_header_size;
        i < vtable->entries.nitems;
        ++i, entry_offset += vtable_entry_size)
    {
        entry_value = kxld_relocator_get_pointer_at_addr(relocator,
            vtable->vtable, entry_offset);

        /* If we can't find a symbol, it means it is a locally-defined,
         * non-external symbol that has been stripped. We don't patch over
         * locally-defined symbols, so we leave the symbol as NULL and just
         * skip it. We won't be able to patch subclasses with this symbol,
         * but there isn't much we can do about that.
         */
        if (entry_value) {
            reloc = NULL;
            sym = kxld_dict_find(defined_cxx_symbols, &entry_value);
        } else {
            reloc = kxld_reloc_get_reloc_by_offset(relocs,
                vtable_sym->base_addr + entry_offset);

            require_action(reloc, finish,
                rval=KERN_FAILURE;
                kxld_log(kKxldLogPatching, kKxldLogErr,
                    kKxldLogMalformedVTable,
                    kxld_demangle(vtable->name, &demangled_name1,
                        &demangled_length1)));

            sym = kxld_reloc_get_symbol(relocator, reloc, /* data */ NULL);
        }

        tmpentry = kxld_array_get_item(&vtable->entries, i);
        tmpentry->unpatched.reloc = reloc;
        tmpentry->unpatched.sym = sym;
    }

    rval = KERN_SUCCESS;
finish:
    return rval;
}

/*******************************************************************************
*******************************************************************************/
void
kxld_vtable_clear(KXLDVTable *vtable)
{
    check(vtable);

    vtable->vtable = NULL;
    vtable->name = NULL;
    vtable->is_patched = FALSE;
    kxld_array_clear(&vtable->entries);
}

/*******************************************************************************
*******************************************************************************/
void
kxld_vtable_deinit(KXLDVTable *vtable)
{
    check(vtable);

    kxld_array_deinit(&vtable->entries);
    bzero(vtable, sizeof(*vtable));
}

/*******************************************************************************
*******************************************************************************/
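/* Worked example (illustrative): for a 64-bit vtable, an offset of 0x20 maps
 * to entry index (0x20 - VTABLE_HEADER_SIZE_64) / VTABLE_ENTRY_SIZE_64 =
 * (0x20 - 0x10) / 8 = 2.  Offsets that are not multiples of the entry size
 * are rejected below.
 */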
KXLDVTableEntry *
kxld_vtable_get_entry_for_offset(const KXLDVTable *vtable, u_long offset,
    boolean_t is_32_bit)
{
    KXLDVTableEntry *rval = NULL;
    u_int vtable_entry_size = 0;
    u_int vtable_header_size = 0;
    u_int vtable_entry_idx = 0;

    (void) get_vtable_base_sizes(is_32_bit,
        &vtable_entry_size, &vtable_header_size);

    if (offset % vtable_entry_size) {
        goto finish;
    }

    vtable_entry_idx = (u_int) ((offset - vtable_header_size) / vtable_entry_size);
    rval = kxld_array_get_item(&vtable->entries, vtable_entry_idx);
finish:
    return rval;
}

/*******************************************************************************
* Patching vtables allows us to preserve binary compatibility across releases.
*******************************************************************************/
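/* Rough example of what patching buys us (illustrative): a kext's class
 * Child : public Parent may have been compiled when a given Parent vtable
 * slot was still a reserved pad method.  In the running kernel that slot now
 * holds a real implementation, so the already-patched parent vtable carries a
 * different symbol at that index.  Patching rewrites the child entry's
 * relocation to point at the parent's symbol for the same slot, keeping the
 * child binary-compatible with the newer parent.
 */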
kern_return_t
kxld_vtable_patch(KXLDVTable *vtable, const KXLDVTable *super_vtable,
    KXLDObject *object)
{
    kern_return_t rval = KERN_FAILURE;
    const KXLDSymtab *symtab = NULL;
    const KXLDSym *sym = NULL;
    KXLDVTableEntry *child_entry = NULL;
    KXLDVTableEntry *parent_entry = NULL;
    u_int symindex = 0;
    u_int i = 0;
    char *demangled_name1 = NULL;
    char *demangled_name2 = NULL;
    char *demangled_name3 = NULL;
    size_t demangled_length1 = 0;
    size_t demangled_length2 = 0;
    size_t demangled_length3 = 0;
    boolean_t failure = FALSE;

    check(vtable);
    check(super_vtable);

    symtab = kxld_object_get_symtab(object);

    require_action(!vtable->is_patched, finish, rval=KERN_SUCCESS);
    require_action(super_vtable->is_patched, finish, rval=KERN_FAILURE);
    require_action(vtable->entries.nitems >= super_vtable->entries.nitems, finish,
        rval=KERN_FAILURE;
        kxld_log(kKxldLogPatching, kKxldLogErr, kKxldLogMalformedVTable,
            kxld_demangle(vtable->name, &demangled_name1, &demangled_length1)));

    for (i = 0; i < super_vtable->entries.nitems; ++i) {
        child_entry = kxld_array_get_item(&vtable->entries, i);
        parent_entry = kxld_array_get_item(&super_vtable->entries, i);

        /* The child entry can be NULL when a locally-defined, non-external
         * symbol is stripped. We wouldn't patch this entry anyway, so we
         * just skip it.
         */

        if (!child_entry->unpatched.sym) continue;

        /* It's possible for the patched parent entry not to have a symbol
         * (e.g. when the definition is inlined). We can't patch this entry no
         * matter what, so we'll just skip it and die later if it's a problem
         * (which is not likely).
         */

        if (!parent_entry->patched.name) continue;

        /* 1) If the symbol is defined locally, do not patch */

        if (kxld_sym_is_defined_locally(child_entry->unpatched.sym)) continue;

        /* 2) If the child is a pure virtual function, do not patch.
         * In general, we want to proceed with patching when the symbol is
         * externally defined because pad slots fall into this category.
         * The pure virtual function symbol is a special case, as the pure
         * virtual property itself overrides the parent's implementation.
         */

        if (kxld_sym_is_pure_virtual(child_entry->unpatched.sym)) continue;

        /* 3) If the symbols are the same, do not patch */

        if (streq(child_entry->unpatched.sym->name,
                parent_entry->patched.name))
        {
            continue;
        }

        /* 4) If the parent vtable entry is a pad slot, and the child does not
         * match it, then the child was built against a newer version of the
         * libraries, so it is binary-incompatible.
         */
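        /* A pad slot is, roughly, one of the OSMetaClassDeclareReservedUnused()
         * placeholders; its mangled name contains "_RESERVED", e.g.
         * __ZN8OSObject18_RESERVEDOSObject0Ev (illustrative).
         */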

        require_action(!kxld_sym_name_is_padslot(parent_entry->patched.name),
            finish, rval=KERN_FAILURE;
            kxld_log(kKxldLogPatching, kKxldLogErr,
                kKxldLogParentOutOfDate,
                kxld_demangle(super_vtable->name, &demangled_name1,
                    &demangled_length1),
                kxld_demangle(vtable->name, &demangled_name2,
                    &demangled_length2)));

#if KXLD_USER_OR_STRICT_PATCHING
        /* 5) If we are doing strict patching, we prevent kexts from declaring
         * virtual functions and not implementing them. We can tell if a
         * virtual function is declared but not implemented because we resolve
         * symbols before patching; an unimplemented function will still be
         * undefined at this point. We then look at whether the symbol has
         * the same class prefix as the vtable. If it does, the symbol was
         * declared as part of the class and not inherited, which means we
         * should not patch it.
         */
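        /* Illustrative example of the prefix test below, assuming Itanium name
         * mangling with the Mach-O leading underscore: for a vtable symbol
         * __ZTV8MyDriver the class name is "MyDriver" and the function prefix
         * is "__ZN8MyDriver", so any still-undefined child symbol starting
         * with that prefix was declared by the class itself rather than
         * inherited.
         */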

        if (kxld_object_target_supports_strict_patching(object) &&
            !kxld_sym_is_defined(child_entry->unpatched.sym))
        {
            char class_name[KXLD_MAX_NAME_LEN];
            char function_prefix[KXLD_MAX_NAME_LEN];
            u_long function_prefix_len = 0;

            rval = kxld_sym_get_class_name_from_vtable_name(vtable->name,
                class_name, sizeof(class_name));
            require_noerr(rval, finish);

            function_prefix_len =
                kxld_sym_get_function_prefix_from_class_name(class_name,
                    function_prefix, sizeof(function_prefix));
            require(function_prefix_len, finish);

            if (!strncmp(child_entry->unpatched.sym->name,
                    function_prefix, function_prefix_len))
            {
                failure = TRUE;
                kxld_log(kKxldLogPatching, kKxldLogErr,
                    "The %s is unpatchable because its class declares the "
                    "method '%s' without providing an implementation.",
                    kxld_demangle(vtable->name,
                        &demangled_name1, &demangled_length1),
                    kxld_demangle(child_entry->unpatched.sym->name,
                        &demangled_name2, &demangled_length2));
                continue;
            }
        }
#endif /* KXLD_USER_OR_STRICT_PATCHING */

        /* 6) The child symbol is unresolved and different from its parent, so
         * we need to patch it up. We do this by modifying the relocation
         * entry of the vtable entry to point to the symbol of the parent
         * vtable entry. If that symbol does not exist (i.e. we got the data
         * from a link state object's vtable representation), then we create a
         * new symbol in the symbol table and point the relocation entry to
         * that.
         */

        sym = kxld_symtab_get_locally_defined_symbol_by_name(symtab,
            parent_entry->patched.name);
        if (!sym) {
            rval = kxld_object_add_symbol(object, parent_entry->patched.name,
                parent_entry->patched.addr, &sym);
            require_noerr(rval, finish);
        }
        require_action(sym, finish, rval=KERN_FAILURE);

        rval = kxld_symtab_get_sym_index(symtab, sym, &symindex);
        require_noerr(rval, finish);

        rval = kxld_reloc_update_symindex(child_entry->unpatched.reloc, symindex);
        require_noerr(rval, finish);

        kxld_log(kKxldLogPatching, kKxldLogDetail,
            "In vtable '%s', patching '%s' with '%s'.",
            kxld_demangle(vtable->name, &demangled_name1, &demangled_length1),
            kxld_demangle(child_entry->unpatched.sym->name,
                &demangled_name2, &demangled_length2),
            kxld_demangle(sym->name, &demangled_name3, &demangled_length3));

        rval = kxld_object_patch_symbol(object, child_entry->unpatched.sym);
        require_noerr(rval, finish);

        child_entry->unpatched.sym = sym;

        /*
         * The C++ ABI requires that functions be aligned on a 2-byte boundary:
         * http://www.codesourcery.com/public/cxx-abi/abi.html#member-pointers
         * If the LSB of any virtual function's link address is 1, then the
         * compiler has violated that part of the ABI, and we're going to panic
         * in _ptmf2ptf() (in OSMetaClass.h). Better to panic here with some
         * context.
         */
        assert(kxld_sym_is_pure_virtual(sym) || !(sym->link_addr & 1));
    }

    require_action(!failure, finish, rval=KERN_FAILURE);

    /* Change the vtable representation from the unpatched layout to the
     * patched layout.
     */

    for (i = 0; i < vtable->entries.nitems; ++i) {
        char *name;
        kxld_addr_t addr;

        child_entry = kxld_array_get_item(&vtable->entries, i);
        if (child_entry->unpatched.sym) {
            name = child_entry->unpatched.sym->name;
            addr = child_entry->unpatched.sym->link_addr;
        } else {
            name = NULL;
            addr = 0;
        }

        child_entry->patched.name = name;
        child_entry->patched.addr = addr;
    }

    vtable->is_patched = TRUE;
    rval = KERN_SUCCESS;

finish:
    if (demangled_name1) kxld_free(demangled_name1, demangled_length1);
    if (demangled_name2) kxld_free(demangled_name2, demangled_length2);
    if (demangled_name3) kxld_free(demangled_name3, demangled_length3);

    return rval;
}