/*
 * Copyright (c) 2008 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <string.h>
#include <mach-o/loader.h>
#include <sys/types.h>

#define DEBUG_ASSERT_COMPONENT_NAME_STRING "kxld"
#include <AssertMacros.h>

#include "kxld_demangle.h"
#include "kxld_reloc.h"
#include "kxld_sect.h"
#include "kxld_state.h"
#include "kxld_sym.h"
#include "kxld_symtab.h"
#include "kxld_util.h"
#include "kxld_vtable.h"

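/* The *_HEADER_* constants below skip the two reserved slots at the start of
 * each vtable; under the Itanium C++ ABI these are assumed to be the
 * offset-to-top value and the RTTI pointer, so only the function-pointer
 * entries that follow them are examined or patched.
 */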
#define VTABLE_ENTRY_SIZE_32 4
#define VTABLE_HEADER_LEN_32 2
#define VTABLE_HEADER_SIZE_32 (VTABLE_HEADER_LEN_32 * VTABLE_ENTRY_SIZE_32)

#define VTABLE_ENTRY_SIZE_64 8
#define VTABLE_HEADER_LEN_64 2
#define VTABLE_HEADER_SIZE_64 (VTABLE_HEADER_LEN_64 * VTABLE_ENTRY_SIZE_64)

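/* Three initialization strategies are used below: object files are walked
 * through their section relocation entries (init_by_relocs), the already
 * linked kernel image is walked through the resolved values stored in its
 * vtable slots (init_by_entries), and final linked images use a hybrid of the
 * two (init_by_entries_and_relocs).
 */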
static kern_return_t init_by_relocs(KXLDVTable *vtable, const KXLDSym *sym,
    const KXLDSect *sect, const KXLDSymtab *symtab,
    const KXLDRelocator *relocator);

static kern_return_t init_by_entries_and_relocs(KXLDVTable *vtable,
    const KXLDSym *sym, const KXLDSymtab *symtab,
    const KXLDRelocator *relocator, const KXLDArray *relocs);

static kxld_addr_t get_entry_value(u_char *entry, const KXLDRelocator *relocator)
    __attribute__((pure));
#if !KERNEL
static kxld_addr_t swap_entry_value(kxld_addr_t entry_value,
    const KXLDRelocator *relocator) __attribute__((const));
#endif /* !KERNEL */
static kern_return_t init_by_entries(KXLDVTable *vtable, const KXLDSymtab *symtab,
    const KXLDRelocator *relocator);

/*******************************************************************************
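* The kernel image is already fully linked: its vtable section carries no
* relocation entries and every slot holds a resolved address, so the table is
* built directly from the entry values and marked patched.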
*******************************************************************************/
kern_return_t
kxld_vtable_init_from_kernel_macho(KXLDVTable *vtable, const KXLDSym *sym,
    const KXLDSect *sect, const KXLDSymtab *symtab,
    const KXLDRelocator *relocator)
{
    kern_return_t rval = KERN_FAILURE;
    char *demangled_name = NULL;
    size_t demangled_length = 0;

    check(vtable);
    check(sym);
    check(sect);
    check(symtab);

    vtable->name = sym->name;
    vtable->vtable = sect->data + kxld_sym_get_section_offset(sym, sect);
    vtable->is_patched = FALSE;

    require_action(kxld_sect_get_num_relocs(sect) == 0, finish,
        rval=KERN_FAILURE;
        kxld_log(kKxldLogPatching, kKxldLogErr,
            kKxldLogMalformedVTable,
            kxld_demangle(vtable->name, &demangled_name, &demangled_length)));

    rval = init_by_entries(vtable, symtab, relocator);
    require_noerr(rval, finish);

    vtable->is_patched = TRUE;

    rval = KERN_SUCCESS;

finish:
    if (rval) kxld_vtable_deinit(vtable);
    if (demangled_name) kxld_free(demangled_name, demangled_length);

    return rval;
}

/*******************************************************************************
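* Object files are unlinked: every vtable slot still has a section relocation
* entry (hence the requirement of a nonzero reloc count), so the table is
* built from those relocations and left unpatched for later patching.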
*******************************************************************************/
kern_return_t
kxld_vtable_init_from_object_macho(KXLDVTable *vtable, const KXLDSym *sym,
    const KXLDSect *sect, const KXLDSymtab *symtab,
    const KXLDRelocator *relocator)
{
    kern_return_t rval = KERN_FAILURE;
    char *demangled_name = NULL;
    size_t demangled_length = 0;

    check(vtable);
    check(sym);
    check(sect);
    check(symtab);

    vtable->name = sym->name;
    vtable->vtable = sect->data + kxld_sym_get_section_offset(sym, sect);
    vtable->is_patched = FALSE;

    require_action(kxld_sect_get_num_relocs(sect) > 0, finish,
        rval=KERN_FAILURE;
        kxld_log(kKxldLogPatching, kKxldLogErr,
            kKxldLogMalformedVTable,
            kxld_demangle(vtable->name, &demangled_name, &demangled_length)));

    rval = init_by_relocs(vtable, sym, sect, symtab, relocator);
    require_noerr(rval, finish);

    rval = KERN_SUCCESS;

finish:
    if (rval) kxld_vtable_deinit(vtable);
    if (demangled_name) kxld_free(demangled_name, demangled_length);

    return rval;
}

/*******************************************************************************
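* Final linked images have no relocations left on the vtable section, but they
* keep external relocations in a separate array, so the table is built with
* the hybrid entries-plus-relocs walk below.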
*******************************************************************************/
kern_return_t
kxld_vtable_init_from_final_macho(KXLDVTable *vtable, const KXLDSym *sym,
    const KXLDSect *sect, const KXLDSymtab *symtab,
    const KXLDRelocator *relocator, const KXLDArray *relocs)
{
    kern_return_t rval = KERN_FAILURE;
    char *demangled_name = NULL;
    size_t demangled_length = 0;

    check(vtable);
    check(sym);
    check(sect);
    check(symtab);

    vtable->name = sym->name;
    vtable->vtable = sect->data + kxld_sym_get_section_offset(sym, sect);
    vtable->is_patched = FALSE;

    require_action(kxld_sect_get_num_relocs(sect) == 0, finish,
        rval=KERN_FAILURE;
        kxld_log(kKxldLogPatching, kKxldLogErr,
            kKxldLogMalformedVTable,
            kxld_demangle(vtable->name, &demangled_name, &demangled_length)));

    rval = init_by_entries_and_relocs(vtable, sym, symtab,
        relocator, relocs);
    require_noerr(rval, finish);

    rval = KERN_SUCCESS;

finish:
    if (rval) kxld_vtable_deinit(vtable);
    if (demangled_name) kxld_free(demangled_name, demangled_length);

    return rval;
}

#if KXLD_USER_OR_ILP32
/*******************************************************************************
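* Rebuilds a pre-patched vtable from a serialized link state file: hdr supplies
* offsets to the vtable's name and to an array of KXLDSymEntry32 records, each
* giving the name offset and address of one entry.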
*******************************************************************************/
kern_return_t
kxld_vtable_init_from_link_state_32(KXLDVTable *vtable, u_char *file,
    KXLDVTableHdr *hdr)
{
    kern_return_t rval = KERN_FAILURE;
    KXLDSymEntry32 *sym = NULL;
    KXLDVTableEntry *entry = NULL;
    u_int i = 0;

    check(vtable);
    check(file);
    check(hdr);

    vtable->name = (char *) (file + hdr->nameoff);
    vtable->is_patched = TRUE;

    rval = kxld_array_init(&vtable->entries, sizeof(KXLDVTableEntry),
        hdr->nentries);
    require_noerr(rval, finish);

    sym = (KXLDSymEntry32 *) (file + hdr->vtableoff);
    for (i = 0; i < vtable->entries.nitems; ++i, ++sym) {
        entry = kxld_array_get_item(&vtable->entries, i);
        entry->patched.name = (char *) (file + sym->nameoff);
        entry->patched.addr = sym->addr;
    }

    rval = KERN_SUCCESS;

finish:
    return rval;
}
#endif /* KXLD_USER_OR_ILP32 */

#if KXLD_USER_OR_LP64
/*******************************************************************************
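* 64-bit counterpart of the link state initializer above, reading
* KXLDSymEntry64 records.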
*******************************************************************************/
kern_return_t
kxld_vtable_init_from_link_state_64(KXLDVTable *vtable, u_char *file,
    KXLDVTableHdr *hdr)
{
    kern_return_t rval = KERN_FAILURE;
    KXLDSymEntry64 *sym = NULL;
    KXLDVTableEntry *entry = NULL;
    u_int i = 0;

    check(vtable);
    check(file);
    check(hdr);

    vtable->name = (char *) (file + hdr->nameoff);
    vtable->is_patched = TRUE;

    rval = kxld_array_init(&vtable->entries, sizeof(KXLDVTableEntry),
        hdr->nentries);
    require_noerr(rval, finish);

    sym = (KXLDSymEntry64 *) (file + hdr->vtableoff);
    for (i = 0; i < vtable->entries.nitems; ++i, ++sym) {
        entry = kxld_array_get_item(&vtable->entries, i);
        entry->patched.name = (char *) (file + sym->nameoff);
        entry->patched.addr = sym->addr;
    }

    rval = KERN_SUCCESS;

finish:
    return rval;
}
#endif /* KXLD_USER_OR_LP64 */

/*******************************************************************************
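* Copies a vtable's scalar fields and duplicates its entries array.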
*******************************************************************************/
kern_return_t
kxld_vtable_copy(KXLDVTable *vtable, const KXLDVTable *src)
{
    kern_return_t rval = KERN_FAILURE;

    check(vtable);
    check(src);

    vtable->vtable = src->vtable;
    vtable->name = src->name;
    vtable->is_patched = src->is_patched;

    rval = kxld_array_copy(&vtable->entries, &src->entries);
    require_noerr(rval, finish);

    rval = KERN_SUCCESS;

finish:
    return rval;
}

/*******************************************************************************
* Initializes a vtable object by matching up relocation entries to the vtable's
* entries and finding the corresponding symbols.
*******************************************************************************/
static kern_return_t
init_by_relocs(KXLDVTable *vtable, const KXLDSym *sym, const KXLDSect *sect,
    const KXLDSymtab *symtab, const KXLDRelocator *relocator)
{
    kern_return_t rval = KERN_FAILURE;
    KXLDReloc *reloc = NULL;
    KXLDVTableEntry *entry = NULL;
    KXLDSym *tmpsym = NULL;
    kxld_addr_t vtable_base_offset = 0;
    kxld_addr_t entry_offset = 0;
    u_int i = 0;
    u_int nentries = 0;
    u_int vtable_entry_size = 0;
    u_int base_reloc_index = 0;
    u_int reloc_index = 0;

    check(vtable);
    check(sym);
    check(sect);
    check(symtab);
    check(relocator);

    /* Find the first entry past the vtable padding */

    vtable_base_offset = kxld_sym_get_section_offset(sym, sect);
    if (relocator->is_32_bit) {
        vtable_entry_size = VTABLE_ENTRY_SIZE_32;
        vtable_base_offset += VTABLE_HEADER_SIZE_32;
    } else {
        vtable_entry_size = VTABLE_ENTRY_SIZE_64;
        vtable_base_offset += VTABLE_HEADER_SIZE_64;
    }

    /* Find the relocation entry at the start of the vtable */

    rval = kxld_reloc_get_reloc_index_by_offset(&sect->relocs,
        vtable_base_offset, &base_reloc_index);
    require_noerr(rval, finish);

    /* Count the number of consecutive relocation entries to find the number of
     * vtable entries.  For some reason, the __TEXT,__const relocations are
     * sorted in descending order, so we have to walk backwards.  Also, make
     * sure we don't run off the end of the section's relocs.
     */

    reloc_index = base_reloc_index;
    entry_offset = vtable_base_offset;
    reloc = kxld_array_get_item(&sect->relocs, reloc_index);
    while (reloc->address == entry_offset) {
        ++nentries;
        if (!reloc_index) break;

        --reloc_index;

        reloc = kxld_array_get_item(&sect->relocs, reloc_index);
        entry_offset += vtable_entry_size;
    }

    /* Allocate the symbol index */

    rval = kxld_array_init(&vtable->entries, sizeof(KXLDVTableEntry), nentries);
    require_noerr(rval, finish);

    /* Find the symbols for each vtable entry */

    for (i = 0; i < vtable->entries.nitems; ++i) {
        reloc = kxld_array_get_item(&sect->relocs, base_reloc_index - i);
        entry = kxld_array_get_item(&vtable->entries, i);

        /* If we can't find a symbol, it means it is a locally-defined,
         * non-external symbol that has been stripped.  We don't patch over
         * locally-defined symbols, so we leave the symbol as NULL and just
         * skip it.  We won't be able to patch subclasses with this symbol,
         * but there isn't much we can do about that.
         */
        tmpsym = kxld_reloc_get_symbol(relocator, reloc, sect->data, symtab);

        entry->unpatched.sym = tmpsym;
        entry->unpatched.reloc = reloc;
    }

    rval = KERN_SUCCESS;
finish:
    return rval;
}

/*******************************************************************************
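* Reads one pointer-sized value from a vtable slot, honoring the target's
* word size.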
*******************************************************************************/
static kxld_addr_t
get_entry_value(u_char *entry, const KXLDRelocator *relocator)
{
    kxld_addr_t entry_value;

    if (relocator->is_32_bit) {
        entry_value = *(uint32_t *)entry;
    } else {
        entry_value = *(uint64_t *)entry;
    }

    return entry_value;
}

#if !KERNEL
/*******************************************************************************
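* Byte-swaps a slot value when cross-linking for a target whose endianness
* differs from the host's (userspace-only helper).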
*******************************************************************************/
static kxld_addr_t
swap_entry_value(kxld_addr_t entry_value, const KXLDRelocator *relocator)
{
    if (relocator->is_32_bit) {
        entry_value = OSSwapInt32((uint32_t) entry_value);
    } else {
        entry_value = OSSwapInt64((uint64_t) entry_value);
    }

    return entry_value;
}
#endif /* !KERNEL */

/*******************************************************************************
* Initializes a vtable object by reading the symbol values out of the vtable
* entries and performing reverse symbol lookups on those values.
*******************************************************************************/
static kern_return_t
init_by_entries(KXLDVTable *vtable, const KXLDSymtab *symtab,
    const KXLDRelocator *relocator)
{
    kern_return_t rval = KERN_FAILURE;
    KXLDVTableEntry *tmpentry = NULL;
    KXLDSym *sym = NULL;
    u_char *base_entry = NULL;
    u_char *entry = NULL;
    kxld_addr_t entry_value = 0;
    u_int vtable_entry_size = 0;
    u_int vtable_header_size = 0;
    u_int nentries = 0;
    u_int i = 0;

    if (relocator->is_32_bit) {
        vtable_entry_size = VTABLE_ENTRY_SIZE_32;
        vtable_header_size = VTABLE_HEADER_SIZE_32;
    } else {
        vtable_entry_size = VTABLE_ENTRY_SIZE_64;
        vtable_header_size = VTABLE_HEADER_SIZE_64;
    }

    base_entry = vtable->vtable + vtable_header_size;

    /* Count the number of entries (the vtable is null-terminated) */

    entry = base_entry;
    entry_value = get_entry_value(entry, relocator);
    while (entry_value) {
        ++nentries;
        entry += vtable_entry_size;
        entry_value = get_entry_value(entry, relocator);
    }

    /* Allocate the symbol index */

    rval = kxld_array_init(&vtable->entries, sizeof(KXLDVTableEntry), nentries);
    require_noerr(rval, finish);

    /* Look up the symbols for each entry */

    entry = base_entry;
    rval = KERN_SUCCESS;
    for (i = 0; i < vtable->entries.nitems; ++i) {
        entry = base_entry + (i * vtable_entry_size);
        entry_value = get_entry_value(entry, relocator);

#if !KERNEL
        if (relocator->swap) {
            entry_value = swap_entry_value(entry_value, relocator);
        }
#endif /* !KERNEL */

        /* If we can't find the symbol, it means that the virtual function was
         * defined inline.  There's not much I can do about this; it just means
         * I can't patch this function.
         */
        tmpentry = kxld_array_get_item(&vtable->entries, i);
        sym = kxld_symtab_get_cxx_symbol_by_value(symtab, entry_value);

        if (sym) {
            tmpentry->patched.name = sym->name;
            tmpentry->patched.addr = sym->link_addr;
        } else {
            tmpentry->patched.name = NULL;
            tmpentry->patched.addr = 0;
        }
    }

    rval = KERN_SUCCESS;

finish:
    return rval;
}

/*******************************************************************************
* Initializes vtables by performing a reverse lookup on symbol values when
* they exist in the vtable entry, and by looking through a matching relocation
* entry when the vtable entry is NULL.
*
* Final linked images require this hybrid vtable initialization approach
* because they are already internally resolved.  This means that the vtables
* contain valid entries to local symbols, but still have relocation entries for
* external symbols.
*******************************************************************************/
static kern_return_t
init_by_entries_and_relocs(KXLDVTable *vtable, const KXLDSym *sym,
    const KXLDSymtab *symtab, const KXLDRelocator *relocator,
    const KXLDArray *relocs)
{
    kern_return_t rval = KERN_FAILURE;
    KXLDReloc *reloc = NULL;
    KXLDVTableEntry *tmpentry = NULL;
    KXLDSym *tmpsym = NULL;
    u_int vtable_entry_size = 0;
    u_int vtable_header_size = 0;
    u_char *base_entry = NULL;
    u_char *entry = NULL;
    kxld_addr_t entry_value = 0;
    kxld_addr_t base_entry_offset = 0;
    kxld_addr_t entry_offset = 0;
    u_int nentries = 0;
    u_int i = 0;
    char *demangled_name1 = NULL;
    size_t demangled_length1 = 0;

    check(vtable);
    check(sym);
    check(symtab);
    check(relocs);

    /* Find the first entry and its offset past the vtable padding */

    if (relocator->is_32_bit) {
        vtable_entry_size = VTABLE_ENTRY_SIZE_32;
        vtable_header_size = VTABLE_HEADER_SIZE_32;
    } else {
        vtable_entry_size = VTABLE_ENTRY_SIZE_64;
        vtable_header_size = VTABLE_HEADER_SIZE_64;
    }

    base_entry = vtable->vtable + vtable_header_size;

    base_entry_offset = sym->base_addr;
    base_entry_offset += vtable_header_size;

    /* In a final linked image, a vtable slot is valid if it is nonzero
     * (meaning the userspace linker has already resolved it), or if it has
     * a relocation entry.  We'll know the end of the vtable when we find a
     * slot that meets neither of these conditions.
     */
    entry = base_entry;
    entry_value = get_entry_value(entry, relocator);
    entry_offset = base_entry_offset;
    while (1) {
        entry_value = get_entry_value(entry, relocator);
        if (!entry_value) {
            reloc = kxld_reloc_get_reloc_by_offset(relocs, entry_offset);
            if (!reloc) break;
        }

        ++nentries;
        entry += vtable_entry_size;
        entry_offset += vtable_entry_size;
    }

    /* Allocate the symbol index */

    rval = kxld_array_init(&vtable->entries, sizeof(KXLDVTableEntry), nentries);
    require_noerr(rval, finish);

    /* Find the symbols for each vtable entry */

    entry = base_entry;
    entry_value = get_entry_value(entry, relocator);
    entry_offset = base_entry_offset;
    for (i = 0; i < vtable->entries.nitems; ++i) {
        entry_value = get_entry_value(entry, relocator);

        /* If we can't find a symbol, it means it is a locally-defined,
         * non-external symbol that has been stripped.  We don't patch over
         * locally-defined symbols, so we leave the symbol as NULL and just
         * skip it.  We won't be able to patch subclasses with this symbol,
         * but there isn't much we can do about that.
         */
        if (entry_value) {
#if !KERNEL
            if (relocator->swap) {
                entry_value = swap_entry_value(entry_value, relocator);
            }
#endif /* !KERNEL */

            reloc = NULL;
            tmpsym = kxld_symtab_get_cxx_symbol_by_value(symtab, entry_value);
        } else {
            reloc = kxld_reloc_get_reloc_by_offset(relocs, entry_offset);
            require_action(reloc, finish,
                rval=KERN_FAILURE;
                kxld_log(kKxldLogPatching, kKxldLogErr,
                    kKxldLogMalformedVTable,
                    kxld_demangle(vtable->name, &demangled_name1,
                        &demangled_length1)));

            tmpsym = kxld_reloc_get_symbol(relocator, reloc,
                /* data */ NULL, symtab);
        }

        tmpentry = kxld_array_get_item(&vtable->entries, i);
        tmpentry->unpatched.reloc = reloc;
        tmpentry->unpatched.sym = tmpsym;

        entry += vtable_entry_size;
        entry_offset += vtable_entry_size;
    }

    rval = KERN_SUCCESS;

finish:
    return rval;
}

/*******************************************************************************
*******************************************************************************/
void
kxld_vtable_clear(KXLDVTable *vtable)
{
    check(vtable);

    vtable->vtable = NULL;
    vtable->name = NULL;
    vtable->is_patched = FALSE;
    kxld_array_clear(&vtable->entries);
}

/*******************************************************************************
*******************************************************************************/
void
kxld_vtable_deinit(KXLDVTable *vtable)
{
    check(vtable);

    kxld_array_deinit(&vtable->entries);
    bzero(vtable, sizeof(*vtable));
}

/*******************************************************************************
* Patching vtables allows us to preserve binary compatibility across releases.
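* Each slot of the subclass vtable whose symbol is external and unresolved has
* its relocation entry redirected to the symbol used by the corresponding
* superclass slot, so the subclass picks up the superclass's implementation;
* the numbered cases below list the slots that are deliberately skipped.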
*******************************************************************************/
kern_return_t
kxld_vtable_patch(KXLDVTable *vtable, const KXLDVTable *super_vtable,
    KXLDSymtab *symtab, boolean_t strict_patching __unused)
{
    kern_return_t rval = KERN_FAILURE;
    KXLDVTableEntry *child_entry = NULL;
    KXLDVTableEntry *parent_entry = NULL;
    KXLDSym *sym = NULL;
    u_int symindex = 0;
    u_int i = 0;
    char *demangled_name1 = NULL;
    char *demangled_name2 = NULL;
    char *demangled_name3 = NULL;
    size_t demangled_length1 = 0;
    size_t demangled_length2 = 0;
    size_t demangled_length3 = 0;

    check(vtable);
    check(super_vtable);

    require_action(!vtable->is_patched, finish, rval=KERN_SUCCESS);
    require_action(vtable->entries.nitems >= super_vtable->entries.nitems, finish,
        rval=KERN_FAILURE;
        kxld_log(kKxldLogPatching, kKxldLogErr, kKxldLogMalformedVTable,
            kxld_demangle(vtable->name, &demangled_name1, &demangled_length1)));

    for (i = 0; i < super_vtable->entries.nitems; ++i) {
        child_entry = kxld_array_get_item(&vtable->entries, i);
        parent_entry = kxld_array_get_item(&super_vtable->entries, i);

        /* The child entry can be NULL when a locally-defined, non-external
         * symbol is stripped.  We wouldn't patch this entry anyway, so we
         * just skip it.
         */

        if (!child_entry->unpatched.sym) continue;

        /* It's possible for the patched parent entry not to have a symbol
         * (e.g. when the definition is inlined).  We can't patch this entry no
         * matter what, so we'll just skip it and die later if it's a problem
         * (which is not likely).
         */

        if (!parent_entry->patched.name) continue;

        /* 1) If the symbol is defined locally, do not patch */

        if (kxld_sym_is_defined_locally(child_entry->unpatched.sym)) continue;

        /* 2) If the child is a pure virtual function, do not patch.
         * In general, we want to proceed with patching when the symbol is
         * externally defined because pad slots fall into this category.
         * The pure virtual function symbol is a special case, as the pure
         * virtual property itself overrides the parent's implementation.
         */

        if (kxld_sym_is_pure_virtual(child_entry->unpatched.sym)) continue;

        /* 3) If the symbols are the same, do not patch */

        if (streq(child_entry->unpatched.sym->name,
                parent_entry->patched.name))
        {
            continue;
        }

        /* 4) If the parent vtable entry is a pad slot, and the child does not
         * match it, then the child was built against a newer version of the
         * libraries, so it is binary-incompatible.
         */

        require_action(!kxld_sym_name_is_padslot(parent_entry->patched.name),
            finish, rval=KERN_FAILURE;
            kxld_log(kKxldLogPatching, kKxldLogErr,
                kKxldLogParentOutOfDate,
                kxld_demangle(super_vtable->name, &demangled_name1,
                    &demangled_length1),
                kxld_demangle(vtable->name, &demangled_name2,
                    &demangled_length2)));

#if KXLD_USER_OR_STRICT_PATCHING
        /* 5) If we are doing strict patching, we prevent kexts from declaring
         * virtual functions and not implementing them.  We can tell if a
         * virtual function is declared but not implemented because we resolve
         * symbols before patching; an unimplemented function will still be
         * undefined at this point.  We then look at whether the symbol has
         * the same class prefix as the vtable.  If it does, the symbol was
         * declared as part of the class and not inherited, which means we
         * should not patch it.
         */

        if (strict_patching && !kxld_sym_is_defined(child_entry->unpatched.sym))
        {
            char class_name[KXLD_MAX_NAME_LEN];
            char function_prefix[KXLD_MAX_NAME_LEN];
            u_long function_prefix_len = 0;

            rval = kxld_sym_get_class_name_from_vtable_name(vtable->name,
                class_name, sizeof(class_name));
            require_noerr(rval, finish);

            function_prefix_len =
                kxld_sym_get_function_prefix_from_class_name(class_name,
                    function_prefix, sizeof(function_prefix));
            require(function_prefix_len, finish);

            if (!strncmp(child_entry->unpatched.sym->name,
                    function_prefix, function_prefix_len))
            {
                continue;
            }
        }
#endif /* KXLD_USER_OR_STRICT_PATCHING */

        /* 6) The child symbol is unresolved and different from its parent, so
         * we need to patch it up.  We do this by modifying the relocation
         * entry of the vtable entry to point to the symbol of the parent
         * vtable entry.  If that symbol does not exist (i.e. we got the data
         * from a link state object's vtable representation), then we create a
         * new symbol in the symbol table and point the relocation entry to
         * that.
         */

        sym = kxld_symtab_get_symbol_by_name(symtab, parent_entry->patched.name);
        if (!sym) {
            rval = kxld_symtab_add_symbol(symtab, parent_entry->patched.name,
                parent_entry->patched.addr, &sym);
            require_noerr(rval, finish);
        }
        require_action(sym, finish, rval=KERN_FAILURE);

        rval = kxld_symtab_get_sym_index(symtab, sym, &symindex);
        require_noerr(rval, finish);

        rval = kxld_reloc_update_symindex(child_entry->unpatched.reloc, symindex);
        require_noerr(rval, finish);

        kxld_log(kKxldLogPatching, kKxldLogDetail,
            "In vtable '%s', patching '%s' with '%s'.",
            kxld_demangle(vtable->name, &demangled_name1, &demangled_length1),
            kxld_demangle(child_entry->unpatched.sym->name,
                &demangled_name2, &demangled_length2),
            kxld_demangle(sym->name, &demangled_name3, &demangled_length3));

        kxld_sym_patch(child_entry->unpatched.sym);
        child_entry->unpatched.sym = sym;
    }

    /* Change the vtable representation from the unpatched layout to the
     * patched layout.
     */
    for (i = 0; i < vtable->entries.nitems; ++i) {
        char *name;
        kxld_addr_t addr;

        child_entry = kxld_array_get_item(&vtable->entries, i);
        if (child_entry->unpatched.sym) {
            name = child_entry->unpatched.sym->name;
            addr = child_entry->unpatched.sym->link_addr;
        } else {
            name = NULL;
            addr = 0;
        }

        child_entry->patched.name = name;
        child_entry->patched.addr = addr;
    }

    vtable->is_patched = TRUE;
    rval = KERN_SUCCESS;

finish:
    if (demangled_name1) kxld_free(demangled_name1, demangled_length1);
    if (demangled_name2) kxld_free(demangled_name2, demangled_length2);
    if (demangled_name3) kxld_free(demangled_name3, demangled_length3);

    return rval;
}