/*
 * Copyright (c) 2008 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <string.h>
#include <mach-o/loader.h>
#include <sys/types.h>

#define DEBUG_ASSERT_COMPONENT_NAME_STRING "kxld"
#include <AssertMacros.h>

#include "kxld_reloc.h"
#include "kxld_sect.h"
#include "kxld_state.h"
#include "kxld_sym.h"
#include "kxld_symtab.h"
#include "kxld_util.h"
#include "kxld_vtable.h"

#define VTABLE_ENTRY_SIZE_32 4
#define VTABLE_HEADER_LEN_32 2
#define VTABLE_HEADER_SIZE_32 (VTABLE_HEADER_LEN_32 * VTABLE_ENTRY_SIZE_32)

#define VTABLE_ENTRY_SIZE_64 8
#define VTABLE_HEADER_LEN_64 2
#define VTABLE_HEADER_SIZE_64 (VTABLE_HEADER_LEN_64 * VTABLE_ENTRY_SIZE_64)
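/* The two-slot header corresponds to the offset-to-top and RTTI pointer
 * entries that precede the first virtual function slot in the common
 * (Itanium-style) C++ vtable layout; kxld skips it when walking entries.
 */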

static kern_return_t init_by_relocs(KXLDVTable *vtable, const KXLDSym *sym,
    const KXLDSect *sect, const KXLDSymtab *symtab,
    const KXLDRelocator *relocator);

static kern_return_t init_by_entries_and_relocs(KXLDVTable *vtable,
    const KXLDSym *sym, const KXLDSymtab *symtab,
    const KXLDRelocator *relocator, const KXLDArray *relocs);

static kxld_addr_t get_entry_value(u_char *entry, const KXLDRelocator *relocator)
    __attribute__((pure));
#if !KERNEL
static kxld_addr_t swap_entry_value(kxld_addr_t entry_value,
    const KXLDRelocator *relocator) __attribute__((const));
#endif /* !KERNEL */
static kern_return_t init_by_entries(KXLDVTable *vtable, const KXLDSymtab *symtab,
    const KXLDRelocator *relocator);

/*******************************************************************************
*******************************************************************************/
kern_return_t
kxld_vtable_init_from_kernel_macho(KXLDVTable *vtable, const KXLDSym *sym,
    const KXLDSect *sect, const KXLDSymtab *symtab,
    const KXLDRelocator *relocator)
{
    kern_return_t rval = KERN_FAILURE;

    check(vtable);
    check(sym);
    check(sect);
    check(symtab);

    vtable->name = sym->name;
    vtable->vtable = sect->data + kxld_sym_get_section_offset(sym, sect);
    vtable->is_patched = FALSE;

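    /* The running kernel is fully linked, so its vtable sections should carry
     * no relocation entries; every slot already holds a final address that
     * init_by_entries() can resolve with a reverse symbol lookup.
     */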
    require_action(kxld_sect_get_num_relocs(sect) == 0, finish,
        rval=KERN_FAILURE;
        kxld_log(kKxldLogPatching, kKxldLogErr,
            kKxldLogMalformedVTable, vtable->name));

    rval = init_by_entries(vtable, symtab, relocator);
    require_noerr(rval, finish);

    vtable->is_patched = TRUE;

    rval = KERN_SUCCESS;

finish:

    if (rval) kxld_vtable_deinit(vtable);

    return rval;
}

/*******************************************************************************
*******************************************************************************/
kern_return_t
kxld_vtable_init_from_object_macho(KXLDVTable *vtable, const KXLDSym *sym,
    const KXLDSect *sect, const KXLDSymtab *symtab,
    const KXLDRelocator *relocator)
{
    kern_return_t rval = KERN_FAILURE;

    check(vtable);
    check(sym);
    check(sect);
    check(symtab);

    vtable->name = sym->name;
    vtable->vtable = sect->data + kxld_sym_get_section_offset(sym, sect);
    vtable->is_patched = FALSE;

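    /* In an unlinked object file the vtable slots are still zero-filled, and
     * each slot is instead described by a relocation entry, so there must be
     * at least one reloc; init_by_relocs() recovers the symbols from them.
     */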
    require_action(kxld_sect_get_num_relocs(sect) > 0, finish,
        rval=KERN_FAILURE;
        kxld_log(kKxldLogPatching, kKxldLogErr,
            kKxldLogMalformedVTable, vtable->name));

    rval = init_by_relocs(vtable, sym, sect, symtab, relocator);
    require_noerr(rval, finish);

    rval = KERN_SUCCESS;

finish:

    if (rval) kxld_vtable_deinit(vtable);

    return rval;
}

/*******************************************************************************
*******************************************************************************/
kern_return_t
kxld_vtable_init_from_final_macho(KXLDVTable *vtable, const KXLDSym *sym,
    const KXLDSect *sect, const KXLDSymtab *symtab,
    const KXLDRelocator *relocator, const KXLDArray *relocs)
{
    kern_return_t rval = KERN_FAILURE;

    check(vtable);
    check(sym);
    check(sect);
    check(symtab);

    vtable->name = sym->name;
    vtable->vtable = sect->data + kxld_sym_get_section_offset(sym, sect);
    vtable->is_patched = FALSE;

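    /* A final linked image keeps its remaining (external) relocations in the
     * separate table passed in as 'relocs', so the vtable's own section must
     * have none; init_by_entries_and_relocs() handles the mix of already
     * resolved slots and externally relocated ones.
     */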
    require_action(kxld_sect_get_num_relocs(sect) == 0, finish,
        rval=KERN_FAILURE;
        kxld_log(kKxldLogPatching, kKxldLogErr,
            kKxldLogMalformedVTable, vtable->name));

    rval = init_by_entries_and_relocs(vtable, sym, symtab,
        relocator, relocs);
    require_noerr(rval, finish);

    rval = KERN_SUCCESS;

finish:
    if (rval) kxld_vtable_deinit(vtable);

    return rval;
}

#if KXLD_USER_OR_ILP32
/*******************************************************************************
*******************************************************************************/
kern_return_t
kxld_vtable_init_from_link_state_32(KXLDVTable *vtable, u_char *file,
    KXLDVTableHdr *hdr)
{
    kern_return_t rval = KERN_FAILURE;
    KXLDSymEntry32 *sym = NULL;
    KXLDVTableEntry *entry = NULL;
    u_int i = 0;

    check(vtable);
    check(file);
    check(hdr);

    vtable->name = (char *) (file + hdr->nameoff);
    vtable->is_patched = TRUE;

    rval = kxld_array_init(&vtable->entries, sizeof(KXLDVTableEntry),
        hdr->nentries);
    require_noerr(rval, finish);

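    /* A link state file stores each vtable pre-patched, as an array of
     * (name offset, address) entries, so they are copied out directly.
     */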
    sym = (KXLDSymEntry32 *) (file + hdr->vtableoff);
    for (i = 0; i < vtable->entries.nitems; ++i, ++sym) {
        entry = kxld_array_get_item(&vtable->entries, i);
        entry->patched.name = (char *) (file + sym->nameoff);
        entry->patched.addr = sym->addr;
    }

    rval = KERN_SUCCESS;

finish:
    return rval;
}
#endif /* KXLD_USER_OR_ILP32 */

#if KXLD_USER_OR_LP64
/*******************************************************************************
*******************************************************************************/
kern_return_t
kxld_vtable_init_from_link_state_64(KXLDVTable *vtable, u_char *file,
    KXLDVTableHdr *hdr)
{
    kern_return_t rval = KERN_FAILURE;
    KXLDSymEntry64 *sym = NULL;
    KXLDVTableEntry *entry = NULL;
    u_int i = 0;

    check(vtable);
    check(file);
    check(hdr);

    vtable->name = (char *) (file + hdr->nameoff);
    vtable->is_patched = TRUE;

    rval = kxld_array_init(&vtable->entries, sizeof(KXLDVTableEntry),
        hdr->nentries);
    require_noerr(rval, finish);

    sym = (KXLDSymEntry64 *) (file + hdr->vtableoff);
    for (i = 0; i < vtable->entries.nitems; ++i, ++sym) {
        entry = kxld_array_get_item(&vtable->entries, i);
        entry->patched.name = (char *) (file + sym->nameoff);
        entry->patched.addr = sym->addr;
    }

    rval = KERN_SUCCESS;

finish:
    return rval;
}
#endif /* KXLD_USER_OR_LP64 */

/*******************************************************************************
*******************************************************************************/
kern_return_t
kxld_vtable_copy(KXLDVTable *vtable, const KXLDVTable *src)
{
    kern_return_t rval = KERN_FAILURE;

    check(vtable);
    check(src);

    vtable->vtable = src->vtable;
    vtable->name = src->name;
    vtable->is_patched = src->is_patched;

    rval = kxld_array_copy(&vtable->entries, &src->entries);
    require_noerr(rval, finish);

    rval = KERN_SUCCESS;

finish:
    return rval;
}

/*******************************************************************************
* Initializes a vtable object by matching up relocation entries to the vtable's
* entries and finding the corresponding symbols.
*******************************************************************************/
static kern_return_t
init_by_relocs(KXLDVTable *vtable, const KXLDSym *sym, const KXLDSect *sect,
    const KXLDSymtab *symtab, const KXLDRelocator *relocator)
{
    kern_return_t rval = KERN_FAILURE;
    KXLDReloc *reloc = NULL;
    KXLDVTableEntry *entry = NULL;
    KXLDSym *tmpsym = NULL;
    kxld_addr_t vtable_base_offset = 0;
    kxld_addr_t entry_offset = 0;
    u_int i = 0;
    u_int nentries = 0;
    u_int vtable_entry_size = 0;
    u_int base_reloc_index = 0;
    u_int reloc_index = 0;

    check(vtable);
    check(sym);
    check(sect);
    check(symtab);
    check(relocator);

    /* Find the first entry past the vtable padding */

    vtable_base_offset = kxld_sym_get_section_offset(sym, sect);
    if (relocator->is_32_bit) {
        vtable_entry_size = VTABLE_ENTRY_SIZE_32;
        vtable_base_offset += VTABLE_HEADER_SIZE_32;
    } else {
        vtable_entry_size = VTABLE_ENTRY_SIZE_64;
        vtable_base_offset += VTABLE_HEADER_SIZE_64;
    }

    /* Find the relocation entry at the start of the vtable */

    rval = kxld_reloc_get_reloc_index_by_offset(&sect->relocs,
        vtable_base_offset, &base_reloc_index);
    require_noerr(rval, finish);

    /* Count the number of consecutive relocation entries to find the number of
     * vtable entries. For some reason, the __TEXT,__const relocations are
     * sorted in descending order, so we have to walk backwards. Also, make
     * sure we don't run off the end of the section's relocs.
     */

    reloc_index = base_reloc_index;
    entry_offset = vtable_base_offset;
    reloc = kxld_array_get_item(&sect->relocs, reloc_index);
    while (reloc->address == entry_offset) {
        ++nentries;
        if (!reloc_index) break;

        --reloc_index;

        reloc = kxld_array_get_item(&sect->relocs, reloc_index);
        entry_offset += vtable_entry_size;
    }

    /* Allocate the symbol index */

    rval = kxld_array_init(&vtable->entries, sizeof(KXLDVTableEntry), nentries);
    require_noerr(rval, finish);

    /* Find the symbols for each vtable entry */

    for (i = 0; i < vtable->entries.nitems; ++i) {
        reloc = kxld_array_get_item(&sect->relocs, base_reloc_index - i);
        entry = kxld_array_get_item(&vtable->entries, i);

        /* If we can't find a symbol, it means it is a locally-defined,
         * non-external symbol that has been stripped. We don't patch over
         * locally-defined symbols, so we leave the symbol as NULL and just
         * skip it. We won't be able to patch subclasses with this symbol,
         * but there isn't much we can do about that.
         */
        tmpsym = kxld_reloc_get_symbol(relocator, reloc, sect->data, symtab);

        entry->unpatched.sym = tmpsym;
        entry->unpatched.reloc = reloc;
    }

    rval = KERN_SUCCESS;
finish:
    return rval;
}

/*******************************************************************************
*******************************************************************************/
static kxld_addr_t
get_entry_value(u_char *entry, const KXLDRelocator *relocator)
{
    kxld_addr_t entry_value;

    if (relocator->is_32_bit) {
        entry_value = *(uint32_t *)entry;
    } else {
        entry_value = *(uint64_t *)entry;
    }

    return entry_value;
}

#if !KERNEL
/*******************************************************************************
*******************************************************************************/
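/* Only needed when kxld runs in user space and the target image's byte order
 * differs from the host's (relocator->swap); in-kernel linking is always
 * native-endian, so this helper is compiled out there.
 */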
static kxld_addr_t
swap_entry_value(kxld_addr_t entry_value, const KXLDRelocator *relocator)
{
    if (relocator->is_32_bit) {
        entry_value = OSSwapInt32((uint32_t) entry_value);
    } else {
        entry_value = OSSwapInt64((uint64_t) entry_value);
    }

    return entry_value;
}
#endif /* !KERNEL */

/*******************************************************************************
* Initializes a vtable object by reading the symbol values out of the vtable
* entries and performing reverse symbol lookups on those values.
*******************************************************************************/
static kern_return_t
init_by_entries(KXLDVTable *vtable, const KXLDSymtab *symtab,
    const KXLDRelocator *relocator)
{
    kern_return_t rval = KERN_FAILURE;
    KXLDVTableEntry *tmpentry = NULL;
    KXLDSym *sym = NULL;
    u_char *base_entry = NULL;
    u_char *entry = NULL;
    kxld_addr_t entry_value = 0;
    u_int vtable_entry_size = 0;
    u_int vtable_header_size = 0;
    u_int nentries = 0;
    u_int i = 0;

    if (relocator->is_32_bit) {
        vtable_entry_size = VTABLE_ENTRY_SIZE_32;
        vtable_header_size = VTABLE_HEADER_SIZE_32;
    } else {
        vtable_entry_size = VTABLE_ENTRY_SIZE_64;
        vtable_header_size = VTABLE_HEADER_SIZE_64;
    }

    base_entry = vtable->vtable + vtable_header_size;

    /* Count the number of entries (the vtable is null-terminated) */

    entry = base_entry;
    entry_value = get_entry_value(entry, relocator);
    while (entry_value) {
        ++nentries;
        entry += vtable_entry_size;
        entry_value = get_entry_value(entry, relocator);
    }

    /* Allocate the symbol index */

    rval = kxld_array_init(&vtable->entries, sizeof(KXLDVTableEntry), nentries);
    require_noerr(rval, finish);

    /* Look up the symbols for each entry */

    for (i = 0; i < vtable->entries.nitems; ++i) {
        entry = base_entry + (i * vtable_entry_size);
        entry_value = get_entry_value(entry, relocator);

#if !KERNEL
        if (relocator->swap) {
            entry_value = swap_entry_value(entry_value, relocator);
        }
#endif /* !KERNEL */

        /* If we can't find the symbol, it means that the virtual function was
         * defined inline. There's not much we can do about this; it just means
         * we can't patch this function.
         */
        tmpentry = kxld_array_get_item(&vtable->entries, i);
        sym = kxld_symtab_get_cxx_symbol_by_value(symtab, entry_value);

        if (sym) {
            tmpentry->patched.name = sym->name;
            tmpentry->patched.addr = sym->link_addr;
        } else {
            tmpentry->patched.name = NULL;
            tmpentry->patched.addr = 0;
        }
    }

    rval = KERN_SUCCESS;

finish:
    return rval;
}

/*******************************************************************************
* Initializes vtables by performing a reverse lookup on symbol values when
* they exist in the vtable entry, and by looking through a matching relocation
* entry when the vtable entry is NULL.
*
* Final linked images require this hybrid vtable initialization approach
* because they are already internally resolved. This means that the vtables
* contain valid entries to local symbols, but still have relocation entries for
* external symbols.
*******************************************************************************/
static kern_return_t
init_by_entries_and_relocs(KXLDVTable *vtable, const KXLDSym *sym,
    const KXLDSymtab *symtab, const KXLDRelocator *relocator,
    const KXLDArray *relocs)
{
    kern_return_t rval = KERN_FAILURE;
    KXLDReloc *reloc = NULL;
    KXLDVTableEntry *tmpentry = NULL;
    KXLDSym *tmpsym = NULL;
    u_int vtable_entry_size = 0;
    u_int vtable_header_size = 0;
    u_char *base_entry = NULL;
    u_char *entry = NULL;
    kxld_addr_t entry_value = 0;
    kxld_addr_t base_entry_offset = 0;
    kxld_addr_t entry_offset = 0;
    u_int nentries = 0;
    u_int i = 0;

    check(vtable);
    check(sym);
    check(symtab);
    check(relocs);

    /* Find the first entry and its offset past the vtable padding */

    if (relocator->is_32_bit) {
        vtable_entry_size = VTABLE_ENTRY_SIZE_32;
        vtable_header_size = VTABLE_HEADER_SIZE_32;
    } else {
        vtable_entry_size = VTABLE_ENTRY_SIZE_64;
        vtable_header_size = VTABLE_HEADER_SIZE_64;
    }

    base_entry = vtable->vtable + vtable_header_size;

    base_entry_offset = sym->base_addr;
    base_entry_offset += vtable_header_size;

    /* In a final linked image, a vtable slot is valid if it is nonzero
     * (meaning the userspace linker has already resolved it) or if it has
     * a relocation entry. We'll know the end of the vtable when we find a
     * slot that meets neither of these conditions.
     */
    entry = base_entry;
    entry_offset = base_entry_offset;
    while (1) {
        entry_value = get_entry_value(entry, relocator);
        if (!entry_value) {
            reloc = kxld_reloc_get_reloc_by_offset(relocs, entry_offset);
            if (!reloc) break;
        }

        ++nentries;
        entry += vtable_entry_size;
        entry_offset += vtable_entry_size;
    }

    /* Allocate the symbol index */

    rval = kxld_array_init(&vtable->entries, sizeof(KXLDVTableEntry), nentries);
    require_noerr(rval, finish);

    /* Find the symbols for each vtable entry */

    entry = base_entry;
    entry_offset = base_entry_offset;
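    /* 'entry' walks the slot bytes in the image, while 'entry_offset' tracks
     * each slot's address, which is what the relocation entries record.
     */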
    for (i = 0; i < vtable->entries.nitems; ++i) {
        entry_value = get_entry_value(entry, relocator);

        /* If we can't find a symbol, it means it is a locally-defined,
         * non-external symbol that has been stripped. We don't patch over
         * locally-defined symbols, so we leave the symbol as NULL and just
         * skip it. We won't be able to patch subclasses with this symbol,
         * but there isn't much we can do about that.
         */
        if (entry_value) {
#if !KERNEL
            if (relocator->swap) {
                entry_value = swap_entry_value(entry_value, relocator);
            }
#endif /* !KERNEL */

            reloc = NULL;
            tmpsym = kxld_symtab_get_cxx_symbol_by_value(symtab, entry_value);
        } else {
            reloc = kxld_reloc_get_reloc_by_offset(relocs, entry_offset);
            require_action(reloc, finish,
                rval=KERN_FAILURE;
                kxld_log(kKxldLogPatching, kKxldLogErr,
                    kKxldLogMalformedVTable, vtable->name));

            tmpsym = kxld_reloc_get_symbol(relocator, reloc,
                /* data */ NULL, symtab);
        }

        tmpentry = kxld_array_get_item(&vtable->entries, i);
        tmpentry->unpatched.reloc = reloc;
        tmpentry->unpatched.sym = tmpsym;

        entry += vtable_entry_size;
        entry_offset += vtable_entry_size;
    }

    rval = KERN_SUCCESS;

finish:
    return rval;
}

/*******************************************************************************
*******************************************************************************/
void
kxld_vtable_clear(KXLDVTable *vtable)
{
    check(vtable);

    vtable->vtable = NULL;
    vtable->name = NULL;
    vtable->is_patched = FALSE;
    kxld_array_clear(&vtable->entries);
}

/*******************************************************************************
*******************************************************************************/
void
kxld_vtable_deinit(KXLDVTable *vtable)
{
    check(vtable);

    kxld_array_deinit(&vtable->entries);
    bzero(vtable, sizeof(*vtable));
}

/*******************************************************************************
* Patching vtables allows us to preserve binary compatibility across releases.
*******************************************************************************/
kern_return_t
kxld_vtable_patch(KXLDVTable *vtable, const KXLDVTable *super_vtable,
    KXLDSymtab *symtab, boolean_t strict_patching __unused)
{
    kern_return_t rval = KERN_FAILURE;
    KXLDVTableEntry *child_entry = NULL;
    KXLDVTableEntry *parent_entry = NULL;
    KXLDSym *sym = NULL;
    u_int symindex = 0;
    u_int i = 0;

    check(vtable);
    check(super_vtable);

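    /* A subclass vtable begins with the slots it inherits, so it must have at
     * least as many entries as its superclass's vtable; each inherited slot
     * is considered for patching against the corresponding parent slot below.
     */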
    require_action(!vtable->is_patched, finish, rval=KERN_SUCCESS);
    require_action(vtable->entries.nitems >= super_vtable->entries.nitems, finish,
        rval=KERN_FAILURE;
        kxld_log(kKxldLogPatching, kKxldLogErr,
            kKxldLogMalformedVTable, vtable->name));

    for (i = 0; i < super_vtable->entries.nitems; ++i) {
        child_entry = kxld_array_get_item(&vtable->entries, i);
        parent_entry = kxld_array_get_item(&super_vtable->entries, i);

        /* The child entry can be NULL when a locally-defined, non-external
         * symbol is stripped. We wouldn't patch this entry anyway, so we
         * just skip it.
         */

        if (!child_entry->unpatched.sym) continue;

        /* It's possible for the patched parent entry not to have a symbol
         * (e.g. when the definition is inlined). We can't patch this entry no
         * matter what, so we'll just skip it and die later if it's a problem
         * (which is not likely).
         */

        if (!parent_entry->patched.name) continue;

        /* 1) If the symbol is defined locally, do not patch */

        if (kxld_sym_is_defined_locally(child_entry->unpatched.sym)) continue;

        /* 2) If the child is a pure virtual function, do not patch.
         * In general, we want to proceed with patching when the symbol is
         * externally defined because pad slots fall into this category.
         * The pure virtual function symbol is a special case, as the pure
         * virtual property itself overrides the parent's implementation.
         */

        if (kxld_sym_is_pure_virtual(child_entry->unpatched.sym)) continue;

        /* 3) If the symbols are the same, do not patch */

        if (streq(child_entry->unpatched.sym->name,
                  parent_entry->patched.name))
        {
            continue;
        }

        /* 4) If the parent vtable entry is a pad slot, and the child does not
         * match it, then the child was built against a newer version of the
         * libraries, so it is binary-incompatible.
         */

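        /* (Pad slots are the reserved, intentionally unused vtable entries
         * that Apple classes declare for future expansion, e.g. via
         * OSMetaClassDeclareReservedUnused.)
         */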
        require_action(!kxld_sym_name_is_padslot(parent_entry->patched.name),
            finish, rval=KERN_FAILURE;
            kxld_log(kKxldLogPatching, kKxldLogErr,
                kKxldLogParentOutOfDate, super_vtable->name, vtable->name));

#if KXLD_USER_OR_STRICT_PATCHING
        /* 5) If we are doing strict patching, we prevent kexts from declaring
         * virtual functions and not implementing them. We can tell if a
         * virtual function is declared but not implemented because we resolve
         * symbols before patching; an unimplemented function will still be
         * undefined at this point. We then look at whether the symbol has
         * the same class prefix as the vtable. If it does, the symbol was
         * declared as part of the class and not inherited, which means we
         * should not patch it.
         */

        if (strict_patching && !kxld_sym_is_defined(child_entry->unpatched.sym))
        {
            char class_name[KXLD_MAX_NAME_LEN];
            char function_prefix[KXLD_MAX_NAME_LEN];
            u_long function_prefix_len = 0;

            rval = kxld_sym_get_class_name_from_vtable_name(vtable->name,
                class_name, sizeof(class_name));
            require_noerr(rval, finish);

            function_prefix_len =
                kxld_sym_get_function_prefix_from_class_name(class_name,
                    function_prefix, sizeof(function_prefix));
            require(function_prefix_len, finish);

            if (!strncmp(child_entry->unpatched.sym->name,
                    function_prefix, function_prefix_len))
            {
                continue;
            }
        }
#endif /* KXLD_USER_OR_STRICT_PATCHING */

        /* 6) The child symbol is unresolved and different from its parent, so
         * we need to patch it up. We do this by modifying the relocation
         * entry of the vtable entry to point to the symbol of the parent
         * vtable entry. If that symbol does not exist (i.e. we got the data
         * from a link state object's vtable representation), then we create a
         * new symbol in the symbol table and point the relocation entry to
         * that.
         */

        sym = kxld_symtab_get_symbol_by_name(symtab, parent_entry->patched.name);
        if (!sym) {
            rval = kxld_symtab_add_symbol(symtab, parent_entry->patched.name,
                parent_entry->patched.addr, &sym);
            require_noerr(rval, finish);
        }
        require_action(sym, finish, rval=KERN_FAILURE);

        rval = kxld_symtab_get_sym_index(symtab, sym, &symindex);
        require_noerr(rval, finish);

        rval = kxld_reloc_update_symindex(child_entry->unpatched.reloc, symindex);
        require_noerr(rval, finish);

        kxld_log(kKxldLogPatching, kKxldLogDetail,
            "In vtable %s, patching %s with %s.",
            vtable->name, child_entry->unpatched.sym->name, sym->name);

        kxld_sym_patch(child_entry->unpatched.sym);
        child_entry->unpatched.sym = sym;
    }

    /* Change the vtable representation from the unpatched layout to the
     * patched layout.
     */
    for (i = 0; i < vtable->entries.nitems; ++i) {
        char *name;
        kxld_addr_t addr;

        child_entry = kxld_array_get_item(&vtable->entries, i);
        if (child_entry->unpatched.sym) {
            name = child_entry->unpatched.sym->name;
            addr = child_entry->unpatched.sym->link_addr;
        } else {
            name = NULL;
            addr = 0;
        }

        child_entry->patched.name = name;
        child_entry->patched.addr = addr;
    }

    vtable->is_patched = TRUE;
    rval = KERN_SUCCESS;

finish:
    return rval;
}