/* libkern/kxld/kxld_util.c — from apple/xnu (tag xnu-4903.270.47) */
1 /*
2 * Copyright (c) 2007-2016 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 #include <stdarg.h>
29 #include <string.h>
30 #include <mach-o/loader.h>
31 #include <mach-o/nlist.h>
32 #include <mach-o/reloc.h>
33 #if KERNEL
34 #include <kern/kalloc.h>
35 #include <libkern/libkern.h>
36 #include <mach/vm_param.h>
37 #include <vm/vm_kern.h>
38 #else
39 #include <stdio.h>
40 #include <stdlib.h>
41 #include <mach/mach_init.h>
42 #include <mach-o/swap.h>
43 #endif
44
45 #define DEBUG_ASSERT_COMPONENT_NAME_STRING "kxld"
46 #include <AssertMacros.h>
47
48 #include "kxld_util.h"
49
50 #if !KERNEL
51 static void unswap_macho_32(u_char *file, enum NXByteOrder host_order,
52 enum NXByteOrder target_order);
53 static void unswap_macho_64(u_char *file, enum NXByteOrder host_order,
54 enum NXByteOrder target_order);
55 #endif /* !KERNEL */
56
57 #if DEBUG
58 static unsigned long num_allocations = 0;
59 static unsigned long num_frees = 0;
60 static unsigned long bytes_allocated = 0;
61 static unsigned long bytes_freed = 0;
62 #endif
63
64 static KXLDLoggingCallback s_logging_callback = NULL;
65 static char s_callback_name[64] = "internal";
66 static void *s_callback_data = NULL;
67
68 #if !KERNEL
69 static boolean_t s_cross_link_enabled = FALSE;
70 static kxld_size_t s_cross_link_page_size = PAGE_SIZE;
71 #endif
72
73
/*******************************************************************************
* Registers the callback that kxld_log() routes all messages through.
* Passing NULL disables logging (kxld_log becomes a no-op).
*******************************************************************************/
void
kxld_set_logging_callback(KXLDLoggingCallback logging_callback)
{
    s_logging_callback = logging_callback;
}
81
82 /*******************************************************************************
83 *******************************************************************************/
84 void
85 kxld_set_logging_callback_data(const char *name, void *user_data)
86 {
87 if (name) {
88 (void)strlcpy(s_callback_name, name, sizeof(s_callback_name));
89 /* disallow format strings in the kxld logging callback name */
90 for (size_t i = 0; i < sizeof(s_callback_name); i++) {
91 if (s_callback_name[i] == '%') {
92 s_callback_name[i] = '.';
93 }
94 }
95 } else {
96 (void)strlcpy(s_callback_name, "internal", sizeof(s_callback_name));
97 }
98
99 s_callback_data = user_data;
100 }
101
/*******************************************************************************
* Formats and emits a log message through the registered callback.  The
* caller's format string is prefixed with "kxld[<name>]: " before being
* passed, together with the variadic arguments, to the callback.  If no
* callback is registered the message is silently dropped.
*******************************************************************************/
void
kxld_log(KXLDLogSubsystem subsystem, KXLDLogLevel level,
    const char *in_format, ...)
{
    char stack_buffer[256];
    char *alloc_buffer = NULL;
    char *format = stack_buffer;
    u_int length = 0;
    va_list ap;

    if (s_logging_callback) {
        /* Build the prefixed format string.  snprintf returns the length the
         * full string would need even when it did not fit in stack_buffer.
         */
        length = snprintf(stack_buffer, sizeof(stack_buffer), "kxld[%s]: %s",
            s_callback_name, in_format);

        if (length >= sizeof(stack_buffer)) {
            /* Truncated: retry into a heap buffer sized for the whole string
             * (+1 for the NUL terminator that snprintf's count excludes).
             */
            length += 1;
            alloc_buffer = kxld_alloc(length);
            if (!alloc_buffer) {
                return;
            }

            snprintf(alloc_buffer, length, "kxld[%s]: %s",
                s_callback_name, in_format);
            format = alloc_buffer;
        }

        /* Hand the composed format plus the caller's arguments to the sink. */
        va_start(ap, in_format);
        s_logging_callback(subsystem, level, format, ap, s_callback_data);
        va_end(ap);

        if (alloc_buffer) {
            kxld_free(alloc_buffer, length);
        }
    }
}
139
/* We'll use kalloc for any page-based allocations under this threshold, and
 * kmem_alloc otherwise.  Parenthesized so the macro expands safely inside
 * larger expressions (e.g. next to higher-precedence operators).
 */
#define KALLOC_MAX (16 * 1024)
144
/*******************************************************************************
* Small-allocation wrapper: kalloc in the kernel, malloc in user space.
* Returns NULL on failure.  Pair with kxld_free(), which needs the original
* size in kernel builds.
*******************************************************************************/
void *
kxld_alloc(size_t size)
{
#if KERNEL
    void *result = kalloc(size);
#else
    void *result = malloc(size);
#endif

#if DEBUG
    if (result) {
        ++num_allocations;
        bytes_allocated += size;
    }
#endif

    return result;
}
167
/*******************************************************************************
* Allocates whole pages without touching the DEBUG accounting counters.  In
* the kernel, small requests come from kalloc and larger ones from
* kmem_alloc; in user space everything comes from malloc.  Returns NULL on
* failure.  Free with kxld_page_free_untracked().
*******************************************************************************/
void *
kxld_page_alloc_untracked(size_t size)
{
    void *result = NULL;

    size = round_page(size);

#if KERNEL
    if (size < KALLOC_MAX) {
        result = kalloc(size);
    } else {
        vm_offset_t addr = 0;
        kern_return_t rval = kmem_alloc(kernel_map, &addr, size,
            VM_KERN_MEMORY_OSKEXT);
        if (!rval) {
            result = (void *) addr;
        }
    }
#else /* !KERNEL */
    result = malloc(size);
#endif /* KERNEL */

    return result;
}
196
197 /*******************************************************************************
198 *******************************************************************************/
199 void *
200 kxld_page_alloc(size_t size)
201 {
202 void * ptr = NULL;
203
204 ptr = kxld_page_alloc_untracked(size);
205 #if DEBUG
206 if (ptr) {
207 ++num_allocations;
208 bytes_allocated += round_page(size);
209 }
210 #endif /* DEBUG */
211
212 return ptr;
213 }
214
215 /*******************************************************************************
216 *******************************************************************************/
217 void *
218 kxld_alloc_pageable(size_t size)
219 {
220 size = round_page(size);
221
222 #if KERNEL
223 kern_return_t rval = 0;
224 vm_offset_t ptr = 0;
225
226 rval = kmem_alloc_pageable(kernel_map, &ptr, size, VM_KERN_MEMORY_OSKEXT);
227 if (rval) {
228 ptr = 0;
229 }
230
231 return (void *) ptr;
232 #else
233 return kxld_page_alloc_untracked(size);
234 #endif
235 }
236
/*******************************************************************************
* Releases memory obtained from kxld_alloc().  The kernel's kfree() requires
* the original allocation size; user-space free() does not, hence __unused.
*******************************************************************************/
void
kxld_free(void *ptr, size_t size __unused)
{
#if DEBUG
    ++num_frees;
    bytes_freed += size;
#endif

#if KERNEL
    kfree(ptr, size);
#else
    free(ptr);
#endif
}
253
254 /*******************************************************************************
255 *******************************************************************************/
256 void
257 kxld_page_free_untracked(void *ptr, size_t size __unused)
258 {
259 #if KERNEL
260 size = round_page(size);
261
262 if (size < KALLOC_MAX) {
263 kfree(ptr, size);
264 } else {
265 kmem_free(kernel_map, (vm_offset_t) ptr, size);
266 }
267 #else /* !KERNEL */
268 free(ptr);
269 #endif /* KERNEL */
270 }
271
272
/*******************************************************************************
* Counterpart of kxld_page_alloc(): updates the DEBUG leak-tracking counters
* and then releases the pages.
*******************************************************************************/
void
kxld_page_free(void *ptr, size_t size)
{
#if DEBUG
    ++num_frees;
    bytes_freed += round_page(size);
#endif /* DEBUG */
    kxld_page_free_untracked(ptr, size);
}
284
/*******************************************************************************
* Validates that every structure in a 32-bit Mach-O image -- mach header,
* load commands, segment/section payloads, relocation entries, symbol and
* string tables -- lies entirely within the first 'size' bytes of 'file'.
* In user-space builds it also byte-swaps the image into host order as it
* walks it (opposite-endian files are detected via the MH_CIGAM magic).
* Returns KERN_SUCCESS, or KERN_FAILURE (with a log message) on any
* truncation or malformed header.
*******************************************************************************/
kern_return_t
validate_and_swap_macho_32(u_char *file, u_long size
#if !KERNEL
    , enum NXByteOrder host_order
#endif /* !KERNEL */
    )
{
    kern_return_t rval = KERN_FAILURE;
    struct mach_header *mach_hdr = (struct mach_header *) ((void *) file);
    struct load_command *load_hdr = NULL;
    struct segment_command *seg_hdr = NULL;
    struct section *sects = NULL;
    struct relocation_info *relocs = NULL;
    struct symtab_command *symtab_hdr = NULL;
    struct nlist *symtab = NULL;
    u_long offset = 0;
    u_int cmd = 0;
    u_int cmdsize = 0;
    u_int i = 0;
    u_int j = 0;
#if !KERNEL
    boolean_t swap = FALSE;
#endif /* !KERNEL */

    check(file);
    check(size);

    /* Verify that the file is big enough for the mach header */
    require_action(size >= sizeof(*mach_hdr), finish,
        rval = KERN_FAILURE;
        kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogTruncatedMachO));
    offset = sizeof(*mach_hdr);

#if !KERNEL
    /* Swap the mach header if necessary (MH_CIGAM means the image is in the
     * opposite byte order from the host). */
    if (mach_hdr->magic == MH_CIGAM) {
        swap = TRUE;
        (void) swap_mach_header(mach_hdr, host_order);
    }
#endif /* !KERNEL */

    /* Validate the mach_header's magic number */
    require_action(mach_hdr->magic == MH_MAGIC, finish,
        rval = KERN_FAILURE;
        kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogMalformedMachO
        "Invalid magic number: 0x%x.", mach_hdr->magic));

    /* If in the running kernel, and asked to validate the kernel
     * (which is the only file of type MH_EXECUTE we should ever see),
     * then just assume it's ok or we wouldn't be running to begin with.
     */
#if KERNEL
    if (mach_hdr->filetype == MH_EXECUTE) {
        rval = KERN_SUCCESS;
        goto finish;
    }
#endif /* KERNEL */

    /* Validate and potentially swap the load commands */
    for (i = 0; i < mach_hdr->ncmds; ++i, offset += cmdsize) {
        /* Get the load command and size */
        load_hdr = (struct load_command *) ((void *) (file + offset));
        cmd = load_hdr->cmd;
        cmdsize = load_hdr->cmdsize;

#if !KERNEL
        /* The command header itself is still in file order until the
         * cmd-specific swap below, so read it via explicit swaps. */
        if (swap) {
            cmd = OSSwapInt32(load_hdr->cmd);
            cmdsize = OSSwapInt32(load_hdr->cmdsize);
        }
#endif /* !KERNEL */

        /* Verify that the file is big enough to contain the load command.
         * NOTE(review): offset + cmdsize could wrap for hostile cmdsize
         * values -- TODO confirm callers bound 'size' appropriately. */
        require_action(size >= offset + cmdsize, finish,
            rval = KERN_FAILURE;
            kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogTruncatedMachO));

        switch (cmd) {
        case LC_SEGMENT:
            /* Get and swap the segment header */
            seg_hdr = (struct segment_command *) load_hdr;
#if !KERNEL
            if (swap) {
                swap_segment_command(seg_hdr, host_order);
            }
#endif /* !KERNEL */

            /* Get and swap the section headers (they immediately follow
             * the segment command). */
            sects = (struct section *) &seg_hdr[1];
#if !KERNEL
            if (swap) {
                swap_section(sects, seg_hdr->nsects, host_order);
            }
#endif /* !KERNEL */

            /* Ignore segments with no vm size */
            if (!seg_hdr->vmsize) {
                continue;
            }

            /* Verify that the file is big enough for the segment data. */
            require_action(size >= seg_hdr->fileoff + seg_hdr->filesize, finish,
                rval = KERN_FAILURE;
                kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogTruncatedMachO));

            for (j = 0; j < seg_hdr->nsects; ++j) {
                /* Verify that, if the section is not to be zero filled on
                 * demand, that file is big enough for the section's data.
                 */
                require_action((sects[j].flags & S_ZEROFILL) ||
                    (size >= sects[j].offset + sects[j].size), finish,
                    rval = KERN_FAILURE;
                    kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogTruncatedMachO));

                /* Verify that the file is big enough for the section's
                 * relocation entries.
                 * NOTE(review): nreloc * sizeof(*relocs) could overflow for
                 * hostile values -- TODO confirm input is pre-sanitized. */
                require_action(size >=
                    sects[j].reloff + sects[j].nreloc * sizeof(*relocs), finish,
                    rval = KERN_FAILURE;
                    kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogTruncatedMachO));

                /* Swap the relocation entries */
                relocs = (struct relocation_info *) ((void *) (file + sects[j].reloff));
#if !KERNEL
                if (swap) {
                    swap_relocation_info(relocs, sects[j].nreloc,
                        host_order);
                }
#endif /* !KERNEL */
            }

            break;
        case LC_SYMTAB:
            /* Get and swap the symtab header */
            symtab_hdr = (struct symtab_command *) load_hdr;
#if !KERNEL
            if (swap) {
                swap_symtab_command(symtab_hdr, host_order);
            }
#endif /* !KERNEL */

            /* Verify that the file is big enough for the symbol table */
            require_action(size >=
                symtab_hdr->symoff + symtab_hdr->nsyms * sizeof(*symtab), finish,
                rval = KERN_FAILURE;
                kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogTruncatedMachO));

            /* Verify that the file is big enough for the string table */
            require_action(size >= symtab_hdr->stroff + symtab_hdr->strsize, finish,
                rval = KERN_FAILURE;
                kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogTruncatedMachO));

#if !KERNEL
            /* Swap the symbol table entries */
            symtab = (struct nlist *) ((void *) (file + symtab_hdr->symoff));
            if (swap) {
                swap_nlist(symtab, symtab_hdr->nsyms, host_order);
            }
#endif /* !KERNEL */

            break;
        default:
#if !KERNEL
            /* Swap the load command */
            if (swap) {
                swap_load_command(load_hdr, host_order);
            }
#endif /* !KERNEL */
            break;
        }
    }

    rval = KERN_SUCCESS;

finish:
    return rval;
}
465
/*******************************************************************************
* 64-bit counterpart of validate_and_swap_macho_32(): validates that all
* structures of a 64-bit Mach-O image lie within 'size' bytes of 'file',
* byte-swapping the image into host order as it goes in user-space builds
* (opposite-endian files are detected via the MH_CIGAM_64 magic).  Returns
* KERN_SUCCESS, or KERN_FAILURE (with a log message) on any truncation or
* malformed header.
*******************************************************************************/
kern_return_t
validate_and_swap_macho_64(u_char *file, u_long size
#if !KERNEL
    , enum NXByteOrder host_order
#endif /* !KERNEL */
    )
{
    kern_return_t rval = KERN_FAILURE;
    struct mach_header_64 *mach_hdr = (struct mach_header_64 *) ((void *) file);
    struct load_command *load_hdr = NULL;
    struct segment_command_64 *seg_hdr = NULL;
    struct section_64 *sects = NULL;
    struct relocation_info *relocs = NULL;
    struct symtab_command *symtab_hdr = NULL;
    struct nlist_64 *symtab = NULL;
    u_long offset = 0;
    u_int cmd = 0;
    u_int cmdsize = 0;
    u_int i = 0;
    u_int j = 0;
#if !KERNEL
    boolean_t swap = FALSE;
#endif /* !KERNEL */

    check(file);
    check(size);

    /* Verify that the file is big enough for the mach header */
    require_action(size >= sizeof(*mach_hdr), finish,
        rval = KERN_FAILURE;
        kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogTruncatedMachO));
    offset = sizeof(*mach_hdr);

#if !KERNEL
    /* Swap the mach header if necessary (MH_CIGAM_64 means the image is in
     * the opposite byte order from the host). */
    if (mach_hdr->magic == MH_CIGAM_64) {
        swap = TRUE;
        (void) swap_mach_header_64(mach_hdr, host_order);
    }
#endif /* !KERNEL */

    /* Validate the mach_header's magic number */
    require_action(mach_hdr->magic == MH_MAGIC_64, finish,
        rval = KERN_FAILURE;
        kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogMalformedMachO
        "Invalid magic number: 0x%x.", mach_hdr->magic));

    /* If in the running kernel, and asked to validate the kernel
     * (which is the only file of type MH_EXECUTE we should ever see),
     * then just assume it's ok or we wouldn't be running to begin with.
     */
#if KERNEL
    if (mach_hdr->filetype == MH_EXECUTE) {
        rval = KERN_SUCCESS;
        goto finish;
    }
#endif /* KERNEL */

    /* Validate and potentially swap the load commands */
    for (i = 0; i < mach_hdr->ncmds; ++i, offset += cmdsize) {
        /* Get the load command and size */
        load_hdr = (struct load_command *) ((void *) (file + offset));
        cmd = load_hdr->cmd;
        cmdsize = load_hdr->cmdsize;

#if !KERNEL
        /* The command header itself is still in file order until the
         * cmd-specific swap below, so read it via explicit swaps. */
        if (swap) {
            cmd = OSSwapInt32(load_hdr->cmd);
            cmdsize = OSSwapInt32(load_hdr->cmdsize);
        }
#endif /* !KERNEL */

        /* Verify that the file is big enough to contain the load command.
         * NOTE(review): offset + cmdsize could wrap for hostile cmdsize
         * values -- TODO confirm callers bound 'size' appropriately. */
        require_action(size >= offset + cmdsize, finish,
            rval = KERN_FAILURE;
            kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogTruncatedMachO));
        switch (cmd) {
        case LC_SEGMENT_64:
            /* Get and swap the segment header */
            seg_hdr = (struct segment_command_64 *) ((void *) load_hdr);
#if !KERNEL
            if (swap) {
                swap_segment_command_64(seg_hdr, host_order);
            }
#endif /* !KERNEL */

            /* Get and swap the section headers (they immediately follow
             * the segment command). */
            sects = (struct section_64 *) &seg_hdr[1];
#if !KERNEL
            if (swap) {
                swap_section_64(sects, seg_hdr->nsects, host_order);
            }
#endif /* !KERNEL */

            /* If the segment has no vm footprint, skip it */
            if (!seg_hdr->vmsize) {
                continue;
            }

            /* Verify that the file is big enough for the segment data. */
            require_action(size >= seg_hdr->fileoff + seg_hdr->filesize, finish,
                rval = KERN_FAILURE;
                kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogTruncatedMachO));

            for (j = 0; j < seg_hdr->nsects; ++j) {
                /* Verify that, if the section is not to be zero filled on
                 * demand, that file is big enough for the section's data.
                 */
                require_action((sects[j].flags & S_ZEROFILL) ||
                    (size >= sects[j].offset + sects[j].size), finish,
                    rval = KERN_FAILURE;
                    kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogTruncatedMachO));

                /* Verify that the file is big enough for the section's
                 * relocation entries.
                 * NOTE(review): nreloc * sizeof(*relocs) could overflow for
                 * hostile values -- TODO confirm input is pre-sanitized. */
                require_action(size >=
                    sects[j].reloff + sects[j].nreloc * sizeof(*relocs), finish,
                    rval = KERN_FAILURE;
                    kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogTruncatedMachO));

                /* Swap the relocation entries */
                relocs = (struct relocation_info *) ((void *) (file + sects[j].reloff));
#if !KERNEL
                if (swap) {
                    swap_relocation_info(relocs, sects[j].nreloc,
                        host_order);
                }
#endif /* !KERNEL */
            }

            break;
        case LC_SYMTAB:
            /* Get and swap the symtab header */
            symtab_hdr = (struct symtab_command *) load_hdr;
#if !KERNEL
            if (swap) {
                swap_symtab_command(symtab_hdr, host_order);
            }
#endif /* !KERNEL */

            /* Verify that the file is big enough for the symbol table */
            require_action(size >=
                symtab_hdr->symoff + symtab_hdr->nsyms * sizeof(*symtab), finish,
                rval = KERN_FAILURE;
                kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogTruncatedMachO));

            /* Verify that the file is big enough for the string table */
            require_action(size >= symtab_hdr->stroff + symtab_hdr->strsize, finish,
                rval = KERN_FAILURE;
                kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogTruncatedMachO));

#if !KERNEL
            /* Swap the symbol table entries */
            symtab = (struct nlist_64 *) ((void *) (file + symtab_hdr->symoff));
            if (swap) {
                swap_nlist_64(symtab, symtab_hdr->nsyms, host_order);
            }
#endif /* !KERNEL */

            break;
        default:
#if !KERNEL
            /* Swap the load command */
            if (swap) {
                swap_load_command(load_hdr, host_order);
            }
#endif /* !KERNEL */
            break;
        }
    }

    rval = KERN_SUCCESS;

finish:
    return rval;
}
645
646 #if !KERNEL
647 /*******************************************************************************
648 *******************************************************************************/
649 void
650 unswap_macho(u_char *file, enum NXByteOrder host_order,
651 enum NXByteOrder target_order)
652 {
653 struct mach_header *hdr = (struct mach_header *) ((void *) file);
654
655 if (!hdr) {
656 return;
657 }
658
659 if (hdr->magic == MH_MAGIC) {
660 unswap_macho_32(file, host_order, target_order);
661 } else if (hdr->magic == MH_MAGIC_64) {
662 unswap_macho_64(file, host_order, target_order);
663 }
664 }
665
/*******************************************************************************
* Byte-swaps a linked 32-bit Mach-O image from host order back into
* 'target_order'.  Ordering is critical: within each load command the
* payload (sections, symbols) is swapped BEFORE its header, because the
* header's fields (nsects, symoff, nsyms) must still be readable in host
* order to locate that payload.  The mach header is swapped last for the
* same reason (ncmds drives the loop).
*******************************************************************************/
static void
unswap_macho_32(u_char *file, enum NXByteOrder host_order,
    enum NXByteOrder target_order)
{
    struct mach_header *mach_hdr = (struct mach_header *) ((void *) file);
    struct load_command *load_hdr = NULL;
    struct segment_command *seg_hdr = NULL;
    struct section *sects = NULL;
    struct symtab_command *symtab_hdr = NULL;
    struct nlist *symtab = NULL;
    u_long offset = 0;
    u_int cmd = 0;
    u_int size = 0;
    u_int i = 0;

    check(file);

    /* Already in the requested order: nothing to do. */
    if (target_order == host_order) {
        return;
    }

    offset = sizeof(*mach_hdr);
    for (i = 0; i < mach_hdr->ncmds; ++i, offset += size) {
        /* cmd/size are read while the command is still in host order. */
        load_hdr = (struct load_command *) ((void *) (file + offset));
        cmd = load_hdr->cmd;
        size = load_hdr->cmdsize;

        switch (cmd) {
        case LC_SEGMENT:
            seg_hdr = (struct segment_command *) load_hdr;
            sects = (struct section *) &seg_hdr[1];

            /* We don't need to unswap relocations because this function is
             * called when linking is completed (so there are no relocations).
             */

            swap_section(sects, seg_hdr->nsects, target_order);
            swap_segment_command(seg_hdr, target_order);
            break;
        case LC_SYMTAB:
            symtab_hdr = (struct symtab_command *) load_hdr;
            symtab = (struct nlist*) ((void *) (file + symtab_hdr->symoff));

            swap_nlist(symtab, symtab_hdr->nsyms, target_order);
            swap_symtab_command(symtab_hdr, target_order);

            break;
        default:
            swap_load_command(load_hdr, target_order);
            break;
        }
    }

    (void) swap_mach_header(mach_hdr, target_order);
}
723
/*******************************************************************************
* 64-bit counterpart of unswap_macho_32(): byte-swaps a linked 64-bit Mach-O
* image from host order back into 'target_order'.  As in the 32-bit case,
* each command's payload is swapped before its header (the header's fields
* must remain readable in host order to find the payload), and the mach
* header is swapped last.
*******************************************************************************/
static void
unswap_macho_64(u_char *file, enum NXByteOrder host_order,
    enum NXByteOrder target_order)
{
    struct mach_header_64 *mach_hdr = (struct mach_header_64 *) ((void *) file);
    struct load_command *load_hdr = NULL;
    struct segment_command_64 *seg_hdr = NULL;
    struct section_64 *sects = NULL;
    struct symtab_command *symtab_hdr = NULL;
    struct nlist_64 *symtab = NULL;
    u_long offset = 0;
    u_int cmd = 0;
    u_int size = 0;
    u_int i = 0;

    check(file);

    /* Already in the requested order: nothing to do. */
    if (target_order == host_order) {
        return;
    }

    offset = sizeof(*mach_hdr);
    for (i = 0; i < mach_hdr->ncmds; ++i, offset += size) {
        /* cmd/size are read while the command is still in host order. */
        load_hdr = (struct load_command *) ((void *) (file + offset));
        cmd = load_hdr->cmd;
        size = load_hdr->cmdsize;

        switch (cmd) {
        case LC_SEGMENT_64:
            seg_hdr = (struct segment_command_64 *) ((void *) load_hdr);
            sects = (struct section_64 *) &seg_hdr[1];

            /* We don't need to unswap relocations because this function is
             * called when linking is completed (so there are no relocations).
             */

            swap_section_64(sects, seg_hdr->nsects, target_order);
            swap_segment_command_64(seg_hdr, target_order);
            break;
        case LC_SYMTAB:
            symtab_hdr = (struct symtab_command *) load_hdr;
            symtab = (struct nlist_64 *) ((void *) (file + symtab_hdr->symoff));

            swap_nlist_64(symtab, symtab_hdr->nsyms, target_order);
            swap_symtab_command(symtab_hdr, target_order);

            break;
        default:
            swap_load_command(load_hdr, target_order);
            break;
        }
    }

    (void) swap_mach_header_64(mach_hdr, target_order);
}
781 #endif /* !KERNEL */
782
783 /*******************************************************************************
784 *******************************************************************************/
785 kxld_addr_t
786 kxld_align_address(kxld_addr_t address, u_int align)
787 {
788 kxld_addr_t alignment = (1 << align);
789 kxld_addr_t low_bits = 0;
790
791 if (!align) {
792 return address;
793 }
794
795 low_bits = (address) & (alignment - 1);
796 if (low_bits) {
797 address += (alignment - low_bits);
798 }
799
800 return address;
801 }
802
803 /*******************************************************************************
804 *******************************************************************************/
805 boolean_t
806 kxld_is_32_bit(cpu_type_t cputype)
807 {
808 return !(cputype & CPU_ARCH_ABI64);
809 }
810
/*******************************************************************************
* Borrowed (and slightly modified) the libc implementation for the kernel
* until the kernel has a supported strstr().
* Find the first occurrence of find in s.  Returns NULL when not found;
* returns s unchanged when find is empty or either argument is NULL.
*******************************************************************************/
const char *
kxld_strstr(const char *s, const char *find)
{
#if KERNEL
    char c, sc;
    size_t len;
    if (!s || !find) {
        return s;
    }
    /* c holds the needle's first character; an empty needle matches at s. */
    if ((c = *find++) != 0) {
        len = strlen(find);
        do {
            /* Scan forward until the needle's first character appears... */
            do {
                if ((sc = *s++) == 0) {
                    return NULL;
                }
            } while (sc != c);
            /* ...then compare the rest of the needle in place. */
        } while (strncmp(s, find, len) != 0);
        s--;  /* back up onto the matching first character */
    }
    return s;
#else
    return strstr(s, find);
#endif /* KERNEL */
}
841
/*******************************************************************************
* Logs the DEBUG-build allocation counters (counts, totals, and the implied
* leak).  No-op in non-DEBUG builds.
*******************************************************************************/
void
kxld_print_memory_report(void)
{
#if DEBUG
    /* Guard the average against division by zero when nothing was ever
     * allocated.
     */
    unsigned long average = num_allocations ?
        (bytes_allocated / num_allocations) : 0;

    kxld_log(kKxldLogLinking, kKxldLogExplicit, "kxld memory usage report:\n"
        "\tNumber of allocations: %8lu\n"
        "\tNumber of frees: %8lu\n"
        "\tAverage allocation size: %8lu\n"
        "\tTotal bytes allocated: %8lu\n"
        "\tTotal bytes freed: %8lu\n"
        "\tTotal bytes leaked: %8lu",
        num_allocations, num_frees, average,
        bytes_allocated, bytes_freed, bytes_allocated - bytes_freed);
#endif
}
859
860 /*********************************************************************
861 *********************************************************************/
862 #if !KERNEL
863 boolean_t
864 kxld_set_cross_link_page_size(kxld_size_t target_page_size)
865 {
866 // verify radix 2
867 if ((target_page_size != 0) &&
868 ((target_page_size & (target_page_size - 1)) == 0)) {
869 s_cross_link_enabled = TRUE;
870 s_cross_link_page_size = target_page_size;
871
872 return TRUE;
873 } else {
874 return FALSE;
875 }
876 }
877 #endif /* !KERNEL */
878
879 /*********************************************************************
880 *********************************************************************/
881 kxld_size_t
882 kxld_get_effective_page_size(void)
883 {
884 #if KERNEL
885 return PAGE_SIZE;
886 #else
887 if (s_cross_link_enabled) {
888 return s_cross_link_page_size;
889 } else {
890 return PAGE_SIZE;
891 }
892 #endif /* KERNEL */
893 }
894
895 /*********************************************************************
896 *********************************************************************/
897 kxld_addr_t
898 kxld_round_page_cross_safe(kxld_addr_t offset)
899 {
900 #if KERNEL
901 return round_page(offset);
902 #else
903 // assume s_cross_link_page_size is power of 2
904 if (s_cross_link_enabled) {
905 return (offset + (s_cross_link_page_size - 1)) &
906 (~(s_cross_link_page_size - 1));
907 } else {
908 return round_page(offset);
909 }
910 #endif /* KERNEL */
911 }
912
913 #if SPLIT_KEXTS_DEBUG
914
/* Logs the pre- and post-link layout of a split kext for debugging:
 * executable range and size, linked output range and size, and the vmaddr
 * chosen for each segment. */
void
kxld_show_split_info(splitKextLinkInfo *info)
{
    kxld_log(kKxldLogLinking, kKxldLogErr,
        "splitKextLinkInfo: \n"
        "kextExecutable %p to %p kextSize %lu \n"
        "linkedKext %p to %p linkedKextSize %lu \n"
        "vmaddr_TEXT %p vmaddr_TEXT_EXEC %p "
        "vmaddr_DATA %p vmaddr_DATA_CONST %p "
        "vmaddr_LLVM_COV %p vmaddr_LINKEDIT %p",
        (void *) info->kextExecutable,
        (void *) (info->kextExecutable + info->kextSize),
        info->kextSize,
        (void*) info->linkedKext,
        (void*) (info->linkedKext + info->linkedKextSize),
        info->linkedKextSize,
        (void *) info->vmaddr_TEXT,
        (void *) info->vmaddr_TEXT_EXEC,
        (void *) info->vmaddr_DATA,
        (void *) info->vmaddr_DATA_CONST,
        (void *) info->vmaddr_LLVM_COV,
        (void *) info->vmaddr_LINKEDIT);
}
938
939 boolean_t
940 isTargetKextName(const char * the_name)
941 {
942 if (the_name && 0 == strcmp(the_name, KXLD_TARGET_KEXT)) {
943 return TRUE;
944 }
945 return FALSE;
946 }
947 #endif