/* libkern/kxld/kxld_util.c — from apple/xnu (xnu-7195.101.1) */
1 /*
2 * Copyright (c) 2007-2016 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 #include <stdarg.h>
29 #include <string.h>
30 #include <mach-o/loader.h>
31 #include <mach-o/nlist.h>
32 #include <mach-o/reloc.h>
33 #if KERNEL
34 #include <kern/kalloc.h>
35 #include <libkern/libkern.h>
36 #include <mach/vm_param.h>
37 #include <vm/vm_kern.h>
38 #else
39 #include <stdio.h>
40 #include <stdlib.h>
41 #include <mach/mach_init.h>
42 #include <mach-o/swap.h>
43 #endif
44
45 #define DEBUG_ASSERT_COMPONENT_NAME_STRING "kxld"
46 #include <AssertMacros.h>
47
48 #include "kxld_util.h"
49
50 #if !KERNEL
51 static void unswap_macho_32(u_char *file, enum NXByteOrder host_order,
52 enum NXByteOrder target_order);
53 static void unswap_macho_64(u_char *file, enum NXByteOrder host_order,
54 enum NXByteOrder target_order);
55 #endif /* !KERNEL */
56
57 #if DEBUG
58 static unsigned long num_allocations = 0;
59 static unsigned long num_frees = 0;
60 static unsigned long bytes_allocated = 0;
61 static unsigned long bytes_freed = 0;
62 #endif
63
64 static KXLDLoggingCallback s_logging_callback = NULL;
65 static char s_callback_name[64] = "internal";
66 static void *s_callback_data = NULL;
67
68 #if !KERNEL
69 static boolean_t s_cross_link_enabled = FALSE;
70 /* Can't use PAGE_SIZE here because it is not a compile-time constant.
71 * However from inspection below, s_cross_link_page_size is not used
72 * unless s_cross_link_enabled is TRUE, and s_cross_link_enabled is
73 * only set to TRUE when a client specifies the value. So the
74 * default should never be used in practice,
75 */
76 static kxld_size_t s_cross_link_page_size;
77 #endif
78
79
80 /*******************************************************************************
81 *******************************************************************************/
/* Installs the client-supplied callback through which all kxld_log output is
 * routed.  Passing NULL removes the callback, which silences kxld_log (it
 * returns early when no callback is installed). */
void
kxld_set_logging_callback(KXLDLoggingCallback logging_callback)
{
	s_logging_callback = logging_callback;
}
87
88 /*******************************************************************************
89 *******************************************************************************/
90 void
91 kxld_set_logging_callback_data(const char *name, void *user_data)
92 {
93 if (name) {
94 (void)strlcpy(s_callback_name, name, sizeof(s_callback_name));
95 /* disallow format strings in the kxld logging callback name */
96 for (size_t i = 0; i < sizeof(s_callback_name); i++) {
97 if (s_callback_name[i] == '%') {
98 s_callback_name[i] = '.';
99 }
100 }
101 } else {
102 (void)strlcpy(s_callback_name, "internal", sizeof(s_callback_name));
103 }
104
105 s_callback_data = user_data;
106 }
107
108 /*******************************************************************************
109 *******************************************************************************/
110 void
111 kxld_log(KXLDLogSubsystem subsystem, KXLDLogLevel level,
112 const char *in_format, ...)
113 {
114 char stack_buffer[256];
115 char *alloc_buffer = NULL;
116 char *format = stack_buffer;
117 u_int length = 0;
118 va_list ap;
119
120 if (s_logging_callback) {
121 length = snprintf(stack_buffer, sizeof(stack_buffer), "kxld[%s]: %s",
122 s_callback_name, in_format);
123
124 if (length >= sizeof(stack_buffer)) {
125 length += 1;
126 alloc_buffer = kxld_alloc(length);
127 if (!alloc_buffer) {
128 return;
129 }
130
131 snprintf(alloc_buffer, length, "kxld[%s]: %s",
132 s_callback_name, in_format);
133 format = alloc_buffer;
134 }
135
136 va_start(ap, in_format);
137 s_logging_callback(subsystem, level, format, ap, s_callback_data);
138 va_end(ap);
139
140 if (alloc_buffer) {
141 kxld_free(alloc_buffer, length);
142 }
143 }
144 }
145
/* We'll use kalloc for any page-based allocations under this threshold, and
 * kmem_alloc otherwise.  Parenthesized so the expansion cannot be re-bound by
 * surrounding operators.
 */
#define KALLOC_MAX (16 * 1024)
150
151 /*******************************************************************************
152 *******************************************************************************/
/* Allocates 'size' bytes of zero-filled memory, or returns NULL on failure.
 * In the kernel this is kalloc + bzero; in user space it is calloc.  DEBUG
 * builds also bump the allocation counters on success. */
void *
kxld_calloc(size_t size)
{
	void *result = NULL;

#if KERNEL
	result = kalloc(size);
	if (result) {
		bzero(result, size);
	}
#else
	result = calloc(1, size);
#endif

#if DEBUG
	if (result) {
		++num_allocations;
		bytes_allocated += size;
	}
#endif

	return result;
}
176
/* Allocates 'size' bytes of uninitialized memory (kalloc in the kernel,
 * malloc in user space), or returns NULL on failure.  DEBUG builds track the
 * allocation counters on success. */
void *
kxld_alloc(size_t size)
{
	void *result = NULL;

#if KERNEL
	result = kalloc(size);
#else
	result = malloc(size);
#endif

#if DEBUG
	if (result) {
		++num_allocations;
		bytes_allocated += size;
	}
#endif

	return result;
}
197
198 /*******************************************************************************
199 *******************************************************************************/
/* Allocates whole pages (the request is rounded up to a page multiple) of
 * zero-filled memory without touching the DEBUG counters.  In the kernel,
 * requests under KALLOC_MAX come from kalloc and larger ones from kmem_alloc;
 * in user space everything comes from calloc.  Returns NULL on failure. */
void *
kxld_page_alloc_untracked(size_t size)
{
	void *result = NULL;
#if KERNEL
	kern_return_t kr = 0;
	vm_offset_t address = 0;
#endif /* KERNEL */

	size = round_page(size);

#if KERNEL
	if (size < KALLOC_MAX) {
		result = kalloc(size);
	} else {
		kr = kmem_alloc(kernel_map, &address, size, VM_KERN_MEMORY_OSKEXT);
		if (!kr) {
			result = (void *) address;
		}
	}
	if (result) {
		bzero(result, size);
	}
#else /* !KERNEL */
	result = calloc(1, size);
#endif /* KERNEL */

	return result;
}
229
230 /*******************************************************************************
231 *******************************************************************************/
232 void *
233 kxld_page_alloc(size_t size)
234 {
235 void * ptr = NULL;
236
237 ptr = kxld_page_alloc_untracked(size);
238 #if DEBUG
239 if (ptr) {
240 ++num_allocations;
241 bytes_allocated += round_page(size);
242 }
243 #endif /* DEBUG */
244
245 return ptr;
246 }
247
248 /*******************************************************************************
249 *******************************************************************************/
250 void *
251 kxld_alloc_pageable(size_t size)
252 {
253 size = round_page(size);
254
255 #if KERNEL
256 kern_return_t rval = 0;
257 vm_offset_t ptr = 0;
258
259 rval = kmem_alloc_pageable(kernel_map, &ptr, size, VM_KERN_MEMORY_OSKEXT);
260 if (rval) {
261 ptr = 0;
262 }
263
264 return (void *) ptr;
265 #else
266 return kxld_page_alloc_untracked(size);
267 #endif
268 }
269
270 /*******************************************************************************
271 *******************************************************************************/
/*******************************************************************************
* Releases memory obtained from kxld_alloc/kxld_calloc.  In kernel builds
* 'size' must match the original allocation (kfree requires it); in user
* space it is unused because free() tracks sizes itself.  DEBUG builds update
* the free counters unconditionally (no NULL check, mirroring the allocators).
*******************************************************************************/
void
kxld_free(void *ptr, size_t size __unused)
{
#if DEBUG
	++num_frees;
	bytes_freed += size;
#endif

#if KERNEL
	kfree(ptr, size);
#else
	free(ptr);
#endif
}
286
287 /*******************************************************************************
288 *******************************************************************************/
/*******************************************************************************
* Frees a block from kxld_page_alloc_untracked without touching the DEBUG
* counters.  In the kernel the size is re-rounded to pages and the block is
* returned to kalloc or kmem_free based on the same KALLOC_MAX threshold used
* at allocation time, so alloc/free must agree on the requested size.
*******************************************************************************/
void
kxld_page_free_untracked(void *ptr, size_t size __unused)
{
#if KERNEL
	size = round_page(size);

	if (size < KALLOC_MAX) {
		kfree(ptr, size);
	} else {
		kmem_free(kernel_map, (vm_offset_t) ptr, size);
	}
#else /* !KERNEL */
	free(ptr);
#endif /* KERNEL */
}
304
305
306 /*******************************************************************************
307 *******************************************************************************/
/*******************************************************************************
* Counter-tracking counterpart of kxld_page_alloc: updates the DEBUG free
* counters with the page-rounded size, then delegates the actual release to
* kxld_page_free_untracked.
*******************************************************************************/
void
kxld_page_free(void *ptr, size_t size)
{
#if DEBUG
	++num_frees;
	bytes_freed += round_page(size);
#endif /* DEBUG */
	kxld_page_free_untracked(ptr, size);
}
317
318 /*******************************************************************************
319 *******************************************************************************/
/*******************************************************************************
* Validates that a 32-bit Mach-O image of 'size' bytes is internally
* consistent: every load command, segment/section payload, relocation table,
* symbol table, and string table is bounds-checked against the file length
* before anything dereferences it.  In user-space builds the image is also
* byte-swapped in place into 'host_order' while it is walked (detected via
* the MH_CIGAM magic).  Returns KERN_SUCCESS on a valid file, KERN_FAILURE
* otherwise; each failure path logs a diagnostic via kxld_log.
*******************************************************************************/
kern_return_t
validate_and_swap_macho_32(u_char *file, u_long size
#if !KERNEL
    , enum NXByteOrder host_order
#endif /* !KERNEL */
    )
{
	kern_return_t rval = KERN_FAILURE;
	struct mach_header *mach_hdr = (struct mach_header *) ((void *) file);
	struct load_command *load_hdr = NULL;
	struct segment_command *seg_hdr = NULL;
	struct section *sects = NULL;
	struct relocation_info *relocs = NULL;
	struct symtab_command *symtab_hdr = NULL;
	struct nlist *symtab = NULL;
	u_long offset = 0;
	u_int cmd = 0;
	u_int cmdsize = 0;
	u_int i = 0;
	u_int j = 0;
#if !KERNEL
	boolean_t swap = FALSE;
#endif /* !KERNEL */

	check(file);
	check(size);

	/* Verify that the file is big enough for the mach header */
	require_action(size >= sizeof(*mach_hdr), finish,
	    rval = KERN_FAILURE;
	    kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogTruncatedMachO));
	offset = sizeof(*mach_hdr);

#if !KERNEL
	/* Swap the mach header if necessary */
	if (mach_hdr->magic == MH_CIGAM) {
		swap = TRUE;
		(void) swap_mach_header(mach_hdr, host_order);
	}
#endif /* !KERNEL */

	/* Validate the mach_header's magic number */
	require_action(mach_hdr->magic == MH_MAGIC, finish,
	    rval = KERN_FAILURE;
	    kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogMalformedMachO
	    "Invalid magic number: 0x%x.", mach_hdr->magic));

	/* If in the running kernel, and asked to validate the kernel
	 * (which is the only file of type MH_EXECUTE we should ever see),
	 * then just assume it's ok or we wouldn't be running to begin with.
	 */
#if KERNEL
	if (mach_hdr->filetype == MH_EXECUTE) {
		rval = KERN_SUCCESS;
		goto finish;
	}
#endif /* KERNEL */

	/* Validate and potentially swap the load commands */
	for (i = 0; i < mach_hdr->ncmds; ++i, offset += cmdsize) {
		/* Get the load command and size */
		load_hdr = (struct load_command *) ((void *) (file + offset));
		cmd = load_hdr->cmd;
		cmdsize = load_hdr->cmdsize;

#if !KERNEL
		if (swap) {
			cmd = OSSwapInt32(load_hdr->cmd);
			cmdsize = OSSwapInt32(load_hdr->cmdsize);
		}
#endif /* !KERNEL */

		/* Verify that the file is big enough to contain the load command */
		/* NOTE(review): offset + cmdsize could wrap u_long on malformed
		 * input, bypassing this check -- TODO confirm inputs are
		 * pre-screened by the caller. */
		require_action(size >= offset + cmdsize, finish,
		    rval = KERN_FAILURE;
		    kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogTruncatedMachO));

		switch (cmd) {
		case LC_SEGMENT:
			/* Get and swap the segment header */
			seg_hdr = (struct segment_command *) load_hdr;
#if !KERNEL
			if (swap) {
				swap_segment_command(seg_hdr, host_order);
			}
#endif /* !KERNEL */

			/* Get and swap the section headers */
			sects = (struct section *) &seg_hdr[1];
#if !KERNEL
			if (swap) {
				swap_section(sects, seg_hdr->nsects, host_order);
			}
#endif /* !KERNEL */

			/* Ignore segments with no vm size */
			if (!seg_hdr->vmsize) {
				continue;
			}

			/* Verify that the file is big enough for the segment data. */
			require_action(size >= seg_hdr->fileoff + seg_hdr->filesize, finish,
			    rval = KERN_FAILURE;
			    kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogTruncatedMachO));

			for (j = 0; j < seg_hdr->nsects; ++j) {
				/* Verify that, if the section is not to be zero filled on
				 * demand, that file is big enough for the section's data.
				 */
				/* NOTE(review): the offset + size and reloff + nreloc
				 * sums below are u_int32 arithmetic and could also
				 * wrap -- same caveat as above. */
				require_action((sects[j].flags & S_ZEROFILL) ||
				    (size >= sects[j].offset + sects[j].size), finish,
				    rval = KERN_FAILURE;
				    kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogTruncatedMachO));

				/* Verify that the file is big enough for the section's
				 * relocation entries.
				 */
				require_action(size >=
				    sects[j].reloff + sects[j].nreloc * sizeof(*relocs), finish,
				    rval = KERN_FAILURE;
				    kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogTruncatedMachO));

				/* Swap the relocation entries */
				relocs = (struct relocation_info *) ((void *) (file + sects[j].reloff));
#if !KERNEL
				if (swap) {
					swap_relocation_info(relocs, sects[j].nreloc,
					    host_order);
				}
#endif /* !KERNEL */
			}

			break;
		case LC_SYMTAB:
			/* Get and swap the symtab header */
			symtab_hdr = (struct symtab_command *) load_hdr;
#if !KERNEL
			if (swap) {
				swap_symtab_command(symtab_hdr, host_order);
			}
#endif /* !KERNEL */

			/* Verify that the file is big enough for the symbol table */
			require_action(size >=
			    symtab_hdr->symoff + symtab_hdr->nsyms * sizeof(*symtab), finish,
			    rval = KERN_FAILURE;
			    kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogTruncatedMachO));

			/* Verify that the file is big enough for the string table */
			require_action(size >= symtab_hdr->stroff + symtab_hdr->strsize, finish,
			    rval = KERN_FAILURE;
			    kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogTruncatedMachO));

#if !KERNEL
			/* Swap the symbol table entries */
			symtab = (struct nlist *) ((void *) (file + symtab_hdr->symoff));
			if (swap) {
				swap_nlist(symtab, symtab_hdr->nsyms, host_order);
			}
#endif /* !KERNEL */

			break;
		default:
#if !KERNEL
			/* Swap the load command */
			if (swap) {
				swap_load_command(load_hdr, host_order);
			}
#endif /* !KERNEL */
			break;
		}
	}

	rval = KERN_SUCCESS;

finish:
	return rval;
}
498
499 /*******************************************************************************
500 *******************************************************************************/
/*******************************************************************************
* 64-bit counterpart of validate_and_swap_macho_32: bounds-checks every load
* command, segment/section payload, relocation table, symbol table, and
* string table of a 64-bit Mach-O image against 'size' before touching it.
* User-space builds also byte-swap the image in place into 'host_order'
* (detected via MH_CIGAM_64).  Returns KERN_SUCCESS on a valid file,
* KERN_FAILURE otherwise, logging a diagnostic on each failure path.
*******************************************************************************/
kern_return_t
validate_and_swap_macho_64(u_char *file, u_long size
#if !KERNEL
    , enum NXByteOrder host_order
#endif /* !KERNEL */
    )
{
	kern_return_t rval = KERN_FAILURE;
	struct mach_header_64 *mach_hdr = (struct mach_header_64 *) ((void *) file);
	struct load_command *load_hdr = NULL;
	struct segment_command_64 *seg_hdr = NULL;
	struct section_64 *sects = NULL;
	struct relocation_info *relocs = NULL;
	struct symtab_command *symtab_hdr = NULL;
	struct nlist_64 *symtab = NULL;
	u_long offset = 0;
	u_int cmd = 0;
	u_int cmdsize = 0;
	u_int i = 0;
	u_int j = 0;
#if !KERNEL
	boolean_t swap = FALSE;
#endif /* !KERNEL */

	check(file);
	check(size);

	/* Verify that the file is big enough for the mach header */
	require_action(size >= sizeof(*mach_hdr), finish,
	    rval = KERN_FAILURE;
	    kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogTruncatedMachO));
	offset = sizeof(*mach_hdr);

#if !KERNEL
	/* Swap the mach header if necessary */
	if (mach_hdr->magic == MH_CIGAM_64) {
		swap = TRUE;
		(void) swap_mach_header_64(mach_hdr, host_order);
	}
#endif /* !KERNEL */

	/* Validate the mach_header's magic number */
	require_action(mach_hdr->magic == MH_MAGIC_64, finish,
	    rval = KERN_FAILURE;
	    kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogMalformedMachO
	    "Invalid magic number: 0x%x.", mach_hdr->magic));

	/* If in the running kernel, and asked to validate the kernel
	 * (which is the only file of type MH_EXECUTE we should ever see),
	 * then just assume it's ok or we wouldn't be running to begin with.
	 */
#if KERNEL
	if (mach_hdr->filetype == MH_EXECUTE) {
		rval = KERN_SUCCESS;
		goto finish;
	}
#endif /* KERNEL */

	/* Validate and potentially swap the load commands */
	for (i = 0; i < mach_hdr->ncmds; ++i, offset += cmdsize) {
		/* Get the load command and size */
		load_hdr = (struct load_command *) ((void *) (file + offset));
		cmd = load_hdr->cmd;
		cmdsize = load_hdr->cmdsize;

#if !KERNEL
		if (swap) {
			cmd = OSSwapInt32(load_hdr->cmd);
			cmdsize = OSSwapInt32(load_hdr->cmdsize);
		}
#endif /* !KERNEL */

		/* Verify that the file is big enough to contain the load command */
		/* NOTE(review): offset + cmdsize (and the fileoff/offset sums
		 * below) could wrap on malformed input, bypassing the check --
		 * TODO confirm inputs are pre-screened by the caller. */
		require_action(size >= offset + cmdsize, finish,
		    rval = KERN_FAILURE;
		    kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogTruncatedMachO));
		switch (cmd) {
		case LC_SEGMENT_64:
			/* Get and swap the segment header */
			seg_hdr = (struct segment_command_64 *) ((void *) load_hdr);
#if !KERNEL
			if (swap) {
				swap_segment_command_64(seg_hdr, host_order);
			}
#endif /* !KERNEL */

			/* Get and swap the section headers */
			sects = (struct section_64 *) &seg_hdr[1];
#if !KERNEL
			if (swap) {
				swap_section_64(sects, seg_hdr->nsects, host_order);
			}
#endif /* !KERNEL */

			/* If the segment has no vm footprint, skip it */
			if (!seg_hdr->vmsize) {
				continue;
			}

			/* Verify that the file is big enough for the segment data. */
			require_action(size >= seg_hdr->fileoff + seg_hdr->filesize, finish,
			    rval = KERN_FAILURE;
			    kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogTruncatedMachO));

			for (j = 0; j < seg_hdr->nsects; ++j) {
				/* Verify that, if the section is not to be zero filled on
				 * demand, that file is big enough for the section's data.
				 */
				require_action((sects[j].flags & S_ZEROFILL) ||
				    (size >= sects[j].offset + sects[j].size), finish,
				    rval = KERN_FAILURE;
				    kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogTruncatedMachO));

				/* Verify that the file is big enough for the section's
				 * relocation entries.
				 */
				require_action(size >=
				    sects[j].reloff + sects[j].nreloc * sizeof(*relocs), finish,
				    rval = KERN_FAILURE;
				    kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogTruncatedMachO));

				/* Swap the relocation entries */
				relocs = (struct relocation_info *) ((void *) (file + sects[j].reloff));
#if !KERNEL
				if (swap) {
					swap_relocation_info(relocs, sects[j].nreloc,
					    host_order);
				}
#endif /* !KERNEL */
			}

			break;
		case LC_SYMTAB:
			/* Get and swap the symtab header */
			symtab_hdr = (struct symtab_command *) load_hdr;
#if !KERNEL
			if (swap) {
				swap_symtab_command(symtab_hdr, host_order);
			}
#endif /* !KERNEL */

			/* Verify that the file is big enough for the symbol table */
			require_action(size >=
			    symtab_hdr->symoff + symtab_hdr->nsyms * sizeof(*symtab), finish,
			    rval = KERN_FAILURE;
			    kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogTruncatedMachO));

			/* Verify that the file is big enough for the string table */
			require_action(size >= symtab_hdr->stroff + symtab_hdr->strsize, finish,
			    rval = KERN_FAILURE;
			    kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogTruncatedMachO));

#if !KERNEL
			/* Swap the symbol table entries */
			symtab = (struct nlist_64 *) ((void *) (file + symtab_hdr->symoff));
			if (swap) {
				swap_nlist_64(symtab, symtab_hdr->nsyms, host_order);
			}
#endif /* !KERNEL */

			break;
		default:
#if !KERNEL
			/* Swap the load command */
			if (swap) {
				swap_load_command(load_hdr, host_order);
			}
#endif /* !KERNEL */
			break;
		}
	}

	rval = KERN_SUCCESS;

finish:
	return rval;
}
678
679 #if !KERNEL
680 /*******************************************************************************
681 *******************************************************************************/
682 void
683 unswap_macho(u_char *file, enum NXByteOrder host_order,
684 enum NXByteOrder target_order)
685 {
686 struct mach_header *hdr = (struct mach_header *) ((void *) file);
687
688 if (!hdr) {
689 return;
690 }
691
692 if (hdr->magic == MH_MAGIC) {
693 unswap_macho_32(file, host_order, target_order);
694 } else if (hdr->magic == MH_MAGIC_64) {
695 unswap_macho_64(file, host_order, target_order);
696 }
697 }
698
699 /*******************************************************************************
700 *******************************************************************************/
/*******************************************************************************
* Byte-swaps an in-memory, host-order 32-bit Mach-O back to 'target_order'.
* No-op when the orders already match.  The swap order is load-bearing: each
* header's fields are read (ncmds, cmdsize, nsects, symoff, nsyms) while the
* header is still in host order, and only then is the header itself swapped;
* the mach header is swapped last for the same reason.
*******************************************************************************/
static void
unswap_macho_32(u_char *file, enum NXByteOrder host_order,
    enum NXByteOrder target_order)
{
	struct mach_header *mach_hdr = (struct mach_header *) ((void *) file);
	struct load_command *load_hdr = NULL;
	struct segment_command *seg_hdr = NULL;
	struct section *sects = NULL;
	struct symtab_command *symtab_hdr = NULL;
	struct nlist *symtab = NULL;
	u_long offset = 0;
	u_int cmd = 0;
	u_int size = 0;
	u_int i = 0;

	check(file);

	if (target_order == host_order) {
		return;
	}

	offset = sizeof(*mach_hdr);
	for (i = 0; i < mach_hdr->ncmds; ++i, offset += size) {
		load_hdr = (struct load_command *) ((void *) (file + offset));
		cmd = load_hdr->cmd;
		size = load_hdr->cmdsize;

		switch (cmd) {
		case LC_SEGMENT:
			seg_hdr = (struct segment_command *) load_hdr;
			sects = (struct section *) &seg_hdr[1];

			/* We don't need to unswap relocations because this function is
			 * called when linking is completed (so there are no relocations).
			 */

			/* Sections first: seg_hdr->nsects must still be host order. */
			swap_section(sects, seg_hdr->nsects, target_order);
			swap_segment_command(seg_hdr, target_order);
			break;
		case LC_SYMTAB:
			symtab_hdr = (struct symtab_command *) load_hdr;
			symtab = (struct nlist*) ((void *) (file + symtab_hdr->symoff));

			/* Symbols first: symoff/nsyms must still be host order. */
			swap_nlist(symtab, symtab_hdr->nsyms, target_order);
			swap_symtab_command(symtab_hdr, target_order);

			break;
		default:
			swap_load_command(load_hdr, target_order);
			break;
		}
	}

	(void) swap_mach_header(mach_hdr, target_order);
}
756
757 /*******************************************************************************
758 *******************************************************************************/
/*******************************************************************************
* 64-bit counterpart of unswap_macho_32: byte-swaps an in-memory, host-order
* 64-bit Mach-O back to 'target_order'.  No-op when the orders match.  As in
* the 32-bit version, each header's fields are read before that header is
* swapped, and the mach header is swapped last so ncmds stays readable.
*******************************************************************************/
static void
unswap_macho_64(u_char *file, enum NXByteOrder host_order,
    enum NXByteOrder target_order)
{
	struct mach_header_64 *mach_hdr = (struct mach_header_64 *) ((void *) file);
	struct load_command *load_hdr = NULL;
	struct segment_command_64 *seg_hdr = NULL;
	struct section_64 *sects = NULL;
	struct symtab_command *symtab_hdr = NULL;
	struct nlist_64 *symtab = NULL;
	u_long offset = 0;
	u_int cmd = 0;
	u_int size = 0;
	u_int i = 0;

	check(file);

	if (target_order == host_order) {
		return;
	}

	offset = sizeof(*mach_hdr);
	for (i = 0; i < mach_hdr->ncmds; ++i, offset += size) {
		load_hdr = (struct load_command *) ((void *) (file + offset));
		cmd = load_hdr->cmd;
		size = load_hdr->cmdsize;

		switch (cmd) {
		case LC_SEGMENT_64:
			seg_hdr = (struct segment_command_64 *) ((void *) load_hdr);
			sects = (struct section_64 *) &seg_hdr[1];

			/* We don't need to unswap relocations because this function is
			 * called when linking is completed (so there are no relocations).
			 */

			/* Sections first: seg_hdr->nsects must still be host order. */
			swap_section_64(sects, seg_hdr->nsects, target_order);
			swap_segment_command_64(seg_hdr, target_order);
			break;
		case LC_SYMTAB:
			symtab_hdr = (struct symtab_command *) load_hdr;
			symtab = (struct nlist_64 *) ((void *) (file + symtab_hdr->symoff));

			/* Symbols first: symoff/nsyms must still be host order. */
			swap_nlist_64(symtab, symtab_hdr->nsyms, target_order);
			swap_symtab_command(symtab_hdr, target_order);

			break;
		default:
			swap_load_command(load_hdr, target_order);
			break;
		}
	}

	(void) swap_mach_header_64(mach_hdr, target_order);
}
814 #endif /* !KERNEL */
815
816 /*******************************************************************************
817 *******************************************************************************/
818 kxld_addr_t
819 kxld_align_address(kxld_addr_t address, u_int align)
820 {
821 kxld_addr_t alignment = (1 << align);
822 kxld_addr_t low_bits = 0;
823
824 if (!align) {
825 return address;
826 }
827
828 low_bits = (address) & (alignment - 1);
829 if (low_bits) {
830 address += (alignment - low_bits);
831 }
832
833 return address;
834 }
835
836 /*******************************************************************************
837 *******************************************************************************/
838 boolean_t
839 kxld_is_32_bit(cpu_type_t cputype)
840 {
841 return !(cputype & CPU_ARCH_ABI64);
842 }
843
844 /*******************************************************************************
845 *******************************************************************************/
/*******************************************************************************
* Logs a summary of kxld's allocation counters (maintained by kxld_alloc,
* kxld_free, and friends).  Compiled to a no-op outside DEBUG builds.
*******************************************************************************/
void
kxld_print_memory_report(void)
{
#if DEBUG
	/* Guard the average: the original divided by num_allocations even when
	 * no allocation had been recorded, which is a division by zero. */
	unsigned long average = num_allocations ?
	    (bytes_allocated / num_allocations) : 0;

	kxld_log(kKxldLogLinking, kKxldLogExplicit, "kxld memory usage report:\n"
	    "\tNumber of allocations: %8lu\n"
	    "\tNumber of frees: %8lu\n"
	    "\tAverage allocation size: %8lu\n"
	    "\tTotal bytes allocated: %8lu\n"
	    "\tTotal bytes freed: %8lu\n"
	    "\tTotal bytes leaked: %8lu",
	    num_allocations, num_frees, average,
	    bytes_allocated, bytes_freed, bytes_allocated - bytes_freed);
#endif
}
861
862 /*********************************************************************
863 *********************************************************************/
864 #if !KERNEL
865 boolean_t
866 kxld_set_cross_link_page_size(kxld_size_t target_page_size)
867 {
868 // verify radix 2
869 if ((target_page_size != 0) &&
870 ((target_page_size & (target_page_size - 1)) == 0)) {
871 s_cross_link_enabled = TRUE;
872 s_cross_link_page_size = target_page_size;
873
874 return TRUE;
875 } else {
876 return FALSE;
877 }
878 }
879 #endif /* !KERNEL */
880
881 /*********************************************************************
882 *********************************************************************/
883 kxld_size_t
884 kxld_get_effective_page_size(void)
885 {
886 #if KERNEL
887 return PAGE_SIZE;
888 #else
889 if (s_cross_link_enabled) {
890 return s_cross_link_page_size;
891 } else {
892 return PAGE_SIZE;
893 }
894 #endif /* KERNEL */
895 }
896
897 /*********************************************************************
898 *********************************************************************/
899 kxld_addr_t
900 kxld_round_page_cross_safe(kxld_addr_t offset)
901 {
902 #if KERNEL
903 return round_page(offset);
904 #else
905 // assume s_cross_link_page_size is power of 2
906 if (s_cross_link_enabled) {
907 return (offset + (s_cross_link_page_size - 1)) &
908 (~(s_cross_link_page_size - 1));
909 } else {
910 return round_page(offset);
911 }
912 #endif /* KERNEL */
913 }
914
915 #if SPLIT_KEXTS_DEBUG
916
/* Dumps every field of a splitKextLinkInfo -- the source and linked buffer
 * ranges plus the per-segment link-time addresses -- through kxld_log at
 * error level, for split-kext debugging. */
void
kxld_show_split_info(splitKextLinkInfo *info)
{
	kxld_log(kKxldLogLinking, kKxldLogErr,
	    "splitKextLinkInfo: \n"
	    "kextExecutable %p to %p kextSize %lu \n"
	    "linkedKext %p to %p linkedKextSize %lu \n"
	    "vmaddr_TEXT %p vmaddr_TEXT_EXEC %p "
	    "vmaddr_DATA %p vmaddr_DATA_CONST %p "
	    "vmaddr_LLVM_COV %p vmaddr_LINKEDIT %p",
	    (void *) info->kextExecutable,
	    (void *) (info->kextExecutable + info->kextSize),
	    info->kextSize,
	    (void*) info->linkedKext,
	    (void*) (info->linkedKext + info->linkedKextSize),
	    info->linkedKextSize,
	    (void *) info->vmaddr_TEXT,
	    (void *) info->vmaddr_TEXT_EXEC,
	    (void *) info->vmaddr_DATA,
	    (void *) info->vmaddr_DATA_CONST,
	    (void *) info->vmaddr_LLVM_COV,
	    (void *) info->vmaddr_LINKEDIT);
}
940
941 boolean_t
942 isTargetKextName(const char * the_name)
943 {
944 if (the_name && 0 == strcmp(the_name, KXLD_TARGET_KEXT)) {
945 return TRUE;
946 }
947 return FALSE;
948 }
949 #endif