libkern/kxld/kxld_util.c (apple/xnu, tag xnu-3789.70.16)
/*
 * Copyright (c) 2007-2016 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <stdarg.h>
#include <string.h>
#include <mach-o/loader.h>
#include <mach-o/nlist.h>
#include <mach-o/reloc.h>
#if KERNEL
    #include <kern/kalloc.h>
    #include <libkern/libkern.h>
    #include <mach/vm_param.h>
    #include <vm/vm_kern.h>
#else
    #include <stdio.h>
    #include <stdlib.h>
    #include <mach/mach_init.h>
    #include <mach-o/swap.h>
#endif

#define DEBUG_ASSERT_COMPONENT_NAME_STRING "kxld"
#include <AssertMacros.h>

#include "kxld_util.h"

#if !KERNEL
static void unswap_macho_32(u_char *file, enum NXByteOrder host_order,
    enum NXByteOrder target_order);
static void unswap_macho_64(u_char *file, enum NXByteOrder host_order,
    enum NXByteOrder target_order);
#endif /* !KERNEL */

#if DEBUG
static unsigned long num_allocations = 0;
static unsigned long num_frees = 0;
static unsigned long bytes_allocated = 0;
static unsigned long bytes_freed = 0;
#endif

static KXLDLoggingCallback s_logging_callback = NULL;
static char s_callback_name[64] = "internal";
static void *s_callback_data = NULL;

#if !KERNEL
static boolean_t s_cross_link_enabled = FALSE;
static kxld_size_t s_cross_link_page_size = PAGE_SIZE;
#endif


/*******************************************************************************
*******************************************************************************/
void
kxld_set_logging_callback(KXLDLoggingCallback logging_callback)
{
    s_logging_callback = logging_callback;
}

/*******************************************************************************
*******************************************************************************/
void
kxld_set_logging_callback_data(const char *name, void *user_data)
{
    if (name) {
        (void)strlcpy(s_callback_name, name, sizeof(s_callback_name));
        /* disallow format strings in the kxld logging callback name */
        for (size_t i = 0; i < sizeof(s_callback_name); i++) {
            if (s_callback_name[i] == '%') {
                s_callback_name[i] = '.';
            }
        }
    } else {
        (void)strlcpy(s_callback_name, "internal", sizeof(s_callback_name));
    }

    s_callback_data = user_data;
}

/*******************************************************************************
*******************************************************************************/
void
kxld_log(KXLDLogSubsystem subsystem, KXLDLogLevel level,
    const char *in_format, ...)
{
    char stack_buffer[256];
    char *alloc_buffer = NULL;
    char *format = stack_buffer;
    u_int length = 0;
    va_list ap;

    if (s_logging_callback) {

        length = snprintf(stack_buffer, sizeof(stack_buffer), "kxld[%s]: %s",
            s_callback_name, in_format);

        if (length >= sizeof(stack_buffer)) {
            length += 1;
            alloc_buffer = kxld_alloc(length);
            if (!alloc_buffer) return;

            snprintf(alloc_buffer, length, "kxld[%s]: %s",
                s_callback_name, in_format);
            format = alloc_buffer;
        }

        va_start(ap, in_format);
        s_logging_callback(subsystem, level, format, ap, s_callback_data);
        va_end(ap);

        if (alloc_buffer) {
            kxld_free(alloc_buffer, length);
        }
    }
}
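
/* Illustrative only, not part of the original file: a minimal sketch of how a
 * user-space client of libkxld might wire up the logging hooks above. The
 * callback name "my_logger" and the identifier string are hypothetical; the
 * parameter order matches the s_logging_callback() invocation in kxld_log(),
 * and vfprintf()/stderr come from <stdio.h>, which is already included in the
 * !KERNEL build.
 *
 *   static void
 *   my_logger(KXLDLogSubsystem sys, KXLDLogLevel level, const char *format,
 *       va_list ap, void *user_data)
 *   {
 *       (void) sys; (void) level; (void) user_data;
 *       vfprintf(stderr, format, ap);
 *       fputc('\n', stderr);
 *   }
 *
 *   kxld_set_logging_callback(my_logger);
 *   kxld_set_logging_callback_data("com.example.driver", NULL);
 *   kxld_log(kKxldLogLinking, kKxldLogErr, "undefined symbols: %u", 2u);
 */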

/* We'll use kalloc for any page-based allocations under this threshold, and
 * kmem_alloc otherwise.
 */
#define KALLOC_MAX 16 * 1024
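
/* Illustrative only, not part of the original file: with 4 KiB pages, a 4 KiB
 * request stays below KALLOC_MAX and is served by kalloc(), while a 64 KiB
 * request goes through kmem_alloc() instead; either way the size is first
 * rounded up to a whole number of pages (see kxld_page_alloc_untracked()
 * below).
 */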

/*******************************************************************************
*******************************************************************************/
void *
kxld_alloc(size_t size)
{
    void * ptr = NULL;

#if KERNEL
    ptr = kalloc(size);
#else
    ptr = malloc(size);
#endif

#if DEBUG
    if (ptr) {
        ++num_allocations;
        bytes_allocated += size;
    }
#endif

    return ptr;
}

/*******************************************************************************
*******************************************************************************/
void *
kxld_page_alloc_untracked(size_t size)
{
    void * ptr = NULL;
#if KERNEL
    kern_return_t rval = 0;
    vm_offset_t addr = 0;
#endif /* KERNEL */

    size = round_page(size);

#if KERNEL
    if (size < KALLOC_MAX) {
        ptr = kalloc(size);
    } else {
        rval = kmem_alloc(kernel_map, &addr, size, VM_KERN_MEMORY_OSKEXT);
        if (!rval) ptr = (void *) addr;
    }
#else /* !KERNEL */
    ptr = malloc(size);
#endif /* KERNEL */

    return ptr;
}

/*******************************************************************************
*******************************************************************************/
void *
kxld_page_alloc(size_t size)
{
    void * ptr = NULL;

    ptr = kxld_page_alloc_untracked(size);
#if DEBUG
    if (ptr) {
        ++num_allocations;
        bytes_allocated += round_page(size);
    }
#endif /* DEBUG */

    return ptr;
}

/*******************************************************************************
*******************************************************************************/
void *
kxld_alloc_pageable(size_t size)
{
    size = round_page(size);

#if KERNEL
    kern_return_t rval = 0;
    vm_offset_t ptr = 0;

    rval = kmem_alloc_pageable(kernel_map, &ptr, size, VM_KERN_MEMORY_OSKEXT);
    if (rval) ptr = 0;

    return (void *) ptr;
#else
    return kxld_page_alloc_untracked(size);
#endif
}

/*******************************************************************************
*******************************************************************************/
void
kxld_free(void *ptr, size_t size __unused)
{
#if DEBUG
    ++num_frees;
    bytes_freed += size;
#endif

#if KERNEL
    kfree(ptr, size);
#else
    free(ptr);
#endif
}

/*******************************************************************************
*******************************************************************************/
void
kxld_page_free_untracked(void *ptr, size_t size __unused)
{
#if KERNEL
    size = round_page(size);

    if (size < KALLOC_MAX) {
        kfree(ptr, size);
    } else {
        kmem_free(kernel_map, (vm_offset_t) ptr, size);
    }
#else /* !KERNEL */
    free(ptr);
#endif /* KERNEL */
}


/*******************************************************************************
*******************************************************************************/
void
kxld_page_free(void *ptr, size_t size)
{
#if DEBUG
    ++num_frees;
    bytes_freed += round_page(size);
#endif /* DEBUG */
    kxld_page_free_untracked(ptr, size);
}

/*******************************************************************************
*******************************************************************************/
kern_return_t
validate_and_swap_macho_32(u_char *file, u_long size
#if !KERNEL
    , enum NXByteOrder host_order
#endif /* !KERNEL */
    )
{
    kern_return_t rval = KERN_FAILURE;
    struct mach_header *mach_hdr = (struct mach_header *) ((void *) file);
    struct load_command *load_hdr = NULL;
    struct segment_command *seg_hdr = NULL;
    struct section *sects = NULL;
    struct relocation_info *relocs = NULL;
    struct symtab_command *symtab_hdr = NULL;
    struct nlist *symtab = NULL;
    u_long offset = 0;
    u_int cmd = 0;
    u_int cmdsize = 0;
    u_int i = 0;
    u_int j = 0;
#if !KERNEL
    boolean_t swap = FALSE;
#endif /* !KERNEL */

    check(file);
    check(size);

    /* Verify that the file is big enough for the mach header */
    require_action(size >= sizeof(*mach_hdr), finish,
        rval=KERN_FAILURE;
        kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogTruncatedMachO));
    offset = sizeof(*mach_hdr);

#if !KERNEL
    /* Swap the mach header if necessary */
    if (mach_hdr->magic == MH_CIGAM) {
        swap = TRUE;
        (void) swap_mach_header(mach_hdr, host_order);
    }
#endif /* !KERNEL */

    /* Validate the mach_header's magic number */
    require_action(mach_hdr->magic == MH_MAGIC, finish,
        rval=KERN_FAILURE;
        kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogMalformedMachO
            "Invalid magic number: 0x%x.", mach_hdr->magic));

    /* If in the running kernel, and asked to validate the kernel
     * (which is the only file of type MH_EXECUTE we should ever see),
     * then just assume it's ok or we wouldn't be running to begin with.
     */
#if KERNEL
    if (mach_hdr->filetype == MH_EXECUTE) {
        rval = KERN_SUCCESS;
        goto finish;
    }
#endif /* KERNEL */

    /* Validate and potentially swap the load commands */
    for(i = 0; i < mach_hdr->ncmds; ++i, offset += cmdsize) {

        /* Get the load command and size */
        load_hdr = (struct load_command *) ((void *) (file + offset));
        cmd = load_hdr->cmd;
        cmdsize = load_hdr->cmdsize;

#if !KERNEL
        if (swap) {
            cmd = OSSwapInt32(load_hdr->cmd);
            cmdsize = OSSwapInt32(load_hdr->cmdsize);
        }
#endif /* !KERNEL */

        /* Verify that the file is big enough to contain the load command */
        require_action(size >= offset + cmdsize, finish,
            rval=KERN_FAILURE;
            kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogTruncatedMachO));

        switch(cmd) {
        case LC_SEGMENT:
            /* Get and swap the segment header */
            seg_hdr = (struct segment_command *) load_hdr;
#if !KERNEL
            if (swap) swap_segment_command(seg_hdr, host_order);
#endif /* !KERNEL */

            /* Get and swap the section headers */
            sects = (struct section *) &seg_hdr[1];
#if !KERNEL
            if (swap) swap_section(sects, seg_hdr->nsects, host_order);
#endif /* !KERNEL */

            /* Ignore segments with no vm size */
            if (!seg_hdr->vmsize) continue;

            /* Verify that the file is big enough for the segment data. */
            require_action(size >= seg_hdr->fileoff + seg_hdr->filesize, finish,
                rval=KERN_FAILURE;
                kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogTruncatedMachO));

            for (j = 0; j < seg_hdr->nsects; ++j) {

                /* Verify that, if the section is not to be zero-filled on
                 * demand, the file is big enough for the section's data.
                 */
                require_action((sects[j].flags & S_ZEROFILL) ||
                    (size >= sects[j].offset + sects[j].size), finish,
                    rval=KERN_FAILURE;
                    kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogTruncatedMachO));

                /* Verify that the file is big enough for the section's
                 * relocation entries.
                 */
                require_action(size >=
                    sects[j].reloff + sects[j].nreloc * sizeof(*relocs), finish,
                    rval=KERN_FAILURE;
                    kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogTruncatedMachO));

                /* Swap the relocation entries */
                relocs = (struct relocation_info *) ((void *) (file + sects[j].reloff));
#if !KERNEL
                if (swap) {
                    swap_relocation_info(relocs, sects[j].nreloc,
                        host_order);
                }
#endif /* !KERNEL */
            }

            break;
        case LC_SYMTAB:
            /* Get and swap the symtab header */
            symtab_hdr = (struct symtab_command *) load_hdr;
#if !KERNEL
            if (swap) swap_symtab_command(symtab_hdr, host_order);
#endif /* !KERNEL */

            /* Verify that the file is big enough for the symbol table */
            require_action(size >=
                symtab_hdr->symoff + symtab_hdr->nsyms * sizeof(*symtab), finish,
                rval=KERN_FAILURE;
                kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogTruncatedMachO));

            /* Verify that the file is big enough for the string table */
            require_action(size >= symtab_hdr->stroff + symtab_hdr->strsize, finish,
                rval=KERN_FAILURE;
                kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogTruncatedMachO));

#if !KERNEL
            /* Swap the symbol table entries */
            symtab = (struct nlist *) ((void *) (file + symtab_hdr->symoff));
            if (swap) swap_nlist(symtab, symtab_hdr->nsyms, host_order);
#endif /* !KERNEL */

            break;
        default:
#if !KERNEL
            /* Swap the load command */
            if (swap) swap_load_command(load_hdr, host_order);
#endif /* !KERNEL */
            break;
        }
    }

    rval = KERN_SUCCESS;

finish:
    return rval;
}

/*******************************************************************************
*******************************************************************************/
kern_return_t
validate_and_swap_macho_64(u_char *file, u_long size
#if !KERNEL
    , enum NXByteOrder host_order
#endif /* !KERNEL */
    )
{
    kern_return_t rval = KERN_FAILURE;
    struct mach_header_64 *mach_hdr = (struct mach_header_64 *) ((void *) file);
    struct load_command *load_hdr = NULL;
    struct segment_command_64 *seg_hdr = NULL;
    struct section_64 *sects = NULL;
    struct relocation_info *relocs = NULL;
    struct symtab_command *symtab_hdr = NULL;
    struct nlist_64 *symtab = NULL;
    u_long offset = 0;
    u_int cmd = 0;
    u_int cmdsize = 0;
    u_int i = 0;
    u_int j = 0;
#if !KERNEL
    boolean_t swap = FALSE;
#endif /* !KERNEL */

    check(file);
    check(size);

    /* Verify that the file is big enough for the mach header */
    require_action(size >= sizeof(*mach_hdr), finish,
        rval=KERN_FAILURE;
        kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogTruncatedMachO));
    offset = sizeof(*mach_hdr);

#if !KERNEL
    /* Swap the mach header if necessary */
    if (mach_hdr->magic == MH_CIGAM_64) {
        swap = TRUE;
        (void) swap_mach_header_64(mach_hdr, host_order);
    }
#endif /* !KERNEL */

    /* Validate the mach_header's magic number */
    require_action(mach_hdr->magic == MH_MAGIC_64, finish,
        rval=KERN_FAILURE;
        kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogMalformedMachO
            "Invalid magic number: 0x%x.", mach_hdr->magic));

    /* If in the running kernel, and asked to validate the kernel
     * (which is the only file of type MH_EXECUTE we should ever see),
     * then just assume it's ok or we wouldn't be running to begin with.
     */
#if KERNEL
    if (mach_hdr->filetype == MH_EXECUTE) {
        rval = KERN_SUCCESS;
        goto finish;
    }
#endif /* KERNEL */

    /* Validate and potentially swap the load commands */
    for(i = 0; i < mach_hdr->ncmds; ++i, offset += cmdsize) {
        /* Get the load command and size */
        load_hdr = (struct load_command *) ((void *) (file + offset));
        cmd = load_hdr->cmd;
        cmdsize = load_hdr->cmdsize;

#if !KERNEL
        if (swap) {
            cmd = OSSwapInt32(load_hdr->cmd);
            cmdsize = OSSwapInt32(load_hdr->cmdsize);
        }
#endif /* !KERNEL */

        /* Verify that the file is big enough to contain the load command */
        require_action(size >= offset + cmdsize, finish,
            rval=KERN_FAILURE;
            kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogTruncatedMachO));
        switch(cmd) {
        case LC_SEGMENT_64:
            /* Get and swap the segment header */
            seg_hdr = (struct segment_command_64 *) ((void *) load_hdr);
#if !KERNEL
            if (swap) swap_segment_command_64(seg_hdr, host_order);
#endif /* !KERNEL */

            /* Get and swap the section headers */
            sects = (struct section_64 *) &seg_hdr[1];
#if !KERNEL
            if (swap) swap_section_64(sects, seg_hdr->nsects, host_order);
#endif /* !KERNEL */

            /* If the segment has no vm footprint, skip it */
            if (!seg_hdr->vmsize) continue;

            /* Verify that the file is big enough for the segment data. */
            require_action(size >= seg_hdr->fileoff + seg_hdr->filesize, finish,
                rval=KERN_FAILURE;
                kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogTruncatedMachO));

            for (j = 0; j < seg_hdr->nsects; ++j) {

                /* Verify that, if the section is not to be zero-filled on
                 * demand, the file is big enough for the section's data.
                 */
                require_action((sects[j].flags & S_ZEROFILL) ||
                    (size >= sects[j].offset + sects[j].size), finish,
                    rval=KERN_FAILURE;
                    kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogTruncatedMachO));

                /* Verify that the file is big enough for the section's
                 * relocation entries.
                 */
                require_action(size >=
                    sects[j].reloff + sects[j].nreloc * sizeof(*relocs), finish,
                    rval=KERN_FAILURE;
                    kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogTruncatedMachO));

                /* Swap the relocation entries */
                relocs = (struct relocation_info *) ((void *) (file + sects[j].reloff));
#if !KERNEL
                if (swap) {
                    swap_relocation_info(relocs, sects[j].nreloc,
                        host_order);
                }
#endif /* !KERNEL */
            }

            break;
        case LC_SYMTAB:
            /* Get and swap the symtab header */
            symtab_hdr = (struct symtab_command *) load_hdr;
#if !KERNEL
            if (swap) swap_symtab_command(symtab_hdr, host_order);
#endif /* !KERNEL */

            /* Verify that the file is big enough for the symbol table */
            require_action(size >=
                symtab_hdr->symoff + symtab_hdr->nsyms * sizeof(*symtab), finish,
                rval=KERN_FAILURE;
                kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogTruncatedMachO));

            /* Verify that the file is big enough for the string table */
            require_action(size >= symtab_hdr->stroff + symtab_hdr->strsize, finish,
                rval=KERN_FAILURE;
                kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogTruncatedMachO));

#if !KERNEL
            /* Swap the symbol table entries */
            symtab = (struct nlist_64 *) ((void *) (file + symtab_hdr->symoff));
            if (swap) swap_nlist_64(symtab, symtab_hdr->nsyms, host_order);
#endif /* !KERNEL */

            break;
        default:
#if !KERNEL
            /* Swap the load command */
            if (swap) swap_load_command(load_hdr, host_order);
#endif /* !KERNEL */
            break;
        }
    }

    rval = KERN_SUCCESS;

finish:
    return rval;
}
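
/* Illustrative only, not part of the original file: a sketch of a user-space
 * caller of the 64-bit validator above. The buffer and length are placeholders
 * for a kext file read or mapped into memory; NXHostByteOrder() is declared in
 * <architecture/byte_order.h>.
 *
 *   u_char *buf = ...;   // kext file contents in memory
 *   u_long  len = ...;   // size of that buffer, in bytes
 *
 *   if (validate_and_swap_macho_64(buf, len, NXHostByteOrder()) !=
 *       KERN_SUCCESS) {
 *       // reject the file: truncated or malformed Mach-O
 *   }
 */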

#if !KERNEL
/*******************************************************************************
*******************************************************************************/
void unswap_macho(u_char *file, enum NXByteOrder host_order,
    enum NXByteOrder target_order)
{
    struct mach_header *hdr = (struct mach_header *) ((void *) file);

    if (!hdr) return;

    if (hdr->magic == MH_MAGIC) {
        unswap_macho_32(file, host_order, target_order);
    } else if (hdr->magic == MH_MAGIC_64) {
        unswap_macho_64(file, host_order, target_order);
    }
}

/*******************************************************************************
*******************************************************************************/
static void
unswap_macho_32(u_char *file, enum NXByteOrder host_order,
    enum NXByteOrder target_order)
{
    struct mach_header *mach_hdr = (struct mach_header *) ((void *) file);
    struct load_command *load_hdr = NULL;
    struct segment_command *seg_hdr = NULL;
    struct section *sects = NULL;
    struct symtab_command *symtab_hdr = NULL;
    struct nlist *symtab = NULL;
    u_long offset = 0;
    u_int cmd = 0;
    u_int size = 0;
    u_int i = 0;

    check(file);

    if (target_order == host_order) return;

    offset = sizeof(*mach_hdr);
    for(i = 0; i < mach_hdr->ncmds; ++i, offset += size) {
        load_hdr = (struct load_command *) ((void *) (file + offset));
        cmd = load_hdr->cmd;
        size = load_hdr->cmdsize;

        switch(cmd) {
        case LC_SEGMENT:
            seg_hdr = (struct segment_command *) load_hdr;
            sects = (struct section *) &seg_hdr[1];

            /* We don't need to unswap relocations because this function is
             * called when linking is completed (so there are no relocations).
             */

            swap_section(sects, seg_hdr->nsects, target_order);
            swap_segment_command(seg_hdr, target_order);
            break;
        case LC_SYMTAB:
            symtab_hdr = (struct symtab_command *) load_hdr;
            symtab = (struct nlist*) ((void *) (file + symtab_hdr->symoff));

            swap_nlist(symtab, symtab_hdr->nsyms, target_order);
            swap_symtab_command(symtab_hdr, target_order);

            break;
        default:
            swap_load_command(load_hdr, target_order);
            break;
        }
    }

    (void) swap_mach_header(mach_hdr, target_order);
}

/*******************************************************************************
*******************************************************************************/
static void
unswap_macho_64(u_char *file, enum NXByteOrder host_order,
    enum NXByteOrder target_order)
{
    struct mach_header_64 *mach_hdr = (struct mach_header_64 *) ((void *) file);
    struct load_command *load_hdr = NULL;
    struct segment_command_64 *seg_hdr = NULL;
    struct section_64 *sects = NULL;
    struct symtab_command *symtab_hdr = NULL;
    struct nlist_64 *symtab = NULL;
    u_long offset = 0;
    u_int cmd = 0;
    u_int size = 0;
    u_int i = 0;

    check(file);

    if (target_order == host_order) return;

    offset = sizeof(*mach_hdr);
    for(i = 0; i < mach_hdr->ncmds; ++i, offset += size) {
        load_hdr = (struct load_command *) ((void *) (file + offset));
        cmd = load_hdr->cmd;
        size = load_hdr->cmdsize;

        switch(cmd) {
        case LC_SEGMENT_64:
            seg_hdr = (struct segment_command_64 *) ((void *) load_hdr);
            sects = (struct section_64 *) &seg_hdr[1];

            /* We don't need to unswap relocations because this function is
             * called when linking is completed (so there are no relocations).
             */

            swap_section_64(sects, seg_hdr->nsects, target_order);
            swap_segment_command_64(seg_hdr, target_order);
            break;
        case LC_SYMTAB:
            symtab_hdr = (struct symtab_command *) load_hdr;
            symtab = (struct nlist_64 *) ((void *) (file + symtab_hdr->symoff));

            swap_nlist_64(symtab, symtab_hdr->nsyms, target_order);
            swap_symtab_command(symtab_hdr, target_order);

            break;
        default:
            swap_load_command(load_hdr, target_order);
            break;
        }
    }

    (void) swap_mach_header_64(mach_hdr, target_order);
}
#endif /* !KERNEL */

/*******************************************************************************
*******************************************************************************/
kxld_addr_t
kxld_align_address(kxld_addr_t address, u_int align)
{
    kxld_addr_t alignment = (1 << align);
    kxld_addr_t low_bits = 0;

    if (!align) return address;

    low_bits = (address) & (alignment - 1);
    if (low_bits) {
        address += (alignment - low_bits);
    }

    return address;
}
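
/* Illustrative only, not part of the original file: align is a power-of-two
 * exponent, so align == 4 requests 16-byte alignment. For example,
 * kxld_align_address(0x1001, 4) computes low_bits = 0x1 and returns
 * 0x1001 + (0x10 - 0x1) = 0x1010, while an already aligned 0x1010 comes back
 * unchanged.
 */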

/*******************************************************************************
*******************************************************************************/
boolean_t
kxld_is_32_bit(cpu_type_t cputype)
{
    return !(cputype & CPU_ARCH_ABI64);
}

/*******************************************************************************
* Borrowed (and slightly modified) from the libc implementation, for use in the
* kernel until the kernel has a supported strstr().
* Find the first occurrence of find in s.
*******************************************************************************/
const char *
kxld_strstr(const char *s, const char *find)
{
#if KERNEL
    char c, sc;
    size_t len;

    if ((c = *find++) != 0) {
        len = strlen(find);
        do {
            do {
                if ((sc = *s++) == 0)
                    return (NULL);
            } while (sc != c);
        } while (strncmp(s, find, len) != 0);
        s--;
    }
    return s;
#else
    return strstr(s, find);
#endif /* KERNEL */
}
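
/* Illustrative only, not part of the original file: kxld_strstr() behaves like
 * strstr(3). For example, kxld_strstr("__TEXT,__text", "__text") returns a
 * pointer to the "__text" that follows the comma, and
 * kxld_strstr("__DATA", "__text") returns NULL.
 */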

/*******************************************************************************
*******************************************************************************/
void
kxld_print_memory_report(void)
{
#if DEBUG
    kxld_log(kKxldLogLinking, kKxldLogExplicit, "kxld memory usage report:\n"
        "\tNumber of allocations: %8lu\n"
        "\tNumber of frees: %8lu\n"
        "\tAverage allocation size: %8lu\n"
        "\tTotal bytes allocated: %8lu\n"
        "\tTotal bytes freed: %8lu\n"
        "\tTotal bytes leaked: %8lu",
        num_allocations, num_frees, bytes_allocated / num_allocations,
        bytes_allocated, bytes_freed, bytes_allocated - bytes_freed);
#endif
}

/*********************************************************************
*********************************************************************/
#if !KERNEL
boolean_t kxld_set_cross_link_page_size(kxld_size_t target_page_size)
{
    // verify the target page size is a nonzero power of two
    if ((target_page_size != 0) &&
        ((target_page_size & (target_page_size - 1)) == 0)) {

        s_cross_link_enabled = TRUE;
        s_cross_link_page_size = target_page_size;

        return TRUE;
    } else {
        return FALSE;
    }
}
#endif /* !KERNEL */
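
/* Illustrative only, not part of the original file: the power-of-two check in
 * kxld_set_cross_link_page_size() accepts sizes such as 0x1000 (4 KiB) or
 * 0x4000 (16 KiB) and enables cross linking, while a value like 0x3000 has
 * more than one bit set ((0x3000 & 0x2FFF) == 0x2000), so it is rejected and
 * cross linking stays disabled.
 */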

/*********************************************************************
*********************************************************************/
kxld_size_t kxld_get_effective_page_size(void)
{
#if KERNEL
    return PAGE_SIZE;
#else
    if (s_cross_link_enabled) {
        return s_cross_link_page_size;
    } else {
        return PAGE_SIZE;
    }
#endif /* KERNEL */
}

/*********************************************************************
*********************************************************************/
kxld_addr_t kxld_round_page_cross_safe(kxld_addr_t offset)
{
#if KERNEL
    return round_page(offset);
#else
    // assume s_cross_link_page_size is a power of 2
    if (s_cross_link_enabled) {
        return (offset + (s_cross_link_page_size - 1)) &
            (~(s_cross_link_page_size - 1));
    } else {
        return round_page(offset);
    }
#endif /* KERNEL */
}
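
/* Illustrative only, not part of the original file: with a cross-link page
 * size of 0x4000 (16 KiB), kxld_round_page_cross_safe(0x4001) evaluates
 * (0x4001 + 0x3FFF) & ~0x3FFF = 0x8000, while an offset of exactly 0x4000 is
 * returned unchanged.
 */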

#if SPLIT_KEXTS_DEBUG

void kxld_show_split_info(splitKextLinkInfo *info)
{
    kxld_log(kKxldLogLinking, kKxldLogErr,
        "splitKextLinkInfo: \n"
        "kextExecutable %p to %p kextSize %lu \n"
        "linkedKext %p to %p linkedKextSize %lu \n"
        "vmaddr_TEXT %p vmaddr_TEXT_EXEC %p "
        "vmaddr_DATA %p vmaddr_DATA_CONST %p "
        "vmaddr_LLVM_COV %p vmaddr_LINKEDIT %p",
        (void *) info->kextExecutable,
        (void *) (info->kextExecutable + info->kextSize),
        info->kextSize,
        (void *) info->linkedKext,
        (void *) (info->linkedKext + info->linkedKextSize),
        info->linkedKextSize,
        (void *) info->vmaddr_TEXT,
        (void *) info->vmaddr_TEXT_EXEC,
        (void *) info->vmaddr_DATA,
        (void *) info->vmaddr_DATA_CONST,
        (void *) info->vmaddr_LLVM_COV,
        (void *) info->vmaddr_LINKEDIT);
}

boolean_t isTargetKextName(const char * the_name)
{
    if (the_name && 0 == strcmp(the_name, KXLD_TARGET_KEXT)) {
        return(TRUE);
    }
    return(FALSE);
}
#endif