/*
 * Copyright (c) 2007-2008 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <stdarg.h>
#include <string.h>
#include <mach-o/loader.h>
#include <mach-o/nlist.h>
#include <mach-o/reloc.h>
#if KERNEL
#include <kern/kalloc.h>
#include <libkern/libkern.h>
#include <mach/vm_param.h>
#include <vm/vm_kern.h>
#else
#include <stdio.h>
#include <stdlib.h>
#include <mach/mach_init.h>
#include <mach-o/swap.h>
#endif

#define DEBUG_ASSERT_COMPONENT_NAME_STRING "kxld"
#include <AssertMacros.h>

#include "kxld_util.h"

#if !KERNEL
static void unswap_macho_32(u_char *file, enum NXByteOrder host_order,
    enum NXByteOrder target_order);
static void unswap_macho_64(u_char *file, enum NXByteOrder host_order,
    enum NXByteOrder target_order);
#endif /* !KERNEL */

#if DEBUG
static unsigned long num_allocations = 0;
static unsigned long num_frees = 0;
static unsigned long bytes_allocated = 0;
static unsigned long bytes_freed = 0;
#endif

static KXLDLoggingCallback s_logging_callback = NULL;
static const char *s_callback_name = NULL;
static void *s_callback_data = NULL;

/*******************************************************************************
*******************************************************************************/
void
kxld_set_logging_callback(KXLDLoggingCallback logging_callback)
{
    s_logging_callback = logging_callback;
}

/*******************************************************************************
*******************************************************************************/
void
kxld_set_logging_callback_data(const char *name, void *user_data)
{
    s_callback_name = name;
    s_callback_data = user_data;
}

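/* Illustrative sketch (not part of the original source): a host that links
 * against kxld would register its logger roughly as follows, with the callback
 * matching the argument order used by kxld_log() below -- subsystem, level,
 * format string, va_list, and the user_data pointer registered here. The exact
 * KXLDLoggingCallback typedef lives in the kxld headers.
 *
 *     static void my_logger(KXLDLogSubsystem sys, KXLDLogLevel level,
 *         const char *format, va_list ap, void *user_data)
 *     {
 *         vfprintf(stderr, format, ap);
 *         fputc('\n', stderr);
 *     }
 *
 *     kxld_set_logging_callback(my_logger);
 *     kxld_set_logging_callback_data("my_tool", NULL);
 */
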
/*******************************************************************************
*******************************************************************************/
void
kxld_log(KXLDLogSubsystem subsystem, KXLDLogLevel level,
    const char *in_format, ...)
{
    char stack_buffer[256];
    char *alloc_buffer = NULL;
    char *format = stack_buffer;
    const char *name = (s_callback_name) ? s_callback_name : "internal";
    u_int length = 0;
    va_list ap;

    if (s_logging_callback) {

        length = snprintf(stack_buffer, sizeof(stack_buffer), "kxld[%s]: %s",
            name, in_format);

        if (length >= sizeof(stack_buffer)) {
            length += 1;
            alloc_buffer = kxld_alloc(length);
            if (!alloc_buffer) return;

            snprintf(alloc_buffer, length, "kxld[%s]: %s",
                name, in_format);
            format = alloc_buffer;
        }

        va_start(ap, in_format);
        s_logging_callback(subsystem, level, format, ap, s_callback_data);
        va_end(ap);

        if (alloc_buffer) {
            kxld_free(alloc_buffer, length);
        }
    }
}

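/* Illustrative usage (mirrors the call sites later in this file): the variadic
 * arguments are forwarded to the registered callback after the format string
 * has been prefixed with "kxld[<name>]: ".
 *
 *     kxld_log(kKxldLogLinking, kKxldLogErr,
 *         "Invalid magic number: 0x%x.", mach_hdr->magic);
 */
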
/* We'll use kalloc for any page-based allocations under this threshold, and
 * kmem_alloc otherwise.
 */
#define KALLOC_MAX (16 * 1024)

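/* For example (assuming 4K pages): a 4096-byte request stays under KALLOC_MAX
 * and is served by kalloc(), while a 64KB request is rounded to page size and
 * served by kmem_alloc() from kernel_map, as in kxld_page_alloc_untracked()
 * below. User-space builds simply use malloc()/free().
 */
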
/*******************************************************************************
*******************************************************************************/
void *
kxld_alloc(size_t size)
{
    void * ptr = NULL;

#if KERNEL
    ptr = kalloc(size);
#else
    ptr = malloc(size);
#endif

#if DEBUG
    if (ptr) {
        ++num_allocations;
        bytes_allocated += size;
    }
#endif

    return ptr;
}

/*******************************************************************************
*******************************************************************************/
void *
kxld_page_alloc_untracked(size_t size)
{
    void * ptr = NULL;
#if KERNEL
    kern_return_t rval = 0;
    vm_offset_t addr = 0;
#endif /* KERNEL */

    size = round_page(size);

#if KERNEL
    if (size < KALLOC_MAX) {
        ptr = kalloc(size);
    } else {
        rval = kmem_alloc(kernel_map, &addr, size);
        if (!rval) ptr = (void *) addr;
    }
#else /* !KERNEL */
    ptr = malloc(size);
#endif /* KERNEL */

    return ptr;
}

/*******************************************************************************
*******************************************************************************/
void *
kxld_page_alloc(size_t size)
{
    void * ptr = NULL;

    ptr = kxld_page_alloc_untracked(size);
#if DEBUG
    if (ptr) {
        ++num_allocations;
        bytes_allocated += round_page(size);
    }
#endif /* DEBUG */

    return ptr;
}

/*******************************************************************************
*******************************************************************************/
void *
kxld_alloc_pageable(size_t size)
{
    size = round_page(size);

#if KERNEL
    kern_return_t rval = 0;
    vm_offset_t ptr = 0;

    rval = kmem_alloc_pageable(kernel_map, &ptr, size);
    if (rval) ptr = 0;

    return (void *) ptr;
#else
    return kxld_page_alloc_untracked(size);
#endif
}

/*******************************************************************************
*******************************************************************************/
void
kxld_free(void *ptr, size_t size __unused)
{
#if DEBUG
    ++num_frees;
    bytes_freed += size;
#endif

#if KERNEL
    kfree(ptr, size);
#else
    free(ptr);
#endif
}

/*******************************************************************************
*******************************************************************************/
void
kxld_page_free_untracked(void *ptr, size_t size __unused)
{
#if KERNEL
    size = round_page(size);

    if (size < KALLOC_MAX) {
        kfree(ptr, size);
    } else {
        kmem_free(kernel_map, (vm_offset_t) ptr, size);
    }
#else /* !KERNEL */
    free(ptr);
#endif /* KERNEL */
}


/*******************************************************************************
*******************************************************************************/
void
kxld_page_free(void *ptr, size_t size)
{
#if DEBUG
    ++num_frees;
    bytes_freed += round_page(size);
#endif /* DEBUG */
    kxld_page_free_untracked(ptr, size);
}
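
/* Illustrative pairing (not part of the original source): callers are expected
 * to pass the original request size back to kxld_page_free() so the kernel
 * path can decide between kfree() and kmem_free().
 *
 *     u_char *buf = kxld_page_alloc(nbytes);
 *     if (buf) {
 *         ... use buf ...
 *         kxld_page_free(buf, nbytes);
 *     }
 */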

/*******************************************************************************
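* validate_and_swap_macho_32() walks a 32-bit Mach-O image in memory and checks
* that the mach header, load commands, segment and section data, relocation
* entries, and symbol/string tables all fit within 'size'. In user-space builds
* it also byte-swaps the headers into host order when the file's magic is
* MH_CIGAM. Returns KERN_SUCCESS if the file passes validation, KERN_FAILURE
* otherwise.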
*******************************************************************************/
kern_return_t
validate_and_swap_macho_32(u_char *file, u_long size
#if !KERNEL
    , enum NXByteOrder host_order
#endif /* !KERNEL */
    )
{
    kern_return_t rval = KERN_FAILURE;
    struct mach_header *mach_hdr = (struct mach_header *) ((void *) file);
    struct load_command *load_hdr = NULL;
    struct segment_command *seg_hdr = NULL;
    struct section *sects = NULL;
    struct relocation_info *relocs = NULL;
    struct symtab_command *symtab_hdr = NULL;
    struct nlist *symtab = NULL;
    u_long offset = 0;
    u_int cmd = 0;
    u_int cmdsize = 0;
    u_int i = 0;
    u_int j = 0;
#if !KERNEL
    boolean_t swap = FALSE;
#endif /* !KERNEL */

    check(file);
    check(size);

    /* Verify that the file is big enough for the mach header */
    require_action(size >= sizeof(*mach_hdr), finish,
        rval=KERN_FAILURE;
        kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogTruncatedMachO));
    offset = sizeof(*mach_hdr);

#if !KERNEL
    /* Swap the mach header if necessary */
    if (mach_hdr->magic == MH_CIGAM) {
        swap = TRUE;
        (void) swap_mach_header(mach_hdr, host_order);
    }
#endif /* !KERNEL */

    /* Validate the mach_header's magic number */
    require_action(mach_hdr->magic == MH_MAGIC, finish,
        rval=KERN_FAILURE;
        kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogMalformedMachO
            "Invalid magic number: 0x%x.", mach_hdr->magic));

    /* If we're in the running kernel and have been asked to validate the
     * kernel itself (the only file of type MH_EXECUTE we should ever see),
     * just assume it's ok -- we wouldn't be running to begin with otherwise.
     */
#if KERNEL
    if (mach_hdr->filetype == MH_EXECUTE) {
        rval = KERN_SUCCESS;
        goto finish;
    }
#endif /* KERNEL */

    /* Validate and potentially swap the load commands */
    for (i = 0; i < mach_hdr->ncmds; ++i, offset += cmdsize) {

        /* Get the load command and size */
        load_hdr = (struct load_command *) ((void *) (file + offset));
        cmd = load_hdr->cmd;
        cmdsize = load_hdr->cmdsize;

#if !KERNEL
        if (swap) {
            cmd = OSSwapInt32(load_hdr->cmd);
            cmdsize = OSSwapInt32(load_hdr->cmdsize);
        }
#endif /* !KERNEL */

        /* Verify that the file is big enough to contain the load command */
        require_action(size >= offset + cmdsize, finish,
            rval=KERN_FAILURE;
            kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogTruncatedMachO));

        switch (cmd) {
        case LC_SEGMENT:
            /* Get and swap the segment header */
            seg_hdr = (struct segment_command *) load_hdr;
#if !KERNEL
            if (swap) swap_segment_command(seg_hdr, host_order);
#endif /* !KERNEL */

            /* Get and swap the section headers */
            sects = (struct section *) &seg_hdr[1];
#if !KERNEL
            if (swap) swap_section(sects, seg_hdr->nsects, host_order);
#endif /* !KERNEL */

            /* Ignore segments with no vm size */
            if (!seg_hdr->vmsize) continue;

            /* Verify that the file is big enough for the segment data. */
            require_action(size >= seg_hdr->fileoff + seg_hdr->filesize, finish,
                rval=KERN_FAILURE;
                kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogTruncatedMachO));

            for (j = 0; j < seg_hdr->nsects; ++j) {

                /* Verify that, if the section is not to be zero-filled on
                 * demand, the file is big enough for the section's data.
                 */
                require_action((sects[j].flags & S_ZEROFILL) ||
                    (size >= sects[j].offset + sects[j].size), finish,
                    rval=KERN_FAILURE;
                    kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogTruncatedMachO));

                /* Verify that the file is big enough for the section's
                 * relocation entries.
                 */
                require_action(size >=
                    sects[j].reloff + sects[j].nreloc * sizeof(*relocs), finish,
                    rval=KERN_FAILURE;
                    kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogTruncatedMachO));

                /* Swap the relocation entries */
                relocs = (struct relocation_info *) ((void *) (file + sects[j].reloff));
#if !KERNEL
                if (swap) {
                    swap_relocation_info(relocs, sects[j].nreloc,
                        host_order);
                }
#endif /* !KERNEL */
            }

            break;
        case LC_SYMTAB:
            /* Get and swap the symtab header */
            symtab_hdr = (struct symtab_command *) load_hdr;
#if !KERNEL
            if (swap) swap_symtab_command(symtab_hdr, host_order);
#endif /* !KERNEL */

            /* Verify that the file is big enough for the symbol table */
            require_action(size >=
                symtab_hdr->symoff + symtab_hdr->nsyms * sizeof(*symtab), finish,
                rval=KERN_FAILURE;
                kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogTruncatedMachO));

            /* Verify that the file is big enough for the string table */
            require_action(size >= symtab_hdr->stroff + symtab_hdr->strsize, finish,
                rval=KERN_FAILURE;
                kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogTruncatedMachO));

#if !KERNEL
            /* Swap the symbol table entries */
            symtab = (struct nlist *) ((void *) (file + symtab_hdr->symoff));
            if (swap) swap_nlist(symtab, symtab_hdr->nsyms, host_order);
#endif /* !KERNEL */

            break;
        default:
#if !KERNEL
            /* Swap the load command */
            if (swap) swap_load_command(load_hdr, host_order);
#endif /* !KERNEL */
            break;
        }
    }

    rval = KERN_SUCCESS;

finish:
    return rval;
}

/*******************************************************************************
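* validate_and_swap_macho_64() is the 64-bit counterpart of
* validate_and_swap_macho_32(): it performs the same bounds checks (and, in
* user-space builds, the same byte-swapping) against the mach_header_64,
* segment_command_64, section_64, and nlist_64 layouts.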
*******************************************************************************/
kern_return_t
validate_and_swap_macho_64(u_char *file, u_long size
#if !KERNEL
    , enum NXByteOrder host_order
#endif /* !KERNEL */
    )
{
    kern_return_t rval = KERN_FAILURE;
    struct mach_header_64 *mach_hdr = (struct mach_header_64 *) ((void *) file);
    struct load_command *load_hdr = NULL;
    struct segment_command_64 *seg_hdr = NULL;
    struct section_64 *sects = NULL;
    struct relocation_info *relocs = NULL;
    struct symtab_command *symtab_hdr = NULL;
    struct nlist_64 *symtab = NULL;
    u_long offset = 0;
    u_int cmd = 0;
    u_int cmdsize = 0;
    u_int i = 0;
    u_int j = 0;
#if !KERNEL
    boolean_t swap = FALSE;
#endif /* !KERNEL */

    check(file);
    check(size);

    /* Verify that the file is big enough for the mach header */
    require_action(size >= sizeof(*mach_hdr), finish,
        rval=KERN_FAILURE;
        kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogTruncatedMachO));
    offset = sizeof(*mach_hdr);

#if !KERNEL
    /* Swap the mach header if necessary */
    if (mach_hdr->magic == MH_CIGAM_64) {
        swap = TRUE;
        (void) swap_mach_header_64(mach_hdr, host_order);
    }
#endif /* !KERNEL */

    /* Validate the mach_header's magic number */
    require_action(mach_hdr->magic == MH_MAGIC_64, finish,
        rval=KERN_FAILURE;
        kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogMalformedMachO
            "Invalid magic number: 0x%x.", mach_hdr->magic));

    /* If we're in the running kernel and have been asked to validate the
     * kernel itself (the only file of type MH_EXECUTE we should ever see),
     * just assume it's ok -- we wouldn't be running to begin with otherwise.
     */
#if KERNEL
    if (mach_hdr->filetype == MH_EXECUTE) {
        rval = KERN_SUCCESS;
        goto finish;
    }
#endif /* KERNEL */

    /* Validate and potentially swap the load commands */
    for (i = 0; i < mach_hdr->ncmds; ++i, offset += cmdsize) {
        /* Get the load command and size */
        load_hdr = (struct load_command *) ((void *) (file + offset));
        cmd = load_hdr->cmd;
        cmdsize = load_hdr->cmdsize;

#if !KERNEL
        if (swap) {
            cmd = OSSwapInt32(load_hdr->cmd);
            cmdsize = OSSwapInt32(load_hdr->cmdsize);
        }
#endif /* !KERNEL */

        /* Verify that the file is big enough to contain the load command */
        require_action(size >= offset + cmdsize, finish,
            rval=KERN_FAILURE;
            kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogTruncatedMachO));
        switch (cmd) {
        case LC_SEGMENT_64:
            /* Get and swap the segment header */
            seg_hdr = (struct segment_command_64 *) ((void *) load_hdr);
#if !KERNEL
            if (swap) swap_segment_command_64(seg_hdr, host_order);
#endif /* !KERNEL */

            /* Get and swap the section headers */
            sects = (struct section_64 *) &seg_hdr[1];
#if !KERNEL
            if (swap) swap_section_64(sects, seg_hdr->nsects, host_order);
#endif /* !KERNEL */

            /* If the segment has no vm footprint, skip it */
            if (!seg_hdr->vmsize) continue;

            /* Verify that the file is big enough for the segment data. */
            require_action(size >= seg_hdr->fileoff + seg_hdr->filesize, finish,
                rval=KERN_FAILURE;
                kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogTruncatedMachO));

            for (j = 0; j < seg_hdr->nsects; ++j) {

                /* Verify that, if the section is not to be zero-filled on
                 * demand, the file is big enough for the section's data.
                 */
                require_action((sects[j].flags & S_ZEROFILL) ||
                    (size >= sects[j].offset + sects[j].size), finish,
                    rval=KERN_FAILURE;
                    kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogTruncatedMachO));

                /* Verify that the file is big enough for the section's
                 * relocation entries.
                 */
                require_action(size >=
                    sects[j].reloff + sects[j].nreloc * sizeof(*relocs), finish,
                    rval=KERN_FAILURE;
                    kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogTruncatedMachO));

                /* Swap the relocation entries */
                relocs = (struct relocation_info *) ((void *) (file + sects[j].reloff));
#if !KERNEL
                if (swap) {
                    swap_relocation_info(relocs, sects[j].nreloc,
                        host_order);
                }
#endif /* !KERNEL */
            }

            break;
        case LC_SYMTAB:
            /* Get and swap the symtab header */
            symtab_hdr = (struct symtab_command *) load_hdr;
#if !KERNEL
            if (swap) swap_symtab_command(symtab_hdr, host_order);
#endif /* !KERNEL */

            /* Verify that the file is big enough for the symbol table */
            require_action(size >=
                symtab_hdr->symoff + symtab_hdr->nsyms * sizeof(*symtab), finish,
                rval=KERN_FAILURE;
                kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogTruncatedMachO));

            /* Verify that the file is big enough for the string table */
            require_action(size >= symtab_hdr->stroff + symtab_hdr->strsize, finish,
                rval=KERN_FAILURE;
                kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogTruncatedMachO));

#if !KERNEL
            /* Swap the symbol table entries */
            symtab = (struct nlist_64 *) ((void *) (file + symtab_hdr->symoff));
            if (swap) swap_nlist_64(symtab, symtab_hdr->nsyms, host_order);
#endif /* !KERNEL */

            break;
        default:
#if !KERNEL
            /* Swap the load command */
            if (swap) swap_load_command(load_hdr, host_order);
#endif /* !KERNEL */
            break;
        }
    }

    rval = KERN_SUCCESS;

finish:
    return rval;
}

#if !KERNEL
/*******************************************************************************
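* unswap_macho() converts a fully linked Mach-O image from host byte order back
* to the target's byte order (user-space builds only). It dispatches on the
* mach header magic to the 32-bit or 64-bit helper below; files with an
* unrecognized magic are left untouched.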
*******************************************************************************/
void unswap_macho(u_char *file, enum NXByteOrder host_order,
    enum NXByteOrder target_order)
{
    struct mach_header *hdr = (struct mach_header *) ((void *) file);

    if (!hdr) return;

    if (hdr->magic == MH_MAGIC) {
        unswap_macho_32(file, host_order, target_order);
    } else if (hdr->magic == MH_MAGIC_64) {
        unswap_macho_64(file, host_order, target_order);
    }
}

/*******************************************************************************
*******************************************************************************/
static void
unswap_macho_32(u_char *file, enum NXByteOrder host_order,
    enum NXByteOrder target_order)
{
    struct mach_header *mach_hdr = (struct mach_header *) ((void *) file);
    struct load_command *load_hdr = NULL;
    struct segment_command *seg_hdr = NULL;
    struct section *sects = NULL;
    struct symtab_command *symtab_hdr = NULL;
    struct nlist *symtab = NULL;
    u_long offset = 0;
    u_int cmd = 0;
    u_int size = 0;
    u_int i = 0;

    check(file);

    if (target_order == host_order) return;

    offset = sizeof(*mach_hdr);
    for (i = 0; i < mach_hdr->ncmds; ++i, offset += size) {
        load_hdr = (struct load_command *) ((void *) (file + offset));
        cmd = load_hdr->cmd;
        size = load_hdr->cmdsize;

        switch (cmd) {
        case LC_SEGMENT:
            seg_hdr = (struct segment_command *) load_hdr;
            sects = (struct section *) &seg_hdr[1];

            /* We don't need to unswap relocations because this function is
             * called when linking is completed (so there are no relocations).
             */

            swap_section(sects, seg_hdr->nsects, target_order);
            swap_segment_command(seg_hdr, target_order);
            break;
        case LC_SYMTAB:
            symtab_hdr = (struct symtab_command *) load_hdr;
            symtab = (struct nlist*) ((void *) (file + symtab_hdr->symoff));

            swap_nlist(symtab, symtab_hdr->nsyms, target_order);
            swap_symtab_command(symtab_hdr, target_order);

            break;
        default:
            swap_load_command(load_hdr, target_order);
            break;
        }
    }

    (void) swap_mach_header(mach_hdr, target_order);
}

/*******************************************************************************
*******************************************************************************/
static void
unswap_macho_64(u_char *file, enum NXByteOrder host_order,
    enum NXByteOrder target_order)
{
    struct mach_header_64 *mach_hdr = (struct mach_header_64 *) ((void *) file);
    struct load_command *load_hdr = NULL;
    struct segment_command_64 *seg_hdr = NULL;
    struct section_64 *sects = NULL;
    struct symtab_command *symtab_hdr = NULL;
    struct nlist_64 *symtab = NULL;
    u_long offset = 0;
    u_int cmd = 0;
    u_int size = 0;
    u_int i = 0;

    check(file);

    if (target_order == host_order) return;

    offset = sizeof(*mach_hdr);
    for (i = 0; i < mach_hdr->ncmds; ++i, offset += size) {
        load_hdr = (struct load_command *) ((void *) (file + offset));
        cmd = load_hdr->cmd;
        size = load_hdr->cmdsize;

        switch (cmd) {
        case LC_SEGMENT_64:
            seg_hdr = (struct segment_command_64 *) ((void *) load_hdr);
            sects = (struct section_64 *) &seg_hdr[1];

            /* We don't need to unswap relocations because this function is
             * called when linking is completed (so there are no relocations).
             */

            swap_section_64(sects, seg_hdr->nsects, target_order);
            swap_segment_command_64(seg_hdr, target_order);
            break;
        case LC_SYMTAB:
            symtab_hdr = (struct symtab_command *) load_hdr;
            symtab = (struct nlist_64 *) ((void *) (file + symtab_hdr->symoff));

            swap_nlist_64(symtab, symtab_hdr->nsyms, target_order);
            swap_symtab_command(symtab_hdr, target_order);

            break;
        default:
            swap_load_command(load_hdr, target_order);
            break;
        }
    }

    (void) swap_mach_header_64(mach_hdr, target_order);
}
#endif /* !KERNEL */

/*******************************************************************************
*******************************************************************************/
kxld_addr_t
kxld_align_address(kxld_addr_t address, u_int align)
{
    kxld_addr_t alignment = (1 << align);
    kxld_addr_t low_bits = 0;

    if (!align) return address;

    low_bits = (address) & (alignment - 1);
    if (low_bits) {
        address += (alignment - low_bits);
    }

    return address;
}

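/* Worked example: 'align' is a power-of-two exponent, so align == 3 requests
 * 8-byte alignment. kxld_align_address(0x1003, 3) computes low_bits == 0x3,
 * adds (8 - 3) == 5, and returns 0x1008; an already-aligned address such as
 * 0x1008 is returned unchanged.
 */
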
/*******************************************************************************
*******************************************************************************/
boolean_t
kxld_is_32_bit(cpu_type_t cputype)
{
    return !(cputype & CPU_ARCH_ABI64);
}

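/* For example, CPU_TYPE_X86_64 is defined as (CPU_TYPE_X86 | CPU_ARCH_ABI64),
 * so it reports FALSE here, while plain CPU_TYPE_X86 reports TRUE.
 */
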
/*******************************************************************************
* Borrowed (and slightly modified) the libc implementation for the kernel
* until the kernel has a supported strstr().
* Find the first occurrence of find in s.
*******************************************************************************/
const char *
kxld_strstr(const char *s, const char *find)
{
#if KERNEL
    char c, sc;
    size_t len;

    if ((c = *find++) != 0) {
        len = strlen(find);
        do {
            do {
                if ((sc = *s++) == 0)
                    return (NULL);
            } while (sc != c);
        } while (strncmp(s, find, len) != 0);
        s--;
    }
    return s;
#else
    return strstr(s, find);
#endif /* KERNEL */
}
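
/* Illustrative behavior (matches libc strstr): kxld_strstr("com.apple.driver",
 * "apple") returns a pointer to "apple.driver", and NULL is returned when the
 * substring is absent. An empty 'find' string returns 's' itself.
 */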

/*******************************************************************************
*******************************************************************************/
void
kxld_print_memory_report(void)
{
#if DEBUG
    kxld_log(kKxldLogLinking, kKxldLogExplicit, "kxld memory usage report:\n"
        "\tNumber of allocations: %8lu\n"
        "\tNumber of frees: %8lu\n"
        "\tAverage allocation size: %8lu\n"
        "\tTotal bytes allocated: %8lu\n"
        "\tTotal bytes freed: %8lu\n"
        "\tTotal bytes leaked: %8lu",
        num_allocations, num_frees,
        (num_allocations) ? (bytes_allocated / num_allocations) : 0,
        bytes_allocated, bytes_freed, bytes_allocated - bytes_freed);
#endif
}