]> git.saurik.com Git - apple/xnu.git/blob - libkern/kxld/kxld_util.c
e6d56c2ff695b147108d0bd65050eee32d56bc3b
[apple/xnu.git] / libkern / kxld / kxld_util.c
1 /*
2 * Copyright (c) 2007-2016 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 #include <stdarg.h>
29 #include <string.h>
30 #include <mach-o/loader.h>
31 #include <mach-o/nlist.h>
32 #include <mach-o/reloc.h>
33 #if KERNEL
34 #include <kern/kalloc.h>
35 #include <libkern/libkern.h>
36 #include <mach/vm_param.h>
37 #include <vm/vm_kern.h>
38 #else
39 #include <stdio.h>
40 #include <stdlib.h>
41 #include <mach/mach_init.h>
42 #include <mach-o/swap.h>
43 #endif
44
45 #define DEBUG_ASSERT_COMPONENT_NAME_STRING "kxld"
46 #include <AssertMacros.h>
47
48 #include "kxld_util.h"
49
#if !KERNEL
/* Userspace-only helpers that undo host-order swapping before a linked
 * object is written out for a target of the opposite endianness.
 */
static void unswap_macho_32(u_char *file, enum NXByteOrder host_order,
    enum NXByteOrder target_order);
static void unswap_macho_64(u_char *file, enum NXByteOrder host_order,
    enum NXByteOrder target_order);
#endif /* !KERNEL */

#if DEBUG
/* Allocation accounting, reported by kxld_print_memory_report(). */
static unsigned long num_allocations = 0;
static unsigned long num_frees = 0;
static unsigned long bytes_allocated = 0;
static unsigned long bytes_freed = 0;
#endif

/* Logging hook plus client-supplied name/context; see kxld_log(). */
static KXLDLoggingCallback s_logging_callback = NULL;
static const char *s_callback_name = NULL;
static void *s_callback_data = NULL;

#if !KERNEL
/* Cross-linking state: when enabled, page rounding uses the target's page
 * size instead of the host's (see kxld_set_cross_link_page_size()).
 */
static boolean_t s_cross_link_enabled = FALSE;
static kxld_size_t s_cross_link_page_size = PAGE_SIZE;
#endif
72
73
74 /*******************************************************************************
75 *******************************************************************************/
76 void
77 kxld_set_logging_callback(KXLDLoggingCallback logging_callback)
78 {
79 s_logging_callback = logging_callback;
80 }
81
82 /*******************************************************************************
83 *******************************************************************************/
84 void
85 kxld_set_logging_callback_data(const char *name, void *user_data)
86 {
87 s_callback_name = name;
88 s_callback_data = user_data;
89 }
90
91 /*******************************************************************************
92 *******************************************************************************/
/* Formats and dispatches one log message to the registered callback.
 * The client's format string is prefixed with "kxld[<name>]: " before the
 * varargs are applied; if the combined format does not fit in the stack
 * buffer it is rebuilt in a temporary heap buffer.  No-op when no callback
 * has been registered.
 */
void
kxld_log(KXLDLogSubsystem subsystem, KXLDLogLevel level,
    const char *in_format, ...)
{
    char stack_buffer[256];
    char *alloc_buffer = NULL;
    char *format = stack_buffer;
    const char *name = (s_callback_name) ? s_callback_name : "internal";
    u_int length = 0;
    va_list ap;

    if (s_logging_callback) {

        /* snprintf returns the length the full string would need, not
         * counting the NUL terminator.  NOTE(review): a negative error
         * return would wrap to a huge u_int here; these format strings are
         * not expected to produce encoding errors.
         */
        length = snprintf(stack_buffer, sizeof(stack_buffer), "kxld[%s]: %s",
            name, in_format);

        if (length >= sizeof(stack_buffer)) {
            /* Truncated: retry with a heap buffer sized for the NUL too. */
            length += 1;
            alloc_buffer = kxld_alloc(length);
            if (!alloc_buffer) return;

            snprintf(alloc_buffer, length, "kxld[%s]: %s",
                name, in_format);
            format = alloc_buffer;
        }

        /* Hand the prefixed format plus the caller's varargs to the client. */
        va_start(ap, in_format);
        s_logging_callback(subsystem, level, format, ap, s_callback_data);
        va_end(ap);

        if (alloc_buffer) {
            kxld_free(alloc_buffer, length);
        }
    }
}
128
/* We'll use kalloc for any page-based allocations under this threshold, and
 * kmem_alloc otherwise.
 *
 * Parenthesized: the previous expansion `16 * 1024` mis-associates when the
 * macro is used inside a larger expression (e.g. division or a cast).
 */
#define KALLOC_MAX (16 * 1024)
133
134 /*******************************************************************************
135 *******************************************************************************/
/* Allocate 'size' bytes via kalloc (kernel) or malloc (userspace).
 * Returns NULL on failure.  Pair with kxld_free().
 */
void *
kxld_alloc(size_t size)
{
    void *ptr;

#if KERNEL
    ptr = kalloc(size);
#else
    ptr = malloc(size);
#endif

#if DEBUG
    if (ptr != NULL) {
        num_allocations += 1;
        bytes_allocated += size;
    }
#endif

    return ptr;
}
156
157 /*******************************************************************************
158 *******************************************************************************/
/* Allocate a page-rounded buffer without touching the DEBUG counters.
 * Kernel: small requests go through kalloc, large ones through kmem_alloc.
 * Userspace: plain malloc of the rounded size.  Returns NULL on failure.
 */
void *
kxld_page_alloc_untracked(size_t size)
{
#if KERNEL
    void *ptr = NULL;
    vm_offset_t addr = 0;

    size = round_page(size);

    if (size < KALLOC_MAX) {
        ptr = kalloc(size);
    } else if (kmem_alloc(kernel_map, &addr, size, VM_KERN_MEMORY_OSKEXT) == 0) {
        ptr = (void *) addr;
    }

    return ptr;
#else /* !KERNEL */
    return malloc(round_page(size));
#endif /* KERNEL */
}
183
184 /*******************************************************************************
185 *******************************************************************************/
186 void *
187 kxld_page_alloc(size_t size)
188 {
189 void * ptr = NULL;
190
191 ptr = kxld_page_alloc_untracked(size);
192 #if DEBUG
193 if (ptr) {
194 ++num_allocations;
195 bytes_allocated += round_page(size);
196 }
197 #endif /* DEBUG */
198
199 return ptr;
200 }
201
202 /*******************************************************************************
203 *******************************************************************************/
204 void *
205 kxld_alloc_pageable(size_t size)
206 {
207 size = round_page(size);
208
209 #if KERNEL
210 kern_return_t rval = 0;
211 vm_offset_t ptr = 0;
212
213 rval = kmem_alloc_pageable(kernel_map, &ptr, size, VM_KERN_MEMORY_OSKEXT);
214 if (rval) ptr = 0;
215
216 return (void *) ptr;
217 #else
218 return kxld_page_alloc_untracked(size);
219 #endif
220 }
221
222 /*******************************************************************************
223 *******************************************************************************/
224 void
225 kxld_free(void *ptr, size_t size __unused)
226 {
227 #if DEBUG
228 ++num_frees;
229 bytes_freed += size;
230 #endif
231
232 #if KERNEL
233 kfree(ptr, size);
234 #else
235 free(ptr);
236 #endif
237 }
238
239 /*******************************************************************************
240 *******************************************************************************/
241 void
242 kxld_page_free_untracked(void *ptr, size_t size __unused)
243 {
244 #if KERNEL
245 size = round_page(size);
246
247 if (size < KALLOC_MAX) {
248 kfree(ptr, size);
249 } else {
250 kmem_free(kernel_map, (vm_offset_t) ptr, size);
251 }
252 #else /* !KERNEL */
253 free(ptr);
254 #endif /* KERNEL */
255 }
256
257
258 /*******************************************************************************
259 *******************************************************************************/
260 void
261 kxld_page_free(void *ptr, size_t size)
262 {
263 #if DEBUG
264 ++num_frees;
265 bytes_freed += round_page(size);
266 #endif /* DEBUG */
267 kxld_page_free_untracked(ptr, size);
268 }
269
270 /*******************************************************************************
271 *******************************************************************************/
/* Validate that a 32-bit Mach-O object lies entirely within 'size' bytes
 * and (userspace only) byte-swap it into host order if it arrived in the
 * opposite endianness.  Returns KERN_SUCCESS only when every header, load
 * command, section, relocation table, symbol table, and string table fits
 * inside the buffer; any require_action failure logs and returns
 * KERN_FAILURE with the file left partially swapped.
 */
kern_return_t
validate_and_swap_macho_32(u_char *file, u_long size
#if !KERNEL
    , enum NXByteOrder host_order
#endif /* !KERNEL */
    )
{
    kern_return_t rval = KERN_FAILURE;
    struct mach_header *mach_hdr = (struct mach_header *) ((void *) file);
    struct load_command *load_hdr = NULL;
    struct segment_command *seg_hdr = NULL;
    struct section *sects = NULL;
    struct relocation_info *relocs = NULL;
    struct symtab_command *symtab_hdr = NULL;
    struct nlist *symtab = NULL;
    u_long offset = 0;
    u_int cmd = 0;
    u_int cmdsize = 0;
    u_int i = 0;
    u_int j = 0;
#if !KERNEL
    boolean_t swap = FALSE;
#endif /* !KERNEL */

    check(file);
    check(size);

    /* Verify that the file is big enough for the mach header */
    require_action(size >= sizeof(*mach_hdr), finish,
        rval=KERN_FAILURE;
        kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogTruncatedMachO));
    offset = sizeof(*mach_hdr);

#if !KERNEL
    /* Swap the mach header if necessary */
    if (mach_hdr->magic == MH_CIGAM) {
        swap = TRUE;
        (void) swap_mach_header(mach_hdr, host_order);
    }
#endif /* !KERNEL */

    /* Validate the mach_header's magic number */
    require_action(mach_hdr->magic == MH_MAGIC, finish,
        rval=KERN_FAILURE;
        kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogMalformedMachO
        "Invalid magic number: 0x%x.", mach_hdr->magic));

    /* If in the running kernel, and asked to validate the kernel
     * (which is the only file of type MH_EXECUTE we should ever see),
     * then just assume it's ok or we wouldn't be running to begin with.
     */
#if KERNEL
    if (mach_hdr->filetype == MH_EXECUTE) {
        rval = KERN_SUCCESS;
        goto finish;
    }
#endif /* KERNEL */

    /* Validate and potentially swap the load commands */
    for(i = 0; i < mach_hdr->ncmds; ++i, offset += cmdsize) {

        /* Get the load command and size */
        load_hdr = (struct load_command *) ((void *) (file + offset));
        cmd = load_hdr->cmd;
        cmdsize = load_hdr->cmdsize;

#if !KERNEL
        if (swap) {
            cmd = OSSwapInt32(load_hdr->cmd);
            cmdsize = OSSwapInt32(load_hdr->cmdsize);
        }
#endif /* !KERNEL */

        /* Verify that the file is big enough to contain the load command */
        /* NOTE(review): on ILP32 targets 'offset + cmdsize' (and the
         * fileoff/reloff/symoff sums below) could wrap a 32-bit u_long for
         * a hostile header; consider overflow-checked comparisons.
         */
        require_action(size >= offset + cmdsize, finish,
            rval=KERN_FAILURE;
            kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogTruncatedMachO));

        switch(cmd) {
        case LC_SEGMENT:
            /* Get and swap the segment header */
            seg_hdr = (struct segment_command *) load_hdr;
#if !KERNEL
            if (swap) swap_segment_command(seg_hdr, host_order);
#endif /* !KERNEL */

            /* Get and swap the section headers */
            sects = (struct section *) &seg_hdr[1];
#if !KERNEL
            if (swap) swap_section(sects, seg_hdr->nsects, host_order);
#endif /* !KERNEL */

            /* Ignore segments with no vm size */
            if (!seg_hdr->vmsize) continue;

            /* Verify that the file is big enough for the segment data. */
            require_action(size >= seg_hdr->fileoff + seg_hdr->filesize, finish,
                rval=KERN_FAILURE;
                kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogTruncatedMachO));

            for (j = 0; j < seg_hdr->nsects; ++j) {

                /* Verify that, if the section is not to be zero filled on
                 * demand, that file is big enough for the section's data.
                 */
                require_action((sects[j].flags & S_ZEROFILL) ||
                    (size >= sects[j].offset + sects[j].size), finish,
                    rval=KERN_FAILURE;
                    kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogTruncatedMachO));

                /* Verify that the file is big enough for the section's
                 * relocation entries.
                 */
                require_action(size >=
                    sects[j].reloff + sects[j].nreloc * sizeof(*relocs), finish,
                    rval=KERN_FAILURE;
                    kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogTruncatedMachO));

                /* Swap the relocation entries */
                relocs = (struct relocation_info *) ((void *) (file + sects[j].reloff));
#if !KERNEL
                if (swap) {
                    swap_relocation_info(relocs, sects[j].nreloc,
                        host_order);
                }
#endif /* !KERNEL */
            }

            break;
        case LC_SYMTAB:
            /* Get and swap the symtab header */
            symtab_hdr = (struct symtab_command *) load_hdr;
#if !KERNEL
            if (swap) swap_symtab_command(symtab_hdr, host_order);
#endif /* !KERNEL */

            /* Verify that the file is big enough for the symbol table */
            require_action(size >=
                symtab_hdr->symoff + symtab_hdr->nsyms * sizeof(*symtab), finish,
                rval=KERN_FAILURE;
                kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogTruncatedMachO));

            /* Verify that the file is big enough for the string table */
            require_action(size >= symtab_hdr->stroff + symtab_hdr->strsize, finish,
                rval=KERN_FAILURE;
                kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogTruncatedMachO));

#if !KERNEL
            /* Swap the symbol table entries */
            symtab = (struct nlist *) ((void *) (file + symtab_hdr->symoff));
            if (swap) swap_nlist(symtab, symtab_hdr->nsyms, host_order);
#endif /* !KERNEL */

            break;
        default:
#if !KERNEL
            /* Swap the load command */
            if (swap) swap_load_command(load_hdr, host_order);
#endif /* !KERNEL */
            break;
        }
    }

    rval = KERN_SUCCESS;

finish:
    return rval;
}
440
441 /*******************************************************************************
442 *******************************************************************************/
/* 64-bit counterpart of validate_and_swap_macho_32(): validate that a
 * 64-bit Mach-O object lies entirely within 'size' bytes and (userspace
 * only) byte-swap it into host order.  Returns KERN_SUCCESS only when
 * every header, load command, section, relocation table, symbol table,
 * and string table fits inside the buffer.
 */
kern_return_t
validate_and_swap_macho_64(u_char *file, u_long size
#if !KERNEL
    , enum NXByteOrder host_order
#endif /* !KERNEL */
    )
{
    kern_return_t rval = KERN_FAILURE;
    struct mach_header_64 *mach_hdr = (struct mach_header_64 *) ((void *) file);
    struct load_command *load_hdr = NULL;
    struct segment_command_64 *seg_hdr = NULL;
    struct section_64 *sects = NULL;
    struct relocation_info *relocs = NULL;
    struct symtab_command *symtab_hdr = NULL;
    struct nlist_64 *symtab = NULL;
    u_long offset = 0;
    u_int cmd = 0;
    u_int cmdsize = 0;
    u_int i = 0;
    u_int j = 0;
#if !KERNEL
    boolean_t swap = FALSE;
#endif /* !KERNEL */

    check(file);
    check(size);

    /* Verify that the file is big enough for the mach header */
    require_action(size >= sizeof(*mach_hdr), finish,
        rval=KERN_FAILURE;
        kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogTruncatedMachO));
    offset = sizeof(*mach_hdr);

#if !KERNEL
    /* Swap the mach header if necessary */
    if (mach_hdr->magic == MH_CIGAM_64) {
        swap = TRUE;
        (void) swap_mach_header_64(mach_hdr, host_order);
    }
#endif /* !KERNEL */

    /* Validate the mach_header's magic number */
    require_action(mach_hdr->magic == MH_MAGIC_64, finish,
        rval=KERN_FAILURE;
        kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogMalformedMachO
        "Invalid magic number: 0x%x.", mach_hdr->magic));

    /* If in the running kernel, and asked to validate the kernel
     * (which is the only file of type MH_EXECUTE we should ever see),
     * then just assume it's ok or we wouldn't be running to begin with.
     */
#if KERNEL
    if (mach_hdr->filetype == MH_EXECUTE) {
        rval = KERN_SUCCESS;
        goto finish;
    }
#endif /* KERNEL */

    /* Validate and potentially swap the load commands */
    for(i = 0; i < mach_hdr->ncmds; ++i, offset += cmdsize) {
        /* Get the load command and size */
        load_hdr = (struct load_command *) ((void *) (file + offset));
        cmd = load_hdr->cmd;
        cmdsize = load_hdr->cmdsize;

#if !KERNEL
        if (swap) {
            cmd = OSSwapInt32(load_hdr->cmd);
            cmdsize = OSSwapInt32(load_hdr->cmdsize);
        }
#endif /* !KERNEL */

        /* Verify that the file is big enough to contain the load command */
        /* NOTE(review): 'offset + cmdsize' and the 64-bit fileoff/filesize
         * sums below can wrap u_long for a hostile header; consider
         * overflow-checked comparisons.
         */
        require_action(size >= offset + cmdsize, finish,
            rval=KERN_FAILURE;
            kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogTruncatedMachO));
        switch(cmd) {
        case LC_SEGMENT_64:
            /* Get and swap the segment header */
            seg_hdr = (struct segment_command_64 *) ((void *) load_hdr);
#if !KERNEL
            if (swap) swap_segment_command_64(seg_hdr, host_order);
#endif /* !KERNEL */

            /* Get and swap the section headers */
            sects = (struct section_64 *) &seg_hdr[1];
#if !KERNEL
            if (swap) swap_section_64(sects, seg_hdr->nsects, host_order);
#endif /* !KERNEL */

            /* If the segment has no vm footprint, skip it */
            if (!seg_hdr->vmsize) continue;

            /* Verify that the file is big enough for the segment data. */
            require_action(size >= seg_hdr->fileoff + seg_hdr->filesize, finish,
                rval=KERN_FAILURE;
                kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogTruncatedMachO));

            for (j = 0; j < seg_hdr->nsects; ++j) {

                /* Verify that, if the section is not to be zero filled on
                 * demand, that file is big enough for the section's data.
                 */
                require_action((sects[j].flags & S_ZEROFILL) ||
                    (size >= sects[j].offset + sects[j].size), finish,
                    rval=KERN_FAILURE;
                    kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogTruncatedMachO));

                /* Verify that the file is big enough for the section's
                 * relocation entries.
                 */
                require_action(size >=
                    sects[j].reloff + sects[j].nreloc * sizeof(*relocs), finish,
                    rval=KERN_FAILURE;
                    kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogTruncatedMachO));

                /* Swap the relocation entries */
                relocs = (struct relocation_info *) ((void *) (file + sects[j].reloff));
#if !KERNEL
                if (swap) {
                    swap_relocation_info(relocs, sects[j].nreloc,
                        host_order);
                }
#endif /* !KERNEL */
            }

            break;
        case LC_SYMTAB:
            /* Get and swap the symtab header */
            symtab_hdr = (struct symtab_command *) load_hdr;
#if !KERNEL
            if (swap) swap_symtab_command(symtab_hdr, host_order);
#endif /* !KERNEL */

            /* Verify that the file is big enough for the symbol table */
            require_action(size >=
                symtab_hdr->symoff + symtab_hdr->nsyms * sizeof(*symtab), finish,
                rval=KERN_FAILURE;
                kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogTruncatedMachO));

            /* Verify that the file is big enough for the string table */
            require_action(size >= symtab_hdr->stroff + symtab_hdr->strsize, finish,
                rval=KERN_FAILURE;
                kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogTruncatedMachO));

#if !KERNEL
            /* Swap the symbol table entries */
            symtab = (struct nlist_64 *) ((void *) (file + symtab_hdr->symoff));
            if (swap) swap_nlist_64(symtab, symtab_hdr->nsyms, host_order);
#endif /* !KERNEL */

            break;
        default:
#if !KERNEL
            /* Swap the load command */
            if (swap) swap_load_command(load_hdr, host_order);
#endif /* !KERNEL */
            break;
        }
    }

    rval = KERN_SUCCESS;

finish:
    return rval;
}
609
610 #if !KERNEL
611 /*******************************************************************************
612 *******************************************************************************/
613 void unswap_macho(u_char *file, enum NXByteOrder host_order,
614 enum NXByteOrder target_order)
615 {
616 struct mach_header *hdr = (struct mach_header *) ((void *) file);
617
618 if (!hdr) return;
619
620 if (hdr->magic == MH_MAGIC) {
621 unswap_macho_32(file, host_order, target_order);
622 } else if (hdr->magic == MH_MAGIC_64) {
623 unswap_macho_64(file, host_order, target_order);
624 }
625 }
626
627 /*******************************************************************************
628 *******************************************************************************/
629 static void
630 unswap_macho_32(u_char *file, enum NXByteOrder host_order,
631 enum NXByteOrder target_order)
632 {
633 struct mach_header *mach_hdr = (struct mach_header *) ((void *) file);
634 struct load_command *load_hdr = NULL;
635 struct segment_command *seg_hdr = NULL;
636 struct section *sects = NULL;
637 struct symtab_command *symtab_hdr = NULL;
638 struct nlist *symtab = NULL;
639 u_long offset = 0;
640 u_int cmd = 0;
641 u_int size = 0;
642 u_int i = 0;
643
644 check(file);
645
646 if (target_order == host_order) return;
647
648 offset = sizeof(*mach_hdr);
649 for(i = 0; i < mach_hdr->ncmds; ++i, offset += size) {
650 load_hdr = (struct load_command *) ((void *) (file + offset));
651 cmd = load_hdr->cmd;
652 size = load_hdr->cmdsize;
653
654 switch(cmd) {
655 case LC_SEGMENT:
656 seg_hdr = (struct segment_command *) load_hdr;
657 sects = (struct section *) &seg_hdr[1];
658
659 /* We don't need to unswap relocations because this function is
660 * called when linking is completed (so there are no relocations).
661 */
662
663 swap_section(sects, seg_hdr->nsects, target_order);
664 swap_segment_command(seg_hdr, target_order);
665 break;
666 case LC_SYMTAB:
667 symtab_hdr = (struct symtab_command *) load_hdr;
668 symtab = (struct nlist*) ((void *) (file + symtab_hdr->symoff));
669
670 swap_nlist(symtab, symtab_hdr->nsyms, target_order);
671 swap_symtab_command(symtab_hdr, target_order);
672
673 break;
674 default:
675 swap_load_command(load_hdr, target_order);
676 break;
677 }
678 }
679
680 (void) swap_mach_header(mach_hdr, target_order);
681 }
682
683 /*******************************************************************************
684 *******************************************************************************/
685 static void
686 unswap_macho_64(u_char *file, enum NXByteOrder host_order,
687 enum NXByteOrder target_order)
688 {
689 struct mach_header_64 *mach_hdr = (struct mach_header_64 *) ((void *) file);
690 struct load_command *load_hdr = NULL;
691 struct segment_command_64 *seg_hdr = NULL;
692 struct section_64 *sects = NULL;
693 struct symtab_command *symtab_hdr = NULL;
694 struct nlist_64 *symtab = NULL;
695 u_long offset = 0;
696 u_int cmd = 0;
697 u_int size = 0;
698 u_int i = 0;
699
700 check(file);
701
702 if (target_order == host_order) return;
703
704 offset = sizeof(*mach_hdr);
705 for(i = 0; i < mach_hdr->ncmds; ++i, offset += size) {
706 load_hdr = (struct load_command *) ((void *) (file + offset));
707 cmd = load_hdr->cmd;
708 size = load_hdr->cmdsize;
709
710 switch(cmd) {
711 case LC_SEGMENT_64:
712 seg_hdr = (struct segment_command_64 *) ((void *) load_hdr);
713 sects = (struct section_64 *) &seg_hdr[1];
714
715 /* We don't need to unswap relocations because this function is
716 * called when linking is completed (so there are no relocations).
717 */
718
719 swap_section_64(sects, seg_hdr->nsects, target_order);
720 swap_segment_command_64(seg_hdr, target_order);
721 break;
722 case LC_SYMTAB:
723 symtab_hdr = (struct symtab_command *) load_hdr;
724 symtab = (struct nlist_64 *) ((void *) (file + symtab_hdr->symoff));
725
726 swap_nlist_64(symtab, symtab_hdr->nsyms, target_order);
727 swap_symtab_command(symtab_hdr, target_order);
728
729 break;
730 default:
731 swap_load_command(load_hdr, target_order);
732 break;
733 }
734 }
735
736 (void) swap_mach_header_64(mach_hdr, target_order);
737 }
738 #endif /* !KERNEL */
739
740 /*******************************************************************************
741 *******************************************************************************/
/* Round 'address' up to the next multiple of 2^align.  align == 0 returns
 * the address unchanged.
 */
kxld_addr_t
kxld_align_address(kxld_addr_t address, u_int align)
{
    kxld_addr_t alignment = 0;
    kxld_addr_t low_bits = 0;

    if (!align) return address;

    /* Compute the alignment in kxld_addr_t width.  The old '1 << align'
     * evaluated in (signed, 32-bit) int, which is undefined for
     * align >= 31 even though the destination type can hold the result.
     */
    alignment = ((kxld_addr_t) 1) << align;

    low_bits = (address) & (alignment - 1);
    if (low_bits) {
        address += (alignment - low_bits);
    }

    return address;
}
757
758 /*******************************************************************************
759 *******************************************************************************/
/* A CPU type is 64-bit exactly when its ABI64 flag bit is set. */
boolean_t
kxld_is_32_bit(cpu_type_t cputype)
{
    return (cputype & CPU_ARCH_ABI64) == 0;
}
765
766 /*******************************************************************************
767 * Borrowed (and slightly modified) the libc implementation for the kernel
768 * until the kernel has a supported strstr().
769 * Find the first occurrence of find in s.
770 *******************************************************************************/
/*******************************************************************************
* Find the first occurrence of 'find' in 's'.  Kernel builds carry a local
* libc-style implementation (the kernel lacks a supported strstr());
* userspace defers to the real strstr().  Returns NULL when not found; an
* empty pattern matches at 's'.
*******************************************************************************/
const char *
kxld_strstr(const char *s, const char *find)
{
#if KERNEL
    size_t rest_len;

    if (*find == '\0') {
        return s;
    }

    rest_len = strlen(find + 1);
    for (;;) {
        char cur;

        /* Scan forward to the next occurrence of the pattern's first
         * character; NUL means no match anywhere.
         */
        do {
            cur = *s++;
            if (cur == '\0') {
                return NULL;
            }
        } while (cur != *find);

        /* First character matched at s-1; check the remainder. */
        if (strncmp(s, find + 1, rest_len) == 0) {
            break;
        }
    }

    return s - 1;
#else
    return strstr(s, find);
#endif /* KERNEL */
}
793
794 /*******************************************************************************
795 *******************************************************************************/
/* Emit the DEBUG allocation counters through kxld_log().  No-op in
 * non-DEBUG builds.
 */
void
kxld_print_memory_report(void)
{
#if DEBUG
    /* Guard the average: the old code divided by num_allocations
     * unconditionally, crashing (division by zero) when nothing had been
     * allocated yet.
     */
    unsigned long average_size = num_allocations ?
        (bytes_allocated / num_allocations) : 0;

    kxld_log(kKxldLogLinking, kKxldLogExplicit, "kxld memory usage report:\n"
        "\tNumber of allocations: %8lu\n"
        "\tNumber of frees: %8lu\n"
        "\tAverage allocation size: %8lu\n"
        "\tTotal bytes allocated: %8lu\n"
        "\tTotal bytes freed: %8lu\n"
        "\tTotal bytes leaked: %8lu",
        num_allocations, num_frees, average_size,
        bytes_allocated, bytes_freed, bytes_allocated - bytes_freed);
#endif
}
811
812 /*********************************************************************
813 *********************************************************************/
814 #if !KERNEL
815 boolean_t kxld_set_cross_link_page_size(kxld_size_t target_page_size)
816 {
817 // verify radix 2
818 if ((target_page_size != 0) &&
819 ((target_page_size & (target_page_size - 1)) == 0)) {
820
821 s_cross_link_enabled = TRUE;
822 s_cross_link_page_size = target_page_size;
823
824 return TRUE;
825 } else {
826 return FALSE;
827 }
828 }
829 #endif /* !KERNEL */
830
831 /*********************************************************************
832 *********************************************************************/
833 kxld_size_t kxld_get_effective_page_size(void)
834 {
835 #if KERNEL
836 return PAGE_SIZE;
837 #else
838 if (s_cross_link_enabled) {
839 return s_cross_link_page_size;
840 } else {
841 return PAGE_SIZE;
842 }
843 #endif /* KERNEL */
844 }
845
846 /*********************************************************************
847 *********************************************************************/
848 kxld_addr_t kxld_round_page_cross_safe(kxld_addr_t offset)
849 {
850 #if KERNEL
851 return round_page(offset);
852 #else
853 // assume s_cross_link_page_size is power of 2
854 if (s_cross_link_enabled) {
855 return (offset + (s_cross_link_page_size - 1)) &
856 (~(s_cross_link_page_size - 1));
857 } else {
858 return round_page(offset);
859 }
860 #endif /* KERNEL */
861 }
862
863 #if SPLIT_KEXTS_DEBUG
864
/* Dump the split-kext layout (source and linked buffer extents plus the
 * per-segment vmaddrs) at error level so it appears in default logs.
 * NOTE(review): the sizes are printed with %lu — assumes kextSize and
 * linkedKextSize are unsigned-long-sized; confirm against splitKextLinkInfo.
 */
void kxld_show_split_info(splitKextLinkInfo *info)
{
    kxld_log(kKxldLogLinking, kKxldLogErr,
        "splitKextLinkInfo: \n"
        "kextExecutable %p to %p kextSize %lu \n"
        "linkedKext %p to %p linkedKextSize %lu \n"
        "vmaddr_TEXT %p vmaddr_TEXT_EXEC %p "
        "vmaddr_DATA %p vmaddr_DATA_CONST %p vmaddr_LINKEDIT %p",
        (void *) info->kextExecutable,
        (void *) (info->kextExecutable + info->kextSize),
        info->kextSize,
        (void*) info->linkedKext,
        (void*) (info->linkedKext + info->linkedKextSize),
        info->linkedKextSize,
        (void *) info->vmaddr_TEXT,
        (void *) info->vmaddr_TEXT_EXEC,
        (void *) info->vmaddr_DATA,
        (void *) info->vmaddr_DATA_CONST,
        (void *) info->vmaddr_LINKEDIT);
}
885
886 boolean_t isTargetKextName(const char * the_name)
887 {
888 if (the_name && 0 == strcmp(the_name, KXLD_TARGET_KEXT)) {
889 return(TRUE);
890 }
891 return(FALSE);
892 }
893 #endif
894