/*
 * Copyright (c) 2007-2008 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <stdarg.h>
#include <string.h>
#include <mach-o/loader.h>
#include <mach-o/nlist.h>
#include <mach-o/reloc.h>
#if KERNEL
#include <kern/kalloc.h>
#include <libkern/libkern.h>
#include <mach/vm_param.h>
#include <vm/vm_kern.h>
#else
#include <stdio.h>
#include <stdlib.h>
#include <mach/mach_init.h>
#include <mach-o/swap.h>
#endif

#define DEBUG_ASSERT_COMPONENT_NAME_STRING "kxld"
#include <AssertMacros.h>

#include "kxld_util.h"

#if !KERNEL
static void unswap_macho_32(u_char *file, enum NXByteOrder host_order,
    enum NXByteOrder target_order);
static void unswap_macho_64(u_char *file, enum NXByteOrder host_order,
    enum NXByteOrder target_order);
#endif /* !KERNEL */

#if DEBUG
static unsigned long num_allocations = 0;
static unsigned long num_frees = 0;
static unsigned long bytes_allocated = 0;
static unsigned long bytes_freed = 0;
#endif

static KXLDLoggingCallback s_logging_callback = NULL;
static const char *s_callback_name = NULL;
static void *s_callback_data = NULL;

/*******************************************************************************
*******************************************************************************/
void
kxld_set_logging_callback(KXLDLoggingCallback logging_callback)
{
    s_logging_callback = logging_callback;
}

/*******************************************************************************
*******************************************************************************/
void
kxld_set_logging_callback_data(const char *name, void *user_data)
{
    s_callback_name = name;
    s_callback_data = user_data;
}

/*******************************************************************************
*******************************************************************************/
void
kxld_log(KXLDLogSubsystem subsystem, KXLDLogLevel level,
    const char *in_format, ...)
{
    char stack_buffer[256];
    char *alloc_buffer = NULL;
    char *format = stack_buffer;
    const char *name = (s_callback_name) ? s_callback_name : "internal";
    u_int length = 0;
    va_list ap;

    if (s_logging_callback) {

        length = snprintf(stack_buffer, sizeof(stack_buffer), "kxld[%s]: %s",
            name, in_format);

        if (length >= sizeof(stack_buffer)) {
            length += 1;
            alloc_buffer = kxld_alloc(length);
            if (!alloc_buffer) return;

            snprintf(alloc_buffer, length, "kxld[%s]: %s",
                name, in_format);
            format = alloc_buffer;
        }

        va_start(ap, in_format);
        s_logging_callback(subsystem, level, format, ap, s_callback_data);
        va_end(ap);

        if (alloc_buffer) {
            kxld_free(alloc_buffer, length);
        }
    }
}

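/*******************************************************************************
* Illustrative sketch (not compiled into this file): how a client might hook up
* the logging interface above. The callback and function names are hypothetical;
* the parameter list simply mirrors the call made by kxld_log(), and the body
* assumes an ordinary user-space vfprintf() is available.
*******************************************************************************/
#if 0
static void
example_logging_callback(KXLDLogSubsystem subsystem __unused,
    KXLDLogLevel level __unused, const char *format, va_list ap,
    void *user_data __unused)
{
    /* kxld_log() has already prefixed the format with "kxld[<name>]: ". */
    vfprintf(stderr, format, ap);
    fputc('\n', stderr);
}

static void
example_configure_logging(void)
{
    /* Typically done once, before any linking work begins. */
    kxld_set_logging_callback(example_logging_callback);
    kxld_set_logging_callback_data("com.example.kext", NULL);
}
#endif
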
/* We'll use kalloc for any page-based allocations under this threshold, and
 * kmem_alloc otherwise.
 */
#define KALLOC_MAX (16 * 1024)

/*******************************************************************************
*******************************************************************************/
void *
kxld_alloc(size_t size)
{
    void * ptr = NULL;

#if KERNEL
    ptr = kalloc(size);
#else
    ptr = malloc(size);
#endif

#if DEBUG
    if (ptr) {
        ++num_allocations;
        bytes_allocated += size;
    }
#endif

    return ptr;
}

/*******************************************************************************
*******************************************************************************/
void *
kxld_page_alloc_untracked(size_t size)
{
    void * ptr = NULL;
#if KERNEL
    kern_return_t rval = 0;
    vm_offset_t addr = 0;
#endif /* KERNEL */

    size = round_page(size);

#if KERNEL
    if (size < KALLOC_MAX) {
        ptr = kalloc(size);
    } else {
        rval = kmem_alloc(kernel_map, &addr, size);
        if (!rval) ptr = (void *) addr;
    }
#else /* !KERNEL */
    ptr = malloc(size);
#endif /* KERNEL */

    return ptr;
}

/*******************************************************************************
*******************************************************************************/
void *
kxld_page_alloc(size_t size)
{
    void * ptr = NULL;

    ptr = kxld_page_alloc_untracked(size);
#if DEBUG
    if (ptr) {
        ++num_allocations;
        bytes_allocated += round_page(size);
    }
#endif /* DEBUG */

    return ptr;
}

/*******************************************************************************
*******************************************************************************/
void *
kxld_alloc_pageable(size_t size)
{
    size = round_page(size);

#if KERNEL
    kern_return_t rval = 0;
    vm_offset_t ptr = 0;

    rval = kmem_alloc_pageable(kernel_map, &ptr, size);
    if (rval) ptr = 0;

    return (void *) ptr;
#else
    return kxld_page_alloc_untracked(size);
#endif
}

/*******************************************************************************
*******************************************************************************/
void
kxld_free(void *ptr, size_t size __unused)
{
#if DEBUG
    ++num_frees;
    bytes_freed += size;
#endif

#if KERNEL
    kfree(ptr, size);
#else
    free(ptr);
#endif
}

/*******************************************************************************
*******************************************************************************/
void
kxld_page_free_untracked(void *ptr, size_t size __unused)
{
#if KERNEL
    size = round_page(size);

    if (size < KALLOC_MAX) {
        kfree(ptr, size);
    } else {
        kmem_free(kernel_map, (vm_offset_t) ptr, size);
    }
#else /* !KERNEL */
    free(ptr);
#endif /* KERNEL */
}


/*******************************************************************************
*******************************************************************************/
void
kxld_page_free(void *ptr, size_t size)
{
#if DEBUG
    ++num_frees;
    bytes_freed += round_page(size);
#endif /* DEBUG */
    kxld_page_free_untracked(ptr, size);
}
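
/*******************************************************************************
* Illustrative sketch (not compiled into this file): pairing rules for the
* allocators above. Because the kernel path chooses kalloc() or kmem_alloc()
* based on the rounded size, a page allocation must be released with the
* matching page-free call and the same size that was requested. The function
* name and buffer use below are hypothetical.
*******************************************************************************/
#if 0
static kern_return_t
example_scratch_buffer(u_long nbytes)
{
    u_char *buf = NULL;

    buf = kxld_page_alloc(nbytes);      /* tracked; size is rounded to a page */
    if (!buf) return KERN_RESOURCE_SHORTAGE;

    /* ... use buf ... */

    kxld_page_free(buf, nbytes);        /* must pass the original size back   */
    return KERN_SUCCESS;
}
#endif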

/*******************************************************************************
*******************************************************************************/
kern_return_t
validate_and_swap_macho_32(u_char *file, u_long size
#if !KERNEL
    , enum NXByteOrder host_order
#endif /* !KERNEL */
    )
{
    kern_return_t rval = KERN_FAILURE;
    struct mach_header *mach_hdr = (struct mach_header *) file;
    struct load_command *load_hdr = NULL;
    struct segment_command *seg_hdr = NULL;
    struct section *sects = NULL;
    struct relocation_info *relocs = NULL;
    struct symtab_command *symtab_hdr = NULL;
    struct nlist *symtab = NULL;
    u_long offset = 0;
    u_int cmd = 0;
    u_int cmdsize = 0;
    u_int i = 0;
    u_int j = 0;
#if !KERNEL
    boolean_t swap = FALSE;
#endif /* !KERNEL */

    check(file);
    check(size);

    /* Verify that the file is big enough for the mach header */
    require_action(size >= sizeof(*mach_hdr), finish,
        rval=KERN_FAILURE;
        kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogTruncatedMachO));
    offset = sizeof(*mach_hdr);

#if !KERNEL
    /* Swap the mach header if necessary */
    if (mach_hdr->magic == MH_CIGAM) {
        swap = TRUE;
        (void) swap_mach_header(mach_hdr, host_order);
    }
#endif /* !KERNEL */

    /* Validate the mach_header's magic number */
    require_action(mach_hdr->magic == MH_MAGIC, finish,
        rval=KERN_FAILURE;
        kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogMalformedMachO
            "Invalid magic number: 0x%x.", mach_hdr->magic));

    /* Validate and potentially swap the load commands */
    for(i = 0; i < mach_hdr->ncmds; ++i, offset += cmdsize) {

        /* Get the load command and size */
        load_hdr = (struct load_command *) (file + offset);
        cmd = load_hdr->cmd;
        cmdsize = load_hdr->cmdsize;

#if !KERNEL
        if (swap) {
            cmd = OSSwapInt32(load_hdr->cmd);
            cmdsize = OSSwapInt32(load_hdr->cmdsize);
        }
#endif /* !KERNEL */

        /* Verify that the file is big enough to contain the load command */
        require_action(size >= offset + cmdsize, finish,
            rval=KERN_FAILURE;
            kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogTruncatedMachO));

        switch(cmd) {
        case LC_SEGMENT:
            /* Get and swap the segment header */
            seg_hdr = (struct segment_command *) load_hdr;
#if !KERNEL
            if (swap) swap_segment_command(seg_hdr, host_order);
#endif /* !KERNEL */

            /* Get and swap the section headers */
            sects = (struct section *) &seg_hdr[1];
#if !KERNEL
            if (swap) swap_section(sects, seg_hdr->nsects, host_order);
#endif /* !KERNEL */

            /* Ignore segments with no vm size */
            if (!seg_hdr->vmsize) continue;

            /* Verify that the file is big enough for the segment data. */
            require_action(size >= seg_hdr->fileoff + seg_hdr->filesize, finish,
                rval=KERN_FAILURE;
                kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogTruncatedMachO));

            for (j = 0; j < seg_hdr->nsects; ++j) {

                /* Verify that, if the section is not to be zero-filled on
                 * demand, the file is big enough for the section's data.
                 */
                require_action((sects[j].flags & S_ZEROFILL) ||
                    (size >= sects[j].offset + sects[j].size), finish,
                    rval=KERN_FAILURE;
                    kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogTruncatedMachO));

                /* Verify that the file is big enough for the section's
                 * relocation entries.
                 */
                require_action(size >=
                    sects[j].reloff + sects[j].nreloc * sizeof(*relocs), finish,
                    rval=KERN_FAILURE;
                    kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogTruncatedMachO));

                /* Swap the relocation entries */
                relocs = (struct relocation_info *) (file + sects[j].reloff);
#if !KERNEL
                if (swap) {
                    swap_relocation_info(relocs, sects[j].nreloc,
                        host_order);
                }
#endif /* !KERNEL */
            }

            break;
        case LC_SYMTAB:
            /* Get and swap the symtab header */
            symtab_hdr = (struct symtab_command *) load_hdr;
#if !KERNEL
            if (swap) swap_symtab_command(symtab_hdr, host_order);
#endif /* !KERNEL */

            /* Verify that the file is big enough for the symbol table */
            require_action(size >=
                symtab_hdr->symoff + symtab_hdr->nsyms * sizeof(*symtab), finish,
                rval=KERN_FAILURE;
                kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogTruncatedMachO));

            /* Verify that the file is big enough for the string table */
            require_action(size >= symtab_hdr->stroff + symtab_hdr->strsize, finish,
                rval=KERN_FAILURE;
                kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogTruncatedMachO));

#if !KERNEL
            /* Swap the symbol table entries */
            symtab = (struct nlist *) (file + symtab_hdr->symoff);
            if (swap) swap_nlist(symtab, symtab_hdr->nsyms, host_order);
#endif /* !KERNEL */

            break;
        default:
#if !KERNEL
            /* Swap the load command */
            if (swap) swap_load_command(load_hdr, host_order);
#endif /* !KERNEL */
            break;
        }
    }

    rval = KERN_SUCCESS;

finish:
    return rval;
}

/*******************************************************************************
*******************************************************************************/
kern_return_t
validate_and_swap_macho_64(u_char *file, u_long size
#if !KERNEL
    , enum NXByteOrder host_order
#endif /* !KERNEL */
    )
{
    kern_return_t rval = KERN_FAILURE;
    struct mach_header_64 *mach_hdr = (struct mach_header_64 *) file;
    struct load_command *load_hdr = NULL;
    struct segment_command_64 *seg_hdr = NULL;
    struct section_64 *sects = NULL;
    struct relocation_info *relocs = NULL;
    struct symtab_command *symtab_hdr = NULL;
    struct nlist_64 *symtab = NULL;
    u_long offset = 0;
    u_int cmd = 0;
    u_int cmdsize = 0;
    u_int i = 0;
    u_int j = 0;
#if !KERNEL
    boolean_t swap = FALSE;
#endif /* !KERNEL */

    check(file);
    check(size);

    /* Verify that the file is big enough for the mach header */
    require_action(size >= sizeof(*mach_hdr), finish,
        rval=KERN_FAILURE;
        kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogTruncatedMachO));
    offset = sizeof(*mach_hdr);

#if !KERNEL
    /* Swap the mach header if necessary */
    if (mach_hdr->magic == MH_CIGAM_64) {
        swap = TRUE;
        (void) swap_mach_header_64(mach_hdr, host_order);
    }
#endif /* !KERNEL */

    /* Validate the mach_header's magic number */
    require_action(mach_hdr->magic == MH_MAGIC_64, finish,
        rval=KERN_FAILURE;
        kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogMalformedMachO
            "Invalid magic number: 0x%x.", mach_hdr->magic));

    /* Validate and potentially swap the load commands */
    for(i = 0; i < mach_hdr->ncmds; ++i, offset += cmdsize) {
        /* Get the load command and size */
        load_hdr = (struct load_command *) (file + offset);
        cmd = load_hdr->cmd;
        cmdsize = load_hdr->cmdsize;

#if !KERNEL
        if (swap) {
            cmd = OSSwapInt32(load_hdr->cmd);
            cmdsize = OSSwapInt32(load_hdr->cmdsize);
        }
#endif /* !KERNEL */

        /* Verify that the file is big enough to contain the load command */
        require_action(size >= offset + cmdsize, finish,
            rval=KERN_FAILURE;
            kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogTruncatedMachO));
        switch(cmd) {
        case LC_SEGMENT_64:
            /* Get and swap the segment header */
            seg_hdr = (struct segment_command_64 *) load_hdr;
#if !KERNEL
            if (swap) swap_segment_command_64(seg_hdr, host_order);
#endif /* !KERNEL */

            /* Get and swap the section headers */
            sects = (struct section_64 *) &seg_hdr[1];
#if !KERNEL
            if (swap) swap_section_64(sects, seg_hdr->nsects, host_order);
#endif /* !KERNEL */

            /* If the segment has no vm footprint, skip it */
            if (!seg_hdr->vmsize) continue;

            /* Verify that the file is big enough for the segment data. */
            require_action(size >= seg_hdr->fileoff + seg_hdr->filesize, finish,
                rval=KERN_FAILURE;
                kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogTruncatedMachO));

            for (j = 0; j < seg_hdr->nsects; ++j) {

                /* Verify that, if the section is not to be zero-filled on
                 * demand, the file is big enough for the section's data.
                 */
                require_action((sects[j].flags & S_ZEROFILL) ||
                    (size >= sects[j].offset + sects[j].size), finish,
                    rval=KERN_FAILURE;
                    kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogTruncatedMachO));

                /* Verify that the file is big enough for the section's
                 * relocation entries.
                 */
                require_action(size >=
                    sects[j].reloff + sects[j].nreloc * sizeof(*relocs), finish,
                    rval=KERN_FAILURE;
                    kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogTruncatedMachO));

                /* Swap the relocation entries */
                relocs = (struct relocation_info *) (file + sects[j].reloff);
#if !KERNEL
                if (swap) {
                    swap_relocation_info(relocs, sects[j].nreloc,
                        host_order);
                }
#endif /* !KERNEL */
            }

            break;
        case LC_SYMTAB:
            /* Get and swap the symtab header */
            symtab_hdr = (struct symtab_command *) load_hdr;
#if !KERNEL
            if (swap) swap_symtab_command(symtab_hdr, host_order);
#endif /* !KERNEL */

            /* Verify that the file is big enough for the symbol table */
            require_action(size >=
                symtab_hdr->symoff + symtab_hdr->nsyms * sizeof(*symtab), finish,
                rval=KERN_FAILURE;
                kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogTruncatedMachO));

            /* Verify that the file is big enough for the string table */
            require_action(size >= symtab_hdr->stroff + symtab_hdr->strsize, finish,
                rval=KERN_FAILURE;
                kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogTruncatedMachO));

#if !KERNEL
            /* Swap the symbol table entries */
            symtab = (struct nlist_64 *) (file + symtab_hdr->symoff);
            if (swap) swap_nlist_64(symtab, symtab_hdr->nsyms, host_order);
#endif /* !KERNEL */

            break;
        default:
#if !KERNEL
            /* Swap the load command */
            if (swap) swap_load_command(load_hdr, host_order);
#endif /* !KERNEL */
            break;
        }
    }

    rval = KERN_SUCCESS;

finish:
    return rval;
}

#if !KERNEL
/*******************************************************************************
*******************************************************************************/
void unswap_macho(u_char *file, enum NXByteOrder host_order,
    enum NXByteOrder target_order)
{
    struct mach_header *hdr = (struct mach_header *) file;

    if (!hdr) return;

    if (hdr->magic == MH_MAGIC) {
        unswap_macho_32(file, host_order, target_order);
    } else if (hdr->magic == MH_MAGIC_64) {
        unswap_macho_64(file, host_order, target_order);
    }
}
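
/*******************************************************************************
* Illustrative sketch (not compiled into this file): the intended host-side
* round trip. A cross-linking tool validates and byte-swaps an object into host
* order, does its work, and then calls unswap_macho() to restore the target's
* byte order before writing the buffer back out. The function name, buffer, and
* target_order parameter are hypothetical; NXHostByteOrder() comes from
* <architecture/byte_order.h>.
*******************************************************************************/
#if 0
static kern_return_t
example_process_object(u_char *file, u_long size,
    enum NXByteOrder target_order)
{
    kern_return_t rval = KERN_FAILURE;

    /* Bring a 32-bit object into host byte order and sanity-check it. */
    rval = validate_and_swap_macho_32(file, size, NXHostByteOrder());
    if (rval != KERN_SUCCESS) return rval;

    /* ... link against the swapped, validated image ... */

    /* Restore the target byte order before the buffer is written out. */
    unswap_macho(file, NXHostByteOrder(), target_order);
    return KERN_SUCCESS;
}
#endif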

/*******************************************************************************
*******************************************************************************/
static void
unswap_macho_32(u_char *file, enum NXByteOrder host_order,
    enum NXByteOrder target_order)
{
    struct mach_header *mach_hdr = (struct mach_header *) file;
    struct load_command *load_hdr = NULL;
    struct segment_command *seg_hdr = NULL;
    struct section *sects = NULL;
    struct symtab_command *symtab_hdr = NULL;
    struct nlist *symtab = NULL;
    u_long offset = 0;
    u_int cmd = 0;
    u_int size = 0;
    u_int i = 0;

    check(file);

    if (target_order == host_order) return;

    offset = sizeof(*mach_hdr);
    for(i = 0; i < mach_hdr->ncmds; ++i, offset += size) {
        load_hdr = (struct load_command *) (file + offset);
        cmd = load_hdr->cmd;
        size = load_hdr->cmdsize;

        switch(cmd) {
        case LC_SEGMENT:
            seg_hdr = (struct segment_command *) load_hdr;
            sects = (struct section *) &seg_hdr[1];

            /* We don't need to unswap relocations because this function is
             * called when linking is completed (so there are no relocations).
             */

            swap_section(sects, seg_hdr->nsects, target_order);
            swap_segment_command(seg_hdr, target_order);
            break;
        case LC_SYMTAB:
            symtab_hdr = (struct symtab_command *) load_hdr;
            symtab = (struct nlist*) (file + symtab_hdr->symoff);

            swap_nlist(symtab, symtab_hdr->nsyms, target_order);
            swap_symtab_command(symtab_hdr, target_order);

            break;
        default:
            swap_load_command(load_hdr, target_order);
            break;
        }
    }

    (void) swap_mach_header(mach_hdr, target_order);
}

/*******************************************************************************
*******************************************************************************/
static void
unswap_macho_64(u_char *file, enum NXByteOrder host_order,
    enum NXByteOrder target_order)
{
    struct mach_header_64 *mach_hdr = (struct mach_header_64 *) file;
    struct load_command *load_hdr = NULL;
    struct segment_command_64 *seg_hdr = NULL;
    struct section_64 *sects = NULL;
    struct symtab_command *symtab_hdr = NULL;
    struct nlist_64 *symtab = NULL;
    u_long offset = 0;
    u_int cmd = 0;
    u_int size = 0;
    u_int i = 0;

    check(file);

    if (target_order == host_order) return;

    offset = sizeof(*mach_hdr);
    for(i = 0; i < mach_hdr->ncmds; ++i, offset += size) {
        load_hdr = (struct load_command *) (file + offset);
        cmd = load_hdr->cmd;
        size = load_hdr->cmdsize;

        switch(cmd) {
        case LC_SEGMENT_64:
            seg_hdr = (struct segment_command_64 *) load_hdr;
            sects = (struct section_64 *) &seg_hdr[1];

            /* We don't need to unswap relocations because this function is
             * called when linking is completed (so there are no relocations).
             */

            swap_section_64(sects, seg_hdr->nsects, target_order);
            swap_segment_command_64(seg_hdr, target_order);
            break;
        case LC_SYMTAB:
            symtab_hdr = (struct symtab_command *) load_hdr;
            symtab = (struct nlist_64 *) (file + symtab_hdr->symoff);

            swap_nlist_64(symtab, symtab_hdr->nsyms, target_order);
            swap_symtab_command(symtab_hdr, target_order);

            break;
        default:
            swap_load_command(load_hdr, target_order);
            break;
        }
    }

    (void) swap_mach_header_64(mach_hdr, target_order);
}
#endif /* !KERNEL */

/*******************************************************************************
*******************************************************************************/
kxld_addr_t
kxld_align_address(kxld_addr_t address, u_int align)
{
    kxld_addr_t alignment = (1 << align);
    kxld_addr_t low_bits = 0;

    low_bits = (address) & (alignment - 1);
    if (low_bits) {
        address += (alignment - low_bits);
    }

    return address;
}
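
/*******************************************************************************
* Illustrative sketch (not compiled into this file): kxld_align_address() takes
* the alignment as a power-of-two exponent, the way Mach-O section headers
* store it. For example, align = 4 means 16-byte alignment, so 0x1003 rounds up
* to 0x1010 and an already aligned 0x1010 is returned unchanged. The function
* name below is hypothetical.
*******************************************************************************/
#if 0
static void
example_alignment(void)
{
    check(kxld_align_address(0x1003, 4) == 0x1010);  /* rounds up to 16 bytes */
    check(kxld_align_address(0x1010, 4) == 0x1010);  /* already aligned       */
}
#endif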

/*******************************************************************************
*******************************************************************************/
boolean_t
kxld_is_32_bit(cpu_type_t cputype)
{
    return !(cputype & CPU_ARCH_ABI64);
}

/*******************************************************************************
* Borrowed (and slightly modified) from the libc implementation for the kernel,
* until the kernel has a supported strstr().
* Finds the first occurrence of find in s.
*******************************************************************************/
const char *
kxld_strstr(const char *s, const char *find)
{
#if KERNEL
    char c, sc;
    size_t len;

    if ((c = *find++) != 0) {
        len = strlen(find);
        do {
            do {
                if ((sc = *s++) == 0)
                    return (NULL);
            } while (sc != c);
        } while (strncmp(s, find, len) != 0);
        s--;
    }
    return s;
#else
    return strstr(s, find);
#endif /* KERNEL */
}
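
/*******************************************************************************
* Illustrative sketch (not compiled into this file): kxld_strstr() mirrors
* strstr(3) and is handy for recognizing substrings of mangled symbol names.
* The function name and the "__ZTV" vtable prefix used here are illustrative
* assumptions, not definitions taken from this file.
*******************************************************************************/
#if 0
static boolean_t
example_looks_like_vtable(const char *symname)
{
    /* Non-NULL iff symname contains the (assumed) C++ vtable prefix. */
    return (kxld_strstr(symname, "__ZTV") != NULL);
}
#endif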

/*******************************************************************************
*******************************************************************************/
void
kxld_print_memory_report(void)
{
#if DEBUG
    kxld_log(kKxldLogLinking, kKxldLogExplicit, "kxld memory usage report:\n"
        "\tNumber of allocations:   %8lu\n"
        "\tNumber of frees:         %8lu\n"
        "\tAverage allocation size: %8lu\n"
        "\tTotal bytes allocated:   %8lu\n"
        "\tTotal bytes freed:       %8lu\n"
        "\tTotal bytes leaked:      %8lu",
        num_allocations, num_frees,
        (num_allocations) ? (bytes_allocated / num_allocations) : 0,
        bytes_allocated, bytes_freed, bytes_allocated - bytes_freed);
#endif
}
