/*
 * Copyright (c) 2008 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <string.h>
#include <mach/vm_prot.h>
#include <mach-o/loader.h>
#include <sys/types.h>

#if KERNEL
    #include <mach/vm_param.h>
#else
    #include <mach/mach_init.h>
#endif /* KERNEL */

#define DEBUG_ASSERT_COMPONENT_NAME_STRING "kxld"
#include <AssertMacros.h>

#include "kxld_reloc.h"
#include "kxld_sect.h"
#include "kxld_seg.h"
#include "kxld_symtab.h"
#include "kxld_util.h"

#define MAX_SEGS 20

#define TEXT_SEG_PROT (VM_PROT_READ | VM_PROT_EXECUTE)
#define DATA_SEG_PROT (VM_PROT_READ | VM_PROT_WRITE)

#if KXLD_USER_OR_OBJECT
static kern_return_t reorder_sections(KXLDSeg *seg, KXLDArray *section_order);
static void reorder_section(KXLDArray *sects, u_int *sect_reorder_index,
    KXLDSect **reorder_buffer, u_int reorder_buffer_index);
#endif /* KXLD_USER_OR_OBJECT */

#if 0
static KXLDSeg * get_segment_by_name(KXLDArray *segarray, const char *name);
#endif

#if KXLD_USER_OR_ILP32
static kern_return_t seg_export_macho_header_32(const KXLDSeg *seg, u_char *buf,
    u_long *header_offset, u_long header_size, u_long data_offset);
#endif
#if KXLD_USER_OR_LP64
static kern_return_t seg_export_macho_header_64(const KXLDSeg *seg, u_char *buf,
    u_long *header_offset, u_long header_size, u_long data_offset);
#endif

static KXLDSect * get_sect_by_index(const KXLDSeg *seg, u_int idx);

#if KXLD_USER_OR_ILP32
/*******************************************************************************
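* Initializes a KXLDSeg from a 32-bit Mach-O segment_command: copies the
* segment name, addresses, size, file offset, protections, and flags, and
* sizes the section pointer array to hold the segment's nsects sections.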
*******************************************************************************/
kern_return_t
kxld_seg_init_from_macho_32(KXLDSeg *seg, struct segment_command *src)
{
    kern_return_t rval = KERN_FAILURE;
    check(seg);
    check(src);

    strlcpy(seg->segname, src->segname, sizeof(seg->segname));
    seg->base_addr = src->vmaddr;
    seg->link_addr = src->vmaddr;
    seg->vmsize = src->vmsize;
    seg->fileoff = src->fileoff;
    seg->maxprot = src->maxprot;
    seg->initprot = src->initprot;
    seg->flags = src->flags;

    rval = kxld_array_init(&seg->sects, sizeof(KXLDSect *), src->nsects);
    require_noerr(rval, finish);

    rval = KERN_SUCCESS;

finish:
    return rval;
}
#endif /* KXLD_USER_OR_ILP32 */

#if KXLD_USER_OR_LP64
/*******************************************************************************
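* 64-bit counterpart of the above: initializes a KXLDSeg from a Mach-O
* segment_command_64.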
*******************************************************************************/
kern_return_t
kxld_seg_init_from_macho_64(KXLDSeg *seg, struct segment_command_64 *src)
{
    kern_return_t rval = KERN_FAILURE;
    check(seg);
    check(src);

    strlcpy(seg->segname, src->segname, sizeof(seg->segname));
    seg->base_addr = src->vmaddr;
    seg->link_addr = src->vmaddr;
    seg->vmsize = src->vmsize;
    seg->fileoff = src->fileoff;
    seg->maxprot = src->maxprot;
    seg->initprot = src->initprot;
    seg->flags = src->flags;

    rval = kxld_array_init(&seg->sects, sizeof(KXLDSect *), src->nsects);
    require_noerr(rval, finish);

    rval = KERN_SUCCESS;

finish:
    return rval;
}
#endif /* KXLD_USER_OR_LP64 */

#if KXLD_USER_OR_OBJECT
/*******************************************************************************
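* Builds a single segment with VM_PROT_ALL protections and points it at every
* section in sectarray. Used for object files, which do not define their own
* segment layout.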
*******************************************************************************/
kern_return_t
kxld_seg_create_seg_from_sections(KXLDArray *segarray, KXLDArray *sectarray)
{
    kern_return_t rval = KERN_FAILURE;
    KXLDSeg *seg = NULL;
    KXLDSect *sect = NULL;
    KXLDSect **sectp = NULL;
    u_int i = 0;

    /* Initialize the segment array to one segment */

    rval = kxld_array_init(segarray, sizeof(KXLDSeg), 1);
    require_noerr(rval, finish);

    /* Initialize the segment */

    seg = kxld_array_get_item(segarray, 0);
    seg->initprot = VM_PROT_ALL;
    seg->maxprot = VM_PROT_ALL;
    seg->link_addr = 0;

    /* Add the sections to the segment */

    rval = kxld_array_init(&seg->sects, sizeof(KXLDSect *), sectarray->nitems);
    require_noerr(rval, finish);

    for (i = 0; i < sectarray->nitems; ++i) {
        sect = kxld_array_get_item(sectarray, i);
        sectp = kxld_array_get_item(&seg->sects, i);

        *sectp = sect;
    }

    rval = KERN_SUCCESS;
finish:
    return rval;
}

/*******************************************************************************
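* Lays out the single object-file segment: reorders its sections to match
* section_order, places the segment on the page boundary after the Mach-O
* header, assigns aligned link addresses to each section, and rounds the
* segment's vmsize up to a page.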
*******************************************************************************/
kern_return_t
kxld_seg_finalize_object_segment(KXLDArray *segarray, KXLDArray *section_order,
    u_long hdrsize)
{
    kern_return_t rval = KERN_FAILURE;
    KXLDSeg *seg = NULL;
    KXLDSect *sect = NULL;
    u_long sect_offset = 0;
    u_int i = 0;

    check(segarray);
    check(section_order);
    require_action(segarray->nitems == 1, finish, rval=KERN_FAILURE);

    seg = kxld_array_get_item(segarray, 0);

    /* Reorder the sections */

    rval = reorder_sections(seg, section_order);
    require_noerr(rval, finish);

    /* Set the initial link address at the end of the header pages */

    seg->link_addr = round_page(hdrsize);

    /* Fix up all of the section addresses */

    sect_offset = (u_long) seg->link_addr;
    for (i = 0; i < seg->sects.nitems; ++i) {
        sect = *(KXLDSect **)kxld_array_get_item(&seg->sects, i);

        sect->link_addr = kxld_sect_align_address(sect, sect_offset);
        sect_offset = (u_long) (sect->link_addr + sect->size);
    }

    /* Finish initializing the segment */

    seg->vmsize = round_page(sect_offset) - seg->link_addr;

    rval = KERN_SUCCESS;
finish:
    return rval;
}

/*******************************************************************************
* The legacy section ordering used by kld was based on the order of sections
* in the kernel file. To achieve the same layout, we save the kernel's
* section ordering as an array of section names when the kernel file itself
* is linked. Then, when kexts are linked with the KXLD_LEGACY_LAYOUT flag,
* we refer to the kernel's section layout to order the kext's sections.
*
* The algorithm below is as follows. We iterate through all of the kernel's
* sections grouped by segment name, so that we are processing all of the __TEXT
* sections, then all of the __DATA sections, etc. We then iterate through the
* kext's sections with a similar grouping, looking for sections that match
* the current kernel's section. In this way, we order all of the matching
* kext sections in the order in which they appear in the kernel, and then place
* all remaining kext sections at the end of the current segment grouping in
* the order in which they originally appeared. Sections that only appear in
* the kernel are not created. Segments that only appear in the kext are
* left in their original ordering.
*
* An example:
*
* Kernel sections:
* __TEXT,__text
* __TEXT,__initcode
* __TEXT,__const
* __DATA,__data
*
* Kext sections:
* __TEXT,__const
* __TEXT,__literal4
* __TEXT,__text
* __DATA,__const
* __DATA,__data
*
* Reordered kext sections:
* __TEXT,__text
* __TEXT,__const
* __TEXT,__literal4
* __DATA,__data
* __DATA,__const
*
* In the implementation below, we use a reorder buffer to hold pointers to the
* sections of the current working segment. We scan this buffer looking for
* matching sections, placing them in the segment's section index as we find them.
* If this function must exit early, the segment's section index is left in an
* unusable state.
*******************************************************************************/
static kern_return_t
reorder_sections(KXLDSeg *seg, KXLDArray *section_order)
{
    kern_return_t rval = KERN_FAILURE;
    KXLDSect *sect = NULL;
    KXLDSect **reorder_buffer = NULL;
    KXLDSectionName *section_name = NULL;
    const char *segname = NULL;
    u_int sect_index = 0, legacy_index = 0, sect_reorder_index = 0;
    u_int i = 0, j = 0;
    u_int sect_start = 0, sect_end = 0, legacy_start = 0, legacy_end = 0;
    u_int nsects = 0;

    check(seg);
    check(section_order);

    /* Allocate the reorder buffer with enough space to hold all of the
     * sections.
     */

    reorder_buffer = kxld_alloc(
        seg->sects.nitems * sizeof(*reorder_buffer));
    require_action(reorder_buffer, finish, rval=KERN_RESOURCE_SHORTAGE);

    while (legacy_index < section_order->nitems) {

        /* Find the next group of sections with a common segment in the
         * section_order array.
         */

        legacy_start = legacy_index++;
        legacy_end = legacy_index;

        section_name = kxld_array_get_item(section_order, legacy_start);
        segname = section_name->segname;
        while (legacy_index < section_order->nitems) {
            section_name = kxld_array_get_item(section_order, legacy_index);
            if (!streq_safe(segname, section_name->segname,
                    sizeof(section_name->segname)))
            {
                break;
            }

            ++legacy_index;
            ++legacy_end;
        }

        /* Find a group of sections in the kext that match the current
         * section_order segment.
         */

        sect_start = sect_index;
        sect_end = sect_index;

        while (sect_index < seg->sects.nitems) {
            sect = *(KXLDSect **) kxld_array_get_item(&seg->sects, sect_index);
            if (!streq_safe(segname, sect->segname, sizeof(sect->segname))) {
                break;
            }

            ++sect_index;
            ++sect_end;
        }
        nsects = sect_end - sect_start;

        if (!nsects) continue;

        /* Populate the reorder buffer with the current group of kext sections */

        for (i = sect_start; i < sect_end; ++i) {
            reorder_buffer[i - sect_start] =
                *(KXLDSect **) kxld_array_get_item(&seg->sects, i);
        }

        /* For each section_order section, scan the reorder buffer for a matching
         * kext section. If one is found, copy it into the next slot in the
         * segment's section index.
         */

        sect_reorder_index = sect_start;
        for (i = legacy_start; i < legacy_end; ++i) {
            section_name = kxld_array_get_item(section_order, i);
            sect = NULL;

            for (j = 0; j < nsects; ++j) {
                sect = reorder_buffer[j];
                if (!sect) continue;

                if (streq_safe(section_name->sectname, sect->sectname,
                        sizeof(section_name->sectname)))
                {
                    break;
                }

                sect = NULL;
            }

            if (sect) {
                (void) reorder_section(&seg->sects, &sect_reorder_index,
                    reorder_buffer, j);
            }
        }

        /* If any sections remain in the reorder buffer, they are not specified
         * in the section_order array, so append them to the section index in
         * the order they are found.
         */

        for (i = 0; i < nsects; ++i) {
            if (!reorder_buffer[i]) continue;
            reorder_section(&seg->sects, &sect_reorder_index, reorder_buffer, i);
        }
    }

    rval = KERN_SUCCESS;

finish:

    if (reorder_buffer) {
        kxld_free(reorder_buffer, seg->sects.nitems * sizeof(*reorder_buffer));
        reorder_buffer = NULL;
    }

    return rval;
}

/*******************************************************************************
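* Moves one section pointer out of the reorder buffer into the next open slot
* of the segment's section array, updates the section's sectnum to its new
* index, and advances the reorder index.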
*******************************************************************************/
static void
reorder_section(KXLDArray *sects, u_int *sect_reorder_index,
    KXLDSect **reorder_buffer, u_int reorder_buffer_index)
{
    KXLDSect **tmp = NULL;

    tmp = kxld_array_get_item(sects, *sect_reorder_index);

    *tmp = reorder_buffer[reorder_buffer_index];
    reorder_buffer[reorder_buffer_index]->sectnum = *sect_reorder_index;
    reorder_buffer[reorder_buffer_index] = NULL;

    ++(*sect_reorder_index);
}

/*******************************************************************************
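* Appends a __LINKEDIT segment to the segment array, placing it on the first
* page boundary past the existing object-file segment.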
*******************************************************************************/
kern_return_t
kxld_seg_init_linkedit(KXLDArray *segs)
{
    kern_return_t rval = KERN_FAILURE;
    KXLDSeg *seg = NULL;
    KXLDSeg *le = NULL;

    rval = kxld_array_resize(segs, 2);
    require_noerr(rval, finish);

    seg = kxld_array_get_item(segs, 0);
    le = kxld_array_get_item(segs, 1);

    strlcpy(le->segname, SEG_LINKEDIT, sizeof(le->segname));
    le->link_addr = round_page(seg->link_addr + seg->vmsize);
    le->maxprot = VM_PROT_ALL;
    le->initprot = VM_PROT_DEFAULT;

    rval = KERN_SUCCESS;

finish:
    return rval;
}
#endif /* KXLD_USER_OR_OBJECT */

/*******************************************************************************
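* Resets a segment's fields for reuse. The section objects themselves are left
* for kxld_kext.c to clean up; only the section pointer array is cleared.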
*******************************************************************************/
void
kxld_seg_clear(KXLDSeg *seg)
{
    check(seg);

    bzero(seg->segname, sizeof(seg->segname));
    seg->base_addr = 0;
    seg->link_addr = 0;
    seg->vmsize = 0;
    seg->flags = 0;
    seg->maxprot = 0;
    seg->initprot = 0;

    /* Don't clear the individual sections here because kxld_kext.c will take
     * care of that.
     */
    kxld_array_clear(&seg->sects);
}

/*******************************************************************************
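* Frees the section pointer array and zeroes the segment structure.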
*******************************************************************************/
void
kxld_seg_deinit(KXLDSeg *seg)
{
    check(seg);

    kxld_array_deinit(&seg->sects);
    bzero(seg, sizeof(*seg));
}

/*******************************************************************************
*******************************************************************************/
kxld_size_t
kxld_seg_get_vmsize(const KXLDSeg *seg)
{
    check(seg);

    return seg->vmsize;
}

/*******************************************************************************
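* Returns the number of bytes this segment will occupy in the Mach-O header:
* one segment_command (32- or 64-bit) plus a section header per section.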
*******************************************************************************/
u_long
kxld_seg_get_macho_header_size(const KXLDSeg *seg, boolean_t is_32_bit)
{
    u_long size = 0;

    check(seg);

    if (is_32_bit) {
        size += sizeof(struct segment_command);
    } else {
        size += sizeof(struct segment_command_64);
    }
    size += seg->sects.nitems * kxld_sect_get_macho_header_size(is_32_bit);

    return size;
}

/*******************************************************************************
*******************************************************************************/
/* This is no longer used, but may be useful some day... */
#if 0
u_long
kxld_seg_get_macho_data_size(const KXLDSeg *seg)
{
    u_long size = 0;
    u_int i = 0;
    KXLDSect *sect = NULL;

    check(seg);

    for (i = 0; i < seg->sects.nitems; ++i) {
        sect = get_sect_by_index(seg, i);
        size = (u_long) kxld_sect_align_address(sect, size);
        size += kxld_sect_get_macho_data_size(sect);
    }

    return round_page(size);
}
#endif

/*******************************************************************************
*******************************************************************************/
static KXLDSect *
get_sect_by_index(const KXLDSeg *seg, u_int idx)
{
    check(seg);

    return *(KXLDSect **) kxld_array_get_item(&seg->sects, idx);
}

/*******************************************************************************
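* Writes the segment into a Mach-O file buffer: emits the segment_command
* (32- or 64-bit) at *header_offset, writes each section's header and data,
* then records the resulting filesize and rounds *data_offset up to a page.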
*******************************************************************************/
kern_return_t
kxld_seg_export_macho_to_file_buffer(const KXLDSeg *seg, u_char *buf,
    u_long *header_offset, u_long header_size,
    u_long *data_offset, u_long data_size,
    boolean_t is_32_bit)
{
    kern_return_t rval = KERN_FAILURE;
    KXLDSect *sect = NULL;
    u_long base_data_offset = *data_offset;
    u_int i = 0;
    struct segment_command *hdr32 =
        (struct segment_command *) ((void *) (buf + *header_offset));
    struct segment_command_64 *hdr64 =
        (struct segment_command_64 *) ((void *) (buf + *header_offset));

    check(seg);
    check(buf);
    check(header_offset);
    check(data_offset);

    /* Write out the header */

    KXLD_3264_FUNC(is_32_bit, rval,
        seg_export_macho_header_32, seg_export_macho_header_64,
        seg, buf, header_offset, header_size, *data_offset);
    require_noerr(rval, finish);

    /* Write out each section */

    for (i = 0; i < seg->sects.nitems; ++i) {
        sect = get_sect_by_index(seg, i);

        rval = kxld_sect_export_macho_to_file_buffer(sect, buf, header_offset,
            header_size, data_offset, data_size, is_32_bit);
        require_noerr(rval, finish);
    }

    /* Update the filesize */

    if (is_32_bit) {
        hdr32->filesize = (uint32_t) (*data_offset - base_data_offset);
    } else {
        hdr64->filesize = (uint64_t) (*data_offset - base_data_offset);
    }

    *data_offset = round_page(*data_offset);

    rval = KERN_SUCCESS;

finish:
    return rval;
}

/*******************************************************************************
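* Writes the segment into a VM-layout buffer: the data offset is derived from
* the segment's link address relative to file_link_addr, so section data lands
* at its final, relocated position.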
*******************************************************************************/
kern_return_t
kxld_seg_export_macho_to_vm(const KXLDSeg *seg, u_char *buf,
    u_long *header_offset, u_long header_size,
    u_long data_size, kxld_addr_t file_link_addr,
    boolean_t is_32_bit)
{
    kern_return_t rval = KERN_FAILURE;
    KXLDSect *sect = NULL;
    u_long data_offset = (u_long) (seg->link_addr - file_link_addr);
    u_int i = 0;

    check(seg);
    check(buf);
    check(header_offset);

    /* Write out the header */

    KXLD_3264_FUNC(is_32_bit, rval,
        seg_export_macho_header_32, seg_export_macho_header_64,
        seg, buf, header_offset, header_size, data_offset);
    require_noerr(rval, finish);

    /* Write out each section */

    for (i = 0; i < seg->sects.nitems; ++i) {
        sect = get_sect_by_index(seg, i);

        rval = kxld_sect_export_macho_to_vm(sect, buf, header_offset,
            header_size, file_link_addr, data_size, is_32_bit);
        require_noerr(rval, finish);
    }

    rval = KERN_SUCCESS;

finish:
    return rval;
}

#if KXLD_USER_OR_ILP32
/*******************************************************************************
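* Fills in a 32-bit segment_command at *header_offset and advances the offset.
* The command size includes one section header per section.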
*******************************************************************************/
static kern_return_t
seg_export_macho_header_32(const KXLDSeg *seg, u_char *buf,
    u_long *header_offset, u_long header_size, u_long data_offset)
{
    kern_return_t rval = KERN_FAILURE;
    struct segment_command *seghdr = NULL;

    check(seg);
    check(buf);
    check(header_offset);

    require_action(sizeof(*seghdr) <= header_size - *header_offset, finish,
        rval=KERN_FAILURE);
    seghdr = (struct segment_command *) ((void *) (buf + *header_offset));
    *header_offset += sizeof(*seghdr);

    seghdr->cmd = LC_SEGMENT;
    seghdr->cmdsize = (uint32_t) sizeof(*seghdr);
    seghdr->cmdsize +=
        (uint32_t) (seg->sects.nitems * kxld_sect_get_macho_header_size(TRUE));
    strlcpy(seghdr->segname, seg->segname, sizeof(seghdr->segname));
    seghdr->vmaddr = (uint32_t) seg->link_addr;
    seghdr->vmsize = (uint32_t) seg->vmsize;
    seghdr->fileoff = (uint32_t) data_offset;
    seghdr->filesize = (uint32_t) seg->vmsize;
    seghdr->maxprot = seg->maxprot;
    seghdr->initprot = seg->initprot;
    seghdr->nsects = seg->sects.nitems;
    seghdr->flags = 0;

    rval = KERN_SUCCESS;

finish:
    return rval;
}
#endif /* KXLD_USER_OR_ILP32 */

#if KXLD_USER_OR_LP64
/*******************************************************************************
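* 64-bit counterpart: fills in a segment_command_64 (LC_SEGMENT_64).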
*******************************************************************************/
static kern_return_t
seg_export_macho_header_64(const KXLDSeg *seg, u_char *buf,
    u_long *header_offset, u_long header_size, u_long data_offset)
{
    kern_return_t rval = KERN_FAILURE;
    struct segment_command_64 *seghdr = NULL;

    check(seg);
    check(buf);
    check(header_offset);

    require_action(sizeof(*seghdr) <= header_size - *header_offset, finish,
        rval=KERN_FAILURE);
    seghdr = (struct segment_command_64 *) ((void *) (buf + *header_offset));
    *header_offset += sizeof(*seghdr);

    seghdr->cmd = LC_SEGMENT_64;
    seghdr->cmdsize = (uint32_t) sizeof(*seghdr);
    seghdr->cmdsize +=
        (uint32_t) (seg->sects.nitems * kxld_sect_get_macho_header_size(FALSE));
    strlcpy(seghdr->segname, seg->segname, sizeof(seghdr->segname));
    seghdr->vmaddr = (uint64_t) seg->link_addr;
    seghdr->vmsize = (uint64_t) seg->vmsize;
    seghdr->fileoff = (uint64_t) data_offset;
    seghdr->filesize = (uint64_t) seg->vmsize;
    seghdr->maxprot = seg->maxprot;
    seghdr->initprot = seg->initprot;
    seghdr->nsects = seg->sects.nitems;
    seghdr->flags = 0;

    rval = KERN_SUCCESS;

finish:
    return rval;
}
#endif /* KXLD_USER_OR_LP64 */

/*******************************************************************************
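* Adds a section to the first empty slot in the segment's section array. The
* section's segment name must match the segment, and the array must have been
* sized in advance; otherwise KERN_FAILURE is returned.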
*******************************************************************************/
kern_return_t
kxld_seg_add_section(KXLDSeg *seg, KXLDSect *sect)
{
    kern_return_t rval = KERN_FAILURE;
    KXLDSect **sectp = NULL;
    u_int i;

    check(seg);
    check(sect);
    require_action(streq_safe(seg->segname, sect->segname, sizeof(seg->segname)),
        finish, rval=KERN_FAILURE);

    /* Add the section into the section index */

    for (i = 0; i < seg->sects.nitems; ++i) {
        sectp = kxld_array_get_item(&seg->sects, i);
        if (NULL == *sectp) {
            *sectp = sect;
            break;
        }
    }
    require_action(i < seg->sects.nitems, finish, rval=KERN_FAILURE);

    rval = KERN_SUCCESS;

finish:

    return rval;
}

/*******************************************************************************
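* Computes the segment's vmsize from its highest-addressed section, rounded up
* to a page boundary. Segments with no sections are left untouched.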
*******************************************************************************/
kern_return_t
kxld_seg_finish_init(KXLDSeg *seg)
{
    kern_return_t rval = KERN_FAILURE;
    u_int i = 0;
    KXLDSect *sect = NULL;
    kxld_addr_t maxaddr = 0;
    kxld_size_t maxsize = 0;

    if (seg->sects.nitems) {
        for (i = 0; i < seg->sects.nitems; ++i) {
            sect = get_sect_by_index(seg, i);
            require_action(sect, finish, rval=KERN_FAILURE);
            if (sect->base_addr > maxaddr) {
                maxaddr = sect->base_addr;
                maxsize = sect->size;
            }
        }

        /* XXX Cross architecture linking will fail if the page size ever differs
         * from 4096. (As of this writing, we're fine on i386, x86_64, and arm).
         */
        seg->vmsize = round_page(maxaddr + maxsize - seg->base_addr);
    }

    rval = KERN_SUCCESS;

finish:
    return rval;
}

/*******************************************************************************
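* Sets initprot/maxprot for the segment. With strict protections, __TEXT gets
* read/execute and everything else read/write; otherwise both are VM_PROT_ALL.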
*******************************************************************************/
void
kxld_seg_set_vm_protections(KXLDSeg *seg, boolean_t strict_protections)
{
    /* This is unnecessary except to make the clang analyzer happy. When
     * the analyzer no longer ignores nonnull attributes for if statements,
     * we can remove this line.
     */
    if (!seg) return;

    if (strict_protections) {
        if (streq_safe(seg->segname, SEG_TEXT, const_strlen(SEG_TEXT))) {
            seg->initprot = TEXT_SEG_PROT;
            seg->maxprot = VM_PROT_ALL;
        } else {
            seg->initprot = DATA_SEG_PROT;
            seg->maxprot = DATA_SEG_PROT;
        }
    } else {
        seg->initprot = VM_PROT_ALL;
        seg->maxprot = VM_PROT_ALL;
    }
}

/*******************************************************************************
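* Offsets the segment's link address, and that of each of its sections, by
* link_addr.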
*******************************************************************************/
void
kxld_seg_relocate(KXLDSeg *seg, kxld_addr_t link_addr)
{
    KXLDSect *sect = NULL;
    u_int i = 0;

    seg->link_addr += link_addr;
    for (i = 0; i < seg->sects.nitems; ++i) {
        sect = get_sect_by_index(seg, i);
        kxld_sect_relocate(sect, link_addr);
    }
}

/*******************************************************************************
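* Sizes the __LINKEDIT segment: the symbol table's Mach-O data size plus, for
* sliding (PIC) kexts, the local and external relocation entries, rounded up
* to a page.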
*******************************************************************************/
void
kxld_seg_populate_linkedit(KXLDSeg *seg, const KXLDSymtab *symtab, boolean_t is_32_bit
#if KXLD_PIC_KEXTS
    , const KXLDArray *locrelocs
    , const KXLDArray *extrelocs
    , boolean_t target_supports_slideable_kexts
#endif /* KXLD_PIC_KEXTS */
    )
{
    u_long size = 0;

    size += kxld_symtab_get_macho_data_size(symtab, is_32_bit);

#if KXLD_PIC_KEXTS
    if (target_supports_slideable_kexts) {
        size += kxld_reloc_get_macho_data_size(locrelocs, extrelocs);
    }
#endif /* KXLD_PIC_KEXTS */

    seg->vmsize = round_page(size);
}