/*
 * Copyright (c) 2008 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <string.h>
#include <mach/vm_prot.h>
#include <mach-o/loader.h>
#include <sys/types.h>

#if KERNEL
#include <mach/vm_param.h>
#else
#include <mach/mach_init.h>
#endif /* KERNEL */

#define DEBUG_ASSERT_COMPONENT_NAME_STRING "kxld"
#include <AssertMacros.h>

#include "kxld_reloc.h"
#include "kxld_sect.h"
#include "kxld_seg.h"
#include "kxld_symtab.h"
#include "kxld_util.h"

#define MAX_SEGS 20

#define TEXT_SEG_PROT (VM_PROT_READ | VM_PROT_EXECUTE)
#define DATA_SEG_PROT (VM_PROT_READ | VM_PROT_WRITE)

#if KXLD_USER_OR_OBJECT
static kern_return_t reorder_sections(KXLDSeg *seg, KXLDArray *section_order);
static void reorder_section(KXLDArray *sects, u_int *sect_reorder_index,
    KXLDSect **reorder_buffer, u_int reorder_buffer_index);
#endif /* KXLD_USER_OR_OBJECT */

#if 0
static KXLDSeg * get_segment_by_name(KXLDArray *segarray, const char *name);
#endif

#if KXLD_USER_OR_ILP32
static kern_return_t seg_export_macho_header_32(const KXLDSeg *seg, u_char *buf,
    u_long *header_offset, u_long header_size, u_long data_offset);
#endif
#if KXLD_USER_OR_LP64
static kern_return_t seg_export_macho_header_64(const KXLDSeg *seg, u_char *buf,
    u_long *header_offset, u_long header_size, u_long data_offset);
#endif

static KXLDSect * get_sect_by_index(const KXLDSeg *seg, u_int idx);

#if KXLD_USER_OR_ILP32
/*******************************************************************************
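* Copy the fields of a 32-bit Mach-O LC_SEGMENT command into a KXLDSeg and
* size the segment's section pointer array to hold src->nsects sections.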
*******************************************************************************/
kern_return_t
kxld_seg_init_from_macho_32(KXLDSeg *seg, struct segment_command *src)
{
    kern_return_t rval = KERN_FAILURE;
    check(seg);
    check(src);

    strlcpy(seg->segname, src->segname, sizeof(seg->segname));
    seg->base_addr = src->vmaddr;
    seg->link_addr = src->vmaddr;
    seg->vmsize = src->vmsize;
    seg->fileoff = src->fileoff;
    seg->maxprot = src->maxprot;
    seg->initprot = src->initprot;
    seg->flags = src->flags;

    rval = kxld_array_init(&seg->sects, sizeof(KXLDSect *), src->nsects);
    require_noerr(rval, finish);

    rval = KERN_SUCCESS;

finish:
    return rval;
}
#endif /* KXLD_USER_OR_ILP32 */

#if KXLD_USER_OR_LP64
/*******************************************************************************
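* 64-bit variant of the above: copy the fields of an LC_SEGMENT_64 command
* into a KXLDSeg and size the section pointer array for src->nsects sections.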
*******************************************************************************/
kern_return_t
kxld_seg_init_from_macho_64(KXLDSeg *seg, struct segment_command_64 *src)
{
    kern_return_t rval = KERN_FAILURE;
    check(seg);
    check(src);

    strlcpy(seg->segname, src->segname, sizeof(seg->segname));
    seg->base_addr = src->vmaddr;
    seg->link_addr = src->vmaddr;
    seg->vmsize = src->vmsize;
    seg->fileoff = src->fileoff;
    seg->maxprot = src->maxprot;
    seg->initprot = src->initprot;
    seg->flags = src->flags;

    rval = kxld_array_init(&seg->sects, sizeof(KXLDSect *), src->nsects);
    require_noerr(rval, finish);

    rval = KERN_SUCCESS;

finish:
    return rval;
}
#endif /* KXLD_USER_OR_LP64 */

#if KXLD_USER_OR_OBJECT
/*******************************************************************************
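* For object files, which carry no segment load commands of their own, build
* a single catch-all segment with R/W/X protections that references every
* section in sectarray.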
*******************************************************************************/
kern_return_t
kxld_seg_create_seg_from_sections(KXLDArray *segarray, KXLDArray *sectarray)
{
    kern_return_t rval = KERN_FAILURE;
    KXLDSeg *seg = NULL;
    KXLDSect *sect = NULL;
    KXLDSect **sectp = NULL;
    u_int i = 0;

    /* Initialize the segment array to one segment */

    rval = kxld_array_init(segarray, sizeof(KXLDSeg), 1);
    require_noerr(rval, finish);

    /* Initialize the segment */

    seg = kxld_array_get_item(segarray, 0);
    seg->initprot = VM_PROT_ALL;
    seg->maxprot = VM_PROT_ALL;
    seg->link_addr = 0;

    /* Add the sections to the segment */

    rval = kxld_array_init(&seg->sects, sizeof(KXLDSect *), sectarray->nitems);
    require_noerr(rval, finish);

    for (i = 0; i < sectarray->nitems; ++i) {
        sect = kxld_array_get_item(sectarray, i);
        sectp = kxld_array_get_item(&seg->sects, i);

        *sectp = sect;
    }

    rval = KERN_SUCCESS;
finish:
    return rval;
}

/*******************************************************************************
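* Lay out the single object-file segment: reorder its sections to match the
* saved kernel section order, assign each section a link address past the
* header pages, and compute the segment's page-rounded vmsize.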
*******************************************************************************/
kern_return_t
kxld_seg_finalize_object_segment(KXLDArray *segarray, KXLDArray *section_order,
    u_long hdrsize)
{
    kern_return_t rval = KERN_FAILURE;
    KXLDSeg *seg = NULL;
    KXLDSect *sect = NULL;
    u_long sect_offset = 0;
    u_int i = 0;

    check(segarray);
    check(section_order);
    require_action(segarray->nitems == 1, finish, rval=KERN_FAILURE);

    seg = kxld_array_get_item(segarray, 0);

    /* Reorder the sections */

    rval = reorder_sections(seg, section_order);
    require_noerr(rval, finish);

    /* Set the initial link address at the end of the header pages */

    seg->link_addr = round_page(hdrsize);

    /* Fix up all of the section addresses */

    sect_offset = (u_long) seg->link_addr;
    for (i = 0; i < seg->sects.nitems; ++i) {
        sect = *(KXLDSect **)kxld_array_get_item(&seg->sects, i);

        sect->link_addr = kxld_sect_align_address(sect, sect_offset);
        sect_offset = (u_long) (sect->link_addr + sect->size);
    }

    /* Finish initializing the segment */

    seg->vmsize = round_page(sect_offset) - seg->link_addr;

    rval = KERN_SUCCESS;
finish:
    return rval;
}

/*******************************************************************************
* The legacy section ordering used by kld was based on the order of sections
* in the kernel file. To achieve the same layout, we save the kernel's
* section ordering as an array of section names when the kernel file itself
* is linked. Then, when kexts are linked with the KXLD_LEGACY_LAYOUT flag,
* we refer to the kernel's section layout to order the kext's sections.
*
* The algorithm below is as follows. We iterate through all of the kernel's
* sections grouped by segment name, so that we are processing all of the __TEXT
* sections, then all of the __DATA sections, etc. We then iterate through the
* kext's sections with a similar grouping, looking for sections that match
* the current kernel's section. In this way, we order all of the matching
* kext sections in the order in which they appear in the kernel, and then place
* all remaining kext sections at the end of the current segment grouping in
* the order in which they originally appeared. Sections that only appear in
* the kernel are not created. Segments that only appear in the kext are
* left in their original ordering.
*
* An example:
*
* Kernel sections:
* __TEXT,__text
* __TEXT,__const
* __DATA,__data
*
* Kext sections:
* __TEXT,__const
* __TEXT,__literal4
* __TEXT,__text
* __DATA,__const
* __DATA,__data
*
* Reordered kext sections:
* __TEXT,__text
* __TEXT,__const
* __TEXT,__literal4
* __DATA,__data
* __DATA,__const
*
* In the implementation below, we use a reorder buffer to hold pointers to the
* sections of the current working segment. We scan this buffer looking for
* matching sections, placing them in the segment's section index as we find them.
* If this function must exit early, the segment's section index is left in an
* unusable state.
*******************************************************************************/
static kern_return_t
reorder_sections(KXLDSeg *seg, KXLDArray *section_order)
{
    kern_return_t rval = KERN_FAILURE;
    KXLDSect *sect = NULL;
    KXLDSect **reorder_buffer = NULL;
    KXLDSectionName *section_name = NULL;
    const char *segname = NULL;
    u_int sect_index = 0, legacy_index = 0, sect_reorder_index = 0;
    u_int i = 0, j = 0;
    u_int sect_start = 0, sect_end = 0, legacy_start = 0, legacy_end = 0;
    u_int nsects = 0;

    check(seg);
    check(section_order);

    /* Allocate the reorder buffer with enough space to hold all of the
     * sections.
     */

    reorder_buffer = kxld_alloc(
        seg->sects.nitems * sizeof(*reorder_buffer));
    require_action(reorder_buffer, finish, rval=KERN_RESOURCE_SHORTAGE);

    while (legacy_index < section_order->nitems) {

        /* Find the next group of sections with a common segment in the
         * section_order array.
         */

        legacy_start = legacy_index++;
        legacy_end = legacy_index;

        section_name = kxld_array_get_item(section_order, legacy_start);
        segname = section_name->segname;
        while (legacy_index < section_order->nitems) {
            section_name = kxld_array_get_item(section_order, legacy_index);
            if (!streq_safe(segname, section_name->segname,
                    sizeof(section_name->segname)))
            {
                break;
            }

            ++legacy_index;
            ++legacy_end;
        }

        /* Find a group of sections in the kext that match the current
         * section_order segment.
         */

        sect_start = sect_index;
        sect_end = sect_index;

        while (sect_index < seg->sects.nitems) {
            sect = *(KXLDSect **) kxld_array_get_item(&seg->sects, sect_index);
            if (!streq_safe(segname, sect->segname, sizeof(sect->segname))) {
                break;
            }

            ++sect_index;
            ++sect_end;
        }
        nsects = sect_end - sect_start;

        if (!nsects) continue;

        /* Populate the reorder buffer with the current group of kext sections */

        for (i = sect_start; i < sect_end; ++i) {
            reorder_buffer[i - sect_start] =
                *(KXLDSect **) kxld_array_get_item(&seg->sects, i);
        }

        /* For each section_order section, scan the reorder buffer for a matching
         * kext section. If one is found, copy it into the next slot in the
         * segment's section index.
         */

        sect_reorder_index = sect_start;
        for (i = legacy_start; i < legacy_end; ++i) {
            section_name = kxld_array_get_item(section_order, i);
            sect = NULL;

            for (j = 0; j < nsects; ++j) {
                sect = reorder_buffer[j];
                if (!sect) continue;

                if (streq_safe(section_name->sectname, sect->sectname,
                        sizeof(section_name->sectname)))
                {
                    break;
                }

                sect = NULL;
            }

            if (sect) {
                (void) reorder_section(&seg->sects, &sect_reorder_index,
                    reorder_buffer, j);
            }
        }

        /* If any sections remain in the reorder buffer, they are not specified
         * in the section_order array, so append them to the section index in
         * the order they are found.
         */

        for (i = 0; i < nsects; ++i) {
            if (!reorder_buffer[i]) continue;
            reorder_section(&seg->sects, &sect_reorder_index, reorder_buffer, i);
        }
    }

    rval = KERN_SUCCESS;

finish:

    if (reorder_buffer) {
        kxld_free(reorder_buffer, seg->sects.nitems * sizeof(*reorder_buffer));
        reorder_buffer = NULL;
    }

    return rval;
}

/*******************************************************************************
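* Move one section out of the reorder buffer into the next open slot of the
* segment's section index, update its section number, and clear its buffer
* entry so it cannot be placed twice.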
*******************************************************************************/
static void
reorder_section(KXLDArray *sects, u_int *sect_reorder_index,
    KXLDSect **reorder_buffer, u_int reorder_buffer_index)
{
    KXLDSect **tmp = NULL;

    tmp = kxld_array_get_item(sects, *sect_reorder_index);

    *tmp = reorder_buffer[reorder_buffer_index];
    reorder_buffer[reorder_buffer_index]->sectnum = *sect_reorder_index;
    reorder_buffer[reorder_buffer_index] = NULL;

    ++(*sect_reorder_index);
}

/*******************************************************************************
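* Grow the segment array to two entries and initialize the second as a
* __LINKEDIT segment that starts on the page following the object segment.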
*******************************************************************************/
kern_return_t
kxld_seg_init_linkedit(KXLDArray *segs)
{
    kern_return_t rval = KERN_FAILURE;
    KXLDSeg *seg = NULL;
    KXLDSeg *le = NULL;

    rval = kxld_array_resize(segs, 2);
    require_noerr(rval, finish);

    seg = kxld_array_get_item(segs, 0);
    le = kxld_array_get_item(segs, 1);

    strlcpy(le->segname, SEG_LINKEDIT, sizeof(le->segname));
    le->link_addr = round_page(seg->link_addr + seg->vmsize);
    le->maxprot = VM_PROT_ALL;
    le->initprot = VM_PROT_DEFAULT;

    rval = KERN_SUCCESS;

finish:
    return rval;
}
#endif /* KXLD_USER_OR_OBJECT */

/*******************************************************************************
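* Reset a segment's fields so it can be reused. The individual sections are
* not cleared here; kxld_kext.c is responsible for them (see the comment
* below).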
*******************************************************************************/
void
kxld_seg_clear(KXLDSeg *seg)
{
    check(seg);

    bzero(seg->segname, sizeof(seg->segname));
    seg->base_addr = 0;
    seg->link_addr = 0;
    seg->vmsize = 0;
    seg->flags = 0;
    seg->maxprot = 0;
    seg->initprot = 0;

    /* Don't clear the individual sections here because kxld_kext.c will take
     * care of that.
     */
    kxld_array_clear(&seg->sects);
}

/*******************************************************************************
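* Free the segment's section pointer array and zero the segment structure.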
*******************************************************************************/
void
kxld_seg_deinit(KXLDSeg *seg)
{
    check(seg);

    kxld_array_deinit(&seg->sects);
    bzero(seg, sizeof(*seg));
}

/*******************************************************************************
*******************************************************************************/
kxld_size_t
kxld_seg_get_vmsize(const KXLDSeg *seg)
{
    check(seg);

    return seg->vmsize;
}

/*******************************************************************************
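* Return the number of bytes this segment contributes to the Mach-O header:
* one segment_command (or segment_command_64) plus one section header per
* section.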
*******************************************************************************/
u_long
kxld_seg_get_macho_header_size(const KXLDSeg *seg, boolean_t is_32_bit)
{
    u_long size = 0;

    check(seg);

    if (is_32_bit) {
        size += sizeof(struct segment_command);
    } else {
        size += sizeof(struct segment_command_64);
    }
    size += seg->sects.nitems * kxld_sect_get_macho_header_size(is_32_bit);

    return size;
}

/*******************************************************************************
*******************************************************************************/
/* This is no longer used, but may be useful some day... */
#if 0
u_long
kxld_seg_get_macho_data_size(const KXLDSeg *seg)
{
    u_long size = 0;
    u_int i = 0;
    KXLDSect *sect = NULL;

    check(seg);

    for (i = 0; i < seg->sects.nitems; ++i) {
        sect = get_sect_by_index(seg, i);
        size = (u_long) kxld_sect_align_address(sect, size);
        size += kxld_sect_get_macho_data_size(sect);
    }

    return round_page(size);
}
#endif

/*******************************************************************************
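* Look up a section by its index in the segment's section pointer array.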
*******************************************************************************/
static KXLDSect *
get_sect_by_index(const KXLDSeg *seg, u_int idx)
{
    check(seg);

    return *(KXLDSect **) kxld_array_get_item(&seg->sects, idx);
}

/*******************************************************************************
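* Write the segment's load command and each of its sections' headers and data
* into a file-layout buffer, then record the resulting filesize in the
* segment header and round the data offset up to the next page.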
*******************************************************************************/
kern_return_t
kxld_seg_export_macho_to_file_buffer(const KXLDSeg *seg, u_char *buf,
    u_long *header_offset, u_long header_size,
    u_long *data_offset, u_long data_size,
    boolean_t is_32_bit)
{
    kern_return_t rval = KERN_FAILURE;
    KXLDSect *sect = NULL;
    u_long base_data_offset = *data_offset;
    u_int i = 0;
    struct segment_command *hdr32 =
        (struct segment_command *) ((void *) (buf + *header_offset));
    struct segment_command_64 *hdr64 =
        (struct segment_command_64 *) ((void *) (buf + *header_offset));

    check(seg);
    check(buf);
    check(header_offset);
    check(data_offset);

    /* Write out the header */

    KXLD_3264_FUNC(is_32_bit, rval,
        seg_export_macho_header_32, seg_export_macho_header_64,
        seg, buf, header_offset, header_size, *data_offset);
    require_noerr(rval, finish);

    /* Write out each section */

    for (i = 0; i < seg->sects.nitems; ++i) {
        sect = get_sect_by_index(seg, i);

        rval = kxld_sect_export_macho_to_file_buffer(sect, buf, header_offset,
            header_size, data_offset, data_size, is_32_bit);
        require_noerr(rval, finish);
    }

    /* Update the filesize */

    if (is_32_bit) {
        hdr32->filesize = (uint32_t) (*data_offset - base_data_offset);
    } else {
        hdr64->filesize = (uint64_t) (*data_offset - base_data_offset);
    }

    *data_offset = round_page(*data_offset);

    rval = KERN_SUCCESS;

finish:
    return rval;

}

/*******************************************************************************
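* Write the segment's load command and sections for an in-memory (VM) layout;
* the segment's data offset is derived from its link address relative to
* file_link_addr rather than from a caller-tracked running file offset.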
*******************************************************************************/
kern_return_t
kxld_seg_export_macho_to_vm(const KXLDSeg *seg, u_char *buf,
    u_long *header_offset, u_long header_size,
    u_long data_size, kxld_addr_t file_link_addr,
    boolean_t is_32_bit)
{
    kern_return_t rval = KERN_FAILURE;
    KXLDSect *sect = NULL;
    u_long data_offset = (u_long) (seg->link_addr - file_link_addr);
    u_int i = 0;

    check(seg);
    check(buf);
    check(header_offset);

    /* Write out the header */

    KXLD_3264_FUNC(is_32_bit, rval,
        seg_export_macho_header_32, seg_export_macho_header_64,
        seg, buf, header_offset, header_size, data_offset);
    require_noerr(rval, finish);

    /* Write out each section */

    for (i = 0; i < seg->sects.nitems; ++i) {
        sect = get_sect_by_index(seg, i);

        rval = kxld_sect_export_macho_to_vm(sect, buf, header_offset,
            header_size, file_link_addr, data_size, is_32_bit);
        require_noerr(rval, finish);
    }

    rval = KERN_SUCCESS;

finish:
    return rval;
}

#if KXLD_USER_OR_ILP32
/*******************************************************************************
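* Fill in a 32-bit LC_SEGMENT load command for this segment at the current
* header offset, after checking that it fits in the remaining header space.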
*******************************************************************************/
static kern_return_t
seg_export_macho_header_32(const KXLDSeg *seg, u_char *buf,
    u_long *header_offset, u_long header_size, u_long data_offset)
{
    kern_return_t rval = KERN_FAILURE;
    struct segment_command *seghdr = NULL;

    check(seg);
    check(buf);
    check(header_offset);

    require_action(sizeof(*seghdr) <= header_size - *header_offset, finish,
        rval=KERN_FAILURE);
    seghdr = (struct segment_command *) ((void *) (buf + *header_offset));
    *header_offset += sizeof(*seghdr);

    seghdr->cmd = LC_SEGMENT;
    seghdr->cmdsize = (uint32_t) sizeof(*seghdr);
    seghdr->cmdsize +=
        (uint32_t) (seg->sects.nitems * kxld_sect_get_macho_header_size(TRUE));
    strlcpy(seghdr->segname, seg->segname, sizeof(seghdr->segname));
    seghdr->vmaddr = (uint32_t) seg->link_addr;
    seghdr->vmsize = (uint32_t) seg->vmsize;
    seghdr->fileoff = (uint32_t) data_offset;
    seghdr->filesize = (uint32_t) seg->vmsize;
    seghdr->maxprot = seg->maxprot;
    seghdr->initprot = seg->initprot;
    seghdr->nsects = seg->sects.nitems;
    seghdr->flags = 0;

    rval = KERN_SUCCESS;

finish:
    return rval;
}
#endif /* KXLD_USER_OR_ILP32 */

#if KXLD_USER_OR_LP64
/*******************************************************************************
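* 64-bit counterpart: fill in an LC_SEGMENT_64 load command for this segment
* at the current header offset, after checking that it fits in the remaining
* header space.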
*******************************************************************************/
static kern_return_t
seg_export_macho_header_64(const KXLDSeg *seg, u_char *buf,
    u_long *header_offset, u_long header_size, u_long data_offset)
{
    kern_return_t rval = KERN_FAILURE;
    struct segment_command_64 *seghdr = NULL;

    check(seg);
    check(buf);
    check(header_offset);

    require_action(sizeof(*seghdr) <= header_size - *header_offset, finish,
        rval=KERN_FAILURE);
    seghdr = (struct segment_command_64 *) ((void *) (buf + *header_offset));
    *header_offset += sizeof(*seghdr);

    seghdr->cmd = LC_SEGMENT_64;
    seghdr->cmdsize = (uint32_t) sizeof(*seghdr);
    seghdr->cmdsize +=
        (uint32_t) (seg->sects.nitems * kxld_sect_get_macho_header_size(FALSE));
    strlcpy(seghdr->segname, seg->segname, sizeof(seghdr->segname));
    seghdr->vmaddr = (uint64_t) seg->link_addr;
    seghdr->vmsize = (uint64_t) seg->vmsize;
    seghdr->fileoff = (uint64_t) data_offset;
    seghdr->filesize = (uint64_t) seg->vmsize;
    seghdr->maxprot = seg->maxprot;
    seghdr->initprot = seg->initprot;
    seghdr->nsects = seg->sects.nitems;
    seghdr->flags = 0;

    rval = KERN_SUCCESS;

finish:
    return rval;
}
#endif /* KXLD_USER_OR_LP64 */

/*******************************************************************************
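* Place a section into the first empty slot of the segment's section index.
* The section's segment name must match the segment, and the index must have
* been sized in advance; otherwise this fails.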
*******************************************************************************/
kern_return_t
kxld_seg_add_section(KXLDSeg *seg, KXLDSect *sect)
{
    kern_return_t rval = KERN_FAILURE;
    KXLDSect **sectp = NULL;
    u_int i;

    check(seg);
    check(sect);
    require_action(streq_safe(seg->segname, sect->segname, sizeof(seg->segname)),
        finish, rval=KERN_FAILURE);

    /* Add the section into the section index */

    for (i = 0; i < seg->sects.nitems; ++i) {
        sectp = kxld_array_get_item(&seg->sects, i);
        if (NULL == *sectp) {
            *sectp = sect;
            break;
        }
    }
    require_action(i < seg->sects.nitems, finish, rval=KERN_FAILURE);

    rval = KERN_SUCCESS;

finish:

    return rval;
}

/*******************************************************************************
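* Compute the segment's vmsize from its highest-addressed section: the
* page-rounded distance from the segment's base address to the end of that
* section. Segments with no sections are left unchanged.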
*******************************************************************************/
kern_return_t
kxld_seg_finish_init(KXLDSeg *seg)
{
    kern_return_t rval = KERN_FAILURE;
    u_int i = 0;
    KXLDSect *sect = NULL;
    kxld_addr_t maxaddr = 0;
    kxld_size_t maxsize = 0;

    if (seg->sects.nitems) {
        for (i = 0; i < seg->sects.nitems; ++i) {
            sect = get_sect_by_index(seg, i);
            require_action(sect, finish, rval=KERN_FAILURE);
            if (sect->base_addr > maxaddr) {
                maxaddr = sect->base_addr;
                maxsize = sect->size;
            }
        }

        seg->vmsize = round_page(maxaddr + maxsize - seg->base_addr);
    }

    rval = KERN_SUCCESS;

finish:
    return rval;
}

/*******************************************************************************
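* Set the segment's initial and maximum VM protections. With strict
* protections, __TEXT becomes read/execute and all other segments read/write;
* otherwise both protections are left fully open.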
*******************************************************************************/
void
kxld_seg_set_vm_protections(KXLDSeg *seg, boolean_t strict_protections)
{
    /* This is unnecessary except to make the clang analyzer happy. When
     * the analyzer no longer ignores nonnull attributes for if statements,
     * we can remove this line.
     */
    if (!seg) return;

    if (strict_protections) {
        if (streq_safe(seg->segname, SEG_TEXT, const_strlen(SEG_TEXT))) {
            seg->initprot = TEXT_SEG_PROT;
            seg->maxprot = TEXT_SEG_PROT;
        } else {
            seg->initprot = DATA_SEG_PROT;
            seg->maxprot = DATA_SEG_PROT;
        }
    } else {
        seg->initprot = VM_PROT_ALL;
        seg->maxprot = VM_PROT_ALL;
    }
}

/*******************************************************************************
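* Slide the segment's link address by link_addr and relocate each of its
* sections by the same amount.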
*******************************************************************************/
void
kxld_seg_relocate(KXLDSeg *seg, kxld_addr_t link_addr)
{
    KXLDSect *sect = NULL;
    u_int i = 0;

    seg->link_addr += link_addr;
    for (i = 0; i < seg->sects.nitems; ++i) {
        sect = get_sect_by_index(seg, i);
        kxld_sect_relocate(sect, link_addr);
    }
}

/*******************************************************************************
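* Size the __LINKEDIT segment: its vmsize is the page-rounded size of the
* symbol table data plus, for targets that support slideable (PIC) kexts,
* the local and external relocation entries.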
*******************************************************************************/
void
kxld_seg_populate_linkedit(KXLDSeg *seg, const KXLDSymtab *symtab, boolean_t is_32_bit
#if KXLD_PIC_KEXTS
    , const KXLDArray *locrelocs
    , const KXLDArray *extrelocs
    , boolean_t target_supports_slideable_kexts
#endif /* KXLD_PIC_KEXTS */
    )
{
    u_long size = 0;

    size += kxld_symtab_get_macho_data_size(symtab, is_32_bit);

#if KXLD_PIC_KEXTS
    if (target_supports_slideable_kexts) {
        size += kxld_reloc_get_macho_data_size(locrelocs, extrelocs);
    }
#endif /* KXLD_PIC_KEXTS */

    seg->vmsize = round_page(size);
}