/* libkern/kxld/kxld_seg.c — Apple xnu-1456.1.26 */
1/*
2 * Copyright (c) 2008 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28#include <string.h>
29#include <mach/vm_prot.h>
30#include <mach-o/loader.h>
31#include <sys/types.h>
32
33#if KERNEL
34 #include <mach/vm_param.h>
35#else
36 #include <mach/mach_init.h>
37#endif /* KERNEL */
38
39#define DEBUG_ASSERT_COMPONENT_NAME_STRING "kxld"
40#include <AssertMacros.h>
41
42#include "kxld_sect.h"
43#include "kxld_seg.h"
44#include "kxld_util.h"
45
46#define MAX_SEGS 20
47
48#define TEXT_SEG_PROT (VM_PROT_READ | VM_PROT_EXECUTE)
49#define DATA_SEG_PROT (VM_PROT_READ | VM_PROT_WRITE)
50
51#if KXLD_USER_OR_OBJECT
52static kern_return_t reorder_sections(KXLDSeg *seg, KXLDArray *section_order);
53static void reorder_section(KXLDArray *sects, u_int *sect_reorder_index,
54 KXLDSect **reorder_buffer, u_int reorder_buffer_index);
55#endif /* KXLD_USER_OR_OBJECT */
56
57#if 0
58static KXLDSeg * get_segment_by_name(KXLDArray *segarray, const char *name);
59#endif
60
61#if KXLD_USER_OR_ILP32
62static kern_return_t seg_export_macho_header_32(const KXLDSeg *seg, u_char *buf,
63 u_long *header_offset, u_long header_size, u_long data_offset);
64#endif
65#if KXLD_USER_OR_LP64
66static kern_return_t seg_export_macho_header_64(const KXLDSeg *seg, u_char *buf,
67 u_long *header_offset, u_long header_size, u_long data_offset);
68#endif
69
70static KXLDSect * get_sect_by_index(const KXLDSeg *seg, u_int idx);
71
72#if KXLD_USER_OR_ILP32
73/*******************************************************************************
74*******************************************************************************/
75kern_return_t
76kxld_seg_init_from_macho_32(KXLDSeg *seg, struct segment_command *src)
77{
78 kern_return_t rval = KERN_FAILURE;
79 check(seg);
80 check(src);
81
82 strlcpy(seg->segname, src->segname, sizeof(seg->segname));
83 seg->base_addr = src->vmaddr;
84 seg->link_addr = src->vmaddr;
85 seg->vmsize = src->vmsize;
86 seg->fileoff = src->fileoff;
87 seg->maxprot = src->maxprot;
88 seg->initprot = src->initprot;
89 seg->flags = src->flags;
90
91 rval = kxld_array_init(&seg->sects, sizeof(KXLDSect *), src->nsects);
92 require_noerr(rval, finish);
93
94 rval = KERN_SUCCESS;
95
96finish:
97 return rval;
98}
99#endif /* KXLD_USER_OR_ILP32 */
100
101#if KXLD_USER_OR_LP64
102/*******************************************************************************
103*******************************************************************************/
104kern_return_t
105kxld_seg_init_from_macho_64(KXLDSeg *seg, struct segment_command_64 *src)
106{
107 kern_return_t rval = KERN_FAILURE;
108 check(seg);
109 check(src);
110
111 strlcpy(seg->segname, src->segname, sizeof(seg->segname));
112 seg->base_addr = src->vmaddr;
113 seg->link_addr = src->vmaddr;
114 seg->vmsize = src->vmsize;
115 seg->fileoff = src->fileoff;
116 seg->maxprot = src->maxprot;
117 seg->initprot = src->initprot;
118 seg->flags = src->flags;
119
120 rval = kxld_array_init(&seg->sects, sizeof(KXLDSect *), src->nsects);
121 require_noerr(rval, finish);
122
123 rval = KERN_SUCCESS;
124
125finish:
126 return rval;
127}
128#endif /* KXLD_USER_OR_LP64 */
129
130#if KXLD_USER_OR_OBJECT
131/*******************************************************************************
132*******************************************************************************/
133kern_return_t
134kxld_seg_create_seg_from_sections(KXLDArray *segarray, KXLDArray *sectarray)
135{
136 kern_return_t rval = KERN_FAILURE;
137 KXLDSeg *seg = NULL;
138 KXLDSect *sect = NULL;
139 KXLDSect **sectp = NULL;
140 u_int i = 0;
141
142 /* Initialize the segment array to one segment */
143
144 rval = kxld_array_init(segarray, sizeof(KXLDSeg), 1);
145 require_noerr(rval, finish);
146
147 /* Initialize the segment */
148
149 seg = kxld_array_get_item(segarray, 0);
150 seg->initprot = VM_PROT_ALL;
151 seg->maxprot = VM_PROT_ALL;
152 seg->link_addr = 0;
153
154 /* Add the sections to the segment */
155
156 rval = kxld_array_init(&seg->sects, sizeof(KXLDSect *), sectarray->nitems);
157 require_noerr(rval, finish);
158
159 for (i = 0; i < sectarray->nitems; ++i) {
160 sect = kxld_array_get_item(sectarray, i);
161 sectp = kxld_array_get_item(&seg->sects, i);
162
163 *sectp = sect;
164 }
165
166 rval = KERN_SUCCESS;
167finish:
168 return rval;
169}
170
/*******************************************************************************
* Finish laying out the single synthetic segment of an MH_OBJECT kext: reorder
* its sections to match the kernel's section layout, place the segment after
* the (page-rounded) Mach-O header, assign each section its aligned link
* address, and compute the segment's page-rounded vmsize.
*
* segarray must contain exactly one segment (as created by
* kxld_seg_create_seg_from_sections).
*******************************************************************************/
kern_return_t
kxld_seg_finalize_object_segment(KXLDArray *segarray, KXLDArray *section_order,
    u_long hdrsize)
{
    kern_return_t rval = KERN_FAILURE;
    KXLDSeg *seg = NULL;
    KXLDSect *sect = NULL;
    u_long sect_offset = 0;
    u_int i = 0;

    check(segarray);
    check(section_order);
    require_action(segarray->nitems == 1, finish, rval=KERN_FAILURE);

    seg = kxld_array_get_item(segarray, 0);

    /* Reorder the sections */

    rval = reorder_sections(seg, section_order);
    require_noerr(rval, finish);

    /* Set the initial link address at the end of the header pages */

    seg->link_addr = round_page(hdrsize);

    /* Fix up all of the section addresses: each section starts at its
     * alignment boundary after the previous one ends.
     */

    sect_offset = (u_long) seg->link_addr;
    for (i = 0; i < seg->sects.nitems; ++i) {
        sect = *(KXLDSect **)kxld_array_get_item(&seg->sects, i);

        sect->link_addr = kxld_sect_align_address(sect, sect_offset);
        sect_offset = (u_long) (sect->link_addr + sect->size);
    }

    /* Finish initializing the segment: vmsize spans from the segment's start
     * to the page-rounded end of the last section.
     */

    seg->vmsize = round_page(sect_offset) - seg->link_addr;

    rval = KERN_SUCCESS;
finish:
    return rval;
}
216
217/*******************************************************************************
218* The legacy section ordering used by kld was based of the order of sections
219* in the kernel file. To achieve the same layout, we save the kernel's
220* section ordering as an array of section names when the kernel file itself
221* is linked. Then, when kexts are linked with the KXLD_LEGACY_LAYOUT flag,
222* we refer to the kernel's section layout to order the kext's sections.
223*
224* The algorithm below is as follows. We iterate through all of the kernel's
225* sections grouped by segment name, so that we are processing all of the __TEXT
226* sections, then all of the __DATA sections, etc. We then iterate through the
227* kext's sections with a similar grouping, looking for sections that match
228* the current kernel's section. In this way, we order all of the matching
229* kext sections in the order in which they appear in the kernel, and then place
230* all remaining kext sections at the end of the current segment grouping in
231* the order in which they originally appeared. Sections that only appear in
232* the kernel are not created. segments that only appear in the kext are
233* left in their original ordering.
234*
235* An example:
236*
237* Kernel sections:
238* __TEXT,__text
239* __TEXT,__initcode
240* __TEXT,__const
241* __DATA,__data
242*
243* Kext sections:
244* __TEXT,__const
245* __TEXT,__literal4
246* __TEXT,__text
247* __DATA,__const
248* __DATA,__data
249*
250* Reordered kext sections:
251* __TEXT,__text
252* __TEXT,__const
253* __TEXT,__literal4
254* __DATA,__data
255* __DATA,__const
256*
257* In the implementation below, we use a reorder buffer to hold pointers to the
258* sections of the current working segment. We scan this buffer looking for
259* matching sections, placing them in the segment's section index as we find them.
260* If this function must exit early, the segment's section index is left in an
261* unusable state.
262*******************************************************************************/
/* See the algorithm description in the block comment above.  On early exit
 * the segment's section index is left in an unusable state.
 */
static kern_return_t
reorder_sections(KXLDSeg *seg, KXLDArray *section_order)
{
    kern_return_t rval = KERN_FAILURE;
    KXLDSect *sect = NULL;
    KXLDSect **reorder_buffer = NULL;
    KXLDSectionName *section_name = NULL;
    const char *segname = NULL;
    u_int sect_index = 0, legacy_index = 0, sect_reorder_index = 0;
    u_int i = 0, j = 0;
    u_int sect_start = 0, sect_end = 0, legacy_start = 0, legacy_end = 0;
    u_int nsects = 0;

    check(seg);
    check(section_order);

    /* Allocate the reorder buffer with enough space to hold all of the
     * sections.
     */

    reorder_buffer = kxld_alloc(
        seg->sects.nitems * sizeof(*reorder_buffer));
    require_action(reorder_buffer, finish, rval=KERN_RESOURCE_SHORTAGE);

    /* Both arrays are grouped by segment name; walk them in lockstep, one
     * segment-name group per outer iteration.
     */
    while (legacy_index < section_order->nitems) {

        /* Find the next group of sections with a common segment in the
         * section_order array.  [legacy_start, legacy_end) is the run of
         * kernel section names sharing segname.
         */

        legacy_start = legacy_index++;
        legacy_end = legacy_index;

        section_name = kxld_array_get_item(section_order, legacy_start);
        segname = section_name->segname;
        while (legacy_index < section_order->nitems) {
            section_name = kxld_array_get_item(section_order, legacy_index);
            if (!streq_safe(segname, section_name->segname,
                    sizeof(section_name->segname)))
            {
                break;
            }

            ++legacy_index;
            ++legacy_end;
        }

        /* Find a group of sections in the kext that match the current
         * section_order segment.  [sect_start, sect_end) is the run of kext
         * sections with the same segname.
         */

        sect_start = sect_index;
        sect_end = sect_index;

        while (sect_index < seg->sects.nitems) {
            sect = *(KXLDSect **) kxld_array_get_item(&seg->sects, sect_index);
            if (!streq_safe(segname, sect->segname, sizeof(sect->segname))) {
                break;
            }

            ++sect_index;
            ++sect_end;
        }
        nsects = sect_end - sect_start;

        /* No kext sections live in this kernel segment; nothing to reorder. */
        if (!nsects) continue;

        /* Populate the reorder buffer with the current group of kext sections */

        for (i = sect_start; i < sect_end; ++i) {
            reorder_buffer[i - sect_start] =
                *(KXLDSect **) kxld_array_get_item(&seg->sects, i);
        }

        /* For each section_order section, scan the reorder buffer for a matching
         * kext section. If one is found, copy it into the next slot in the
         * segment's section index.  Matched buffer entries are NULLed out by
         * reorder_section() so they are not matched twice.
         */

        sect_reorder_index = sect_start;
        for (i = legacy_start; i < legacy_end; ++i) {
            section_name = kxld_array_get_item(section_order, i);
            sect = NULL;

            for (j = 0; j < nsects; ++j) {
                sect = reorder_buffer[j];
                if (!sect) continue;

                if (streq_safe(section_name->sectname, sect->sectname,
                        sizeof(section_name->sectname)))
                {
                    break;
                }

                sect = NULL;
            }

            if (sect) {
                /* j still indexes the matching buffer entry here. */
                (void) reorder_section(&seg->sects, &sect_reorder_index,
                    reorder_buffer, j);
            }
        }

        /* If any sections remain in the reorder buffer, they are not specified
         * in the section_order array, so append them to the section index in
         * in the order they are found.
         */

        for (i = 0; i < nsects; ++i) {
            if (!reorder_buffer[i]) continue;
            reorder_section(&seg->sects, &sect_reorder_index, reorder_buffer, i);
        }
    }

    rval = KERN_SUCCESS;

finish:

    if (reorder_buffer) {
        kxld_free(reorder_buffer, seg->sects.nitems * sizeof(*reorder_buffer));
        reorder_buffer = NULL;
    }

    return rval;
}
388
389/*******************************************************************************
390*******************************************************************************/
391static void
392reorder_section(KXLDArray *sects, u_int *sect_reorder_index,
393 KXLDSect **reorder_buffer, u_int reorder_buffer_index)
394{
395 KXLDSect **tmp = NULL;
396
397 tmp = kxld_array_get_item(sects, *sect_reorder_index);
398
399 *tmp = reorder_buffer[reorder_buffer_index];
400 reorder_buffer[reorder_buffer_index]->sectnum = *sect_reorder_index;
401 reorder_buffer[reorder_buffer_index] = NULL;
402
403 ++(*sect_reorder_index);
404}
405#endif /* KXLD_USER_OR_OBJECT */
406
407/*******************************************************************************
408*******************************************************************************/
409void
410kxld_seg_clear(KXLDSeg *seg)
411{
412 check(seg);
413
414 bzero(seg->segname, sizeof(seg->segname));
415 seg->base_addr = 0;
416 seg->link_addr = 0;
417 seg->vmsize = 0;
418 seg->flags = 0;
419 seg->maxprot = 0;
420 seg->initprot = 0;
421
422 /* Don't clear the individual sections here because kxld_kext.c will take
423 * care of that.
424 */
425 kxld_array_clear(&seg->sects);
426}
427
428/*******************************************************************************
429*******************************************************************************/
430void
431kxld_seg_deinit(KXLDSeg *seg)
432{
433 check(seg);
434
435 kxld_array_deinit(&seg->sects);
436 bzero(seg, sizeof(*seg));
437}
438
439/*******************************************************************************
440*******************************************************************************/
441kxld_size_t
442kxld_seg_get_vmsize(const KXLDSeg *seg)
443{
444 check(seg);
445
446 return seg->vmsize;
447}
448
449/*******************************************************************************
450*******************************************************************************/
451u_long
452kxld_seg_get_macho_header_size(const KXLDSeg *seg, boolean_t is_32_bit)
453{
454 u_long size = 0;
455
456 check(seg);
457
458 if (is_32_bit) {
459 size += sizeof(struct segment_command);
460 } else {
461 size += sizeof(struct segment_command_64);
462 }
463 size += seg->sects.nitems * kxld_sect_get_macho_header_size(is_32_bit);
464
465 return size;
466}
467
468/*******************************************************************************
469*******************************************************************************/
470u_long
471kxld_seg_get_macho_data_size(const KXLDSeg *seg)
472{
473 u_long size = 0;
474 u_int i = 0;
475 KXLDSect *sect = NULL;
476
477 check(seg);
478
479 for (i = 0; i < seg->sects.nitems; ++i) {
480 sect = get_sect_by_index(seg, i);
481 size = (u_long) kxld_sect_align_address(sect, size);
482 size += kxld_sect_get_macho_data_size(sect);
483 }
484
485 return round_page(size);
486}
487
488/*******************************************************************************
489*******************************************************************************/
490static KXLDSect *
491get_sect_by_index(const KXLDSeg *seg, u_int idx)
492{
493 check(seg);
494
495 return *(KXLDSect **) kxld_array_get_item(&seg->sects, idx);
496}
497
/*******************************************************************************
* Write the segment's load command (at *header_offset) and each section's
* header and data (at *data_offset) into a file-layout Mach-O buffer.  Both
* offsets are advanced past what was written; *data_offset is additionally
* rounded up to the next page so the following segment starts page-aligned.
* The command's filesize is patched in after the sections are written, once
* the amount of data actually emitted is known.
*******************************************************************************/
kern_return_t
kxld_seg_export_macho_to_file_buffer(const KXLDSeg *seg, u_char *buf,
    u_long *header_offset, u_long header_size,
    u_long *data_offset, u_long data_size,
    boolean_t is_32_bit)
{
    kern_return_t rval = KERN_FAILURE;
    KXLDSect *sect = NULL;
    u_long base_data_offset = *data_offset;
    u_int i = 0;
    /* hdr32 and hdr64 alias the same header location; only the one matching
     * is_32_bit is valid, and it is written by the export call below before
     * being dereferenced.
     */
    struct segment_command *hdr32 =
        (struct segment_command *) (buf + *header_offset);
    struct segment_command_64 *hdr64 =
        (struct segment_command_64 *) (buf + *header_offset);

    check(seg);
    check(buf);
    check(header_offset);
    check(data_offset);

    /* Write out the header */

    KXLD_3264_FUNC(is_32_bit, rval,
        seg_export_macho_header_32, seg_export_macho_header_64,
        seg, buf, header_offset, header_size, *data_offset);
    require_noerr(rval, finish);

    /* Write out each section */

    for (i = 0; i < seg->sects.nitems; ++i) {
        sect = get_sect_by_index(seg, i);

        rval = kxld_sect_export_macho_to_file_buffer(sect, buf, header_offset,
            header_size, data_offset, data_size, is_32_bit);
        require_noerr(rval, finish);
    }

    /* Update the filesize: the distance *data_offset advanced while the
     * sections were written.
     */

    if (is_32_bit) {
        hdr32->filesize = (uint32_t) (*data_offset - base_data_offset);
    } else {
        hdr64->filesize = (uint64_t) (*data_offset - base_data_offset);
    }

    *data_offset = round_page(*data_offset);

    rval = KERN_SUCCESS;

finish:
    return rval;

}
553
/*******************************************************************************
* Write the segment's load command and each section's data into a VM-layout
* buffer, where every segment's data lives at (link_addr - file_link_addr)
* rather than at a packed file offset.  *header_offset is advanced past the
* load command; section data positions are derived from their link addresses.
*******************************************************************************/
kern_return_t
kxld_seg_export_macho_to_vm(const KXLDSeg *seg, u_char *buf,
    u_long *header_offset, u_long header_size,
    u_long data_size, kxld_addr_t file_link_addr,
    boolean_t is_32_bit)
{
    kern_return_t rval = KERN_FAILURE;
    KXLDSect *sect = NULL;
    /* In VM layout the segment's file offset is its offset from the start of
     * the linked image.
     */
    u_long data_offset = (u_long) (seg->link_addr - file_link_addr);
    u_int i = 0;

    check(seg);
    check(buf);
    check(header_offset);

    /* Write out the header */

    KXLD_3264_FUNC(is_32_bit, rval,
        seg_export_macho_header_32, seg_export_macho_header_64,
        seg, buf, header_offset, header_size, data_offset);
    require_noerr(rval, finish);

    /* Write out each section */

    for (i = 0; i < seg->sects.nitems; ++i) {
        sect = get_sect_by_index(seg, i);

        rval = kxld_sect_export_macho_to_vm(sect, buf, header_offset,
            header_size, file_link_addr, data_size, is_32_bit);
        require_noerr(rval, finish);
    }

    rval = KERN_SUCCESS;

finish:
    return rval;
}
593
594#if KXLD_USER_OR_ILP32
595/*******************************************************************************
596*******************************************************************************/
597static kern_return_t
598seg_export_macho_header_32(const KXLDSeg *seg, u_char *buf,
599 u_long *header_offset, u_long header_size, u_long data_offset)
600{
601 kern_return_t rval = KERN_FAILURE;
602 struct segment_command *seghdr = NULL;
603
604 check(seg);
605 check(buf);
606 check(header_offset);
607
608 require_action(sizeof(*seghdr) <= header_size - *header_offset, finish,
609 rval=KERN_FAILURE);
610 seghdr = (struct segment_command *) (buf + *header_offset);
611 *header_offset += sizeof(*seghdr);
612
613 seghdr->cmd = LC_SEGMENT;
614 seghdr->cmdsize = (uint32_t) sizeof(*seghdr);
615 seghdr->cmdsize +=
616 (uint32_t) (seg->sects.nitems * kxld_sect_get_macho_header_size(TRUE));
617 strlcpy(seghdr->segname, seg->segname, sizeof(seghdr->segname));
618 seghdr->vmaddr = (uint32_t) seg->link_addr;
619 seghdr->vmsize = (uint32_t) seg->vmsize;
620 seghdr->fileoff = (uint32_t) data_offset;
621 seghdr->filesize = (uint32_t) seg->vmsize;
622 seghdr->maxprot = seg->maxprot;
623 seghdr->initprot = seg->initprot;
624 seghdr->nsects = seg->sects.nitems;
625 seghdr->flags = 0;
626
627 rval = KERN_SUCCESS;
628
629finish:
630 return rval;
631}
632#endif /* KXLD_USER_OR_ILP32 */
633
634#if KXLD_USER_OR_LP64
635/*******************************************************************************
636*******************************************************************************/
637static kern_return_t
638seg_export_macho_header_64(const KXLDSeg *seg, u_char *buf,
639 u_long *header_offset, u_long header_size, u_long data_offset)
640{
641 kern_return_t rval = KERN_FAILURE;
642 struct segment_command_64 *seghdr = NULL;
643
644 check(seg);
645 check(buf);
646 check(header_offset);
647
648 require_action(sizeof(*seghdr) <= header_size - *header_offset, finish,
649 rval=KERN_FAILURE);
650 seghdr = (struct segment_command_64 *) (buf + *header_offset);
651 *header_offset += sizeof(*seghdr);
652
653 seghdr->cmd = LC_SEGMENT_64;
654 seghdr->cmdsize = (uint32_t) sizeof(*seghdr);
655 seghdr->cmdsize +=
656 (uint32_t) (seg->sects.nitems * kxld_sect_get_macho_header_size(FALSE));
657 strlcpy(seghdr->segname, seg->segname, sizeof(seghdr->segname));
658 seghdr->vmaddr = (uint64_t) seg->link_addr;
659 seghdr->vmsize = (uint64_t) seg->vmsize;
660 seghdr->fileoff = (uint64_t) data_offset;
661 seghdr->filesize = (uint64_t) seg->vmsize;
662 seghdr->maxprot = seg->maxprot;
663 seghdr->initprot = seg->initprot;
664 seghdr->nsects = seg->sects.nitems;
665 seghdr->flags = 0;
666
667 rval = KERN_SUCCESS;
668
669finish:
670 return rval;
671}
672#endif /* KXLD_USER_OR_LP64 */
673
674/*******************************************************************************
675*******************************************************************************/
676kern_return_t
677kxld_seg_add_section(KXLDSeg *seg, KXLDSect *sect)
678{
679 kern_return_t rval = KERN_FAILURE;
680 KXLDSect **sectp = NULL;
681 u_int i;
682
683 check(seg);
684 check(sect);
685 require_action(streq_safe(seg->segname, sect->segname, sizeof(seg->segname)),
686 finish, rval=KERN_FAILURE);
687
688 /* Add the section into the section index */
689
690 for (i = 0; i < seg->sects.nitems; ++i) {
691 sectp = kxld_array_get_item(&seg->sects, i);
692 if (NULL == *sectp) {
693 *sectp = sect;
694 break;
695 }
696 }
697 require_action(i < seg->sects.nitems, finish, rval=KERN_FAILURE);
698
699 rval = KERN_SUCCESS;
700
701finish:
702
703 return rval;
704}
705
706/*******************************************************************************
707*******************************************************************************/
708kern_return_t
709kxld_seg_finish_init(KXLDSeg *seg)
710{
711 kern_return_t rval = KERN_FAILURE;
712 u_int i = 0;
713 KXLDSect *sect = NULL;
714 kxld_addr_t maxaddr = 0;
715 kxld_size_t maxsize = 0;
716
717 if (seg->sects.nitems) {
718 for (i = 0; i < seg->sects.nitems; ++i) {
719 sect = get_sect_by_index(seg, i);
720 require_action(sect, finish, rval=KERN_FAILURE);
721 if (sect->base_addr > maxaddr) {
722 maxaddr = sect->base_addr;
723 maxsize = sect->size;
724 }
725 }
726
727 /* XXX Cross architecture linking will fail if the page size ever differs
728 * from 4096. (As of this writing, we're fine on ppc, i386, x86_64, and
729 * arm.)
730 */
731 seg->vmsize = round_page(maxaddr + maxsize - seg->base_addr);
732 }
733
734 rval = KERN_SUCCESS;
735
736finish:
737 return rval;
738}
739
740/*******************************************************************************
741*******************************************************************************/
742void
743kxld_seg_set_vm_protections(KXLDSeg *seg, boolean_t strict_protections)
744{
745 if (strict_protections) {
746 if (streq_safe(seg->segname, SEG_TEXT, sizeof(SEG_TEXT))) {
747 seg->initprot = TEXT_SEG_PROT;
748 seg->maxprot = VM_PROT_ALL;
749 } else {
750 seg->initprot = DATA_SEG_PROT;
751 seg->maxprot = DATA_SEG_PROT;
752 }
753 } else {
754 seg->initprot = VM_PROT_ALL;
755 seg->maxprot = VM_PROT_ALL;
756 }
757}
758
759/*******************************************************************************
760*******************************************************************************/
761void
762kxld_seg_relocate(KXLDSeg *seg, kxld_addr_t link_addr)
763{
764 KXLDSect *sect = NULL;
765 u_int i = 0;
766
767 seg->link_addr += link_addr;
768 for (i = 0; i < seg->sects.nitems; ++i) {
769 sect = get_sect_by_index(seg, i);
770 kxld_sect_relocate(sect, link_addr);
771 }
772}
773