/*
 * Copyright (c) 2008 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <kern/assert.h>
#include <kern/debug.h>
#include <kern/kext_alloc.h>
#include <kern/misc_protos.h>

#include <mach/host_priv_server.h>
#include <mach/kern_return.h>
#include <mach/mach_vm.h>
#include <mach/vm_map.h>
#include <mach/vm_types.h>

#include <mach-o/loader.h>
#include <libkern/kernel_mach_header.h>
#include <libkern/prelink.h>
#include <libkern/OSKextLibPrivate.h>
#include <san/kasan.h>

#define KASLR_IOREG_DEBUG 0


vm_map_t g_kext_map = 0;
#if KASLR_IOREG_DEBUG
mach_vm_offset_t kext_alloc_base = 0;
mach_vm_offset_t kext_alloc_max = 0;
#else
static mach_vm_offset_t kext_alloc_base = 0;
static mach_vm_offset_t kext_alloc_max = 0;
#if CONFIG_KEXT_BASEMENT
static mach_vm_offset_t kext_post_boot_base = 0;
#endif
#endif

/*
 * On x86_64 systems, kernel extension text must remain within 2GB of the
 * kernel's text segment. To ensure this happens, we snag 2GB of kernel VM
 * as early as possible for kext allocations.
 */
__startup_func
void
kext_alloc_init(void)
{
#if CONFIG_KEXT_BASEMENT
	kern_return_t rval = 0;
	kernel_segment_command_t *text = NULL;
	kernel_segment_command_t *prelinkTextSegment = NULL;
	mach_vm_offset_t text_end, text_start;
	mach_vm_size_t text_size;
	mach_vm_size_t kext_alloc_size;

	/* Determine the start of the kernel's __TEXT segment and, from it, the
	 * lower bound of the submap used for kext allocations.
	 */

	text = getsegbyname(SEG_TEXT);
	text_start = vm_map_trunc_page(text->vmaddr,
	    VM_MAP_PAGE_MASK(kernel_map));
	text_start &= ~((512ULL * 1024 * 1024 * 1024) - 1);
	text_end = vm_map_round_page(text->vmaddr + text->vmsize,
	    VM_MAP_PAGE_MASK(kernel_map));
	text_size = text_end - text_start;

	kext_alloc_base = KEXT_ALLOC_BASE(text_end);
	kext_alloc_size = KEXT_ALLOC_SIZE(text_size);
	kext_alloc_max = kext_alloc_base + kext_alloc_size;

	/* Post-boot kext allocation will start after the prelinked kexts */
	prelinkTextSegment = getsegbyname("__PRELINK_TEXT");
	if (prelinkTextSegment) {
		/* use kext_post_boot_base to start allocations past all the prelinked
		 * kexts
		 */
		kext_post_boot_base =
		    vm_map_round_page(kext_alloc_base + prelinkTextSegment->vmsize,
		    VM_MAP_PAGE_MASK(kernel_map));
	} else {
		kext_post_boot_base = kext_alloc_base;
	}

	/* Allocate the sub block of the kernel map */
	rval = kmem_suballoc(kernel_map, (vm_offset_t *) &kext_alloc_base,
	    kext_alloc_size, /* pageable */ TRUE,
	    VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE,
	    VM_MAP_KERNEL_FLAGS_NONE, VM_KERN_MEMORY_KEXT,
	    &g_kext_map);
	if (rval != KERN_SUCCESS) {
		panic("kext_alloc_init: kmem_suballoc failed 0x%x\n", rval);
	}

	if ((kext_alloc_base + kext_alloc_size) > kext_alloc_max) {
		panic("kext_alloc_init: failed to get first 2GB\n");
	}

	if (kernel_map->min_offset > kext_alloc_base) {
		kernel_map->min_offset = kext_alloc_base;
	}

	printf("kext submap [0x%lx - 0x%lx], kernel text [0x%lx - 0x%lx]\n",
	    VM_KERNEL_UNSLIDE(kext_alloc_base),
	    VM_KERNEL_UNSLIDE(kext_alloc_max),
	    VM_KERNEL_UNSLIDE(text->vmaddr),
	    VM_KERNEL_UNSLIDE(text->vmaddr + text->vmsize));

#else
	g_kext_map = kernel_map;
	kext_alloc_base = VM_MIN_KERNEL_ADDRESS;
	kext_alloc_max = VM_MAX_KERNEL_ADDRESS;
#endif /* CONFIG_KEXT_BASEMENT */
}

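/*
 * Worked sketch of the basement arithmetic above (illustrative only; it
 * assumes KEXT_ALLOC_BASE(x) expands to (x) - 2GB and KEXT_ALLOC_SIZE(x)
 * to 2GB - (x), definitions that are machine-dependent and not part of
 * this file):
 *
 *     kext_alloc_base = text_end - 2GB;
 *     kext_alloc_size = 2GB - text_size;
 *     kext_alloc_max  = kext_alloc_base + kext_alloc_size
 *                     = text_end - text_size        (~= text_start)
 *
 * In other words, the kext submap occupies the window immediately below the
 * kernel's __TEXT segment, so every kext allocation stays within 2GB of
 * kernel text (the reach of x86_64 32-bit relative references).
 */
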
/*
 * Get a VM address in the kext submap at which a kext
 * collection of the given size could be mapped.
 */
vm_offset_t
get_address_from_kext_map(vm_size_t fsize)
{
	vm_offset_t addr = 0;
	kern_return_t ret;

	ret = kext_alloc(&addr, fsize, false);
	assert(ret == KERN_SUCCESS);

	if (ret != KERN_SUCCESS) {
		return 0;
	}

	kext_free(addr, fsize);

	addr += VM_MAP_PAGE_SIZE(g_kext_map);
	addr = vm_map_trunc_page(addr,
	    VM_MAP_PAGE_MASK(g_kext_map));
	return addr;
}

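/*
 * Illustrative sketch (not from this file): a hypothetical caller in the
 * kext-collection loading path might use the probe above like this, where
 * kc_size is assumed to be page-rounded:
 *
 *     vm_offset_t where = get_address_from_kext_map(kc_size);
 *     if (where == 0) {
 *         return KERN_NO_SPACE;
 *     }
 *     // ... map the collection at `where` with a fixed-address kext_alloc ...
 *
 * Note that the address is only a hint: the probe allocation is freed before
 * this function returns, so nothing holds the range reserved for the caller.
 */
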
kern_return_t
kext_alloc(vm_offset_t *_addr, vm_size_t size, boolean_t fixed)
{
	kern_return_t rval = 0;
#if CONFIG_KEXT_BASEMENT
	mach_vm_offset_t addr = (fixed) ? *_addr : kext_post_boot_base;
#else
	mach_vm_offset_t addr = (fixed) ? *_addr : kext_alloc_base;
#endif
	int flags = (fixed) ? VM_FLAGS_FIXED : VM_FLAGS_ANYWHERE;

#if CONFIG_KEXT_BASEMENT
	kc_format_t kcformat;
	if (PE_get_primary_kc_format(&kcformat) && kcformat == KCFormatFileset) {
		/*
		 * There is no need for a kext basement when booting with the
		 * new MH_FILESET format kext collection.
		 */
		rval = mach_vm_allocate_kernel(g_kext_map, &addr, size, flags, VM_KERN_MEMORY_KEXT);
		if (rval != KERN_SUCCESS) {
			printf("vm_allocate failed - %d\n", rval);
			goto finish;
		}
		goto check_reachable;
	}

	/* Allocate the kext virtual memory.
	 * 10608884 - use mach_vm_map, since we want VM_FLAGS_ANYWHERE allocations
	 * placed past kext_post_boot_base (when possible). mach_vm_allocate always
	 * starts searching at offset 0 in the map, no matter what address is passed
	 * in. We want non-fixed (post-boot) kext allocations to start looking for
	 * free space just past where the prelinked kexts were loaded.
	 */
	rval = mach_vm_map_kernel(g_kext_map,
	    &addr,
	    size,
	    0,
	    flags,
	    VM_MAP_KERNEL_FLAGS_NONE,
	    VM_KERN_MEMORY_KEXT,
	    MACH_PORT_NULL,
	    0,
	    TRUE,
	    VM_PROT_DEFAULT,
	    VM_PROT_ALL,
	    VM_INHERIT_DEFAULT);
	if (rval != KERN_SUCCESS) {
		printf("mach_vm_map failed - %d\n", rval);
		goto finish;
	}
check_reachable:
#else
	rval = mach_vm_allocate_kernel(g_kext_map, &addr, size, flags, VM_KERN_MEMORY_KEXT);
	if (rval != KERN_SUCCESS) {
		printf("vm_allocate failed - %d\n", rval);
		goto finish;
	}
#endif

	/* Check that the memory is reachable by kernel text */
	if ((addr + size) > kext_alloc_max) {
		kext_free((vm_offset_t)addr, size);
		rval = KERN_INVALID_ADDRESS;
		goto finish;
	}

	*_addr = (vm_offset_t)addr;
	rval = KERN_SUCCESS;
#if KASAN
	kasan_notify_address(addr, size);
#endif

finish:
	return rval;
}

void
kext_free(vm_offset_t addr, vm_size_t size)
{
	kern_return_t rval;

	rval = mach_vm_deallocate(g_kext_map, addr, size);
	assert(rval == KERN_SUCCESS);
}

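/*
 * Illustrative sketch (not from this file): a hypothetical kext-loading path
 * might pair the two routines above as follows, with load_size page-rounded:
 *
 *     vm_offset_t load_addr = 0;
 *     kern_return_t kr = kext_alloc(&load_addr, load_size, FALSE);
 *     if (kr != KERN_SUCCESS) {
 *         return kr;
 *     }
 *     // ... copy and link the kext into [load_addr, load_addr + load_size) ...
 *     kext_free(load_addr, load_size);    // on unload, or on load failure
 *
 * Passing fixed == TRUE instead treats *_addr as the requested address and
 * maps with VM_FLAGS_FIXED.
 */
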
kern_return_t
kext_receipt(void **addrp, size_t *sizep)
{
	kern_return_t ret = KERN_FAILURE;
	if (addrp == NULL || sizep == NULL) {
		goto finish;
	}

	kernel_mach_header_t *kc = PE_get_kc_header(KCKindAuxiliary);
	if (kc == NULL) {
		ret = KERN_MISSING_KC;
		goto finish;
	}

	/*
	 * This will be set in early boot once we've successfully checked that
	 * the AuxKC is properly linked against the BootKC. If this isn't set,
	 * and we have a valid AuxKC mach header, then the booter gave us a
	 * bad KC.
	 */
	if (auxkc_uuid_valid == FALSE) {
		ret = KERN_INVALID_KC;
		goto finish;
	}

	size_t size;
	void *addr = getsectdatafromheader(kc,
	    kReceiptInfoSegment, kAuxKCReceiptSection, &size);
	if (addr == NULL) {
		ret = KERN_INVALID_KC;
		goto finish;
	}

	*addrp = addr;
	*sizep = size;
	ret = KERN_SUCCESS;

finish:
	/*
	 * If we do return success, we want to wait for the other side to
	 * call kext_receipt_set_queried itself, so we can confirm that the
	 * receipt made the round trip before allowing third-party kexts to
	 * load.
	 */
	if (ret != KERN_SUCCESS) {
		kext_receipt_set_queried();
	}
	return ret;
}

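/*
 * Illustrative sketch (not from this file): a hypothetical consumer of the
 * AuxKC receipt might call the routine above like this:
 *
 *     void   *receipt      = NULL;
 *     size_t  receipt_size = 0;
 *
 *     if (kext_receipt(&receipt, &receipt_size) == KERN_SUCCESS) {
 *         // hand [receipt, receipt + receipt_size) to the querying client;
 *         // the client is expected to call kext_receipt_set_queried() once
 *         // the round trip completes, unblocking third-party kext loads.
 *     }
 *
 * On any failure, kext_receipt() already calls kext_receipt_set_queried()
 * itself, so the gate does not stay closed forever.
 */
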
/*
 * Returns KERN_FAILURE if the variable was already set.
 */
kern_return_t
kext_receipt_set_queried()
{
	return OSKextSetReceiptQueried();
}