/*
 * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */
/*
 *	File:	vm/vm_kern.c
 *	Author:	Avadis Tevanian, Jr., Michael Wayne Young
 *	Date:	1985
 *
 *	Kernel memory management.
 */

#include <mach/kern_return.h>
#include <mach/vm_param.h>
#include <kern/assert.h>
#include <kern/lock.h>
#include <kern/thread.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <kern/misc_protos.h>
#include <vm/cpm.h>

#include <string.h>

/*
 *	Variables exported by this module.
 */

vm_map_t	kernel_map;
vm_map_t	kernel_pageable_map;

/*
 * Forward declarations for internal functions.
 */
extern kern_return_t kmem_alloc_pages(
	register vm_object_t		object,
	register vm_object_offset_t	offset,
	register vm_object_size_t	size);

extern void kmem_remap_pages(
	register vm_object_t		object,
	register vm_object_offset_t	offset,
	register vm_offset_t		start,
	register vm_offset_t		end,
	vm_prot_t			protection);

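/*
 *	kmem_alloc_contig:
 *
 *	Allocate wired-down memory whose pages are physically contiguous,
 *	drawn from the contiguous physical memory allocator (cpm_allocate).
 *	Only KMA_KOBJECT is honored in "flags".
 */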
kern_return_t
kmem_alloc_contig(
	vm_map_t	map,
	vm_offset_t	*addrp,
	vm_size_t	size,
	vm_offset_t	mask,
	int		flags)
{
	vm_object_t		object;
	vm_object_offset_t	offset;
	vm_map_offset_t		map_addr;
	vm_map_offset_t		map_mask;
	vm_map_size_t		map_size, i;
	vm_map_entry_t		entry;
	vm_page_t		m, pages;
	kern_return_t		kr;

	if (map == VM_MAP_NULL || (flags && (flags ^ KMA_KOBJECT)))
		return KERN_INVALID_ARGUMENT;

	if (size == 0) {
		*addrp = 0;
		return KERN_INVALID_ARGUMENT;
	}

	map_size = vm_map_round_page(size);
	map_mask = (vm_map_offset_t)mask;

	/*
	 * Allocate a new object (if necessary) and the reference we
	 * will be donating to the map entry.  We must do this before
	 * locking the map, or risk deadlock with the default pager.
	 */
	if ((flags & KMA_KOBJECT) != 0) {
		object = kernel_object;
		vm_object_reference(object);
	} else {
		object = vm_object_allocate(map_size);
	}

	kr = vm_map_find_space(map, &map_addr, map_size, map_mask, &entry);
	if (KERN_SUCCESS != kr) {
		vm_object_deallocate(object);
		return kr;
	}

	entry->object.vm_object = object;
	entry->offset = offset = (object == kernel_object) ?
		map_addr - VM_MIN_KERNEL_ADDRESS : 0;

	/* Take an extra object ref in case the map entry gets deleted */
	vm_object_reference(object);
	vm_map_unlock(map);

	kr = cpm_allocate(CAST_DOWN(vm_size_t, map_size), &pages, FALSE);

	if (kr != KERN_SUCCESS) {
		vm_map_remove(map, vm_map_trunc_page(map_addr),
			      vm_map_round_page(map_addr + map_size), 0);
		vm_object_deallocate(object);
		*addrp = 0;
		return kr;
	}

	vm_object_lock(object);
	for (i = 0; i < map_size; i += PAGE_SIZE) {
		m = pages;
		pages = NEXT_PAGE(m);
		m->busy = FALSE;
		vm_page_insert(m, object, offset + i);
	}
	vm_object_unlock(object);

	if ((kr = vm_map_wire(map, vm_map_trunc_page(map_addr),
			      vm_map_round_page(map_addr + map_size),
			      VM_PROT_DEFAULT, FALSE))
	    != KERN_SUCCESS) {
		if (object == kernel_object) {
			vm_object_lock(object);
			vm_object_page_remove(object, offset, offset + map_size);
			vm_object_unlock(object);
		}
		vm_map_remove(map, vm_map_trunc_page(map_addr),
			      vm_map_round_page(map_addr + map_size), 0);
		vm_object_deallocate(object);
		return kr;
	}
	vm_object_deallocate(object);

	if (object == kernel_object)
		vm_map_simplify(map, map_addr);

	*addrp = map_addr;
	return KERN_SUCCESS;
}

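/*
 * Illustrative sketch (not part of this file): a caller that needs a
 * physically contiguous, page-aligned buffer, e.g. for DMA, might write
 *
 *	vm_offset_t	buf;
 *	kern_return_t	kr;
 *
 *	kr = kmem_alloc_contig(kernel_map, &buf, 4 * PAGE_SIZE,
 *			       PAGE_MASK, 0);
 *	if (kr == KERN_SUCCESS) {
 *		... use buf ...
 *		kmem_free(kernel_map, buf, 4 * PAGE_SIZE);
 *	}
 */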
/*
 * Master entry point for allocating kernel memory.
 * NOTE: this routine is _never_ interrupt safe.
 *
 * map		: map to allocate into
 * addrp	: pointer to start address of new memory
 * size		: size of memory requested
 * flags	: options
 *		  KMA_HERE		*addrp is base address, else "anywhere"
 *		  KMA_NOPAGEWAIT	don't wait for pages if unavailable
 *		  KMA_KOBJECT		use kernel_object
 */

kern_return_t
kernel_memory_allocate(
	register vm_map_t	map,
	register vm_offset_t	*addrp,
	register vm_size_t	size,
	register vm_offset_t	mask,
	int			flags)
{
	vm_object_t		object;
	vm_object_offset_t	offset;
	vm_map_entry_t		entry;
	vm_map_offset_t		map_addr;
	vm_map_offset_t		map_mask;
	vm_map_size_t		map_size;
	vm_map_size_t		i;
	kern_return_t		kr;

	if (size == 0) {
		*addrp = 0;
		return KERN_INVALID_ARGUMENT;
	}

	map_size = vm_map_round_page(size);
	map_mask = (vm_map_offset_t) mask;

	/*
	 * Allocate a new object (if necessary).  We must do this before
	 * locking the map, or risk deadlock with the default pager.
	 */
	if ((flags & KMA_KOBJECT) != 0) {
		object = kernel_object;
		vm_object_reference(object);
	} else {
		object = vm_object_allocate(map_size);
	}

	kr = vm_map_find_space(map, &map_addr, map_size, map_mask, &entry);
	if (KERN_SUCCESS != kr) {
		vm_object_deallocate(object);
		return kr;
	}

	entry->object.vm_object = object;
	entry->offset = offset = (object == kernel_object) ?
		map_addr - VM_MIN_KERNEL_ADDRESS : 0;

	vm_object_reference(object);
	vm_map_unlock(map);

	vm_object_lock(object);
	for (i = 0; i < map_size; i += PAGE_SIZE) {
		vm_page_t	mem;

		while (VM_PAGE_NULL ==
		       (mem = vm_page_alloc(object, offset + i))) {
			if (flags & KMA_NOPAGEWAIT) {
				if (object == kernel_object)
					vm_object_page_remove(object, offset, offset + i);
				vm_object_unlock(object);
				vm_map_remove(map, map_addr, map_addr + map_size, 0);
				vm_object_deallocate(object);
				return KERN_RESOURCE_SHORTAGE;
			}
			vm_object_unlock(object);
			VM_PAGE_WAIT();
			vm_object_lock(object);
		}
		mem->busy = FALSE;
	}
	vm_object_unlock(object);

	if ((kr = vm_map_wire(map, map_addr, map_addr + map_size,
			      VM_PROT_DEFAULT, FALSE))
	    != KERN_SUCCESS) {
		if (object == kernel_object) {
			vm_object_lock(object);
			vm_object_page_remove(object, offset, offset + map_size);
			vm_object_unlock(object);
		}
		vm_map_remove(map, map_addr, map_addr + map_size, 0);
		vm_object_deallocate(object);
		return (kr);
	}
	/* now that the pages are wired, we no longer have to fear coalesce */
	vm_object_deallocate(object);
	if (object == kernel_object)
		vm_map_simplify(map, map_addr);

	/*
	 * Return the memory, not zeroed.
	 */
	*addrp = CAST_DOWN(vm_offset_t, map_addr);
	return KERN_SUCCESS;
}

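/*
 * Illustrative sketch (not part of this file): allocating into the
 * kernel object without sleeping for free pages:
 *
 *	vm_offset_t	addr;
 *	kern_return_t	kr;
 *
 *	kr = kernel_memory_allocate(kernel_map, &addr, PAGE_SIZE, 0,
 *				    KMA_KOBJECT | KMA_NOPAGEWAIT);
 *	if (kr == KERN_RESOURCE_SHORTAGE)
 *		... no pages were available; retry later ...
 */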
/*
 *	kmem_alloc:
 *
 *	Allocate wired-down memory in the kernel's address map
 *	or a submap.  The memory is not zero-filled.
 */

kern_return_t
kmem_alloc(
	vm_map_t	map,
	vm_offset_t	*addrp,
	vm_size_t	size)
{
	return kernel_memory_allocate(map, addrp, size, 0, 0);
}

/*
 *	kmem_realloc:
 *
 *	Reallocate wired-down memory in the kernel's address map
 *	or a submap.  Newly allocated pages are not zeroed.
 *	This can only be used on regions allocated with kmem_alloc.
 *
 *	If successful, the pages in the old region are mapped twice.
 *	The old region is unchanged.  Use kmem_free to get rid of it.
 */
kern_return_t
kmem_realloc(
	vm_map_t	map,
	vm_offset_t	oldaddr,
	vm_size_t	oldsize,
	vm_offset_t	*newaddrp,
	vm_size_t	newsize)
{
	vm_object_t		object;
	vm_object_offset_t	offset;
	vm_map_offset_t		oldmapmin;
	vm_map_offset_t		oldmapmax;
	vm_map_offset_t		newmapaddr;
	vm_map_size_t		oldmapsize;
	vm_map_size_t		newmapsize;
	vm_map_entry_t		oldentry;
	vm_map_entry_t		newentry;
	vm_page_t		mem;
	kern_return_t		kr;

	oldmapmin = vm_map_trunc_page(oldaddr);
	oldmapmax = vm_map_round_page(oldaddr + oldsize);
	oldmapsize = oldmapmax - oldmapmin;
	newmapsize = vm_map_round_page(newsize);

	/*
	 *	Find the VM object backing the old region.
	 */

	vm_map_lock(map);

	if (!vm_map_lookup_entry(map, oldmapmin, &oldentry))
		panic("kmem_realloc");
	object = oldentry->object.vm_object;

	/*
	 *	Increase the size of the object and
	 *	fill in the new region.
	 */

	vm_object_reference(object);
	/* by grabbing the object lock before unlocking the map */
	/* we guarantee that we will panic if more than one */
	/* attempt is made to realloc a kmem_alloc'd area */
	vm_object_lock(object);
	vm_map_unlock(map);
	if (object->size != oldmapsize)
		panic("kmem_realloc");
	object->size = newmapsize;
	vm_object_unlock(object);

	/* allocate the new pages while expanded portion of the */
	/* object is still not mapped */
	kmem_alloc_pages(object, vm_object_round_page(oldmapsize),
			 vm_object_round_page(newmapsize - oldmapsize));

	/*
	 *	Find space for the new region.
	 */

	kr = vm_map_find_space(map, &newmapaddr, newmapsize,
			       (vm_map_offset_t) 0, &newentry);
	if (kr != KERN_SUCCESS) {
		vm_object_lock(object);
		for(offset = oldmapsize;
		    offset < newmapsize; offset += PAGE_SIZE) {
			if ((mem = vm_page_lookup(object, offset)) != VM_PAGE_NULL) {
				vm_page_lock_queues();
				vm_page_free(mem);
				vm_page_unlock_queues();
			}
		}
		object->size = oldmapsize;
		vm_object_unlock(object);
		vm_object_deallocate(object);
		return kr;
	}
	newentry->object.vm_object = object;
	newentry->offset = 0;
	assert (newentry->wired_count == 0);

	/* add an extra reference in case we have someone doing an */
	/* unexpected deallocate */
	vm_object_reference(object);
	vm_map_unlock(map);

	kr = vm_map_wire(map, newmapaddr, newmapaddr + newmapsize,
			 VM_PROT_DEFAULT, FALSE);
	if (KERN_SUCCESS != kr) {
		vm_map_remove(map, newmapaddr, newmapaddr + newmapsize, 0);
		vm_object_lock(object);
		for(offset = oldmapsize; offset < newmapsize; offset += PAGE_SIZE) {
			if ((mem = vm_page_lookup(object, offset)) != VM_PAGE_NULL) {
				vm_page_lock_queues();
				vm_page_free(mem);
				vm_page_unlock_queues();
			}
		}
		object->size = oldmapsize;
		vm_object_unlock(object);
		vm_object_deallocate(object);
		return (kr);
	}
	vm_object_deallocate(object);

	*newaddrp = CAST_DOWN(vm_offset_t, newmapaddr);
	return KERN_SUCCESS;
}

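/*
 * Illustrative sketch (not part of this file): because kmem_realloc
 * leaves the old mapping intact, the caller must free it explicitly:
 *
 *	vm_offset_t	newbuf;
 *
 *	if (kmem_realloc(kernel_map, oldbuf, oldsz, &newbuf, newsz)
 *	    == KERN_SUCCESS) {
 *		kmem_free(kernel_map, oldbuf, oldsz);
 *		... the old contents are now visible through newbuf ...
 *	}
 */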
/*
 *	kmem_alloc_wired:
 *
 *	Allocate wired-down memory in the kernel's address map
 *	or a submap.  The memory is not zero-filled.
 *
 *	The memory is allocated in the kernel_object.
 *	It may not be copied with vm_map_copy, and
 *	it may not be reallocated with kmem_realloc.
 */

kern_return_t
kmem_alloc_wired(
	vm_map_t	map,
	vm_offset_t	*addrp,
	vm_size_t	size)
{
	return kernel_memory_allocate(map, addrp, size, 0, KMA_KOBJECT);
}

/*
 *	kmem_alloc_aligned:
 *
 *	Like kmem_alloc_wired, except that the memory is aligned.
 *	The size should be a power-of-2.
 */

kern_return_t
kmem_alloc_aligned(
	vm_map_t	map,
	vm_offset_t	*addrp,
	vm_size_t	size)
{
	if ((size & (size - 1)) != 0)
		panic("kmem_alloc_aligned: size not aligned");
	return kernel_memory_allocate(map, addrp, size, size - 1, KMA_KOBJECT);
}

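/*
 * Illustrative sketch (not part of this file): a 16KB allocation whose
 * start address is 16KB-aligned, since the mask passed down is size - 1:
 *
 *	vm_offset_t	addr;
 *
 *	if (kmem_alloc_aligned(kernel_map, &addr, 16 * 1024) == KERN_SUCCESS)
 *		assert((addr & (16 * 1024 - 1)) == 0);
 */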
/*
 *	kmem_alloc_pageable:
 *
 *	Allocate pageable memory in the kernel's address map.
 */

kern_return_t
kmem_alloc_pageable(
	vm_map_t	map,
	vm_offset_t	*addrp,
	vm_size_t	size)
{
	vm_map_offset_t	map_addr;
	vm_map_size_t	map_size;
	kern_return_t	kr;

#ifndef normal
	map_addr = (vm_map_min(map)) + 0x1000;
#else
	map_addr = vm_map_min(map);
#endif
	map_size = vm_map_round_page(size);

	kr = vm_map_enter(map, &map_addr, map_size,
			  (vm_map_offset_t) 0, VM_FLAGS_ANYWHERE,
			  VM_OBJECT_NULL, (vm_object_offset_t) 0, FALSE,
			  VM_PROT_DEFAULT, VM_PROT_ALL, VM_INHERIT_DEFAULT);

	if (kr != KERN_SUCCESS)
		return kr;

	*addrp = CAST_DOWN(vm_offset_t, map_addr);
	return KERN_SUCCESS;
}

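/*
 * Illustrative sketch (not part of this file): pageable allocations hand
 * back zero-fill virtual memory that is populated on first touch and may
 * later be paged out:
 *
 *	vm_offset_t	addr;
 *
 *	if (kmem_alloc_pageable(kernel_pageable_map, &addr, 64 * 1024)
 *	    == KERN_SUCCESS)
 *		... touch pages as needed; nothing is wired ...
 */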
/*
 *	kmem_free:
 *
 *	Release a region of kernel virtual memory allocated
 *	with kmem_alloc, kmem_alloc_wired, or kmem_alloc_pageable,
 *	and return the physical pages associated with that region.
 */

void
kmem_free(
	vm_map_t	map,
	vm_offset_t	addr,
	vm_size_t	size)
{
	kern_return_t kr;

	kr = vm_map_remove(map, vm_map_trunc_page(addr),
			   vm_map_round_page(addr + size),
			   VM_MAP_REMOVE_KUNWIRE);
	if (kr != KERN_SUCCESS)
		panic("kmem_free");
}

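/*
 * Illustrative sketch (not part of this file): the common
 * allocate/use/free pattern for wired kernel memory.  Note that
 * kmem_alloc does not zero the buffer for the caller:
 *
 *	vm_offset_t	buf;
 *
 *	if (kmem_alloc(kernel_map, &buf, PAGE_SIZE) != KERN_SUCCESS)
 *		return;
 *	memset((void *)buf, 0, PAGE_SIZE);
 *	...
 *	kmem_free(kernel_map, buf, PAGE_SIZE);
 */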
/*
 *	Allocate new pages in an object.
 */

kern_return_t
kmem_alloc_pages(
	register vm_object_t		object,
	register vm_object_offset_t	offset,
	register vm_object_size_t	size)
{
	vm_object_size_t		alloc_size;

	alloc_size = vm_object_round_page(size);
	vm_object_lock(object);
	while (alloc_size) {
		register vm_page_t	mem;

		/*
		 *	Allocate a page
		 */
		while (VM_PAGE_NULL ==
		       (mem = vm_page_alloc(object, offset))) {
			vm_object_unlock(object);
			VM_PAGE_WAIT();
			vm_object_lock(object);
		}
		mem->busy = FALSE;

		alloc_size -= PAGE_SIZE;
		offset += PAGE_SIZE;
	}
	vm_object_unlock(object);
	return KERN_SUCCESS;
}

/*
 *	Remap wired pages in an object into a new region.
 *	The object is assumed to be mapped into the kernel map or
 *	a submap.
 */
void
kmem_remap_pages(
	register vm_object_t		object,
	register vm_object_offset_t	offset,
	register vm_offset_t		start,
	register vm_offset_t		end,
	vm_prot_t			protection)
{
	vm_map_offset_t			map_start;
	vm_map_offset_t			map_end;

	/*
	 *	Mark the pmap region as not pageable.
	 */
	map_start = vm_map_trunc_page(start);
	map_end = vm_map_round_page(end);

	pmap_pageable(kernel_pmap, map_start, map_end, FALSE);

	while (map_start < map_end) {
		register vm_page_t	mem;

		vm_object_lock(object);

		/*
		 *	Find a page
		 */
		if ((mem = vm_page_lookup(object, offset)) == VM_PAGE_NULL)
			panic("kmem_remap_pages");

		/*
		 *	Wire it down (again)
		 */
		vm_page_lock_queues();
		vm_page_wire(mem);
		vm_page_unlock_queues();
		vm_object_unlock(object);

		/*
		 * ENCRYPTED SWAP:
		 * The page is supposed to be wired now, so it
		 * shouldn't be encrypted at this point.  It can
		 * safely be entered in the page table.
		 */
		ASSERT_PAGE_DECRYPTED(mem);

		/*
		 *	Enter it in the kernel pmap.  The page isn't busy,
		 *	but this shouldn't be a problem because it is wired.
		 */
		PMAP_ENTER(kernel_pmap, map_start, mem, protection,
			   ((unsigned int)(mem->object->wimg_bits))
			   & VM_WIMG_MASK,
			   TRUE);

		map_start += PAGE_SIZE;
		offset += PAGE_SIZE;
	}
}

/*
 *	kmem_suballoc:
 *
 *	Allocates a map to manage a subrange
 *	of the kernel virtual address space.
 *
 *	Arguments are as follows:
 *
 *	parent		Map to take range from
 *	addr		Address of start of range (IN/OUT)
 *	size		Size of range to find
 *	pageable	Can region be paged
 *	flags		VM_FLAGS_ANYWHERE, or fixed placement at *addr
 *	new_map		Pointer to new submap
 */
kern_return_t
kmem_suballoc(
	vm_map_t	parent,
	vm_offset_t	*addr,
	vm_size_t	size,
	boolean_t	pageable,
	int		flags,
	vm_map_t	*new_map)
{
	vm_map_t	map;
	vm_map_offset_t	map_addr;
	vm_map_size_t	map_size;
	kern_return_t	kr;

	map_size = vm_map_round_page(size);

	/*
	 *	Need reference on submap object because it is internal
	 *	to the vm_system.  vm_object_enter will never be called
	 *	on it (usual source of reference for vm_map_enter).
	 */
	vm_object_reference(vm_submap_object);

	map_addr = (flags & VM_FLAGS_ANYWHERE) ?
		vm_map_min(parent) : vm_map_trunc_page(*addr);

	kr = vm_map_enter(parent, &map_addr, map_size,
			  (vm_map_offset_t) 0, flags,
			  vm_submap_object, (vm_object_offset_t) 0, FALSE,
			  VM_PROT_DEFAULT, VM_PROT_ALL, VM_INHERIT_DEFAULT);
	if (kr != KERN_SUCCESS) {
		vm_object_deallocate(vm_submap_object);
		return (kr);
	}

	pmap_reference(vm_map_pmap(parent));
	map = vm_map_create(vm_map_pmap(parent), map_addr,
			    map_addr + map_size, pageable);
	if (map == VM_MAP_NULL)
		panic("kmem_suballoc: vm_map_create failed");	/* "can't happen" */

	kr = vm_map_submap(parent, map_addr, map_addr + map_size, map,
			   map_addr, FALSE);
	if (kr != KERN_SUCCESS) {
		/*
		 * See comment preceding vm_map_submap().
		 */
		vm_map_remove(parent, map_addr, map_addr + map_size,
			      VM_MAP_NO_FLAGS);
		vm_map_deallocate(map);	/* also removes ref to pmap */
		vm_object_deallocate(vm_submap_object);
		return (kr);
	}
	*addr = CAST_DOWN(vm_offset_t, map_addr);
	*new_map = map;
	return (KERN_SUCCESS);
}

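/*
 * Illustrative sketch (not part of this file): carving a pageable
 * submap out of the kernel map, placed anywhere that fits:
 *
 *	vm_map_t	my_submap;
 *	vm_offset_t	base = 0;
 *
 *	if (kmem_suballoc(kernel_map, &base, 1024 * 1024, TRUE,
 *			  VM_FLAGS_ANYWHERE, &my_submap) == KERN_SUCCESS)
 *		... allocate within my_submap via kmem_alloc and friends ...
 */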
/*
 *	kmem_init:
 *
 *	Initialize the kernel's virtual memory map, taking
 *	into account all memory allocated up to this time.
 */
void
kmem_init(
	vm_offset_t	start,
	vm_offset_t	end)
{
	vm_map_offset_t	map_start;
	vm_map_offset_t	map_end;

	map_start = vm_map_trunc_page(start);
	map_end = vm_map_round_page(end);

	kernel_map = vm_map_create(pmap_kernel(), VM_MIN_KERNEL_ADDRESS,
				   map_end, FALSE);

	/*
	 *	Reserve virtual memory allocated up to this time.
	 */

	if (start != VM_MIN_KERNEL_ADDRESS) {
		vm_map_offset_t map_addr;

		map_addr = VM_MIN_KERNEL_ADDRESS;
		(void) vm_map_enter(kernel_map,
				    &map_addr,
				    (vm_map_size_t)(map_start - VM_MIN_KERNEL_ADDRESS),
				    (vm_map_offset_t) 0,
				    VM_FLAGS_ANYWHERE | VM_FLAGS_NO_PMAP_CHECK,
				    VM_OBJECT_NULL,
				    (vm_object_offset_t) 0, FALSE,
				    VM_PROT_DEFAULT, VM_PROT_ALL,
				    VM_INHERIT_DEFAULT);
	}

	/*
	 * Account for kernel memory (text, data, bss, vm shenanigans).
	 * This may include inaccessible "holes" as determined by what
	 * the machine-dependent init code includes in max_mem.
	 */
	vm_page_wire_count = (atop_64(max_mem) - (vm_page_free_count
						  + vm_page_active_count
						  + vm_page_inactive_count));
}

/*
 *	Routine:	copyinmap
 *	Purpose:
 *		Like copyin, except that fromaddr is an address
 *		in the specified VM map.  This implementation
 *		is incomplete; it handles the current user map
 *		and the kernel map/submaps.
 */
kern_return_t
copyinmap(
	vm_map_t		map,
	vm_map_offset_t		fromaddr,
	void			*todata,
	vm_size_t		length)
{
	kern_return_t	kr = KERN_SUCCESS;
	vm_map_t	oldmap;

	if (vm_map_pmap(map) == pmap_kernel())
	{
		/* assume a correct copy */
		memcpy(todata, CAST_DOWN(void *, fromaddr), length);
	}
	else if (current_map() == map)
	{
		if (copyin(fromaddr, todata, length) != 0)
			kr = KERN_INVALID_ADDRESS;
	}
	else
	{
		vm_map_reference(map);
		oldmap = vm_map_switch(map);
		if (copyin(fromaddr, todata, length) != 0)
			kr = KERN_INVALID_ADDRESS;
		vm_map_switch(oldmap);
		vm_map_deallocate(map);
	}
	return kr;
}

/*
 *	Routine:	copyoutmap
 *	Purpose:
 *		Like copyout, except that toaddr is an address
 *		in the specified VM map.  This implementation
 *		is incomplete; it handles the current user map
 *		and the kernel map/submaps.
 */
kern_return_t
copyoutmap(
	vm_map_t		map,
	void			*fromdata,
	vm_map_address_t	toaddr,
	vm_size_t		length)
{
	if (vm_map_pmap(map) == pmap_kernel()) {
		/* assume a correct copy */
		memcpy(CAST_DOWN(void *, toaddr), fromdata, length);
		return KERN_SUCCESS;
	}

	if (current_map() != map)
		return KERN_NOT_SUPPORTED;

	if (copyout(fromdata, toaddr, length) != 0)
		return KERN_INVALID_ADDRESS;

	return KERN_SUCCESS;
}
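/*
 * Illustrative sketch (not part of this file; "uaddr", "struct args" and
 * "struct reply" are hypothetical): copying a user structure in from the
 * calling task's map, then pushing a result back out:
 *
 *	struct args	a;
 *	struct reply	r;
 *
 *	if (copyinmap(current_map(), uaddr, &a, sizeof (a)) != KERN_SUCCESS)
 *		return KERN_INVALID_ADDRESS;
 *	... compute r from a ...
 *	return copyoutmap(current_map(), &r, uaddr, sizeof (r));
 */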
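/*
 *	Routine:	vm_conflict_check
 *	Purpose:
 *		Walk the mappings covering [off, off+len) in "map" and
 *		compare them against the backing-file range
 *		[file_off, file_off+len) of "pager".  Returns KERN_SUCCESS
 *		when nothing in the region maps that part of the file,
 *		KERN_ALREADY_WAITING when the region maps exactly the same
 *		file range, and KERN_FAILURE on a partial overlap.
 */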
kern_return_t
vm_conflict_check(
	vm_map_t		map,
	vm_map_offset_t		off,
	vm_map_size_t		len,
	memory_object_t		pager,
	vm_object_offset_t	file_off)
{
	vm_map_entry_t		entry;
	vm_object_t		obj;
	vm_object_offset_t	obj_off;
	vm_map_t		base_map;
	vm_map_offset_t		base_offset;
	vm_map_offset_t		original_offset;
	kern_return_t		kr;
	vm_map_size_t		local_len;

	base_map = map;
	base_offset = off;
	original_offset = off;
	kr = KERN_SUCCESS;
	vm_map_lock(map);
	while(vm_map_lookup_entry(map, off, &entry)) {
		local_len = len;

		if (entry->object.vm_object == VM_OBJECT_NULL) {
			vm_map_unlock(map);
			return KERN_SUCCESS;
		}
		if (entry->is_sub_map) {
			vm_map_t	old_map;

			old_map = map;
			vm_map_lock(entry->object.sub_map);
			map = entry->object.sub_map;
			off = entry->offset + (off - entry->vme_start);
			vm_map_unlock(old_map);
			continue;
		}
		obj = entry->object.vm_object;
		obj_off = (off - entry->vme_start) + entry->offset;
		while(obj->shadow) {
			obj_off += obj->shadow_offset;
			obj = obj->shadow;
		}
		if((obj->pager_created) && (obj->pager == pager)) {
			if(((obj->paging_offset) + obj_off) == file_off) {
				if(off != base_offset) {
					vm_map_unlock(map);
					return KERN_FAILURE;
				}
				kr = KERN_ALREADY_WAITING;
			} else {
				vm_object_offset_t	obj_off_aligned;
				vm_object_offset_t	file_off_aligned;

				obj_off_aligned = obj_off & ~PAGE_MASK;
				file_off_aligned = file_off & ~PAGE_MASK;

				if (file_off_aligned == (obj->paging_offset + obj_off_aligned)) {
					/*
					 * the target map and the file offset start in the same page
					 * but are not identical...
					 */
					vm_map_unlock(map);
					return KERN_FAILURE;
				}
				if ((file_off < (obj->paging_offset + obj_off_aligned)) &&
				    ((file_off + len) > (obj->paging_offset + obj_off_aligned))) {
					/*
					 * some portion of the tail of the I/O will fall
					 * within the encompass of the target map
					 */
					vm_map_unlock(map);
					return KERN_FAILURE;
				}
				if ((file_off_aligned > (obj->paging_offset + obj_off)) &&
				    (file_off_aligned < (obj->paging_offset + obj_off) + len)) {
					/*
					 * the beginning page of the file offset falls within
					 * the target map's encompass
					 */
					vm_map_unlock(map);
					return KERN_FAILURE;
				}
			}
		} else if(kr != KERN_SUCCESS) {
			vm_map_unlock(map);
			return KERN_FAILURE;
		}

		if(len <= ((entry->vme_end - entry->vme_start) -
			   (off - entry->vme_start))) {
			vm_map_unlock(map);
			return kr;
		} else {
			len -= (entry->vme_end - entry->vme_start) -
				(off - entry->vme_start);
		}
		base_offset = base_offset + (local_len - len);
		file_off = file_off + (local_len - len);
		off = base_offset;
		if(map != base_map) {
			vm_map_unlock(map);
			vm_map_lock(base_map);
			map = base_map;
		}
	}

	vm_map_unlock(map);
	return kr;
}