/*
 * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */
/*
 *	File:	vm/vm_kern.c
 *	Author:	Avadis Tevanian, Jr., Michael Wayne Young
 *	Date:	1985
 *
 *	Kernel memory management.
 */

#include <mach/kern_return.h>
#include <mach/vm_param.h>
#include <kern/assert.h>
#include <kern/lock.h>
#include <kern/thread.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <kern/misc_protos.h>
#include <vm/cpm.h>

#include <string.h>
/*
 *	Variables exported by this module.
 */

vm_map_t	kernel_map;
vm_map_t	kernel_pageable_map;

/*
 * Forward declarations for internal functions.
 */
extern kern_return_t kmem_alloc_pages(
	register vm_object_t		object,
	register vm_object_offset_t	offset,
	register vm_object_size_t	size);

extern void kmem_remap_pages(
	register vm_object_t		object,
	register vm_object_offset_t	offset,
	register vm_offset_t		start,
	register vm_offset_t		end,
	vm_prot_t			protection);

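/*
 *	kmem_alloc_contig:
 *
 *	Allocate wired-down, physically contiguous memory (obtained from
 *	the contiguous physical memory allocator, cpm_allocate) in the
 *	kernel's address map or a submap.  The only flag accepted is
 *	KMA_KOBJECT; "mask" constrains the alignment of the virtual
 *	address chosen by vm_map_find_space.
 */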
kern_return_t
kmem_alloc_contig(
	vm_map_t	map,
	vm_offset_t	*addrp,
	vm_size_t	size,
	vm_offset_t	mask,
	int		flags)
{
	vm_object_t		object;
	vm_object_offset_t	offset;
	vm_map_offset_t		map_addr;
	vm_map_offset_t		map_mask;
	vm_map_size_t		map_size, i;
	vm_map_entry_t		entry;
	vm_page_t		m, pages;
	kern_return_t		kr;

	if (map == VM_MAP_NULL || (flags && (flags ^ KMA_KOBJECT)))
		return KERN_INVALID_ARGUMENT;

	if (size == 0) {
		*addrp = 0;
		return KERN_INVALID_ARGUMENT;
	}

	map_size = vm_map_round_page(size);
	map_mask = (vm_map_offset_t)mask;

	/*
	 *	Allocate a new object (if necessary) and the reference we
	 *	will be donating to the map entry.  We must do this before
	 *	locking the map, or risk deadlock with the default pager.
	 */
	if ((flags & KMA_KOBJECT) != 0) {
		object = kernel_object;
		vm_object_reference(object);
	} else {
		object = vm_object_allocate(map_size);
	}

	kr = vm_map_find_space(map, &map_addr, map_size, map_mask, &entry);
	if (KERN_SUCCESS != kr) {
		vm_object_deallocate(object);
		return kr;
	}

	entry->object.vm_object = object;
	entry->offset = offset = (object == kernel_object) ?
		map_addr - VM_MIN_KERNEL_ADDRESS : 0;

	/* Take an extra object ref in case the map entry gets deleted */
	vm_object_reference(object);
	vm_map_unlock(map);

	kr = cpm_allocate(CAST_DOWN(vm_size_t, map_size), &pages, FALSE);

	if (kr != KERN_SUCCESS) {
		vm_map_remove(map, vm_map_trunc_page(map_addr),
			      vm_map_round_page(map_addr + map_size), 0);
		vm_object_deallocate(object);
		*addrp = 0;
		return kr;
	}

	vm_object_lock(object);
	for (i = 0; i < map_size; i += PAGE_SIZE) {
		m = pages;
		pages = NEXT_PAGE(m);
		m->busy = FALSE;
		vm_page_insert(m, object, offset + i);
	}
	vm_object_unlock(object);

	if ((kr = vm_map_wire(map, vm_map_trunc_page(map_addr),
			      vm_map_round_page(map_addr + map_size),
			      VM_PROT_DEFAULT, FALSE))
			!= KERN_SUCCESS) {
		if (object == kernel_object) {
			vm_object_lock(object);
			vm_object_page_remove(object, offset, offset + map_size);
			vm_object_unlock(object);
		}
		vm_map_remove(map, vm_map_trunc_page(map_addr),
			      vm_map_round_page(map_addr + map_size), 0);
		vm_object_deallocate(object);
		return kr;
	}
	vm_object_deallocate(object);

	if (object == kernel_object)
		vm_map_simplify(map, map_addr);

	*addrp = map_addr;
	return KERN_SUCCESS;
}
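/*
 *	Example (illustrative only; the size here is arbitrary):
 *	allocating, and later releasing, a physically contiguous buffer
 *	out of the kernel map, e.g. for DMA:
 *
 *		vm_offset_t	buf;
 *		kern_return_t	kr;
 *
 *		kr = kmem_alloc_contig(kernel_map, &buf, 8 * PAGE_SIZE, 0, 0);
 *		if (kr == KERN_SUCCESS) {
 *			...
 *			kmem_free(kernel_map, buf, 8 * PAGE_SIZE);
 *		}
 */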

/*
 *	Master entry point for allocating kernel memory.
 *	NOTE: this routine is _never_ interrupt safe.
 *
 *	map		: map to allocate into
 *	addrp		: pointer to start address of new memory
 *	size		: size of memory requested
 *	flags		: options
 *			  KMA_HERE		*addrp is base address, else "anywhere"
 *			  KMA_NOPAGEWAIT	don't wait for pages if unavailable
 *			  KMA_KOBJECT		use kernel_object
 */

kern_return_t
kernel_memory_allocate(
	register vm_map_t	map,
	register vm_offset_t	*addrp,
	register vm_size_t	size,
	register vm_offset_t	mask,
	int			flags)
{
	vm_object_t		object;
	vm_object_offset_t	offset;
	vm_map_entry_t		entry;
	vm_map_offset_t		map_addr;
	vm_map_offset_t		map_mask;
	vm_map_size_t		map_size;
	vm_map_size_t		i;
	kern_return_t		kr;

	if (size == 0) {
		*addrp = 0;
		return KERN_INVALID_ARGUMENT;
	}

	map_size = vm_map_round_page(size);
	map_mask = (vm_map_offset_t) mask;

	/*
	 *	Allocate a new object (if necessary).  We must do this before
	 *	locking the map, or risk deadlock with the default pager.
	 */
	if ((flags & KMA_KOBJECT) != 0) {
		object = kernel_object;
		vm_object_reference(object);
	} else {
		object = vm_object_allocate(map_size);
	}

	kr = vm_map_find_space(map, &map_addr, map_size, map_mask, &entry);
	if (KERN_SUCCESS != kr) {
		vm_object_deallocate(object);
		return kr;
	}

	entry->object.vm_object = object;
	entry->offset = offset = (object == kernel_object) ?
		map_addr - VM_MIN_KERNEL_ADDRESS : 0;

	vm_object_reference(object);
	vm_map_unlock(map);

	vm_object_lock(object);
	for (i = 0; i < map_size; i += PAGE_SIZE) {
		vm_page_t	mem;

		while (VM_PAGE_NULL ==
		       (mem = vm_page_alloc(object, offset + i))) {
			if (flags & KMA_NOPAGEWAIT) {
				if (object == kernel_object)
					vm_object_page_remove(object, offset, offset + i);
				vm_object_unlock(object);
				vm_map_remove(map, map_addr, map_addr + map_size, 0);
				vm_object_deallocate(object);
				return KERN_RESOURCE_SHORTAGE;
			}
			vm_object_unlock(object);
			VM_PAGE_WAIT();
			vm_object_lock(object);
		}
		mem->busy = FALSE;
	}
	vm_object_unlock(object);

	if ((kr = vm_map_wire(map, map_addr, map_addr + map_size, VM_PROT_DEFAULT, FALSE))
			!= KERN_SUCCESS) {
		if (object == kernel_object) {
			vm_object_lock(object);
			vm_object_page_remove(object, offset, offset + map_size);
			vm_object_unlock(object);
		}
		vm_map_remove(map, map_addr, map_addr + map_size, 0);
		vm_object_deallocate(object);
		return (kr);
	}
	/* now that the pages are wired, we no longer have to fear coalescing */
	vm_object_deallocate(object);
	if (object == kernel_object)
		vm_map_simplify(map, map_addr);

	/*
	 *	Return the memory, not zeroed.
	 */
	*addrp = CAST_DOWN(vm_offset_t, map_addr);
	return KERN_SUCCESS;
}
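/*
 *	Example (illustrative only; the size and flag choices are
 *	arbitrary): three wired pages backed by kernel_object, failing
 *	immediately instead of sleeping when pages are scarce:
 *
 *		vm_offset_t	addr;
 *		kern_return_t	kr;
 *
 *		kr = kernel_memory_allocate(kernel_map, &addr,
 *				3 * PAGE_SIZE, 0,
 *				KMA_KOBJECT | KMA_NOPAGEWAIT);
 *		if (kr == KERN_RESOURCE_SHORTAGE)
 *			...	(retry later)
 */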

/*
 *	kmem_alloc:
 *
 *	Allocate wired-down memory in the kernel's address map
 *	or a submap.  The memory is not zero-filled.
 */

kern_return_t
kmem_alloc(
	vm_map_t	map,
	vm_offset_t	*addrp,
	vm_size_t	size)
{
	return kernel_memory_allocate(map, addrp, size, 0, 0);
}

/*
 *	kmem_realloc:
 *
 *	Reallocate wired-down memory in the kernel's address map
 *	or a submap.  Newly allocated pages are not zeroed.
 *	This can only be used on regions allocated with kmem_alloc.
 *
 *	If successful, the pages in the old region are mapped twice.
 *	The old region is unchanged.  Use kmem_free to get rid of it.
 */
kern_return_t
kmem_realloc(
	vm_map_t	map,
	vm_offset_t	oldaddr,
	vm_size_t	oldsize,
	vm_offset_t	*newaddrp,
	vm_size_t	newsize)
{
	vm_object_t		object;
	vm_object_offset_t	offset;
	vm_map_offset_t		oldmapmin;
	vm_map_offset_t		oldmapmax;
	vm_map_offset_t		newmapaddr;
	vm_map_size_t		oldmapsize;
	vm_map_size_t		newmapsize;
	vm_map_entry_t		oldentry;
	vm_map_entry_t		newentry;
	vm_page_t		mem;
	kern_return_t		kr;

	oldmapmin = vm_map_trunc_page(oldaddr);
	oldmapmax = vm_map_round_page(oldaddr + oldsize);
	oldmapsize = oldmapmax - oldmapmin;
	newmapsize = vm_map_round_page(newsize);

	/*
	 *	Find the VM object backing the old region.
	 */

	vm_map_lock(map);

	if (!vm_map_lookup_entry(map, oldmapmin, &oldentry))
		panic("kmem_realloc");
	object = oldentry->object.vm_object;

	/*
	 *	Increase the size of the object and
	 *	fill in the new region.
	 */

	vm_object_reference(object);
	/*
	 *	By grabbing the object lock before unlocking the map, we
	 *	guarantee that we will panic if more than one attempt is
	 *	made to realloc a kmem_alloc'd area.
	 */
	vm_object_lock(object);
	vm_map_unlock(map);
	if (object->size != oldmapsize)
		panic("kmem_realloc");
	object->size = newmapsize;
	vm_object_unlock(object);

	/*
	 *	Allocate the new pages while the expanded portion of the
	 *	object is still unmapped.
	 */
	kmem_alloc_pages(object, vm_object_round_page(oldmapsize),
			 vm_object_round_page(newmapsize - oldmapsize));

	/*
	 *	Find space for the new region.
	 */

	kr = vm_map_find_space(map, &newmapaddr, newmapsize,
			       (vm_map_offset_t) 0, &newentry);
	if (kr != KERN_SUCCESS) {
		vm_object_lock(object);
		for (offset = oldmapsize;
		     offset < newmapsize; offset += PAGE_SIZE) {
			if ((mem = vm_page_lookup(object, offset)) != VM_PAGE_NULL) {
				vm_page_lock_queues();
				vm_page_free(mem);
				vm_page_unlock_queues();
			}
		}
		object->size = oldmapsize;
		vm_object_unlock(object);
		vm_object_deallocate(object);
		return kr;
	}
	newentry->object.vm_object = object;
	newentry->offset = 0;
	assert(newentry->wired_count == 0);

	/*
	 *	Add an extra reference in case someone does an
	 *	unexpected deallocate.
	 */
	vm_object_reference(object);
	vm_map_unlock(map);

	kr = vm_map_wire(map, newmapaddr, newmapaddr + newmapsize, VM_PROT_DEFAULT, FALSE);
	if (KERN_SUCCESS != kr) {
		vm_map_remove(map, newmapaddr, newmapaddr + newmapsize, 0);
		vm_object_lock(object);
		for (offset = oldmapsize; offset < newmapsize; offset += PAGE_SIZE) {
			if ((mem = vm_page_lookup(object, offset)) != VM_PAGE_NULL) {
				vm_page_lock_queues();
				vm_page_free(mem);
				vm_page_unlock_queues();
			}
		}
		object->size = oldmapsize;
		vm_object_unlock(object);
		vm_object_deallocate(object);
		return (kr);
	}
	vm_object_deallocate(object);

	*newaddrp = CAST_DOWN(vm_offset_t, newmapaddr);
	return KERN_SUCCESS;
}
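/*
 *	Example (illustrative only; the sizes are arbitrary): growing a
 *	kmem_alloc'd region.  On success the old mapping still exists
 *	and must be released separately:
 *
 *		vm_offset_t	old_addr, new_addr;
 *
 *		if (kmem_realloc(kernel_map, old_addr, 4 * PAGE_SIZE,
 *				 &new_addr, 8 * PAGE_SIZE) == KERN_SUCCESS)
 *			kmem_free(kernel_map, old_addr, 4 * PAGE_SIZE);
 */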

/*
 *	kmem_alloc_wired:
 *
 *	Allocate wired-down memory in the kernel's address map
 *	or a submap.  The memory is not zero-filled.
 *
 *	The memory is allocated in the kernel_object.
 *	It may not be copied with vm_map_copy, and
 *	it may not be reallocated with kmem_realloc.
 */

kern_return_t
kmem_alloc_wired(
	vm_map_t	map,
	vm_offset_t	*addrp,
	vm_size_t	size)
{
	return kernel_memory_allocate(map, addrp, size, 0, KMA_KOBJECT);
}

/*
 *	kmem_alloc_aligned:
 *
 *	Like kmem_alloc_wired, except that the memory is aligned.
 *	The size must be a power of 2 (the routine panics otherwise).
 */

kern_return_t
kmem_alloc_aligned(
	vm_map_t	map,
	vm_offset_t	*addrp,
	vm_size_t	size)
{
	if ((size & (size - 1)) != 0)
		panic("kmem_alloc_aligned: size not aligned");
	return kernel_memory_allocate(map, addrp, size, size - 1, KMA_KOBJECT);
}
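/*
 *	Example (illustrative only): the size doubles as the alignment
 *	mask, so a 64KB request yields a 64KB-aligned region:
 *
 *		vm_offset_t	addr;
 *		kern_return_t	kr;
 *
 *		kr = kmem_alloc_aligned(kernel_map, &addr, 64 * 1024);
 */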

/*
 *	kmem_alloc_pageable:
 *
 *	Allocate pageable memory in the kernel's address map.
 */

kern_return_t
kmem_alloc_pageable(
	vm_map_t	map,
	vm_offset_t	*addrp,
	vm_size_t	size)
{
	vm_map_offset_t	map_addr;
	vm_map_size_t	map_size;
	kern_return_t	kr;

#ifndef normal
	map_addr = (vm_map_min(map)) + 0x1000;
#else
	map_addr = vm_map_min(map);
#endif
	map_size = vm_map_round_page(size);

	kr = vm_map_enter(map, &map_addr, map_size,
			  (vm_map_offset_t) 0, VM_FLAGS_ANYWHERE,
			  VM_OBJECT_NULL, (vm_object_offset_t) 0, FALSE,
			  VM_PROT_DEFAULT, VM_PROT_ALL, VM_INHERIT_DEFAULT);

	if (kr != KERN_SUCCESS)
		return kr;

	*addrp = CAST_DOWN(vm_offset_t, map_addr);
	return KERN_SUCCESS;
}

/*
 *	kmem_free:
 *
 *	Release a region of kernel virtual memory allocated
 *	with kmem_alloc, kmem_alloc_wired, or kmem_alloc_pageable,
 *	and return the physical pages associated with that region.
 */

void
kmem_free(
	vm_map_t	map,
	vm_offset_t	addr,
	vm_size_t	size)
{
	kern_return_t	kr;

	kr = vm_map_remove(map, vm_map_trunc_page(addr),
			   vm_map_round_page(addr + size),
			   VM_MAP_REMOVE_KUNWIRE);
	if (kr != KERN_SUCCESS)
		panic("kmem_free");
}
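/*
 *	Example (illustrative only; the size is arbitrary): the usual
 *	allocate/use/release pairing:
 *
 *		vm_offset_t	addr;
 *		vm_size_t	size = 16 * PAGE_SIZE;
 *
 *		if (kmem_alloc(kernel_map, &addr, size) == KERN_SUCCESS) {
 *			...
 *			kmem_free(kernel_map, addr, size);
 *		}
 */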

/*
 *	Allocate new pages in an object.
 */

kern_return_t
kmem_alloc_pages(
	register vm_object_t		object,
	register vm_object_offset_t	offset,
	register vm_object_size_t	size)
{
	vm_object_size_t	alloc_size;

	alloc_size = vm_object_round_page(size);
	vm_object_lock(object);
	while (alloc_size) {
		register vm_page_t	mem;

		/*
		 *	Allocate a page
		 */
		while (VM_PAGE_NULL ==
		       (mem = vm_page_alloc(object, offset))) {
			vm_object_unlock(object);
			VM_PAGE_WAIT();
			vm_object_lock(object);
		}
		mem->busy = FALSE;

		alloc_size -= PAGE_SIZE;
		offset += PAGE_SIZE;
	}
	vm_object_unlock(object);
	return KERN_SUCCESS;
}

/*
 *	Remap wired pages in an object into a new region.
 *	The object is assumed to be mapped into the kernel map or
 *	a submap.
 */
void
kmem_remap_pages(
	register vm_object_t		object,
	register vm_object_offset_t	offset,
	register vm_offset_t		start,
	register vm_offset_t		end,
	vm_prot_t			protection)
{
	vm_map_offset_t	map_start;
	vm_map_offset_t	map_end;

	/*
	 *	Mark the pmap region as not pageable.
	 */
	map_start = vm_map_trunc_page(start);
	map_end = vm_map_round_page(end);

	pmap_pageable(kernel_pmap, map_start, map_end, FALSE);

	while (map_start < map_end) {
		register vm_page_t	mem;

		vm_object_lock(object);

		/*
		 *	Find a page
		 */
		if ((mem = vm_page_lookup(object, offset)) == VM_PAGE_NULL)
			panic("kmem_remap_pages");

		/*
		 *	Wire it down (again)
		 */
		vm_page_lock_queues();
		vm_page_wire(mem);
		vm_page_unlock_queues();
		vm_object_unlock(object);

		/*
		 * ENCRYPTED SWAP:
		 * The page is supposed to be wired now, so it
		 * shouldn't be encrypted at this point.  It can
		 * safely be entered in the page table.
		 */
		ASSERT_PAGE_DECRYPTED(mem);

		/*
		 *	Enter it in the kernel pmap.  The page isn't busy,
		 *	but this shouldn't be a problem because it is wired.
		 */
		PMAP_ENTER(kernel_pmap, map_start, mem, protection,
			   ((unsigned int)(mem->object->wimg_bits))
			   & VM_WIMG_MASK,
			   TRUE);

		map_start += PAGE_SIZE;
		offset += PAGE_SIZE;
	}
}

/*
 *	kmem_suballoc:
 *
 *	Allocates a map to manage a subrange
 *	of the kernel virtual address space.
 *
 *	Arguments are as follows:
 *
 *	parent		Map to take range from
 *	addr		Address of start of range (IN/OUT)
 *	size		Size of range to find
 *	pageable	Can region be paged
 *	flags		VM_FLAGS_ANYWHERE allows the region to be
 *			located anywhere in the map
 *	new_map		Pointer to new submap
 */
kern_return_t
kmem_suballoc(
	vm_map_t	parent,
	vm_offset_t	*addr,
	vm_size_t	size,
	boolean_t	pageable,
	int		flags,
	vm_map_t	*new_map)
{
	vm_map_t	map;
	vm_map_offset_t	map_addr;
	vm_map_size_t	map_size;
	kern_return_t	kr;

	map_size = vm_map_round_page(size);

	/*
	 *	Need reference on submap object because it is internal
	 *	to the vm_system.  vm_object_enter will never be called
	 *	on it (usual source of reference for vm_map_enter).
	 */
	vm_object_reference(vm_submap_object);

	map_addr = (flags & VM_FLAGS_ANYWHERE) ?
		vm_map_min(parent) : vm_map_trunc_page(*addr);

	kr = vm_map_enter(parent, &map_addr, map_size,
			  (vm_map_offset_t) 0, flags,
			  vm_submap_object, (vm_object_offset_t) 0, FALSE,
			  VM_PROT_DEFAULT, VM_PROT_ALL, VM_INHERIT_DEFAULT);
	if (kr != KERN_SUCCESS) {
		vm_object_deallocate(vm_submap_object);
		return (kr);
	}

	pmap_reference(vm_map_pmap(parent));
	map = vm_map_create(vm_map_pmap(parent), map_addr, map_addr + map_size, pageable);
	if (map == VM_MAP_NULL)
		panic("kmem_suballoc: vm_map_create failed");	/* "can't happen" */

	kr = vm_map_submap(parent, map_addr, map_addr + map_size, map, map_addr, FALSE);
	if (kr != KERN_SUCCESS) {
		/*
		 * See comment preceding vm_map_submap().
		 */
		vm_map_remove(parent, map_addr, map_addr + map_size, VM_MAP_NO_FLAGS);
		vm_map_deallocate(map);	/* also removes ref to pmap */
		vm_object_deallocate(vm_submap_object);
		return (kr);
	}
	*addr = CAST_DOWN(vm_offset_t, map_addr);
	*new_map = map;
	return (KERN_SUCCESS);
}
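/*
 *	Example (illustrative only; the size is arbitrary): carving a
 *	pageable submap out of the kernel map, letting the VM system
 *	choose the address:
 *
 *		vm_map_t	submap;
 *		vm_offset_t	addr = 0;
 *		kern_return_t	kr;
 *
 *		kr = kmem_suballoc(kernel_map, &addr, 32 * PAGE_SIZE,
 *				   TRUE, VM_FLAGS_ANYWHERE, &submap);
 */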

/*
 *	kmem_init:
 *
 *	Initialize the kernel's virtual memory map, taking
 *	into account all memory allocated up to this time.
 */
void
kmem_init(
	vm_offset_t	start,
	vm_offset_t	end)
{
	vm_map_offset_t	map_start;
	vm_map_offset_t	map_end;

	map_start = vm_map_trunc_page(start);
	map_end = vm_map_round_page(end);

	kernel_map = vm_map_create(pmap_kernel(), VM_MIN_KERNEL_ADDRESS,
				   map_end, FALSE);

	/*
	 *	Reserve virtual memory allocated up to this time.
	 */

	if (start != VM_MIN_KERNEL_ADDRESS) {
		vm_map_offset_t	map_addr;

		map_addr = VM_MIN_KERNEL_ADDRESS;
		(void) vm_map_enter(kernel_map,
				    &map_addr,
				    (vm_map_size_t)(map_start - VM_MIN_KERNEL_ADDRESS),
				    (vm_map_offset_t) 0,
				    VM_FLAGS_ANYWHERE | VM_FLAGS_NO_PMAP_CHECK,
				    VM_OBJECT_NULL,
				    (vm_object_offset_t) 0, FALSE,
				    VM_PROT_DEFAULT, VM_PROT_ALL,
				    VM_INHERIT_DEFAULT);
	}

	/*
	 * Account for kernel memory (text, data, bss, vm shenanigans).
	 * This may include inaccessible "holes" as determined by what
	 * the machine-dependent init code includes in max_mem.
	 */
	vm_page_wire_count = (atop_64(max_mem) - (vm_page_free_count
						  + vm_page_active_count
						  + vm_page_inactive_count));
}

/*
 *	Routine:	copyinmap
 *	Purpose:
 *		Like copyin, except that fromaddr is an address
 *		in the specified VM map.  This implementation
 *		is incomplete; it handles the current user map
 *		and the kernel map/submaps.
 */
kern_return_t
copyinmap(
	vm_map_t		map,
	vm_map_offset_t		fromaddr,
	void			*todata,
	vm_size_t		length)
{
	kern_return_t	kr = KERN_SUCCESS;
	vm_map_t	oldmap;

	if (vm_map_pmap(map) == pmap_kernel())
	{
		/* assume a correct copy */
		memcpy(todata, CAST_DOWN(void *, fromaddr), length);
	}
	else if (current_map() == map)
	{
		if (copyin(fromaddr, todata, length) != 0)
			kr = KERN_INVALID_ADDRESS;
	}
	else
	{
		vm_map_reference(map);
		oldmap = vm_map_switch(map);
		if (copyin(fromaddr, todata, length) != 0)
			kr = KERN_INVALID_ADDRESS;
		vm_map_switch(oldmap);
		vm_map_deallocate(map);
	}
	return kr;
}

/*
 *	Routine:	copyoutmap
 *	Purpose:
 *		Like copyout, except that toaddr is an address
 *		in the specified VM map.  This implementation
 *		is incomplete; it handles the current user map
 *		and the kernel map/submaps.
 */
kern_return_t
copyoutmap(
	vm_map_t		map,
	void			*fromdata,
	vm_map_address_t	toaddr,
	vm_size_t		length)
{
	if (vm_map_pmap(map) == pmap_kernel()) {
		/* assume a correct copy */
		memcpy(CAST_DOWN(void *, toaddr), fromdata, length);
		return KERN_SUCCESS;
	}

	if (current_map() != map)
		return KERN_NOT_SUPPORTED;

	if (copyout(fromdata, toaddr, length) != 0)
		return KERN_INVALID_ADDRESS;

	return KERN_SUCCESS;
}
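/*
 *	Example (illustrative only; "task_map" and "uaddr" are
 *	placeholders): round-tripping a word through another map.  Note
 *	that copyoutmap, unlike copyinmap, does not switch maps, so it
 *	only supports the current map and the kernel map:
 *
 *		int		value;
 *		kern_return_t	kr;
 *
 *		kr = copyinmap(task_map, uaddr, &value, sizeof (value));
 *		if (kr == KERN_SUCCESS)
 *			kr = copyoutmap(task_map, &value, uaddr,
 *					sizeof (value));
 */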
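/*
 *	Routine:	vm_conflict_check
 *	Purpose:
 *		Walk the given map (following submaps) over [off, off+len)
 *		and check whether the range is already backed by the given
 *		pager at the given file offset.  Returns KERN_ALREADY_WAITING
 *		when the mapping coincides exactly with the file range,
 *		KERN_FAILURE when the two overlap without coinciding, and
 *		KERN_SUCCESS when there is no conflict.
 */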
kern_return_t
vm_conflict_check(
	vm_map_t		map,
	vm_map_offset_t		off,
	vm_map_size_t		len,
	memory_object_t		pager,
	vm_object_offset_t	file_off)
{
	vm_map_entry_t		entry;
	vm_object_t		obj;
	vm_object_offset_t	obj_off;
	vm_map_t		base_map;
	vm_map_offset_t		base_offset;
	vm_map_offset_t		original_offset;
	kern_return_t		kr;
	vm_map_size_t		local_len;

	base_map = map;
	base_offset = off;
	original_offset = off;
	kr = KERN_SUCCESS;
	vm_map_lock(map);
	while (vm_map_lookup_entry(map, off, &entry)) {
		local_len = len;

		if (entry->object.vm_object == VM_OBJECT_NULL) {
			vm_map_unlock(map);
			return KERN_SUCCESS;
		}
		if (entry->is_sub_map) {
			vm_map_t	old_map;

			old_map = map;
			vm_map_lock(entry->object.sub_map);
			map = entry->object.sub_map;
			off = entry->offset + (off - entry->vme_start);
			vm_map_unlock(old_map);
			continue;
		}
		obj = entry->object.vm_object;
		obj_off = (off - entry->vme_start) + entry->offset;
		while (obj->shadow) {
			obj_off += obj->shadow_offset;
			obj = obj->shadow;
		}
		if ((obj->pager_created) && (obj->pager == pager)) {
			if (((obj->paging_offset) + obj_off) == file_off) {
				if (off != base_offset) {
					vm_map_unlock(map);
					return KERN_FAILURE;
				}
				kr = KERN_ALREADY_WAITING;
			} else {
				vm_object_offset_t	obj_off_aligned;
				vm_object_offset_t	file_off_aligned;

				obj_off_aligned = obj_off & ~PAGE_MASK;
				file_off_aligned = file_off & ~PAGE_MASK;

				if (file_off_aligned == (obj->paging_offset + obj_off_aligned)) {
					/*
					 * the target map and the file offset start in the same page
					 * but are not identical...
					 */
					vm_map_unlock(map);
					return KERN_FAILURE;
				}
				if ((file_off < (obj->paging_offset + obj_off_aligned)) &&
				    ((file_off + len) > (obj->paging_offset + obj_off_aligned))) {
					/*
					 * some portion of the tail of the I/O will fall
					 * within the encompass of the target map
					 */
					vm_map_unlock(map);
					return KERN_FAILURE;
				}
				if ((file_off_aligned > (obj->paging_offset + obj_off)) &&
				    (file_off_aligned < (obj->paging_offset + obj_off) + len)) {
					/*
					 * the beginning page of the file offset falls within
					 * the target map's encompass
					 */
					vm_map_unlock(map);
					return KERN_FAILURE;
				}
			}
		} else if (kr != KERN_SUCCESS) {
			vm_map_unlock(map);
			return KERN_FAILURE;
		}

		if (len <= ((entry->vme_end - entry->vme_start) -
			    (off - entry->vme_start))) {
			vm_map_unlock(map);
			return kr;
		} else {
			len -= (entry->vme_end - entry->vme_start) -
			       (off - entry->vme_start);
		}
		base_offset = base_offset + (local_len - len);
		file_off = file_off + (local_len - len);
		off = base_offset;
		if (map != base_map) {
			vm_map_unlock(map);
			vm_map_lock(base_map);
			map = base_map;
		}
	}

	vm_map_unlock(map);
	return kr;
}