/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * Copyright (c) 1999-2003 Apple Computer, Inc.  All Rights Reserved.
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */
/*
 *	File:	vm/vm_kern.c
 *	Author:	Avadis Tevanian, Jr., Michael Wayne Young
 *	Date:	1985
 *
 *	Kernel memory management.
 */

#include <cpus.h>
#include <mach/kern_return.h>
#include <mach/vm_param.h>
#include <kern/assert.h>
#include <kern/lock.h>
#include <kern/thread.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <kern/misc_protos.h>
#include <vm/cpm.h>

#include <string.h>

/*
 *	Variables exported by this module.
 */

vm_map_t	kernel_map;
vm_map_t	kernel_pageable_map;

/*
 *	Forward declarations for internal functions.
 */
extern kern_return_t kmem_alloc_pages(
	register vm_object_t		object,
	register vm_object_offset_t	offset,
	register vm_size_t		size);

extern void kmem_remap_pages(
	register vm_object_t		object,
	register vm_object_offset_t	offset,
	register vm_offset_t		start,
	register vm_offset_t		end,
	vm_prot_t			protection);

/*
 *	kmem_alloc_contig:
 *
 *	Allocate wired, physically contiguous kernel memory
 *	(the pages are obtained through cpm_allocate).
 */
kern_return_t
kmem_alloc_contig(
	vm_map_t	map,
	vm_offset_t	*addrp,
	vm_size_t	size,
	vm_offset_t	mask,
	int		flags)
{
	vm_object_t		object;
	vm_page_t		m, pages;
	kern_return_t		kr;
	vm_offset_t		addr, i;
	vm_object_offset_t	offset;
	vm_map_entry_t		entry;

	if (map == VM_MAP_NULL || (flags && (flags ^ KMA_KOBJECT)))
		return KERN_INVALID_ARGUMENT;

	if (size == 0) {
		*addrp = 0;
		return KERN_INVALID_ARGUMENT;
	}

	size = round_page_32(size);
	if ((flags & KMA_KOBJECT) == 0) {
		object = vm_object_allocate(size);
		kr = vm_map_find_space(map, &addr, size, mask, &entry);
	}
	else {
		object = kernel_object;
		kr = vm_map_find_space(map, &addr, size, mask, &entry);
	}

	if ((flags & KMA_KOBJECT) == 0) {
		entry->object.vm_object = object;
		entry->offset = offset = 0;
	} else {
		offset = addr - VM_MIN_KERNEL_ADDRESS;

		if (entry->object.vm_object == VM_OBJECT_NULL) {
			vm_object_reference(object);
			entry->object.vm_object = object;
			entry->offset = offset;
		}
	}

	if (kr != KERN_SUCCESS) {
		if ((flags & KMA_KOBJECT) == 0)
			vm_object_deallocate(object);
		return kr;
	}

	vm_map_unlock(map);

	kr = cpm_allocate(size, &pages, FALSE);

	if (kr != KERN_SUCCESS) {
		vm_map_remove(map, addr, addr + size, 0);
		*addrp = 0;
		return kr;
	}

	vm_object_lock(object);
	for (i = 0; i < size; i += PAGE_SIZE) {
		m = pages;
		pages = NEXT_PAGE(m);
		m->busy = FALSE;
		vm_page_insert(m, object, offset + i);
	}
	vm_object_unlock(object);

	if ((kr = vm_map_wire(map, addr, addr + size, VM_PROT_DEFAULT, FALSE))
			!= KERN_SUCCESS) {
		if (object == kernel_object) {
			vm_object_lock(object);
			vm_object_page_remove(object, offset, offset + size);
			vm_object_unlock(object);
		}
		vm_map_remove(map, addr, addr + size, 0);
		return kr;
	}
	if (object == kernel_object)
		vm_map_simplify(map, addr);

	*addrp = addr;
	return KERN_SUCCESS;
}

/*
 *	Master entry point for allocating kernel memory.
 *	NOTE: this routine is _never_ interrupt safe.
 *
 *	map		: map to allocate into
 *	addrp		: pointer to start address of new memory
 *	size		: size of memory requested
 *	flags		: options
 *			  KMA_HERE		*addrp is base address, else "anywhere"
 *			  KMA_NOPAGEWAIT	don't wait for pages if unavailable
 *			  KMA_KOBJECT		use kernel_object
 */

kern_return_t
kernel_memory_allocate(
	register vm_map_t	map,
	register vm_offset_t	*addrp,
	register vm_size_t	size,
	register vm_offset_t	mask,
	int			flags)
{
	vm_object_t		object = VM_OBJECT_NULL;
	vm_map_entry_t		entry;
	vm_object_offset_t	offset;
	vm_offset_t		addr;
	vm_offset_t		i;
	kern_return_t		kr;

	size = round_page_32(size);
	if ((flags & KMA_KOBJECT) == 0) {
		/*
		 *	Allocate a new object.  We must do this before locking
		 *	the map, or risk deadlock with the default pager:
		 *		device_read_alloc uses kmem_alloc,
		 *		which tries to allocate an object,
		 *		which uses kmem_alloc_wired to get memory,
		 *		which blocks for pages.
		 *		Then the default pager needs to read a block
		 *		to process a memory_object_data_write,
		 *		and device_read_alloc calls kmem_alloc
		 *		and deadlocks on the map lock.
		 */
		object = vm_object_allocate(size);
		kr = vm_map_find_space(map, &addr, size, mask, &entry);
	}
	else {
		object = kernel_object;
		kr = vm_map_find_space(map, &addr, size, mask, &entry);
	}
	if (kr != KERN_SUCCESS) {
		if ((flags & KMA_KOBJECT) == 0)
			vm_object_deallocate(object);
		return kr;
	}

	if ((flags & KMA_KOBJECT) == 0) {
		entry->object.vm_object = object;
		entry->offset = offset = 0;
	} else {
		offset = addr - VM_MIN_KERNEL_ADDRESS;

		if (entry->object.vm_object == VM_OBJECT_NULL) {
			vm_object_reference(object);
			entry->object.vm_object = object;
			entry->offset = offset;
		}
	}

	/*
	 *	Since we have not given out this address yet, it is safe
	 *	to unlock the map -- except, of course, that we must make
	 *	certain no one coalesces our address or does a blind
	 *	vm_deallocate and removes the object.  An extra object
	 *	reference suffices to protect against both contingencies.
	 */
	vm_object_reference(object);
	vm_map_unlock(map);

	vm_object_lock(object);
	for (i = 0; i < size; i += PAGE_SIZE) {
		vm_page_t	mem;

		while ((mem = vm_page_alloc(object,
				offset + (vm_object_offset_t)i))
			    == VM_PAGE_NULL) {
			if (flags & KMA_NOPAGEWAIT) {
				if (object == kernel_object)
					vm_object_page_remove(object, offset,
						offset + (vm_object_offset_t)i);
				vm_object_unlock(object);
				vm_map_remove(map, addr, addr + size, 0);
				vm_object_deallocate(object);
				return KERN_RESOURCE_SHORTAGE;
			}
			vm_object_unlock(object);
			VM_PAGE_WAIT();
			vm_object_lock(object);
		}
		mem->busy = FALSE;
	}
	vm_object_unlock(object);

	if ((kr = vm_map_wire(map, addr, addr + size, VM_PROT_DEFAULT, FALSE))
			!= KERN_SUCCESS) {
		if (object == kernel_object) {
			vm_object_lock(object);
			vm_object_page_remove(object, offset, offset + size);
			vm_object_unlock(object);
		}
		vm_map_remove(map, addr, addr + size, 0);
		vm_object_deallocate(object);
		return (kr);
	}
	/* now that the pages are wired, we no longer have to fear coalesce */
	vm_object_deallocate(object);
	if (object == kernel_object)
		vm_map_simplify(map, addr);

	/*
	 *	Return the memory, not zeroed.
	 */
#if	(NCPUS > 1) && i860
	bzero( addr, size );
#endif	/* #if (NCPUS > 1) && i860 */
	*addrp = addr;
	return KERN_SUCCESS;
}
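
/*
 * Illustrative sketch, not part of the original file: how a caller might
 * drive kernel_memory_allocate() with the flags documented above.  The
 * wrapper name and buffer size are hypothetical; a kernel context with
 * kernel_map already initialized is assumed.
 */
#if 0
static kern_return_t
example_wired_buffer(vm_offset_t *bufp)
{
	/*
	 * Back the region with kernel_object and fail with
	 * KERN_RESOURCE_SHORTAGE rather than block if no pages
	 * are immediately available.
	 */
	return kernel_memory_allocate(kernel_map, bufp, 4 * PAGE_SIZE,
				      0, KMA_KOBJECT | KMA_NOPAGEWAIT);
}
#endif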

/*
 *	kmem_alloc:
 *
 *	Allocate wired-down memory in the kernel's address map
 *	or a submap.  The memory is not zero-filled.
 */

kern_return_t
kmem_alloc(
	vm_map_t	map,
	vm_offset_t	*addrp,
	vm_size_t	size)
{
	return kernel_memory_allocate(map, addrp, size, 0, 0);
}
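
/*
 * Illustrative sketch, not part of the original file: the usual
 * allocate/release pairing for kmem_alloc().  The scratch size and
 * function name are hypothetical; error handling is reduced to the
 * minimum.
 */
#if 0
static void
example_scratch(void)
{
	vm_offset_t	addr;
	vm_size_t	size = 8192;

	if (kmem_alloc(kernel_map, &addr, size) != KERN_SUCCESS)
		return;
	/* ... use the wired, non-zeroed memory at addr ... */
	kmem_free(kernel_map, addr, size);
}
#endif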

/*
 *	kmem_realloc:
 *
 *	Reallocate wired-down memory in the kernel's address map
 *	or a submap.  Newly allocated pages are not zeroed.
 *	This can only be used on regions allocated with kmem_alloc.
 *
 *	If successful, the pages in the old region are mapped twice.
 *	The old region is unchanged.  Use kmem_free to get rid of it.
 */
kern_return_t
kmem_realloc(
	vm_map_t	map,
	vm_offset_t	oldaddr,
	vm_size_t	oldsize,
	vm_offset_t	*newaddrp,
	vm_size_t	newsize)
{
	vm_offset_t	oldmin, oldmax;
	vm_offset_t	newaddr;
	vm_offset_t	offset;
	vm_object_t	object;
	vm_map_entry_t	oldentry, newentry;
	vm_page_t	mem;
	kern_return_t	kr;

	oldmin = trunc_page_32(oldaddr);
	oldmax = round_page_32(oldaddr + oldsize);
	oldsize = oldmax - oldmin;
	newsize = round_page_32(newsize);

	/*
	 *	Find the VM object backing the old region.
	 */

	vm_map_lock(map);

	if (!vm_map_lookup_entry(map, oldmin, &oldentry))
		panic("kmem_realloc");
	object = oldentry->object.vm_object;

	/*
	 *	Increase the size of the object and
	 *	fill in the new region.
	 */

	vm_object_reference(object);
	/*
	 * By grabbing the object lock before unlocking the map,
	 * we guarantee that we will panic if more than one attempt
	 * is made to realloc a kmem_alloc'd area.
	 */
	vm_object_lock(object);
	vm_map_unlock(map);
	if (object->size != oldsize)
		panic("kmem_realloc");
	object->size = newsize;
	vm_object_unlock(object);

	/*
	 * Allocate the new pages while the expanded portion of the
	 * object is still not mapped.
	 */
	kmem_alloc_pages(object, oldsize, newsize - oldsize);

	/*
	 *	Find space for the new region.
	 */

	kr = vm_map_find_space(map, &newaddr, newsize, (vm_offset_t) 0,
			       &newentry);
	if (kr != KERN_SUCCESS) {
		vm_object_lock(object);
		for (offset = oldsize;
		     offset < newsize; offset += PAGE_SIZE) {
			if ((mem = vm_page_lookup(object, offset)) != VM_PAGE_NULL) {
				vm_page_lock_queues();
				vm_page_free(mem);
				vm_page_unlock_queues();
			}
		}
		object->size = oldsize;
		vm_object_unlock(object);
		vm_object_deallocate(object);
		return kr;
	}
	newentry->object.vm_object = object;
	newentry->offset = 0;
	assert(newentry->wired_count == 0);

	/*
	 * Add an extra reference in case we have someone doing an
	 * unexpected deallocate.
	 */
	vm_object_reference(object);
	vm_map_unlock(map);

	if ((kr = vm_map_wire(map, newaddr, newaddr + newsize,
			      VM_PROT_DEFAULT, FALSE)) != KERN_SUCCESS) {
		vm_map_remove(map, newaddr, newaddr + newsize, 0);
		vm_object_lock(object);
		for (offset = oldsize;
		     offset < newsize; offset += PAGE_SIZE) {
			if ((mem = vm_page_lookup(object, offset)) != VM_PAGE_NULL) {
				vm_page_lock_queues();
				vm_page_free(mem);
				vm_page_unlock_queues();
			}
		}
		object->size = oldsize;
		vm_object_unlock(object);
		vm_object_deallocate(object);
		return (kr);
	}
	vm_object_deallocate(object);

	*newaddrp = newaddr;
	return KERN_SUCCESS;
}
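
/*
 * Illustrative sketch, not part of the original file: growing a
 * kmem_alloc'd region with kmem_realloc().  As noted above, a successful
 * realloc leaves the old mapping intact, so the caller must kmem_free
 * the old range itself.  Names and sizes are hypothetical.
 */
#if 0
static kern_return_t
example_grow(vm_offset_t *addrp, vm_size_t oldsize, vm_size_t newsize)
{
	vm_offset_t	newaddr;
	kern_return_t	kr;

	kr = kmem_realloc(kernel_map, *addrp, oldsize, &newaddr, newsize);
	if (kr != KERN_SUCCESS)
		return kr;
	kmem_free(kernel_map, *addrp, oldsize);	/* drop the old mapping */
	*addrp = newaddr;
	return KERN_SUCCESS;
}
#endif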

/*
 *	kmem_alloc_wired:
 *
 *	Allocate wired-down memory in the kernel's address map
 *	or a submap.  The memory is not zero-filled.
 *
 *	The memory is allocated in the kernel_object.
 *	It may not be copied with vm_map_copy, and
 *	it may not be reallocated with kmem_realloc.
 */

kern_return_t
kmem_alloc_wired(
	vm_map_t	map,
	vm_offset_t	*addrp,
	vm_size_t	size)
{
	return kernel_memory_allocate(map, addrp, size, 0, KMA_KOBJECT);
}

/*
 *	kmem_alloc_aligned:
 *
 *	Like kmem_alloc_wired, except that the memory is aligned.
 *	The size should be a power of 2.
 */

kern_return_t
kmem_alloc_aligned(
	vm_map_t	map,
	vm_offset_t	*addrp,
	vm_size_t	size)
{
	if ((size & (size - 1)) != 0)
		panic("kmem_alloc_aligned: size not aligned");
	return kernel_memory_allocate(map, addrp, size, size - 1, KMA_KOBJECT);
}
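
/*
 * Illustrative sketch, not part of the original file: kmem_alloc_aligned()
 * requires a power-of-2 size and returns memory aligned to that size
 * (the size is passed as the mask).  The request below, one naturally
 * aligned 32KB block, is hypothetical.
 */
#if 0
static kern_return_t
example_aligned(vm_offset_t *addrp)
{
	return kmem_alloc_aligned(kernel_map, addrp, 32 * 1024);
}
#endif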

/*
 *	kmem_alloc_pageable:
 *
 *	Allocate pageable memory in the kernel's address map.
 */

kern_return_t
kmem_alloc_pageable(
	vm_map_t	map,
	vm_offset_t	*addrp,
	vm_size_t	size)
{
	vm_offset_t	addr;
	kern_return_t	kr;

#ifndef normal
	addr = (vm_map_min(map)) + 0x1000;
#else
	addr = vm_map_min(map);
#endif
	kr = vm_map_enter(map, &addr, round_page_32(size),
			  (vm_offset_t) 0, TRUE,
			  VM_OBJECT_NULL, (vm_object_offset_t) 0, FALSE,
			  VM_PROT_DEFAULT, VM_PROT_ALL, VM_INHERIT_DEFAULT);
	if (kr != KERN_SUCCESS)
		return kr;

	*addrp = addr;
	return KERN_SUCCESS;
}

/*
 *	kmem_free:
 *
 *	Release a region of kernel virtual memory allocated
 *	with kmem_alloc, kmem_alloc_wired, or kmem_alloc_pageable,
 *	and return the physical pages associated with that region.
 */

void
kmem_free(
	vm_map_t	map,
	vm_offset_t	addr,
	vm_size_t	size)
{
	kern_return_t	kr;

	kr = vm_map_remove(map, trunc_page_32(addr),
			   round_page_32(addr + size),
			   VM_MAP_REMOVE_KUNWIRE);
	if (kr != KERN_SUCCESS)
		panic("kmem_free");
}

/*
 *	Allocate new pages in an object.
 */

kern_return_t
kmem_alloc_pages(
	register vm_object_t		object,
	register vm_object_offset_t	offset,
	register vm_size_t		size)
{
	size = round_page_32(size);
	vm_object_lock(object);
	while (size) {
		register vm_page_t	mem;

		/*
		 *	Allocate a page
		 */
		while ((mem = vm_page_alloc(object, offset))
			    == VM_PAGE_NULL) {
			vm_object_unlock(object);
			VM_PAGE_WAIT();
			vm_object_lock(object);
		}

		offset += PAGE_SIZE;
		size -= PAGE_SIZE;
		mem->busy = FALSE;
	}
	vm_object_unlock(object);
	return KERN_SUCCESS;
}

/*
 *	Remap wired pages in an object into a new region.
 *	The object is assumed to be mapped into the kernel map or
 *	a submap.
 */
void
kmem_remap_pages(
	register vm_object_t		object,
	register vm_object_offset_t	offset,
	register vm_offset_t		start,
	register vm_offset_t		end,
	vm_prot_t			protection)
{
	/*
	 *	Mark the pmap region as not pageable.
	 */
	pmap_pageable(kernel_pmap, start, end, FALSE);

	while (start < end) {
		register vm_page_t	mem;

		vm_object_lock(object);

		/*
		 *	Find a page
		 */
		if ((mem = vm_page_lookup(object, offset)) == VM_PAGE_NULL)
			panic("kmem_remap_pages");

		/*
		 *	Wire it down (again)
		 */
		vm_page_lock_queues();
		vm_page_wire(mem);
		vm_page_unlock_queues();
		vm_object_unlock(object);

		/*
		 *	Enter it in the kernel pmap.  The page isn't busy,
		 *	but this shouldn't be a problem because it is wired.
		 */
		PMAP_ENTER(kernel_pmap, start, mem, protection,
			   ((unsigned int)(mem->object->wimg_bits))
				& VM_WIMG_MASK,
			   TRUE);

		start += PAGE_SIZE;
		offset += PAGE_SIZE;
	}
}

/*
 *	kmem_suballoc:
 *
 *	Allocates a map to manage a subrange
 *	of the kernel virtual address space.
 *
 *	Arguments are as follows:
 *
 *	parent		Map to take range from
 *	addr		Address of start of range (IN/OUT)
 *	size		Size of range to find
 *	pageable	Can region be paged
 *	anywhere	Can region be located anywhere in map
 *	new_map		Pointer to new submap
 */
kern_return_t
kmem_suballoc(
	vm_map_t	parent,
	vm_offset_t	*addr,
	vm_size_t	size,
	boolean_t	pageable,
	boolean_t	anywhere,
	vm_map_t	*new_map)
{
	vm_map_t	map;
	kern_return_t	kr;

	size = round_page_32(size);

	/*
	 *	Need reference on submap object because it is internal
	 *	to the vm_system.  vm_object_enter will never be called
	 *	on it (usual source of reference for vm_map_enter).
	 */
	vm_object_reference(vm_submap_object);

	if (anywhere == TRUE)
		*addr = (vm_offset_t)vm_map_min(parent);
	kr = vm_map_enter(parent, addr, size,
			  (vm_offset_t) 0, anywhere,
			  vm_submap_object, (vm_object_offset_t) 0, FALSE,
			  VM_PROT_DEFAULT, VM_PROT_ALL, VM_INHERIT_DEFAULT);
	if (kr != KERN_SUCCESS) {
		vm_object_deallocate(vm_submap_object);
		return (kr);
	}

	pmap_reference(vm_map_pmap(parent));
	map = vm_map_create(vm_map_pmap(parent), *addr, *addr + size, pageable);
	if (map == VM_MAP_NULL)
		panic("kmem_suballoc: vm_map_create failed");	/* "can't happen" */

	kr = vm_map_submap(parent, *addr, *addr + size, map, *addr, FALSE);
	if (kr != KERN_SUCCESS) {
		/*
		 * See comment preceding vm_map_submap().
		 */
		vm_map_remove(parent, *addr, *addr + size, VM_MAP_NO_FLAGS);
		vm_map_deallocate(map);	/* also removes ref to pmap */
		vm_object_deallocate(vm_submap_object);
		return (kr);
	}
	*new_map = map;
	return (KERN_SUCCESS);
}
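
/*
 * Illustrative sketch, not part of the original file: carving a pageable
 * submap out of the kernel map with kmem_suballoc(), in the style used
 * to set up kernel_pageable_map.  The submap size and function name are
 * hypothetical.
 */
#if 0
static vm_map_t
example_submap(void)
{
	vm_map_t	submap;
	vm_offset_t	addr;

	if (kmem_suballoc(kernel_map, &addr, 16 * 1024 * 1024,
			  TRUE,		/* pageable */
			  TRUE,		/* anywhere */
			  &submap) != KERN_SUCCESS)
		return VM_MAP_NULL;
	return submap;
}
#endif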

/*
 *	kmem_init:
 *
 *	Initialize the kernel's virtual memory map, taking
 *	into account all memory allocated up to this time.
 */
void
kmem_init(
	vm_offset_t	start,
	vm_offset_t	end)
{
	kernel_map = vm_map_create(pmap_kernel(),
				   VM_MIN_KERNEL_ADDRESS, end,
				   FALSE);

	/*
	 *	Reserve virtual memory allocated up to this time.
	 */

	if (start != VM_MIN_KERNEL_ADDRESS) {
		vm_offset_t addr = VM_MIN_KERNEL_ADDRESS;
		(void) vm_map_enter(kernel_map,
				    &addr, start - VM_MIN_KERNEL_ADDRESS,
				    (vm_offset_t) 0, TRUE,
				    VM_OBJECT_NULL,
				    (vm_object_offset_t) 0, FALSE,
				    VM_PROT_DEFAULT, VM_PROT_ALL,
				    VM_INHERIT_DEFAULT);
	}

	/*
	 * Account for kernel memory (text, data, bss, vm shenanigans).
	 * This may include inaccessible "holes" as determined by what
	 * the machine-dependent init code includes in max_mem.
	 */
	vm_page_wire_count = (atop_64(max_mem) - (vm_page_free_count
						  + vm_page_active_count
						  + vm_page_inactive_count));
}

/*
 *	kmem_io_object_trunc:
 *
 *	Truncate an object vm_map_copy_t.
 *	Called by the scatter/gather list network code to remove pages from
 *	the tail end of a packet.  Also unwires the object's pages.
 */

kern_return_t
kmem_io_object_trunc(copy, new_size)
	vm_map_copy_t	copy;		/* IN/OUT copy object */
	register vm_size_t new_size;	/* IN new object size */
{
	register vm_size_t	offset, old_size;

	assert(copy->type == VM_MAP_COPY_OBJECT);

	old_size = (vm_size_t)round_page_64(copy->size);
	copy->size = new_size;
	new_size = round_page_32(new_size);

	vm_object_lock(copy->cpy_object);
	vm_object_page_remove(copy->cpy_object,
		(vm_object_offset_t)new_size, (vm_object_offset_t)old_size);
	for (offset = 0; offset < new_size; offset += PAGE_SIZE) {
		register vm_page_t	mem;

		if ((mem = vm_page_lookup(copy->cpy_object,
				(vm_object_offset_t)offset)) == VM_PAGE_NULL)
			panic("kmem_io_object_trunc: unable to find object page");

		/*
		 * Make sure these pages are marked dirty
		 */
		mem->dirty = TRUE;
		vm_page_lock_queues();
		vm_page_unwire(mem);
		vm_page_unlock_queues();
	}
	copy->cpy_object->size = new_size;	/* adjust size of object */
	vm_object_unlock(copy->cpy_object);
	return (KERN_SUCCESS);
}

/*
 *	kmem_io_object_deallocate:
 *
 *	Free a vm_map_copy_t.
 *	Called by the scatter/gather list network code to free a packet.
 */

void
kmem_io_object_deallocate(
	vm_map_copy_t	copy)		/* IN/OUT copy object */
{
	kern_return_t	ret;

	/*
	 * Clear out all the object pages (this will leave an empty object).
	 */
	ret = kmem_io_object_trunc(copy, 0);
	if (ret != KERN_SUCCESS)
		panic("kmem_io_object_deallocate: unable to truncate object");
	/*
	 * ...and discard the copy object.
	 */
	vm_map_copy_discard(copy);
}

/*
 *	Routine:	copyinmap
 *	Purpose:
 *		Like copyin, except that fromaddr is an address
 *		in the specified VM map.  This implementation
 *		is incomplete; it handles the current user map
 *		and the kernel map/submaps.
 */
boolean_t
copyinmap(
	vm_map_t	map,
	vm_offset_t	fromaddr,
	vm_offset_t	toaddr,
	vm_size_t	length)
{
	if (vm_map_pmap(map) == pmap_kernel()) {
		/* assume a correct copy */
		memcpy((void *)toaddr, (void *)fromaddr, length);
		return FALSE;
	}

	if (current_map() == map)
		return copyin((char *)fromaddr, (char *)toaddr, length);

	return TRUE;
}

/*
 *	Routine:	copyoutmap
 *	Purpose:
 *		Like copyout, except that toaddr is an address
 *		in the specified VM map.  This implementation
 *		is incomplete; it handles the current user map
 *		and the kernel map/submaps.
 */
boolean_t
copyoutmap(
	vm_map_t	map,
	vm_offset_t	fromaddr,
	vm_offset_t	toaddr,
	vm_size_t	length)
{
	if (vm_map_pmap(map) == pmap_kernel()) {
		/* assume a correct copy */
		memcpy((void *)toaddr, (void *)fromaddr, length);
		return FALSE;
	}

	if (current_map() == map)
		return copyout((char *)fromaddr, (char *)toaddr, length);

	return TRUE;
}
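
/*
 * Illustrative sketch, not part of the original file: the return
 * convention of copyinmap()/copyoutmap() above is that FALSE (or the
 * copyin/copyout result) means the copy was performed, while TRUE means
 * the map is one these routines cannot handle.  Names are hypothetical.
 */
#if 0
static boolean_t
example_copyout(vm_map_t task_map, vm_offset_t kbuf,
		vm_offset_t uaddr, vm_size_t len)
{
	if (copyoutmap(task_map, kbuf, uaddr, len)) {
		/* unsupported map, or copyout fault */
		return TRUE;
	}
	return FALSE;
}
#endif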

kern_return_t
vm_conflict_check(
	vm_map_t		map,
	vm_offset_t		off,
	vm_size_t		len,
	memory_object_t		pager,
	vm_object_offset_t	file_off)
{
	vm_map_entry_t		entry;
	vm_object_t		obj;
	vm_object_offset_t	obj_off;
	vm_map_t		base_map;
	vm_offset_t		base_offset;
	vm_offset_t		original_offset;
	kern_return_t		kr;
	vm_size_t		local_len;

	base_map = map;
	base_offset = off;
	original_offset = off;
	kr = KERN_SUCCESS;
	vm_map_lock(map);
	while (vm_map_lookup_entry(map, off, &entry)) {
		local_len = len;

		if (entry->object.vm_object == VM_OBJECT_NULL) {
			vm_map_unlock(map);
			return KERN_SUCCESS;
		}
		if (entry->is_sub_map) {
			vm_map_t	old_map;

			old_map = map;
			vm_map_lock(entry->object.sub_map);
			map = entry->object.sub_map;
			off = entry->offset + (off - entry->vme_start);
			vm_map_unlock(old_map);
			continue;
		}
		obj = entry->object.vm_object;
		obj_off = (off - entry->vme_start) + entry->offset;
		while (obj->shadow) {
			obj_off += obj->shadow_offset;
			obj = obj->shadow;
		}
		if ((obj->pager_created) && (obj->pager == pager)) {
			if (((obj->paging_offset) + obj_off) == file_off) {
				if (off != base_offset) {
					vm_map_unlock(map);
					return KERN_FAILURE;
				}
				kr = KERN_ALREADY_WAITING;
			} else {
				vm_object_offset_t	obj_off_aligned;
				vm_object_offset_t	file_off_aligned;

				obj_off_aligned = obj_off & ~PAGE_MASK;
				file_off_aligned = file_off & ~PAGE_MASK;

				if (file_off_aligned == (obj->paging_offset + obj_off_aligned)) {
					/*
					 * the target map and the file offset start in the same page
					 * but are not identical...
					 */
					vm_map_unlock(map);
					return KERN_FAILURE;
				}
				if ((file_off < (obj->paging_offset + obj_off_aligned)) &&
				    ((file_off + len) > (obj->paging_offset + obj_off_aligned))) {
					/*
					 * some portion of the tail of the I/O will fall
					 * within the encompass of the target map
					 */
					vm_map_unlock(map);
					return KERN_FAILURE;
				}
				if ((file_off_aligned > (obj->paging_offset + obj_off)) &&
				    (file_off_aligned < (obj->paging_offset + obj_off) + len)) {
					/*
					 * the beginning page of the file offset falls within
					 * the target map's encompass
					 */
					vm_map_unlock(map);
					return KERN_FAILURE;
				}
			}
		} else if (kr != KERN_SUCCESS) {
			vm_map_unlock(map);
			return KERN_FAILURE;
		}

		if (len <= ((entry->vme_end - entry->vme_start) -
			    (off - entry->vme_start))) {
			vm_map_unlock(map);
			return kr;
		} else {
			len -= (entry->vme_end - entry->vme_start) -
			       (off - entry->vme_start);
		}
		base_offset = base_offset + (local_len - len);
		file_off = file_off + (local_len - len);
		off = base_offset;
		if (map != base_map) {
			vm_map_unlock(map);
			vm_map_lock(base_map);
			map = base_map;
		}
	}

	vm_map_unlock(map);
	return kr;
}