/*
 * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_OSREFERENCE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the
 * License may not be used to create, or enable the creation or
 * redistribution of, unlawful or unlicensed copies of an Apple operating
 * system, or to circumvent, violate, or enable the circumvention or
 * violation of, any terms of an Apple operating system software license
 * agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_OSREFERENCE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */
/*
 *	File:	vm/vm_kern.c
 *	Author:	Avadis Tevanian, Jr., Michael Wayne Young
 *	Date:	1985
 *
 *	Kernel memory management.
 */

#include <mach/kern_return.h>
#include <mach/vm_param.h>
#include <kern/assert.h>
#include <kern/lock.h>
#include <kern/thread.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <kern/misc_protos.h>
#include <vm/cpm.h>

#include <string.h>
/*
 *	Variables exported by this module.
 */

vm_map_t	kernel_map;
vm_map_t	kernel_pageable_map;

/*
 * Forward declarations for internal functions.
 */
extern kern_return_t kmem_alloc_pages(
	register vm_object_t		object,
	register vm_object_offset_t	offset,
	register vm_object_size_t	size);

extern void kmem_remap_pages(
	register vm_object_t		object,
	register vm_object_offset_t	offset,
	register vm_offset_t		start,
	register vm_offset_t		end,
	vm_prot_t			protection);

kern_return_t
kmem_alloc_contig(
	vm_map_t	map,
	vm_offset_t	*addrp,
	vm_size_t	size,
	vm_offset_t	mask,
	int		flags)
{
	vm_object_t		object;
	vm_object_offset_t	offset;
	vm_map_offset_t		map_addr;
	vm_map_offset_t		map_mask;
	vm_map_size_t		map_size, i;
	vm_map_entry_t		entry;
	vm_page_t		m, pages;
	kern_return_t		kr;

	if (map == VM_MAP_NULL || (flags && (flags ^ KMA_KOBJECT)))
		return KERN_INVALID_ARGUMENT;

	if (size == 0) {
		*addrp = 0;
		return KERN_INVALID_ARGUMENT;
	}

	map_size = vm_map_round_page(size);
	map_mask = (vm_map_offset_t)mask;

	/*
	 *	Allocate a new object (if necessary) and the reference we
	 *	will be donating to the map entry.  We must do this before
	 *	locking the map, or risk deadlock with the default pager.
	 */
	if ((flags & KMA_KOBJECT) != 0) {
		object = kernel_object;
		vm_object_reference(object);
	} else {
		object = vm_object_allocate(map_size);
	}

	kr = vm_map_find_space(map, &map_addr, map_size, map_mask, 0, &entry);
	if (KERN_SUCCESS != kr) {
		vm_object_deallocate(object);
		return kr;
	}

	entry->object.vm_object = object;
	entry->offset = offset = (object == kernel_object) ?
		map_addr - VM_MIN_KERNEL_ADDRESS : 0;

	/* Take an extra object ref in case the map entry gets deleted */
	vm_object_reference(object);
	vm_map_unlock(map);

	kr = cpm_allocate(CAST_DOWN(vm_size_t, map_size), &pages, FALSE);

	if (kr != KERN_SUCCESS) {
		vm_map_remove(map, vm_map_trunc_page(map_addr),
			      vm_map_round_page(map_addr + map_size), 0);
		vm_object_deallocate(object);
		*addrp = 0;
		return kr;
	}

	vm_object_lock(object);
	for (i = 0; i < map_size; i += PAGE_SIZE) {
		m = pages;
		pages = NEXT_PAGE(m);
		*(NEXT_PAGE_PTR(m)) = VM_PAGE_NULL;
		m->busy = FALSE;
		vm_page_insert(m, object, offset + i);
	}
	vm_object_unlock(object);

	if ((kr = vm_map_wire(map, vm_map_trunc_page(map_addr),
			      vm_map_round_page(map_addr + map_size),
			      VM_PROT_DEFAULT, FALSE)) != KERN_SUCCESS) {
		if (object == kernel_object) {
			vm_object_lock(object);
			vm_object_page_remove(object, offset, offset + map_size);
			vm_object_unlock(object);
		}
		vm_map_remove(map, vm_map_trunc_page(map_addr),
			      vm_map_round_page(map_addr + map_size), 0);
		vm_object_deallocate(object);
		return kr;
	}
	vm_object_deallocate(object);

	if (object == kernel_object)
		vm_map_simplify(map, map_addr);

	*addrp = map_addr;
	return KERN_SUCCESS;
}

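/*
 * Usage sketch, for illustration only: a typical caller is a driver that
 * needs physically contiguous, wired memory.  The names dma_addr and
 * dma_size below are hypothetical; the mask argument expresses an
 * alignment constraint (alignment - 1), and 0 asks for no alignment
 * beyond page rounding.
 *
 *	vm_offset_t	dma_addr;
 *	vm_size_t	dma_size = 16 * PAGE_SIZE;
 *
 *	if (kmem_alloc_contig(kernel_map, &dma_addr, dma_size,
 *			      (vm_offset_t) 0, 0) == KERN_SUCCESS) {
 *		... fill and hand the buffer to the device ...
 *		kmem_free(kernel_map, dma_addr, dma_size);
 *	}
 */
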
/*
 * Master entry point for allocating kernel memory.
 * NOTE: this routine is _never_ interrupt safe.
 *
 * map		: map to allocate into
 * addrp	: pointer to start address of new memory
 * size		: size of memory requested
 * flags	: options
 *		  KMA_HERE		*addrp is base address, else "anywhere"
 *		  KMA_NOPAGEWAIT	don't wait for pages if unavailable
 *		  KMA_KOBJECT		use kernel_object
 *		  KMA_LOMEM		support for 32 bit devices in a 64 bit world
 *					if set and a lomemory pool is available
 *					grab pages from it... this also implies
 *					KMA_NOPAGEWAIT
 */

kern_return_t
kernel_memory_allocate(
	register vm_map_t	map,
	register vm_offset_t	*addrp,
	register vm_size_t	size,
	register vm_offset_t	mask,
	int			flags)
{
	vm_object_t		object;
	vm_object_offset_t	offset;
	vm_map_entry_t		entry;
	vm_map_offset_t		map_addr;
	vm_map_offset_t		map_mask;
	vm_map_size_t		map_size;
	vm_map_size_t		i;
	kern_return_t		kr;

	if (size == 0) {
		*addrp = 0;
		return KERN_INVALID_ARGUMENT;
	}
	if (flags & KMA_LOMEM) {
		if ( !(flags & KMA_NOPAGEWAIT) ) {
			*addrp = 0;
			return KERN_INVALID_ARGUMENT;
		}
	}

	map_size = vm_map_round_page(size);
	map_mask = (vm_map_offset_t) mask;

	/*
	 *	Allocate a new object (if necessary).  We must do this before
	 *	locking the map, or risk deadlock with the default pager.
	 */
	if ((flags & KMA_KOBJECT) != 0) {
		object = kernel_object;
		vm_object_reference(object);
	} else {
		object = vm_object_allocate(map_size);
	}

	kr = vm_map_find_space(map, &map_addr, map_size, map_mask, 0, &entry);
	if (KERN_SUCCESS != kr) {
		vm_object_deallocate(object);
		return kr;
	}
	entry->object.vm_object = object;
	entry->offset = offset = (object == kernel_object) ?
		map_addr - VM_MIN_KERNEL_ADDRESS : 0;

	vm_object_reference(object);
	vm_map_unlock(map);

	vm_object_lock(object);
	for (i = 0; i < map_size; i += PAGE_SIZE) {
		vm_page_t	mem;

		for (;;) {
			if (flags & KMA_LOMEM)
				mem = vm_page_alloclo(object, offset + i);
			else
				mem = vm_page_alloc(object, offset + i);

			if (mem != VM_PAGE_NULL)
				break;

			if (flags & KMA_NOPAGEWAIT) {
				if (object == kernel_object)
					vm_object_page_remove(object, offset, offset + i);
				vm_object_unlock(object);
				vm_map_remove(map, map_addr, map_addr + map_size, 0);
				vm_object_deallocate(object);
				return KERN_RESOURCE_SHORTAGE;
			}
			vm_object_unlock(object);
			VM_PAGE_WAIT();
			vm_object_lock(object);
		}
		mem->busy = FALSE;
	}
	vm_object_unlock(object);

	if ((kr = vm_map_wire(map, map_addr, map_addr + map_size, VM_PROT_DEFAULT, FALSE))
	    != KERN_SUCCESS) {
		if (object == kernel_object) {
			vm_object_lock(object);
			vm_object_page_remove(object, offset, offset + map_size);
			vm_object_unlock(object);
		}
		vm_map_remove(map, map_addr, map_addr + map_size, 0);
		vm_object_deallocate(object);
		return (kr);
	}
	/* now that the page is wired, we no longer have to fear coalesce */
	vm_object_deallocate(object);
	if (object == kernel_object)
		vm_map_simplify(map, map_addr);

	/*
	 * Return the memory, not zeroed.
	 */
	*addrp = CAST_DOWN(vm_offset_t, map_addr);
	return KERN_SUCCESS;
}

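/*
 * Usage sketch, for illustration only.  The wrappers below (kmem_alloc,
 * kmem_alloc_wired, kmem_alloc_aligned) cover the common cases; a caller
 * that needs the flags can invoke this routine directly.  The name buf is
 * hypothetical.
 *
 *	vm_offset_t	buf;
 *	kern_return_t	kr;
 *
 *	kr = kernel_memory_allocate(kernel_map, &buf, 8 * PAGE_SIZE,
 *				    (vm_offset_t) 0,
 *				    KMA_KOBJECT | KMA_NOPAGEWAIT);
 *	if (kr == KERN_RESOURCE_SHORTAGE)
 *		... back off; no pages were available and we chose not to wait ...
 */
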
/*
 *	kmem_alloc:
 *
 *	Allocate wired-down memory in the kernel's address map
 *	or a submap.  The memory is not zero-filled.
 */

kern_return_t
kmem_alloc(
	vm_map_t	map,
	vm_offset_t	*addrp,
	vm_size_t	size)
{
	return kernel_memory_allocate(map, addrp, size, 0, 0);
}

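/*
 * Usage sketch, for illustration only (buf is a hypothetical name).  The
 * memory comes back wired but not zero-filled:
 *
 *	vm_offset_t	buf;
 *
 *	if (kmem_alloc(kernel_map, &buf, 3 * PAGE_SIZE) == KERN_SUCCESS) {
 *		memset((void *) buf, 0, 3 * PAGE_SIZE);
 *		...
 *		kmem_free(kernel_map, buf, 3 * PAGE_SIZE);
 *	}
 */
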
/*
 *	kmem_realloc:
 *
 *	Reallocate wired-down memory in the kernel's address map
 *	or a submap.  Newly allocated pages are not zeroed.
 *	This can only be used on regions allocated with kmem_alloc.
 *
 *	If successful, the pages in the old region are mapped twice.
 *	The old region is unchanged.  Use kmem_free to get rid of it.
 */
kern_return_t
kmem_realloc(
	vm_map_t	map,
	vm_offset_t	oldaddr,
	vm_size_t	oldsize,
	vm_offset_t	*newaddrp,
	vm_size_t	newsize)
{
	vm_object_t		object;
	vm_object_offset_t	offset;
	vm_map_offset_t		oldmapmin;
	vm_map_offset_t		oldmapmax;
	vm_map_offset_t		newmapaddr;
	vm_map_size_t		oldmapsize;
	vm_map_size_t		newmapsize;
	vm_map_entry_t		oldentry;
	vm_map_entry_t		newentry;
	vm_page_t		mem;
	kern_return_t		kr;

	oldmapmin = vm_map_trunc_page(oldaddr);
	oldmapmax = vm_map_round_page(oldaddr + oldsize);
	oldmapsize = oldmapmax - oldmapmin;
	newmapsize = vm_map_round_page(newsize);


	/*
	 *	Find the VM object backing the old region.
	 */

	vm_map_lock(map);

	if (!vm_map_lookup_entry(map, oldmapmin, &oldentry))
		panic("kmem_realloc");
	object = oldentry->object.vm_object;

	/*
	 *	Increase the size of the object and
	 *	fill in the new region.
	 */

	vm_object_reference(object);
	/* by grabbing the object lock before unlocking the map */
	/* we guarantee that we will panic if more than one     */
	/* attempt is made to realloc a kmem_alloc'd area       */
	vm_object_lock(object);
	vm_map_unlock(map);
	if (object->size != oldmapsize)
		panic("kmem_realloc");
	object->size = newmapsize;
	vm_object_unlock(object);

	/* allocate the new pages while expanded portion of the */
	/* object is still not mapped */
	kmem_alloc_pages(object, vm_object_round_page(oldmapsize),
			 vm_object_round_page(newmapsize-oldmapsize));

	/*
	 *	Find space for the new region.
	 */

	kr = vm_map_find_space(map, &newmapaddr, newmapsize,
			       (vm_map_offset_t) 0, 0, &newentry);
	if (kr != KERN_SUCCESS) {
		vm_object_lock(object);
		for(offset = oldmapsize;
		    offset < newmapsize; offset += PAGE_SIZE) {
			if ((mem = vm_page_lookup(object, offset)) != VM_PAGE_NULL) {
				vm_page_lock_queues();
				vm_page_free(mem);
				vm_page_unlock_queues();
			}
		}
		object->size = oldmapsize;
		vm_object_unlock(object);
		vm_object_deallocate(object);
		return kr;
	}
	newentry->object.vm_object = object;
	newentry->offset = 0;
	assert (newentry->wired_count == 0);


	/* add an extra reference in case we have someone doing an */
	/* unexpected deallocate */
	vm_object_reference(object);
	vm_map_unlock(map);

	kr = vm_map_wire(map, newmapaddr, newmapaddr + newmapsize, VM_PROT_DEFAULT, FALSE);
	if (KERN_SUCCESS != kr) {
		vm_map_remove(map, newmapaddr, newmapaddr + newmapsize, 0);
		vm_object_lock(object);
		for(offset = oldsize; offset < newmapsize; offset += PAGE_SIZE) {
			if ((mem = vm_page_lookup(object, offset)) != VM_PAGE_NULL) {
				vm_page_lock_queues();
				vm_page_free(mem);
				vm_page_unlock_queues();
			}
		}
		object->size = oldmapsize;
		vm_object_unlock(object);
		vm_object_deallocate(object);
		return (kr);
	}
	vm_object_deallocate(object);

	*newaddrp = CAST_DOWN(vm_offset_t, newmapaddr);
	return KERN_SUCCESS;
}

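/*
 * Usage sketch, for illustration only (oldbuf, newbuf and the sizes are
 * hypothetical).  On success the old region is still mapped and must be
 * released separately with kmem_free:
 *
 *	vm_offset_t	newbuf;
 *
 *	if (kmem_realloc(kernel_map, oldbuf, oldsize,
 *			 &newbuf, newsize) == KERN_SUCCESS) {
 *		kmem_free(kernel_map, oldbuf, oldsize);
 *		oldbuf = newbuf;
 *	}
 */
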
/*
 *	kmem_alloc_wired:
 *
 *	Allocate wired-down memory in the kernel's address map
 *	or a submap.  The memory is not zero-filled.
 *
 *	The memory is allocated in the kernel_object.
 *	It may not be copied with vm_map_copy, and
 *	it may not be reallocated with kmem_realloc.
 */

kern_return_t
kmem_alloc_wired(
	vm_map_t	map,
	vm_offset_t	*addrp,
	vm_size_t	size)
{
	return kernel_memory_allocate(map, addrp, size, 0, KMA_KOBJECT);
}

/*
 *	kmem_alloc_aligned:
 *
 *	Like kmem_alloc_wired, except that the memory is aligned.
 *	The size should be a power-of-2.
 */

kern_return_t
kmem_alloc_aligned(
	vm_map_t	map,
	vm_offset_t	*addrp,
	vm_size_t	size)
{
	if ((size & (size - 1)) != 0)
		panic("kmem_alloc_aligned: size not aligned");
	return kernel_memory_allocate(map, addrp, size, size - 1, KMA_KOBJECT);
}

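/*
 * Usage sketch, for illustration only (stack is a hypothetical name).
 * Because the size is a power of 2, the returned address is aligned on
 * that same boundary:
 *
 *	vm_offset_t	stack;
 *
 *	if (kmem_alloc_aligned(kernel_map, &stack,
 *			       16 * PAGE_SIZE) != KERN_SUCCESS)
 *		... handle the failure ...
 */
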
/*
 *	kmem_alloc_pageable:
 *
 *	Allocate pageable memory in the kernel's address map.
 */

kern_return_t
kmem_alloc_pageable(
	vm_map_t	map,
	vm_offset_t	*addrp,
	vm_size_t	size)
{
	vm_map_offset_t map_addr;
	vm_map_size_t	map_size;
	kern_return_t	kr;

#ifndef normal
	map_addr = (vm_map_min(map)) + 0x1000;
#else
	map_addr = vm_map_min(map);
#endif
	map_size = vm_map_round_page(size);

	kr = vm_map_enter(map, &map_addr, map_size,
			  (vm_map_offset_t) 0, VM_FLAGS_ANYWHERE,
			  VM_OBJECT_NULL, (vm_object_offset_t) 0, FALSE,
			  VM_PROT_DEFAULT, VM_PROT_ALL, VM_INHERIT_DEFAULT);

	if (kr != KERN_SUCCESS)
		return kr;

	*addrp = CAST_DOWN(vm_offset_t, map_addr);
	return KERN_SUCCESS;
}

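/*
 * Usage sketch, for illustration only (table and table_size are
 * hypothetical).  Pageable allocations suit data that can tolerate page
 * faults, for instance in a pageable submap such as kernel_pageable_map:
 *
 *	vm_offset_t	table;
 *
 *	if (kmem_alloc_pageable(kernel_pageable_map, &table,
 *				table_size) != KERN_SUCCESS)
 *		... handle the failure ...
 */
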
/*
 *	kmem_free:
 *
 *	Release a region of kernel virtual memory allocated
 *	with kmem_alloc, kmem_alloc_wired, or kmem_alloc_pageable,
 *	and return the physical pages associated with that region.
 */

void
kmem_free(
	vm_map_t	map,
	vm_offset_t	addr,
	vm_size_t	size)
{
	kern_return_t kr;

	kr = vm_map_remove(map, vm_map_trunc_page(addr),
			   vm_map_round_page(addr + size),
			   VM_MAP_REMOVE_KUNWIRE);
	if (kr != KERN_SUCCESS)
		panic("kmem_free");
}

/*
 *	Allocate new pages in an object.
 */

kern_return_t
kmem_alloc_pages(
	register vm_object_t		object,
	register vm_object_offset_t	offset,
	register vm_object_size_t	size)
{
	vm_object_size_t		alloc_size;

	alloc_size = vm_object_round_page(size);
	vm_object_lock(object);
	while (alloc_size) {
		register vm_page_t	mem;


		/*
		 *	Allocate a page
		 */
		while (VM_PAGE_NULL ==
		       (mem = vm_page_alloc(object, offset))) {
			vm_object_unlock(object);
			VM_PAGE_WAIT();
			vm_object_lock(object);
		}
		mem->busy = FALSE;

		alloc_size -= PAGE_SIZE;
		offset += PAGE_SIZE;
	}
	vm_object_unlock(object);
	return KERN_SUCCESS;
}

/*
 *	Remap wired pages in an object into a new region.
 *	The object is assumed to be mapped into the kernel map or
 *	a submap.
 */
void
kmem_remap_pages(
	register vm_object_t		object,
	register vm_object_offset_t	offset,
	register vm_offset_t		start,
	register vm_offset_t		end,
	vm_prot_t			protection)
{

	vm_map_offset_t			map_start;
	vm_map_offset_t			map_end;

	/*
	 *	Mark the pmap region as not pageable.
	 */
	map_start = vm_map_trunc_page(start);
	map_end = vm_map_round_page(end);

	pmap_pageable(kernel_pmap, map_start, map_end, FALSE);

	while (map_start < map_end) {
		register vm_page_t	mem;

		vm_object_lock(object);

		/*
		 *	Find a page
		 */
		if ((mem = vm_page_lookup(object, offset)) == VM_PAGE_NULL)
			panic("kmem_remap_pages");

		/*
		 *	Wire it down (again)
		 */
		vm_page_lock_queues();
		vm_page_wire(mem);
		vm_page_unlock_queues();
		vm_object_unlock(object);

		/*
		 * ENCRYPTED SWAP:
		 * The page is supposed to be wired now, so it
		 * shouldn't be encrypted at this point.  It can
		 * safely be entered in the page table.
		 */
		ASSERT_PAGE_DECRYPTED(mem);

		/*
		 *	Enter it in the kernel pmap.  The page isn't busy,
		 *	but this shouldn't be a problem because it is wired.
		 */
		PMAP_ENTER(kernel_pmap, map_start, mem, protection,
			   ((unsigned int)(mem->object->wimg_bits))
			   & VM_WIMG_MASK,
			   TRUE);

		map_start += PAGE_SIZE;
		offset += PAGE_SIZE;
	}
}

/*
 *	kmem_suballoc:
 *
 *	Allocates a map to manage a subrange
 *	of the kernel virtual address space.
 *
 *	Arguments are as follows:
 *
 *	parent		Map to take range from
 *	addr		Address of start of range (IN/OUT)
 *	size		Size of range to find
 *	pageable	Can region be paged
 *	anywhere	Can region be located anywhere in map
 *	new_map		Pointer to new submap
 */
kern_return_t
kmem_suballoc(
	vm_map_t	parent,
	vm_offset_t	*addr,
	vm_size_t	size,
	boolean_t	pageable,
	int		flags,
	vm_map_t	*new_map)
{
	vm_map_t	map;
	vm_map_offset_t map_addr;
	vm_map_size_t	map_size;
	kern_return_t	kr;

	map_size = vm_map_round_page(size);

	/*
	 *	Need reference on submap object because it is internal
	 *	to the vm_system.  vm_object_enter will never be called
	 *	on it (usual source of reference for vm_map_enter).
	 */
	vm_object_reference(vm_submap_object);

	map_addr = (flags & VM_FLAGS_ANYWHERE) ?
		   vm_map_min(parent) : vm_map_trunc_page(*addr);

	kr = vm_map_enter(parent, &map_addr, map_size,
			  (vm_map_offset_t) 0, flags,
			  vm_submap_object, (vm_object_offset_t) 0, FALSE,
			  VM_PROT_DEFAULT, VM_PROT_ALL, VM_INHERIT_DEFAULT);
	if (kr != KERN_SUCCESS) {
		vm_object_deallocate(vm_submap_object);
		return (kr);
	}

	pmap_reference(vm_map_pmap(parent));
	map = vm_map_create(vm_map_pmap(parent), map_addr, map_addr + map_size, pageable);
	if (map == VM_MAP_NULL)
		panic("kmem_suballoc: vm_map_create failed");	/* "can't happen" */

	kr = vm_map_submap(parent, map_addr, map_addr + map_size, map, map_addr, FALSE);
	if (kr != KERN_SUCCESS) {
		/*
		 * See comment preceding vm_map_submap().
		 */
		vm_map_remove(parent, map_addr, map_addr + map_size, VM_MAP_NO_FLAGS);
		vm_map_deallocate(map);	/* also removes ref to pmap */
		vm_object_deallocate(vm_submap_object);
		return (kr);
	}
	*addr = CAST_DOWN(vm_offset_t, map_addr);
	*new_map = map;
	return (KERN_SUCCESS);
}

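/*
 * Usage sketch, for illustration only: carving a private submap out of
 * the kernel map, as subsystem initialization code typically does.  The
 * names my_map, my_min and my_size are hypothetical; FALSE keeps the
 * submap wired, and VM_FLAGS_ANYWHERE lets the parent choose the range.
 *
 *	vm_map_t	my_map;
 *	vm_offset_t	my_min;
 *
 *	if (kmem_suballoc(kernel_map, &my_min, my_size,
 *			  FALSE, VM_FLAGS_ANYWHERE,
 *			  &my_map) != KERN_SUCCESS)
 *		panic("my_subsystem_init");
 */
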
/*
 *	kmem_init:
 *
 *	Initialize the kernel's virtual memory map, taking
 *	into account all memory allocated up to this time.
 */
void
kmem_init(
	vm_offset_t	start,
	vm_offset_t	end)
{
	vm_map_offset_t map_start;
	vm_map_offset_t map_end;

	map_start = vm_map_trunc_page(start);
	map_end = vm_map_round_page(end);

	kernel_map = vm_map_create(pmap_kernel(), VM_MIN_KERNEL_ADDRESS,
				   map_end, FALSE);

	/*
	 *	Reserve virtual memory allocated up to this time.
	 */

	if (start != VM_MIN_KERNEL_ADDRESS) {
		vm_map_offset_t map_addr;

		map_addr = VM_MIN_KERNEL_ADDRESS;
		(void) vm_map_enter(kernel_map,
				    &map_addr,
				    (vm_map_size_t)(map_start - VM_MIN_KERNEL_ADDRESS),
				    (vm_map_offset_t) 0,
				    VM_FLAGS_ANYWHERE | VM_FLAGS_NO_PMAP_CHECK,
				    VM_OBJECT_NULL,
				    (vm_object_offset_t) 0, FALSE,
				    VM_PROT_DEFAULT, VM_PROT_ALL,
				    VM_INHERIT_DEFAULT);
	}

	/*
	 * Account for kernel memory (text, data, bss, vm shenanigans).
	 * This may include inaccessible "holes" as determined by what
	 * the machine-dependent init code includes in max_mem.
	 */
	vm_page_wire_count = (atop_64(max_mem) - (vm_page_free_count
						  + vm_page_active_count
						  + vm_page_inactive_count));
}


/*
 *	Routine:	copyinmap
 *	Purpose:
 *		Like copyin, except that fromaddr is an address
 *		in the specified VM map.  This implementation
 *		is incomplete; it handles the current user map
 *		and the kernel map/submaps.
 */
kern_return_t
copyinmap(
	vm_map_t		map,
	vm_map_offset_t		fromaddr,
	void			*todata,
	vm_size_t		length)
{
	kern_return_t	kr = KERN_SUCCESS;
	vm_map_t	oldmap;

	if (vm_map_pmap(map) == pmap_kernel())
	{
		/* assume a correct copy */
		memcpy(todata, CAST_DOWN(void *, fromaddr), length);
	}
	else if (current_map() == map)
	{
		if (copyin(fromaddr, todata, length) != 0)
			kr = KERN_INVALID_ADDRESS;
	}
	else
	{
		vm_map_reference(map);
		oldmap = vm_map_switch(map);
		if (copyin(fromaddr, todata, length) != 0)
			kr = KERN_INVALID_ADDRESS;
		vm_map_switch(oldmap);
		vm_map_deallocate(map);
	}
	return kr;
}

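/*
 * Usage sketch, for illustration only (task_map, user_ptr and args are
 * hypothetical): copy a small structure out of a task's address space
 * given only that task's map, which need not be the current map:
 *
 *	struct my_args	args;
 *
 *	if (copyinmap(task_map, user_ptr, &args,
 *		      sizeof (args)) != KERN_SUCCESS)
 *		return KERN_INVALID_ADDRESS;
 */
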
/*
 *	Routine:	copyoutmap
 *	Purpose:
 *		Like copyout, except that toaddr is an address
 *		in the specified VM map.  This implementation
 *		is incomplete; it handles the current user map
 *		and the kernel map/submaps.
 */
kern_return_t
copyoutmap(
	vm_map_t		map,
	void			*fromdata,
	vm_map_address_t	toaddr,
	vm_size_t		length)
{
	if (vm_map_pmap(map) == pmap_kernel()) {
		/* assume a correct copy */
		memcpy(CAST_DOWN(void *, toaddr), fromdata, length);
		return KERN_SUCCESS;
	}

	if (current_map() != map)
		return KERN_NOT_SUPPORTED;

	if (copyout(fromdata, toaddr, length) != 0)
		return KERN_INVALID_ADDRESS;

	return KERN_SUCCESS;
}

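/*
 * Usage sketch, for illustration only (user_ptr and result are
 * hypothetical).  Unlike copyinmap, this routine does not switch maps, so
 * the destination must be the kernel map or the current task's map:
 *
 *	if (copyoutmap(current_map(), &result, user_ptr,
 *		       sizeof (result)) != KERN_SUCCESS)
 *		return KERN_INVALID_ADDRESS;
 */
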

kern_return_t
vm_conflict_check(
	vm_map_t		map,
	vm_map_offset_t		off,
	vm_map_size_t		len,
	memory_object_t		pager,
	vm_object_offset_t	file_off)
{
	vm_map_entry_t		entry;
	vm_object_t		obj;
	vm_object_offset_t	obj_off;
	vm_map_t		base_map;
	vm_map_offset_t		base_offset;
	vm_map_offset_t		original_offset;
	kern_return_t		kr;
	vm_map_size_t		local_len;

	base_map = map;
	base_offset = off;
	original_offset = off;
	kr = KERN_SUCCESS;
	vm_map_lock(map);
	while(vm_map_lookup_entry(map, off, &entry)) {
		local_len = len;

		if (entry->object.vm_object == VM_OBJECT_NULL) {
			vm_map_unlock(map);
			return KERN_SUCCESS;
		}
		if (entry->is_sub_map) {
			vm_map_t	old_map;

			old_map = map;
			vm_map_lock(entry->object.sub_map);
			map = entry->object.sub_map;
			off = entry->offset + (off - entry->vme_start);
			vm_map_unlock(old_map);
			continue;
		}
		obj = entry->object.vm_object;
		obj_off = (off - entry->vme_start) + entry->offset;
		while(obj->shadow) {
			obj_off += obj->shadow_offset;
			obj = obj->shadow;
		}
		if((obj->pager_created) && (obj->pager == pager)) {
			if(((obj->paging_offset) + obj_off) == file_off) {
				if(off != base_offset) {
					vm_map_unlock(map);
					return KERN_FAILURE;
				}
				kr = KERN_ALREADY_WAITING;
			} else {
				vm_object_offset_t	obj_off_aligned;
				vm_object_offset_t	file_off_aligned;

				obj_off_aligned = obj_off & ~PAGE_MASK;
				file_off_aligned = file_off & ~PAGE_MASK;

				if (file_off_aligned == (obj->paging_offset + obj_off_aligned)) {
					/*
					 * the target map and the file offset start in the same page
					 * but are not identical...
					 */
					vm_map_unlock(map);
					return KERN_FAILURE;
				}
				if ((file_off < (obj->paging_offset + obj_off_aligned)) &&
				    ((file_off + len) > (obj->paging_offset + obj_off_aligned))) {
					/*
					 * some portion of the tail of the I/O will fall
					 * within the encompass of the target map
					 */
					vm_map_unlock(map);
					return KERN_FAILURE;
				}
				if ((file_off_aligned > (obj->paging_offset + obj_off)) &&
				    (file_off_aligned < (obj->paging_offset + obj_off) + len)) {
					/*
					 * the beginning page of the file offset falls within
					 * the target map's encompass
					 */
					vm_map_unlock(map);
					return KERN_FAILURE;
				}
			}
		} else if(kr != KERN_SUCCESS) {
			vm_map_unlock(map);
			return KERN_FAILURE;
		}

		if(len <= ((entry->vme_end - entry->vme_start) -
			   (off - entry->vme_start))) {
			vm_map_unlock(map);
			return kr;
		} else {
			len -= (entry->vme_end - entry->vme_start) -
			       (off - entry->vme_start);
		}
		base_offset = base_offset + (local_len - len);
		file_off = file_off + (local_len - len);
		off = base_offset;
		if(map != base_map) {
			vm_map_unlock(map);
			vm_map_lock(base_map);
			map = base_map;
		}
	}

	vm_map_unlock(map);
	return kr;
}