osfmk/vm/vm_kern.c
1 /*
2 * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * @OSF_COPYRIGHT@
30 */
31 /*
32 * Mach Operating System
33 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
34 * All Rights Reserved.
35 *
36 * Permission to use, copy, modify and distribute this software and its
37 * documentation is hereby granted, provided that both the copyright
38 * notice and this permission notice appear in all copies of the
39 * software, derivative works or modified versions, and any portions
40 * thereof, and that both notices appear in supporting documentation.
41 *
42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45 *
46 * Carnegie Mellon requests users of this software to return to
47 *
48 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
49 * School of Computer Science
50 * Carnegie Mellon University
51 * Pittsburgh PA 15213-3890
52 *
53 * any improvements or extensions that they make and grant Carnegie Mellon
54 * the rights to redistribute these changes.
55 */
56 /*
57 */
58 /*
59 * File: vm/vm_kern.c
60 * Author: Avadis Tevanian, Jr., Michael Wayne Young
61 * Date: 1985
62 *
63 * Kernel memory management.
64 */
65
66 #include <mach/kern_return.h>
67 #include <mach/vm_param.h>
68 #include <kern/assert.h>
69 #include <kern/lock.h>
70 #include <kern/thread.h>
71 #include <vm/vm_kern.h>
72 #include <vm/vm_map.h>
73 #include <vm/vm_object.h>
74 #include <vm/vm_page.h>
75 #include <vm/vm_pageout.h>
76 #include <kern/misc_protos.h>
77 #include <vm/cpm.h>
78
79 #include <string.h>
80 /*
81 * Variables exported by this module.
82 */
83
84 vm_map_t kernel_map;
85 vm_map_t kernel_pageable_map;
86
87 /*
88 * Forward declarations for internal functions.
89 */
90 extern kern_return_t kmem_alloc_pages(
91 register vm_object_t object,
92 register vm_object_offset_t offset,
93 register vm_object_size_t size);
94
95 extern void kmem_remap_pages(
96 register vm_object_t object,
97 register vm_object_offset_t offset,
98 register vm_offset_t start,
99 register vm_offset_t end,
100 vm_prot_t protection);
101
102 kern_return_t
103 kmem_alloc_contig(
104 vm_map_t map,
105 vm_offset_t *addrp,
106 vm_size_t size,
107 vm_offset_t mask,
108 int flags)
109 {
110 vm_object_t object;
111 vm_object_offset_t offset;
112 vm_map_offset_t map_addr;
113 vm_map_offset_t map_mask;
114 vm_map_size_t map_size, i;
115 vm_map_entry_t entry;
116 vm_page_t m, pages;
117 kern_return_t kr;
118
119 if (map == VM_MAP_NULL || (flags && (flags ^ KMA_KOBJECT)))
120 return KERN_INVALID_ARGUMENT;
121
122 if (size == 0) {
123 *addrp = 0;
124 return KERN_INVALID_ARGUMENT;
125 }
126
127 map_size = vm_map_round_page(size);
128 map_mask = (vm_map_offset_t)mask;
129
130 /*
131 * Allocate a new object (if necessary) and the reference we
132 * will be donating to the map entry. We must do this before
133 * locking the map, or risk deadlock with the default pager.
134 */
135 if ((flags & KMA_KOBJECT) != 0) {
136 object = kernel_object;
137 vm_object_reference(object);
138 } else {
139 object = vm_object_allocate(map_size);
140 }
141
142 kr = vm_map_find_space(map, &map_addr, map_size, map_mask, &entry);
143 if (KERN_SUCCESS != kr) {
144 vm_object_deallocate(object);
145 return kr;
146 }
147
148 entry->object.vm_object = object;
149 entry->offset = offset = (object == kernel_object) ?
150 map_addr - VM_MIN_KERNEL_ADDRESS : 0;
151
152 /* Take an extra object ref in case the map entry gets deleted */
153 vm_object_reference(object);
154 vm_map_unlock(map);
155
156 kr = cpm_allocate(CAST_DOWN(vm_size_t, map_size), &pages, FALSE);
157
158 if (kr != KERN_SUCCESS) {
159 vm_map_remove(map, vm_map_trunc_page(map_addr),
160 vm_map_round_page(map_addr + map_size), 0);
161 vm_object_deallocate(object);
162 *addrp = 0;
163 return kr;
164 }
165
166 vm_object_lock(object);
167 for (i = 0; i < map_size; i += PAGE_SIZE) {
168 m = pages;
169 pages = NEXT_PAGE(m);
170 m->busy = FALSE;
171 vm_page_insert(m, object, offset + i);
172 }
173 vm_object_unlock(object);
174
175 if ((kr = vm_map_wire(map, vm_map_trunc_page(map_addr),
176 vm_map_round_page(map_addr + map_size), VM_PROT_DEFAULT, FALSE))
177 != KERN_SUCCESS) {
178 if (object == kernel_object) {
179 vm_object_lock(object);
180 vm_object_page_remove(object, offset, offset + map_size);
181 vm_object_unlock(object);
182 }
183 vm_map_remove(map, vm_map_trunc_page(map_addr),
184 vm_map_round_page(map_addr + map_size), 0);
185 vm_object_deallocate(object);
186 return kr;
187 }
188 vm_object_deallocate(object);
189
190 if (object == kernel_object)
191 vm_map_simplify(map, map_addr);
192
193 *addrp = map_addr;
194 return KERN_SUCCESS;
195 }
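
/*
 * Illustrative sketch (editor's addition, not part of the original xnu
 * source): a caller that needs a physically contiguous, wired buffer,
 * e.g. for a hypothetical DMA engine.  The mask argument is an alignment
 * mask, as in kernel_memory_allocate; 0 asks for nothing beyond page
 * alignment.  The "example_" name is invented for illustration.
 */
static kern_return_t
example_grab_contig_buffer(vm_size_t size, vm_offset_t *bufp)
{
	kern_return_t kr;

	kr = kmem_alloc_contig(kernel_map, bufp, size,
			       (vm_offset_t) 0, KMA_KOBJECT);
	if (kr != KERN_SUCCESS)
		return kr;	/* propagate vm_map_find_space/cpm_allocate failure */

	/* ... hand the buffer to the device ... */
	return KERN_SUCCESS;
}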
196
197 /*
198 * Master entry point for allocating kernel memory.
199 * NOTE: this routine is _never_ interrupt safe.
200 *
201 * map : map to allocate into
202 * addrp : pointer to start address of new memory
203 * size : size of memory requested
204 * flags : options
205 * KMA_HERE *addrp is base address, else "anywhere"
206 * KMA_NOPAGEWAIT don't wait for pages if unavailable
207 * KMA_KOBJECT use kernel_object
208 */
209
210 kern_return_t
211 kernel_memory_allocate(
212 register vm_map_t map,
213 register vm_offset_t *addrp,
214 register vm_size_t size,
215 register vm_offset_t mask,
216 int flags)
217 {
218 vm_object_t object;
219 vm_object_offset_t offset;
220 vm_map_entry_t entry;
221 vm_map_offset_t map_addr;
222 vm_map_offset_t map_mask;
223 vm_map_size_t map_size;
224 vm_map_size_t i;
225 kern_return_t kr;
226
227 if (size == 0) {
228 *addrp = 0;
229 return KERN_INVALID_ARGUMENT;
230 }
231
232 map_size = vm_map_round_page(size);
233 map_mask = (vm_map_offset_t) mask;
234
235 /*
236 * Allocate a new object (if necessary). We must do this before
237 * locking the map, or risk deadlock with the default pager.
238 */
239 if ((flags & KMA_KOBJECT) != 0) {
240 object = kernel_object;
241 vm_object_reference(object);
242 } else {
243 object = vm_object_allocate(map_size);
244 }
245
246 kr = vm_map_find_space(map, &map_addr, map_size, map_mask, &entry);
247 if (KERN_SUCCESS != kr) {
248 vm_object_deallocate(object);
249 return kr;
250 }
251
252 entry->object.vm_object = object;
253 entry->offset = offset = (object == kernel_object) ?
254 map_addr - VM_MIN_KERNEL_ADDRESS : 0;
255
256 vm_object_reference(object);
257 vm_map_unlock(map);
258
259 vm_object_lock(object);
260 for (i = 0; i < map_size; i += PAGE_SIZE) {
261 vm_page_t mem;
262
263 while (VM_PAGE_NULL ==
264 (mem = vm_page_alloc(object, offset + i))) {
265 if (flags & KMA_NOPAGEWAIT) {
266 if (object == kernel_object)
267 vm_object_page_remove(object, offset, offset + i);
268 vm_object_unlock(object);
269 vm_map_remove(map, map_addr, map_addr + map_size, 0);
270 vm_object_deallocate(object);
271 return KERN_RESOURCE_SHORTAGE;
272 }
273 vm_object_unlock(object);
274 VM_PAGE_WAIT();
275 vm_object_lock(object);
276 }
277 mem->busy = FALSE;
278 }
279 vm_object_unlock(object);
280
281 if ((kr = vm_map_wire(map, map_addr, map_addr + map_size, VM_PROT_DEFAULT, FALSE))
282 != KERN_SUCCESS) {
283 if (object == kernel_object) {
284 vm_object_lock(object);
285 vm_object_page_remove(object, offset, offset + map_size);
286 vm_object_unlock(object);
287 }
288 vm_map_remove(map, map_addr, map_addr + map_size, 0);
289 vm_object_deallocate(object);
290 return (kr);
291 }
292 	/* now that the pages are wired, we no longer have to fear coalescing */
293 vm_object_deallocate(object);
294 if (object == kernel_object)
295 vm_map_simplify(map, map_addr);
296
297 /*
298 * Return the memory, not zeroed.
299 */
300 *addrp = CAST_DOWN(vm_offset_t, map_addr);
301 return KERN_SUCCESS;
302 }
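
/*
 * Illustrative sketch (editor's addition, not part of the original xnu
 * source): how the kmem_alloc* wrappers below reduce to this routine.
 * The sketch asks for a wired, kernel_object-backed allocation aligned
 * to 1 MB that fails with KERN_RESOURCE_SHORTAGE instead of blocking
 * when free pages are scarce.  The "example_" name is invented.
 */
static kern_return_t
example_wired_alloc_nowait(vm_size_t size, vm_offset_t *addrp)
{
	return kernel_memory_allocate(kernel_map, addrp, size,
			(vm_offset_t)((1 << 20) - 1),	/* mask: 1 MB alignment */
			KMA_KOBJECT | KMA_NOPAGEWAIT);
}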
303
304 /*
305 * kmem_alloc:
306 *
307 * Allocate wired-down memory in the kernel's address map
308 * or a submap. The memory is not zero-filled.
309 */
310
311 kern_return_t
312 kmem_alloc(
313 vm_map_t map,
314 vm_offset_t *addrp,
315 vm_size_t size)
316 {
317 return kernel_memory_allocate(map, addrp, size, 0, 0);
318 }
319
320 /*
321 * kmem_realloc:
322 *
323 * Reallocate wired-down memory in the kernel's address map
324 * or a submap. Newly allocated pages are not zeroed.
325 * This can only be used on regions allocated with kmem_alloc.
326 *
327 * If successful, the pages in the old region are mapped twice.
328 * The old region is unchanged. Use kmem_free to get rid of it.
329 */
330 kern_return_t
331 kmem_realloc(
332 vm_map_t map,
333 vm_offset_t oldaddr,
334 vm_size_t oldsize,
335 vm_offset_t *newaddrp,
336 vm_size_t newsize)
337 {
338 vm_object_t object;
339 vm_object_offset_t offset;
340 vm_map_offset_t oldmapmin;
341 vm_map_offset_t oldmapmax;
342 vm_map_offset_t newmapaddr;
343 vm_map_size_t oldmapsize;
344 vm_map_size_t newmapsize;
345 vm_map_entry_t oldentry;
346 vm_map_entry_t newentry;
347 vm_page_t mem;
348 kern_return_t kr;
349
350 oldmapmin = vm_map_trunc_page(oldaddr);
351 oldmapmax = vm_map_round_page(oldaddr + oldsize);
352 oldmapsize = oldmapmax - oldmapmin;
353 newmapsize = vm_map_round_page(newsize);
354
355
356 /*
357 * Find the VM object backing the old region.
358 */
359
360 vm_map_lock(map);
361
362 if (!vm_map_lookup_entry(map, oldmapmin, &oldentry))
363 panic("kmem_realloc");
364 object = oldentry->object.vm_object;
365
366 /*
367 * Increase the size of the object and
368 * fill in the new region.
369 */
370
371 vm_object_reference(object);
372 /* by grabbing the object lock before unlocking the map */
373 /* we guarantee that we will panic if more than one */
374 /* attempt is made to realloc a kmem_alloc'd area */
375 vm_object_lock(object);
376 vm_map_unlock(map);
377 if (object->size != oldmapsize)
378 panic("kmem_realloc");
379 object->size = newmapsize;
380 vm_object_unlock(object);
381
382 	/* allocate the new pages while the expanded portion of the */
383 /* object is still not mapped */
384 kmem_alloc_pages(object, vm_object_round_page(oldmapsize),
385 vm_object_round_page(newmapsize-oldmapsize));
386
387 /*
388 * Find space for the new region.
389 */
390
391 kr = vm_map_find_space(map, &newmapaddr, newmapsize,
392 (vm_map_offset_t) 0, &newentry);
393 if (kr != KERN_SUCCESS) {
394 vm_object_lock(object);
395 for(offset = oldmapsize;
396 offset < newmapsize; offset += PAGE_SIZE) {
397 if ((mem = vm_page_lookup(object, offset)) != VM_PAGE_NULL) {
398 vm_page_lock_queues();
399 vm_page_free(mem);
400 vm_page_unlock_queues();
401 }
402 }
403 object->size = oldmapsize;
404 vm_object_unlock(object);
405 vm_object_deallocate(object);
406 return kr;
407 }
408 newentry->object.vm_object = object;
409 newentry->offset = 0;
410 assert (newentry->wired_count == 0);
411
412
413 /* add an extra reference in case we have someone doing an */
414 /* unexpected deallocate */
415 vm_object_reference(object);
416 vm_map_unlock(map);
417
418 kr = vm_map_wire(map, newmapaddr, newmapaddr + newmapsize, VM_PROT_DEFAULT, FALSE);
419 if (KERN_SUCCESS != kr) {
420 vm_map_remove(map, newmapaddr, newmapaddr + newmapsize, 0);
421 vm_object_lock(object);
422 for(offset = oldsize; offset < newmapsize; offset += PAGE_SIZE) {
423 if ((mem = vm_page_lookup(object, offset)) != VM_PAGE_NULL) {
424 vm_page_lock_queues();
425 vm_page_free(mem);
426 vm_page_unlock_queues();
427 }
428 }
429 object->size = oldmapsize;
430 vm_object_unlock(object);
431 vm_object_deallocate(object);
432 return (kr);
433 }
434 vm_object_deallocate(object);
435
436 *newaddrp = CAST_DOWN(vm_offset_t, newmapaddr);
437 return KERN_SUCCESS;
438 }
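
/*
 * Illustrative sketch (editor's addition, not part of the original xnu
 * source): growing a region obtained with kmem_alloc.  As the comment
 * above notes, a successful kmem_realloc leaves the old pages mapped at
 * both addresses, so the caller drops the old mapping with kmem_free
 * once it has switched over.  The "example_" name is invented.
 */
static kern_return_t
example_grow_region(vm_offset_t *addrp, vm_size_t oldsize, vm_size_t newsize)
{
	vm_offset_t newaddr;
	kern_return_t kr;

	kr = kmem_realloc(kernel_map, *addrp, oldsize, &newaddr, newsize);
	if (kr != KERN_SUCCESS)
		return kr;			/* old region is untouched */

	kmem_free(kernel_map, *addrp, oldsize);	/* release the old mapping */
	*addrp = newaddr;
	return KERN_SUCCESS;
}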
439
440 /*
441 * kmem_alloc_wired:
442 *
443 * Allocate wired-down memory in the kernel's address map
444 * or a submap. The memory is not zero-filled.
445 *
446 * The memory is allocated in the kernel_object.
447 * It may not be copied with vm_map_copy, and
448 * it may not be reallocated with kmem_realloc.
449 */
450
451 kern_return_t
452 kmem_alloc_wired(
453 vm_map_t map,
454 vm_offset_t *addrp,
455 vm_size_t size)
456 {
457 return kernel_memory_allocate(map, addrp, size, 0, KMA_KOBJECT);
458 }
459
460 /*
461 * kmem_alloc_aligned:
462 *
463 * Like kmem_alloc_wired, except that the memory is aligned.
464 * The size should be a power-of-2.
465 */
466
467 kern_return_t
468 kmem_alloc_aligned(
469 vm_map_t map,
470 vm_offset_t *addrp,
471 vm_size_t size)
472 {
473 if ((size & (size - 1)) != 0)
474 panic("kmem_alloc_aligned: size not aligned");
475 return kernel_memory_allocate(map, addrp, size, size - 1, KMA_KOBJECT);
476 }
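
/*
 * Illustrative sketch (editor's addition, not part of the original xnu
 * source): because the mask passed down is size - 1, the returned
 * address is aligned to the (power-of-2) size, so a 32 KB request
 * comes back on a 32 KB boundary.  The "example_" name is invented.
 */
static kern_return_t
example_alloc_32k_aligned(vm_offset_t *addrp)
{
	return kmem_alloc_aligned(kernel_map, addrp, (vm_size_t)(32 * 1024));
}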
477
478 /*
479 * kmem_alloc_pageable:
480 *
481 * Allocate pageable memory in the kernel's address map.
482 */
483
484 kern_return_t
485 kmem_alloc_pageable(
486 vm_map_t map,
487 vm_offset_t *addrp,
488 vm_size_t size)
489 {
490 vm_map_offset_t map_addr;
491 vm_map_size_t map_size;
492 kern_return_t kr;
493
494 #ifndef normal
495 map_addr = (vm_map_min(map)) + 0x1000;
496 #else
497 map_addr = vm_map_min(map);
498 #endif
499 map_size = vm_map_round_page(size);
500
501 kr = vm_map_enter(map, &map_addr, map_size,
502 (vm_map_offset_t) 0, VM_FLAGS_ANYWHERE,
503 VM_OBJECT_NULL, (vm_object_offset_t) 0, FALSE,
504 VM_PROT_DEFAULT, VM_PROT_ALL, VM_INHERIT_DEFAULT);
505
506 if (kr != KERN_SUCCESS)
507 return kr;
508
509 *addrp = CAST_DOWN(vm_offset_t, map_addr);
510 return KERN_SUCCESS;
511 }
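
/*
 * Illustrative sketch (editor's addition, not part of the original xnu
 * source): pageable allocations are not wired, so they suit large
 * buffers that can tolerate faults; callers typically use kernel_map or
 * a pageable submap such as kernel_pageable_map.  kmem_free releases
 * such regions as well.  The "example_" name is invented.
 */
static kern_return_t
example_alloc_pageable_scratch(vm_offset_t *addrp, vm_size_t size)
{
	return kmem_alloc_pageable(kernel_map, addrp, size);
}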
512
513 /*
514 * kmem_free:
515 *
516 * Release a region of kernel virtual memory allocated
517 * with kmem_alloc, kmem_alloc_wired, or kmem_alloc_pageable,
518 * and return the physical pages associated with that region.
519 */
520
521 void
522 kmem_free(
523 vm_map_t map,
524 vm_offset_t addr,
525 vm_size_t size)
526 {
527 kern_return_t kr;
528
529 kr = vm_map_remove(map, vm_map_trunc_page(addr),
530 vm_map_round_page(addr + size),
531 VM_MAP_REMOVE_KUNWIRE);
532 if (kr != KERN_SUCCESS)
533 panic("kmem_free");
534 }
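
/*
 * Illustrative sketch (editor's addition, not part of the original xnu
 * source): the usual lifetime of a wired kernel buffer, pairing
 * kmem_alloc_wired with kmem_free.  kmem_free panics rather than
 * returning an error, so the address and size must describe a region
 * this caller really owns.  The "example_" name is invented.
 */
static void
example_with_scratch_buffer(vm_size_t size)
{
	vm_offset_t buf;

	if (kmem_alloc_wired(kernel_map, &buf, size) != KERN_SUCCESS)
		return;

	memset((void *) buf, 0, size);	/* the memory is not zero-filled */
	/* ... use the buffer ... */
	kmem_free(kernel_map, buf, size);
}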
535
536 /*
537 * Allocate new pages in an object.
538 */
539
540 kern_return_t
541 kmem_alloc_pages(
542 register vm_object_t object,
543 register vm_object_offset_t offset,
544 register vm_object_size_t size)
545 {
546 vm_object_size_t alloc_size;
547
548 alloc_size = vm_object_round_page(size);
549 vm_object_lock(object);
550 while (alloc_size) {
551 register vm_page_t mem;
552
553
554 /*
555 * Allocate a page
556 */
557 while (VM_PAGE_NULL ==
558 (mem = vm_page_alloc(object, offset))) {
559 vm_object_unlock(object);
560 VM_PAGE_WAIT();
561 vm_object_lock(object);
562 }
563 mem->busy = FALSE;
564
565 alloc_size -= PAGE_SIZE;
566 offset += PAGE_SIZE;
567 }
568 vm_object_unlock(object);
569 return KERN_SUCCESS;
570 }
571
572 /*
573 * Remap wired pages in an object into a new region.
574 * The object is assumed to be mapped into the kernel map or
575 * a submap.
576 */
577 void
578 kmem_remap_pages(
579 register vm_object_t object,
580 register vm_object_offset_t offset,
581 register vm_offset_t start,
582 register vm_offset_t end,
583 vm_prot_t protection)
584 {
585
586 vm_map_offset_t map_start;
587 vm_map_offset_t map_end;
588
589 /*
590 * Mark the pmap region as not pageable.
591 */
592 map_start = vm_map_trunc_page(start);
593 map_end = vm_map_round_page(end);
594
595 pmap_pageable(kernel_pmap, map_start, map_end, FALSE);
596
597 while (map_start < map_end) {
598 register vm_page_t mem;
599
600 vm_object_lock(object);
601
602 /*
603 * Find a page
604 */
605 if ((mem = vm_page_lookup(object, offset)) == VM_PAGE_NULL)
606 panic("kmem_remap_pages");
607
608 /*
609 * Wire it down (again)
610 */
611 vm_page_lock_queues();
612 vm_page_wire(mem);
613 vm_page_unlock_queues();
614 vm_object_unlock(object);
615
616 /*
617 * ENCRYPTED SWAP:
618 * The page is supposed to be wired now, so it
619 * shouldn't be encrypted at this point. It can
620 * safely be entered in the page table.
621 */
622 ASSERT_PAGE_DECRYPTED(mem);
623
624 /*
625 * Enter it in the kernel pmap. The page isn't busy,
626 * but this shouldn't be a problem because it is wired.
627 */
628 PMAP_ENTER(kernel_pmap, map_start, mem, protection,
629 ((unsigned int)(mem->object->wimg_bits))
630 & VM_WIMG_MASK,
631 TRUE);
632
633 map_start += PAGE_SIZE;
634 offset += PAGE_SIZE;
635 }
636 }
637
638 /*
639 * kmem_suballoc:
640 *
641 * Allocates a map to manage a subrange
642 * of the kernel virtual address space.
643 *
644 * Arguments are as follows:
645 *
646 * parent Map to take range from
647 * addr Address of start of range (IN/OUT)
648 * size Size of range to find
649 * pageable Can region be paged
650 * anywhere Can region be located anywhere in map
651 * new_map Pointer to new submap
652 */
653 kern_return_t
654 kmem_suballoc(
655 vm_map_t parent,
656 vm_offset_t *addr,
657 vm_size_t size,
658 boolean_t pageable,
659 int flags,
660 vm_map_t *new_map)
661 {
662 vm_map_t map;
663 vm_map_offset_t map_addr;
664 vm_map_size_t map_size;
665 kern_return_t kr;
666
667 map_size = vm_map_round_page(size);
668
669 /*
670 * Need reference on submap object because it is internal
671 * to the vm_system. vm_object_enter will never be called
672 * on it (usual source of reference for vm_map_enter).
673 */
674 vm_object_reference(vm_submap_object);
675
676 map_addr = (flags & VM_FLAGS_ANYWHERE) ?
677 vm_map_min(parent) : vm_map_trunc_page(*addr);
678
679 kr = vm_map_enter(parent, &map_addr, map_size,
680 (vm_map_offset_t) 0, flags,
681 vm_submap_object, (vm_object_offset_t) 0, FALSE,
682 VM_PROT_DEFAULT, VM_PROT_ALL, VM_INHERIT_DEFAULT);
683 if (kr != KERN_SUCCESS) {
684 vm_object_deallocate(vm_submap_object);
685 return (kr);
686 }
687
688 pmap_reference(vm_map_pmap(parent));
689 map = vm_map_create(vm_map_pmap(parent), map_addr, map_addr + map_size, pageable);
690 if (map == VM_MAP_NULL)
691 panic("kmem_suballoc: vm_map_create failed"); /* "can't happen" */
692
693 kr = vm_map_submap(parent, map_addr, map_addr + map_size, map, map_addr, FALSE);
694 if (kr != KERN_SUCCESS) {
695 /*
696 * See comment preceding vm_map_submap().
697 */
698 vm_map_remove(parent, map_addr, map_addr + map_size, VM_MAP_NO_FLAGS);
699 vm_map_deallocate(map); /* also removes ref to pmap */
700 vm_object_deallocate(vm_submap_object);
701 return (kr);
702 }
703 *addr = CAST_DOWN(vm_offset_t, map_addr);
704 *new_map = map;
705 return (KERN_SUCCESS);
706 }
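
/*
 * Illustrative sketch (editor's addition, not part of the original xnu
 * source): carving a pageable submap out of kernel_map, in the style of
 * the submaps the kernel creates at boot.  The 16 MB size and the
 * "example_" name are invented for illustration.
 */
static kern_return_t
example_create_submap(vm_map_t *submapp)
{
	vm_offset_t base = 0;		/* filled in by kmem_suballoc */

	return kmem_suballoc(kernel_map,
			     &base,
			     (vm_size_t)(16 * 1024 * 1024),
			     TRUE,			/* pageable */
			     VM_FLAGS_ANYWHERE,
			     submapp);
}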
707
708 /*
709 * kmem_init:
710 *
711 * Initialize the kernel's virtual memory map, taking
712 * into account all memory allocated up to this time.
713 */
714 void
715 kmem_init(
716 vm_offset_t start,
717 vm_offset_t end)
718 {
719 vm_map_offset_t map_start;
720 vm_map_offset_t map_end;
721
722 map_start = vm_map_trunc_page(start);
723 map_end = vm_map_round_page(end);
724
725 kernel_map = vm_map_create(pmap_kernel(),VM_MIN_KERNEL_ADDRESS,
726 map_end, FALSE);
727
728 /*
729 * Reserve virtual memory allocated up to this time.
730 */
731
732 if (start != VM_MIN_KERNEL_ADDRESS) {
733 vm_map_offset_t map_addr;
734
735 map_addr = VM_MIN_KERNEL_ADDRESS;
736 (void) vm_map_enter(kernel_map,
737 &map_addr,
738 (vm_map_size_t)(map_start - VM_MIN_KERNEL_ADDRESS),
739 (vm_map_offset_t) 0,
740 VM_FLAGS_ANYWHERE | VM_FLAGS_NO_PMAP_CHECK,
741 VM_OBJECT_NULL,
742 (vm_object_offset_t) 0, FALSE,
743 VM_PROT_DEFAULT, VM_PROT_ALL,
744 VM_INHERIT_DEFAULT);
745 }
746
747 /*
748 * Account for kernel memory (text, data, bss, vm shenanigans).
749 * This may include inaccessible "holes" as determined by what
750 * the machine-dependent init code includes in max_mem.
751 */
752 vm_page_wire_count = (atop_64(max_mem) - (vm_page_free_count
753 + vm_page_active_count
754 + vm_page_inactive_count));
755 }
756
757
758 /*
759 * Routine: copyinmap
760 * Purpose:
761 * Like copyin, except that fromaddr is an address
762 * in the specified VM map. This implementation
763 * is incomplete; it handles the current user map
764 * and the kernel map/submaps.
765 */
766 kern_return_t
767 copyinmap(
768 vm_map_t map,
769 vm_map_offset_t fromaddr,
770 void *todata,
771 vm_size_t length)
772 {
773 kern_return_t kr = KERN_SUCCESS;
774 vm_map_t oldmap;
775
776 if (vm_map_pmap(map) == pmap_kernel())
777 {
778 /* assume a correct copy */
779 memcpy(todata, CAST_DOWN(void *, fromaddr), length);
780 }
781 else if (current_map() == map)
782 {
783 if (copyin(fromaddr, todata, length) != 0)
784 kr = KERN_INVALID_ADDRESS;
785 }
786 else
787 {
788 vm_map_reference(map);
789 oldmap = vm_map_switch(map);
790 if (copyin(fromaddr, todata, length) != 0)
791 kr = KERN_INVALID_ADDRESS;
792 vm_map_switch(oldmap);
793 vm_map_deallocate(map);
794 }
795 return kr;
796 }
797
798 /*
799 * Routine: copyoutmap
800 * Purpose:
801 * Like copyout, except that toaddr is an address
802 * in the specified VM map. This implementation
803 * is incomplete; it handles the current user map
804 * and the kernel map/submaps.
805 */
806 kern_return_t
807 copyoutmap(
808 vm_map_t map,
809 void *fromdata,
810 vm_map_address_t toaddr,
811 vm_size_t length)
812 {
813 if (vm_map_pmap(map) == pmap_kernel()) {
814 /* assume a correct copy */
815 memcpy(CAST_DOWN(void *, toaddr), fromdata, length);
816 return KERN_SUCCESS;
817 }
818
819 if (current_map() != map)
820 return KERN_NOT_SUPPORTED;
821
822 if (copyout(fromdata, toaddr, length) != 0)
823 return KERN_INVALID_ADDRESS;
824
825 return KERN_SUCCESS;
826 }
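
/*
 * Illustrative sketch (editor's addition, not part of the original xnu
 * source): copying a small value out of an arbitrary map and back.
 * Unlike plain copyin/copyout, the map is named explicitly; note that
 * copyoutmap returns KERN_NOT_SUPPORTED when the map is neither the
 * kernel map nor the current map, as the code above shows.  The
 * "example_" name is invented.
 */
static kern_return_t
example_increment_word(vm_map_t map, vm_map_offset_t uaddr)
{
	int value;
	kern_return_t kr;

	kr = copyinmap(map, uaddr, &value, sizeof (value));
	if (kr != KERN_SUCCESS)
		return kr;

	value++;
	return copyoutmap(map, &value, uaddr, sizeof (value));
}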
827
828
829 kern_return_t
830 vm_conflict_check(
831 vm_map_t map,
832 vm_map_offset_t off,
833 vm_map_size_t len,
834 memory_object_t pager,
835 vm_object_offset_t file_off)
836 {
837 vm_map_entry_t entry;
838 vm_object_t obj;
839 vm_object_offset_t obj_off;
840 vm_map_t base_map;
841 vm_map_offset_t base_offset;
842 vm_map_offset_t original_offset;
843 kern_return_t kr;
844 vm_map_size_t local_len;
845
846 base_map = map;
847 base_offset = off;
848 original_offset = off;
849 kr = KERN_SUCCESS;
850 vm_map_lock(map);
851 while(vm_map_lookup_entry(map, off, &entry)) {
852 local_len = len;
853
854 if (entry->object.vm_object == VM_OBJECT_NULL) {
855 vm_map_unlock(map);
856 return KERN_SUCCESS;
857 }
858 if (entry->is_sub_map) {
859 vm_map_t old_map;
860
861 old_map = map;
862 vm_map_lock(entry->object.sub_map);
863 map = entry->object.sub_map;
864 off = entry->offset + (off - entry->vme_start);
865 vm_map_unlock(old_map);
866 continue;
867 }
868 obj = entry->object.vm_object;
869 obj_off = (off - entry->vme_start) + entry->offset;
870 while(obj->shadow) {
871 obj_off += obj->shadow_offset;
872 obj = obj->shadow;
873 }
874 if((obj->pager_created) && (obj->pager == pager)) {
875 if(((obj->paging_offset) + obj_off) == file_off) {
876 if(off != base_offset) {
877 vm_map_unlock(map);
878 return KERN_FAILURE;
879 }
880 kr = KERN_ALREADY_WAITING;
881 } else {
882 vm_object_offset_t obj_off_aligned;
883 vm_object_offset_t file_off_aligned;
884
885 obj_off_aligned = obj_off & ~PAGE_MASK;
886 file_off_aligned = file_off & ~PAGE_MASK;
887
888 if (file_off_aligned == (obj->paging_offset + obj_off_aligned)) {
889 /*
890 * the target map and the file offset start in the same page
891 * but are not identical...
892 */
893 vm_map_unlock(map);
894 return KERN_FAILURE;
895 }
896 if ((file_off < (obj->paging_offset + obj_off_aligned)) &&
897 ((file_off + len) > (obj->paging_offset + obj_off_aligned))) {
898 /*
899 * some portion of the tail of the I/O will fall
900 					 * within the extent of the target map
901 */
902 vm_map_unlock(map);
903 return KERN_FAILURE;
904 }
905 if ((file_off_aligned > (obj->paging_offset + obj_off)) &&
906 (file_off_aligned < (obj->paging_offset + obj_off) + len)) {
907 /*
908 * the beginning page of the file offset falls within
909 					 * the target map's extent
910 */
911 vm_map_unlock(map);
912 return KERN_FAILURE;
913 }
914 }
915 } else if(kr != KERN_SUCCESS) {
916 vm_map_unlock(map);
917 return KERN_FAILURE;
918 }
919
920 if(len <= ((entry->vme_end - entry->vme_start) -
921 (off - entry->vme_start))) {
922 vm_map_unlock(map);
923 return kr;
924 } else {
925 len -= (entry->vme_end - entry->vme_start) -
926 (off - entry->vme_start);
927 }
928 base_offset = base_offset + (local_len - len);
929 file_off = file_off + (local_len - len);
930 off = base_offset;
931 if(map != base_map) {
932 vm_map_unlock(map);
933 vm_map_lock(base_map);
934 map = base_map;
935 }
936 }
937
938 vm_map_unlock(map);
939 return kr;
940 }