apple/xnu.git (xnu-792.12.6) - osfmk/vm/vm_kern.c
1 /*
2 * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_OSREFERENCE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the
10 * License may not be used to create, or enable the creation or
11 * redistribution of, unlawful or unlicensed copies of an Apple operating
12 * system, or to circumvent, violate, or enable the circumvention or
13 * violation of, any terms of an Apple operating system software license
14 * agreement.
15 *
16 * Please obtain a copy of the License at
17 * http://www.opensource.apple.com/apsl/ and read it before using this
18 * file.
19 *
20 * The Original Code and all software distributed under the License are
21 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
22 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
23 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
24 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
25 * Please see the License for the specific language governing rights and
26 * limitations under the License.
27 *
28 * @APPLE_LICENSE_OSREFERENCE_HEADER_END@
29 */
30 /*
31 * @OSF_COPYRIGHT@
32 */
33 /*
34 * Mach Operating System
35 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
36 * All Rights Reserved.
37 *
38 * Permission to use, copy, modify and distribute this software and its
39 * documentation is hereby granted, provided that both the copyright
40 * notice and this permission notice appear in all copies of the
41 * software, derivative works or modified versions, and any portions
42 * thereof, and that both notices appear in supporting documentation.
43 *
44 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
45 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
46 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
47 *
48 * Carnegie Mellon requests users of this software to return to
49 *
50 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
51 * School of Computer Science
52 * Carnegie Mellon University
53 * Pittsburgh PA 15213-3890
54 *
55 * any improvements or extensions that they make and grant Carnegie Mellon
56 * the rights to redistribute these changes.
57 */
58 /*
59 */
60 /*
61 * File: vm/vm_kern.c
62 * Author: Avadis Tevanian, Jr., Michael Wayne Young
63 * Date: 1985
64 *
65 * Kernel memory management.
66 */
67
68 #include <mach/kern_return.h>
69 #include <mach/vm_param.h>
70 #include <kern/assert.h>
71 #include <kern/lock.h>
72 #include <kern/thread.h>
73 #include <vm/vm_kern.h>
74 #include <vm/vm_map.h>
75 #include <vm/vm_object.h>
76 #include <vm/vm_page.h>
77 #include <vm/vm_pageout.h>
78 #include <kern/misc_protos.h>
79 #include <vm/cpm.h>
80
81 #include <string.h>
82 /*
83 * Variables exported by this module.
84 */
85
86 vm_map_t kernel_map;
87 vm_map_t kernel_pageable_map;
88
89 /*
90 * Forward declarations for internal functions.
91 */
92 extern kern_return_t kmem_alloc_pages(
93 register vm_object_t object,
94 register vm_object_offset_t offset,
95 register vm_object_size_t size);
96
97 extern void kmem_remap_pages(
98 register vm_object_t object,
99 register vm_object_offset_t offset,
100 register vm_offset_t start,
101 register vm_offset_t end,
102 vm_prot_t protection);
103
104 kern_return_t
105 kmem_alloc_contig(
106 vm_map_t map,
107 vm_offset_t *addrp,
108 vm_size_t size,
109 vm_offset_t mask,
110 int flags)
111 {
112 vm_object_t object;
113 vm_object_offset_t offset;
114 vm_map_offset_t map_addr;
115 vm_map_offset_t map_mask;
116 vm_map_size_t map_size, i;
117 vm_map_entry_t entry;
118 vm_page_t m, pages;
119 kern_return_t kr;
120
121 if (map == VM_MAP_NULL || (flags && (flags ^ KMA_KOBJECT)))
122 return KERN_INVALID_ARGUMENT;
123
124 if (size == 0) {
125 *addrp = 0;
126 return KERN_INVALID_ARGUMENT;
127 }
128
129 map_size = vm_map_round_page(size);
130 map_mask = (vm_map_offset_t)mask;
131
132 /*
133 * Allocate a new object (if necessary) and the reference we
134 * will be donating to the map entry. We must do this before
135 * locking the map, or risk deadlock with the default pager.
136 */
137 if ((flags & KMA_KOBJECT) != 0) {
138 object = kernel_object;
139 vm_object_reference(object);
140 } else {
141 object = vm_object_allocate(map_size);
142 }
143
144 kr = vm_map_find_space(map, &map_addr, map_size, map_mask, &entry);
145 if (KERN_SUCCESS != kr) {
146 vm_object_deallocate(object);
147 return kr;
148 }
149
150 entry->object.vm_object = object;
151 entry->offset = offset = (object == kernel_object) ?
152 map_addr - VM_MIN_KERNEL_ADDRESS : 0;
153
154 /* Take an extra object ref in case the map entry gets deleted */
155 vm_object_reference(object);
156 vm_map_unlock(map);
157
158 kr = cpm_allocate(CAST_DOWN(vm_size_t, map_size), &pages, FALSE);
159
160 if (kr != KERN_SUCCESS) {
161 vm_map_remove(map, vm_map_trunc_page(map_addr),
162 vm_map_round_page(map_addr + map_size), 0);
163 vm_object_deallocate(object);
164 *addrp = 0;
165 return kr;
166 }
167
168 vm_object_lock(object);
169 for (i = 0; i < map_size; i += PAGE_SIZE) {
170 m = pages;
171 pages = NEXT_PAGE(m);
172 m->busy = FALSE;
173 vm_page_insert(m, object, offset + i);
174 }
175 vm_object_unlock(object);
176
177 if ((kr = vm_map_wire(map, vm_map_trunc_page(map_addr),
178 vm_map_round_page(map_addr + map_size), VM_PROT_DEFAULT, FALSE))
179 != KERN_SUCCESS) {
180 if (object == kernel_object) {
181 vm_object_lock(object);
182 vm_object_page_remove(object, offset, offset + map_size);
183 vm_object_unlock(object);
184 }
185 vm_map_remove(map, vm_map_trunc_page(map_addr),
186 vm_map_round_page(map_addr + map_size), 0);
187 vm_object_deallocate(object);
188 return kr;
189 }
190 vm_object_deallocate(object);
191
192 if (object == kernel_object)
193 vm_map_simplify(map, map_addr);
194
195 *addrp = map_addr;
196 return KERN_SUCCESS;
197 }
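/*
 * Illustrative usage sketch, not part of the original file: how a caller
 * might obtain a physically contiguous, wired region via kmem_alloc_contig().
 * The wrapper name and buffer size below are hypothetical.
 */
#if 0	/* sketch only */
static kern_return_t
example_alloc_contig(vm_offset_t *bufp)
{
	vm_size_t	size = 4 * PAGE_SIZE;	/* hypothetical request */
	kern_return_t	kr;

	/* mask 0: no extra alignment; flags 0: back with a fresh VM object */
	kr = kmem_alloc_contig(kernel_map, bufp, size, 0, 0);
	if (kr != KERN_SUCCESS)
		return kr;	/* e.g. cpm_allocate found no contiguous pages */
	/* ... use the buffer ... */
	kmem_free(kernel_map, *bufp, size);
	return KERN_SUCCESS;
}
#endif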
198
199 /*
200 * Master entry point for allocating kernel memory.
201 * NOTE: this routine is _never_ interrupt safe.
202 *
203 * map : map to allocate into
204 * addrp : pointer to start address of new memory
205 * size : size of memory requested
206 * flags : options
207 * KMA_HERE *addrp is base address, else "anywhere"
208 * KMA_NOPAGEWAIT don't wait for pages if unavailable
209 * KMA_KOBJECT use kernel_object
210 */
211
212 kern_return_t
213 kernel_memory_allocate(
214 register vm_map_t map,
215 register vm_offset_t *addrp,
216 register vm_size_t size,
217 register vm_offset_t mask,
218 int flags)
219 {
220 vm_object_t object;
221 vm_object_offset_t offset;
222 vm_map_entry_t entry;
223 vm_map_offset_t map_addr;
224 vm_map_offset_t map_mask;
225 vm_map_size_t map_size;
226 vm_map_size_t i;
227 kern_return_t kr;
228
229 if (size == 0) {
230 *addrp = 0;
231 return KERN_INVALID_ARGUMENT;
232 }
233
234 map_size = vm_map_round_page(size);
235 map_mask = (vm_map_offset_t) mask;
236
237 /*
238 * Allocate a new object (if necessary). We must do this before
239 * locking the map, or risk deadlock with the default pager.
240 */
241 if ((flags & KMA_KOBJECT) != 0) {
242 object = kernel_object;
243 vm_object_reference(object);
244 } else {
245 object = vm_object_allocate(map_size);
246 }
247
248 kr = vm_map_find_space(map, &map_addr, map_size, map_mask, &entry);
249 if (KERN_SUCCESS != kr) {
250 vm_object_deallocate(object);
251 return kr;
252 }
253
254 entry->object.vm_object = object;
255 entry->offset = offset = (object == kernel_object) ?
256 map_addr - VM_MIN_KERNEL_ADDRESS : 0;
257
258 vm_object_reference(object);
259 vm_map_unlock(map);
260
261 vm_object_lock(object);
262 for (i = 0; i < map_size; i += PAGE_SIZE) {
263 vm_page_t mem;
264
265 while (VM_PAGE_NULL ==
266 (mem = vm_page_alloc(object, offset + i))) {
267 if (flags & KMA_NOPAGEWAIT) {
268 if (object == kernel_object)
269 vm_object_page_remove(object, offset, offset + i);
270 vm_object_unlock(object);
271 vm_map_remove(map, map_addr, map_addr + map_size, 0);
272 vm_object_deallocate(object);
273 return KERN_RESOURCE_SHORTAGE;
274 }
275 vm_object_unlock(object);
276 VM_PAGE_WAIT();
277 vm_object_lock(object);
278 }
279 mem->busy = FALSE;
280 }
281 vm_object_unlock(object);
282
283 if ((kr = vm_map_wire(map, map_addr, map_addr + map_size, VM_PROT_DEFAULT, FALSE))
284 != KERN_SUCCESS) {
285 if (object == kernel_object) {
286 vm_object_lock(object);
287 vm_object_page_remove(object, offset, offset + map_size);
288 vm_object_unlock(object);
289 }
290 vm_map_remove(map, map_addr, map_addr + map_size, 0);
291 vm_object_deallocate(object);
292 return (kr);
293 }
294 /* now that the pages are wired, we no longer have to fear coalescing */
295 vm_object_deallocate(object);
296 if (object == kernel_object)
297 vm_map_simplify(map, map_addr);
298
299 /*
300 * Return the memory, not zeroed.
301 */
302 *addrp = CAST_DOWN(vm_offset_t, map_addr);
303 return KERN_SUCCESS;
304 }
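/*
 * Illustrative usage sketch, not part of the original file: per the flag
 * description above, KMA_KOBJECT backs the range with kernel_object and
 * KMA_NOPAGEWAIT returns KERN_RESOURCE_SHORTAGE instead of blocking in
 * VM_PAGE_WAIT() when pages are scarce.  The wrapper name is hypothetical.
 */
#if 0	/* sketch only */
static kern_return_t
example_wired_nowait(vm_offset_t *addrp, vm_size_t size)
{
	return kernel_memory_allocate(kernel_map, addrp, size,
				      0,	/* mask: no alignment constraint */
				      KMA_KOBJECT | KMA_NOPAGEWAIT);
}
#endif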
305
306 /*
307 * kmem_alloc:
308 *
309 * Allocate wired-down memory in the kernel's address map
310 * or a submap. The memory is not zero-filled.
311 */
312
313 kern_return_t
314 kmem_alloc(
315 vm_map_t map,
316 vm_offset_t *addrp,
317 vm_size_t size)
318 {
319 return kernel_memory_allocate(map, addrp, size, 0, 0);
320 }
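/*
 * Illustrative usage sketch, not part of the original file: the usual
 * kmem_alloc()/kmem_free() pairing for a temporary wired buffer.  The
 * function name and buffer size are hypothetical.
 */
#if 0	/* sketch only */
static void
example_temp_buffer(void)
{
	vm_offset_t	buf;
	vm_size_t	size = PAGE_SIZE;

	if (kmem_alloc(kernel_map, &buf, size) != KERN_SUCCESS)
		return;
	/* the memory is wired but not zero-filled; clear it before use if needed */
	memset((void *) buf, 0, size);
	kmem_free(kernel_map, buf, size);
}
#endif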
321
322 /*
323 * kmem_realloc:
324 *
325 * Reallocate wired-down memory in the kernel's address map
326 * or a submap. Newly allocated pages are not zeroed.
327 * This can only be used on regions allocated with kmem_alloc.
328 *
329 * If successful, the pages in the old region are mapped twice.
330 * The old region is unchanged. Use kmem_free to get rid of it.
331 */
332 kern_return_t
333 kmem_realloc(
334 vm_map_t map,
335 vm_offset_t oldaddr,
336 vm_size_t oldsize,
337 vm_offset_t *newaddrp,
338 vm_size_t newsize)
339 {
340 vm_object_t object;
341 vm_object_offset_t offset;
342 vm_map_offset_t oldmapmin;
343 vm_map_offset_t oldmapmax;
344 vm_map_offset_t newmapaddr;
345 vm_map_size_t oldmapsize;
346 vm_map_size_t newmapsize;
347 vm_map_entry_t oldentry;
348 vm_map_entry_t newentry;
349 vm_page_t mem;
350 kern_return_t kr;
351
352 oldmapmin = vm_map_trunc_page(oldaddr);
353 oldmapmax = vm_map_round_page(oldaddr + oldsize);
354 oldmapsize = oldmapmax - oldmapmin;
355 newmapsize = vm_map_round_page(newsize);
356
357
358 /*
359 * Find the VM object backing the old region.
360 */
361
362 vm_map_lock(map);
363
364 if (!vm_map_lookup_entry(map, oldmapmin, &oldentry))
365 panic("kmem_realloc");
366 object = oldentry->object.vm_object;
367
368 /*
369 * Increase the size of the object and
370 * fill in the new region.
371 */
372
373 vm_object_reference(object);
374 /* by grabbing the object lock before unlocking the map */
375 /* we guarantee that we will panic if more than one */
376 /* attempt is made to realloc a kmem_alloc'd area */
377 vm_object_lock(object);
378 vm_map_unlock(map);
379 if (object->size != oldmapsize)
380 panic("kmem_realloc");
381 object->size = newmapsize;
382 vm_object_unlock(object);
383
384 /* allocate the new pages while the expanded portion of the */
385 /* object is still not mapped */
386 kmem_alloc_pages(object, vm_object_round_page(oldmapsize),
387 vm_object_round_page(newmapsize-oldmapsize));
388
389 /*
390 * Find space for the new region.
391 */
392
393 kr = vm_map_find_space(map, &newmapaddr, newmapsize,
394 (vm_map_offset_t) 0, &newentry);
395 if (kr != KERN_SUCCESS) {
396 vm_object_lock(object);
397 for(offset = oldmapsize;
398 offset < newmapsize; offset += PAGE_SIZE) {
399 if ((mem = vm_page_lookup(object, offset)) != VM_PAGE_NULL) {
400 vm_page_lock_queues();
401 vm_page_free(mem);
402 vm_page_unlock_queues();
403 }
404 }
405 object->size = oldmapsize;
406 vm_object_unlock(object);
407 vm_object_deallocate(object);
408 return kr;
409 }
410 newentry->object.vm_object = object;
411 newentry->offset = 0;
412 assert (newentry->wired_count == 0);
413
414
415 /* add an extra reference in case we have someone doing an */
416 /* unexpected deallocate */
417 vm_object_reference(object);
418 vm_map_unlock(map);
419
420 kr = vm_map_wire(map, newmapaddr, newmapaddr + newmapsize, VM_PROT_DEFAULT, FALSE);
421 if (KERN_SUCCESS != kr) {
422 vm_map_remove(map, newmapaddr, newmapaddr + newmapsize, 0);
423 vm_object_lock(object);
424 for(offset = oldmapsize; offset < newmapsize; offset += PAGE_SIZE) {
425 if ((mem = vm_page_lookup(object, offset)) != VM_PAGE_NULL) {
426 vm_page_lock_queues();
427 vm_page_free(mem);
428 vm_page_unlock_queues();
429 }
430 }
431 object->size = oldmapsize;
432 vm_object_unlock(object);
433 vm_object_deallocate(object);
434 return (kr);
435 }
436 vm_object_deallocate(object);
437
438 *newaddrp = CAST_DOWN(vm_offset_t, newmapaddr);
439 return KERN_SUCCESS;
440 }
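/*
 * Illustrative usage sketch, not part of the original file: on success
 * kmem_realloc() leaves the original pages mapped at both the old and the
 * new address, so the caller drops the old mapping with kmem_free().  The
 * doubling policy below is hypothetical.
 */
#if 0	/* sketch only */
static kern_return_t
example_grow_region(vm_offset_t *addrp, vm_size_t *sizep)
{
	vm_offset_t	newaddr;
	vm_size_t	newsize = *sizep * 2;
	kern_return_t	kr;

	kr = kmem_realloc(kernel_map, *addrp, *sizep, &newaddr, newsize);
	if (kr != KERN_SUCCESS)
		return kr;
	kmem_free(kernel_map, *addrp, *sizep);	/* release the old mapping */
	*addrp = newaddr;
	*sizep = newsize;
	return KERN_SUCCESS;
}
#endif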
441
442 /*
443 * kmem_alloc_wired:
444 *
445 * Allocate wired-down memory in the kernel's address map
446 * or a submap. The memory is not zero-filled.
447 *
448 * The memory is allocated in the kernel_object.
449 * It may not be copied with vm_map_copy, and
450 * it may not be reallocated with kmem_realloc.
451 */
452
453 kern_return_t
454 kmem_alloc_wired(
455 vm_map_t map,
456 vm_offset_t *addrp,
457 vm_size_t size)
458 {
459 return kernel_memory_allocate(map, addrp, size, 0, KMA_KOBJECT);
460 }
461
462 /*
463 * kmem_alloc_aligned:
464 *
465 * Like kmem_alloc_wired, except that the memory is aligned.
466 * The size should be a power-of-2.
467 */
468
469 kern_return_t
470 kmem_alloc_aligned(
471 vm_map_t map,
472 vm_offset_t *addrp,
473 vm_size_t size)
474 {
475 if ((size & (size - 1)) != 0)
476 panic("kmem_alloc_aligned: size not aligned");
477 return kernel_memory_allocate(map, addrp, size, size - 1, KMA_KOBJECT);
478 }
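/*
 * Illustrative usage sketch, not part of the original file: because size is
 * a power of two, the mask (size - 1) passed to kernel_memory_allocate()
 * above forces a size-aligned start address.  The 64KB figure is hypothetical.
 */
#if 0	/* sketch only */
static kern_return_t
example_aligned_alloc(vm_offset_t *addrp)
{
	/* 64KB request: internally uses mask 0xFFFF, so *addrp is 64KB-aligned */
	return kmem_alloc_aligned(kernel_map, addrp, 64 * 1024);
}
#endif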
479
480 /*
481 * kmem_alloc_pageable:
482 *
483 * Allocate pageable memory in the kernel's address map.
484 */
485
486 kern_return_t
487 kmem_alloc_pageable(
488 vm_map_t map,
489 vm_offset_t *addrp,
490 vm_size_t size)
491 {
492 vm_map_offset_t map_addr;
493 vm_map_size_t map_size;
494 kern_return_t kr;
495
496 #ifndef normal
497 map_addr = (vm_map_min(map)) + 0x1000;
498 #else
499 map_addr = vm_map_min(map);
500 #endif
501 map_size = vm_map_round_page(size);
502
503 kr = vm_map_enter(map, &map_addr, map_size,
504 (vm_map_offset_t) 0, VM_FLAGS_ANYWHERE,
505 VM_OBJECT_NULL, (vm_object_offset_t) 0, FALSE,
506 VM_PROT_DEFAULT, VM_PROT_ALL, VM_INHERIT_DEFAULT);
507
508 if (kr != KERN_SUCCESS)
509 return kr;
510
511 *addrp = CAST_DOWN(vm_offset_t, map_addr);
512 return KERN_SUCCESS;
513 }
514
515 /*
516 * kmem_free:
517 *
518 * Release a region of kernel virtual memory allocated
519 * with kmem_alloc, kmem_alloc_wired, or kmem_alloc_pageable,
520 * and return the physical pages associated with that region.
521 */
522
523 void
524 kmem_free(
525 vm_map_t map,
526 vm_offset_t addr,
527 vm_size_t size)
528 {
529 kern_return_t kr;
530
531 kr = vm_map_remove(map, vm_map_trunc_page(addr),
532 vm_map_round_page(addr + size),
533 VM_MAP_REMOVE_KUNWIRE);
534 if (kr != KERN_SUCCESS)
535 panic("kmem_free");
536 }
537
538 /*
539 * Allocate new pages in an object.
540 */
541
542 kern_return_t
543 kmem_alloc_pages(
544 register vm_object_t object,
545 register vm_object_offset_t offset,
546 register vm_object_size_t size)
547 {
548 vm_object_size_t alloc_size;
549
550 alloc_size = vm_object_round_page(size);
551 vm_object_lock(object);
552 while (alloc_size) {
553 register vm_page_t mem;
554
555
556 /*
557 * Allocate a page
558 */
559 while (VM_PAGE_NULL ==
560 (mem = vm_page_alloc(object, offset))) {
561 vm_object_unlock(object);
562 VM_PAGE_WAIT();
563 vm_object_lock(object);
564 }
565 mem->busy = FALSE;
566
567 alloc_size -= PAGE_SIZE;
568 offset += PAGE_SIZE;
569 }
570 vm_object_unlock(object);
571 return KERN_SUCCESS;
572 }
573
574 /*
575 * Remap wired pages in an object into a new region.
576 * The object is assumed to be mapped into the kernel map or
577 * a submap.
578 */
579 void
580 kmem_remap_pages(
581 register vm_object_t object,
582 register vm_object_offset_t offset,
583 register vm_offset_t start,
584 register vm_offset_t end,
585 vm_prot_t protection)
586 {
587
588 vm_map_offset_t map_start;
589 vm_map_offset_t map_end;
590
591 /*
592 * Mark the pmap region as not pageable.
593 */
594 map_start = vm_map_trunc_page(start);
595 map_end = vm_map_round_page(end);
596
597 pmap_pageable(kernel_pmap, map_start, map_end, FALSE);
598
599 while (map_start < map_end) {
600 register vm_page_t mem;
601
602 vm_object_lock(object);
603
604 /*
605 * Find a page
606 */
607 if ((mem = vm_page_lookup(object, offset)) == VM_PAGE_NULL)
608 panic("kmem_remap_pages");
609
610 /*
611 * Wire it down (again)
612 */
613 vm_page_lock_queues();
614 vm_page_wire(mem);
615 vm_page_unlock_queues();
616 vm_object_unlock(object);
617
618 /*
619 * ENCRYPTED SWAP:
620 * The page is supposed to be wired now, so it
621 * shouldn't be encrypted at this point. It can
622 * safely be entered in the page table.
623 */
624 ASSERT_PAGE_DECRYPTED(mem);
625
626 /*
627 * Enter it in the kernel pmap. The page isn't busy,
628 * but this shouldn't be a problem because it is wired.
629 */
630 PMAP_ENTER(kernel_pmap, map_start, mem, protection,
631 ((unsigned int)(mem->object->wimg_bits))
632 & VM_WIMG_MASK,
633 TRUE);
634
635 map_start += PAGE_SIZE;
636 offset += PAGE_SIZE;
637 }
638 }
639
640 /*
641 * kmem_suballoc:
642 *
643 * Allocates a map to manage a subrange
644 * of the kernel virtual address space.
645 *
646 * Arguments are as follows:
647 *
648 * parent Map to take range from
649 * addr Address of start of range (IN/OUT)
650 * size Size of range to find
651 * pageable Can region be paged
652 * anywhere Can region be located anywhere in map
653 * new_map Pointer to new submap
654 */
655 kern_return_t
656 kmem_suballoc(
657 vm_map_t parent,
658 vm_offset_t *addr,
659 vm_size_t size,
660 boolean_t pageable,
661 int flags,
662 vm_map_t *new_map)
663 {
664 vm_map_t map;
665 vm_map_offset_t map_addr;
666 vm_map_size_t map_size;
667 kern_return_t kr;
668
669 map_size = vm_map_round_page(size);
670
671 /*
672 * Need reference on submap object because it is internal
673 * to the vm_system. vm_object_enter will never be called
674 * on it (usual source of reference for vm_map_enter).
675 */
676 vm_object_reference(vm_submap_object);
677
678 map_addr = (flags & VM_FLAGS_ANYWHERE) ?
679 vm_map_min(parent) : vm_map_trunc_page(*addr);
680
681 kr = vm_map_enter(parent, &map_addr, map_size,
682 (vm_map_offset_t) 0, flags,
683 vm_submap_object, (vm_object_offset_t) 0, FALSE,
684 VM_PROT_DEFAULT, VM_PROT_ALL, VM_INHERIT_DEFAULT);
685 if (kr != KERN_SUCCESS) {
686 vm_object_deallocate(vm_submap_object);
687 return (kr);
688 }
689
690 pmap_reference(vm_map_pmap(parent));
691 map = vm_map_create(vm_map_pmap(parent), map_addr, map_addr + map_size, pageable);
692 if (map == VM_MAP_NULL)
693 panic("kmem_suballoc: vm_map_create failed"); /* "can't happen" */
694
695 kr = vm_map_submap(parent, map_addr, map_addr + map_size, map, map_addr, FALSE);
696 if (kr != KERN_SUCCESS) {
697 /*
698 * See comment preceding vm_map_submap().
699 */
700 vm_map_remove(parent, map_addr, map_addr + map_size, VM_MAP_NO_FLAGS);
701 vm_map_deallocate(map); /* also removes ref to pmap */
702 vm_object_deallocate(vm_submap_object);
703 return (kr);
704 }
705 *addr = CAST_DOWN(vm_offset_t, map_addr);
706 *new_map = map;
707 return (KERN_SUCCESS);
708 }
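/*
 * Illustrative usage sketch, not part of the original file, matching the
 * argument description above: carve a pageable submap out of kernel_map,
 * letting VM_FLAGS_ANYWHERE choose the range.  The wrapper name is
 * hypothetical.
 */
#if 0	/* sketch only */
static vm_map_t
example_make_submap(vm_size_t size)
{
	vm_map_t	submap;
	vm_offset_t	addr = 0;	/* ignored with VM_FLAGS_ANYWHERE */
	kern_return_t	kr;

	kr = kmem_suballoc(kernel_map, &addr, size,
			   TRUE,		/* pageable */
			   VM_FLAGS_ANYWHERE,
			   &submap);
	return (kr == KERN_SUCCESS) ? submap : VM_MAP_NULL;
}
#endif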
709
710 /*
711 * kmem_init:
712 *
713 * Initialize the kernel's virtual memory map, taking
714 * into account all memory allocated up to this time.
715 */
716 void
717 kmem_init(
718 vm_offset_t start,
719 vm_offset_t end)
720 {
721 vm_map_offset_t map_start;
722 vm_map_offset_t map_end;
723
724 map_start = vm_map_trunc_page(start);
725 map_end = vm_map_round_page(end);
726
727 kernel_map = vm_map_create(pmap_kernel(),VM_MIN_KERNEL_ADDRESS,
728 map_end, FALSE);
729
730 /*
731 * Reserve virtual memory allocated up to this time.
732 */
733
734 if (start != VM_MIN_KERNEL_ADDRESS) {
735 vm_map_offset_t map_addr;
736
737 map_addr = VM_MIN_KERNEL_ADDRESS;
738 (void) vm_map_enter(kernel_map,
739 &map_addr,
740 (vm_map_size_t)(map_start - VM_MIN_KERNEL_ADDRESS),
741 (vm_map_offset_t) 0,
742 VM_FLAGS_ANYWHERE | VM_FLAGS_NO_PMAP_CHECK,
743 VM_OBJECT_NULL,
744 (vm_object_offset_t) 0, FALSE,
745 VM_PROT_DEFAULT, VM_PROT_ALL,
746 VM_INHERIT_DEFAULT);
747 }
748
749 /*
750 * Account for kernel memory (text, data, bss, vm shenanigans).
751 * This may include inaccessible "holes" as determined by what
752 * the machine-dependent init code includes in max_mem.
753 */
754 vm_page_wire_count = (atop_64(max_mem) - (vm_page_free_count
755 + vm_page_active_count
756 + vm_page_inactive_count));
757 }
758
759
760 /*
761 * Routine: copyinmap
762 * Purpose:
763 * Like copyin, except that fromaddr is an address
764 * in the specified VM map. This implementation
765 * is incomplete; it handles the current user map
766 * and the kernel map/submaps.
767 */
768 kern_return_t
769 copyinmap(
770 vm_map_t map,
771 vm_map_offset_t fromaddr,
772 void *todata,
773 vm_size_t length)
774 {
775 kern_return_t kr = KERN_SUCCESS;
776 vm_map_t oldmap;
777
778 if (vm_map_pmap(map) == pmap_kernel())
779 {
780 /* assume a correct copy */
781 memcpy(todata, CAST_DOWN(void *, fromaddr), length);
782 }
783 else if (current_map() == map)
784 {
785 if (copyin(fromaddr, todata, length) != 0)
786 kr = KERN_INVALID_ADDRESS;
787 }
788 else
789 {
790 vm_map_reference(map);
791 oldmap = vm_map_switch(map);
792 if (copyin(fromaddr, todata, length) != 0)
793 kr = KERN_INVALID_ADDRESS;
794 vm_map_switch(oldmap);
795 vm_map_deallocate(map);
796 }
797 return kr;
798 }
799
800 /*
801 * Routine: copyoutmap
802 * Purpose:
803 * Like copyout, except that toaddr is an address
804 * in the specified VM map. This implementation
805 * is incomplete; it handles the current user map
806 * and the kernel map/submaps.
807 */
808 kern_return_t
809 copyoutmap(
810 vm_map_t map,
811 void *fromdata,
812 vm_map_address_t toaddr,
813 vm_size_t length)
814 {
815 if (vm_map_pmap(map) == pmap_kernel()) {
816 /* assume a correct copy */
817 memcpy(CAST_DOWN(void *, toaddr), fromdata, length);
818 return KERN_SUCCESS;
819 }
820
821 if (current_map() != map)
822 return KERN_NOT_SUPPORTED;
823
824 if (copyout(fromdata, toaddr, length) != 0)
825 return KERN_INVALID_ADDRESS;
826
827 return KERN_SUCCESS;
828 }
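/*
 * Illustrative usage sketch, not part of the original file: copyinmap() and
 * copyoutmap() behave like copyin()/copyout() but take an explicit map; as
 * the comments above note, only the kernel map/submaps and the current user
 * map are handled.  The round-trip below is hypothetical.
 */
#if 0	/* sketch only */
static kern_return_t
example_copy_roundtrip(vm_map_t map, vm_map_offset_t addr)
{
	int		value;
	kern_return_t	kr;

	kr = copyinmap(map, addr, &value, sizeof (value));
	if (kr != KERN_SUCCESS)
		return kr;	/* KERN_INVALID_ADDRESS on a bad source address */
	return copyoutmap(map, &value, addr, sizeof (value));
}
#endif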
829
830
831 kern_return_t
832 vm_conflict_check(
833 vm_map_t map,
834 vm_map_offset_t off,
835 vm_map_size_t len,
836 memory_object_t pager,
837 vm_object_offset_t file_off)
838 {
839 vm_map_entry_t entry;
840 vm_object_t obj;
841 vm_object_offset_t obj_off;
842 vm_map_t base_map;
843 vm_map_offset_t base_offset;
844 vm_map_offset_t original_offset;
845 kern_return_t kr;
846 vm_map_size_t local_len;
847
848 base_map = map;
849 base_offset = off;
850 original_offset = off;
851 kr = KERN_SUCCESS;
852 vm_map_lock(map);
853 while(vm_map_lookup_entry(map, off, &entry)) {
854 local_len = len;
855
856 if (entry->object.vm_object == VM_OBJECT_NULL) {
857 vm_map_unlock(map);
858 return KERN_SUCCESS;
859 }
860 if (entry->is_sub_map) {
861 vm_map_t old_map;
862
863 old_map = map;
864 vm_map_lock(entry->object.sub_map);
865 map = entry->object.sub_map;
866 off = entry->offset + (off - entry->vme_start);
867 vm_map_unlock(old_map);
868 continue;
869 }
870 obj = entry->object.vm_object;
871 obj_off = (off - entry->vme_start) + entry->offset;
872 while(obj->shadow) {
873 obj_off += obj->shadow_offset;
874 obj = obj->shadow;
875 }
876 if((obj->pager_created) && (obj->pager == pager)) {
877 if(((obj->paging_offset) + obj_off) == file_off) {
878 if(off != base_offset) {
879 vm_map_unlock(map);
880 return KERN_FAILURE;
881 }
882 kr = KERN_ALREADY_WAITING;
883 } else {
884 vm_object_offset_t obj_off_aligned;
885 vm_object_offset_t file_off_aligned;
886
887 obj_off_aligned = obj_off & ~PAGE_MASK;
888 file_off_aligned = file_off & ~PAGE_MASK;
889
890 if (file_off_aligned == (obj->paging_offset + obj_off_aligned)) {
891 /*
892 * the target map and the file offset start in the same page
893 * but are not identical...
894 */
895 vm_map_unlock(map);
896 return KERN_FAILURE;
897 }
898 if ((file_off < (obj->paging_offset + obj_off_aligned)) &&
899 ((file_off + len) > (obj->paging_offset + obj_off_aligned))) {
900 /*
901 * some portion of the tail of the I/O will fall
902 * within the encompass of the target map
903 */
904 vm_map_unlock(map);
905 return KERN_FAILURE;
906 }
907 if ((file_off_aligned > (obj->paging_offset + obj_off)) &&
908 (file_off_aligned < (obj->paging_offset + obj_off) + len)) {
909 /*
910 * the beginning page of the file offset falls within
911 * the target map's encompass
912 */
913 vm_map_unlock(map);
914 return KERN_FAILURE;
915 }
916 }
917 } else if(kr != KERN_SUCCESS) {
918 vm_map_unlock(map);
919 return KERN_FAILURE;
920 }
921
922 if(len <= ((entry->vme_end - entry->vme_start) -
923 (off - entry->vme_start))) {
924 vm_map_unlock(map);
925 return kr;
926 } else {
927 len -= (entry->vme_end - entry->vme_start) -
928 (off - entry->vme_start);
929 }
930 base_offset = base_offset + (local_len - len);
931 file_off = file_off + (local_len - len);
932 off = base_offset;
933 if(map != base_map) {
934 vm_map_unlock(map);
935 vm_map_lock(base_map);
936 map = base_map;
937 }
938 }
939
940 vm_map_unlock(map);
941 return kr;
942 }
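/*
 * Illustrative sketch, not part of the original file, of how a caller might
 * interpret vm_conflict_check()'s results as coded above: KERN_SUCCESS when
 * [off, off+len) does not overlap a mapping of this pager at file_off,
 * KERN_ALREADY_WAITING when it maps exactly the same file pages, and
 * KERN_FAILURE for a partial or misaligned overlap.  The wrapper is
 * hypothetical.
 */
#if 0	/* sketch only */
static boolean_t
example_io_would_conflict(
	vm_map_t		map,
	vm_map_offset_t		off,
	vm_map_size_t		len,
	memory_object_t		pager,
	vm_object_offset_t	file_off)
{
	return (vm_conflict_check(map, off, len, pager, file_off) == KERN_FAILURE);
}
#endif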