/* apple/xnu xnu-792.2.4: osfmk/vm/vm_kern.c */
/*
 * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */
/*
 * File:   vm/vm_kern.c
 * Author: Avadis Tevanian, Jr., Michael Wayne Young
 * Date:   1985
 *
 * Kernel memory management.
 */

#include <mach/kern_return.h>
#include <mach/vm_param.h>
#include <kern/assert.h>
#include <kern/lock.h>
#include <kern/thread.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <kern/misc_protos.h>
#include <vm/cpm.h>

#include <string.h>
/*
 * Variables exported by this module.
 */

vm_map_t kernel_map;
vm_map_t kernel_pageable_map;

/*
 * Forward declarations for internal functions.
 */
extern kern_return_t kmem_alloc_pages(
    register vm_object_t object,
    register vm_object_offset_t offset,
    register vm_object_size_t size);

extern void kmem_remap_pages(
    register vm_object_t object,
    register vm_object_offset_t offset,
    register vm_offset_t start,
    register vm_offset_t end,
    vm_prot_t protection);

kern_return_t
kmem_alloc_contig(
    vm_map_t map,
    vm_offset_t *addrp,
    vm_size_t size,
    vm_offset_t mask,
    int flags)
{
    vm_object_t object;
    vm_object_offset_t offset;
    vm_map_offset_t map_addr;
    vm_map_offset_t map_mask;
    vm_map_size_t map_size, i;
    vm_map_entry_t entry;
    vm_page_t m, pages;
    kern_return_t kr;

    if (map == VM_MAP_NULL || (flags && (flags ^ KMA_KOBJECT)))
        return KERN_INVALID_ARGUMENT;

    if (size == 0) {
        *addrp = 0;
        return KERN_INVALID_ARGUMENT;
    }

    map_size = vm_map_round_page(size);
    map_mask = (vm_map_offset_t)mask;

    /*
     * Allocate a new object (if necessary) and the reference we
     * will be donating to the map entry. We must do this before
     * locking the map, or risk deadlock with the default pager.
     */
    if ((flags & KMA_KOBJECT) != 0) {
        object = kernel_object;
        vm_object_reference(object);
    } else {
        object = vm_object_allocate(map_size);
    }

    kr = vm_map_find_space(map, &map_addr, map_size, map_mask, &entry);
    if (KERN_SUCCESS != kr) {
        vm_object_deallocate(object);
        return kr;
    }

    entry->object.vm_object = object;
    entry->offset = offset = (object == kernel_object) ?
        map_addr - VM_MIN_KERNEL_ADDRESS : 0;

    /* Take an extra object ref in case the map entry gets deleted */
    vm_object_reference(object);
    vm_map_unlock(map);

    kr = cpm_allocate(CAST_DOWN(vm_size_t, map_size), &pages, FALSE);

    if (kr != KERN_SUCCESS) {
        vm_map_remove(map, vm_map_trunc_page(map_addr),
                      vm_map_round_page(map_addr + map_size), 0);
        vm_object_deallocate(object);
        *addrp = 0;
        return kr;
    }

    vm_object_lock(object);
    for (i = 0; i < map_size; i += PAGE_SIZE) {
        m = pages;
        pages = NEXT_PAGE(m);
        m->busy = FALSE;
        vm_page_insert(m, object, offset + i);
    }
    vm_object_unlock(object);

    if ((kr = vm_map_wire(map, vm_map_trunc_page(map_addr),
                          vm_map_round_page(map_addr + map_size),
                          VM_PROT_DEFAULT, FALSE))
        != KERN_SUCCESS) {
        if (object == kernel_object) {
            vm_object_lock(object);
            vm_object_page_remove(object, offset, offset + map_size);
            vm_object_unlock(object);
        }
        vm_map_remove(map, vm_map_trunc_page(map_addr),
                      vm_map_round_page(map_addr + map_size), 0);
        vm_object_deallocate(object);
        return kr;
    }
    vm_object_deallocate(object);

    if (object == kernel_object)
        vm_map_simplify(map, map_addr);

    *addrp = map_addr;
    return KERN_SUCCESS;
}

/*
 * Master entry point for allocating kernel memory.
 * NOTE: this routine is _never_ interrupt safe.
 *
 * map   : map to allocate into
 * addrp : pointer to start address of new memory
 * size  : size of memory requested
 * flags : options
 *         KMA_HERE        *addrp is base address, else "anywhere"
 *         KMA_NOPAGEWAIT  don't wait for pages if unavailable
 *         KMA_KOBJECT     use kernel_object
 */

kern_return_t
kernel_memory_allocate(
    register vm_map_t map,
    register vm_offset_t *addrp,
    register vm_size_t size,
    register vm_offset_t mask,
    int flags)
{
    vm_object_t object;
    vm_object_offset_t offset;
    vm_map_entry_t entry;
    vm_map_offset_t map_addr;
    vm_map_offset_t map_mask;
    vm_map_size_t map_size;
    vm_map_size_t i;
    kern_return_t kr;

    if (size == 0) {
        *addrp = 0;
        return KERN_INVALID_ARGUMENT;
    }

    map_size = vm_map_round_page(size);
    map_mask = (vm_map_offset_t) mask;

    /*
     * Allocate a new object (if necessary). We must do this before
     * locking the map, or risk deadlock with the default pager.
     */
    if ((flags & KMA_KOBJECT) != 0) {
        object = kernel_object;
        vm_object_reference(object);
    } else {
        object = vm_object_allocate(map_size);
    }

    kr = vm_map_find_space(map, &map_addr, map_size, map_mask, &entry);
    if (KERN_SUCCESS != kr) {
        vm_object_deallocate(object);
        return kr;
    }

    entry->object.vm_object = object;
    entry->offset = offset = (object == kernel_object) ?
        map_addr - VM_MIN_KERNEL_ADDRESS : 0;

    vm_object_reference(object);
    vm_map_unlock(map);

    vm_object_lock(object);
    for (i = 0; i < map_size; i += PAGE_SIZE) {
        vm_page_t mem;

        while (VM_PAGE_NULL ==
               (mem = vm_page_alloc(object, offset + i))) {
            if (flags & KMA_NOPAGEWAIT) {
                if (object == kernel_object)
                    vm_object_page_remove(object, offset, offset + i);
                vm_object_unlock(object);
                vm_map_remove(map, map_addr, map_addr + map_size, 0);
                vm_object_deallocate(object);
                return KERN_RESOURCE_SHORTAGE;
            }
            vm_object_unlock(object);
            VM_PAGE_WAIT();
            vm_object_lock(object);
        }
        mem->busy = FALSE;
    }
    vm_object_unlock(object);

    if ((kr = vm_map_wire(map, map_addr, map_addr + map_size, VM_PROT_DEFAULT, FALSE))
        != KERN_SUCCESS) {
        if (object == kernel_object) {
            vm_object_lock(object);
            vm_object_page_remove(object, offset, offset + map_size);
            vm_object_unlock(object);
        }
        vm_map_remove(map, map_addr, map_addr + map_size, 0);
        vm_object_deallocate(object);
        return (kr);
    }
    /* now that the pages are wired, we no longer have to fear coalescing */
    vm_object_deallocate(object);
    if (object == kernel_object)
        vm_map_simplify(map, map_addr);

    /*
     * Return the memory, not zeroed.
     */
    *addrp = CAST_DOWN(vm_offset_t, map_addr);
    return KERN_SUCCESS;
}

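/*
 * Usage sketch (illustrative only; not part of the original file, kept out
 * of the build): allocate two wired pages backed by the kernel_object,
 * aligned to a two-page boundary via the mask argument. The function name
 * and sizes are hypothetical.
 */
#if 0
static void
kernel_memory_allocate_example(void)
{
    vm_offset_t addr;
    vm_size_t size = 2 * PAGE_SIZE;
    kern_return_t kr;

    /* mask is "alignment minus one", as in kmem_alloc_aligned below */
    kr = kernel_memory_allocate(kernel_map, &addr, size,
                                (vm_offset_t)(2 * PAGE_SIZE - 1),
                                KMA_KOBJECT | KMA_NOPAGEWAIT);
    if (kr != KERN_SUCCESS)
        return;     /* e.g. KERN_RESOURCE_SHORTAGE when KMA_NOPAGEWAIT is set */

    /* ... use the wired, unzeroed memory at addr ... */

    kmem_free(kernel_map, addr, size);
}
#endif
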
/*
 * kmem_alloc:
 *
 * Allocate wired-down memory in the kernel's address map
 * or a submap. The memory is not zero-filled.
 */

kern_return_t
kmem_alloc(
    vm_map_t map,
    vm_offset_t *addrp,
    vm_size_t size)
{
    return kernel_memory_allocate(map, addrp, size, 0, 0);
}

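/*
 * Usage sketch (illustrative only; not part of the original file): the
 * common allocate/free pairing. The size is rounded up to whole pages
 * internally, and the memory comes back wired and unzeroed.
 */
#if 0
static void
kmem_alloc_example(void)
{
    vm_offset_t buf;
    kern_return_t kr;

    kr = kmem_alloc(kernel_map, &buf, 3000);    /* rounded up to one page */
    if (kr != KERN_SUCCESS)
        return;

    memset((void *)buf, 0, 3000);               /* caller must zero if needed */

    kmem_free(kernel_map, buf, 3000);           /* same size as the allocation */
}
#endif
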
/*
 * kmem_realloc:
 *
 * Reallocate wired-down memory in the kernel's address map
 * or a submap. Newly allocated pages are not zeroed.
 * This can only be used on regions allocated with kmem_alloc.
 *
 * If successful, the pages in the old region are mapped twice.
 * The old region is unchanged. Use kmem_free to get rid of it.
 */
kern_return_t
kmem_realloc(
    vm_map_t map,
    vm_offset_t oldaddr,
    vm_size_t oldsize,
    vm_offset_t *newaddrp,
    vm_size_t newsize)
{
    vm_object_t object;
    vm_object_offset_t offset;
    vm_map_offset_t oldmapmin;
    vm_map_offset_t oldmapmax;
    vm_map_offset_t newmapaddr;
    vm_map_size_t oldmapsize;
    vm_map_size_t newmapsize;
    vm_map_entry_t oldentry;
    vm_map_entry_t newentry;
    vm_page_t mem;
    kern_return_t kr;

    oldmapmin = vm_map_trunc_page(oldaddr);
    oldmapmax = vm_map_round_page(oldaddr + oldsize);
    oldmapsize = oldmapmax - oldmapmin;
    newmapsize = vm_map_round_page(newsize);

    /*
     * Find the VM object backing the old region.
     */

    vm_map_lock(map);

    if (!vm_map_lookup_entry(map, oldmapmin, &oldentry))
        panic("kmem_realloc");
    object = oldentry->object.vm_object;

    /*
     * Increase the size of the object and
     * fill in the new region.
     */

    vm_object_reference(object);
    /*
     * By grabbing the object lock before unlocking the map,
     * we guarantee that we will panic if more than one
     * attempt is made to realloc a kmem_alloc'd area.
     */
    vm_object_lock(object);
    vm_map_unlock(map);
    if (object->size != oldmapsize)
        panic("kmem_realloc");
    object->size = newmapsize;
    vm_object_unlock(object);

    /*
     * Allocate the new pages while the expanded portion of the
     * object is still not mapped.
     */
    kmem_alloc_pages(object, vm_object_round_page(oldmapsize),
                     vm_object_round_page(newmapsize - oldmapsize));

    /*
     * Find space for the new region.
     */

    kr = vm_map_find_space(map, &newmapaddr, newmapsize,
                           (vm_map_offset_t) 0, &newentry);
    if (kr != KERN_SUCCESS) {
        vm_object_lock(object);
        for (offset = oldmapsize;
             offset < newmapsize; offset += PAGE_SIZE) {
            if ((mem = vm_page_lookup(object, offset)) != VM_PAGE_NULL) {
                vm_page_lock_queues();
                vm_page_free(mem);
                vm_page_unlock_queues();
            }
        }
        object->size = oldmapsize;
        vm_object_unlock(object);
        vm_object_deallocate(object);
        return kr;
    }
    newentry->object.vm_object = object;
    newentry->offset = 0;
    assert(newentry->wired_count == 0);

    /*
     * Add an extra reference in case we have someone doing an
     * unexpected deallocate.
     */
    vm_object_reference(object);
    vm_map_unlock(map);

    kr = vm_map_wire(map, newmapaddr, newmapaddr + newmapsize, VM_PROT_DEFAULT, FALSE);
    if (KERN_SUCCESS != kr) {
        vm_map_remove(map, newmapaddr, newmapaddr + newmapsize, 0);
        vm_object_lock(object);
        for (offset = oldmapsize; offset < newmapsize; offset += PAGE_SIZE) {
            if ((mem = vm_page_lookup(object, offset)) != VM_PAGE_NULL) {
                vm_page_lock_queues();
                vm_page_free(mem);
                vm_page_unlock_queues();
            }
        }
        object->size = oldmapsize;
        vm_object_unlock(object);
        vm_object_deallocate(object);
        return (kr);
    }
    vm_object_deallocate(object);

    *newaddrp = CAST_DOWN(vm_offset_t, newmapaddr);
    return KERN_SUCCESS;
}

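/*
 * Usage sketch (illustrative only; not part of the original file): growing
 * a kmem_alloc'd region. On success the old pages are mapped at both
 * addresses, so the old mapping must still be released with kmem_free.
 */
#if 0
static void
kmem_realloc_example(void)
{
    vm_offset_t oldbuf, newbuf;
    vm_size_t oldsize = PAGE_SIZE;
    vm_size_t newsize = 4 * PAGE_SIZE;
    kern_return_t kr;

    if (kmem_alloc(kernel_map, &oldbuf, oldsize) != KERN_SUCCESS)
        return;

    kr = kmem_realloc(kernel_map, oldbuf, oldsize, &newbuf, newsize);
    if (kr == KERN_SUCCESS) {
        /* the first oldsize bytes of newbuf alias the old pages */
        kmem_free(kernel_map, oldbuf, oldsize);     /* drop the old mapping */
        oldbuf = newbuf;
        oldsize = newsize;
    }

    kmem_free(kernel_map, oldbuf, oldsize);
}
#endif
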
/*
 * kmem_alloc_wired:
 *
 * Allocate wired-down memory in the kernel's address map
 * or a submap. The memory is not zero-filled.
 *
 * The memory is allocated in the kernel_object.
 * It may not be copied with vm_map_copy, and
 * it may not be reallocated with kmem_realloc.
 */

kern_return_t
kmem_alloc_wired(
    vm_map_t map,
    vm_offset_t *addrp,
    vm_size_t size)
{
    return kernel_memory_allocate(map, addrp, size, 0, KMA_KOBJECT);
}

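/*
 * Usage sketch (illustrative only; not part of the original file): a wired
 * allocation that lives in the kernel_object, e.g. for a fixed table that
 * will never be copied or kmem_realloc'd.
 */
#if 0
static vm_offset_t
kmem_alloc_wired_example(vm_size_t table_size)
{
    vm_offset_t table;

    if (kmem_alloc_wired(kernel_map, &table, table_size) != KERN_SUCCESS)
        return 0;
    return table;
}
#endif
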
/*
 * kmem_alloc_aligned:
 *
 * Like kmem_alloc_wired, except that the memory is aligned.
 * The size must be a power of 2; it is also used as the alignment.
 */

kern_return_t
kmem_alloc_aligned(
    vm_map_t map,
    vm_offset_t *addrp,
    vm_size_t size)
{
    if ((size & (size - 1)) != 0)
        panic("kmem_alloc_aligned: size not aligned");
    return kernel_memory_allocate(map, addrp, size, size - 1, KMA_KOBJECT);
}

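/*
 * Usage sketch (illustrative only; not part of the original file): the size
 * doubles as the alignment, so a 64 KB request comes back on a 64 KB
 * boundary.
 */
#if 0
static void
kmem_alloc_aligned_example(void)
{
    vm_offset_t addr;
    vm_size_t size = 64 * 1024;     /* power of two, also the alignment */

    if (kmem_alloc_aligned(kernel_map, &addr, size) != KERN_SUCCESS)
        return;
    assert((addr & (size - 1)) == 0);
    kmem_free(kernel_map, addr, size);
}
#endif
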
/*
 * kmem_alloc_pageable:
 *
 * Allocate pageable memory in the kernel's address map.
 */

kern_return_t
kmem_alloc_pageable(
    vm_map_t map,
    vm_offset_t *addrp,
    vm_size_t size)
{
    vm_map_offset_t map_addr;
    vm_map_size_t map_size;
    kern_return_t kr;

#ifndef normal
    map_addr = (vm_map_min(map)) + 0x1000;
#else
    map_addr = vm_map_min(map);
#endif
    map_size = vm_map_round_page(size);

    kr = vm_map_enter(map, &map_addr, map_size,
                      (vm_map_offset_t) 0, VM_FLAGS_ANYWHERE,
                      VM_OBJECT_NULL, (vm_object_offset_t) 0, FALSE,
                      VM_PROT_DEFAULT, VM_PROT_ALL, VM_INHERIT_DEFAULT);

    if (kr != KERN_SUCCESS)
        return kr;

    *addrp = CAST_DOWN(vm_offset_t, map_addr);
    return KERN_SUCCESS;
}

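/*
 * Usage sketch (illustrative only; not part of the original file): a
 * pageable kernel buffer; the pages are not wired up front and are
 * zero-filled as they are first touched.
 */
#if 0
static void
kmem_alloc_pageable_example(void)
{
    vm_offset_t buf;
    vm_size_t size = 16 * PAGE_SIZE;

    if (kmem_alloc_pageable(kernel_map, &buf, size) != KERN_SUCCESS)
        return;

    /* ... touch pages as needed; they are faulted in lazily ... */

    kmem_free(kernel_map, buf, size);
}
#endif
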
/*
 * kmem_free:
 *
 * Release a region of kernel virtual memory allocated
 * with kmem_alloc, kmem_alloc_wired, or kmem_alloc_pageable,
 * and return the physical pages associated with that region.
 */

void
kmem_free(
    vm_map_t map,
    vm_offset_t addr,
    vm_size_t size)
{
    kern_return_t kr;

    kr = vm_map_remove(map, vm_map_trunc_page(addr),
                       vm_map_round_page(addr + size),
                       VM_MAP_REMOVE_KUNWIRE);
    if (kr != KERN_SUCCESS)
        panic("kmem_free");
}

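/*
 * Usage sketch (illustrative only; not part of the original file): kmem_free
 * rounds the range out to page boundaries and panics if the range was not a
 * valid allocation, so the address and size should match the original
 * kmem_alloc* call and the same map must be passed.
 */
#if 0
static void
kmem_free_example(vm_offset_t addr, vm_size_t size)
{
    /* releases the mapping and unwires/returns the underlying pages */
    kmem_free(kernel_map, addr, size);
}
#endif
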
/*
 * Allocate new pages in an object.
 */

kern_return_t
kmem_alloc_pages(
    register vm_object_t object,
    register vm_object_offset_t offset,
    register vm_object_size_t size)
{
    vm_object_size_t alloc_size;

    alloc_size = vm_object_round_page(size);
    vm_object_lock(object);
    while (alloc_size) {
        register vm_page_t mem;

        /*
         * Allocate a page
         */
        while (VM_PAGE_NULL ==
               (mem = vm_page_alloc(object, offset))) {
            vm_object_unlock(object);
            VM_PAGE_WAIT();
            vm_object_lock(object);
        }
        mem->busy = FALSE;

        alloc_size -= PAGE_SIZE;
        offset += PAGE_SIZE;
    }
    vm_object_unlock(object);
    return KERN_SUCCESS;
}

/*
 * Remap wired pages in an object into a new region.
 * The object is assumed to be mapped into the kernel map or
 * a submap.
 */
void
kmem_remap_pages(
    register vm_object_t object,
    register vm_object_offset_t offset,
    register vm_offset_t start,
    register vm_offset_t end,
    vm_prot_t protection)
{
    vm_map_offset_t map_start;
    vm_map_offset_t map_end;

    /*
     * Mark the pmap region as not pageable.
     */
    map_start = vm_map_trunc_page(start);
    map_end = vm_map_round_page(end);

    pmap_pageable(kernel_pmap, map_start, map_end, FALSE);

    while (map_start < map_end) {
        register vm_page_t mem;

        vm_object_lock(object);

        /*
         * Find a page
         */
        if ((mem = vm_page_lookup(object, offset)) == VM_PAGE_NULL)
            panic("kmem_remap_pages");

        /*
         * Wire it down (again)
         */
        vm_page_lock_queues();
        vm_page_wire(mem);
        vm_page_unlock_queues();
        vm_object_unlock(object);

        /*
         * ENCRYPTED SWAP:
         * The page is supposed to be wired now, so it
         * shouldn't be encrypted at this point. It can
         * safely be entered in the page table.
         */
        ASSERT_PAGE_DECRYPTED(mem);

        /*
         * Enter it in the kernel pmap. The page isn't busy,
         * but this shouldn't be a problem because it is wired.
         */
        PMAP_ENTER(kernel_pmap, map_start, mem, protection,
                   ((unsigned int)(mem->object->wimg_bits))
                       & VM_WIMG_MASK,
                   TRUE);

        map_start += PAGE_SIZE;
        offset += PAGE_SIZE;
    }
}

/*
 * kmem_suballoc:
 *
 * Allocates a map to manage a subrange
 * of the kernel virtual address space.
 *
 * Arguments are as follows:
 *
 * parent    Map to take range from
 * addr      Address of start of range (IN/OUT)
 * size      Size of range to find
 * pageable  Can region be paged
 * flags     VM_FLAGS_ANYWHERE if the region may go anywhere in the map
 * new_map   Pointer to new submap
 */
kern_return_t
kmem_suballoc(
    vm_map_t parent,
    vm_offset_t *addr,
    vm_size_t size,
    boolean_t pageable,
    int flags,
    vm_map_t *new_map)
{
    vm_map_t map;
    vm_map_offset_t map_addr;
    vm_map_size_t map_size;
    kern_return_t kr;

    map_size = vm_map_round_page(size);

    /*
     * Need reference on submap object because it is internal
     * to the vm_system. vm_object_enter will never be called
     * on it (usual source of reference for vm_map_enter).
     */
    vm_object_reference(vm_submap_object);

    map_addr = (flags & VM_FLAGS_ANYWHERE) ?
        vm_map_min(parent) : vm_map_trunc_page(*addr);

    kr = vm_map_enter(parent, &map_addr, map_size,
                      (vm_map_offset_t) 0, flags,
                      vm_submap_object, (vm_object_offset_t) 0, FALSE,
                      VM_PROT_DEFAULT, VM_PROT_ALL, VM_INHERIT_DEFAULT);
    if (kr != KERN_SUCCESS) {
        vm_object_deallocate(vm_submap_object);
        return (kr);
    }

    pmap_reference(vm_map_pmap(parent));
    map = vm_map_create(vm_map_pmap(parent), map_addr, map_addr + map_size, pageable);
    if (map == VM_MAP_NULL)
        panic("kmem_suballoc: vm_map_create failed");   /* "can't happen" */

    kr = vm_map_submap(parent, map_addr, map_addr + map_size, map, map_addr, FALSE);
    if (kr != KERN_SUCCESS) {
        /*
         * See comment preceding vm_map_submap().
         */
        vm_map_remove(parent, map_addr, map_addr + map_size, VM_MAP_NO_FLAGS);
        vm_map_deallocate(map);     /* also removes ref to pmap */
        vm_object_deallocate(vm_submap_object);
        return (kr);
    }
    *addr = CAST_DOWN(vm_offset_t, map_addr);
    *new_map = map;
    return (KERN_SUCCESS);
}

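/*
 * Usage sketch (illustrative only; not part of the original file): carving a
 * pageable submap out of kernel_map, in the style of the zone or IPC
 * submaps. The size and the VM_FLAGS_ANYWHERE placement are hypothetical.
 */
#if 0
static vm_map_t
kmem_suballoc_example(void)
{
    vm_map_t submap;
    vm_offset_t base = 0;
    vm_size_t size = 32 * 1024 * 1024;      /* 32 MB of kernel VA */
    kern_return_t kr;

    kr = kmem_suballoc(kernel_map, &base, size,
                       TRUE,                /* pageable */
                       VM_FLAGS_ANYWHERE,
                       &submap);
    if (kr != KERN_SUCCESS)
        return VM_MAP_NULL;

    /* subsequent kmem_alloc* calls can now be made against submap */
    return submap;
}
#endif
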
/*
 * kmem_init:
 *
 * Initialize the kernel's virtual memory map, taking
 * into account all memory allocated up to this time.
 */
void
kmem_init(
    vm_offset_t start,
    vm_offset_t end)
{
    vm_map_offset_t map_start;
    vm_map_offset_t map_end;

    map_start = vm_map_trunc_page(start);
    map_end = vm_map_round_page(end);

    kernel_map = vm_map_create(pmap_kernel(), VM_MIN_KERNEL_ADDRESS,
                               map_end, FALSE);

    /*
     * Reserve virtual memory allocated up to this time.
     */

    if (start != VM_MIN_KERNEL_ADDRESS) {
        vm_map_offset_t map_addr;

        map_addr = VM_MIN_KERNEL_ADDRESS;
        (void) vm_map_enter(kernel_map,
                            &map_addr,
                            (vm_map_size_t)(map_start - VM_MIN_KERNEL_ADDRESS),
                            (vm_map_offset_t) 0,
                            VM_FLAGS_ANYWHERE | VM_FLAGS_NO_PMAP_CHECK,
                            VM_OBJECT_NULL,
                            (vm_object_offset_t) 0, FALSE,
                            VM_PROT_DEFAULT, VM_PROT_ALL,
                            VM_INHERIT_DEFAULT);
    }

    /*
     * Account for kernel memory (text, data, bss, vm shenanigans).
     * This may include inaccessible "holes" as determined by what
     * the machine-dependent init code includes in max_mem.
     */
    vm_page_wire_count = (atop_64(max_mem) - (vm_page_free_count
                                              + vm_page_active_count
                                              + vm_page_inactive_count));
}

/*
 * Routine:     copyinmap
 * Purpose:
 *      Like copyin, except that fromaddr is an address
 *      in the specified VM map. This implementation
 *      is incomplete; it handles the current user map
 *      and the kernel map/submaps.
 */
kern_return_t
copyinmap(
    vm_map_t map,
    vm_map_offset_t fromaddr,
    void *todata,
    vm_size_t length)
{
    kern_return_t kr = KERN_SUCCESS;
    vm_map_t oldmap;

    if (vm_map_pmap(map) == pmap_kernel()) {
        /* assume a correct copy */
        memcpy(todata, CAST_DOWN(void *, fromaddr), length);
    } else if (current_map() == map) {
        if (copyin(fromaddr, todata, length) != 0)
            kr = KERN_INVALID_ADDRESS;
    } else {
        vm_map_reference(map);
        oldmap = vm_map_switch(map);
        if (copyin(fromaddr, todata, length) != 0)
            kr = KERN_INVALID_ADDRESS;
        vm_map_switch(oldmap);
        vm_map_deallocate(map);
    }
    return kr;
}

/*
 * Routine:     copyoutmap
 * Purpose:
 *      Like copyout, except that toaddr is an address
 *      in the specified VM map. This implementation
 *      is incomplete; it handles the current user map
 *      and the kernel map/submaps.
 */
kern_return_t
copyoutmap(
    vm_map_t map,
    void *fromdata,
    vm_map_address_t toaddr,
    vm_size_t length)
{
    if (vm_map_pmap(map) == pmap_kernel()) {
        /* assume a correct copy */
        memcpy(CAST_DOWN(void *, toaddr), fromdata, length);
        return KERN_SUCCESS;
    }

    if (current_map() != map)
        return KERN_NOT_SUPPORTED;

    if (copyout(fromdata, toaddr, length) != 0)
        return KERN_INVALID_ADDRESS;

    return KERN_SUCCESS;
}

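/*
 * Usage sketch (illustrative only; not part of the original file): copying a
 * small value in from, and back out to, a task's map. Both calls fall back
 * to ordinary copyin/copyout when the map is the current user map, and to
 * memcpy when it is a kernel map or submap.
 */
#if 0
static kern_return_t
copymap_example(vm_map_t user_map, vm_map_offset_t uaddr)
{
    int value;
    kern_return_t kr;

    kr = copyinmap(user_map, uaddr, &value, sizeof(value));
    if (kr != KERN_SUCCESS)
        return kr;

    value++;

    return copyoutmap(user_map, &value, uaddr, sizeof(value));
}
#endif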

kern_return_t
vm_conflict_check(
    vm_map_t map,
    vm_map_offset_t off,
    vm_map_size_t len,
    memory_object_t pager,
    vm_object_offset_t file_off)
{
    vm_map_entry_t entry;
    vm_object_t obj;
    vm_object_offset_t obj_off;
    vm_map_t base_map;
    vm_map_offset_t base_offset;
    vm_map_offset_t original_offset;
    kern_return_t kr;
    vm_map_size_t local_len;

    base_map = map;
    base_offset = off;
    original_offset = off;
    kr = KERN_SUCCESS;
    vm_map_lock(map);
    while (vm_map_lookup_entry(map, off, &entry)) {
        local_len = len;

        if (entry->object.vm_object == VM_OBJECT_NULL) {
            vm_map_unlock(map);
            return KERN_SUCCESS;
        }
        if (entry->is_sub_map) {
            vm_map_t old_map;

            old_map = map;
            vm_map_lock(entry->object.sub_map);
            map = entry->object.sub_map;
            off = entry->offset + (off - entry->vme_start);
            vm_map_unlock(old_map);
            continue;
        }
        obj = entry->object.vm_object;
        obj_off = (off - entry->vme_start) + entry->offset;
        while (obj->shadow) {
            obj_off += obj->shadow_offset;
            obj = obj->shadow;
        }
        if ((obj->pager_created) && (obj->pager == pager)) {
            if (((obj->paging_offset) + obj_off) == file_off) {
                if (off != base_offset) {
                    vm_map_unlock(map);
                    return KERN_FAILURE;
                }
                kr = KERN_ALREADY_WAITING;
            } else {
                vm_object_offset_t obj_off_aligned;
                vm_object_offset_t file_off_aligned;

                obj_off_aligned = obj_off & ~PAGE_MASK;
                file_off_aligned = file_off & ~PAGE_MASK;

                if (file_off_aligned == (obj->paging_offset + obj_off_aligned)) {
                    /*
                     * the target map and the file offset start in the same page
                     * but are not identical...
                     */
                    vm_map_unlock(map);
                    return KERN_FAILURE;
                }
                if ((file_off < (obj->paging_offset + obj_off_aligned)) &&
                    ((file_off + len) > (obj->paging_offset + obj_off_aligned))) {
                    /*
                     * some portion of the tail of the I/O will fall
                     * within the encompass of the target map
                     */
                    vm_map_unlock(map);
                    return KERN_FAILURE;
                }
                if ((file_off_aligned > (obj->paging_offset + obj_off)) &&
                    (file_off_aligned < (obj->paging_offset + obj_off) + len)) {
                    /*
                     * the beginning page of the file offset falls within
                     * the target map's encompass
                     */
                    vm_map_unlock(map);
                    return KERN_FAILURE;
                }
            }
        } else if (kr != KERN_SUCCESS) {
            vm_map_unlock(map);
            return KERN_FAILURE;
        }

        if (len <= ((entry->vme_end - entry->vme_start) -
                    (off - entry->vme_start))) {
            vm_map_unlock(map);
            return kr;
        } else {
            len -= (entry->vme_end - entry->vme_start) -
                   (off - entry->vme_start);
        }
        base_offset = base_offset + (local_len - len);
        file_off = file_off + (local_len - len);
        off = base_offset;
        if (map != base_map) {
            vm_map_unlock(map);
            vm_map_lock(base_map);
            map = base_map;
        }
    }

    vm_map_unlock(map);
    return kr;
}