/*
 * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */
/*
 *	File:	vm/vm_kern.c
 *	Author:	Avadis Tevanian, Jr., Michael Wayne Young
 *	Date:	1985
 *
 *	Kernel memory management.
 */

#include <mach/kern_return.h>
#include <mach/vm_param.h>
#include <kern/assert.h>
#include <kern/lock.h>
#include <kern/thread.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <kern/misc_protos.h>
#include <vm/cpm.h>

#include <string.h>
/*
 *	Variables exported by this module.
 */

vm_map_t	kernel_map;
vm_map_t	kernel_pageable_map;

/*
 * Forward declarations for internal functions.
 */
extern kern_return_t kmem_alloc_pages(
	register vm_object_t		object,
	register vm_object_offset_t	offset,
	register vm_object_size_t	size);

extern void kmem_remap_pages(
	register vm_object_t		object,
	register vm_object_offset_t	offset,
	register vm_offset_t		start,
	register vm_offset_t		end,
	vm_prot_t			protection);

kern_return_t
kmem_alloc_contig(
	vm_map_t	map,
	vm_offset_t	*addrp,
	vm_size_t	size,
	vm_offset_t	mask,
	int		flags)
{
	vm_object_t		object;
	vm_object_offset_t	offset;
	vm_map_offset_t		map_addr;
	vm_map_offset_t		map_mask;
	vm_map_size_t		map_size, i;
	vm_map_entry_t		entry;
	vm_page_t		m, pages;
	kern_return_t		kr;

	if (map == VM_MAP_NULL || (flags && (flags ^ KMA_KOBJECT)))
		return KERN_INVALID_ARGUMENT;

	if (size == 0) {
		*addrp = 0;
		return KERN_INVALID_ARGUMENT;
	}

	map_size = vm_map_round_page(size);
	map_mask = (vm_map_offset_t)mask;

	/*
	 *	Allocate a new object (if necessary) and the reference we
	 *	will be donating to the map entry.  We must do this before
	 *	locking the map, or risk deadlock with the default pager.
	 */
	if ((flags & KMA_KOBJECT) != 0) {
		object = kernel_object;
		vm_object_reference(object);
	} else {
		object = vm_object_allocate(map_size);
	}

	kr = vm_map_find_space(map, &map_addr, map_size, map_mask, 0, &entry);
	if (KERN_SUCCESS != kr) {
		vm_object_deallocate(object);
		return kr;
	}

	entry->object.vm_object = object;
	entry->offset = offset = (object == kernel_object) ?
			map_addr - VM_MIN_KERNEL_ADDRESS : 0;

	/* Take an extra object ref in case the map entry gets deleted */
	vm_object_reference(object);
	vm_map_unlock(map);

	kr = cpm_allocate(CAST_DOWN(vm_size_t, map_size), &pages, FALSE);

	if (kr != KERN_SUCCESS) {
		vm_map_remove(map, vm_map_trunc_page(map_addr),
			      vm_map_round_page(map_addr + map_size), 0);
		vm_object_deallocate(object);
		*addrp = 0;
		return kr;
	}

	vm_object_lock(object);
	for (i = 0; i < map_size; i += PAGE_SIZE) {
		m = pages;
		pages = NEXT_PAGE(m);
		*(NEXT_PAGE_PTR(m)) = VM_PAGE_NULL;
		m->busy = FALSE;
		vm_page_insert(m, object, offset + i);
	}
	vm_object_unlock(object);

	if ((kr = vm_map_wire(map, vm_map_trunc_page(map_addr),
			      vm_map_round_page(map_addr + map_size), VM_PROT_DEFAULT, FALSE))
		!= KERN_SUCCESS) {
		if (object == kernel_object) {
			vm_object_lock(object);
			vm_object_page_remove(object, offset, offset + map_size);
			vm_object_unlock(object);
		}
		vm_map_remove(map, vm_map_trunc_page(map_addr),
			      vm_map_round_page(map_addr + map_size), 0);
		vm_object_deallocate(object);
		return kr;
	}
	vm_object_deallocate(object);

	if (object == kernel_object)
		vm_map_simplify(map, map_addr);

	*addrp = map_addr;
	return KERN_SUCCESS;
}
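
/*
 * Illustrative usage sketch (not part of the original file): a driver-style
 * caller might obtain a physically contiguous, wired buffer this way.  The
 * names "demo_contig_size" and "demo_addr" are hypothetical, and releasing
 * the region through kmem_free() is an assumption of this sketch.  Guarded
 * out so it is never compiled.
 */
#if 0
static void
kmem_alloc_contig_example(void)
{
	vm_offset_t	demo_addr;
	vm_size_t	demo_contig_size = 4 * PAGE_SIZE;
	kern_return_t	kr;

	/* KMA_KOBJECT backs the range with kernel_object; a 0 mask imposes no alignment */
	kr = kmem_alloc_contig(kernel_map, &demo_addr, demo_contig_size,
			       (vm_offset_t) 0, KMA_KOBJECT);
	if (kr == KERN_SUCCESS) {
		/* ... use the wired, physically contiguous range ... */
		kmem_free(kernel_map, demo_addr, demo_contig_size);
	}
}
#endif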

/*
 *	Master entry point for allocating kernel memory.
 *	NOTE: this routine is _never_ interrupt safe.
 *
 *	map		: map to allocate into
 *	addrp		: pointer to start address of new memory
 *	size		: size of memory requested
 *	flags		: options
 *			  KMA_HERE		*addrp is base address, else "anywhere"
 *			  KMA_NOPAGEWAIT	don't wait for pages if unavailable
 *			  KMA_KOBJECT		use kernel_object
 *			  KMA_LOMEM		support for 32 bit devices in a 64 bit world
 *						if set and a lomemory pool is available
 *						grab pages from it... this also implies
 *						KMA_NOPAGEWAIT
 */

kern_return_t
kernel_memory_allocate(
	register vm_map_t	map,
	register vm_offset_t	*addrp,
	register vm_size_t	size,
	register vm_offset_t	mask,
	int			flags)
{
	vm_object_t		object;
	vm_object_offset_t	offset;
	vm_map_entry_t		entry;
	vm_map_offset_t		map_addr;
	vm_map_offset_t		map_mask;
	vm_map_size_t		map_size;
	vm_map_size_t		i;
	kern_return_t		kr;

	if (size == 0) {
		*addrp = 0;
		return KERN_INVALID_ARGUMENT;
	}
	if (flags & KMA_LOMEM) {
		if ( !(flags & KMA_NOPAGEWAIT) ) {
			*addrp = 0;
			return KERN_INVALID_ARGUMENT;
		}
	}

	map_size = vm_map_round_page(size);
	map_mask = (vm_map_offset_t) mask;

	/*
	 *	Allocate a new object (if necessary).  We must do this before
	 *	locking the map, or risk deadlock with the default pager.
	 */
	if ((flags & KMA_KOBJECT) != 0) {
		object = kernel_object;
		vm_object_reference(object);
	} else {
		object = vm_object_allocate(map_size);
	}

	kr = vm_map_find_space(map, &map_addr, map_size, map_mask, 0, &entry);
	if (KERN_SUCCESS != kr) {
		vm_object_deallocate(object);
		return kr;
	}
	entry->object.vm_object = object;
	entry->offset = offset = (object == kernel_object) ?
			map_addr - VM_MIN_KERNEL_ADDRESS : 0;

	vm_object_reference(object);
	vm_map_unlock(map);

	vm_object_lock(object);
	for (i = 0; i < map_size; i += PAGE_SIZE) {
		vm_page_t	mem;

		for (;;) {
			if (flags & KMA_LOMEM)
				mem = vm_page_alloclo(object, offset + i);
			else
				mem = vm_page_alloc(object, offset + i);

			if (mem != VM_PAGE_NULL)
				break;

			if (flags & KMA_NOPAGEWAIT) {
				if (object == kernel_object)
					vm_object_page_remove(object, offset, offset + i);
				vm_object_unlock(object);
				vm_map_remove(map, map_addr, map_addr + map_size, 0);
				vm_object_deallocate(object);
				return KERN_RESOURCE_SHORTAGE;
			}
			vm_object_unlock(object);
			VM_PAGE_WAIT();
			vm_object_lock(object);
		}
		mem->busy = FALSE;
	}
	vm_object_unlock(object);

	if ((kr = vm_map_wire(map, map_addr, map_addr + map_size, VM_PROT_DEFAULT, FALSE))
		!= KERN_SUCCESS) {
		if (object == kernel_object) {
			vm_object_lock(object);
			vm_object_page_remove(object, offset, offset + map_size);
			vm_object_unlock(object);
		}
		vm_map_remove(map, map_addr, map_addr + map_size, 0);
		vm_object_deallocate(object);
		return (kr);
	}
	/* now that the page is wired, we no longer have to fear coalesce */
	vm_object_deallocate(object);
	if (object == kernel_object)
		vm_map_simplify(map, map_addr);

	/*
	 *	Return the memory, not zeroed.
	 */
	*addrp = CAST_DOWN(vm_offset_t, map_addr);
	return KERN_SUCCESS;
}
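
/*
 * Illustrative usage sketch (not part of the original file): calling the
 * master entry point directly with the flags documented above.  Per the
 * argument checks in kernel_memory_allocate(), KMA_LOMEM is only accepted
 * together with KMA_NOPAGEWAIT.  Guarded out so it is never compiled.
 */
#if 0
static kern_return_t
kernel_memory_allocate_example(vm_offset_t *out_addr)
{
	/*
	 * Ask for one page backed by kernel_object, without sleeping for
	 * pages; KERN_RESOURCE_SHORTAGE comes back if none are available.
	 */
	return kernel_memory_allocate(kernel_map, out_addr, PAGE_SIZE,
				      (vm_offset_t) 0,
				      KMA_KOBJECT | KMA_NOPAGEWAIT);
}
#endif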

/*
 *	kmem_alloc:
 *
 *	Allocate wired-down memory in the kernel's address map
 *	or a submap.  The memory is not zero-filled.
 */

kern_return_t
kmem_alloc(
	vm_map_t	map,
	vm_offset_t	*addrp,
	vm_size_t	size)
{
	return kernel_memory_allocate(map, addrp, size, 0, 0);
}

/*
 *	kmem_realloc:
 *
 *	Reallocate wired-down memory in the kernel's address map
 *	or a submap.  Newly allocated pages are not zeroed.
 *	This can only be used on regions allocated with kmem_alloc.
 *
 *	If successful, the pages in the old region are mapped twice.
 *	The old region is unchanged.  Use kmem_free to get rid of it.
 */
kern_return_t
kmem_realloc(
	vm_map_t	map,
	vm_offset_t	oldaddr,
	vm_size_t	oldsize,
	vm_offset_t	*newaddrp,
	vm_size_t	newsize)
{
	vm_object_t		object;
	vm_object_offset_t	offset;
	vm_map_offset_t		oldmapmin;
	vm_map_offset_t		oldmapmax;
	vm_map_offset_t		newmapaddr;
	vm_map_size_t		oldmapsize;
	vm_map_size_t		newmapsize;
	vm_map_entry_t		oldentry;
	vm_map_entry_t		newentry;
	vm_page_t		mem;
	kern_return_t		kr;

	oldmapmin = vm_map_trunc_page(oldaddr);
	oldmapmax = vm_map_round_page(oldaddr + oldsize);
	oldmapsize = oldmapmax - oldmapmin;
	newmapsize = vm_map_round_page(newsize);


	/*
	 *	Find the VM object backing the old region.
	 */

	vm_map_lock(map);

	if (!vm_map_lookup_entry(map, oldmapmin, &oldentry))
		panic("kmem_realloc");
	object = oldentry->object.vm_object;

	/*
	 *	Increase the size of the object and
	 *	fill in the new region.
	 */

	vm_object_reference(object);
	/* by grabbing the object lock before unlocking the map */
	/* we guarantee that we will panic if more than one     */
	/* attempt is made to realloc a kmem_alloc'd area       */
	vm_object_lock(object);
	vm_map_unlock(map);
	if (object->size != oldmapsize)
		panic("kmem_realloc");
	object->size = newmapsize;
	vm_object_unlock(object);

	/* allocate the new pages while expanded portion of the */
	/* object is still not mapped */
	kmem_alloc_pages(object, vm_object_round_page(oldmapsize),
			 vm_object_round_page(newmapsize - oldmapsize));

	/*
	 *	Find space for the new region.
	 */

	kr = vm_map_find_space(map, &newmapaddr, newmapsize,
			       (vm_map_offset_t) 0, 0, &newentry);
	if (kr != KERN_SUCCESS) {
		vm_object_lock(object);
		for (offset = oldmapsize;
		     offset < newmapsize; offset += PAGE_SIZE) {
			if ((mem = vm_page_lookup(object, offset)) != VM_PAGE_NULL) {
				vm_page_lock_queues();
				vm_page_free(mem);
				vm_page_unlock_queues();
			}
		}
		object->size = oldmapsize;
		vm_object_unlock(object);
		vm_object_deallocate(object);
		return kr;
	}
	newentry->object.vm_object = object;
	newentry->offset = 0;
	assert(newentry->wired_count == 0);


	/* add an extra reference in case we have someone doing an */
	/* unexpected deallocate */
	vm_object_reference(object);
	vm_map_unlock(map);

	kr = vm_map_wire(map, newmapaddr, newmapaddr + newmapsize, VM_PROT_DEFAULT, FALSE);
	if (KERN_SUCCESS != kr) {
		vm_map_remove(map, newmapaddr, newmapaddr + newmapsize, 0);
		vm_object_lock(object);
		for (offset = oldsize; offset < newmapsize; offset += PAGE_SIZE) {
			if ((mem = vm_page_lookup(object, offset)) != VM_PAGE_NULL) {
				vm_page_lock_queues();
				vm_page_free(mem);
				vm_page_unlock_queues();
			}
		}
		object->size = oldmapsize;
		vm_object_unlock(object);
		vm_object_deallocate(object);
		return (kr);
	}
	vm_object_deallocate(object);

	*newaddrp = CAST_DOWN(vm_offset_t, newmapaddr);
	return KERN_SUCCESS;
}
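
/*
 * Illustrative usage sketch (not part of the original file): growing a
 * kmem_alloc'd region.  As the comment above notes, on success the old
 * pages are mapped twice and the old region must still be released with
 * kmem_free.  The sizes and variable names here are hypothetical.  Guarded
 * out so it is never compiled.
 */
#if 0
static void
kmem_realloc_example(void)
{
	vm_offset_t	old_addr, new_addr;
	vm_size_t	old_size = PAGE_SIZE;
	vm_size_t	new_size = 2 * PAGE_SIZE;

	if (kmem_alloc(kernel_map, &old_addr, old_size) != KERN_SUCCESS)
		return;

	if (kmem_realloc(kernel_map, old_addr, old_size,
			 &new_addr, new_size) == KERN_SUCCESS) {
		/* the old region is unchanged; get rid of it explicitly */
		kmem_free(kernel_map, old_addr, old_size);
		/* ... use new_addr / new_size ... */
		kmem_free(kernel_map, new_addr, new_size);
	} else {
		kmem_free(kernel_map, old_addr, old_size);
	}
}
#endif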

/*
 *	kmem_alloc_wired:
 *
 *	Allocate wired-down memory in the kernel's address map
 *	or a submap.  The memory is not zero-filled.
 *
 *	The memory is allocated in the kernel_object.
 *	It may not be copied with vm_map_copy, and
 *	it may not be reallocated with kmem_realloc.
 */

kern_return_t
kmem_alloc_wired(
	vm_map_t	map,
	vm_offset_t	*addrp,
	vm_size_t	size)
{
	return kernel_memory_allocate(map, addrp, size, 0, KMA_KOBJECT);
}

/*
 *	kmem_alloc_aligned:
 *
 *	Like kmem_alloc_wired, except that the memory is aligned.
 *	The size should be a power-of-2.
 */

kern_return_t
kmem_alloc_aligned(
	vm_map_t	map,
	vm_offset_t	*addrp,
	vm_size_t	size)
{
	if ((size & (size - 1)) != 0)
		panic("kmem_alloc_aligned: size not aligned");
	return kernel_memory_allocate(map, addrp, size, size - 1, KMA_KOBJECT);
}
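
/*
 * Illustrative usage sketch (not part of the original file): the size passed
 * to kmem_alloc_aligned() must be a power of two, since it also serves as the
 * alignment mask (size - 1).  Guarded out so it is never compiled.
 */
#if 0
static kern_return_t
kmem_alloc_aligned_example(vm_offset_t *out_addr)
{
	/* a 4-page, power-of-two request; the returned address is aligned to that size */
	return kmem_alloc_aligned(kernel_map, out_addr, 4 * PAGE_SIZE);
}
#endif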

/*
 *	kmem_alloc_pageable:
 *
 *	Allocate pageable memory in the kernel's address map.
 */

kern_return_t
kmem_alloc_pageable(
	vm_map_t	map,
	vm_offset_t	*addrp,
	vm_size_t	size)
{
	vm_map_offset_t	map_addr;
	vm_map_size_t	map_size;
	kern_return_t	kr;

#ifndef normal
	map_addr = (vm_map_min(map)) + 0x1000;
#else
	map_addr = vm_map_min(map);
#endif
	map_size = vm_map_round_page(size);

	kr = vm_map_enter(map, &map_addr, map_size,
			  (vm_map_offset_t) 0, VM_FLAGS_ANYWHERE,
			  VM_OBJECT_NULL, (vm_object_offset_t) 0, FALSE,
			  VM_PROT_DEFAULT, VM_PROT_ALL, VM_INHERIT_DEFAULT);

	if (kr != KERN_SUCCESS)
		return kr;

	*addrp = CAST_DOWN(vm_offset_t, map_addr);
	return KERN_SUCCESS;
}

/*
 *	kmem_free:
 *
 *	Release a region of kernel virtual memory allocated
 *	with kmem_alloc, kmem_alloc_wired, or kmem_alloc_pageable,
 *	and return the physical pages associated with that region.
 */

void
kmem_free(
	vm_map_t	map,
	vm_offset_t	addr,
	vm_size_t	size)
{
	kern_return_t	kr;

	kr = vm_map_remove(map, vm_map_trunc_page(addr),
			   vm_map_round_page(addr + size),
			   VM_MAP_REMOVE_KUNWIRE);
	if (kr != KERN_SUCCESS)
		panic("kmem_free");
}
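
/*
 * Illustrative usage sketch (not part of the original file): pairing the
 * wired and pageable allocators above with kmem_free().  Guarded out so it
 * is never compiled.
 */
#if 0
static void
kmem_alloc_free_example(void)
{
	vm_offset_t	wired_addr, pageable_addr;

	/* wired, not zero-filled, backed by kernel_object */
	if (kmem_alloc_wired(kernel_map, &wired_addr, PAGE_SIZE) == KERN_SUCCESS)
		kmem_free(kernel_map, wired_addr, PAGE_SIZE);

	/* pageable virtual space in the kernel map; pages come in on demand */
	if (kmem_alloc_pageable(kernel_map, &pageable_addr, PAGE_SIZE) == KERN_SUCCESS)
		kmem_free(kernel_map, pageable_addr, PAGE_SIZE);
}
#endif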

/*
 *	Allocate new pages in an object.
 */

kern_return_t
kmem_alloc_pages(
	register vm_object_t		object,
	register vm_object_offset_t	offset,
	register vm_object_size_t	size)
{
	vm_object_size_t	alloc_size;

	alloc_size = vm_object_round_page(size);
	vm_object_lock(object);
	while (alloc_size) {
		register vm_page_t	mem;


		/*
		 *	Allocate a page
		 */
		while (VM_PAGE_NULL ==
		       (mem = vm_page_alloc(object, offset))) {
			vm_object_unlock(object);
			VM_PAGE_WAIT();
			vm_object_lock(object);
		}
		mem->busy = FALSE;

		alloc_size -= PAGE_SIZE;
		offset += PAGE_SIZE;
	}
	vm_object_unlock(object);
	return KERN_SUCCESS;
}

/*
 *	Remap wired pages in an object into a new region.
 *	The object is assumed to be mapped into the kernel map or
 *	a submap.
 */
void
kmem_remap_pages(
	register vm_object_t		object,
	register vm_object_offset_t	offset,
	register vm_offset_t		start,
	register vm_offset_t		end,
	vm_prot_t			protection)
{

	vm_map_offset_t		map_start;
	vm_map_offset_t		map_end;

	/*
	 *	Mark the pmap region as not pageable.
	 */
	map_start = vm_map_trunc_page(start);
	map_end = vm_map_round_page(end);

	pmap_pageable(kernel_pmap, map_start, map_end, FALSE);

	while (map_start < map_end) {
		register vm_page_t	mem;

		vm_object_lock(object);

		/*
		 *	Find a page
		 */
		if ((mem = vm_page_lookup(object, offset)) == VM_PAGE_NULL)
			panic("kmem_remap_pages");

		/*
		 *	Wire it down (again)
		 */
		vm_page_lock_queues();
		vm_page_wire(mem);
		vm_page_unlock_queues();
		vm_object_unlock(object);

		/*
		 * ENCRYPTED SWAP:
		 * The page is supposed to be wired now, so it
		 * shouldn't be encrypted at this point.  It can
		 * safely be entered in the page table.
		 */
		ASSERT_PAGE_DECRYPTED(mem);

		/*
		 *	Enter it in the kernel pmap.  The page isn't busy,
		 *	but this shouldn't be a problem because it is wired.
		 */
		PMAP_ENTER(kernel_pmap, map_start, mem, protection,
			   ((unsigned int)(mem->object->wimg_bits))
					& VM_WIMG_MASK,
			   TRUE);

		map_start += PAGE_SIZE;
		offset += PAGE_SIZE;
	}
}

/*
 *	kmem_suballoc:
 *
 *	Allocates a map to manage a subrange
 *	of the kernel virtual address space.
 *
 *	Arguments are as follows:
 *
 *	parent		Map to take range from
 *	addr		Address of start of range (IN/OUT)
 *	size		Size of range to find
 *	pageable	Can region be paged
 *	anywhere	Can region be located anywhere in map
 *	new_map		Pointer to new submap
 */
kern_return_t
kmem_suballoc(
	vm_map_t	parent,
	vm_offset_t	*addr,
	vm_size_t	size,
	boolean_t	pageable,
	int		flags,
	vm_map_t	*new_map)
{
	vm_map_t	map;
	vm_map_offset_t	map_addr;
	vm_map_size_t	map_size;
	kern_return_t	kr;

	map_size = vm_map_round_page(size);

	/*
	 *	Need reference on submap object because it is internal
	 *	to the vm_system.  vm_object_enter will never be called
	 *	on it (usual source of reference for vm_map_enter).
	 */
	vm_object_reference(vm_submap_object);

	map_addr = (flags & VM_FLAGS_ANYWHERE) ?
		   vm_map_min(parent) : vm_map_trunc_page(*addr);

	kr = vm_map_enter(parent, &map_addr, map_size,
			  (vm_map_offset_t) 0, flags,
			  vm_submap_object, (vm_object_offset_t) 0, FALSE,
			  VM_PROT_DEFAULT, VM_PROT_ALL, VM_INHERIT_DEFAULT);
	if (kr != KERN_SUCCESS) {
		vm_object_deallocate(vm_submap_object);
		return (kr);
	}

	pmap_reference(vm_map_pmap(parent));
	map = vm_map_create(vm_map_pmap(parent), map_addr, map_addr + map_size, pageable);
	if (map == VM_MAP_NULL)
		panic("kmem_suballoc: vm_map_create failed");	/* "can't happen" */

	kr = vm_map_submap(parent, map_addr, map_addr + map_size, map, map_addr, FALSE);
	if (kr != KERN_SUCCESS) {
		/*
		 * See comment preceding vm_map_submap().
		 */
		vm_map_remove(parent, map_addr, map_addr + map_size, VM_MAP_NO_FLAGS);
		vm_map_deallocate(map);	/* also removes ref to pmap */
		vm_object_deallocate(vm_submap_object);
		return (kr);
	}
	*addr = CAST_DOWN(vm_offset_t, map_addr);
	*new_map = map;
	return (KERN_SUCCESS);
}
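
/*
 * Illustrative usage sketch (not part of the original file): carving a
 * pageable submap out of kernel_map, in the style used for special-purpose
 * maps such as kernel_pageable_map.  The size and variable names are
 * hypothetical.  Guarded out so it is never compiled.
 */
#if 0
static void
kmem_suballoc_example(void)
{
	vm_map_t	demo_submap;
	vm_offset_t	demo_base = 0;
	vm_size_t	demo_size = 64 * PAGE_SIZE;

	if (kmem_suballoc(kernel_map, &demo_base, demo_size,
			  TRUE,			/* pageable */
			  VM_FLAGS_ANYWHERE,	/* let the parent pick the range */
			  &demo_submap) == KERN_SUCCESS) {
		/* later allocations can target demo_submap instead of kernel_map */
	}
}
#endif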

/*
 *	kmem_init:
 *
 *	Initialize the kernel's virtual memory map, taking
 *	into account all memory allocated up to this time.
 */
void
kmem_init(
	vm_offset_t	start,
	vm_offset_t	end)
{
	vm_map_offset_t	map_start;
	vm_map_offset_t	map_end;

	map_start = vm_map_trunc_page(start);
	map_end = vm_map_round_page(end);

	kernel_map = vm_map_create(pmap_kernel(), VM_MIN_KERNEL_ADDRESS,
				   map_end, FALSE);

	/*
	 *	Reserve virtual memory allocated up to this time.
	 */

	if (start != VM_MIN_KERNEL_ADDRESS) {
		vm_map_offset_t map_addr;

		map_addr = VM_MIN_KERNEL_ADDRESS;
		(void) vm_map_enter(kernel_map,
				    &map_addr,
				    (vm_map_size_t)(map_start - VM_MIN_KERNEL_ADDRESS),
				    (vm_map_offset_t) 0,
				    VM_FLAGS_ANYWHERE | VM_FLAGS_NO_PMAP_CHECK,
				    VM_OBJECT_NULL,
				    (vm_object_offset_t) 0, FALSE,
				    VM_PROT_DEFAULT, VM_PROT_ALL,
				    VM_INHERIT_DEFAULT);
	}

	/*
	 * Account for kernel memory (text, data, bss, vm shenanigans).
	 * This may include inaccessible "holes" as determined by what
	 * the machine-dependent init code includes in max_mem.
	 */
	vm_page_wire_count = (atop_64(max_mem) - (vm_page_free_count
					+ vm_page_active_count
					+ vm_page_inactive_count));
}


/*
 *	Routine:	copyinmap
 *	Purpose:
 *		Like copyin, except that fromaddr is an address
 *		in the specified VM map.  This implementation
 *		is incomplete; it handles the current user map
 *		and the kernel map/submaps.
 */
kern_return_t
copyinmap(
	vm_map_t		map,
	vm_map_offset_t		fromaddr,
	void			*todata,
	vm_size_t		length)
{
	kern_return_t	kr = KERN_SUCCESS;
	vm_map_t	oldmap;

	if (vm_map_pmap(map) == pmap_kernel())
	{
		/* assume a correct copy */
		memcpy(todata, CAST_DOWN(void *, fromaddr), length);
	}
	else if (current_map() == map)
	{
		if (copyin(fromaddr, todata, length) != 0)
			kr = KERN_INVALID_ADDRESS;
	}
	else
	{
		vm_map_reference(map);
		oldmap = vm_map_switch(map);
		if (copyin(fromaddr, todata, length) != 0)
			kr = KERN_INVALID_ADDRESS;
		vm_map_switch(oldmap);
		vm_map_deallocate(map);
	}
	return kr;
}

/*
 *	Routine:	copyoutmap
 *	Purpose:
 *		Like copyout, except that toaddr is an address
 *		in the specified VM map.  This implementation
 *		is incomplete; it handles the current user map
 *		and the kernel map/submaps.
 */
kern_return_t
copyoutmap(
	vm_map_t		map,
	void			*fromdata,
	vm_map_address_t	toaddr,
	vm_size_t		length)
{
	if (vm_map_pmap(map) == pmap_kernel()) {
		/* assume a correct copy */
		memcpy(CAST_DOWN(void *, toaddr), fromdata, length);
		return KERN_SUCCESS;
	}

	if (current_map() != map)
		return KERN_NOT_SUPPORTED;

	if (copyout(fromdata, toaddr, length) != 0)
		return KERN_INVALID_ADDRESS;

	return KERN_SUCCESS;
}
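
/*
 * Illustrative usage sketch (not part of the original file): moving a small
 * value between the kernel and an address in a caller-supplied map with the
 * two helpers above.  The variable names are hypothetical.  Guarded out so
 * it is never compiled.
 */
#if 0
static kern_return_t
copymap_example(vm_map_t target_map, vm_map_offset_t target_addr)
{
	int		value = 0;
	kern_return_t	kr;

	/* read from target_map (kernel map, current map, or another map) */
	kr = copyinmap(target_map, target_addr, &value, sizeof (value));
	if (kr != KERN_SUCCESS)
		return kr;

	value++;

	/* write back; copyoutmap only handles the kernel map or the current map */
	return copyoutmap(target_map, &value, target_addr, sizeof (value));
}
#endif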


kern_return_t
vm_conflict_check(
	vm_map_t		map,
	vm_map_offset_t		off,
	vm_map_size_t		len,
	memory_object_t		pager,
	vm_object_offset_t	file_off)
{
	vm_map_entry_t		entry;
	vm_object_t		obj;
	vm_object_offset_t	obj_off;
	vm_map_t		base_map;
	vm_map_offset_t		base_offset;
	vm_map_offset_t		original_offset;
	kern_return_t		kr;
	vm_map_size_t		local_len;

	base_map = map;
	base_offset = off;
	original_offset = off;
	kr = KERN_SUCCESS;
	vm_map_lock(map);
	while (vm_map_lookup_entry(map, off, &entry)) {
		local_len = len;

		if (entry->object.vm_object == VM_OBJECT_NULL) {
			vm_map_unlock(map);
			return KERN_SUCCESS;
		}
		if (entry->is_sub_map) {
			vm_map_t	old_map;

			old_map = map;
			vm_map_lock(entry->object.sub_map);
			map = entry->object.sub_map;
			off = entry->offset + (off - entry->vme_start);
			vm_map_unlock(old_map);
			continue;
		}
		obj = entry->object.vm_object;
		obj_off = (off - entry->vme_start) + entry->offset;
		while (obj->shadow) {
			obj_off += obj->shadow_offset;
			obj = obj->shadow;
		}
		if ((obj->pager_created) && (obj->pager == pager)) {
			if (((obj->paging_offset) + obj_off) == file_off) {
				if (off != base_offset) {
					vm_map_unlock(map);
					return KERN_FAILURE;
				}
				kr = KERN_ALREADY_WAITING;
			} else {
				vm_object_offset_t	obj_off_aligned;
				vm_object_offset_t	file_off_aligned;

				obj_off_aligned = obj_off & ~PAGE_MASK;
				file_off_aligned = file_off & ~PAGE_MASK;

				if (file_off_aligned == (obj->paging_offset + obj_off_aligned)) {
					/*
					 * the target map and the file offset start in the same page
					 * but are not identical...
					 */
					vm_map_unlock(map);
					return KERN_FAILURE;
				}
				if ((file_off < (obj->paging_offset + obj_off_aligned)) &&
				    ((file_off + len) > (obj->paging_offset + obj_off_aligned))) {
					/*
					 * some portion of the tail of the I/O will fall
					 * within the encompass of the target map
					 */
					vm_map_unlock(map);
					return KERN_FAILURE;
				}
				if ((file_off_aligned > (obj->paging_offset + obj_off)) &&
				    (file_off_aligned < (obj->paging_offset + obj_off) + len)) {
					/*
					 * the beginning page of the file offset falls within
					 * the target map's encompass
					 */
					vm_map_unlock(map);
					return KERN_FAILURE;
				}
			}
		} else if (kr != KERN_SUCCESS) {
			vm_map_unlock(map);
			return KERN_FAILURE;
		}

		if (len <= ((entry->vme_end - entry->vme_start) -
				(off - entry->vme_start))) {
			vm_map_unlock(map);
			return kr;
		} else {
			len -= (entry->vme_end - entry->vme_start) -
				(off - entry->vme_start);
		}
		base_offset = base_offset + (local_len - len);
		file_off = file_off + (local_len - len);
		off = base_offset;
		if (map != base_map) {
			vm_map_unlock(map);
			vm_map_lock(base_map);
			map = base_map;
		}
	}

	vm_map_unlock(map);
	return kr;
}
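
/*
 * Illustrative usage sketch (not part of the original file): a file-mapping
 * caller can ask vm_conflict_check() whether an I/O range backed by a pager
 * overlaps an existing mapping of that same pager in a map.  Per the code
 * above, KERN_SUCCESS means no conflict, KERN_ALREADY_WAITING means the range
 * maps exactly the same file offset, and KERN_FAILURE means a partial overlap.
 * All names here are hypothetical.  Guarded out so it is never compiled.
 */
#if 0
static boolean_t
vm_conflict_check_example(
	vm_map_t		demo_map,
	vm_map_offset_t		demo_addr,
	vm_map_size_t		demo_len,
	memory_object_t		demo_pager,
	vm_object_offset_t	demo_file_off)
{
	kern_return_t	kr;

	kr = vm_conflict_check(demo_map, demo_addr, demo_len,
			       demo_pager, demo_file_off);
	return (kr == KERN_FAILURE) ? TRUE : FALSE;
}
#endif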