/*
 * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 *	Author:	Avadis Tevanian, Jr., Michael Wayne Young
 *
 *	Kernel memory management.
 */

#include <mach/kern_return.h>
#include <mach/vm_param.h>
#include <kern/assert.h>
#include <kern/lock.h>
#include <kern/thread.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <kern/misc_protos.h>

#include <libkern/OSDebug.h>
#include <sys/kdebug.h>

/*
 *	Variables exported by this module.
 */

vm_map_t	kernel_pageable_map;

extern boolean_t vm_kernel_ready;

/*
 *	Forward declarations for internal functions.
 */
extern kern_return_t kmem_alloc_pages(
	register vm_object_t		object,
	register vm_object_offset_t	offset,
	register vm_object_size_t	size);

extern void kmem_remap_pages(
	register vm_object_t		object,
	register vm_object_offset_t	offset,
	register vm_offset_t		start,
	register vm_offset_t		end,
	vm_prot_t			protection);

	vm_object_t		object;
	vm_object_offset_t	offset;
	vm_map_offset_t		map_addr;
	vm_map_offset_t		map_mask;
	vm_map_size_t		map_size, i;
	vm_map_entry_t		entry;
	vm_page_t		m, pages;
	kern_return_t		kr;

	if (map == VM_MAP_NULL || (flags && (flags ^ KMA_KOBJECT)))
		return KERN_INVALID_ARGUMENT;

	if (size == 0)
		return KERN_INVALID_ARGUMENT;

	map_size = vm_map_round_page(size);
	map_mask = (vm_map_offset_t)mask;

	/*
	 *	Allocate a new object (if necessary) and the reference we
	 *	will be donating to the map entry.  We must do this before
	 *	locking the map, or risk deadlock with the default pager.
	 */
	if ((flags & KMA_KOBJECT) != 0) {
		object = kernel_object;
		vm_object_reference(object);
	} else {
		object = vm_object_allocate(map_size);
	}

	kr = vm_map_find_space(map, &map_addr, map_size, map_mask, 0, &entry);
	if (KERN_SUCCESS != kr) {
		vm_object_deallocate(object);
		return kr;
	}

	entry->object.vm_object = object;
	entry->offset = offset = (object == kernel_object) ?
			map_addr - VM_MIN_KERNEL_ADDRESS : 0;

	/* Take an extra object ref in case the map entry gets deleted */
	vm_object_reference(object);
	vm_map_unlock(map);

	kr = cpm_allocate(CAST_DOWN(vm_size_t, map_size), &pages, max_pnum, FALSE);

	if (kr != KERN_SUCCESS) {
		vm_map_remove(map, vm_map_trunc_page(map_addr),
			      vm_map_round_page(map_addr + map_size), 0);
		vm_object_deallocate(object);
		return kr;
	}

	vm_object_lock(object);
	for (i = 0; i < map_size; i += PAGE_SIZE) {
		m = pages;
		pages = NEXT_PAGE(m);
		*(NEXT_PAGE_PTR(m)) = VM_PAGE_NULL;
		m->busy = FALSE;
		vm_page_insert(m, object, offset + i);
	}
	vm_object_unlock(object);

	if ((kr = vm_map_wire(map, vm_map_trunc_page(map_addr),
			      vm_map_round_page(map_addr + map_size),
			      VM_PROT_DEFAULT, FALSE)) != KERN_SUCCESS) {
		if (object == kernel_object) {
			vm_object_lock(object);
			vm_object_page_remove(object, offset, offset + map_size);
			vm_object_unlock(object);
		}
		vm_map_remove(map, vm_map_trunc_page(map_addr),
			      vm_map_round_page(map_addr + map_size), 0);
		vm_object_deallocate(object);
		return kr;
	}
	vm_object_deallocate(object);

	if (object == kernel_object)
		vm_map_simplify(map, map_addr);

	*addrp = CAST_DOWN(vm_offset_t, map_addr);
	return KERN_SUCCESS;
}

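/*
 *	Illustrative sketch, not part of the original source: one way a driver
 *	might call the contiguous allocator above (presumably kmem_alloc_contig()
 *	in this file; the routine name and parameter order are assumptions based
 *	on the parameters its body references: map, addrp, size, mask, max_pnum
 *	and flags).  Note the flag check above only accepts 0 or KMA_KOBJECT.
 */
#if 0	/* example only, not compiled */
static void
example_contig_alloc(void)
{
	vm_offset_t	dma_buf;
	kern_return_t	kr;

	/* 64KB, physically contiguous, all pages below the 4GB boundary */
	kr = kmem_alloc_contig(kernel_map, &dma_buf, 64 * 1024,
			       0 /* mask */, atop_64(0xFFFFFFFFULL) /* max_pnum */,
			       0 /* flags */);
	if (kr == KERN_SUCCESS) {
		/* ... use the buffer for DMA ... */
		kmem_free(kernel_map, dma_buf, 64 * 1024);
	}
}
#endif
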
/*
 *	Master entry point for allocating kernel memory.
 *	NOTE: this routine is _never_ interrupt safe.
 *
 *	map		: map to allocate into
 *	addrp		: pointer to start address of new memory
 *	size		: size of memory requested
 *
 *	KMA_HERE	*addrp is base address, else "anywhere"
 *	KMA_NOPAGEWAIT	don't wait for pages if unavailable
 *	KMA_KOBJECT	use kernel_object
 *	KMA_LOMEM	support for 32 bit devices in a 64 bit world
 *			if set and a lomemory pool is available
 *			grab pages from it... this also implies
 */
kern_return_t
kernel_memory_allocate(
	register vm_map_t	map,
	register vm_offset_t	*addrp,
	register vm_size_t	size,
	register vm_offset_t	mask,
	int			flags)
{
	vm_object_t		object;
	vm_object_offset_t	offset;
	vm_map_entry_t		entry;
	vm_map_offset_t		map_addr, fill_start;
	vm_map_offset_t		map_mask;
	vm_map_size_t		map_size, fill_size;
	vm_map_size_t		i;
	kern_return_t		kr;
	vm_page_t		mem;
	int			vm_alloc_flags;

	if (! vm_kernel_ready) {
		panic("kernel_memory_allocate: VM is not ready");
	}

	if (size == 0)
		return KERN_INVALID_ARGUMENT;

	if (flags & KMA_LOMEM) {
		if ( !(flags & KMA_NOPAGEWAIT) ) {
			return KERN_INVALID_ARGUMENT;
		}
	}

	map_size = vm_map_round_page(size);
	map_mask = (vm_map_offset_t) mask;
	vm_alloc_flags = 0;

	/*
	 *	Guard pages are implemented as fictitious pages.  By placing guard pages
	 *	on either end of a stack, they can help detect cases where a thread walks
	 *	off either end of its stack.  They are allocated and set up here and attempts
	 *	to access those pages are trapped in vm_fault_page().
	 *
	 *	The map_size we were passed may include extra space for
	 *	guard pages.  If those were requested, then back it out of fill_size
	 *	since vm_map_find_space() takes just the actual size not including
	 *	guard pages.  Similarly, fill_start indicates where the actual pages
	 *	will begin in the range.
	 */

	fill_start = 0;
	fill_size = map_size;

	if (flags & KMA_GUARD_FIRST) {
		vm_alloc_flags |= VM_FLAGS_GUARD_BEFORE;
		fill_start += PAGE_SIZE_64;
		fill_size -= PAGE_SIZE_64;

		if (map_size < fill_start + fill_size) {
			/* no space for a guard page */
			return KERN_INVALID_ARGUMENT;
		}
	}

	if (flags & KMA_GUARD_LAST) {
		vm_alloc_flags |= VM_FLAGS_GUARD_AFTER;
		fill_size -= PAGE_SIZE_64;

		if (map_size <= fill_start + fill_size) {
			/* no space for a guard page */
			return KERN_INVALID_ARGUMENT;
		}
	}

	/*
	 *	Allocate a new object (if necessary).  We must do this before
	 *	locking the map, or risk deadlock with the default pager.
	 */
	if ((flags & KMA_KOBJECT) != 0) {
		object = kernel_object;
		vm_object_reference(object);
	} else {
		object = vm_object_allocate(map_size);
	}

	kr = vm_map_find_space(map, &map_addr,
			       fill_size, map_mask,
			       vm_alloc_flags, &entry);
	if (KERN_SUCCESS != kr) {
		vm_object_deallocate(object);
		return kr;
	}

	entry->object.vm_object = object;
	entry->offset = offset = (object == kernel_object) ?
			map_addr - VM_MIN_KERNEL_ADDRESS : 0;

	vm_object_reference(object);
	vm_map_unlock(map);

	vm_object_lock(object);

	/*
	 *	Allocate the lower guard page if one was requested.  The guard
	 *	page extends up to fill_start which is where the real memory
	 *	begins.
	 */
	for (i = 0; i < fill_start; i += PAGE_SIZE) {
		for (;;) {
			mem = vm_page_alloc_guard(object, offset + i);
			if (mem != VM_PAGE_NULL)
				break;
			if (flags & KMA_NOPAGEWAIT) {
				kr = KERN_RESOURCE_SHORTAGE;
				goto out;
			}
			vm_object_unlock(object);
			vm_page_more_fictitious();
			vm_object_lock(object);
		}
		mem->busy = FALSE;
	}

	/*
	 *	Allocate the real memory here.  This extends from offset fill_start
	 *	for fill_size bytes.
	 */
	for (i = fill_start; i < fill_start + fill_size; i += PAGE_SIZE) {
		for (;;) {
			if (flags & KMA_LOMEM)
				mem = vm_page_alloclo(object, offset + i);
			else
				mem = vm_page_alloc(object, offset + i);

			if (mem != VM_PAGE_NULL)
				break;

			if (flags & KMA_NOPAGEWAIT) {
				kr = KERN_RESOURCE_SHORTAGE;
				goto out;
			}
			vm_object_unlock(object);
			VM_PAGE_WAIT();
			vm_object_lock(object);
		}
		mem->busy = FALSE;
	}

	/*
	 *	Lastly, allocate the ending guard page if requested.  This starts at the ending
	 *	address from the loop above up to the map_size that was originally
	 *	requested.
	 */
	for (i = fill_start + fill_size; i < map_size; i += PAGE_SIZE) {
		for (;;) {
			mem = vm_page_alloc_guard(object, offset + i);
			if (mem != VM_PAGE_NULL)
				break;
			if (flags & KMA_NOPAGEWAIT) {
				kr = KERN_RESOURCE_SHORTAGE;
				goto out;
			}
			vm_object_unlock(object);
			vm_page_more_fictitious();
			vm_object_lock(object);
		}
		mem->busy = FALSE;
	}

	vm_object_unlock(object);

	kr = vm_map_wire(map, map_addr, map_addr + map_size,
			 VM_PROT_DEFAULT, FALSE);
	if (kr != KERN_SUCCESS) {
		vm_object_lock(object);
		goto out;
	}

	/* now that the page is wired, we no longer have to fear coalesce */
	vm_object_deallocate(object);
	if (object == kernel_object)
		vm_map_simplify(map, map_addr);

	/*
	 *	Return the memory, not zeroed.
	 */
	*addrp = CAST_DOWN(vm_offset_t, map_addr);
	return KERN_SUCCESS;

out:
	if (object == kernel_object)
		vm_object_page_remove(object, offset, offset + i);
	vm_object_unlock(object);
	vm_map_remove(map, map_addr, map_addr + map_size, 0);
	vm_object_deallocate(object);
	return KERN_RESOURCE_SHORTAGE;
}

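/*
 *	Illustrative sketch, not part of the original source: how a caller might
 *	use kernel_memory_allocate() with the guard-page flags handled above,
 *	for example when setting up a stack-like buffer.  The helper name and
 *	sizes are invented for the example; the flag names come from the code.
 */
#if 0	/* example only, not compiled */
static kern_return_t
example_guarded_alloc(vm_offset_t *out_addr)
{
	/* one guard page at each end plus four usable pages */
	vm_size_t	size = 6 * PAGE_SIZE;
	kern_return_t	kr;

	kr = kernel_memory_allocate(kernel_map, out_addr, size, 0,
				    KMA_KOBJECT | KMA_GUARD_FIRST | KMA_GUARD_LAST);
	if (kr != KERN_SUCCESS)
		return kr;

	/*
	 * *out_addr covers the whole range including the guards: the usable
	 * memory starts one page in and ends one page before *out_addr + size.
	 */
	return KERN_SUCCESS;
}
#endif
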
/*
 *	kmem_alloc:
 *
 *	Allocate wired-down memory in the kernel's address map
 *	or a submap.  The memory is not zero-filled.
 */
kern_return_t
kmem_alloc(
	vm_map_t	map,
	vm_offset_t	*addrp,
	vm_size_t	size)
{
	kern_return_t kr = kernel_memory_allocate(map, addrp, size, 0, 0);
	TRACE_MACHLEAKS(KMEM_ALLOC_CODE, KMEM_ALLOC_CODE_2, size, *addrp);
	return kr;
}

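/*
 *	Illustrative sketch, not part of the original source: the usual
 *	kmem_alloc()/kmem_free() pairing for a temporary wired buffer.
 *	The size is arbitrary.
 */
#if 0	/* example only, not compiled */
static void
example_wired_buffer(void)
{
	vm_offset_t	buf;
	vm_size_t	size = 16 * PAGE_SIZE;

	if (kmem_alloc(kernel_map, &buf, size) == KERN_SUCCESS) {
		bzero((void *) buf, size);	/* kmem_alloc memory is not zero-filled */
		kmem_free(kernel_map, buf, size);
	}
}
#endif
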
/*
 *	Reallocate wired-down memory in the kernel's address map
 *	or a submap.  Newly allocated pages are not zeroed.
 *	This can only be used on regions allocated with kmem_alloc.
 *
 *	If successful, the pages in the old region are mapped twice.
 *	The old region is unchanged.  Use kmem_free to get rid of it.
 */
kern_return_t
kmem_realloc(
	vm_map_t	map,
	vm_offset_t	oldaddr,
	vm_size_t	oldsize,
	vm_offset_t	*newaddrp,
	vm_size_t	newsize)
{
	vm_object_t		object;
	vm_object_offset_t	offset;
	vm_map_offset_t		oldmapmin;
	vm_map_offset_t		oldmapmax;
	vm_map_offset_t		newmapaddr;
	vm_map_size_t		oldmapsize;
	vm_map_size_t		newmapsize;
	vm_map_entry_t		oldentry;
	vm_map_entry_t		newentry;
	vm_page_t		mem;
	kern_return_t		kr;

	oldmapmin = vm_map_trunc_page(oldaddr);
	oldmapmax = vm_map_round_page(oldaddr + oldsize);
	oldmapsize = oldmapmax - oldmapmin;
	newmapsize = vm_map_round_page(newsize);

	/*
	 *	Find the VM object backing the old region.
	 */

	vm_map_lock(map);

	if (!vm_map_lookup_entry(map, oldmapmin, &oldentry))
		panic("kmem_realloc");
	object = oldentry->object.vm_object;

	/*
	 *	Increase the size of the object and
	 *	fill in the new region.
	 */

	vm_object_reference(object);
	/* by grabbing the object lock before unlocking the map */
	/* we guarantee that we will panic if more than one     */
	/* attempt is made to realloc a kmem_alloc'd area       */
	vm_object_lock(object);
	vm_map_unlock(map);
	if (object->size != oldmapsize)
		panic("kmem_realloc");
	object->size = newmapsize;
	vm_object_unlock(object);

	/* allocate the new pages while expanded portion of the */
	/* object is still not mapped */
	kmem_alloc_pages(object, vm_object_round_page(oldmapsize),
			 vm_object_round_page(newmapsize - oldmapsize));

	/*
	 *	Find space for the new region.
	 */

	kr = vm_map_find_space(map, &newmapaddr, newmapsize,
			       (vm_map_offset_t) 0, 0, &newentry);
	if (kr != KERN_SUCCESS) {
		vm_object_lock(object);
		for (offset = oldmapsize;
		     offset < newmapsize; offset += PAGE_SIZE) {
			if ((mem = vm_page_lookup(object, offset)) != VM_PAGE_NULL) {
				vm_page_lock_queues();
				vm_page_free(mem);
				vm_page_unlock_queues();
			}
		}
		object->size = oldmapsize;
		vm_object_unlock(object);
		vm_object_deallocate(object);
		return kr;
	}

	newentry->object.vm_object = object;
	newentry->offset = 0;
	assert(newentry->wired_count == 0);

	/* add an extra reference in case we have someone doing an */
	/* unexpected deallocate */
	vm_object_reference(object);
	vm_map_unlock(map);

	kr = vm_map_wire(map, newmapaddr, newmapaddr + newmapsize, VM_PROT_DEFAULT, FALSE);
	if (KERN_SUCCESS != kr) {
		vm_map_remove(map, newmapaddr, newmapaddr + newmapsize, 0);
		vm_object_lock(object);
		for (offset = oldsize; offset < newmapsize; offset += PAGE_SIZE) {
			if ((mem = vm_page_lookup(object, offset)) != VM_PAGE_NULL) {
				vm_page_lock_queues();
				vm_page_free(mem);
				vm_page_unlock_queues();
			}
		}
		object->size = oldmapsize;
		vm_object_unlock(object);
		vm_object_deallocate(object);
		return (kr);
	}
	vm_object_deallocate(object);

	*newaddrp = CAST_DOWN(vm_offset_t, newmapaddr);
	return KERN_SUCCESS;
}

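/*
 *	Illustrative sketch, not part of the original source: growing a
 *	kmem_alloc'd region with kmem_realloc().  Per the comment above, the
 *	old mapping stays valid and must be released separately with kmem_free.
 */
#if 0	/* example only, not compiled */
static kern_return_t
example_grow_region(vm_offset_t *addr, vm_size_t oldsize, vm_size_t newsize)
{
	vm_offset_t	newaddr;
	kern_return_t	kr;

	kr = kmem_realloc(kernel_map, *addr, oldsize, &newaddr, newsize);
	if (kr != KERN_SUCCESS)
		return kr;

	/* the old region is still mapped; free it once nothing references it */
	kmem_free(kernel_map, *addr, oldsize);
	*addr = newaddr;
	return KERN_SUCCESS;
}
#endif
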
/*
 *	Allocate wired-down memory in the kernel's address map
 *	or a submap.  The memory is not zero-filled.
 *
 *	The memory is allocated in the kernel_object.
 *	It may not be copied with vm_map_copy, and
 *	it may not be reallocated with kmem_realloc.
 */
kern_return_t
kmem_alloc_wired(
	vm_map_t	map,
	vm_offset_t	*addrp,
	vm_size_t	size)
{
	return kernel_memory_allocate(map, addrp, size, 0, KMA_KOBJECT);
}

/*
 *	kmem_alloc_aligned:
 *
 *	Like kmem_alloc_wired, except that the memory is aligned.
 *	The size should be a power-of-2.
 */
kern_return_t
kmem_alloc_aligned(
	vm_map_t	map,
	vm_offset_t	*addrp,
	vm_size_t	size)
{
	if ((size & (size - 1)) != 0)
		panic("kmem_alloc_aligned: size not aligned");
	return kernel_memory_allocate(map, addrp, size, size - 1, KMA_KOBJECT);
}

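/*
 *	Illustrative sketch, not part of the original source: allocating a
 *	power-of-2 sized, naturally aligned region with kmem_alloc_aligned().
 */
#if 0	/* example only, not compiled */
static void
example_aligned_alloc(void)
{
	vm_offset_t	addr;
	vm_size_t	size = 32 * 1024;		/* must be a power of 2 */

	if (kmem_alloc_aligned(kernel_map, &addr, size) == KERN_SUCCESS) {
		assert((addr & (size - 1)) == 0);	/* aligned to its own size */
		kmem_free(kernel_map, addr, size);
	}
}
#endif
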
/*
 *	kmem_alloc_pageable:
 *
 *	Allocate pageable memory in the kernel's address map.
 */
kern_return_t
kmem_alloc_pageable(
	vm_map_t	map,
	vm_offset_t	*addrp,
	vm_size_t	size)
{
	vm_map_offset_t	map_addr;
	vm_map_size_t	map_size;
	kern_return_t	kr;

#ifndef normal
	map_addr = (vm_map_min(map)) + 0x1000;
#else
	map_addr = vm_map_min(map);
#endif
	map_size = vm_map_round_page(size);

	kr = vm_map_enter(map, &map_addr, map_size,
			  (vm_map_offset_t) 0, VM_FLAGS_ANYWHERE,
			  VM_OBJECT_NULL, (vm_object_offset_t) 0, FALSE,
			  VM_PROT_DEFAULT, VM_PROT_ALL, VM_INHERIT_DEFAULT);

	if (kr != KERN_SUCCESS)
		return kr;

	*addrp = CAST_DOWN(vm_offset_t, map_addr);
	return KERN_SUCCESS;
}

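/*
 *	Illustrative sketch, not part of the original source: pageable kernel
 *	memory from kmem_alloc_pageable() is backed lazily, on first touch,
 *	unlike the wired variants above.
 */
#if 0	/* example only, not compiled */
static void
example_pageable_alloc(void)
{
	vm_offset_t	addr;
	vm_size_t	size = 8 * PAGE_SIZE;

	if (kmem_alloc_pageable(kernel_map, &addr, size) == KERN_SUCCESS) {
		((char *) addr)[0] = 0;		/* first touch faults the page in */
		kmem_free(kernel_map, addr, size);
	}
}
#endif
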
/*
 *	kmem_free:
 *
 *	Release a region of kernel virtual memory allocated
 *	with kmem_alloc, kmem_alloc_wired, or kmem_alloc_pageable,
 *	and return the physical pages associated with that region.
 */
void
kmem_free(
	vm_map_t	map,
	vm_offset_t	addr,
	vm_size_t	size)
{
	kern_return_t	kr;

	TRACE_MACHLEAKS(KMEM_FREE_CODE, KMEM_FREE_CODE_2, size, addr);

	kr = vm_map_remove(map, vm_map_trunc_page(addr),
			   vm_map_round_page(addr + size),
			   VM_MAP_REMOVE_KUNWIRE);
	if (kr != KERN_SUCCESS)
		panic("kmem_free");
}

/*
 *	Allocate new pages in an object.
 */

kern_return_t
kmem_alloc_pages(
	register vm_object_t		object,
	register vm_object_offset_t	offset,
	register vm_object_size_t	size)
{
	vm_object_size_t	alloc_size;

	alloc_size = vm_object_round_page(size);
	vm_object_lock(object);
	while (alloc_size) {
		register vm_page_t	mem;

		/*
		 *	Allocate a page, waiting for one if none is available.
		 */
		while (VM_PAGE_NULL ==
		       (mem = vm_page_alloc(object, offset))) {
			vm_object_unlock(object);
			VM_PAGE_WAIT();
			vm_object_lock(object);
		}
		mem->busy = FALSE;

		alloc_size -= PAGE_SIZE;
		offset += PAGE_SIZE;
	}
	vm_object_unlock(object);
	return KERN_SUCCESS;
}

/*
 *	Remap wired pages in an object into a new region.
 *	The object is assumed to be mapped into the kernel map or
 *	a submap.
 */
void
kmem_remap_pages(
	register vm_object_t		object,
	register vm_object_offset_t	offset,
	register vm_offset_t		start,
	register vm_offset_t		end,
	vm_prot_t			protection)
{
	vm_map_offset_t			map_start;
	vm_map_offset_t			map_end;

	/*
	 *	Mark the pmap region as not pageable.
	 */
	map_start = vm_map_trunc_page(start);
	map_end = vm_map_round_page(end);

	pmap_pageable(kernel_pmap, map_start, map_end, FALSE);

	while (map_start < map_end) {
		register vm_page_t	mem;

		vm_object_lock(object);

		/*
		 *	Find a page
		 */
		if ((mem = vm_page_lookup(object, offset)) == VM_PAGE_NULL)
			panic("kmem_remap_pages");

		/*
		 *	Wire it down (again)
		 */
		vm_page_lockspin_queues();
		vm_page_wire(mem);
		vm_page_unlock_queues();
		vm_object_unlock(object);

		/*
		 *	The page is supposed to be wired now, so it
		 *	shouldn't be encrypted at this point.  It can
		 *	safely be entered in the page table.
		 */
		ASSERT_PAGE_DECRYPTED(mem);

		/*
		 *	Enter it in the kernel pmap.  The page isn't busy,
		 *	but this shouldn't be a problem because it is wired.
		 */
		PMAP_ENTER(kernel_pmap, map_start, mem, protection,
			   ((unsigned int)(mem->object->wimg_bits))
			   & VM_WIMG_MASK,
			   TRUE);

		map_start += PAGE_SIZE;
		offset += PAGE_SIZE;
	}
}

/*
 *	kmem_suballoc:
 *
 *	Allocates a map to manage a subrange
 *	of the kernel virtual address space.
 *
 *	Arguments are as follows:
 *
 *	parent		Map to take range from
 *	addr		Address of start of range (IN/OUT)
 *	size		Size of range to find
 *	pageable	Can region be paged
 *	anywhere	Can region be located anywhere in map
 *	new_map		Pointer to new submap
 */
kern_return_t
kmem_suballoc(
	vm_map_t	parent,
	vm_offset_t	*addr,
	vm_size_t	size,
	boolean_t	pageable,
	int		flags,
	vm_map_t	*new_map)
{
	vm_map_t	map;
	vm_map_offset_t	map_addr;
	vm_map_size_t	map_size;
	kern_return_t	kr;

	map_size = vm_map_round_page(size);

	/*
	 *	Need reference on submap object because it is internal
	 *	to the vm_system.  vm_object_enter will never be called
	 *	on it (usual source of reference for vm_map_enter).
	 */
	vm_object_reference(vm_submap_object);

	map_addr = (flags & VM_FLAGS_ANYWHERE) ?
		   vm_map_min(parent) : vm_map_trunc_page(*addr);

	kr = vm_map_enter(parent, &map_addr, map_size,
			  (vm_map_offset_t) 0, flags,
			  vm_submap_object, (vm_object_offset_t) 0, FALSE,
			  VM_PROT_DEFAULT, VM_PROT_ALL, VM_INHERIT_DEFAULT);
	if (kr != KERN_SUCCESS) {
		vm_object_deallocate(vm_submap_object);
		return (kr);
	}

	pmap_reference(vm_map_pmap(parent));
	map = vm_map_create(vm_map_pmap(parent), map_addr, map_addr + map_size, pageable);
	if (map == VM_MAP_NULL)
		panic("kmem_suballoc: vm_map_create failed");	/* "can't happen" */

	kr = vm_map_submap(parent, map_addr, map_addr + map_size, map, map_addr, FALSE);
	if (kr != KERN_SUCCESS) {
		/*
		 *	See comment preceding vm_map_submap().
		 */
		vm_map_remove(parent, map_addr, map_addr + map_size, VM_MAP_NO_FLAGS);
		vm_map_deallocate(map);		/* also removes ref to pmap */
		vm_object_deallocate(vm_submap_object);
		return (kr);
	}
	*addr = CAST_DOWN(vm_offset_t, map_addr);
	*new_map = map;
	return (KERN_SUCCESS);
}

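/*
 *	Illustrative sketch, not part of the original source: carving a pageable
 *	submap out of the kernel map with kmem_suballoc(), roughly how subsystem
 *	submaps are set up at boot.  The parameter order follows the argument
 *	list documented above (parent, addr, size, pageable, flags, new_map) and
 *	is otherwise an assumption.
 */
#if 0	/* example only, not compiled */
static vm_map_t
example_make_submap(void)
{
	vm_map_t	submap;
	vm_offset_t	base = 0;
	vm_size_t	size = 16 * 1024 * 1024;	/* 16MB of kernel VA */

	if (kmem_suballoc(kernel_map, &base, size,
			  TRUE /* pageable */, VM_FLAGS_ANYWHERE,
			  &submap) != KERN_SUCCESS)
		return VM_MAP_NULL;

	/* later allocations can be made from 'submap' instead of kernel_map */
	return submap;
}
#endif
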
/*
 *	kmem_init:
 *
 *	Initialize the kernel's virtual memory map, taking
 *	into account all memory allocated up to this time.
 */
void
kmem_init(
	vm_offset_t	start,
	vm_offset_t	end)
{
	vm_map_offset_t map_start;
	vm_map_offset_t map_end;

	map_start = vm_map_trunc_page(start);
	map_end = vm_map_round_page(end);

	kernel_map = vm_map_create(pmap_kernel(), VM_MIN_KERNEL_ADDRESS,
				   map_end, FALSE);
	/*
	 *	Reserve virtual memory allocated up to this time.
	 */
	if (start != VM_MIN_KERNEL_ADDRESS) {
		vm_map_offset_t map_addr;

		map_addr = VM_MIN_KERNEL_ADDRESS;
		(void) vm_map_enter(kernel_map,
				    &map_addr,
				    (vm_map_size_t)(map_start - VM_MIN_KERNEL_ADDRESS),
				    (vm_map_offset_t) 0,
				    VM_FLAGS_ANYWHERE | VM_FLAGS_NO_PMAP_CHECK,
				    VM_OBJECT_NULL,
				    (vm_object_offset_t) 0, FALSE,
				    VM_PROT_NONE, VM_PROT_NONE,
				    VM_INHERIT_DEFAULT);
	}

	/*
	 *	Account for kernel memory (text, data, bss, vm shenanigans).
	 *	This may include inaccessible "holes" as determined by what
	 *	the machine-dependent init code includes in max_mem.
	 */
	vm_page_wire_count = (atop_64(max_mem) - (vm_page_free_count
						  + vm_page_active_count
						  + vm_page_inactive_count));

	/*
	 *	Set the default global user wire limit which limits the amount of
	 *	memory that can be locked via mlock().  We set this to the total number of
	 *	pages that are potentially usable by a user app (max_mem) minus
	 *	1000 pages.  This keeps 4MB in reserve for the kernel which will hopefully be
	 *	enough to avoid memory deadlocks.  If for some reason the system has less than
	 *	2000 pages of memory at this point, then we'll allow users to lock up to 80%
	 *	of that.  This can be overridden via a sysctl.
	 */
	if (max_mem > 2000)
		vm_global_user_wire_limit = max_mem - 1000;
	else
		vm_global_user_wire_limit = max_mem * 100 / 80;

	vm_user_wire_limit = vm_global_user_wire_limit;	/* the default per user limit is the same as the global limit */
}

/*
 *	Routine:	copyinmap
 *	Purpose:
 *		Like copyin, except that fromaddr is an address
 *		in the specified VM map.  This implementation
 *		is incomplete; it handles the current user map
 *		and the kernel map/submaps.
 */
kern_return_t
copyinmap(
	vm_map_t		map,
	vm_map_offset_t		fromaddr,
	void			*todata,
	vm_size_t		length)
{
	kern_return_t	kr = KERN_SUCCESS;
	vm_map_t	oldmap;

	if (vm_map_pmap(map) == pmap_kernel())
	{
		/* assume a correct copy */
		memcpy(todata, CAST_DOWN(void *, fromaddr), length);
	}
	else if (current_map() == map)
	{
		if (copyin(fromaddr, todata, length) != 0)
			kr = KERN_INVALID_ADDRESS;
	}
	else
	{
		vm_map_reference(map);
		oldmap = vm_map_switch(map);
		if (copyin(fromaddr, todata, length) != 0)
			kr = KERN_INVALID_ADDRESS;
		vm_map_switch(oldmap);
		vm_map_deallocate(map);
	}
	return kr;
}

/*
 *	Routine:	copyoutmap
 *	Purpose:
 *		Like copyout, except that toaddr is an address
 *		in the specified VM map.  This implementation
 *		is incomplete; it handles the current user map
 *		and the kernel map/submaps.
 */
kern_return_t
copyoutmap(
	vm_map_t		map,
	void			*fromdata,
	vm_map_address_t	toaddr,
	vm_size_t		length)
{
	if (vm_map_pmap(map) == pmap_kernel()) {
		/* assume a correct copy */
		memcpy(CAST_DOWN(void *, toaddr), fromdata, length);
		return KERN_SUCCESS;
	}

	if (current_map() != map)
		return KERN_NOT_SUPPORTED;

	if (copyout(fromdata, toaddr, length) != 0)
		return KERN_INVALID_ADDRESS;

	return KERN_SUCCESS;
}

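/*
 *	Illustrative sketch, not part of the original source: copying a small
 *	value out of and back into a (possibly non-current) task map with
 *	copyinmap()/copyoutmap().  Note that copyoutmap(), as written above,
 *	only supports the kernel map and the current user map.
 */
#if 0	/* example only, not compiled */
static kern_return_t
example_peek_poke(vm_map_t task_map, vm_map_offset_t uaddr)
{
	uint32_t	word;
	kern_return_t	kr;

	kr = copyinmap(task_map, uaddr, &word, sizeof(word));
	if (kr != KERN_SUCCESS)
		return kr;

	word |= 1;
	return copyoutmap(task_map, &word, uaddr, sizeof(word));
}
#endif
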
kern_return_t
vm_conflict_check(
	vm_map_t		map,
	vm_map_offset_t		off,
	vm_map_size_t		len,
	memory_object_t		pager,
	vm_object_offset_t	file_off)
{
	vm_map_entry_t		entry;
	vm_object_t		obj;
	vm_object_offset_t	obj_off;
	vm_map_t		base_map;
	vm_map_offset_t		base_offset;
	vm_map_offset_t		original_offset;
	kern_return_t		kr;
	vm_map_size_t		local_len;

	kr = KERN_SUCCESS;
	base_map = map;
	base_offset = off;
	original_offset = off;

	vm_map_lock(map);

	while (vm_map_lookup_entry(map, off, &entry)) {
		local_len = len;

		if (entry->object.vm_object == VM_OBJECT_NULL) {
			vm_map_unlock(map);
			return KERN_SUCCESS;
		}
		if (entry->is_sub_map) {
			vm_map_t	old_map;

			old_map = map;
			vm_map_lock(entry->object.sub_map);
			map = entry->object.sub_map;
			off = entry->offset + (off - entry->vme_start);
			vm_map_unlock(old_map);
			continue;
		}
		obj = entry->object.vm_object;
		obj_off = (off - entry->vme_start) + entry->offset;
		while (obj->shadow) {
			obj_off += obj->shadow_offset;
			obj = obj->shadow;
		}
		if ((obj->pager_created) && (obj->pager == pager)) {
			if (((obj->paging_offset) + obj_off) == file_off) {
				if (off != base_offset) {
					vm_map_unlock(map);
					return KERN_FAILURE;
				}
				kr = KERN_ALREADY_WAITING;
			} else {
				vm_object_offset_t	obj_off_aligned;
				vm_object_offset_t	file_off_aligned;

				obj_off_aligned = obj_off & ~PAGE_MASK;
				file_off_aligned = file_off & ~PAGE_MASK;

				if (file_off_aligned == (obj->paging_offset + obj_off_aligned)) {
					/*
					 * the target map and the file offset start in the same page
					 * but are not identical...
					 */
					vm_map_unlock(map);
					return KERN_FAILURE;
				}
				if ((file_off < (obj->paging_offset + obj_off_aligned)) &&
				    ((file_off + len) > (obj->paging_offset + obj_off_aligned))) {
					/*
					 * some portion of the tail of the I/O will fall
					 * within the encompass of the target map
					 */
					vm_map_unlock(map);
					return KERN_FAILURE;
				}
				if ((file_off_aligned > (obj->paging_offset + obj_off)) &&
				    (file_off_aligned < (obj->paging_offset + obj_off) + len)) {
					/*
					 * the beginning page of the file offset falls within
					 * the target map's encompass
					 */
					vm_map_unlock(map);
					return KERN_FAILURE;
				}
			}
		} else if (kr != KERN_SUCCESS) {
			vm_map_unlock(map);
			return KERN_FAILURE;
		}

		if (len <= ((entry->vme_end - entry->vme_start) -
			    (off - entry->vme_start))) {
			vm_map_unlock(map);
			return kr;
		} else {
			len -= (entry->vme_end - entry->vme_start) -
			       (off - entry->vme_start);
		}
		base_offset = base_offset + (local_len - len);
		file_off = file_off + (local_len - len);
		if (map != base_map) {
			vm_map_unlock(map);
			vm_map_lock(base_map);