/*
 * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 *	Author:	Avadis Tevanian, Jr., Michael Wayne Young
 *
 *	Resident memory management module.
 */
#include <mach/clock_types.h>
#include <mach/vm_prot.h>
#include <mach/vm_statistics.h>
#include <kern/counters.h>
#include <kern/sched_prim.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/zalloc.h>
#include <vm/vm_init.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_kern.h>			/* kernel_memory_allocate() */
#include <kern/misc_protos.h>
#include <zone_debug.h>
#include <ppc/mappings.h>		/* (BRINGUP) */
#include <pexpert/pexpert.h>		/* (BRINGUP) */
#include <vm/vm_protos.h>
/*
 *	Variables used to indicate the relative age of pages in the
 *	inactive list.
 */
unsigned int	vm_page_ticket_roll = 0;
unsigned int	vm_page_ticket = 0;
/*
 *	Associated with page of user-allocatable memory is a
 *	page structure.
 */

/*
 *	These variables record the values returned by vm_page_bootstrap,
 *	for debugging purposes.  The implementation of pmap_steal_memory
 *	and pmap_startup here also uses them internally.
 */
vm_offset_t virtual_space_start;
vm_offset_t virtual_space_end;
/*
 *	The vm_page_lookup() routine, which provides for fast
 *	(virtual memory object, offset) to page lookup, employs
 *	the following hash table.  The vm_page_{insert,remove}
 *	routines install and remove associations in the table.
 *	[This table is often called the virtual-to-physical,
 *	or VP, table.]
 */
#if	MACH_PAGE_HASH_STATS
	int		cur_count;		/* current count */
	int		hi_count;		/* high water mark */
#endif	/* MACH_PAGE_HASH_STATS */

vm_page_bucket_t *vm_page_buckets;		/* Array of buckets */
unsigned int	vm_page_bucket_count = 0;	/* How big is array? */
unsigned int	vm_page_hash_mask;		/* Mask for hash function */
unsigned int	vm_page_hash_shift;		/* Shift for hash function */
uint32_t	vm_page_bucket_hash;		/* Basic bucket hash */
decl_simple_lock_data(,vm_page_bucket_lock)

vm_page_t	vm_page_lookup_nohint(vm_object_t object, vm_object_offset_t offset);
#if	MACH_PAGE_HASH_STATS
/* This routine is only for debug.  It is intended to be called by
 * hand by a developer using a kernel debugger.  This routine prints
 * out vm_page_hash table statistics to the kernel debug console.
 */
	for (i = 0; i < vm_page_bucket_count; i++) {
		if (vm_page_buckets[i].hi_count) {
			numbuckets++;
			highsum += vm_page_buckets[i].hi_count;
			if (vm_page_buckets[i].hi_count > maxdepth)
				maxdepth = vm_page_buckets[i].hi_count;
		}
	}
	printf("Total number of buckets: %d\n", vm_page_bucket_count);
	printf("Number used buckets: %d = %d%%\n",
		numbuckets, 100*numbuckets/vm_page_bucket_count);
	printf("Number unused buckets: %d = %d%%\n",
		vm_page_bucket_count - numbuckets,
		100*(vm_page_bucket_count-numbuckets)/vm_page_bucket_count);
	printf("Sum of bucket max depth: %d\n", highsum);
	printf("Average bucket depth: %d.%2d\n",
		highsum/vm_page_bucket_count,
		highsum%vm_page_bucket_count);
	printf("Maximum bucket depth: %d\n", maxdepth);
#endif /* MACH_PAGE_HASH_STATS */
/*
 *	The virtual page size is currently implemented as a runtime
 *	variable, but is constant once initialized using vm_set_page_size.
 *	This initialization must be done in the machine-dependent
 *	bootstrap sequence, before calling other machine-independent
 *	functions.
 *
 *	All references to the virtual page size outside this
 *	module must use the PAGE_SIZE, PAGE_MASK and PAGE_SHIFT
 *	constants.
 */
vm_size_t	page_size  = PAGE_SIZE;
vm_size_t	page_mask  = PAGE_MASK;
int		page_shift = PAGE_SHIFT;
/*
 *	Resident page structures are initialized from
 *	a template (see vm_page_alloc).
 *
 *	When adding a new field to the virtual memory
 *	object structure, be sure to add initialization
 *	(see vm_page_bootstrap).
 */
struct vm_page	vm_page_template;
/*
 *	Resident pages that represent real memory
 *	are allocated from a free list.
 */
vm_page_t	vm_page_queue_free;
vm_page_t	vm_page_queue_fictitious;
unsigned int	vm_page_free_wanted;
unsigned int	vm_page_free_count;
unsigned int	vm_page_fictitious_count;

unsigned int	vm_page_free_count_minimum;	/* debugging */
/*
 *	Occasionally, the virtual memory system uses
 *	resident page structures that do not refer to
 *	real pages, for example to leave a page with
 *	important state information in the VP table.
 *
 *	These page structures are allocated the way
 *	most other kernel structures are.
 */
decl_mutex_data(,vm_page_alloc_lock)
unsigned int io_throttle_zero_fill;
/*
 *	Fictitious pages don't have a physical address,
 *	but we must initialize phys_page to something.
 *	For debugging, this should be a strange value
 *	that the pmap module can recognize in assertions.
 */
vm_offset_t vm_page_fictitious_addr = (vm_offset_t) -1;
/*
 *	Resident page structures are also chained on
 *	queues that are used by the page replacement
 *	system (pageout daemon).  These queues are
 *	defined here, but are shared by the pageout
 *	module.  The inactive queue is broken into
 *	inactive and zf for convenience as the
 *	pageout daemon often assigns a higher
 *	affinity to zf pages
 */
queue_head_t	vm_page_queue_active;
queue_head_t	vm_page_queue_inactive;
unsigned int	vm_page_active_count;
unsigned int	vm_page_inactive_count;
unsigned int	vm_page_wire_count;
unsigned int	vm_page_gobble_count = 0;
unsigned int	vm_page_wire_count_warning = 0;
unsigned int	vm_page_gobble_count_warning = 0;

unsigned int	vm_page_purgeable_count = 0;	/* # of pages purgeable now */
uint64_t	vm_page_purged_count = 0;	/* total count of purged pages */

ppnum_t		vm_lopage_poolstart = 0;
ppnum_t		vm_lopage_poolend = 0;
int		vm_lopage_poolsize = 0;
uint64_t	max_valid_dma_address = 0xffffffffffffffffULL;
/*
 *	Several page replacement parameters are also
 *	shared with this module, so that page allocation
 *	(done here in vm_page_alloc) can trigger the
 *	pageout daemon.
 */
unsigned int	vm_page_free_target = 0;
unsigned int	vm_page_free_min = 0;
unsigned int	vm_page_inactive_target = 0;
unsigned int	vm_page_free_reserved = 0;
unsigned int	vm_page_throttled_count = 0;
/*
 *	The VM system has a couple of heuristics for deciding
 *	that pages are "uninteresting" and should be placed
 *	on the inactive queue as likely candidates for replacement.
 *	These variables let the heuristics be controlled at run-time
 *	to make experimentation easier.
 */
boolean_t vm_page_deactivate_hint = TRUE;
/*
 *	Sets the page size, perhaps based upon the memory
 *	size.  Must be called before any use of page-size
 *	dependent functions.
 *
 *	Sets page_shift and page_mask from page_size.
 */
void
vm_set_page_size(void)
{
	page_mask = page_size - 1;

	if ((page_mask & page_size) != 0)
		panic("vm_set_page_size: page size not a power of two");

	for (page_shift = 0; ; page_shift++)
		if ((1U << page_shift) == page_size)
			break;
}
/*
 *	vm_page_bootstrap:
 *
 *	Initializes the resident memory module.
 *
 *	Allocates memory for the page cells, and
 *	for the object/offset-to-page hash table headers.
 *	Each page cell is initialized and placed on the free list.
 *	Returns the range of available kernel virtual memory.
 */
	register vm_page_t	m;
	/*
	 *	Initialize the vm_page template.
	 */

	m = &vm_page_template;
	m->object = VM_OBJECT_NULL;		/* reset later */
	m->offset = (vm_object_offset_t) -1;	/* reset later */

	m->pageq.next = NULL;
	m->pageq.prev = NULL;
	m->listq.next = NULL;
	m->listq.prev = NULL;

	m->reference = FALSE;

	m->dump_cleaning = FALSE;
	m->list_req_pending = FALSE;

	m->fictitious = FALSE;

	m->clustered = FALSE;
	m->lock_supplied = FALSE;

	m->zero_fill = FALSE;
	m->encrypted = FALSE;

	m->phys_page = 0;			/* reset later */

	m->page_lock = VM_PROT_NONE;
	m->unlock_request = VM_PROT_NONE;
	m->page_error = KERN_SUCCESS;
	/*
	 *	Initialize the page queues.
	 */

	mutex_init(&vm_page_queue_free_lock, 0);
	mutex_init(&vm_page_queue_lock, 0);

	vm_page_queue_free = VM_PAGE_NULL;
	vm_page_queue_fictitious = VM_PAGE_NULL;
	queue_init(&vm_page_queue_active);
	queue_init(&vm_page_queue_inactive);
	queue_init(&vm_page_queue_zf);

	vm_page_free_wanted = 0;
	/*
	 *	Steal memory for the map and zone subsystems.
	 */

	vm_map_steal_memory();

	/*
	 *	Allocate (and initialize) the virtual-to-physical
	 *	table hash buckets.
	 *
	 *	The number of buckets should be a power of two to
	 *	get a good hash function.  The following computation
	 *	chooses the first power of two that is greater
	 *	than the number of physical pages in the system.
	 */

	simple_lock_init(&vm_page_bucket_lock, 0);

	if (vm_page_bucket_count == 0) {
		unsigned int npages = pmap_free_pages();

		vm_page_bucket_count = 1;
		while (vm_page_bucket_count < npages)
			vm_page_bucket_count <<= 1;
	}

	vm_page_hash_mask = vm_page_bucket_count - 1;
	/*
	 *	Calculate object shift value for hashing algorithm:
	 *		O = log2(sizeof(struct vm_object))
	 *		B = log2(vm_page_bucket_count)
	 *	        hash shifts the object left by
	 *		B/2 - O
	 */
	size = vm_page_bucket_count;
	for (log1 = 0; size > 1; log1++)
		size /= 2;
	size = sizeof(struct vm_object);
	for (log2 = 0; size > 1; log2++)
		size /= 2;
	vm_page_hash_shift = log1/2 - log2 + 1;

	vm_page_bucket_hash = 1 << ((log1 + 1) >> 1);	/* Get (ceiling of sqrt of table size) */
	vm_page_bucket_hash |= 1 << ((log1 + 1) >> 2);	/* Get (ceiling of quadroot of table size) */
	vm_page_bucket_hash |= 1;			/* Set bit and add 1 - always must be 1 to ensure unique series */

	if (vm_page_hash_mask & vm_page_bucket_count)
		printf("vm_page_bootstrap: WARNING -- strange page hash\n");
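	/*
	 * Worked example (illustrative, not from the original source): on a
	 * machine where pmap_free_pages() reports 262144 free 4 KB pages
	 * (1 GB), the loop above picks vm_page_bucket_count = 262144 = 2^18,
	 * so vm_page_hash_mask = 0x3FFFF and log1 = 18.  If, for the sake of
	 * the example, sizeof(struct vm_object) halves down to 1 in 7 steps
	 * (log2 = 7), then vm_page_hash_shift = 18/2 - 7 + 1 = 3 and
	 * vm_page_bucket_hash = (1 << 9) | (1 << 4) | 1 = 0x211.
	 */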
	vm_page_buckets = (vm_page_bucket_t *)
		pmap_steal_memory(vm_page_bucket_count *
				  sizeof(vm_page_bucket_t));

	for (i = 0; i < vm_page_bucket_count; i++) {
		register vm_page_bucket_t *bucket = &vm_page_buckets[i];

		bucket->pages = VM_PAGE_NULL;
#if	MACH_PAGE_HASH_STATS
		bucket->cur_count = 0;
		bucket->hi_count = 0;
#endif /* MACH_PAGE_HASH_STATS */
	}
	/*
	 *	Machine-dependent code allocates the resident page table.
	 *	It uses vm_page_init to initialize the page frames.
	 *	The code also returns to us the virtual space available
	 *	to the kernel.  We don't trust the pmap module
	 *	to get the alignment right.
	 */

	pmap_startup(&virtual_space_start, &virtual_space_end);
	virtual_space_start = round_page(virtual_space_start);
	virtual_space_end = trunc_page(virtual_space_end);

	*startp = virtual_space_start;
	*endp = virtual_space_end;
	/*
	 *	Compute the initial "wire" count.
	 *	Up until now, the pages which have been set aside are not under
	 *	the VM system's control, so although they aren't explicitly
	 *	wired, they nonetheless can't be moved.  At this moment,
	 *	all VM managed pages are "free", courtesy of pmap_startup.
	 */
	vm_page_wire_count = atop_64(max_mem) - vm_page_free_count;	/* initial value */

	printf("vm_page_bootstrap: %d free pages\n", vm_page_free_count);
	vm_page_free_count_minimum = vm_page_free_count;
	simple_lock_init(&vm_paging_lock, 0);
#ifndef	MACHINE_PAGES
/*
 *	We implement pmap_steal_memory and pmap_startup with the help
 *	of two simpler functions, pmap_virtual_space and pmap_next_page.
 */
	vm_offset_t addr, vaddr;
	/*
	 *	We round the size up to a multiple of the pointer size.
	 */

	size = (size + sizeof (void *) - 1) &~ (sizeof (void *) - 1);
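	/*
	 * Example of the rounding above (illustrative, not in the original
	 * source): with sizeof (void *) == 4, a request of 13 bytes becomes
	 * (13 + 3) & ~3 == 16, while a request that is already a multiple
	 * of the pointer size is left unchanged.
	 */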
	/*
	 *	If this is the first call to pmap_steal_memory,
	 *	we have to initialize ourself.
	 */

	if (virtual_space_start == virtual_space_end) {
		pmap_virtual_space(&virtual_space_start, &virtual_space_end);

		/*
		 *	The initial values must be aligned properly, and
		 *	we don't trust the pmap module to do it right.
		 */

		virtual_space_start = round_page(virtual_space_start);
		virtual_space_end = trunc_page(virtual_space_end);
	}

	/*
	 *	Allocate virtual memory for this request.
	 */

	addr = virtual_space_start;
	virtual_space_start += size;

	kprintf("pmap_steal_memory: %08X - %08X; size=%08X\n", addr, virtual_space_start, size);	/* (TEST/DEBUG) */
	/*
	 *	Allocate and map physical pages to back new virtual pages.
	 */

	for (vaddr = round_page(addr);
	     vaddr < addr + size;
	     vaddr += PAGE_SIZE) {
		if (!pmap_next_page(&phys_page))
			panic("pmap_steal_memory");

		/*
		 *	XXX Logically, these mappings should be wired,
		 *	but some pmap modules barf if they are.
		 */

		pmap_enter(kernel_pmap, vaddr, phys_page,
			   VM_PROT_READ|VM_PROT_WRITE,
			   VM_WIMG_USE_DEFAULT, FALSE);
		/*
		 * Account for newly stolen memory
		 */
		vm_page_wire_count++;
	}

	return (void *) addr;
	unsigned int	i, npages, pages_initialized, fill, fillval;
	unsigned int	num_of_lopages = 0;
	unsigned int	last_index;

	/*
	 *	We calculate how many page frames we will have
	 *	and then allocate the page structures in one chunk.
	 */

	tmpaddr = (addr64_t)pmap_free_pages() * (addr64_t)PAGE_SIZE;	/* Get the amount of memory left */
	tmpaddr = tmpaddr + (addr64_t)(round_page_32(virtual_space_start) - virtual_space_start);	/* Account for any slop */
	npages = (unsigned int)(tmpaddr / (addr64_t)(PAGE_SIZE + sizeof(*pages)));	/* Figure size of all vm_page_ts, including enough to hold the vm_page_ts */

	pages = (vm_page_t) pmap_steal_memory(npages * sizeof *pages);
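	/*
	 * Rough numbers for the sizing above (illustrative, not from the
	 * original source): with 1 GB of memory left, 4 KB pages and a
	 * vm_page structure of about 80 bytes, npages comes out to roughly
	 * 2^30 / 4176, about 257000 instead of 262144, which leaves just
	 * enough of the stolen region to hold the vm_page array itself.
	 */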
	/*
	 *	Initialize the page frames.
	 */

	for (i = 0, pages_initialized = 0; i < npages; i++) {
		if (!pmap_next_page(&phys_page))
			break;

		vm_page_init(&pages[i], phys_page);
		pages_initialized++;
	}

	/*
	 * Check if we want to initialize pages to a known value
	 */

	fill = 0;							/* Assume no fill */
	if (PE_parse_boot_arg("fill", &fillval)) fill = 1;		/* Set fill */
	/*
	 * if vm_lopage_poolsize is non-zero, then we need to reserve
	 * a pool of pages whose addresses are less than 4G... this pool
	 * is used by drivers whose hardware can't DMA beyond 32 bits...
	 *
	 * note that I'm assuming that the page list is ascending and
	 * ordered w/r to the physical address
	 */
	for (i = 0, num_of_lopages = vm_lopage_poolsize; num_of_lopages && i < pages_initialized; num_of_lopages--, i++) {
		m = &pages[i];

		if (m->phys_page >= (1 << (32 - PAGE_SHIFT)))
			panic("couldn't reserve the lopage pool: not enough lo pages\n");

		if (m->phys_page < vm_lopage_poolend)
			panic("couldn't reserve the lopage pool: page list out of order\n");

		vm_lopage_poolend = m->phys_page;

		if (vm_lopage_poolstart == 0)
			vm_lopage_poolstart = m->phys_page;
		else if (m->phys_page < vm_lopage_poolstart)
			panic("couldn't reserve the lopage pool: page list out of order\n");

		if (fill)
			fillPage(m->phys_page, fillval);	/* Fill the page with a known value if requested at boot */
	}
	// -debug code remove
	if (2 == vm_himemory_mode) {
		// free low -> high so high is preferred
		for (i = last_index + 1; i <= pages_initialized; i++) {
			if(fill) fillPage(pages[i - 1].phys_page, fillval);	/* Fill the page with a known value if requested at boot */
			vm_page_release(&pages[i - 1]);
		}
	}
	// debug code remove-

	/*
	 * Release pages in reverse order so that physical pages
	 * initially get allocated in ascending addresses. This keeps
	 * the devices (which must address physical memory) happy if
	 * they require several consecutive pages.
	 */
	for (i = pages_initialized; i > last_index; i--) {
		if(fill) fillPage(pages[i - 1].phys_page, fillval);	/* Fill the page with a known value if requested at boot */
		vm_page_release(&pages[i - 1]);
	}
	{
		vm_page_t	xx, xxo, xxl;
		unsigned int	j, k, l;

		j = 0;							/* (BRINGUP) */

		for(xx = vm_page_queue_free; xx; xxl = xx, xx = xx->pageq.next) {	/* (BRINGUP) */
			j++;						/* (BRINGUP) */
			if(j > vm_page_free_count) {			/* (BRINGUP) */
				panic("pmap_startup: too many pages, xx = %08X, xxl = %08X\n", xx, xxl);
			}

			l = vm_page_free_count - j;			/* (BRINGUP) */
			k = 0;						/* (BRINGUP) */

			if(((j - 1) & 0xFFFF) == 0) kprintf("checking number %d of %d\n", j, vm_page_free_count);

			for(xxo = xx->pageq.next; xxo; xxo = xxo->pageq.next) {	/* (BRINGUP) */
				k++;					/* (BRINGUP) */
				if(k > l) panic("pmap_startup: too many in secondary check %d %d\n", k, l);
				if((xx->phys_page & 0xFFFFFFFF) == (xxo->phys_page & 0xFFFFFFFF)) {	/* (BRINGUP) */
					panic("pmap_startup: duplicate physaddr, xx = %08X, xxo = %08X\n", xx, xxo);
				}
			}
		}

		if(j != vm_page_free_count) {				/* (BRINGUP) */
			panic("pmap_startup: vm_page_free_count does not match, calc = %d, vm_page_free_count = %08X\n", j, vm_page_free_count);
		}
	}
	/*
	 *	We have to re-align virtual_space_start,
	 *	because pmap_steal_memory has been using it.
	 */

	virtual_space_start = round_page_32(virtual_space_start);

	*startp = virtual_space_start;
	*endp = virtual_space_end;
}
#endif	/* MACHINE_PAGES */
/*
 *	Routine:	vm_page_module_init
 *	Purpose:
 *		Second initialization pass, to be done after
 *		the basic VM system is ready.
 */
void
vm_page_module_init(void)
{
	vm_page_zone = zinit((vm_size_t) sizeof(struct vm_page),
			     0, PAGE_SIZE, "vm pages");

#if	ZONE_DEBUG
	zone_debug_disable(vm_page_zone);
#endif	/* ZONE_DEBUG */

	zone_change(vm_page_zone, Z_EXPAND, FALSE);
	zone_change(vm_page_zone, Z_EXHAUST, TRUE);
	zone_change(vm_page_zone, Z_FOREIGN, TRUE);

	/*
	 * Adjust zone statistics to account for the real pages allocated
	 * in vm_page_create(). [Q: is this really what we want?]
	 */
	vm_page_zone->count += vm_page_pages;
	vm_page_zone->cur_size += vm_page_pages * vm_page_zone->elem_size;

	mutex_init(&vm_page_alloc_lock, 0);
}
/*
 *	Routine:	vm_page_create
 *	Purpose:
 *		After the VM system is up, machine-dependent code
 *		may stumble across more physical memory.  For example,
 *		memory that it was reserving for a frame buffer.
 *		vm_page_create turns this memory into available pages.
 */

	for (phys_page = start;
	     phys_page < end;
	     phys_page++) {
		while ((m = (vm_page_t) vm_page_grab_fictitious())
			== VM_PAGE_NULL)
			vm_page_more_fictitious();

		vm_page_init(m, phys_page);
/*
 *	Distributes the object/offset key pair among hash buckets.
 *
 *	NOTE:	The bucket count must be a power of 2
 */
#define vm_page_hash(object, offset) (\
	( (natural_t)((uint32_t)object * vm_page_bucket_hash) + ((uint32_t)atop_64(offset) ^ vm_page_bucket_hash))\
	 & vm_page_hash_mask)
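/*
 * Illustrative arithmetic (made-up values, not from the original
 * source): with vm_page_bucket_hash = 0x211 and vm_page_hash_mask =
 * 0x3FFFF, an object pointer of 0x01000000 and an offset of 0x3000
 * (page index 3 with 4 KB pages) land in bucket
 * ((0x01000000 * 0x211) + (3 ^ 0x211)) & 0x3FFFF = 0x212.
 */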
/*
 *	vm_page_insert:		[ internal use only ]
 *
 *	Inserts the given mem entry into the object/object-page
 *	table and object list.
 *
 *	The object must be locked.
 */

void
vm_page_insert(
	register vm_page_t		mem,
	register vm_object_t		object,
	register vm_object_offset_t	offset)
{
	register vm_page_bucket_t *bucket;

	XPR(XPR_VM_PAGE,
	    "vm_page_insert, object 0x%X offset 0x%X page 0x%X\n",
	    (integer_t)object, (integer_t)offset, (integer_t)mem, 0,0);
	_mutex_assert(&object->Lock, MA_OWNED);

	if (mem->tabled || mem->object != VM_OBJECT_NULL)
		panic("vm_page_insert: page %p for (obj=%p,off=0x%llx) "
		      "already in (obj=%p,off=0x%llx)",
		      mem, object, offset, mem->object, mem->offset);

	assert(!object->internal || offset < object->size);

	/* only insert "pageout" pages into "pageout" objects,
	 * and normal pages into normal objects */
	assert(object->pageout == mem->pageout);

	assert(vm_page_lookup(object, offset) == VM_PAGE_NULL);

	/*
	 *	Record the object/offset pair in this page
	 */

	mem->object = object;
	mem->offset = offset;

	/*
	 *	Insert it into the object_object/offset hash table
	 */

	bucket = &vm_page_buckets[vm_page_hash(object, offset)];
	simple_lock(&vm_page_bucket_lock);
	mem->next = bucket->pages;
	bucket->pages = mem;
#if	MACH_PAGE_HASH_STATS
	if (++bucket->cur_count > bucket->hi_count)
		bucket->hi_count = bucket->cur_count;
#endif /* MACH_PAGE_HASH_STATS */
	simple_unlock(&vm_page_bucket_lock);

	/*
	 *	Now link into the object's list of backed pages.
	 */

	VM_PAGE_INSERT(mem, object);

	/*
	 *	Show that the object has one more resident page.
	 */

	object->resident_page_count++;

	if (object->purgable == VM_OBJECT_PURGABLE_VOLATILE ||
	    object->purgable == VM_OBJECT_PURGABLE_EMPTY) {
		vm_page_lock_queues();
		vm_page_purgeable_count++;
		vm_page_unlock_queues();
	}
}
854 * Exactly like vm_page_insert, except that we first
855 * remove any existing page at the given offset in object.
857 * The object and page queues must be locked.
862 register vm_page_t mem
,
863 register vm_object_t object
,
864 register vm_object_offset_t offset
)
866 vm_page_bucket_t
*bucket
;
867 vm_page_t found_m
= VM_PAGE_NULL
;
871 _mutex_assert(&object
->Lock
, MA_OWNED
);
872 _mutex_assert(&vm_page_queue_lock
, MA_OWNED
);
874 if (mem
->tabled
|| mem
->object
!= VM_OBJECT_NULL
)
875 panic("vm_page_replace: page %p for (obj=%p,off=0x%llx) "
876 "already in (obj=%p,off=0x%llx)",
877 mem
, object
, offset
, mem
->object
, mem
->offset
);
880 * Record the object/offset pair in this page
883 mem
->object
= object
;
884 mem
->offset
= offset
;
887 * Insert it into the object_object/offset hash table,
888 * replacing any page that might have been there.
891 bucket
= &vm_page_buckets
[vm_page_hash(object
, offset
)];
892 simple_lock(&vm_page_bucket_lock
);
895 vm_page_t
*mp
= &bucket
->pages
;
896 register vm_page_t m
= *mp
;
899 if (m
->object
== object
&& m
->offset
== offset
) {
901 * Remove old page from hash list
911 mem
->next
= bucket
->pages
;
913 mem
->next
= VM_PAGE_NULL
;
916 * insert new page at head of hash list
920 simple_unlock(&vm_page_bucket_lock
);
924 * there was already a page at the specified
925 * offset for this object... remove it from
926 * the object and free it back to the free list
928 VM_PAGE_REMOVE(found_m
);
929 found_m
->tabled
= FALSE
;
931 found_m
->object
= VM_OBJECT_NULL
;
932 found_m
->offset
= (vm_object_offset_t
) -1;
933 object
->resident_page_count
--;
935 if (object
->purgable
== VM_OBJECT_PURGABLE_VOLATILE
||
936 object
->purgable
== VM_OBJECT_PURGABLE_EMPTY
) {
937 assert(vm_page_purgeable_count
> 0);
938 vm_page_purgeable_count
--;
942 * Return page to the free list.
943 * Note the page is not tabled now
945 vm_page_free(found_m
);
948 * Now link into the object's list of backed pages.
951 VM_PAGE_INSERT(mem
, object
);
955 * And show that the object has one more resident
959 object
->resident_page_count
++;
961 if (object
->purgable
== VM_OBJECT_PURGABLE_VOLATILE
||
962 object
->purgable
== VM_OBJECT_PURGABLE_EMPTY
) {
963 vm_page_purgeable_count
++;
968 * vm_page_remove: [ internal use only ]
970 * Removes the given mem entry from the object/offset-page
971 * table and the object page list.
973 * The object and page queues must be locked.
978 register vm_page_t mem
)
980 register vm_page_bucket_t
*bucket
;
981 register vm_page_t
this;
984 "vm_page_remove, object 0x%X offset 0x%X page 0x%X\n",
985 (integer_t
)mem
->object
, (integer_t
)mem
->offset
,
986 (integer_t
)mem
, 0,0);
988 _mutex_assert(&vm_page_queue_lock
, MA_OWNED
);
989 _mutex_assert(&mem
->object
->Lock
, MA_OWNED
);
992 assert(!mem
->cleaning
);
997 * Remove from the object_object/offset hash table
1000 bucket
= &vm_page_buckets
[vm_page_hash(mem
->object
, mem
->offset
)];
1001 simple_lock(&vm_page_bucket_lock
);
1002 if ((this = bucket
->pages
) == mem
) {
1003 /* optimize for common case */
1005 bucket
->pages
= mem
->next
;
1007 register vm_page_t
*prev
;
1009 for (prev
= &this->next
;
1010 (this = *prev
) != mem
;
1015 #if MACH_PAGE_HASH_STATS
1016 bucket
->cur_count
--;
1017 #endif /* MACH_PAGE_HASH_STATS */
1018 simple_unlock(&vm_page_bucket_lock
);
1021 * Now remove from the object's list of backed pages.
1024 VM_PAGE_REMOVE(mem
);
1027 * And show that the object has one fewer resident
1031 mem
->object
->resident_page_count
--;
1033 if (mem
->object
->purgable
== VM_OBJECT_PURGABLE_VOLATILE
||
1034 mem
->object
->purgable
== VM_OBJECT_PURGABLE_EMPTY
) {
1035 assert(vm_page_purgeable_count
> 0);
1036 vm_page_purgeable_count
--;
1039 mem
->tabled
= FALSE
;
1040 mem
->object
= VM_OBJECT_NULL
;
1041 mem
->offset
= (vm_object_offset_t
) -1;
1047 * Returns the page associated with the object/offset
1048 * pair specified; if none is found, VM_PAGE_NULL is returned.
1050 * The object must be locked. No side effects.
1053 unsigned long vm_page_lookup_hint
= 0;
1054 unsigned long vm_page_lookup_hint_next
= 0;
1055 unsigned long vm_page_lookup_hint_prev
= 0;
1056 unsigned long vm_page_lookup_hint_miss
= 0;
1060 register vm_object_t object
,
1061 register vm_object_offset_t offset
)
1063 register vm_page_t mem
;
1064 register vm_page_bucket_t
*bucket
;
1067 _mutex_assert(&object
->Lock
, MA_OWNED
);
1070 mem
= object
->memq_hint
;
1071 if (mem
!= VM_PAGE_NULL
) {
1072 assert(mem
->object
== object
);
1073 if (mem
->offset
== offset
) {
1074 vm_page_lookup_hint
++;
1077 qe
= queue_next(&mem
->listq
);
1078 if (! queue_end(&object
->memq
, qe
)) {
1079 vm_page_t next_page
;
1081 next_page
= (vm_page_t
) qe
;
1082 assert(next_page
->object
== object
);
1083 if (next_page
->offset
== offset
) {
1084 vm_page_lookup_hint_next
++;
1085 object
->memq_hint
= next_page
; /* new hint */
1089 qe
= queue_prev(&mem
->listq
);
1090 if (! queue_end(&object
->memq
, qe
)) {
1091 vm_page_t prev_page
;
1093 prev_page
= (vm_page_t
) qe
;
1094 assert(prev_page
->object
== object
);
1095 if (prev_page
->offset
== offset
) {
1096 vm_page_lookup_hint_prev
++;
1097 object
->memq_hint
= prev_page
; /* new hint */
	/*
	 * Search the hash table for this object/offset pair
	 */
	bucket = &vm_page_buckets[vm_page_hash(object, offset)];

	/*
	 * since we hold the object lock, we are guaranteed that no
	 * new pages can be inserted into this object... this in turn
	 * guarantees that the page we're looking for can't exist
	 * if the bucket it hashes to is currently NULL even when looked
	 * at outside the scope of the hash bucket lock... this is a
	 * really cheap optimization to avoid taking the lock
	 */
1117 if (bucket
->pages
== VM_PAGE_NULL
) {
1118 return (VM_PAGE_NULL
);
1120 simple_lock(&vm_page_bucket_lock
);
1122 for (mem
= bucket
->pages
; mem
!= VM_PAGE_NULL
; mem
= mem
->next
) {
1124 if ((mem
->object
== object
) && (mem
->offset
== offset
))
1127 simple_unlock(&vm_page_bucket_lock
);
1129 if (mem
!= VM_PAGE_NULL
) {
1130 if (object
->memq_hint
!= VM_PAGE_NULL
) {
1131 vm_page_lookup_hint_miss
++;
1133 assert(mem
->object
== object
);
1134 object
->memq_hint
= mem
;
1142 vm_page_lookup_nohint(
1144 vm_object_offset_t offset
)
1146 register vm_page_t mem
;
1147 register vm_page_bucket_t
*bucket
;
1150 _mutex_assert(&object
->Lock
, MA_OWNED
);
1153 * Search the hash table for this object/offset pair
1156 bucket
= &vm_page_buckets
[vm_page_hash(object
, offset
)];
1158 simple_lock(&vm_page_bucket_lock
);
1159 for (mem
= bucket
->pages
; mem
!= VM_PAGE_NULL
; mem
= mem
->next
) {
1161 if ((mem
->object
== object
) && (mem
->offset
== offset
))
1164 simple_unlock(&vm_page_bucket_lock
);
1172 * Move the given memory entry from its
1173 * current object to the specified target object/offset.
1175 * The object must be locked.
1179 register vm_page_t mem
,
1180 register vm_object_t new_object
,
1181 vm_object_offset_t new_offset
)
1183 assert(mem
->object
!= new_object
);
1186 * The encryption key is based on the page's memory object
1187 * (aka "pager") and paging offset. Moving the page to
1188 * another VM object changes its "pager" and "paging_offset"
1189 * so it has to be decrypted first.
1191 if (mem
->encrypted
) {
1192 panic("vm_page_rename: page %p is encrypted\n", mem
);
1195 * Changes to mem->object require the page lock because
1196 * the pageout daemon uses that lock to get the object.
1200 "vm_page_rename, new object 0x%X, offset 0x%X page 0x%X\n",
1201 (integer_t
)new_object
, (integer_t
)new_offset
,
1202 (integer_t
)mem
, 0,0);
1204 vm_page_lock_queues();
1205 vm_page_remove(mem
);
1206 vm_page_insert(mem
, new_object
, new_offset
);
1207 vm_page_unlock_queues();
1213 * Initialize the fields in a new page.
1214 * This takes a structure with random values and initializes it
1215 * so that it can be given to vm_page_release or vm_page_insert.
1223 *mem
= vm_page_template
;
1224 mem
->phys_page
= phys_page
;
/*
 *	vm_page_grab_fictitious:
 *
 *	Remove a fictitious page from the free list.
 *	Returns VM_PAGE_NULL if there are no free pages.
 */
int	c_vm_page_grab_fictitious = 0;
int	c_vm_page_release_fictitious = 0;
int	c_vm_page_more_fictitious = 0;

vm_page_t
vm_page_grab_fictitious(void)
{
	register vm_page_t m;

	m = (vm_page_t)zget(vm_page_zone);
	if (m) {
		vm_page_init(m, vm_page_fictitious_addr);
		m->fictitious = TRUE;
	}

	c_vm_page_grab_fictitious++;
	return m;
}
1253 * vm_page_release_fictitious:
1255 * Release a fictitious page to the free list.
1259 vm_page_release_fictitious(
1260 register vm_page_t m
)
1264 assert(m
->fictitious
);
1265 assert(m
->phys_page
== vm_page_fictitious_addr
);
1267 c_vm_page_release_fictitious
++;
1270 panic("vm_page_release_fictitious");
1273 zfree(vm_page_zone
, m
);
/*
 *	vm_page_more_fictitious:
 *
 *	Add more fictitious pages to the free list.
 *	Allowed to block. This routine is way intimate
 *	with the zones code, for several reasons:
 *	1. we need to carve some page structures out of physical
 *	   memory before zones work, so they _cannot_ come from
 *	   the zone_map.
 *	2. the zone needs to be collectable in order to prevent
 *	   growth without bound. These structures are used by
 *	   the device pager (by the hundreds and thousands), as
 *	   private pages for pageout, and as blocking pages for
 *	   pagein. Temporary bursts in demand should not result in
 *	   permanent allocation of a resource.
 *	3. To smooth allocation humps, we allocate single pages
 *	   with kernel_memory_allocate(), and cram them into the
 *	   zone. This also allows us to initialize the vm_page_t's
 *	   on the way into the zone, so that zget() always returns
 *	   an initialized structure. The zone free element pointer
 *	   and the free page pointer are both the first item in the
 *	   vm_page_t.
 *	4. By having the pages in the zone pre-initialized, we need
 *	   not keep 2 levels of lists. The garbage collector simply
 *	   scans our list, and reduces physical memory usage as it
 *	   sees fit.
 */

void vm_page_more_fictitious(void)
{
	register vm_page_t	m;
	kern_return_t		retval;

	c_vm_page_more_fictitious++;
1314 * Allocate a single page from the zone_map. Do not wait if no physical
1315 * pages are immediately available, and do not zero the space. We need
1316 * our own blocking lock here to prevent having multiple,
1317 * simultaneous requests from piling up on the zone_map lock. Exactly
1318 * one (of our) threads should be potentially waiting on the map lock.
1319 * If winner is not vm-privileged, then the page allocation will fail,
1320 * and it will temporarily block here in the vm_page_wait().
1322 mutex_lock(&vm_page_alloc_lock
);
1324 * If another thread allocated space, just bail out now.
1326 if (zone_free_count(vm_page_zone
) > 5) {
1328 * The number "5" is a small number that is larger than the
1329 * number of fictitious pages that any single caller will
1330 * attempt to allocate. Otherwise, a thread will attempt to
1331 * acquire a fictitious page (vm_page_grab_fictitious), fail,
1332 * release all of the resources and locks already acquired,
1333 * and then call this routine. This routine finds the pages
1334 * that the caller released, so fails to allocate new space.
1335 * The process repeats infinitely. The largest known number
1336 * of fictitious pages required in this manner is 2. 5 is
1337 * simply a somewhat larger number.
1339 mutex_unlock(&vm_page_alloc_lock
);
1343 retval
= kernel_memory_allocate(zone_map
,
1344 &addr
, PAGE_SIZE
, VM_PROT_ALL
,
1345 KMA_KOBJECT
|KMA_NOPAGEWAIT
);
1346 if (retval
!= KERN_SUCCESS
) {
1348 * No page was available. Tell the pageout daemon, drop the
1349 * lock to give another thread a chance at it, and
1350 * wait for the pageout daemon to make progress.
1352 mutex_unlock(&vm_page_alloc_lock
);
1353 vm_page_wait(THREAD_UNINT
);
1357 * Initialize as many vm_page_t's as will fit on this page. This
1358 * depends on the zone code disturbing ONLY the first item of
1359 * each zone element.
1361 m
= (vm_page_t
)addr
;
1362 for (i
= PAGE_SIZE
/sizeof(struct vm_page
); i
> 0; i
--) {
1363 vm_page_init(m
, vm_page_fictitious_addr
);
1364 m
->fictitious
= TRUE
;
1367 zcram(vm_page_zone
, (void *) addr
, PAGE_SIZE
);
1368 mutex_unlock(&vm_page_alloc_lock
);
1374 * Attempt to convert a fictitious page into a real page.
1379 register vm_page_t m
)
1381 register vm_page_t real_m
;
1384 assert(m
->fictitious
);
1387 real_m
= vm_page_grab();
1388 if (real_m
== VM_PAGE_NULL
)
1391 m
->phys_page
= real_m
->phys_page
;
1392 m
->fictitious
= FALSE
;
1395 vm_page_lock_queues();
1397 vm_page_active_count
++;
1398 else if (m
->inactive
)
1399 vm_page_inactive_count
++;
1400 vm_page_unlock_queues();
1402 real_m
->phys_page
= vm_page_fictitious_addr
;
1403 real_m
->fictitious
= TRUE
;
1405 vm_page_release_fictitious(real_m
);
/*
 *	Return true if it is not likely that a non-vm_privileged thread
 *	can get memory without blocking.  Advisory only, since the
 *	situation may change under us.
 */
	/* No locking, at worst we will fib. */
	return( vm_page_free_count < vm_page_free_reserved );

/*
 *	this is an interface to support bring-up of drivers
 *	on platforms with physical memory > 4G...
 */
int		vm_himemory_mode = 0;

/*
 *	this interface exists to support hardware controllers
 *	incapable of generating DMAs with more than 32 bits
 *	of address on platforms with physical memory > 4G...
 */
unsigned int	vm_lopage_free_count = 0;
unsigned int	vm_lopage_max_count = 0;
vm_page_t	vm_lopage_queue_free = VM_PAGE_NULL;
1442 vm_page_grablo(void)
1444 register vm_page_t mem
;
1445 unsigned int vm_lopage_alloc_count
;
1447 if (vm_lopage_poolsize
== 0)
1448 return (vm_page_grab());
1450 mutex_lock(&vm_page_queue_free_lock
);
1452 if ((mem
= vm_lopage_queue_free
) != VM_PAGE_NULL
) {
1454 vm_lopage_queue_free
= (vm_page_t
) mem
->pageq
.next
;
1455 mem
->pageq
.next
= NULL
;
1456 mem
->pageq
.prev
= NULL
;
1458 mem
->no_isync
= TRUE
;
1460 vm_lopage_free_count
--;
1461 vm_lopage_alloc_count
= (vm_lopage_poolend
- vm_lopage_poolstart
) - vm_lopage_free_count
;
1462 if (vm_lopage_alloc_count
> vm_lopage_max_count
)
1463 vm_lopage_max_count
= vm_lopage_alloc_count
;
1465 mutex_unlock(&vm_page_queue_free_lock
);
1475 * Remove a page from the free list.
1476 * Returns VM_PAGE_NULL if the free list is too small.
1479 unsigned long vm_page_grab_count
= 0; /* measure demand */
1484 register vm_page_t mem
;
1486 mutex_lock(&vm_page_queue_free_lock
);
1487 vm_page_grab_count
++;
1490 * Optionally produce warnings if the wire or gobble
1491 * counts exceed some threshold.
1493 if (vm_page_wire_count_warning
> 0
1494 && vm_page_wire_count
>= vm_page_wire_count_warning
) {
1495 printf("mk: vm_page_grab(): high wired page count of %d\n",
1496 vm_page_wire_count
);
1497 assert(vm_page_wire_count
< vm_page_wire_count_warning
);
1499 if (vm_page_gobble_count_warning
> 0
1500 && vm_page_gobble_count
>= vm_page_gobble_count_warning
) {
1501 printf("mk: vm_page_grab(): high gobbled page count of %d\n",
1502 vm_page_gobble_count
);
1503 assert(vm_page_gobble_count
< vm_page_gobble_count_warning
);
1507 * Only let privileged threads (involved in pageout)
1508 * dip into the reserved pool.
1511 if ((vm_page_free_count
< vm_page_free_reserved
) &&
1512 !(current_thread()->options
& TH_OPT_VMPRIV
)) {
1513 mutex_unlock(&vm_page_queue_free_lock
);
1515 goto wakeup_pageout
;
1518 while (vm_page_queue_free
== VM_PAGE_NULL
) {
1519 mutex_unlock(&vm_page_queue_free_lock
);
1521 mutex_lock(&vm_page_queue_free_lock
);
1524 if (--vm_page_free_count
< vm_page_free_count_minimum
)
1525 vm_page_free_count_minimum
= vm_page_free_count
;
1526 mem
= vm_page_queue_free
;
1527 vm_page_queue_free
= (vm_page_t
) mem
->pageq
.next
;
1528 mem
->pageq
.next
= NULL
;
1529 mem
->pageq
.prev
= NULL
;
1530 assert(mem
->listq
.next
== NULL
&& mem
->listq
.prev
== NULL
);
1531 assert(mem
->tabled
== FALSE
);
1532 assert(mem
->object
== VM_OBJECT_NULL
);
1533 assert(!mem
->laundry
);
1535 mem
->no_isync
= TRUE
;
1536 mutex_unlock(&vm_page_queue_free_lock
);
1538 assert(pmap_verify_free(mem
->phys_page
));
1541 * Decide if we should poke the pageout daemon.
1542 * We do this if the free count is less than the low
1543 * water mark, or if the free count is less than the high
1544 * water mark (but above the low water mark) and the inactive
1545 * count is less than its target.
1547 * We don't have the counts locked ... if they change a little,
1548 * it doesn't really matter.
1552 if ((vm_page_free_count
< vm_page_free_min
) ||
1553 ((vm_page_free_count
< vm_page_free_target
) &&
1554 (vm_page_inactive_count
< vm_page_inactive_target
)))
1555 thread_wakeup((event_t
) &vm_page_free_wanted
);
1557 // dbgLog(mem->phys_page, vm_page_free_count, vm_page_wire_count, 4); /* (TEST/DEBUG) */
1565 * Return a page to the free list.
1570 register vm_page_t mem
)
1574 unsigned int pindex
;
1575 phys_entry
*physent
;
1577 physent
= mapping_phys_lookup(mem
->phys_page
, &pindex
); /* (BRINGUP) */
1578 if(physent
->ppLink
& ppN
) { /* (BRINGUP) */
1579 panic("vm_page_release: already released - %08X %08X\n", mem
, mem
->phys_page
);
1581 physent
->ppLink
= physent
->ppLink
| ppN
; /* (BRINGUP) */
1583 assert(!mem
->private && !mem
->fictitious
);
1585 // dbgLog(mem->phys_page, vm_page_free_count, vm_page_wire_count, 5); /* (TEST/DEBUG) */
1587 mutex_lock(&vm_page_queue_free_lock
);
1590 panic("vm_page_release");
1593 assert(!mem
->laundry
);
1594 assert(mem
->object
== VM_OBJECT_NULL
);
1595 assert(mem
->pageq
.next
== NULL
&&
1596 mem
->pageq
.prev
== NULL
);
1598 if (mem
->phys_page
<= vm_lopage_poolend
&& mem
->phys_page
>= vm_lopage_poolstart
) {
1600 * this exists to support hardware controllers
1601 * incapable of generating DMAs with more than 32 bits
1602 * of address on platforms with physical memory > 4G...
1604 mem
->pageq
.next
= (queue_entry_t
) vm_lopage_queue_free
;
1605 vm_lopage_queue_free
= mem
;
1606 vm_lopage_free_count
++;
1608 mem
->pageq
.next
= (queue_entry_t
) vm_page_queue_free
;
1609 vm_page_queue_free
= mem
;
1610 vm_page_free_count
++;
	/*
	 *	Check if we should wake up someone waiting for page.
	 *	But don't bother waking them unless they can allocate.
	 *
	 *	We wakeup only one thread, to prevent starvation.
	 *	Because the scheduling system handles wait queues FIFO,
	 *	if we wakeup all waiting threads, one greedy thread
	 *	can starve multiple niceguy threads.  When the threads
	 *	all wakeup, the greedy thread runs first, grabs the page,
	 *	and waits for another page.  It will be the first to run
	 *	when the next page is freed.
	 *
	 *	However, there is a slight danger here.
	 *	The thread we wake might not use the free page.
	 *	Then the other threads could wait indefinitely
	 *	while the page goes unused.  To forestall this,
	 *	the pageout daemon will keep making free pages
	 *	as long as vm_page_free_wanted is non-zero.
	 */
1631 if ((vm_page_free_wanted
> 0) &&
1632 (vm_page_free_count
>= vm_page_free_reserved
)) {
1633 vm_page_free_wanted
--;
1634 thread_wakeup_one((event_t
) &vm_page_free_count
);
1637 mutex_unlock(&vm_page_queue_free_lock
);
/*
 *	vm_page_wait:
 *
 *	Wait for a page to become available.
 *	If there are plenty of free pages, then we don't sleep.
 *
 *	Returns:
 *		TRUE:  There may be another page, try again
 *		FALSE: We were interrupted out of our wait, don't try again
 */

boolean_t
vm_page_wait(
	int	interruptible )
{
	/*
	 *	We can't use vm_page_free_reserved to make this
	 *	determination.  Consider: some thread might
	 *	need to allocate two pages.  The first allocation
	 *	succeeds, the second fails.  After the first page is freed,
	 *	a call to vm_page_wait must really block.
	 */
	kern_return_t	wait_result;
	int		need_wakeup = 0;

	mutex_lock(&vm_page_queue_free_lock);
	if (vm_page_free_count < vm_page_free_target) {
		if (vm_page_free_wanted++ == 0)
			need_wakeup = 1;
		wait_result = assert_wait((event_t)&vm_page_free_count, interruptible);
		mutex_unlock(&vm_page_queue_free_lock);
		counter(c_vm_page_wait_block++);

		if (need_wakeup)
			thread_wakeup((event_t)&vm_page_free_wanted);

		if (wait_result == THREAD_WAITING)
			wait_result = thread_block(THREAD_CONTINUE_NULL);

		return(wait_result == THREAD_AWAKENED);
	} else {
		mutex_unlock(&vm_page_queue_free_lock);
		return TRUE;
	}
}
1689 * Allocate and return a memory cell associated
1690 * with this VM object/offset pair.
1692 * Object must be locked.
1698 vm_object_offset_t offset
)
1700 register vm_page_t mem
;
1703 _mutex_assert(&object
->Lock
, MA_OWNED
);
1705 mem
= vm_page_grab();
1706 if (mem
== VM_PAGE_NULL
)
1707 return VM_PAGE_NULL
;
1709 vm_page_insert(mem
, object
, offset
);
1718 vm_object_offset_t offset
)
1720 register vm_page_t mem
;
1723 _mutex_assert(&object
->Lock
, MA_OWNED
);
1725 mem
= vm_page_grablo();
1726 if (mem
== VM_PAGE_NULL
)
1727 return VM_PAGE_NULL
;
1729 vm_page_insert(mem
, object
, offset
);
1735 counter(unsigned int c_laundry_pages_freed
= 0;)
1737 int vm_pagein_cluster_unused
= 0;
1738 boolean_t vm_page_free_verify
= TRUE
;
1742 * Returns the given page to the free list,
1743 * disassociating it with any VM object.
1745 * Object and page queues must be locked prior to entry.
1749 register vm_page_t mem
)
1751 vm_object_t object
= mem
->object
;
1754 assert(!mem
->cleaning
);
1755 assert(!mem
->pageout
);
1756 if (vm_page_free_verify
&& !mem
->fictitious
&& !mem
->private) {
1757 assert(pmap_verify_free(mem
->phys_page
));
1762 _mutex_assert(&mem
->object
->Lock
, MA_OWNED
);
1763 _mutex_assert(&vm_page_queue_lock
, MA_OWNED
);
1766 panic("vm_page_free: freeing page on free list\n");
1769 vm_page_remove(mem
); /* clears tabled, object, offset */
1770 VM_PAGE_QUEUES_REMOVE(mem
); /* clears active or inactive */
1772 if (mem
->clustered
) {
1773 mem
->clustered
= FALSE
;
1774 vm_pagein_cluster_unused
++;
1777 if (mem
->wire_count
) {
1778 if (!mem
->private && !mem
->fictitious
)
1779 vm_page_wire_count
--;
1780 mem
->wire_count
= 0;
1781 assert(!mem
->gobbled
);
1782 } else if (mem
->gobbled
) {
1783 if (!mem
->private && !mem
->fictitious
)
1784 vm_page_wire_count
--;
1785 vm_page_gobble_count
--;
1787 mem
->gobbled
= FALSE
;
1790 vm_pageout_throttle_up(mem
);
1791 counter(++c_laundry_pages_freed
);
1794 PAGE_WAKEUP(mem
); /* clears wanted */
1797 vm_object_absent_release(object
);
1799 /* Some of these may be unnecessary */
1801 mem
->unlock_request
= 0;
1803 mem
->absent
= FALSE
;
1806 mem
->precious
= FALSE
;
1807 mem
->reference
= FALSE
;
1808 mem
->encrypted
= FALSE
;
1810 mem
->page_error
= KERN_SUCCESS
;
1813 mem
->private = FALSE
;
1814 mem
->fictitious
= TRUE
;
1815 mem
->phys_page
= vm_page_fictitious_addr
;
1817 if (mem
->fictitious
) {
1818 vm_page_release_fictitious(mem
);
1820 /* depends on the queues lock */
1821 if(mem
->zero_fill
) {
1823 mem
->zero_fill
= FALSE
;
1825 vm_page_init(mem
, mem
->phys_page
);
1826 vm_page_release(mem
);
1833 register vm_page_t mem
)
1835 register vm_page_t nxt
;
1836 register vm_page_t first
= NULL
;
1837 register vm_page_t last
= VM_PAGE_NULL
;
1838 register int pg_count
= 0;
1841 _mutex_assert(&vm_page_queue_lock
, MA_OWNED
);
1845 if (mem
->tabled
|| mem
->object
)
1846 panic("vm_page_free_list: freeing tabled page\n");
1847 if (mem
->inactive
|| mem
->active
|| mem
->free
)
1848 panic("vm_page_free_list: freeing page on list\n");
1850 assert(mem
->pageq
.prev
== NULL
);
1851 nxt
= (vm_page_t
)(mem
->pageq
.next
);
1854 vm_pagein_cluster_unused
++;
1857 vm_pageout_throttle_up(mem
);
1858 counter(++c_laundry_pages_freed
);
1862 PAGE_WAKEUP(mem
); /* clears wanted */
1865 mem
->fictitious
= TRUE
;
1867 if (!mem
->fictitious
) {
1868 /* depends on the queues lock */
1871 assert(!mem
->laundry
);
1872 vm_page_init(mem
, mem
->phys_page
);
1878 mem
->pageq
.next
= (queue_t
) first
;
1883 mem
->phys_page
= vm_page_fictitious_addr
;
1884 vm_page_release_fictitious(mem
);
1890 mutex_lock(&vm_page_queue_free_lock
);
1892 last
->pageq
.next
= (queue_entry_t
) vm_page_queue_free
;
1893 vm_page_queue_free
= first
;
1895 vm_page_free_count
+= pg_count
;
1897 if ((vm_page_free_wanted
> 0) &&
1898 (vm_page_free_count
>= vm_page_free_reserved
)) {
1899 unsigned int available_pages
;
1901 if (vm_page_free_count
>= vm_page_free_reserved
) {
1902 available_pages
= (vm_page_free_count
1903 - vm_page_free_reserved
);
1905 available_pages
= 0;
1908 if (available_pages
>= vm_page_free_wanted
) {
1909 vm_page_free_wanted
= 0;
1910 thread_wakeup((event_t
) &vm_page_free_count
);
1912 while (available_pages
--) {
1913 vm_page_free_wanted
--;
1914 thread_wakeup_one((event_t
) &vm_page_free_count
);
1918 mutex_unlock(&vm_page_queue_free_lock
);
1926 * Mark this page as wired down by yet
1927 * another map, removing it from paging queues
1930 * The page's object and the page queues must be locked.
1934 register vm_page_t mem
)
1937 // dbgLog(current_thread(), mem->offset, mem->object, 1); /* (TEST/DEBUG) */
1942 _mutex_assert(&mem
->object
->Lock
, MA_OWNED
);
1943 _mutex_assert(&vm_page_queue_lock
, MA_OWNED
);
1945 if (mem
->wire_count
== 0) {
1946 VM_PAGE_QUEUES_REMOVE(mem
);
1947 if (!mem
->private && !mem
->fictitious
&& !mem
->gobbled
)
1948 vm_page_wire_count
++;
1950 vm_page_gobble_count
--;
1951 mem
->gobbled
= FALSE
;
1952 if(mem
->zero_fill
) {
1953 /* depends on the queues lock */
1955 mem
->zero_fill
= FALSE
;
1959 * The page could be encrypted, but
1960 * We don't have to decrypt it here
1961 * because we don't guarantee that the
1962 * data is actually valid at this point.
1963 * The page will get decrypted in
1964 * vm_fault_wire() if needed.
1967 assert(!mem
->gobbled
);
1974 * Mark this page as consumed by the vm/ipc/xmm subsystems.
1976 * Called only for freshly vm_page_grab()ed pages - w/ nothing locked.
1980 register vm_page_t mem
)
1982 vm_page_lock_queues();
1985 assert(!mem
->gobbled
);
1986 assert(mem
->wire_count
== 0);
1988 if (!mem
->gobbled
&& mem
->wire_count
== 0) {
1989 if (!mem
->private && !mem
->fictitious
)
1990 vm_page_wire_count
++;
1992 vm_page_gobble_count
++;
1993 mem
->gobbled
= TRUE
;
1994 vm_page_unlock_queues();
2000 * Release one wiring of this page, potentially
2001 * enabling it to be paged again.
2003 * The page's object and the page queues must be locked.
2007 register vm_page_t mem
)
2010 // dbgLog(current_thread(), mem->offset, mem->object, 0); /* (TEST/DEBUG) */
2013 assert(mem
->wire_count
> 0);
2016 _mutex_assert(&mem
->object
->Lock
, MA_OWNED
);
2017 _mutex_assert(&vm_page_queue_lock
, MA_OWNED
);
2019 if (--mem
->wire_count
== 0) {
2020 assert(!mem
->private && !mem
->fictitious
);
2021 vm_page_wire_count
--;
2022 assert(!mem
->laundry
);
2023 assert(mem
->object
!= kernel_object
);
2024 assert(mem
->pageq
.next
== NULL
&& mem
->pageq
.prev
== NULL
);
2025 queue_enter(&vm_page_queue_active
, mem
, vm_page_t
, pageq
);
2026 vm_page_active_count
++;
2028 mem
->reference
= TRUE
;
2033 * vm_page_deactivate:
2035 * Returns the given page to the inactive list,
2036 * indicating that no physical maps have access
2037 * to this page. [Used by the physical mapping system.]
2039 * The page queues must be locked.
2043 register vm_page_t m
)
2046 assert(m
->object
!= kernel_object
);
2048 // dbgLog(m->phys_page, vm_page_free_count, vm_page_wire_count, 6); /* (TEST/DEBUG) */
2050 _mutex_assert(&vm_page_queue_lock
, MA_OWNED
);
2053 * This page is no longer very interesting. If it was
2054 * interesting (active or inactive/referenced), then we
2055 * clear the reference bit and (re)enter it in the
2056 * inactive queue. Note wired pages should not have
2057 * their reference bit cleared.
2059 if (m
->gobbled
) { /* can this happen? */
2060 assert(m
->wire_count
== 0);
2061 if (!m
->private && !m
->fictitious
)
2062 vm_page_wire_count
--;
2063 vm_page_gobble_count
--;
2066 if (m
->private || (m
->wire_count
!= 0))
2068 if (m
->active
|| (m
->inactive
&& m
->reference
)) {
2069 if (!m
->fictitious
&& !m
->absent
)
2070 pmap_clear_reference(m
->phys_page
);
2071 m
->reference
= FALSE
;
2072 VM_PAGE_QUEUES_REMOVE(m
);
2074 if (m
->wire_count
== 0 && !m
->inactive
) {
2075 m
->page_ticket
= vm_page_ticket
;
2076 vm_page_ticket_roll
++;
2078 if(vm_page_ticket_roll
== VM_PAGE_TICKETS_IN_ROLL
) {
2079 vm_page_ticket_roll
= 0;
2080 if(vm_page_ticket
== VM_PAGE_TICKET_ROLL_IDS
)
2086 assert(!m
->laundry
);
2087 assert(m
->pageq
.next
== NULL
&& m
->pageq
.prev
== NULL
);
2089 queue_enter(&vm_page_queue_zf
, m
, vm_page_t
, pageq
);
2091 queue_enter(&vm_page_queue_inactive
,
2092 m
, vm_page_t
, pageq
);
2097 vm_page_inactive_count
++;
2104 * Put the specified page on the active list (if appropriate).
2106 * The page queues must be locked.
2111 register vm_page_t m
)
2114 assert(m
->object
!= kernel_object
);
2116 _mutex_assert(&vm_page_queue_lock
, MA_OWNED
);
2119 assert(m
->wire_count
== 0);
2120 if (!m
->private && !m
->fictitious
)
2121 vm_page_wire_count
--;
2122 vm_page_gobble_count
--;
2129 assert(!m
->laundry
);
2131 queue_remove(&vm_page_queue_zf
, m
, vm_page_t
, pageq
);
2133 queue_remove(&vm_page_queue_inactive
,
2134 m
, vm_page_t
, pageq
);
2136 m
->pageq
.next
= NULL
;
2137 m
->pageq
.prev
= NULL
;
2139 vm_page_inactive_count
--;
2140 m
->inactive
= FALSE
;
2142 if (m
->wire_count
== 0) {
2145 panic("vm_page_activate: already active");
2147 assert(!m
->laundry
);
2148 assert(m
->pageq
.next
== NULL
&& m
->pageq
.prev
== NULL
);
2149 queue_enter(&vm_page_queue_active
, m
, vm_page_t
, pageq
);
2151 m
->reference
= TRUE
;
2153 vm_page_active_count
++;
2158 * vm_page_part_zero_fill:
2160 * Zero-fill a part of the page.
2163 vm_page_part_zero_fill(
2171 #ifdef PMAP_ZERO_PART_PAGE_IMPLEMENTED
2172 pmap_zero_part_page(m
->phys_page
, m_pa
, len
);
2175 tmp
= vm_page_grab();
2176 if (tmp
== VM_PAGE_NULL
) {
2177 vm_page_wait(THREAD_UNINT
);
2182 vm_page_zero_fill(tmp
);
2184 vm_page_part_copy(m
, 0, tmp
, 0, m_pa
);
2186 if((m_pa
+ len
) < PAGE_SIZE
) {
2187 vm_page_part_copy(m
, m_pa
+ len
, tmp
,
2188 m_pa
+ len
, PAGE_SIZE
- (m_pa
+ len
));
2190 vm_page_copy(tmp
,m
);
2191 vm_page_lock_queues();
2193 vm_page_unlock_queues();
2199 * vm_page_zero_fill:
2201 * Zero-fill the specified page.
2208 "vm_page_zero_fill, object 0x%X offset 0x%X page 0x%X\n",
2209 (integer_t
)m
->object
, (integer_t
)m
->offset
, (integer_t
)m
, 0,0);
2213 // dbgTrace(0xAEAEAEAE, m->phys_page, 0); /* (BRINGUP) */
2214 pmap_zero_page(m
->phys_page
);
2218 * vm_page_part_copy:
2220 * copy part of one page to another
2231 VM_PAGE_CHECK(src_m
);
2232 VM_PAGE_CHECK(dst_m
);
2234 pmap_copy_part_page(src_m
->phys_page
, src_pa
,
2235 dst_m
->phys_page
, dst_pa
, len
);
2241 * Copy one page to another
2244 * The source page should not be encrypted. The caller should
2245 * make sure the page is decrypted first, if necessary.
2254 "vm_page_copy, object 0x%X offset 0x%X to object 0x%X offset 0x%X\n",
2255 (integer_t
)src_m
->object
, src_m
->offset
,
2256 (integer_t
)dest_m
->object
, dest_m
->offset
,
2259 VM_PAGE_CHECK(src_m
);
2260 VM_PAGE_CHECK(dest_m
);
2264 * The source page should not be encrypted at this point.
2265 * The destination page will therefore not contain encrypted
2266 * data after the copy.
2268 if (src_m
->encrypted
) {
2269 panic("vm_page_copy: source page %p is encrypted\n", src_m
);
2271 dest_m
->encrypted
= FALSE
;
2273 pmap_copy_page(src_m
->phys_page
, dest_m
->phys_page
);
2277 * Currently, this is a primitive allocator that grabs
2278 * free pages from the system, sorts them by physical
2279 * address, then searches for a region large enough to
2280 * satisfy the user's request.
2282 * Additional levels of effort:
2283 * + steal clean active/inactive pages
2284 * + force pageouts of dirty pages
2285 * + maintain a map of available physical
2291 * Check that the list of pages is ordered by
2292 * ascending physical address and has no holes.
2294 int vm_page_verify_contiguous(
2296 unsigned int npages
);
2299 vm_page_verify_contiguous(
2301 unsigned int npages
)
2303 register vm_page_t m
;
2304 unsigned int page_count
;
2305 vm_offset_t prev_addr
;
2307 prev_addr
= pages
->phys_page
;
2309 for (m
= NEXT_PAGE(pages
); m
!= VM_PAGE_NULL
; m
= NEXT_PAGE(m
)) {
2310 if (m
->phys_page
!= prev_addr
+ 1) {
2311 printf("m 0x%x prev_addr 0x%x, current addr 0x%x\n",
2312 m
, prev_addr
, m
->phys_page
);
2313 printf("pages 0x%x page_count %d\n", pages
, page_count
);
2314 panic("vm_page_verify_contiguous: not contiguous!");
2316 prev_addr
= m
->phys_page
;
2319 if (page_count
!= npages
) {
2320 printf("pages 0x%x actual count 0x%x but requested 0x%x\n",
2321 pages
, page_count
, npages
);
2322 panic("vm_page_verify_contiguous: count error");
2326 #endif /* MACH_ASSERT */
2329 cpm_counter(unsigned int vpfls_pages_handled
= 0;)
2330 cpm_counter(unsigned int vpfls_head_insertions
= 0;)
2331 cpm_counter(unsigned int vpfls_tail_insertions
= 0;)
2332 cpm_counter(unsigned int vpfls_general_insertions
= 0;)
2333 cpm_counter(unsigned int vpfc_failed
= 0;)
2334 cpm_counter(unsigned int vpfc_satisfied
= 0;)
/*
 *	Find a region large enough to contain at least npages
 *	of contiguous physical memory.
 *
 *	- Called while holding vm_page_queue_free_lock.
 *	- Doesn't respect vm_page_free_reserved; caller
 *	  must not ask for more pages than are legal to grab.
 *
 *	Returns a pointer to a list of gobbled pages or VM_PAGE_NULL.
 *
 *	Loop over the free list, extracting one page at a time and
 *	inserting those into a sorted sub-list.  We stop as soon as
 *	there's a contiguous range within the sorted list that can
 *	satisfy the contiguous memory request.  This contiguous sub-
 *	list is chopped out of the sorted sub-list and the remainder
 *	of the sorted sub-list is put back onto the beginning of the
 *	free list.
 */
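/*
 * Illustrative sketch of the idea (made-up page numbers, not from the
 * original source): asking for contig_pages = 3 with the free list in
 * the order 7, 3, 4, 9, 5.  The pages are pulled off one at a time and
 * kept sorted (3, 4, 5, 7, 9); as soon as the run 3-4-5 reaches three
 * pages the loop stops, that run is unlinked and returned, and the
 * leftover sorted pages (7, 9) are pushed back onto the head of the
 * free list.
 */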
static vm_page_t
vm_page_find_contiguous(
	unsigned int	contig_pages)
{
	vm_page_t		sort_list;
	vm_page_t		*contfirstprev, contlast;
	register vm_page_t	m, m1;
	ppnum_t			prevcontaddr;
	ppnum_t			nextcontaddr;
	unsigned int		npages;

#if	DEBUG
	_mutex_assert(&vm_page_queue_free_lock, MA_OWNED);
#endif
#if	MACH_ASSERT
	/*
	 *	Verify pages in the free list..
	 */
	npages = 0;
	for (m = vm_page_queue_free; m != VM_PAGE_NULL; m = NEXT_PAGE(m))
		++npages;
	if (npages != vm_page_free_count)
		panic("vm_sort_free_list: prelim: npages %u free_count %d",
		      npages, vm_page_free_count);
#endif	/* MACH_ASSERT */

	if (contig_pages == 0 || vm_page_queue_free == VM_PAGE_NULL)
		return VM_PAGE_NULL;
#define PPNUM_PREV(x)		(((x) > 0) ? ((x) - 1) : 0)
#define PPNUM_NEXT(x)		(((x) < PPNUM_MAX) ? ((x) + 1) : PPNUM_MAX)
#define SET_NEXT_PAGE(m,n)	((m)->pageq.next = (struct queue_entry *) (n))

	/* start the sorted sub-list with the first free page as a one-page run */
	npages = 1;
	contfirstprev = &sort_list;
	contlast = sort_list = vm_page_queue_free;
	vm_page_queue_free = NEXT_PAGE(sort_list);
	SET_NEXT_PAGE(sort_list, VM_PAGE_NULL);
	prevcontaddr = PPNUM_PREV(sort_list->phys_page);
	nextcontaddr = PPNUM_NEXT(sort_list->phys_page);
	while (npages < contig_pages &&
	       (m = vm_page_queue_free) != VM_PAGE_NULL)
	{
		cpm_counter(++vpfls_pages_handled);

		/* prepend to existing run? */
		if (m->phys_page == prevcontaddr) {
			vm_page_queue_free = NEXT_PAGE(m);
			cpm_counter(++vpfls_head_insertions);
			prevcontaddr = PPNUM_PREV(prevcontaddr);
			SET_NEXT_PAGE(m, *contfirstprev);
			*contfirstprev = m;
			npages++;
			continue; /* no tail expansion check needed */
		}

		/* append to tail of existing run? */
		else if (m->phys_page == nextcontaddr) {
			vm_page_queue_free = NEXT_PAGE(m);
			cpm_counter(++vpfls_tail_insertions);
			nextcontaddr = PPNUM_NEXT(nextcontaddr);
			SET_NEXT_PAGE(m, NEXT_PAGE(contlast));
			SET_NEXT_PAGE(contlast, m);
			contlast = m;
			npages++;
		}

		/* prepend to the very front of sorted list? */
		else if (m->phys_page < sort_list->phys_page) {
			vm_page_queue_free = NEXT_PAGE(m);
			cpm_counter(++vpfls_general_insertions);
			prevcontaddr = PPNUM_PREV(m->phys_page);
			nextcontaddr = PPNUM_NEXT(m->phys_page);
			SET_NEXT_PAGE(m, sort_list);
			contfirstprev = &sort_list;
			contlast = sort_list = m;
			npages = 1;
		}

		else /* get to proper place for insertion */
		{
			if (m->phys_page < nextcontaddr) {
				prevcontaddr = PPNUM_PREV(sort_list->phys_page);
				nextcontaddr = PPNUM_NEXT(sort_list->phys_page);
				contfirstprev = &sort_list;
				contlast = sort_list;
				npages = 1;
			}
			for (m1 = NEXT_PAGE(contlast);
			     npages < contig_pages &&
			     m1 != VM_PAGE_NULL && m1->phys_page < m->phys_page;
			     m1 = NEXT_PAGE(m1)) {
				if (m1->phys_page != nextcontaddr) {
					prevcontaddr = PPNUM_PREV(m1->phys_page);
					contfirstprev = NEXT_PAGE_PTR(contlast);
					npages = 1;
				} else {
					npages++;
				}
				nextcontaddr = PPNUM_NEXT(m1->phys_page);
				contlast = m1;
			}

			/*
			 * We may actually already have enough.
			 * This could happen if a previous prepend
			 * joined up two runs to meet our needs.
			 * If so, bail before we take the current
			 * page off the free queue.
			 */
			if (npages == contig_pages)
				break;

			if (m->phys_page != nextcontaddr) {
				contfirstprev = NEXT_PAGE_PTR(contlast);
				prevcontaddr = PPNUM_PREV(m->phys_page);
				nextcontaddr = PPNUM_NEXT(m->phys_page);
				npages = 1;
			} else {
				nextcontaddr = PPNUM_NEXT(nextcontaddr);
				npages++;
			}
			vm_page_queue_free = NEXT_PAGE(m);
			cpm_counter(++vpfls_general_insertions);
			SET_NEXT_PAGE(m, NEXT_PAGE(contlast));
			SET_NEXT_PAGE(contlast, m);
			contlast = m;
		}

		/* See how many pages are now contiguous after the insertion */
		for (m1 = NEXT_PAGE(m);
		     npages < contig_pages &&
		     m1 != VM_PAGE_NULL && m1->phys_page == nextcontaddr;
		     m1 = NEXT_PAGE(m1)) {
			nextcontaddr = PPNUM_NEXT(nextcontaddr);
			contlast = m1;
			npages++;
		}
	}
	/* how did we do? */
	if (npages == contig_pages) /* got them all */
	{
		cpm_counter(++vpfc_satisfied);

		/* remove the contiguous range from the sorted list */
		m = *contfirstprev;
		*contfirstprev = NEXT_PAGE(contlast);
		SET_NEXT_PAGE(contlast, VM_PAGE_NULL);
		assert(vm_page_verify_contiguous(m, npages));

		/* inline vm_page_gobble() for each returned page */
		for (m1 = m; m1 != VM_PAGE_NULL; m1 = NEXT_PAGE(m1)) {
			assert(m1->free);
			assert(!m1->wanted);
			assert(!m1->laundry);
			m1->free = FALSE;
			m1->no_isync = TRUE;
			m1->gobbled = TRUE;
		}
		vm_page_wire_count += npages;
		vm_page_gobble_count += npages;
		vm_page_free_count -= npages;

		/* stick free list at the tail of the sorted list */
		while ((m1 = *contfirstprev) != VM_PAGE_NULL)
			contfirstprev = (vm_page_t *)&m1->pageq.next;
		*contfirstprev = vm_page_queue_free;
	}
	else { /* ran out of free pages before finding a long enough run */
		cpm_counter(++vpfc_failed);
		m = VM_PAGE_NULL;
	}

	vm_page_queue_free = sort_list;
	return m;
}
/*
 *	Allocate a list of contiguous, wired pages.
 */
kern_return_t
cpm_allocate(
	vm_size_t	size,
	vm_page_t	*list,
	boolean_t	wire)
{
	register vm_page_t	m;
	vm_page_t		pages;
	unsigned int		npages;
	unsigned int		vm_pages_available;
	boolean_t		wakeup;

	if (size % page_size != 0)
		return KERN_INVALID_ARGUMENT;

	vm_page_lock_queues();
	mutex_lock(&vm_page_queue_free_lock);

	/*
	 *	Should also take active and inactive pages
	 *	into account...  One day...
	 */
	npages = size / page_size;
	vm_pages_available = vm_page_free_count - vm_page_free_reserved;

	if (npages > vm_pages_available) {
		mutex_unlock(&vm_page_queue_free_lock);
		vm_page_unlock_queues();
		return KERN_RESOURCE_SHORTAGE;
	}

	/*
	 *	Obtain a pointer to a subset of the free
	 *	list large enough to satisfy the request;
	 *	the region will be physically contiguous.
	 */
	pages = vm_page_find_contiguous(npages);

	/* adjust global freelist counts and determine need for wakeups */
	if (vm_page_free_count < vm_page_free_count_minimum)
		vm_page_free_count_minimum = vm_page_free_count;

	wakeup = ((vm_page_free_count < vm_page_free_min) ||
		  ((vm_page_free_count < vm_page_free_target) &&
		   (vm_page_inactive_count < vm_page_inactive_target)));

	mutex_unlock(&vm_page_queue_free_lock);

	if (pages == VM_PAGE_NULL) {
		vm_page_unlock_queues();
		return KERN_NO_SPACE;
	}

	/*
	 *	Walk the returned list, wiring the pages.
	 */
	if (wire == TRUE)
		for (m = pages; m != VM_PAGE_NULL; m = NEXT_PAGE(m)) {
			/*
			 *	Essentially inlined vm_page_wire.
			 */
			assert(!m->active);
			assert(!m->inactive);
			assert(!m->private);
			assert(!m->fictitious);
			assert(m->wire_count == 0);
			assert(m->gobbled);
			m->gobbled = FALSE;
			m->wire_count++;
			--vm_page_gobble_count;
		}
	vm_page_unlock_queues();

	if (wakeup)
		thread_wakeup((event_t) &vm_page_free_wanted);

	/*
	 *	The CPM pages should now be available and
	 *	ordered by ascending physical address.
	 */
	assert(vm_page_verify_contiguous(pages, npages));

	*list = pages;
	return KERN_SUCCESS;
}
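/*
 * Illustrative sketch (not part of the original source):  how a kernel client
 * might request a physically contiguous, wired region through cpm_allocate()
 * as reconstructed above.  The helper name grab_contiguous_region() and the
 * scatter/gather comment are hypothetical; the page walk uses NEXT_PAGE()
 * exactly as the allocator does.  Treat this as a sketch of the calling
 * convention, not a supported usage example.
 */
#if 0	/* illustrative only, never compiled */
static kern_return_t
grab_contiguous_region(vm_size_t size)
{
	vm_page_t	pages;
	vm_page_t	m;
	kern_return_t	kr;

	kr = cpm_allocate(size, &pages, TRUE);	/* TRUE: wire the pages */
	if (kr != KERN_SUCCESS)
		return kr;			/* shortage, no space, ... */

	/* pages are ordered by ascending physical address */
	for (m = pages; m != VM_PAGE_NULL; m = NEXT_PAGE(m)) {
		/* e.g. program an I/O descriptor with m->phys_page */
	}
	return KERN_SUCCESS;
}
#endif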
#include <mach_vm_debug.h>
#if	MACH_VM_DEBUG

#include <mach_debug/hash_info.h>
#include <vm/vm_debug.h>

/*
 *	Routine:	vm_page_info
 *	Purpose:
 *		Return information about the global VP table.
 *		Fills the buffer with as much information as possible
 *		and returns the desired size of the buffer.
 *	Conditions:
 *		Nothing locked.  The caller should provide
 *		possibly-pageable memory.
 */

unsigned int
vm_page_info(
	hash_info_bucket_t	*info,
	unsigned int		count)
{
	unsigned int i;

	if (vm_page_bucket_count < count)
		count = vm_page_bucket_count;

	for (i = 0; i < count; i++) {
		vm_page_bucket_t *bucket = &vm_page_buckets[i];
		unsigned int bucket_count = 0;
		vm_page_t m;

		simple_lock(&vm_page_bucket_lock);
		for (m = bucket->pages; m != VM_PAGE_NULL; m = m->next)
			bucket_count++;
		simple_unlock(&vm_page_bucket_lock);

		/* don't touch pageable memory while holding locks */
		info[i].hib_count = bucket_count;
	}

	return vm_page_bucket_count;
}
#endif	/* MACH_VM_DEBUG */
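/*
 * Illustrative sketch (not part of the original source):  the two-step sizing
 * pattern the vm_page_info() contract above suggests -- the routine fills at
 * most `count' entries and returns the number of buckets it would like to
 * report, so a caller can retry with a larger buffer.  The allocation helper
 * alloc_pageable() is a stand-in, not a real kernel interface.
 */
#if 0	/* illustrative only, never compiled */
static void
dump_vp_table_sizes(void)
{
	hash_info_bucket_t	*info;
	unsigned int		count, needed;

	count = 64;					/* initial guess */
	info = (hash_info_bucket_t *)
	    alloc_pageable(count * sizeof(*info));	/* hypothetical helper */
	needed = vm_page_info(info, count);
	if (needed > count) {
		/* buffer too small: reallocate with `needed' entries and retry */
	}
}
#endif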
#include <mach_kdb.h>
#if	MACH_KDB

#include <ddb/db_output.h>
#include <vm/vm_print.h>
#define	printf	kdbprintf

/*
 *	Routine:	vm_page_print [exported]
 */
void
vm_page_print(
	db_addr_t	db_addr)
{
	vm_page_t	p;

	p = (vm_page_t) (long) db_addr;

	iprintf("page 0x%x\n", p);

	db_indent += 2;

	iprintf("object=0x%x", p->object);
	printf(", offset=0x%x", p->offset);
	printf(", wire_count=%d", p->wire_count);

	iprintf("%sinactive, %sactive, %sgobbled, %slaundry, %sfree, %sref, %sencrypted\n",
		(p->inactive ? "" : "!"),
		(p->active ? "" : "!"),
		(p->gobbled ? "" : "!"),
		(p->laundry ? "" : "!"),
		(p->free ? "" : "!"),
		(p->reference ? "" : "!"),
		(p->encrypted ? "" : "!"));
	iprintf("%sbusy, %swanted, %stabled, %sfictitious, %sprivate, %sprecious\n",
		(p->busy ? "" : "!"),
		(p->wanted ? "" : "!"),
		(p->tabled ? "" : "!"),
		(p->fictitious ? "" : "!"),
		(p->private ? "" : "!"),
		(p->precious ? "" : "!"));
	iprintf("%sabsent, %serror, %sdirty, %scleaning, %spageout, %sclustered\n",
		(p->absent ? "" : "!"),
		(p->error ? "" : "!"),
		(p->dirty ? "" : "!"),
		(p->cleaning ? "" : "!"),
		(p->pageout ? "" : "!"),
		(p->clustered ? "" : "!"));
	iprintf("%slock_supplied, %soverwriting, %srestart, %sunusual\n",
		(p->lock_supplied ? "" : "!"),
		(p->overwriting ? "" : "!"),
		(p->restart ? "" : "!"),
		(p->unusual ? "" : "!"));

	iprintf("phys_page=0x%x", p->phys_page);
	printf(", page_error=0x%x", p->page_error);
	printf(", page_lock=0x%x", p->page_lock);
	printf(", unlock_request=%d\n", p->unlock_request);

	db_indent -= 2;
}
#endif	/* MACH_KDB */