/*
 * Copyright (c) 2000-2003 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 *	Author:	Avadis Tevanian, Jr., Michael Wayne Young
 *
 *	Resident memory management module.
 */
#include <mach/clock_types.h>
#include <mach/vm_prot.h>
#include <mach/vm_statistics.h>
#include <kern/counters.h>
#include <kern/sched_prim.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/zalloc.h>
#include <vm/vm_init.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_kern.h>			/* kernel_memory_allocate() */
#include <kern/misc_protos.h>
#include <zone_debug.h>
#include <ppc/mappings.h>		/* (BRINGUP) */
#include <pexpert/pexpert.h>		/* (BRINGUP) */
/*
 *	Variables used to indicate the relative age of pages in the
 *	inactive list.
 */
int	vm_page_ticket_roll = 0;
int	vm_page_ticket = 0;
/*
 *	Associated with each page of user-allocatable memory is a
 *	page structure.
 */

/*
 *	These variables record the values returned by vm_page_bootstrap,
 *	for debugging purposes.  The implementation of pmap_steal_memory
 *	and pmap_startup here also uses them internally.
 */

vm_offset_t	virtual_space_start;
vm_offset_t	virtual_space_end;
int		vm_page_pages;
/*
 *	The vm_page_lookup() routine, which provides for fast
 *	(virtual memory object, offset) to page lookup, employs
 *	the following hash table.  The vm_page_{insert,remove}
 *	routines install and remove associations in the table.
 *	[This table is often called the virtual-to-physical,
 *	or VP, table.]
 */
typedef struct {
	vm_page_t	pages;
#if	MACH_PAGE_HASH_STATS
	int		cur_count;		/* current count */
	int		hi_count;		/* high water mark */
#endif /* MACH_PAGE_HASH_STATS */
} vm_page_bucket_t;

vm_page_bucket_t *vm_page_buckets;		/* Array of buckets */
unsigned int	vm_page_bucket_count = 0;	/* How big is array? */
unsigned int	vm_page_hash_mask;		/* Mask for hash function */
unsigned int	vm_page_hash_shift;		/* Shift for hash function */
uint32_t	vm_page_bucket_hash;		/* Basic bucket hash */
decl_simple_lock_data(,vm_page_bucket_lock)
#if	MACH_PAGE_HASH_STATS
/* This routine is only for debug.  It is intended to be called by
 * hand by a developer using a kernel debugger.  This routine prints
 * out vm_page_hash table statistics to the kernel debug console.
 */
void
hash_debug(void)
{
	int	i;
	int	numbuckets = 0;
	int	highsum = 0;
	int	maxdepth = 0;

	for (i = 0; i < vm_page_bucket_count; i++) {
		if (vm_page_buckets[i].hi_count) {
			numbuckets++;
			highsum += vm_page_buckets[i].hi_count;
			if (vm_page_buckets[i].hi_count > maxdepth)
				maxdepth = vm_page_buckets[i].hi_count;
		}
	}
	printf("Total number of buckets: %d\n", vm_page_bucket_count);
	printf("Number used buckets:     %d = %d%%\n",
		numbuckets, 100*numbuckets/vm_page_bucket_count);
	printf("Number unused buckets:   %d = %d%%\n",
		vm_page_bucket_count - numbuckets,
		100*(vm_page_bucket_count-numbuckets)/vm_page_bucket_count);
	printf("Sum of bucket max depth: %d\n", highsum);
	printf("Average bucket depth:    %d.%2d\n",
		highsum/vm_page_bucket_count,
		highsum%vm_page_bucket_count);
	printf("Maximum bucket depth:    %d\n", maxdepth);
}
#endif /* MACH_PAGE_HASH_STATS */
/*
 *	The virtual page size is currently implemented as a runtime
 *	variable, but is constant once initialized using vm_set_page_size.
 *	This initialization must be done in the machine-dependent
 *	bootstrap sequence, before calling other machine-independent
 *	code.
 *
 *	All references to the virtual page size outside this
 *	module must use the PAGE_SIZE, PAGE_MASK and PAGE_SHIFT
 *	constants.
 */
#ifndef PAGE_SIZE_FIXED
vm_size_t	page_size  = 4096;
vm_size_t	page_mask  = 4095;
int		page_shift = 12;
#else
vm_size_t	page_size  = PAGE_SIZE;
vm_size_t	page_mask  = PAGE_MASK;
int		page_shift = PAGE_SHIFT;
#endif /* PAGE_SIZE_FIXED */
/*
 *	Resident page structures are initialized from
 *	a template (see vm_page_alloc).
 *
 *	When adding a new field to the virtual memory
 *	object structure, be sure to add initialization
 *	(see vm_page_bootstrap).
 */
struct vm_page	vm_page_template;

/*
 *	Resident pages that represent real memory
 *	are allocated from a free list.
 */
vm_page_t	vm_page_queue_free;
vm_page_t	vm_page_queue_fictitious;
decl_mutex_data(,vm_page_queue_free_lock)
unsigned int	vm_page_free_wanted;
int		vm_page_free_count;
int		vm_page_fictitious_count;

unsigned int	vm_page_free_count_minimum;	/* debugging */
/*
 *	Occasionally, the virtual memory system uses
 *	resident page structures that do not refer to
 *	real pages, for example to leave a page with
 *	important state information in the VP table.
 *
 *	These page structures are allocated the way
 *	most other kernel structures are.
 */
zone_t	vm_page_zone;
decl_mutex_data(,vm_page_alloc_lock)
unsigned int io_throttle_zero_fill;
decl_mutex_data(,vm_page_zero_fill_lock)
/*
 *	Fictitious pages don't have a physical address,
 *	but we must initialize phys_page to something.
 *	For debugging, this should be a strange value
 *	that the pmap module can recognize in assertions.
 */
vm_offset_t vm_page_fictitious_addr = (vm_offset_t) -1;
/*
 *	Resident page structures are also chained on
 *	queues that are used by the page replacement
 *	system (pageout daemon).  These queues are
 *	defined here, but are shared by the pageout
 *	module.  The inactive queue is broken into
 *	inactive and zf for convenience, as the
 *	pageout daemon often assigns a higher
 *	affinity to zf pages.
 */
queue_head_t	vm_page_queue_active;
queue_head_t	vm_page_queue_inactive;
queue_head_t	vm_page_queue_zf;
decl_mutex_data(,vm_page_queue_lock)
int		vm_page_active_count;
int		vm_page_inactive_count;
int		vm_page_wire_count;
int		vm_page_gobble_count = 0;
int		vm_page_wire_count_warning = 0;
int		vm_page_gobble_count_warning = 0;
/* the following fields are protected by the vm_page_queue_lock */
queue_head_t	vm_page_queue_limbo;
int	vm_page_limbo_count = 0;		/* total pages in limbo */
int	vm_page_limbo_real_count = 0;		/* real pages in limbo */
int	vm_page_pin_count = 0;			/* number of pinned pages */

decl_simple_lock_data(,vm_page_preppin_lock)
/*
 *	Several page replacement parameters are also
 *	shared with this module, so that page allocation
 *	(done here in vm_page_alloc) can trigger the
 *	pageout daemon.
 */
int	vm_page_free_target = 0;
int	vm_page_free_min = 0;
int	vm_page_inactive_target = 0;
int	vm_page_free_reserved = 0;
int	vm_page_laundry_count = 0;
int	vm_page_burst_count = 0;
int	vm_page_throttled_count = 0;
/*
 *	The VM system has a couple of heuristics for deciding
 *	that pages are "uninteresting" and should be placed
 *	on the inactive queue as likely candidates for replacement.
 *	These variables let the heuristics be controlled at run-time
 *	to make experimentation easier.
 */

boolean_t vm_page_deactivate_hint = TRUE;
/*
 *	vm_set_page_size:
 *
 *	Sets the page size, perhaps based upon the memory
 *	size.  Must be called before any use of page-size
 *	dependent functions.
 *
 *	Sets page_shift and page_mask from page_size.
 */
void
vm_set_page_size(void)
{
#ifndef PAGE_SIZE_FIXED
	page_mask = page_size - 1;

	if ((page_mask & page_size) != 0)
		panic("vm_set_page_size: page size not a power of two");

	for (page_shift = 0; ; page_shift++)
		if ((1 << page_shift) == page_size)
			break;
#endif /* PAGE_SIZE_FIXED */
}
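
/*
 * Illustrative sketch (not part of the original module): how the
 * power-of-two check and shift derivation in vm_set_page_size() above
 * behave for a hypothetical page size.  Compiled out; plain user-space
 * C, names are hypothetical.
 */
#if 0
#include <assert.h>

static void example_set_page_size(unsigned long size)
{
	unsigned long mask = size - 1;
	int shift;

	/* A power of two has no bits in common with (itself - 1). */
	assert(size != 0 && (mask & size) == 0);

	for (shift = 0; (1UL << shift) != size; shift++)
		continue;

	/* e.g. size 4096 -> mask 0xFFF, shift 12 */
	(void)mask;
	(void)shift;
}
#endif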
/*
 *	vm_page_bootstrap:
 *
 *	Initializes the resident memory module.
 *
 *	Allocates memory for the page cells, and
 *	for the object/offset-to-page hash table headers.
 *	Each page cell is initialized and placed on the free list.
 *	Returns the range of available kernel virtual memory.
 */
void
vm_page_bootstrap(
	vm_offset_t		*startp,
	vm_offset_t		*endp)
{
	register vm_page_t	m;
	unsigned int		i;
	unsigned int		log1;
	unsigned int		log2;
	unsigned int		size;

	/*
	 *	Initialize the vm_page template.
	 */

	m = &vm_page_template;
	m->object = VM_OBJECT_NULL;		/* reset later */
	m->offset = 0;				/* reset later */

	m->reference = FALSE;
	m->dump_cleaning = FALSE;
	m->list_req_pending = FALSE;

	m->fictitious = FALSE;
	m->clustered = FALSE;
	m->lock_supplied = FALSE;
	m->zero_fill = FALSE;

	m->phys_page = 0;			/* reset later */

	m->page_lock = VM_PROT_NONE;
	m->unlock_request = VM_PROT_NONE;
	m->page_error = KERN_SUCCESS;
	/*
	 *	Initialize the page queues.
	 */

	mutex_init(&vm_page_queue_free_lock, ETAP_VM_PAGEQ_FREE);
	mutex_init(&vm_page_queue_lock, ETAP_VM_PAGEQ);
	simple_lock_init(&vm_page_preppin_lock, ETAP_VM_PREPPIN);

	vm_page_queue_free = VM_PAGE_NULL;
	vm_page_queue_fictitious = VM_PAGE_NULL;
	queue_init(&vm_page_queue_active);
	queue_init(&vm_page_queue_inactive);
	queue_init(&vm_page_queue_zf);
	queue_init(&vm_page_queue_limbo);

	vm_page_free_wanted = 0;
	/*
	 *	Steal memory for the map and zone subsystems.
	 */

	vm_map_steal_memory();
	zone_steal_memory();
	/*
	 *	Allocate (and initialize) the virtual-to-physical
	 *	table hash buckets.
	 *
	 *	The number of buckets should be a power of two to
	 *	get a good hash function.  The following computation
	 *	chooses the first power of two that is greater
	 *	than the number of physical pages in the system.
	 */

	simple_lock_init(&vm_page_bucket_lock, ETAP_VM_BUCKET);

	if (vm_page_bucket_count == 0) {
		unsigned int npages = pmap_free_pages();

		vm_page_bucket_count = 1;
		while (vm_page_bucket_count < npages)
			vm_page_bucket_count <<= 1;
	}

	vm_page_hash_mask = vm_page_bucket_count - 1;
	/*
	 *	Calculate object shift value for hashing algorithm:
	 *		O = log2(sizeof(struct vm_object))
	 *		B = log2(vm_page_bucket_count)
	 *		hash shifts the object left by B/2 - O
	 */
	size = vm_page_bucket_count;
	for (log1 = 0; size > 1; log1++)
		size /= 2;
	size = sizeof(struct vm_object);
	for (log2 = 0; size > 1; log2++)
		size /= 2;
	vm_page_hash_shift = log1/2 - log2 + 1;

	vm_page_bucket_hash = 1 << ((log1 + 1) >> 1);	/* Get (ceiling of sqrt of table size) */
	vm_page_bucket_hash |= 1 << ((log1 + 1) >> 2);	/* Get (ceiling of quadroot of table size) */
	vm_page_bucket_hash |= 1;			/* Low bit must always be set to ensure a unique series */

	if (vm_page_hash_mask & vm_page_bucket_count)
		printf("vm_page_bootstrap: WARNING -- strange page hash\n");
	vm_page_buckets = (vm_page_bucket_t *)
		pmap_steal_memory(vm_page_bucket_count *
				  sizeof(vm_page_bucket_t));

	for (i = 0; i < vm_page_bucket_count; i++) {
		register vm_page_bucket_t *bucket = &vm_page_buckets[i];

		bucket->pages = VM_PAGE_NULL;
#if	MACH_PAGE_HASH_STATS
		bucket->cur_count = 0;
		bucket->hi_count = 0;
#endif /* MACH_PAGE_HASH_STATS */
	}
	/*
	 *	Machine-dependent code allocates the resident page table.
	 *	It uses vm_page_init to initialize the page frames.
	 *	The code also returns to us the virtual space available
	 *	to the kernel.  We don't trust the pmap module
	 *	to get the alignment right.
	 */

	pmap_startup(&virtual_space_start, &virtual_space_end);
	virtual_space_start = round_page_32(virtual_space_start);
	virtual_space_end = trunc_page_32(virtual_space_end);

	*startp = virtual_space_start;
	*endp = virtual_space_end;

	/*
	 *	Compute the initial "wire" count.
	 *	Up until now, the pages which have been set aside are not under
	 *	the VM system's control, so although they aren't explicitly
	 *	wired, they nonetheless can't be moved.  At this moment,
	 *	all VM managed pages are "free", courtesy of pmap_startup.
	 */
	vm_page_wire_count = atop_64(max_mem) - vm_page_free_count;	/* initial value */

	printf("vm_page_bootstrap: %d free pages\n", vm_page_free_count);
	vm_page_free_count_minimum = vm_page_free_count;
}
#ifndef	MACHINE_PAGES
/*
 *	We implement pmap_steal_memory and pmap_startup with the help
 *	of two simpler functions, pmap_virtual_space and pmap_next_page.
 */

vm_offset_t
pmap_steal_memory(
	vm_size_t	size)
{
	vm_offset_t	addr, vaddr;
	ppnum_t		phys_page;
	/*
	 *	We round the size to a multiple of the pointer size.
	 */

	size = (size + sizeof (void *) - 1) &~ (sizeof (void *) - 1);

	/*
	 *	If this is the first call to pmap_steal_memory,
	 *	we have to initialize ourself.
	 */

	if (virtual_space_start == virtual_space_end) {
		pmap_virtual_space(&virtual_space_start, &virtual_space_end);

		/*
		 *	The initial values must be aligned properly, and
		 *	we don't trust the pmap module to do it right.
		 */

		virtual_space_start = round_page_32(virtual_space_start);
		virtual_space_end = trunc_page_32(virtual_space_end);
	}

	/*
	 *	Allocate virtual memory for this request.
	 */

	addr = virtual_space_start;
	virtual_space_start += size;

	kprintf("pmap_steal_memory: %08X - %08X; size=%08X\n", addr, virtual_space_start, size);	/* (TEST/DEBUG) */
	/*
	 *	Allocate and map physical pages to back new virtual pages.
	 */

	for (vaddr = round_page_32(addr);
	     vaddr < addr + size;
	     vaddr += PAGE_SIZE) {
		if (!pmap_next_page(&phys_page))
			panic("pmap_steal_memory");

		/*
		 *	XXX Logically, these mappings should be wired,
		 *	but some pmap modules barf if they are.
		 */

		pmap_enter(kernel_pmap, vaddr, phys_page,
			   VM_PROT_READ|VM_PROT_WRITE,
			   VM_WIMG_USE_DEFAULT, FALSE);
		/*
		 * Account for newly stolen memory
		 */
		vm_page_wire_count++;
	}

	return addr;
}
void
pmap_startup(
	vm_offset_t	*startp,
	vm_offset_t	*endp)
{
	unsigned int	i, npages, pages_initialized, fill, fillval;
	vm_page_t	pages;
	ppnum_t		phys_page;
	addr64_t	tmpaddr;

	/*
	 *	We calculate how many page frames we will have
	 *	and then allocate the page structures in one chunk.
	 */

	tmpaddr = (addr64_t)pmap_free_pages() * (addr64_t)PAGE_SIZE;	/* Get the amount of memory left */
	tmpaddr = tmpaddr + (addr64_t)(round_page_32(virtual_space_start) - virtual_space_start);	/* Account for any slop */
	npages = (unsigned int)(tmpaddr / (addr64_t)(PAGE_SIZE + sizeof(*pages)));	/* Figure how many pages fit, leaving room for their vm_page structures */

	pages = (vm_page_t) pmap_steal_memory(npages * sizeof *pages);

	/*
	 *	Initialize the page frames.
	 */

	for (i = 0, pages_initialized = 0; i < npages; i++) {
		if (!pmap_next_page(&phys_page))
			break;

		vm_page_init(&pages[i], phys_page);
		vm_page_pages++;
		pages_initialized++;
	}

	/*
	 * Release pages in reverse order so that physical pages
	 * initially get allocated in ascending addresses. This keeps
	 * the devices (which must address physical memory) happy if
	 * they require several consecutive pages.
	 */

	/*
	 * Check if we want to initialize pages to a known value
	 */

	fill = 0;						/* Assume no fill */
	if (PE_parse_boot_arg("fill", &fillval)) fill = 1;	/* Set fill */

	for (i = pages_initialized; i > 0; i--) {
		extern void fillPage(ppnum_t phys_page, unsigned int fillval);
		if(fill) fillPage(pages[i - 1].phys_page, fillval);	/* Fill the page with a known value if requested at boot */
		vm_page_release(&pages[i - 1]);
	}
	{
		vm_page_t xx, xxo, xxl;
		int j, k, l;

		j = 0;							/* (BRINGUP) */
		xxl = 0;

		for(xx = vm_page_queue_free; xx; xxl = xx, xx = xx->pageq.next) {	/* (BRINGUP) */
			j++;						/* (BRINGUP) */
			if(j > vm_page_free_count) {			/* (BRINGUP) */
				panic("pmap_startup: too many pages, xx = %08X, xxl = %08X\n", xx, xxl);
			}

			l = vm_page_free_count - j;			/* (BRINGUP) */
			k = 0;						/* (BRINGUP) */

			if(((j - 1) & 0xFFFF) == 0) kprintf("checking number %d of %d\n", j, vm_page_free_count);

			for(xxo = xx->pageq.next; xxo; xxo = xxo->pageq.next) {	/* (BRINGUP) */
				k++;
				if(k > l) panic("pmap_startup: too many in secondary check %d %d\n", k, l);
				if((xx->phys_page & 0xFFFFFFFF) == (xxo->phys_page & 0xFFFFFFFF)) {	/* (BRINGUP) */
					panic("pmap_startup: duplicate physaddr, xx = %08X, xxo = %08X\n", xx, xxo);
				}
			}
		}

		if(j != vm_page_free_count) {				/* (BRINGUP) */
			panic("pmap_startup: vm_page_free_count does not match, calc = %d, vm_page_free_count = %08X\n", j, vm_page_free_count);
		}
	}
	/*
	 *	We have to re-align virtual_space_start,
	 *	because pmap_steal_memory has been using it.
	 */

	virtual_space_start = round_page_32(virtual_space_start);

	*startp = virtual_space_start;
	*endp = virtual_space_end;
}
#endif	/* MACHINE_PAGES */
/*
 *	Routine:	vm_page_module_init
 *	Purpose:
 *		Second initialization pass, to be done after
 *		the basic VM system is ready.
 */
void
vm_page_module_init(void)
{
	vm_page_zone = zinit((vm_size_t) sizeof(struct vm_page),
			     0, PAGE_SIZE, "vm pages");

#if	ZONE_DEBUG
	zone_debug_disable(vm_page_zone);
#endif	/* ZONE_DEBUG */

	zone_change(vm_page_zone, Z_EXPAND, FALSE);
	zone_change(vm_page_zone, Z_EXHAUST, TRUE);
	zone_change(vm_page_zone, Z_FOREIGN, TRUE);

	/*
	 * Adjust zone statistics to account for the real pages allocated
	 * in vm_page_create(). [Q: is this really what we want?]
	 */
	vm_page_zone->count += vm_page_pages;
	vm_page_zone->cur_size += vm_page_pages * vm_page_zone->elem_size;

	mutex_init(&vm_page_alloc_lock, ETAP_VM_PAGE_ALLOC);
	mutex_init(&vm_page_zero_fill_lock, ETAP_VM_PAGE_ALLOC);
}
/*
 *	Routine:	vm_page_create
 *	Purpose:
 *		After the VM system is up, machine-dependent code
 *		may stumble across more physical memory.  For example,
 *		memory that it was reserving for a frame buffer.
 *		vm_page_create turns this memory into available pages.
 */
void
vm_page_create(
	ppnum_t	start,
	ppnum_t	end)
{
	ppnum_t		phys_page;
	vm_page_t	m;

	for (phys_page = start;
	     phys_page < end;
	     phys_page++) {
		while ((m = (vm_page_t) vm_page_grab_fictitious())
			== VM_PAGE_NULL)
			vm_page_more_fictitious();

		vm_page_init(m, phys_page);
		vm_page_pages++;
		vm_page_release(m);
	}
}
/*
 *	vm_page_hash:
 *
 *	Distributes the object/offset key pair among hash buckets.
 *
 *	NOTE:	The bucket count must be a power of 2
 */
#define vm_page_hash(object, offset) (\
	( (natural_t)((uint32_t)object * vm_page_bucket_hash) + ((uint32_t)atop_64(offset) ^ vm_page_bucket_hash))\
		 & vm_page_hash_mask)
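
/*
 * Illustrative sketch (not part of the original module): the bucket index
 * computed by vm_page_hash() above is a multiplicative hash of the object
 * pointer mixed with the page index of the offset, masked down to the
 * table size.  Because vm_page_bucket_count is a power of two, the final
 * "& vm_page_hash_mask" is equivalent to a modulo by the table size.
 * Compiled out; names are hypothetical and a 4K page size is assumed
 * for the atop_64() stand-in.
 */
#if 0
#include <stdint.h>

static unsigned int
example_page_hash(uintptr_t object, uint64_t offset,
		  uint32_t bucket_hash, unsigned int hash_mask)
{
	uint32_t page_index = (uint32_t)(offset >> 12);	/* atop_64() for 4K pages */

	return (unsigned int)
	    ((((uint32_t)object * bucket_hash) + (page_index ^ bucket_hash))
		& hash_mask);
}
#endif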
/*
 *	vm_page_insert:		[ internal use only ]
 *
 *	Inserts the given mem entry into the
 *	object/object-page table and object list.
 *
 *	The object must be locked.
 */
void
vm_page_insert(
	register vm_page_t		mem,
	register vm_object_t		object,
	register vm_object_offset_t	offset)
{
	register vm_page_bucket_t *bucket;

	XPR(XPR_VM_PAGE,
		"vm_page_insert, object 0x%X offset 0x%X page 0x%X\n",
		(integer_t)object, (integer_t)offset, (integer_t)mem, 0,0);

	VM_PAGE_CHECK(mem);

	if (mem->tabled)
		panic("vm_page_insert");

	assert(!object->internal || offset < object->size);

	/* only insert "pageout" pages into "pageout" objects,
	 * and normal pages into normal objects */
	assert(object->pageout == mem->pageout);

	/*
	 *	Record the object/offset pair in this page
	 */

	mem->object = object;
	mem->offset = offset;

	/*
	 *	Insert it into the object/offset hash table
	 */

	bucket = &vm_page_buckets[vm_page_hash(object, offset)];
	simple_lock(&vm_page_bucket_lock);
	mem->next = bucket->pages;
	bucket->pages = mem;
#if	MACH_PAGE_HASH_STATS
	if (++bucket->cur_count > bucket->hi_count)
		bucket->hi_count = bucket->cur_count;
#endif /* MACH_PAGE_HASH_STATS */
	simple_unlock(&vm_page_bucket_lock);

	/*
	 *	Now link into the object's list of backed pages.
	 */

	queue_enter(&object->memq, mem, vm_page_t, listq);

	/*
	 *	Show that the object has one more resident page.
	 */

	object->resident_page_count++;

	mem->tabled = TRUE;
}
/*
 *	vm_page_replace:
 *
 *	Exactly like vm_page_insert, except that we first
 *	remove any existing page at the given offset in object.
 *
 *	The object and page queues must be locked.
 */
void
vm_page_replace(
	register vm_page_t		mem,
	register vm_object_t		object,
	register vm_object_offset_t	offset)
{
	register vm_page_bucket_t *bucket;

	VM_PAGE_CHECK(mem);

	if (mem->tabled)
		panic("vm_page_replace");

	/*
	 *	Record the object/offset pair in this page
	 */

	mem->object = object;
	mem->offset = offset;

	/*
	 *	Insert it into the object/offset hash table,
	 *	replacing any page that might have been there.
	 */

	bucket = &vm_page_buckets[vm_page_hash(object, offset)];
	simple_lock(&vm_page_bucket_lock);
	if (bucket->pages) {
		vm_page_t *mp = &bucket->pages;
		register vm_page_t m = *mp;

		do {
			if (m->object == object && m->offset == offset) {
				/*
				 * Remove page from bucket and from object,
				 * and return it to the free list.
				 */
				*mp = m->next;
				queue_remove(&object->memq, m, vm_page_t,
					     listq);
				m->tabled = FALSE;
				object->resident_page_count--;

				/*
				 * Return page to the free list.
				 * Note the page is not tabled now, so this
				 * won't self-deadlock on the bucket lock.
				 */

				vm_page_free(m);
				break;
			}
			mp = &m->next;
		} while ((m = *mp) != VM_PAGE_NULL);
		mem->next = bucket->pages;
	} else {
		mem->next = VM_PAGE_NULL;
	}
	bucket->pages = mem;
	simple_unlock(&vm_page_bucket_lock);

	/*
	 *	Now link into the object's list of backed pages.
	 */

	queue_enter(&object->memq, mem, vm_page_t, listq);

	/*
	 *	And show that the object has one more resident
	 *	page.
	 */

	object->resident_page_count++;

	mem->tabled = TRUE;
}
/*
 *	vm_page_remove:		[ internal use only ]
 *
 *	Removes the given mem entry from the object/offset-page
 *	table and the object page list.
 *
 *	The object and page must be locked.
 */
void
vm_page_remove(
	register vm_page_t	mem)
{
	register vm_page_bucket_t	*bucket;
	register vm_page_t		this;

	XPR(XPR_VM_PAGE,
		"vm_page_remove, object 0x%X offset 0x%X page 0x%X\n",
		(integer_t)mem->object, (integer_t)mem->offset,
		(integer_t)mem, 0,0);

	assert(mem->tabled);
	assert(!mem->cleaning);
	VM_PAGE_CHECK(mem);

	/*
	 *	Remove from the object/offset hash table
	 */

	bucket = &vm_page_buckets[vm_page_hash(mem->object, mem->offset)];
	simple_lock(&vm_page_bucket_lock);
	if ((this = bucket->pages) == mem) {
		/* optimize for common case */

		bucket->pages = mem->next;
	} else {
		register vm_page_t	*prev;

		for (prev = &this->next;
		     (this = *prev) != mem;
		     prev = &this->next)
			continue;
		*prev = this->next;
	}
#if	MACH_PAGE_HASH_STATS
	bucket->cur_count--;
#endif /* MACH_PAGE_HASH_STATS */
	simple_unlock(&vm_page_bucket_lock);

	/*
	 *	Now remove from the object's list of backed pages.
	 */

	queue_remove(&mem->object->memq, mem, vm_page_t, listq);

	/*
	 *	And show that the object has one fewer resident
	 *	page.
	 */

	mem->object->resident_page_count--;

	mem->tabled = FALSE;
	mem->object = VM_OBJECT_NULL;
	mem->offset = 0;
}
/*
 *	vm_page_lookup:
 *
 *	Returns the page associated with the object/offset
 *	pair specified; if none is found, VM_PAGE_NULL is returned.
 *
 *	The object must be locked.  No side effects.
 */
vm_page_t
vm_page_lookup(
	register vm_object_t		object,
	register vm_object_offset_t	offset)
{
	register vm_page_t	mem;
	register vm_page_bucket_t *bucket;

	/*
	 *	Search the hash table for this object/offset pair
	 */

	bucket = &vm_page_buckets[vm_page_hash(object, offset)];

	simple_lock(&vm_page_bucket_lock);
	for (mem = bucket->pages; mem != VM_PAGE_NULL; mem = mem->next) {
		VM_PAGE_CHECK(mem);
		if ((mem->object == object) && (mem->offset == offset))
			break;
	}
	simple_unlock(&vm_page_bucket_lock);

	return(mem);
}
/*
 *	vm_page_rename:
 *
 *	Move the given memory entry from its
 *	current object to the specified target object/offset.
 *
 *	The object must be locked.
 */
void
vm_page_rename(
	register vm_page_t		mem,
	register vm_object_t		new_object,
	vm_object_offset_t		new_offset)
{
	assert(mem->object != new_object);
	/*
	 *	Changes to mem->object require the page lock because
	 *	the pageout daemon uses that lock to get the object.
	 */

	XPR(XPR_VM_PAGE,
		"vm_page_rename, new object 0x%X, offset 0x%X page 0x%X\n",
		(integer_t)new_object, (integer_t)new_offset,
		(integer_t)mem, 0,0);

	vm_page_lock_queues();
	vm_page_remove(mem);
	vm_page_insert(mem, new_object, new_offset);
	vm_page_unlock_queues();
}
/*
 *	vm_page_init:
 *
 *	Initialize the fields in a new page.
 *	This takes a structure with random values and initializes it
 *	so that it can be given to vm_page_release or vm_page_insert.
 */
void
vm_page_init(
	vm_page_t	mem,
	ppnum_t		phys_page)
{
	*mem = vm_page_template;
	mem->phys_page = phys_page;
}
/*
 *	vm_page_grab_fictitious:
 *
 *	Remove a fictitious page from the free list.
 *	Returns VM_PAGE_NULL if there are no free pages.
 */
int	c_vm_page_grab_fictitious = 0;
int	c_vm_page_release_fictitious = 0;
int	c_vm_page_more_fictitious = 0;

vm_page_t
vm_page_grab_fictitious(void)
{
	register vm_page_t m;

	m = (vm_page_t)zget(vm_page_zone);
	if (m) {
		vm_page_init(m, vm_page_fictitious_addr);
		m->fictitious = TRUE;
	}

	c_vm_page_grab_fictitious++;
	return m;
}
/*
 *	vm_page_release_fictitious:
 *
 *	Release a fictitious page to the free list.
 */
void
vm_page_release_fictitious(
	register vm_page_t m)
{
	assert(m->fictitious);
	assert(m->phys_page == vm_page_fictitious_addr);

	c_vm_page_release_fictitious++;

	if (m->free)
		panic("vm_page_release_fictitious");
	m->free = TRUE;
	zfree(vm_page_zone, (vm_offset_t)m);
}
/*
 *	vm_page_more_fictitious:
 *
 *	Add more fictitious pages to the free list.
 *	Allowed to block.  This routine is way intimate
 *	with the zones code, for several reasons:
 *	1. we need to carve some page structures out of physical
 *	   memory before zones work, so they _cannot_ come from
 *	   the zone_map.
 *	2. the zone needs to be collectable in order to prevent
 *	   growth without bound.  These structures are used by
 *	   the device pager (by the hundreds and thousands), as
 *	   private pages for pageout, and as blocking pages for
 *	   pagein.  Temporary bursts in demand should not result in
 *	   permanent allocation of a resource.
 *	3. To smooth allocation humps, we allocate single pages
 *	   with kernel_memory_allocate(), and cram them into the
 *	   zone.  This also allows us to initialize the vm_page_t's
 *	   on the way into the zone, so that zget() always returns
 *	   an initialized structure.  The zone free element pointer
 *	   and the free page pointer are both the first item in the
 *	   vm_page_t.
 *	4. By having the pages in the zone pre-initialized, we need
 *	   not keep 2 levels of lists.  The garbage collector simply
 *	   scans our list, and reduces physical memory usage as it
 *	   sees fit.
 */

void vm_page_more_fictitious(void)
{
	extern vm_map_t		zone_map;
	register vm_page_t	m;
	vm_offset_t		addr;
	kern_return_t		retval;
	int			i;

	c_vm_page_more_fictitious++;
++;
1108 * Allocate a single page from the zone_map. Do not wait if no physical
1109 * pages are immediately available, and do not zero the space. We need
1110 * our own blocking lock here to prevent having multiple,
1111 * simultaneous requests from piling up on the zone_map lock. Exactly
1112 * one (of our) threads should be potentially waiting on the map lock.
1113 * If winner is not vm-privileged, then the page allocation will fail,
1114 * and it will temporarily block here in the vm_page_wait().
1116 mutex_lock(&vm_page_alloc_lock
);
1118 * If another thread allocated space, just bail out now.
1120 if (zone_free_count(vm_page_zone
) > 5) {
1122 * The number "5" is a small number that is larger than the
1123 * number of fictitious pages that any single caller will
1124 * attempt to allocate. Otherwise, a thread will attempt to
1125 * acquire a fictitious page (vm_page_grab_fictitious), fail,
1126 * release all of the resources and locks already acquired,
1127 * and then call this routine. This routine finds the pages
1128 * that the caller released, so fails to allocate new space.
1129 * The process repeats infinitely. The largest known number
1130 * of fictitious pages required in this manner is 2. 5 is
1131 * simply a somewhat larger number.
1133 mutex_unlock(&vm_page_alloc_lock
);
1137 if ((retval
= kernel_memory_allocate(zone_map
,
1138 &addr
, PAGE_SIZE
, VM_PROT_ALL
,
1139 KMA_KOBJECT
|KMA_NOPAGEWAIT
)) != KERN_SUCCESS
) {
1141 * No page was available. Tell the pageout daemon, drop the
1142 * lock to give another thread a chance at it, and
1143 * wait for the pageout daemon to make progress.
1145 mutex_unlock(&vm_page_alloc_lock
);
1146 vm_page_wait(THREAD_UNINT
);
1150 * Initialize as many vm_page_t's as will fit on this page. This
1151 * depends on the zone code disturbing ONLY the first item of
1152 * each zone element.
1154 m
= (vm_page_t
)addr
;
1155 for (i
= PAGE_SIZE
/sizeof(struct vm_page
); i
> 0; i
--) {
1156 vm_page_init(m
, vm_page_fictitious_addr
);
1157 m
->fictitious
= TRUE
;
1160 zcram(vm_page_zone
, addr
, PAGE_SIZE
);
1161 mutex_unlock(&vm_page_alloc_lock
);
/*
 *	vm_page_convert:
 *
 *	Attempt to convert a fictitious page into a real page.
 */
boolean_t
vm_page_convert(
	register vm_page_t m)
{
	register vm_page_t real_m;

	assert(m->fictitious);

	real_m = vm_page_grab();
	if (real_m == VM_PAGE_NULL)
		return FALSE;

	m->phys_page = real_m->phys_page;
	m->fictitious = FALSE;

	vm_page_lock_queues();
	if (m->active)
		vm_page_active_count++;
	else if (m->inactive)
		vm_page_inactive_count++;
	vm_page_unlock_queues();

	real_m->phys_page = vm_page_fictitious_addr;
	real_m->fictitious = TRUE;

	vm_page_release_fictitious(real_m);

	return TRUE;
}
/*
 *	vm_pool_low():
 *
 *	Return true if it is not likely that a non-vm_privileged thread
 *	can get memory without blocking.  Advisory only, since the
 *	situation may change under us.
 */
int
vm_pool_low(void)
{
	/* No locking, at worst we will fib. */
	return( vm_page_free_count < vm_page_free_reserved );
}
/*
 *	vm_page_grab:
 *
 *	Remove a page from the free list.
 *	Returns VM_PAGE_NULL if the free list is too small.
 */

unsigned long	vm_page_grab_count = 0;	/* measure demand */

vm_page_t
vm_page_grab(void)
{
	register vm_page_t	mem;
	mutex_lock(&vm_page_queue_free_lock);
	vm_page_grab_count++;

	/*
	 *	Optionally produce warnings if the wire or gobble
	 *	counts exceed some threshold.
	 */
	if (vm_page_wire_count_warning > 0
	    && vm_page_wire_count >= vm_page_wire_count_warning) {
		printf("mk: vm_page_grab(): high wired page count of %d\n",
			vm_page_wire_count);
		assert(vm_page_wire_count < vm_page_wire_count_warning);
	}
	if (vm_page_gobble_count_warning > 0
	    && vm_page_gobble_count >= vm_page_gobble_count_warning) {
		printf("mk: vm_page_grab(): high gobbled page count of %d\n",
			vm_page_gobble_count);
		assert(vm_page_gobble_count < vm_page_gobble_count_warning);
	}

	/*
	 *	Only let privileged threads (involved in pageout)
	 *	dip into the reserved pool.
	 */

	if ((vm_page_free_count < vm_page_free_reserved) &&
	    !current_thread()->vm_privilege) {
		mutex_unlock(&vm_page_queue_free_lock);
		mem = VM_PAGE_NULL;
		goto wakeup_pageout;
	}

	while (vm_page_queue_free == VM_PAGE_NULL) {
		printf("vm_page_grab: no free pages, trouble expected...\n");
		mutex_unlock(&vm_page_queue_free_lock);
		vm_page_wait(THREAD_UNINT);
		mutex_lock(&vm_page_queue_free_lock);
	}

	if (--vm_page_free_count < vm_page_free_count_minimum)
		vm_page_free_count_minimum = vm_page_free_count;
	mem = vm_page_queue_free;
	vm_page_queue_free = (vm_page_t) mem->pageq.next;
	mem->no_isync = TRUE;
	mutex_unlock(&vm_page_queue_free_lock);

	/*
	 *	Decide if we should poke the pageout daemon.
	 *	We do this if the free count is less than the low
	 *	water mark, or if the free count is less than the high
	 *	water mark (but above the low water mark) and the inactive
	 *	count is less than its target.
	 *
	 *	We don't have the counts locked ... if they change a little,
	 *	it doesn't really matter.
	 */

wakeup_pageout:
	if ((vm_page_free_count < vm_page_free_min) ||
	    ((vm_page_free_count < vm_page_free_target) &&
	     (vm_page_inactive_count < vm_page_inactive_target)))
		thread_wakeup((event_t) &vm_page_free_wanted);

//	dbgLog(mem->phys_page, vm_page_free_count, vm_page_wire_count, 4);	/* (TEST/DEBUG) */

	return mem;
}
/*
 *	vm_page_release:
 *
 *	Return a page to the free list.
 */
void
vm_page_release(
	register vm_page_t	mem)
{
	unsigned int	pindex;
	phys_entry	*physent;

	physent = mapping_phys_lookup(mem->phys_page, &pindex);	/* (BRINGUP) */
	if(physent->ppLink & ppN) {					/* (BRINGUP) */
		panic("vm_page_release: already released - %08X %08X\n", mem, mem->phys_page);
	}
	physent->ppLink = physent->ppLink | ppN;			/* (BRINGUP) */

	assert(!mem->private && !mem->fictitious);

//	dbgLog(mem->phys_page, vm_page_free_count, vm_page_wire_count, 5);	/* (TEST/DEBUG) */

	mutex_lock(&vm_page_queue_free_lock);
	if (mem->free)
		panic("vm_page_release");
	mem->free = TRUE;
	mem->pageq.next = (queue_entry_t) vm_page_queue_free;
	vm_page_queue_free = mem;
	vm_page_free_count++;

	/*
	 *	Check if we should wake up someone waiting for a page.
	 *	But don't bother waking them unless they can allocate.
	 *
	 *	We wake up only one thread, to prevent starvation.
	 *	Because the scheduling system handles wait queues FIFO,
	 *	if we wake up all waiting threads, one greedy thread
	 *	can starve multiple nice-guy threads.  When the threads
	 *	all wake up, the greedy thread runs first, grabs the page,
	 *	and waits for another page.  It will be the first to run
	 *	when the next page is freed.
	 *
	 *	However, there is a slight danger here.
	 *	The thread we wake might not use the free page.
	 *	Then the other threads could wait indefinitely
	 *	while the page goes unused.  To forestall this,
	 *	the pageout daemon will keep making free pages
	 *	as long as vm_page_free_wanted is non-zero.
	 */

	if ((vm_page_free_wanted > 0) &&
	    (vm_page_free_count >= vm_page_free_reserved)) {
		vm_page_free_wanted--;
		thread_wakeup_one((event_t) &vm_page_free_count);
	}

	mutex_unlock(&vm_page_queue_free_lock);
}
#define VM_PAGEOUT_DEADLOCK_TIMEOUT 3

/*
 *	vm_page_wait:
 *
 *	Wait for a page to become available.
 *	If there are plenty of free pages, then we don't sleep.
 *
 *	Returns:
 *		TRUE:  There may be another page, try again
 *		FALSE: We were interrupted out of our wait, don't try again
 */
boolean_t
vm_page_wait(
	int	interruptible )
{
	/*
	 *	We can't use vm_page_free_reserved to make this
	 *	determination.  Consider: some thread might
	 *	need to allocate two pages.  The first allocation
	 *	succeeds, the second fails.  After the first page is freed,
	 *	a call to vm_page_wait must really block.
	 */
	uint64_t	abstime;
	kern_return_t	wait_result;
	kern_return_t	kr;
	int		need_wakeup = 0;

	mutex_lock(&vm_page_queue_free_lock);
	if (vm_page_free_count < vm_page_free_target) {
		if (vm_page_free_wanted++ == 0)
			need_wakeup = 1;
		wait_result = assert_wait((event_t)&vm_page_free_count,
					  interruptible);
		mutex_unlock(&vm_page_queue_free_lock);
		counter(c_vm_page_wait_block++);

		if (need_wakeup)
			thread_wakeup((event_t)&vm_page_free_wanted);

		if (wait_result == THREAD_WAITING) {
			clock_interval_to_absolutetime_interval(
				VM_PAGEOUT_DEADLOCK_TIMEOUT,
				NSEC_PER_SEC, &abstime);
			clock_absolutetime_interval_to_deadline(
				abstime, &abstime);
			thread_set_timer_deadline(abstime);
			wait_result = thread_block(THREAD_CONTINUE_NULL);

			if(wait_result == THREAD_TIMED_OUT) {
				kr = vm_pageout_emergency_availability_request();
				return TRUE;
			} else {
				thread_cancel_timer();
			}
		}

		return (wait_result == THREAD_AWAKENED);
	} else {
		mutex_unlock(&vm_page_queue_free_lock);
		return TRUE;
	}
}
/*
 *	vm_page_alloc:
 *
 *	Allocate and return a memory cell associated
 *	with this VM object/offset pair.
 *
 *	Object must be locked.
 */
vm_page_t
vm_page_alloc(
	vm_object_t		object,
	vm_object_offset_t	offset)
{
	register vm_page_t	mem;

	mem = vm_page_grab();
	if (mem == VM_PAGE_NULL)
		return VM_PAGE_NULL;

	vm_page_insert(mem, object, offset);

	return(mem);
}

counter(unsigned int c_laundry_pages_freed = 0;)
int		vm_pagein_cluster_unused = 0;
boolean_t	vm_page_free_verify = FALSE;
/*
 *	vm_page_free:
 *
 *	Returns the given page to the free list,
 *	disassociating it with any VM object.
 *
 *	Object and page queues must be locked prior to entry.
 */
void
vm_page_free(
	register vm_page_t	mem)
{
	vm_object_t	object = mem->object;

	assert(!mem->cleaning);
	assert(!mem->pageout);
	assert(!vm_page_free_verify || pmap_verify_free(mem->phys_page));

	if (mem->tabled)
		vm_page_remove(mem);	/* clears tabled, object, offset */
	VM_PAGE_QUEUES_REMOVE(mem);	/* clears active or inactive */

	if (mem->clustered) {
		mem->clustered = FALSE;
		vm_pagein_cluster_unused++;
	}

	if (mem->wire_count) {
		if (!mem->private && !mem->fictitious)
			vm_page_wire_count--;
		mem->wire_count = 0;
		assert(!mem->gobbled);
	} else if (mem->gobbled) {
		if (!mem->private && !mem->fictitious)
			vm_page_wire_count--;
		vm_page_gobble_count--;
		mem->gobbled = FALSE;
	}

	if (mem->laundry) {
		extern int vm_page_laundry_min;
		if (!object->internal)
			vm_page_burst_count--;
		vm_page_laundry_count--;
		mem->laundry = FALSE;	/* laundry is now clear */
		counter(++c_laundry_pages_freed);
		if (vm_page_laundry_count < vm_page_laundry_min) {
			vm_page_laundry_min = 0;
			thread_wakeup((event_t) &vm_page_laundry_count);
		}
	}

	mem->discard_request = FALSE;

	PAGE_WAKEUP(mem);	/* clears wanted */

	if (mem->absent)
		vm_object_absent_release(object);

	/* Some of these may be unnecessary */
	mem->unlock_request = 0;
	mem->absent = FALSE;
	mem->precious = FALSE;
	mem->reference = FALSE;

	mem->page_error = KERN_SUCCESS;

	if (mem->private) {
		mem->private = FALSE;
		mem->fictitious = TRUE;
		mem->phys_page = vm_page_fictitious_addr;
	}
	if (mem->fictitious) {
		vm_page_release_fictitious(mem);
	} else {
		/* depends on the queues lock */
		if(mem->zero_fill) {
			mem->zero_fill = FALSE;
		}
		vm_page_init(mem, mem->phys_page);
		vm_page_release(mem);
	}
}
void
vm_page_free_list(
	register vm_page_t	mem)
{
	register vm_page_t	nxt;
	register vm_page_t	first = NULL;
	register vm_page_t	last;
	register int		pg_count = 0;

	while (mem) {
		nxt = (vm_page_t)(mem->pageq.next);

		if (mem->clustered)
			vm_pagein_cluster_unused++;

		if (mem->laundry) {
			extern int vm_page_laundry_min;

			if (!mem->object->internal)
				vm_page_burst_count--;
			vm_page_laundry_count--;
			counter(++c_laundry_pages_freed);

			if (vm_page_laundry_count < vm_page_laundry_min) {
				vm_page_laundry_min = 0;
				thread_wakeup((event_t) &vm_page_laundry_count);
			}
		}

		PAGE_WAKEUP(mem);	/* clears wanted */

		if (mem->private)
			mem->fictitious = TRUE;

		if (!mem->fictitious) {
			/* depends on the queues lock */
			vm_page_init(mem, mem->phys_page);

			if (first == NULL)
				last = mem;
			mem->pageq.next = (queue_t) first;
			first = mem;

			pg_count++;
		} else {
			mem->phys_page = vm_page_fictitious_addr;
			vm_page_release_fictitious(mem);
		}
		mem = nxt;
	}
	if (first) {
		mutex_lock(&vm_page_queue_free_lock);

		last->pageq.next = (queue_entry_t) vm_page_queue_free;
		vm_page_queue_free = first;

		vm_page_free_count += pg_count;

		if ((vm_page_free_wanted > 0) &&
		    (vm_page_free_count >= vm_page_free_reserved)) {
			int  available_pages;

			available_pages = vm_page_free_count - vm_page_free_reserved;

			if (available_pages >= vm_page_free_wanted) {
				vm_page_free_wanted = 0;
				thread_wakeup((event_t) &vm_page_free_count);
			} else {
				while (available_pages--) {
					vm_page_free_wanted--;
					thread_wakeup_one((event_t) &vm_page_free_count);
				}
			}
		}
		mutex_unlock(&vm_page_queue_free_lock);
	}
}
/*
 *	vm_page_wire:
 *
 *	Mark this page as wired down by yet
 *	another map, removing it from paging queues
 *	as necessary.
 *
 *	The page's object and the page queues must be locked.
 */
void
vm_page_wire(
	register vm_page_t	mem)
{

//	dbgLog(current_act(), mem->offset, mem->object, 1);	/* (TEST/DEBUG) */

	VM_PAGE_CHECK(mem);

	if (mem->wire_count == 0) {
		VM_PAGE_QUEUES_REMOVE(mem);
		if (!mem->private && !mem->fictitious && !mem->gobbled)
			vm_page_wire_count++;
		if (mem->gobbled)
			vm_page_gobble_count--;
		mem->gobbled = FALSE;
		if(mem->zero_fill) {
			/* depends on the queues lock */
			mem->zero_fill = FALSE;
		}
	}
	assert(!mem->gobbled);
	mem->wire_count++;
}
/*
 *	vm_page_gobble:
 *
 *	Mark this page as consumed by the vm/ipc/xmm subsystems.
 *
 *	Called only for freshly vm_page_grab()ed pages - w/ nothing locked.
 */
void
vm_page_gobble(
	register vm_page_t	mem)
{
	vm_page_lock_queues();
	VM_PAGE_CHECK(mem);

	assert(!mem->gobbled);
	assert(mem->wire_count == 0);

	if (!mem->gobbled && mem->wire_count == 0) {
		if (!mem->private && !mem->fictitious)
			vm_page_wire_count++;
	}
	vm_page_gobble_count++;
	mem->gobbled = TRUE;
	vm_page_unlock_queues();
}
/*
 *	vm_page_unwire:
 *
 *	Release one wiring of this page, potentially
 *	enabling it to be paged again.
 *
 *	The page's object and the page queues must be locked.
 */
void
vm_page_unwire(
	register vm_page_t	mem)
{

//	dbgLog(current_act(), mem->offset, mem->object, 0);	/* (TEST/DEBUG) */

	VM_PAGE_CHECK(mem);
	assert(mem->wire_count > 0);

	if (--mem->wire_count == 0) {
		assert(!mem->private && !mem->fictitious);
		vm_page_wire_count--;
		queue_enter(&vm_page_queue_active, mem, vm_page_t, pageq);
		vm_page_active_count++;
		mem->active = TRUE;
		mem->reference = TRUE;
	}
}
/*
 *	vm_page_deactivate:
 *
 *	Returns the given page to the inactive list,
 *	indicating that no physical maps have access
 *	to this page.  [Used by the physical mapping system.]
 *
 *	The page queues must be locked.
 */
void
vm_page_deactivate(
	register vm_page_t	m)
{
	VM_PAGE_CHECK(m);

//	dbgLog(m->phys_page, vm_page_free_count, vm_page_wire_count, 6);	/* (TEST/DEBUG) */

	/*
	 *	This page is no longer very interesting.  If it was
	 *	interesting (active or inactive/referenced), then we
	 *	clear the reference bit and (re)enter it in the
	 *	inactive queue.  Note wired pages should not have
	 *	their reference bit cleared.
	 */
	if (m->gobbled) {		/* can this happen? */
		assert(m->wire_count == 0);
		if (!m->private && !m->fictitious)
			vm_page_wire_count--;
		vm_page_gobble_count--;
		m->gobbled = FALSE;
	}
	if (m->private || (m->wire_count != 0))
		return;
	if (m->active || (m->inactive && m->reference)) {
		if (!m->fictitious && !m->absent)
			pmap_clear_reference(m->phys_page);
		m->reference = FALSE;
		VM_PAGE_QUEUES_REMOVE(m);
	}
	if (m->wire_count == 0 && !m->inactive) {
		m->page_ticket = vm_page_ticket;
		vm_page_ticket_roll++;

		if(vm_page_ticket_roll == VM_PAGE_TICKETS_IN_ROLL) {
			vm_page_ticket_roll = 0;
			if(vm_page_ticket == VM_PAGE_TICKET_ROLL_IDS)
				vm_page_ticket = 0;
			else
				vm_page_ticket++;
		}

		if (m->zero_fill) {
			queue_enter(&vm_page_queue_zf, m, vm_page_t, pageq);
		} else {
			queue_enter(&vm_page_queue_inactive,
				    m, vm_page_t, pageq);
		}

		m->inactive = TRUE;
		if (!m->fictitious)
			vm_page_inactive_count++;
	}
}
/*
 *	vm_page_activate:
 *
 *	Put the specified page on the active list (if appropriate).
 *
 *	The page queues must be locked.
 */
void
vm_page_activate(
	register vm_page_t	m)
{
	VM_PAGE_CHECK(m);

	if (m->gobbled) {
		assert(m->wire_count == 0);
		if (!m->private && !m->fictitious)
			vm_page_wire_count--;
		vm_page_gobble_count--;
		m->gobbled = FALSE;
	}
	if (m->private)
		return;

	if (m->inactive) {
		if (m->zero_fill) {
			queue_remove(&vm_page_queue_zf, m, vm_page_t, pageq);
		} else {
			queue_remove(&vm_page_queue_inactive,
				     m, vm_page_t, pageq);
		}
		if (!m->fictitious)
			vm_page_inactive_count--;
		m->inactive = FALSE;
	}
	if (m->wire_count == 0) {
		if (m->active)
			panic("vm_page_activate: already active");

		queue_enter(&vm_page_queue_active, m, vm_page_t, pageq);
		m->active = TRUE;
		m->reference = TRUE;
		if (!m->fictitious)
			vm_page_active_count++;
	}
}
/*
 *	vm_page_part_zero_fill:
 *
 *	Zero-fill a part of the page.
 */
void
vm_page_part_zero_fill(
	vm_page_t	m,
	vm_offset_t	m_pa,
	vm_size_t	len)
{
	vm_page_t	tmp;

#ifdef PMAP_ZERO_PART_PAGE_IMPLEMENTED
	pmap_zero_part_page(m->phys_page, m_pa, len);
#else
	while (1) {
		tmp = vm_page_grab();
		if (tmp == VM_PAGE_NULL) {
			vm_page_wait(THREAD_UNINT);
			continue;
		}
		break;
	}
	vm_page_zero_fill(tmp);
	if (m_pa != 0) {
		vm_page_part_copy(m, 0, tmp, 0, m_pa);
	}
	if ((m_pa + len) < PAGE_SIZE) {
		vm_page_part_copy(m, m_pa + len, tmp,
				  m_pa + len, PAGE_SIZE - (m_pa + len));
	}
	vm_page_copy(tmp, m);
	vm_page_lock_queues();
	vm_page_free(tmp);
	vm_page_unlock_queues();
#endif
}
/*
 *	vm_page_zero_fill:
 *
 *	Zero-fill the specified page.
 */
void
vm_page_zero_fill(
	vm_page_t	m)
{
	XPR(XPR_VM_PAGE,
		"vm_page_zero_fill, object 0x%X offset 0x%X page 0x%X\n",
		(integer_t)m->object, (integer_t)m->offset, (integer_t)m, 0,0);

	VM_PAGE_CHECK(m);

//	dbgTrace(0xAEAEAEAE, m->phys_page, 0);		/* (BRINGUP) */
	pmap_zero_page(m->phys_page);
}
/*
 *	vm_page_part_copy:
 *
 *	Copy part of one page to another.
 */
void
vm_page_part_copy(
	vm_page_t	src_m,
	vm_offset_t	src_pa,
	vm_page_t	dst_m,
	vm_offset_t	dst_pa,
	vm_size_t	len)
{
	VM_PAGE_CHECK(src_m);
	VM_PAGE_CHECK(dst_m);

	pmap_copy_part_page(src_m->phys_page, src_pa,
			    dst_m->phys_page, dst_pa, len);
}
/*
 *	vm_page_copy:
 *
 *	Copy one page to another.
 */
void
vm_page_copy(
	vm_page_t	src_m,
	vm_page_t	dest_m)
{
	XPR(XPR_VM_PAGE,
		"vm_page_copy, object 0x%X offset 0x%X to object 0x%X offset 0x%X\n",
		(integer_t)src_m->object, src_m->offset,
		(integer_t)dest_m->object, dest_m->offset,
		0);

	VM_PAGE_CHECK(src_m);
	VM_PAGE_CHECK(dest_m);

	pmap_copy_page(src_m->phys_page, dest_m->phys_page);
}
/*
 *	Currently, this is a primitive allocator that grabs
 *	free pages from the system, sorts them by physical
 *	address, then searches for a region large enough to
 *	satisfy the user's request.
 *
 *	Additional levels of effort:
 *		+ steal clean active/inactive pages
 *		+ force pageouts of dirty pages
 *		+ maintain a map of available physical
 *		  memory
 */

#define	SET_NEXT_PAGE(m,n)	((m)->pageq.next = (struct queue_entry *) (n))
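
/*
 * Illustrative sketch (not part of the original module): the search in
 * vm_page_find_contiguous() below amounts to keeping free pages in a
 * list sorted by physical page number and watching for a run of
 * consecutive numbers of the requested length.  A simplified model over
 * a plain sorted array of page numbers, compiled out; names are
 * hypothetical.
 */
#if 0
#include <stddef.h>

/* Return the index of the start of a run of `want` consecutive values
 * in the sorted array `ppn` of length n, or -1 if there is none. */
static int
example_find_contig_run(const unsigned int *ppn, size_t n, size_t want)
{
	size_t run = 1, start = 0, i;

	if (n == 0 || want == 0)
		return -1;
	if (want == 1)
		return 0;
	for (i = 1; i < n; i++) {
		if (ppn[i] == ppn[i - 1] + 1) {
			if (++run == want)
				return (int)start;
		} else {
			run = 1;
			start = i;
		}
	}
	return -1;
}
#endif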
#if	MACH_ASSERT
int	vm_page_verify_contiguous(
	vm_page_t	pages,
	unsigned int	npages);
#endif	/* MACH_ASSERT */

cpm_counter(unsigned int	vpfls_pages_handled = 0;)
cpm_counter(unsigned int	vpfls_head_insertions = 0;)
cpm_counter(unsigned int	vpfls_tail_insertions = 0;)
cpm_counter(unsigned int	vpfls_general_insertions = 0;)
cpm_counter(unsigned int	vpfc_failed = 0;)
cpm_counter(unsigned int	vpfc_satisfied = 0;)
#if	MACH_ASSERT
/*
 *	vm_page_verify_contiguous:
 *
 *	Check that the list of pages is ordered by
 *	ascending physical address and has no holes.
 */
int
vm_page_verify_contiguous(
	vm_page_t	pages,
	unsigned int	npages)
{
	register vm_page_t	m;
	unsigned int		page_count;
	ppnum_t			prev_addr;

	prev_addr = pages->phys_page;
	page_count = 1;
	for (m = NEXT_PAGE(pages); m != VM_PAGE_NULL; m = NEXT_PAGE(m)) {
		if (m->phys_page != prev_addr + 1) {
			printf("m 0x%x prev_addr 0x%x, current addr 0x%x\n",
			       m, prev_addr, m->phys_page);
			printf("pages 0x%x page_count %u\n", pages, page_count);
			panic("vm_page_verify_contiguous:  not contiguous!");
		}
		prev_addr = m->phys_page;
		++page_count;
	}
	if (page_count != npages) {
		printf("pages 0x%x actual count 0x%x but requested 0x%x\n",
		       pages, page_count, npages);
		panic("vm_page_verify_contiguous:  count error");
	}
	return 1;
}
#endif	/* MACH_ASSERT */
/*
 *	vm_page_find_contiguous:
 *
 *	Find a region large enough to contain at least npages
 *	of contiguous physical memory.
 *
 *	Requirements:
 *		- Called while holding vm_page_queue_free_lock.
 *		- Doesn't respect vm_page_free_reserved; caller
 *		  must not ask for more pages than are legal to grab.
 *
 *	Returns a pointer to a list of gobbled pages or VM_PAGE_NULL.
 *
 *	Algorithm:
 *		Loop over the free list, extracting one page at a time and
 *		inserting those into a sorted sub-list.  We stop as soon as
 *		there's a contiguous range within the sorted list that can
 *		satisfy the contiguous memory request.  This contiguous sub-
 *		list is chopped out of the sorted sub-list and the remainder
 *		of the sorted sub-list is put back onto the beginning of the
 *		free list.
 */
vm_page_t
vm_page_find_contiguous(
	unsigned int	contig_pages)
{
	vm_page_t	sort_list;
	vm_page_t	*contfirstprev, contlast;
	vm_page_t	m, m1;
	ppnum_t		prevcontaddr;
	ppnum_t		nextcontaddr;
	unsigned int	npages;
#if	MACH_ASSERT
	/*
	 *	Verify pages in the free list.
	 */
	npages = 0;
	for (m = vm_page_queue_free; m != VM_PAGE_NULL; m = NEXT_PAGE(m))
		++npages;
	if (npages != vm_page_free_count)
		panic("vm_sort_free_list:  prelim:  npages %u free_count %d",
		      npages, vm_page_free_count);
#endif	/* MACH_ASSERT */

	if (contig_pages == 0 || vm_page_queue_free == VM_PAGE_NULL)
		return VM_PAGE_NULL;
#define PPNUM_PREV(x)	(((x) > 0) ? ((x) - 1) : 0)
#define PPNUM_NEXT(x)	(((x) < PPNUM_MAX) ? ((x) + 1) : PPNUM_MAX)

	npages = 1;
	contfirstprev = &sort_list;
	contlast = sort_list = vm_page_queue_free;
	vm_page_queue_free = NEXT_PAGE(sort_list);
	SET_NEXT_PAGE(sort_list, VM_PAGE_NULL);
	prevcontaddr = PPNUM_PREV(sort_list->phys_page);
	nextcontaddr = PPNUM_NEXT(sort_list->phys_page);
	while (npages < contig_pages &&
	       (m = vm_page_queue_free) != VM_PAGE_NULL)
	{
		cpm_counter(++vpfls_pages_handled);

		/* prepend to existing run? */
		if (m->phys_page == prevcontaddr)
		{
			vm_page_queue_free = NEXT_PAGE(m);
			cpm_counter(++vpfls_head_insertions);
			prevcontaddr = PPNUM_PREV(prevcontaddr);
			SET_NEXT_PAGE(m, *contfirstprev);
			*contfirstprev = m;
			npages++;
			continue; /* no tail expansion check needed */
		}

		/* append to tail of existing run? */
		else if (m->phys_page == nextcontaddr)
		{
			vm_page_queue_free = NEXT_PAGE(m);
			cpm_counter(++vpfls_tail_insertions);
			nextcontaddr = PPNUM_NEXT(nextcontaddr);
			SET_NEXT_PAGE(m, NEXT_PAGE(contlast));
			SET_NEXT_PAGE(contlast, m);
			contlast = m;
			npages++;
		}

		/* prepend to the very front of sorted list? */
		else if (m->phys_page < sort_list->phys_page)
		{
			vm_page_queue_free = NEXT_PAGE(m);
			cpm_counter(++vpfls_general_insertions);
			prevcontaddr = PPNUM_PREV(m->phys_page);
			nextcontaddr = PPNUM_NEXT(m->phys_page);
			SET_NEXT_PAGE(m, sort_list);
			contfirstprev = &sort_list;
			contlast = sort_list = m;
			npages = 1;
		}

		else /* get to proper place for insertion */
		{
			if (m->phys_page < nextcontaddr) {
				prevcontaddr = PPNUM_PREV(sort_list->phys_page);
				nextcontaddr = PPNUM_NEXT(sort_list->phys_page);
				contfirstprev = &sort_list;
				contlast = sort_list;
				npages = 1;
			}
			for (m1 = NEXT_PAGE(contlast);
			     npages < contig_pages &&
			     m1 != VM_PAGE_NULL && m1->phys_page < m->phys_page;
			     m1 = NEXT_PAGE(m1))
			{
				if (m1->phys_page != nextcontaddr) {
					prevcontaddr = PPNUM_PREV(m1->phys_page);
					contfirstprev = NEXT_PAGE_PTR(contlast);
					npages = 1;
				} else {
					npages++;
				}
				nextcontaddr = PPNUM_NEXT(m1->phys_page);
				contlast = m1;
			}

			/*
			 * We may actually already have enough.
			 * This could happen if a previous prepend
			 * joined up two runs to meet our needs.
			 * If so, bail before we take the current
			 * page off the free queue.
			 */
			if (npages == contig_pages)
				break;

			if (m->phys_page != nextcontaddr) {
				contfirstprev = NEXT_PAGE_PTR(contlast);
				prevcontaddr = PPNUM_PREV(m->phys_page);
				nextcontaddr = PPNUM_NEXT(m->phys_page);
				npages = 1;
			} else {
				nextcontaddr = PPNUM_NEXT(nextcontaddr);
				npages++;
			}
			vm_page_queue_free = NEXT_PAGE(m);
			cpm_counter(++vpfls_general_insertions);
			SET_NEXT_PAGE(m, NEXT_PAGE(contlast));
			SET_NEXT_PAGE(contlast, m);
			contlast = m;

			/* See how many pages are now contiguous after the insertion */
			for (m1 = NEXT_PAGE(m);
			     npages < contig_pages &&
			     m1 != VM_PAGE_NULL && m1->phys_page == nextcontaddr;
			     m1 = NEXT_PAGE(m1))
			{
				nextcontaddr = PPNUM_NEXT(nextcontaddr);
				contlast = m1;
				npages++;
			}
		}
	}
	/* how did we do? */
	if (npages == contig_pages)
	{
		cpm_counter(++vpfc_satisfied);

		/* remove the contiguous range from the sorted list */
		m = *contfirstprev;
		*contfirstprev = NEXT_PAGE(contlast);
		SET_NEXT_PAGE(contlast, VM_PAGE_NULL);
		assert(vm_page_verify_contiguous(m, npages));

		/* inline vm_page_gobble() for each returned page */
		for (m1 = m; m1 != VM_PAGE_NULL; m1 = NEXT_PAGE(m1)) {
			assert(!m1->wanted);
			m1->no_isync = TRUE;
			m1->gobbled = TRUE;
		}
		vm_page_wire_count += npages;
		vm_page_gobble_count += npages;
		vm_page_free_count -= npages;
	} else {
		/* the request couldn't be satisfied */
		cpm_counter(++vpfc_failed);
		m = VM_PAGE_NULL;
	}

	/* stick free list at the tail of the sorted list */
	while ((m1 = *contfirstprev) != VM_PAGE_NULL)
		contfirstprev = (vm_page_t *)&m1->pageq.next;
	*contfirstprev = vm_page_queue_free;

	vm_page_queue_free = sort_list;
	return m;
}
/*
 *	cpm_allocate:
 *
 *	Allocate a list of contiguous, wired pages.
 */
kern_return_t
cpm_allocate(
	vm_size_t	size,
	vm_page_t	*list,
	boolean_t	wire)
{
	register vm_page_t	m;
	vm_page_t		*first_contig;
	vm_page_t		free_list, pages;
	unsigned int		npages, n1pages;
	int			vm_pages_available;
	boolean_t		wakeup;

	if (size % page_size != 0)
		return KERN_INVALID_ARGUMENT;

	vm_page_lock_queues();
	mutex_lock(&vm_page_queue_free_lock);

	/*
	 *	Should also take active and inactive pages
	 *	into account...  One day...
	 */
	npages = size / page_size;
	vm_pages_available = vm_page_free_count - vm_page_free_reserved;

	if (npages > vm_pages_available) {
		mutex_unlock(&vm_page_queue_free_lock);
		vm_page_unlock_queues();
		return KERN_RESOURCE_SHORTAGE;
	}

	/*
	 *	Obtain a pointer to a subset of the free
	 *	list large enough to satisfy the request;
	 *	the region will be physically contiguous.
	 */
	pages = vm_page_find_contiguous(npages);

	/* adjust global freelist counts and determine need for wakeups */
	if (vm_page_free_count < vm_page_free_count_minimum)
		vm_page_free_count_minimum = vm_page_free_count;

	wakeup = ((vm_page_free_count < vm_page_free_min) ||
		  ((vm_page_free_count < vm_page_free_target) &&
		   (vm_page_inactive_count < vm_page_inactive_target)));

	mutex_unlock(&vm_page_queue_free_lock);

	if (pages == VM_PAGE_NULL) {
		vm_page_unlock_queues();
		return KERN_NO_SPACE;
	}

	/*
	 *	Walk the returned list, wiring the pages.
	 */
	if (wire == TRUE)
		for (m = pages; m != VM_PAGE_NULL; m = NEXT_PAGE(m)) {
			/*
			 *	Essentially inlined vm_page_wire.
			 */
			assert(!m->inactive);
			assert(!m->private);
			assert(!m->fictitious);
			assert(m->wire_count == 0);

			m->gobbled = FALSE;
			m->wire_count++;
			--vm_page_gobble_count;
		}
	vm_page_unlock_queues();

	if (wakeup)
		thread_wakeup((event_t) &vm_page_free_wanted);

	/*
	 *	The CPM pages should now be available and
	 *	ordered by ascending physical address.
	 */
	assert(vm_page_verify_contiguous(pages, npages));

	*list = pages;
	return KERN_SUCCESS;
}
#include <mach_vm_debug.h>
#if	MACH_VM_DEBUG

#include <mach_debug/hash_info.h>
#include <vm/vm_debug.h>

/*
 *	Routine:	vm_page_info
 *	Purpose:
 *		Return information about the global VP table.
 *		Fills the buffer with as much information as possible
 *		and returns the desired size of the buffer.
 *	Conditions:
 *		Nothing locked.  The caller should provide
 *		possibly-pageable memory.
 */

unsigned int
vm_page_info(
	hash_info_bucket_t	*info,
	unsigned int		count)
{
	unsigned int i;

	if (vm_page_bucket_count < count)
		count = vm_page_bucket_count;

	for (i = 0; i < count; i++) {
		vm_page_bucket_t *bucket = &vm_page_buckets[i];
		unsigned int bucket_count = 0;
		vm_page_t m;

		simple_lock(&vm_page_bucket_lock);
		for (m = bucket->pages; m != VM_PAGE_NULL; m = m->next)
			bucket_count++;
		simple_unlock(&vm_page_bucket_lock);

		/* don't touch pageable memory while holding locks */
		info[i].hib_count = bucket_count;
	}

	return vm_page_bucket_count;
}
#endif	/* MACH_VM_DEBUG */
#include <mach_kdb.h>
#if	MACH_KDB

#include <ddb/db_output.h>
#include <vm/vm_print.h>
#define	printf	kdbprintf

/*
 *	Routine:	vm_page_print		[exported]
 */
void
vm_page_print(
	vm_page_t	p)
{
	iprintf("page 0x%x\n", p);

	iprintf("object=0x%x", p->object);
	printf(", offset=0x%x", p->offset);
	printf(", wire_count=%d", p->wire_count);

	iprintf("%sinactive, %sactive, %sgobbled, %slaundry, %sfree, %sref, %sdiscard\n",
		(p->inactive ? "" : "!"),
		(p->active ? "" : "!"),
		(p->gobbled ? "" : "!"),
		(p->laundry ? "" : "!"),
		(p->free ? "" : "!"),
		(p->reference ? "" : "!"),
		(p->discard_request ? "" : "!"));
	iprintf("%sbusy, %swanted, %stabled, %sfictitious, %sprivate, %sprecious\n",
		(p->busy ? "" : "!"),
		(p->wanted ? "" : "!"),
		(p->tabled ? "" : "!"),
		(p->fictitious ? "" : "!"),
		(p->private ? "" : "!"),
		(p->precious ? "" : "!"));
	iprintf("%sabsent, %serror, %sdirty, %scleaning, %spageout, %sclustered\n",
		(p->absent ? "" : "!"),
		(p->error ? "" : "!"),
		(p->dirty ? "" : "!"),
		(p->cleaning ? "" : "!"),
		(p->pageout ? "" : "!"),
		(p->clustered ? "" : "!"));
	iprintf("%slock_supplied, %soverwriting, %srestart, %sunusual\n",
		(p->lock_supplied ? "" : "!"),
		(p->overwriting ? "" : "!"),
		(p->restart ? "" : "!"),
		(p->unusual ? "" : "!"));

	iprintf("phys_page=0x%x", p->phys_page);
	printf(", page_error=0x%x", p->page_error);
	printf(", page_lock=0x%x", p->page_lock);
	printf(", unlock_request=%d\n", p->unlock_request);
}
#endif	/* MACH_KDB */