/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved.
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 *	Author:	Avadis Tevanian, Jr., Michael Wayne Young
 *
 *	Resident memory management module.
 */
#include <mach/clock_types.h>
#include <mach/vm_prot.h>
#include <mach/vm_statistics.h>
#include <kern/counters.h>
#include <kern/sched_prim.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/zalloc.h>
#include <vm/vm_init.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_kern.h>			/* kernel_memory_allocate() */
#include <kern/misc_protos.h>
#include <zone_debug.h>
/*
 *	Variables used to indicate the relative age of pages in the
 *	inactive list.
 */
int	vm_page_ticket_roll = 0;
int	vm_page_ticket = 0;

/*
 *	Associated with each page of user-allocatable memory is a
 *	page structure.
 */

/*
 *	These variables record the values returned by vm_page_bootstrap,
 *	for debugging purposes.  The implementation of pmap_steal_memory
 *	and pmap_startup here also uses them internally.
 */
vm_offset_t virtual_space_start;
vm_offset_t virtual_space_end;

/*
 *	The vm_page_lookup() routine, which provides for fast
 *	(virtual memory object, offset) to page lookup, employs
 *	the following hash table.  The vm_page_{insert,remove}
 *	routines install and remove associations in the table.
 *	[This table is often called the virtual-to-physical,
 *	or VP, table.]
 */
typedef struct {
	vm_page_t	pages;
#if	MACH_PAGE_HASH_STATS
	int		cur_count;	/* current count */
	int		hi_count;	/* high water mark */
#endif /* MACH_PAGE_HASH_STATS */
} vm_page_bucket_t;

vm_page_bucket_t *vm_page_buckets;		/* Array of buckets */
unsigned int	vm_page_bucket_count = 0;	/* How big is array? */
unsigned int	vm_page_hash_mask;		/* Mask for hash function */
unsigned int	vm_page_hash_shift;		/* Shift for hash function */
decl_simple_lock_data(,vm_page_bucket_lock)
#if	MACH_PAGE_HASH_STATS
/* This routine is only for debug.  It is intended to be called by
 * hand by a developer using a kernel debugger.  This routine prints
 * out vm_page_hash table statistics to the kernel debug console.
 */
void
hash_debug(void)
{
	int	i;
	int	numbuckets = 0;
	int	highsum = 0;
	int	maxdepth = 0;

	for (i = 0; i < vm_page_bucket_count; i++) {
		if (vm_page_buckets[i].hi_count) {
			numbuckets++;
			highsum += vm_page_buckets[i].hi_count;
			if (vm_page_buckets[i].hi_count > maxdepth)
				maxdepth = vm_page_buckets[i].hi_count;
		}
	}
	printf("Total number of buckets: %d\n", vm_page_bucket_count);
	printf("Number used buckets:     %d = %d%%\n",
		numbuckets, 100*numbuckets/vm_page_bucket_count);
	printf("Number unused buckets:   %d = %d%%\n",
		vm_page_bucket_count - numbuckets,
		100*(vm_page_bucket_count-numbuckets)/vm_page_bucket_count);
	printf("Sum of bucket max depth: %d\n", highsum);
	printf("Average bucket depth:    %d.%2d\n",
		highsum/vm_page_bucket_count,
		highsum%vm_page_bucket_count);
	printf("Maximum bucket depth:    %d\n", maxdepth);
}
#endif /* MACH_PAGE_HASH_STATS */
/*
 *	The virtual page size is currently implemented as a runtime
 *	variable, but is constant once initialized using vm_set_page_size.
 *	This initialization must be done in the machine-dependent
 *	bootstrap sequence, before calling other machine-independent
 *	functions.
 *
 *	All references to the virtual page size outside this
 *	module must use the PAGE_SIZE, PAGE_MASK and PAGE_SHIFT
 *	constants.
 */
#ifndef PAGE_SIZE_FIXED
vm_size_t	page_size  = 4096;
vm_size_t	page_mask  = 4095;
int		page_shift = 12;
#endif /* PAGE_SIZE_FIXED */
/*
 *	Resident page structures are initialized from
 *	a template (see vm_page_alloc).
 *
 *	When adding a new field to the virtual memory
 *	object structure, be sure to add initialization
 *	(see vm_page_bootstrap).
 */
struct vm_page	vm_page_template;

/*
 *	Resident pages that represent real memory
 *	are allocated from a free list.
 */
vm_page_t	vm_page_queue_free;
vm_page_t	vm_page_queue_fictitious;
decl_mutex_data(,vm_page_queue_free_lock)
unsigned int	vm_page_free_wanted;
int		vm_page_free_count;
int		vm_page_fictitious_count;

unsigned int	vm_page_free_count_minimum;	/* debugging */

/*
 *	Occasionally, the virtual memory system uses
 *	resident page structures that do not refer to
 *	real pages, for example to leave a page with
 *	important state information in the VP table.
 *
 *	These page structures are allocated the way
 *	most other kernel structures are.
 */
zone_t	vm_page_zone;
decl_mutex_data(,vm_page_alloc_lock)
unsigned int io_throttle_zero_fill;
decl_mutex_data(,vm_page_zero_fill_lock)

/*
 *	Fictitious pages don't have a physical address,
 *	but we must initialize phys_addr to something.
 *	For debugging, this should be a strange value
 *	that the pmap module can recognize in assertions.
 */
vm_offset_t vm_page_fictitious_addr = (vm_offset_t) -1;

/*
 *	Resident page structures are also chained on
 *	queues that are used by the page replacement
 *	system (pageout daemon).  These queues are
 *	defined here, but are shared by the pageout
 *	module.  The inactive queue is broken into
 *	inactive and zf for convenience as the
 *	pageout daemon often assigns a higher
 *	affinity to zf pages.
 */
queue_head_t	vm_page_queue_active;
queue_head_t	vm_page_queue_inactive;
queue_head_t	vm_page_queue_zf;
decl_mutex_data(,vm_page_queue_lock)
int		vm_page_active_count;
int		vm_page_inactive_count;
int		vm_page_wire_count;
int		vm_page_gobble_count = 0;
int		vm_page_wire_count_warning = 0;
int		vm_page_gobble_count_warning = 0;

/* the following fields are protected by the vm_page_queue_lock */
queue_head_t	vm_page_queue_limbo;
int	vm_page_limbo_count = 0;		/* total pages in limbo */
int	vm_page_limbo_real_count = 0;		/* real pages in limbo */
int	vm_page_pin_count = 0;			/* number of pinned pages */

decl_simple_lock_data(,vm_page_preppin_lock)
/*
 *	Several page replacement parameters are also
 *	shared with this module, so that page allocation
 *	(done here in vm_page_alloc) can trigger the
 *	pageout daemon.
 */
int	vm_page_free_target = 0;
int	vm_page_free_min = 0;
int	vm_page_inactive_target = 0;
int	vm_page_free_reserved = 0;
int	vm_page_laundry_count = 0;

/*
 *	The VM system has a couple of heuristics for deciding
 *	that pages are "uninteresting" and should be placed
 *	on the inactive queue as likely candidates for replacement.
 *	These variables let the heuristics be controlled at run-time
 *	to make experimentation easier.
 */
boolean_t vm_page_deactivate_hint = TRUE;
/*
 *	vm_set_page_size:
 *
 *	Sets the page size, perhaps based upon the memory
 *	size.  Must be called before any use of page-size
 *	dependent functions.
 *
 *	Sets page_shift and page_mask from page_size.
 */
void
vm_set_page_size(void)
{
#ifndef PAGE_SIZE_FIXED
	page_mask = page_size - 1;

	if ((page_mask & page_size) != 0)
		panic("vm_set_page_size: page size not a power of two");

	for (page_shift = 0; ; page_shift++)
		if ((1 << page_shift) == page_size)
			break;
#endif /* PAGE_SIZE_FIXED */
}
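
/*
 * Illustrative sketch (not part of the original source): the panic check
 * above relies on the identity that a power of two shares no set bits
 * with its predecessor, i.e. (size & (size - 1)) == 0.  The hypothetical
 * standalone program below walks through the same mask/shift derivation
 * that vm_set_page_size() performs; all names here are invented for the
 * example.
 */
#if 0
#include <stdio.h>

int
main(void)
{
	unsigned long size = 4096;
	unsigned long mask = size - 1;		/* 0xfff for 4 KB pages */
	int shift;

	if ((mask & size) != 0)
		return 1;			/* not a power of two */
	for (shift = 0; (1UL << shift) != size; shift++)
		continue;			/* find log2(size) */
	printf("page_size=%lu page_mask=%#lx page_shift=%d\n",
	    size, mask, shift);
	return 0;
}
#endif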
/*
 *	vm_page_bootstrap:
 *
 *	Initializes the resident memory module.
 *
 *	Allocates memory for the page cells, and
 *	for the object/offset-to-page hash table headers.
 *	Each page cell is initialized and placed on the free list.
 *	Returns the range of available kernel virtual memory.
 */
void
vm_page_bootstrap(
	vm_offset_t	*startp,
	vm_offset_t	*endp)
{
	register vm_page_t	m;
	int			i;
	unsigned int		log1, log2, size;

	/*
	 *	Initialize the vm_page template.
	 */
	m = &vm_page_template;
	m->object = VM_OBJECT_NULL;		/* reset later */
	m->offset = 0;				/* reset later */

	m->reference = FALSE;
	m->dump_cleaning = FALSE;
	m->list_req_pending = FALSE;
	m->fictitious = FALSE;
	m->clustered = FALSE;
	m->lock_supplied = FALSE;
	m->zero_fill = FALSE;

	m->phys_addr = 0;			/* reset later */

	m->page_lock = VM_PROT_NONE;
	m->unlock_request = VM_PROT_NONE;
	m->page_error = KERN_SUCCESS;
	/*
	 *	Initialize the page queues.
	 */
	mutex_init(&vm_page_queue_free_lock, ETAP_VM_PAGEQ_FREE);
	mutex_init(&vm_page_queue_lock, ETAP_VM_PAGEQ);
	simple_lock_init(&vm_page_preppin_lock, ETAP_VM_PREPPIN);

	vm_page_queue_free = VM_PAGE_NULL;
	vm_page_queue_fictitious = VM_PAGE_NULL;
	queue_init(&vm_page_queue_active);
	queue_init(&vm_page_queue_inactive);
	queue_init(&vm_page_queue_zf);
	queue_init(&vm_page_queue_limbo);

	vm_page_free_wanted = 0;

	/*
	 *	Steal memory for the map and zone subsystems.
	 */
	vm_map_steal_memory();
	/*
	 *	Allocate (and initialize) the virtual-to-physical
	 *	table hash buckets.
	 *
	 *	The number of buckets should be a power of two to
	 *	get a good hash function.  The following computation
	 *	chooses the first power of two that is greater
	 *	than the number of physical pages in the system.
	 */
	simple_lock_init(&vm_page_bucket_lock, ETAP_VM_BUCKET);

	if (vm_page_bucket_count == 0) {
		unsigned int npages = pmap_free_pages();

		vm_page_bucket_count = 1;
		while (vm_page_bucket_count < npages)
			vm_page_bucket_count <<= 1;
	}

	vm_page_hash_mask = vm_page_bucket_count - 1;

	/*
	 *	Calculate object shift value for hashing algorithm:
	 *		O = log2(sizeof(struct vm_object))
	 *		B = log2(vm_page_bucket_count)
	 *	hash shifts the object left by B/2 - O + 1 bits.
	 */
	size = vm_page_bucket_count;
	for (log1 = 0; size > 1; log1++)
		size /= 2;
	size = sizeof(struct vm_object);
	for (log2 = 0; size > 1; log2++)
		size /= 2;
	vm_page_hash_shift = log1/2 - log2 + 1;

	if (vm_page_hash_mask & vm_page_bucket_count)
		printf("vm_page_bootstrap: WARNING -- strange page hash\n");

	vm_page_buckets = (vm_page_bucket_t *)
		pmap_steal_memory(vm_page_bucket_count *
				  sizeof(vm_page_bucket_t));

	for (i = 0; i < vm_page_bucket_count; i++) {
		register vm_page_bucket_t *bucket = &vm_page_buckets[i];

		bucket->pages = VM_PAGE_NULL;
#if	MACH_PAGE_HASH_STATS
		bucket->cur_count = 0;
		bucket->hi_count = 0;
#endif /* MACH_PAGE_HASH_STATS */
	}
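
	/*
	 * Illustrative sketch (not part of the original source): with a
	 * power-of-two bucket count, reducing a hash value to a bucket
	 * index needs only a bitwise AND with (count - 1), instead of a
	 * division-based modulo.  The disabled fragment below demonstrates
	 * the idea with an assumed page count; the names are invented for
	 * the example.
	 */
#if 0
	{
		unsigned int example_npages = 123456;	/* assumed physical page count */
		unsigned int count = 1;

		while (count < example_npages)		/* first power of two >= npages */
			count <<= 1;
		assert((count & (count - 1)) == 0);	/* count is a power of two */
		/* index = hash & (count - 1) is then equivalent to hash % count */
	}
#endif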
	/*
	 *	Machine-dependent code allocates the resident page table.
	 *	It uses vm_page_init to initialize the page frames.
	 *	The code also returns to us the virtual space available
	 *	to the kernel.  We don't trust the pmap module
	 *	to get the alignment right.
	 */
	pmap_startup(&virtual_space_start, &virtual_space_end);
	virtual_space_start = round_page(virtual_space_start);
	virtual_space_end = trunc_page(virtual_space_end);

	*startp = virtual_space_start;
	*endp = virtual_space_end;

	/*
	 *	Compute the initial "wire" count.
	 *	Up until now, the pages which have been set aside are not under
	 *	the VM system's control, so although they aren't explicitly
	 *	wired, they nonetheless can't be moved.  At this moment,
	 *	all VM managed pages are "free", courtesy of pmap_startup.
	 */
	vm_page_wire_count = atop(mem_size) - vm_page_free_count;	/* initial value */

	printf("vm_page_bootstrap: %d free pages\n", vm_page_free_count);
	vm_page_free_count_minimum = vm_page_free_count;
}
#ifndef	MACHINE_PAGES
/*
 *	We implement pmap_steal_memory and pmap_startup with the help
 *	of two simpler functions, pmap_virtual_space and pmap_next_page.
 */
vm_offset_t
pmap_steal_memory(
	vm_size_t size)
{
	vm_offset_t addr, vaddr, paddr;

	/*
	 *	We round the size up to a multiple of the pointer size.
	 */
	size = (size + sizeof (void *) - 1) &~ (sizeof (void *) - 1);

	/*
	 *	If this is the first call to pmap_steal_memory,
	 *	we have to initialize ourselves.
	 */
	if (virtual_space_start == virtual_space_end) {
		pmap_virtual_space(&virtual_space_start, &virtual_space_end);

		/*
		 *	The initial values must be aligned properly, and
		 *	we don't trust the pmap module to do it right.
		 */
		virtual_space_start = round_page(virtual_space_start);
		virtual_space_end = trunc_page(virtual_space_end);
	}

	/*
	 *	Allocate virtual memory for this request.
	 */
	addr = virtual_space_start;
	virtual_space_start += size;

	kprintf("pmap_steal_memory: %08X - %08X; size=%08X\n", addr, virtual_space_start, size);	/* (TEST/DEBUG) */

	/*
	 *	Allocate and map physical pages to back new virtual pages.
	 */
	for (vaddr = round_page(addr);
	     vaddr < addr + size;
	     vaddr += PAGE_SIZE) {
		if (!pmap_next_page(&paddr))
			panic("pmap_steal_memory");

		/*
		 *	XXX Logically, these mappings should be wired,
		 *	but some pmap modules barf if they are.
		 */
		pmap_enter(kernel_pmap, vaddr, paddr,
			   VM_PROT_READ|VM_PROT_WRITE,
			   VM_WIMG_USE_DEFAULT, FALSE);

		/*
		 *	Account for newly stolen memory.
		 */
		vm_page_wire_count++;
	}

	return addr;
}
void
pmap_startup(
	vm_offset_t *startp,
	vm_offset_t *endp)
{
	unsigned int i, npages, pages_initialized;
	vm_page_t pages;
	vm_offset_t paddr;

	/*
	 *	We calculate how many page frames we will have
	 *	and then allocate the page structures in one chunk.
	 */
	npages = ((PAGE_SIZE * pmap_free_pages() +
		   (round_page(virtual_space_start) - virtual_space_start)) /
		  (PAGE_SIZE + sizeof *pages));

	pages = (vm_page_t) pmap_steal_memory(npages * sizeof *pages);

	/*
	 *	Initialize the page frames.
	 */
	for (i = 0, pages_initialized = 0; i < npages; i++) {
		if (!pmap_next_page(&paddr))
			break;

		vm_page_init(&pages[i], paddr);
		pages_initialized++;
	}

	/*
	 *	Release pages in reverse order so that physical pages
	 *	initially get allocated in ascending addresses.  This keeps
	 *	the devices (which must address physical memory) happy if
	 *	they require several consecutive pages.
	 */
	for (i = pages_initialized; i > 0; i--) {
		vm_page_release(&pages[i - 1]);
	}

	/*
	 *	We have to re-align virtual_space_start,
	 *	because pmap_steal_memory has been using it.
	 */
	virtual_space_start = round_page(virtual_space_start);

	*startp = virtual_space_start;
	*endp = virtual_space_end;
}
#endif	/* MACHINE_PAGES */
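
/*
 * Illustrative sketch (not part of the original source): pmap_steal_memory
 * rounds request sizes with the classic "add then mask" idiom, and
 * round_page/trunc_page apply the same arithmetic with the page mask.
 * The hypothetical standalone program below shows both directions; the
 * macro names are invented for the example.
 */
#if 0
#include <assert.h>
#include <stddef.h>

#define ALIGN_UP(x, a)   (((x) + (a) - 1) & ~((size_t)(a) - 1))
#define ALIGN_DOWN(x, a) ((x) & ~((size_t)(a) - 1))

int
main(void)
{
	/* round a 13-byte request up to pointer alignment, as above */
	assert(ALIGN_UP(13, sizeof(void *)) == 16);
	/* round addresses to 4 KB page boundaries, as round_page/trunc_page do */
	assert(ALIGN_UP(0x12345, 4096) == 0x13000);
	assert(ALIGN_DOWN(0x12345, 4096) == 0x12000);
	return 0;
}
#endif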
/*
 *	Routine:	vm_page_module_init
 *
 *	Second initialization pass, to be done after
 *	the basic VM system is ready.
 */
void
vm_page_module_init(void)
{
	vm_page_zone = zinit((vm_size_t) sizeof(struct vm_page),
			     0, PAGE_SIZE, "vm pages");

#if	ZONE_DEBUG
	zone_debug_disable(vm_page_zone);
#endif	/* ZONE_DEBUG */

	zone_change(vm_page_zone, Z_EXPAND, FALSE);
	zone_change(vm_page_zone, Z_EXHAUST, TRUE);
	zone_change(vm_page_zone, Z_FOREIGN, TRUE);

	/*
	 *	Adjust zone statistics to account for the real pages allocated
	 *	in vm_page_create(). [Q: is this really what we want?]
	 */
	vm_page_zone->count += vm_page_pages;
	vm_page_zone->cur_size += vm_page_pages * vm_page_zone->elem_size;

	mutex_init(&vm_page_alloc_lock, ETAP_VM_PAGE_ALLOC);
	mutex_init(&vm_page_zero_fill_lock, ETAP_VM_PAGE_ALLOC);
}
/*
 *	Routine:	vm_page_create
 *
 *	After the VM system is up, machine-dependent code
 *	may stumble across more physical memory.  For example,
 *	memory that it was reserving for a frame buffer.
 *	vm_page_create turns this memory into available pages.
 */
void
vm_page_create(
	vm_offset_t start,
	vm_offset_t end)
{
	vm_page_t	m;
	vm_offset_t	paddr;

	for (paddr = round_page(start);
	     paddr < trunc_page(end);
	     paddr += PAGE_SIZE) {
		while ((m = (vm_page_t) vm_page_grab_fictitious())
			== VM_PAGE_NULL)
			vm_page_more_fictitious();

		vm_page_init(m, paddr);
		m->fictitious = FALSE;
		vm_page_release(m);
	}
}
/*
 *	vm_page_hash:
 *
 *	Distributes the object/offset key pair among hash buckets.
 *
 *	NOTE:	To get a good hash function, the bucket count should
 *		be a power of two.
 */
#define vm_page_hash(object, offset) (\
	( ((natural_t)(vm_offset_t)object<<vm_page_hash_shift) + (natural_t)atop(offset))\
	 & vm_page_hash_mask)
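
/*
 * Illustrative sketch (not part of the original source): the macro above
 * mixes the object pointer (shifted so that distinct objects land in
 * distant buckets) with the page index of the offset, then masks the sum
 * down to a valid bucket index.  A hypothetical standalone rendering,
 * with assumed values for the bucket count, shift, and page size:
 */
#if 0
#include <stdint.h>
#include <stdio.h>

#define BUCKET_COUNT	1024u		/* assumed power-of-two bucket count */
#define HASH_SHIFT	5u		/* assumed vm_page_hash_shift value */
#define PAGE_SHIFT_EX	12u		/* 4 KB pages, so atop(x) == x >> 12 */

static unsigned int
page_hash(uintptr_t object, uint64_t offset)
{
	return (unsigned int)
	    (((object << HASH_SHIFT) + (offset >> PAGE_SHIFT_EX))
	     & (BUCKET_COUNT - 1));
}

int
main(void)
{
	/* consecutive pages of one object fall into consecutive buckets */
	printf("%u %u\n", page_hash(0x1000, 0), page_hash(0x1000, 4096));
	return 0;
}
#endif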
/*
 *	vm_page_insert:		[ internal use only ]
 *
 *	Inserts the given mem entry into the object/object-page
 *	table and object list.
 *
 *	The object must be locked.
 */
void
vm_page_insert(
	register vm_page_t		mem,
	register vm_object_t		object,
	register vm_object_offset_t	offset)
{
	register vm_page_bucket_t *bucket;

	XPR(XPR_VM_PAGE,
		"vm_page_insert, object 0x%X offset 0x%X page 0x%X\n",
		(integer_t)object, (integer_t)offset, (integer_t)mem, 0,0);

	if (mem->tabled)
		panic("vm_page_insert");

	assert(!object->internal || offset < object->size);

	/* only insert "pageout" pages into "pageout" objects,
	 * and normal pages into normal objects */
	assert(object->pageout == mem->pageout);

	/*
	 *	Record the object/offset pair in this page.
	 */
	mem->object = object;
	mem->offset = offset;

	/*
	 *	Insert it into the object_object/offset hash table.
	 */
	bucket = &vm_page_buckets[vm_page_hash(object, offset)];
	simple_lock(&vm_page_bucket_lock);
	mem->next = bucket->pages;
	bucket->pages = mem;
#if	MACH_PAGE_HASH_STATS
	if (++bucket->cur_count > bucket->hi_count)
		bucket->hi_count = bucket->cur_count;
#endif /* MACH_PAGE_HASH_STATS */
	simple_unlock(&vm_page_bucket_lock);

	/*
	 *	Now link into the object's list of backed pages.
	 */
	queue_enter(&object->memq, mem, vm_page_t, listq);
	mem->tabled = TRUE;

	/*
	 *	Show that the object has one more resident page.
	 */
	object->resident_page_count++;
}
/*
 *	vm_page_replace:
 *
 *	Exactly like vm_page_insert, except that we first
 *	remove any existing page at the given offset in object.
 *
 *	The object and page queues must be locked.
 */
void
vm_page_replace(
	register vm_page_t		mem,
	register vm_object_t		object,
	register vm_object_offset_t	offset)
{
	register vm_page_bucket_t *bucket;

	if (mem->tabled)
		panic("vm_page_replace");

	/*
	 *	Record the object/offset pair in this page.
	 */
	mem->object = object;
	mem->offset = offset;

	/*
	 *	Insert it into the object_object/offset hash table,
	 *	replacing any page that might have been there.
	 */
	bucket = &vm_page_buckets[vm_page_hash(object, offset)];
	simple_lock(&vm_page_bucket_lock);
	if (bucket->pages) {
		vm_page_t *mp = &bucket->pages;
		register vm_page_t m = *mp;

		do {
			if (m->object == object && m->offset == offset) {
				/*
				 * Remove page from bucket and from object,
				 * and return it to the free list.
				 */
				*mp = m->next;
				queue_remove(&object->memq, m, vm_page_t,
					     listq);
				m->tabled = FALSE;
				object->resident_page_count--;

				/*
				 * Return page to the free list.
				 * Note the page is not tabled now, so this
				 * won't self-deadlock on the bucket lock.
				 */
				vm_page_free(m);
				break;
			}
			mp = &m->next;
		} while ((m = *mp) != VM_PAGE_NULL);
		mem->next = bucket->pages;
	} else {
		mem->next = VM_PAGE_NULL;
	}
	bucket->pages = mem;
	simple_unlock(&vm_page_bucket_lock);

	/*
	 *	Now link into the object's list of backed pages.
	 */
	queue_enter(&object->memq, mem, vm_page_t, listq);
	mem->tabled = TRUE;

	/*
	 *	And show that the object has one more resident
	 *	page.
	 */
	object->resident_page_count++;
}
/*
 *	vm_page_remove:		[ internal use only ]
 *
 *	Removes the given mem entry from the object/offset-page
 *	table and the object page list.
 *
 *	The object and page must be locked.
 */
void
vm_page_remove(
	register vm_page_t	mem)
{
	register vm_page_bucket_t	*bucket;
	register vm_page_t		this;

	XPR(XPR_VM_PAGE,
		"vm_page_remove, object 0x%X offset 0x%X page 0x%X\n",
		(integer_t)mem->object, (integer_t)mem->offset,
		(integer_t)mem, 0,0);

	assert(!mem->cleaning);

	/*
	 *	Remove from the object_object/offset hash table.
	 */
	bucket = &vm_page_buckets[vm_page_hash(mem->object, mem->offset)];
	simple_lock(&vm_page_bucket_lock);
	if ((this = bucket->pages) == mem) {
		/* optimize for common case */
		bucket->pages = mem->next;
	} else {
		register vm_page_t	*prev;

		for (prev = &this->next;
		     (this = *prev) != mem;
		     prev = &this->next)
			continue;
		*prev = this->next;
	}
#if	MACH_PAGE_HASH_STATS
	bucket->cur_count--;
#endif /* MACH_PAGE_HASH_STATS */
	simple_unlock(&vm_page_bucket_lock);

	/*
	 *	Now remove from the object's list of backed pages.
	 */
	queue_remove(&mem->object->memq, mem, vm_page_t, listq);

	/*
	 *	And show that the object has one fewer resident
	 *	page.
	 */
	mem->object->resident_page_count--;

	mem->tabled = FALSE;
	mem->object = VM_OBJECT_NULL;
	mem->offset = 0;
}
/*
 *	vm_page_lookup:
 *
 *	Returns the page associated with the object/offset
 *	pair specified; if none is found, VM_PAGE_NULL is returned.
 *
 *	The object must be locked.  No side effects.
 */
vm_page_t
vm_page_lookup(
	register vm_object_t		object,
	register vm_object_offset_t	offset)
{
	register vm_page_t	mem;
	register vm_page_bucket_t *bucket;

	/*
	 *	Search the hash table for this object/offset pair.
	 */
	bucket = &vm_page_buckets[vm_page_hash(object, offset)];

	simple_lock(&vm_page_bucket_lock);
	for (mem = bucket->pages; mem != VM_PAGE_NULL; mem = mem->next) {
		if ((mem->object == object) && (mem->offset == offset))
			break;
	}
	simple_unlock(&vm_page_bucket_lock);

	return (mem);
}
/*
 *	vm_page_rename:
 *
 *	Move the given memory entry from its
 *	current object to the specified target object/offset.
 *
 *	The object must be locked.
 */
void
vm_page_rename(
	register vm_page_t	mem,
	register vm_object_t	new_object,
	vm_object_offset_t	new_offset)
{
	assert(mem->object != new_object);

	/*
	 *	Changes to mem->object require the page lock because
	 *	the pageout daemon uses that lock to get the object.
	 */
	XPR(XPR_VM_PAGE,
		"vm_page_rename, new object 0x%X, offset 0x%X page 0x%X\n",
		(integer_t)new_object, (integer_t)new_offset,
		(integer_t)mem, 0,0);

	vm_page_lock_queues();
	vm_page_remove(mem);
	vm_page_insert(mem, new_object, new_offset);
	vm_page_unlock_queues();
}
/*
 *	vm_page_init:
 *
 *	Initialize the fields in a new page.
 *	This takes a structure with random values and initializes it
 *	so that it can be given to vm_page_release or vm_page_insert.
 */
void
vm_page_init(
	vm_page_t	mem,
	vm_offset_t	phys_addr)
{
	*mem = vm_page_template;
	mem->phys_addr = phys_addr;
}
/*
 *	vm_page_grab_fictitious:
 *
 *	Remove a fictitious page from the free list.
 *	Returns VM_PAGE_NULL if there are no free pages.
 */
int	c_vm_page_grab_fictitious = 0;
int	c_vm_page_release_fictitious = 0;
int	c_vm_page_more_fictitious = 0;

vm_page_t
vm_page_grab_fictitious(void)
{
	register vm_page_t m;

	m = (vm_page_t)zget(vm_page_zone);
	if (m) {
		vm_page_init(m, vm_page_fictitious_addr);
		m->fictitious = TRUE;
	}

	c_vm_page_grab_fictitious++;
	return m;
}
/*
 *	vm_page_release_fictitious:
 *
 *	Release a fictitious page to the free list.
 */
void
vm_page_release_fictitious(
	register vm_page_t m)
{
	assert(m->fictitious);
	assert(m->phys_addr == vm_page_fictitious_addr);

	c_vm_page_release_fictitious++;

	if (m->free)
		panic("vm_page_release_fictitious");
	m->free = TRUE;
	zfree(vm_page_zone, (vm_offset_t)m);
}
/*
 *	vm_page_more_fictitious:
 *
 *	Add more fictitious pages to the free list.
 *	Allowed to block.  This routine is way intimate
 *	with the zones code, for several reasons:
 *	1. we need to carve some page structures out of physical
 *	   memory before zones work, so they _cannot_ come from
 *	   the zone_map.
 *	2. the zone needs to be collectable in order to prevent
 *	   growth without bound.  These structures are used by
 *	   the device pager (by the hundreds and thousands), as
 *	   private pages for pageout, and as blocking pages for
 *	   pagein.  Temporary bursts in demand should not result in
 *	   permanent allocation of a resource.
 *	3. To smooth allocation humps, we allocate single pages
 *	   with kernel_memory_allocate(), and cram them into the
 *	   zone.  This also allows us to initialize the vm_page_t's
 *	   on the way into the zone, so that zget() always returns
 *	   an initialized structure.  The zone free element pointer
 *	   and the free page pointer are both the first item in the
 *	   vm_page_t.
 *	4. By having the pages in the zone pre-initialized, we need
 *	   not keep 2 levels of lists.  The garbage collector simply
 *	   scans our list, and reduces physical memory usage as it
 *	   sees fit.  (A sketch of the page-carving step follows the
 *	   function below.)
 */

void vm_page_more_fictitious(void)
{
	extern vm_map_t zone_map;
	register vm_page_t m;
	vm_offset_t addr;
	kern_return_t retval;
	int i;

	c_vm_page_more_fictitious++;

	/*
	 * Allocate a single page from the zone_map.  Do not wait if no
	 * physical pages are immediately available, and do not zero the
	 * space.  We need our own blocking lock here to prevent having
	 * multiple, simultaneous requests from piling up on the zone_map
	 * lock.  Exactly one (of our) threads should be potentially waiting
	 * on the map lock.  If the winner is not vm-privileged, then the
	 * page allocation will fail, and it will temporarily block here in
	 * the vm_page_wait().
	 */
	mutex_lock(&vm_page_alloc_lock);

	/*
	 * If another thread allocated space, just bail out now.
	 */
	if (zone_free_count(vm_page_zone) > 5) {
		/*
		 * The number "5" is a small number that is larger than the
		 * number of fictitious pages that any single caller will
		 * attempt to allocate.  Otherwise, a thread will attempt to
		 * acquire a fictitious page (vm_page_grab_fictitious), fail,
		 * release all of the resources and locks already acquired,
		 * and then call this routine.  This routine finds the pages
		 * that the caller released, so fails to allocate new space.
		 * The process repeats infinitely.  The largest known number
		 * of fictitious pages required in this manner is 2.  5 is
		 * simply a somewhat larger number.
		 */
		mutex_unlock(&vm_page_alloc_lock);
		return;
	}

	if ((retval = kernel_memory_allocate(zone_map,
			&addr, PAGE_SIZE, VM_PROT_ALL,
			KMA_KOBJECT|KMA_NOPAGEWAIT)) != KERN_SUCCESS) {
		/*
		 * No page was available.  Tell the pageout daemon, drop the
		 * lock to give another thread a chance at it, and
		 * wait for the pageout daemon to make progress.
		 */
		mutex_unlock(&vm_page_alloc_lock);
		vm_page_wait(THREAD_UNINT);
		return;
	}

	/*
	 * Initialize as many vm_page_t's as will fit on this page.  This
	 * depends on the zone code disturbing ONLY the first item of
	 * each zone element.
	 */
	m = (vm_page_t)addr;
	for (i = PAGE_SIZE/sizeof(struct vm_page); i > 0; i--) {
		vm_page_init(m, vm_page_fictitious_addr);
		m->fictitious = TRUE;
		m++;
	}
	zcram(vm_page_zone, addr, PAGE_SIZE);
	mutex_unlock(&vm_page_alloc_lock);
}
/*
 *	vm_page_convert:
 *
 *	Attempt to convert a fictitious page into a real page.
 */
boolean_t
vm_page_convert(
	register vm_page_t m)
{
	register vm_page_t real_m;

	assert(m->fictitious);

	real_m = vm_page_grab();
	if (real_m == VM_PAGE_NULL)
		return FALSE;

	m->phys_addr = real_m->phys_addr;
	m->fictitious = FALSE;

	vm_page_lock_queues();
	if (m->active)
		vm_page_active_count++;
	else if (m->inactive)
		vm_page_inactive_count++;
	vm_page_unlock_queues();

	real_m->phys_addr = vm_page_fictitious_addr;
	real_m->fictitious = TRUE;

	vm_page_release_fictitious(real_m);
	return TRUE;
}
/*
 *	vm_pool_low:
 *
 *	Return true if it is not likely that a non-vm_privileged thread
 *	can get memory without blocking.  Advisory only, since the
 *	situation may change under us.
 */
int
vm_pool_low(void)
{
	/* No locking, at worst we will fib. */
	return (vm_page_free_count < vm_page_free_reserved);
}
/*
 *	vm_page_grab:
 *
 *	Remove a page from the free list.
 *	Returns VM_PAGE_NULL if the free list is too small.
 */

unsigned long	vm_page_grab_count = 0;	/* measure demand */

vm_page_t
vm_page_grab(void)
{
	register vm_page_t	mem;

	mutex_lock(&vm_page_queue_free_lock);
	vm_page_grab_count++;

	/*
	 *	Optionally produce warnings if the wire or gobble
	 *	counts exceed some threshold.
	 */
	if (vm_page_wire_count_warning > 0
	    && vm_page_wire_count >= vm_page_wire_count_warning) {
		printf("mk: vm_page_grab(): high wired page count of %d\n",
			vm_page_wire_count);
		assert(vm_page_wire_count < vm_page_wire_count_warning);
	}
	if (vm_page_gobble_count_warning > 0
	    && vm_page_gobble_count >= vm_page_gobble_count_warning) {
		printf("mk: vm_page_grab(): high gobbled page count of %d\n",
			vm_page_gobble_count);
		assert(vm_page_gobble_count < vm_page_gobble_count_warning);
	}

	/*
	 *	Only let privileged threads (involved in pageout)
	 *	dip into the reserved pool.
	 */
	if ((vm_page_free_count < vm_page_free_reserved) &&
	    !current_thread()->vm_privilege) {
		mutex_unlock(&vm_page_queue_free_lock);
		mem = VM_PAGE_NULL;
		goto wakeup_pageout;
	}

	while (vm_page_queue_free == VM_PAGE_NULL) {
		printf("vm_page_grab: no free pages, trouble expected...\n");
		mutex_unlock(&vm_page_queue_free_lock);
		VM_PAGE_WAIT();
		mutex_lock(&vm_page_queue_free_lock);
	}

	if (--vm_page_free_count < vm_page_free_count_minimum)
		vm_page_free_count_minimum = vm_page_free_count;
	mem = vm_page_queue_free;
	vm_page_queue_free = (vm_page_t) mem->pageq.next;
	mem->free = FALSE;
	mem->no_isync = TRUE;
	mutex_unlock(&vm_page_queue_free_lock);

	/*
	 *	Decide if we should poke the pageout daemon.
	 *	We do this if the free count is less than the low
	 *	water mark, or if the free count is less than the high
	 *	water mark (but above the low water mark) and the inactive
	 *	count is less than its target.
	 *
	 *	We don't have the counts locked ... if they change a little,
	 *	it doesn't really matter.
	 */
wakeup_pageout:
	if ((vm_page_free_count < vm_page_free_min) ||
	    ((vm_page_free_count < vm_page_free_target) &&
	     (vm_page_inactive_count < vm_page_inactive_target)))
		thread_wakeup((event_t) &vm_page_free_wanted);

//	dbgLog(mem->phys_addr, vm_page_free_count, vm_page_wire_count, 4);	/* (TEST/DEBUG) */

	return mem;
}
/*
 *	vm_page_release:
 *
 *	Return a page to the free list.
 */
void
vm_page_release(
	register vm_page_t	mem)
{
	assert(!mem->private && !mem->fictitious);

//	dbgLog(mem->phys_addr, vm_page_free_count, vm_page_wire_count, 5);	/* (TEST/DEBUG) */

	mutex_lock(&vm_page_queue_free_lock);
	if (mem->free)
		panic("vm_page_release");
	mem->free = TRUE;
	mem->pageq.next = (queue_entry_t) vm_page_queue_free;
	vm_page_queue_free = mem;
	vm_page_free_count++;

	/*
	 *	Check if we should wake up someone waiting for a page.
	 *	But don't bother waking them unless they can allocate.
	 *
	 *	We wakeup only one thread, to prevent starvation.
	 *	Because the scheduling system handles wait queues FIFO,
	 *	if we wakeup all waiting threads, one greedy thread
	 *	can starve multiple niceguy threads.  When the threads
	 *	all wakeup, the greedy thread runs first, grabs the page,
	 *	and waits for another page.  It will be the first to run
	 *	when the next page is freed.
	 *
	 *	However, there is a slight danger here.
	 *	The thread we wake might not use the free page.
	 *	Then the other threads could wait indefinitely
	 *	while the page goes unused.  To forestall this,
	 *	the pageout daemon will keep making free pages
	 *	as long as vm_page_free_wanted is non-zero.
	 */
	if ((vm_page_free_wanted > 0) &&
	    (vm_page_free_count >= vm_page_free_reserved)) {
		vm_page_free_wanted--;
		thread_wakeup_one((event_t) &vm_page_free_count);
	}

	mutex_unlock(&vm_page_queue_free_lock);
}
#define VM_PAGEOUT_DEADLOCK_TIMEOUT 3

/*
 *	vm_page_wait:
 *
 *	Wait for a page to become available.
 *	If there are plenty of free pages, then we don't sleep.
 *
 *	Returns:
 *		TRUE:  There may be another page, try again
 *		FALSE: We were interrupted out of our wait, don't try again
 */
boolean_t
vm_page_wait(
	int	interruptible)
{
	/*
	 *	We can't use vm_page_free_reserved to make this
	 *	determination.  Consider: some thread might
	 *	need to allocate two pages.  The first allocation
	 *	succeeds, the second fails.  After the first page is freed,
	 *	a call to vm_page_wait must really block.
	 */
	uint64_t	abstime;
	kern_return_t	wait_result;
	kern_return_t	kr;
	int		need_wakeup = 0;

	mutex_lock(&vm_page_queue_free_lock);
	if (vm_page_free_count < vm_page_free_target) {
		if (vm_page_free_wanted++ == 0)
			need_wakeup = 1;
		wait_result = assert_wait((event_t)&vm_page_free_count,
					  interruptible);
		mutex_unlock(&vm_page_queue_free_lock);
		counter(c_vm_page_wait_block++);

		if (need_wakeup)
			thread_wakeup((event_t)&vm_page_free_wanted);

		if (wait_result == THREAD_WAITING) {
			clock_interval_to_absolutetime_interval(
				VM_PAGEOUT_DEADLOCK_TIMEOUT,
				NSEC_PER_SEC, &abstime);
			clock_absolutetime_interval_to_deadline(
				abstime, &abstime);
			thread_set_timer_deadline(abstime);
			wait_result = thread_block(THREAD_CONTINUE_NULL);

			if (wait_result == THREAD_TIMED_OUT) {
				kr = vm_pageout_emergency_availability_request();
				return TRUE;
			}
			thread_cancel_timer();
		}

		return (wait_result == THREAD_AWAKENED);
	}
	mutex_unlock(&vm_page_queue_free_lock);
	return TRUE;
}
/*
 *	vm_page_alloc:
 *
 *	Allocate and return a memory cell associated
 *	with this VM object/offset pair.
 *
 *	Object must be locked.
 */
vm_page_t
vm_page_alloc(
	vm_object_t		object,
	vm_object_offset_t	offset)
{
	register vm_page_t	mem;

	mem = vm_page_grab();
	if (mem == VM_PAGE_NULL)
		return VM_PAGE_NULL;

	vm_page_insert(mem, object, offset);

	return (mem);
}

counter(unsigned int c_laundry_pages_freed = 0;)

int		vm_pagein_cluster_unused = 0;
boolean_t	vm_page_free_verify = FALSE;
/*
 *	vm_page_free:
 *
 *	Returns the given page to the free list,
 *	disassociating it from any VM object.
 *
 *	Object and page queues must be locked prior to entry.
 */
void
vm_page_free(
	register vm_page_t	mem)
{
	vm_object_t	object = mem->object;

	assert(!mem->cleaning);
	assert(!mem->pageout);
	assert(!vm_page_free_verify || pmap_verify_free(mem->phys_addr));

	if (mem->tabled)
		vm_page_remove(mem);	/* clears tabled, object, offset */
	VM_PAGE_QUEUES_REMOVE(mem);	/* clears active or inactive */

	if (mem->clustered) {
		mem->clustered = FALSE;
		vm_pagein_cluster_unused++;
	}

	if (mem->wire_count) {
		if (!mem->private && !mem->fictitious)
			vm_page_wire_count--;
		mem->wire_count = 0;
		assert(!mem->gobbled);
	} else if (mem->gobbled) {
		if (!mem->private && !mem->fictitious)
			vm_page_wire_count--;
		vm_page_gobble_count--;
	}
	mem->gobbled = FALSE;

	if (mem->laundry) {
		extern int vm_page_laundry_min;

		vm_page_laundry_count--;
		mem->laundry = FALSE;	/* laundry is now clear */
		counter(++c_laundry_pages_freed);
		if (vm_page_laundry_count < vm_page_laundry_min) {
			vm_page_laundry_min = 0;
			thread_wakeup((event_t) &vm_page_laundry_count);
		}
	}

	mem->discard_request = FALSE;

	PAGE_WAKEUP(mem);	/* clears wanted */

	if (mem->absent)
		vm_object_absent_release(object);

	/* Some of these may be unnecessary */
	mem->unlock_request = 0;
	mem->absent = FALSE;
	mem->precious = FALSE;
	mem->reference = FALSE;

	mem->page_error = KERN_SUCCESS;

	if (mem->private) {
		mem->private = FALSE;
		mem->fictitious = TRUE;
		mem->phys_addr = vm_page_fictitious_addr;
	}
	if (mem->fictitious) {
		vm_page_release_fictitious(mem);
	} else {
		/* depends on the queues lock */
		if (mem->zero_fill) {
			mem->zero_fill = FALSE;
		}
		vm_page_init(mem, mem->phys_addr);
		vm_page_release(mem);
	}
}
/*
 *	vm_page_wire:
 *
 *	Mark this page as wired down by yet
 *	another map, removing it from paging queues
 *	as necessary.
 *
 *	The page's object and the page queues must be locked.
 */
void
vm_page_wire(
	register vm_page_t	mem)
{

//	dbgLog(current_act(), mem->offset, mem->object, 1);	/* (TEST/DEBUG) */

	if (mem->wire_count == 0) {
		VM_PAGE_QUEUES_REMOVE(mem);
		if (!mem->private && !mem->fictitious && !mem->gobbled)
			vm_page_wire_count++;
		if (mem->gobbled)
			vm_page_gobble_count--;
		mem->gobbled = FALSE;
		if (mem->zero_fill) {
			/* depends on the queues lock */
			mem->zero_fill = FALSE;
		}
	}
	assert(!mem->gobbled);
	mem->wire_count++;
}
/*
 *	vm_page_gobble:
 *
 *	Mark this page as consumed by the vm/ipc/xmm subsystems.
 *
 *	Called only for freshly vm_page_grab()ed pages - w/ nothing locked.
 */
void
vm_page_gobble(
	register vm_page_t	mem)
{
	vm_page_lock_queues();

	assert(!mem->gobbled);
	assert(mem->wire_count == 0);

	if (!mem->gobbled && mem->wire_count == 0) {
		if (!mem->private && !mem->fictitious)
			vm_page_wire_count++;
	}
	vm_page_gobble_count++;
	mem->gobbled = TRUE;
	vm_page_unlock_queues();
}
/*
 *	vm_page_unwire:
 *
 *	Release one wiring of this page, potentially
 *	enabling it to be paged again.
 *
 *	The page's object and the page queues must be locked.
 */
void
vm_page_unwire(
	register vm_page_t	mem)
{

//	dbgLog(current_act(), mem->offset, mem->object, 0);	/* (TEST/DEBUG) */

	assert(mem->wire_count > 0);

	if (--mem->wire_count == 0) {
		assert(!mem->private && !mem->fictitious);
		vm_page_wire_count--;
		queue_enter(&vm_page_queue_active, mem, vm_page_t, pageq);
		vm_page_active_count++;
		mem->active = TRUE;
		mem->reference = TRUE;
	}
}
/*
 *	vm_page_deactivate:
 *
 *	Returns the given page to the inactive list,
 *	indicating that no physical maps have access
 *	to this page.  [Used by the physical mapping system.]
 *
 *	The page queues must be locked.
 */
void
vm_page_deactivate(
	register vm_page_t	m)
{

//	dbgLog(m->phys_addr, vm_page_free_count, vm_page_wire_count, 6);	/* (TEST/DEBUG) */

	/*
	 *	This page is no longer very interesting.  If it was
	 *	interesting (active or inactive/referenced), then we
	 *	clear the reference bit and (re)enter it in the
	 *	inactive queue.  Note wired pages should not have
	 *	their reference bit cleared.
	 */
	if (m->gobbled) {		/* can this happen? */
		assert(m->wire_count == 0);
		if (!m->private && !m->fictitious)
			vm_page_wire_count--;
		vm_page_gobble_count--;
		m->gobbled = FALSE;
	}
	if (m->private || (m->wire_count != 0))
		return;
	if (m->active || (m->inactive && m->reference)) {
		if (!m->fictitious && !m->absent)
			pmap_clear_reference(m->phys_addr);
		m->reference = FALSE;
		VM_PAGE_QUEUES_REMOVE(m);
	}
	if (m->wire_count == 0 && !m->inactive) {
		m->page_ticket = vm_page_ticket;
		vm_page_ticket_roll++;
		if (vm_page_ticket_roll == VM_PAGE_TICKETS_IN_ROLL) {
			vm_page_ticket_roll = 0;
			if (vm_page_ticket == VM_PAGE_TICKET_ROLL_IDS)
				vm_page_ticket = 0;
			else
				vm_page_ticket++;
		}

		if (m->zero_fill) {
			queue_enter(&vm_page_queue_zf, m, vm_page_t, pageq);
		} else {
			queue_enter(&vm_page_queue_inactive,
				    m, vm_page_t, pageq);
		}
		m->inactive = TRUE;
		if (!m->fictitious)
			vm_page_inactive_count++;
	}
}
/*
 *	vm_page_activate:
 *
 *	Put the specified page on the active list (if appropriate).
 *
 *	The page queues must be locked.
 */
void
vm_page_activate(
	register vm_page_t	m)
{
	if (m->gobbled) {
		assert(m->wire_count == 0);
		if (!m->private && !m->fictitious)
			vm_page_wire_count--;
		vm_page_gobble_count--;
		m->gobbled = FALSE;
	}
	if (m->private)
		return;

	if (m->inactive) {
		if (m->zero_fill) {
			queue_remove(&vm_page_queue_zf, m, vm_page_t, pageq);
		} else {
			queue_remove(&vm_page_queue_inactive,
				     m, vm_page_t, pageq);
		}
		if (!m->fictitious)
			vm_page_inactive_count--;
		m->inactive = FALSE;
	}
	if (m->wire_count == 0) {
		if (m->active)
			panic("vm_page_activate: already active");

		queue_enter(&vm_page_queue_active, m, vm_page_t, pageq);
		m->active = TRUE;
		m->reference = TRUE;
		if (!m->fictitious)
			vm_page_active_count++;
	}
}
/*
 *	vm_page_part_zero_fill:
 *
 *	Zero-fill a part of the page.
 */
void
vm_page_part_zero_fill(
	vm_page_t	m,
	vm_offset_t	m_pa,
	vm_size_t	len)
{
	vm_page_t	tmp;

#ifdef PMAP_ZERO_PART_PAGE_IMPLEMENTED
	pmap_zero_part_page(m->phys_addr, m_pa, len);
#else
	while (1) {
		tmp = vm_page_grab();
		if (tmp == VM_PAGE_NULL) {
			vm_page_wait(THREAD_UNINT);
			continue;
		}
		break;
	}
	vm_page_zero_fill(tmp);
	if (m_pa != 0) {
		vm_page_part_copy(m, 0, tmp, 0, m_pa);
	}
	if ((m_pa + len) < PAGE_SIZE) {
		vm_page_part_copy(m, m_pa + len, tmp,
			m_pa + len, PAGE_SIZE - (m_pa + len));
	}
	vm_page_copy(tmp, m);
	vm_page_lock_queues();
	vm_page_free(tmp);
	vm_page_unlock_queues();
#endif
}
/*
 *	vm_page_zero_fill:
 *
 *	Zero-fill the specified page.
 */
void
vm_page_zero_fill(
	vm_page_t	m)
{
	XPR(XPR_VM_PAGE,
		"vm_page_zero_fill, object 0x%X offset 0x%X page 0x%X\n",
		(integer_t)m->object, (integer_t)m->offset, (integer_t)m, 0,0);

	pmap_zero_page(m->phys_addr);
}
/*
 *	vm_page_part_copy:
 *
 *	Copy part of one page to another.
 */
void
vm_page_part_copy(
	vm_page_t	src_m,
	vm_offset_t	src_pa,
	vm_page_t	dst_m,
	vm_offset_t	dst_pa,
	vm_size_t	len)
{
	VM_PAGE_CHECK(src_m);
	VM_PAGE_CHECK(dst_m);

	pmap_copy_part_page(src_m->phys_addr, src_pa,
			dst_m->phys_addr, dst_pa, len);
}
/*
 *	vm_page_copy:
 *
 *	Copy one page to another.
 */
void
vm_page_copy(
	vm_page_t	src_m,
	vm_page_t	dest_m)
{
	XPR(XPR_VM_PAGE,
	"vm_page_copy, object 0x%X offset 0x%X to object 0x%X offset 0x%X\n",
	(integer_t)src_m->object, src_m->offset,
	(integer_t)dest_m->object, dest_m->offset,
	0);

	VM_PAGE_CHECK(src_m);
	VM_PAGE_CHECK(dest_m);

	pmap_copy_page(src_m->phys_addr, dest_m->phys_addr);
}
/*
 *	Currently, this is a primitive allocator that grabs
 *	free pages from the system, sorts them by physical
 *	address, then searches for a region large enough to
 *	satisfy the user's request.
 *
 *	Additional levels of effort:
 *		+ steal clean active/inactive pages
 *		+ force pageouts of dirty pages
 *		+ maintain a map of available physical
 *		  memory
 */

#define	SET_NEXT_PAGE(m,n)	((m)->pageq.next = (struct queue_entry *) (n))

#if	MACH_ASSERT
int	vm_page_verify_contiguous(
	vm_page_t	pages,
	unsigned int	npages);
#endif	/* MACH_ASSERT */

cpm_counter(unsigned int	vpfls_pages_handled = 0;)
cpm_counter(unsigned int	vpfls_head_insertions = 0;)
cpm_counter(unsigned int	vpfls_tail_insertions = 0;)
cpm_counter(unsigned int	vpfls_general_insertions = 0;)
cpm_counter(unsigned int	vpfc_failed = 0;)
cpm_counter(unsigned int	vpfc_satisfied = 0;)
/*
 *	Sort free list by ascending physical address,
 *	using a not-particularly-bright sort algorithm.
 *	Caller holds vm_page_queue_free_lock.
 */
static void
vm_page_free_list_sort(void)
{
	vm_page_t	sort_list;
	vm_page_t	sort_list_end;
	vm_page_t	m, m1, *prev, next_m;
	vm_offset_t	addr;
#if	MACH_ASSERT
	unsigned int	npages;
	int		old_free_count;
#endif	/* MACH_ASSERT */

#if	MACH_ASSERT
	/*
	 *	Verify pages in the free list.
	 */
	npages = 0;
	for (m = vm_page_queue_free; m != VM_PAGE_NULL; m = NEXT_PAGE(m))
		++npages;
	if (npages != vm_page_free_count)
		panic("vm_sort_free_list: prelim: npages %d free_count %d",
		      npages, vm_page_free_count);
	old_free_count = vm_page_free_count;
#endif	/* MACH_ASSERT */

	sort_list = sort_list_end = vm_page_queue_free;
	m = NEXT_PAGE(vm_page_queue_free);
	SET_NEXT_PAGE(vm_page_queue_free, VM_PAGE_NULL);
	cpm_counter(vpfls_pages_handled = 0);
	while (m != VM_PAGE_NULL) {
		cpm_counter(++vpfls_pages_handled);
		next_m = NEXT_PAGE(m);
		if (m->phys_addr < sort_list->phys_addr) {
			cpm_counter(++vpfls_head_insertions);
			SET_NEXT_PAGE(m, sort_list);
			sort_list = m;
		} else if (m->phys_addr > sort_list_end->phys_addr) {
			cpm_counter(++vpfls_tail_insertions);
			SET_NEXT_PAGE(sort_list_end, m);
			SET_NEXT_PAGE(m, VM_PAGE_NULL);
			sort_list_end = m;
		} else {
			cpm_counter(++vpfls_general_insertions);
			/* general sorted list insertion */
			prev = &sort_list;
			for (m1 = sort_list; m1 != VM_PAGE_NULL; m1 = NEXT_PAGE(m1)) {
				if (m1->phys_addr > m->phys_addr) {
					if (*prev != m1)
						panic("vm_sort_free_list: ugh");
					SET_NEXT_PAGE(m, *prev);
					*prev = m;
					break;
				}
				prev = (vm_page_t *) &m1->pageq.next;
			}
		}
		m = next_m;
	}

#if	MACH_ASSERT
	/*
	 *	Verify that pages are sorted into ascending order.
	 */
	for (m = sort_list, npages = 0; m != VM_PAGE_NULL; m = NEXT_PAGE(m)) {
		if (m != sort_list &&
		    m->phys_addr <= addr) {
			printf("m 0x%x addr 0x%x\n", m, addr);
			panic("vm_sort_free_list");
		}
		addr = m->phys_addr;
		++npages;
	}
	if (old_free_count != vm_page_free_count)
		panic("vm_sort_free_list: old_free %d free_count %d",
		      old_free_count, vm_page_free_count);
	if (npages != vm_page_free_count)
		panic("vm_sort_free_list: npages %d free_count %d",
		      npages, vm_page_free_count);
#endif	/* MACH_ASSERT */

	vm_page_queue_free = sort_list;
}
#if	MACH_ASSERT
/*
 *	Check that the list of pages is ordered by
 *	ascending physical address and has no holes.
 */
int
vm_page_verify_contiguous(
	vm_page_t	pages,
	unsigned int	npages)
{
	register vm_page_t	m;
	unsigned int		page_count;
	vm_offset_t		prev_addr;

	prev_addr = pages->phys_addr;
	page_count = 1;
	for (m = NEXT_PAGE(pages); m != VM_PAGE_NULL; m = NEXT_PAGE(m)) {
		if (m->phys_addr != prev_addr + page_size) {
			printf("m 0x%x prev_addr 0x%x, current addr 0x%x\n",
			       m, prev_addr, m->phys_addr);
			printf("pages 0x%x page_count %d\n", pages, page_count);
			panic("vm_page_verify_contiguous: not contiguous!");
		}
		prev_addr = m->phys_addr;
		++page_count;
	}
	if (page_count != npages) {
		printf("pages 0x%x actual count 0x%x but requested 0x%x\n",
		       pages, page_count, npages);
		panic("vm_page_verify_contiguous: count error");
	}
	return 1;
}
#endif	/* MACH_ASSERT */
/*
 *	Find a region large enough to contain at least npages
 *	of contiguous physical memory.
 *
 *	Requires:
 *		- Called while holding vm_page_queue_free_lock.
 *		- Doesn't respect vm_page_free_reserved; caller
 *		  must not ask for more pages than are legal to grab.
 *
 *	Returns a pointer to a list of gobbled pages or VM_PAGE_NULL.
 */
static vm_page_t
vm_page_find_contiguous(
	int		npages)
{
	vm_page_t	m, *contig_prev, *prev_ptr;
	vm_offset_t	prev_addr;
	unsigned int	contig_npages;
	vm_page_t	list;

	if (npages < 1)
		return VM_PAGE_NULL;

	prev_addr = vm_page_queue_free->phys_addr - (page_size + 1);
	prev_ptr = &vm_page_queue_free;
	for (m = vm_page_queue_free; m != VM_PAGE_NULL; m = NEXT_PAGE(m)) {

		if (m->phys_addr != prev_addr + page_size) {
			/*
			 *	Whoops!  Pages aren't contiguous.  Start over.
			 */
			contig_npages = 0;
			contig_prev = prev_ptr;
		}

		if (++contig_npages == npages) {
			/*
			 *	Chop these pages out of the free list.
			 *	Mark them all as gobbled.
			 */
			list = *contig_prev;
			*contig_prev = NEXT_PAGE(m);
			SET_NEXT_PAGE(m, VM_PAGE_NULL);
			for (m = list; m != VM_PAGE_NULL; m = NEXT_PAGE(m)) {
				m->free = FALSE;
				m->gobbled = TRUE;
			}
			vm_page_free_count -= npages;
			if (vm_page_free_count < vm_page_free_count_minimum)
				vm_page_free_count_minimum = vm_page_free_count;
			vm_page_wire_count += npages;
			vm_page_gobble_count += npages;
			cpm_counter(++vpfc_satisfied);
			assert(vm_page_verify_contiguous(list, contig_npages));
			return list;
		}

		assert(contig_npages < npages);
		prev_ptr = (vm_page_t *) &m->pageq.next;
		prev_addr = m->phys_addr;
	}
	cpm_counter(++vpfc_failed);
	return VM_PAGE_NULL;
}
/*
 *	Allocate a list of contiguous, wired pages.
 */
kern_return_t
cpm_allocate(
	vm_size_t	size,
	vm_page_t	*list,
	boolean_t	wire)
{
	register vm_page_t	m;
	vm_page_t		*first_contig;
	vm_page_t		free_list, pages;
	unsigned int		npages, n1pages;
	int			vm_pages_available;

	if (size % page_size != 0)
		return KERN_INVALID_ARGUMENT;

	vm_page_lock_queues();
	mutex_lock(&vm_page_queue_free_lock);

	/*
	 *	Should also take active and inactive pages
	 *	into account...  One day...
	 */
	vm_pages_available = vm_page_free_count - vm_page_free_reserved;

	if (size > vm_pages_available * page_size) {
		mutex_unlock(&vm_page_queue_free_lock);
		return KERN_RESOURCE_SHORTAGE;
	}

	vm_page_free_list_sort();

	npages = size / page_size;

	/*
	 *	Obtain a pointer to a subset of the free
	 *	list large enough to satisfy the request;
	 *	the region will be physically contiguous.
	 */
	pages = vm_page_find_contiguous(npages);
	if (pages == VM_PAGE_NULL) {
		mutex_unlock(&vm_page_queue_free_lock);
		vm_page_unlock_queues();
		return KERN_NO_SPACE;
	}

	mutex_unlock(&vm_page_queue_free_lock);

	/*
	 *	Walk the returned list, wiring the pages.
	 */
	if (wire == TRUE)
		for (m = pages; m != VM_PAGE_NULL; m = NEXT_PAGE(m)) {
			/*
			 *	Essentially inlined vm_page_wire.
			 */
			assert(!m->inactive);
			assert(!m->private);
			assert(!m->fictitious);
			assert(m->wire_count == 0);

			m->gobbled = FALSE;
			m->wire_count++;
			--vm_page_gobble_count;
		}
	vm_page_unlock_queues();

	/*
	 *	The CPM pages should now be available and
	 *	ordered by ascending physical address.
	 */
	assert(vm_page_verify_contiguous(pages, npages));

	*list = pages;
	return KERN_SUCCESS;
}
#include <mach_vm_debug.h>
#if	MACH_VM_DEBUG

#include <mach_debug/hash_info.h>
#include <vm/vm_debug.h>

/*
 *	Routine:	vm_page_info
 *	Purpose:
 *		Return information about the global VP table.
 *		Fills the buffer with as much information as possible
 *		and returns the desired size of the buffer.
 *	Conditions:
 *		Nothing locked.  The caller should provide
 *		possibly-pageable memory.
 */
unsigned int
vm_page_info(
	hash_info_bucket_t	*info,
	unsigned int		count)
{
	int i;

	if (vm_page_bucket_count < count)
		count = vm_page_bucket_count;

	for (i = 0; i < count; i++) {
		vm_page_bucket_t	*bucket = &vm_page_buckets[i];
		unsigned int		bucket_count = 0;
		vm_page_t		m;

		simple_lock(&vm_page_bucket_lock);
		for (m = bucket->pages; m != VM_PAGE_NULL; m = m->next)
			bucket_count++;
		simple_unlock(&vm_page_bucket_lock);

		/* don't touch pageable memory while holding locks */
		info[i].hib_count = bucket_count;
	}

	return vm_page_bucket_count;
}
#endif	/* MACH_VM_DEBUG */
2126 #include <mach_kdb.h>
2129 #include <ddb/db_output.h>
2130 #include <vm/vm_print.h>
2131 #define printf kdbprintf
2134 * Routine: vm_page_print [exported]
2142 iprintf("page 0x%x\n", p
);
2146 iprintf("object=0x%x", p
->object
);
2147 printf(", offset=0x%x", p
->offset
);
2148 printf(", wire_count=%d", p
->wire_count
);
2150 iprintf("%sinactive, %sactive, %sgobbled, %slaundry, %sfree, %sref, %sdiscard\n",
2151 (p
->inactive
? "" : "!"),
2152 (p
->active
? "" : "!"),
2153 (p
->gobbled
? "" : "!"),
2154 (p
->laundry
? "" : "!"),
2155 (p
->free
? "" : "!"),
2156 (p
->reference
? "" : "!"),
2157 (p
->discard_request
? "" : "!"));
2158 iprintf("%sbusy, %swanted, %stabled, %sfictitious, %sprivate, %sprecious\n",
2159 (p
->busy
? "" : "!"),
2160 (p
->wanted
? "" : "!"),
2161 (p
->tabled
? "" : "!"),
2162 (p
->fictitious
? "" : "!"),
2163 (p
->private ? "" : "!"),
2164 (p
->precious
? "" : "!"));
2165 iprintf("%sabsent, %serror, %sdirty, %scleaning, %spageout, %sclustered\n",
2166 (p
->absent
? "" : "!"),
2167 (p
->error
? "" : "!"),
2168 (p
->dirty
? "" : "!"),
2169 (p
->cleaning
? "" : "!"),
2170 (p
->pageout
? "" : "!"),
2171 (p
->clustered
? "" : "!"));
2172 iprintf("%slock_supplied, %soverwriting, %srestart, %sunusual\n",
2173 (p
->lock_supplied
? "" : "!"),
2174 (p
->overwriting
? "" : "!"),
2175 (p
->restart
? "" : "!"),
2176 (p
->unusual
? "" : "!"));
2178 iprintf("phys_addr=0x%x", p
->phys_addr
);
2179 printf(", page_error=0x%x", p
->page_error
);
2180 printf(", page_lock=0x%x", p
->page_lock
);
2181 printf(", unlock_request=%d\n", p
->unlock_request
);
2185 #endif /* MACH_KDB */