/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 *	Author:	Avadis Tevanian, Jr., Michael Wayne Young
 *
 *	Resident memory management module.
 */
#include <mach/clock_types.h>
#include <mach/vm_prot.h>
#include <mach/vm_statistics.h>
#include <kern/counters.h>
#include <kern/sched_prim.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/zalloc.h>
#include <vm/vm_init.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_kern.h>			/* kernel_memory_allocate() */
#include <kern/misc_protos.h>
#include <zone_debug.h>
/*
 *	Variables used to indicate the relative age of pages in the
 *	inactive list.
 */
int	vm_page_ticket_roll = 0;
int	vm_page_ticket = 0;

/*
 *	Associated with each page of user-allocatable memory is a
 *	page structure.
 */

/*
 *	These variables record the values returned by vm_page_bootstrap,
 *	for debugging purposes.  The implementation of pmap_steal_memory
 *	and pmap_startup here also uses them internally.
 */
vm_offset_t virtual_space_start;
vm_offset_t virtual_space_end;
/*
 *	The vm_page_lookup() routine, which provides for fast
 *	(virtual memory object, offset) to page lookup, employs
 *	the following hash table.  The vm_page_{insert,remove}
 *	routines install and remove associations in the table.
 *	[This table is often called the virtual-to-physical,
 *	or VP, table.]
 */
typedef struct {
	vm_page_t	pages;
#if	MACH_PAGE_HASH_STATS
	int		cur_count;		/* current count */
	int		hi_count;		/* high water mark */
#endif /* MACH_PAGE_HASH_STATS */
} vm_page_bucket_t;

vm_page_bucket_t *vm_page_buckets;		/* Array of buckets */
unsigned int	vm_page_bucket_count = 0;	/* How big is array? */
unsigned int	vm_page_hash_mask;		/* Mask for hash function */
unsigned int	vm_page_hash_shift;		/* Shift for hash function */
decl_simple_lock_data(,vm_page_bucket_lock)
#if	MACH_PAGE_HASH_STATS
/* This routine is only for debug.  It is intended to be called by
 * hand by a developer using a kernel debugger.  This routine prints
 * out vm_page_hash table statistics to the kernel debug console.
 */
void
hash_debug(void)
{
	int	i;
	int	numbuckets = 0;
	int	highsum = 0;
	int	maxdepth = 0;

	for (i = 0; i < vm_page_bucket_count; i++) {
		if (vm_page_buckets[i].hi_count) {
			numbuckets++;
			highsum += vm_page_buckets[i].hi_count;
			if (vm_page_buckets[i].hi_count > maxdepth)
				maxdepth = vm_page_buckets[i].hi_count;
		}
	}
	printf("Total number of buckets: %d\n", vm_page_bucket_count);
	printf("Number used buckets:     %d = %d%%\n",
		numbuckets, 100*numbuckets/vm_page_bucket_count);
	printf("Number unused buckets:   %d = %d%%\n",
		vm_page_bucket_count - numbuckets,
		100*(vm_page_bucket_count-numbuckets)/vm_page_bucket_count);
	printf("Sum of bucket max depth: %d\n", highsum);
	printf("Average bucket depth:    %d.%2d\n",
		highsum/vm_page_bucket_count,
		highsum%vm_page_bucket_count);
	printf("Maximum bucket depth:    %d\n", maxdepth);
}
#endif /* MACH_PAGE_HASH_STATS */
/*
 *	The virtual page size is currently implemented as a runtime
 *	variable, but is constant once initialized using vm_set_page_size.
 *	This initialization must be done in the machine-dependent
 *	bootstrap sequence, before calling other machine-independent
 *	functions.
 *
 *	All references to the virtual page size outside this
 *	module must use the PAGE_SIZE, PAGE_MASK and PAGE_SHIFT
 *	constants.
 */
#ifndef PAGE_SIZE_FIXED
vm_size_t	page_size  = 4096;
vm_size_t	page_mask  = 4095;
int		page_shift = 12;
#endif /* PAGE_SIZE_FIXED */
/*
 *	Resident page structures are initialized from
 *	a template (see vm_page_alloc).
 *
 *	When adding a new field to the virtual memory
 *	object structure, be sure to add initialization
 *	(see vm_page_bootstrap).
 */
struct vm_page	vm_page_template;
/*
 *	Resident pages that represent real memory
 *	are allocated from a free list.
 */
vm_page_t	vm_page_queue_free;
vm_page_t	vm_page_queue_fictitious;
decl_mutex_data(,vm_page_queue_free_lock)
unsigned int	vm_page_free_wanted;
int		vm_page_free_count;
int		vm_page_fictitious_count;

unsigned int	vm_page_free_count_minimum;	/* debugging */
/*
 *	Occasionally, the virtual memory system uses
 *	resident page structures that do not refer to
 *	real pages, for example to leave a page with
 *	important state information in the VP table.
 *
 *	These page structures are allocated the way
 *	most other kernel structures are.
 */
zone_t	vm_page_zone;
decl_mutex_data(,vm_page_alloc_lock)
unsigned int io_throttle_zero_fill;
decl_mutex_data(,vm_page_zero_fill_lock)
/*
 *	Fictitious pages don't have a physical address,
 *	but we must initialize phys_addr to something.
 *	For debugging, this should be a strange value
 *	that the pmap module can recognize in assertions.
 */
vm_offset_t vm_page_fictitious_addr = (vm_offset_t) -1;
/*
 *	Resident page structures are also chained on
 *	queues that are used by the page replacement
 *	system (pageout daemon).  These queues are
 *	defined here, but are shared by the pageout
 *	module.  The inactive queue is broken into
 *	inactive and zf for convenience as the
 *	pageout daemon often assigns a higher
 *	affinity to zf pages
 */
queue_head_t	vm_page_queue_active;
queue_head_t	vm_page_queue_inactive;
queue_head_t	vm_page_queue_zf;
decl_mutex_data(,vm_page_queue_lock)
int		vm_page_active_count;
int		vm_page_inactive_count;
int		vm_page_wire_count;
int		vm_page_gobble_count = 0;
int		vm_page_wire_count_warning = 0;
int		vm_page_gobble_count_warning = 0;

/* the following fields are protected by the vm_page_queue_lock */
queue_head_t	vm_page_queue_limbo;
int	vm_page_limbo_count = 0;	/* total pages in limbo */
int	vm_page_limbo_real_count = 0;	/* real pages in limbo */
int	vm_page_pin_count = 0;		/* number of pinned pages */

decl_simple_lock_data(,vm_page_preppin_lock)
/*
 *	Several page replacement parameters are also
 *	shared with this module, so that page allocation
 *	(done here in vm_page_alloc) can trigger the
 *	pageout daemon.
 */
int	vm_page_free_target = 0;
int	vm_page_free_min = 0;
int	vm_page_inactive_target = 0;
int	vm_page_free_reserved = 0;
int	vm_page_laundry_count = 0;
/*
 *	The VM system has a couple of heuristics for deciding
 *	that pages are "uninteresting" and should be placed
 *	on the inactive queue as likely candidates for replacement.
 *	These variables let the heuristics be controlled at run-time
 *	to make experimentation easier.
 */
boolean_t vm_page_deactivate_hint = TRUE;
/*
 *	vm_set_page_size:
 *
 *	Sets the page size, perhaps based upon the memory
 *	size.  Must be called before any use of page-size
 *	dependent functions.
 *
 *	Sets page_shift and page_mask from page_size.
 */
void
vm_set_page_size(void)
{
#ifndef PAGE_SIZE_FIXED
	page_mask = page_size - 1;

	if ((page_mask & page_size) != 0)
		panic("vm_set_page_size: page size not a power of two");

	for (page_shift = 0; ; page_shift++)
		if ((1 << page_shift) == page_size)
			break;
#endif /* PAGE_SIZE_FIXED */
}
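
/*
 * Illustrative sketch, not part of the original module: how the
 * page_size/page_mask/page_shift triple set up above is typically
 * consumed by page-rounding helpers such as the round_page() and
 * trunc_page() used throughout this file.  The example_* names are
 * hypothetical and exist only for illustration.
 */
static unsigned long
example_trunc_page(unsigned long addr, unsigned long mask)
{
	/* clear the offset-within-page bits */
	return addr & ~mask;
}

static unsigned long
example_round_page(unsigned long addr, unsigned long mask)
{
	/* advance to the next page boundary, then truncate */
	return (addr + mask) & ~mask;
}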
/*
 *	vm_page_bootstrap:
 *
 *	Initializes the resident memory module.
 *
 *	Allocates memory for the page cells, and
 *	for the object/offset-to-page hash table headers.
 *	Each page cell is initialized and placed on the free list.
 *	Returns the range of available kernel virtual memory.
 */

void
vm_page_bootstrap(
	vm_offset_t	*startp,
	vm_offset_t	*endp)
{
	register vm_page_t	m;
	int			i;
	unsigned int		log1;
	unsigned int		log2;
	unsigned int		size;

	/*
	 *	Initialize the vm_page template.
	 */
	m = &vm_page_template;
	m->object = VM_OBJECT_NULL;	/* reset later */
	m->offset = 0;			/* reset later */

	m->reference = FALSE;
	m->dump_cleaning = FALSE;
	m->list_req_pending = FALSE;

	m->fictitious = FALSE;
	m->clustered = FALSE;
	m->lock_supplied = FALSE;
	m->zero_fill = FALSE;

	m->phys_addr = 0;		/* reset later */

	m->page_lock = VM_PROT_NONE;
	m->unlock_request = VM_PROT_NONE;
	m->page_error = KERN_SUCCESS;
	/*
	 *	Initialize the page queues.
	 */

	mutex_init(&vm_page_queue_free_lock, ETAP_VM_PAGEQ_FREE);
	mutex_init(&vm_page_queue_lock, ETAP_VM_PAGEQ);
	simple_lock_init(&vm_page_preppin_lock, ETAP_VM_PREPPIN);

	vm_page_queue_free = VM_PAGE_NULL;
	vm_page_queue_fictitious = VM_PAGE_NULL;
	queue_init(&vm_page_queue_active);
	queue_init(&vm_page_queue_inactive);
	queue_init(&vm_page_queue_zf);
	queue_init(&vm_page_queue_limbo);

	vm_page_free_wanted = 0;
	/*
	 *	Steal memory for the map and zone subsystems.
	 */

	vm_map_steal_memory();
	/*
	 *	Allocate (and initialize) the virtual-to-physical
	 *	table hash buckets.
	 *
	 *	The number of buckets should be a power of two to
	 *	get a good hash function.  The following computation
	 *	chooses the first power of two that is greater
	 *	than the number of physical pages in the system.
	 */

	simple_lock_init(&vm_page_bucket_lock, ETAP_VM_BUCKET);

	if (vm_page_bucket_count == 0) {
		unsigned int npages = pmap_free_pages();

		vm_page_bucket_count = 1;
		while (vm_page_bucket_count < npages)
			vm_page_bucket_count <<= 1;
	}

	vm_page_hash_mask = vm_page_bucket_count - 1;
	/*
	 *	Calculate object shift value for hashing algorithm:
	 *		O = log2(sizeof(struct vm_object))
	 *		B = log2(vm_page_bucket_count)
	 *		hash shifts the object left by
	 *		B/2 - O
	 */
	size = vm_page_bucket_count;
	for (log1 = 0; size > 1; log1++)
		size /= 2;
	size = sizeof(struct vm_object);
	for (log2 = 0; size > 1; log2++)
		size /= 2;
	vm_page_hash_shift = log1/2 - log2 + 1;

	if (vm_page_hash_mask & vm_page_bucket_count)
		printf("vm_page_bootstrap: WARNING -- strange page hash\n");
	vm_page_buckets = (vm_page_bucket_t *)
		pmap_steal_memory(vm_page_bucket_count *
				  sizeof(vm_page_bucket_t));

	for (i = 0; i < vm_page_bucket_count; i++) {
		register vm_page_bucket_t *bucket = &vm_page_buckets[i];

		bucket->pages = VM_PAGE_NULL;
#if	MACH_PAGE_HASH_STATS
		bucket->cur_count = 0;
		bucket->hi_count = 0;
#endif /* MACH_PAGE_HASH_STATS */
	}
	/*
	 *	Machine-dependent code allocates the resident page table.
	 *	It uses vm_page_init to initialize the page frames.
	 *	The code also returns to us the virtual space available
	 *	to the kernel.  We don't trust the pmap module
	 *	to get the alignment right.
	 */

	pmap_startup(&virtual_space_start, &virtual_space_end);
	virtual_space_start = round_page(virtual_space_start);
	virtual_space_end = trunc_page(virtual_space_end);

	*startp = virtual_space_start;
	*endp = virtual_space_end;

	/*
	 *	Compute the initial "wire" count.
	 *	Up until now, the pages which have been set aside are not under
	 *	the VM system's control, so although they aren't explicitly
	 *	wired, they nonetheless can't be moved.  At this moment,
	 *	all VM managed pages are "free", courtesy of pmap_startup.
	 */
	vm_page_wire_count = atop(mem_size) - vm_page_free_count;	/* initial value */

	printf("vm_page_bootstrap: %d free pages\n", vm_page_free_count);
	vm_page_free_count_minimum = vm_page_free_count;
}
#ifndef	MACHINE_PAGES
/*
 *	We implement pmap_steal_memory and pmap_startup with the help
 *	of two simpler functions, pmap_virtual_space and pmap_next_page.
 */
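
/*
 * Illustrative sketch, not part of the original module: the general
 * "bump allocator" shape that pmap_steal_memory() below follows.  A
 * cursor advances through a virtual range handed out by the pmap, and
 * each allocation is then backed page by page.  All example_* names
 * are hypothetical.
 */
static unsigned long example_cursor;	/* next unallocated virtual address */
static unsigned long example_limit;	/* end of the stolen virtual range  */

static unsigned long
example_steal(unsigned long size, unsigned long align)
{
	unsigned long addr;

	size = (size + align - 1) & ~(align - 1);	/* round to alignment */
	if (example_cursor + size > example_limit)
		return 0;				/* range exhausted */
	addr = example_cursor;
	example_cursor += size;
	return addr;			/* caller maps backing pages behind addr */
}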
vm_offset_t
pmap_steal_memory(
	vm_size_t size)
{
	vm_offset_t addr, vaddr, paddr;

	/*
	 *	We round the size to a round multiple.
	 */

	size = (size + sizeof (void *) - 1) &~ (sizeof (void *) - 1);

	/*
	 *	If this is the first call to pmap_steal_memory,
	 *	we have to initialize ourself.
	 */

	if (virtual_space_start == virtual_space_end) {
		pmap_virtual_space(&virtual_space_start, &virtual_space_end);

		/*
		 *	The initial values must be aligned properly, and
		 *	we don't trust the pmap module to do it right.
		 */

		virtual_space_start = round_page(virtual_space_start);
		virtual_space_end = trunc_page(virtual_space_end);
	}

	/*
	 *	Allocate virtual memory for this request.
	 */

	addr = virtual_space_start;
	virtual_space_start += size;

	kprintf("pmap_steal_memory: %08X - %08X; size=%08X\n", addr, virtual_space_start, size);	/* (TEST/DEBUG) */

	/*
	 *	Allocate and map physical pages to back new virtual pages.
	 */

	for (vaddr = round_page(addr);
	     vaddr < addr + size;
	     vaddr += PAGE_SIZE) {
		if (!pmap_next_page(&paddr))
			panic("pmap_steal_memory");

		/*
		 *	XXX Logically, these mappings should be wired,
		 *	but some pmap modules barf if they are.
		 */

		pmap_enter(kernel_pmap, vaddr, paddr,
			   VM_PROT_READ|VM_PROT_WRITE,
			   VM_WIMG_USE_DEFAULT, FALSE);
		/*
		 * Account for newly stolen memory
		 */
		vm_page_wire_count++;
	}

	return addr;
}
void
pmap_startup(
	vm_offset_t *startp,
	vm_offset_t *endp)
{
	unsigned int i, npages, pages_initialized;
	vm_page_t pages;
	vm_offset_t paddr;

	/*
	 *	We calculate how many page frames we will have
	 *	and then allocate the page structures in one chunk.
	 */

	npages = ((PAGE_SIZE * pmap_free_pages() +
		   (round_page(virtual_space_start) - virtual_space_start)) /
		  (PAGE_SIZE + sizeof *pages));

	pages = (vm_page_t) pmap_steal_memory(npages * sizeof *pages);

	/*
	 *	Initialize the page frames.
	 */

	for (i = 0, pages_initialized = 0; i < npages; i++) {
		if (!pmap_next_page(&paddr))
			break;

		vm_page_init(&pages[i], paddr);
		vm_page_pages++;
		pages_initialized++;
	}

	/*
	 * Release pages in reverse order so that physical pages
	 * initially get allocated in ascending addresses. This keeps
	 * the devices (which must address physical memory) happy if
	 * they require several consecutive pages.
	 */

	for (i = pages_initialized; i > 0; i--) {
		vm_page_release(&pages[i - 1]);
	}

	/*
	 *	We have to re-align virtual_space_start,
	 *	because pmap_steal_memory has been using it.
	 */

	virtual_space_start = round_page(virtual_space_start);

	*startp = virtual_space_start;
	*endp = virtual_space_end;
}
#endif	/* MACHINE_PAGES */
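
/*
 * Illustrative sketch, not part of the original module: why
 * pmap_startup() above releases page structures in reverse order.
 * Pushing onto a LIFO free list reverses the order of insertion, so
 * pushing the highest-addressed pages first leaves the lowest page on
 * top, and consumers then see ascending physical addresses.  The
 * example_* names are hypothetical.
 */
struct example_page {
	struct example_page	*next;
	unsigned long		phys;
};

static struct example_page *
example_push_reverse(struct example_page table[], unsigned long count)
{
	struct example_page *head = 0;
	unsigned long i;

	for (i = count; i > 0; i--) {	/* highest index pushed first */
		table[i - 1].next = head;
		head = &table[i - 1];
	}
	return head;			/* head is table[0], the lowest page */
}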
/*
 *	Routine:	vm_page_module_init
 *	Purpose:
 *		Second initialization pass, to be done after
 *		the basic VM system is ready.
 */
void
vm_page_module_init(void)
{
	vm_page_zone = zinit((vm_size_t) sizeof(struct vm_page),
			     0, PAGE_SIZE, "vm pages");

#if	ZONE_DEBUG
	zone_debug_disable(vm_page_zone);
#endif	/* ZONE_DEBUG */

	zone_change(vm_page_zone, Z_EXPAND, FALSE);
	zone_change(vm_page_zone, Z_EXHAUST, TRUE);
	zone_change(vm_page_zone, Z_FOREIGN, TRUE);

	/*
	 * Adjust zone statistics to account for the real pages allocated
	 * in vm_page_create(). [Q: is this really what we want?]
	 */
	vm_page_zone->count += vm_page_pages;
	vm_page_zone->cur_size += vm_page_pages * vm_page_zone->elem_size;

	mutex_init(&vm_page_alloc_lock, ETAP_VM_PAGE_ALLOC);
	mutex_init(&vm_page_zero_fill_lock, ETAP_VM_PAGE_ALLOC);
}
/*
 *	Routine:	vm_page_create
 *	Purpose:
 *		After the VM system is up, machine-dependent code
 *		may stumble across more physical memory.  For example,
 *		memory that it was reserving for a frame buffer.
 *		vm_page_create turns this memory into available pages.
 */

void
vm_page_create(
	vm_offset_t start,
	vm_offset_t end)
{
	vm_offset_t paddr;
	vm_page_t m;

	for (paddr = round_page(start);
	     paddr < trunc_page(end);
	     paddr += PAGE_SIZE) {
		while ((m = (vm_page_t) vm_page_grab_fictitious())
			== VM_PAGE_NULL)
			vm_page_more_fictitious();

		vm_page_init(m, paddr);
		vm_page_release(m);
	}
}
/*
 *	vm_page_hash:
 *
 *	Distributes the object/offset key pair among hash buckets.
 *
 *	NOTE:	To get a good hash function, the bucket count should
 *		be a power of two.
 */
#define vm_page_hash(object, offset) (\
	( ((natural_t)(vm_offset_t)object<<vm_page_hash_shift) + (natural_t)atop(offset))\
	 & vm_page_hash_mask)
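
/*
 * Illustrative sketch, not part of the original module: the same
 * bucket-index computation as vm_page_hash() above, written out as a
 * plain function over hypothetical example_* parameters.  The object
 * pointer is shifted by the precomputed hash shift, the byte offset is
 * reduced to a page index, and the sum is masked down to the
 * (power-of-two) bucket count.
 */
static unsigned int
example_page_hash(unsigned long object, unsigned long long offset,
		  unsigned int hash_shift, unsigned int hash_mask,
		  unsigned int page_shift_bits)
{
	return (unsigned int)
	    (((object << hash_shift) + (offset >> page_shift_bits)) & hash_mask);
}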
/*
 *	vm_page_insert:		[ internal use only ]
 *
 *	Inserts the given mem entry into the
 *	object/object-page table and object list.
 *
 *	The object must be locked.
 */

void
vm_page_insert(
	register vm_page_t		mem,
	register vm_object_t		object,
	register vm_object_offset_t	offset)
{
	register vm_page_bucket_t *bucket;

	XPR(XPR_VM_PAGE,
		"vm_page_insert, object 0x%X offset 0x%X page 0x%X\n",
		(integer_t)object, (integer_t)offset, (integer_t)mem, 0,0);

	if (mem->tabled)
		panic("vm_page_insert");

	assert(!object->internal || offset < object->size);

	/* only insert "pageout" pages into "pageout" objects,
	 * and normal pages into normal objects */
	assert(object->pageout == mem->pageout);

	/*
	 *	Record the object/offset pair in this page
	 */

	mem->object = object;
	mem->offset = offset;

	/*
	 *	Insert it into the object_object/offset hash table
	 */

	bucket = &vm_page_buckets[vm_page_hash(object, offset)];
	simple_lock(&vm_page_bucket_lock);
	mem->next = bucket->pages;
	bucket->pages = mem;
#if	MACH_PAGE_HASH_STATS
	if (++bucket->cur_count > bucket->hi_count)
		bucket->hi_count = bucket->cur_count;
#endif /* MACH_PAGE_HASH_STATS */
	simple_unlock(&vm_page_bucket_lock);

	/*
	 *	Now link into the object's list of backed pages.
	 */

	queue_enter(&object->memq, mem, vm_page_t, listq);

	/*
	 *	Show that the object has one more resident page.
	 */

	object->resident_page_count++;
}
/*
 *	vm_page_replace:
 *
 *	Exactly like vm_page_insert, except that we first
 *	remove any existing page at the given offset in object.
 *
 *	The object and page queues must be locked.
 */

void
vm_page_replace(
	register vm_page_t		mem,
	register vm_object_t		object,
	register vm_object_offset_t	offset)
{
	register vm_page_bucket_t *bucket;

	if (mem->tabled)
		panic("vm_page_replace");

	/*
	 *	Record the object/offset pair in this page
	 */

	mem->object = object;
	mem->offset = offset;

	/*
	 *	Insert it into the object_object/offset hash table,
	 *	replacing any page that might have been there.
	 */

	bucket = &vm_page_buckets[vm_page_hash(object, offset)];
	simple_lock(&vm_page_bucket_lock);
	if (bucket->pages) {
		vm_page_t *mp = &bucket->pages;
		register vm_page_t m = *mp;
		do {
			if (m->object == object && m->offset == offset) {
				/*
				 * Remove page from bucket and from object,
				 * and return it to the free list.
				 */
				*mp = m->next;
				queue_remove(&object->memq, m, vm_page_t,
					     listq);
				m->tabled = FALSE;
				object->resident_page_count--;

				/*
				 * Return page to the free list.
				 * Note the page is not tabled now, so this
				 * won't self-deadlock on the bucket lock.
				 */

				vm_page_free(m);
				break;
			}
			mp = &m->next;
		} while (m = *mp);
		mem->next = bucket->pages;
	} else {
		mem->next = VM_PAGE_NULL;
	}
	bucket->pages = mem;
	simple_unlock(&vm_page_bucket_lock);

	/*
	 *	Now link into the object's list of backed pages.
	 */

	queue_enter(&object->memq, mem, vm_page_t, listq);

	/*
	 *	And show that the object has one more resident
	 *	page.
	 */

	object->resident_page_count++;
}
/*
 *	vm_page_remove:		[ internal use only ]
 *
 *	Removes the given mem entry from the object/offset-page
 *	table and the object page list.
 *
 *	The object and page must be locked.
 */

void
vm_page_remove(
	register vm_page_t	mem)
{
	register vm_page_bucket_t	*bucket;
	register vm_page_t		this;

	XPR(XPR_VM_PAGE,
		"vm_page_remove, object 0x%X offset 0x%X page 0x%X\n",
		(integer_t)mem->object, (integer_t)mem->offset,
		(integer_t)mem, 0,0);

	assert(!mem->cleaning);

	/*
	 *	Remove from the object_object/offset hash table
	 */

	bucket = &vm_page_buckets[vm_page_hash(mem->object, mem->offset)];
	simple_lock(&vm_page_bucket_lock);
	if ((this = bucket->pages) == mem) {
		/* optimize for common case */

		bucket->pages = mem->next;
	} else {
		register vm_page_t	*prev;

		for (prev = &this->next;
		     (this = *prev) != mem;
		     prev = &this->next)
			continue;
		*prev = this->next;
	}
#if	MACH_PAGE_HASH_STATS
	bucket->cur_count--;
#endif /* MACH_PAGE_HASH_STATS */
	simple_unlock(&vm_page_bucket_lock);

	/*
	 *	Now remove from the object's list of backed pages.
	 */

	queue_remove(&mem->object->memq, mem, vm_page_t, listq);

	/*
	 *	And show that the object has one fewer resident
	 *	page.
	 */

	mem->object->resident_page_count--;

	mem->object = VM_OBJECT_NULL;
}
/*
 *	vm_page_lookup:
 *
 *	Returns the page associated with the object/offset
 *	pair specified; if none is found, VM_PAGE_NULL is returned.
 *
 *	The object must be locked.  No side effects.
 */

vm_page_t
vm_page_lookup(
	register vm_object_t		object,
	register vm_object_offset_t	offset)
{
	register vm_page_t		mem;
	register vm_page_bucket_t	*bucket;

	/*
	 *	Search the hash table for this object/offset pair
	 */

	bucket = &vm_page_buckets[vm_page_hash(object, offset)];

	simple_lock(&vm_page_bucket_lock);
	for (mem = bucket->pages; mem != VM_PAGE_NULL; mem = mem->next) {
		if ((mem->object == object) && (mem->offset == offset))
			break;
	}
	simple_unlock(&vm_page_bucket_lock);

	return(mem);
}
/*
 *	vm_page_rename:
 *
 *	Move the given memory entry from its
 *	current object to the specified target object/offset.
 *
 *	The object must be locked.
 */
void
vm_page_rename(
	register vm_page_t	mem,
	register vm_object_t	new_object,
	vm_object_offset_t	new_offset)
{
	assert(mem->object != new_object);
	/*
	 *	Changes to mem->object require the page lock because
	 *	the pageout daemon uses that lock to get the object.
	 */

	XPR(XPR_VM_PAGE,
		"vm_page_rename, new object 0x%X, offset 0x%X page 0x%X\n",
		(integer_t)new_object, (integer_t)new_offset,
		(integer_t)mem, 0,0);

	vm_page_lock_queues();
	vm_page_remove(mem);
	vm_page_insert(mem, new_object, new_offset);
	vm_page_unlock_queues();
}
/*
 *	vm_page_init:
 *
 *	Initialize the fields in a new page.
 *	This takes a structure with random values and initializes it
 *	so that it can be given to vm_page_release or vm_page_insert.
 */
void
vm_page_init(
	vm_page_t	mem,
	vm_offset_t	phys_addr)
{
	*mem = vm_page_template;
	mem->phys_addr = phys_addr;
}
/*
 *	vm_page_grab_fictitious:
 *
 *	Remove a fictitious page from the free list.
 *	Returns VM_PAGE_NULL if there are no free pages.
 */
int	c_vm_page_grab_fictitious = 0;
int	c_vm_page_release_fictitious = 0;
int	c_vm_page_more_fictitious = 0;

vm_page_t
vm_page_grab_fictitious(void)
{
	register vm_page_t m;

	m = (vm_page_t)zget(vm_page_zone);
	if (m) {
		vm_page_init(m, vm_page_fictitious_addr);
		m->fictitious = TRUE;
	}

	c_vm_page_grab_fictitious++;
	return m;
}
/*
 *	vm_page_release_fictitious:
 *
 *	Release a fictitious page to the free list.
 */

void
vm_page_release_fictitious(
	register vm_page_t m)
{
	assert(m->fictitious);
	assert(m->phys_addr == vm_page_fictitious_addr);

	c_vm_page_release_fictitious++;

	if (m->free)
		panic("vm_page_release_fictitious");
	zfree(vm_page_zone, (vm_offset_t)m);
}
/*
 *	vm_page_more_fictitious:
 *
 *	Add more fictitious pages to the free list.
 *	Allowed to block. This routine is way intimate
 *	with the zones code, for several reasons:
 *	1. we need to carve some page structures out of physical
 *	   memory before zones work, so they _cannot_ come from
 *	   the zone_map.
 *	2. the zone needs to be collectable in order to prevent
 *	   growth without bound. These structures are used by
 *	   the device pager (by the hundreds and thousands), as
 *	   private pages for pageout, and as blocking pages for
 *	   pagein. Temporary bursts in demand should not result in
 *	   permanent allocation of a resource.
 *	3. To smooth allocation humps, we allocate single pages
 *	   with kernel_memory_allocate(), and cram them into the
 *	   zone. This also allows us to initialize the vm_page_t's
 *	   on the way into the zone, so that zget() always returns
 *	   an initialized structure. The zone free element pointer
 *	   and the free page pointer are both the first item in the
 *	   vm_page_t.
 *	4. By having the pages in the zone pre-initialized, we need
 *	   not keep 2 levels of lists. The garbage collector simply
 *	   scans our list, and reduces physical memory usage as it
 *	   sees fit.
 */
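
/*
 * Illustrative sketch, not part of the original module: why the free
 * element link must be the first field, as point 3 above describes.
 * A zone-style allocator that threads free elements through their
 * first word can coexist with the page free list only if both use the
 * same (first) field.  All example_* names are hypothetical.
 */
struct example_elem {
	struct example_elem	*next;	/* must be first: doubles as the
					 * zone free-element pointer and
					 * the free-page pointer */
	int			payload;
};

static struct example_elem *
example_cram(void *page, unsigned long page_bytes,
	     struct example_elem *free_head)
{
	struct example_elem *e = (struct example_elem *)page;
	unsigned long n = page_bytes / sizeof(*e);

	/* initialize every element that fits, pushing each onto the list */
	while (n-- > 0) {
		e->payload = 0;
		e->next = free_head;
		free_head = e;
		e++;
	}
	return free_head;
}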
void vm_page_more_fictitious(void)
{
	extern vm_map_t zone_map;
	register vm_page_t m;
	vm_offset_t addr;
	kern_return_t retval;
	int i;

	c_vm_page_more_fictitious++;

	/*
	 * Allocate a single page from the zone_map. Do not wait if no physical
	 * pages are immediately available, and do not zero the space. We need
	 * our own blocking lock here to prevent having multiple,
	 * simultaneous requests from piling up on the zone_map lock. Exactly
	 * one (of our) threads should be potentially waiting on the map lock.
	 * If winner is not vm-privileged, then the page allocation will fail,
	 * and it will temporarily block here in the vm_page_wait().
	 */
	mutex_lock(&vm_page_alloc_lock);
	/*
	 * If another thread allocated space, just bail out now.
	 */
	if (zone_free_count(vm_page_zone) > 5) {
		/*
		 * The number "5" is a small number that is larger than the
		 * number of fictitious pages that any single caller will
		 * attempt to allocate. Otherwise, a thread will attempt to
		 * acquire a fictitious page (vm_page_grab_fictitious), fail,
		 * release all of the resources and locks already acquired,
		 * and then call this routine. This routine finds the pages
		 * that the caller released, so fails to allocate new space.
		 * The process repeats infinitely. The largest known number
		 * of fictitious pages required in this manner is 2. 5 is
		 * simply a somewhat larger number.
		 */
		mutex_unlock(&vm_page_alloc_lock);
		return;
	}

	if ((retval = kernel_memory_allocate(zone_map,
			&addr, PAGE_SIZE, VM_PROT_ALL,
			KMA_KOBJECT|KMA_NOPAGEWAIT)) != KERN_SUCCESS) {
		/*
		 * No page was available. Tell the pageout daemon, drop the
		 * lock to give another thread a chance at it, and
		 * wait for the pageout daemon to make progress.
		 */
		mutex_unlock(&vm_page_alloc_lock);
		vm_page_wait(THREAD_UNINT);
		return;
	}

	/*
	 * Initialize as many vm_page_t's as will fit on this page. This
	 * depends on the zone code disturbing ONLY the first item of
	 * each zone element.
	 */
	m = (vm_page_t)addr;
	for (i = PAGE_SIZE/sizeof(struct vm_page); i > 0; i--) {
		vm_page_init(m, vm_page_fictitious_addr);
		m->fictitious = TRUE;
		m++;
	}
	zcram(vm_page_zone, addr, PAGE_SIZE);
	mutex_unlock(&vm_page_alloc_lock);
}
/*
 *	vm_page_convert:
 *
 *	Attempt to convert a fictitious page into a real page.
 */

boolean_t
vm_page_convert(
	register vm_page_t m)
{
	register vm_page_t real_m;

	assert(m->fictitious);

	real_m = vm_page_grab();
	if (real_m == VM_PAGE_NULL)
		return FALSE;

	m->phys_addr = real_m->phys_addr;
	m->fictitious = FALSE;

	vm_page_lock_queues();
	if (m->active)
		vm_page_active_count++;
	else if (m->inactive)
		vm_page_inactive_count++;
	vm_page_unlock_queues();

	real_m->phys_addr = vm_page_fictitious_addr;
	real_m->fictitious = TRUE;

	vm_page_release_fictitious(real_m);

	return TRUE;
}
/*
 *	vm_pool_low():
 *
 *	Return true if it is not likely that a non-vm_privileged thread
 *	can get memory without blocking.  Advisory only, since the
 *	situation may change under us.
 */
int
vm_pool_low(void)
{
	/* No locking, at worst we will fib. */
	return( vm_page_free_count < vm_page_free_reserved );
}
/*
 *	vm_page_grab:
 *
 *	Remove a page from the free list.
 *	Returns VM_PAGE_NULL if the free list is too small.
 */

unsigned long	vm_page_grab_count = 0;	/* measure demand */

vm_page_t
vm_page_grab(void)
{
	register vm_page_t	mem;

	mutex_lock(&vm_page_queue_free_lock);
	vm_page_grab_count++;

	/*
	 *	Optionally produce warnings if the wire or gobble
	 *	counts exceed some threshold.
	 */
	if (vm_page_wire_count_warning > 0
	    && vm_page_wire_count >= vm_page_wire_count_warning) {
		printf("mk: vm_page_grab(): high wired page count of %d\n",
			vm_page_wire_count);
		assert(vm_page_wire_count < vm_page_wire_count_warning);
	}
	if (vm_page_gobble_count_warning > 0
	    && vm_page_gobble_count >= vm_page_gobble_count_warning) {
		printf("mk: vm_page_grab(): high gobbled page count of %d\n",
			vm_page_gobble_count);
		assert(vm_page_gobble_count < vm_page_gobble_count_warning);
	}

	/*
	 *	Only let privileged threads (involved in pageout)
	 *	dip into the reserved pool.
	 */

	if ((vm_page_free_count < vm_page_free_reserved) &&
	    !current_thread()->vm_privilege) {
		mutex_unlock(&vm_page_queue_free_lock);
		mem = VM_PAGE_NULL;
		goto wakeup_pageout;
	}

	while (vm_page_queue_free == VM_PAGE_NULL) {
		printf("vm_page_grab: no free pages, trouble expected...\n");
		mutex_unlock(&vm_page_queue_free_lock);
		VM_PAGE_WAIT();
		mutex_lock(&vm_page_queue_free_lock);
	}

	if (--vm_page_free_count < vm_page_free_count_minimum)
		vm_page_free_count_minimum = vm_page_free_count;
	mem = vm_page_queue_free;
	vm_page_queue_free = (vm_page_t) mem->pageq.next;
	mem->free = FALSE;
	mem->no_isync = TRUE;
	mutex_unlock(&vm_page_queue_free_lock);

	/*
	 *	Decide if we should poke the pageout daemon.
	 *	We do this if the free count is less than the low
	 *	water mark, or if the free count is less than the high
	 *	water mark (but above the low water mark) and the inactive
	 *	count is less than its target.
	 *
	 *	We don't have the counts locked ... if they change a little,
	 *	it doesn't really matter.
	 */

wakeup_pageout:
	if ((vm_page_free_count < vm_page_free_min) ||
	    ((vm_page_free_count < vm_page_free_target) &&
	     (vm_page_inactive_count < vm_page_inactive_target)))
		thread_wakeup((event_t) &vm_page_free_wanted);

//	dbgLog(mem->phys_addr, vm_page_free_count, vm_page_wire_count, 4);	/* (TEST/DEBUG) */

	return mem;
}
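
/*
 * Illustrative sketch, not part of the original module: the pageout
 * wakeup policy described in vm_page_grab() above, expressed as a
 * standalone predicate.  All parameter names are hypothetical.
 */
static int
example_should_wake_pageout(int free_count, int free_min, int free_target,
			    int inactive_count, int inactive_target)
{
	/* below the low water mark: always wake */
	if (free_count < free_min)
		return 1;
	/* between the low and high water marks: wake only if the
	 * inactive queue is also short of its target */
	return (free_count < free_target) && (inactive_count < inactive_target);
}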
/*
 *	vm_page_release:
 *
 *	Return a page to the free list.
 */

void
vm_page_release(
	register vm_page_t	mem)
{
	assert(!mem->private && !mem->fictitious);

//	dbgLog(mem->phys_addr, vm_page_free_count, vm_page_wire_count, 5);	/* (TEST/DEBUG) */

	mutex_lock(&vm_page_queue_free_lock);
	if (mem->free)
		panic("vm_page_release");
	mem->free = TRUE;
	mem->pageq.next = (queue_entry_t) vm_page_queue_free;
	vm_page_queue_free = mem;
	vm_page_free_count++;

	/*
	 *	Check if we should wake up someone waiting for page.
	 *	But don't bother waking them unless they can allocate.
	 *
	 *	We wakeup only one thread, to prevent starvation.
	 *	Because the scheduling system handles wait queues FIFO,
	 *	if we wakeup all waiting threads, one greedy thread
	 *	can starve multiple niceguy threads.  When the threads
	 *	all wakeup, the greedy thread runs first, grabs the page,
	 *	and waits for another page.  It will be the first to run
	 *	when the next page is freed.
	 *
	 *	However, there is a slight danger here.
	 *	The thread we wake might not use the free page.
	 *	Then the other threads could wait indefinitely
	 *	while the page goes unused.  To forestall this,
	 *	the pageout daemon will keep making free pages
	 *	as long as vm_page_free_wanted is non-zero.
	 */

	if ((vm_page_free_wanted > 0) &&
	    (vm_page_free_count >= vm_page_free_reserved)) {
		vm_page_free_wanted--;
		thread_wakeup_one((event_t) &vm_page_free_count);
	}

	mutex_unlock(&vm_page_queue_free_lock);
}
#define VM_PAGEOUT_DEADLOCK_TIMEOUT 3

/*
 *	vm_page_wait:
 *
 *	Wait for a page to become available.
 *	If there are plenty of free pages, then we don't sleep.
 *
 *	Returns:
 *		TRUE:  There may be another page, try again
 *		FALSE: We were interrupted out of our wait, don't try again
 */

boolean_t
vm_page_wait(
	int	interruptible)
{
	/*
	 *	We can't use vm_page_free_reserved to make this
	 *	determination.  Consider: some thread might
	 *	need to allocate two pages.  The first allocation
	 *	succeeds, the second fails.  After the first page is freed,
	 *	a call to vm_page_wait must really block.
	 */
	uint64_t	abstime;
	kern_return_t	wait_result;
	kern_return_t	kr;
	int		need_wakeup = 0;

	mutex_lock(&vm_page_queue_free_lock);
	if (vm_page_free_count < vm_page_free_target) {
		if (vm_page_free_wanted++ == 0)
			need_wakeup = 1;
		wait_result = assert_wait((event_t)&vm_page_free_count,
					  interruptible);
		mutex_unlock(&vm_page_queue_free_lock);
		counter(c_vm_page_wait_block++);

		if (need_wakeup)
			thread_wakeup((event_t)&vm_page_free_wanted);

		if (wait_result == THREAD_WAITING) {
			clock_interval_to_absolutetime_interval(
				VM_PAGEOUT_DEADLOCK_TIMEOUT,
				NSEC_PER_SEC, &abstime);
			clock_absolutetime_interval_to_deadline(
				abstime, &abstime);
			thread_set_timer_deadline(abstime);
			wait_result = thread_block(THREAD_CONTINUE_NULL);

			if(wait_result == THREAD_TIMED_OUT) {
				kr = vm_pageout_emergency_availability_request();
				return TRUE;
			}

			thread_cancel_timer();
		}

		return(wait_result == THREAD_AWAKENED);
	} else {
		mutex_unlock(&vm_page_queue_free_lock);
		return TRUE;
	}
}
/*
 *	vm_page_alloc:
 *
 *	Allocate and return a memory cell associated
 *	with this VM object/offset pair.
 *
 *	Object must be locked.
 */

vm_page_t
vm_page_alloc(
	vm_object_t		object,
	vm_object_offset_t	offset)
{
	register vm_page_t	mem;

	mem = vm_page_grab();
	if (mem == VM_PAGE_NULL)
		return VM_PAGE_NULL;

	vm_page_insert(mem, object, offset);

	return(mem);
}
= 0;)
1382 int vm_pagein_cluster_unused
= 0;
1383 boolean_t vm_page_free_verify
= FALSE
;
/*
 *	vm_page_free:
 *
 *	Returns the given page to the free list,
 *	disassociating it with any VM object.
 *
 *	Object and page queues must be locked prior to entry.
 */
void
vm_page_free(
	register vm_page_t	mem)
{
	vm_object_t	object = mem->object;

	assert(!mem->cleaning);
	assert(!mem->pageout);
	assert(!vm_page_free_verify || pmap_verify_free(mem->phys_addr));

	if (mem->tabled)
		vm_page_remove(mem);	/* clears tabled, object, offset */
	VM_PAGE_QUEUES_REMOVE(mem);	/* clears active or inactive */

	if (mem->clustered) {
		mem->clustered = FALSE;
		vm_pagein_cluster_unused++;
	}

	if (mem->wire_count) {
		if (!mem->private && !mem->fictitious)
			vm_page_wire_count--;
		mem->wire_count = 0;
		assert(!mem->gobbled);
	} else if (mem->gobbled) {
		if (!mem->private && !mem->fictitious)
			vm_page_wire_count--;
		vm_page_gobble_count--;
		mem->gobbled = FALSE;
	}

	if (mem->laundry) {
		extern int vm_page_laundry_min;
		vm_page_laundry_count--;
		mem->laundry = FALSE;	/* laundry is now clear */
		counter(++c_laundry_pages_freed);
		if (vm_page_laundry_count < vm_page_laundry_min) {
			vm_page_laundry_min = 0;
			thread_wakeup((event_t) &vm_page_laundry_count);
		}
	}

	mem->discard_request = FALSE;

	PAGE_WAKEUP(mem);	/* clears wanted */

	if (mem->absent)
		vm_object_absent_release(object);

	/* Some of these may be unnecessary */
	mem->unlock_request = 0;
	mem->absent = FALSE;
	mem->precious = FALSE;
	mem->reference = FALSE;

	mem->page_error = KERN_SUCCESS;

	if (mem->private) {
		mem->private = FALSE;
		mem->fictitious = TRUE;
		mem->phys_addr = vm_page_fictitious_addr;
	}
	if (mem->fictitious) {
		vm_page_release_fictitious(mem);
	} else {
		/* depends on the queues lock */
		if(mem->zero_fill) {
			mem->zero_fill = FALSE;
		}
		vm_page_init(mem, mem->phys_addr);
		vm_page_release(mem);
	}
}
/*
 *	vm_page_wire:
 *
 *	Mark this page as wired down by yet
 *	another map, removing it from paging queues
 *	as necessary.
 *
 *	The page's object and the page queues must be locked.
 */
void
vm_page_wire(
	register vm_page_t	mem)
{

//	dbgLog(current_act(), mem->offset, mem->object, 1);	/* (TEST/DEBUG) */

	if (mem->wire_count == 0) {
		VM_PAGE_QUEUES_REMOVE(mem);
		if (!mem->private && !mem->fictitious && !mem->gobbled)
			vm_page_wire_count++;
		if (mem->gobbled)
			vm_page_gobble_count--;
		mem->gobbled = FALSE;
		if(mem->zero_fill) {
			/* depends on the queues lock */
			mem->zero_fill = FALSE;
		}
	}
	assert(!mem->gobbled);
	mem->wire_count++;
}
/*
 *	vm_page_gobble:
 *
 *	Mark this page as consumed by the vm/ipc/xmm subsystems.
 *
 *	Called only for freshly vm_page_grab()ed pages - w/ nothing locked.
 */
void
vm_page_gobble(
	register vm_page_t	mem)
{
	vm_page_lock_queues();

	assert(!mem->gobbled);
	assert(mem->wire_count == 0);

	if (!mem->gobbled && mem->wire_count == 0) {
		if (!mem->private && !mem->fictitious)
			vm_page_wire_count++;
	}
	vm_page_gobble_count++;
	mem->gobbled = TRUE;
	vm_page_unlock_queues();
}
/*
 *	vm_page_unwire:
 *
 *	Release one wiring of this page, potentially
 *	enabling it to be paged again.
 *
 *	The page's object and the page queues must be locked.
 */
void
vm_page_unwire(
	register vm_page_t	mem)
{

//	dbgLog(current_act(), mem->offset, mem->object, 0);	/* (TEST/DEBUG) */

	assert(mem->wire_count > 0);

	if (--mem->wire_count == 0) {
		assert(!mem->private && !mem->fictitious);
		vm_page_wire_count--;
		queue_enter(&vm_page_queue_active, mem, vm_page_t, pageq);
		vm_page_active_count++;
		mem->active = TRUE;
		mem->reference = TRUE;
	}
}
/*
 *	vm_page_deactivate:
 *
 *	Returns the given page to the inactive list,
 *	indicating that no physical maps have access
 *	to this page.  [Used by the physical mapping system.]
 *
 *	The page queues must be locked.
 */
void
vm_page_deactivate(
	register vm_page_t	m)
{

//	dbgLog(m->phys_addr, vm_page_free_count, vm_page_wire_count, 6);	/* (TEST/DEBUG) */

	/*
	 *	This page is no longer very interesting.  If it was
	 *	interesting (active or inactive/referenced), then we
	 *	clear the reference bit and (re)enter it in the
	 *	inactive queue.  Note wired pages should not have
	 *	their reference bit cleared.
	 */
	if (m->gobbled) {		/* can this happen? */
		assert(m->wire_count == 0);
		if (!m->private && !m->fictitious)
			vm_page_wire_count--;
		vm_page_gobble_count--;
		m->gobbled = FALSE;
	}
	if (m->private || (m->wire_count != 0))
		return;
	if (m->active || (m->inactive && m->reference)) {
		if (!m->fictitious && !m->absent)
			pmap_clear_reference(m->phys_addr);
		m->reference = FALSE;
		VM_PAGE_QUEUES_REMOVE(m);
	}
	if (m->wire_count == 0 && !m->inactive) {
		m->page_ticket = vm_page_ticket;
		vm_page_ticket_roll++;

		if(vm_page_ticket_roll == VM_PAGE_TICKETS_IN_ROLL) {
			vm_page_ticket_roll = 0;
			if(vm_page_ticket == VM_PAGE_TICKET_ROLL_IDS)
				vm_page_ticket = 0;
			else
				vm_page_ticket++;
		}

		if(m->zero_fill) {
			queue_enter(&vm_page_queue_zf, m, vm_page_t, pageq);
		} else {
			queue_enter(&vm_page_queue_inactive,
					m, vm_page_t, pageq);
		}

		m->inactive = TRUE;
		if (!m->fictitious)
			vm_page_inactive_count++;
	}
}
/*
 *	vm_page_activate:
 *
 *	Put the specified page on the active list (if appropriate).
 *
 *	The page queues must be locked.
 */

void
vm_page_activate(
	register vm_page_t	m)
{
	if (m->gobbled) {
		assert(m->wire_count == 0);
		if (!m->private && !m->fictitious)
			vm_page_wire_count--;
		vm_page_gobble_count--;
		m->gobbled = FALSE;
	}
	if (m->private)
		return;

	if (m->inactive) {
		if (m->zero_fill) {
			queue_remove(&vm_page_queue_zf, m, vm_page_t, pageq);
		} else {
			queue_remove(&vm_page_queue_inactive,
					m, vm_page_t, pageq);
		}
		if (!m->fictitious)
			vm_page_inactive_count--;
		m->inactive = FALSE;
	}
	if (m->wire_count == 0) {
		if (m->active)
			panic("vm_page_activate: already active");

		queue_enter(&vm_page_queue_active, m, vm_page_t, pageq);
		m->active = TRUE;
		m->reference = TRUE;
		if (!m->fictitious)
			vm_page_active_count++;
	}
}
/*
 *	vm_page_part_zero_fill:
 *
 *	Zero-fill a part of the page.
 */
void
vm_page_part_zero_fill(
	vm_page_t	m,
	vm_offset_t	m_pa,
	vm_size_t	len)
{
	vm_page_t	tmp;

#ifdef PMAP_ZERO_PART_PAGE_IMPLEMENTED
	pmap_zero_part_page(m->phys_addr, m_pa, len);
#else
	while (1) {
		tmp = vm_page_grab();
		if (tmp == VM_PAGE_NULL) {
			vm_page_wait(THREAD_UNINT);
			continue;
		}
		break;
	}
	vm_page_zero_fill(tmp);
	if(m_pa != 0) {
		vm_page_part_copy(m, 0, tmp, 0, m_pa);
	}
	if((m_pa + len) < PAGE_SIZE) {
		vm_page_part_copy(m, m_pa + len, tmp,
				m_pa + len, PAGE_SIZE - (m_pa + len));
	}
	vm_page_copy(tmp,m);
	vm_page_lock_queues();
	vm_page_free(tmp);
	vm_page_unlock_queues();
#endif
}
/*
 *	vm_page_zero_fill:
 *
 *	Zero-fill the specified page.
 */
void
vm_page_zero_fill(
	vm_page_t	m)
{
	XPR(XPR_VM_PAGE,
		"vm_page_zero_fill, object 0x%X offset 0x%X page 0x%X\n",
		(integer_t)m->object, (integer_t)m->offset, (integer_t)m, 0,0);

	pmap_zero_page(m->phys_addr);
}
/*
 *	vm_page_part_copy:
 *
 *	copy part of one page to another
 */
void
vm_page_part_copy(
	vm_page_t	src_m,
	vm_offset_t	src_pa,
	vm_page_t	dst_m,
	vm_offset_t	dst_pa,
	vm_size_t	len)
{
	VM_PAGE_CHECK(src_m);
	VM_PAGE_CHECK(dst_m);

	pmap_copy_part_page(src_m->phys_addr, src_pa,
			dst_m->phys_addr, dst_pa, len);
}
/*
 *	vm_page_copy:
 *
 *	Copy one page to another
 */
void
vm_page_copy(
	vm_page_t	src_m,
	vm_page_t	dest_m)
{
	XPR(XPR_VM_PAGE,
	"vm_page_copy, object 0x%X offset 0x%X to object 0x%X offset 0x%X\n",
	(integer_t)src_m->object, src_m->offset,
	(integer_t)dest_m->object, dest_m->offset,
	0);

	VM_PAGE_CHECK(src_m);
	VM_PAGE_CHECK(dest_m);

	pmap_copy_page(src_m->phys_addr, dest_m->phys_addr);
}
/*
 *	Currently, this is a primitive allocator that grabs
 *	free pages from the system, sorts them by physical
 *	address, then searches for a region large enough to
 *	satisfy the user's request.
 *
 *	Additional levels of effort:
 *		+ steal clean active/inactive pages
 *		+ force pageouts of dirty pages
 *		+ maintain a map of available physical
 *		memory
 */

#define	SET_NEXT_PAGE(m,n)	((m)->pageq.next = (struct queue_entry *) (n))

#if	MACH_ASSERT
int	vm_page_verify_contiguous(
	vm_page_t	pages,
	unsigned int	npages);
#endif	/* MACH_ASSERT */

cpm_counter(unsigned int	vpfls_pages_handled = 0;)
cpm_counter(unsigned int	vpfls_head_insertions = 0;)
cpm_counter(unsigned int	vpfls_tail_insertions = 0;)
cpm_counter(unsigned int	vpfls_general_insertions = 0;)
cpm_counter(unsigned int	vpfc_failed = 0;)
cpm_counter(unsigned int	vpfc_satisfied = 0;)
/*
 *	Sort free list by ascending physical address,
 *	using a not-particularly-bright sort algorithm.
 *	Caller holds vm_page_queue_free_lock.
 */
static void
vm_page_free_list_sort(void)
{
	vm_page_t	sort_list;
	vm_page_t	sort_list_end;
	vm_page_t	m, m1, *prev, next_m;
	vm_offset_t	addr;
#if	MACH_ASSERT
	unsigned int	npages;
	int		old_free_count;
#endif	/* MACH_ASSERT */

#if	MACH_ASSERT
	/*
	 *	Verify pages in the free list..
	 */
	npages = 0;
	for (m = vm_page_queue_free; m != VM_PAGE_NULL; m = NEXT_PAGE(m))
		++npages;
	if (npages != vm_page_free_count)
		panic("vm_sort_free_list:  prelim:  npages %d free_count %d",
		      npages, vm_page_free_count);
	old_free_count = vm_page_free_count;
#endif	/* MACH_ASSERT */

	sort_list = sort_list_end = vm_page_queue_free;
	m = NEXT_PAGE(vm_page_queue_free);
	SET_NEXT_PAGE(vm_page_queue_free, VM_PAGE_NULL);
	cpm_counter(vpfls_pages_handled = 0);
	while (m != VM_PAGE_NULL) {
		cpm_counter(++vpfls_pages_handled);
		next_m = NEXT_PAGE(m);
		if (m->phys_addr < sort_list->phys_addr) {
			cpm_counter(++vpfls_head_insertions);
			SET_NEXT_PAGE(m, sort_list);
			sort_list = m;
		} else if (m->phys_addr > sort_list_end->phys_addr) {
			cpm_counter(++vpfls_tail_insertions);
			SET_NEXT_PAGE(sort_list_end, m);
			SET_NEXT_PAGE(m, VM_PAGE_NULL);
			sort_list_end = m;
		} else {
			cpm_counter(++vpfls_general_insertions);
			/* general sorted list insertion */
			prev = &sort_list;
			for (m1=sort_list; m1!=VM_PAGE_NULL; m1=NEXT_PAGE(m1)) {
				if (m1->phys_addr > m->phys_addr) {
					if (*prev != m1)
						panic("vm_sort_free_list: ugh");
					SET_NEXT_PAGE(m, *prev);
					*prev = m;
					break;
				}
				prev = (vm_page_t *) &m1->pageq.next;
			}
		}
		m = next_m;
	}

#if	MACH_ASSERT
	/*
	 *	Verify that pages are sorted into ascending order.
	 */
	for (m = sort_list, npages = 0; m != VM_PAGE_NULL; m = NEXT_PAGE(m)) {
		if (m != sort_list &&
		    m->phys_addr <= addr) {
			printf("m 0x%x addr 0x%x\n", m, addr);
			panic("vm_sort_free_list");
		}
		addr = m->phys_addr;
		++npages;
	}
	if (old_free_count != vm_page_free_count)
		panic("vm_sort_free_list:  old_free %d free_count %d",
		      old_free_count, vm_page_free_count);
	if (npages != vm_page_free_count)
		panic("vm_sort_free_list:  npages %d free_count %d",
		      npages, vm_page_free_count);
#endif	/* MACH_ASSERT */

	vm_page_queue_free = sort_list;
}
#if	MACH_ASSERT
/*
 *	Check that the list of pages is ordered by
 *	ascending physical address and has no holes.
 */
int
vm_page_verify_contiguous(
	vm_page_t	pages,
	unsigned int	npages)
{
	register vm_page_t	m;
	unsigned int		page_count;
	vm_offset_t		prev_addr;

	prev_addr = pages->phys_addr;
	page_count = 1;
	for (m = NEXT_PAGE(pages); m != VM_PAGE_NULL; m = NEXT_PAGE(m)) {
		if (m->phys_addr != prev_addr + page_size) {
			printf("m 0x%x prev_addr 0x%x, current addr 0x%x\n",
			       m, prev_addr, m->phys_addr);
			printf("pages 0x%x page_count %d\n", pages, page_count);
			panic("vm_page_verify_contiguous:  not contiguous!");
		}
		prev_addr = m->phys_addr;
		++page_count;
	}
	if (page_count != npages) {
		printf("pages 0x%x actual count 0x%x but requested 0x%x\n",
		       pages, page_count, npages);
		panic("vm_page_verify_contiguous:  count error");
	}
	return 1;
}
#endif	/* MACH_ASSERT */
/*
 *	vm_page_find_contiguous:
 *
 *	Find a region large enough to contain at least npages
 *	of contiguous physical memory.
 *
 *	Requirements:
 *		- Called while holding vm_page_queue_free_lock.
 *		- Doesn't respect vm_page_free_reserved; caller
 *		must not ask for more pages than are legal to grab.
 *
 *	Returns a pointer to a list of gobbled pages or VM_PAGE_NULL.
 */
static vm_page_t
vm_page_find_contiguous(
	int		npages)
{
	vm_page_t	m, *contig_prev, *prev_ptr;
	vm_offset_t	prev_addr;
	unsigned int	contig_npages;
	vm_page_t	list;

	if (npages < 1)
		return VM_PAGE_NULL;

	prev_addr = vm_page_queue_free->phys_addr - (page_size + 1);
	prev_ptr = &vm_page_queue_free;
	for (m = vm_page_queue_free; m != VM_PAGE_NULL; m = NEXT_PAGE(m)) {

		if (m->phys_addr != prev_addr + page_size) {
			/*
			 *	Whoops!  Pages aren't contiguous.  Start over.
			 */
			contig_npages = 0;
			contig_prev = prev_ptr;
		}

		if (++contig_npages == npages) {
			/*
			 *	Chop these pages out of the free list.
			 *	Mark them all as gobbled.
			 */
			list = *contig_prev;
			*contig_prev = NEXT_PAGE(m);
			SET_NEXT_PAGE(m, VM_PAGE_NULL);
			for (m = list; m != VM_PAGE_NULL; m = NEXT_PAGE(m)) {
				m->free = FALSE;
				m->gobbled = TRUE;
			}
			vm_page_free_count -= npages;
			if (vm_page_free_count < vm_page_free_count_minimum)
				vm_page_free_count_minimum = vm_page_free_count;
			vm_page_wire_count += npages;
			vm_page_gobble_count += npages;
			cpm_counter(++vpfc_satisfied);
			assert(vm_page_verify_contiguous(list, contig_npages));
			return list;
		}

		assert(contig_npages < npages);
		prev_ptr = (vm_page_t *) &m->pageq.next;
		prev_addr = m->phys_addr;
	}
	cpm_counter(++vpfc_failed);
	return VM_PAGE_NULL;
}
/*
 *	Allocate a list of contiguous, wired pages.
 */
kern_return_t
cpm_allocate(
	vm_size_t	size,
	vm_page_t	*list,
	boolean_t	wire)
{
	register vm_page_t	m;
	vm_page_t		*first_contig;
	vm_page_t		free_list, pages;
	unsigned int		npages, n1pages;
	int			vm_pages_available;

	if (size % page_size != 0)
		return KERN_INVALID_ARGUMENT;

	vm_page_lock_queues();
	mutex_lock(&vm_page_queue_free_lock);

	/*
	 *	Should also take active and inactive pages
	 *	into account...  One day...
	 */
	vm_pages_available = vm_page_free_count - vm_page_free_reserved;

	if (size > vm_pages_available * page_size) {
		mutex_unlock(&vm_page_queue_free_lock);
		return KERN_RESOURCE_SHORTAGE;
	}

	vm_page_free_list_sort();

	npages = size / page_size;

	/*
	 *	Obtain a pointer to a subset of the free
	 *	list large enough to satisfy the request;
	 *	the region will be physically contiguous.
	 */
	pages = vm_page_find_contiguous(npages);
	if (pages == VM_PAGE_NULL) {
		mutex_unlock(&vm_page_queue_free_lock);
		vm_page_unlock_queues();
		return KERN_NO_SPACE;
	}

	mutex_unlock(&vm_page_queue_free_lock);

	/*
	 *	Walk the returned list, wiring the pages.
	 */
	if (wire == TRUE)
		for (m = pages; m != VM_PAGE_NULL; m = NEXT_PAGE(m)) {
			/*
			 *	Essentially inlined vm_page_wire.
			 */
			assert(!m->inactive);
			assert(!m->private);
			assert(!m->fictitious);
			assert(m->wire_count == 0);

			m->gobbled = FALSE;
			m->wire_count++;
			--vm_page_gobble_count;
		}
	vm_page_unlock_queues();

	/*
	 *	The CPM pages should now be available and
	 *	ordered by ascending physical address.
	 */
	assert(vm_page_verify_contiguous(pages, npages));

	*list = pages;
	return KERN_SUCCESS;
}
#include <mach_vm_debug.h>
#if	MACH_VM_DEBUG

#include <mach_debug/hash_info.h>
#include <vm/vm_debug.h>

/*
 *	Routine:	vm_page_info
 *	Purpose:
 *		Return information about the global VP table.
 *		Fills the buffer with as much information as possible
 *		and returns the desired size of the buffer.
 *	Conditions:
 *		Nothing locked.  The caller should provide
 *		possibly-pageable memory.
 */

unsigned int
vm_page_info(
	hash_info_bucket_t	*info,
	unsigned int		count)
{
	int	i;

	if (vm_page_bucket_count < count)
		count = vm_page_bucket_count;

	for (i = 0; i < count; i++) {
		vm_page_bucket_t *bucket = &vm_page_buckets[i];
		unsigned int bucket_count = 0;
		vm_page_t m;

		simple_lock(&vm_page_bucket_lock);
		for (m = bucket->pages; m != VM_PAGE_NULL; m = m->next)
			bucket_count++;
		simple_unlock(&vm_page_bucket_lock);

		/* don't touch pageable memory while holding locks */
		info[i].hib_count = bucket_count;
	}

	return vm_page_bucket_count;
}
#endif	/* MACH_VM_DEBUG */
#include <mach_kdb.h>
#if	MACH_KDB

#include <ddb/db_output.h>
#include <vm/vm_print.h>
#define	printf	kdbprintf

/*
 *	Routine:	vm_page_print		[exported]
 */
void
vm_page_print(
	vm_page_t	p)
{
	iprintf("page 0x%x\n", p);

	iprintf("object=0x%x", p->object);
	printf(", offset=0x%x", p->offset);
	printf(", wire_count=%d", p->wire_count);

	iprintf("%sinactive, %sactive, %sgobbled, %slaundry, %sfree, %sref, %sdiscard\n",
		(p->inactive ? "" : "!"),
		(p->active ? "" : "!"),
		(p->gobbled ? "" : "!"),
		(p->laundry ? "" : "!"),
		(p->free ? "" : "!"),
		(p->reference ? "" : "!"),
		(p->discard_request ? "" : "!"));
	iprintf("%sbusy, %swanted, %stabled, %sfictitious, %sprivate, %sprecious\n",
		(p->busy ? "" : "!"),
		(p->wanted ? "" : "!"),
		(p->tabled ? "" : "!"),
		(p->fictitious ? "" : "!"),
		(p->private ? "" : "!"),
		(p->precious ? "" : "!"));
	iprintf("%sabsent, %serror, %sdirty, %scleaning, %spageout, %sclustered\n",
		(p->absent ? "" : "!"),
		(p->error ? "" : "!"),
		(p->dirty ? "" : "!"),
		(p->cleaning ? "" : "!"),
		(p->pageout ? "" : "!"),
		(p->clustered ? "" : "!"));
	iprintf("%slock_supplied, %soverwriting, %srestart, %sunusual\n",
		(p->lock_supplied ? "" : "!"),
		(p->overwriting ? "" : "!"),
		(p->restart ? "" : "!"),
		(p->unusual ? "" : "!"));

	iprintf("phys_addr=0x%x", p->phys_addr);
	printf(", page_error=0x%x", p->page_error);
	printf(", page_lock=0x%x", p->page_lock);
	printf(", unlock_request=%d\n", p->unlock_request);
}
#endif	/* MACH_KDB */