[apple/xnu.git] / osfmk / vm / vm_resident.c
1 /*
2 * Copyright (c) 2000-2003 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved.
7 *
8 * This file contains Original Code and/or Modifications of Original Code
9 * as defined in and that are subject to the Apple Public Source License
10 * Version 2.0 (the 'License'). You may not use this file except in
11 * compliance with the License. Please obtain a copy of the License at
12 * http://www.opensource.apple.com/apsl/ and read it before using this
13 * file.
14 *
15 * The Original Code and all software distributed under the License are
16 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
17 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
18 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
20 * Please see the License for the specific language governing rights and
21 * limitations under the License.
22 *
23 * @APPLE_LICENSE_HEADER_END@
24 */
25 /*
26 * @OSF_COPYRIGHT@
27 */
28 /*
29 * Mach Operating System
30 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
31 * All Rights Reserved.
32 *
33 * Permission to use, copy, modify and distribute this software and its
34 * documentation is hereby granted, provided that both the copyright
35 * notice and this permission notice appear in all copies of the
36 * software, derivative works or modified versions, and any portions
37 * thereof, and that both notices appear in supporting documentation.
38 *
39 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
40 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
41 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
42 *
43 * Carnegie Mellon requests users of this software to return to
44 *
45 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
46 * School of Computer Science
47 * Carnegie Mellon University
48 * Pittsburgh PA 15213-3890
49 *
50 * any improvements or extensions that they make and grant Carnegie Mellon
51 * the rights to redistribute these changes.
52 */
53 /*
54 */
55 /*
56 * File: vm/vm_page.c
57 * Author: Avadis Tevanian, Jr., Michael Wayne Young
58 *
59 * Resident memory management module.
60 */
61
62 #include <mach/clock_types.h>
63 #include <mach/vm_prot.h>
64 #include <mach/vm_statistics.h>
65 #include <kern/counters.h>
66 #include <kern/sched_prim.h>
67 #include <kern/task.h>
68 #include <kern/thread.h>
69 #include <kern/zalloc.h>
70 #include <kern/xpr.h>
71 #include <vm/pmap.h>
72 #include <vm/vm_init.h>
73 #include <vm/vm_map.h>
74 #include <vm/vm_page.h>
75 #include <vm/vm_pageout.h>
76 #include <vm/vm_kern.h> /* kernel_memory_allocate() */
77 #include <kern/misc_protos.h>
78 #include <zone_debug.h>
79 #include <vm/cpm.h>
80 #include <ppc/mappings.h> /* (BRINGUP) */
81 #include <pexpert/pexpert.h> /* (BRINGUP) */
82
83
84 /* Variables used to indicate the relative age of pages in the
85 * inactive list
86 */
87
88 int vm_page_ticket_roll = 0;
89 int vm_page_ticket = 0;
90 /*
91 * Associated with each page of user-allocatable memory is a
92 * page structure.
93 */
94
95 /*
96 * These variables record the values returned by vm_page_bootstrap,
97 * for debugging purposes. The implementation of pmap_steal_memory
98 * and pmap_startup here also uses them internally.
99 */
100
101 vm_offset_t virtual_space_start;
102 vm_offset_t virtual_space_end;
103 int vm_page_pages;
104
105 /*
106 * The vm_page_lookup() routine, which provides for fast
107 * (virtual memory object, offset) to page lookup, employs
108 * the following hash table. The vm_page_{insert,remove}
109 * routines install and remove associations in the table.
110 * [This table is often called the virtual-to-physical,
111 * or VP, table.]
112 */
113 typedef struct {
114 vm_page_t pages;
115 #if MACH_PAGE_HASH_STATS
116 int cur_count; /* current count */
117 int hi_count; /* high water mark */
118 #endif /* MACH_PAGE_HASH_STATS */
119 } vm_page_bucket_t;
120
121 vm_page_bucket_t *vm_page_buckets; /* Array of buckets */
122 unsigned int vm_page_bucket_count = 0; /* How big is array? */
123 unsigned int vm_page_hash_mask; /* Mask for hash function */
124 unsigned int vm_page_hash_shift; /* Shift for hash function */
125 uint32_t vm_page_bucket_hash; /* Basic bucket hash */
126 decl_simple_lock_data(,vm_page_bucket_lock)
127
128 #if MACH_PAGE_HASH_STATS
129 /* This routine is only for debug. It is intended to be called by
130 * hand by a developer using a kernel debugger. This routine prints
131 * out vm_page_hash table statistics to the kernel debug console.
132 */
133 void
134 hash_debug(void)
135 {
136 int i;
137 int numbuckets = 0;
138 int highsum = 0;
139 int maxdepth = 0;
140
141 for (i = 0; i < vm_page_bucket_count; i++) {
142 if (vm_page_buckets[i].hi_count) {
143 numbuckets++;
144 highsum += vm_page_buckets[i].hi_count;
145 if (vm_page_buckets[i].hi_count > maxdepth)
146 maxdepth = vm_page_buckets[i].hi_count;
147 }
148 }
149 printf("Total number of buckets: %d\n", vm_page_bucket_count);
150 printf("Number used buckets: %d = %d%%\n",
151 numbuckets, 100*numbuckets/vm_page_bucket_count);
152 printf("Number unused buckets: %d = %d%%\n",
153 vm_page_bucket_count - numbuckets,
154 100*(vm_page_bucket_count-numbuckets)/vm_page_bucket_count);
155 printf("Sum of bucket max depth: %d\n", highsum);
156 printf("Average bucket depth: %d.%02d\n",
157 highsum/vm_page_bucket_count,
158 (highsum%vm_page_bucket_count)*100/vm_page_bucket_count);
159 printf("Maximum bucket depth: %d\n", maxdepth);
160 }
161 #endif /* MACH_PAGE_HASH_STATS */
162
163 /*
164 * The virtual page size is currently implemented as a runtime
165 * variable, but is constant once initialized using vm_set_page_size.
166 * This initialization must be done in the machine-dependent
167 * bootstrap sequence, before calling other machine-independent
168 * initializations.
169 *
170 * All references to the virtual page size outside this
171 * module must use the PAGE_SIZE, PAGE_MASK and PAGE_SHIFT
172 * constants.
173 */
174 #ifndef PAGE_SIZE_FIXED
175 vm_size_t page_size = 4096;
176 vm_size_t page_mask = 4095;
177 int page_shift = 12;
178 #else
179 vm_size_t page_size = PAGE_SIZE;
180 vm_size_t page_mask = PAGE_MASK;
181 int page_shift = PAGE_SHIFT;
182 #endif /* PAGE_SIZE_FIXED */
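/*
* For example (an illustrative note, assuming the usual 4K page): with
* page_size = 4096 we get page_mask = 0xFFF and page_shift = 12, so for
* an address a, (a & page_mask) is the byte offset within its page and
* (a >> page_shift) is its page number. These variables back the
* PAGE_SIZE/PAGE_MASK/PAGE_SHIFT constants mentioned above.
*/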
183
184 /*
185 * Resident page structures are initialized from
186 * a template (see vm_page_alloc).
187 *
188 * When adding a new field to the virtual memory
189 * object structure, be sure to add initialization
190 * (see vm_page_bootstrap).
191 */
192 struct vm_page vm_page_template;
193
194 /*
195 * Resident pages that represent real memory
196 * are allocated from a free list.
197 */
198 vm_page_t vm_page_queue_free;
199 vm_page_t vm_page_queue_fictitious;
200 decl_mutex_data(,vm_page_queue_free_lock)
201 unsigned int vm_page_free_wanted;
202 int vm_page_free_count;
203 int vm_page_fictitious_count;
204
205 unsigned int vm_page_free_count_minimum; /* debugging */
206
207 /*
208 * Occasionally, the virtual memory system uses
209 * resident page structures that do not refer to
210 * real pages, for example to leave a page with
211 * important state information in the VP table.
212 *
213 * These page structures are allocated the way
214 * most other kernel structures are.
215 */
216 zone_t vm_page_zone;
217 decl_mutex_data(,vm_page_alloc_lock)
218 unsigned int io_throttle_zero_fill;
219 decl_mutex_data(,vm_page_zero_fill_lock)
220
221 /*
222 * Fictitious pages don't have a physical address,
223 * but we must initialize phys_page to something.
224 * For debugging, this should be a strange value
225 * that the pmap module can recognize in assertions.
226 */
227 vm_offset_t vm_page_fictitious_addr = (vm_offset_t) -1;
228
229 /*
230 * Resident page structures are also chained on
231 * queues that are used by the page replacement
232 * system (pageout daemon). These queues are
233 * defined here, but are shared by the pageout
234 * module. The inactive queue is broken into
235 * inactive and zf for convenience as the
236 * pageout daemon often assigns a higher
237 * affinity to zf pages.
238 */
239 queue_head_t vm_page_queue_active;
240 queue_head_t vm_page_queue_inactive;
241 queue_head_t vm_page_queue_zf;
242 decl_mutex_data(,vm_page_queue_lock)
243 int vm_page_active_count;
244 int vm_page_inactive_count;
245 int vm_page_wire_count;
246 int vm_page_gobble_count = 0;
247 int vm_page_wire_count_warning = 0;
248 int vm_page_gobble_count_warning = 0;
249
250 /* the following fields are protected by the vm_page_queue_lock */
251 queue_head_t vm_page_queue_limbo;
252 int vm_page_limbo_count = 0; /* total pages in limbo */
253 int vm_page_limbo_real_count = 0; /* real pages in limbo */
254 int vm_page_pin_count = 0; /* number of pinned pages */
255
256 decl_simple_lock_data(,vm_page_preppin_lock)
257
258 /*
259 * Several page replacement parameters are also
260 * shared with this module, so that page allocation
261 * (done here in vm_page_alloc) can trigger the
262 * pageout daemon.
263 */
264 int vm_page_free_target = 0;
265 int vm_page_free_min = 0;
266 int vm_page_inactive_target = 0;
267 int vm_page_free_reserved = 0;
268 int vm_page_laundry_count = 0;
269
270 /*
271 * The VM system has a couple of heuristics for deciding
272 * that pages are "uninteresting" and should be placed
273 * on the inactive queue as likely candidates for replacement.
274 * These variables let the heuristics be controlled at run-time
275 * to make experimentation easier.
276 */
277
278 boolean_t vm_page_deactivate_hint = TRUE;
279
280 /*
281 * vm_set_page_size:
282 *
283 * Sets the page size, perhaps based upon the memory
284 * size. Must be called before any use of page-size
285 * dependent functions.
286 *
287 * Sets page_shift and page_mask from page_size.
288 */
289 void
290 vm_set_page_size(void)
291 {
292 #ifndef PAGE_SIZE_FIXED
293 page_mask = page_size - 1;
294
295 if ((page_mask & page_size) != 0)
296 panic("vm_set_page_size: page size not a power of two");
297
298 for (page_shift = 0; ; page_shift++)
299 if ((1 << page_shift) == page_size)
300 break;
301 #endif /* PAGE_SIZE_FIXED */
302 }
303
304 /*
305 * vm_page_bootstrap:
306 *
307 * Initializes the resident memory module.
308 *
309 * Allocates memory for the page cells, and
310 * for the object/offset-to-page hash table headers.
311 * Each page cell is initialized and placed on the free list.
312 * Returns the range of available kernel virtual memory.
313 */
314
315 void
316 vm_page_bootstrap(
317 vm_offset_t *startp,
318 vm_offset_t *endp)
319 {
320 register vm_page_t m;
321 int i;
322 unsigned int log1;
323 unsigned int log2;
324 unsigned int size;
325
326 /*
327 * Initialize the vm_page template.
328 */
329
330 m = &vm_page_template;
331 m->object = VM_OBJECT_NULL; /* reset later */
332 m->offset = 0; /* reset later */
333 m->wire_count = 0;
334
335 m->inactive = FALSE;
336 m->active = FALSE;
337 m->laundry = FALSE;
338 m->free = FALSE;
339 m->no_isync = TRUE;
340 m->reference = FALSE;
341 m->pageout = FALSE;
342 m->dump_cleaning = FALSE;
343 m->list_req_pending = FALSE;
344
345 m->busy = TRUE;
346 m->wanted = FALSE;
347 m->tabled = FALSE;
348 m->fictitious = FALSE;
349 m->private = FALSE;
350 m->absent = FALSE;
351 m->error = FALSE;
352 m->dirty = FALSE;
353 m->cleaning = FALSE;
354 m->precious = FALSE;
355 m->clustered = FALSE;
356 m->lock_supplied = FALSE;
357 m->unusual = FALSE;
358 m->restart = FALSE;
359 m->zero_fill = FALSE;
360
361 m->phys_page = 0; /* reset later */
362
363 m->page_lock = VM_PROT_NONE;
364 m->unlock_request = VM_PROT_NONE;
365 m->page_error = KERN_SUCCESS;
366
367 /*
368 * Initialize the page queues.
369 */
370
371 mutex_init(&vm_page_queue_free_lock, ETAP_VM_PAGEQ_FREE);
372 mutex_init(&vm_page_queue_lock, ETAP_VM_PAGEQ);
373 simple_lock_init(&vm_page_preppin_lock, ETAP_VM_PREPPIN);
374
375 vm_page_queue_free = VM_PAGE_NULL;
376 vm_page_queue_fictitious = VM_PAGE_NULL;
377 queue_init(&vm_page_queue_active);
378 queue_init(&vm_page_queue_inactive);
379 queue_init(&vm_page_queue_zf);
380 queue_init(&vm_page_queue_limbo);
381
382 vm_page_free_wanted = 0;
383
384 /*
385 * Steal memory for the map and zone subsystems.
386 */
387
388 vm_map_steal_memory();
389 zone_steal_memory();
390
391 /*
392 * Allocate (and initialize) the virtual-to-physical
393 * table hash buckets.
394 *
395 * The number of buckets should be a power of two to
396 * get a good hash function. The following computation
397 * chooses the first power of two that is greater
398 * than the number of physical pages in the system.
399 */
400
401 simple_lock_init(&vm_page_bucket_lock, ETAP_VM_BUCKET);
402
403 if (vm_page_bucket_count == 0) {
404 unsigned int npages = pmap_free_pages();
405
406 vm_page_bucket_count = 1;
407 while (vm_page_bucket_count < npages)
408 vm_page_bucket_count <<= 1;
409 }
410
411 vm_page_hash_mask = vm_page_bucket_count - 1;
412
413 /*
414 * Calculate object shift value for hashing algorithm:
415 * O = log2(sizeof(struct vm_object))
416 * B = log2(vm_page_bucket_count)
417 * hash shifts the object left by
418 * B/2 - O
419 */
420 size = vm_page_bucket_count;
421 for (log1 = 0; size > 1; log1++)
422 size /= 2;
423 size = sizeof(struct vm_object);
424 for (log2 = 0; size > 1; log2++)
425 size /= 2;
426 vm_page_hash_shift = log1/2 - log2 + 1;
427
428 vm_page_bucket_hash = 1 << ((log1 + 1) >> 1); /* Get (ceiling of sqrt of table size) */
429 vm_page_bucket_hash |= 1 << ((log1 + 1) >> 2); /* Get (ceiling of quadroot of table size) */
430 vm_page_bucket_hash |= 1; /* Set the low bit - it must always be 1 to ensure a unique series */
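/*
* Worked example (illustrative): with a 65536-bucket table, log1 = 16,
* so vm_page_bucket_hash = (1 << 8) | (1 << 4) | 1 = 0x111 -- roughly
* the square root (0x100) and fourth root (0x10) of the table size,
* with the low bit forced on as required above.
*/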
431
432 if (vm_page_hash_mask & vm_page_bucket_count)
433 printf("vm_page_bootstrap: WARNING -- strange page hash\n");
434
435 vm_page_buckets = (vm_page_bucket_t *)
436 pmap_steal_memory(vm_page_bucket_count *
437 sizeof(vm_page_bucket_t));
438
439 for (i = 0; i < vm_page_bucket_count; i++) {
440 register vm_page_bucket_t *bucket = &vm_page_buckets[i];
441
442 bucket->pages = VM_PAGE_NULL;
443 #if MACH_PAGE_HASH_STATS
444 bucket->cur_count = 0;
445 bucket->hi_count = 0;
446 #endif /* MACH_PAGE_HASH_STATS */
447 }
448
449 /*
450 * Machine-dependent code allocates the resident page table.
451 * It uses vm_page_init to initialize the page frames.
452 * The code also returns to us the virtual space available
453 * to the kernel. We don't trust the pmap module
454 * to get the alignment right.
455 */
456
457 pmap_startup(&virtual_space_start, &virtual_space_end);
458 virtual_space_start = round_page_32(virtual_space_start);
459 virtual_space_end = trunc_page_32(virtual_space_end);
460
461 *startp = virtual_space_start;
462 *endp = virtual_space_end;
463
464 /*
465 * Compute the initial "wire" count.
466 * Up until now, the pages which have been set aside are not under
467 * the VM system's control, so although they aren't explicitly
468 * wired, they nonetheless can't be moved. At this moment,
469 * all VM managed pages are "free", courtesy of pmap_startup.
470 */
471 vm_page_wire_count = atop_64(max_mem) - vm_page_free_count; /* initial value */
472
473 printf("vm_page_bootstrap: %d free pages\n", vm_page_free_count);
474 vm_page_free_count_minimum = vm_page_free_count;
475 }
476
477 #ifndef MACHINE_PAGES
478 /*
479 * We implement pmap_steal_memory and pmap_startup with the help
480 * of two simpler functions, pmap_virtual_space and pmap_next_page.
481 */
482
483 vm_offset_t
484 pmap_steal_memory(
485 vm_size_t size)
486 {
487 vm_offset_t addr, vaddr;
488 ppnum_t phys_page;
489
490 /*
491 * We round the size up to a multiple of sizeof (void *).
492 */
493
494 size = (size + sizeof (void *) - 1) &~ (sizeof (void *) - 1);
495
496 /*
497 * If this is the first call to pmap_steal_memory,
498 * we have to initialize ourself.
499 */
500
501 if (virtual_space_start == virtual_space_end) {
502 pmap_virtual_space(&virtual_space_start, &virtual_space_end);
503
504 /*
505 * The initial values must be aligned properly, and
506 * we don't trust the pmap module to do it right.
507 */
508
509 virtual_space_start = round_page_32(virtual_space_start);
510 virtual_space_end = trunc_page_32(virtual_space_end);
511 }
512
513 /*
514 * Allocate virtual memory for this request.
515 */
516
517 addr = virtual_space_start;
518 virtual_space_start += size;
519
520 kprintf("pmap_steal_memory: %08X - %08X; size=%08X\n", addr, virtual_space_start, size); /* (TEST/DEBUG) */
521
522 /*
523 * Allocate and map physical pages to back new virtual pages.
524 */
525
526 for (vaddr = round_page_32(addr);
527 vaddr < addr + size;
528 vaddr += PAGE_SIZE) {
529 if (!pmap_next_page(&phys_page))
530 panic("pmap_steal_memory");
531
532 /*
533 * XXX Logically, these mappings should be wired,
534 * but some pmap modules barf if they are.
535 */
536
537 pmap_enter(kernel_pmap, vaddr, phys_page,
538 VM_PROT_READ|VM_PROT_WRITE,
539 VM_WIMG_USE_DEFAULT, FALSE);
540 /*
541 * Account for newly stolen memory
542 */
543 vm_page_wire_count++;
544
545 }
546
547 return addr;
548 }
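/*
* Usage note: vm_page_bootstrap() above uses this routine in exactly
* this way, e.g. stealing vm_page_bucket_count * sizeof(vm_page_bucket_t)
* bytes for the hash buckets, because at that point the zone and kernel
* memory allocators are not yet available.
*/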
549
550 void
551 pmap_startup(
552 vm_offset_t *startp,
553 vm_offset_t *endp)
554 {
555 unsigned int i, npages, pages_initialized, fill, fillval;
556 vm_page_t pages;
557 ppnum_t phys_page;
558 addr64_t tmpaddr;
559
560 /*
561 * We calculate how many page frames we will have
562 * and then allocate the page structures in one chunk.
563 */
564
565 tmpaddr = (addr64_t)pmap_free_pages() * (addr64_t)PAGE_SIZE; /* Get the amount of memory left */
566 tmpaddr = tmpaddr + (addr64_t)(round_page_32(virtual_space_start) - virtual_space_start); /* Account for any slop */
567 npages = (unsigned int)(tmpaddr / (addr64_t)(PAGE_SIZE + sizeof(*pages))); /* Figure how many pages fit, counting each page plus the vm_page_t that describes it */
568
569 pages = (vm_page_t) pmap_steal_memory(npages * sizeof *pages);
570
571 /*
572 * Initialize the page frames.
573 */
574
575 for (i = 0, pages_initialized = 0; i < npages; i++) {
576 if (!pmap_next_page(&phys_page))
577 break;
578
579 vm_page_init(&pages[i], phys_page);
580 vm_page_pages++;
581 pages_initialized++;
582 }
583
584 /*
585 * Release pages in reverse order so that physical pages
586 * initially get allocated in ascending addresses. This keeps
587 * the devices (which must address physical memory) happy if
588 * they require several consecutive pages.
589 */
590
591 /*
592 * Check if we want to initialize pages to a known value
593 */
594
595 fill = 0; /* Assume no fill */
596 if (PE_parse_boot_arg("fill", &fillval)) fill = 1; /* Set fill */
597
598 for (i = pages_initialized; i > 0; i--) {
599 extern void fillPage(ppnum_t phys_page, unsigned int fillval);
600 if(fill) fillPage(pages[i - 1].phys_page, fillval); /* Fill the page with a known value if requested at boot */
601 vm_page_release(&pages[i - 1]);
602 }
603
604 #if 0
605 {
606 vm_page_t xx, xxo, xxl;
607 int j, k, l;
608
609 j = 0; /* (BRINGUP) */
610 xxl = 0;
611
612 for(xx = vm_page_queue_free; xx; xxl = xx, xx = xx->pageq.next) { /* (BRINGUP) */
613 j++; /* (BRINGUP) */
614 if(j > vm_page_free_count) { /* (BRINGUP) */
615 panic("pmap_startup: too many pages, xx = %08X, xxl = %08X\n", xx, xxl);
616 }
617
618 l = vm_page_free_count - j; /* (BRINGUP) */
619 k = 0; /* (BRINGUP) */
620
621 if(((j - 1) & 0xFFFF) == 0) kprintf("checking number %d of %d\n", j, vm_page_free_count);
622
623 for(xxo = xx->pageq.next; xxo; xxo = xxo->pageq.next) { /* (BRINGUP) */
624 k++;
625 if(k > l) panic("pmap_startup: too many in secondary check %d %d\n", k, l);
626 if((xx->phys_page & 0xFFFFFFFF) == (xxo->phys_page & 0xFFFFFFFF)) { /* (BRINGUP) */
627 panic("pmap_startup: duplicate physaddr, xx = %08X, xxo = %08X\n", xx, xxo);
628 }
629 }
630 }
631
632 if(j != vm_page_free_count) { /* (BRINGUP) */
633 panic("pmap_startup: vm_page_free_count does not match, calc = %d, vm_page_free_count = %08X\n", j, vm_page_free_count);
634 }
635 }
636 #endif
637
638
639 /*
640 * We have to re-align virtual_space_start,
641 * because pmap_steal_memory has been using it.
642 */
643
644 virtual_space_start = round_page_32(virtual_space_start);
645
646 *startp = virtual_space_start;
647 *endp = virtual_space_end;
648 }
649 #endif /* MACHINE_PAGES */
650
651 /*
652 * Routine: vm_page_module_init
653 * Purpose:
654 * Second initialization pass, to be done after
655 * the basic VM system is ready.
656 */
657 void
658 vm_page_module_init(void)
659 {
660 vm_page_zone = zinit((vm_size_t) sizeof(struct vm_page),
661 0, PAGE_SIZE, "vm pages");
662
663 #if ZONE_DEBUG
664 zone_debug_disable(vm_page_zone);
665 #endif /* ZONE_DEBUG */
666
667 zone_change(vm_page_zone, Z_EXPAND, FALSE);
668 zone_change(vm_page_zone, Z_EXHAUST, TRUE);
669 zone_change(vm_page_zone, Z_FOREIGN, TRUE);
670
671 /*
672 * Adjust zone statistics to account for the real pages allocated
673 * in vm_page_create(). [Q: is this really what we want?]
674 */
675 vm_page_zone->count += vm_page_pages;
676 vm_page_zone->cur_size += vm_page_pages * vm_page_zone->elem_size;
677
678 mutex_init(&vm_page_alloc_lock, ETAP_VM_PAGE_ALLOC);
679 mutex_init(&vm_page_zero_fill_lock, ETAP_VM_PAGE_ALLOC);
680 }
681
682 /*
683 * Routine: vm_page_create
684 * Purpose:
685 * After the VM system is up, machine-dependent code
686 * may stumble across more physical memory. For example,
687 * memory that it was reserving for a frame buffer.
688 * vm_page_create turns this memory into available pages.
689 */
690
691 void
692 vm_page_create(
693 ppnum_t start,
694 ppnum_t end)
695 {
696 ppnum_t phys_page;
697 vm_page_t m;
698
699 for (phys_page = start;
700 phys_page < end;
701 phys_page++) {
702 while ((m = (vm_page_t) vm_page_grab_fictitious())
703 == VM_PAGE_NULL)
704 vm_page_more_fictitious();
705
706 vm_page_init(m, phys_page);
707 vm_page_pages++;
708 vm_page_release(m);
709 }
710 }
711
712 /*
713 * vm_page_hash:
714 *
715 * Distributes the object/offset key pair among hash buckets.
716 *
717 * NOTE: The bucket count must be a power of 2
718 */
719 #define vm_page_hash(object, offset) (\
720 ( (natural_t)((uint32_t)object * vm_page_bucket_hash) + ((uint32_t)atop_64(offset) ^ vm_page_bucket_hash))\
721 & vm_page_hash_mask)
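/*
* Since vm_page_bucket_count is a power of two, masking with
* vm_page_hash_mask is equivalent to reducing the hash modulo the
* bucket count. Typical use, as in vm_page_insert/vm_page_lookup below:
*
* bucket = &vm_page_buckets[vm_page_hash(object, offset)];
*/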
722
723 /*
724 * vm_page_insert: [ internal use only ]
725 *
726 * Inserts the given mem entry into the object/offset-page
727 * table and the object page list.
728 *
729 * The object must be locked.
730 */
731
732 void
733 vm_page_insert(
734 register vm_page_t mem,
735 register vm_object_t object,
736 register vm_object_offset_t offset)
737 {
738 register vm_page_bucket_t *bucket;
739
740 XPR(XPR_VM_PAGE,
741 "vm_page_insert, object 0x%X offset 0x%X page 0x%X\n",
742 (integer_t)object, (integer_t)offset, (integer_t)mem, 0,0);
743
744 VM_PAGE_CHECK(mem);
745
746 if (mem->tabled)
747 panic("vm_page_insert");
748
749 assert(!object->internal || offset < object->size);
750
751 /* only insert "pageout" pages into "pageout" objects,
752 * and normal pages into normal objects */
753 assert(object->pageout == mem->pageout);
754
755 /*
756 * Record the object/offset pair in this page
757 */
758
759 mem->object = object;
760 mem->offset = offset;
761
762 /*
763 * Insert it into the object/offset hash table
764 */
765
766 bucket = &vm_page_buckets[vm_page_hash(object, offset)];
767 simple_lock(&vm_page_bucket_lock);
768 mem->next = bucket->pages;
769 bucket->pages = mem;
770 #if MACH_PAGE_HASH_STATS
771 if (++bucket->cur_count > bucket->hi_count)
772 bucket->hi_count = bucket->cur_count;
773 #endif /* MACH_PAGE_HASH_STATS */
774 simple_unlock(&vm_page_bucket_lock);
775
776 /*
777 * Now link into the object's list of backed pages.
778 */
779
780 queue_enter(&object->memq, mem, vm_page_t, listq);
781 mem->tabled = TRUE;
782
783 /*
784 * Show that the object has one more resident page.
785 */
786
787 object->resident_page_count++;
788 }
789
790 /*
791 * vm_page_replace:
792 *
793 * Exactly like vm_page_insert, except that we first
794 * remove any existing page at the given offset in object.
795 *
796 * The object and page queues must be locked.
797 */
798
799 void
800 vm_page_replace(
801 register vm_page_t mem,
802 register vm_object_t object,
803 register vm_object_offset_t offset)
804 {
805 register vm_page_bucket_t *bucket;
806
807 VM_PAGE_CHECK(mem);
808
809 if (mem->tabled)
810 panic("vm_page_replace");
811
812 /*
813 * Record the object/offset pair in this page
814 */
815
816 mem->object = object;
817 mem->offset = offset;
818
819 /*
820 * Insert it into the object/offset hash table,
821 * replacing any page that might have been there.
822 */
823
824 bucket = &vm_page_buckets[vm_page_hash(object, offset)];
825 simple_lock(&vm_page_bucket_lock);
826 if (bucket->pages) {
827 vm_page_t *mp = &bucket->pages;
828 register vm_page_t m = *mp;
829 do {
830 if (m->object == object && m->offset == offset) {
831 /*
832 * Remove page from bucket and from object,
833 * and return it to the free list.
834 */
835 *mp = m->next;
836 queue_remove(&object->memq, m, vm_page_t,
837 listq);
838 m->tabled = FALSE;
839 object->resident_page_count--;
840
841 /*
842 * Return page to the free list.
843 * Note the page is not tabled now, so this
844 * won't self-deadlock on the bucket lock.
845 */
846
847 vm_page_free(m);
848 break;
849 }
850 mp = &m->next;
851 } while ((m = *mp));
852 mem->next = bucket->pages;
853 } else {
854 mem->next = VM_PAGE_NULL;
855 }
856 bucket->pages = mem;
857 simple_unlock(&vm_page_bucket_lock);
858
859 /*
860 * Now link into the object's list of backed pages.
861 */
862
863 queue_enter(&object->memq, mem, vm_page_t, listq);
864 mem->tabled = TRUE;
865
866 /*
867 * And show that the object has one more resident
868 * page.
869 */
870
871 object->resident_page_count++;
872 }
873
874 /*
875 * vm_page_remove: [ internal use only ]
876 *
877 * Removes the given mem entry from the object/offset-page
878 * table and the object page list.
879 *
880 * The object and page must be locked.
881 */
882
883 void
884 vm_page_remove(
885 register vm_page_t mem)
886 {
887 register vm_page_bucket_t *bucket;
888 register vm_page_t this;
889
890 XPR(XPR_VM_PAGE,
891 "vm_page_remove, object 0x%X offset 0x%X page 0x%X\n",
892 (integer_t)mem->object, (integer_t)mem->offset,
893 (integer_t)mem, 0,0);
894
895 assert(mem->tabled);
896 assert(!mem->cleaning);
897 VM_PAGE_CHECK(mem);
898
899 /*
900 * Remove from the object/offset hash table
901 */
902
903 bucket = &vm_page_buckets[vm_page_hash(mem->object, mem->offset)];
904 simple_lock(&vm_page_bucket_lock);
905 if ((this = bucket->pages) == mem) {
906 /* optimize for common case */
907
908 bucket->pages = mem->next;
909 } else {
910 register vm_page_t *prev;
911
912 for (prev = &this->next;
913 (this = *prev) != mem;
914 prev = &this->next)
915 continue;
916 *prev = this->next;
917 }
918 #if MACH_PAGE_HASH_STATS
919 bucket->cur_count--;
920 #endif /* MACH_PAGE_HASH_STATS */
921 simple_unlock(&vm_page_bucket_lock);
922
923 /*
924 * Now remove from the object's list of backed pages.
925 */
926
927 queue_remove(&mem->object->memq, mem, vm_page_t, listq);
928
929 /*
930 * And show that the object has one fewer resident
931 * page.
932 */
933
934 mem->object->resident_page_count--;
935
936 mem->tabled = FALSE;
937 mem->object = VM_OBJECT_NULL;
938 mem->offset = 0;
939 }
940
941 /*
942 * vm_page_lookup:
943 *
944 * Returns the page associated with the object/offset
945 * pair specified; if none is found, VM_PAGE_NULL is returned.
946 *
947 * The object must be locked. No side effects.
948 */
949
950 vm_page_t
951 vm_page_lookup(
952 register vm_object_t object,
953 register vm_object_offset_t offset)
954 {
955 register vm_page_t mem;
956 register vm_page_bucket_t *bucket;
957
958 /*
959 * Search the hash table for this object/offset pair
960 */
961
962 bucket = &vm_page_buckets[vm_page_hash(object, offset)];
963
964 simple_lock(&vm_page_bucket_lock);
965 for (mem = bucket->pages; mem != VM_PAGE_NULL; mem = mem->next) {
966 VM_PAGE_CHECK(mem);
967 if ((mem->object == object) && (mem->offset == offset))
968 break;
969 }
970 simple_unlock(&vm_page_bucket_lock);
971
972 return(mem);
973 }
974
975 /*
976 * vm_page_rename:
977 *
978 * Move the given memory entry from its
979 * current object to the specified target object/offset.
980 *
981 * The object must be locked.
982 */
983 void
984 vm_page_rename(
985 register vm_page_t mem,
986 register vm_object_t new_object,
987 vm_object_offset_t new_offset)
988 {
989 assert(mem->object != new_object);
990 /*
991 * Changes to mem->object require the page lock because
992 * the pageout daemon uses that lock to get the object.
993 */
994
995 XPR(XPR_VM_PAGE,
996 "vm_page_rename, new object 0x%X, offset 0x%X page 0x%X\n",
997 (integer_t)new_object, (integer_t)new_offset,
998 (integer_t)mem, 0,0);
999
1000 vm_page_lock_queues();
1001 vm_page_remove(mem);
1002 vm_page_insert(mem, new_object, new_offset);
1003 vm_page_unlock_queues();
1004 }
1005
1006 /*
1007 * vm_page_init:
1008 *
1009 * Initialize the fields in a new page.
1010 * This takes a structure with random values and initializes it
1011 * so that it can be given to vm_page_release or vm_page_insert.
1012 */
1013 void
1014 vm_page_init(
1015 vm_page_t mem,
1016 ppnum_t phys_page)
1017 {
1018 *mem = vm_page_template;
1019 mem->phys_page = phys_page;
1020 }
1021
1022 /*
1023 * vm_page_grab_fictitious:
1024 *
1025 * Remove a fictitious page from the free list.
1026 * Returns VM_PAGE_NULL if there are no free pages.
1027 */
1028 int c_vm_page_grab_fictitious = 0;
1029 int c_vm_page_release_fictitious = 0;
1030 int c_vm_page_more_fictitious = 0;
1031
1032 vm_page_t
1033 vm_page_grab_fictitious(void)
1034 {
1035 register vm_page_t m;
1036
1037 m = (vm_page_t)zget(vm_page_zone);
1038 if (m) {
1039 vm_page_init(m, vm_page_fictitious_addr);
1040 m->fictitious = TRUE;
1041 }
1042
1043 c_vm_page_grab_fictitious++;
1044 return m;
1045 }
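/*
* Illustrative (non-compiled) sketch of the usual retry idiom, modeled
* on vm_page_create() above: loop until a fictitious page is available,
* letting vm_page_more_fictitious() replenish the zone (it may block).
*/
#if 0
vm_page_t m;

while ((m = vm_page_grab_fictitious()) == VM_PAGE_NULL)
vm_page_more_fictitious();
#endif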
1046
1047 /*
1048 * vm_page_release_fictitious:
1049 *
1050 * Release a fictitious page to the free list.
1051 */
1052
1053 void
1054 vm_page_release_fictitious(
1055 register vm_page_t m)
1056 {
1057 assert(!m->free);
1058 assert(m->busy);
1059 assert(m->fictitious);
1060 assert(m->phys_page == vm_page_fictitious_addr);
1061
1062 c_vm_page_release_fictitious++;
1063
1064 if (m->free)
1065 panic("vm_page_release_fictitious");
1066 m->free = TRUE;
1067 zfree(vm_page_zone, (vm_offset_t)m);
1068 }
1069
1070 /*
1071 * vm_page_more_fictitious:
1072 *
1073 * Add more fictitious pages to the free list.
1074 * Allowed to block. This routine is way intimate
1075 * with the zones code, for several reasons:
1076 * 1. we need to carve some page structures out of physical
1077 * memory before zones work, so they _cannot_ come from
1078 * the zone_map.
1079 * 2. the zone needs to be collectable in order to prevent
1080 * growth without bound. These structures are used by
1081 * the device pager (by the hundreds and thousands), as
1082 * private pages for pageout, and as blocking pages for
1083 * pagein. Temporary bursts in demand should not result in
1084 * permanent allocation of a resource.
1085 * 3. To smooth allocation humps, we allocate single pages
1086 * with kernel_memory_allocate(), and cram them into the
1087 * zone. This also allows us to initialize the vm_page_t's
1088 * on the way into the zone, so that zget() always returns
1089 * an initialized structure. The zone free element pointer
1090 * and the free page pointer are both the first item in the
1091 * vm_page_t.
1092 * 4. By having the pages in the zone pre-initialized, we need
1093 * not keep 2 levels of lists. The garbage collector simply
1094 * scans our list, and reduces physical memory usage as it
1095 * sees fit.
1096 */
1097
1098 void vm_page_more_fictitious(void)
1099 {
1100 extern vm_map_t zone_map;
1101 register vm_page_t m;
1102 vm_offset_t addr;
1103 kern_return_t retval;
1104 int i;
1105
1106 c_vm_page_more_fictitious++;
1107
1108 /*
1109 * Allocate a single page from the zone_map. Do not wait if no physical
1110 * pages are immediately available, and do not zero the space. We need
1111 * our own blocking lock here to prevent multiple,
1112 * simultaneous requests from piling up on the zone_map lock. Exactly
1113 * one (of our) threads should be potentially waiting on the map lock.
1114 * If the winner is not vm-privileged, then the page allocation will fail,
1115 * and it will temporarily block here in the vm_page_wait().
1116 */
1117 mutex_lock(&vm_page_alloc_lock);
1118 /*
1119 * If another thread allocated space, just bail out now.
1120 */
1121 if (zone_free_count(vm_page_zone) > 5) {
1122 /*
1123 * The number "5" is a small number that is larger than the
1124 * number of fictitious pages that any single caller will
1125 * attempt to allocate. Otherwise, a thread will attempt to
1126 * acquire a fictitious page (vm_page_grab_fictitious), fail,
1127 * release all of the resources and locks already acquired,
1128 * and then call this routine. This routine finds the pages
1129 * that the caller released, and so does not allocate new space.
1130 * The process repeats infinitely. The largest known number
1131 * of fictitious pages required in this manner is 2. 5 is
1132 * simply a somewhat larger number.
1133 */
1134 mutex_unlock(&vm_page_alloc_lock);
1135 return;
1136 }
1137
1138 if ((retval = kernel_memory_allocate(zone_map,
1139 &addr, PAGE_SIZE, VM_PROT_ALL,
1140 KMA_KOBJECT|KMA_NOPAGEWAIT)) != KERN_SUCCESS) {
1141 /*
1142 * No page was available. Tell the pageout daemon, drop the
1143 * lock to give another thread a chance at it, and
1144 * wait for the pageout daemon to make progress.
1145 */
1146 mutex_unlock(&vm_page_alloc_lock);
1147 vm_page_wait(THREAD_UNINT);
1148 return;
1149 }
1150 /*
1151 * Initialize as many vm_page_t's as will fit on this page. This
1152 * depends on the zone code disturbing ONLY the first item of
1153 * each zone element.
1154 */
1155 m = (vm_page_t)addr;
1156 for (i = PAGE_SIZE/sizeof(struct vm_page); i > 0; i--) {
1157 vm_page_init(m, vm_page_fictitious_addr);
1158 m->fictitious = TRUE;
1159 m++;
1160 }
1161 zcram(vm_page_zone, addr, PAGE_SIZE);
1162 mutex_unlock(&vm_page_alloc_lock);
1163 }
1164
1165 /*
1166 * vm_page_convert:
1167 *
1168 * Attempt to convert a fictitious page into a real page.
1169 */
1170
1171 boolean_t
1172 vm_page_convert(
1173 register vm_page_t m)
1174 {
1175 register vm_page_t real_m;
1176
1177 assert(m->busy);
1178 assert(m->fictitious);
1179 assert(!m->dirty);
1180
1181 real_m = vm_page_grab();
1182 if (real_m == VM_PAGE_NULL)
1183 return FALSE;
1184
1185 m->phys_page = real_m->phys_page;
1186 m->fictitious = FALSE;
1187 m->no_isync = TRUE;
1188
1189 vm_page_lock_queues();
1190 if (m->active)
1191 vm_page_active_count++;
1192 else if (m->inactive)
1193 vm_page_inactive_count++;
1194 vm_page_unlock_queues();
1195
1196 real_m->phys_page = vm_page_fictitious_addr;
1197 real_m->fictitious = TRUE;
1198
1199 vm_page_release_fictitious(real_m);
1200 return TRUE;
1201 }
1202
1203 /*
1204 * vm_pool_low():
1205 *
1206 * Return true if it is not likely that a non-vm_privileged thread
1207 * can get memory without blocking. Advisory only, since the
1208 * situation may change under us.
1209 */
1210 int
1211 vm_pool_low(void)
1212 {
1213 /* No locking, at worst we will fib. */
1214 return( vm_page_free_count < vm_page_free_reserved );
1215 }
1216
1217 /*
1218 * vm_page_grab:
1219 *
1220 * Remove a page from the free list.
1221 * Returns VM_PAGE_NULL if the free list is too small.
1222 */
1223
1224 unsigned long vm_page_grab_count = 0; /* measure demand */
1225
1226 vm_page_t
1227 vm_page_grab(void)
1228 {
1229 register vm_page_t mem;
1230
1231 mutex_lock(&vm_page_queue_free_lock);
1232 vm_page_grab_count++;
1233
1234 /*
1235 * Optionally produce warnings if the wire or gobble
1236 * counts exceed some threshold.
1237 */
1238 if (vm_page_wire_count_warning > 0
1239 && vm_page_wire_count >= vm_page_wire_count_warning) {
1240 printf("mk: vm_page_grab(): high wired page count of %d\n",
1241 vm_page_wire_count);
1242 assert(vm_page_wire_count < vm_page_wire_count_warning);
1243 }
1244 if (vm_page_gobble_count_warning > 0
1245 && vm_page_gobble_count >= vm_page_gobble_count_warning) {
1246 printf("mk: vm_page_grab(): high gobbled page count of %d\n",
1247 vm_page_gobble_count);
1248 assert(vm_page_gobble_count < vm_page_gobble_count_warning);
1249 }
1250
1251 /*
1252 * Only let privileged threads (involved in pageout)
1253 * dip into the reserved pool.
1254 */
1255
1256 if ((vm_page_free_count < vm_page_free_reserved) &&
1257 !current_thread()->vm_privilege) {
1258 mutex_unlock(&vm_page_queue_free_lock);
1259 mem = VM_PAGE_NULL;
1260 goto wakeup_pageout;
1261 }
1262
1263 while (vm_page_queue_free == VM_PAGE_NULL) {
1264 printf("vm_page_grab: no free pages, trouble expected...\n");
1265 mutex_unlock(&vm_page_queue_free_lock);
1266 VM_PAGE_WAIT();
1267 mutex_lock(&vm_page_queue_free_lock);
1268 }
1269
1270 if (--vm_page_free_count < vm_page_free_count_minimum)
1271 vm_page_free_count_minimum = vm_page_free_count;
1272 mem = vm_page_queue_free;
1273 vm_page_queue_free = (vm_page_t) mem->pageq.next;
1274 mem->free = FALSE;
1275 mem->no_isync = TRUE;
1276 mutex_unlock(&vm_page_queue_free_lock);
1277
1278 /*
1279 * Decide if we should poke the pageout daemon.
1280 * We do this if the free count is less than the low
1281 * water mark, or if the free count is less than the high
1282 * water mark (but above the low water mark) and the inactive
1283 * count is less than its target.
1284 *
1285 * We don't have the counts locked ... if they change a little,
1286 * it doesn't really matter.
1287 */
1288
1289 wakeup_pageout:
1290 if ((vm_page_free_count < vm_page_free_min) ||
1291 ((vm_page_free_count < vm_page_free_target) &&
1292 (vm_page_inactive_count < vm_page_inactive_target)))
1293 thread_wakeup((event_t) &vm_page_free_wanted);
1294
1295 // dbgLog(mem->phys_page, vm_page_free_count, vm_page_wire_count, 4); /* (TEST/DEBUG) */
1296
1297 return mem;
1298 }
1299
1300 /*
1301 * vm_page_release:
1302 *
1303 * Return a page to the free list.
1304 */
1305
1306 void
1307 vm_page_release(
1308 register vm_page_t mem)
1309 {
1310
1311 #if 0
1312 unsigned int pindex;
1313 phys_entry *physent;
1314
1315 physent = mapping_phys_lookup(mem->phys_page, &pindex); /* (BRINGUP) */
1316 if(physent->ppLink & ppN) { /* (BRINGUP) */
1317 panic("vm_page_release: already released - %08X %08X\n", mem, mem->phys_page);
1318 }
1319 physent->ppLink = physent->ppLink | ppN; /* (BRINGUP) */
1320 #endif
1321
1322 assert(!mem->private && !mem->fictitious);
1323
1324 // dbgLog(mem->phys_page, vm_page_free_count, vm_page_wire_count, 5); /* (TEST/DEBUG) */
1325
1326 mutex_lock(&vm_page_queue_free_lock);
1327 if (mem->free)
1328 panic("vm_page_release");
1329 mem->free = TRUE;
1330 mem->pageq.next = (queue_entry_t) vm_page_queue_free;
1331 vm_page_queue_free = mem;
1332 vm_page_free_count++;
1333
1334 /*
1335 * Check if we should wake up someone waiting for a page.
1336 * But don't bother waking them unless they can allocate.
1337 *
1338 * We wakeup only one thread, to prevent starvation.
1339 * Because the scheduling system handles wait queues FIFO,
1340 * if we wakeup all waiting threads, one greedy thread
1341 * can starve multiple niceguy threads. When the threads
1342 * all wake up, the greedy thread runs first, grabs the page,
1343 * and waits for another page. It will be the first to run
1344 * when the next page is freed.
1345 *
1346 * However, there is a slight danger here.
1347 * The thread we wake might not use the free page.
1348 * Then the other threads could wait indefinitely
1349 * while the page goes unused. To forestall this,
1350 * the pageout daemon will keep making free pages
1351 * as long as vm_page_free_wanted is non-zero.
1352 */
1353
1354 if ((vm_page_free_wanted > 0) &&
1355 (vm_page_free_count >= vm_page_free_reserved)) {
1356 vm_page_free_wanted--;
1357 thread_wakeup_one((event_t) &vm_page_free_count);
1358 }
1359
1360 mutex_unlock(&vm_page_queue_free_lock);
1361 }
1362
1363 #define VM_PAGEOUT_DEADLOCK_TIMEOUT 3
1364
1365 /*
1366 * vm_page_wait:
1367 *
1368 * Wait for a page to become available.
1369 * If there are plenty of free pages, then we don't sleep.
1370 *
1371 * Returns:
1372 * TRUE: There may be another page, try again
1373 * FALSE: We were interrupted out of our wait, don't try again
1374 */
1375
1376 boolean_t
1377 vm_page_wait(
1378 int interruptible )
1379 {
1380 /*
1381 * We can't use vm_page_free_reserved to make this
1382 * determination. Consider: some thread might
1383 * need to allocate two pages. The first allocation
1384 * succeeds, the second fails. After the first page is freed,
1385 * a call to vm_page_wait must really block.
1386 */
1387 uint64_t abstime;
1388 kern_return_t wait_result;
1389 kern_return_t kr;
1390 int need_wakeup = 0;
1391
1392 mutex_lock(&vm_page_queue_free_lock);
1393 if (vm_page_free_count < vm_page_free_target) {
1394 if (vm_page_free_wanted++ == 0)
1395 need_wakeup = 1;
1396 wait_result = assert_wait((event_t)&vm_page_free_count,
1397 interruptible);
1398 mutex_unlock(&vm_page_queue_free_lock);
1399 counter(c_vm_page_wait_block++);
1400
1401 if (need_wakeup)
1402 thread_wakeup((event_t)&vm_page_free_wanted);
1403
1404 if (wait_result == THREAD_WAITING) {
1405 clock_interval_to_absolutetime_interval(
1406 VM_PAGEOUT_DEADLOCK_TIMEOUT,
1407 NSEC_PER_SEC, &abstime);
1408 clock_absolutetime_interval_to_deadline(
1409 abstime, &abstime);
1410 thread_set_timer_deadline(abstime);
1411 wait_result = thread_block(THREAD_CONTINUE_NULL);
1412
1413 if(wait_result == THREAD_TIMED_OUT) {
1414 kr = vm_pageout_emergency_availability_request();
1415 return TRUE;
1416 } else {
1417 thread_cancel_timer();
1418 }
1419 }
1420
1421 return(wait_result == THREAD_AWAKENED);
1422 } else {
1423 mutex_unlock(&vm_page_queue_free_lock);
1424 return TRUE;
1425 }
1426 }
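/*
* Illustrative (non-compiled) sketch: callers that can tolerate blocking
* typically loop on vm_page_grab()/vm_page_wait(), honoring the FALSE
* return (interrupted wait). Compare the THREAD_UNINT loop in
* vm_page_part_zero_fill() later in this file; KERN_ABORTED below is
* just a placeholder for a caller-specific error.
*/
#if 0
vm_page_t m;

while ((m = vm_page_grab()) == VM_PAGE_NULL) {
if (!vm_page_wait(THREAD_INTERRUPTIBLE))
return KERN_ABORTED; /* wait was interrupted; give up */
}
#endif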
1427
1428 /*
1429 * vm_page_alloc:
1430 *
1431 * Allocate and return a memory cell associated
1432 * with this VM object/offset pair.
1433 *
1434 * Object must be locked.
1435 */
1436
1437 vm_page_t
1438 vm_page_alloc(
1439 vm_object_t object,
1440 vm_object_offset_t offset)
1441 {
1442 register vm_page_t mem;
1443
1444 mem = vm_page_grab();
1445 if (mem == VM_PAGE_NULL)
1446 return VM_PAGE_NULL;
1447
1448 vm_page_insert(mem, object, offset);
1449
1450 return(mem);
1451 }
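/*
* Illustrative (non-compiled) sketch of the usual calling convention:
* the object stays locked across the call, and a VM_PAGE_NULL return
* means the free list is low, so the caller unlocks, waits and retries.
* "object" and "offset" here stand for the caller's own values.
*/
#if 0
vm_object_lock(object);
while ((m = vm_page_alloc(object, offset)) == VM_PAGE_NULL) {
vm_object_unlock(object);
VM_PAGE_WAIT();
vm_object_lock(object);
}
/* m comes back busy (see vm_page_template); the caller clears m->busy when done */
#endif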
1452
1453 counter(unsigned int c_laundry_pages_freed = 0;)
1454
1455 int vm_pagein_cluster_unused = 0;
1456 boolean_t vm_page_free_verify = FALSE;
1457 /*
1458 * vm_page_free:
1459 *
1460 * Returns the given page to the free list,
1461 * disassociating it with any VM object.
1462 *
1463 * Object and page queues must be locked prior to entry.
1464 */
1465 void
1466 vm_page_free(
1467 register vm_page_t mem)
1468 {
1469 vm_object_t object = mem->object;
1470
1471 assert(!mem->free);
1472 assert(!mem->cleaning);
1473 assert(!mem->pageout);
1474 assert(!vm_page_free_verify || pmap_verify_free(mem->phys_page));
1475
1476 if (mem->tabled)
1477 vm_page_remove(mem); /* clears tabled, object, offset */
1478 VM_PAGE_QUEUES_REMOVE(mem); /* clears active or inactive */
1479
1480 if (mem->clustered) {
1481 mem->clustered = FALSE;
1482 vm_pagein_cluster_unused++;
1483 }
1484
1485 if (mem->wire_count) {
1486 if (!mem->private && !mem->fictitious)
1487 vm_page_wire_count--;
1488 mem->wire_count = 0;
1489 assert(!mem->gobbled);
1490 } else if (mem->gobbled) {
1491 if (!mem->private && !mem->fictitious)
1492 vm_page_wire_count--;
1493 vm_page_gobble_count--;
1494 }
1495 mem->gobbled = FALSE;
1496
1497 if (mem->laundry) {
1498 extern int vm_page_laundry_min;
1499 vm_page_laundry_count--;
1500 mem->laundry = FALSE; /* laundry is now clear */
1501 counter(++c_laundry_pages_freed);
1502 if (vm_page_laundry_count < vm_page_laundry_min) {
1503 vm_page_laundry_min = 0;
1504 thread_wakeup((event_t) &vm_page_laundry_count);
1505 }
1506 }
1507
1508 mem->discard_request = FALSE;
1509
1510 PAGE_WAKEUP(mem); /* clears wanted */
1511
1512 if (mem->absent)
1513 vm_object_absent_release(object);
1514
1515 /* Some of these may be unnecessary */
1516 mem->page_lock = 0;
1517 mem->unlock_request = 0;
1518 mem->busy = TRUE;
1519 mem->absent = FALSE;
1520 mem->error = FALSE;
1521 mem->dirty = FALSE;
1522 mem->precious = FALSE;
1523 mem->reference = FALSE;
1524
1525 mem->page_error = KERN_SUCCESS;
1526
1527 if (mem->private) {
1528 mem->private = FALSE;
1529 mem->fictitious = TRUE;
1530 mem->phys_page = vm_page_fictitious_addr;
1531 }
1532 if (mem->fictitious) {
1533 vm_page_release_fictitious(mem);
1534 } else {
1535 /* depends on the queues lock */
1536 if(mem->zero_fill) {
1537 vm_zf_count-=1;
1538 mem->zero_fill = FALSE;
1539 }
1540 vm_page_init(mem, mem->phys_page);
1541 vm_page_release(mem);
1542 }
1543 }
1544
1545 /*
1546 * vm_page_wire:
1547 *
1548 * Mark this page as wired down by yet
1549 * another map, removing it from paging queues
1550 * as necessary.
1551 *
1552 * The page's object and the page queues must be locked.
1553 */
1554 void
1555 vm_page_wire(
1556 register vm_page_t mem)
1557 {
1558
1559 // dbgLog(current_act(), mem->offset, mem->object, 1); /* (TEST/DEBUG) */
1560
1561 VM_PAGE_CHECK(mem);
1562
1563 if (mem->wire_count == 0) {
1564 VM_PAGE_QUEUES_REMOVE(mem);
1565 if (!mem->private && !mem->fictitious && !mem->gobbled)
1566 vm_page_wire_count++;
1567 if (mem->gobbled)
1568 vm_page_gobble_count--;
1569 mem->gobbled = FALSE;
1570 if(mem->zero_fill) {
1571 /* depends on the queues lock */
1572 vm_zf_count-=1;
1573 mem->zero_fill = FALSE;
1574 }
1575 }
1576 assert(!mem->gobbled);
1577 mem->wire_count++;
1578 }
1579
1580 /*
1581 * vm_page_gobble:
1582 *
1583 * Mark this page as consumed by the vm/ipc/xmm subsystems.
1584 *
1585 * Called only for freshly vm_page_grab()ed pages - w/ nothing locked.
1586 */
1587 void
1588 vm_page_gobble(
1589 register vm_page_t mem)
1590 {
1591 vm_page_lock_queues();
1592 VM_PAGE_CHECK(mem);
1593
1594 assert(!mem->gobbled);
1595 assert(mem->wire_count == 0);
1596
1597 if (!mem->gobbled && mem->wire_count == 0) {
1598 if (!mem->private && !mem->fictitious)
1599 vm_page_wire_count++;
1600 }
1601 vm_page_gobble_count++;
1602 mem->gobbled = TRUE;
1603 vm_page_unlock_queues();
1604 }
1605
1606 /*
1607 * vm_page_unwire:
1608 *
1609 * Release one wiring of this page, potentially
1610 * enabling it to be paged again.
1611 *
1612 * The page's object and the page queues must be locked.
1613 */
1614 void
1615 vm_page_unwire(
1616 register vm_page_t mem)
1617 {
1618
1619 // dbgLog(current_act(), mem->offset, mem->object, 0); /* (TEST/DEBUG) */
1620
1621 VM_PAGE_CHECK(mem);
1622 assert(mem->wire_count > 0);
1623
1624 if (--mem->wire_count == 0) {
1625 assert(!mem->private && !mem->fictitious);
1626 vm_page_wire_count--;
1627 queue_enter(&vm_page_queue_active, mem, vm_page_t, pageq);
1628 vm_page_active_count++;
1629 mem->active = TRUE;
1630 mem->reference = TRUE;
1631 }
1632 }
1633
1634 /*
1635 * vm_page_deactivate:
1636 *
1637 * Returns the given page to the inactive list,
1638 * indicating that no physical maps have access
1639 * to this page. [Used by the physical mapping system.]
1640 *
1641 * The page queues must be locked.
1642 */
1643 void
1644 vm_page_deactivate(
1645 register vm_page_t m)
1646 {
1647 VM_PAGE_CHECK(m);
1648
1649 // dbgLog(m->phys_page, vm_page_free_count, vm_page_wire_count, 6); /* (TEST/DEBUG) */
1650
1651 /*
1652 * This page is no longer very interesting. If it was
1653 * interesting (active or inactive/referenced), then we
1654 * clear the reference bit and (re)enter it in the
1655 * inactive queue. Note wired pages should not have
1656 * their reference bit cleared.
1657 */
1658 if (m->gobbled) { /* can this happen? */
1659 assert(m->wire_count == 0);
1660 if (!m->private && !m->fictitious)
1661 vm_page_wire_count--;
1662 vm_page_gobble_count--;
1663 m->gobbled = FALSE;
1664 }
1665 if (m->private || (m->wire_count != 0))
1666 return;
1667 if (m->active || (m->inactive && m->reference)) {
1668 if (!m->fictitious && !m->absent)
1669 pmap_clear_reference(m->phys_page);
1670 m->reference = FALSE;
1671 VM_PAGE_QUEUES_REMOVE(m);
1672 }
1673 if (m->wire_count == 0 && !m->inactive) {
1674 m->page_ticket = vm_page_ticket;
1675 vm_page_ticket_roll++;
1676
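/*
* Each batch of VM_PAGE_TICKETS_IN_ROLL deactivations shares one ticket
* value; the ticket then advances, wrapping at VM_PAGE_TICKET_ROLL_IDS,
* giving the pageout code a coarse notion of the relative age of pages
* on the inactive queues.
*/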
1677 if(vm_page_ticket_roll == VM_PAGE_TICKETS_IN_ROLL) {
1678 vm_page_ticket_roll = 0;
1679 if(vm_page_ticket == VM_PAGE_TICKET_ROLL_IDS)
1680 vm_page_ticket= 0;
1681 else
1682 vm_page_ticket++;
1683 }
1684
1685 if(m->zero_fill) {
1686 queue_enter(&vm_page_queue_zf, m, vm_page_t, pageq);
1687 } else {
1688 queue_enter(&vm_page_queue_inactive,
1689 m, vm_page_t, pageq);
1690 }
1691
1692 m->inactive = TRUE;
1693 if (!m->fictitious)
1694 vm_page_inactive_count++;
1695 }
1696 }
1697
1698 /*
1699 * vm_page_activate:
1700 *
1701 * Put the specified page on the active list (if appropriate).
1702 *
1703 * The page queues must be locked.
1704 */
1705
1706 void
1707 vm_page_activate(
1708 register vm_page_t m)
1709 {
1710 VM_PAGE_CHECK(m);
1711
1712 if (m->gobbled) {
1713 assert(m->wire_count == 0);
1714 if (!m->private && !m->fictitious)
1715 vm_page_wire_count--;
1716 vm_page_gobble_count--;
1717 m->gobbled = FALSE;
1718 }
1719 if (m->private)
1720 return;
1721
1722 if (m->inactive) {
1723 if (m->zero_fill) {
1724 queue_remove(&vm_page_queue_zf, m, vm_page_t, pageq);
1725 } else {
1726 queue_remove(&vm_page_queue_inactive,
1727 m, vm_page_t, pageq);
1728 }
1729 if (!m->fictitious)
1730 vm_page_inactive_count--;
1731 m->inactive = FALSE;
1732 }
1733 if (m->wire_count == 0) {
1734 if (m->active)
1735 panic("vm_page_activate: already active");
1736
1737 queue_enter(&vm_page_queue_active, m, vm_page_t, pageq);
1738 m->active = TRUE;
1739 m->reference = TRUE;
1740 if (!m->fictitious)
1741 vm_page_active_count++;
1742 }
1743 }
1744
1745 /*
1746 * vm_page_part_zero_fill:
1747 *
1748 * Zero-fill a part of the page.
1749 */
1750 void
1751 vm_page_part_zero_fill(
1752 vm_page_t m,
1753 vm_offset_t m_pa,
1754 vm_size_t len)
1755 {
1756 vm_page_t tmp;
1757
1758 VM_PAGE_CHECK(m);
1759 #ifdef PMAP_ZERO_PART_PAGE_IMPLEMENTED
1760 pmap_zero_part_page(m->phys_page, m_pa, len);
1761 #else
1762 while (1) {
1763 tmp = vm_page_grab();
1764 if (tmp == VM_PAGE_NULL) {
1765 vm_page_wait(THREAD_UNINT);
1766 continue;
1767 }
1768 break;
1769 }
1770 vm_page_zero_fill(tmp);
1771 if(m_pa != 0) {
1772 vm_page_part_copy(m, 0, tmp, 0, m_pa);
1773 }
1774 if((m_pa + len) < PAGE_SIZE) {
1775 vm_page_part_copy(m, m_pa + len, tmp,
1776 m_pa + len, PAGE_SIZE - (m_pa + len));
1777 }
1778 vm_page_copy(tmp,m);
1779 vm_page_lock_queues();
1780 vm_page_free(tmp);
1781 vm_page_unlock_queues();
1782 #endif
1783
1784 }
1785
1786 /*
1787 * vm_page_zero_fill:
1788 *
1789 * Zero-fill the specified page.
1790 */
1791 void
1792 vm_page_zero_fill(
1793 vm_page_t m)
1794 {
1795 XPR(XPR_VM_PAGE,
1796 "vm_page_zero_fill, object 0x%X offset 0x%X page 0x%X\n",
1797 (integer_t)m->object, (integer_t)m->offset, (integer_t)m, 0,0);
1798
1799 VM_PAGE_CHECK(m);
1800
1801 // dbgTrace(0xAEAEAEAE, m->phys_page, 0); /* (BRINGUP) */
1802 pmap_zero_page(m->phys_page);
1803 }
1804
1805 /*
1806 * vm_page_part_copy:
1807 *
1808 * copy part of one page to another
1809 */
1810
1811 void
1812 vm_page_part_copy(
1813 vm_page_t src_m,
1814 vm_offset_t src_pa,
1815 vm_page_t dst_m,
1816 vm_offset_t dst_pa,
1817 vm_size_t len)
1818 {
1819 VM_PAGE_CHECK(src_m);
1820 VM_PAGE_CHECK(dst_m);
1821
1822 pmap_copy_part_page(src_m->phys_page, src_pa,
1823 dst_m->phys_page, dst_pa, len);
1824 }
1825
1826 /*
1827 * vm_page_copy:
1828 *
1829 * Copy one page to another
1830 */
1831
1832 void
1833 vm_page_copy(
1834 vm_page_t src_m,
1835 vm_page_t dest_m)
1836 {
1837 XPR(XPR_VM_PAGE,
1838 "vm_page_copy, object 0x%X offset 0x%X to object 0x%X offset 0x%X\n",
1839 (integer_t)src_m->object, src_m->offset,
1840 (integer_t)dest_m->object, dest_m->offset,
1841 0);
1842
1843 VM_PAGE_CHECK(src_m);
1844 VM_PAGE_CHECK(dest_m);
1845
1846 pmap_copy_page(src_m->phys_page, dest_m->phys_page);
1847 }
1848
1849 /*
1850 * Currently, this is a primitive allocator that grabs
1851 * free pages from the system, sorts them by physical
1852 * address, then searches for a region large enough to
1853 * satisfy the user's request.
1854 *
1855 * Additional levels of effort:
1856 * + steal clean active/inactive pages
1857 * + force pageouts of dirty pages
1858 * + maintain a map of available physical
1859 * memory
1860 */
1861
1862 #define SET_NEXT_PAGE(m,n) ((m)->pageq.next = (struct queue_entry *) (n))
1863
1864 #if MACH_ASSERT
1865 int vm_page_verify_contiguous(
1866 vm_page_t pages,
1867 unsigned int npages);
1868 #endif /* MACH_ASSERT */
1869
1870 cpm_counter(unsigned int vpfls_pages_handled = 0;)
1871 cpm_counter(unsigned int vpfls_head_insertions = 0;)
1872 cpm_counter(unsigned int vpfls_tail_insertions = 0;)
1873 cpm_counter(unsigned int vpfls_general_insertions = 0;)
1874 cpm_counter(unsigned int vpfc_failed = 0;)
1875 cpm_counter(unsigned int vpfc_satisfied = 0;)
1876
1877 /*
1878 * Sort free list by ascending physical address,
1879 * using a not-particularly-bright sort algorithm.
1880 * Caller holds vm_page_queue_free_lock.
1881 */
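/*
* (The "not-particularly-bright" algorithm is an insertion sort with
* fast paths for new head and tail elements, so it is O(n^2) in the
* worst case; its only caller in this file is cpm_allocate() below.)
*/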
1882 static void
1883 vm_page_free_list_sort(void)
1884 {
1885 vm_page_t sort_list;
1886 vm_page_t sort_list_end;
1887 vm_page_t m, m1, *prev, next_m;
1888 vm_offset_t addr;
1889 #if MACH_ASSERT
1890 unsigned int npages;
1891 int old_free_count;
1892 #endif /* MACH_ASSERT */
1893
1894 #if MACH_ASSERT
1895 /*
1896 * Verify pages in the free list..
1897 */
1898 npages = 0;
1899 for (m = vm_page_queue_free; m != VM_PAGE_NULL; m = NEXT_PAGE(m))
1900 ++npages;
1901 if (npages != vm_page_free_count)
1902 panic("vm_sort_free_list: prelim: npages %d free_count %d",
1903 npages, vm_page_free_count);
1904 old_free_count = vm_page_free_count;
1905 #endif /* MACH_ASSERT */
1906
1907 sort_list = sort_list_end = vm_page_queue_free;
1908 m = NEXT_PAGE(vm_page_queue_free);
1909 SET_NEXT_PAGE(vm_page_queue_free, VM_PAGE_NULL);
1910 cpm_counter(vpfls_pages_handled = 0);
1911 while (m != VM_PAGE_NULL) {
1912 cpm_counter(++vpfls_pages_handled);
1913 next_m = NEXT_PAGE(m);
1914 if (m->phys_page < sort_list->phys_page) {
1915 cpm_counter(++vpfls_head_insertions);
1916 SET_NEXT_PAGE(m, sort_list);
1917 sort_list = m;
1918 } else if (m->phys_page > sort_list_end->phys_page) {
1919 cpm_counter(++vpfls_tail_insertions);
1920 SET_NEXT_PAGE(sort_list_end, m);
1921 SET_NEXT_PAGE(m, VM_PAGE_NULL);
1922 sort_list_end = m;
1923 } else {
1924 cpm_counter(++vpfls_general_insertions);
1925 /* general sorted list insertion */
1926 prev = &sort_list;
1927 for (m1=sort_list; m1!=VM_PAGE_NULL; m1=NEXT_PAGE(m1)) {
1928 if (m1->phys_page > m->phys_page) {
1929 if (*prev != m1)
1930 panic("vm_sort_free_list: ugh");
1931 SET_NEXT_PAGE(m, *prev);
1932 *prev = m;
1933 break;
1934 }
1935 prev = (vm_page_t *) &m1->pageq.next;
1936 }
1937 }
1938 m = next_m;
1939 }
1940
1941 #if MACH_ASSERT
1942 /*
1943 * Verify that pages are sorted into ascending order.
1944 */
1945 for (m = sort_list, npages = 0; m != VM_PAGE_NULL; m = NEXT_PAGE(m)) {
1946 if (m != sort_list &&
1947 m->phys_page <= addr) {
1948 printf("m 0x%x addr 0x%x\n", m, addr);
1949 panic("vm_sort_free_list");
1950 }
1951 addr = m->phys_page;
1952 ++npages;
1953 }
1954 if (old_free_count != vm_page_free_count)
1955 panic("vm_sort_free_list: old_free %d free_count %d",
1956 old_free_count, vm_page_free_count);
1957 if (npages != vm_page_free_count)
1958 panic("vm_sort_free_list: npages %d free_count %d",
1959 npages, vm_page_free_count);
1960 #endif /* MACH_ASSERT */
1961
1962 vm_page_queue_free = sort_list;
1963 }
1964
1965
1966 #if MACH_ASSERT
1967 /*
1968 * Check that the list of pages is ordered by
1969 * ascending physical address and has no holes.
1970 */
1971 int
1972 vm_page_verify_contiguous(
1973 vm_page_t pages,
1974 unsigned int npages)
1975 {
1976 register vm_page_t m;
1977 unsigned int page_count;
1978 vm_offset_t prev_addr;
1979
1980 prev_addr = pages->phys_page;
1981 page_count = 1;
1982 for (m = NEXT_PAGE(pages); m != VM_PAGE_NULL; m = NEXT_PAGE(m)) {
1983 if (m->phys_page != prev_addr + 1) {
1984 printf("m 0x%x prev_addr 0x%x, current addr 0x%x\n",
1985 m, prev_addr, m->phys_page);
1986 printf("pages 0x%x page_count %d\n", pages, page_count);
1987 panic("vm_page_verify_contiguous: not contiguous!");
1988 }
1989 prev_addr = m->phys_page;
1990 ++page_count;
1991 }
1992 if (page_count != npages) {
1993 printf("pages 0x%x actual count 0x%x but requested 0x%x\n",
1994 pages, page_count, npages);
1995 panic("vm_page_verify_contiguous: count error");
1996 }
1997 return 1;
1998 }
1999 #endif /* MACH_ASSERT */
2000
2001
2002 /*
2003 * Find a region large enough to contain at least npages
2004 * of contiguous physical memory.
2005 *
2006 * Requirements:
2007 * - Called while holding vm_page_queue_free_lock.
2008 * - Doesn't respect vm_page_free_reserved; caller
2009 * must not ask for more pages than are legal to grab.
2010 *
2011 * Returns a pointer to a list of gobbled pages or VM_PAGE_NULL.
2012 *
2013 */
2014 static vm_page_t
2015 vm_page_find_contiguous(
2016 int npages)
2017 {
2018 vm_page_t m, *contig_prev, *prev_ptr;
2019 ppnum_t prev_page;
2020 unsigned int contig_npages;
2021 vm_page_t list;
2022
2023 if (npages < 1)
2024 return VM_PAGE_NULL;
2025
2026 prev_page = vm_page_queue_free->phys_page - 2;
2027 prev_ptr = &vm_page_queue_free;
2028 for (m = vm_page_queue_free; m != VM_PAGE_NULL; m = NEXT_PAGE(m)) {
2029
2030 if (m->phys_page != prev_page + 1) {
2031 /*
2032 * Whoops! Pages aren't contiguous. Start over.
2033 */
2034 contig_npages = 0;
2035 contig_prev = prev_ptr;
2036 }
2037
2038 if (++contig_npages == npages) {
2039 /*
2040 * Chop these pages out of the free list.
2041 * Mark them all as gobbled.
2042 */
2043 list = *contig_prev;
2044 *contig_prev = NEXT_PAGE(m);
2045 SET_NEXT_PAGE(m, VM_PAGE_NULL);
2046 for (m = list; m != VM_PAGE_NULL; m = NEXT_PAGE(m)) {
2047 assert(m->free);
2048 assert(!m->wanted);
2049 m->free = FALSE;
2050 m->no_isync = TRUE;
2051 m->gobbled = TRUE;
2052 }
2053 vm_page_free_count -= npages;
2054 if (vm_page_free_count < vm_page_free_count_minimum)
2055 vm_page_free_count_minimum = vm_page_free_count;
2056 vm_page_wire_count += npages;
2057 vm_page_gobble_count += npages;
2058 cpm_counter(++vpfc_satisfied);
2059 assert(vm_page_verify_contiguous(list, contig_npages));
2060 return list;
2061 }
2062
2063 assert(contig_npages < npages);
2064 prev_ptr = (vm_page_t *) &m->pageq.next;
2065 prev_page = m->phys_page;
2066 }
2067 cpm_counter(++vpfc_failed);
2068 return VM_PAGE_NULL;
2069 }
2070
2071 /*
2072 * Allocate a list of contiguous, wired pages.
2073 */
2074 kern_return_t
2075 cpm_allocate(
2076 vm_size_t size,
2077 vm_page_t *list,
2078 boolean_t wire)
2079 {
2080 register vm_page_t m;
2081 vm_page_t *first_contig;
2082 vm_page_t free_list, pages;
2083 unsigned int npages, n1pages;
2084 int vm_pages_available;
2085
2086 if (size % page_size != 0)
2087 return KERN_INVALID_ARGUMENT;
2088
2089 vm_page_lock_queues();
2090 mutex_lock(&vm_page_queue_free_lock);
2091
2092 /*
2093 * Should also take active and inactive pages
2094 * into account... One day...
2095 */
2096 vm_pages_available = vm_page_free_count - vm_page_free_reserved;
2097
2098 if (size > vm_pages_available * page_size) {
2099 mutex_unlock(&vm_page_queue_free_lock);
2100 return KERN_RESOURCE_SHORTAGE;
2101 }
2102
2103 vm_page_free_list_sort();
2104
2105 npages = size / page_size;
2106
2107 /*
2108 * Obtain a pointer to a subset of the free
2109 * list large enough to satisfy the request;
2110 * the region will be physically contiguous.
2111 */
2112 pages = vm_page_find_contiguous(npages);
2113 if (pages == VM_PAGE_NULL) {
2114 mutex_unlock(&vm_page_queue_free_lock);
2115 vm_page_unlock_queues();
2116 return KERN_NO_SPACE;
2117 }
2118
2119 mutex_unlock(&vm_page_queue_free_lock);
2120
2121 /*
2122 * Walk the returned list, wiring the pages.
2123 */
2124 if (wire == TRUE)
2125 for (m = pages; m != VM_PAGE_NULL; m = NEXT_PAGE(m)) {
2126 /*
2127 * Essentially inlined vm_page_wire.
2128 */
2129 assert(!m->active);
2130 assert(!m->inactive);
2131 assert(!m->private);
2132 assert(!m->fictitious);
2133 assert(m->wire_count == 0);
2134 assert(m->gobbled);
2135 m->gobbled = FALSE;
2136 m->wire_count++;
2137 --vm_page_gobble_count;
2138 }
2139 vm_page_unlock_queues();
2140
2141 /*
2142 * The CPM pages should now be available and
2143 * ordered by ascending physical address.
2144 */
2145 assert(vm_page_verify_contiguous(pages, npages));
2146
2147 *list = pages;
2148 return KERN_SUCCESS;
2149 }
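/*
* Illustrative (non-compiled) sketch: a caller asking for four
* contiguous, wired pages. On success the pages come back as a
* NEXT_PAGE()-linked list in ascending physical address order (see the
* vm_page_verify_contiguous() assertion above).
*/
#if 0
vm_page_t pages;
kern_return_t kr;

kr = cpm_allocate((vm_size_t)(4 * PAGE_SIZE), &pages, TRUE);
if (kr == KERN_SUCCESS) {
/* pages, NEXT_PAGE(pages), ... are physically contiguous and wired */
}
#endif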
2150
2151
2152 #include <mach_vm_debug.h>
2153 #if MACH_VM_DEBUG
2154
2155 #include <mach_debug/hash_info.h>
2156 #include <vm/vm_debug.h>
2157
2158 /*
2159 * Routine: vm_page_info
2160 * Purpose:
2161 * Return information about the global VP table.
2162 * Fills the buffer with as much information as possible
2163 * and returns the desired size of the buffer.
2164 * Conditions:
2165 * Nothing locked. The caller should provide
2166 * possibly-pageable memory.
2167 */
2168
2169 unsigned int
2170 vm_page_info(
2171 hash_info_bucket_t *info,
2172 unsigned int count)
2173 {
2174 int i;
2175
2176 if (vm_page_bucket_count < count)
2177 count = vm_page_bucket_count;
2178
2179 for (i = 0; i < count; i++) {
2180 vm_page_bucket_t *bucket = &vm_page_buckets[i];
2181 unsigned int bucket_count = 0;
2182 vm_page_t m;
2183
2184 simple_lock(&vm_page_bucket_lock);
2185 for (m = bucket->pages; m != VM_PAGE_NULL; m = m->next)
2186 bucket_count++;
2187 simple_unlock(&vm_page_bucket_lock);
2188
2189 /* don't touch pageable memory while holding locks */
2190 info[i].hib_count = bucket_count;
2191 }
2192
2193 return vm_page_bucket_count;
2194 }
2195 #endif /* MACH_VM_DEBUG */
2196
2197 #include <mach_kdb.h>
2198 #if MACH_KDB
2199
2200 #include <ddb/db_output.h>
2201 #include <vm/vm_print.h>
2202 #define printf kdbprintf
2203
2204 /*
2205 * Routine: vm_page_print [exported]
2206 */
2207 void
2208 vm_page_print(
2209 vm_page_t p)
2210 {
2211 extern db_indent;
2212
2213 iprintf("page 0x%x\n", p);
2214
2215 db_indent += 2;
2216
2217 iprintf("object=0x%x", p->object);
2218 printf(", offset=0x%x", p->offset);
2219 printf(", wire_count=%d", p->wire_count);
2220
2221 iprintf("%sinactive, %sactive, %sgobbled, %slaundry, %sfree, %sref, %sdiscard\n",
2222 (p->inactive ? "" : "!"),
2223 (p->active ? "" : "!"),
2224 (p->gobbled ? "" : "!"),
2225 (p->laundry ? "" : "!"),
2226 (p->free ? "" : "!"),
2227 (p->reference ? "" : "!"),
2228 (p->discard_request ? "" : "!"));
2229 iprintf("%sbusy, %swanted, %stabled, %sfictitious, %sprivate, %sprecious\n",
2230 (p->busy ? "" : "!"),
2231 (p->wanted ? "" : "!"),
2232 (p->tabled ? "" : "!"),
2233 (p->fictitious ? "" : "!"),
2234 (p->private ? "" : "!"),
2235 (p->precious ? "" : "!"));
2236 iprintf("%sabsent, %serror, %sdirty, %scleaning, %spageout, %sclustered\n",
2237 (p->absent ? "" : "!"),
2238 (p->error ? "" : "!"),
2239 (p->dirty ? "" : "!"),
2240 (p->cleaning ? "" : "!"),
2241 (p->pageout ? "" : "!"),
2242 (p->clustered ? "" : "!"));
2243 iprintf("%slock_supplied, %soverwriting, %srestart, %sunusual\n",
2244 (p->lock_supplied ? "" : "!"),
2245 (p->overwriting ? "" : "!"),
2246 (p->restart ? "" : "!"),
2247 (p->unusual ? "" : "!"));
2248
2249 iprintf("phys_page=0x%x", p->phys_page);
2250 printf(", page_error=0x%x", p->page_error);
2251 printf(", page_lock=0x%x", p->page_lock);
2252 printf(", unlock_request=%d\n", p->unlock_request);
2253
2254 db_indent -= 2;
2255 }
2256 #endif /* MACH_KDB */