/*
 * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */
/*
 *	File:	vm/vm_page.c
 *	Author:	Avadis Tevanian, Jr., Michael Wayne Young
 *
 *	Resident memory management module.
 */

#include <debug.h>

#include <mach/clock_types.h>
#include <mach/vm_prot.h>
#include <mach/vm_statistics.h>
#include <kern/counters.h>
#include <kern/sched_prim.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/zalloc.h>
#include <kern/xpr.h>
#include <vm/pmap.h>
#include <vm/vm_init.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_kern.h>			/* kernel_memory_allocate() */
#include <kern/misc_protos.h>
#include <zone_debug.h>
#include <vm/cpm.h>
#include <ppc/mappings.h>		/* (BRINGUP) */
#include <pexpert/pexpert.h>		/* (BRINGUP) */

#include <vm/vm_protos.h>

/*
 *	Variables used to indicate the relative age of pages in the
 *	inactive list.
 */

unsigned int	vm_page_ticket_roll = 0;
unsigned int	vm_page_ticket = 0;

/*
 *	Associated with each page of user-allocatable memory is a
 *	page structure.
 */

/*
 *	These variables record the values returned by vm_page_bootstrap,
 *	for debugging purposes.  The implementation of pmap_steal_memory
 *	and pmap_startup here also uses them internally.
 */

vm_offset_t	virtual_space_start;
vm_offset_t	virtual_space_end;
int		vm_page_pages;

/*
 *	The vm_page_lookup() routine, which provides for fast
 *	(virtual memory object, offset) to page lookup, employs
 *	the following hash table.  The vm_page_{insert,remove}
 *	routines install and remove associations in the table.
 *	[This table is often called the virtual-to-physical,
 *	or VP, table.]
 */
typedef struct {
	vm_page_t	pages;
#if	MACH_PAGE_HASH_STATS
	int		cur_count;	/* current count */
	int		hi_count;	/* high water mark */
#endif /* MACH_PAGE_HASH_STATS */
} vm_page_bucket_t;

vm_page_bucket_t *vm_page_buckets;		/* Array of buckets */
unsigned int	vm_page_bucket_count = 0;	/* How big is array? */
unsigned int	vm_page_hash_mask;		/* Mask for hash function */
unsigned int	vm_page_hash_shift;		/* Shift for hash function */
uint32_t	vm_page_bucket_hash;		/* Basic bucket hash */
decl_simple_lock_data(,vm_page_bucket_lock)

vm_page_t
vm_page_lookup_nohint(vm_object_t object, vm_object_offset_t offset);


#if	MACH_PAGE_HASH_STATS
/* This routine is only for debug.  It is intended to be called by
 * hand by a developer using a kernel debugger.  This routine prints
 * out vm_page_hash table statistics to the kernel debug console.
 */
void
hash_debug(void)
{
	int	i;
	int	numbuckets = 0;
	int	highsum = 0;
	int	maxdepth = 0;

	for (i = 0; i < vm_page_bucket_count; i++) {
		if (vm_page_buckets[i].hi_count) {
			numbuckets++;
			highsum += vm_page_buckets[i].hi_count;
			if (vm_page_buckets[i].hi_count > maxdepth)
				maxdepth = vm_page_buckets[i].hi_count;
		}
	}
	printf("Total number of buckets: %d\n", vm_page_bucket_count);
	printf("Number used buckets:     %d = %d%%\n",
		numbuckets, 100*numbuckets/vm_page_bucket_count);
	printf("Number unused buckets:   %d = %d%%\n",
		vm_page_bucket_count - numbuckets,
		100*(vm_page_bucket_count-numbuckets)/vm_page_bucket_count);
	printf("Sum of bucket max depth: %d\n", highsum);
	printf("Average bucket depth:    %d.%2d\n",
		highsum/vm_page_bucket_count,
		highsum%vm_page_bucket_count);
	printf("Maximum bucket depth:    %d\n", maxdepth);
}
#endif /* MACH_PAGE_HASH_STATS */

/*
 *	The virtual page size is currently implemented as a runtime
 *	variable, but is constant once initialized using vm_set_page_size.
 *	This initialization must be done in the machine-dependent
 *	bootstrap sequence, before calling other machine-independent
 *	initializations.
 *
 *	All references to the virtual page size outside this
 *	module must use the PAGE_SIZE, PAGE_MASK and PAGE_SHIFT
 *	constants.
 */
vm_size_t	page_size  = PAGE_SIZE;
vm_size_t	page_mask  = PAGE_MASK;
int		page_shift = PAGE_SHIFT;

/*
 *	Resident page structures are initialized from
 *	a template (see vm_page_alloc).
 *
 *	When adding a new field to the virtual memory
 *	object structure, be sure to add initialization
 *	(see vm_page_bootstrap).
 */
struct vm_page	vm_page_template;

/*
 *	Resident pages that represent real memory
 *	are allocated from a free list.
 */
vm_page_t	vm_page_queue_free;
vm_page_t	vm_page_queue_fictitious;
unsigned int	vm_page_free_wanted;
unsigned int	vm_page_free_count;
unsigned int	vm_page_fictitious_count;

unsigned int	vm_page_free_count_minimum;	/* debugging */

/*
 *	Occasionally, the virtual memory system uses
 *	resident page structures that do not refer to
 *	real pages, for example to leave a page with
 *	important state information in the VP table.
 *
 *	These page structures are allocated the way
 *	most other kernel structures are.
 */
zone_t		vm_page_zone;
decl_mutex_data(,vm_page_alloc_lock)
unsigned int	io_throttle_zero_fill;

/*
 *	Fictitious pages don't have a physical address,
 *	but we must initialize phys_page to something.
 *	For debugging, this should be a strange value
 *	that the pmap module can recognize in assertions.
 */
vm_offset_t vm_page_fictitious_addr = (vm_offset_t) -1;

/*
 *	Resident page structures are also chained on
 *	queues that are used by the page replacement
 *	system (pageout daemon).  These queues are
 *	defined here, but are shared by the pageout
 *	module.  The inactive queue is broken into
 *	inactive and zf for convenience as the
 *	pageout daemon often assigns a higher
 *	affinity to zf pages.
 */
queue_head_t	vm_page_queue_active;
queue_head_t	vm_page_queue_inactive;
unsigned int	vm_page_active_count;
unsigned int	vm_page_inactive_count;
unsigned int	vm_page_wire_count;
unsigned int	vm_page_gobble_count = 0;
unsigned int	vm_page_wire_count_warning = 0;
unsigned int	vm_page_gobble_count_warning = 0;

unsigned int	vm_page_purgeable_count = 0;	/* # of pages purgeable now */
uint64_t	vm_page_purged_count = 0;	/* total count of purged pages */

ppnum_t		vm_lopage_poolstart = 0;
ppnum_t		vm_lopage_poolend = 0;
int		vm_lopage_poolsize = 0;
uint64_t	max_valid_dma_address = 0xffffffffffffffffULL;


/*
 *	Several page replacement parameters are also
 *	shared with this module, so that page allocation
 *	(done here in vm_page_alloc) can trigger the
 *	pageout daemon.
 */
unsigned int	vm_page_free_target = 0;
unsigned int	vm_page_free_min = 0;
unsigned int	vm_page_inactive_target = 0;
unsigned int	vm_page_free_reserved = 0;
unsigned int	vm_page_throttled_count = 0;

/*
 *	The VM system has a couple of heuristics for deciding
 *	that pages are "uninteresting" and should be placed
 *	on the inactive queue as likely candidates for replacement.
 *	These variables let the heuristics be controlled at run-time
 *	to make experimentation easier.
 */

boolean_t vm_page_deactivate_hint = TRUE;

/*
 *	vm_set_page_size:
 *
 *	Sets the page size, perhaps based upon the memory
 *	size.  Must be called before any use of page-size
 *	dependent functions.
 *
 *	Sets page_shift and page_mask from page_size.
 */
void
vm_set_page_size(void)
{
	page_mask = page_size - 1;

	if ((page_mask & page_size) != 0)
		panic("vm_set_page_size: page size not a power of two");

	for (page_shift = 0; ; page_shift++)
		if ((1U << page_shift) == page_size)
			break;
}
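
/*
 * Worked example (illustrative values only, not taken from the original
 * sources): with the common 4 KB page size the loop above yields
 *
 *	page_size  = 4096	(0x1000)
 *	page_mask  = 4095	(0x0FFF)
 *	page_shift = 12
 *
 * so an address is truncated to its page with (addr & ~page_mask) and
 * converted to a page number with (addr >> page_shift).
 */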

/*
 *	vm_page_bootstrap:
 *
 *	Initializes the resident memory module.
 *
 *	Allocates memory for the page cells, and
 *	for the object/offset-to-page hash table headers.
 *	Each page cell is initialized and placed on the free list.
 *	Returns the range of available kernel virtual memory.
 */

void
vm_page_bootstrap(
	vm_offset_t		*startp,
	vm_offset_t		*endp)
{
	register vm_page_t	m;
	unsigned int		i;
	unsigned int		log1;
	unsigned int		log2;
	unsigned int		size;

	/*
	 *	Initialize the vm_page template.
	 */

	m = &vm_page_template;
	m->object = VM_OBJECT_NULL;		/* reset later */
	m->offset = (vm_object_offset_t) -1;	/* reset later */
	m->wire_count = 0;

	m->pageq.next = NULL;
	m->pageq.prev = NULL;
	m->listq.next = NULL;
	m->listq.prev = NULL;

	m->inactive = FALSE;
	m->active = FALSE;
	m->laundry = FALSE;
	m->free = FALSE;
	m->no_isync = TRUE;
	m->reference = FALSE;
	m->pageout = FALSE;
	m->dump_cleaning = FALSE;
	m->list_req_pending = FALSE;

	m->busy = TRUE;
	m->wanted = FALSE;
	m->tabled = FALSE;
	m->fictitious = FALSE;
	m->private = FALSE;
	m->absent = FALSE;
	m->error = FALSE;
	m->dirty = FALSE;
	m->cleaning = FALSE;
	m->precious = FALSE;
	m->clustered = FALSE;
	m->lock_supplied = FALSE;
	m->unusual = FALSE;
	m->restart = FALSE;
	m->zero_fill = FALSE;
	m->encrypted = FALSE;

	m->phys_page = 0;			/* reset later */

	m->page_lock = VM_PROT_NONE;
	m->unlock_request = VM_PROT_NONE;
	m->page_error = KERN_SUCCESS;

	/*
	 *	Initialize the page queues.
	 */

	mutex_init(&vm_page_queue_free_lock, 0);
	mutex_init(&vm_page_queue_lock, 0);

	vm_page_queue_free = VM_PAGE_NULL;
	vm_page_queue_fictitious = VM_PAGE_NULL;
	queue_init(&vm_page_queue_active);
	queue_init(&vm_page_queue_inactive);
	queue_init(&vm_page_queue_zf);

	vm_page_free_wanted = 0;

	/*
	 *	Steal memory for the map and zone subsystems.
	 */

	vm_map_steal_memory();
	zone_steal_memory();

	/*
	 *	Allocate (and initialize) the virtual-to-physical
	 *	table hash buckets.
	 *
	 *	The number of buckets should be a power of two to
	 *	get a good hash function.  The following computation
	 *	chooses the first power of two that is greater
	 *	than the number of physical pages in the system.
	 */

	simple_lock_init(&vm_page_bucket_lock, 0);

	if (vm_page_bucket_count == 0) {
		unsigned int npages = pmap_free_pages();

		vm_page_bucket_count = 1;
		while (vm_page_bucket_count < npages)
			vm_page_bucket_count <<= 1;
	}

	vm_page_hash_mask = vm_page_bucket_count - 1;

	/*
	 *	Calculate object shift value for hashing algorithm:
	 *		O = log2(sizeof(struct vm_object))
	 *		B = log2(vm_page_bucket_count)
	 *	 	hash shifts the object left by
	 *		B/2 - O
	 */
	size = vm_page_bucket_count;
	for (log1 = 0; size > 1; log1++)
		size /= 2;
	size = sizeof(struct vm_object);
	for (log2 = 0; size > 1; log2++)
		size /= 2;
	vm_page_hash_shift = log1/2 - log2 + 1;

	vm_page_bucket_hash = 1 << ((log1 + 1) >> 1);	/* Get (ceiling of sqrt of table size) */
	vm_page_bucket_hash |= 1 << ((log1 + 1) >> 2);	/* Get (ceiling of quadroot of table size) */
	vm_page_bucket_hash |= 1;			/* The low bit must always be set to ensure a unique series */
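
	/*
	 * For example (illustrative values, not from the original source):
	 * on a machine with one million physical pages the bucket count
	 * rounds up to 2^20, so log1 = 20 and the three lines above set
	 * bits 10, 5 and 0, giving the odd multiplier 0x421 that is used
	 * by vm_page_hash() below.
	 */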

	if (vm_page_hash_mask & vm_page_bucket_count)
		printf("vm_page_bootstrap: WARNING -- strange page hash\n");

	vm_page_buckets = (vm_page_bucket_t *)
		pmap_steal_memory(vm_page_bucket_count *
				  sizeof(vm_page_bucket_t));

	for (i = 0; i < vm_page_bucket_count; i++) {
		register vm_page_bucket_t *bucket = &vm_page_buckets[i];

		bucket->pages = VM_PAGE_NULL;
#if	MACH_PAGE_HASH_STATS
		bucket->cur_count = 0;
		bucket->hi_count = 0;
#endif /* MACH_PAGE_HASH_STATS */
	}

	/*
	 *	Machine-dependent code allocates the resident page table.
	 *	It uses vm_page_init to initialize the page frames.
	 *	The code also returns to us the virtual space available
	 *	to the kernel.  We don't trust the pmap module
	 *	to get the alignment right.
	 */

	pmap_startup(&virtual_space_start, &virtual_space_end);
	virtual_space_start = round_page(virtual_space_start);
	virtual_space_end = trunc_page(virtual_space_end);

	*startp = virtual_space_start;
	*endp = virtual_space_end;

	/*
	 *	Compute the initial "wire" count.
	 *	Up until now, the pages which have been set aside are not under
	 *	the VM system's control, so although they aren't explicitly
	 *	wired, they nonetheless can't be moved.  At this moment,
	 *	all VM managed pages are "free", courtesy of pmap_startup.
	 */
	vm_page_wire_count = atop_64(max_mem) - vm_page_free_count;	/* initial value */

	printf("vm_page_bootstrap: %d free pages\n", vm_page_free_count);
	vm_page_free_count_minimum = vm_page_free_count;

	simple_lock_init(&vm_paging_lock, 0);
}

#ifndef	MACHINE_PAGES
/*
 *	We implement pmap_steal_memory and pmap_startup with the help
 *	of two simpler functions, pmap_virtual_space and pmap_next_page.
 */

void *
pmap_steal_memory(
	vm_size_t size)
{
	vm_offset_t addr, vaddr;
	ppnum_t phys_page;

	/*
	 *	Round the requested size up to a multiple of the
	 *	pointer size.
	 */

	size = (size + sizeof (void *) - 1) &~ (sizeof (void *) - 1);

	/*
	 *	If this is the first call to pmap_steal_memory,
	 *	we have to initialize ourself.
	 */

	if (virtual_space_start == virtual_space_end) {
		pmap_virtual_space(&virtual_space_start, &virtual_space_end);

		/*
		 *	The initial values must be aligned properly, and
		 *	we don't trust the pmap module to do it right.
		 */

		virtual_space_start = round_page(virtual_space_start);
		virtual_space_end = trunc_page(virtual_space_end);
	}

	/*
	 *	Allocate virtual memory for this request.
	 */

	addr = virtual_space_start;
	virtual_space_start += size;

	kprintf("pmap_steal_memory: %08X - %08X; size=%08X\n", addr, virtual_space_start, size);	/* (TEST/DEBUG) */

	/*
	 *	Allocate and map physical pages to back new virtual pages.
	 */

	for (vaddr = round_page(addr);
	     vaddr < addr + size;
	     vaddr += PAGE_SIZE) {
		if (!pmap_next_page(&phys_page))
			panic("pmap_steal_memory");

		/*
		 *	XXX Logically, these mappings should be wired,
		 *	but some pmap modules barf if they are.
		 */

		pmap_enter(kernel_pmap, vaddr, phys_page,
			   VM_PROT_READ|VM_PROT_WRITE,
			   VM_WIMG_USE_DEFAULT, FALSE);
		/*
		 * Account for newly stolen memory
		 */
		vm_page_wire_count++;

	}

	return (void *) addr;
}

void
pmap_startup(
	vm_offset_t *startp,
	vm_offset_t *endp)
{
	unsigned int i, npages, pages_initialized, fill, fillval;
	vm_page_t pages;
	ppnum_t phys_page;
	addr64_t tmpaddr;
	unsigned int num_of_lopages = 0;
	unsigned int last_index;

	/*
	 *	We calculate how many page frames we will have
	 *	and then allocate the page structures in one chunk.
	 */

	tmpaddr = (addr64_t)pmap_free_pages() * (addr64_t)PAGE_SIZE;	/* Get the amount of memory left */
	tmpaddr = tmpaddr + (addr64_t)(round_page_32(virtual_space_start) - virtual_space_start);	/* Account for any slop */
	npages = (unsigned int)(tmpaddr / (addr64_t)(PAGE_SIZE + sizeof(*pages)));	/* Figure size of all vm_page_ts, including enough to hold the vm_page_ts */

	pages = (vm_page_t) pmap_steal_memory(npages * sizeof *pages);

	/*
	 *	Initialize the page frames.
	 */
	for (i = 0, pages_initialized = 0; i < npages; i++) {
		if (!pmap_next_page(&phys_page))
			break;

		vm_page_init(&pages[i], phys_page);
		vm_page_pages++;
		pages_initialized++;
	}

	/*
	 * Check if we want to initialize pages to a known value
	 */
	fill = 0;						/* Assume no fill */
	if (PE_parse_boot_arg("fill", &fillval)) fill = 1;	/* Set fill */

	/*
	 * if vm_lopage_poolsize is non-zero, then we need to reserve
	 * a pool of pages whose addresses are less than 4G... this pool
	 * is used by drivers whose hardware can't DMA beyond 32 bits...
	 *
	 * note that I'm assuming that the page list is ascending and
	 * ordered with respect to the physical address
	 */
	for (i = 0, num_of_lopages = vm_lopage_poolsize; num_of_lopages && i < pages_initialized; num_of_lopages--, i++) {
		vm_page_t m;

		m = &pages[i];

		if (m->phys_page >= (1 << (32 - PAGE_SHIFT)))
			panic("couldn't reserve the lopage pool: not enough lo pages\n");

		if (m->phys_page < vm_lopage_poolend)
			panic("couldn't reserve the lopage pool: page list out of order\n");

		vm_lopage_poolend = m->phys_page;

		if (vm_lopage_poolstart == 0)
			vm_lopage_poolstart = m->phys_page;
		else {
			if (m->phys_page < vm_lopage_poolstart)
				panic("couldn't reserve the lopage pool: page list out of order\n");
		}

		if (fill)
			fillPage(m->phys_page, fillval);	/* Fill the page with a known value if requested at boot */

		vm_page_release(m);
	}
	last_index = i;

	// -debug code remove
	if (2 == vm_himemory_mode) {
		// free low -> high so high is preferred
		for (i = last_index + 1; i <= pages_initialized; i++) {
			if(fill) fillPage(pages[i - 1].phys_page, fillval);	/* Fill the page with a known value if requested at boot */
			vm_page_release(&pages[i - 1]);
		}
	}
	else
	// debug code remove-

	/*
	 * Release pages in reverse order so that physical pages
	 * initially get allocated in ascending addresses. This keeps
	 * the devices (which must address physical memory) happy if
	 * they require several consecutive pages.
	 */
	for (i = pages_initialized; i > last_index; i--) {
		if(fill) fillPage(pages[i - 1].phys_page, fillval);	/* Fill the page with a known value if requested at boot */
		vm_page_release(&pages[i - 1]);
	}

#if 0
	{
		vm_page_t xx, xxo, xxl;
		int j, k, l;

		j = 0;							/* (BRINGUP) */
		xxl = 0;

		for(xx = vm_page_queue_free; xx; xxl = xx, xx = xx->pageq.next) {	/* (BRINGUP) */
			j++;						/* (BRINGUP) */
			if(j > vm_page_free_count) {			/* (BRINGUP) */
				panic("pmap_startup: too many pages, xx = %08X, xxl = %08X\n", xx, xxl);
			}

			l = vm_page_free_count - j;			/* (BRINGUP) */
			k = 0;						/* (BRINGUP) */

			if(((j - 1) & 0xFFFF) == 0) kprintf("checking number %d of %d\n", j, vm_page_free_count);

			for(xxo = xx->pageq.next; xxo; xxo = xxo->pageq.next) {	/* (BRINGUP) */
				k++;
				if(k > l) panic("pmap_startup: too many in secondary check %d %d\n", k, l);
				if((xx->phys_page & 0xFFFFFFFF) == (xxo->phys_page & 0xFFFFFFFF)) {	/* (BRINGUP) */
					panic("pmap_startup: duplicate physaddr, xx = %08X, xxo = %08X\n", xx, xxo);
				}
			}
		}

		if(j != vm_page_free_count) {				/* (BRINGUP) */
			panic("pmap_startup: vm_page_free_count does not match, calc = %d, vm_page_free_count = %08X\n", j, vm_page_free_count);
		}
	}
#endif


	/*
	 *	We have to re-align virtual_space_start,
	 *	because pmap_steal_memory has been using it.
	 */

	virtual_space_start = round_page_32(virtual_space_start);

	*startp = virtual_space_start;
	*endp = virtual_space_end;
}
#endif	/* MACHINE_PAGES */

/*
 *	Routine:	vm_page_module_init
 *	Purpose:
 *		Second initialization pass, to be done after
 *		the basic VM system is ready.
 */
void
vm_page_module_init(void)
{
	vm_page_zone = zinit((vm_size_t) sizeof(struct vm_page),
			     0, PAGE_SIZE, "vm pages");

#if	ZONE_DEBUG
	zone_debug_disable(vm_page_zone);
#endif	/* ZONE_DEBUG */

	zone_change(vm_page_zone, Z_EXPAND, FALSE);
	zone_change(vm_page_zone, Z_EXHAUST, TRUE);
	zone_change(vm_page_zone, Z_FOREIGN, TRUE);

	/*
	 * Adjust zone statistics to account for the real pages allocated
	 * in vm_page_create(). [Q: is this really what we want?]
	 */
	vm_page_zone->count += vm_page_pages;
	vm_page_zone->cur_size += vm_page_pages * vm_page_zone->elem_size;

	mutex_init(&vm_page_alloc_lock, 0);
}

/*
 *	Routine:	vm_page_create
 *	Purpose:
 *		After the VM system is up, machine-dependent code
 *		may stumble across more physical memory.  For example,
 *		memory that it was reserving for a frame buffer.
 *		vm_page_create turns this memory into available pages.
 */

void
vm_page_create(
	ppnum_t start,
	ppnum_t end)
{
	ppnum_t		phys_page;
	vm_page_t	m;

	for (phys_page = start;
	     phys_page < end;
	     phys_page++) {
		while ((m = (vm_page_t) vm_page_grab_fictitious())
			    == VM_PAGE_NULL)
			vm_page_more_fictitious();

		vm_page_init(m, phys_page);
		vm_page_pages++;
		vm_page_release(m);
	}
}

/*
 *	vm_page_hash:
 *
 *	Distributes the object/offset key pair among hash buckets.
 *
 *	NOTE:	The bucket count must be a power of 2
 */
#define vm_page_hash(object, offset) (\
	( (natural_t)((uint32_t)object * vm_page_bucket_hash) + ((uint32_t)atop_64(offset) ^ vm_page_bucket_hash))\
	 & vm_page_hash_mask)

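/*
 * The macro scales the object pointer by the odd constant
 * vm_page_bucket_hash, XORs the page index of the offset (atop_64) with
 * the same constant, and masks the sum down to the power-of-two bucket
 * count, so both the object identity and the page-aligned offset
 * contribute bits to the bucket choice.
 */
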
/*
 *	vm_page_insert:		[ internal use only ]
 *
 *	Inserts the given mem entry into the object/object-page
 *	table and object list.
 *
 *	The object must be locked.
 */

void
vm_page_insert(
	register vm_page_t		mem,
	register vm_object_t		object,
	register vm_object_offset_t	offset)
{
	register vm_page_bucket_t *bucket;

	XPR(XPR_VM_PAGE,
		"vm_page_insert, object 0x%X offset 0x%X page 0x%X\n",
		(integer_t)object, (integer_t)offset, (integer_t)mem, 0,0);

	VM_PAGE_CHECK(mem);
#if DEBUG
	_mutex_assert(&object->Lock, MA_OWNED);

	if (mem->tabled || mem->object != VM_OBJECT_NULL)
		panic("vm_page_insert: page %p for (obj=%p,off=0x%llx) "
		      "already in (obj=%p,off=0x%llx)",
		      mem, object, offset, mem->object, mem->offset);
#endif
	assert(!object->internal || offset < object->size);

	/* only insert "pageout" pages into "pageout" objects,
	 * and normal pages into normal objects */
	assert(object->pageout == mem->pageout);

	assert(vm_page_lookup(object, offset) == VM_PAGE_NULL);

	/*
	 *	Record the object/offset pair in this page
	 */

	mem->object = object;
	mem->offset = offset;

	/*
	 *	Insert it into the object_object/offset hash table
	 */

	bucket = &vm_page_buckets[vm_page_hash(object, offset)];
	simple_lock(&vm_page_bucket_lock);
	mem->next = bucket->pages;
	bucket->pages = mem;
#if     MACH_PAGE_HASH_STATS
	if (++bucket->cur_count > bucket->hi_count)
		bucket->hi_count = bucket->cur_count;
#endif /* MACH_PAGE_HASH_STATS */
	simple_unlock(&vm_page_bucket_lock);

	/*
	 *	Now link into the object's list of backed pages.
	 */

	VM_PAGE_INSERT(mem, object);
	mem->tabled = TRUE;

	/*
	 *	Show that the object has one more resident page.
	 */

	object->resident_page_count++;

	if (object->purgable == VM_OBJECT_PURGABLE_VOLATILE ||
	    object->purgable == VM_OBJECT_PURGABLE_EMPTY) {
		vm_page_lock_queues();
		vm_page_purgeable_count++;
		vm_page_unlock_queues();
	}
}

/*
 *	vm_page_replace:
 *
 *	Exactly like vm_page_insert, except that we first
 *	remove any existing page at the given offset in object.
 *
 *	The object and page queues must be locked.
 */

void
vm_page_replace(
	register vm_page_t		mem,
	register vm_object_t		object,
	register vm_object_offset_t	offset)
{
	vm_page_bucket_t *bucket;
	vm_page_t	 found_m = VM_PAGE_NULL;

	VM_PAGE_CHECK(mem);
#if DEBUG
	_mutex_assert(&object->Lock, MA_OWNED);
	_mutex_assert(&vm_page_queue_lock, MA_OWNED);

	if (mem->tabled || mem->object != VM_OBJECT_NULL)
		panic("vm_page_replace: page %p for (obj=%p,off=0x%llx) "
		      "already in (obj=%p,off=0x%llx)",
		      mem, object, offset, mem->object, mem->offset);
#endif
	/*
	 *	Record the object/offset pair in this page
	 */

	mem->object = object;
	mem->offset = offset;

	/*
	 *	Insert it into the object_object/offset hash table,
	 *	replacing any page that might have been there.
	 */

	bucket = &vm_page_buckets[vm_page_hash(object, offset)];
	simple_lock(&vm_page_bucket_lock);

	if (bucket->pages) {
		vm_page_t *mp = &bucket->pages;
		register vm_page_t m = *mp;

		do {
			if (m->object == object && m->offset == offset) {
				/*
				 * Remove old page from hash list
				 */
				*mp = m->next;

				found_m = m;
				break;
			}
			mp = &m->next;
		} while ((m = *mp));

		mem->next = bucket->pages;
	} else {
		mem->next = VM_PAGE_NULL;
	}
	/*
	 * insert new page at head of hash list
	 */
	bucket->pages = mem;

	simple_unlock(&vm_page_bucket_lock);

	if (found_m) {
		/*
		 * there was already a page at the specified
		 * offset for this object... remove it from
		 * the object and free it back to the free list
		 */
		VM_PAGE_REMOVE(found_m);
		found_m->tabled = FALSE;

		found_m->object = VM_OBJECT_NULL;
		found_m->offset = (vm_object_offset_t) -1;
		object->resident_page_count--;

		if (object->purgable == VM_OBJECT_PURGABLE_VOLATILE ||
		    object->purgable == VM_OBJECT_PURGABLE_EMPTY) {
			assert(vm_page_purgeable_count > 0);
			vm_page_purgeable_count--;
		}

		/*
		 * Return page to the free list.
		 * Note the page is not tabled now
		 */
		vm_page_free(found_m);
	}
	/*
	 *	Now link into the object's list of backed pages.
	 */

	VM_PAGE_INSERT(mem, object);
	mem->tabled = TRUE;

	/*
	 *	And show that the object has one more resident
	 *	page.
	 */

	object->resident_page_count++;

	if (object->purgable == VM_OBJECT_PURGABLE_VOLATILE ||
	    object->purgable == VM_OBJECT_PURGABLE_EMPTY) {
		vm_page_purgeable_count++;
	}
}

/*
 *	vm_page_remove:		[ internal use only ]
 *
 *	Removes the given mem entry from the object/offset-page
 *	table and the object page list.
 *
 *	The object and page queues must be locked.
 */

void
vm_page_remove(
	register vm_page_t	mem)
{
	register vm_page_bucket_t	*bucket;
	register vm_page_t		this;

	XPR(XPR_VM_PAGE,
		"vm_page_remove, object 0x%X offset 0x%X page 0x%X\n",
		(integer_t)mem->object, (integer_t)mem->offset,
		(integer_t)mem, 0,0);
#if DEBUG
	_mutex_assert(&vm_page_queue_lock, MA_OWNED);
	_mutex_assert(&mem->object->Lock, MA_OWNED);
#endif
	assert(mem->tabled);
	assert(!mem->cleaning);
	VM_PAGE_CHECK(mem);


	/*
	 *	Remove from the object_object/offset hash table
	 */

	bucket = &vm_page_buckets[vm_page_hash(mem->object, mem->offset)];
	simple_lock(&vm_page_bucket_lock);
	if ((this = bucket->pages) == mem) {
		/* optimize for common case */

		bucket->pages = mem->next;
	} else {
		register vm_page_t	*prev;

		for (prev = &this->next;
		     (this = *prev) != mem;
		     prev = &this->next)
			continue;
		*prev = this->next;
	}
#if     MACH_PAGE_HASH_STATS
	bucket->cur_count--;
#endif /* MACH_PAGE_HASH_STATS */
	simple_unlock(&vm_page_bucket_lock);

	/*
	 *	Now remove from the object's list of backed pages.
	 */

	VM_PAGE_REMOVE(mem);

	/*
	 *	And show that the object has one fewer resident
	 *	page.
	 */

	mem->object->resident_page_count--;

	if (mem->object->purgable == VM_OBJECT_PURGABLE_VOLATILE ||
	    mem->object->purgable == VM_OBJECT_PURGABLE_EMPTY) {
		assert(vm_page_purgeable_count > 0);
		vm_page_purgeable_count--;
	}

	mem->tabled = FALSE;
	mem->object = VM_OBJECT_NULL;
	mem->offset = (vm_object_offset_t) -1;
}

/*
 *	vm_page_lookup:
 *
 *	Returns the page associated with the object/offset
 *	pair specified; if none is found, VM_PAGE_NULL is returned.
 *
 *	The object must be locked.  No side effects.
 */

unsigned long vm_page_lookup_hint = 0;
unsigned long vm_page_lookup_hint_next = 0;
unsigned long vm_page_lookup_hint_prev = 0;
unsigned long vm_page_lookup_hint_miss = 0;

vm_page_t
vm_page_lookup(
	register vm_object_t		object,
	register vm_object_offset_t	offset)
{
	register vm_page_t	mem;
	register vm_page_bucket_t *bucket;
	queue_entry_t		qe;
#if 0
	_mutex_assert(&object->Lock, MA_OWNED);
#endif

	mem = object->memq_hint;
	if (mem != VM_PAGE_NULL) {
		assert(mem->object == object);
		if (mem->offset == offset) {
			vm_page_lookup_hint++;
			return mem;
		}
		qe = queue_next(&mem->listq);
		if (! queue_end(&object->memq, qe)) {
			vm_page_t	next_page;

			next_page = (vm_page_t) qe;
			assert(next_page->object == object);
			if (next_page->offset == offset) {
				vm_page_lookup_hint_next++;
				object->memq_hint = next_page; /* new hint */
				return next_page;
			}
		}
		qe = queue_prev(&mem->listq);
		if (! queue_end(&object->memq, qe)) {
			vm_page_t prev_page;

			prev_page = (vm_page_t) qe;
			assert(prev_page->object == object);
			if (prev_page->offset == offset) {
				vm_page_lookup_hint_prev++;
				object->memq_hint = prev_page; /* new hint */
				return prev_page;
			}
		}
	}

	/*
	 *	Search the hash table for this object/offset pair
	 */

	bucket = &vm_page_buckets[vm_page_hash(object, offset)];

	/*
	 * since we hold the object lock, we are guaranteed that no
	 * new pages can be inserted into this object... this in turn
	 * guarantees that the page we're looking for can't exist
	 * if the bucket it hashes to is currently NULL even when looked
	 * at outside the scope of the hash bucket lock... this is a
	 * really cheap optimization to avoid taking the lock
	 */
	if (bucket->pages == VM_PAGE_NULL) {
		return (VM_PAGE_NULL);
	}
	simple_lock(&vm_page_bucket_lock);

	for (mem = bucket->pages; mem != VM_PAGE_NULL; mem = mem->next) {
		VM_PAGE_CHECK(mem);
		if ((mem->object == object) && (mem->offset == offset))
			break;
	}
	simple_unlock(&vm_page_bucket_lock);

	if (mem != VM_PAGE_NULL) {
		if (object->memq_hint != VM_PAGE_NULL) {
			vm_page_lookup_hint_miss++;
		}
		assert(mem->object == object);
		object->memq_hint = mem;
	}

	return(mem);
}

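/*
 *	vm_page_lookup_nohint:
 *
 *	Same hash-bucket search as vm_page_lookup, but without consulting
 *	or updating the object's memq_hint.
 */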
vm_page_t
vm_page_lookup_nohint(
	vm_object_t		object,
	vm_object_offset_t	offset)
{
	register vm_page_t	mem;
	register vm_page_bucket_t *bucket;

#if 0
	_mutex_assert(&object->Lock, MA_OWNED);
#endif
	/*
	 *	Search the hash table for this object/offset pair
	 */

	bucket = &vm_page_buckets[vm_page_hash(object, offset)];

	simple_lock(&vm_page_bucket_lock);
	for (mem = bucket->pages; mem != VM_PAGE_NULL; mem = mem->next) {
		VM_PAGE_CHECK(mem);
		if ((mem->object == object) && (mem->offset == offset))
			break;
	}
	simple_unlock(&vm_page_bucket_lock);

	return(mem);
}

/*
 *	vm_page_rename:
 *
 *	Move the given memory entry from its
 *	current object to the specified target object/offset.
 *
 *	The object must be locked.
 */
void
vm_page_rename(
	register vm_page_t		mem,
	register vm_object_t		new_object,
	vm_object_offset_t		new_offset)
{
	assert(mem->object != new_object);
	/*
	 * ENCRYPTED SWAP:
	 * The encryption key is based on the page's memory object
	 * (aka "pager") and paging offset.  Moving the page to
	 * another VM object changes its "pager" and "paging_offset"
	 * so it has to be decrypted first.
	 */
	if (mem->encrypted) {
		panic("vm_page_rename: page %p is encrypted\n", mem);
	}
	/*
	 *	Changes to mem->object require the page lock because
	 *	the pageout daemon uses that lock to get the object.
	 */

	XPR(XPR_VM_PAGE,
		"vm_page_rename, new object 0x%X, offset 0x%X page 0x%X\n",
		(integer_t)new_object, (integer_t)new_offset,
		(integer_t)mem, 0,0);

	vm_page_lock_queues();
	vm_page_remove(mem);
	vm_page_insert(mem, new_object, new_offset);
	vm_page_unlock_queues();
}

/*
 *	vm_page_init:
 *
 *	Initialize the fields in a new page.
 *	This takes a structure with random values and initializes it
 *	so that it can be given to vm_page_release or vm_page_insert.
 */
void
vm_page_init(
	vm_page_t	mem,
	ppnum_t		phys_page)
{
	assert(phys_page);
	*mem = vm_page_template;
	mem->phys_page = phys_page;
}

/*
 *	vm_page_grab_fictitious:
 *
 *	Remove a fictitious page from the free list.
 *	Returns VM_PAGE_NULL if there are no free pages.
 */
int	c_vm_page_grab_fictitious = 0;
int	c_vm_page_release_fictitious = 0;
int	c_vm_page_more_fictitious = 0;

vm_page_t
vm_page_grab_fictitious(void)
{
	register vm_page_t m;

	m = (vm_page_t)zget(vm_page_zone);
	if (m) {
		vm_page_init(m, vm_page_fictitious_addr);
		m->fictitious = TRUE;
	}

	c_vm_page_grab_fictitious++;
	return m;
}

/*
 *	vm_page_release_fictitious:
 *
 *	Release a fictitious page to the free list.
 */

void
vm_page_release_fictitious(
	register vm_page_t m)
{
	assert(!m->free);
	assert(m->busy);
	assert(m->fictitious);
	assert(m->phys_page == vm_page_fictitious_addr);

	c_vm_page_release_fictitious++;
#if DEBUG
	if (m->free)
		panic("vm_page_release_fictitious");
#endif
	m->free = TRUE;
	zfree(vm_page_zone, m);
}

/*
 *	vm_page_more_fictitious:
 *
 *	Add more fictitious pages to the free list.
 *	Allowed to block. This routine is way intimate
 *	with the zones code, for several reasons:
 *	1. we need to carve some page structures out of physical
 *	   memory before zones work, so they _cannot_ come from
 *	   the zone_map.
 *	2. the zone needs to be collectable in order to prevent
 *	   growth without bound. These structures are used by
 *	   the device pager (by the hundreds and thousands), as
 *	   private pages for pageout, and as blocking pages for
 *	   pagein. Temporary bursts in demand should not result in
 *	   permanent allocation of a resource.
 *	3. To smooth allocation humps, we allocate single pages
 *	   with kernel_memory_allocate(), and cram them into the
 *	   zone. This also allows us to initialize the vm_page_t's
 *	   on the way into the zone, so that zget() always returns
 *	   an initialized structure. The zone free element pointer
 *	   and the free page pointer are both the first item in the
 *	   vm_page_t.
 *	4. By having the pages in the zone pre-initialized, we need
 *	   not keep 2 levels of lists. The garbage collector simply
 *	   scans our list, and reduces physical memory usage as it
 *	   sees fit.
 */

void vm_page_more_fictitious(void)
{
	register vm_page_t m;
	vm_offset_t addr;
	kern_return_t retval;
	int i;

	c_vm_page_more_fictitious++;

	/*
	 * Allocate a single page from the zone_map. Do not wait if no physical
	 * pages are immediately available, and do not zero the space. We need
	 * our own blocking lock here to prevent having multiple,
	 * simultaneous requests from piling up on the zone_map lock. Exactly
	 * one (of our) threads should be potentially waiting on the map lock.
	 * If winner is not vm-privileged, then the page allocation will fail,
	 * and it will temporarily block here in the vm_page_wait().
	 */
	mutex_lock(&vm_page_alloc_lock);
	/*
	 * If another thread allocated space, just bail out now.
	 */
	if (zone_free_count(vm_page_zone) > 5) {
		/*
		 * The number "5" is a small number that is larger than the
		 * number of fictitious pages that any single caller will
		 * attempt to allocate. Otherwise, a thread will attempt to
		 * acquire a fictitious page (vm_page_grab_fictitious), fail,
		 * release all of the resources and locks already acquired,
		 * and then call this routine. This routine finds the pages
		 * that the caller released, so fails to allocate new space.
		 * The process repeats infinitely. The largest known number
		 * of fictitious pages required in this manner is 2. 5 is
		 * simply a somewhat larger number.
		 */
		mutex_unlock(&vm_page_alloc_lock);
		return;
	}

	retval = kernel_memory_allocate(zone_map,
					&addr, PAGE_SIZE, VM_PROT_ALL,
					KMA_KOBJECT|KMA_NOPAGEWAIT);
	if (retval != KERN_SUCCESS) {
		/*
		 * No page was available. Tell the pageout daemon, drop the
		 * lock to give another thread a chance at it, and
		 * wait for the pageout daemon to make progress.
		 */
		mutex_unlock(&vm_page_alloc_lock);
		vm_page_wait(THREAD_UNINT);
		return;
	}
	/*
	 * Initialize as many vm_page_t's as will fit on this page. This
	 * depends on the zone code disturbing ONLY the first item of
	 * each zone element.
	 */
	m = (vm_page_t)addr;
	for (i = PAGE_SIZE/sizeof(struct vm_page); i > 0; i--) {
		vm_page_init(m, vm_page_fictitious_addr);
		m->fictitious = TRUE;
		m++;
	}
	zcram(vm_page_zone, (void *) addr, PAGE_SIZE);
	mutex_unlock(&vm_page_alloc_lock);
}

/*
 *	vm_page_convert:
 *
 *	Attempt to convert a fictitious page into a real page.
 */

boolean_t
vm_page_convert(
	register vm_page_t m)
{
	register vm_page_t real_m;

	assert(m->busy);
	assert(m->fictitious);
	assert(!m->dirty);

	real_m = vm_page_grab();
	if (real_m == VM_PAGE_NULL)
		return FALSE;

	m->phys_page = real_m->phys_page;
	m->fictitious = FALSE;
	m->no_isync = TRUE;

	vm_page_lock_queues();
	if (m->active)
		vm_page_active_count++;
	else if (m->inactive)
		vm_page_inactive_count++;
	vm_page_unlock_queues();

	real_m->phys_page = vm_page_fictitious_addr;
	real_m->fictitious = TRUE;

	vm_page_release_fictitious(real_m);
	return TRUE;
}

/*
 *	vm_pool_low():
 *
 *	Return true if it is not likely that a non-vm_privileged thread
 *	can get memory without blocking.  Advisory only, since the
 *	situation may change under us.
 */
int
vm_pool_low(void)
{
	/* No locking, at worst we will fib. */
	return( vm_page_free_count < vm_page_free_reserved );
}



/*
 * this is an interface to support bring-up of drivers
 * on platforms with physical memory > 4G...
 */
int		vm_himemory_mode = 0;


/*
 * this interface exists to support hardware controllers
 * incapable of generating DMAs with more than 32 bits
 * of address on platforms with physical memory > 4G...
 */
unsigned int	vm_lopage_free_count = 0;
unsigned int	vm_lopage_max_count = 0;
vm_page_t	vm_lopage_queue_free = VM_PAGE_NULL;

vm_page_t
vm_page_grablo(void)
{
	register vm_page_t	mem;
	unsigned int		vm_lopage_alloc_count;

	if (vm_lopage_poolsize == 0)
		return (vm_page_grab());

	mutex_lock(&vm_page_queue_free_lock);

	if ((mem = vm_lopage_queue_free) != VM_PAGE_NULL) {

		vm_lopage_queue_free = (vm_page_t) mem->pageq.next;
		mem->pageq.next = NULL;
		mem->pageq.prev = NULL;
		mem->free = FALSE;
		mem->no_isync = TRUE;

		vm_lopage_free_count--;
		vm_lopage_alloc_count = (vm_lopage_poolend - vm_lopage_poolstart) - vm_lopage_free_count;
		if (vm_lopage_alloc_count > vm_lopage_max_count)
			vm_lopage_max_count = vm_lopage_alloc_count;
	}
	mutex_unlock(&vm_page_queue_free_lock);

	return (mem);
}



/*
 *	vm_page_grab:
 *
 *	Remove a page from the free list.
 *	Returns VM_PAGE_NULL if the free list is too small.
 */

unsigned long	vm_page_grab_count = 0;	/* measure demand */

vm_page_t
vm_page_grab(void)
{
	register vm_page_t	mem;

	mutex_lock(&vm_page_queue_free_lock);
	vm_page_grab_count++;

	/*
	 *	Optionally produce warnings if the wire or gobble
	 *	counts exceed some threshold.
	 */
	if (vm_page_wire_count_warning > 0
	    && vm_page_wire_count >= vm_page_wire_count_warning) {
		printf("mk: vm_page_grab(): high wired page count of %d\n",
			vm_page_wire_count);
		assert(vm_page_wire_count < vm_page_wire_count_warning);
	}
	if (vm_page_gobble_count_warning > 0
	    && vm_page_gobble_count >= vm_page_gobble_count_warning) {
		printf("mk: vm_page_grab(): high gobbled page count of %d\n",
			vm_page_gobble_count);
		assert(vm_page_gobble_count < vm_page_gobble_count_warning);
	}

	/*
	 *	Only let privileged threads (involved in pageout)
	 *	dip into the reserved pool.
	 */

	if ((vm_page_free_count < vm_page_free_reserved) &&
	    !(current_thread()->options & TH_OPT_VMPRIV)) {
		mutex_unlock(&vm_page_queue_free_lock);
		mem = VM_PAGE_NULL;
		goto wakeup_pageout;
	}

	while (vm_page_queue_free == VM_PAGE_NULL) {
		mutex_unlock(&vm_page_queue_free_lock);
		VM_PAGE_WAIT();
		mutex_lock(&vm_page_queue_free_lock);
	}

	if (--vm_page_free_count < vm_page_free_count_minimum)
		vm_page_free_count_minimum = vm_page_free_count;
	mem = vm_page_queue_free;
	vm_page_queue_free = (vm_page_t) mem->pageq.next;
	mem->pageq.next = NULL;
	mem->pageq.prev = NULL;
	assert(mem->listq.next == NULL && mem->listq.prev == NULL);
	assert(mem->tabled == FALSE);
	assert(mem->object == VM_OBJECT_NULL);
	assert(!mem->laundry);
	mem->free = FALSE;
	mem->no_isync = TRUE;
	mutex_unlock(&vm_page_queue_free_lock);

	assert(pmap_verify_free(mem->phys_page));

	/*
	 *	Decide if we should poke the pageout daemon.
	 *	We do this if the free count is less than the low
	 *	water mark, or if the free count is less than the high
	 *	water mark (but above the low water mark) and the inactive
	 *	count is less than its target.
	 *
	 *	We don't have the counts locked ... if they change a little,
	 *	it doesn't really matter.
	 */

wakeup_pageout:
	if ((vm_page_free_count < vm_page_free_min) ||
	    ((vm_page_free_count < vm_page_free_target) &&
	     (vm_page_inactive_count < vm_page_inactive_target)))
		thread_wakeup((event_t) &vm_page_free_wanted);

//	dbgLog(mem->phys_page, vm_page_free_count, vm_page_wire_count, 4);	/* (TEST/DEBUG) */

	return mem;
}

/*
 *	vm_page_release:
 *
 *	Return a page to the free list.
 */

void
vm_page_release(
	register vm_page_t	mem)
{

#if 0
	unsigned int pindex;
	phys_entry *physent;

	physent = mapping_phys_lookup(mem->phys_page, &pindex);	/* (BRINGUP) */
	if(physent->ppLink & ppN) {					/* (BRINGUP) */
		panic("vm_page_release: already released - %08X %08X\n", mem, mem->phys_page);
	}
	physent->ppLink = physent->ppLink | ppN;			/* (BRINGUP) */
#endif
	assert(!mem->private && !mem->fictitious);

//	dbgLog(mem->phys_page, vm_page_free_count, vm_page_wire_count, 5);	/* (TEST/DEBUG) */

	mutex_lock(&vm_page_queue_free_lock);
#if DEBUG
	if (mem->free)
		panic("vm_page_release");
#endif
	mem->free = TRUE;
	assert(!mem->laundry);
	assert(mem->object == VM_OBJECT_NULL);
	assert(mem->pageq.next == NULL &&
	       mem->pageq.prev == NULL);

	if (mem->phys_page <= vm_lopage_poolend && mem->phys_page >= vm_lopage_poolstart) {
		/*
		 * this exists to support hardware controllers
		 * incapable of generating DMAs with more than 32 bits
		 * of address on platforms with physical memory > 4G...
		 */
		mem->pageq.next = (queue_entry_t) vm_lopage_queue_free;
		vm_lopage_queue_free = mem;
		vm_lopage_free_count++;
	} else {
		mem->pageq.next = (queue_entry_t) vm_page_queue_free;
		vm_page_queue_free = mem;
		vm_page_free_count++;
		/*
		 *	Check if we should wake up someone waiting for page.
		 *	But don't bother waking them unless they can allocate.
		 *
		 *	We wakeup only one thread, to prevent starvation.
		 *	Because the scheduling system handles wait queues FIFO,
		 *	if we wakeup all waiting threads, one greedy thread
		 *	can starve multiple niceguy threads.  When the threads
		 *	all wakeup, the greedy thread runs first, grabs the page,
		 *	and waits for another page.  It will be the first to run
		 *	when the next page is freed.
		 *
		 *	However, there is a slight danger here.
		 *	The thread we wake might not use the free page.
		 *	Then the other threads could wait indefinitely
		 *	while the page goes unused.  To forestall this,
		 *	the pageout daemon will keep making free pages
		 *	as long as vm_page_free_wanted is non-zero.
		 */

		if ((vm_page_free_wanted > 0) &&
		    (vm_page_free_count >= vm_page_free_reserved)) {
			vm_page_free_wanted--;
			thread_wakeup_one((event_t) &vm_page_free_count);
		}
	}
	mutex_unlock(&vm_page_queue_free_lock);
}

/*
 *	vm_page_wait:
 *
 *	Wait for a page to become available.
 *	If there are plenty of free pages, then we don't sleep.
 *
 *	Returns:
 *		TRUE:  There may be another page, try again
 *		FALSE: We were interrupted out of our wait, don't try again
 */

boolean_t
vm_page_wait(
	int	interruptible )
{
	/*
	 *	We can't use vm_page_free_reserved to make this
	 *	determination.  Consider: some thread might
	 *	need to allocate two pages.  The first allocation
	 *	succeeds, the second fails.  After the first page is freed,
	 *	a call to vm_page_wait must really block.
	 */
	kern_return_t	wait_result;
	int		need_wakeup = 0;

	mutex_lock(&vm_page_queue_free_lock);
	if (vm_page_free_count < vm_page_free_target) {
		if (vm_page_free_wanted++ == 0)
			need_wakeup = 1;
		wait_result = assert_wait((event_t)&vm_page_free_count, interruptible);
		mutex_unlock(&vm_page_queue_free_lock);
		counter(c_vm_page_wait_block++);

		if (need_wakeup)
			thread_wakeup((event_t)&vm_page_free_wanted);

		if (wait_result == THREAD_WAITING)
			wait_result = thread_block(THREAD_CONTINUE_NULL);

		return(wait_result == THREAD_AWAKENED);
	} else {
		mutex_unlock(&vm_page_queue_free_lock);
		return TRUE;
	}
}

/*
 *	vm_page_alloc:
 *
 *	Allocate and return a memory cell associated
 *	with this VM object/offset pair.
 *
 *	Object must be locked.
 */

vm_page_t
vm_page_alloc(
	vm_object_t		object,
	vm_object_offset_t	offset)
{
	register vm_page_t	mem;

#if DEBUG
	_mutex_assert(&object->Lock, MA_OWNED);
#endif
	mem = vm_page_grab();
	if (mem == VM_PAGE_NULL)
		return VM_PAGE_NULL;

	vm_page_insert(mem, object, offset);

	return(mem);
}

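/*
 * Typical calling pattern (a sketch, not a verbatim excerpt from any
 * caller; the exact retry policy varies from caller to caller):
 *
 *	vm_object_lock(object);
 *	while ((m = vm_page_alloc(object, offset)) == VM_PAGE_NULL) {
 *		vm_object_unlock(object);
 *		VM_PAGE_WAIT();
 *		vm_object_lock(object);
 *	}
 *
 * i.e. callers hold the object lock across the allocation and block in
 * VM_PAGE_WAIT() when vm_page_grab() finds the free list depleted.
 */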
c0fea474
A
1708
1709vm_page_t
1710vm_page_alloclo(
1711 vm_object_t object,
1712 vm_object_offset_t offset)
1713{
1714 register vm_page_t mem;
1715
1716#if DEBUG
1717 _mutex_assert(&object->Lock, MA_OWNED);
1718#endif
1719 mem = vm_page_grablo();
1720 if (mem == VM_PAGE_NULL)
1721 return VM_PAGE_NULL;
1722
1723 vm_page_insert(mem, object, offset);
1724
1725 return(mem);
1726}
1727
1728
1c79356b
A
1729counter(unsigned int c_laundry_pages_freed = 0;)
1730
1731int vm_pagein_cluster_unused = 0;
91447636 1732boolean_t vm_page_free_verify = TRUE;
1c79356b
A
1733/*
1734 * vm_page_free:
1735 *
1736 * Returns the given page to the free list,
1737 * disassociating it with any VM object.
1738 *
1739 * Object and page queues must be locked prior to entry.
1740 */
1741void
1742vm_page_free(
1743 register vm_page_t mem)
1744{
1745 vm_object_t object = mem->object;
1746
1747 assert(!mem->free);
1748 assert(!mem->cleaning);
1749 assert(!mem->pageout);
91447636
A
1750 if (vm_page_free_verify && !mem->fictitious && !mem->private) {
1751 assert(pmap_verify_free(mem->phys_page));
1752 }
1753
1754#if DEBUG
1755 if (mem->object)
1756 _mutex_assert(&mem->object->Lock, MA_OWNED);
1757 _mutex_assert(&vm_page_queue_lock, MA_OWNED);
1c79356b 1758
91447636
A
1759 if (mem->free)
1760 panic("vm_page_free: freeing page on free list\n");
1761#endif
1c79356b
A
1762 if (mem->tabled)
1763 vm_page_remove(mem); /* clears tabled, object, offset */
1764 VM_PAGE_QUEUES_REMOVE(mem); /* clears active or inactive */
1765
1766 if (mem->clustered) {
1767 mem->clustered = FALSE;
1768 vm_pagein_cluster_unused++;
1769 }
1770
1771 if (mem->wire_count) {
1772 if (!mem->private && !mem->fictitious)
1773 vm_page_wire_count--;
1774 mem->wire_count = 0;
1775 assert(!mem->gobbled);
1776 } else if (mem->gobbled) {
1777 if (!mem->private && !mem->fictitious)
1778 vm_page_wire_count--;
1779 vm_page_gobble_count--;
1780 }
1781 mem->gobbled = FALSE;
1782
1783 if (mem->laundry) {
91447636 1784 vm_pageout_throttle_up(mem);
1c79356b 1785 counter(++c_laundry_pages_freed);
1c79356b
A
1786 }
1787
1c79356b
A
1788 PAGE_WAKEUP(mem); /* clears wanted */
1789
1790 if (mem->absent)
1791 vm_object_absent_release(object);
1792
0b4e3aa0 1793 /* Some of these may be unnecessary */
1c79356b
A
1794 mem->page_lock = 0;
1795 mem->unlock_request = 0;
1796 mem->busy = TRUE;
1797 mem->absent = FALSE;
1798 mem->error = FALSE;
1799 mem->dirty = FALSE;
1800 mem->precious = FALSE;
1801 mem->reference = FALSE;
91447636 1802 mem->encrypted = FALSE;
1c79356b
A
1803
1804 mem->page_error = KERN_SUCCESS;
1805
1806 if (mem->private) {
1807 mem->private = FALSE;
1808 mem->fictitious = TRUE;
55e303ae 1809 mem->phys_page = vm_page_fictitious_addr;
1c79356b
A
1810 }
1811 if (mem->fictitious) {
1812 vm_page_release_fictitious(mem);
1813 } else {
9bccf70c
A
1814 /* depends on the queues lock */
1815 if(mem->zero_fill) {
1816 vm_zf_count-=1;
1817 mem->zero_fill = FALSE;
1818 }
55e303ae 1819 vm_page_init(mem, mem->phys_page);
1c79356b
A
1820 vm_page_release(mem);
1821 }
1822}
1823
55e303ae
A
1824
1825void
1826vm_page_free_list(
1827 register vm_page_t mem)
1828{
91447636 1829 register vm_page_t nxt;
55e303ae 1830 register vm_page_t first = NULL;
91447636 1831 register vm_page_t last = VM_PAGE_NULL;
55e303ae
A
1832 register int pg_count = 0;
1833
91447636
A
1834#if DEBUG
1835 _mutex_assert(&vm_page_queue_lock, MA_OWNED);
1836#endif
55e303ae 1837 while (mem) {
91447636
A
1838#if DEBUG
1839 if (mem->tabled || mem->object)
1840 panic("vm_page_free_list: freeing tabled page\n");
1841 if (mem->inactive || mem->active || mem->free)
1842 panic("vm_page_free_list: freeing page on list\n");
1843#endif
1844 assert(mem->pageq.prev == NULL);
55e303ae
A
1845 nxt = (vm_page_t)(mem->pageq.next);
1846
1847 if (mem->clustered)
1848 vm_pagein_cluster_unused++;
1849
1850 if (mem->laundry) {
91447636 1851 vm_pageout_throttle_up(mem);
55e303ae 1852 counter(++c_laundry_pages_freed);
55e303ae
A
1853 }
1854 mem->busy = TRUE;
1855
1856 PAGE_WAKEUP(mem); /* clears wanted */
1857
1858 if (mem->private)
1859 mem->fictitious = TRUE;
1860
1861 if (!mem->fictitious) {
1862 /* depends on the queues lock */
1863 if (mem->zero_fill)
1864 vm_zf_count -= 1;
91447636 1865 assert(!mem->laundry);
55e303ae
A
1866 vm_page_init(mem, mem->phys_page);
1867
1868 mem->free = TRUE;
1869
1870 if (first == NULL)
1871 last = mem;
1872 mem->pageq.next = (queue_t) first;
1873 first = mem;
1874
1875 pg_count++;
1876 } else {
1877 mem->phys_page = vm_page_fictitious_addr;
1878 vm_page_release_fictitious(mem);
1879 }
1880 mem = nxt;
1881 }
1882 if (first) {
1883
1884 mutex_lock(&vm_page_queue_free_lock);
1885
1886 last->pageq.next = (queue_entry_t) vm_page_queue_free;
1887 vm_page_queue_free = first;
1888
1889 vm_page_free_count += pg_count;
1890
1891 if ((vm_page_free_wanted > 0) &&
1892 (vm_page_free_count >= vm_page_free_reserved)) {
91447636 1893 unsigned int available_pages;
55e303ae 1894
1895 if (vm_page_free_count >= vm_page_free_reserved) {
1896 available_pages = (vm_page_free_count
1897 - vm_page_free_reserved);
1898 } else {
1899 available_pages = 0;
1900 }
1901
1902 if (available_pages >= vm_page_free_wanted) {
1903 vm_page_free_wanted = 0;
1904 thread_wakeup((event_t) &vm_page_free_count);
1905 } else {
1906 while (available_pages--) {
1907 vm_page_free_wanted--;
1908 thread_wakeup_one((event_t) &vm_page_free_count);
1909 }
1910 }
1911 }
1912 mutex_unlock(&vm_page_queue_free_lock);
1913 }
1914}
1915
1916
1917/*
1918 * vm_page_wire:
1919 *
1920 * Mark this page as wired down by yet
1921 * another map, removing it from paging queues
1922 * as necessary.
1923 *
1924 * The page's object and the page queues must be locked.
1925 */
1926void
1927vm_page_wire(
1928 register vm_page_t mem)
1929{
1930
91447636 1931// dbgLog(current_thread(), mem->offset, mem->object, 1); /* (TEST/DEBUG) */
1932
1933 VM_PAGE_CHECK(mem);
1934#if DEBUG
1935 if (mem->object)
1936 _mutex_assert(&mem->object->Lock, MA_OWNED);
1937 _mutex_assert(&vm_page_queue_lock, MA_OWNED);
1938#endif
1939 if (mem->wire_count == 0) {
1940 VM_PAGE_QUEUES_REMOVE(mem);
1941 if (!mem->private && !mem->fictitious && !mem->gobbled)
1942 vm_page_wire_count++;
1943 if (mem->gobbled)
1944 vm_page_gobble_count--;
1945 mem->gobbled = FALSE;
1946 if(mem->zero_fill) {
1947 /* depends on the queues lock */
1948 vm_zf_count-=1;
1949 mem->zero_fill = FALSE;
1950 }
1951 /*
1952 * ENCRYPTED SWAP:
1953 * The page could be encrypted, but
1954 * we don't have to decrypt it here
1955 * because we don't guarantee that the
1956 * data is actually valid at this point.
1957 * The page will get decrypted in
1958 * vm_fault_wire() if needed.
1959 */
1960 }
1961 assert(!mem->gobbled);
1962 mem->wire_count++;
1963}
1964
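/*
 * Illustrative sketch (added for exposition; example_wire_for_io is a
 * hypothetical helper, not part of this file): the locking discipline
 * a caller of vm_page_wire()/vm_page_unwire() is expected to follow,
 * per the comments above and the DEBUG lock assertions.
 */
static void
example_wire_for_io(
	vm_page_t	m)
{
	vm_object_lock(m->object);	/* the page's object must be locked... */
	vm_page_lock_queues();		/* ...and so must the page queues */
	vm_page_wire(m);		/* takes m off the paging queues, bumps wire_count */
	vm_page_unlock_queues();
	vm_object_unlock(m->object);

	/* ... operate on the wired page ... */

	vm_object_lock(m->object);
	vm_page_lock_queues();
	vm_page_unwire(m);		/* dropping the last wiring re-enters m on the active queue */
	vm_page_unlock_queues();
	vm_object_unlock(m->object);
}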
1965/*
1966 * vm_page_gobble:
1967 *
1968 * Mark this page as consumed by the vm/ipc/xmm subsystems.
1969 *
1970 * Called only for freshly vm_page_grab()ed pages - w/ nothing locked.
1971 */
1972void
1973vm_page_gobble(
1974 register vm_page_t mem)
1975{
1976 vm_page_lock_queues();
1977 VM_PAGE_CHECK(mem);
1978
1979 assert(!mem->gobbled);
1980 assert(mem->wire_count == 0);
1981
1982 if (!mem->gobbled && mem->wire_count == 0) {
1983 if (!mem->private && !mem->fictitious)
1984 vm_page_wire_count++;
1985 }
1986 vm_page_gobble_count++;
1987 mem->gobbled = TRUE;
1988 vm_page_unlock_queues();
1989}
1990
1991/*
1992 * vm_page_unwire:
1993 *
1994 * Release one wiring of this page, potentially
1995 * enabling it to be paged again.
1996 *
1997 * The page's object and the page queues must be locked.
1998 */
1999void
2000vm_page_unwire(
2001 register vm_page_t mem)
2002{
2003
91447636 2004// dbgLog(current_thread(), mem->offset, mem->object, 0); /* (TEST/DEBUG) */
2005
2006 VM_PAGE_CHECK(mem);
2007 assert(mem->wire_count > 0);
2008#if DEBUG
2009 if (mem->object)
2010 _mutex_assert(&mem->object->Lock, MA_OWNED);
2011 _mutex_assert(&vm_page_queue_lock, MA_OWNED);
2012#endif
2013 if (--mem->wire_count == 0) {
2014 assert(!mem->private && !mem->fictitious);
2015 vm_page_wire_count--;
2016 assert(!mem->laundry);
2017 assert(mem->object != kernel_object);
2018 assert(mem->pageq.next == NULL && mem->pageq.prev == NULL);
2019 queue_enter(&vm_page_queue_active, mem, vm_page_t, pageq);
2020 vm_page_active_count++;
2021 mem->active = TRUE;
2022 mem->reference = TRUE;
2023 }
2024}
2025
2026/*
2027 * vm_page_deactivate:
2028 *
2029 * Returns the given page to the inactive list,
2030 * indicating that no physical maps have access
2031 * to this page. [Used by the physical mapping system.]
2032 *
2033 * The page queues must be locked.
2034 */
2035void
2036vm_page_deactivate(
2037 register vm_page_t m)
2038{
2039 VM_PAGE_CHECK(m);
91447636 2040 assert(m->object != kernel_object);
1c79356b 2041
55e303ae 2042// dbgLog(m->phys_page, vm_page_free_count, vm_page_wire_count, 6); /* (TEST/DEBUG) */
2043#if DEBUG
2044 _mutex_assert(&vm_page_queue_lock, MA_OWNED);
2045#endif
2046 /*
2047 * This page is no longer very interesting. If it was
2048 * interesting (active or inactive/referenced), then we
2049 * clear the reference bit and (re)enter it in the
2050 * inactive queue. Note wired pages should not have
2051 * their reference bit cleared.
2052 */
2053 if (m->gobbled) { /* can this happen? */
2054 assert(m->wire_count == 0);
2055 if (!m->private && !m->fictitious)
2056 vm_page_wire_count--;
2057 vm_page_gobble_count--;
2058 m->gobbled = FALSE;
2059 }
2060 if (m->private || (m->wire_count != 0))
2061 return;
2062 if (m->active || (m->inactive && m->reference)) {
2063 if (!m->fictitious && !m->absent)
55e303ae 2064 pmap_clear_reference(m->phys_page);
2065 m->reference = FALSE;
2066 VM_PAGE_QUEUES_REMOVE(m);
2067 }
2068 if (m->wire_count == 0 && !m->inactive) {
2069 m->page_ticket = vm_page_ticket;
2070 vm_page_ticket_roll++;
2071
2072 if(vm_page_ticket_roll == VM_PAGE_TICKETS_IN_ROLL) {
2073 vm_page_ticket_roll = 0;
2074 if(vm_page_ticket == VM_PAGE_TICKET_ROLL_IDS)
2075 vm_page_ticket= 0;
2076 else
2077 vm_page_ticket++;
2078 }
2079
2080 assert(!m->laundry);
2081 assert(m->pageq.next == NULL && m->pageq.prev == NULL);
2082 if(m->zero_fill) {
2083 queue_enter(&vm_page_queue_zf, m, vm_page_t, pageq);
2084 } else {
2085 queue_enter(&vm_page_queue_inactive,
2086 m, vm_page_t, pageq);
2087 }
2088
2089 m->inactive = TRUE;
2090 if (!m->fictitious)
2091 vm_page_inactive_count++;
2092 }
2093}
2094
2095/*
2096 * vm_page_activate:
2097 *
2098 * Put the specified page on the active list (if appropriate).
2099 *
2100 * The page queues must be locked.
2101 */
2102
2103void
2104vm_page_activate(
2105 register vm_page_t m)
2106{
2107 VM_PAGE_CHECK(m);
2108 assert(m->object != kernel_object);
2109#if DEBUG
2110 _mutex_assert(&vm_page_queue_lock, MA_OWNED);
2111#endif
2112 if (m->gobbled) {
2113 assert(m->wire_count == 0);
2114 if (!m->private && !m->fictitious)
2115 vm_page_wire_count--;
2116 vm_page_gobble_count--;
2117 m->gobbled = FALSE;
2118 }
2119 if (m->private)
2120 return;
2121
2122 if (m->inactive) {
91447636 2123 assert(!m->laundry);
2124 if (m->zero_fill) {
2125 queue_remove(&vm_page_queue_zf, m, vm_page_t, pageq);
2126 } else {
2127 queue_remove(&vm_page_queue_inactive,
2128 m, vm_page_t, pageq);
2129 }
2130 m->pageq.next = NULL;
2131 m->pageq.prev = NULL;
2132 if (!m->fictitious)
2133 vm_page_inactive_count--;
2134 m->inactive = FALSE;
2135 }
2136 if (m->wire_count == 0) {
91447636 2137#if DEBUG
2138 if (m->active)
2139 panic("vm_page_activate: already active");
2140#endif
2141 assert(!m->laundry);
2142 assert(m->pageq.next == NULL && m->pageq.prev == NULL);
2143 queue_enter(&vm_page_queue_active, m, vm_page_t, pageq);
2144 m->active = TRUE;
2145 m->reference = TRUE;
2146 if (!m->fictitious)
2147 vm_page_active_count++;
2148 }
2149}
2150
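/*
 * Illustrative sketch (added for exposition; example_age_page is a
 * hypothetical helper, not part of this file): per the comments above,
 * vm_page_deactivate() and vm_page_activate() are called with just the
 * page queues lock held.
 */
static void
example_age_page(
	vm_page_t	m)
{
	vm_page_lock_queues();
	vm_page_deactivate(m);	/* clear the reference bit and queue m inactive (or zf) */
	vm_page_unlock_queues();
}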
2151/*
2152 * vm_page_part_zero_fill:
2153 *
2154 * Zero-fill a part of the page.
2155 */
2156void
2157vm_page_part_zero_fill(
2158 vm_page_t m,
2159 vm_offset_t m_pa,
2160 vm_size_t len)
2161{
2162 vm_page_t tmp;
2163
2164 VM_PAGE_CHECK(m);
2165#ifdef PMAP_ZERO_PART_PAGE_IMPLEMENTED
55e303ae 2166 pmap_zero_part_page(m->phys_page, m_pa, len);
2167#else
2168 while (1) {
2169 tmp = vm_page_grab();
2170 if (tmp == VM_PAGE_NULL) {
2171 vm_page_wait(THREAD_UNINT);
2172 continue;
2173 }
2174 break;
2175 }
2176 vm_page_zero_fill(tmp);
2177 if(m_pa != 0) {
2178 vm_page_part_copy(m, 0, tmp, 0, m_pa);
2179 }
2180 if((m_pa + len) < PAGE_SIZE) {
2181 vm_page_part_copy(m, m_pa + len, tmp,
2182 m_pa + len, PAGE_SIZE - (m_pa + len));
2183 }
2184 vm_page_copy(tmp,m);
2185 vm_page_lock_queues();
2186 vm_page_free(tmp);
2187 vm_page_unlock_queues();
2188#endif
2189
2190}
2191
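/*
 * Illustrative sketch (added for exposition; example_zero_tail and its
 * "valid" parameter are hypothetical): using vm_page_part_zero_fill()
 * to clear the bytes of a page beyond the portion that was actually
 * filled, e.g. after a short read.
 */
static void
example_zero_tail(
	vm_page_t	m,
	vm_size_t	valid)		/* bytes already valid at offset 0 */
{
	if (valid < PAGE_SIZE)
		vm_page_part_zero_fill(m, (vm_offset_t)valid, PAGE_SIZE - valid);
}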
2192/*
2193 * vm_page_zero_fill:
2194 *
2195 * Zero-fill the specified page.
2196 */
2197void
2198vm_page_zero_fill(
2199 vm_page_t m)
2200{
2201 XPR(XPR_VM_PAGE,
2202 "vm_page_zero_fill, object 0x%X offset 0x%X page 0x%X\n",
2203 (integer_t)m->object, (integer_t)m->offset, (integer_t)m, 0,0);
2204
2205 VM_PAGE_CHECK(m);
2206
2207// dbgTrace(0xAEAEAEAE, m->phys_page, 0); /* (BRINGUP) */
2208 pmap_zero_page(m->phys_page);
2209}
2210
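/*
 * Illustrative sketch (added for exposition; example_grab_zeroed_page
 * is hypothetical): obtaining a zero-filled page with the same
 * grab/wait retry pattern used by vm_page_part_zero_fill() above.
 */
static vm_page_t
example_grab_zeroed_page(void)
{
	vm_page_t	m;

	while ((m = vm_page_grab()) == VM_PAGE_NULL)
		vm_page_wait(THREAD_UNINT);	/* block until pages become available */

	vm_page_zero_fill(m);			/* zero the new page's frame */
	return m;
}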
2211/*
2212 * vm_page_part_copy:
2213 *
2214 * copy part of one page to another
2215 */
2216
2217void
2218vm_page_part_copy(
2219 vm_page_t src_m,
2220 vm_offset_t src_pa,
2221 vm_page_t dst_m,
2222 vm_offset_t dst_pa,
2223 vm_size_t len)
2224{
2225 VM_PAGE_CHECK(src_m);
2226 VM_PAGE_CHECK(dst_m);
2227
2228 pmap_copy_part_page(src_m->phys_page, src_pa,
2229 dst_m->phys_page, dst_pa, len);
2230}
2231
2232/*
2233 * vm_page_copy:
2234 *
2235 * Copy one page to another
2236 *
2237 * ENCRYPTED SWAP:
2238 * The source page should not be encrypted. The caller should
2239 * make sure the page is decrypted first, if necessary.
2240 */
2241
2242void
2243vm_page_copy(
2244 vm_page_t src_m,
2245 vm_page_t dest_m)
2246{
2247 XPR(XPR_VM_PAGE,
2248 "vm_page_copy, object 0x%X offset 0x%X to object 0x%X offset 0x%X\n",
2249 (integer_t)src_m->object, src_m->offset,
2250 (integer_t)dest_m->object, dest_m->offset,
2251 0);
2252
2253 VM_PAGE_CHECK(src_m);
2254 VM_PAGE_CHECK(dest_m);
2255
2256 /*
2257 * ENCRYPTED SWAP:
2258 * The source page should not be encrypted at this point.
2259 * The destination page will therefore not contain encrypted
2260 * data after the copy.
2261 */
2262 if (src_m->encrypted) {
2263 panic("vm_page_copy: source page %p is encrypted\n", src_m);
2264 }
2265 dest_m->encrypted = FALSE;
2266
55e303ae 2267 pmap_copy_page(src_m->phys_page, dest_m->phys_page);
2268}
2269
2270/*
2271 * Currently, this is a primitive allocator that grabs
2272 * free pages from the system, sorts them by physical
2273 * address, then searches for a region large enough to
2274 * satisfy the user's request.
2275 *
2276 * Additional levels of effort:
2277 * + steal clean active/inactive pages
2278 * + force pageouts of dirty pages
2279 * + maintain a map of available physical
2280 * memory
2281 */
2282
2283#if MACH_ASSERT
2284/*
2285 * Check that the list of pages is ordered by
2286 * ascending physical address and has no holes.
2287 */
2288int vm_page_verify_contiguous(
2289 vm_page_t pages,
2290 unsigned int npages);
2291
2292int
2293vm_page_verify_contiguous(
2294 vm_page_t pages,
2295 unsigned int npages)
2296{
2297 register vm_page_t m;
2298 unsigned int page_count;
91447636 2299 vm_offset_t prev_addr;
1c79356b 2300
55e303ae 2301 prev_addr = pages->phys_page;
2302 page_count = 1;
2303 for (m = NEXT_PAGE(pages); m != VM_PAGE_NULL; m = NEXT_PAGE(m)) {
55e303ae 2304 if (m->phys_page != prev_addr + 1) {
1c79356b 2305 printf("m 0x%x prev_addr 0x%x, current addr 0x%x\n",
55e303ae 2306 m, prev_addr, m->phys_page);
91447636 2307 printf("pages 0x%x page_count %d\n", pages, page_count);
2308 panic("vm_page_verify_contiguous: not contiguous!");
2309 }
55e303ae 2310 prev_addr = m->phys_page;
2311 ++page_count;
2312 }
2313 if (page_count != npages) {
2314 printf("pages 0x%x actual count 0x%x but requested 0x%x\n",
2315 pages, page_count, npages);
2316 panic("vm_page_verify_contiguous: count error");
2317 }
2318 return 1;
2319}
2320#endif /* MACH_ASSERT */
2321
2322
2323cpm_counter(unsigned int vpfls_pages_handled = 0;)
2324cpm_counter(unsigned int vpfls_head_insertions = 0;)
2325cpm_counter(unsigned int vpfls_tail_insertions = 0;)
2326cpm_counter(unsigned int vpfls_general_insertions = 0;)
2327cpm_counter(unsigned int vpfc_failed = 0;)
2328cpm_counter(unsigned int vpfc_satisfied = 0;)
2329
2330/*
2331 * Find a region large enough to contain at least npages
2332 * of contiguous physical memory.
2333 *
2334 * Requirements:
2335 * - Called while holding vm_page_queue_free_lock.
2336 * - Doesn't respect vm_page_free_reserved; caller
2337 * must not ask for more pages than are legal to grab.
2338 *
2339 * Returns a pointer to a list of gobbled pages or VM_PAGE_NULL.
2340 *
2341 * Algorithm:
2342 * Loop over the free list, extracting one page at a time and
2343 * inserting those into a sorted sub-list. We stop as soon as
2344 * there's a contiguous range within the sorted list that can
2345 * satisfy the contiguous memory request. This contiguous sub-
2346 * list is chopped out of the sorted sub-list and the remainder
2347 * of the sorted sub-list is put back onto the beginning of the
2348 * free list.
2349 */
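/*
 * Worked example (added for exposition; the page numbers are made up):
 * with contig_pages == 2 and the free list yielding phys_pages 9, 4, 5
 * in that order, page 9 seeds the sorted sub-list; 4 sorts ahead of it
 * and restarts the candidate run at {4}; 5 then matches nextcontaddr
 * and is appended to the run, giving {4, 5}, which is chopped out and
 * returned while page 9 and the remaining free list are put back.
 */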
2350static vm_page_t
2351vm_page_find_contiguous(
e5568f75 2352 unsigned int contig_pages)
1c79356b 2353{
2354 vm_page_t sort_list;
2355 vm_page_t *contfirstprev, contlast;
2356 vm_page_t m, m1;
2357 ppnum_t prevcontaddr;
2358 ppnum_t nextcontaddr;
2359 unsigned int npages;
2360
2361 m = NULL;
2362#if DEBUG
2363 _mutex_assert(&vm_page_queue_free_lock, MA_OWNED);
2364#endif
2365#if MACH_ASSERT
2366 /*
2367 * Verify pages in the free list..
2368 */
2369 npages = 0;
2370 for (m = vm_page_queue_free; m != VM_PAGE_NULL; m = NEXT_PAGE(m))
2371 ++npages;
2372 if (npages != vm_page_free_count)
2373 panic("vm_page_find_contiguous: prelim: npages %u free_count %d",
2374 npages, vm_page_free_count);
2375#endif /* MACH_ASSERT */
1c79356b 2376
e5568f75 2377 if (contig_pages == 0 || vm_page_queue_free == VM_PAGE_NULL)
2378 return VM_PAGE_NULL;
2379
2380#define PPNUM_PREV(x) (((x) > 0) ? ((x) - 1) : 0)
2381#define PPNUM_NEXT(x) (((x) < PPNUM_MAX) ? ((x) + 1) : PPNUM_MAX)
2382#define SET_NEXT_PAGE(m,n) ((m)->pageq.next = (struct queue_entry *) (n))
1c79356b 2383
2384 npages = 1;
2385 contfirstprev = &sort_list;
2386 contlast = sort_list = vm_page_queue_free;
2387 vm_page_queue_free = NEXT_PAGE(sort_list);
2388 SET_NEXT_PAGE(sort_list, VM_PAGE_NULL);
2389 prevcontaddr = PPNUM_PREV(sort_list->phys_page);
2390 nextcontaddr = PPNUM_NEXT(sort_list->phys_page);
2391
2392 while (npages < contig_pages &&
2393 (m = vm_page_queue_free) != VM_PAGE_NULL)
2394 {
2395 cpm_counter(++vpfls_pages_handled);
2396
2397 /* prepend to existing run? */
2398 if (m->phys_page == prevcontaddr)
2399 {
2400 vm_page_queue_free = NEXT_PAGE(m);
2401 cpm_counter(++vpfls_head_insertions);
2402 prevcontaddr = PPNUM_PREV(prevcontaddr);
2403 SET_NEXT_PAGE(m, *contfirstprev);
2404 *contfirstprev = m;
2405 npages++;
2406 continue; /* no tail expansion check needed */
2407 }
2408
2409 /* append to tail of existing run? */
2410 else if (m->phys_page == nextcontaddr)
2411 {
2412 vm_page_queue_free = NEXT_PAGE(m);
2413 cpm_counter(++vpfls_tail_insertions);
2414 nextcontaddr = PPNUM_NEXT(nextcontaddr);
2415 SET_NEXT_PAGE(m, NEXT_PAGE(contlast));
2416 SET_NEXT_PAGE(contlast, m);
2417 contlast = m;
2418 npages++;
2419 }
2420
2421 /* prepend to the very front of sorted list? */
2422 else if (m->phys_page < sort_list->phys_page)
2423 {
2424 vm_page_queue_free = NEXT_PAGE(m);
2425 cpm_counter(++vpfls_general_insertions);
2426 prevcontaddr = PPNUM_PREV(m->phys_page);
2427 nextcontaddr = PPNUM_NEXT(m->phys_page);
2428 SET_NEXT_PAGE(m, sort_list);
2429 contfirstprev = &sort_list;
2430 contlast = sort_list = m;
2431 npages = 1;
2432 }
2433
2434 else /* get to proper place for insertion */
2435 {
2436 if (m->phys_page < nextcontaddr)
2437 {
2438 prevcontaddr = PPNUM_PREV(sort_list->phys_page);
2439 nextcontaddr = PPNUM_NEXT(sort_list->phys_page);
2440 contfirstprev = &sort_list;
2441 contlast = sort_list;
2442 npages = 1;
2443 }
2444 for (m1 = NEXT_PAGE(contlast);
2445 npages < contig_pages &&
2446 m1 != VM_PAGE_NULL && m1->phys_page < m->phys_page;
2447 m1 = NEXT_PAGE(m1))
2448 {
2449 if (m1->phys_page != nextcontaddr) {
2450 prevcontaddr = PPNUM_PREV(m1->phys_page);
2451 contfirstprev = NEXT_PAGE_PTR(contlast);
2452 npages = 1;
2453 } else {
2454 npages++;
2455 }
2456 nextcontaddr = PPNUM_NEXT(m1->phys_page);
2457 contlast = m1;
2458 }
2459
1c79356b 2460 /*
2461 * We may actually already have enough.
2462 * This could happen if a previous prepend
2463 * joined up two runs to meet our needs.
2464 * If so, bail before we take the current
2465 * page off the free queue.
1c79356b 2466 */
2467 if (npages == contig_pages)
2468 break;
2469
2470 if (m->phys_page != nextcontaddr)
2471 {
2472 contfirstprev = NEXT_PAGE_PTR(contlast);
2473 prevcontaddr = PPNUM_PREV(m->phys_page);
2474 nextcontaddr = PPNUM_NEXT(m->phys_page);
2475 npages = 1;
2476 } else {
2477 nextcontaddr = PPNUM_NEXT(nextcontaddr);
2478 npages++;
1c79356b 2479 }
2480 vm_page_queue_free = NEXT_PAGE(m);
2481 cpm_counter(++vpfls_general_insertions);
2482 SET_NEXT_PAGE(m, NEXT_PAGE(contlast));
2483 SET_NEXT_PAGE(contlast, m);
2484 contlast = m;
2485 }
2486
2487 /* See how many pages are now contiguous after the insertion */
2488 for (m1 = NEXT_PAGE(m);
2489 npages < contig_pages &&
2490 m1 != VM_PAGE_NULL && m1->phys_page == nextcontaddr;
2491 m1 = NEXT_PAGE(m1))
2492 {
2493 nextcontaddr = PPNUM_NEXT(nextcontaddr);
2494 contlast = m1;
2495 npages++;
1c79356b 2496 }
e5568f75 2497 }
1c79356b 2498
2499 /* how did we do? */
2500 if (npages == contig_pages)
2501 {
2502 cpm_counter(++vpfc_satisfied);
2503
2504 /* remove the contiguous range from the sorted list */
2505 m = *contfirstprev;
2506 *contfirstprev = NEXT_PAGE(contlast);
2507 SET_NEXT_PAGE(contlast, VM_PAGE_NULL);
2508 assert(vm_page_verify_contiguous(m, npages));
2509
2510 /* inline vm_page_gobble() for each returned page */
2511 for (m1 = m; m1 != VM_PAGE_NULL; m1 = NEXT_PAGE(m1)) {
2512 assert(m1->free);
2513 assert(!m1->wanted);
91447636 2514 assert(!m1->laundry);
2515 m1->free = FALSE;
2516 m1->no_isync = TRUE;
2517 m1->gobbled = TRUE;
2518 }
2519 vm_page_wire_count += npages;
2520 vm_page_gobble_count += npages;
2521 vm_page_free_count -= npages;
2522
2523 /* stick free list at the tail of the sorted list */
2524 while ((m1 = *contfirstprev) != VM_PAGE_NULL)
2525 contfirstprev = (vm_page_t *)&m1->pageq.next;
2526 *contfirstprev = vm_page_queue_free;
1c79356b 2527 }
2528
2529 vm_page_queue_free = sort_list;
2530 return m;
2531}
2532
2533/*
2534 * Allocate a list of contiguous, wired pages.
2535 */
2536kern_return_t
2537cpm_allocate(
2538 vm_size_t size,
2539 vm_page_t *list,
2540 boolean_t wire)
2541{
2542 register vm_page_t m;
2543 vm_page_t pages;
2544 unsigned int npages;
2545 unsigned int vm_pages_available;
e5568f75 2546 boolean_t wakeup;
2547
2548 if (size % page_size != 0)
2549 return KERN_INVALID_ARGUMENT;
2550
2551 vm_page_lock_queues();
2552 mutex_lock(&vm_page_queue_free_lock);
2553
2554 /*
2555 * Should also take active and inactive pages
2556 * into account... One day...
2557 */
e5568f75 2558 npages = size / page_size;
2559 vm_pages_available = vm_page_free_count - vm_page_free_reserved;
2560
e5568f75 2561 if (npages > vm_pages_available) {
1c79356b 2562 mutex_unlock(&vm_page_queue_free_lock);
e5568f75 2563 vm_page_unlock_queues();
2564 return KERN_RESOURCE_SHORTAGE;
2565 }
2566
2567 /*
2568 * Obtain a pointer to a subset of the free
2569 * list large enough to satisfy the request;
2570 * the region will be physically contiguous.
2571 */
2572 pages = vm_page_find_contiguous(npages);
2573
2574 /* adjust global freelist counts and determine need for wakeups */
2575 if (vm_page_free_count < vm_page_free_count_minimum)
2576 vm_page_free_count_minimum = vm_page_free_count;
2577
2578 wakeup = ((vm_page_free_count < vm_page_free_min) ||
2579 ((vm_page_free_count < vm_page_free_target) &&
2580 (vm_page_inactive_count < vm_page_inactive_target)));
2581
2582 mutex_unlock(&vm_page_queue_free_lock);
2583
1c79356b 2584 if (pages == VM_PAGE_NULL) {
2585 vm_page_unlock_queues();
2586 return KERN_NO_SPACE;
2587 }
2588
2589 /*
2590 * Walk the returned list, wiring the pages.
2591 */
2592 if (wire == TRUE)
2593 for (m = pages; m != VM_PAGE_NULL; m = NEXT_PAGE(m)) {
2594 /*
2595 * Essentially inlined vm_page_wire.
2596 */
2597 assert(!m->active);
2598 assert(!m->inactive);
2599 assert(!m->private);
2600 assert(!m->fictitious);
2601 assert(m->wire_count == 0);
2602 assert(m->gobbled);
2603 m->gobbled = FALSE;
2604 m->wire_count++;
2605 --vm_page_gobble_count;
2606 }
2607 vm_page_unlock_queues();
2608
2609 if (wakeup)
2610 thread_wakeup((event_t) &vm_page_free_wanted);
2611
2612 /*
2613 * The CPM pages should now be available and
2614 * ordered by ascending physical address.
2615 */
2616 assert(vm_page_verify_contiguous(pages, npages));
2617
2618 *list = pages;
2619 return KERN_SUCCESS;
2620}
2621
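/*
 * Illustrative sketch (added for exposition; example_alloc_contig and
 * the four-page request are hypothetical): a typical call to
 * cpm_allocate(), which returns a gobbled (and, with wire == TRUE,
 * wired) chain of physically contiguous pages in ascending physical
 * address order.
 */
static kern_return_t
example_alloc_contig(
	vm_page_t	*page_list)
{
	kern_return_t	kr;

	/* size must be a multiple of page_size, else KERN_INVALID_ARGUMENT */
	kr = cpm_allocate((vm_size_t)(4 * page_size), page_list, TRUE);

	if (kr != KERN_SUCCESS)
		return kr;	/* KERN_RESOURCE_SHORTAGE or KERN_NO_SPACE on failure */

	/* *page_list now heads a NEXT_PAGE()-linked chain of four wired pages */
	return KERN_SUCCESS;
}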
2622
2623#include <mach_vm_debug.h>
2624#if MACH_VM_DEBUG
2625
2626#include <mach_debug/hash_info.h>
2627#include <vm/vm_debug.h>
2628
2629/*
2630 * Routine: vm_page_info
2631 * Purpose:
2632 * Return information about the global VP table.
2633 * Fills the buffer with as much information as possible
2634 * and returns the desired size of the buffer.
2635 * Conditions:
2636 * Nothing locked. The caller should provide
2637 * possibly-pageable memory.
2638 */
2639
2640unsigned int
2641vm_page_info(
2642 hash_info_bucket_t *info,
2643 unsigned int count)
2644{
91447636 2645 unsigned int i;
2646
2647 if (vm_page_bucket_count < count)
2648 count = vm_page_bucket_count;
2649
2650 for (i = 0; i < count; i++) {
2651 vm_page_bucket_t *bucket = &vm_page_buckets[i];
2652 unsigned int bucket_count = 0;
2653 vm_page_t m;
2654
2655 simple_lock(&vm_page_bucket_lock);
2656 for (m = bucket->pages; m != VM_PAGE_NULL; m = m->next)
2657 bucket_count++;
2658 simple_unlock(&vm_page_bucket_lock);
2659
2660 /* don't touch pageable memory while holding locks */
2661 info[i].hib_count = bucket_count;
2662 }
2663
2664 return vm_page_bucket_count;
2665}
2666#endif /* MACH_VM_DEBUG */
2667
2668#include <mach_kdb.h>
2669#if MACH_KDB
2670
2671#include <ddb/db_output.h>
2672#include <vm/vm_print.h>
2673#define printf kdbprintf
2674
2675/*
2676 * Routine: vm_page_print [exported]
2677 */
2678void
2679vm_page_print(
91447636 2680 db_addr_t db_addr)
1c79356b 2681{
2682 vm_page_t p;
2683
2684 p = (vm_page_t) (long) db_addr;
2685
2686 iprintf("page 0x%x\n", p);
2687
2688 db_indent += 2;
2689
2690 iprintf("object=0x%x", p->object);
2691 printf(", offset=0x%x", p->offset);
2692 printf(", wire_count=%d", p->wire_count);
1c79356b 2693
91447636 2694 iprintf("%sinactive, %sactive, %sgobbled, %slaundry, %sfree, %sref, %sencrypted\n",
2695 (p->inactive ? "" : "!"),
2696 (p->active ? "" : "!"),
2697 (p->gobbled ? "" : "!"),
2698 (p->laundry ? "" : "!"),
2699 (p->free ? "" : "!"),
2700 (p->reference ? "" : "!"),
91447636 2701 (p->encrypted ? "" : "!"));
2702 iprintf("%sbusy, %swanted, %stabled, %sfictitious, %sprivate, %sprecious\n",
2703 (p->busy ? "" : "!"),
2704 (p->wanted ? "" : "!"),
2705 (p->tabled ? "" : "!"),
2706 (p->fictitious ? "" : "!"),
2707 (p->private ? "" : "!"),
2708 (p->precious ? "" : "!"));
2709 iprintf("%sabsent, %serror, %sdirty, %scleaning, %spageout, %sclustered\n",
2710 (p->absent ? "" : "!"),
2711 (p->error ? "" : "!"),
2712 (p->dirty ? "" : "!"),
2713 (p->cleaning ? "" : "!"),
2714 (p->pageout ? "" : "!"),
2715 (p->clustered ? "" : "!"));
0b4e3aa0 2716 iprintf("%slock_supplied, %soverwriting, %srestart, %sunusual\n",
2717 (p->lock_supplied ? "" : "!"),
2718 (p->overwriting ? "" : "!"),
2719 (p->restart ? "" : "!"),
0b4e3aa0 2720 (p->unusual ? "" : "!"));
1c79356b 2721
55e303ae 2722 iprintf("phys_page=0x%x", p->phys_page);
2723 printf(", page_error=0x%x", p->page_error);
2724 printf(", page_lock=0x%x", p->page_lock);
2725 printf(", unlock_request=%d\n", p->unlock_request);
2726
2727 db_indent -= 2;
2728}
2729#endif /* MACH_KDB */