osfmk/vm/vm_resident.c (xnu-344.34)
1/*
 2 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * The contents of this file constitute Original Code as defined in and
7 * are subject to the Apple Public Source License Version 1.1 (the
8 * "License"). You may not use this file except in compliance with the
9 * License. Please obtain a copy of the License at
10 * http://www.apple.com/publicsource and read it before using this file.
11 *
12 * This Original Code and all software distributed under the License are
13 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
14 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
15 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
17 * License for the specific language governing rights and limitations
18 * under the License.
19 *
20 * @APPLE_LICENSE_HEADER_END@
21 */
22/*
23 * @OSF_COPYRIGHT@
24 */
25/*
26 * Mach Operating System
27 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
28 * All Rights Reserved.
29 *
30 * Permission to use, copy, modify and distribute this software and its
31 * documentation is hereby granted, provided that both the copyright
32 * notice and this permission notice appear in all copies of the
33 * software, derivative works or modified versions, and any portions
34 * thereof, and that both notices appear in supporting documentation.
35 *
36 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
37 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
38 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
39 *
40 * Carnegie Mellon requests users of this software to return to
41 *
42 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
43 * School of Computer Science
44 * Carnegie Mellon University
45 * Pittsburgh PA 15213-3890
46 *
47 * any improvements or extensions that they make and grant Carnegie Mellon
48 * the rights to redistribute these changes.
49 */
50/*
51 */
52/*
53 * File: vm/vm_page.c
54 * Author: Avadis Tevanian, Jr., Michael Wayne Young
55 *
56 * Resident memory management module.
57 */
58
59#include <mach/clock_types.h>
60#include <mach/vm_prot.h>
61#include <mach/vm_statistics.h>
62#include <kern/counters.h>
63#include <kern/sched_prim.h>
64#include <kern/task.h>
65#include <kern/thread.h>
66#include <kern/zalloc.h>
67#include <kern/xpr.h>
68#include <vm/pmap.h>
69#include <vm/vm_init.h>
70#include <vm/vm_map.h>
71#include <vm/vm_page.h>
72#include <vm/vm_pageout.h>
73#include <vm/vm_kern.h> /* kernel_memory_allocate() */
74#include <kern/misc_protos.h>
75#include <zone_debug.h>
76#include <vm/cpm.h>
77
78/* Variables used to indicate the relative age of pages in the
79 * inactive list
80 */
81
82int vm_page_ticket_roll = 0;
83int vm_page_ticket = 0;
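/*
 * [Editor's note: descriptive comment added in this edition; see
 * vm_page_deactivate() below.]  Each page placed on the inactive
 * queue is stamped with the current vm_page_ticket.  Every
 * VM_PAGE_TICKETS_IN_ROLL deactivations (counted by
 * vm_page_ticket_roll) the ticket advances, wrapping back to zero at
 * VM_PAGE_TICKET_ROLL_IDS, so a page's ticket gives a coarse measure
 * of how recently it entered the inactive list.
 */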
84/*
 85 * Associated with each page of user-allocatable memory is a
86 * page structure.
87 */
88
89/*
90 * These variables record the values returned by vm_page_bootstrap,
91 * for debugging purposes. The implementation of pmap_steal_memory
92 * and pmap_startup here also uses them internally.
93 */
94
95vm_offset_t virtual_space_start;
96vm_offset_t virtual_space_end;
97int vm_page_pages;
98
99/*
100 * The vm_page_lookup() routine, which provides for fast
101 * (virtual memory object, offset) to page lookup, employs
102 * the following hash table. The vm_page_{insert,remove}
103 * routines install and remove associations in the table.
104 * [This table is often called the virtual-to-physical,
105 * or VP, table.]
106 */
107typedef struct {
108 vm_page_t pages;
109#if MACH_PAGE_HASH_STATS
110 int cur_count; /* current count */
111 int hi_count; /* high water mark */
112#endif /* MACH_PAGE_HASH_STATS */
113} vm_page_bucket_t;
114
115vm_page_bucket_t *vm_page_buckets; /* Array of buckets */
116unsigned int vm_page_bucket_count = 0; /* How big is array? */
117unsigned int vm_page_hash_mask; /* Mask for hash function */
118unsigned int vm_page_hash_shift; /* Shift for hash function */
119decl_simple_lock_data(,vm_page_bucket_lock)
120
121#if MACH_PAGE_HASH_STATS
122/* This routine is only for debug. It is intended to be called by
123 * hand by a developer using a kernel debugger. This routine prints
124 * out vm_page_hash table statistics to the kernel debug console.
125 */
126void
127hash_debug(void)
128{
129 int i;
130 int numbuckets = 0;
131 int highsum = 0;
132 int maxdepth = 0;
133
134 for (i = 0; i < vm_page_bucket_count; i++) {
135 if (vm_page_buckets[i].hi_count) {
136 numbuckets++;
137 highsum += vm_page_buckets[i].hi_count;
138 if (vm_page_buckets[i].hi_count > maxdepth)
139 maxdepth = vm_page_buckets[i].hi_count;
140 }
141 }
142 printf("Total number of buckets: %d\n", vm_page_bucket_count);
143 printf("Number used buckets: %d = %d%%\n",
144 numbuckets, 100*numbuckets/vm_page_bucket_count);
145 printf("Number unused buckets: %d = %d%%\n",
146 vm_page_bucket_count - numbuckets,
147 100*(vm_page_bucket_count-numbuckets)/vm_page_bucket_count);
148 printf("Sum of bucket max depth: %d\n", highsum);
149 printf("Average bucket depth: %d.%2d\n",
150 highsum/vm_page_bucket_count,
151 highsum%vm_page_bucket_count);
152 printf("Maximum bucket depth: %d\n", maxdepth);
153}
154#endif /* MACH_PAGE_HASH_STATS */
155
156/*
157 * The virtual page size is currently implemented as a runtime
158 * variable, but is constant once initialized using vm_set_page_size.
159 * This initialization must be done in the machine-dependent
160 * bootstrap sequence, before calling other machine-independent
161 * initializations.
162 *
163 * All references to the virtual page size outside this
164 * module must use the PAGE_SIZE, PAGE_MASK and PAGE_SHIFT
165 * constants.
166 */
167#ifndef PAGE_SIZE_FIXED
168vm_size_t page_size = 4096;
169vm_size_t page_mask = 4095;
170int page_shift = 12;
171#endif /* PAGE_SIZE_FIXED */
172
173/*
174 * Resident page structures are initialized from
175 * a template (see vm_page_alloc).
176 *
177 * When adding a new field to the virtual memory
178 * object structure, be sure to add initialization
179 * (see vm_page_bootstrap).
180 */
181struct vm_page vm_page_template;
182
183/*
184 * Resident pages that represent real memory
185 * are allocated from a free list.
186 */
187vm_page_t vm_page_queue_free;
188vm_page_t vm_page_queue_fictitious;
189decl_mutex_data(,vm_page_queue_free_lock)
190unsigned int vm_page_free_wanted;
191int vm_page_free_count;
192int vm_page_fictitious_count;
193
194unsigned int vm_page_free_count_minimum; /* debugging */
195
196/*
197 * Occasionally, the virtual memory system uses
198 * resident page structures that do not refer to
199 * real pages, for example to leave a page with
200 * important state information in the VP table.
201 *
202 * These page structures are allocated the way
203 * most other kernel structures are.
204 */
205zone_t vm_page_zone;
206decl_mutex_data(,vm_page_alloc_lock)
207unsigned int io_throttle_zero_fill;
208decl_mutex_data(,vm_page_zero_fill_lock)
209
210/*
211 * Fictitious pages don't have a physical address,
212 * but we must initialize phys_addr to something.
213 * For debugging, this should be a strange value
214 * that the pmap module can recognize in assertions.
215 */
216vm_offset_t vm_page_fictitious_addr = (vm_offset_t) -1;
217
218/*
219 * Resident page structures are also chained on
220 * queues that are used by the page replacement
221 * system (pageout daemon). These queues are
222 * defined here, but are shared by the pageout
223 * module. The inactive queue is broken into
224 * inactive and zf for convenience as the
 225 * pageout daemon often assigns a higher
226 * affinity to zf pages
227 */
228queue_head_t vm_page_queue_active;
229queue_head_t vm_page_queue_inactive;
230queue_head_t vm_page_queue_zf;
231decl_mutex_data(,vm_page_queue_lock)
232int vm_page_active_count;
233int vm_page_inactive_count;
234int vm_page_wire_count;
235int vm_page_gobble_count = 0;
236int vm_page_wire_count_warning = 0;
237int vm_page_gobble_count_warning = 0;
238
239/* the following fields are protected by the vm_page_queue_lock */
240queue_head_t vm_page_queue_limbo;
241int vm_page_limbo_count = 0; /* total pages in limbo */
242int vm_page_limbo_real_count = 0; /* real pages in limbo */
243int vm_page_pin_count = 0; /* number of pinned pages */
244
245decl_simple_lock_data(,vm_page_preppin_lock)
246
247/*
248 * Several page replacement parameters are also
249 * shared with this module, so that page allocation
250 * (done here in vm_page_alloc) can trigger the
251 * pageout daemon.
252 */
253int vm_page_free_target = 0;
254int vm_page_free_min = 0;
255int vm_page_inactive_target = 0;
256int vm_page_free_reserved = 0;
257int vm_page_laundry_count = 0;
258
259/*
260 * The VM system has a couple of heuristics for deciding
261 * that pages are "uninteresting" and should be placed
262 * on the inactive queue as likely candidates for replacement.
263 * These variables let the heuristics be controlled at run-time
264 * to make experimentation easier.
265 */
266
267boolean_t vm_page_deactivate_hint = TRUE;
268
269/*
270 * vm_set_page_size:
271 *
272 * Sets the page size, perhaps based upon the memory
273 * size. Must be called before any use of page-size
274 * dependent functions.
275 *
276 * Sets page_shift and page_mask from page_size.
277 */
278void
279vm_set_page_size(void)
280{
281#ifndef PAGE_SIZE_FIXED
282 page_mask = page_size - 1;
283
284 if ((page_mask & page_size) != 0)
285 panic("vm_set_page_size: page size not a power of two");
286
287 for (page_shift = 0; ; page_shift++)
288 if ((1 << page_shift) == page_size)
289 break;
290#endif /* PAGE_SIZE_FIXED */
291}
292
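/*
 * [Editor's note: illustrative sketch added in this edition, not part
 * of the original source.]  With the default 4K page the exported
 * constants relate as PAGE_SIZE == 1 << PAGE_SHIFT and
 * PAGE_MASK == PAGE_SIZE - 1, and the page rounding used throughout
 * this file (round_page, trunc_page, atop) reduces to bit operations
 * along these lines:
 */
#if 0	/* illustrative only; hypothetical example macros */
#define EXAMPLE_TRUNC_PAGE(x)	((x) & ~page_mask)		 /* 0x12345 -> 0x12000 */
#define EXAMPLE_ROUND_PAGE(x)	(((x) + page_mask) & ~page_mask) /* 0x12345 -> 0x13000 */
#define EXAMPLE_ATOP(x)		((x) >> page_shift)		 /* byte address -> page number */
#endif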
293/*
294 * vm_page_bootstrap:
295 *
296 * Initializes the resident memory module.
297 *
298 * Allocates memory for the page cells, and
299 * for the object/offset-to-page hash table headers.
300 * Each page cell is initialized and placed on the free list.
301 * Returns the range of available kernel virtual memory.
302 */
303
304void
305vm_page_bootstrap(
306 vm_offset_t *startp,
307 vm_offset_t *endp)
308{
309 register vm_page_t m;
310 int i;
311 unsigned int log1;
312 unsigned int log2;
313 unsigned int size;
314
315 /*
316 * Initialize the vm_page template.
317 */
318
319 m = &vm_page_template;
320 m->object = VM_OBJECT_NULL; /* reset later */
321 m->offset = 0; /* reset later */
322 m->wire_count = 0;
323
324 m->inactive = FALSE;
325 m->active = FALSE;
326 m->laundry = FALSE;
327 m->free = FALSE;
328 m->no_isync = TRUE;
329 m->reference = FALSE;
330 m->pageout = FALSE;
331 m->dump_cleaning = FALSE;
332 m->list_req_pending = FALSE;
333
334 m->busy = TRUE;
335 m->wanted = FALSE;
336 m->tabled = FALSE;
337 m->fictitious = FALSE;
338 m->private = FALSE;
339 m->absent = FALSE;
340 m->error = FALSE;
341 m->dirty = FALSE;
342 m->cleaning = FALSE;
343 m->precious = FALSE;
344 m->clustered = FALSE;
345 m->lock_supplied = FALSE;
346 m->unusual = FALSE;
347 m->restart = FALSE;
348 m->zero_fill = FALSE;
349
350 m->phys_addr = 0; /* reset later */
351
352 m->page_lock = VM_PROT_NONE;
353 m->unlock_request = VM_PROT_NONE;
354 m->page_error = KERN_SUCCESS;
355
356 /*
357 * Initialize the page queues.
358 */
359
360 mutex_init(&vm_page_queue_free_lock, ETAP_VM_PAGEQ_FREE);
361 mutex_init(&vm_page_queue_lock, ETAP_VM_PAGEQ);
362 simple_lock_init(&vm_page_preppin_lock, ETAP_VM_PREPPIN);
363
364 vm_page_queue_free = VM_PAGE_NULL;
365 vm_page_queue_fictitious = VM_PAGE_NULL;
366 queue_init(&vm_page_queue_active);
367 queue_init(&vm_page_queue_inactive);
368 queue_init(&vm_page_queue_zf);
369 queue_init(&vm_page_queue_limbo);
370
371 vm_page_free_wanted = 0;
372
373 /*
374 * Steal memory for the map and zone subsystems.
375 */
376
377 vm_map_steal_memory();
378 zone_steal_memory();
379
380 /*
381 * Allocate (and initialize) the virtual-to-physical
382 * table hash buckets.
383 *
384 * The number of buckets should be a power of two to
385 * get a good hash function. The following computation
386 * chooses the first power of two that is greater
387 * than the number of physical pages in the system.
388 */
389
390 simple_lock_init(&vm_page_bucket_lock, ETAP_VM_BUCKET);
391
392 if (vm_page_bucket_count == 0) {
393 unsigned int npages = pmap_free_pages();
394
395 vm_page_bucket_count = 1;
396 while (vm_page_bucket_count < npages)
397 vm_page_bucket_count <<= 1;
398 }
399
400 vm_page_hash_mask = vm_page_bucket_count - 1;
401
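/*
 * [Editor's note: worked example added in this edition.]  On a
 * machine with 192MB of 4K pages, pmap_free_pages() returns roughly
 * 49152, so the loop above doubles vm_page_bucket_count from 1 up to
 * 65536 (the first power of two >= npages) and vm_page_hash_mask
 * becomes 0xffff.
 */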
402 /*
403 * Calculate object shift value for hashing algorithm:
404 * O = log2(sizeof(struct vm_object))
405 * B = log2(vm_page_bucket_count)
406 * hash shifts the object left by
407 * B/2 - O
408 */
409 size = vm_page_bucket_count;
410 for (log1 = 0; size > 1; log1++)
411 size /= 2;
412 size = sizeof(struct vm_object);
413 for (log2 = 0; size > 1; log2++)
414 size /= 2;
415 vm_page_hash_shift = log1/2 - log2 + 1;
416
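/*
 * [Editor's note: worked example added in this edition; the object
 * size is an assumed figure.]  The two loops compute floor(log2) of
 * each quantity.  With vm_page_bucket_count == 65536, log1 == 16;
 * if sizeof(struct vm_object) were 128 bytes, log2 == 7 and
 * vm_page_hash_shift == 16/2 - 7 + 1 == 2.
 */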
417 if (vm_page_hash_mask & vm_page_bucket_count)
418 printf("vm_page_bootstrap: WARNING -- strange page hash\n");
419
420 vm_page_buckets = (vm_page_bucket_t *)
421 pmap_steal_memory(vm_page_bucket_count *
422 sizeof(vm_page_bucket_t));
423
424 for (i = 0; i < vm_page_bucket_count; i++) {
425 register vm_page_bucket_t *bucket = &vm_page_buckets[i];
426
427 bucket->pages = VM_PAGE_NULL;
428#if MACH_PAGE_HASH_STATS
429 bucket->cur_count = 0;
430 bucket->hi_count = 0;
431#endif /* MACH_PAGE_HASH_STATS */
432 }
433
434 /*
435 * Machine-dependent code allocates the resident page table.
436 * It uses vm_page_init to initialize the page frames.
437 * The code also returns to us the virtual space available
438 * to the kernel. We don't trust the pmap module
439 * to get the alignment right.
440 */
441
442 pmap_startup(&virtual_space_start, &virtual_space_end);
443 virtual_space_start = round_page(virtual_space_start);
444 virtual_space_end = trunc_page(virtual_space_end);
445
446 *startp = virtual_space_start;
447 *endp = virtual_space_end;
448
449 /*
450 * Compute the initial "wire" count.
451 * Up until now, the pages which have been set aside are not under
452 * the VM system's control, so although they aren't explicitly
453 * wired, they nonetheless can't be moved. At this moment,
454 * all VM managed pages are "free", courtesy of pmap_startup.
455 */
456 vm_page_wire_count = atop(mem_size) - vm_page_free_count; /* initial value */
457
458 printf("vm_page_bootstrap: %d free pages\n", vm_page_free_count);
459 vm_page_free_count_minimum = vm_page_free_count;
460}
461
462#ifndef MACHINE_PAGES
463/*
464 * We implement pmap_steal_memory and pmap_startup with the help
465 * of two simpler functions, pmap_virtual_space and pmap_next_page.
466 */
467
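/*
 * [Editor's note: descriptive comment added in this edition.]
 * pmap_virtual_space() reports the kernel virtual range available for
 * early allocations, pmap_free_pages() counts the physical pages not
 * yet spoken for, and pmap_next_page() hands back those physical page
 * frames one at a time, returning FALSE when they are exhausted.
 * Everything below is built from those primitives plus pmap_enter().
 */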
468vm_offset_t
469pmap_steal_memory(
470 vm_size_t size)
471{
472 vm_offset_t addr, vaddr, paddr;
473
474 /*
 475 * We round the size up to a multiple of the pointer size.
476 */
477
478 size = (size + sizeof (void *) - 1) &~ (sizeof (void *) - 1);
479
480 /*
481 * If this is the first call to pmap_steal_memory,
482 * we have to initialize ourself.
483 */
484
485 if (virtual_space_start == virtual_space_end) {
486 pmap_virtual_space(&virtual_space_start, &virtual_space_end);
487
488 /*
489 * The initial values must be aligned properly, and
490 * we don't trust the pmap module to do it right.
491 */
492
493 virtual_space_start = round_page(virtual_space_start);
494 virtual_space_end = trunc_page(virtual_space_end);
495 }
496
497 /*
498 * Allocate virtual memory for this request.
499 */
500
501 addr = virtual_space_start;
502 virtual_space_start += size;
503
504 kprintf("pmap_steal_memory: %08X - %08X; size=%08X\n", addr, virtual_space_start, size); /* (TEST/DEBUG) */
505
506 /*
507 * Allocate and map physical pages to back new virtual pages.
508 */
509
510 for (vaddr = round_page(addr);
511 vaddr < addr + size;
512 vaddr += PAGE_SIZE) {
513 if (!pmap_next_page(&paddr))
514 panic("pmap_steal_memory");
515
516 /*
517 * XXX Logically, these mappings should be wired,
518 * but some pmap modules barf if they are.
519 */
520
521 pmap_enter(kernel_pmap, vaddr, paddr,
522 VM_PROT_READ|VM_PROT_WRITE,
523 VM_WIMG_USE_DEFAULT, FALSE);
524 /*
525 * Account for newly stolen memory
526 */
527 vm_page_wire_count++;
528
529 }
530
531 return addr;
532}
533
534void
535pmap_startup(
536 vm_offset_t *startp,
537 vm_offset_t *endp)
538{
539 unsigned int i, npages, pages_initialized;
540 vm_page_t pages;
541 vm_offset_t paddr;
542
543 /*
544 * We calculate how many page frames we will have
545 * and then allocate the page structures in one chunk.
546 */
547
548 npages = ((PAGE_SIZE * pmap_free_pages() +
549 (round_page(virtual_space_start) - virtual_space_start)) /
550 (PAGE_SIZE + sizeof *pages));
551
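/*
 * [Editor's note: explanatory comment added in this edition; the
 * structure size is an assumed figure.]  Each usable page costs
 * PAGE_SIZE bytes of backing memory plus sizeof(struct vm_page) of
 * bookkeeping, so dividing the available bytes by
 * (PAGE_SIZE + sizeof *pages) yields the largest npages for which
 * both the pages and their vm_page structures fit.  With 4K pages
 * and a vm_page structure in the 80-90 byte range, roughly 98% of
 * the memory ends up as usable pages.
 */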
552 pages = (vm_page_t) pmap_steal_memory(npages * sizeof *pages);
553
554 /*
555 * Initialize the page frames.
556 */
557
558 for (i = 0, pages_initialized = 0; i < npages; i++) {
559 if (!pmap_next_page(&paddr))
560 break;
561
562 vm_page_init(&pages[i], paddr);
563 vm_page_pages++;
564 pages_initialized++;
565 }
566
567 /*
568 * Release pages in reverse order so that physical pages
569 * initially get allocated in ascending addresses. This keeps
570 * the devices (which must address physical memory) happy if
571 * they require several consecutive pages.
572 */
573
574 for (i = pages_initialized; i > 0; i--) {
575 vm_page_release(&pages[i - 1]);
576 }
577
578 /*
579 * We have to re-align virtual_space_start,
580 * because pmap_steal_memory has been using it.
581 */
582
583 virtual_space_start = round_page(virtual_space_start);
584
585 *startp = virtual_space_start;
586 *endp = virtual_space_end;
587}
588#endif /* MACHINE_PAGES */
589
590/*
591 * Routine: vm_page_module_init
592 * Purpose:
593 * Second initialization pass, to be done after
594 * the basic VM system is ready.
595 */
596void
597vm_page_module_init(void)
598{
599 vm_page_zone = zinit((vm_size_t) sizeof(struct vm_page),
600 0, PAGE_SIZE, "vm pages");
601
602#if ZONE_DEBUG
603 zone_debug_disable(vm_page_zone);
604#endif /* ZONE_DEBUG */
605
606 zone_change(vm_page_zone, Z_EXPAND, FALSE);
607 zone_change(vm_page_zone, Z_EXHAUST, TRUE);
608 zone_change(vm_page_zone, Z_FOREIGN, TRUE);
609
610 /*
611 * Adjust zone statistics to account for the real pages allocated
612 * in vm_page_create(). [Q: is this really what we want?]
613 */
614 vm_page_zone->count += vm_page_pages;
615 vm_page_zone->cur_size += vm_page_pages * vm_page_zone->elem_size;
616
617 mutex_init(&vm_page_alloc_lock, ETAP_VM_PAGE_ALLOC);
618 mutex_init(&vm_page_zero_fill_lock, ETAP_VM_PAGE_ALLOC);
619}
620
621/*
622 * Routine: vm_page_create
623 * Purpose:
624 * After the VM system is up, machine-dependent code
625 * may stumble across more physical memory. For example,
626 * memory that it was reserving for a frame buffer.
627 * vm_page_create turns this memory into available pages.
628 */
629
630void
631vm_page_create(
632 vm_offset_t start,
633 vm_offset_t end)
634{
635 vm_offset_t paddr;
636 vm_page_t m;
637
638 for (paddr = round_page(start);
639 paddr < trunc_page(end);
640 paddr += PAGE_SIZE) {
641 while ((m = (vm_page_t) vm_page_grab_fictitious())
642 == VM_PAGE_NULL)
643 vm_page_more_fictitious();
644
645 vm_page_init(m, paddr);
646 vm_page_pages++;
647 vm_page_release(m);
648 }
649}
650
651/*
652 * vm_page_hash:
653 *
654 * Distributes the object/offset key pair among hash buckets.
655 *
656 * NOTE: To get a good hash function, the bucket count should
657 * be a power of two.
658 */
659#define vm_page_hash(object, offset) (\
660 ( ((natural_t)(vm_offset_t)object<<vm_page_hash_shift) + (natural_t)atop(offset))\
661 & vm_page_hash_mask)
662
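/*
 * [Editor's note: worked example added in this edition; the pointer
 * value is made up.]  The hash mixes the object pointer with the page
 * index of the offset.  With vm_page_hash_shift == 2 and
 * vm_page_hash_mask == 0xffff, an object at 0x01234500 and offset
 * 0x7000 (page index 7 with 4K pages) hash to bucket
 * ((0x01234500 << 2) + 7) & 0xffff == 0x1407.
 */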
663/*
664 * vm_page_insert: [ internal use only ]
665 *
666 * Inserts the given mem entry into the object/object-page
667 * table and object list.
668 *
669 * The object must be locked.
670 */
671
672void
673vm_page_insert(
674 register vm_page_t mem,
675 register vm_object_t object,
676 register vm_object_offset_t offset)
677{
678 register vm_page_bucket_t *bucket;
679
680 XPR(XPR_VM_PAGE,
681 "vm_page_insert, object 0x%X offset 0x%X page 0x%X\n",
682 (integer_t)object, (integer_t)offset, (integer_t)mem, 0,0);
683
684 VM_PAGE_CHECK(mem);
685
686 if (mem->tabled)
687 panic("vm_page_insert");
688
689 assert(!object->internal || offset < object->size);
690
691 /* only insert "pageout" pages into "pageout" objects,
692 * and normal pages into normal objects */
693 assert(object->pageout == mem->pageout);
694
695 /*
696 * Record the object/offset pair in this page
697 */
698
699 mem->object = object;
700 mem->offset = offset;
701
702 /*
703 * Insert it into the object_object/offset hash table
704 */
705
706 bucket = &vm_page_buckets[vm_page_hash(object, offset)];
707 simple_lock(&vm_page_bucket_lock);
708 mem->next = bucket->pages;
709 bucket->pages = mem;
710#if MACH_PAGE_HASH_STATS
711 if (++bucket->cur_count > bucket->hi_count)
712 bucket->hi_count = bucket->cur_count;
713#endif /* MACH_PAGE_HASH_STATS */
714 simple_unlock(&vm_page_bucket_lock);
715
716 /*
717 * Now link into the object's list of backed pages.
718 */
719
720 queue_enter(&object->memq, mem, vm_page_t, listq);
721 mem->tabled = TRUE;
722
723 /*
724 * Show that the object has one more resident page.
725 */
726
727 object->resident_page_count++;
728}
729
730/*
731 * vm_page_replace:
732 *
733 * Exactly like vm_page_insert, except that we first
734 * remove any existing page at the given offset in object.
735 *
736 * The object and page queues must be locked.
737 */
738
739void
740vm_page_replace(
741 register vm_page_t mem,
742 register vm_object_t object,
743 register vm_object_offset_t offset)
744{
745 register vm_page_bucket_t *bucket;
746
747 VM_PAGE_CHECK(mem);
748
749 if (mem->tabled)
750 panic("vm_page_replace");
751
752 /*
753 * Record the object/offset pair in this page
754 */
755
756 mem->object = object;
757 mem->offset = offset;
758
759 /*
760 * Insert it into the object_object/offset hash table,
761 * replacing any page that might have been there.
762 */
763
764 bucket = &vm_page_buckets[vm_page_hash(object, offset)];
765 simple_lock(&vm_page_bucket_lock);
766 if (bucket->pages) {
767 vm_page_t *mp = &bucket->pages;
768 register vm_page_t m = *mp;
769 do {
770 if (m->object == object && m->offset == offset) {
771 /*
772 * Remove page from bucket and from object,
773 * and return it to the free list.
774 */
775 *mp = m->next;
776 queue_remove(&object->memq, m, vm_page_t,
777 listq);
778 m->tabled = FALSE;
779 object->resident_page_count--;
780
781 /*
782 * Return page to the free list.
783 * Note the page is not tabled now, so this
784 * won't self-deadlock on the bucket lock.
785 */
786
787 vm_page_free(m);
788 break;
789 }
790 mp = &m->next;
791 } while (m = *mp);
792 mem->next = bucket->pages;
793 } else {
794 mem->next = VM_PAGE_NULL;
795 }
796 bucket->pages = mem;
797 simple_unlock(&vm_page_bucket_lock);
798
799 /*
800 * Now link into the object's list of backed pages.
801 */
802
803 queue_enter(&object->memq, mem, vm_page_t, listq);
804 mem->tabled = TRUE;
805
806 /*
807 * And show that the object has one more resident
808 * page.
809 */
810
811 object->resident_page_count++;
812}
813
814/*
815 * vm_page_remove: [ internal use only ]
816 *
817 * Removes the given mem entry from the object/offset-page
818 * table and the object page list.
819 *
820 * The object and page must be locked.
821 */
822
823void
824vm_page_remove(
825 register vm_page_t mem)
826{
827 register vm_page_bucket_t *bucket;
828 register vm_page_t this;
829
830 XPR(XPR_VM_PAGE,
831 "vm_page_remove, object 0x%X offset 0x%X page 0x%X\n",
832 (integer_t)mem->object, (integer_t)mem->offset,
833 (integer_t)mem, 0,0);
834
835 assert(mem->tabled);
836 assert(!mem->cleaning);
837 VM_PAGE_CHECK(mem);
838
839 /*
840 * Remove from the object_object/offset hash table
841 */
842
843 bucket = &vm_page_buckets[vm_page_hash(mem->object, mem->offset)];
844 simple_lock(&vm_page_bucket_lock);
845 if ((this = bucket->pages) == mem) {
846 /* optimize for common case */
847
848 bucket->pages = mem->next;
849 } else {
850 register vm_page_t *prev;
851
852 for (prev = &this->next;
853 (this = *prev) != mem;
854 prev = &this->next)
855 continue;
856 *prev = this->next;
857 }
858#if MACH_PAGE_HASH_STATS
859 bucket->cur_count--;
860#endif /* MACH_PAGE_HASH_STATS */
861 simple_unlock(&vm_page_bucket_lock);
862
863 /*
864 * Now remove from the object's list of backed pages.
865 */
866
867 queue_remove(&mem->object->memq, mem, vm_page_t, listq);
868
869 /*
870 * And show that the object has one fewer resident
871 * page.
872 */
873
874 mem->object->resident_page_count--;
875
876 mem->tabled = FALSE;
877 mem->object = VM_OBJECT_NULL;
878 mem->offset = 0;
879}
880
881/*
882 * vm_page_lookup:
883 *
884 * Returns the page associated with the object/offset
885 * pair specified; if none is found, VM_PAGE_NULL is returned.
886 *
887 * The object must be locked. No side effects.
888 */
889
890vm_page_t
891vm_page_lookup(
892 register vm_object_t object,
893 register vm_object_offset_t offset)
894{
895 register vm_page_t mem;
896 register vm_page_bucket_t *bucket;
897
898 /*
899 * Search the hash table for this object/offset pair
900 */
901
902 bucket = &vm_page_buckets[vm_page_hash(object, offset)];
903
904 simple_lock(&vm_page_bucket_lock);
905 for (mem = bucket->pages; mem != VM_PAGE_NULL; mem = mem->next) {
906 VM_PAGE_CHECK(mem);
907 if ((mem->object == object) && (mem->offset == offset))
908 break;
909 }
910 simple_unlock(&vm_page_bucket_lock);
911 return(mem);
912}
913
914/*
915 * vm_page_rename:
916 *
917 * Move the given memory entry from its
918 * current object to the specified target object/offset.
919 *
920 * The object must be locked.
921 */
922void
923vm_page_rename(
924 register vm_page_t mem,
925 register vm_object_t new_object,
926 vm_object_offset_t new_offset)
927{
928 assert(mem->object != new_object);
929 /*
930 * Changes to mem->object require the page lock because
931 * the pageout daemon uses that lock to get the object.
932 */
933
934 XPR(XPR_VM_PAGE,
935 "vm_page_rename, new object 0x%X, offset 0x%X page 0x%X\n",
936 (integer_t)new_object, (integer_t)new_offset,
937 (integer_t)mem, 0,0);
938
939 vm_page_lock_queues();
940 vm_page_remove(mem);
941 vm_page_insert(mem, new_object, new_offset);
942 vm_page_unlock_queues();
943}
944
945/*
946 * vm_page_init:
947 *
948 * Initialize the fields in a new page.
949 * This takes a structure with random values and initializes it
950 * so that it can be given to vm_page_release or vm_page_insert.
951 */
952void
953vm_page_init(
954 vm_page_t mem,
955 vm_offset_t phys_addr)
956{
957 *mem = vm_page_template;
958 mem->phys_addr = phys_addr;
959}
960
961/*
962 * vm_page_grab_fictitious:
963 *
964 * Remove a fictitious page from the free list.
965 * Returns VM_PAGE_NULL if there are no free pages.
966 */
967int c_vm_page_grab_fictitious = 0;
968int c_vm_page_release_fictitious = 0;
969int c_vm_page_more_fictitious = 0;
970
971vm_page_t
972vm_page_grab_fictitious(void)
973{
974 register vm_page_t m;
975
976 m = (vm_page_t)zget(vm_page_zone);
977 if (m) {
978 vm_page_init(m, vm_page_fictitious_addr);
979 m->fictitious = TRUE;
980 }
981
982 c_vm_page_grab_fictitious++;
983 return m;
984}
985
986/*
987 * vm_page_release_fictitious:
988 *
989 * Release a fictitious page to the free list.
990 */
991
992void
993vm_page_release_fictitious(
994 register vm_page_t m)
995{
996 assert(!m->free);
997 assert(m->busy);
998 assert(m->fictitious);
999 assert(m->phys_addr == vm_page_fictitious_addr);
1000
1001 c_vm_page_release_fictitious++;
1002
1003 if (m->free)
1004 panic("vm_page_release_fictitious");
1005 m->free = TRUE;
1006 zfree(vm_page_zone, (vm_offset_t)m);
1007}
1008
1009/*
1010 * vm_page_more_fictitious:
1011 *
1012 * Add more fictitious pages to the free list.
1013 * Allowed to block. This routine is way intimate
1014 * with the zones code, for several reasons:
1015 * 1. we need to carve some page structures out of physical
1016 * memory before zones work, so they _cannot_ come from
1017 * the zone_map.
1018 * 2. the zone needs to be collectable in order to prevent
1019 * growth without bound. These structures are used by
1020 * the device pager (by the hundreds and thousands), as
1021 * private pages for pageout, and as blocking pages for
1022 * pagein. Temporary bursts in demand should not result in
1023 * permanent allocation of a resource.
1024 * 3. To smooth allocation humps, we allocate single pages
1025 * with kernel_memory_allocate(), and cram them into the
1026 * zone. This also allows us to initialize the vm_page_t's
1027 * on the way into the zone, so that zget() always returns
1028 * an initialized structure. The zone free element pointer
1029 * and the free page pointer are both the first item in the
1030 * vm_page_t.
1031 * 4. By having the pages in the zone pre-initialized, we need
1032 * not keep 2 levels of lists. The garbage collector simply
1033 * scans our list, and reduces physical memory usage as it
1034 * sees fit.
1035 */
1036
1037void vm_page_more_fictitious(void)
1038{
1039 extern vm_map_t zone_map;
1040 register vm_page_t m;
1041 vm_offset_t addr;
1042 kern_return_t retval;
1043 int i;
1044
1045 c_vm_page_more_fictitious++;
1046
1047 /*
1048 * Allocate a single page from the zone_map. Do not wait if no physical
1049 * pages are immediately available, and do not zero the space. We need
1050 * our own blocking lock here to prevent having multiple,
1051 * simultaneous requests from piling up on the zone_map lock. Exactly
1052 * one (of our) threads should be potentially waiting on the map lock.
1053 * If winner is not vm-privileged, then the page allocation will fail,
1054 * and it will temporarily block here in the vm_page_wait().
1055 */
1056 mutex_lock(&vm_page_alloc_lock);
1057 /*
1058 * If another thread allocated space, just bail out now.
1059 */
1060 if (zone_free_count(vm_page_zone) > 5) {
1061 /*
1062 * The number "5" is a small number that is larger than the
1063 * number of fictitious pages that any single caller will
1064 * attempt to allocate. Otherwise, a thread will attempt to
1065 * acquire a fictitious page (vm_page_grab_fictitious), fail,
1066 * release all of the resources and locks already acquired,
1067 * and then call this routine. This routine finds the pages
1068 * that the caller released, so fails to allocate new space.
1069 * The process repeats infinitely. The largest known number
1070 * of fictitious pages required in this manner is 2. 5 is
1071 * simply a somewhat larger number.
1072 */
1073 mutex_unlock(&vm_page_alloc_lock);
1074 return;
1075 }
1076
1077 if ((retval = kernel_memory_allocate(zone_map,
1078 &addr, PAGE_SIZE, VM_PROT_ALL,
1079 KMA_KOBJECT|KMA_NOPAGEWAIT)) != KERN_SUCCESS) {
1080 /*
1081 * No page was available. Tell the pageout daemon, drop the
1082 * lock to give another thread a chance at it, and
1083 * wait for the pageout daemon to make progress.
1084 */
1085 mutex_unlock(&vm_page_alloc_lock);
1086 vm_page_wait(THREAD_UNINT);
1087 return;
1088 }
1089 /*
1090 * Initialize as many vm_page_t's as will fit on this page. This
1091 * depends on the zone code disturbing ONLY the first item of
1092 * each zone element.
1093 */
1094 m = (vm_page_t)addr;
1095 for (i = PAGE_SIZE/sizeof(struct vm_page); i > 0; i--) {
1096 vm_page_init(m, vm_page_fictitious_addr);
1097 m->fictitious = TRUE;
1098 m++;
1099 }
1100 zcram(vm_page_zone, addr, PAGE_SIZE);
1101 mutex_unlock(&vm_page_alloc_lock);
1102}
1103
1104/*
1105 * vm_page_convert:
1106 *
1107 * Attempt to convert a fictitious page into a real page.
1108 */
1109
1110boolean_t
1111vm_page_convert(
1112 register vm_page_t m)
1113{
1114 register vm_page_t real_m;
1115
1116 assert(m->busy);
1117 assert(m->fictitious);
1118 assert(!m->dirty);
1119
1120 real_m = vm_page_grab();
1121 if (real_m == VM_PAGE_NULL)
1122 return FALSE;
1123
1124 m->phys_addr = real_m->phys_addr;
1125 m->fictitious = FALSE;
1126 m->no_isync = TRUE;
1127
1128 vm_page_lock_queues();
1129 if (m->active)
1130 vm_page_active_count++;
1131 else if (m->inactive)
1132 vm_page_inactive_count++;
1133 vm_page_unlock_queues();
1134
1135 real_m->phys_addr = vm_page_fictitious_addr;
1136 real_m->fictitious = TRUE;
1137
1138 vm_page_release_fictitious(real_m);
1139 return TRUE;
1140}
1141
1142/*
1143 * vm_pool_low():
1144 *
1145 * Return true if it is not likely that a non-vm_privileged thread
1146 * can get memory without blocking. Advisory only, since the
1147 * situation may change under us.
1148 */
1149int
1150vm_pool_low(void)
1151{
1152 /* No locking, at worst we will fib. */
1153 return( vm_page_free_count < vm_page_free_reserved );
1154}
1155
1156/*
1157 * vm_page_grab:
1158 *
1159 * Remove a page from the free list.
1160 * Returns VM_PAGE_NULL if the free list is too small.
1161 */
1162
1163unsigned long vm_page_grab_count = 0; /* measure demand */
1164
1165vm_page_t
1166vm_page_grab(void)
1167{
1168 register vm_page_t mem;
1169
1170 mutex_lock(&vm_page_queue_free_lock);
1171 vm_page_grab_count++;
1172
1173 /*
1174 * Optionally produce warnings if the wire or gobble
1175 * counts exceed some threshold.
1176 */
1177 if (vm_page_wire_count_warning > 0
1178 && vm_page_wire_count >= vm_page_wire_count_warning) {
1179 printf("mk: vm_page_grab(): high wired page count of %d\n",
1180 vm_page_wire_count);
1181 assert(vm_page_wire_count < vm_page_wire_count_warning);
1182 }
1183 if (vm_page_gobble_count_warning > 0
1184 && vm_page_gobble_count >= vm_page_gobble_count_warning) {
1185 printf("mk: vm_page_grab(): high gobbled page count of %d\n",
1186 vm_page_gobble_count);
1187 assert(vm_page_gobble_count < vm_page_gobble_count_warning);
1188 }
1189
1190 /*
1191 * Only let privileged threads (involved in pageout)
1192 * dip into the reserved pool.
1193 */
1194
1195 if ((vm_page_free_count < vm_page_free_reserved) &&
1196 !current_thread()->vm_privilege) {
1197 mutex_unlock(&vm_page_queue_free_lock);
1198 mem = VM_PAGE_NULL;
1199 goto wakeup_pageout;
1200 }
1201
1202 while (vm_page_queue_free == VM_PAGE_NULL) {
1203 printf("vm_page_grab: no free pages, trouble expected...\n");
1204 mutex_unlock(&vm_page_queue_free_lock);
1205 VM_PAGE_WAIT();
1206 mutex_lock(&vm_page_queue_free_lock);
1207 }
1208
1209 if (--vm_page_free_count < vm_page_free_count_minimum)
1210 vm_page_free_count_minimum = vm_page_free_count;
1211 mem = vm_page_queue_free;
1212 vm_page_queue_free = (vm_page_t) mem->pageq.next;
1213 mem->free = FALSE;
1214 mem->no_isync = TRUE;
1215 mutex_unlock(&vm_page_queue_free_lock);
1216
1217 /*
1218 * Decide if we should poke the pageout daemon.
1219 * We do this if the free count is less than the low
1220 * water mark, or if the free count is less than the high
1221 * water mark (but above the low water mark) and the inactive
1222 * count is less than its target.
1223 *
1224 * We don't have the counts locked ... if they change a little,
1225 * it doesn't really matter.
1226 */
1227
1228wakeup_pageout:
1229 if ((vm_page_free_count < vm_page_free_min) ||
1230 ((vm_page_free_count < vm_page_free_target) &&
1231 (vm_page_inactive_count < vm_page_inactive_target)))
1232 thread_wakeup((event_t) &vm_page_free_wanted);
1233
1234// dbgLog(mem->phys_addr, vm_page_free_count, vm_page_wire_count, 4); /* (TEST/DEBUG) */
1235
1236 return mem;
1237}
1238
1239/*
1240 * vm_page_release:
1241 *
1242 * Return a page to the free list.
1243 */
1244
1245void
1246vm_page_release(
1247 register vm_page_t mem)
1248{
1249 assert(!mem->private && !mem->fictitious);
1250
1251// dbgLog(mem->phys_addr, vm_page_free_count, vm_page_wire_count, 5); /* (TEST/DEBUG) */
1252
1253 mutex_lock(&vm_page_queue_free_lock);
1254 if (mem->free)
1255 panic("vm_page_release");
1256 mem->free = TRUE;
1257 mem->pageq.next = (queue_entry_t) vm_page_queue_free;
1258 vm_page_queue_free = mem;
1259 vm_page_free_count++;
1260
1261 /*
1262 * Check if we should wake up someone waiting for page.
1263 * But don't bother waking them unless they can allocate.
1264 *
1265 * We wakeup only one thread, to prevent starvation.
1266 * Because the scheduling system handles wait queues FIFO,
1267 * if we wakeup all waiting threads, one greedy thread
1268 * can starve multiple niceguy threads. When the threads
 1269 * all wake up, the greedy thread runs first, grabs the page,
1270 * and waits for another page. It will be the first to run
1271 * when the next page is freed.
1272 *
1273 * However, there is a slight danger here.
1274 * The thread we wake might not use the free page.
1275 * Then the other threads could wait indefinitely
1276 * while the page goes unused. To forestall this,
1277 * the pageout daemon will keep making free pages
1278 * as long as vm_page_free_wanted is non-zero.
1279 */
1280
1281 if ((vm_page_free_wanted > 0) &&
1282 (vm_page_free_count >= vm_page_free_reserved)) {
1283 vm_page_free_wanted--;
1284 thread_wakeup_one((event_t) &vm_page_free_count);
1285 }
1286
1287 mutex_unlock(&vm_page_queue_free_lock);
1288}
1289
1290#define VM_PAGEOUT_DEADLOCK_TIMEOUT 3
1291
1292/*
1293 * vm_page_wait:
1294 *
1295 * Wait for a page to become available.
1296 * If there are plenty of free pages, then we don't sleep.
1297 *
1298 * Returns:
1299 * TRUE: There may be another page, try again
1300 * FALSE: We were interrupted out of our wait, don't try again
1301 */
1302
1303boolean_t
1304vm_page_wait(
1305 int interruptible )
1306{
1307 /*
1308 * We can't use vm_page_free_reserved to make this
1309 * determination. Consider: some thread might
1310 * need to allocate two pages. The first allocation
1311 * succeeds, the second fails. After the first page is freed,
1312 * a call to vm_page_wait must really block.
1313 */
1314 uint64_t abstime;
1315 kern_return_t wait_result;
1316 kern_return_t kr;
1317 int need_wakeup = 0;
1318
1319 mutex_lock(&vm_page_queue_free_lock);
1320 if (vm_page_free_count < vm_page_free_target) {
1321 if (vm_page_free_wanted++ == 0)
1322 need_wakeup = 1;
1323 wait_result = assert_wait((event_t)&vm_page_free_count,
1324 interruptible);
1325 mutex_unlock(&vm_page_queue_free_lock);
1326 counter(c_vm_page_wait_block++);
1327
1328 if (need_wakeup)
1329 thread_wakeup((event_t)&vm_page_free_wanted);
1330
1331 if (wait_result == THREAD_WAITING) {
1332 clock_interval_to_absolutetime_interval(
1333 VM_PAGEOUT_DEADLOCK_TIMEOUT,
1334 NSEC_PER_SEC, &abstime);
1335 clock_absolutetime_interval_to_deadline(
1336 abstime, &abstime);
1337 thread_set_timer_deadline(abstime);
1338 wait_result = thread_block(THREAD_CONTINUE_NULL);
1339
1340 if(wait_result == THREAD_TIMED_OUT) {
1341 kr = vm_pageout_emergency_availability_request();
1342 return TRUE;
1343 } else {
1344 thread_cancel_timer();
1345 }
1346 }
1347
1348 return(wait_result == THREAD_AWAKENED);
1349 } else {
1350 mutex_unlock(&vm_page_queue_free_lock);
1351 return TRUE;
1352 }
1353}
1354
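/*
 * [Editor's note: illustrative sketch added in this edition, not part
 * of the original source.]  The usual caller pattern (also used by
 * vm_page_part_zero_fill() below) retries vm_page_grab() until a
 * page arrives:
 */
#if 0	/* illustrative only; not compiled */
static vm_page_t
example_grab_page_blocking(void)
{
	vm_page_t m;

	while ((m = vm_page_grab()) == VM_PAGE_NULL)
		vm_page_wait(THREAD_UNINT);
	return m;
}
#endif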
1355/*
1356 * vm_page_alloc:
1357 *
1358 * Allocate and return a memory cell associated
1359 * with this VM object/offset pair.
1360 *
1361 * Object must be locked.
1362 */
1363
1364vm_page_t
1365vm_page_alloc(
1366 vm_object_t object,
1367 vm_object_offset_t offset)
1368{
1369 register vm_page_t mem;
1370
1371 mem = vm_page_grab();
1372 if (mem == VM_PAGE_NULL)
1373 return VM_PAGE_NULL;
1374
1375 vm_page_insert(mem, object, offset);
1376
1377 return(mem);
1378}
1379
1380counter(unsigned int c_laundry_pages_freed = 0;)
1381
1382int vm_pagein_cluster_unused = 0;
1383boolean_t vm_page_free_verify = FALSE;
1384/*
1385 * vm_page_free:
1386 *
1387 * Returns the given page to the free list,
1388 * disassociating it with any VM object.
1389 *
1390 * Object and page queues must be locked prior to entry.
1391 */
1392void
1393vm_page_free(
1394 register vm_page_t mem)
1395{
1396 vm_object_t object = mem->object;
1397
1398 assert(!mem->free);
1399 assert(!mem->cleaning);
1400 assert(!mem->pageout);
1401 assert(!vm_page_free_verify || pmap_verify_free(mem->phys_addr));
1402
1403 if (mem->tabled)
1404 vm_page_remove(mem); /* clears tabled, object, offset */
1405 VM_PAGE_QUEUES_REMOVE(mem); /* clears active or inactive */
1406
1407 if (mem->clustered) {
1408 mem->clustered = FALSE;
1409 vm_pagein_cluster_unused++;
1410 }
1411
1412 if (mem->wire_count) {
1413 if (!mem->private && !mem->fictitious)
1414 vm_page_wire_count--;
1415 mem->wire_count = 0;
1416 assert(!mem->gobbled);
1417 } else if (mem->gobbled) {
1418 if (!mem->private && !mem->fictitious)
1419 vm_page_wire_count--;
1420 vm_page_gobble_count--;
1421 }
1422 mem->gobbled = FALSE;
1423
1424 if (mem->laundry) {
1425 extern int vm_page_laundry_min;
1426 vm_page_laundry_count--;
1427 mem->laundry = FALSE; /* laundry is now clear */
1428 counter(++c_laundry_pages_freed);
1429 if (vm_page_laundry_count < vm_page_laundry_min) {
1430 vm_page_laundry_min = 0;
1431 thread_wakeup((event_t) &vm_page_laundry_count);
1432 }
1433 }
1434
1435 mem->discard_request = FALSE;
1436
1437 PAGE_WAKEUP(mem); /* clears wanted */
1438
1439 if (mem->absent)
1440 vm_object_absent_release(object);
1441
1442 /* Some of these may be unnecessary */
1443 mem->page_lock = 0;
1444 mem->unlock_request = 0;
1445 mem->busy = TRUE;
1446 mem->absent = FALSE;
1447 mem->error = FALSE;
1448 mem->dirty = FALSE;
1449 mem->precious = FALSE;
1450 mem->reference = FALSE;
1451
1452 mem->page_error = KERN_SUCCESS;
1453
1454 if (mem->private) {
1455 mem->private = FALSE;
1456 mem->fictitious = TRUE;
1457 mem->phys_addr = vm_page_fictitious_addr;
1458 }
1459 if (mem->fictitious) {
1460 vm_page_release_fictitious(mem);
1461 } else {
1462 /* depends on the queues lock */
1463 if(mem->zero_fill) {
1464 vm_zf_count-=1;
1465 mem->zero_fill = FALSE;
1466 }
1467 vm_page_init(mem, mem->phys_addr);
1468 vm_page_release(mem);
1469 }
1470}
1471
1472/*
1473 * vm_page_wire:
1474 *
1475 * Mark this page as wired down by yet
1476 * another map, removing it from paging queues
1477 * as necessary.
1478 *
1479 * The page's object and the page queues must be locked.
1480 */
1481void
1482vm_page_wire(
1483 register vm_page_t mem)
1484{
1485
1486// dbgLog(current_act(), mem->offset, mem->object, 1); /* (TEST/DEBUG) */
1487
1488 VM_PAGE_CHECK(mem);
1489
1490 if (mem->wire_count == 0) {
1491 VM_PAGE_QUEUES_REMOVE(mem);
1492 if (!mem->private && !mem->fictitious && !mem->gobbled)
1493 vm_page_wire_count++;
1494 if (mem->gobbled)
1495 vm_page_gobble_count--;
1496 mem->gobbled = FALSE;
1497 if(mem->zero_fill) {
1498 /* depends on the queues lock */
1499 vm_zf_count-=1;
1500 mem->zero_fill = FALSE;
1501 }
1502 }
1503 assert(!mem->gobbled);
1504 mem->wire_count++;
1505}
1506
1507/*
1508 * vm_page_gobble:
1509 *
1510 * Mark this page as consumed by the vm/ipc/xmm subsystems.
1511 *
1512 * Called only for freshly vm_page_grab()ed pages - w/ nothing locked.
1513 */
1514void
1515vm_page_gobble(
1516 register vm_page_t mem)
1517{
1518 vm_page_lock_queues();
1519 VM_PAGE_CHECK(mem);
1520
1521 assert(!mem->gobbled);
1522 assert(mem->wire_count == 0);
1523
1524 if (!mem->gobbled && mem->wire_count == 0) {
1525 if (!mem->private && !mem->fictitious)
1526 vm_page_wire_count++;
1527 }
1528 vm_page_gobble_count++;
1529 mem->gobbled = TRUE;
1530 vm_page_unlock_queues();
1531}
1532
1533/*
1534 * vm_page_unwire:
1535 *
1536 * Release one wiring of this page, potentially
1537 * enabling it to be paged again.
1538 *
1539 * The page's object and the page queues must be locked.
1540 */
1541void
1542vm_page_unwire(
1543 register vm_page_t mem)
1544{
1545
1546// dbgLog(current_act(), mem->offset, mem->object, 0); /* (TEST/DEBUG) */
1547
1548 VM_PAGE_CHECK(mem);
1549 assert(mem->wire_count > 0);
1550
1551 if (--mem->wire_count == 0) {
1552 assert(!mem->private && !mem->fictitious);
1553 vm_page_wire_count--;
1554 queue_enter(&vm_page_queue_active, mem, vm_page_t, pageq);
1555 vm_page_active_count++;
1556 mem->active = TRUE;
1557 mem->reference = TRUE;
1558 }
1559}
1560
1561/*
1562 * vm_page_deactivate:
1563 *
1564 * Returns the given page to the inactive list,
1565 * indicating that no physical maps have access
1566 * to this page. [Used by the physical mapping system.]
1567 *
1568 * The page queues must be locked.
1569 */
1570void
1571vm_page_deactivate(
1572 register vm_page_t m)
1573{
1574 VM_PAGE_CHECK(m);
1575
1576// dbgLog(m->phys_addr, vm_page_free_count, vm_page_wire_count, 6); /* (TEST/DEBUG) */
1577
1578 /*
1579 * This page is no longer very interesting. If it was
1580 * interesting (active or inactive/referenced), then we
1581 * clear the reference bit and (re)enter it in the
1582 * inactive queue. Note wired pages should not have
1583 * their reference bit cleared.
1584 */
1585 if (m->gobbled) { /* can this happen? */
1586 assert(m->wire_count == 0);
1587 if (!m->private && !m->fictitious)
1588 vm_page_wire_count--;
1589 vm_page_gobble_count--;
1590 m->gobbled = FALSE;
1591 }
1592 if (m->private || (m->wire_count != 0))
1593 return;
1594 if (m->active || (m->inactive && m->reference)) {
1595 if (!m->fictitious && !m->absent)
1596 pmap_clear_reference(m->phys_addr);
1597 m->reference = FALSE;
1598 VM_PAGE_QUEUES_REMOVE(m);
1599 }
1600 if (m->wire_count == 0 && !m->inactive) {
1601 m->page_ticket = vm_page_ticket;
1602 vm_page_ticket_roll++;
1603
1604 if(vm_page_ticket_roll == VM_PAGE_TICKETS_IN_ROLL) {
1605 vm_page_ticket_roll = 0;
1606 if(vm_page_ticket == VM_PAGE_TICKET_ROLL_IDS)
1607 vm_page_ticket= 0;
1608 else
1609 vm_page_ticket++;
1610 }
1611
1612 if(m->zero_fill) {
1613 queue_enter(&vm_page_queue_zf, m, vm_page_t, pageq);
1614 } else {
1615 queue_enter(&vm_page_queue_inactive,
1616 m, vm_page_t, pageq);
1617 }
1618
1619 m->inactive = TRUE;
1620 if (!m->fictitious)
1621 vm_page_inactive_count++;
1622 }
1623}
1624
1625/*
1626 * vm_page_activate:
1627 *
1628 * Put the specified page on the active list (if appropriate).
1629 *
1630 * The page queues must be locked.
1631 */
1632
1633void
1634vm_page_activate(
1635 register vm_page_t m)
1636{
1637 VM_PAGE_CHECK(m);
1638
1639 if (m->gobbled) {
1640 assert(m->wire_count == 0);
1641 if (!m->private && !m->fictitious)
1642 vm_page_wire_count--;
1643 vm_page_gobble_count--;
1644 m->gobbled = FALSE;
1645 }
1646 if (m->private)
1647 return;
1648
1649 if (m->inactive) {
1650 if (m->zero_fill) {
1651 queue_remove(&vm_page_queue_zf, m, vm_page_t, pageq);
1652 } else {
1653 queue_remove(&vm_page_queue_inactive,
1654 m, vm_page_t, pageq);
1655 }
1656 if (!m->fictitious)
1657 vm_page_inactive_count--;
1658 m->inactive = FALSE;
1659 }
1660 if (m->wire_count == 0) {
1661 if (m->active)
1662 panic("vm_page_activate: already active");
1663
1664 queue_enter(&vm_page_queue_active, m, vm_page_t, pageq);
1665 m->active = TRUE;
1666 m->reference = TRUE;
1667 if (!m->fictitious)
1668 vm_page_active_count++;
1669 }
1670}
1671
1672/*
1673 * vm_page_part_zero_fill:
1674 *
1675 * Zero-fill a part of the page.
1676 */
1677void
1678vm_page_part_zero_fill(
1679 vm_page_t m,
1680 vm_offset_t m_pa,
1681 vm_size_t len)
1682{
1683 vm_page_t tmp;
1684
1685 VM_PAGE_CHECK(m);
1686#ifdef PMAP_ZERO_PART_PAGE_IMPLEMENTED
1687 pmap_zero_part_page(m->phys_addr, m_pa, len);
1688#else
1689 while (1) {
1690 tmp = vm_page_grab();
1691 if (tmp == VM_PAGE_NULL) {
1692 vm_page_wait(THREAD_UNINT);
1693 continue;
1694 }
1695 break;
1696 }
1697 vm_page_zero_fill(tmp);
1698 if(m_pa != 0) {
1699 vm_page_part_copy(m, 0, tmp, 0, m_pa);
1700 }
1701 if((m_pa + len) < PAGE_SIZE) {
1702 vm_page_part_copy(m, m_pa + len, tmp,
1703 m_pa + len, PAGE_SIZE - (m_pa + len));
1704 }
1705 vm_page_copy(tmp,m);
1706 vm_page_lock_queues();
1707 vm_page_free(tmp);
1708 vm_page_unlock_queues();
1709#endif
1710
1711}
1712
1713/*
1714 * vm_page_zero_fill:
1715 *
1716 * Zero-fill the specified page.
1717 */
1718void
1719vm_page_zero_fill(
1720 vm_page_t m)
1721{
1722 XPR(XPR_VM_PAGE,
1723 "vm_page_zero_fill, object 0x%X offset 0x%X page 0x%X\n",
1724 (integer_t)m->object, (integer_t)m->offset, (integer_t)m, 0,0);
1725
1726 VM_PAGE_CHECK(m);
1727
1728 pmap_zero_page(m->phys_addr);
1729}
1730
1731/*
1732 * vm_page_part_copy:
1733 *
1734 * copy part of one page to another
1735 */
1736
1737void
1738vm_page_part_copy(
1739 vm_page_t src_m,
1740 vm_offset_t src_pa,
1741 vm_page_t dst_m,
1742 vm_offset_t dst_pa,
1743 vm_size_t len)
1744{
1745 VM_PAGE_CHECK(src_m);
1746 VM_PAGE_CHECK(dst_m);
1747
1748 pmap_copy_part_page(src_m->phys_addr, src_pa,
1749 dst_m->phys_addr, dst_pa, len);
1750}
1751
1752/*
1753 * vm_page_copy:
1754 *
1755 * Copy one page to another
1756 */
1757
1758void
1759vm_page_copy(
1760 vm_page_t src_m,
1761 vm_page_t dest_m)
1762{
1763 XPR(XPR_VM_PAGE,
1764 "vm_page_copy, object 0x%X offset 0x%X to object 0x%X offset 0x%X\n",
1765 (integer_t)src_m->object, src_m->offset,
1766 (integer_t)dest_m->object, dest_m->offset,
1767 0);
1768
1769 VM_PAGE_CHECK(src_m);
1770 VM_PAGE_CHECK(dest_m);
1771
1772 pmap_copy_page(src_m->phys_addr, dest_m->phys_addr);
1773}
1774
1775/*
1776 * Currently, this is a primitive allocator that grabs
1777 * free pages from the system, sorts them by physical
1778 * address, then searches for a region large enough to
1779 * satisfy the user's request.
1780 *
1781 * Additional levels of effort:
1782 * + steal clean active/inactive pages
1783 * + force pageouts of dirty pages
1784 * + maintain a map of available physical
1785 * memory
1786 */
1787
1788#define SET_NEXT_PAGE(m,n) ((m)->pageq.next = (struct queue_entry *) (n))
1789
1790#if MACH_ASSERT
1791int vm_page_verify_contiguous(
1792 vm_page_t pages,
1793 unsigned int npages);
1794#endif /* MACH_ASSERT */
1795
1796cpm_counter(unsigned int vpfls_pages_handled = 0;)
1797cpm_counter(unsigned int vpfls_head_insertions = 0;)
1798cpm_counter(unsigned int vpfls_tail_insertions = 0;)
1799cpm_counter(unsigned int vpfls_general_insertions = 0;)
1800cpm_counter(unsigned int vpfc_failed = 0;)
1801cpm_counter(unsigned int vpfc_satisfied = 0;)
1802
1803/*
1804 * Sort free list by ascending physical address,
1805 * using a not-particularly-bright sort algorithm.
1806 * Caller holds vm_page_queue_free_lock.
1807 */
1808static void
1809vm_page_free_list_sort(void)
1810{
1811 vm_page_t sort_list;
1812 vm_page_t sort_list_end;
1813 vm_page_t m, m1, *prev, next_m;
1814 vm_offset_t addr;
1815#if MACH_ASSERT
1816 unsigned int npages;
1817 int old_free_count;
1818#endif /* MACH_ASSERT */
1819
1820#if MACH_ASSERT
1821 /*
1822 * Verify pages in the free list..
1823 */
1824 npages = 0;
1825 for (m = vm_page_queue_free; m != VM_PAGE_NULL; m = NEXT_PAGE(m))
1826 ++npages;
1827 if (npages != vm_page_free_count)
1828 panic("vm_sort_free_list: prelim: npages %d free_count %d",
1829 npages, vm_page_free_count);
1830 old_free_count = vm_page_free_count;
1831#endif /* MACH_ASSERT */
1832
1833 sort_list = sort_list_end = vm_page_queue_free;
1834 m = NEXT_PAGE(vm_page_queue_free);
1835 SET_NEXT_PAGE(vm_page_queue_free, VM_PAGE_NULL);
1836 cpm_counter(vpfls_pages_handled = 0);
1837 while (m != VM_PAGE_NULL) {
1838 cpm_counter(++vpfls_pages_handled);
1839 next_m = NEXT_PAGE(m);
1840 if (m->phys_addr < sort_list->phys_addr) {
1841 cpm_counter(++vpfls_head_insertions);
1842 SET_NEXT_PAGE(m, sort_list);
1843 sort_list = m;
1844 } else if (m->phys_addr > sort_list_end->phys_addr) {
1845 cpm_counter(++vpfls_tail_insertions);
1846 SET_NEXT_PAGE(sort_list_end, m);
1847 SET_NEXT_PAGE(m, VM_PAGE_NULL);
1848 sort_list_end = m;
1849 } else {
1850 cpm_counter(++vpfls_general_insertions);
1851 /* general sorted list insertion */
1852 prev = &sort_list;
1853 for (m1=sort_list; m1!=VM_PAGE_NULL; m1=NEXT_PAGE(m1)) {
1854 if (m1->phys_addr > m->phys_addr) {
1855 if (*prev != m1)
1856 panic("vm_sort_free_list: ugh");
1857 SET_NEXT_PAGE(m, *prev);
1858 *prev = m;
1859 break;
1860 }
1861 prev = (vm_page_t *) &m1->pageq.next;
1862 }
1863 }
1864 m = next_m;
1865 }
1866
1867#if MACH_ASSERT
1868 /*
1869 * Verify that pages are sorted into ascending order.
1870 */
1871 for (m = sort_list, npages = 0; m != VM_PAGE_NULL; m = NEXT_PAGE(m)) {
1872 if (m != sort_list &&
1873 m->phys_addr <= addr) {
1874 printf("m 0x%x addr 0x%x\n", m, addr);
1875 panic("vm_sort_free_list");
1876 }
1877 addr = m->phys_addr;
1878 ++npages;
1879 }
1880 if (old_free_count != vm_page_free_count)
1881 panic("vm_sort_free_list: old_free %d free_count %d",
1882 old_free_count, vm_page_free_count);
1883 if (npages != vm_page_free_count)
1884 panic("vm_sort_free_list: npages %d free_count %d",
1885 npages, vm_page_free_count);
1886#endif /* MACH_ASSERT */
1887
1888 vm_page_queue_free = sort_list;
1889}
1890
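/*
 * [Editor's note: descriptive comment added in this edition.]  The
 * routine above is an insertion sort over the singly linked free
 * list, with fast paths for new head and tail elements; its worst
 * case is quadratic in vm_page_free_count, which is presumably
 * acceptable only because cpm_allocate() is an infrequent operation.
 */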
1891
1892#if MACH_ASSERT
1893/*
1894 * Check that the list of pages is ordered by
1895 * ascending physical address and has no holes.
1896 */
1897int
1898vm_page_verify_contiguous(
1899 vm_page_t pages,
1900 unsigned int npages)
1901{
1902 register vm_page_t m;
1903 unsigned int page_count;
1904 vm_offset_t prev_addr;
1905
1906 prev_addr = pages->phys_addr;
1907 page_count = 1;
1908 for (m = NEXT_PAGE(pages); m != VM_PAGE_NULL; m = NEXT_PAGE(m)) {
1909 if (m->phys_addr != prev_addr + page_size) {
1910 printf("m 0x%x prev_addr 0x%x, current addr 0x%x\n",
1911 m, prev_addr, m->phys_addr);
1912 printf("pages 0x%x page_count %d\n", pages, page_count);
1913 panic("vm_page_verify_contiguous: not contiguous!");
1914 }
1915 prev_addr = m->phys_addr;
1916 ++page_count;
1917 }
1918 if (page_count != npages) {
1919 printf("pages 0x%x actual count 0x%x but requested 0x%x\n",
1920 pages, page_count, npages);
1921 panic("vm_page_verify_contiguous: count error");
1922 }
1923 return 1;
1924}
1925#endif /* MACH_ASSERT */
1926
1927
1928/*
1929 * Find a region large enough to contain at least npages
1930 * of contiguous physical memory.
1931 *
1932 * Requirements:
1933 * - Called while holding vm_page_queue_free_lock.
1934 * - Doesn't respect vm_page_free_reserved; caller
1935 * must not ask for more pages than are legal to grab.
1936 *
1937 * Returns a pointer to a list of gobbled pages or VM_PAGE_NULL.
1938 *
1939 */
1940static vm_page_t
1941vm_page_find_contiguous(
1942 int npages)
1943{
1944 vm_page_t m, *contig_prev, *prev_ptr;
1945 vm_offset_t prev_addr;
1946 unsigned int contig_npages;
1947 vm_page_t list;
1948
1949 if (npages < 1)
1950 return VM_PAGE_NULL;
1951
1952 prev_addr = vm_page_queue_free->phys_addr - (page_size + 1);
1953 prev_ptr = &vm_page_queue_free;
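/*
 * [Editor's note: descriptive comment added in this edition.]
 * Seeding prev_addr at (first phys_addr - page_size - 1) guarantees
 * that the first iteration fails the contiguity test below, starting
 * a fresh run with contig_npages == 0 and contig_prev pointing at the
 * head of the free list.
 */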
1954 for (m = vm_page_queue_free; m != VM_PAGE_NULL; m = NEXT_PAGE(m)) {
1955
1956 if (m->phys_addr != prev_addr + page_size) {
1957 /*
1958 * Whoops! Pages aren't contiguous. Start over.
1959 */
1960 contig_npages = 0;
1961 contig_prev = prev_ptr;
1962 }
1963
1964 if (++contig_npages == npages) {
1965 /*
1966 * Chop these pages out of the free list.
1967 * Mark them all as gobbled.
1968 */
1969 list = *contig_prev;
1970 *contig_prev = NEXT_PAGE(m);
1971 SET_NEXT_PAGE(m, VM_PAGE_NULL);
1972 for (m = list; m != VM_PAGE_NULL; m = NEXT_PAGE(m)) {
1973 assert(m->free);
1974 assert(!m->wanted);
1975 m->free = FALSE;
1976 m->no_isync = TRUE;
1977 m->gobbled = TRUE;
1978 }
1979 vm_page_free_count -= npages;
1980 if (vm_page_free_count < vm_page_free_count_minimum)
1981 vm_page_free_count_minimum = vm_page_free_count;
1982 vm_page_wire_count += npages;
1983 vm_page_gobble_count += npages;
1984 cpm_counter(++vpfc_satisfied);
1985 assert(vm_page_verify_contiguous(list, contig_npages));
1986 return list;
1987 }
1988
1989 assert(contig_npages < npages);
1990 prev_ptr = (vm_page_t *) &m->pageq.next;
1991 prev_addr = m->phys_addr;
1992 }
1993 cpm_counter(++vpfc_failed);
1994 return VM_PAGE_NULL;
1995}
1996
1997/*
1998 * Allocate a list of contiguous, wired pages.
1999 */
2000kern_return_t
2001cpm_allocate(
2002 vm_size_t size,
2003 vm_page_t *list,
2004 boolean_t wire)
2005{
2006 register vm_page_t m;
2007 vm_page_t *first_contig;
2008 vm_page_t free_list, pages;
2009 unsigned int npages, n1pages;
2010 int vm_pages_available;
2011
2012 if (size % page_size != 0)
2013 return KERN_INVALID_ARGUMENT;
2014
2015 vm_page_lock_queues();
2016 mutex_lock(&vm_page_queue_free_lock);
2017
2018 /*
2019 * Should also take active and inactive pages
2020 * into account... One day...
2021 */
2022 vm_pages_available = vm_page_free_count - vm_page_free_reserved;
2023
2024 if (size > vm_pages_available * page_size) {
2025 mutex_unlock(&vm_page_queue_free_lock);
2026 return KERN_RESOURCE_SHORTAGE;
2027 }
2028
2029 vm_page_free_list_sort();
2030
2031 npages = size / page_size;
2032
2033 /*
2034 * Obtain a pointer to a subset of the free
2035 * list large enough to satisfy the request;
2036 * the region will be physically contiguous.
2037 */
2038 pages = vm_page_find_contiguous(npages);
2039 if (pages == VM_PAGE_NULL) {
2040 mutex_unlock(&vm_page_queue_free_lock);
2041 vm_page_unlock_queues();
2042 return KERN_NO_SPACE;
2043 }
2044
2045 mutex_unlock(&vm_page_queue_free_lock);
2046
2047 /*
2048 * Walk the returned list, wiring the pages.
2049 */
2050 if (wire == TRUE)
2051 for (m = pages; m != VM_PAGE_NULL; m = NEXT_PAGE(m)) {
2052 /*
2053 * Essentially inlined vm_page_wire.
2054 */
2055 assert(!m->active);
2056 assert(!m->inactive);
2057 assert(!m->private);
2058 assert(!m->fictitious);
2059 assert(m->wire_count == 0);
2060 assert(m->gobbled);
2061 m->gobbled = FALSE;
2062 m->wire_count++;
2063 --vm_page_gobble_count;
2064 }
2065 vm_page_unlock_queues();
2066
2067 /*
2068 * The CPM pages should now be available and
2069 * ordered by ascending physical address.
2070 */
2071 assert(vm_page_verify_contiguous(pages, npages));
2072
2073 *list = pages;
2074 return KERN_SUCCESS;
2075}
2076
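/*
 * [Editor's note: illustrative sketch added in this edition, not part
 * of the original source.]  A hypothetical caller needing 16
 * physically contiguous, wired pages would use cpm_allocate() along
 * these lines and walk the returned list with NEXT_PAGE(); the pages
 * come back ordered by ascending phys_addr:
 */
#if 0	/* illustrative only; not compiled */
static kern_return_t
example_grab_contiguous(vm_page_t *out_pages)
{
	vm_page_t	pages, m;
	kern_return_t	kr;

	kr = cpm_allocate((vm_size_t)(16 * page_size), &pages, TRUE);
	if (kr != KERN_SUCCESS)
		return kr;

	for (m = pages; m != VM_PAGE_NULL; m = NEXT_PAGE(m))
		assert(m->wire_count == 1 && !m->free);

	*out_pages = pages;
	return KERN_SUCCESS;
}
#endif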
2077
2078#include <mach_vm_debug.h>
2079#if MACH_VM_DEBUG
2080
2081#include <mach_debug/hash_info.h>
2082#include <vm/vm_debug.h>
2083
2084/*
2085 * Routine: vm_page_info
2086 * Purpose:
2087 * Return information about the global VP table.
2088 * Fills the buffer with as much information as possible
2089 * and returns the desired size of the buffer.
2090 * Conditions:
2091 * Nothing locked. The caller should provide
2092 * possibly-pageable memory.
2093 */
2094
2095unsigned int
2096vm_page_info(
2097 hash_info_bucket_t *info,
2098 unsigned int count)
2099{
2100 int i;
2101
2102 if (vm_page_bucket_count < count)
2103 count = vm_page_bucket_count;
2104
2105 for (i = 0; i < count; i++) {
2106 vm_page_bucket_t *bucket = &vm_page_buckets[i];
2107 unsigned int bucket_count = 0;
2108 vm_page_t m;
2109
2110 simple_lock(&vm_page_bucket_lock);
2111 for (m = bucket->pages; m != VM_PAGE_NULL; m = m->next)
2112 bucket_count++;
2113 simple_unlock(&vm_page_bucket_lock);
2114
2115 /* don't touch pageable memory while holding locks */
2116 info[i].hib_count = bucket_count;
2117 }
2118
2119 return vm_page_bucket_count;
2120}
2121#endif /* MACH_VM_DEBUG */
2122
2123#include <mach_kdb.h>
2124#if MACH_KDB
2125
2126#include <ddb/db_output.h>
2127#include <vm/vm_print.h>
2128#define printf kdbprintf
2129
2130/*
2131 * Routine: vm_page_print [exported]
2132 */
2133void
2134vm_page_print(
2135 vm_page_t p)
2136{
2137 extern db_indent;
2138
2139 iprintf("page 0x%x\n", p);
2140
2141 db_indent += 2;
2142
2143 iprintf("object=0x%x", p->object);
2144 printf(", offset=0x%x", p->offset);
2145 printf(", wire_count=%d", p->wire_count);
2146
2147 iprintf("%sinactive, %sactive, %sgobbled, %slaundry, %sfree, %sref, %sdiscard\n",
2148 (p->inactive ? "" : "!"),
2149 (p->active ? "" : "!"),
2150 (p->gobbled ? "" : "!"),
2151 (p->laundry ? "" : "!"),
2152 (p->free ? "" : "!"),
2153 (p->reference ? "" : "!"),
2154 (p->discard_request ? "" : "!"));
2155 iprintf("%sbusy, %swanted, %stabled, %sfictitious, %sprivate, %sprecious\n",
2156 (p->busy ? "" : "!"),
2157 (p->wanted ? "" : "!"),
2158 (p->tabled ? "" : "!"),
2159 (p->fictitious ? "" : "!"),
2160 (p->private ? "" : "!"),
2161 (p->precious ? "" : "!"));
2162 iprintf("%sabsent, %serror, %sdirty, %scleaning, %spageout, %sclustered\n",
2163 (p->absent ? "" : "!"),
2164 (p->error ? "" : "!"),
2165 (p->dirty ? "" : "!"),
2166 (p->cleaning ? "" : "!"),
2167 (p->pageout ? "" : "!"),
2168 (p->clustered ? "" : "!"));
2169 iprintf("%slock_supplied, %soverwriting, %srestart, %sunusual\n",
2170 (p->lock_supplied ? "" : "!"),
2171 (p->overwriting ? "" : "!"),
2172 (p->restart ? "" : "!"),
2173 (p->unusual ? "" : "!"));
2174
2175 iprintf("phys_addr=0x%x", p->phys_addr);
2176 printf(", page_error=0x%x", p->page_error);
2177 printf(", page_lock=0x%x", p->page_lock);
2178 printf(", unlock_request=%d\n", p->unlock_request);
2179
2180 db_indent -= 2;
2181}
2182#endif /* MACH_KDB */