[apple/xnu.git] / osfmk / vm / vm_resident.c (git blame, xnu-792.21.3)
1c79356b 1/*
91447636 2 * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved.
1c79356b 3 *
8f6c56a5 4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
1c79356b 5 *
8f6c56a5
A
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
8ad349bb 24 * limitations under the License.
8f6c56a5
A
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
1c79356b
A
27 */
28/*
29 * @OSF_COPYRIGHT@
30 */
31/*
32 * Mach Operating System
33 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
34 * All Rights Reserved.
35 *
36 * Permission to use, copy, modify and distribute this software and its
37 * documentation is hereby granted, provided that both the copyright
38 * notice and this permission notice appear in all copies of the
39 * software, derivative works or modified versions, and any portions
40 * thereof, and that both notices appear in supporting documentation.
41 *
42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45 *
46 * Carnegie Mellon requests users of this software to return to
47 *
48 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
49 * School of Computer Science
50 * Carnegie Mellon University
51 * Pittsburgh PA 15213-3890
52 *
53 * any improvements or extensions that they make and grant Carnegie Mellon
54 * the rights to redistribute these changes.
55 */
56/*
57 */
58/*
59 * File: vm/vm_page.c
60 * Author: Avadis Tevanian, Jr., Michael Wayne Young
61 *
62 * Resident memory management module.
63 */
64
91447636
A
65#include <debug.h>
66
9bccf70c 67#include <mach/clock_types.h>
1c79356b
A
68#include <mach/vm_prot.h>
69#include <mach/vm_statistics.h>
70#include <kern/counters.h>
71#include <kern/sched_prim.h>
72#include <kern/task.h>
73#include <kern/thread.h>
74#include <kern/zalloc.h>
75#include <kern/xpr.h>
76#include <vm/pmap.h>
77#include <vm/vm_init.h>
78#include <vm/vm_map.h>
79#include <vm/vm_page.h>
80#include <vm/vm_pageout.h>
81#include <vm/vm_kern.h> /* kernel_memory_allocate() */
82#include <kern/misc_protos.h>
83#include <zone_debug.h>
84#include <vm/cpm.h>
55e303ae
A
85#include <ppc/mappings.h> /* (BRINGUP) */
86#include <pexpert/pexpert.h> /* (BRINGUP) */
87
91447636 88#include <vm/vm_protos.h>
1c79356b 89
0b4e3aa0
A
90/* Variables used to indicate the relative age of pages in the
91 * inactive list
92 */
93
91447636
A
94unsigned int vm_page_ticket_roll = 0;
95unsigned int vm_page_ticket = 0;
1c79356b
A
96/*
 97 * Associated with each page of user-allocatable memory is a
98 * page structure.
99 */
100
101/*
102 * These variables record the values returned by vm_page_bootstrap,
103 * for debugging purposes. The implementation of pmap_steal_memory
104 * and pmap_startup here also uses them internally.
105 */
106
107vm_offset_t virtual_space_start;
108vm_offset_t virtual_space_end;
109int vm_page_pages;
110
111/*
112 * The vm_page_lookup() routine, which provides for fast
113 * (virtual memory object, offset) to page lookup, employs
114 * the following hash table. The vm_page_{insert,remove}
115 * routines install and remove associations in the table.
116 * [This table is often called the virtual-to-physical,
117 * or VP, table.]
118 */
119typedef struct {
120 vm_page_t pages;
121#if MACH_PAGE_HASH_STATS
122 int cur_count; /* current count */
123 int hi_count; /* high water mark */
124#endif /* MACH_PAGE_HASH_STATS */
125} vm_page_bucket_t;
126
127vm_page_bucket_t *vm_page_buckets; /* Array of buckets */
128unsigned int vm_page_bucket_count = 0; /* How big is array? */
129unsigned int vm_page_hash_mask; /* Mask for hash function */
130unsigned int vm_page_hash_shift; /* Shift for hash function */
55e303ae 131uint32_t vm_page_bucket_hash; /* Basic bucket hash */
1c79356b
A
132decl_simple_lock_data(,vm_page_bucket_lock)
133
91447636
A
134vm_page_t
135vm_page_lookup_nohint(vm_object_t object, vm_object_offset_t offset);
136
137
1c79356b
A
138#if MACH_PAGE_HASH_STATS
139/* This routine is only for debug. It is intended to be called by
140 * hand by a developer using a kernel debugger. This routine prints
141 * out vm_page_hash table statistics to the kernel debug console.
142 */
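/* For example (illustrative only, exact syntax depends on the debugger in
 * use), a developer attached with a kernel debugger might simply evaluate
 * "call hash_debug()" to dump these statistics to the console.
 */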
143void
144hash_debug(void)
145{
146 int i;
147 int numbuckets = 0;
148 int highsum = 0;
149 int maxdepth = 0;
150
151 for (i = 0; i < vm_page_bucket_count; i++) {
152 if (vm_page_buckets[i].hi_count) {
153 numbuckets++;
154 highsum += vm_page_buckets[i].hi_count;
155 if (vm_page_buckets[i].hi_count > maxdepth)
156 maxdepth = vm_page_buckets[i].hi_count;
157 }
158 }
159 printf("Total number of buckets: %d\n", vm_page_bucket_count);
160 printf("Number used buckets: %d = %d%%\n",
161 numbuckets, 100*numbuckets/vm_page_bucket_count);
162 printf("Number unused buckets: %d = %d%%\n",
163 vm_page_bucket_count - numbuckets,
164 100*(vm_page_bucket_count-numbuckets)/vm_page_bucket_count);
165 printf("Sum of bucket max depth: %d\n", highsum);
166 printf("Average bucket depth: %d.%2d\n",
167 highsum/vm_page_bucket_count,
168 highsum%vm_page_bucket_count);
169 printf("Maximum bucket depth: %d\n", maxdepth);
170}
171#endif /* MACH_PAGE_HASH_STATS */
172
173/*
174 * The virtual page size is currently implemented as a runtime
175 * variable, but is constant once initialized using vm_set_page_size.
176 * This initialization must be done in the machine-dependent
177 * bootstrap sequence, before calling other machine-independent
178 * initializations.
179 *
180 * All references to the virtual page size outside this
181 * module must use the PAGE_SIZE, PAGE_MASK and PAGE_SHIFT
182 * constants.
183 */
55e303ae
A
184vm_size_t page_size = PAGE_SIZE;
185vm_size_t page_mask = PAGE_MASK;
91447636 186int page_shift = PAGE_SHIFT;
1c79356b
A
187
188/*
189 * Resident page structures are initialized from
190 * a template (see vm_page_alloc).
191 *
192 * When adding a new field to the virtual memory
193 * object structure, be sure to add initialization
194 * (see vm_page_bootstrap).
195 */
196struct vm_page vm_page_template;
197
198/*
199 * Resident pages that represent real memory
200 * are allocated from a free list.
201 */
202vm_page_t vm_page_queue_free;
203vm_page_t vm_page_queue_fictitious;
1c79356b 204unsigned int vm_page_free_wanted;
91447636
A
205unsigned int vm_page_free_count;
206unsigned int vm_page_fictitious_count;
1c79356b
A
207
208unsigned int vm_page_free_count_minimum; /* debugging */
209
210/*
211 * Occasionally, the virtual memory system uses
212 * resident page structures that do not refer to
213 * real pages, for example to leave a page with
214 * important state information in the VP table.
215 *
216 * These page structures are allocated the way
217 * most other kernel structures are.
218 */
219zone_t vm_page_zone;
220decl_mutex_data(,vm_page_alloc_lock)
9bccf70c 221unsigned int io_throttle_zero_fill;
1c79356b
A
222
223/*
224 * Fictitious pages don't have a physical address,
55e303ae 225 * but we must initialize phys_page to something.
1c79356b
A
226 * For debugging, this should be a strange value
227 * that the pmap module can recognize in assertions.
228 */
229vm_offset_t vm_page_fictitious_addr = (vm_offset_t) -1;
230
231/*
232 * Resident page structures are also chained on
233 * queues that are used by the page replacement
234 * system (pageout daemon). These queues are
235 * defined here, but are shared by the pageout
9bccf70c
A
236 * module. The inactive queue is broken into
237 * inactive and zf for convenience as the
238 * pageout daemon often assigns a higher
239 * affinity to zf pages
1c79356b
A
240 */
241queue_head_t vm_page_queue_active;
242queue_head_t vm_page_queue_inactive;
91447636
A
243unsigned int vm_page_active_count;
244unsigned int vm_page_inactive_count;
245unsigned int vm_page_wire_count;
246unsigned int vm_page_gobble_count = 0;
247unsigned int vm_page_wire_count_warning = 0;
248unsigned int vm_page_gobble_count_warning = 0;
249
250unsigned int vm_page_purgeable_count = 0; /* # of pages purgeable now */
251uint64_t vm_page_purged_count = 0; /* total count of purged pages */
1c79356b
A
252
253/*
254 * Several page replacement parameters are also
255 * shared with this module, so that page allocation
256 * (done here in vm_page_alloc) can trigger the
257 * pageout daemon.
258 */
91447636
A
259unsigned int vm_page_free_target = 0;
260unsigned int vm_page_free_min = 0;
261unsigned int vm_page_inactive_target = 0;
262unsigned int vm_page_free_reserved = 0;
263unsigned int vm_page_throttled_count = 0;
1c79356b
A
264
265/*
266 * The VM system has a couple of heuristics for deciding
267 * that pages are "uninteresting" and should be placed
268 * on the inactive queue as likely candidates for replacement.
269 * These variables let the heuristics be controlled at run-time
270 * to make experimentation easier.
271 */
272
273boolean_t vm_page_deactivate_hint = TRUE;
274
275/*
276 * vm_set_page_size:
277 *
278 * Sets the page size, perhaps based upon the memory
279 * size. Must be called before any use of page-size
280 * dependent functions.
281 *
282 * Sets page_shift and page_mask from page_size.
283 */
284void
285vm_set_page_size(void)
286{
1c79356b
A
287 page_mask = page_size - 1;
288
289 if ((page_mask & page_size) != 0)
290 panic("vm_set_page_size: page size not a power of two");
291
292 for (page_shift = 0; ; page_shift++)
91447636 293 if ((1U << page_shift) == page_size)
1c79356b 294 break;
1c79356b
A
295}
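/*
 * Worked example (illustrative numbers only): with the common page_size
 * of 4096 bytes, the code above leaves page_mask = 0xFFF and finds
 * page_shift = 12, so an address is truncated to a page boundary by
 * masking with ~0xFFF and converted to a page number by shifting right
 * by 12 bits.
 */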
296
297/*
298 * vm_page_bootstrap:
299 *
300 * Initializes the resident memory module.
301 *
302 * Allocates memory for the page cells, and
303 * for the object/offset-to-page hash table headers.
304 * Each page cell is initialized and placed on the free list.
305 * Returns the range of available kernel virtual memory.
306 */
307
308void
309vm_page_bootstrap(
310 vm_offset_t *startp,
311 vm_offset_t *endp)
312{
313 register vm_page_t m;
91447636 314 unsigned int i;
1c79356b
A
315 unsigned int log1;
316 unsigned int log2;
317 unsigned int size;
318
319 /*
320 * Initialize the vm_page template.
321 */
322
323 m = &vm_page_template;
91447636
A
324 m->object = VM_OBJECT_NULL; /* reset later */
325 m->offset = (vm_object_offset_t) -1; /* reset later */
1c79356b
A
326 m->wire_count = 0;
327
91447636
A
328 m->pageq.next = NULL;
329 m->pageq.prev = NULL;
330 m->listq.next = NULL;
331 m->listq.prev = NULL;
332
1c79356b
A
333 m->inactive = FALSE;
334 m->active = FALSE;
335 m->laundry = FALSE;
336 m->free = FALSE;
765c9de3 337 m->no_isync = TRUE;
1c79356b
A
338 m->reference = FALSE;
339 m->pageout = FALSE;
0b4e3aa0 340 m->dump_cleaning = FALSE;
1c79356b
A
341 m->list_req_pending = FALSE;
342
343 m->busy = TRUE;
344 m->wanted = FALSE;
345 m->tabled = FALSE;
346 m->fictitious = FALSE;
347 m->private = FALSE;
348 m->absent = FALSE;
349 m->error = FALSE;
350 m->dirty = FALSE;
351 m->cleaning = FALSE;
352 m->precious = FALSE;
353 m->clustered = FALSE;
354 m->lock_supplied = FALSE;
355 m->unusual = FALSE;
356 m->restart = FALSE;
9bccf70c 357 m->zero_fill = FALSE;
91447636 358 m->encrypted = FALSE;
1c79356b 359
55e303ae 360 m->phys_page = 0; /* reset later */
1c79356b
A
361
362 m->page_lock = VM_PROT_NONE;
363 m->unlock_request = VM_PROT_NONE;
364 m->page_error = KERN_SUCCESS;
365
366 /*
367 * Initialize the page queues.
368 */
369
91447636
A
370 mutex_init(&vm_page_queue_free_lock, 0);
371 mutex_init(&vm_page_queue_lock, 0);
1c79356b
A
372
373 vm_page_queue_free = VM_PAGE_NULL;
374 vm_page_queue_fictitious = VM_PAGE_NULL;
375 queue_init(&vm_page_queue_active);
376 queue_init(&vm_page_queue_inactive);
9bccf70c 377 queue_init(&vm_page_queue_zf);
1c79356b
A
378
379 vm_page_free_wanted = 0;
380
381 /*
382 * Steal memory for the map and zone subsystems.
383 */
384
385 vm_map_steal_memory();
386 zone_steal_memory();
387
388 /*
389 * Allocate (and initialize) the virtual-to-physical
390 * table hash buckets.
391 *
392 * The number of buckets should be a power of two to
393 * get a good hash function. The following computation
394 * chooses the first power of two that is at least
395 * as large as the number of physical pages in the system.
396 */
397
91447636 398 simple_lock_init(&vm_page_bucket_lock, 0);
1c79356b
A
399
400 if (vm_page_bucket_count == 0) {
401 unsigned int npages = pmap_free_pages();
402
403 vm_page_bucket_count = 1;
404 while (vm_page_bucket_count < npages)
405 vm_page_bucket_count <<= 1;
406 }
407
408 vm_page_hash_mask = vm_page_bucket_count - 1;
409
410 /*
411 * Calculate object shift value for hashing algorithm:
412 * O = log2(sizeof(struct vm_object))
413 * B = log2(vm_page_bucket_count)
414 * hash shifts the object left by
415 * B/2 - O
416 */
417 size = vm_page_bucket_count;
418 for (log1 = 0; size > 1; log1++)
419 size /= 2;
420 size = sizeof(struct vm_object);
421 for (log2 = 0; size > 1; log2++)
422 size /= 2;
423 vm_page_hash_shift = log1/2 - log2 + 1;
55e303ae
A
424
425 vm_page_bucket_hash = 1 << ((log1 + 1) >> 1); /* Get (ceiling of sqrt of table size) */
426 vm_page_bucket_hash |= 1 << ((log1 + 1) >> 2); /* Get (ceiling of fourth root of table size) */
427 vm_page_bucket_hash |= 1; /* Set the low bit - it must always be 1 to ensure a unique series */
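 /*
  * Worked example (assumed numbers, for illustration only): with
  * vm_page_bucket_count = 65536 (log1 = 16) and a hypothetical 128-byte
  * struct vm_object (log2 = 7), vm_page_hash_shift becomes
  * 16/2 - 7 + 1 = 2, and vm_page_bucket_hash becomes
  * (1 << 8) | (1 << 4) | 1 = 0x111.
  */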
1c79356b
A
428
429 if (vm_page_hash_mask & vm_page_bucket_count)
430 printf("vm_page_bootstrap: WARNING -- strange page hash\n");
431
432 vm_page_buckets = (vm_page_bucket_t *)
433 pmap_steal_memory(vm_page_bucket_count *
434 sizeof(vm_page_bucket_t));
435
436 for (i = 0; i < vm_page_bucket_count; i++) {
437 register vm_page_bucket_t *bucket = &vm_page_buckets[i];
438
439 bucket->pages = VM_PAGE_NULL;
440#if MACH_PAGE_HASH_STATS
441 bucket->cur_count = 0;
442 bucket->hi_count = 0;
443#endif /* MACH_PAGE_HASH_STATS */
444 }
445
446 /*
447 * Machine-dependent code allocates the resident page table.
448 * It uses vm_page_init to initialize the page frames.
449 * The code also returns to us the virtual space available
450 * to the kernel. We don't trust the pmap module
451 * to get the alignment right.
452 */
453
454 pmap_startup(&virtual_space_start, &virtual_space_end);
91447636
A
455 virtual_space_start = round_page(virtual_space_start);
456 virtual_space_end = trunc_page(virtual_space_end);
1c79356b
A
457
458 *startp = virtual_space_start;
459 *endp = virtual_space_end;
460
461 /*
462 * Compute the initial "wire" count.
463 * Up until now, the pages which have been set aside are not under
464 * the VM system's control, so although they aren't explicitly
465 * wired, they nonetheless can't be moved. At this moment,
466 * all VM managed pages are "free", courtesy of pmap_startup.
467 */
55e303ae 468 vm_page_wire_count = atop_64(max_mem) - vm_page_free_count; /* initial value */
1c79356b
A
469
470 printf("vm_page_bootstrap: %d free pages\n", vm_page_free_count);
471 vm_page_free_count_minimum = vm_page_free_count;
91447636
A
472
473 simple_lock_init(&vm_paging_lock, 0);
1c79356b
A
474}
475
476#ifndef MACHINE_PAGES
477/*
478 * We implement pmap_steal_memory and pmap_startup with the help
479 * of two simpler functions, pmap_virtual_space and pmap_next_page.
480 */
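/*
 * Sketch of the two helpers' contracts, based solely on how they are used
 * below: pmap_virtual_space() reports the kernel virtual range still
 * available, and pmap_next_page(&phys_page) hands back the physical page
 * number of the next unused page, returning FALSE once physical memory
 * is exhausted.
 */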
481
91447636 482void *
1c79356b
A
483pmap_steal_memory(
484 vm_size_t size)
485{
55e303ae
A
486 vm_offset_t addr, vaddr;
487 ppnum_t phys_page;
1c79356b
A
488
489 /*
490 * Round the requested size up to a multiple of the pointer size.
491 */
492
493 size = (size + sizeof (void *) - 1) &~ (sizeof (void *) - 1);
494
495 /*
496 * If this is the first call to pmap_steal_memory,
497 * we have to initialize ourself.
498 */
499
500 if (virtual_space_start == virtual_space_end) {
501 pmap_virtual_space(&virtual_space_start, &virtual_space_end);
502
503 /*
504 * The initial values must be aligned properly, and
505 * we don't trust the pmap module to do it right.
506 */
507
91447636
A
508 virtual_space_start = round_page(virtual_space_start);
509 virtual_space_end = trunc_page(virtual_space_end);
1c79356b
A
510 }
511
512 /*
513 * Allocate virtual memory for this request.
514 */
515
516 addr = virtual_space_start;
517 virtual_space_start += size;
518
519 kprintf("pmap_steal_memory: %08X - %08X; size=%08X\n", addr, virtual_space_start, size); /* (TEST/DEBUG) */
520
521 /*
522 * Allocate and map physical pages to back new virtual pages.
523 */
524
91447636 525 for (vaddr = round_page(addr);
1c79356b
A
526 vaddr < addr + size;
527 vaddr += PAGE_SIZE) {
55e303ae 528 if (!pmap_next_page(&phys_page))
1c79356b
A
529 panic("pmap_steal_memory");
530
531 /*
532 * XXX Logically, these mappings should be wired,
533 * but some pmap modules barf if they are.
534 */
535
55e303ae 536 pmap_enter(kernel_pmap, vaddr, phys_page,
9bccf70c
A
537 VM_PROT_READ|VM_PROT_WRITE,
538 VM_WIMG_USE_DEFAULT, FALSE);
1c79356b
A
539 /*
540 * Account for newly stolen memory
541 */
542 vm_page_wire_count++;
543
544 }
545
91447636 546 return (void *) addr;
1c79356b
A
547}
548
549void
550pmap_startup(
551 vm_offset_t *startp,
552 vm_offset_t *endp)
553{
55e303ae
A
554 unsigned int i, npages, pages_initialized, fill, fillval;
555 vm_page_t pages;
556 ppnum_t phys_page;
557 addr64_t tmpaddr;
1c79356b
A
558
559 /*
560 * We calculate how many page frames we will have
561 * and then allocate the page structures in one chunk.
562 */
563
55e303ae
A
564 tmpaddr = (addr64_t)pmap_free_pages() * (addr64_t)PAGE_SIZE; /* Get the amount of memory left */
565 tmpaddr = tmpaddr + (addr64_t)(round_page_32(virtual_space_start) - virtual_space_start); /* Account for any slop */
566 npages = (unsigned int)(tmpaddr / (addr64_t)(PAGE_SIZE + sizeof(*pages))); /* Figure how many pages we can manage, leaving room for their vm_page_ts */
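 /*
  * Illustrative sizing (assumed numbers): with 512 MB of remaining
  * physical memory, 4 KB pages and a vm_page structure of roughly
  * 100 bytes, npages comes out near 512 MB / 4196 bytes, i.e. about
  * 128,000 pages -- slightly less than the raw page count, since each
  * managed page must also pay for its own vm_page_t.
  */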
1c79356b
A
567
568 pages = (vm_page_t) pmap_steal_memory(npages * sizeof *pages);
569
570 /*
571 * Initialize the page frames.
572 */
21362eb3 573
1c79356b 574 for (i = 0, pages_initialized = 0; i < npages; i++) {
55e303ae 575 if (!pmap_next_page(&phys_page))
1c79356b
A
576 break;
577
55e303ae 578 vm_page_init(&pages[i], phys_page);
1c79356b
A
579 vm_page_pages++;
580 pages_initialized++;
581 }
582
583 /*
584 * Release pages in reverse order so that physical pages
585 * initially get allocated in ascending addresses. This keeps
586 * the devices (which must address physical memory) happy if
587 * they require several consecutive pages.
588 */
21362eb3
A
589
590/*
591 * Check if we want to initialize pages to a known value
592 */
593
594 fill = 0; /* Assume no fill */
595 if (PE_parse_boot_arg("fill", &fillval)) fill = 1; /* Set fill */
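 /*
  * In other words, booting with a "fill=<value>" boot argument (for
  * example fill=0xDEADBEEF, an illustrative value) asks pmap_startup to
  * pre-fill every free page with that pattern before releasing it,
  * which helps catch uses of uninitialized memory.
  */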
596
597 for (i = pages_initialized; i > 0; i--) {
55e303ae 598 if(fill) fillPage(pages[i - 1].phys_page, fillval); /* Fill the page with a known value if requested at boot */
1c79356b
A
599 vm_page_release(&pages[i - 1]);
600 }
601
55e303ae
A
602#if 0
603 {
604 vm_page_t xx, xxo, xxl;
605 int j, k, l;
606
607 j = 0; /* (BRINGUP) */
608 xxl = 0;
609
610 for(xx = vm_page_queue_free; xx; xxl = xx, xx = xx->pageq.next) { /* (BRINGUP) */
611 j++; /* (BRINGUP) */
612 if(j > vm_page_free_count) { /* (BRINGUP) */
613 panic("pmap_startup: too many pages, xx = %08X, xxl = %08X\n", xx, xxl);
614 }
615
616 l = vm_page_free_count - j; /* (BRINGUP) */
617 k = 0; /* (BRINGUP) */
618
619 if(((j - 1) & 0xFFFF) == 0) kprintf("checking number %d of %d\n", j, vm_page_free_count);
620
621 for(xxo = xx->pageq.next; xxo; xxo = xxo->pageq.next) { /* (BRINGUP) */
622 k++;
623 if(k > l) panic("pmap_startup: too many in secondary check %d %d\n", k, l);
624 if((xx->phys_page & 0xFFFFFFFF) == (xxo->phys_page & 0xFFFFFFFF)) { /* (BRINGUP) */
625 panic("pmap_startup: duplicate physaddr, xx = %08X, xxo = %08X\n", xx, xxo);
626 }
627 }
628 }
629
630 if(j != vm_page_free_count) { /* (BRINGUP) */
631 panic("pmap_startup: vm_page_free_count does not match, calc = %d, vm_page_free_count = %08X\n", j, vm_page_free_count);
632 }
633 }
634#endif
635
636
1c79356b
A
637 /*
638 * We have to re-align virtual_space_start,
639 * because pmap_steal_memory has been using it.
640 */
641
55e303ae 642 virtual_space_start = round_page_32(virtual_space_start);
1c79356b
A
643
644 *startp = virtual_space_start;
645 *endp = virtual_space_end;
646}
647#endif /* MACHINE_PAGES */
648
649/*
650 * Routine: vm_page_module_init
651 * Purpose:
652 * Second initialization pass, to be done after
653 * the basic VM system is ready.
654 */
655void
656vm_page_module_init(void)
657{
658 vm_page_zone = zinit((vm_size_t) sizeof(struct vm_page),
659 0, PAGE_SIZE, "vm pages");
660
661#if ZONE_DEBUG
662 zone_debug_disable(vm_page_zone);
663#endif /* ZONE_DEBUG */
664
665 zone_change(vm_page_zone, Z_EXPAND, FALSE);
666 zone_change(vm_page_zone, Z_EXHAUST, TRUE);
667 zone_change(vm_page_zone, Z_FOREIGN, TRUE);
668
669 /*
670 * Adjust zone statistics to account for the real pages allocated
671 * in vm_page_create(). [Q: is this really what we want?]
672 */
673 vm_page_zone->count += vm_page_pages;
674 vm_page_zone->cur_size += vm_page_pages * vm_page_zone->elem_size;
675
91447636 676 mutex_init(&vm_page_alloc_lock, 0);
1c79356b
A
677}
678
679/*
680 * Routine: vm_page_create
681 * Purpose:
682 * After the VM system is up, machine-dependent code
683 * may stumble across more physical memory. For example,
684 * memory that it was reserving for a frame buffer.
685 * vm_page_create turns this memory into available pages.
686 */
687
688void
689vm_page_create(
55e303ae
A
690 ppnum_t start,
691 ppnum_t end)
1c79356b 692{
55e303ae
A
693 ppnum_t phys_page;
694 vm_page_t m;
1c79356b 695
55e303ae
A
696 for (phys_page = start;
697 phys_page < end;
698 phys_page++) {
1c79356b
A
699 while ((m = (vm_page_t) vm_page_grab_fictitious())
700 == VM_PAGE_NULL)
701 vm_page_more_fictitious();
702
55e303ae 703 vm_page_init(m, phys_page);
1c79356b
A
704 vm_page_pages++;
705 vm_page_release(m);
706 }
707}
708
709/*
710 * vm_page_hash:
711 *
712 * Distributes the object/offset key pair among hash buckets.
713 *
55e303ae 714 * NOTE: The bucket count must be a power of 2
1c79356b
A
715 */
716#define vm_page_hash(object, offset) (\
55e303ae 717 ( (natural_t)((uint32_t)object * vm_page_bucket_hash) + ((uint32_t)atop_64(offset) ^ vm_page_bucket_hash))\
1c79356b
A
718 & vm_page_hash_mask)
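/*
 * Because vm_page_bucket_count is a power of two, the final
 * "& vm_page_hash_mask" is equivalent to taking the sum modulo the
 * bucket count; e.g. with 65536 buckets the mask is 0xFFFF.
 */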
719
720/*
721 * vm_page_insert: [ internal use only ]
722 *
723 * Inserts the given mem entry into the object/object-page
724 * table and object list.
725 *
726 * The object must be locked.
727 */
728
729void
730vm_page_insert(
731 register vm_page_t mem,
732 register vm_object_t object,
733 register vm_object_offset_t offset)
734{
735 register vm_page_bucket_t *bucket;
736
737 XPR(XPR_VM_PAGE,
738 "vm_page_insert, object 0x%X offset 0x%X page 0x%X\n",
739 (integer_t)object, (integer_t)offset, (integer_t)mem, 0,0);
740
741 VM_PAGE_CHECK(mem);
91447636
A
742#if DEBUG
743 _mutex_assert(&object->Lock, MA_OWNED);
1c79356b 744
91447636
A
745 if (mem->tabled || mem->object != VM_OBJECT_NULL)
746 panic("vm_page_insert: page %p for (obj=%p,off=0x%llx) "
747 "already in (obj=%p,off=0x%llx)",
748 mem, object, offset, mem->object, mem->offset);
749#endif
1c79356b
A
750 assert(!object->internal || offset < object->size);
751
752 /* only insert "pageout" pages into "pageout" objects,
753 * and normal pages into normal objects */
754 assert(object->pageout == mem->pageout);
755
91447636
A
756 assert(vm_page_lookup(object, offset) == VM_PAGE_NULL);
757
1c79356b
A
758 /*
759 * Record the object/offset pair in this page
760 */
761
762 mem->object = object;
763 mem->offset = offset;
764
765 /*
766 * Insert it into the object/offset hash table
767 */
768
769 bucket = &vm_page_buckets[vm_page_hash(object, offset)];
770 simple_lock(&vm_page_bucket_lock);
771 mem->next = bucket->pages;
772 bucket->pages = mem;
773#if MACH_PAGE_HASH_STATS
774 if (++bucket->cur_count > bucket->hi_count)
775 bucket->hi_count = bucket->cur_count;
776#endif /* MACH_PAGE_HASH_STATS */
777 simple_unlock(&vm_page_bucket_lock);
778
779 /*
780 * Now link into the object's list of backed pages.
781 */
782
91447636 783 VM_PAGE_INSERT(mem, object);
1c79356b
A
784 mem->tabled = TRUE;
785
786 /*
787 * Show that the object has one more resident page.
788 */
789
790 object->resident_page_count++;
91447636
A
791
792 if (object->purgable == VM_OBJECT_PURGABLE_VOLATILE ||
793 object->purgable == VM_OBJECT_PURGABLE_EMPTY) {
794 vm_page_lock_queues();
795 vm_page_purgeable_count++;
796 vm_page_unlock_queues();
797 }
1c79356b
A
798}
799
800/*
801 * vm_page_replace:
802 *
803 * Exactly like vm_page_insert, except that we first
804 * remove any existing page at the given offset in object.
805 *
806 * The object and page queues must be locked.
807 */
808
809void
810vm_page_replace(
811 register vm_page_t mem,
812 register vm_object_t object,
813 register vm_object_offset_t offset)
814{
21362eb3 815 register vm_page_bucket_t *bucket;
1c79356b
A
816
817 VM_PAGE_CHECK(mem);
91447636
A
818#if DEBUG
819 _mutex_assert(&object->Lock, MA_OWNED);
820 _mutex_assert(&vm_page_queue_lock, MA_OWNED);
821
822 if (mem->tabled || mem->object != VM_OBJECT_NULL)
823 panic("vm_page_replace: page %p for (obj=%p,off=0x%llx) "
824 "already in (obj=%p,off=0x%llx)",
825 mem, object, offset, mem->object, mem->offset);
826#endif
1c79356b
A
827 /*
828 * Record the object/offset pair in this page
829 */
830
831 mem->object = object;
832 mem->offset = offset;
833
834 /*
835 * Insert it into the object/offset hash table,
836 * replacing any page that might have been there.
837 */
838
839 bucket = &vm_page_buckets[vm_page_hash(object, offset)];
840 simple_lock(&vm_page_bucket_lock);
841 if (bucket->pages) {
842 vm_page_t *mp = &bucket->pages;
843 register vm_page_t m = *mp;
844 do {
845 if (m->object == object && m->offset == offset) {
846 /*
21362eb3
A
847 * Remove page from bucket and from object,
848 * and return it to the free list.
1c79356b
A
849 */
850 *mp = m->next;
21362eb3
A
851 VM_PAGE_REMOVE(m);
852 m->tabled = FALSE;
853 m->object = VM_OBJECT_NULL;
854 m->offset = (vm_object_offset_t) -1;
855 object->resident_page_count--;
856
857 if (object->purgable == VM_OBJECT_PURGABLE_VOLATILE ||
858 object->purgable == VM_OBJECT_PURGABLE_EMPTY) {
859 assert(vm_page_purgeable_count > 0);
860 vm_page_purgeable_count--;
861 }
862
863 /*
864 * Return page to the free list.
865 * Note the page is not tabled now, so this
866 * won't self-deadlock on the bucket lock.
867 */
1c79356b 868
21362eb3 869 vm_page_free(m);
1c79356b
A
870 break;
871 }
872 mp = &m->next;
91447636 873 } while ((m = *mp));
1c79356b
A
874 mem->next = bucket->pages;
875 } else {
876 mem->next = VM_PAGE_NULL;
877 }
878 bucket->pages = mem;
879 simple_unlock(&vm_page_bucket_lock);
880
881 /*
882 * Now link into the object's list of backed pages.
883 */
884
91447636 885 VM_PAGE_INSERT(mem, object);
1c79356b
A
886 mem->tabled = TRUE;
887
888 /*
889 * And show that the object has one more resident
890 * page.
891 */
892
893 object->resident_page_count++;
91447636
A
894
895 if (object->purgable == VM_OBJECT_PURGABLE_VOLATILE ||
896 object->purgable == VM_OBJECT_PURGABLE_EMPTY) {
897 vm_page_purgeable_count++;
898 }
1c79356b
A
899}
900
901/*
902 * vm_page_remove: [ internal use only ]
903 *
904 * Removes the given mem entry from the object/offset-page
905 * table and the object page list.
906 *
91447636 907 * The object and page queues must be locked.
1c79356b
A
908 */
909
910void
911vm_page_remove(
912 register vm_page_t mem)
913{
914 register vm_page_bucket_t *bucket;
915 register vm_page_t this;
916
917 XPR(XPR_VM_PAGE,
918 "vm_page_remove, object 0x%X offset 0x%X page 0x%X\n",
919 (integer_t)mem->object, (integer_t)mem->offset,
920 (integer_t)mem, 0,0);
91447636
A
921#if DEBUG
922 _mutex_assert(&vm_page_queue_lock, MA_OWNED);
923 _mutex_assert(&mem->object->Lock, MA_OWNED);
924#endif
1c79356b
A
925 assert(mem->tabled);
926 assert(!mem->cleaning);
927 VM_PAGE_CHECK(mem);
928
91447636 929
1c79356b
A
930 /*
931 * Remove from the object/offset hash table
932 */
933
934 bucket = &vm_page_buckets[vm_page_hash(mem->object, mem->offset)];
935 simple_lock(&vm_page_bucket_lock);
936 if ((this = bucket->pages) == mem) {
937 /* optimize for common case */
938
939 bucket->pages = mem->next;
940 } else {
941 register vm_page_t *prev;
942
943 for (prev = &this->next;
944 (this = *prev) != mem;
945 prev = &this->next)
946 continue;
947 *prev = this->next;
948 }
949#if MACH_PAGE_HASH_STATS
950 bucket->cur_count--;
951#endif /* MACH_PAGE_HASH_STATS */
952 simple_unlock(&vm_page_bucket_lock);
953
954 /*
955 * Now remove from the object's list of backed pages.
956 */
957
91447636 958 VM_PAGE_REMOVE(mem);
1c79356b
A
959
960 /*
961 * And show that the object has one fewer resident
962 * page.
963 */
964
965 mem->object->resident_page_count--;
966
91447636
A
967 if (mem->object->purgable == VM_OBJECT_PURGABLE_VOLATILE ||
968 mem->object->purgable == VM_OBJECT_PURGABLE_EMPTY) {
969 assert(vm_page_purgeable_count > 0);
970 vm_page_purgeable_count--;
971 }
972
1c79356b
A
973 mem->tabled = FALSE;
974 mem->object = VM_OBJECT_NULL;
91447636 975 mem->offset = (vm_object_offset_t) -1;
1c79356b
A
976}
977
978/*
979 * vm_page_lookup:
980 *
981 * Returns the page associated with the object/offset
982 * pair specified; if none is found, VM_PAGE_NULL is returned.
983 *
984 * The object must be locked. No side effects.
985 */
986
91447636
A
987unsigned long vm_page_lookup_hint = 0;
988unsigned long vm_page_lookup_hint_next = 0;
989unsigned long vm_page_lookup_hint_prev = 0;
990unsigned long vm_page_lookup_hint_miss = 0;
991
1c79356b
A
992vm_page_t
993vm_page_lookup(
994 register vm_object_t object,
995 register vm_object_offset_t offset)
996{
997 register vm_page_t mem;
998 register vm_page_bucket_t *bucket;
91447636
A
999 queue_entry_t qe;
1000#if 0
1001 _mutex_assert(&object->Lock, MA_OWNED);
1002#endif
1003
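	/*
	 * Fast path: before touching the hash table, try the per-object
	 * hint -- the page found by the previous lookup -- and then its
	 * immediate neighbours on the object's memq, since lookups tend
	 * to walk an object sequentially.
	 */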
1004 mem = object->memq_hint;
1005 if (mem != VM_PAGE_NULL) {
1006 assert(mem->object == object);
1007 if (mem->offset == offset) {
1008 vm_page_lookup_hint++;
1009 return mem;
1010 }
1011 qe = queue_next(&mem->listq);
1012 if (! queue_end(&object->memq, qe)) {
1013 vm_page_t next_page;
1014
1015 next_page = (vm_page_t) qe;
1016 assert(next_page->object == object);
1017 if (next_page->offset == offset) {
1018 vm_page_lookup_hint_next++;
1019 object->memq_hint = next_page; /* new hint */
1020 return next_page;
1021 }
1022 }
1023 qe = queue_prev(&mem->listq);
1024 if (! queue_end(&object->memq, qe)) {
1025 vm_page_t prev_page;
1026
1027 prev_page = (vm_page_t) qe;
1028 assert(prev_page->object == object);
1029 if (prev_page->offset == offset) {
1030 vm_page_lookup_hint_prev++;
1031 object->memq_hint = prev_page; /* new hint */
1032 return prev_page;
1033 }
1034 }
1035 }
1c79356b
A
1036
1037 /*
1038 * Search the hash table for this object/offset pair
1039 */
1040
1041 bucket = &vm_page_buckets[vm_page_hash(object, offset)];
1042
1043 simple_lock(&vm_page_bucket_lock);
1044 for (mem = bucket->pages; mem != VM_PAGE_NULL; mem = mem->next) {
1045 VM_PAGE_CHECK(mem);
1046 if ((mem->object == object) && (mem->offset == offset))
1047 break;
1048 }
1049 simple_unlock(&vm_page_bucket_lock);
55e303ae 1050
91447636
A
1051 if (mem != VM_PAGE_NULL) {
1052 if (object->memq_hint != VM_PAGE_NULL) {
1053 vm_page_lookup_hint_miss++;
1054 }
1055 assert(mem->object == object);
1056 object->memq_hint = mem;
1057 }
1058
1059 return(mem);
1060}
1061
1062
1063vm_page_t
1064vm_page_lookup_nohint(
1065 vm_object_t object,
1066 vm_object_offset_t offset)
1067{
1068 register vm_page_t mem;
1069 register vm_page_bucket_t *bucket;
1070
1071#if 0
1072 _mutex_assert(&object->Lock, MA_OWNED);
1073#endif
1074 /*
1075 * Search the hash table for this object/offset pair
1076 */
1077
1078 bucket = &vm_page_buckets[vm_page_hash(object, offset)];
1079
1080 simple_lock(&vm_page_bucket_lock);
1081 for (mem = bucket->pages; mem != VM_PAGE_NULL; mem = mem->next) {
1082 VM_PAGE_CHECK(mem);
1083 if ((mem->object == object) && (mem->offset == offset))
1084 break;
1085 }
1086 simple_unlock(&vm_page_bucket_lock);
1087
1c79356b
A
1088 return(mem);
1089}
1090
1091/*
1092 * vm_page_rename:
1093 *
1094 * Move the given memory entry from its
1095 * current object to the specified target object/offset.
1096 *
1097 * The object must be locked.
1098 */
1099void
1100vm_page_rename(
1101 register vm_page_t mem,
1102 register vm_object_t new_object,
1103 vm_object_offset_t new_offset)
1104{
1105 assert(mem->object != new_object);
91447636
A
1106 /*
1107 * ENCRYPTED SWAP:
1108 * The encryption key is based on the page's memory object
1109 * (aka "pager") and paging offset. Moving the page to
1110 * another VM object changes its "pager" and "paging_offset"
1111 * so it has to be decrypted first.
1112 */
1113 if (mem->encrypted) {
1114 panic("vm_page_rename: page %p is encrypted\n", mem);
1115 }
1c79356b
A
1116 /*
1117 * Changes to mem->object require the page lock because
1118 * the pageout daemon uses that lock to get the object.
1119 */
1120
1121 XPR(XPR_VM_PAGE,
1122 "vm_page_rename, new object 0x%X, offset 0x%X page 0x%X\n",
1123 (integer_t)new_object, (integer_t)new_offset,
1124 (integer_t)mem, 0,0);
1125
1126 vm_page_lock_queues();
1127 vm_page_remove(mem);
1128 vm_page_insert(mem, new_object, new_offset);
1129 vm_page_unlock_queues();
1130}
1131
1132/*
1133 * vm_page_init:
1134 *
1135 * Initialize the fields in a new page.
1136 * This takes a structure with random values and initializes it
1137 * so that it can be given to vm_page_release or vm_page_insert.
1138 */
1139void
1140vm_page_init(
1141 vm_page_t mem,
55e303ae 1142 ppnum_t phys_page)
1c79356b 1143{
91447636 1144 assert(phys_page);
1c79356b 1145 *mem = vm_page_template;
55e303ae 1146 mem->phys_page = phys_page;
1c79356b
A
1147}
1148
1149/*
1150 * vm_page_grab_fictitious:
1151 *
1152 * Remove a fictitious page from the free list.
1153 * Returns VM_PAGE_NULL if there are no free pages.
1154 */
1155int c_vm_page_grab_fictitious = 0;
1156int c_vm_page_release_fictitious = 0;
1157int c_vm_page_more_fictitious = 0;
1158
1159vm_page_t
1160vm_page_grab_fictitious(void)
1161{
1162 register vm_page_t m;
1163
1164 m = (vm_page_t)zget(vm_page_zone);
1165 if (m) {
1c79356b
A
1166 vm_page_init(m, vm_page_fictitious_addr);
1167 m->fictitious = TRUE;
1c79356b
A
1168 }
1169
1170 c_vm_page_grab_fictitious++;
1171 return m;
1172}
1173
1174/*
1175 * vm_page_release_fictitious:
1176 *
1177 * Release a fictitious page to the free list.
1178 */
1179
1180void
1181vm_page_release_fictitious(
1182 register vm_page_t m)
1183{
1184 assert(!m->free);
1185 assert(m->busy);
1186 assert(m->fictitious);
55e303ae 1187 assert(m->phys_page == vm_page_fictitious_addr);
1c79356b
A
1188
1189 c_vm_page_release_fictitious++;
91447636 1190#if DEBUG
1c79356b
A
1191 if (m->free)
1192 panic("vm_page_release_fictitious");
91447636 1193#endif
1c79356b 1194 m->free = TRUE;
91447636 1195 zfree(vm_page_zone, m);
1c79356b
A
1196}
1197
1198/*
1199 * vm_page_more_fictitious:
1200 *
1201 * Add more fictitious pages to the free list.
1202 * Allowed to block. This routine is closely tied to
1203 * the zones code, for several reasons:
1204 * 1. we need to carve some page structures out of physical
1205 * memory before zones work, so they _cannot_ come from
1206 * the zone_map.
1207 * 2. the zone needs to be collectable in order to prevent
1208 * growth without bound. These structures are used by
1209 * the device pager (by the hundreds and thousands), as
1210 * private pages for pageout, and as blocking pages for
1211 * pagein. Temporary bursts in demand should not result in
1212 * permanent allocation of a resource.
1213 * 3. To smooth allocation humps, we allocate single pages
1214 * with kernel_memory_allocate(), and cram them into the
1215 * zone. This also allows us to initialize the vm_page_t's
1216 * on the way into the zone, so that zget() always returns
1217 * an initialized structure. The zone free element pointer
1218 * and the free page pointer are both the first item in the
1219 * vm_page_t.
1220 * 4. By having the pages in the zone pre-initialized, we need
1221 * not keep 2 levels of lists. The garbage collector simply
1222 * scans our list, and reduces physical memory usage as it
1223 * sees fit.
1224 */
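/*
 * Point 3 above works only because the zone allocator's free-element
 * link and the free-page link (pageq.next) both occupy the first word
 * of struct vm_page, so a page parked in the zone's free list remains
 * an initialized vm_page_t apart from that one field.
 */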
1225
1226void vm_page_more_fictitious(void)
1227{
1c79356b
A
1228 register vm_page_t m;
1229 vm_offset_t addr;
1230 kern_return_t retval;
1231 int i;
1232
1233 c_vm_page_more_fictitious++;
1234
1c79356b
A
1235 /*
1236 * Allocate a single page from the zone_map. Do not wait if no physical
1237 * pages are immediately available, and do not zero the space. We need
1238 * our own blocking lock here to prevent having multiple,
1239 * simultaneous requests from piling up on the zone_map lock. Exactly
1240 * one (of our) threads should be potentially waiting on the map lock.
1241 * If winner is not vm-privileged, then the page allocation will fail,
1242 * and it will temporarily block here in the vm_page_wait().
1243 */
1244 mutex_lock(&vm_page_alloc_lock);
1245 /*
1246 * If another thread allocated space, just bail out now.
1247 */
1248 if (zone_free_count(vm_page_zone) > 5) {
1249 /*
1250 * The number "5" is a small number that is larger than the
1251 * number of fictitious pages that any single caller will
1252 * attempt to allocate. Otherwise, a thread will attempt to
1253 * acquire a fictitious page (vm_page_grab_fictitious), fail,
1254 * release all of the resources and locks already acquired,
1255 * and then call this routine. This routine finds the pages
1256 * that the caller released, so fails to allocate new space.
1257 * The process repeats infinitely. The largest known number
1258 * of fictitious pages required in this manner is 2. 5 is
1259 * simply a somewhat larger number.
1260 */
1261 mutex_unlock(&vm_page_alloc_lock);
1262 return;
1263 }
1264
91447636
A
1265 retval = kernel_memory_allocate(zone_map,
1266 &addr, PAGE_SIZE, VM_PROT_ALL,
1267 KMA_KOBJECT|KMA_NOPAGEWAIT);
1268 if (retval != KERN_SUCCESS) {
1c79356b
A
1269 /*
1270 * No page was available. Tell the pageout daemon, drop the
1271 * lock to give another thread a chance at it, and
1272 * wait for the pageout daemon to make progress.
1273 */
1274 mutex_unlock(&vm_page_alloc_lock);
1275 vm_page_wait(THREAD_UNINT);
1276 return;
1277 }
1278 /*
1279 * Initialize as many vm_page_t's as will fit on this page. This
1280 * depends on the zone code disturbing ONLY the first item of
1281 * each zone element.
1282 */
1283 m = (vm_page_t)addr;
1284 for (i = PAGE_SIZE/sizeof(struct vm_page); i > 0; i--) {
1285 vm_page_init(m, vm_page_fictitious_addr);
1286 m->fictitious = TRUE;
1287 m++;
1288 }
91447636 1289 zcram(vm_page_zone, (void *) addr, PAGE_SIZE);
1c79356b
A
1290 mutex_unlock(&vm_page_alloc_lock);
1291}
1292
1293/*
1294 * vm_page_convert:
1295 *
1296 * Attempt to convert a fictitious page into a real page.
1297 */
1298
1299boolean_t
1300vm_page_convert(
1301 register vm_page_t m)
1302{
1303 register vm_page_t real_m;
1304
1305 assert(m->busy);
1306 assert(m->fictitious);
1307 assert(!m->dirty);
1308
1309 real_m = vm_page_grab();
1310 if (real_m == VM_PAGE_NULL)
1311 return FALSE;
1312
55e303ae 1313 m->phys_page = real_m->phys_page;
1c79356b 1314 m->fictitious = FALSE;
765c9de3 1315 m->no_isync = TRUE;
1c79356b
A
1316
1317 vm_page_lock_queues();
1318 if (m->active)
1319 vm_page_active_count++;
1320 else if (m->inactive)
1321 vm_page_inactive_count++;
1322 vm_page_unlock_queues();
1323
55e303ae 1324 real_m->phys_page = vm_page_fictitious_addr;
1c79356b
A
1325 real_m->fictitious = TRUE;
1326
1327 vm_page_release_fictitious(real_m);
1328 return TRUE;
1329}
1330
1331/*
1332 * vm_pool_low():
1333 *
1334 * Return true if it is not likely that a non-vm_privileged thread
1335 * can get memory without blocking. Advisory only, since the
1336 * situation may change under us.
1337 */
1338int
1339vm_pool_low(void)
1340{
1341 /* No locking, at worst we will fib. */
1342 return( vm_page_free_count < vm_page_free_reserved );
1343}
1344
1345/*
1346 * vm_page_grab:
1347 *
1348 * Remove a page from the free list.
1349 * Returns VM_PAGE_NULL if the free list is too small.
1350 */
1351
1352unsigned long vm_page_grab_count = 0; /* measure demand */
1353
1354vm_page_t
1355vm_page_grab(void)
1356{
1357 register vm_page_t mem;
1358
1359 mutex_lock(&vm_page_queue_free_lock);
1360 vm_page_grab_count++;
1361
1362 /*
1363 * Optionally produce warnings if the wire or gobble
1364 * counts exceed some threshold.
1365 */
1366 if (vm_page_wire_count_warning > 0
1367 && vm_page_wire_count >= vm_page_wire_count_warning) {
1368 printf("mk: vm_page_grab(): high wired page count of %d\n",
1369 vm_page_wire_count);
1370 assert(vm_page_wire_count < vm_page_wire_count_warning);
1371 }
1372 if (vm_page_gobble_count_warning > 0
1373 && vm_page_gobble_count >= vm_page_gobble_count_warning) {
1374 printf("mk: vm_page_grab(): high gobbled page count of %d\n",
1375 vm_page_gobble_count);
1376 assert(vm_page_gobble_count < vm_page_gobble_count_warning);
1377 }
1378
1379 /*
1380 * Only let privileged threads (involved in pageout)
1381 * dip into the reserved pool.
1382 */
1383
1384 if ((vm_page_free_count < vm_page_free_reserved) &&
91447636 1385 !(current_thread()->options & TH_OPT_VMPRIV)) {
1c79356b
A
1386 mutex_unlock(&vm_page_queue_free_lock);
1387 mem = VM_PAGE_NULL;
1388 goto wakeup_pageout;
1389 }
1390
1391 while (vm_page_queue_free == VM_PAGE_NULL) {
1c79356b
A
1392 mutex_unlock(&vm_page_queue_free_lock);
1393 VM_PAGE_WAIT();
1394 mutex_lock(&vm_page_queue_free_lock);
1395 }
1396
1397 if (--vm_page_free_count < vm_page_free_count_minimum)
1398 vm_page_free_count_minimum = vm_page_free_count;
1399 mem = vm_page_queue_free;
1400 vm_page_queue_free = (vm_page_t) mem->pageq.next;
91447636
A
1401 mem->pageq.next = NULL;
1402 mem->pageq.prev = NULL;
1403 assert(mem->listq.next == NULL && mem->listq.prev == NULL);
1404 assert(mem->tabled == FALSE);
1405 assert(mem->object == VM_OBJECT_NULL);
1406 assert(!mem->laundry);
1c79356b 1407 mem->free = FALSE;
0b4e3aa0 1408 mem->no_isync = TRUE;
1c79356b
A
1409 mutex_unlock(&vm_page_queue_free_lock);
1410
91447636
A
1411 assert(pmap_verify_free(mem->phys_page));
1412
1c79356b
A
1413 /*
1414 * Decide if we should poke the pageout daemon.
1415 * We do this if the free count is less than the low
1416 * water mark, or if the free count is less than the high
1417 * water mark (but above the low water mark) and the inactive
1418 * count is less than its target.
1419 *
1420 * We don't have the counts locked ... if they change a little,
1421 * it doesn't really matter.
1422 */
1423
1424wakeup_pageout:
1425 if ((vm_page_free_count < vm_page_free_min) ||
1426 ((vm_page_free_count < vm_page_free_target) &&
1427 (vm_page_inactive_count < vm_page_inactive_target)))
1428 thread_wakeup((event_t) &vm_page_free_wanted);
1429
55e303ae 1430// dbgLog(mem->phys_page, vm_page_free_count, vm_page_wire_count, 4); /* (TEST/DEBUG) */
1c79356b
A
1431
1432 return mem;
1433}
1434
1435/*
1436 * vm_page_release:
1437 *
1438 * Return a page to the free list.
1439 */
1440
1441void
1442vm_page_release(
1443 register vm_page_t mem)
1444{
55e303ae
A
1445
1446#if 0
1447 unsigned int pindex;
1448 phys_entry *physent;
1449
1450 physent = mapping_phys_lookup(mem->phys_page, &pindex); /* (BRINGUP) */
1451 if(physent->ppLink & ppN) { /* (BRINGUP) */
1452 panic("vm_page_release: already released - %08X %08X\n", mem, mem->phys_page);
1453 }
1454 physent->ppLink = physent->ppLink | ppN; /* (BRINGUP) */
1455#endif
1c79356b
A
1456 assert(!mem->private && !mem->fictitious);
1457
55e303ae 1458// dbgLog(mem->phys_page, vm_page_free_count, vm_page_wire_count, 5); /* (TEST/DEBUG) */
1c79356b
A
1459
1460 mutex_lock(&vm_page_queue_free_lock);
91447636 1461#if DEBUG
1c79356b
A
1462 if (mem->free)
1463 panic("vm_page_release");
91447636 1464#endif
1c79356b 1465 mem->free = TRUE;
91447636
A
1466 assert(!mem->laundry);
1467 assert(mem->object == VM_OBJECT_NULL);
1468 assert(mem->pageq.next == NULL &&
1469 mem->pageq.prev == NULL);
21362eb3
A
1470 mem->pageq.next = (queue_entry_t) vm_page_queue_free;
1471 vm_page_queue_free = mem;
1472 vm_page_free_count++;
1c79356b 1473
21362eb3
A
1474 /*
1475 * Check if we should wake up someone waiting for page.
1476 * But don't bother waking them unless they can allocate.
1477 *
1478 * We wakeup only one thread, to prevent starvation.
1479 * Because the scheduling system handles wait queues FIFO,
1480 * if we wakeup all waiting threads, one greedy thread
1481 * can starve multiple niceguy threads. When the threads
1483 * all wake up, the greedy thread runs first, grabs the page,
1483 * and waits for another page. It will be the first to run
1484 * when the next page is freed.
1485 *
1486 * However, there is a slight danger here.
1487 * The thread we wake might not use the free page.
1488 * Then the other threads could wait indefinitely
1489 * while the page goes unused. To forestall this,
1490 * the pageout daemon will keep making free pages
1491 * as long as vm_page_free_wanted is non-zero.
1492 */
1c79356b 1493
21362eb3
A
1494 if ((vm_page_free_wanted > 0) &&
1495 (vm_page_free_count >= vm_page_free_reserved)) {
1496 vm_page_free_wanted--;
1497 thread_wakeup_one((event_t) &vm_page_free_count);
1c79356b 1498 }
21362eb3 1499
1c79356b
A
1500 mutex_unlock(&vm_page_queue_free_lock);
1501}
1502
1c79356b
A
1503/*
1504 * vm_page_wait:
1505 *
1506 * Wait for a page to become available.
1507 * If there are plenty of free pages, then we don't sleep.
1508 *
1509 * Returns:
1510 * TRUE: There may be another page, try again
1511 * FALSE: We were interrupted out of our wait, don't try again
1512 */
1513
1514boolean_t
1515vm_page_wait(
1516 int interruptible )
1517{
1518 /*
1519 * We can't use vm_page_free_reserved to make this
1520 * determination. Consider: some thread might
1521 * need to allocate two pages. The first allocation
1522 * succeeds, the second fails. After the first page is freed,
1523 * a call to vm_page_wait must really block.
1524 */
9bccf70c 1525 kern_return_t wait_result;
9bccf70c 1526 int need_wakeup = 0;
1c79356b
A
1527
1528 mutex_lock(&vm_page_queue_free_lock);
1529 if (vm_page_free_count < vm_page_free_target) {
1530 if (vm_page_free_wanted++ == 0)
0b4e3aa0 1531 need_wakeup = 1;
91447636 1532 wait_result = assert_wait((event_t)&vm_page_free_count, interruptible);
1c79356b
A
1533 mutex_unlock(&vm_page_queue_free_lock);
1534 counter(c_vm_page_wait_block++);
0b4e3aa0
A
1535
1536 if (need_wakeup)
1537 thread_wakeup((event_t)&vm_page_free_wanted);
9bccf70c 1538
91447636 1539 if (wait_result == THREAD_WAITING)
9bccf70c
A
1540 wait_result = thread_block(THREAD_CONTINUE_NULL);
1541
1c79356b
A
1542 return(wait_result == THREAD_AWAKENED);
1543 } else {
1544 mutex_unlock(&vm_page_queue_free_lock);
1545 return TRUE;
1546 }
1547}
1548
1549/*
1550 * vm_page_alloc:
1551 *
1552 * Allocate and return a memory cell associated
1553 * with this VM object/offset pair.
1554 *
1555 * Object must be locked.
1556 */
1557
1558vm_page_t
1559vm_page_alloc(
1560 vm_object_t object,
1561 vm_object_offset_t offset)
1562{
1563 register vm_page_t mem;
1564
91447636
A
1565#if DEBUG
1566 _mutex_assert(&object->Lock, MA_OWNED);
1567#endif
1c79356b
A
1568 mem = vm_page_grab();
1569 if (mem == VM_PAGE_NULL)
1570 return VM_PAGE_NULL;
1571
1572 vm_page_insert(mem, object, offset);
1573
1574 return(mem);
1575}
1576
1c79356b
A
1577counter(unsigned int c_laundry_pages_freed = 0;)
1578
1579int vm_pagein_cluster_unused = 0;
91447636 1580boolean_t vm_page_free_verify = TRUE;
1c79356b
A
1581/*
1582 * vm_page_free:
1583 *
1584 * Returns the given page to the free list,
1585 * disassociating it with any VM object.
1586 *
1587 * Object and page queues must be locked prior to entry.
1588 */
1589void
1590vm_page_free(
1591 register vm_page_t mem)
1592{
1593 vm_object_t object = mem->object;
1594
1595 assert(!mem->free);
1596 assert(!mem->cleaning);
1597 assert(!mem->pageout);
91447636
A
1598 if (vm_page_free_verify && !mem->fictitious && !mem->private) {
1599 assert(pmap_verify_free(mem->phys_page));
1600 }
1601
1602#if DEBUG
1603 if (mem->object)
1604 _mutex_assert(&mem->object->Lock, MA_OWNED);
1605 _mutex_assert(&vm_page_queue_lock, MA_OWNED);
1c79356b 1606
91447636
A
1607 if (mem->free)
1608 panic("vm_page_free: freeing page on free list\n");
1609#endif
1c79356b
A
1610 if (mem->tabled)
1611 vm_page_remove(mem); /* clears tabled, object, offset */
1612 VM_PAGE_QUEUES_REMOVE(mem); /* clears active or inactive */
1613
1614 if (mem->clustered) {
1615 mem->clustered = FALSE;
1616 vm_pagein_cluster_unused++;
1617 }
1618
1619 if (mem->wire_count) {
1620 if (!mem->private && !mem->fictitious)
1621 vm_page_wire_count--;
1622 mem->wire_count = 0;
1623 assert(!mem->gobbled);
1624 } else if (mem->gobbled) {
1625 if (!mem->private && !mem->fictitious)
1626 vm_page_wire_count--;
1627 vm_page_gobble_count--;
1628 }
1629 mem->gobbled = FALSE;
1630
1631 if (mem->laundry) {
91447636 1632 vm_pageout_throttle_up(mem);
1c79356b 1633 counter(++c_laundry_pages_freed);
1c79356b
A
1634 }
1635
1c79356b
A
1636 PAGE_WAKEUP(mem); /* clears wanted */
1637
1638 if (mem->absent)
1639 vm_object_absent_release(object);
1640
0b4e3aa0 1641 /* Some of these may be unnecessary */
1c79356b
A
1642 mem->page_lock = 0;
1643 mem->unlock_request = 0;
1644 mem->busy = TRUE;
1645 mem->absent = FALSE;
1646 mem->error = FALSE;
1647 mem->dirty = FALSE;
1648 mem->precious = FALSE;
1649 mem->reference = FALSE;
91447636 1650 mem->encrypted = FALSE;
1c79356b
A
1651
1652 mem->page_error = KERN_SUCCESS;
1653
1654 if (mem->private) {
1655 mem->private = FALSE;
1656 mem->fictitious = TRUE;
55e303ae 1657 mem->phys_page = vm_page_fictitious_addr;
1c79356b
A
1658 }
1659 if (mem->fictitious) {
1660 vm_page_release_fictitious(mem);
1661 } else {
9bccf70c
A
1662 /* depends on the queues lock */
1663 if(mem->zero_fill) {
1664 vm_zf_count-=1;
1665 mem->zero_fill = FALSE;
1666 }
55e303ae 1667 vm_page_init(mem, mem->phys_page);
1c79356b
A
1668 vm_page_release(mem);
1669 }
1670}
1671
55e303ae
A
1672
1673void
1674vm_page_free_list(
1675 register vm_page_t mem)
1676{
91447636 1677 register vm_page_t nxt;
55e303ae 1678 register vm_page_t first = NULL;
91447636 1679 register vm_page_t last = VM_PAGE_NULL;
55e303ae
A
1680 register int pg_count = 0;
1681
91447636
A
1682#if DEBUG
1683 _mutex_assert(&vm_page_queue_lock, MA_OWNED);
1684#endif
55e303ae 1685 while (mem) {
91447636
A
1686#if DEBUG
1687 if (mem->tabled || mem->object)
1688 panic("vm_page_free_list: freeing tabled page\n");
1689 if (mem->inactive || mem->active || mem->free)
1690 panic("vm_page_free_list: freeing page on list\n");
1691#endif
1692 assert(mem->pageq.prev == NULL);
55e303ae
A
1693 nxt = (vm_page_t)(mem->pageq.next);
1694
1695 if (mem->clustered)
1696 vm_pagein_cluster_unused++;
1697
1698 if (mem->laundry) {
91447636 1699 vm_pageout_throttle_up(mem);
55e303ae 1700 counter(++c_laundry_pages_freed);
55e303ae
A
1701 }
1702 mem->busy = TRUE;
1703
1704 PAGE_WAKEUP(mem); /* clears wanted */
1705
1706 if (mem->private)
1707 mem->fictitious = TRUE;
1708
1709 if (!mem->fictitious) {
1710 /* depends on the queues lock */
1711 if (mem->zero_fill)
1712 vm_zf_count -= 1;
91447636 1713 assert(!mem->laundry);
55e303ae
A
1714 vm_page_init(mem, mem->phys_page);
1715
1716 mem->free = TRUE;
1717
1718 if (first == NULL)
1719 last = mem;
1720 mem->pageq.next = (queue_t) first;
1721 first = mem;
1722
1723 pg_count++;
1724 } else {
1725 mem->phys_page = vm_page_fictitious_addr;
1726 vm_page_release_fictitious(mem);
1727 }
1728 mem = nxt;
1729 }
1730 if (first) {
1731
1732 mutex_lock(&vm_page_queue_free_lock);
1733
1734 last->pageq.next = (queue_entry_t) vm_page_queue_free;
1735 vm_page_queue_free = first;
1736
1737 vm_page_free_count += pg_count;
1738
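		/*
		 * Wake waiters to match the pages just freed: if the surplus
		 * above the reserve covers everyone, wake them all at once;
		 * otherwise hand out one targeted wakeup per available page
		 * so a partial refill is not swamped by a thundering herd.
		 */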
1739 if ((vm_page_free_wanted > 0) &&
1740 (vm_page_free_count >= vm_page_free_reserved)) {
91447636 1741 unsigned int available_pages;
55e303ae 1742
91447636
A
1743 if (vm_page_free_count >= vm_page_free_reserved) {
1744 available_pages = (vm_page_free_count
1745 - vm_page_free_reserved);
1746 } else {
1747 available_pages = 0;
1748 }
55e303ae
A
1749
1750 if (available_pages >= vm_page_free_wanted) {
1751 vm_page_free_wanted = 0;
1752 thread_wakeup((event_t) &vm_page_free_count);
1753 } else {
1754 while (available_pages--) {
1755 vm_page_free_wanted--;
1756 thread_wakeup_one((event_t) &vm_page_free_count);
1757 }
1758 }
1759 }
1760 mutex_unlock(&vm_page_queue_free_lock);
1761 }
1762}
1763
1764
1c79356b
A
1765/*
1766 * vm_page_wire:
1767 *
1768 * Mark this page as wired down by yet
1769 * another map, removing it from paging queues
1770 * as necessary.
1771 *
1772 * The page's object and the page queues must be locked.
1773 */
1774void
1775vm_page_wire(
1776 register vm_page_t mem)
1777{
1778
91447636 1779// dbgLog(current_thread(), mem->offset, mem->object, 1); /* (TEST/DEBUG) */
1c79356b
A
1780
1781 VM_PAGE_CHECK(mem);
91447636
A
1782#if DEBUG
1783 if (mem->object)
1784 _mutex_assert(&mem->object->Lock, MA_OWNED);
1785 _mutex_assert(&vm_page_queue_lock, MA_OWNED);
1786#endif
1c79356b
A
1787 if (mem->wire_count == 0) {
1788 VM_PAGE_QUEUES_REMOVE(mem);
1789 if (!mem->private && !mem->fictitious && !mem->gobbled)
1790 vm_page_wire_count++;
1791 if (mem->gobbled)
1792 vm_page_gobble_count--;
1793 mem->gobbled = FALSE;
9bccf70c
A
1794 if(mem->zero_fill) {
1795 /* depends on the queues lock */
1796 vm_zf_count-=1;
1797 mem->zero_fill = FALSE;
1798 }
91447636
A
1799 /*
1800 * ENCRYPTED SWAP:
1801 * The page could be encrypted, but
1802 * We don't have to decrypt it here
1803 * because we don't guarantee that the
1804 * data is actually valid at this point.
1805 * The page will get decrypted in
1806 * vm_fault_wire() if needed.
1807 */
1c79356b
A
1808 }
1809 assert(!mem->gobbled);
1810 mem->wire_count++;
1811}
1812
1813/*
1814 * vm_page_gobble:
1815 *
1816 * Mark this page as consumed by the vm/ipc/xmm subsystems.
1817 *
1818 * Called only for freshly vm_page_grab()ed pages - w/ nothing locked.
1819 */
1820void
1821vm_page_gobble(
1822 register vm_page_t mem)
1823{
1824 vm_page_lock_queues();
1825 VM_PAGE_CHECK(mem);
1826
1827 assert(!mem->gobbled);
1828 assert(mem->wire_count == 0);
1829
1830 if (!mem->gobbled && mem->wire_count == 0) {
1831 if (!mem->private && !mem->fictitious)
1832 vm_page_wire_count++;
1833 }
1834 vm_page_gobble_count++;
1835 mem->gobbled = TRUE;
1836 vm_page_unlock_queues();
1837}
1838
1839/*
1840 * vm_page_unwire:
1841 *
1842 * Release one wiring of this page, potentially
1843 * enabling it to be paged again.
1844 *
1845 * The page's object and the page queues must be locked.
1846 */
1847void
1848vm_page_unwire(
1849 register vm_page_t mem)
1850{
1851
91447636 1852// dbgLog(current_thread(), mem->offset, mem->object, 0); /* (TEST/DEBUG) */
1c79356b
A
1853
1854 VM_PAGE_CHECK(mem);
1855 assert(mem->wire_count > 0);
91447636
A
1856#if DEBUG
1857 if (mem->object)
1858 _mutex_assert(&mem->object->Lock, MA_OWNED);
1859 _mutex_assert(&vm_page_queue_lock, MA_OWNED);
1860#endif
1c79356b
A
1861 if (--mem->wire_count == 0) {
1862 assert(!mem->private && !mem->fictitious);
1863 vm_page_wire_count--;
91447636
A
1864 assert(!mem->laundry);
1865 assert(mem->object != kernel_object);
1866 assert(mem->pageq.next == NULL && mem->pageq.prev == NULL);
1c79356b
A
1867 queue_enter(&vm_page_queue_active, mem, vm_page_t, pageq);
1868 vm_page_active_count++;
1869 mem->active = TRUE;
1870 mem->reference = TRUE;
1871 }
1872}
1873
1874/*
1875 * vm_page_deactivate:
1876 *
1877 * Returns the given page to the inactive list,
1878 * indicating that no physical maps have access
1879 * to this page. [Used by the physical mapping system.]
1880 *
1881 * The page queues must be locked.
1882 */
1883void
1884vm_page_deactivate(
1885 register vm_page_t m)
1886{
1887 VM_PAGE_CHECK(m);
91447636 1888 assert(m->object != kernel_object);
1c79356b 1889
55e303ae 1890// dbgLog(m->phys_page, vm_page_free_count, vm_page_wire_count, 6); /* (TEST/DEBUG) */
91447636
A
1891#if DEBUG
1892 _mutex_assert(&vm_page_queue_lock, MA_OWNED);
1893#endif
1c79356b
A
1894 /*
1895 * This page is no longer very interesting. If it was
1896 * interesting (active or inactive/referenced), then we
1897 * clear the reference bit and (re)enter it in the
1898 * inactive queue. Note wired pages should not have
1899 * their reference bit cleared.
1900 */
1901 if (m->gobbled) { /* can this happen? */
1902 assert(m->wire_count == 0);
1903 if (!m->private && !m->fictitious)
1904 vm_page_wire_count--;
1905 vm_page_gobble_count--;
1906 m->gobbled = FALSE;
1907 }
1908 if (m->private || (m->wire_count != 0))
1909 return;
1910 if (m->active || (m->inactive && m->reference)) {
1911 if (!m->fictitious && !m->absent)
55e303ae 1912 pmap_clear_reference(m->phys_page);
1c79356b
A
1913 m->reference = FALSE;
1914 VM_PAGE_QUEUES_REMOVE(m);
1915 }
1916 if (m->wire_count == 0 && !m->inactive) {
0b4e3aa0
A
1917 m->page_ticket = vm_page_ticket;
1918 vm_page_ticket_roll++;
1919
1920 if(vm_page_ticket_roll == VM_PAGE_TICKETS_IN_ROLL) {
1921 vm_page_ticket_roll = 0;
1922 if(vm_page_ticket == VM_PAGE_TICKET_ROLL_IDS)
1923 vm_page_ticket= 0;
1924 else
1925 vm_page_ticket++;
1926 }
1927
91447636
A
1928 assert(!m->laundry);
1929 assert(m->pageq.next == NULL && m->pageq.prev == NULL);
9bccf70c
A
1930 if(m->zero_fill) {
1931 queue_enter(&vm_page_queue_zf, m, vm_page_t, pageq);
1932 } else {
1933 queue_enter(&vm_page_queue_inactive,
1934 m, vm_page_t, pageq);
1935 }
1936
1c79356b
A
1937 m->inactive = TRUE;
1938 if (!m->fictitious)
1939 vm_page_inactive_count++;
1940 }
1941}
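/*
 * Illustrative sketch (hypothetical function): only the page queues
 * lock is needed; the routine clears the pmap reference bit and moves
 * the page to the inactive (or zero-fill) queue.
 */
#if 0
static void
example_deactivate_page(
	vm_page_t	m)
{
	vm_page_lock_queues();
	vm_page_deactivate(m);
	vm_page_unlock_queues();
}
#endif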
1942
1943/*
1944 * vm_page_activate:
1945 *
1946 * Put the specified page on the active list (if appropriate).
1947 *
1948 * The page queues must be locked.
1949 */
1950
1951void
1952vm_page_activate(
1953 register vm_page_t m)
1954{
1955 VM_PAGE_CHECK(m);
91447636
A
1956 assert(m->object != kernel_object);
1957#if DEBUG
1958 _mutex_assert(&vm_page_queue_lock, MA_OWNED);
1959#endif
1c79356b
A
1960 if (m->gobbled) {
1961 assert(m->wire_count == 0);
1962 if (!m->private && !m->fictitious)
1963 vm_page_wire_count--;
1964 vm_page_gobble_count--;
1965 m->gobbled = FALSE;
1966 }
1967 if (m->private)
1968 return;
1969
1970 if (m->inactive) {
91447636 1971 assert(!m->laundry);
9bccf70c
A
1972 if (m->zero_fill) {
1973 queue_remove(&vm_page_queue_zf, m, vm_page_t, pageq);
1974 } else {
1975 queue_remove(&vm_page_queue_inactive,
1976 m, vm_page_t, pageq);
1977 }
91447636
A
1978 m->pageq.next = NULL;
1979 m->pageq.prev = NULL;
1c79356b
A
1980 if (!m->fictitious)
1981 vm_page_inactive_count--;
1982 m->inactive = FALSE;
1983 }
1984 if (m->wire_count == 0) {
91447636 1985#if DEBUG
1c79356b
A
1986 if (m->active)
1987 panic("vm_page_activate: already active");
91447636
A
1988#endif
1989 assert(!m->laundry);
1990 assert(m->pageq.next == NULL && m->pageq.prev == NULL);
1c79356b
A
1991 queue_enter(&vm_page_queue_active, m, vm_page_t, pageq);
1992 m->active = TRUE;
1993 m->reference = TRUE;
1994 if (!m->fictitious)
1995 vm_page_active_count++;
1996 }
1997}
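/*
 * Illustrative sketch (hypothetical function): under the page queues
 * lock, the page is pulled off the inactive queue and entered on the
 * active queue with its reference bit set.
 */
#if 0
static void
example_activate_page(
	vm_page_t	m)
{
	vm_page_lock_queues();
	vm_page_activate(m);
	vm_page_unlock_queues();
}
#endif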
1998
1999/*
2000 * vm_page_part_zero_fill:
2001 *
2002 * Zero-fill a part of the page.
2003 */
2004void
2005vm_page_part_zero_fill(
2006 vm_page_t m,
2007 vm_offset_t m_pa,
2008 vm_size_t len)
2009{
2010 vm_page_t tmp;
2011
2012 VM_PAGE_CHECK(m);
2013#ifdef PMAP_ZERO_PART_PAGE_IMPLEMENTED
55e303ae 2014 pmap_zero_part_page(m->phys_page, m_pa, len);
1c79356b
A
2015#else
2016 while (1) {
2017 tmp = vm_page_grab();
2018 if (tmp == VM_PAGE_NULL) {
2019 vm_page_wait(THREAD_UNINT);
2020 continue;
2021 }
2022 break;
2023 }
2024 vm_page_zero_fill(tmp);
2025 if(m_pa != 0) {
2026 vm_page_part_copy(m, 0, tmp, 0, m_pa);
2027 }
2028 if((m_pa + len) < PAGE_SIZE) {
2029 vm_page_part_copy(m, m_pa + len, tmp,
2030 m_pa + len, PAGE_SIZE - (m_pa + len));
2031 }
2032 vm_page_copy(tmp,m);
2033 vm_page_lock_queues();
2034 vm_page_free(tmp);
2035 vm_page_unlock_queues();
2036#endif
2037
2038}
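/*
 * Illustrative sketch (hypothetical function): zero the last 512 bytes
 * of a page.  On configurations without PMAP_ZERO_PART_PAGE_IMPLEMENTED
 * the fallback path above grabs a scratch page, so the caller may block.
 */
#if 0
static void
example_zero_page_tail(
	vm_page_t	m)
{
	vm_page_part_zero_fill(m, PAGE_SIZE - 512, (vm_size_t)512);
}
#endif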
2039
2040/*
2041 * vm_page_zero_fill:
2042 *
2043 * Zero-fill the specified page.
2044 */
2045void
2046vm_page_zero_fill(
2047 vm_page_t m)
2048{
2049 XPR(XPR_VM_PAGE,
2050 "vm_page_zero_fill, object 0x%X offset 0x%X page 0x%X\n",
2051 (integer_t)m->object, (integer_t)m->offset, (integer_t)m, 0,0);
2052
2053 VM_PAGE_CHECK(m);
2054
55e303ae
A
2055// dbgTrace(0xAEAEAEAE, m->phys_page, 0); /* (BRINGUP) */
2056 pmap_zero_page(m->phys_page);
1c79356b
A
2057}
2058
2059/*
2060 * vm_page_part_copy:
2061 *
 2062	 * Copy part of one page to another.
2063 */
2064
2065void
2066vm_page_part_copy(
2067 vm_page_t src_m,
2068 vm_offset_t src_pa,
2069 vm_page_t dst_m,
2070 vm_offset_t dst_pa,
2071 vm_size_t len)
2072{
2073 VM_PAGE_CHECK(src_m);
2074 VM_PAGE_CHECK(dst_m);
2075
55e303ae
A
2076 pmap_copy_part_page(src_m->phys_page, src_pa,
2077 dst_m->phys_page, dst_pa, len);
1c79356b
A
2078}
2079
2080/*
2081 * vm_page_copy:
2082 *
2083 * Copy one page to another
91447636
A
2084 *
2085 * ENCRYPTED SWAP:
2086 * The source page should not be encrypted. The caller should
2087 * make sure the page is decrypted first, if necessary.
1c79356b
A
2088 */
2089
2090void
2091vm_page_copy(
2092 vm_page_t src_m,
2093 vm_page_t dest_m)
2094{
2095 XPR(XPR_VM_PAGE,
2096 "vm_page_copy, object 0x%X offset 0x%X to object 0x%X offset 0x%X\n",
2097 (integer_t)src_m->object, src_m->offset,
2098 (integer_t)dest_m->object, dest_m->offset,
2099 0);
2100
2101 VM_PAGE_CHECK(src_m);
2102 VM_PAGE_CHECK(dest_m);
2103
91447636
A
2104 /*
2105 * ENCRYPTED SWAP:
2106 * The source page should not be encrypted at this point.
2107 * The destination page will therefore not contain encrypted
2108 * data after the copy.
2109 */
2110 if (src_m->encrypted) {
2111 panic("vm_page_copy: source page %p is encrypted\n", src_m);
2112 }
2113 dest_m->encrypted = FALSE;
2114
55e303ae 2115 pmap_copy_page(src_m->phys_page, dest_m->phys_page);
1c79356b
A
2116}
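/*
 * Illustrative sketch (hypothetical function): the caller is
 * responsible for decrypting the source beforehand; the copy leaves
 * the destination marked as not encrypted.
 */
#if 0
static void
example_copy_page(
	vm_page_t	src,
	vm_page_t	dst)
{
	assert(!src->encrypted);	/* otherwise vm_page_copy() panics */
	vm_page_copy(src, dst);		/* dst->encrypted is now FALSE */
}
#endif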
2117
1c79356b
A
2118/*
2119 * Currently, this is a primitive allocator that grabs
2120 * free pages from the system, sorts them by physical
2121 * address, then searches for a region large enough to
2122 * satisfy the user's request.
2123 *
2124 * Additional levels of effort:
2125 * + steal clean active/inactive pages
2126 * + force pageouts of dirty pages
2127 * + maintain a map of available physical
2128 * memory
2129 */
2130
1c79356b
A
2131#if MACH_ASSERT
2132/*
2133 * Check that the list of pages is ordered by
2134 * ascending physical address and has no holes.
2135 */
91447636
A
2136int vm_page_verify_contiguous(
2137 vm_page_t pages,
2138 unsigned int npages);
2139
1c79356b
A
2140int
2141vm_page_verify_contiguous(
2142 vm_page_t pages,
2143 unsigned int npages)
2144{
2145 register vm_page_t m;
2146 unsigned int page_count;
91447636 2147 vm_offset_t prev_addr;
1c79356b 2148
55e303ae 2149 prev_addr = pages->phys_page;
1c79356b
A
2150 page_count = 1;
2151 for (m = NEXT_PAGE(pages); m != VM_PAGE_NULL; m = NEXT_PAGE(m)) {
55e303ae 2152 if (m->phys_page != prev_addr + 1) {
1c79356b 2153 printf("m 0x%x prev_addr 0x%x, current addr 0x%x\n",
55e303ae 2154 m, prev_addr, m->phys_page);
91447636 2155 printf("pages 0x%x page_count %d\n", pages, page_count);
1c79356b
A
2156 panic("vm_page_verify_contiguous: not contiguous!");
2157 }
55e303ae 2158 prev_addr = m->phys_page;
1c79356b
A
2159 ++page_count;
2160 }
2161 if (page_count != npages) {
2162 printf("pages 0x%x actual count 0x%x but requested 0x%x\n",
2163 pages, page_count, npages);
2164 panic("vm_page_verify_contiguous: count error");
2165 }
2166 return 1;
2167}
2168#endif /* MACH_ASSERT */
2169
2170
91447636
A
2171cpm_counter(unsigned int vpfls_pages_handled = 0;)
2172cpm_counter(unsigned int vpfls_head_insertions = 0;)
2173cpm_counter(unsigned int vpfls_tail_insertions = 0;)
2174cpm_counter(unsigned int vpfls_general_insertions = 0;)
2175cpm_counter(unsigned int vpfc_failed = 0;)
2176cpm_counter(unsigned int vpfc_satisfied = 0;)
2177
1c79356b
A
2178/*
2179 * Find a region large enough to contain at least npages
2180 * of contiguous physical memory.
2181 *
2182 * Requirements:
2183 * - Called while holding vm_page_queue_free_lock.
2184 * - Doesn't respect vm_page_free_reserved; caller
2185 * must not ask for more pages than are legal to grab.
2186 *
2187 * Returns a pointer to a list of gobbled pages or VM_PAGE_NULL.
2188 *
e5568f75
A
2189 * Algorithm:
2190 * Loop over the free list, extracting one page at a time and
2191 * inserting those into a sorted sub-list. We stop as soon as
2192 * there's a contiguous range within the sorted list that can
2193 * satisfy the contiguous memory request. This contiguous sub-
2194 * list is chopped out of the sorted sub-list and the remainder
2195 * of the sorted sub-list is put back onto the beginning of the
2196 * free list.
1c79356b
A
2197 */
2198static vm_page_t
2199vm_page_find_contiguous(
e5568f75 2200 unsigned int contig_pages)
1c79356b 2201{
e5568f75
A
2202 vm_page_t sort_list;
2203 vm_page_t *contfirstprev, contlast;
2204 vm_page_t m, m1;
2205 ppnum_t prevcontaddr;
2206 ppnum_t nextcontaddr;
2207 unsigned int npages;
2208
91447636
A
2209 m = NULL;
2210#if DEBUG
2211 _mutex_assert(&vm_page_queue_free_lock, MA_OWNED);
2212#endif
e5568f75
A
2213#if MACH_ASSERT
2214 /*
 2215	 * Verify pages in the free list.
2216 */
2217 npages = 0;
2218 for (m = vm_page_queue_free; m != VM_PAGE_NULL; m = NEXT_PAGE(m))
2219 ++npages;
2220 if (npages != vm_page_free_count)
 2221		panic("vm_page_find_contiguous: prelim: npages %u free_count %d",
2222 npages, vm_page_free_count);
2223#endif /* MACH_ASSERT */
1c79356b 2224
e5568f75 2225 if (contig_pages == 0 || vm_page_queue_free == VM_PAGE_NULL)
1c79356b
A
2226 return VM_PAGE_NULL;
2227
91447636
A
2228#define PPNUM_PREV(x) (((x) > 0) ? ((x) - 1) : 0)
2229#define PPNUM_NEXT(x) (((x) < PPNUM_MAX) ? ((x) + 1) : PPNUM_MAX)
2230#define SET_NEXT_PAGE(m,n) ((m)->pageq.next = (struct queue_entry *) (n))
1c79356b 2231
e5568f75
A
2232 npages = 1;
2233 contfirstprev = &sort_list;
2234 contlast = sort_list = vm_page_queue_free;
2235 vm_page_queue_free = NEXT_PAGE(sort_list);
2236 SET_NEXT_PAGE(sort_list, VM_PAGE_NULL);
2237 prevcontaddr = PPNUM_PREV(sort_list->phys_page);
2238 nextcontaddr = PPNUM_NEXT(sort_list->phys_page);
2239
2240 while (npages < contig_pages &&
2241 (m = vm_page_queue_free) != VM_PAGE_NULL)
2242 {
2243 cpm_counter(++vpfls_pages_handled);
2244
2245 /* prepend to existing run? */
2246 if (m->phys_page == prevcontaddr)
2247 {
2248 vm_page_queue_free = NEXT_PAGE(m);
2249 cpm_counter(++vpfls_head_insertions);
2250 prevcontaddr = PPNUM_PREV(prevcontaddr);
2251 SET_NEXT_PAGE(m, *contfirstprev);
2252 *contfirstprev = m;
2253 npages++;
2254 continue; /* no tail expansion check needed */
2255 }
2256
2257 /* append to tail of existing run? */
2258 else if (m->phys_page == nextcontaddr)
2259 {
2260 vm_page_queue_free = NEXT_PAGE(m);
2261 cpm_counter(++vpfls_tail_insertions);
2262 nextcontaddr = PPNUM_NEXT(nextcontaddr);
2263 SET_NEXT_PAGE(m, NEXT_PAGE(contlast));
2264 SET_NEXT_PAGE(contlast, m);
2265 contlast = m;
2266 npages++;
2267 }
2268
2269 /* prepend to the very front of sorted list? */
2270 else if (m->phys_page < sort_list->phys_page)
2271 {
2272 vm_page_queue_free = NEXT_PAGE(m);
2273 cpm_counter(++vpfls_general_insertions);
2274 prevcontaddr = PPNUM_PREV(m->phys_page);
2275 nextcontaddr = PPNUM_NEXT(m->phys_page);
2276 SET_NEXT_PAGE(m, sort_list);
2277 contfirstprev = &sort_list;
2278 contlast = sort_list = m;
2279 npages = 1;
1c79356b
A
2280 }
2281
e5568f75
A
2282 else /* get to proper place for insertion */
2283 {
2284 if (m->phys_page < nextcontaddr)
2285 {
2286 prevcontaddr = PPNUM_PREV(sort_list->phys_page);
2287 nextcontaddr = PPNUM_NEXT(sort_list->phys_page);
2288 contfirstprev = &sort_list;
2289 contlast = sort_list;
2290 npages = 1;
2291 }
2292 for (m1 = NEXT_PAGE(contlast);
2293 npages < contig_pages &&
2294 m1 != VM_PAGE_NULL && m1->phys_page < m->phys_page;
2295 m1 = NEXT_PAGE(m1))
2296 {
2297 if (m1->phys_page != nextcontaddr) {
2298 prevcontaddr = PPNUM_PREV(m1->phys_page);
2299 contfirstprev = NEXT_PAGE_PTR(contlast);
2300 npages = 1;
2301 } else {
2302 npages++;
2303 }
2304 nextcontaddr = PPNUM_NEXT(m1->phys_page);
2305 contlast = m1;
2306 }
2307
1c79356b 2308 /*
e5568f75
A
2309 * We may actually already have enough.
2310 * This could happen if a previous prepend
2311 * joined up two runs to meet our needs.
2312 * If so, bail before we take the current
2313 * page off the free queue.
1c79356b 2314 */
e5568f75
A
2315 if (npages == contig_pages)
2316 break;
2317
91447636
A
2318 if (m->phys_page != nextcontaddr)
2319 {
e5568f75
A
2320 contfirstprev = NEXT_PAGE_PTR(contlast);
2321 prevcontaddr = PPNUM_PREV(m->phys_page);
2322 nextcontaddr = PPNUM_NEXT(m->phys_page);
2323 npages = 1;
2324 } else {
2325 nextcontaddr = PPNUM_NEXT(nextcontaddr);
2326 npages++;
1c79356b 2327 }
e5568f75
A
2328 vm_page_queue_free = NEXT_PAGE(m);
2329 cpm_counter(++vpfls_general_insertions);
2330 SET_NEXT_PAGE(m, NEXT_PAGE(contlast));
2331 SET_NEXT_PAGE(contlast, m);
2332 contlast = m;
2333 }
2334
2335 /* See how many pages are now contiguous after the insertion */
2336 for (m1 = NEXT_PAGE(m);
2337 npages < contig_pages &&
2338 m1 != VM_PAGE_NULL && m1->phys_page == nextcontaddr;
2339 m1 = NEXT_PAGE(m1))
2340 {
2341 nextcontaddr = PPNUM_NEXT(nextcontaddr);
2342 contlast = m1;
2343 npages++;
1c79356b 2344 }
e5568f75 2345 }
1c79356b 2346
e5568f75
A
2347 /* how did we do? */
2348 if (npages == contig_pages)
2349 {
2350 cpm_counter(++vpfc_satisfied);
2351
2352 /* remove the contiguous range from the sorted list */
2353 m = *contfirstprev;
2354 *contfirstprev = NEXT_PAGE(contlast);
2355 SET_NEXT_PAGE(contlast, VM_PAGE_NULL);
2356 assert(vm_page_verify_contiguous(m, npages));
2357
2358 /* inline vm_page_gobble() for each returned page */
2359 for (m1 = m; m1 != VM_PAGE_NULL; m1 = NEXT_PAGE(m1)) {
2360 assert(m1->free);
2361 assert(!m1->wanted);
91447636 2362 assert(!m1->laundry);
e5568f75
A
2363 m1->free = FALSE;
2364 m1->no_isync = TRUE;
2365 m1->gobbled = TRUE;
2366 }
2367 vm_page_wire_count += npages;
2368 vm_page_gobble_count += npages;
2369 vm_page_free_count -= npages;
2370
2371 /* stick free list at the tail of the sorted list */
2372 while ((m1 = *contfirstprev) != VM_PAGE_NULL)
2373 contfirstprev = (vm_page_t *)&m1->pageq.next;
2374 *contfirstprev = vm_page_queue_free;
1c79356b 2375 }
e5568f75
A
2376
2377 vm_page_queue_free = sort_list;
2378 return m;
1c79356b
A
2379}
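/*
 * Simplified illustration of the run-detection idea described in the
 * header comment above (not kernel code: array-based rather than
 * list-based, and the name is hypothetical).  Sort the physical page
 * numbers, then look for `want' consecutive values, just as the loop
 * above grows a sorted sub-list until it contains a contiguous run.
 */
#if 0
static int
has_contiguous_run(ppnum_t *pn, unsigned int n, unsigned int want)
{
	unsigned int	i, j, run;
	ppnum_t		key;

	if (n == 0 || want == 0)
		return (want == 0);
	for (i = 1; i < n; i++) {		/* insertion sort by page number */
		key = pn[i];
		for (j = i; j > 0 && pn[j - 1] > key; j--)
			pn[j] = pn[j - 1];
		pn[j] = key;
	}
	for (i = 1, run = 1; i < n && run < want; i++)
		run = (pn[i] == pn[i - 1] + 1) ? run + 1 : 1;
	return (run >= want);
}
#endif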
2380
2381/*
2382 * Allocate a list of contiguous, wired pages.
2383 */
2384kern_return_t
2385cpm_allocate(
2386 vm_size_t size,
2387 vm_page_t *list,
2388 boolean_t wire)
2389{
2390 register vm_page_t m;
91447636
A
2391 vm_page_t pages;
2392 unsigned int npages;
2393 unsigned int vm_pages_available;
e5568f75 2394 boolean_t wakeup;
1c79356b
A
2395
2396 if (size % page_size != 0)
2397 return KERN_INVALID_ARGUMENT;
2398
2399 vm_page_lock_queues();
2400 mutex_lock(&vm_page_queue_free_lock);
2401
2402 /*
2403 * Should also take active and inactive pages
2404 * into account... One day...
2405 */
e5568f75 2406 npages = size / page_size;
1c79356b
A
2407 vm_pages_available = vm_page_free_count - vm_page_free_reserved;
2408
e5568f75 2409 if (npages > vm_pages_available) {
1c79356b 2410 mutex_unlock(&vm_page_queue_free_lock);
e5568f75 2411 vm_page_unlock_queues();
1c79356b
A
2412 return KERN_RESOURCE_SHORTAGE;
2413 }
2414
1c79356b
A
2415 /*
2416 * Obtain a pointer to a subset of the free
2417 * list large enough to satisfy the request;
2418 * the region will be physically contiguous.
2419 */
2420 pages = vm_page_find_contiguous(npages);
e5568f75
A
2421
2422 /* adjust global freelist counts and determine need for wakeups */
2423 if (vm_page_free_count < vm_page_free_count_minimum)
2424 vm_page_free_count_minimum = vm_page_free_count;
2425
2426 wakeup = ((vm_page_free_count < vm_page_free_min) ||
2427 ((vm_page_free_count < vm_page_free_target) &&
2428 (vm_page_inactive_count < vm_page_inactive_target)));
2429
2430 mutex_unlock(&vm_page_queue_free_lock);
2431
1c79356b 2432 if (pages == VM_PAGE_NULL) {
1c79356b
A
2433 vm_page_unlock_queues();
2434 return KERN_NO_SPACE;
2435 }
2436
1c79356b
A
2437 /*
2438 * Walk the returned list, wiring the pages.
2439 */
2440 if (wire == TRUE)
2441 for (m = pages; m != VM_PAGE_NULL; m = NEXT_PAGE(m)) {
2442 /*
2443 * Essentially inlined vm_page_wire.
2444 */
2445 assert(!m->active);
2446 assert(!m->inactive);
2447 assert(!m->private);
2448 assert(!m->fictitious);
2449 assert(m->wire_count == 0);
2450 assert(m->gobbled);
2451 m->gobbled = FALSE;
2452 m->wire_count++;
2453 --vm_page_gobble_count;
2454 }
2455 vm_page_unlock_queues();
2456
e5568f75
A
2457 if (wakeup)
2458 thread_wakeup((event_t) &vm_page_free_wanted);
2459
1c79356b
A
2460 /*
2461 * The CPM pages should now be available and
2462 * ordered by ascending physical address.
2463 */
2464 assert(vm_page_verify_contiguous(pages, npages));
2465
2466 *list = pages;
2467 return KERN_SUCCESS;
2468}
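/*
 * Illustrative caller sketch (hypothetical function): request four
 * physically contiguous, wired pages and walk the returned list, whose
 * phys_page values ascend by exactly one per element.
 */
#if 0
static kern_return_t
example_cpm_usage(void)
{
	vm_page_t	pages, m;
	kern_return_t	kr;

	kr = cpm_allocate((vm_size_t)(4 * PAGE_SIZE), &pages, TRUE);
	if (kr != KERN_SUCCESS)
		return kr;
	for (m = pages; m != VM_PAGE_NULL; m = NEXT_PAGE(m)) {
		/* use m->phys_page here */
	}
	return KERN_SUCCESS;
}
#endif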
2469
2470
2471#include <mach_vm_debug.h>
2472#if MACH_VM_DEBUG
2473
2474#include <mach_debug/hash_info.h>
2475#include <vm/vm_debug.h>
2476
2477/*
2478 * Routine: vm_page_info
2479 * Purpose:
2480 * Return information about the global VP table.
2481 * Fills the buffer with as much information as possible
2482 * and returns the desired size of the buffer.
2483 * Conditions:
2484 * Nothing locked. The caller should provide
2485 * possibly-pageable memory.
2486 */
2487
2488unsigned int
2489vm_page_info(
2490 hash_info_bucket_t *info,
2491 unsigned int count)
2492{
91447636 2493 unsigned int i;
1c79356b
A
2494
2495 if (vm_page_bucket_count < count)
2496 count = vm_page_bucket_count;
2497
2498 for (i = 0; i < count; i++) {
2499 vm_page_bucket_t *bucket = &vm_page_buckets[i];
2500 unsigned int bucket_count = 0;
2501 vm_page_t m;
2502
2503 simple_lock(&vm_page_bucket_lock);
2504 for (m = bucket->pages; m != VM_PAGE_NULL; m = m->next)
2505 bucket_count++;
2506 simple_unlock(&vm_page_bucket_lock);
2507
2508 /* don't touch pageable memory while holding locks */
2509 info[i].hib_count = bucket_count;
2510 }
2511
2512 return vm_page_bucket_count;
2513}
2514#endif /* MACH_VM_DEBUG */
2515
2516#include <mach_kdb.h>
2517#if MACH_KDB
2518
2519#include <ddb/db_output.h>
2520#include <vm/vm_print.h>
2521#define printf kdbprintf
2522
2523/*
2524 * Routine: vm_page_print [exported]
2525 */
2526void
2527vm_page_print(
91447636 2528 db_addr_t db_addr)
1c79356b 2529{
91447636
A
2530 vm_page_t p;
2531
2532 p = (vm_page_t) (long) db_addr;
1c79356b
A
2533
2534 iprintf("page 0x%x\n", p);
2535
2536 db_indent += 2;
2537
2538 iprintf("object=0x%x", p->object);
2539 printf(", offset=0x%x", p->offset);
2540 printf(", wire_count=%d", p->wire_count);
1c79356b 2541
91447636 2542 iprintf("%sinactive, %sactive, %sgobbled, %slaundry, %sfree, %sref, %sencrypted\n",
1c79356b
A
2543 (p->inactive ? "" : "!"),
2544 (p->active ? "" : "!"),
2545 (p->gobbled ? "" : "!"),
2546 (p->laundry ? "" : "!"),
2547 (p->free ? "" : "!"),
2548 (p->reference ? "" : "!"),
91447636 2549 (p->encrypted ? "" : "!"));
1c79356b
A
2550 iprintf("%sbusy, %swanted, %stabled, %sfictitious, %sprivate, %sprecious\n",
2551 (p->busy ? "" : "!"),
2552 (p->wanted ? "" : "!"),
2553 (p->tabled ? "" : "!"),
2554 (p->fictitious ? "" : "!"),
2555 (p->private ? "" : "!"),
2556 (p->precious ? "" : "!"));
2557 iprintf("%sabsent, %serror, %sdirty, %scleaning, %spageout, %sclustered\n",
2558 (p->absent ? "" : "!"),
2559 (p->error ? "" : "!"),
2560 (p->dirty ? "" : "!"),
2561 (p->cleaning ? "" : "!"),
2562 (p->pageout ? "" : "!"),
2563 (p->clustered ? "" : "!"));
0b4e3aa0 2564 iprintf("%slock_supplied, %soverwriting, %srestart, %sunusual\n",
1c79356b
A
2565 (p->lock_supplied ? "" : "!"),
2566 (p->overwriting ? "" : "!"),
2567 (p->restart ? "" : "!"),
0b4e3aa0 2568 (p->unusual ? "" : "!"));
1c79356b 2569
55e303ae 2570 iprintf("phys_page=0x%x", p->phys_page);
1c79356b
A
2571 printf(", page_error=0x%x", p->page_error);
2572 printf(", page_lock=0x%x", p->page_lock);
2573 printf(", unlock_request=%d\n", p->unlock_request);
2574
2575 db_indent -= 2;
2576}
2577#endif /* MACH_KDB */