 1/*
 2 * Copyright (c) 2000-2003 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
 6 * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved.
 7 *
8 * This file contains Original Code and/or Modifications of Original Code
9 * as defined in and that are subject to the Apple Public Source License
10 * Version 2.0 (the 'License'). You may not use this file except in
11 * compliance with the License. Please obtain a copy of the License at
12 * http://www.opensource.apple.com/apsl/ and read it before using this
13 * file.
14 *
15 * The Original Code and all software distributed under the License are
16 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
17 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
18 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
20 * Please see the License for the specific language governing rights and
21 * limitations under the License.
22 *
23 * @APPLE_LICENSE_HEADER_END@
24 */
25/*
26 * @OSF_COPYRIGHT@
27 */
28/*
29 * Mach Operating System
30 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
31 * All Rights Reserved.
32 *
33 * Permission to use, copy, modify and distribute this software and its
34 * documentation is hereby granted, provided that both the copyright
35 * notice and this permission notice appear in all copies of the
36 * software, derivative works or modified versions, and any portions
37 * thereof, and that both notices appear in supporting documentation.
38 *
39 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
40 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
41 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
42 *
43 * Carnegie Mellon requests users of this software to return to
44 *
45 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
46 * School of Computer Science
47 * Carnegie Mellon University
48 * Pittsburgh PA 15213-3890
49 *
50 * any improvements or extensions that they make and grant Carnegie Mellon
51 * the rights to redistribute these changes.
52 */
53/*
54 */
55/*
56 * File: vm/vm_page.c
57 * Author: Avadis Tevanian, Jr., Michael Wayne Young
58 *
59 * Resident memory management module.
60 */
61
 62#include <mach/clock_types.h>
63#include <mach/vm_prot.h>
64#include <mach/vm_statistics.h>
65#include <kern/counters.h>
66#include <kern/sched_prim.h>
67#include <kern/task.h>
68#include <kern/thread.h>
69#include <kern/zalloc.h>
70#include <kern/xpr.h>
71#include <vm/pmap.h>
72#include <vm/vm_init.h>
73#include <vm/vm_map.h>
74#include <vm/vm_page.h>
75#include <vm/vm_pageout.h>
76#include <vm/vm_kern.h> /* kernel_memory_allocate() */
77#include <kern/misc_protos.h>
78#include <zone_debug.h>
79#include <vm/cpm.h>
80#include <ppc/mappings.h> /* (BRINGUP) */
81#include <pexpert/pexpert.h> /* (BRINGUP) */
82
 83
84/* Variables used to indicate the relative age of pages in the
85 * inactive list
86 */
87
88int vm_page_ticket_roll = 0;
89int vm_page_ticket = 0;
90/*
91 * Associated with page of user-allocatable memory is a
92 * page structure.
93 */
94
95/*
96 * These variables record the values returned by vm_page_bootstrap,
97 * for debugging purposes. The implementation of pmap_steal_memory
98 * and pmap_startup here also uses them internally.
99 */
100
101vm_offset_t virtual_space_start;
102vm_offset_t virtual_space_end;
103int vm_page_pages;
104
105/*
106 * The vm_page_lookup() routine, which provides for fast
107 * (virtual memory object, offset) to page lookup, employs
108 * the following hash table. The vm_page_{insert,remove}
109 * routines install and remove associations in the table.
110 * [This table is often called the virtual-to-physical,
111 * or VP, table.]
112 */
113typedef struct {
114 vm_page_t pages;
115#if MACH_PAGE_HASH_STATS
116 int cur_count; /* current count */
117 int hi_count; /* high water mark */
118#endif /* MACH_PAGE_HASH_STATS */
119} vm_page_bucket_t;
120
121vm_page_bucket_t *vm_page_buckets; /* Array of buckets */
122unsigned int vm_page_bucket_count = 0; /* How big is array? */
123unsigned int vm_page_hash_mask; /* Mask for hash function */
124unsigned int vm_page_hash_shift; /* Shift for hash function */
 125uint32_t vm_page_bucket_hash; /* Basic bucket hash */
126decl_simple_lock_data(,vm_page_bucket_lock)
127
128#if MACH_PAGE_HASH_STATS
129/* This routine is only for debug. It is intended to be called by
130 * hand by a developer using a kernel debugger. This routine prints
131 * out vm_page_hash table statistics to the kernel debug console.
132 */
133void
134hash_debug(void)
135{
136 int i;
137 int numbuckets = 0;
138 int highsum = 0;
139 int maxdepth = 0;
140
141 for (i = 0; i < vm_page_bucket_count; i++) {
142 if (vm_page_buckets[i].hi_count) {
143 numbuckets++;
144 highsum += vm_page_buckets[i].hi_count;
145 if (vm_page_buckets[i].hi_count > maxdepth)
146 maxdepth = vm_page_buckets[i].hi_count;
147 }
148 }
149 printf("Total number of buckets: %d\n", vm_page_bucket_count);
150 printf("Number used buckets: %d = %d%%\n",
151 numbuckets, 100*numbuckets/vm_page_bucket_count);
152 printf("Number unused buckets: %d = %d%%\n",
153 vm_page_bucket_count - numbuckets,
154 100*(vm_page_bucket_count-numbuckets)/vm_page_bucket_count);
155 printf("Sum of bucket max depth: %d\n", highsum);
 156 printf("Average bucket depth: %d.%02d\n",
 157 highsum/vm_page_bucket_count,
 158 (highsum%vm_page_bucket_count) * 100 / vm_page_bucket_count);
159 printf("Maximum bucket depth: %d\n", maxdepth);
160}
161#endif /* MACH_PAGE_HASH_STATS */
162
163/*
164 * The virtual page size is currently implemented as a runtime
165 * variable, but is constant once initialized using vm_set_page_size.
166 * This initialization must be done in the machine-dependent
167 * bootstrap sequence, before calling other machine-independent
168 * initializations.
169 *
170 * All references to the virtual page size outside this
171 * module must use the PAGE_SIZE, PAGE_MASK and PAGE_SHIFT
172 * constants.
173 */
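/*
 * Illustrative sketch (assuming the default 4 KB page declared below):
 * PAGE_SHIFT is 12 and PAGE_MASK is 0xFFF, so code outside this module
 * splits a byte address like this:
 *
 *	vm_offset_t base   = addr & ~PAGE_MASK;		(page-aligned base)
 *	vm_offset_t offset = addr &  PAGE_MASK;		(offset within page)
 *	unsigned    pfn    = addr >> PAGE_SHIFT;	(page frame index)
 */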
174#ifndef PAGE_SIZE_FIXED
175vm_size_t page_size = 4096;
176vm_size_t page_mask = 4095;
177int page_shift = 12;
178#else
179vm_size_t page_size = PAGE_SIZE;
180vm_size_t page_mask = PAGE_MASK;
181int page_shift = PAGE_SHIFT;
182#endif /* PAGE_SIZE_FIXED */
183
184/*
185 * Resident page structures are initialized from
186 * a template (see vm_page_alloc).
187 *
188 * When adding a new field to the virtual memory
189 * object structure, be sure to add initialization
190 * (see vm_page_bootstrap).
191 */
192struct vm_page vm_page_template;
193
194/*
195 * Resident pages that represent real memory
196 * are allocated from a free list.
197 */
198vm_page_t vm_page_queue_free;
199vm_page_t vm_page_queue_fictitious;
200decl_mutex_data(,vm_page_queue_free_lock)
201unsigned int vm_page_free_wanted;
202int vm_page_free_count;
203int vm_page_fictitious_count;
204
205unsigned int vm_page_free_count_minimum; /* debugging */
206
207/*
208 * Occasionally, the virtual memory system uses
209 * resident page structures that do not refer to
210 * real pages, for example to leave a page with
211 * important state information in the VP table.
212 *
213 * These page structures are allocated the way
214 * most other kernel structures are.
215 */
216zone_t vm_page_zone;
217decl_mutex_data(,vm_page_alloc_lock)
218unsigned int io_throttle_zero_fill;
219decl_mutex_data(,vm_page_zero_fill_lock)
220
221/*
222 * Fictitious pages don't have a physical address,
 223 * but we must initialize phys_page to something.
224 * For debugging, this should be a strange value
225 * that the pmap module can recognize in assertions.
226 */
227vm_offset_t vm_page_fictitious_addr = (vm_offset_t) -1;
228
229/*
230 * Resident page structures are also chained on
231 * queues that are used by the page replacement
232 * system (pageout daemon). These queues are
233 * defined here, but are shared by the pageout
 234 * module. The inactive queue is broken into
 235 * inactive and zf for convenience as the
 236 * pageout daemon often assigns a higher
 237 * affinity to zf pages
238 */
239queue_head_t vm_page_queue_active;
240queue_head_t vm_page_queue_inactive;
 241queue_head_t vm_page_queue_zf;
242decl_mutex_data(,vm_page_queue_lock)
243int vm_page_active_count;
244int vm_page_inactive_count;
245int vm_page_wire_count;
246int vm_page_gobble_count = 0;
247int vm_page_wire_count_warning = 0;
248int vm_page_gobble_count_warning = 0;
249
250/* the following fields are protected by the vm_page_queue_lock */
251queue_head_t vm_page_queue_limbo;
252int vm_page_limbo_count = 0; /* total pages in limbo */
253int vm_page_limbo_real_count = 0; /* real pages in limbo */
254int vm_page_pin_count = 0; /* number of pinned pages */
255
256decl_simple_lock_data(,vm_page_preppin_lock)
257
258/*
259 * Several page replacement parameters are also
260 * shared with this module, so that page allocation
261 * (done here in vm_page_alloc) can trigger the
262 * pageout daemon.
263 */
264int vm_page_free_target = 0;
265int vm_page_free_min = 0;
266int vm_page_inactive_target = 0;
267int vm_page_free_reserved = 0;
268int vm_page_laundry_count = 0;
269int vm_page_burst_count = 0;
270int vm_page_throttled_count = 0;
271
272/*
273 * The VM system has a couple of heuristics for deciding
274 * that pages are "uninteresting" and should be placed
275 * on the inactive queue as likely candidates for replacement.
276 * These variables let the heuristics be controlled at run-time
277 * to make experimentation easier.
278 */
279
280boolean_t vm_page_deactivate_hint = TRUE;
281
282/*
283 * vm_set_page_size:
284 *
285 * Sets the page size, perhaps based upon the memory
286 * size. Must be called before any use of page-size
287 * dependent functions.
288 *
289 * Sets page_shift and page_mask from page_size.
290 */
291void
292vm_set_page_size(void)
293{
294#ifndef PAGE_SIZE_FIXED
295 page_mask = page_size - 1;
296
297 if ((page_mask & page_size) != 0)
298 panic("vm_set_page_size: page size not a power of two");
299
300 for (page_shift = 0; ; page_shift++)
301 if ((1 << page_shift) == page_size)
302 break;
303#endif /* PAGE_SIZE_FIXED */
304}
305
306/*
307 * vm_page_bootstrap:
308 *
309 * Initializes the resident memory module.
310 *
311 * Allocates memory for the page cells, and
312 * for the object/offset-to-page hash table headers.
313 * Each page cell is initialized and placed on the free list.
314 * Returns the range of available kernel virtual memory.
315 */
316
317void
318vm_page_bootstrap(
319 vm_offset_t *startp,
320 vm_offset_t *endp)
321{
322 register vm_page_t m;
323 int i;
324 unsigned int log1;
325 unsigned int log2;
326 unsigned int size;
327
328 /*
329 * Initialize the vm_page template.
330 */
331
332 m = &vm_page_template;
333 m->object = VM_OBJECT_NULL; /* reset later */
334 m->offset = 0; /* reset later */
335 m->wire_count = 0;
336
337 m->inactive = FALSE;
338 m->active = FALSE;
339 m->laundry = FALSE;
340 m->free = FALSE;
 341 m->no_isync = TRUE;
342 m->reference = FALSE;
343 m->pageout = FALSE;
 344 m->dump_cleaning = FALSE;
345 m->list_req_pending = FALSE;
346
347 m->busy = TRUE;
348 m->wanted = FALSE;
349 m->tabled = FALSE;
350 m->fictitious = FALSE;
351 m->private = FALSE;
352 m->absent = FALSE;
353 m->error = FALSE;
354 m->dirty = FALSE;
355 m->cleaning = FALSE;
356 m->precious = FALSE;
357 m->clustered = FALSE;
358 m->lock_supplied = FALSE;
359 m->unusual = FALSE;
360 m->restart = FALSE;
 361 m->zero_fill = FALSE;
 362
 363 m->phys_page = 0; /* reset later */
364
365 m->page_lock = VM_PROT_NONE;
366 m->unlock_request = VM_PROT_NONE;
367 m->page_error = KERN_SUCCESS;
368
369 /*
370 * Initialize the page queues.
371 */
372
373 mutex_init(&vm_page_queue_free_lock, ETAP_VM_PAGEQ_FREE);
374 mutex_init(&vm_page_queue_lock, ETAP_VM_PAGEQ);
375 simple_lock_init(&vm_page_preppin_lock, ETAP_VM_PREPPIN);
376
377 vm_page_queue_free = VM_PAGE_NULL;
378 vm_page_queue_fictitious = VM_PAGE_NULL;
379 queue_init(&vm_page_queue_active);
380 queue_init(&vm_page_queue_inactive);
 381 queue_init(&vm_page_queue_zf);
382 queue_init(&vm_page_queue_limbo);
383
384 vm_page_free_wanted = 0;
385
386 /*
387 * Steal memory for the map and zone subsystems.
388 */
389
390 vm_map_steal_memory();
391 zone_steal_memory();
392
393 /*
394 * Allocate (and initialize) the virtual-to-physical
395 * table hash buckets.
396 *
397 * The number of buckets should be a power of two to
398 * get a good hash function. The following computation
399 * chooses the first power of two that is greater
400 * than the number of physical pages in the system.
401 */
402
403 simple_lock_init(&vm_page_bucket_lock, ETAP_VM_BUCKET);
404
405 if (vm_page_bucket_count == 0) {
406 unsigned int npages = pmap_free_pages();
407
408 vm_page_bucket_count = 1;
409 while (vm_page_bucket_count < npages)
410 vm_page_bucket_count <<= 1;
411 }
412
413 vm_page_hash_mask = vm_page_bucket_count - 1;
414
415 /*
416 * Calculate object shift value for hashing algorithm:
417 * O = log2(sizeof(struct vm_object))
418 * B = log2(vm_page_bucket_count)
419 * hash shifts the object left by
420 * B/2 - O
421 */
422 size = vm_page_bucket_count;
423 for (log1 = 0; size > 1; log1++)
424 size /= 2;
425 size = sizeof(struct vm_object);
426 for (log2 = 0; size > 1; log2++)
427 size /= 2;
428 vm_page_hash_shift = log1/2 - log2 + 1;
429
430 vm_page_bucket_hash = 1 << ((log1 + 1) >> 1); /* Get (ceiling of sqrt of table size) */
431 vm_page_bucket_hash |= 1 << ((log1 + 1) >> 2); /* Get (ceiling of quadroot of table size) */
 432 vm_page_bucket_hash |= 1; /* Set bit and add 1 - always must be 1 to ensure a unique series */
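	/*
	 * Worked example (illustrative only; the vm_object size is an assumed
	 * value, not one taken from this source): with 100,000 physical pages
	 * the loop above picks vm_page_bucket_count = 131072 = 2^17, so
	 * log1 = 17. If sizeof(struct vm_object) were 128 bytes, log2 = 7 and
	 * vm_page_hash_shift = 17/2 - 7 + 1 = 2, while vm_page_bucket_hash
	 * becomes (1 << 9) | (1 << 4) | 1 = 0x211.
	 */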
433
434 if (vm_page_hash_mask & vm_page_bucket_count)
435 printf("vm_page_bootstrap: WARNING -- strange page hash\n");
436
437 vm_page_buckets = (vm_page_bucket_t *)
438 pmap_steal_memory(vm_page_bucket_count *
439 sizeof(vm_page_bucket_t));
440
441 for (i = 0; i < vm_page_bucket_count; i++) {
442 register vm_page_bucket_t *bucket = &vm_page_buckets[i];
443
444 bucket->pages = VM_PAGE_NULL;
445#if MACH_PAGE_HASH_STATS
446 bucket->cur_count = 0;
447 bucket->hi_count = 0;
448#endif /* MACH_PAGE_HASH_STATS */
449 }
450
451 /*
452 * Machine-dependent code allocates the resident page table.
453 * It uses vm_page_init to initialize the page frames.
454 * The code also returns to us the virtual space available
455 * to the kernel. We don't trust the pmap module
456 * to get the alignment right.
457 */
458
459 pmap_startup(&virtual_space_start, &virtual_space_end);
460 virtual_space_start = round_page_32(virtual_space_start);
461 virtual_space_end = trunc_page_32(virtual_space_end);
462
463 *startp = virtual_space_start;
464 *endp = virtual_space_end;
465
466 /*
467 * Compute the initial "wire" count.
468 * Up until now, the pages which have been set aside are not under
469 * the VM system's control, so although they aren't explicitly
470 * wired, they nonetheless can't be moved. At this moment,
471 * all VM managed pages are "free", courtesy of pmap_startup.
472 */
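	/*
	 * Illustrative arithmetic (assumed machine configuration, not taken
	 * from this source): with 512 MB of physical memory and 4 KB pages,
	 * atop_64(max_mem) is 131072; if pmap_startup freed 126,000 of those
	 * pages, vm_page_wire_count starts out at 5,072.
	 */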
 473 vm_page_wire_count = atop_64(max_mem) - vm_page_free_count; /* initial value */
474
475 printf("vm_page_bootstrap: %d free pages\n", vm_page_free_count);
476 vm_page_free_count_minimum = vm_page_free_count;
477}
478
479#ifndef MACHINE_PAGES
480/*
481 * We implement pmap_steal_memory and pmap_startup with the help
482 * of two simpler functions, pmap_virtual_space and pmap_next_page.
483 */
484
485vm_offset_t
486pmap_steal_memory(
487 vm_size_t size)
488{
489 vm_offset_t addr, vaddr;
490 ppnum_t phys_page;
491
492 /*
 493 * We round the size up to a multiple of the pointer size.
494 */
495
496 size = (size + sizeof (void *) - 1) &~ (sizeof (void *) - 1);
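	/*
	 * Example of the rounding above (assuming 4-byte pointers): a request
	 * of 10 bytes becomes (10 + 3) & ~3 = 12, so every allocation stays
	 * pointer-aligned and virtual_space_start never becomes misaligned.
	 */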
497
498 /*
499 * If this is the first call to pmap_steal_memory,
 500 * we have to initialize ourselves.
501 */
502
503 if (virtual_space_start == virtual_space_end) {
504 pmap_virtual_space(&virtual_space_start, &virtual_space_end);
505
506 /*
507 * The initial values must be aligned properly, and
508 * we don't trust the pmap module to do it right.
509 */
510
511 virtual_space_start = round_page_32(virtual_space_start);
512 virtual_space_end = trunc_page_32(virtual_space_end);
513 }
514
515 /*
516 * Allocate virtual memory for this request.
517 */
518
519 addr = virtual_space_start;
520 virtual_space_start += size;
521
522 kprintf("pmap_steal_memory: %08X - %08X; size=%08X\n", addr, virtual_space_start, size); /* (TEST/DEBUG) */
523
524 /*
525 * Allocate and map physical pages to back new virtual pages.
526 */
527
 528 for (vaddr = round_page_32(addr);
529 vaddr < addr + size;
530 vaddr += PAGE_SIZE) {
 531 if (!pmap_next_page(&phys_page))
532 panic("pmap_steal_memory");
533
534 /*
535 * XXX Logically, these mappings should be wired,
536 * but some pmap modules barf if they are.
537 */
538
 539 pmap_enter(kernel_pmap, vaddr, phys_page,
540 VM_PROT_READ|VM_PROT_WRITE,
541 VM_WIMG_USE_DEFAULT, FALSE);
542 /*
543 * Account for newly stolen memory
544 */
545 vm_page_wire_count++;
546
547 }
548
549 return addr;
550}
551
552void
553pmap_startup(
554 vm_offset_t *startp,
555 vm_offset_t *endp)
556{
557 unsigned int i, npages, pages_initialized, fill, fillval;
558 vm_page_t pages;
559 ppnum_t phys_page;
560 addr64_t tmpaddr;
561
562 /*
563 * We calculate how many page frames we will have
564 * and then allocate the page structures in one chunk.
565 */
566
567 tmpaddr = (addr64_t)pmap_free_pages() * (addr64_t)PAGE_SIZE; /* Get the amount of memory left */
568 tmpaddr = tmpaddr + (addr64_t)(round_page_32(virtual_space_start) - virtual_space_start); /* Account for any slop */
 569 npages = (unsigned int)(tmpaddr / (addr64_t)(PAGE_SIZE + sizeof(*pages))); /* Figure how many pages fit, leaving room for a vm_page_t per page */
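	/*
	 * Derivation of the divide above: each page we keep needs PAGE_SIZE
	 * bytes of usable memory plus one struct vm_page to describe it, so
	 * the largest npages satisfying
	 * npages * (PAGE_SIZE + sizeof(*pages)) <= tmpaddr is
	 * tmpaddr / (PAGE_SIZE + sizeof(*pages)). For example (assuming an
	 * 80-byte struct vm_page, a figure not taken from this source),
	 * 64 MB of remaining memory yields about 16,070 pages rather than 16,384.
	 */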
570
571 pages = (vm_page_t) pmap_steal_memory(npages * sizeof *pages);
572
573 /*
574 * Initialize the page frames.
575 */
576
577 for (i = 0, pages_initialized = 0; i < npages; i++) {
 578 if (!pmap_next_page(&phys_page))
579 break;
580
 581 vm_page_init(&pages[i], phys_page);
582 vm_page_pages++;
583 pages_initialized++;
584 }
585
586 /*
587 * Release pages in reverse order so that physical pages
588 * initially get allocated in ascending addresses. This keeps
589 * the devices (which must address physical memory) happy if
590 * they require several consecutive pages.
591 */
592
593/*
594 * Check if we want to initialize pages to a known value
595 */
596
597 fill = 0; /* Assume no fill */
598 if (PE_parse_boot_arg("fill", &fillval)) fill = 1; /* Set fill */
599
 600 for (i = pages_initialized; i > 0; i--) {
601 extern void fillPage(ppnum_t phys_page, unsigned int fillval);
 602 if(fill) fillPage(pages[i - 1].phys_page, fillval); /* Fill the page with a known value if requested at boot */
603 vm_page_release(&pages[i - 1]);
604 }
605
606#if 0
607 {
608 vm_page_t xx, xxo, xxl;
609 int j, k, l;
610
611 j = 0; /* (BRINGUP) */
612 xxl = 0;
613
614 for(xx = vm_page_queue_free; xx; xxl = xx, xx = xx->pageq.next) { /* (BRINGUP) */
615 j++; /* (BRINGUP) */
616 if(j > vm_page_free_count) { /* (BRINGUP) */
617 panic("pmap_startup: too many pages, xx = %08X, xxl = %08X\n", xx, xxl);
618 }
619
620 l = vm_page_free_count - j; /* (BRINGUP) */
621 k = 0; /* (BRINGUP) */
622
623 if(((j - 1) & 0xFFFF) == 0) kprintf("checking number %d of %d\n", j, vm_page_free_count);
624
625 for(xxo = xx->pageq.next; xxo; xxo = xxo->pageq.next) { /* (BRINGUP) */
626 k++;
627 if(k > l) panic("pmap_startup: too many in secondary check %d %d\n", k, l);
628 if((xx->phys_page & 0xFFFFFFFF) == (xxo->phys_page & 0xFFFFFFFF)) { /* (BRINGUP) */
629 panic("pmap_startup: duplicate physaddr, xx = %08X, xxo = %08X\n", xx, xxo);
630 }
631 }
632 }
633
634 if(j != vm_page_free_count) { /* (BRINGUP) */
635 panic("pmap_startup: vm_page_free_count does not match, calc = %d, vm_page_free_count = %08X\n", j, vm_page_free_count);
636 }
637 }
638#endif
639
640
641 /*
642 * We have to re-align virtual_space_start,
643 * because pmap_steal_memory has been using it.
644 */
645
 646 virtual_space_start = round_page_32(virtual_space_start);
647
648 *startp = virtual_space_start;
649 *endp = virtual_space_end;
650}
651#endif /* MACHINE_PAGES */
652
653/*
654 * Routine: vm_page_module_init
655 * Purpose:
656 * Second initialization pass, to be done after
657 * the basic VM system is ready.
658 */
659void
660vm_page_module_init(void)
661{
662 vm_page_zone = zinit((vm_size_t) sizeof(struct vm_page),
663 0, PAGE_SIZE, "vm pages");
664
665#if ZONE_DEBUG
666 zone_debug_disable(vm_page_zone);
667#endif /* ZONE_DEBUG */
668
669 zone_change(vm_page_zone, Z_EXPAND, FALSE);
670 zone_change(vm_page_zone, Z_EXHAUST, TRUE);
671 zone_change(vm_page_zone, Z_FOREIGN, TRUE);
672
673 /*
674 * Adjust zone statistics to account for the real pages allocated
675 * in vm_page_create(). [Q: is this really what we want?]
676 */
677 vm_page_zone->count += vm_page_pages;
678 vm_page_zone->cur_size += vm_page_pages * vm_page_zone->elem_size;
679
680 mutex_init(&vm_page_alloc_lock, ETAP_VM_PAGE_ALLOC);
 681 mutex_init(&vm_page_zero_fill_lock, ETAP_VM_PAGE_ALLOC);
682}
683
684/*
685 * Routine: vm_page_create
686 * Purpose:
687 * After the VM system is up, machine-dependent code
688 * may stumble across more physical memory. For example,
689 * memory that it was reserving for a frame buffer.
690 * vm_page_create turns this memory into available pages.
691 */
692
693void
694vm_page_create(
695 ppnum_t start,
696 ppnum_t end)
 697{
698 ppnum_t phys_page;
699 vm_page_t m;
 700
701 for (phys_page = start;
702 phys_page < end;
703 phys_page++) {
704 while ((m = (vm_page_t) vm_page_grab_fictitious())
705 == VM_PAGE_NULL)
706 vm_page_more_fictitious();
707
 708 vm_page_init(m, phys_page);
709 vm_page_pages++;
710 vm_page_release(m);
711 }
712}
713
714/*
715 * vm_page_hash:
716 *
717 * Distributes the object/offset key pair among hash buckets.
718 *
 719 * NOTE: The bucket count must be a power of 2
720 */
721#define vm_page_hash(object, offset) (\
 722 ( (natural_t)((uint32_t)object * vm_page_bucket_hash) + ((uint32_t)atop_64(offset) ^ vm_page_bucket_hash))\
723 & vm_page_hash_mask)
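/*
 * Sketch of how the hash spreads pages of one object (illustrative, based
 * on the macro above): atop_64() converts the byte offset to a page index,
 * so offsets 0, PAGE_SIZE and 2*PAGE_SIZE contribute 0, 1 and 2, each XORed
 * with vm_page_bucket_hash, and consecutive pages of the same object land
 * in different buckets. The final "& vm_page_hash_mask" is equivalent to
 * "% vm_page_bucket_count" only because the bucket count is a power of 2.
 */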
724
725/*
726 * vm_page_insert: [ internal use only ]
727 *
728 * Inserts the given mem entry into the object/object-page
729 * table and object list.
730 *
731 * The object must be locked.
732 */
733
734void
735vm_page_insert(
736 register vm_page_t mem,
737 register vm_object_t object,
738 register vm_object_offset_t offset)
739{
740 register vm_page_bucket_t *bucket;
741
742 XPR(XPR_VM_PAGE,
743 "vm_page_insert, object 0x%X offset 0x%X page 0x%X\n",
744 (integer_t)object, (integer_t)offset, (integer_t)mem, 0,0);
745
746 VM_PAGE_CHECK(mem);
747
748 if (mem->tabled)
749 panic("vm_page_insert");
750
751 assert(!object->internal || offset < object->size);
752
753 /* only insert "pageout" pages into "pageout" objects,
754 * and normal pages into normal objects */
755 assert(object->pageout == mem->pageout);
756
757 /*
758 * Record the object/offset pair in this page
759 */
760
761 mem->object = object;
762 mem->offset = offset;
763
764 /*
 765 * Insert it into the object/offset hash table
766 */
767
768 bucket = &vm_page_buckets[vm_page_hash(object, offset)];
769 simple_lock(&vm_page_bucket_lock);
770 mem->next = bucket->pages;
771 bucket->pages = mem;
772#if MACH_PAGE_HASH_STATS
773 if (++bucket->cur_count > bucket->hi_count)
774 bucket->hi_count = bucket->cur_count;
775#endif /* MACH_PAGE_HASH_STATS */
776 simple_unlock(&vm_page_bucket_lock);
777
778 /*
779 * Now link into the object's list of backed pages.
780 */
781
782 queue_enter(&object->memq, mem, vm_page_t, listq);
783 mem->tabled = TRUE;
784
785 /*
786 * Show that the object has one more resident page.
787 */
788
789 object->resident_page_count++;
790}
791
792/*
793 * vm_page_replace:
794 *
795 * Exactly like vm_page_insert, except that we first
796 * remove any existing page at the given offset in object.
797 *
798 * The object and page queues must be locked.
799 */
800
801void
802vm_page_replace(
803 register vm_page_t mem,
804 register vm_object_t object,
805 register vm_object_offset_t offset)
806{
807 register vm_page_bucket_t *bucket;
808
809 VM_PAGE_CHECK(mem);
810
811 if (mem->tabled)
812 panic("vm_page_replace");
813
814 /*
815 * Record the object/offset pair in this page
816 */
817
818 mem->object = object;
819 mem->offset = offset;
820
821 /*
 822 * Insert it into the object/offset hash table,
823 * replacing any page that might have been there.
824 */
825
826 bucket = &vm_page_buckets[vm_page_hash(object, offset)];
827 simple_lock(&vm_page_bucket_lock);
828 if (bucket->pages) {
829 vm_page_t *mp = &bucket->pages;
830 register vm_page_t m = *mp;
831 do {
832 if (m->object == object && m->offset == offset) {
833 /*
834 * Remove page from bucket and from object,
835 * and return it to the free list.
836 */
837 *mp = m->next;
838 queue_remove(&object->memq, m, vm_page_t,
839 listq);
840 m->tabled = FALSE;
841 object->resident_page_count--;
842
843 /*
844 * Return page to the free list.
845 * Note the page is not tabled now, so this
846 * won't self-deadlock on the bucket lock.
847 */
848
849 vm_page_free(m);
850 break;
851 }
852 mp = &m->next;
853 } while (m = *mp);
854 mem->next = bucket->pages;
855 } else {
856 mem->next = VM_PAGE_NULL;
857 }
858 bucket->pages = mem;
859 simple_unlock(&vm_page_bucket_lock);
860
861 /*
862 * Now link into the object's list of backed pages.
863 */
864
865 queue_enter(&object->memq, mem, vm_page_t, listq);
866 mem->tabled = TRUE;
867
868 /*
869 * And show that the object has one more resident
870 * page.
871 */
872
873 object->resident_page_count++;
874}
875
876/*
877 * vm_page_remove: [ internal use only ]
878 *
879 * Removes the given mem entry from the object/offset-page
880 * table and the object page list.
881 *
882 * The object and page must be locked.
883 */
884
885void
886vm_page_remove(
887 register vm_page_t mem)
888{
889 register vm_page_bucket_t *bucket;
890 register vm_page_t this;
891
892 XPR(XPR_VM_PAGE,
893 "vm_page_remove, object 0x%X offset 0x%X page 0x%X\n",
894 (integer_t)mem->object, (integer_t)mem->offset,
895 (integer_t)mem, 0,0);
896
897 assert(mem->tabled);
898 assert(!mem->cleaning);
899 VM_PAGE_CHECK(mem);
900
901 /*
 902 * Remove from the object/offset hash table
903 */
904
905 bucket = &vm_page_buckets[vm_page_hash(mem->object, mem->offset)];
906 simple_lock(&vm_page_bucket_lock);
907 if ((this = bucket->pages) == mem) {
908 /* optimize for common case */
909
910 bucket->pages = mem->next;
911 } else {
912 register vm_page_t *prev;
913
914 for (prev = &this->next;
915 (this = *prev) != mem;
916 prev = &this->next)
917 continue;
918 *prev = this->next;
919 }
920#if MACH_PAGE_HASH_STATS
921 bucket->cur_count--;
922#endif /* MACH_PAGE_HASH_STATS */
923 simple_unlock(&vm_page_bucket_lock);
924
925 /*
926 * Now remove from the object's list of backed pages.
927 */
928
929 queue_remove(&mem->object->memq, mem, vm_page_t, listq);
930
931 /*
932 * And show that the object has one fewer resident
933 * page.
934 */
935
936 mem->object->resident_page_count--;
937
938 mem->tabled = FALSE;
939 mem->object = VM_OBJECT_NULL;
940 mem->offset = 0;
941}
942
943/*
944 * vm_page_lookup:
945 *
946 * Returns the page associated with the object/offset
947 * pair specified; if none is found, VM_PAGE_NULL is returned.
948 *
949 * The object must be locked. No side effects.
950 */
951
952vm_page_t
953vm_page_lookup(
954 register vm_object_t object,
955 register vm_object_offset_t offset)
956{
957 register vm_page_t mem;
958 register vm_page_bucket_t *bucket;
959
960 /*
961 * Search the hash table for this object/offset pair
962 */
963
964 bucket = &vm_page_buckets[vm_page_hash(object, offset)];
965
966 simple_lock(&vm_page_bucket_lock);
967 for (mem = bucket->pages; mem != VM_PAGE_NULL; mem = mem->next) {
968 VM_PAGE_CHECK(mem);
969 if ((mem->object == object) && (mem->offset == offset))
970 break;
971 }
972 simple_unlock(&vm_page_bucket_lock);
 973
974 return(mem);
975}
976
977/*
978 * vm_page_rename:
979 *
980 * Move the given memory entry from its
981 * current object to the specified target object/offset.
982 *
983 * The object must be locked.
984 */
985void
986vm_page_rename(
987 register vm_page_t mem,
988 register vm_object_t new_object,
989 vm_object_offset_t new_offset)
990{
991 assert(mem->object != new_object);
992 /*
993 * Changes to mem->object require the page lock because
994 * the pageout daemon uses that lock to get the object.
995 */
996
997 XPR(XPR_VM_PAGE,
998 "vm_page_rename, new object 0x%X, offset 0x%X page 0x%X\n",
999 (integer_t)new_object, (integer_t)new_offset,
1000 (integer_t)mem, 0,0);
1001
1002 vm_page_lock_queues();
1003 vm_page_remove(mem);
1004 vm_page_insert(mem, new_object, new_offset);
1005 vm_page_unlock_queues();
1006}
1007
1008/*
1009 * vm_page_init:
1010 *
1011 * Initialize the fields in a new page.
1012 * This takes a structure with random values and initializes it
1013 * so that it can be given to vm_page_release or vm_page_insert.
1014 */
1015void
1016vm_page_init(
1017 vm_page_t mem,
 1018 ppnum_t phys_page)
1019{
1020 *mem = vm_page_template;
 1021 mem->phys_page = phys_page;
1022}
1023
1024/*
1025 * vm_page_grab_fictitious:
1026 *
1027 * Remove a fictitious page from the free list.
1028 * Returns VM_PAGE_NULL if there are no free pages.
1029 */
1030int c_vm_page_grab_fictitious = 0;
1031int c_vm_page_release_fictitious = 0;
1032int c_vm_page_more_fictitious = 0;
1033
1034vm_page_t
1035vm_page_grab_fictitious(void)
1036{
1037 register vm_page_t m;
1038
1039 m = (vm_page_t)zget(vm_page_zone);
1040 if (m) {
1041 vm_page_init(m, vm_page_fictitious_addr);
1042 m->fictitious = TRUE;
1043 }
1044
1045 c_vm_page_grab_fictitious++;
1046 return m;
1047}
1048
1049/*
1050 * vm_page_release_fictitious:
1051 *
1052 * Release a fictitious page to the free list.
1053 */
1054
1055void
1056vm_page_release_fictitious(
1057 register vm_page_t m)
1058{
1059 assert(!m->free);
1060 assert(m->busy);
1061 assert(m->fictitious);
 1062 assert(m->phys_page == vm_page_fictitious_addr);
1063
1064 c_vm_page_release_fictitious++;
1065
1066 if (m->free)
1067 panic("vm_page_release_fictitious");
1068 m->free = TRUE;
1069 zfree(vm_page_zone, (vm_offset_t)m);
1070}
1071
1072/*
1073 * vm_page_more_fictitious:
1074 *
1075 * Add more fictitious pages to the free list.
1076 * Allowed to block. This routine is way intimate
1077 * with the zones code, for several reasons:
1078 * 1. we need to carve some page structures out of physical
1079 * memory before zones work, so they _cannot_ come from
1080 * the zone_map.
1081 * 2. the zone needs to be collectable in order to prevent
1082 * growth without bound. These structures are used by
1083 * the device pager (by the hundreds and thousands), as
1084 * private pages for pageout, and as blocking pages for
1085 * pagein. Temporary bursts in demand should not result in
1086 * permanent allocation of a resource.
1087 * 3. To smooth allocation humps, we allocate single pages
1088 * with kernel_memory_allocate(), and cram them into the
1089 * zone. This also allows us to initialize the vm_page_t's
1090 * on the way into the zone, so that zget() always returns
1091 * an initialized structure. The zone free element pointer
1092 * and the free page pointer are both the first item in the
1093 * vm_page_t.
1094 * 4. By having the pages in the zone pre-initialized, we need
1095 * not keep 2 levels of lists. The garbage collector simply
1096 * scans our list, and reduces physical memory usage as it
1097 * sees fit.
1098 */
1099
1100void vm_page_more_fictitious(void)
1101{
1102 extern vm_map_t zone_map;
1103 register vm_page_t m;
1104 vm_offset_t addr;
1105 kern_return_t retval;
1106 int i;
1107
1108 c_vm_page_more_fictitious++;
1109
1110 /*
1111 * Allocate a single page from the zone_map. Do not wait if no physical
1112 * pages are immediately available, and do not zero the space. We need
1113 * our own blocking lock here to prevent having multiple,
1114 * simultaneous requests from piling up on the zone_map lock. Exactly
1115 * one (of our) threads should be potentially waiting on the map lock.
1116 * If winner is not vm-privileged, then the page allocation will fail,
1117 * and it will temporarily block here in the vm_page_wait().
1118 */
1119 mutex_lock(&vm_page_alloc_lock);
1120 /*
1121 * If another thread allocated space, just bail out now.
1122 */
1123 if (zone_free_count(vm_page_zone) > 5) {
1124 /*
1125 * The number "5" is a small number that is larger than the
1126 * number of fictitious pages that any single caller will
1127 * attempt to allocate. Otherwise, a thread will attempt to
1128 * acquire a fictitious page (vm_page_grab_fictitious), fail,
1129 * release all of the resources and locks already acquired,
1130 * and then call this routine. This routine finds the pages
1131 * that the caller released, so fails to allocate new space.
1132 * The process repeats infinitely. The largest known number
1133 * of fictitious pages required in this manner is 2. 5 is
1134 * simply a somewhat larger number.
1135 */
1136 mutex_unlock(&vm_page_alloc_lock);
1137 return;
1138 }
1139
1140 if ((retval = kernel_memory_allocate(zone_map,
1141 &addr, PAGE_SIZE, VM_PROT_ALL,
1142 KMA_KOBJECT|KMA_NOPAGEWAIT)) != KERN_SUCCESS) {
1143 /*
1144 * No page was available. Tell the pageout daemon, drop the
1145 * lock to give another thread a chance at it, and
1146 * wait for the pageout daemon to make progress.
1147 */
1148 mutex_unlock(&vm_page_alloc_lock);
1149 vm_page_wait(THREAD_UNINT);
1150 return;
1151 }
1152 /*
1153 * Initialize as many vm_page_t's as will fit on this page. This
1154 * depends on the zone code disturbing ONLY the first item of
1155 * each zone element.
1156 */
1157 m = (vm_page_t)addr;
1158 for (i = PAGE_SIZE/sizeof(struct vm_page); i > 0; i--) {
1159 vm_page_init(m, vm_page_fictitious_addr);
1160 m->fictitious = TRUE;
1161 m++;
1162 }
1163 zcram(vm_page_zone, addr, PAGE_SIZE);
1164 mutex_unlock(&vm_page_alloc_lock);
1165}
1166
1167/*
1168 * vm_page_convert:
1169 *
1170 * Attempt to convert a fictitious page into a real page.
1171 */
1172
1173boolean_t
1174vm_page_convert(
1175 register vm_page_t m)
1176{
1177 register vm_page_t real_m;
1178
1179 assert(m->busy);
1180 assert(m->fictitious);
1181 assert(!m->dirty);
1182
1183 real_m = vm_page_grab();
1184 if (real_m == VM_PAGE_NULL)
1185 return FALSE;
1186
 1187 m->phys_page = real_m->phys_page;
 1188 m->fictitious = FALSE;
 1189 m->no_isync = TRUE;
1190
1191 vm_page_lock_queues();
1192 if (m->active)
1193 vm_page_active_count++;
1194 else if (m->inactive)
1195 vm_page_inactive_count++;
1196 vm_page_unlock_queues();
1197
 1198 real_m->phys_page = vm_page_fictitious_addr;
1199 real_m->fictitious = TRUE;
1200
1201 vm_page_release_fictitious(real_m);
1202 return TRUE;
1203}
1204
1205/*
1206 * vm_pool_low():
1207 *
1208 * Return true if it is not likely that a non-vm_privileged thread
1209 * can get memory without blocking. Advisory only, since the
1210 * situation may change under us.
1211 */
1212int
1213vm_pool_low(void)
1214{
1215 /* No locking, at worst we will fib. */
1216 return( vm_page_free_count < vm_page_free_reserved );
1217}
1218
1219/*
1220 * vm_page_grab:
1221 *
1222 * Remove a page from the free list.
1223 * Returns VM_PAGE_NULL if the free list is too small.
1224 */
1225
1226unsigned long vm_page_grab_count = 0; /* measure demand */
1227
1228vm_page_t
1229vm_page_grab(void)
1230{
1231 register vm_page_t mem;
1232
1233 mutex_lock(&vm_page_queue_free_lock);
1234 vm_page_grab_count++;
1235
1236 /*
1237 * Optionally produce warnings if the wire or gobble
1238 * counts exceed some threshold.
1239 */
1240 if (vm_page_wire_count_warning > 0
1241 && vm_page_wire_count >= vm_page_wire_count_warning) {
1242 printf("mk: vm_page_grab(): high wired page count of %d\n",
1243 vm_page_wire_count);
1244 assert(vm_page_wire_count < vm_page_wire_count_warning);
1245 }
1246 if (vm_page_gobble_count_warning > 0
1247 && vm_page_gobble_count >= vm_page_gobble_count_warning) {
1248 printf("mk: vm_page_grab(): high gobbled page count of %d\n",
1249 vm_page_gobble_count);
1250 assert(vm_page_gobble_count < vm_page_gobble_count_warning);
1251 }
1252
1253 /*
1254 * Only let privileged threads (involved in pageout)
1255 * dip into the reserved pool.
1256 */
1257
1258 if ((vm_page_free_count < vm_page_free_reserved) &&
1259 !current_thread()->vm_privilege) {
1260 mutex_unlock(&vm_page_queue_free_lock);
1261 mem = VM_PAGE_NULL;
1262 goto wakeup_pageout;
1263 }
1264
1265 while (vm_page_queue_free == VM_PAGE_NULL) {
1266 printf("vm_page_grab: no free pages, trouble expected...\n");
1267 mutex_unlock(&vm_page_queue_free_lock);
1268 VM_PAGE_WAIT();
1269 mutex_lock(&vm_page_queue_free_lock);
1270 }
1271
1272 if (--vm_page_free_count < vm_page_free_count_minimum)
1273 vm_page_free_count_minimum = vm_page_free_count;
1274 mem = vm_page_queue_free;
1275 vm_page_queue_free = (vm_page_t) mem->pageq.next;
1276 mem->free = FALSE;
 1277 mem->no_isync = TRUE;
1278 mutex_unlock(&vm_page_queue_free_lock);
1279
1280 /*
1281 * Decide if we should poke the pageout daemon.
1282 * We do this if the free count is less than the low
1283 * water mark, or if the free count is less than the high
1284 * water mark (but above the low water mark) and the inactive
1285 * count is less than its target.
1286 *
1287 * We don't have the counts locked ... if they change a little,
1288 * it doesn't really matter.
1289 */
1290
1291wakeup_pageout:
1292 if ((vm_page_free_count < vm_page_free_min) ||
1293 ((vm_page_free_count < vm_page_free_target) &&
1294 (vm_page_inactive_count < vm_page_inactive_target)))
1295 thread_wakeup((event_t) &vm_page_free_wanted);
1296
 1297// dbgLog(mem->phys_page, vm_page_free_count, vm_page_wire_count, 4); /* (TEST/DEBUG) */
1298
1299 return mem;
1300}
1301
1302/*
1303 * vm_page_release:
1304 *
1305 * Return a page to the free list.
1306 */
1307
1308void
1309vm_page_release(
1310 register vm_page_t mem)
1311{
1312
1313#if 0
1314 unsigned int pindex;
1315 phys_entry *physent;
1316
1317 physent = mapping_phys_lookup(mem->phys_page, &pindex); /* (BRINGUP) */
1318 if(physent->ppLink & ppN) { /* (BRINGUP) */
1319 panic("vm_page_release: already released - %08X %08X\n", mem, mem->phys_page);
1320 }
1321 physent->ppLink = physent->ppLink | ppN; /* (BRINGUP) */
1322#endif
1323
1324 assert(!mem->private && !mem->fictitious);
1325
 1326// dbgLog(mem->phys_page, vm_page_free_count, vm_page_wire_count, 5); /* (TEST/DEBUG) */
1327
1328 mutex_lock(&vm_page_queue_free_lock);
1329 if (mem->free)
1330 panic("vm_page_release");
1331 mem->free = TRUE;
1332 mem->pageq.next = (queue_entry_t) vm_page_queue_free;
1333 vm_page_queue_free = mem;
1334 vm_page_free_count++;
1335
1336 /*
1337 * Check if we should wake up someone waiting for page.
1338 * But don't bother waking them unless they can allocate.
1339 *
1340 * We wakeup only one thread, to prevent starvation.
1341 * Because the scheduling system handles wait queues FIFO,
1342 * if we wakeup all waiting threads, one greedy thread
1343 * can starve multiple niceguy threads. When the threads
 1344 * all wakeup, the greedy thread runs first, grabs the page,
1345 * and waits for another page. It will be the first to run
1346 * when the next page is freed.
1347 *
1348 * However, there is a slight danger here.
1349 * The thread we wake might not use the free page.
1350 * Then the other threads could wait indefinitely
1351 * while the page goes unused. To forestall this,
1352 * the pageout daemon will keep making free pages
1353 * as long as vm_page_free_wanted is non-zero.
1354 */
1355
1356 if ((vm_page_free_wanted > 0) &&
1357 (vm_page_free_count >= vm_page_free_reserved)) {
1358 vm_page_free_wanted--;
1359 thread_wakeup_one((event_t) &vm_page_free_count);
1360 }
1361
1362 mutex_unlock(&vm_page_queue_free_lock);
1363}
1364
1365#define VM_PAGEOUT_DEADLOCK_TIMEOUT 3
1366
1367/*
1368 * vm_page_wait:
1369 *
1370 * Wait for a page to become available.
1371 * If there are plenty of free pages, then we don't sleep.
1372 *
1373 * Returns:
1374 * TRUE: There may be another page, try again
1375 * FALSE: We were interrupted out of our wait, don't try again
1376 */
1377
1378boolean_t
1379vm_page_wait(
1380 int interruptible )
1381{
1382 /*
1383 * We can't use vm_page_free_reserved to make this
1384 * determination. Consider: some thread might
1385 * need to allocate two pages. The first allocation
1386 * succeeds, the second fails. After the first page is freed,
1387 * a call to vm_page_wait must really block.
1388 */
1389 uint64_t abstime;
1390 kern_return_t wait_result;
1391 kern_return_t kr;
1392 int need_wakeup = 0;
1393
1394 mutex_lock(&vm_page_queue_free_lock);
1395 if (vm_page_free_count < vm_page_free_target) {
1396 if (vm_page_free_wanted++ == 0)
 1397 need_wakeup = 1;
1398 wait_result = assert_wait((event_t)&vm_page_free_count,
1399 interruptible);
1400 mutex_unlock(&vm_page_queue_free_lock);
1401 counter(c_vm_page_wait_block++);
1402
1403 if (need_wakeup)
1404 thread_wakeup((event_t)&vm_page_free_wanted);
1405
1406 if (wait_result == THREAD_WAITING) {
1407 clock_interval_to_absolutetime_interval(
1408 VM_PAGEOUT_DEADLOCK_TIMEOUT,
1409 NSEC_PER_SEC, &abstime);
1410 clock_absolutetime_interval_to_deadline(
1411 abstime, &abstime);
1412 thread_set_timer_deadline(abstime);
1413 wait_result = thread_block(THREAD_CONTINUE_NULL);
1414
1415 if(wait_result == THREAD_TIMED_OUT) {
1416 kr = vm_pageout_emergency_availability_request();
1417 return TRUE;
1418 } else {
1419 thread_cancel_timer();
1420 }
1421 }
 1422
1423 return(wait_result == THREAD_AWAKENED);
1424 } else {
1425 mutex_unlock(&vm_page_queue_free_lock);
1426 return TRUE;
1427 }
1428}
1429
1430/*
1431 * vm_page_alloc:
1432 *
1433 * Allocate and return a memory cell associated
1434 * with this VM object/offset pair.
1435 *
1436 * Object must be locked.
1437 */
1438
1439vm_page_t
1440vm_page_alloc(
1441 vm_object_t object,
1442 vm_object_offset_t offset)
1443{
1444 register vm_page_t mem;
1445
1446 mem = vm_page_grab();
1447 if (mem == VM_PAGE_NULL)
1448 return VM_PAGE_NULL;
1449
1450 vm_page_insert(mem, object, offset);
1451
1452 return(mem);
1453}
1454
1455counter(unsigned int c_laundry_pages_freed = 0;)
1456
1457int vm_pagein_cluster_unused = 0;
1458boolean_t vm_page_free_verify = FALSE;
1459/*
1460 * vm_page_free:
1461 *
1462 * Returns the given page to the free list,
1463 * disassociating it with any VM object.
1464 *
1465 * Object and page queues must be locked prior to entry.
1466 */
1467void
1468vm_page_free(
1469 register vm_page_t mem)
1470{
1471 vm_object_t object = mem->object;
1472
1473 assert(!mem->free);
1474 assert(!mem->cleaning);
1475 assert(!mem->pageout);
 1476 assert(!vm_page_free_verify || pmap_verify_free(mem->phys_page));
1477
1478 if (mem->tabled)
1479 vm_page_remove(mem); /* clears tabled, object, offset */
1480 VM_PAGE_QUEUES_REMOVE(mem); /* clears active or inactive */
1481
1482 if (mem->clustered) {
1483 mem->clustered = FALSE;
1484 vm_pagein_cluster_unused++;
1485 }
1486
1487 if (mem->wire_count) {
1488 if (!mem->private && !mem->fictitious)
1489 vm_page_wire_count--;
1490 mem->wire_count = 0;
1491 assert(!mem->gobbled);
1492 } else if (mem->gobbled) {
1493 if (!mem->private && !mem->fictitious)
1494 vm_page_wire_count--;
1495 vm_page_gobble_count--;
1496 }
1497 mem->gobbled = FALSE;
1498
1499 if (mem->laundry) {
1500 extern int vm_page_laundry_min;
1501 if (!object->internal)
1502 vm_page_burst_count--;
1503 vm_page_laundry_count--;
1504 mem->laundry = FALSE; /* laundry is now clear */
1505 counter(++c_laundry_pages_freed);
1506 if (vm_page_laundry_count < vm_page_laundry_min) {
1507 vm_page_laundry_min = 0;
1508 thread_wakeup((event_t) &vm_page_laundry_count);
1509 }
1510 }
1511
1512 mem->discard_request = FALSE;
1513
1514 PAGE_WAKEUP(mem); /* clears wanted */
1515
1516 if (mem->absent)
1517 vm_object_absent_release(object);
1518
 1519 /* Some of these may be unnecessary */
1520 mem->page_lock = 0;
1521 mem->unlock_request = 0;
1522 mem->busy = TRUE;
1523 mem->absent = FALSE;
1524 mem->error = FALSE;
1525 mem->dirty = FALSE;
1526 mem->precious = FALSE;
1527 mem->reference = FALSE;
1528
1529 mem->page_error = KERN_SUCCESS;
1530
1531 if (mem->private) {
1532 mem->private = FALSE;
1533 mem->fictitious = TRUE;
 1534 mem->phys_page = vm_page_fictitious_addr;
1535 }
1536 if (mem->fictitious) {
1537 vm_page_release_fictitious(mem);
1538 } else {
1539 /* depends on the queues lock */
1540 if(mem->zero_fill) {
1541 vm_zf_count-=1;
1542 mem->zero_fill = FALSE;
1543 }
 1544 vm_page_init(mem, mem->phys_page);
1545 vm_page_release(mem);
1546 }
1547}
1548
1549
1550void
1551vm_page_free_list(
1552 register vm_page_t mem)
1553{
1554 register vm_page_t nxt;
1555 register vm_page_t first = NULL;
1556 register vm_page_t last;
1557 register int pg_count = 0;
1558
1559
1560 while (mem) {
1561 nxt = (vm_page_t)(mem->pageq.next);
1562
1563 if (mem->clustered)
1564 vm_pagein_cluster_unused++;
1565
1566 if (mem->laundry) {
1567 extern int vm_page_laundry_min;
1568
1569 if (!mem->object->internal)
1570 vm_page_burst_count--;
1571 vm_page_laundry_count--;
1572 counter(++c_laundry_pages_freed);
1573
1574 if (vm_page_laundry_count < vm_page_laundry_min) {
1575 vm_page_laundry_min = 0;
1576 thread_wakeup((event_t) &vm_page_laundry_count);
1577 }
1578 }
1579 mem->busy = TRUE;
1580
1581 PAGE_WAKEUP(mem); /* clears wanted */
1582
1583 if (mem->private)
1584 mem->fictitious = TRUE;
1585
1586 if (!mem->fictitious) {
1587 /* depends on the queues lock */
1588 if (mem->zero_fill)
1589 vm_zf_count -= 1;
1590 vm_page_init(mem, mem->phys_page);
1591
1592 mem->free = TRUE;
1593
1594 if (first == NULL)
1595 last = mem;
1596 mem->pageq.next = (queue_t) first;
1597 first = mem;
1598
1599 pg_count++;
1600 } else {
1601 mem->phys_page = vm_page_fictitious_addr;
1602 vm_page_release_fictitious(mem);
1603 }
1604 mem = nxt;
1605 }
1606 if (first) {
1607
1608 mutex_lock(&vm_page_queue_free_lock);
1609
1610 last->pageq.next = (queue_entry_t) vm_page_queue_free;
1611 vm_page_queue_free = first;
1612
1613 vm_page_free_count += pg_count;
1614
1615 if ((vm_page_free_wanted > 0) &&
1616 (vm_page_free_count >= vm_page_free_reserved)) {
1617 int available_pages;
1618
1619 available_pages = vm_page_free_count - vm_page_free_reserved;
1620
1621 if (available_pages >= vm_page_free_wanted) {
1622 vm_page_free_wanted = 0;
1623 thread_wakeup((event_t) &vm_page_free_count);
1624 } else {
1625 while (available_pages--) {
1626 vm_page_free_wanted--;
1627 thread_wakeup_one((event_t) &vm_page_free_count);
1628 }
1629 }
1630 }
1631 mutex_unlock(&vm_page_queue_free_lock);
1632 }
1633}
1634
1635
1636/*
1637 * vm_page_wire:
1638 *
1639 * Mark this page as wired down by yet
1640 * another map, removing it from paging queues
1641 * as necessary.
1642 *
1643 * The page's object and the page queues must be locked.
1644 */
1645void
1646vm_page_wire(
1647 register vm_page_t mem)
1648{
1649
1650// dbgLog(current_act(), mem->offset, mem->object, 1); /* (TEST/DEBUG) */
1651
1652 VM_PAGE_CHECK(mem);
1653
1654 if (mem->wire_count == 0) {
1655 VM_PAGE_QUEUES_REMOVE(mem);
1656 if (!mem->private && !mem->fictitious && !mem->gobbled)
1657 vm_page_wire_count++;
1658 if (mem->gobbled)
1659 vm_page_gobble_count--;
1660 mem->gobbled = FALSE;
1661 if(mem->zero_fill) {
1662 /* depends on the queues lock */
1663 vm_zf_count-=1;
1664 mem->zero_fill = FALSE;
1665 }
1666 }
1667 assert(!mem->gobbled);
1668 mem->wire_count++;
1669}
1670
1671/*
1672 * vm_page_gobble:
1673 *
1674 * Mark this page as consumed by the vm/ipc/xmm subsystems.
1675 *
1676 * Called only for freshly vm_page_grab()ed pages - w/ nothing locked.
1677 */
1678void
1679vm_page_gobble(
1680 register vm_page_t mem)
1681{
1682 vm_page_lock_queues();
1683 VM_PAGE_CHECK(mem);
1684
1685 assert(!mem->gobbled);
1686 assert(mem->wire_count == 0);
1687
1688 if (!mem->gobbled && mem->wire_count == 0) {
1689 if (!mem->private && !mem->fictitious)
1690 vm_page_wire_count++;
1691 }
1692 vm_page_gobble_count++;
1693 mem->gobbled = TRUE;
1694 vm_page_unlock_queues();
1695}
1696
1697/*
1698 * vm_page_unwire:
1699 *
1700 * Release one wiring of this page, potentially
1701 * enabling it to be paged again.
1702 *
1703 * The page's object and the page queues must be locked.
1704 */
1705void
1706vm_page_unwire(
1707 register vm_page_t mem)
1708{
1709
1710// dbgLog(current_act(), mem->offset, mem->object, 0); /* (TEST/DEBUG) */
1711
1712 VM_PAGE_CHECK(mem);
1713 assert(mem->wire_count > 0);
1714
1715 if (--mem->wire_count == 0) {
1716 assert(!mem->private && !mem->fictitious);
1717 vm_page_wire_count--;
1718 queue_enter(&vm_page_queue_active, mem, vm_page_t, pageq);
1719 vm_page_active_count++;
1720 mem->active = TRUE;
1721 mem->reference = TRUE;
1722 }
1723}
1724
1725/*
1726 * vm_page_deactivate:
1727 *
1728 * Returns the given page to the inactive list,
1729 * indicating that no physical maps have access
1730 * to this page. [Used by the physical mapping system.]
1731 *
1732 * The page queues must be locked.
1733 */
1734void
1735vm_page_deactivate(
1736 register vm_page_t m)
1737{
1738 VM_PAGE_CHECK(m);
1739
 1740// dbgLog(m->phys_page, vm_page_free_count, vm_page_wire_count, 6); /* (TEST/DEBUG) */
1741
1742 /*
1743 * This page is no longer very interesting. If it was
1744 * interesting (active or inactive/referenced), then we
1745 * clear the reference bit and (re)enter it in the
1746 * inactive queue. Note wired pages should not have
1747 * their reference bit cleared.
1748 */
1749 if (m->gobbled) { /* can this happen? */
1750 assert(m->wire_count == 0);
1751 if (!m->private && !m->fictitious)
1752 vm_page_wire_count--;
1753 vm_page_gobble_count--;
1754 m->gobbled = FALSE;
1755 }
1756 if (m->private || (m->wire_count != 0))
1757 return;
1758 if (m->active || (m->inactive && m->reference)) {
1759 if (!m->fictitious && !m->absent)
 1760 pmap_clear_reference(m->phys_page);
1761 m->reference = FALSE;
1762 VM_PAGE_QUEUES_REMOVE(m);
1763 }
1764 if (m->wire_count == 0 && !m->inactive) {
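		/*
		 * Page-ticket aging (descriptive note on the code below): each
		 * batch of VM_PAGE_TICKETS_IN_ROLL deactivated pages is stamped
		 * with the current vm_page_ticket before the ticket advances,
		 * wrapping back to 0 once it reaches VM_PAGE_TICKET_ROLL_IDS,
		 * so a page's ticket roughly records how recently it was sent
		 * to the inactive queue.
		 */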
1765 m->page_ticket = vm_page_ticket;
1766 vm_page_ticket_roll++;
1767
1768 if(vm_page_ticket_roll == VM_PAGE_TICKETS_IN_ROLL) {
1769 vm_page_ticket_roll = 0;
1770 if(vm_page_ticket == VM_PAGE_TICKET_ROLL_IDS)
1771 vm_page_ticket= 0;
1772 else
1773 vm_page_ticket++;
1774 }
1775
1776 if(m->zero_fill) {
1777 queue_enter(&vm_page_queue_zf, m, vm_page_t, pageq);
1778 } else {
1779 queue_enter(&vm_page_queue_inactive,
1780 m, vm_page_t, pageq);
1781 }
1782
1783 m->inactive = TRUE;
1784 if (!m->fictitious)
1785 vm_page_inactive_count++;
1786 }
1787}
1788
1789/*
1790 * vm_page_activate:
1791 *
1792 * Put the specified page on the active list (if appropriate).
1793 *
1794 * The page queues must be locked.
1795 */
1796
1797void
1798vm_page_activate(
1799 register vm_page_t m)
1800{
1801 VM_PAGE_CHECK(m);
1802
1803 if (m->gobbled) {
1804 assert(m->wire_count == 0);
1805 if (!m->private && !m->fictitious)
1806 vm_page_wire_count--;
1807 vm_page_gobble_count--;
1808 m->gobbled = FALSE;
1809 }
1810 if (m->private)
1811 return;
1812
1813 if (m->inactive) {
1814 if (m->zero_fill) {
1815 queue_remove(&vm_page_queue_zf, m, vm_page_t, pageq);
1816 } else {
1817 queue_remove(&vm_page_queue_inactive,
1818 m, vm_page_t, pageq);
1819 }
1820 if (!m->fictitious)
1821 vm_page_inactive_count--;
1822 m->inactive = FALSE;
1823 }
1824 if (m->wire_count == 0) {
1825 if (m->active)
1826 panic("vm_page_activate: already active");
1827
1828 queue_enter(&vm_page_queue_active, m, vm_page_t, pageq);
1829 m->active = TRUE;
1830 m->reference = TRUE;
1831 if (!m->fictitious)
1832 vm_page_active_count++;
1833 }
1834}
1835
1836/*
1837 * vm_page_part_zero_fill:
1838 *
1839 * Zero-fill a part of the page.
1840 */
1841void
1842vm_page_part_zero_fill(
1843 vm_page_t m,
1844 vm_offset_t m_pa,
1845 vm_size_t len)
1846{
1847 vm_page_t tmp;
1848
1849 VM_PAGE_CHECK(m);
1850#ifdef PMAP_ZERO_PART_PAGE_IMPLEMENTED
 1851 pmap_zero_part_page(m->phys_page, m_pa, len);
1852#else
1853 while (1) {
1854 tmp = vm_page_grab();
1855 if (tmp == VM_PAGE_NULL) {
1856 vm_page_wait(THREAD_UNINT);
1857 continue;
1858 }
1859 break;
1860 }
1861 vm_page_zero_fill(tmp);
1862 if(m_pa != 0) {
1863 vm_page_part_copy(m, 0, tmp, 0, m_pa);
1864 }
1865 if((m_pa + len) < PAGE_SIZE) {
1866 vm_page_part_copy(m, m_pa + len, tmp,
1867 m_pa + len, PAGE_SIZE - (m_pa + len));
1868 }
1869 vm_page_copy(tmp,m);
1870 vm_page_lock_queues();
1871 vm_page_free(tmp);
1872 vm_page_unlock_queues();
1873#endif
1874
1875}
1876
1877/*
1878 * vm_page_zero_fill:
1879 *
1880 * Zero-fill the specified page.
1881 */
1882void
1883vm_page_zero_fill(
1884 vm_page_t m)
1885{
1886 XPR(XPR_VM_PAGE,
1887 "vm_page_zero_fill, object 0x%X offset 0x%X page 0x%X\n",
1888 (integer_t)m->object, (integer_t)m->offset, (integer_t)m, 0,0);
1889
1890 VM_PAGE_CHECK(m);
1891
1892// dbgTrace(0xAEAEAEAE, m->phys_page, 0); /* (BRINGUP) */
1893 pmap_zero_page(m->phys_page);
1894}
1895
1896/*
1897 * vm_page_part_copy:
1898 *
1899 * copy part of one page to another
1900 */
1901
1902void
1903vm_page_part_copy(
1904 vm_page_t src_m,
1905 vm_offset_t src_pa,
1906 vm_page_t dst_m,
1907 vm_offset_t dst_pa,
1908 vm_size_t len)
1909{
1910 VM_PAGE_CHECK(src_m);
1911 VM_PAGE_CHECK(dst_m);
1912
1913 pmap_copy_part_page(src_m->phys_page, src_pa,
1914 dst_m->phys_page, dst_pa, len);
1915}
1916
1917/*
1918 * vm_page_copy:
1919 *
1920 * Copy one page to another
1921 */
1922
1923void
1924vm_page_copy(
1925 vm_page_t src_m,
1926 vm_page_t dest_m)
1927{
1928 XPR(XPR_VM_PAGE,
1929 "vm_page_copy, object 0x%X offset 0x%X to object 0x%X offset 0x%X\n",
1930 (integer_t)src_m->object, src_m->offset,
1931 (integer_t)dest_m->object, dest_m->offset,
1932 0);
1933
1934 VM_PAGE_CHECK(src_m);
1935 VM_PAGE_CHECK(dest_m);
1936
 1937 pmap_copy_page(src_m->phys_page, dest_m->phys_page);
1938}
1939
1940/*
1941 * Currently, this is a primitive allocator that grabs
1942 * free pages from the system, sorts them by physical
1943 * address, then searches for a region large enough to
1944 * satisfy the user's request.
1945 *
1946 * Additional levels of effort:
1947 * + steal clean active/inactive pages
1948 * + force pageouts of dirty pages
1949 * + maintain a map of available physical
1950 * memory
1951 */
1952
1953#define SET_NEXT_PAGE(m,n) ((m)->pageq.next = (struct queue_entry *) (n))
1954
1955#if MACH_ASSERT
1956int vm_page_verify_contiguous(
1957 vm_page_t pages,
1958 unsigned int npages);
1959#endif /* MACH_ASSERT */
1960
1961cpm_counter(unsigned int vpfls_pages_handled = 0;)
1962cpm_counter(unsigned int vpfls_head_insertions = 0;)
1963cpm_counter(unsigned int vpfls_tail_insertions = 0;)
1964cpm_counter(unsigned int vpfls_general_insertions = 0;)
1965cpm_counter(unsigned int vpfc_failed = 0;)
1966cpm_counter(unsigned int vpfc_satisfied = 0;)
1967
1968/*
1969 * Sort free list by ascending physical address,
1970 * using a not-particularly-bright sort algorithm.
1971 * Caller holds vm_page_queue_free_lock.
1972 */
1973static void
1974vm_page_free_list_sort(void)
1975{
1976 vm_page_t sort_list;
1977 vm_page_t sort_list_end;
1978 vm_page_t m, m1, *prev, next_m;
1979 vm_offset_t addr;
1980#if MACH_ASSERT
1981 unsigned int npages;
1982 int old_free_count;
1983#endif /* MACH_ASSERT */
1984
1985#if MACH_ASSERT
1986 /*
 1987 * Verify pages in the free list.
1988 */
1989 npages = 0;
1990 for (m = vm_page_queue_free; m != VM_PAGE_NULL; m = NEXT_PAGE(m))
1991 ++npages;
1992 if (npages != vm_page_free_count)
1993 panic("vm_sort_free_list: prelim: npages %d free_count %d",
1994 npages, vm_page_free_count);
1995 old_free_count = vm_page_free_count;
1996#endif /* MACH_ASSERT */
1997
1998 sort_list = sort_list_end = vm_page_queue_free;
1999 m = NEXT_PAGE(vm_page_queue_free);
2000 SET_NEXT_PAGE(vm_page_queue_free, VM_PAGE_NULL);
2001 cpm_counter(vpfls_pages_handled = 0);
2002 while (m != VM_PAGE_NULL) {
2003 cpm_counter(++vpfls_pages_handled);
2004 next_m = NEXT_PAGE(m);
55e303ae 2005 if (m->phys_page < sort_list->phys_page) {
2006 cpm_counter(++vpfls_head_insertions);
2007 SET_NEXT_PAGE(m, sort_list);
2008 sort_list = m;
55e303ae 2009 } else if (m->phys_page > sort_list_end->phys_page) {
2010 cpm_counter(++vpfls_tail_insertions);
2011 SET_NEXT_PAGE(sort_list_end, m);
2012 SET_NEXT_PAGE(m, VM_PAGE_NULL);
2013 sort_list_end = m;
2014 } else {
2015 cpm_counter(++vpfls_general_insertions);
2016 /* general sorted list insertion */
2017 prev = &sort_list;
2018 for (m1=sort_list; m1!=VM_PAGE_NULL; m1=NEXT_PAGE(m1)) {
55e303ae 2019 if (m1->phys_page > m->phys_page) {
2020 if (*prev != m1)
2021 panic("vm_sort_free_list: ugh");
2022 SET_NEXT_PAGE(m, *prev);
2023 *prev = m;
2024 break;
2025 }
2026 prev = (vm_page_t *) &m1->pageq.next;
2027 }
2028 }
2029 m = next_m;
2030 }
2031
2032#if MACH_ASSERT
2033 /*
2034 * Verify that pages are sorted into ascending order.
2035 */
2036 for (m = sort_list, npages = 0; m != VM_PAGE_NULL; m = NEXT_PAGE(m)) {
2037 if (m != sort_list &&
55e303ae 2038 m->phys_page <= addr) {
2039 printf("m 0x%x addr 0x%x\n", m, addr);
2040 panic("vm_sort_free_list");
2041 }
55e303ae 2042 addr = m->phys_page;
2043 ++npages;
2044 }
2045 if (old_free_count != vm_page_free_count)
2046 panic("vm_sort_free_list: old_free %d free_count %d",
2047 old_free_count, vm_page_free_count);
2048 if (npages != vm_page_free_count)
2049 panic("vm_sort_free_list: npages %d free_count %d",
2050 npages, vm_page_free_count);
2051#endif /* MACH_ASSERT */
2052
2053 vm_page_queue_free = sort_list;
2054}
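/*
 * The general case above is the usual sorted insert into a singly linked
 * list: walk a pointer-to-pointer down the list and splice the node in
 * front of the first entry with a larger key. A stand-alone sketch of that
 * step (hypothetical node type, illustration only, not part of this file's
 * build):
 */
#if 0
struct node {
	struct node	*next;
	unsigned long	key;		/* plays the role of phys_page */
};

static void
sorted_insert(struct node **head, struct node *n)
{
	struct node **prev = head;

	/* advance until the entry we point at has a larger key */
	while (*prev != NULL && (*prev)->key <= n->key)
		prev = &(*prev)->next;
	n->next = *prev;		/* splice n in front of it */
	*prev = n;
}
#endif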
2055
2056
2057#if MACH_ASSERT
2058/*
2059 * Check that the list of pages is ordered by
2060 * ascending physical address and has no holes.
2061 */
2062int
2063vm_page_verify_contiguous(
2064 vm_page_t pages,
2065 unsigned int npages)
2066{
2067 register vm_page_t m;
2068 unsigned int page_count;
2069 vm_offset_t prev_addr;
2070
55e303ae 2071 prev_addr = pages->phys_page;
2072 page_count = 1;
2073 for (m = NEXT_PAGE(pages); m != VM_PAGE_NULL; m = NEXT_PAGE(m)) {
55e303ae 2074 if (m->phys_page != prev_addr + 1) {
1c79356b 2075 printf("m 0x%x prev_addr 0x%x, current addr 0x%x\n",
55e303ae 2076 m, prev_addr, m->phys_page);
2077 printf("pages 0x%x page_count %d\n", pages, page_count);
2078 panic("vm_page_verify_contiguous: not contiguous!");
2079 }
55e303ae 2080 prev_addr = m->phys_page;
2081 ++page_count;
2082 }
2083 if (page_count != npages) {
2084 printf("pages 0x%x actual count 0x%x but requested 0x%x\n",
2085 pages, page_count, npages);
2086 panic("vm_page_verify_contiguous: count error");
2087 }
2088 return 1;
2089}
2090#endif /* MACH_ASSERT */
2091
2092
2093/*
2094 * Find a region large enough to contain at least npages
2095 * of contiguous physical memory.
2096 *
2097 * Requirements:
2098 * - Called while holding vm_page_queue_free_lock.
2099 * - Doesn't respect vm_page_free_reserved; caller
2100 * must not ask for more pages than are legal to grab.
2101 *
2102 * Returns a pointer to a list of gobbled pages or VM_PAGE_NULL.
2103 *
2104 */
2105static vm_page_t
2106vm_page_find_contiguous(
2107 int npages)
2108{
2109 vm_page_t m, *contig_prev, *prev_ptr;
55e303ae 2110 ppnum_t prev_page;
2111 unsigned int contig_npages;
2112 vm_page_t list;
2113
2114 if (npages < 1)
2115 return VM_PAGE_NULL;
2116
55e303ae 2117 prev_page = vm_page_queue_free->phys_page - 2;
2118 prev_ptr = &vm_page_queue_free;
2119 for (m = vm_page_queue_free; m != VM_PAGE_NULL; m = NEXT_PAGE(m)) {
2120
55e303ae 2121 if (m->phys_page != prev_page + 1) {
2122 /*
2123 * Whoops! Pages aren't contiguous. Start over.
2124 */
2125 contig_npages = 0;
2126 contig_prev = prev_ptr;
2127 }
2128
2129 if (++contig_npages == npages) {
2130 /*
2131 * Chop these pages out of the free list.
2132 * Mark them all as gobbled.
2133 */
2134 list = *contig_prev;
2135 *contig_prev = NEXT_PAGE(m);
2136 SET_NEXT_PAGE(m, VM_PAGE_NULL);
2137 for (m = list; m != VM_PAGE_NULL; m = NEXT_PAGE(m)) {
2138 assert(m->free);
2139 assert(!m->wanted);
2140 m->free = FALSE;
765c9de3 2141 m->no_isync = TRUE;
2142 m->gobbled = TRUE;
2143 }
2144 vm_page_free_count -= npages;
2145 if (vm_page_free_count < vm_page_free_count_minimum)
2146 vm_page_free_count_minimum = vm_page_free_count;
2147 vm_page_wire_count += npages;
2148 vm_page_gobble_count += npages;
2149 cpm_counter(++vpfc_satisfied);
2150 assert(vm_page_verify_contiguous(list, contig_npages));
2151 return list;
2152 }
2153
2154 assert(contig_npages < npages);
2155 prev_ptr = (vm_page_t *) &m->pageq.next;
55e303ae 2156 prev_page = m->phys_page;
2157 }
2158 cpm_counter(++vpfc_failed);
2159 return VM_PAGE_NULL;
2160}
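/*
 * The loop above makes one pass over the sorted free list, restarting its
 * candidate run whenever two neighbouring pages are not physically
 * consecutive. The same technique over a plain sorted array of page
 * numbers (hypothetical helper, illustration only, not part of this file's
 * build):
 */
#if 0
/*
 * Return the index of the first element of a run of n consecutive values
 * in the ascending array pn[0..count-1], or -1 if there is no such run.
 */
static int
find_contiguous_run(const unsigned int *pn, int count, int n)
{
	int i, run_start = 0;

	if (n < 1 || count < n)
		return -1;
	if (n == 1)
		return 0;
	for (i = 1; i < count; i++) {
		if (pn[i] != pn[i - 1] + 1)
			run_start = i;		/* run broken; restart here */
		if (i - run_start + 1 == n)
			return run_start;
	}
	return -1;
}
#endif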
2161
2162/*
2163 * Allocate a list of contiguous, wired pages.
2164 */
2165kern_return_t
2166cpm_allocate(
2167 vm_size_t size,
2168 vm_page_t *list,
2169 boolean_t wire)
2170{
2171 register vm_page_t m;
2172 vm_page_t *first_contig;
2173 vm_page_t free_list, pages;
2174 unsigned int npages, n1pages;
2175 int vm_pages_available;
2176
2177 if (size % page_size != 0)
2178 return KERN_INVALID_ARGUMENT;
2179
2180 vm_page_lock_queues();
2181 mutex_lock(&vm_page_queue_free_lock);
2182
2183 /*
2184 * Should also take active and inactive pages
2185 * into account... One day...
2186 */
2187 vm_pages_available = vm_page_free_count - vm_page_free_reserved;
2188
2189 if (size > vm_pages_available * page_size) {
2190 mutex_unlock(&vm_page_queue_free_lock);
2191 return KERN_RESOURCE_SHORTAGE;
2192 }
2193
2194 vm_page_free_list_sort();
2195
2196 npages = size / page_size;
2197
2198 /*
2199 * Obtain a pointer to a subset of the free
2200 * list large enough to satisfy the request;
2201 * the region will be physically contiguous.
2202 */
2203 pages = vm_page_find_contiguous(npages);
2204 if (pages == VM_PAGE_NULL) {
2205 mutex_unlock(&vm_page_queue_free_lock);
2206 vm_page_unlock_queues();
2207 return KERN_NO_SPACE;
2208 }
2209
2210 mutex_unlock(&vm_page_queue_free_lock);
2211
2212 /*
2213 * Walk the returned list, wiring the pages.
2214 */
2215 if (wire == TRUE)
2216 for (m = pages; m != VM_PAGE_NULL; m = NEXT_PAGE(m)) {
2217 /*
2218 * Essentially inlined vm_page_wire.
2219 */
2220 assert(!m->active);
2221 assert(!m->inactive);
2222 assert(!m->private);
2223 assert(!m->fictitious);
2224 assert(m->wire_count == 0);
2225 assert(m->gobbled);
2226 m->gobbled = FALSE;
2227 m->wire_count++;
2228 --vm_page_gobble_count;
2229 }
2230 vm_page_unlock_queues();
2231
2232 /*
2233 * The CPM pages should now be available and
2234 * ordered by ascending physical address.
2235 */
2236 assert(vm_page_verify_contiguous(pages, npages));
2237
2238 *list = pages;
2239 return KERN_SUCCESS;
2240}
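/*
 * A minimal usage sketch (hypothetical caller, assuming the usual
 * round_page() macro; illustration only, not part of this file's build):
 * request a physically contiguous, wired region and walk the returned page
 * list, which comes back sorted by ascending physical address.
 */
#if 0
static kern_return_t
grab_contiguous_region(vm_size_t size)
{
	vm_page_t	pages, m;
	kern_return_t	kr;

	kr = cpm_allocate(round_page(size), &pages, TRUE);
	if (kr != KERN_SUCCESS)
		return kr;		/* KERN_NO_SPACE or KERN_RESOURCE_SHORTAGE */

	for (m = pages; m != VM_PAGE_NULL; m = NEXT_PAGE(m)) {
		/* each m is wired; m->phys_page gives its physical page number */
	}
	return KERN_SUCCESS;
}
#endif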
2241
2242
2243#include <mach_vm_debug.h>
2244#if MACH_VM_DEBUG
2245
2246#include <mach_debug/hash_info.h>
2247#include <vm/vm_debug.h>
2248
2249/*
2250 * Routine: vm_page_info
2251 * Purpose:
2252 * Return information about the global VP table.
2253 * Fills the buffer with as much information as possible
2254 * and returns the desired size of the buffer.
2255 * Conditions:
2256 * Nothing locked. The caller should provide
2257 * possibly-pageable memory.
2258 */
2259
2260unsigned int
2261vm_page_info(
2262 hash_info_bucket_t *info,
2263 unsigned int count)
2264{
2265 int i;
2266
2267 if (vm_page_bucket_count < count)
2268 count = vm_page_bucket_count;
2269
2270 for (i = 0; i < count; i++) {
2271 vm_page_bucket_t *bucket = &vm_page_buckets[i];
2272 unsigned int bucket_count = 0;
2273 vm_page_t m;
2274
2275 simple_lock(&vm_page_bucket_lock);
2276 for (m = bucket->pages; m != VM_PAGE_NULL; m = m->next)
2277 bucket_count++;
2278 simple_unlock(&vm_page_bucket_lock);
2279
2280 /* don't touch pageable memory while holding locks */
2281 info[i].hib_count = bucket_count;
2282 }
2283
2284 return vm_page_bucket_count;
2285}
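/*
 * A minimal usage sketch (hypothetical caller and allocator, illustration
 * only, not part of this file's build): vm_page_info fills at most `count'
 * entries but always returns the full bucket count, so a caller can size
 * its buffer from a first call with count == 0 and then call again to fill
 * it.
 */
#if 0
extern hash_info_bucket_t *allocate_info_buffer(unsigned int count);	/* hypothetical */

static unsigned int
snapshot_vp_table(void)
{
	unsigned int		nbuckets;
	hash_info_bucket_t	*info;

	nbuckets = vm_page_info((hash_info_bucket_t *) 0, 0);	/* returns bucket count   */
	info = allocate_info_buffer(nbuckets);			/* hypothetical helper    */
	(void) vm_page_info(info, nbuckets);			/* fills nbuckets entries */
	return nbuckets;
}
#endif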
2286#endif /* MACH_VM_DEBUG */
2287
2288#include <mach_kdb.h>
2289#if MACH_KDB
2290
2291#include <ddb/db_output.h>
2292#include <vm/vm_print.h>
2293#define printf kdbprintf
2294
2295/*
2296 * Routine: vm_page_print [exported]
2297 */
2298void
2299vm_page_print(
2300 vm_page_t p)
2301{
2302 extern db_indent;
2303
2304 iprintf("page 0x%x\n", p);
2305
2306 db_indent += 2;
2307
2308 iprintf("object=0x%x", p->object);
2309 printf(", offset=0x%x", p->offset);
2310 printf(", wire_count=%d", p->wire_count);
2311
2312 iprintf("%sinactive, %sactive, %sgobbled, %slaundry, %sfree, %sref, %sdiscard\n",
2313 (p->inactive ? "" : "!"),
2314 (p->active ? "" : "!"),
2315 (p->gobbled ? "" : "!"),
2316 (p->laundry ? "" : "!"),
2317 (p->free ? "" : "!"),
2318 (p->reference ? "" : "!"),
2319 (p->discard_request ? "" : "!"));
2320 iprintf("%sbusy, %swanted, %stabled, %sfictitious, %sprivate, %sprecious\n",
2321 (p->busy ? "" : "!"),
2322 (p->wanted ? "" : "!"),
2323 (p->tabled ? "" : "!"),
2324 (p->fictitious ? "" : "!"),
2325 (p->private ? "" : "!"),
2326 (p->precious ? "" : "!"));
2327 iprintf("%sabsent, %serror, %sdirty, %scleaning, %spageout, %sclustered\n",
2328 (p->absent ? "" : "!"),
2329 (p->error ? "" : "!"),
2330 (p->dirty ? "" : "!"),
2331 (p->cleaning ? "" : "!"),
2332 (p->pageout ? "" : "!"),
2333 (p->clustered ? "" : "!"));
0b4e3aa0 2334 iprintf("%slock_supplied, %soverwriting, %srestart, %sunusual\n",
2335 (p->lock_supplied ? "" : "!"),
2336 (p->overwriting ? "" : "!"),
2337 (p->restart ? "" : "!"),
0b4e3aa0 2338 (p->unusual ? "" : "!"));
1c79356b 2339
55e303ae 2340 iprintf("phys_page=0x%x", p->phys_page);
2341 printf(", page_error=0x%x", p->page_error);
2342 printf(", page_lock=0x%x", p->page_lock);
2343 printf(", unlock_request=%d\n", p->unlock_request);
2344
2345 db_indent -= 2;
2346}
2347#endif /* MACH_KDB */