[apple/xnu.git] osfmk/vm/vm_resident.c (blob ddebd6350247cf3f5d41e84c66308907880fa614)
1 /*
2 * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_OSREFERENCE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the
10 * License may not be used to create, or enable the creation or
11 * redistribution of, unlawful or unlicensed copies of an Apple operating
12 * system, or to circumvent, violate, or enable the circumvention or
13 * violation of, any terms of an Apple operating system software license
14 * agreement.
15 *
16 * Please obtain a copy of the License at
17 * http://www.opensource.apple.com/apsl/ and read it before using this
18 * file.
19 *
20 * The Original Code and all software distributed under the License are
21 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
22 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
23 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
24 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
25 * Please see the License for the specific language governing rights and
26 * limitations under the License.
27 *
28 * @APPLE_LICENSE_OSREFERENCE_HEADER_END@
29 */
30 /*
31 * @OSF_COPYRIGHT@
32 */
33 /*
34 * Mach Operating System
35 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
36 * All Rights Reserved.
37 *
38 * Permission to use, copy, modify and distribute this software and its
39 * documentation is hereby granted, provided that both the copyright
40 * notice and this permission notice appear in all copies of the
41 * software, derivative works or modified versions, and any portions
42 * thereof, and that both notices appear in supporting documentation.
43 *
44 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
45 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
46 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
47 *
48 * Carnegie Mellon requests users of this software to return to
49 *
50 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
51 * School of Computer Science
52 * Carnegie Mellon University
53 * Pittsburgh PA 15213-3890
54 *
55 * any improvements or extensions that they make and grant Carnegie Mellon
56 * the rights to redistribute these changes.
57 */
58 /*
59 */
60 /*
61 * File: vm/vm_page.c
62 * Author: Avadis Tevanian, Jr., Michael Wayne Young
63 *
64 * Resident memory management module.
65 */
66
67 #include <debug.h>
68
69 #include <mach/clock_types.h>
70 #include <mach/vm_prot.h>
71 #include <mach/vm_statistics.h>
72 #include <kern/counters.h>
73 #include <kern/sched_prim.h>
74 #include <kern/task.h>
75 #include <kern/thread.h>
76 #include <kern/zalloc.h>
77 #include <kern/xpr.h>
78 #include <vm/pmap.h>
79 #include <vm/vm_init.h>
80 #include <vm/vm_map.h>
81 #include <vm/vm_page.h>
82 #include <vm/vm_pageout.h>
83 #include <vm/vm_kern.h> /* kernel_memory_allocate() */
84 #include <kern/misc_protos.h>
85 #include <zone_debug.h>
86 #include <vm/cpm.h>
87 #include <ppc/mappings.h> /* (BRINGUP) */
88 #include <pexpert/pexpert.h> /* (BRINGUP) */
89
90 #include <vm/vm_protos.h>
91
92 /* Variables used to indicate the relative age of pages in the
93 * inactive list
94 */
95
96 unsigned int vm_page_ticket_roll = 0;
97 unsigned int vm_page_ticket = 0;
98 /*
99 * Associated with each page of user-allocatable memory is a
100 * page structure.
101 */
102
103 /*
104 * These variables record the values returned by vm_page_bootstrap,
105 * for debugging purposes. The implementation of pmap_steal_memory
106 * and pmap_startup here also uses them internally.
107 */
108
109 vm_offset_t virtual_space_start;
110 vm_offset_t virtual_space_end;
111 int vm_page_pages;
112
113 /*
114 * The vm_page_lookup() routine, which provides for fast
115 * (virtual memory object, offset) to page lookup, employs
116 * the following hash table. The vm_page_{insert,remove}
117 * routines install and remove associations in the table.
118 * [This table is often called the virtual-to-physical,
119 * or VP, table.]
120 */
121 typedef struct {
122 vm_page_t pages;
123 #if MACH_PAGE_HASH_STATS
124 int cur_count; /* current count */
125 int hi_count; /* high water mark */
126 #endif /* MACH_PAGE_HASH_STATS */
127 } vm_page_bucket_t;
128
129 vm_page_bucket_t *vm_page_buckets; /* Array of buckets */
130 unsigned int vm_page_bucket_count = 0; /* How big is array? */
131 unsigned int vm_page_hash_mask; /* Mask for hash function */
132 unsigned int vm_page_hash_shift; /* Shift for hash function */
133 uint32_t vm_page_bucket_hash; /* Basic bucket hash */
134 decl_simple_lock_data(,vm_page_bucket_lock)
135
136 vm_page_t
137 vm_page_lookup_nohint(vm_object_t object, vm_object_offset_t offset);
138
139
140 #if MACH_PAGE_HASH_STATS
141 /* This routine is only for debug. It is intended to be called by
142 * hand by a developer using a kernel debugger. This routine prints
143 * out vm_page_hash table statistics to the kernel debug console.
144 */
145 void
146 hash_debug(void)
147 {
148 int i;
149 int numbuckets = 0;
150 int highsum = 0;
151 int maxdepth = 0;
152
153 for (i = 0; i < vm_page_bucket_count; i++) {
154 if (vm_page_buckets[i].hi_count) {
155 numbuckets++;
156 highsum += vm_page_buckets[i].hi_count;
157 if (vm_page_buckets[i].hi_count > maxdepth)
158 maxdepth = vm_page_buckets[i].hi_count;
159 }
160 }
161 printf("Total number of buckets: %d\n", vm_page_bucket_count);
162 printf("Number used buckets: %d = %d%%\n",
163 numbuckets, 100*numbuckets/vm_page_bucket_count);
164 printf("Number unused buckets: %d = %d%%\n",
165 vm_page_bucket_count - numbuckets,
166 100*(vm_page_bucket_count-numbuckets)/vm_page_bucket_count);
167 printf("Sum of bucket max depth: %d\n", highsum);
168 printf("Average bucket depth: %d.%2d\n",
169 highsum/vm_page_bucket_count,
170 highsum%vm_page_bucket_count);
171 printf("Maximum bucket depth: %d\n", maxdepth);
172 }
173 #endif /* MACH_PAGE_HASH_STATS */
174
175 /*
176 * The virtual page size is currently implemented as a runtime
177 * variable, but is constant once initialized using vm_set_page_size.
178 * This initialization must be done in the machine-dependent
179 * bootstrap sequence, before calling other machine-independent
180 * initializations.
181 *
182 * All references to the virtual page size outside this
183 * module must use the PAGE_SIZE, PAGE_MASK and PAGE_SHIFT
184 * constants.
185 */
186 vm_size_t page_size = PAGE_SIZE;
187 vm_size_t page_mask = PAGE_MASK;
188 int page_shift = PAGE_SHIFT;
189
190 /*
191 * Resident page structures are initialized from
192 * a template (see vm_page_alloc).
193 *
194 * When adding a new field to the virtual memory
195 * object structure, be sure to add initialization
196 * (see vm_page_bootstrap).
197 */
198 struct vm_page vm_page_template;
199
200 /*
201 * Resident pages that represent real memory
202 * are allocated from a free list.
203 */
204 vm_page_t vm_page_queue_free;
205 vm_page_t vm_page_queue_fictitious;
206 unsigned int vm_page_free_wanted;
207 unsigned int vm_page_free_count;
208 unsigned int vm_page_fictitious_count;
209
210 unsigned int vm_page_free_count_minimum; /* debugging */
211
212 /*
213 * Occasionally, the virtual memory system uses
214 * resident page structures that do not refer to
215 * real pages, for example to leave a page with
216 * important state information in the VP table.
217 *
218 * These page structures are allocated the way
219 * most other kernel structures are.
220 */
221 zone_t vm_page_zone;
222 decl_mutex_data(,vm_page_alloc_lock)
223 unsigned int io_throttle_zero_fill;
224
225 /*
226 * Fictitious pages don't have a physical address,
227 * but we must initialize phys_page to something.
228 * For debugging, this should be a strange value
229 * that the pmap module can recognize in assertions.
230 */
231 vm_offset_t vm_page_fictitious_addr = (vm_offset_t) -1;
232
233 /*
234 * Resident page structures are also chained on
235 * queues that are used by the page replacement
236 * system (pageout daemon). These queues are
237 * defined here, but are shared by the pageout
238 * module. The inactive queue is broken into
239 * inactive and zf for convenience as the
240 * pageout daemon often assigns a higher
241 * affinity to zf pages
242 */
243 queue_head_t vm_page_queue_active;
244 queue_head_t vm_page_queue_inactive;
245 unsigned int vm_page_active_count;
246 unsigned int vm_page_inactive_count;
247 unsigned int vm_page_wire_count;
248 unsigned int vm_page_gobble_count = 0;
249 unsigned int vm_page_wire_count_warning = 0;
250 unsigned int vm_page_gobble_count_warning = 0;
251
252 unsigned int vm_page_purgeable_count = 0; /* # of pages purgeable now */
253 uint64_t vm_page_purged_count = 0; /* total count of purged pages */
254
255 ppnum_t vm_lopage_poolstart = 0;
256 ppnum_t vm_lopage_poolend = 0;
257 int vm_lopage_poolsize = 0;
258 uint64_t max_valid_dma_address = 0xffffffffffffffffULL;
259
260
261 /*
262 * Several page replacement parameters are also
263 * shared with this module, so that page allocation
264 * (done here in vm_page_alloc) can trigger the
265 * pageout daemon.
266 */
267 unsigned int vm_page_free_target = 0;
268 unsigned int vm_page_free_min = 0;
269 unsigned int vm_page_inactive_target = 0;
270 unsigned int vm_page_free_reserved = 0;
271 unsigned int vm_page_throttled_count = 0;
272
273 /*
274 * The VM system has a couple of heuristics for deciding
275 * that pages are "uninteresting" and should be placed
276 * on the inactive queue as likely candidates for replacement.
277 * These variables let the heuristics be controlled at run-time
278 * to make experimentation easier.
279 */
280
281 boolean_t vm_page_deactivate_hint = TRUE;
282
283 /*
284 * vm_set_page_size:
285 *
286 * Sets the page size, perhaps based upon the memory
287 * size. Must be called before any use of page-size
288 * dependent functions.
289 *
290 * Sets page_shift and page_mask from page_size.
291 */
292 void
293 vm_set_page_size(void)
294 {
295 page_mask = page_size - 1;
296
297 if ((page_mask & page_size) != 0)
298 panic("vm_set_page_size: page size not a power of two");
299
300 for (page_shift = 0; ; page_shift++)
301 if ((1U << page_shift) == page_size)
302 break;
303 }
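/*
 * Illustrative values only (the page size is machine-dependent): with the
 * common 4 KB page, page_size == 4096, page_mask == 0xFFF, and the loop
 * above stops at page_shift == 12, since 1 << 12 == 4096.
 */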
304
305 /*
306 * vm_page_bootstrap:
307 *
308 * Initializes the resident memory module.
309 *
310 * Allocates memory for the page cells, and
311 * for the object/offset-to-page hash table headers.
312 * Each page cell is initialized and placed on the free list.
313 * Returns the range of available kernel virtual memory.
314 */
315
316 void
317 vm_page_bootstrap(
318 vm_offset_t *startp,
319 vm_offset_t *endp)
320 {
321 register vm_page_t m;
322 unsigned int i;
323 unsigned int log1;
324 unsigned int log2;
325 unsigned int size;
326
327 /*
328 * Initialize the vm_page template.
329 */
330
331 m = &vm_page_template;
332 m->object = VM_OBJECT_NULL; /* reset later */
333 m->offset = (vm_object_offset_t) -1; /* reset later */
334 m->wire_count = 0;
335
336 m->pageq.next = NULL;
337 m->pageq.prev = NULL;
338 m->listq.next = NULL;
339 m->listq.prev = NULL;
340
341 m->inactive = FALSE;
342 m->active = FALSE;
343 m->laundry = FALSE;
344 m->free = FALSE;
345 m->no_isync = TRUE;
346 m->reference = FALSE;
347 m->pageout = FALSE;
348 m->dump_cleaning = FALSE;
349 m->list_req_pending = FALSE;
350
351 m->busy = TRUE;
352 m->wanted = FALSE;
353 m->tabled = FALSE;
354 m->fictitious = FALSE;
355 m->private = FALSE;
356 m->absent = FALSE;
357 m->error = FALSE;
358 m->dirty = FALSE;
359 m->cleaning = FALSE;
360 m->precious = FALSE;
361 m->clustered = FALSE;
362 m->lock_supplied = FALSE;
363 m->unusual = FALSE;
364 m->restart = FALSE;
365 m->zero_fill = FALSE;
366 m->encrypted = FALSE;
367
368 m->phys_page = 0; /* reset later */
369
370 m->page_lock = VM_PROT_NONE;
371 m->unlock_request = VM_PROT_NONE;
372 m->page_error = KERN_SUCCESS;
373
374 /*
375 * Initialize the page queues.
376 */
377
378 mutex_init(&vm_page_queue_free_lock, 0);
379 mutex_init(&vm_page_queue_lock, 0);
380
381 vm_page_queue_free = VM_PAGE_NULL;
382 vm_page_queue_fictitious = VM_PAGE_NULL;
383 queue_init(&vm_page_queue_active);
384 queue_init(&vm_page_queue_inactive);
385 queue_init(&vm_page_queue_zf);
386
387 vm_page_free_wanted = 0;
388
389 /*
390 * Steal memory for the map and zone subsystems.
391 */
392
393 vm_map_steal_memory();
394 zone_steal_memory();
395
396 /*
397 * Allocate (and initialize) the virtual-to-physical
398 * table hash buckets.
399 *
400 * The number of buckets should be a power of two to
401 * get a good hash function. The following computation
402 * chooses the first power of two that is greater
403 * than or equal to the number of physical pages in the system.
404 */
405
406 simple_lock_init(&vm_page_bucket_lock, 0);
407
408 if (vm_page_bucket_count == 0) {
409 unsigned int npages = pmap_free_pages();
410
411 vm_page_bucket_count = 1;
412 while (vm_page_bucket_count < npages)
413 vm_page_bucket_count <<= 1;
414 }
415
416 vm_page_hash_mask = vm_page_bucket_count - 1;
417
418 /*
419 * Calculate object shift value for hashing algorithm:
420 * O = log2(sizeof(struct vm_object))
421 * B = log2(vm_page_bucket_count)
422 * hash shifts the object left by
423 * B/2 - O
424 */
425 size = vm_page_bucket_count;
426 for (log1 = 0; size > 1; log1++)
427 size /= 2;
428 size = sizeof(struct vm_object);
429 for (log2 = 0; size > 1; log2++)
430 size /= 2;
431 vm_page_hash_shift = log1/2 - log2 + 1;
432
433 vm_page_bucket_hash = 1 << ((log1 + 1) >> 1); /* Get (ceiling of sqrt of table size) */
434 vm_page_bucket_hash |= 1 << ((log1 + 1) >> 2); /* Get (ceiling of fourth root of table size) */
435 vm_page_bucket_hash |= 1; /* Force the low bit on - must always be 1 to ensure a unique series */
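/*
 * Hypothetical sizing example: a machine with about 48K physical pages
 * gets a 65536-entry bucket table, so log1 == 16 above and the multiplier
 * works out to (1 << 8) | (1 << 4) | 1 == 0x111, i.e. roughly the square
 * root and fourth root of the table size with the low bit forced on.
 */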
436
437 if (vm_page_hash_mask & vm_page_bucket_count)
438 printf("vm_page_bootstrap: WARNING -- strange page hash\n");
439
440 vm_page_buckets = (vm_page_bucket_t *)
441 pmap_steal_memory(vm_page_bucket_count *
442 sizeof(vm_page_bucket_t));
443
444 for (i = 0; i < vm_page_bucket_count; i++) {
445 register vm_page_bucket_t *bucket = &vm_page_buckets[i];
446
447 bucket->pages = VM_PAGE_NULL;
448 #if MACH_PAGE_HASH_STATS
449 bucket->cur_count = 0;
450 bucket->hi_count = 0;
451 #endif /* MACH_PAGE_HASH_STATS */
452 }
453
454 /*
455 * Machine-dependent code allocates the resident page table.
456 * It uses vm_page_init to initialize the page frames.
457 * The code also returns to us the virtual space available
458 * to the kernel. We don't trust the pmap module
459 * to get the alignment right.
460 */
461
462 pmap_startup(&virtual_space_start, &virtual_space_end);
463 virtual_space_start = round_page(virtual_space_start);
464 virtual_space_end = trunc_page(virtual_space_end);
465
466 *startp = virtual_space_start;
467 *endp = virtual_space_end;
468
469 /*
470 * Compute the initial "wire" count.
471 * Up until now, the pages which have been set aside are not under
472 * the VM system's control, so although they aren't explicitly
473 * wired, they nonetheless can't be moved. At this moment,
474 * all VM managed pages are "free", courtesy of pmap_startup.
475 */
476 vm_page_wire_count = atop_64(max_mem) - vm_page_free_count; /* initial value */
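/*
 * Illustrative numbers only: on a 512 MB machine with 4 KB pages,
 * atop_64(max_mem) == 131072; if pmap_startup put 126000 of those pages
 * on the free list, the 5072 pages set aside during bootstrap are
 * accounted for here as wired.
 */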
477
478 printf("vm_page_bootstrap: %d free pages\n", vm_page_free_count);
479 vm_page_free_count_minimum = vm_page_free_count;
480
481 simple_lock_init(&vm_paging_lock, 0);
482 }
483
484 #ifndef MACHINE_PAGES
485 /*
486 * We implement pmap_steal_memory and pmap_startup with the help
487 * of two simpler functions, pmap_virtual_space and pmap_next_page.
488 */
489
490 void *
491 pmap_steal_memory(
492 vm_size_t size)
493 {
494 vm_offset_t addr, vaddr;
495 ppnum_t phys_page;
496
497 /*
498 * Round the size up to a multiple of the pointer size.
499 */
500
501 size = (size + sizeof (void *) - 1) &~ (sizeof (void *) - 1);
502
503 /*
504 * If this is the first call to pmap_steal_memory,
505 * we have to initialize ourself.
506 */
507
508 if (virtual_space_start == virtual_space_end) {
509 pmap_virtual_space(&virtual_space_start, &virtual_space_end);
510
511 /*
512 * The initial values must be aligned properly, and
513 * we don't trust the pmap module to do it right.
514 */
515
516 virtual_space_start = round_page(virtual_space_start);
517 virtual_space_end = trunc_page(virtual_space_end);
518 }
519
520 /*
521 * Allocate virtual memory for this request.
522 */
523
524 addr = virtual_space_start;
525 virtual_space_start += size;
526
527 kprintf("pmap_steal_memory: %08X - %08X; size=%08X\n", addr, virtual_space_start, size); /* (TEST/DEBUG) */
528
529 /*
530 * Allocate and map physical pages to back new virtual pages.
531 */
532
533 for (vaddr = round_page(addr);
534 vaddr < addr + size;
535 vaddr += PAGE_SIZE) {
536 if (!pmap_next_page(&phys_page))
537 panic("pmap_steal_memory");
538
539 /*
540 * XXX Logically, these mappings should be wired,
541 * but some pmap modules barf if they are.
542 */
543
544 pmap_enter(kernel_pmap, vaddr, phys_page,
545 VM_PROT_READ|VM_PROT_WRITE,
546 VM_WIMG_USE_DEFAULT, FALSE);
547 /*
548 * Account for newly stolen memory
549 */
550 vm_page_wire_count++;
551
552 }
553
554 return (void *) addr;
555 }
556
557 void
558 pmap_startup(
559 vm_offset_t *startp,
560 vm_offset_t *endp)
561 {
562 unsigned int i, npages, pages_initialized, fill, fillval;
563 vm_page_t pages;
564 ppnum_t phys_page;
565 addr64_t tmpaddr;
566 unsigned int num_of_lopages = 0;
567 unsigned int last_index;
568
569 /*
570 * We calculate how many page frames we will have
571 * and then allocate the page structures in one chunk.
572 */
573
574 tmpaddr = (addr64_t)pmap_free_pages() * (addr64_t)PAGE_SIZE; /* Get the amount of memory left */
575 tmpaddr = tmpaddr + (addr64_t)(round_page_32(virtual_space_start) - virtual_space_start); /* Account for any slop */
576 npages = (unsigned int)(tmpaddr / (addr64_t)(PAGE_SIZE + sizeof(*pages))); /* Pages we can manage, leaving room for the vm_page_ts that describe them */
577
578 pages = (vm_page_t) pmap_steal_memory(npages * sizeof *pages);
579
580 /*
581 * Initialize the page frames.
582 */
583 for (i = 0, pages_initialized = 0; i < npages; i++) {
584 if (!pmap_next_page(&phys_page))
585 break;
586
587 vm_page_init(&pages[i], phys_page);
588 vm_page_pages++;
589 pages_initialized++;
590 }
591
592 /*
593 * Check if we want to initialize pages to a known value
594 */
595 fill = 0; /* Assume no fill */
596 if (PE_parse_boot_arg("fill", &fillval)) fill = 1; /* Set fill */
597
598 /*
599 * if vm_lopage_poolsize is non-zero, then we need to reserve
600 * a pool of pages whose addresses are less than 4G... this pool
601 * is used by drivers whose hardware can't DMA beyond 32 bits...
602 *
603 * note that I'm assuming that the page list is ascending and
604 * ordered with respect to the physical address
605 */
606 for (i = 0, num_of_lopages = vm_lopage_poolsize; num_of_lopages && i < pages_initialized; num_of_lopages--, i++) {
607 vm_page_t m;
608
609 m = &pages[i];
610
611 if (m->phys_page >= (1 << (32 - PAGE_SHIFT)))
612 panic("couldn't reserve the lopage pool: not enough lo pages\n");
613
614 if (m->phys_page < vm_lopage_poolend)
615 panic("couldn't reserve the lopage pool: page list out of order\n");
616
617 vm_lopage_poolend = m->phys_page;
618
619 if (vm_lopage_poolstart == 0)
620 vm_lopage_poolstart = m->phys_page;
621 else {
622 if (m->phys_page < vm_lopage_poolstart)
623 panic("couldn't reserve the lopage pool: page list out of order\n");
624 }
625
626 if (fill)
627 fillPage(m->phys_page, fillval); /* Fill the page with a known value if requested at boot */
628
629 vm_page_release(m);
630 }
631 last_index = i;
632
633 // -debug code remove
634 if (2 == vm_himemory_mode) {
635 // free low -> high so high is preferred
636 for (i = last_index + 1; i <= pages_initialized; i++) {
637 if(fill) fillPage(pages[i - 1].phys_page, fillval); /* Fill the page with a known value if requested at boot */
638 vm_page_release(&pages[i - 1]);
639 }
640 }
641 else
642 // debug code remove-
643
644 /*
645 * Release pages in reverse order so that physical pages
646 * initially get allocated in ascending addresses. This keeps
647 * the devices (which must address physical memory) happy if
648 * they require several consecutive pages.
649 */
650 for (i = pages_initialized; i > last_index; i--) {
651 if(fill) fillPage(pages[i - 1].phys_page, fillval); /* Fill the page with a known value if requested at boot */
652 vm_page_release(&pages[i - 1]);
653 }
654
655 #if 0
656 {
657 vm_page_t xx, xxo, xxl;
658 int j, k, l;
659
660 j = 0; /* (BRINGUP) */
661 xxl = 0;
662
663 for(xx = vm_page_queue_free; xx; xxl = xx, xx = xx->pageq.next) { /* (BRINGUP) */
664 j++; /* (BRINGUP) */
665 if(j > vm_page_free_count) { /* (BRINGUP) */
666 panic("pmap_startup: too many pages, xx = %08X, xxl = %08X\n", xx, xxl);
667 }
668
669 l = vm_page_free_count - j; /* (BRINGUP) */
670 k = 0; /* (BRINGUP) */
671
672 if(((j - 1) & 0xFFFF) == 0) kprintf("checking number %d of %d\n", j, vm_page_free_count);
673
674 for(xxo = xx->pageq.next; xxo; xxo = xxo->pageq.next) { /* (BRINGUP) */
675 k++;
676 if(k > l) panic("pmap_startup: too many in secondary check %d %d\n", k, l);
677 if((xx->phys_page & 0xFFFFFFFF) == (xxo->phys_page & 0xFFFFFFFF)) { /* (BRINGUP) */
678 panic("pmap_startup: duplicate physaddr, xx = %08X, xxo = %08X\n", xx, xxo);
679 }
680 }
681 }
682
683 if(j != vm_page_free_count) { /* (BRINGUP) */
684 panic("pmap_startup: vm_page_free_count does not match, calc = %d, vm_page_free_count = %08X\n", j, vm_page_free_count);
685 }
686 }
687 #endif
688
689
690 /*
691 * We have to re-align virtual_space_start,
692 * because pmap_steal_memory has been using it.
693 */
694
695 virtual_space_start = round_page_32(virtual_space_start);
696
697 *startp = virtual_space_start;
698 *endp = virtual_space_end;
699 }
700 #endif /* MACHINE_PAGES */
701
702 /*
703 * Routine: vm_page_module_init
704 * Purpose:
705 * Second initialization pass, to be done after
706 * the basic VM system is ready.
707 */
708 void
709 vm_page_module_init(void)
710 {
711 vm_page_zone = zinit((vm_size_t) sizeof(struct vm_page),
712 0, PAGE_SIZE, "vm pages");
713
714 #if ZONE_DEBUG
715 zone_debug_disable(vm_page_zone);
716 #endif /* ZONE_DEBUG */
717
718 zone_change(vm_page_zone, Z_EXPAND, FALSE);
719 zone_change(vm_page_zone, Z_EXHAUST, TRUE);
720 zone_change(vm_page_zone, Z_FOREIGN, TRUE);
721
722 /*
723 * Adjust zone statistics to account for the real pages allocated
724 * in vm_page_create(). [Q: is this really what we want?]
725 */
726 vm_page_zone->count += vm_page_pages;
727 vm_page_zone->cur_size += vm_page_pages * vm_page_zone->elem_size;
728
729 mutex_init(&vm_page_alloc_lock, 0);
730 }
731
732 /*
733 * Routine: vm_page_create
734 * Purpose:
735 * After the VM system is up, machine-dependent code
736 * may stumble across more physical memory. For example,
737 * memory that it was reserving for a frame buffer.
738 * vm_page_create turns this memory into available pages.
739 */
740
741 void
742 vm_page_create(
743 ppnum_t start,
744 ppnum_t end)
745 {
746 ppnum_t phys_page;
747 vm_page_t m;
748
749 for (phys_page = start;
750 phys_page < end;
751 phys_page++) {
752 while ((m = (vm_page_t) vm_page_grab_fictitious())
753 == VM_PAGE_NULL)
754 vm_page_more_fictitious();
755
756 vm_page_init(m, phys_page);
757 vm_page_pages++;
758 vm_page_release(m);
759 }
760 }
761
762 /*
763 * vm_page_hash:
764 *
765 * Distributes the object/offset key pair among hash buckets.
766 *
767 * NOTE: The bucket count must be a power of 2
768 */
769 #define vm_page_hash(object, offset) (\
770 ( (natural_t)((uint32_t)object * vm_page_bucket_hash) + ((uint32_t)atop_64(offset) ^ vm_page_bucket_hash))\
771 & vm_page_hash_mask)
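/*
 * Worked example (hypothetical values, assuming 4 KB pages): with the
 * multiplier 0x111 and a 65536-bucket table (mask 0xFFFF), an object at
 * address 0x1000 and offset 0x5000 hash to
 * ((0x1000 * 0x111) + (5 ^ 0x111)) & 0xFFFF == (0x111000 + 0x114) & 0xFFFF
 * == 0x1114
 */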
772
773 /*
774 * vm_page_insert: [ internal use only ]
775 *
776 * Inserts the given mem entry into the object/object-page
777 * table and object list.
778 *
779 * The object must be locked.
780 */
781
782 void
783 vm_page_insert(
784 register vm_page_t mem,
785 register vm_object_t object,
786 register vm_object_offset_t offset)
787 {
788 register vm_page_bucket_t *bucket;
789
790 XPR(XPR_VM_PAGE,
791 "vm_page_insert, object 0x%X offset 0x%X page 0x%X\n",
792 (integer_t)object, (integer_t)offset, (integer_t)mem, 0,0);
793
794 VM_PAGE_CHECK(mem);
795 #if DEBUG
796 _mutex_assert(&object->Lock, MA_OWNED);
797
798 if (mem->tabled || mem->object != VM_OBJECT_NULL)
799 panic("vm_page_insert: page %p for (obj=%p,off=0x%llx) "
800 "already in (obj=%p,off=0x%llx)",
801 mem, object, offset, mem->object, mem->offset);
802 #endif
803 assert(!object->internal || offset < object->size);
804
805 /* only insert "pageout" pages into "pageout" objects,
806 * and normal pages into normal objects */
807 assert(object->pageout == mem->pageout);
808
809 assert(vm_page_lookup(object, offset) == VM_PAGE_NULL);
810
811 /*
812 * Record the object/offset pair in this page
813 */
814
815 mem->object = object;
816 mem->offset = offset;
817
818 /*
819 * Insert it into the object_object/offset hash table
820 */
821
822 bucket = &vm_page_buckets[vm_page_hash(object, offset)];
823 simple_lock(&vm_page_bucket_lock);
824 mem->next = bucket->pages;
825 bucket->pages = mem;
826 #if MACH_PAGE_HASH_STATS
827 if (++bucket->cur_count > bucket->hi_count)
828 bucket->hi_count = bucket->cur_count;
829 #endif /* MACH_PAGE_HASH_STATS */
830 simple_unlock(&vm_page_bucket_lock);
831
832 /*
833 * Now link into the object's list of backed pages.
834 */
835
836 VM_PAGE_INSERT(mem, object);
837 mem->tabled = TRUE;
838
839 /*
840 * Show that the object has one more resident page.
841 */
842
843 object->resident_page_count++;
844
845 if (object->purgable == VM_OBJECT_PURGABLE_VOLATILE ||
846 object->purgable == VM_OBJECT_PURGABLE_EMPTY) {
847 vm_page_lock_queues();
848 vm_page_purgeable_count++;
849 vm_page_unlock_queues();
850 }
851 }
852
853 /*
854 * vm_page_replace:
855 *
856 * Exactly like vm_page_insert, except that we first
857 * remove any existing page at the given offset in object.
858 *
859 * The object and page queues must be locked.
860 */
861
862 void
863 vm_page_replace(
864 register vm_page_t mem,
865 register vm_object_t object,
866 register vm_object_offset_t offset)
867 {
868 vm_page_bucket_t *bucket;
869 vm_page_t found_m = VM_PAGE_NULL;
870
871 VM_PAGE_CHECK(mem);
872 #if DEBUG
873 _mutex_assert(&object->Lock, MA_OWNED);
874 _mutex_assert(&vm_page_queue_lock, MA_OWNED);
875
876 if (mem->tabled || mem->object != VM_OBJECT_NULL)
877 panic("vm_page_replace: page %p for (obj=%p,off=0x%llx) "
878 "already in (obj=%p,off=0x%llx)",
879 mem, object, offset, mem->object, mem->offset);
880 #endif
881 /*
882 * Record the object/offset pair in this page
883 */
884
885 mem->object = object;
886 mem->offset = offset;
887
888 /*
889 * Insert it into the object_object/offset hash table,
890 * replacing any page that might have been there.
891 */
892
893 bucket = &vm_page_buckets[vm_page_hash(object, offset)];
894 simple_lock(&vm_page_bucket_lock);
895
896 if (bucket->pages) {
897 vm_page_t *mp = &bucket->pages;
898 register vm_page_t m = *mp;
899
900 do {
901 if (m->object == object && m->offset == offset) {
902 /*
903 * Remove old page from hash list
904 */
905 *mp = m->next;
906
907 found_m = m;
908 break;
909 }
910 mp = &m->next;
911 } while ((m = *mp));
912
913 mem->next = bucket->pages;
914 } else {
915 mem->next = VM_PAGE_NULL;
916 }
917 /*
918 * insert new page at head of hash list
919 */
920 bucket->pages = mem;
921
922 simple_unlock(&vm_page_bucket_lock);
923
924 if (found_m) {
925 /*
926 * there was already a page at the specified
927 * offset for this object... remove it from
928 * the object and free it back to the free list
929 */
930 VM_PAGE_REMOVE(found_m);
931 found_m->tabled = FALSE;
932
933 found_m->object = VM_OBJECT_NULL;
934 found_m->offset = (vm_object_offset_t) -1;
935 object->resident_page_count--;
936
937 if (object->purgable == VM_OBJECT_PURGABLE_VOLATILE ||
938 object->purgable == VM_OBJECT_PURGABLE_EMPTY) {
939 assert(vm_page_purgeable_count > 0);
940 vm_page_purgeable_count--;
941 }
942
943 /*
944 * Return page to the free list.
945 * Note the page is not tabled now
946 */
947 vm_page_free(found_m);
948 }
949 /*
950 * Now link into the object's list of backed pages.
951 */
952
953 VM_PAGE_INSERT(mem, object);
954 mem->tabled = TRUE;
955
956 /*
957 * And show that the object has one more resident
958 * page.
959 */
960
961 object->resident_page_count++;
962
963 if (object->purgable == VM_OBJECT_PURGABLE_VOLATILE ||
964 object->purgable == VM_OBJECT_PURGABLE_EMPTY) {
965 vm_page_purgeable_count++;
966 }
967 }
968
969 /*
970 * vm_page_remove: [ internal use only ]
971 *
972 * Removes the given mem entry from the object/offset-page
973 * table and the object page list.
974 *
975 * The object and page queues must be locked.
976 */
977
978 void
979 vm_page_remove(
980 register vm_page_t mem)
981 {
982 register vm_page_bucket_t *bucket;
983 register vm_page_t this;
984
985 XPR(XPR_VM_PAGE,
986 "vm_page_remove, object 0x%X offset 0x%X page 0x%X\n",
987 (integer_t)mem->object, (integer_t)mem->offset,
988 (integer_t)mem, 0,0);
989 #if DEBUG
990 _mutex_assert(&vm_page_queue_lock, MA_OWNED);
991 _mutex_assert(&mem->object->Lock, MA_OWNED);
992 #endif
993 assert(mem->tabled);
994 assert(!mem->cleaning);
995 VM_PAGE_CHECK(mem);
996
997
998 /*
999 * Remove from the object_object/offset hash table
1000 */
1001
1002 bucket = &vm_page_buckets[vm_page_hash(mem->object, mem->offset)];
1003 simple_lock(&vm_page_bucket_lock);
1004 if ((this = bucket->pages) == mem) {
1005 /* optimize for common case */
1006
1007 bucket->pages = mem->next;
1008 } else {
1009 register vm_page_t *prev;
1010
1011 for (prev = &this->next;
1012 (this = *prev) != mem;
1013 prev = &this->next)
1014 continue;
1015 *prev = this->next;
1016 }
1017 #if MACH_PAGE_HASH_STATS
1018 bucket->cur_count--;
1019 #endif /* MACH_PAGE_HASH_STATS */
1020 simple_unlock(&vm_page_bucket_lock);
1021
1022 /*
1023 * Now remove from the object's list of backed pages.
1024 */
1025
1026 VM_PAGE_REMOVE(mem);
1027
1028 /*
1029 * And show that the object has one fewer resident
1030 * page.
1031 */
1032
1033 mem->object->resident_page_count--;
1034
1035 if (mem->object->purgable == VM_OBJECT_PURGABLE_VOLATILE ||
1036 mem->object->purgable == VM_OBJECT_PURGABLE_EMPTY) {
1037 assert(vm_page_purgeable_count > 0);
1038 vm_page_purgeable_count--;
1039 }
1040
1041 mem->tabled = FALSE;
1042 mem->object = VM_OBJECT_NULL;
1043 mem->offset = (vm_object_offset_t) -1;
1044 }
1045
1046 /*
1047 * vm_page_lookup:
1048 *
1049 * Returns the page associated with the object/offset
1050 * pair specified; if none is found, VM_PAGE_NULL is returned.
1051 *
1052 * The object must be locked. No side effects.
1053 */
1054
1055 unsigned long vm_page_lookup_hint = 0;
1056 unsigned long vm_page_lookup_hint_next = 0;
1057 unsigned long vm_page_lookup_hint_prev = 0;
1058 unsigned long vm_page_lookup_hint_miss = 0;
1059
1060 vm_page_t
1061 vm_page_lookup(
1062 register vm_object_t object,
1063 register vm_object_offset_t offset)
1064 {
1065 register vm_page_t mem;
1066 register vm_page_bucket_t *bucket;
1067 queue_entry_t qe;
1068 #if 0
1069 _mutex_assert(&object->Lock, MA_OWNED);
1070 #endif
1071
1072 mem = object->memq_hint;
1073 if (mem != VM_PAGE_NULL) {
1074 assert(mem->object == object);
1075 if (mem->offset == offset) {
1076 vm_page_lookup_hint++;
1077 return mem;
1078 }
1079 qe = queue_next(&mem->listq);
1080 if (! queue_end(&object->memq, qe)) {
1081 vm_page_t next_page;
1082
1083 next_page = (vm_page_t) qe;
1084 assert(next_page->object == object);
1085 if (next_page->offset == offset) {
1086 vm_page_lookup_hint_next++;
1087 object->memq_hint = next_page; /* new hint */
1088 return next_page;
1089 }
1090 }
1091 qe = queue_prev(&mem->listq);
1092 if (! queue_end(&object->memq, qe)) {
1093 vm_page_t prev_page;
1094
1095 prev_page = (vm_page_t) qe;
1096 assert(prev_page->object == object);
1097 if (prev_page->offset == offset) {
1098 vm_page_lookup_hint_prev++;
1099 object->memq_hint = prev_page; /* new hint */
1100 return prev_page;
1101 }
1102 }
1103 }
1104
1105 /*
1106 * Search the hash table for this object/offset pair
1107 */
1108
1109 bucket = &vm_page_buckets[vm_page_hash(object, offset)];
1110
1111 /*
1112 * since we hold the object lock, we are guaranteed that no
1113 * new pages can be inserted into this object... this in turn
1114 * guarantees that the page we're looking for can't exist
1115 * if the bucket it hashes to is currently NULL even when looked
1116 * at outside the scope of the hash bucket lock... this is a
1117 * really cheap optimization to avoid taking the lock
1118 */
1119 if (bucket->pages == VM_PAGE_NULL) {
1120 return (VM_PAGE_NULL);
1121 }
1122 simple_lock(&vm_page_bucket_lock);
1123
1124 for (mem = bucket->pages; mem != VM_PAGE_NULL; mem = mem->next) {
1125 VM_PAGE_CHECK(mem);
1126 if ((mem->object == object) && (mem->offset == offset))
1127 break;
1128 }
1129 simple_unlock(&vm_page_bucket_lock);
1130
1131 if (mem != VM_PAGE_NULL) {
1132 if (object->memq_hint != VM_PAGE_NULL) {
1133 vm_page_lookup_hint_miss++;
1134 }
1135 assert(mem->object == object);
1136 object->memq_hint = mem;
1137 }
1138
1139 return(mem);
1140 }
1141
1142
1143 vm_page_t
1144 vm_page_lookup_nohint(
1145 vm_object_t object,
1146 vm_object_offset_t offset)
1147 {
1148 register vm_page_t mem;
1149 register vm_page_bucket_t *bucket;
1150
1151 #if 0
1152 _mutex_assert(&object->Lock, MA_OWNED);
1153 #endif
1154 /*
1155 * Search the hash table for this object/offset pair
1156 */
1157
1158 bucket = &vm_page_buckets[vm_page_hash(object, offset)];
1159
1160 simple_lock(&vm_page_bucket_lock);
1161 for (mem = bucket->pages; mem != VM_PAGE_NULL; mem = mem->next) {
1162 VM_PAGE_CHECK(mem);
1163 if ((mem->object == object) && (mem->offset == offset))
1164 break;
1165 }
1166 simple_unlock(&vm_page_bucket_lock);
1167
1168 return(mem);
1169 }
1170
1171 /*
1172 * vm_page_rename:
1173 *
1174 * Move the given memory entry from its
1175 * current object to the specified target object/offset.
1176 *
1177 * The object must be locked.
1178 */
1179 void
1180 vm_page_rename(
1181 register vm_page_t mem,
1182 register vm_object_t new_object,
1183 vm_object_offset_t new_offset)
1184 {
1185 assert(mem->object != new_object);
1186 /*
1187 * ENCRYPTED SWAP:
1188 * The encryption key is based on the page's memory object
1189 * (aka "pager") and paging offset. Moving the page to
1190 * another VM object changes its "pager" and "paging_offset"
1191 * so it has to be decrypted first.
1192 */
1193 if (mem->encrypted) {
1194 panic("vm_page_rename: page %p is encrypted\n", mem);
1195 }
1196 /*
1197 * Changes to mem->object require the page lock because
1198 * the pageout daemon uses that lock to get the object.
1199 */
1200
1201 XPR(XPR_VM_PAGE,
1202 "vm_page_rename, new object 0x%X, offset 0x%X page 0x%X\n",
1203 (integer_t)new_object, (integer_t)new_offset,
1204 (integer_t)mem, 0,0);
1205
1206 vm_page_lock_queues();
1207 vm_page_remove(mem);
1208 vm_page_insert(mem, new_object, new_offset);
1209 vm_page_unlock_queues();
1210 }
1211
1212 /*
1213 * vm_page_init:
1214 *
1215 * Initialize the fields in a new page.
1216 * This takes a structure with random values and initializes it
1217 * so that it can be given to vm_page_release or vm_page_insert.
1218 */
1219 void
1220 vm_page_init(
1221 vm_page_t mem,
1222 ppnum_t phys_page)
1223 {
1224 assert(phys_page);
1225 *mem = vm_page_template;
1226 mem->phys_page = phys_page;
1227 }
1228
1229 /*
1230 * vm_page_grab_fictitious:
1231 *
1232 * Remove a fictitious page from the free list.
1233 * Returns VM_PAGE_NULL if there are no free pages.
1234 */
1235 int c_vm_page_grab_fictitious = 0;
1236 int c_vm_page_release_fictitious = 0;
1237 int c_vm_page_more_fictitious = 0;
1238
1239 vm_page_t
1240 vm_page_grab_fictitious(void)
1241 {
1242 register vm_page_t m;
1243
1244 m = (vm_page_t)zget(vm_page_zone);
1245 if (m) {
1246 vm_page_init(m, vm_page_fictitious_addr);
1247 m->fictitious = TRUE;
1248 }
1249
1250 c_vm_page_grab_fictitious++;
1251 return m;
1252 }
1253
1254 /*
1255 * vm_page_release_fictitious:
1256 *
1257 * Release a fictitious page to the free list.
1258 */
1259
1260 void
1261 vm_page_release_fictitious(
1262 register vm_page_t m)
1263 {
1264 assert(!m->free);
1265 assert(m->busy);
1266 assert(m->fictitious);
1267 assert(m->phys_page == vm_page_fictitious_addr);
1268
1269 c_vm_page_release_fictitious++;
1270 #if DEBUG
1271 if (m->free)
1272 panic("vm_page_release_fictitious");
1273 #endif
1274 m->free = TRUE;
1275 zfree(vm_page_zone, m);
1276 }
1277
1278 /*
1279 * vm_page_more_fictitious:
1280 *
1281 * Add more fictitious pages to the free list.
1282 * Allowed to block. This routine is way intimate
1283 * with the zones code, for several reasons:
1284 * 1. we need to carve some page structures out of physical
1285 * memory before zones work, so they _cannot_ come from
1286 * the zone_map.
1287 * 2. the zone needs to be collectable in order to prevent
1288 * growth without bound. These structures are used by
1289 * the device pager (by the hundreds and thousands), as
1290 * private pages for pageout, and as blocking pages for
1291 * pagein. Temporary bursts in demand should not result in
1292 * permanent allocation of a resource.
1293 * 3. To smooth allocation humps, we allocate single pages
1294 * with kernel_memory_allocate(), and cram them into the
1295 * zone. This also allows us to initialize the vm_page_t's
1296 * on the way into the zone, so that zget() always returns
1297 * an initialized structure. The zone free element pointer
1298 * and the free page pointer are both the first item in the
1299 * vm_page_t.
1300 * 4. By having the pages in the zone pre-initialized, we need
1301 * not keep 2 levels of lists. The garbage collector simply
1302 * scans our list, and reduces physical memory usage as it
1303 * sees fit.
1304 */
1305
1306 void vm_page_more_fictitious(void)
1307 {
1308 register vm_page_t m;
1309 vm_offset_t addr;
1310 kern_return_t retval;
1311 int i;
1312
1313 c_vm_page_more_fictitious++;
1314
1315 /*
1316 * Allocate a single page from the zone_map. Do not wait if no physical
1317 * pages are immediately available, and do not zero the space. We need
1318 * our own blocking lock here to prevent having multiple,
1319 * simultaneous requests from piling up on the zone_map lock. Exactly
1320 * one (of our) threads should be potentially waiting on the map lock.
1321 * If winner is not vm-privileged, then the page allocation will fail,
1322 * and it will temporarily block here in the vm_page_wait().
1323 */
1324 mutex_lock(&vm_page_alloc_lock);
1325 /*
1326 * If another thread allocated space, just bail out now.
1327 */
1328 if (zone_free_count(vm_page_zone) > 5) {
1329 /*
1330 * The number "5" is a small number that is larger than the
1331 * number of fictitious pages that any single caller will
1332 * attempt to allocate. Otherwise, a thread will attempt to
1333 * acquire a fictitious page (vm_page_grab_fictitious), fail,
1334 * release all of the resources and locks already acquired,
1335 * and then call this routine. This routine finds the pages
1336 * that the caller released, so it fails to allocate new space.
1337 * The process repeats infinitely. The largest known number
1338 * of fictitious pages required in this manner is 2. 5 is
1339 * simply a somewhat larger number.
1340 */
1341 mutex_unlock(&vm_page_alloc_lock);
1342 return;
1343 }
1344
1345 retval = kernel_memory_allocate(zone_map,
1346 &addr, PAGE_SIZE, VM_PROT_ALL,
1347 KMA_KOBJECT|KMA_NOPAGEWAIT);
1348 if (retval != KERN_SUCCESS) {
1349 /*
1350 * No page was available. Tell the pageout daemon, drop the
1351 * lock to give another thread a chance at it, and
1352 * wait for the pageout daemon to make progress.
1353 */
1354 mutex_unlock(&vm_page_alloc_lock);
1355 vm_page_wait(THREAD_UNINT);
1356 return;
1357 }
1358 /*
1359 * Initialize as many vm_page_t's as will fit on this page. This
1360 * depends on the zone code disturbing ONLY the first item of
1361 * each zone element.
1362 */
1363 m = (vm_page_t)addr;
1364 for (i = PAGE_SIZE/sizeof(struct vm_page); i > 0; i--) {
1365 vm_page_init(m, vm_page_fictitious_addr);
1366 m->fictitious = TRUE;
1367 m++;
1368 }
1369 zcram(vm_page_zone, (void *) addr, PAGE_SIZE);
1370 mutex_unlock(&vm_page_alloc_lock);
1371 }
1372
1373 /*
1374 * vm_page_convert:
1375 *
1376 * Attempt to convert a fictitious page into a real page.
1377 */
1378
1379 boolean_t
1380 vm_page_convert(
1381 register vm_page_t m)
1382 {
1383 register vm_page_t real_m;
1384
1385 assert(m->busy);
1386 assert(m->fictitious);
1387 assert(!m->dirty);
1388
1389 real_m = vm_page_grab();
1390 if (real_m == VM_PAGE_NULL)
1391 return FALSE;
1392
1393 m->phys_page = real_m->phys_page;
1394 m->fictitious = FALSE;
1395 m->no_isync = TRUE;
1396
1397 vm_page_lock_queues();
1398 if (m->active)
1399 vm_page_active_count++;
1400 else if (m->inactive)
1401 vm_page_inactive_count++;
1402 vm_page_unlock_queues();
1403
1404 real_m->phys_page = vm_page_fictitious_addr;
1405 real_m->fictitious = TRUE;
1406
1407 vm_page_release_fictitious(real_m);
1408 return TRUE;
1409 }
1410
1411 /*
1412 * vm_pool_low():
1413 *
1414 * Return true if it is not likely that a non-vm_privileged thread
1415 * can get memory without blocking. Advisory only, since the
1416 * situation may change under us.
1417 */
1418 int
1419 vm_pool_low(void)
1420 {
1421 /* No locking, at worst we will fib. */
1422 return( vm_page_free_count < vm_page_free_reserved );
1423 }
1424
1425
1426
1427 /*
1428 * this is an interface to support bring-up of drivers
1429 * on platforms with physical memory > 4G...
1430 */
1431 int vm_himemory_mode = 0;
1432
1433
1434 /*
1435 * this interface exists to support hardware controllers
1436 * incapable of generating DMAs with more than 32 bits
1437 * of address on platforms with physical memory > 4G...
1438 */
1439 unsigned int vm_lopage_free_count = 0;
1440 unsigned int vm_lopage_max_count = 0;
1441 vm_page_t vm_lopage_queue_free = VM_PAGE_NULL;
1442
1443 vm_page_t
1444 vm_page_grablo(void)
1445 {
1446 register vm_page_t mem;
1447 unsigned int vm_lopage_alloc_count;
1448
1449 if (vm_lopage_poolsize == 0)
1450 return (vm_page_grab());
1451
1452 mutex_lock(&vm_page_queue_free_lock);
1453
1454 if ((mem = vm_lopage_queue_free) != VM_PAGE_NULL) {
1455
1456 vm_lopage_queue_free = (vm_page_t) mem->pageq.next;
1457 mem->pageq.next = NULL;
1458 mem->pageq.prev = NULL;
1459 mem->free = FALSE;
1460 mem->no_isync = TRUE;
1461
1462 vm_lopage_free_count--;
1463 vm_lopage_alloc_count = (vm_lopage_poolend - vm_lopage_poolstart) - vm_lopage_free_count;
1464 if (vm_lopage_alloc_count > vm_lopage_max_count)
1465 vm_lopage_max_count = vm_lopage_alloc_count;
1466 }
1467 mutex_unlock(&vm_page_queue_free_lock);
1468
1469 return (mem);
1470 }
1471
1472
1473
1474 /*
1475 * vm_page_grab:
1476 *
1477 * Remove a page from the free list.
1478 * Returns VM_PAGE_NULL if the free list is too small.
1479 */
1480
1481 unsigned long vm_page_grab_count = 0; /* measure demand */
1482
1483 vm_page_t
1484 vm_page_grab(void)
1485 {
1486 register vm_page_t mem;
1487
1488 mutex_lock(&vm_page_queue_free_lock);
1489 vm_page_grab_count++;
1490
1491 /*
1492 * Optionally produce warnings if the wire or gobble
1493 * counts exceed some threshold.
1494 */
1495 if (vm_page_wire_count_warning > 0
1496 && vm_page_wire_count >= vm_page_wire_count_warning) {
1497 printf("mk: vm_page_grab(): high wired page count of %d\n",
1498 vm_page_wire_count);
1499 assert(vm_page_wire_count < vm_page_wire_count_warning);
1500 }
1501 if (vm_page_gobble_count_warning > 0
1502 && vm_page_gobble_count >= vm_page_gobble_count_warning) {
1503 printf("mk: vm_page_grab(): high gobbled page count of %d\n",
1504 vm_page_gobble_count);
1505 assert(vm_page_gobble_count < vm_page_gobble_count_warning);
1506 }
1507
1508 /*
1509 * Only let privileged threads (involved in pageout)
1510 * dip into the reserved pool.
1511 */
1512
1513 if ((vm_page_free_count < vm_page_free_reserved) &&
1514 !(current_thread()->options & TH_OPT_VMPRIV)) {
1515 mutex_unlock(&vm_page_queue_free_lock);
1516 mem = VM_PAGE_NULL;
1517 goto wakeup_pageout;
1518 }
1519
1520 while (vm_page_queue_free == VM_PAGE_NULL) {
1521 mutex_unlock(&vm_page_queue_free_lock);
1522 VM_PAGE_WAIT();
1523 mutex_lock(&vm_page_queue_free_lock);
1524 }
1525
1526 if (--vm_page_free_count < vm_page_free_count_minimum)
1527 vm_page_free_count_minimum = vm_page_free_count;
1528 mem = vm_page_queue_free;
1529 vm_page_queue_free = (vm_page_t) mem->pageq.next;
1530 mem->pageq.next = NULL;
1531 mem->pageq.prev = NULL;
1532 assert(mem->listq.next == NULL && mem->listq.prev == NULL);
1533 assert(mem->tabled == FALSE);
1534 assert(mem->object == VM_OBJECT_NULL);
1535 assert(!mem->laundry);
1536 mem->free = FALSE;
1537 mem->no_isync = TRUE;
1538 mutex_unlock(&vm_page_queue_free_lock);
1539
1540 assert(pmap_verify_free(mem->phys_page));
1541
1542 /*
1543 * Decide if we should poke the pageout daemon.
1544 * We do this if the free count is less than the low
1545 * water mark, or if the free count is less than the high
1546 * water mark (but above the low water mark) and the inactive
1547 * count is less than its target.
1548 *
1549 * We don't have the counts locked ... if they change a little,
1550 * it doesn't really matter.
1551 */
1552
1553 wakeup_pageout:
1554 if ((vm_page_free_count < vm_page_free_min) ||
1555 ((vm_page_free_count < vm_page_free_target) &&
1556 (vm_page_inactive_count < vm_page_inactive_target)))
1557 thread_wakeup((event_t) &vm_page_free_wanted);
1558
1559 // dbgLog(mem->phys_page, vm_page_free_count, vm_page_wire_count, 4); /* (TEST/DEBUG) */
1560
1561 return mem;
1562 }
1563
1564 /*
1565 * vm_page_release:
1566 *
1567 * Return a page to the free list.
1568 */
1569
1570 void
1571 vm_page_release(
1572 register vm_page_t mem)
1573 {
1574
1575 #if 0
1576 unsigned int pindex;
1577 phys_entry *physent;
1578
1579 physent = mapping_phys_lookup(mem->phys_page, &pindex); /* (BRINGUP) */
1580 if(physent->ppLink & ppN) { /* (BRINGUP) */
1581 panic("vm_page_release: already released - %08X %08X\n", mem, mem->phys_page);
1582 }
1583 physent->ppLink = physent->ppLink | ppN; /* (BRINGUP) */
1584 #endif
1585 assert(!mem->private && !mem->fictitious);
1586
1587 // dbgLog(mem->phys_page, vm_page_free_count, vm_page_wire_count, 5); /* (TEST/DEBUG) */
1588
1589 mutex_lock(&vm_page_queue_free_lock);
1590 #if DEBUG
1591 if (mem->free)
1592 panic("vm_page_release");
1593 #endif
1594 mem->free = TRUE;
1595 assert(!mem->laundry);
1596 assert(mem->object == VM_OBJECT_NULL);
1597 assert(mem->pageq.next == NULL &&
1598 mem->pageq.prev == NULL);
1599
1600 if (mem->phys_page <= vm_lopage_poolend && mem->phys_page >= vm_lopage_poolstart) {
1601 /*
1602 * this exists to support hardware controllers
1603 * incapable of generating DMAs with more than 32 bits
1604 * of address on platforms with physical memory > 4G...
1605 */
1606 mem->pageq.next = (queue_entry_t) vm_lopage_queue_free;
1607 vm_lopage_queue_free = mem;
1608 vm_lopage_free_count++;
1609 } else {
1610 mem->pageq.next = (queue_entry_t) vm_page_queue_free;
1611 vm_page_queue_free = mem;
1612 vm_page_free_count++;
1613 /*
1614 * Check if we should wake up someone waiting for page.
1615 * But don't bother waking them unless they can allocate.
1616 *
1617 * We wakeup only one thread, to prevent starvation.
1618 * Because the scheduling system handles wait queues FIFO,
1619 * if we wakeup all waiting threads, one greedy thread
1620 * can starve multiple niceguy threads. When the threads
1621 * all wakeup, the greedy thread runs first, grabs the page,
1622 * and waits for another page. It will be the first to run
1623 * when the next page is freed.
1624 *
1625 * However, there is a slight danger here.
1626 * The thread we wake might not use the free page.
1627 * Then the other threads could wait indefinitely
1628 * while the page goes unused. To forestall this,
1629 * the pageout daemon will keep making free pages
1630 * as long as vm_page_free_wanted is non-zero.
1631 */
1632
1633 if ((vm_page_free_wanted > 0) &&
1634 (vm_page_free_count >= vm_page_free_reserved)) {
1635 vm_page_free_wanted--;
1636 thread_wakeup_one((event_t) &vm_page_free_count);
1637 }
1638 }
1639 mutex_unlock(&vm_page_queue_free_lock);
1640 }
1641
1642 /*
1643 * vm_page_wait:
1644 *
1645 * Wait for a page to become available.
1646 * If there are plenty of free pages, then we don't sleep.
1647 *
1648 * Returns:
1649 * TRUE: There may be another page, try again
1650 * FALSE: We were interrupted out of our wait, don't try again
1651 */
1652
1653 boolean_t
1654 vm_page_wait(
1655 int interruptible )
1656 {
1657 /*
1658 * We can't use vm_page_free_reserved to make this
1659 * determination. Consider: some thread might
1660 * need to allocate two pages. The first allocation
1661 * succeeds, the second fails. After the first page is freed,
1662 * a call to vm_page_wait must really block.
1663 */
1664 kern_return_t wait_result;
1665 int need_wakeup = 0;
1666
1667 mutex_lock(&vm_page_queue_free_lock);
1668 if (vm_page_free_count < vm_page_free_target) {
1669 if (vm_page_free_wanted++ == 0)
1670 need_wakeup = 1;
1671 wait_result = assert_wait((event_t)&vm_page_free_count, interruptible);
1672 mutex_unlock(&vm_page_queue_free_lock);
1673 counter(c_vm_page_wait_block++);
1674
1675 if (need_wakeup)
1676 thread_wakeup((event_t)&vm_page_free_wanted);
1677
1678 if (wait_result == THREAD_WAITING)
1679 wait_result = thread_block(THREAD_CONTINUE_NULL);
1680
1681 return(wait_result == THREAD_AWAKENED);
1682 } else {
1683 mutex_unlock(&vm_page_queue_free_lock);
1684 return TRUE;
1685 }
1686 }
1687
1688 /*
1689 * vm_page_alloc:
1690 *
1691 * Allocate and return a memory cell associated
1692 * with this VM object/offset pair.
1693 *
1694 * Object must be locked.
1695 */
1696
1697 vm_page_t
1698 vm_page_alloc(
1699 vm_object_t object,
1700 vm_object_offset_t offset)
1701 {
1702 register vm_page_t mem;
1703
1704 #if DEBUG
1705 _mutex_assert(&object->Lock, MA_OWNED);
1706 #endif
1707 mem = vm_page_grab();
1708 if (mem == VM_PAGE_NULL)
1709 return VM_PAGE_NULL;
1710
1711 vm_page_insert(mem, object, offset);
1712
1713 return(mem);
1714 }
1715
1716
1717 vm_page_t
1718 vm_page_alloclo(
1719 vm_object_t object,
1720 vm_object_offset_t offset)
1721 {
1722 register vm_page_t mem;
1723
1724 #if DEBUG
1725 _mutex_assert(&object->Lock, MA_OWNED);
1726 #endif
1727 mem = vm_page_grablo();
1728 if (mem == VM_PAGE_NULL)
1729 return VM_PAGE_NULL;
1730
1731 vm_page_insert(mem, object, offset);
1732
1733 return(mem);
1734 }
1735
1736
1737 counter(unsigned int c_laundry_pages_freed = 0;)
1738
1739 int vm_pagein_cluster_unused = 0;
1740 boolean_t vm_page_free_verify = TRUE;
1741 /*
1742 * vm_page_free:
1743 *
1744 * Returns the given page to the free list,
1745 * disassociating it with any VM object.
1746 *
1747 * Object and page queues must be locked prior to entry.
1748 */
1749 void
1750 vm_page_free(
1751 register vm_page_t mem)
1752 {
1753 vm_object_t object = mem->object;
1754
1755 assert(!mem->free);
1756 assert(!mem->cleaning);
1757 assert(!mem->pageout);
1758 if (vm_page_free_verify && !mem->fictitious && !mem->private) {
1759 assert(pmap_verify_free(mem->phys_page));
1760 }
1761
1762 #if DEBUG
1763 if (mem->object)
1764 _mutex_assert(&mem->object->Lock, MA_OWNED);
1765 _mutex_assert(&vm_page_queue_lock, MA_OWNED);
1766
1767 if (mem->free)
1768 panic("vm_page_free: freeing page on free list\n");
1769 #endif
1770 if (mem->tabled)
1771 vm_page_remove(mem); /* clears tabled, object, offset */
1772 VM_PAGE_QUEUES_REMOVE(mem); /* clears active or inactive */
1773
1774 if (mem->clustered) {
1775 mem->clustered = FALSE;
1776 vm_pagein_cluster_unused++;
1777 }
1778
1779 if (mem->wire_count) {
1780 if (!mem->private && !mem->fictitious)
1781 vm_page_wire_count--;
1782 mem->wire_count = 0;
1783 assert(!mem->gobbled);
1784 } else if (mem->gobbled) {
1785 if (!mem->private && !mem->fictitious)
1786 vm_page_wire_count--;
1787 vm_page_gobble_count--;
1788 }
1789 mem->gobbled = FALSE;
1790
1791 if (mem->laundry) {
1792 vm_pageout_throttle_up(mem);
1793 counter(++c_laundry_pages_freed);
1794 }
1795
1796 PAGE_WAKEUP(mem); /* clears wanted */
1797
1798 if (mem->absent)
1799 vm_object_absent_release(object);
1800
1801 /* Some of these may be unnecessary */
1802 mem->page_lock = 0;
1803 mem->unlock_request = 0;
1804 mem->busy = TRUE;
1805 mem->absent = FALSE;
1806 mem->error = FALSE;
1807 mem->dirty = FALSE;
1808 mem->precious = FALSE;
1809 mem->reference = FALSE;
1810 mem->encrypted = FALSE;
1811
1812 mem->page_error = KERN_SUCCESS;
1813
1814 if (mem->private) {
1815 mem->private = FALSE;
1816 mem->fictitious = TRUE;
1817 mem->phys_page = vm_page_fictitious_addr;
1818 }
1819 if (mem->fictitious) {
1820 vm_page_release_fictitious(mem);
1821 } else {
1822 /* depends on the queues lock */
1823 if(mem->zero_fill) {
1824 vm_zf_count-=1;
1825 mem->zero_fill = FALSE;
1826 }
1827 vm_page_init(mem, mem->phys_page);
1828 vm_page_release(mem);
1829 }
1830 }
1831
1832
1833 void
1834 vm_page_free_list(
1835 register vm_page_t mem)
1836 {
1837 register vm_page_t nxt;
1838 register vm_page_t first = NULL;
1839 register vm_page_t last = VM_PAGE_NULL;
1840 register int pg_count = 0;
1841
1842 #if DEBUG
1843 _mutex_assert(&vm_page_queue_lock, MA_OWNED);
1844 #endif
1845 while (mem) {
1846 #if DEBUG
1847 if (mem->tabled || mem->object)
1848 panic("vm_page_free_list: freeing tabled page\n");
1849 if (mem->inactive || mem->active || mem->free)
1850 panic("vm_page_free_list: freeing page on list\n");
1851 #endif
1852 assert(mem->pageq.prev == NULL);
1853 nxt = (vm_page_t)(mem->pageq.next);
1854
1855 if (mem->clustered)
1856 vm_pagein_cluster_unused++;
1857
1858 if (mem->laundry) {
1859 vm_pageout_throttle_up(mem);
1860 counter(++c_laundry_pages_freed);
1861 }
1862 mem->busy = TRUE;
1863
1864 PAGE_WAKEUP(mem); /* clears wanted */
1865
1866 if (mem->private)
1867 mem->fictitious = TRUE;
1868
1869 if (!mem->fictitious) {
1870 /* depends on the queues lock */
1871 if (mem->zero_fill)
1872 vm_zf_count -= 1;
1873 assert(!mem->laundry);
1874 vm_page_init(mem, mem->phys_page);
1875
1876 mem->free = TRUE;
1877
1878 if (first == NULL)
1879 last = mem;
1880 mem->pageq.next = (queue_t) first;
1881 first = mem;
1882
1883 pg_count++;
1884 } else {
1885 mem->phys_page = vm_page_fictitious_addr;
1886 vm_page_release_fictitious(mem);
1887 }
1888 mem = nxt;
1889 }
1890 if (first) {
1891
1892 mutex_lock(&vm_page_queue_free_lock);
1893
1894 last->pageq.next = (queue_entry_t) vm_page_queue_free;
1895 vm_page_queue_free = first;
1896
1897 vm_page_free_count += pg_count;
1898
1899 if ((vm_page_free_wanted > 0) &&
1900 (vm_page_free_count >= vm_page_free_reserved)) {
1901 unsigned int available_pages;
1902
1903 if (vm_page_free_count >= vm_page_free_reserved) {
1904 available_pages = (vm_page_free_count
1905 - vm_page_free_reserved);
1906 } else {
1907 available_pages = 0;
1908 }
1909
1910 if (available_pages >= vm_page_free_wanted) {
1911 vm_page_free_wanted = 0;
1912 thread_wakeup((event_t) &vm_page_free_count);
1913 } else {
1914 while (available_pages--) {
1915 vm_page_free_wanted--;
1916 thread_wakeup_one((event_t) &vm_page_free_count);
1917 }
1918 }
1919 }
1920 mutex_unlock(&vm_page_queue_free_lock);
1921 }
1922 }
1923
1924
1925 /*
1926 * vm_page_wire:
1927 *
1928 * Mark this page as wired down by yet
1929 * another map, removing it from paging queues
1930 * as necessary.
1931 *
1932 * The page's object and the page queues must be locked.
1933 */
1934 void
1935 vm_page_wire(
1936 register vm_page_t mem)
1937 {
1938
1939 // dbgLog(current_thread(), mem->offset, mem->object, 1); /* (TEST/DEBUG) */
1940
1941 VM_PAGE_CHECK(mem);
1942 #if DEBUG
1943 if (mem->object)
1944 _mutex_assert(&mem->object->Lock, MA_OWNED);
1945 _mutex_assert(&vm_page_queue_lock, MA_OWNED);
1946 #endif
1947 if (mem->wire_count == 0) {
1948 VM_PAGE_QUEUES_REMOVE(mem);
1949 if (!mem->private && !mem->fictitious && !mem->gobbled)
1950 vm_page_wire_count++;
1951 if (mem->gobbled)
1952 vm_page_gobble_count--;
1953 mem->gobbled = FALSE;
1954 if(mem->zero_fill) {
1955 /* depends on the queues lock */
1956 vm_zf_count-=1;
1957 mem->zero_fill = FALSE;
1958 }
1959 /*
1960 * ENCRYPTED SWAP:
1961 * The page could be encrypted, but
1962 * we don't have to decrypt it here
1963 * because we don't guarantee that the
1964 * data is actually valid at this point.
1965 * The page will get decrypted in
1966 * vm_fault_wire() if needed.
1967 */
1968 }
1969 assert(!mem->gobbled);
1970 mem->wire_count++;
1971 }
1972
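/*
 * Illustrative sketch (editorial, not part of the original source):
 * a caller wiring a resident page is expected to hold both the page's
 * object lock and the page queues lock, roughly as below.
 * vm_object_lock()/vm_object_unlock() are assumed to be the usual VM
 * object locking macros; "object" and "mem" are placeholders for the
 * caller's object and page.
 */
#if 0
	vm_object_lock(object);
	vm_page_lock_queues();

	vm_page_wire(mem);		/* first wiring removes mem from the paging queues */

	vm_page_unlock_queues();
	vm_object_unlock(object);
#endif
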
1973 /*
1974 * vm_page_gobble:
1975 *
1976 * Mark this page as consumed by the vm/ipc/xmm subsystems.
1977 *
1978 * Called only for freshly vm_page_grab()ed pages - w/ nothing locked.
1979 */
1980 void
1981 vm_page_gobble(
1982 register vm_page_t mem)
1983 {
1984 vm_page_lock_queues();
1985 VM_PAGE_CHECK(mem);
1986
1987 assert(!mem->gobbled);
1988 assert(mem->wire_count == 0);
1989
1990 if (!mem->gobbled && mem->wire_count == 0) {
1991 if (!mem->private && !mem->fictitious)
1992 vm_page_wire_count++;
1993 }
1994 vm_page_gobble_count++;
1995 mem->gobbled = TRUE;
1996 vm_page_unlock_queues();
1997 }
1998
1999 /*
2000 * vm_page_unwire:
2001 *
2002 * Release one wiring of this page, potentially
2003 * enabling it to be paged again.
2004 *
2005 * The page's object and the page queues must be locked.
2006 */
2007 void
2008 vm_page_unwire(
2009 register vm_page_t mem)
2010 {
2011
2012 // dbgLog(current_thread(), mem->offset, mem->object, 0); /* (TEST/DEBUG) */
2013
2014 VM_PAGE_CHECK(mem);
2015 assert(mem->wire_count > 0);
2016 #if DEBUG
2017 if (mem->object)
2018 _mutex_assert(&mem->object->Lock, MA_OWNED);
2019 _mutex_assert(&vm_page_queue_lock, MA_OWNED);
2020 #endif
2021 if (--mem->wire_count == 0) {
2022 assert(!mem->private && !mem->fictitious);
2023 vm_page_wire_count--;
2024 assert(!mem->laundry);
2025 assert(mem->object != kernel_object);
2026 assert(mem->pageq.next == NULL && mem->pageq.prev == NULL);
2027 queue_enter(&vm_page_queue_active, mem, vm_page_t, pageq);
2028 vm_page_active_count++;
2029 mem->active = TRUE;
2030 mem->reference = TRUE;
2031 }
2032 }
2033
2034 /*
2035 * vm_page_deactivate:
2036 *
2037 * Returns the given page to the inactive list,
2038 * indicating that no physical maps have access
2039 * to this page. [Used by the physical mapping system.]
2040 *
2041 * The page queues must be locked.
2042 */
2043 void
2044 vm_page_deactivate(
2045 register vm_page_t m)
2046 {
2047 VM_PAGE_CHECK(m);
2048 assert(m->object != kernel_object);
2049
2050 // dbgLog(m->phys_page, vm_page_free_count, vm_page_wire_count, 6); /* (TEST/DEBUG) */
2051 #if DEBUG
2052 _mutex_assert(&vm_page_queue_lock, MA_OWNED);
2053 #endif
2054 /*
2055 * This page is no longer very interesting. If it was
2056 * interesting (active or inactive/referenced), then we
2057 * clear the reference bit and (re)enter it in the
2058 * inactive queue. Note that wired pages should not have
2059 * their reference bit cleared.
2060 */
2061 if (m->gobbled) { /* can this happen? */
2062 assert(m->wire_count == 0);
2063 if (!m->private && !m->fictitious)
2064 vm_page_wire_count--;
2065 vm_page_gobble_count--;
2066 m->gobbled = FALSE;
2067 }
2068 if (m->private || (m->wire_count != 0))
2069 return;
2070 if (m->active || (m->inactive && m->reference)) {
2071 if (!m->fictitious && !m->absent)
2072 pmap_clear_reference(m->phys_page);
2073 m->reference = FALSE;
2074 VM_PAGE_QUEUES_REMOVE(m);
2075 }
2076 if (m->wire_count == 0 && !m->inactive) {
2077 m->page_ticket = vm_page_ticket;
2078 vm_page_ticket_roll++;
2079
2080 if(vm_page_ticket_roll == VM_PAGE_TICKETS_IN_ROLL) {
2081 vm_page_ticket_roll = 0;
2082 if(vm_page_ticket == VM_PAGE_TICKET_ROLL_IDS)
2083 vm_page_ticket= 0;
2084 else
2085 vm_page_ticket++;
2086 }
2087
2088 assert(!m->laundry);
2089 assert(m->pageq.next == NULL && m->pageq.prev == NULL);
2090 if(m->zero_fill) {
2091 queue_enter(&vm_page_queue_zf, m, vm_page_t, pageq);
2092 } else {
2093 queue_enter(&vm_page_queue_inactive,
2094 m, vm_page_t, pageq);
2095 }
2096
2097 m->inactive = TRUE;
2098 if (!m->fictitious)
2099 vm_page_inactive_count++;
2100 }
2101 }
2102
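/*
 * Illustrative sketch (editorial, not part of the original source):
 * the physical mapping layer typically removes a page's mappings and
 * then deactivates it with only the page queues lock held.
 * pmap_page_protect() is assumed to be the usual pmap entry point for
 * revoking mappings; "m" is a placeholder for the caller's page.
 */
#if 0
	vm_page_lock_queues();
	pmap_page_protect(m->phys_page, VM_PROT_NONE);	/* drop all existing mappings */
	vm_page_deactivate(m);
	vm_page_unlock_queues();
#endif
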
2103 /*
2104 * vm_page_activate:
2105 *
2106 * Put the specified page on the active list (if appropriate).
2107 *
2108 * The page queues must be locked.
2109 */
2110
2111 void
2112 vm_page_activate(
2113 register vm_page_t m)
2114 {
2115 VM_PAGE_CHECK(m);
2116 assert(m->object != kernel_object);
2117 #if DEBUG
2118 _mutex_assert(&vm_page_queue_lock, MA_OWNED);
2119 #endif
2120 if (m->gobbled) {
2121 assert(m->wire_count == 0);
2122 if (!m->private && !m->fictitious)
2123 vm_page_wire_count--;
2124 vm_page_gobble_count--;
2125 m->gobbled = FALSE;
2126 }
2127 if (m->private)
2128 return;
2129
2130 if (m->inactive) {
2131 assert(!m->laundry);
2132 if (m->zero_fill) {
2133 queue_remove(&vm_page_queue_zf, m, vm_page_t, pageq);
2134 } else {
2135 queue_remove(&vm_page_queue_inactive,
2136 m, vm_page_t, pageq);
2137 }
2138 m->pageq.next = NULL;
2139 m->pageq.prev = NULL;
2140 if (!m->fictitious)
2141 vm_page_inactive_count--;
2142 m->inactive = FALSE;
2143 }
2144 if (m->wire_count == 0) {
2145 #if DEBUG
2146 if (m->active)
2147 panic("vm_page_activate: already active");
2148 #endif
2149 assert(!m->laundry);
2150 assert(m->pageq.next == NULL && m->pageq.prev == NULL);
2151 queue_enter(&vm_page_queue_active, m, vm_page_t, pageq);
2152 m->active = TRUE;
2153 m->reference = TRUE;
2154 if (!m->fictitious)
2155 vm_page_active_count++;
2156 }
2157 }
2158
2159 /*
2160 * vm_page_part_zero_fill:
2161 *
2162 * Zero-fill a part of the page.
2163 */
2164 void
2165 vm_page_part_zero_fill(
2166 vm_page_t m,
2167 vm_offset_t m_pa,
2168 vm_size_t len)
2169 {
2170 vm_page_t tmp;
2171
2172 VM_PAGE_CHECK(m);
2173 #ifdef PMAP_ZERO_PART_PAGE_IMPLEMENTED
2174 pmap_zero_part_page(m->phys_page, m_pa, len);
2175 #else
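	/*
	 * No pmap support for partial zeroing: grab a scratch page,
	 * zero it entirely, copy the parts of the original page that
	 * lie outside [m_pa, m_pa + len) into the scratch page, then
	 * copy the whole scratch page back over the original and free
	 * the scratch page.
	 */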
2176 while (1) {
2177 tmp = vm_page_grab();
2178 if (tmp == VM_PAGE_NULL) {
2179 vm_page_wait(THREAD_UNINT);
2180 continue;
2181 }
2182 break;
2183 }
2184 vm_page_zero_fill(tmp);
2185 if(m_pa != 0) {
2186 vm_page_part_copy(m, 0, tmp, 0, m_pa);
2187 }
2188 if((m_pa + len) < PAGE_SIZE) {
2189 vm_page_part_copy(m, m_pa + len, tmp,
2190 m_pa + len, PAGE_SIZE - (m_pa + len));
2191 }
2192 vm_page_copy(tmp,m);
2193 vm_page_lock_queues();
2194 vm_page_free(tmp);
2195 vm_page_unlock_queues();
2196 #endif
2197
2198 }
2199
2200 /*
2201 * vm_page_zero_fill:
2202 *
2203 * Zero-fill the specified page.
2204 */
2205 void
2206 vm_page_zero_fill(
2207 vm_page_t m)
2208 {
2209 XPR(XPR_VM_PAGE,
2210 "vm_page_zero_fill, object 0x%X offset 0x%X page 0x%X\n",
2211 (integer_t)m->object, (integer_t)m->offset, (integer_t)m, 0,0);
2212
2213 VM_PAGE_CHECK(m);
2214
2215 // dbgTrace(0xAEAEAEAE, m->phys_page, 0); /* (BRINGUP) */
2216 pmap_zero_page(m->phys_page);
2217 }
2218
2219 /*
2220 * vm_page_part_copy:
2221 *
2222 * Copy part of one page to another
2223 */
2224
2225 void
2226 vm_page_part_copy(
2227 vm_page_t src_m,
2228 vm_offset_t src_pa,
2229 vm_page_t dst_m,
2230 vm_offset_t dst_pa,
2231 vm_size_t len)
2232 {
2233 VM_PAGE_CHECK(src_m);
2234 VM_PAGE_CHECK(dst_m);
2235
2236 pmap_copy_part_page(src_m->phys_page, src_pa,
2237 dst_m->phys_page, dst_pa, len);
2238 }
2239
2240 /*
2241 * vm_page_copy:
2242 *
2243 * Copy one page to another
2244 *
2245 * ENCRYPTED SWAP:
2246 * The source page should not be encrypted. The caller should
2247 * make sure the page is decrypted first, if necessary.
2248 */
2249
2250 void
2251 vm_page_copy(
2252 vm_page_t src_m,
2253 vm_page_t dest_m)
2254 {
2255 XPR(XPR_VM_PAGE,
2256 "vm_page_copy, object 0x%X offset 0x%X to object 0x%X offset 0x%X\n",
2257 (integer_t)src_m->object, src_m->offset,
2258 (integer_t)dest_m->object, dest_m->offset,
2259 0);
2260
2261 VM_PAGE_CHECK(src_m);
2262 VM_PAGE_CHECK(dest_m);
2263
2264 /*
2265 * ENCRYPTED SWAP:
2266 * The source page should not be encrypted at this point.
2267 * The destination page will therefore not contain encrypted
2268 * data after the copy.
2269 */
2270 if (src_m->encrypted) {
2271 panic("vm_page_copy: source page %p is encrypted\n", src_m);
2272 }
2273 dest_m->encrypted = FALSE;
2274
2275 pmap_copy_page(src_m->phys_page, dest_m->phys_page);
2276 }
2277
2278 /*
2279 * Currently, this is a primitive allocator that grabs
2280 * free pages from the system, sorts them by physical
2281 * address, then searches for a region large enough to
2282 * satisfy the user's request.
2283 *
2284 * Additional levels of effort:
2285 * + steal clean active/inactive pages
2286 * + force pageouts of dirty pages
2287 * + maintain a map of available physical
2288 * memory
2289 */
2290
2291 #if MACH_ASSERT
2292 /*
2293 * Check that the list of pages is ordered by
2294 * ascending physical address and has no holes.
2295 */
2296 int vm_page_verify_contiguous(
2297 vm_page_t pages,
2298 unsigned int npages);
2299
2300 int
2301 vm_page_verify_contiguous(
2302 vm_page_t pages,
2303 unsigned int npages)
2304 {
2305 register vm_page_t m;
2306 unsigned int page_count;
2307 vm_offset_t prev_addr;
2308
2309 prev_addr = pages->phys_page;
2310 page_count = 1;
2311 for (m = NEXT_PAGE(pages); m != VM_PAGE_NULL; m = NEXT_PAGE(m)) {
2312 if (m->phys_page != prev_addr + 1) {
2313 printf("m 0x%x prev_addr 0x%x, current addr 0x%x\n",
2314 m, prev_addr, m->phys_page);
2315 printf("pages 0x%x page_count %d\n", pages, page_count);
2316 panic("vm_page_verify_contiguous: not contiguous!");
2317 }
2318 prev_addr = m->phys_page;
2319 ++page_count;
2320 }
2321 if (page_count != npages) {
2322 printf("pages 0x%x actual count 0x%x but requested 0x%x\n",
2323 pages, page_count, npages);
2324 panic("vm_page_verify_contiguous: count error");
2325 }
2326 return 1;
2327 }
2328 #endif /* MACH_ASSERT */
2329
2330
2331 cpm_counter(unsigned int vpfls_pages_handled = 0;)
2332 cpm_counter(unsigned int vpfls_head_insertions = 0;)
2333 cpm_counter(unsigned int vpfls_tail_insertions = 0;)
2334 cpm_counter(unsigned int vpfls_general_insertions = 0;)
2335 cpm_counter(unsigned int vpfc_failed = 0;)
2336 cpm_counter(unsigned int vpfc_satisfied = 0;)
2337
2338 /*
2339 * Find a region large enough to contain at least npages
2340 * of contiguous physical memory.
2341 *
2342 * Requirements:
2343 * - Called while holding vm_page_queue_free_lock.
2344 * - Doesn't respect vm_page_free_reserved; caller
2345 * must not ask for more pages than are legal to grab.
2346 *
2347 * Returns a pointer to a list of gobbled pages or VM_PAGE_NULL.
2348 *
2349 * Algorithm:
2350 * Loop over the free list, extracting one page at a time and
2351 * inserting those into a sorted sub-list. We stop as soon as
2352 * there's a contiguous range within the sorted list that can
2353 * satisfy the contiguous memory request. This contiguous sub-
2354 * list is chopped out of the sorted sub-list and the remainder
2355 * of the sorted sub-list is put back onto the beginning of the
2356 * free list.
2357 */
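/*
 * Worked example (editorial, not part of the original source): with the
 * free list holding pages at physical page numbers 9, 4, 7, 3, 5 and a
 * request for 3 contiguous pages, the loop pulls one page at a time into
 * the sorted sub-list (9; 4,9; 4,7,9; 3,4,7,9).  Appending page 5 extends
 * the 3-4 run to 3-4-5, satisfying the request; that run is chopped out
 * and returned, and the remainder (7, 9) is pushed back onto the front
 * of the free list.
 */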
2358 static vm_page_t
2359 vm_page_find_contiguous(
2360 unsigned int contig_pages)
2361 {
2362 vm_page_t sort_list;
2363 vm_page_t *contfirstprev, contlast;
2364 vm_page_t m, m1;
2365 ppnum_t prevcontaddr;
2366 ppnum_t nextcontaddr;
2367 unsigned int npages;
2368
2369 m = NULL;
2370 #if DEBUG
2371 _mutex_assert(&vm_page_queue_free_lock, MA_OWNED);
2372 #endif
2373 #if MACH_ASSERT
2374 /*
2375 * Verify pages in the free list.
2376 */
2377 npages = 0;
2378 for (m = vm_page_queue_free; m != VM_PAGE_NULL; m = NEXT_PAGE(m))
2379 ++npages;
2380 if (npages != vm_page_free_count)
2381 panic("vm_sort_free_list: prelim: npages %u free_count %d",
2382 npages, vm_page_free_count);
2383 #endif /* MACH_ASSERT */
2384
2385 if (contig_pages == 0 || vm_page_queue_free == VM_PAGE_NULL)
2386 return VM_PAGE_NULL;
2387
2388 #define PPNUM_PREV(x) (((x) > 0) ? ((x) - 1) : 0)
2389 #define PPNUM_NEXT(x) (((x) < PPNUM_MAX) ? ((x) + 1) : PPNUM_MAX)
2390 #define SET_NEXT_PAGE(m,n) ((m)->pageq.next = (struct queue_entry *) (n))
2391
2392 npages = 1;
2393 contfirstprev = &sort_list;
2394 contlast = sort_list = vm_page_queue_free;
2395 vm_page_queue_free = NEXT_PAGE(sort_list);
2396 SET_NEXT_PAGE(sort_list, VM_PAGE_NULL);
2397 prevcontaddr = PPNUM_PREV(sort_list->phys_page);
2398 nextcontaddr = PPNUM_NEXT(sort_list->phys_page);
2399
2400 while (npages < contig_pages &&
2401 (m = vm_page_queue_free) != VM_PAGE_NULL)
2402 {
2403 cpm_counter(++vpfls_pages_handled);
2404
2405 /* prepend to existing run? */
2406 if (m->phys_page == prevcontaddr)
2407 {
2408 vm_page_queue_free = NEXT_PAGE(m);
2409 cpm_counter(++vpfls_head_insertions);
2410 prevcontaddr = PPNUM_PREV(prevcontaddr);
2411 SET_NEXT_PAGE(m, *contfirstprev);
2412 *contfirstprev = m;
2413 npages++;
2414 continue; /* no tail expansion check needed */
2415 }
2416
2417 /* append to tail of existing run? */
2418 else if (m->phys_page == nextcontaddr)
2419 {
2420 vm_page_queue_free = NEXT_PAGE(m);
2421 cpm_counter(++vpfls_tail_insertions);
2422 nextcontaddr = PPNUM_NEXT(nextcontaddr);
2423 SET_NEXT_PAGE(m, NEXT_PAGE(contlast));
2424 SET_NEXT_PAGE(contlast, m);
2425 contlast = m;
2426 npages++;
2427 }
2428
2429 /* prepend to the very front of sorted list? */
2430 else if (m->phys_page < sort_list->phys_page)
2431 {
2432 vm_page_queue_free = NEXT_PAGE(m);
2433 cpm_counter(++vpfls_general_insertions);
2434 prevcontaddr = PPNUM_PREV(m->phys_page);
2435 nextcontaddr = PPNUM_NEXT(m->phys_page);
2436 SET_NEXT_PAGE(m, sort_list);
2437 contfirstprev = &sort_list;
2438 contlast = sort_list = m;
2439 npages = 1;
2440 }
2441
2442 else /* get to proper place for insertion */
2443 {
2444 if (m->phys_page < nextcontaddr)
2445 {
2446 prevcontaddr = PPNUM_PREV(sort_list->phys_page);
2447 nextcontaddr = PPNUM_NEXT(sort_list->phys_page);
2448 contfirstprev = &sort_list;
2449 contlast = sort_list;
2450 npages = 1;
2451 }
2452 for (m1 = NEXT_PAGE(contlast);
2453 npages < contig_pages &&
2454 m1 != VM_PAGE_NULL && m1->phys_page < m->phys_page;
2455 m1 = NEXT_PAGE(m1))
2456 {
2457 if (m1->phys_page != nextcontaddr) {
2458 prevcontaddr = PPNUM_PREV(m1->phys_page);
2459 contfirstprev = NEXT_PAGE_PTR(contlast);
2460 npages = 1;
2461 } else {
2462 npages++;
2463 }
2464 nextcontaddr = PPNUM_NEXT(m1->phys_page);
2465 contlast = m1;
2466 }
2467
2468 /*
2469 * We may actually already have enough.
2470 * This could happen if a previous prepend
2471 * joined up two runs to meet our needs.
2472 * If so, bail before we take the current
2473 * page off the free queue.
2474 */
2475 if (npages == contig_pages)
2476 break;
2477
2478 if (m->phys_page != nextcontaddr)
2479 {
2480 contfirstprev = NEXT_PAGE_PTR(contlast);
2481 prevcontaddr = PPNUM_PREV(m->phys_page);
2482 nextcontaddr = PPNUM_NEXT(m->phys_page);
2483 npages = 1;
2484 } else {
2485 nextcontaddr = PPNUM_NEXT(nextcontaddr);
2486 npages++;
2487 }
2488 vm_page_queue_free = NEXT_PAGE(m);
2489 cpm_counter(++vpfls_general_insertions);
2490 SET_NEXT_PAGE(m, NEXT_PAGE(contlast));
2491 SET_NEXT_PAGE(contlast, m);
2492 contlast = m;
2493 }
2494
2495 /* See how many pages are now contiguous after the insertion */
2496 for (m1 = NEXT_PAGE(m);
2497 npages < contig_pages &&
2498 m1 != VM_PAGE_NULL && m1->phys_page == nextcontaddr;
2499 m1 = NEXT_PAGE(m1))
2500 {
2501 nextcontaddr = PPNUM_NEXT(nextcontaddr);
2502 contlast = m1;
2503 npages++;
2504 }
2505 }
2506
2507 /* how did we do? */
2508 if (npages == contig_pages)
2509 {
2510 cpm_counter(++vpfc_satisfied);
2511
2512 /* remove the contiguous range from the sorted list */
2513 m = *contfirstprev;
2514 *contfirstprev = NEXT_PAGE(contlast);
2515 SET_NEXT_PAGE(contlast, VM_PAGE_NULL);
2516 assert(vm_page_verify_contiguous(m, npages));
2517
2518 /* inline vm_page_gobble() for each returned page */
2519 for (m1 = m; m1 != VM_PAGE_NULL; m1 = NEXT_PAGE(m1)) {
2520 assert(m1->free);
2521 assert(!m1->wanted);
2522 assert(!m1->laundry);
2523 m1->free = FALSE;
2524 m1->no_isync = TRUE;
2525 m1->gobbled = TRUE;
2526 }
2527 vm_page_wire_count += npages;
2528 vm_page_gobble_count += npages;
2529 vm_page_free_count -= npages;
2530
2531 /* stick free list at the tail of the sorted list */
2532 while ((m1 = *contfirstprev) != VM_PAGE_NULL)
2533 contfirstprev = (vm_page_t *)&m1->pageq.next;
2534 *contfirstprev = vm_page_queue_free;
2535 }
2536
2537 vm_page_queue_free = sort_list;
2538 return m;
2539 }
2540
2541 /*
2542 * Allocate a list of contiguous, wired pages.
2543 */
2544 kern_return_t
2545 cpm_allocate(
2546 vm_size_t size,
2547 vm_page_t *list,
2548 boolean_t wire)
2549 {
2550 register vm_page_t m;
2551 vm_page_t pages;
2552 unsigned int npages;
2553 unsigned int vm_pages_available;
2554 boolean_t wakeup;
2555
2556 if (size % page_size != 0)
2557 return KERN_INVALID_ARGUMENT;
2558
2559 vm_page_lock_queues();
2560 mutex_lock(&vm_page_queue_free_lock);
2561
2562 /*
2563 * Should also take active and inactive pages
2564 * into account... One day...
2565 */
2566 npages = size / page_size;
2567 vm_pages_available = vm_page_free_count - vm_page_free_reserved;
2568
2569 if (npages > vm_pages_available) {
2570 mutex_unlock(&vm_page_queue_free_lock);
2571 vm_page_unlock_queues();
2572 return KERN_RESOURCE_SHORTAGE;
2573 }
2574
2575 /*
2576 * Obtain a pointer to a subset of the free
2577 * list large enough to satisfy the request;
2578 * the region will be physically contiguous.
2579 */
2580 pages = vm_page_find_contiguous(npages);
2581
2582 /* adjust global freelist counts and determine need for wakeups */
2583 if (vm_page_free_count < vm_page_free_count_minimum)
2584 vm_page_free_count_minimum = vm_page_free_count;
2585
2586 wakeup = ((vm_page_free_count < vm_page_free_min) ||
2587 ((vm_page_free_count < vm_page_free_target) &&
2588 (vm_page_inactive_count < vm_page_inactive_target)));
2589
2590 mutex_unlock(&vm_page_queue_free_lock);
2591
2592 if (pages == VM_PAGE_NULL) {
2593 vm_page_unlock_queues();
2594 return KERN_NO_SPACE;
2595 }
2596
2597 /*
2598 * Walk the returned list, wiring the pages.
2599 */
2600 if (wire == TRUE)
2601 for (m = pages; m != VM_PAGE_NULL; m = NEXT_PAGE(m)) {
2602 /*
2603 * Essentially inlined vm_page_wire.
2604 */
2605 assert(!m->active);
2606 assert(!m->inactive);
2607 assert(!m->private);
2608 assert(!m->fictitious);
2609 assert(m->wire_count == 0);
2610 assert(m->gobbled);
2611 m->gobbled = FALSE;
2612 m->wire_count++;
2613 --vm_page_gobble_count;
2614 }
2615 vm_page_unlock_queues();
2616
2617 if (wakeup)
2618 thread_wakeup((event_t) &vm_page_free_wanted);
2619
2620 /*
2621 * The CPM pages should now be available and
2622 * ordered by ascending physical address.
2623 */
2624 assert(vm_page_verify_contiguous(pages, npages));
2625
2626 *list = pages;
2627 return KERN_SUCCESS;
2628 }
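
/*
 * Illustrative sketch (editorial, not part of the original source): a
 * typical in-kernel caller asks for a physically contiguous, wired run
 * and then walks the returned chain with NEXT_PAGE().  The size must be
 * a multiple of page_size.
 */
#if 0
	vm_page_t	pages;
	vm_page_t	m;
	kern_return_t	kr;

	kr = cpm_allocate((vm_size_t)(4 * page_size), &pages, TRUE);
	if (kr == KERN_SUCCESS) {
		for (m = pages; m != VM_PAGE_NULL; m = NEXT_PAGE(m)) {
			/* m->phys_page increases by one on each iteration */
		}
	}
#endif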
2629
2630
2631 #include <mach_vm_debug.h>
2632 #if MACH_VM_DEBUG
2633
2634 #include <mach_debug/hash_info.h>
2635 #include <vm/vm_debug.h>
2636
2637 /*
2638 * Routine: vm_page_info
2639 * Purpose:
2640 * Return information about the global VP table.
2641 * Fills the buffer with as much information as possible
2642 * and returns the desired size of the buffer.
2643 * Conditions:
2644 * Nothing locked. The caller should provide
2645 * possibly-pageable memory.
2646 */
2647
2648 unsigned int
2649 vm_page_info(
2650 hash_info_bucket_t *info,
2651 unsigned int count)
2652 {
2653 unsigned int i;
2654
2655 if (vm_page_bucket_count < count)
2656 count = vm_page_bucket_count;
2657
2658 for (i = 0; i < count; i++) {
2659 vm_page_bucket_t *bucket = &vm_page_buckets[i];
2660 unsigned int bucket_count = 0;
2661 vm_page_t m;
2662
2663 simple_lock(&vm_page_bucket_lock);
2664 for (m = bucket->pages; m != VM_PAGE_NULL; m = m->next)
2665 bucket_count++;
2666 simple_unlock(&vm_page_bucket_lock);
2667
2668 /* don't touch pageable memory while holding locks */
2669 info[i].hib_count = bucket_count;
2670 }
2671
2672 return vm_page_bucket_count;
2673 }
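
/*
 * Illustrative sketch (editorial, not part of the original source):
 * a caller supplies a hash_info_bucket_t buffer and gets back the total
 * number of buckets, which may exceed the count it passed in; in that
 * case it would retry with a larger buffer.
 */
#if 0
	hash_info_bucket_t	info[64];
	unsigned int		total;

	total = vm_page_info(info, 64);		/* fills at most 64 entries */
	if (total > 64) {
		/* retry with a buffer of "total" entries */
	}
#endif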
2674 #endif /* MACH_VM_DEBUG */
2675
2676 #include <mach_kdb.h>
2677 #if MACH_KDB
2678
2679 #include <ddb/db_output.h>
2680 #include <vm/vm_print.h>
2681 #define printf kdbprintf
2682
2683 /*
2684 * Routine: vm_page_print [exported]
2685 */
2686 void
2687 vm_page_print(
2688 db_addr_t db_addr)
2689 {
2690 vm_page_t p;
2691
2692 p = (vm_page_t) (long) db_addr;
2693
2694 iprintf("page 0x%x\n", p);
2695
2696 db_indent += 2;
2697
2698 iprintf("object=0x%x", p->object);
2699 printf(", offset=0x%x", p->offset);
2700 printf(", wire_count=%d", p->wire_count);
2701
2702 iprintf("%sinactive, %sactive, %sgobbled, %slaundry, %sfree, %sref, %sencrypted\n",
2703 (p->inactive ? "" : "!"),
2704 (p->active ? "" : "!"),
2705 (p->gobbled ? "" : "!"),
2706 (p->laundry ? "" : "!"),
2707 (p->free ? "" : "!"),
2708 (p->reference ? "" : "!"),
2709 (p->encrypted ? "" : "!"));
2710 iprintf("%sbusy, %swanted, %stabled, %sfictitious, %sprivate, %sprecious\n",
2711 (p->busy ? "" : "!"),
2712 (p->wanted ? "" : "!"),
2713 (p->tabled ? "" : "!"),
2714 (p->fictitious ? "" : "!"),
2715 (p->private ? "" : "!"),
2716 (p->precious ? "" : "!"));
2717 iprintf("%sabsent, %serror, %sdirty, %scleaning, %spageout, %sclustered\n",
2718 (p->absent ? "" : "!"),
2719 (p->error ? "" : "!"),
2720 (p->dirty ? "" : "!"),
2721 (p->cleaning ? "" : "!"),
2722 (p->pageout ? "" : "!"),
2723 (p->clustered ? "" : "!"));
2724 iprintf("%slock_supplied, %soverwriting, %srestart, %sunusual\n",
2725 (p->lock_supplied ? "" : "!"),
2726 (p->overwriting ? "" : "!"),
2727 (p->restart ? "" : "!"),
2728 (p->unusual ? "" : "!"));
2729
2730 iprintf("phys_page=0x%x", p->phys_page);
2731 printf(", page_error=0x%x", p->page_error);
2732 printf(", page_lock=0x%x", p->page_lock);
2733 printf(", unlock_request=%d\n", p->unlock_request);
2734
2735 db_indent -= 2;
2736 }
2737 #endif /* MACH_KDB */