osfmk/vm/vm_resident.c (apple/xnu, xnu-517.7.7)
1 /*
2 * Copyright (c) 2000-2003 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * The contents of this file constitute Original Code as defined in and
7 * are subject to the Apple Public Source License Version 1.1 (the
8 * "License"). You may not use this file except in compliance with the
9 * License. Please obtain a copy of the License at
10 * http://www.apple.com/publicsource and read it before using this file.
11 *
12 * This Original Code and all software distributed under the License are
13 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
14 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
15 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
17 * License for the specific language governing rights and limitations
18 * under the License.
19 *
20 * @APPLE_LICENSE_HEADER_END@
21 */
22 /*
23 * @OSF_COPYRIGHT@
24 */
25 /*
26 * Mach Operating System
27 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
28 * All Rights Reserved.
29 *
30 * Permission to use, copy, modify and distribute this software and its
31 * documentation is hereby granted, provided that both the copyright
32 * notice and this permission notice appear in all copies of the
33 * software, derivative works or modified versions, and any portions
34 * thereof, and that both notices appear in supporting documentation.
35 *
36 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
37 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
38 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
39 *
40 * Carnegie Mellon requests users of this software to return to
41 *
42 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
43 * School of Computer Science
44 * Carnegie Mellon University
45 * Pittsburgh PA 15213-3890
46 *
47 * any improvements or extensions that they make and grant Carnegie Mellon
48 * the rights to redistribute these changes.
49 */
50 /*
51 */
52 /*
53 * File: vm/vm_page.c
54 * Author: Avadis Tevanian, Jr., Michael Wayne Young
55 *
56 * Resident memory management module.
57 */
58
59 #include <mach/clock_types.h>
60 #include <mach/vm_prot.h>
61 #include <mach/vm_statistics.h>
62 #include <kern/counters.h>
63 #include <kern/sched_prim.h>
64 #include <kern/task.h>
65 #include <kern/thread.h>
66 #include <kern/zalloc.h>
67 #include <kern/xpr.h>
68 #include <vm/pmap.h>
69 #include <vm/vm_init.h>
70 #include <vm/vm_map.h>
71 #include <vm/vm_page.h>
72 #include <vm/vm_pageout.h>
73 #include <vm/vm_kern.h> /* kernel_memory_allocate() */
74 #include <kern/misc_protos.h>
75 #include <zone_debug.h>
76 #include <vm/cpm.h>
77 #include <ppc/mappings.h> /* (BRINGUP) */
78 #include <pexpert/pexpert.h> /* (BRINGUP) */
79
80
81 /* Variables used to indicate the relative age of pages in the
82 * inactive list
83 */
84
85 int vm_page_ticket_roll = 0;
86 int vm_page_ticket = 0;
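/*
 * These two counters implement the "relative age" mentioned above:
 * each page entering the inactive list is stamped with the current
 * vm_page_ticket (see vm_page_deactivate below), while vm_page_ticket_roll
 * counts deactivations, advancing the ticket every VM_PAGE_TICKETS_IN_ROLL
 * pages and wrapping after VM_PAGE_TICKET_ROLL_IDS values.
 */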
87 /*
 88  * Associated with each page of user-allocatable memory is a
89 * page structure.
90 */
91
92 /*
93 * These variables record the values returned by vm_page_bootstrap,
94 * for debugging purposes. The implementation of pmap_steal_memory
95 * and pmap_startup here also uses them internally.
96 */
97
98 vm_offset_t virtual_space_start;
99 vm_offset_t virtual_space_end;
100 int vm_page_pages;
101
102 /*
103 * The vm_page_lookup() routine, which provides for fast
104 * (virtual memory object, offset) to page lookup, employs
105 * the following hash table. The vm_page_{insert,remove}
106 * routines install and remove associations in the table.
107 * [This table is often called the virtual-to-physical,
108 * or VP, table.]
109 */
110 typedef struct {
111 vm_page_t pages;
112 #if MACH_PAGE_HASH_STATS
113 int cur_count; /* current count */
114 int hi_count; /* high water mark */
115 #endif /* MACH_PAGE_HASH_STATS */
116 } vm_page_bucket_t;
117
118 vm_page_bucket_t *vm_page_buckets; /* Array of buckets */
119 unsigned int vm_page_bucket_count = 0; /* How big is array? */
120 unsigned int vm_page_hash_mask; /* Mask for hash function */
121 unsigned int vm_page_hash_shift; /* Shift for hash function */
122 uint32_t vm_page_bucket_hash; /* Basic bucket hash */
123 decl_simple_lock_data(,vm_page_bucket_lock)
124
125 #if MACH_PAGE_HASH_STATS
126 /* This routine is for debugging only.  It is intended to be called by
127 * hand by a developer using a kernel debugger. This routine prints
128 * out vm_page_hash table statistics to the kernel debug console.
129 */
130 void
131 hash_debug(void)
132 {
133 int i;
134 int numbuckets = 0;
135 int highsum = 0;
136 int maxdepth = 0;
137
138 for (i = 0; i < vm_page_bucket_count; i++) {
139 if (vm_page_buckets[i].hi_count) {
140 numbuckets++;
141 highsum += vm_page_buckets[i].hi_count;
142 if (vm_page_buckets[i].hi_count > maxdepth)
143 maxdepth = vm_page_buckets[i].hi_count;
144 }
145 }
146 printf("Total number of buckets: %d\n", vm_page_bucket_count);
147 printf("Number used buckets: %d = %d%%\n",
148 numbuckets, 100*numbuckets/vm_page_bucket_count);
149 printf("Number unused buckets: %d = %d%%\n",
150 vm_page_bucket_count - numbuckets,
151 100*(vm_page_bucket_count-numbuckets)/vm_page_bucket_count);
152 printf("Sum of bucket max depth: %d\n", highsum);
153 printf("Average bucket depth: %d.%2d\n",
154 highsum/vm_page_bucket_count,
155 highsum%vm_page_bucket_count);
156 printf("Maximum bucket depth: %d\n", maxdepth);
157 }
158 #endif /* MACH_PAGE_HASH_STATS */
159
160 /*
161 * The virtual page size is currently implemented as a runtime
162 * variable, but is constant once initialized using vm_set_page_size.
163 * This initialization must be done in the machine-dependent
164 * bootstrap sequence, before calling other machine-independent
165 * initializations.
166 *
167 * All references to the virtual page size outside this
168 * module must use the PAGE_SIZE, PAGE_MASK and PAGE_SHIFT
169 * constants.
170 */
171 #ifndef PAGE_SIZE_FIXED
172 vm_size_t page_size = 4096;
173 vm_size_t page_mask = 4095;
174 int page_shift = 12;
175 #else
176 vm_size_t page_size = PAGE_SIZE;
177 vm_size_t page_mask = PAGE_MASK;
178 int page_shift = PAGE_SHIFT;
179 #endif /* PAGE_SIZE_FIXED */
180
181 /*
182 * Resident page structures are initialized from
183 * a template (see vm_page_alloc).
184 *
185 * When adding a new field to the virtual memory
186 * object structure, be sure to add initialization
187 * (see vm_page_bootstrap).
188 */
189 struct vm_page vm_page_template;
190
191 /*
192 * Resident pages that represent real memory
193 * are allocated from a free list.
194 */
195 vm_page_t vm_page_queue_free;
196 vm_page_t vm_page_queue_fictitious;
197 decl_mutex_data(,vm_page_queue_free_lock)
198 unsigned int vm_page_free_wanted;
199 int vm_page_free_count;
200 int vm_page_fictitious_count;
201
202 unsigned int vm_page_free_count_minimum; /* debugging */
203
204 /*
205 * Occasionally, the virtual memory system uses
206 * resident page structures that do not refer to
207 * real pages, for example to leave a page with
208 * important state information in the VP table.
209 *
210 * These page structures are allocated the way
211 * most other kernel structures are.
212 */
213 zone_t vm_page_zone;
214 decl_mutex_data(,vm_page_alloc_lock)
215 unsigned int io_throttle_zero_fill;
216 decl_mutex_data(,vm_page_zero_fill_lock)
217
218 /*
219 * Fictitious pages don't have a physical address,
220 * but we must initialize phys_page to something.
221 * For debugging, this should be a strange value
222 * that the pmap module can recognize in assertions.
223 */
224 vm_offset_t vm_page_fictitious_addr = (vm_offset_t) -1;
225
226 /*
227 * Resident page structures are also chained on
228 * queues that are used by the page replacement
229 * system (pageout daemon). These queues are
230 * defined here, but are shared by the pageout
231 * module. The inactive queue is broken into
232 * inactive and zf for convenience as the
233  *	pageout daemon often assigns a higher
234  *	affinity to zf pages.
235 */
236 queue_head_t vm_page_queue_active;
237 queue_head_t vm_page_queue_inactive;
238 queue_head_t vm_page_queue_zf;
239 decl_mutex_data(,vm_page_queue_lock)
240 int vm_page_active_count;
241 int vm_page_inactive_count;
242 int vm_page_wire_count;
243 int vm_page_gobble_count = 0;
244 int vm_page_wire_count_warning = 0;
245 int vm_page_gobble_count_warning = 0;
246
247 /* the following fields are protected by the vm_page_queue_lock */
248 queue_head_t vm_page_queue_limbo;
249 int vm_page_limbo_count = 0; /* total pages in limbo */
250 int vm_page_limbo_real_count = 0; /* real pages in limbo */
251 int vm_page_pin_count = 0; /* number of pinned pages */
252
253 decl_simple_lock_data(,vm_page_preppin_lock)
254
255 /*
256 * Several page replacement parameters are also
257 * shared with this module, so that page allocation
258 * (done here in vm_page_alloc) can trigger the
259 * pageout daemon.
260 */
261 int vm_page_free_target = 0;
262 int vm_page_free_min = 0;
263 int vm_page_inactive_target = 0;
264 int vm_page_free_reserved = 0;
265 int vm_page_laundry_count = 0;
266 int vm_page_burst_count = 0;
267 int vm_page_throttled_count = 0;
268
269 /*
270 * The VM system has a couple of heuristics for deciding
271 * that pages are "uninteresting" and should be placed
272 * on the inactive queue as likely candidates for replacement.
273 * These variables let the heuristics be controlled at run-time
274 * to make experimentation easier.
275 */
276
277 boolean_t vm_page_deactivate_hint = TRUE;
278
279 /*
280 * vm_set_page_size:
281 *
282 * Sets the page size, perhaps based upon the memory
283 * size. Must be called before any use of page-size
284 * dependent functions.
285 *
286 * Sets page_shift and page_mask from page_size.
287 */
288 void
289 vm_set_page_size(void)
290 {
291 #ifndef PAGE_SIZE_FIXED
292 page_mask = page_size - 1;
293
294 if ((page_mask & page_size) != 0)
295 panic("vm_set_page_size: page size not a power of two");
296
297 for (page_shift = 0; ; page_shift++)
298 if ((1 << page_shift) == page_size)
299 break;
300 #endif /* PAGE_SIZE_FIXED */
301 }
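/*
 * For example, with the default page_size of 4096 this leaves
 * page_mask = 0xFFF and page_shift = 12.  The power-of-two check
 * works because a power of two shares no set bits with its
 * predecessor, so (page_mask & page_size) is zero exactly when
 * page_size is a power of two.
 */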
302
303 /*
304 * vm_page_bootstrap:
305 *
306 * Initializes the resident memory module.
307 *
308 * Allocates memory for the page cells, and
309 * for the object/offset-to-page hash table headers.
310 * Each page cell is initialized and placed on the free list.
311 * Returns the range of available kernel virtual memory.
312 */
313
314 void
315 vm_page_bootstrap(
316 vm_offset_t *startp,
317 vm_offset_t *endp)
318 {
319 register vm_page_t m;
320 int i;
321 unsigned int log1;
322 unsigned int log2;
323 unsigned int size;
324
325 /*
326 * Initialize the vm_page template.
327 */
328
329 m = &vm_page_template;
330 m->object = VM_OBJECT_NULL; /* reset later */
331 m->offset = 0; /* reset later */
332 m->wire_count = 0;
333
334 m->inactive = FALSE;
335 m->active = FALSE;
336 m->laundry = FALSE;
337 m->free = FALSE;
338 m->no_isync = TRUE;
339 m->reference = FALSE;
340 m->pageout = FALSE;
341 m->dump_cleaning = FALSE;
342 m->list_req_pending = FALSE;
343
344 m->busy = TRUE;
345 m->wanted = FALSE;
346 m->tabled = FALSE;
347 m->fictitious = FALSE;
348 m->private = FALSE;
349 m->absent = FALSE;
350 m->error = FALSE;
351 m->dirty = FALSE;
352 m->cleaning = FALSE;
353 m->precious = FALSE;
354 m->clustered = FALSE;
355 m->lock_supplied = FALSE;
356 m->unusual = FALSE;
357 m->restart = FALSE;
358 m->zero_fill = FALSE;
359
360 m->phys_page = 0; /* reset later */
361
362 m->page_lock = VM_PROT_NONE;
363 m->unlock_request = VM_PROT_NONE;
364 m->page_error = KERN_SUCCESS;
365
366 /*
367 * Initialize the page queues.
368 */
369
370 mutex_init(&vm_page_queue_free_lock, ETAP_VM_PAGEQ_FREE);
371 mutex_init(&vm_page_queue_lock, ETAP_VM_PAGEQ);
372 simple_lock_init(&vm_page_preppin_lock, ETAP_VM_PREPPIN);
373
374 vm_page_queue_free = VM_PAGE_NULL;
375 vm_page_queue_fictitious = VM_PAGE_NULL;
376 queue_init(&vm_page_queue_active);
377 queue_init(&vm_page_queue_inactive);
378 queue_init(&vm_page_queue_zf);
379 queue_init(&vm_page_queue_limbo);
380
381 vm_page_free_wanted = 0;
382
383 /*
384 * Steal memory for the map and zone subsystems.
385 */
386
387 vm_map_steal_memory();
388 zone_steal_memory();
389
390 /*
391 * Allocate (and initialize) the virtual-to-physical
392 * table hash buckets.
393 *
394 * The number of buckets should be a power of two to
395 * get a good hash function. The following computation
396  *	chooses the first power of two that is greater than
397  *	or equal to the number of physical pages in the system.
398 */
399
400 simple_lock_init(&vm_page_bucket_lock, ETAP_VM_BUCKET);
401
402 if (vm_page_bucket_count == 0) {
403 unsigned int npages = pmap_free_pages();
404
405 vm_page_bucket_count = 1;
406 while (vm_page_bucket_count < npages)
407 vm_page_bucket_count <<= 1;
408 }
409
410 vm_page_hash_mask = vm_page_bucket_count - 1;
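	/*
	 * Worked example (illustrative figures only): if pmap_free_pages()
	 * reports 100,000 physical pages, the loop above stops at
	 * vm_page_bucket_count = 131072 (2^17), the first power of two not
	 * below that count, and vm_page_hash_mask becomes 0x1FFFF.
	 */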
411
412 /*
413 * Calculate object shift value for hashing algorithm:
414 * O = log2(sizeof(struct vm_object))
415 * B = log2(vm_page_bucket_count)
416 * hash shifts the object left by
417  *		B/2 - O + 1
418 */
419 size = vm_page_bucket_count;
420 for (log1 = 0; size > 1; log1++)
421 size /= 2;
422 size = sizeof(struct vm_object);
423 for (log2 = 0; size > 1; log2++)
424 size /= 2;
425 vm_page_hash_shift = log1/2 - log2 + 1;
426
427 vm_page_bucket_hash = 1 << ((log1 + 1) >> 1); /* Get (ceiling of sqrt of table size) */
428 vm_page_bucket_hash |= 1 << ((log1 + 1) >> 2); /* Get (ceiling of quadroot of table size) */
429 	vm_page_bucket_hash |= 1;				/* Force the low bit on - it must always be 1 to ensure a unique series */
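	/*
	 * Worked example (illustrative only): for a table of 65536 buckets,
	 * log1 is 16, so vm_page_bucket_hash = (1 << 8) | (1 << 4) | 1 = 0x111:
	 * roughly the square root (0x100) and fourth root (0x10) of the table
	 * size, with the low bit forced on so the multiplier used in
	 * vm_page_hash() below is always odd.
	 */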
430
431 if (vm_page_hash_mask & vm_page_bucket_count)
432 printf("vm_page_bootstrap: WARNING -- strange page hash\n");
433
434 vm_page_buckets = (vm_page_bucket_t *)
435 pmap_steal_memory(vm_page_bucket_count *
436 sizeof(vm_page_bucket_t));
437
438 for (i = 0; i < vm_page_bucket_count; i++) {
439 register vm_page_bucket_t *bucket = &vm_page_buckets[i];
440
441 bucket->pages = VM_PAGE_NULL;
442 #if MACH_PAGE_HASH_STATS
443 bucket->cur_count = 0;
444 bucket->hi_count = 0;
445 #endif /* MACH_PAGE_HASH_STATS */
446 }
447
448 /*
449 * Machine-dependent code allocates the resident page table.
450 * It uses vm_page_init to initialize the page frames.
451 * The code also returns to us the virtual space available
452 * to the kernel. We don't trust the pmap module
453 * to get the alignment right.
454 */
455
456 pmap_startup(&virtual_space_start, &virtual_space_end);
457 virtual_space_start = round_page_32(virtual_space_start);
458 virtual_space_end = trunc_page_32(virtual_space_end);
459
460 *startp = virtual_space_start;
461 *endp = virtual_space_end;
462
463 /*
464 * Compute the initial "wire" count.
465 * Up until now, the pages which have been set aside are not under
466 * the VM system's control, so although they aren't explicitly
467 * wired, they nonetheless can't be moved. At this moment,
468 * all VM managed pages are "free", courtesy of pmap_startup.
469 */
470 vm_page_wire_count = atop_64(max_mem) - vm_page_free_count; /* initial value */
471
472 printf("vm_page_bootstrap: %d free pages\n", vm_page_free_count);
473 vm_page_free_count_minimum = vm_page_free_count;
474 }
475
476 #ifndef MACHINE_PAGES
477 /*
478 * We implement pmap_steal_memory and pmap_startup with the help
479 * of two simpler functions, pmap_virtual_space and pmap_next_page.
480 */
481
482 vm_offset_t
483 pmap_steal_memory(
484 vm_size_t size)
485 {
486 vm_offset_t addr, vaddr;
487 ppnum_t phys_page;
488
489 /*
490 	 *	Round the size up to a multiple of the pointer size.
491 */
492
493 size = (size + sizeof (void *) - 1) &~ (sizeof (void *) - 1);
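	/*
	 * e.g. with 4-byte pointers a request for 10 bytes is rounded up to
	 * 12, and with 8-byte pointers to 16, so that successive allocations
	 * stay pointer-aligned.
	 */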
494
495 /*
496 * If this is the first call to pmap_steal_memory,
497 * we have to initialize ourself.
498 */
499
500 if (virtual_space_start == virtual_space_end) {
501 pmap_virtual_space(&virtual_space_start, &virtual_space_end);
502
503 /*
504 * The initial values must be aligned properly, and
505 * we don't trust the pmap module to do it right.
506 */
507
508 virtual_space_start = round_page_32(virtual_space_start);
509 virtual_space_end = trunc_page_32(virtual_space_end);
510 }
511
512 /*
513 * Allocate virtual memory for this request.
514 */
515
516 addr = virtual_space_start;
517 virtual_space_start += size;
518
519 kprintf("pmap_steal_memory: %08X - %08X; size=%08X\n", addr, virtual_space_start, size); /* (TEST/DEBUG) */
520
521 /*
522 * Allocate and map physical pages to back new virtual pages.
523 */
524
525 for (vaddr = round_page_32(addr);
526 vaddr < addr + size;
527 vaddr += PAGE_SIZE) {
528 if (!pmap_next_page(&phys_page))
529 panic("pmap_steal_memory");
530
531 /*
532 * XXX Logically, these mappings should be wired,
533 * but some pmap modules barf if they are.
534 */
535
536 pmap_enter(kernel_pmap, vaddr, phys_page,
537 VM_PROT_READ|VM_PROT_WRITE,
538 VM_WIMG_USE_DEFAULT, FALSE);
539 /*
540 * Account for newly stolen memory
541 */
542 vm_page_wire_count++;
543
544 }
545
546 return addr;
547 }
548
549 void
550 pmap_startup(
551 vm_offset_t *startp,
552 vm_offset_t *endp)
553 {
554 unsigned int i, npages, pages_initialized, fill, fillval;
555 vm_page_t pages;
556 ppnum_t phys_page;
557 addr64_t tmpaddr;
558
559 /*
560 * We calculate how many page frames we will have
561 * and then allocate the page structures in one chunk.
562 */
563
564 tmpaddr = (addr64_t)pmap_free_pages() * (addr64_t)PAGE_SIZE; /* Get the amount of memory left */
565 tmpaddr = tmpaddr + (addr64_t)(round_page_32(virtual_space_start) - virtual_space_start); /* Account for any slop */
566 	npages = (unsigned int)(tmpaddr / (addr64_t)(PAGE_SIZE + sizeof(*pages)));	/* Figure how many pages we can manage, allowing for the space the vm_page_ts themselves consume */
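	/*
	 * Each managed page costs PAGE_SIZE bytes of physical memory plus
	 * sizeof(struct vm_page) for its descriptor, which is why the
	 * remaining memory is divided by their sum.  As a rough illustration
	 * (assuming, hypothetically, a 64-byte struct vm_page and 4K pages),
	 * 64MB of remaining memory yields about 16,131 manageable pages
	 * rather than the raw 16,384.
	 */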
567
568 pages = (vm_page_t) pmap_steal_memory(npages * sizeof *pages);
569
570 /*
571 * Initialize the page frames.
572 */
573
574 for (i = 0, pages_initialized = 0; i < npages; i++) {
575 if (!pmap_next_page(&phys_page))
576 break;
577
578 vm_page_init(&pages[i], phys_page);
579 vm_page_pages++;
580 pages_initialized++;
581 }
582
583 /*
584 * Release pages in reverse order so that physical pages
585 * initially get allocated in ascending addresses. This keeps
586 * the devices (which must address physical memory) happy if
587 * they require several consecutive pages.
588 */
589
590 /*
591 * Check if we want to initialize pages to a known value
592 */
593
594 fill = 0; /* Assume no fill */
595 if (PE_parse_boot_arg("fill", &fillval)) fill = 1; /* Set fill */
596
597 for (i = pages_initialized; i > 0; i--) {
598 extern void fillPage(ppnum_t phys_page, unsigned int fillval);
599 		if(fill) fillPage(pages[i - 1].phys_page, fillval);		/* Fill the page with a known value if requested at boot */			
600 vm_page_release(&pages[i - 1]);
601 }
602
603 #if 0
604 {
605 vm_page_t xx, xxo, xxl;
606 int j, k, l;
607
608 j = 0; /* (BRINGUP) */
609 xxl = 0;
610
611 for(xx = vm_page_queue_free; xx; xxl = xx, xx = xx->pageq.next) { /* (BRINGUP) */
612 j++; /* (BRINGUP) */
613 if(j > vm_page_free_count) { /* (BRINGUP) */
614 panic("pmap_startup: too many pages, xx = %08X, xxl = %08X\n", xx, xxl);
615 }
616
617 l = vm_page_free_count - j; /* (BRINGUP) */
618 k = 0; /* (BRINGUP) */
619
620 if(((j - 1) & 0xFFFF) == 0) kprintf("checking number %d of %d\n", j, vm_page_free_count);
621
622 for(xxo = xx->pageq.next; xxo; xxo = xxo->pageq.next) { /* (BRINGUP) */
623 k++;
624 if(k > l) panic("pmap_startup: too many in secondary check %d %d\n", k, l);
625 if((xx->phys_page & 0xFFFFFFFF) == (xxo->phys_page & 0xFFFFFFFF)) { /* (BRINGUP) */
626 panic("pmap_startup: duplicate physaddr, xx = %08X, xxo = %08X\n", xx, xxo);
627 }
628 }
629 }
630
631 if(j != vm_page_free_count) { /* (BRINGUP) */
632 panic("pmap_startup: vm_page_free_count does not match, calc = %d, vm_page_free_count = %08X\n", j, vm_page_free_count);
633 }
634 }
635 #endif
636
637
638 /*
639 * We have to re-align virtual_space_start,
640 * because pmap_steal_memory has been using it.
641 */
642
643 virtual_space_start = round_page_32(virtual_space_start);
644
645 *startp = virtual_space_start;
646 *endp = virtual_space_end;
647 }
648 #endif /* MACHINE_PAGES */
649
650 /*
651 * Routine: vm_page_module_init
652 * Purpose:
653 * Second initialization pass, to be done after
654 * the basic VM system is ready.
655 */
656 void
657 vm_page_module_init(void)
658 {
659 vm_page_zone = zinit((vm_size_t) sizeof(struct vm_page),
660 0, PAGE_SIZE, "vm pages");
661
662 #if ZONE_DEBUG
663 zone_debug_disable(vm_page_zone);
664 #endif /* ZONE_DEBUG */
665
666 zone_change(vm_page_zone, Z_EXPAND, FALSE);
667 zone_change(vm_page_zone, Z_EXHAUST, TRUE);
668 zone_change(vm_page_zone, Z_FOREIGN, TRUE);
669
670 /*
671 * Adjust zone statistics to account for the real pages allocated
672 * in vm_page_create(). [Q: is this really what we want?]
673 */
674 vm_page_zone->count += vm_page_pages;
675 vm_page_zone->cur_size += vm_page_pages * vm_page_zone->elem_size;
676
677 mutex_init(&vm_page_alloc_lock, ETAP_VM_PAGE_ALLOC);
678 mutex_init(&vm_page_zero_fill_lock, ETAP_VM_PAGE_ALLOC);
679 }
680
681 /*
682 * Routine: vm_page_create
683 * Purpose:
684 * After the VM system is up, machine-dependent code
685 * may stumble across more physical memory. For example,
686 * memory that it was reserving for a frame buffer.
687 * vm_page_create turns this memory into available pages.
688 */
689
690 void
691 vm_page_create(
692 ppnum_t start,
693 ppnum_t end)
694 {
695 ppnum_t phys_page;
696 vm_page_t m;
697
698 for (phys_page = start;
699 phys_page < end;
700 phys_page++) {
701 while ((m = (vm_page_t) vm_page_grab_fictitious())
702 == VM_PAGE_NULL)
703 vm_page_more_fictitious();
704
705 vm_page_init(m, phys_page);
706 vm_page_pages++;
707 vm_page_release(m);
708 }
709 }
710
711 /*
712 * vm_page_hash:
713 *
714 * Distributes the object/offset key pair among hash buckets.
715 *
716 * NOTE: The bucket count must be a power of 2
717 */
718 #define vm_page_hash(object, offset) (\
719 ( (natural_t)((uint32_t)object * vm_page_bucket_hash) + ((uint32_t)atop_64(offset) ^ vm_page_bucket_hash))\
720 & vm_page_hash_mask)
721
722 /*
723 * vm_page_insert: [ internal use only ]
724 *
725 * Inserts the given mem entry into the object/object-page
726 * table and object list.
727 *
728 * The object must be locked.
729 */
730
731 void
732 vm_page_insert(
733 register vm_page_t mem,
734 register vm_object_t object,
735 register vm_object_offset_t offset)
736 {
737 register vm_page_bucket_t *bucket;
738
739 XPR(XPR_VM_PAGE,
740 "vm_page_insert, object 0x%X offset 0x%X page 0x%X\n",
741 (integer_t)object, (integer_t)offset, (integer_t)mem, 0,0);
742
743 VM_PAGE_CHECK(mem);
744
745 if (mem->tabled)
746 panic("vm_page_insert");
747
748 assert(!object->internal || offset < object->size);
749
750 /* only insert "pageout" pages into "pageout" objects,
751 * and normal pages into normal objects */
752 assert(object->pageout == mem->pageout);
753
754 /*
755 * Record the object/offset pair in this page
756 */
757
758 mem->object = object;
759 mem->offset = offset;
760
761 /*
762 * Insert it into the object_object/offset hash table
763 */
764
765 bucket = &vm_page_buckets[vm_page_hash(object, offset)];
766 simple_lock(&vm_page_bucket_lock);
767 mem->next = bucket->pages;
768 bucket->pages = mem;
769 #if MACH_PAGE_HASH_STATS
770 if (++bucket->cur_count > bucket->hi_count)
771 bucket->hi_count = bucket->cur_count;
772 #endif /* MACH_PAGE_HASH_STATS */
773 simple_unlock(&vm_page_bucket_lock);
774
775 /*
776 * Now link into the object's list of backed pages.
777 */
778
779 queue_enter(&object->memq, mem, vm_page_t, listq);
780 mem->tabled = TRUE;
781
782 /*
783 * Show that the object has one more resident page.
784 */
785
786 object->resident_page_count++;
787 }
788
789 /*
790 * vm_page_replace:
791 *
792 * Exactly like vm_page_insert, except that we first
793 * remove any existing page at the given offset in object.
794 *
795 * The object and page queues must be locked.
796 */
797
798 void
799 vm_page_replace(
800 register vm_page_t mem,
801 register vm_object_t object,
802 register vm_object_offset_t offset)
803 {
804 register vm_page_bucket_t *bucket;
805
806 VM_PAGE_CHECK(mem);
807
808 if (mem->tabled)
809 panic("vm_page_replace");
810
811 /*
812 * Record the object/offset pair in this page
813 */
814
815 mem->object = object;
816 mem->offset = offset;
817
818 /*
819 * Insert it into the object_object/offset hash table,
820 * replacing any page that might have been there.
821 */
822
823 bucket = &vm_page_buckets[vm_page_hash(object, offset)];
824 simple_lock(&vm_page_bucket_lock);
825 if (bucket->pages) {
826 vm_page_t *mp = &bucket->pages;
827 register vm_page_t m = *mp;
828 do {
829 if (m->object == object && m->offset == offset) {
830 /*
831 * Remove page from bucket and from object,
832 * and return it to the free list.
833 */
834 *mp = m->next;
835 queue_remove(&object->memq, m, vm_page_t,
836 listq);
837 m->tabled = FALSE;
838 object->resident_page_count--;
839
840 /*
841 * Return page to the free list.
842 * Note the page is not tabled now, so this
843 * won't self-deadlock on the bucket lock.
844 */
845
846 vm_page_free(m);
847 break;
848 }
849 mp = &m->next;
850 		} while ((m = *mp) != VM_PAGE_NULL);
851 mem->next = bucket->pages;
852 } else {
853 mem->next = VM_PAGE_NULL;
854 }
855 bucket->pages = mem;
856 simple_unlock(&vm_page_bucket_lock);
857
858 /*
859 * Now link into the object's list of backed pages.
860 */
861
862 queue_enter(&object->memq, mem, vm_page_t, listq);
863 mem->tabled = TRUE;
864
865 /*
866 * And show that the object has one more resident
867 * page.
868 */
869
870 object->resident_page_count++;
871 }
872
873 /*
874 * vm_page_remove: [ internal use only ]
875 *
876 * Removes the given mem entry from the object/offset-page
877 * table and the object page list.
878 *
879 * The object and page must be locked.
880 */
881
882 void
883 vm_page_remove(
884 register vm_page_t mem)
885 {
886 register vm_page_bucket_t *bucket;
887 register vm_page_t this;
888
889 XPR(XPR_VM_PAGE,
890 "vm_page_remove, object 0x%X offset 0x%X page 0x%X\n",
891 (integer_t)mem->object, (integer_t)mem->offset,
892 (integer_t)mem, 0,0);
893
894 assert(mem->tabled);
895 assert(!mem->cleaning);
896 VM_PAGE_CHECK(mem);
897
898 /*
899 * Remove from the object_object/offset hash table
900 */
901
902 bucket = &vm_page_buckets[vm_page_hash(mem->object, mem->offset)];
903 simple_lock(&vm_page_bucket_lock);
904 if ((this = bucket->pages) == mem) {
905 /* optimize for common case */
906
907 bucket->pages = mem->next;
908 } else {
909 register vm_page_t *prev;
910
911 for (prev = &this->next;
912 (this = *prev) != mem;
913 prev = &this->next)
914 continue;
915 *prev = this->next;
916 }
917 #if MACH_PAGE_HASH_STATS
918 bucket->cur_count--;
919 #endif /* MACH_PAGE_HASH_STATS */
920 simple_unlock(&vm_page_bucket_lock);
921
922 /*
923 * Now remove from the object's list of backed pages.
924 */
925
926 queue_remove(&mem->object->memq, mem, vm_page_t, listq);
927
928 /*
929 * And show that the object has one fewer resident
930 * page.
931 */
932
933 mem->object->resident_page_count--;
934
935 mem->tabled = FALSE;
936 mem->object = VM_OBJECT_NULL;
937 mem->offset = 0;
938 }
939
940 /*
941 * vm_page_lookup:
942 *
943 * Returns the page associated with the object/offset
944 * pair specified; if none is found, VM_PAGE_NULL is returned.
945 *
946 * The object must be locked. No side effects.
947 */
948
949 vm_page_t
950 vm_page_lookup(
951 register vm_object_t object,
952 register vm_object_offset_t offset)
953 {
954 register vm_page_t mem;
955 register vm_page_bucket_t *bucket;
956
957 /*
958 * Search the hash table for this object/offset pair
959 */
960
961 bucket = &vm_page_buckets[vm_page_hash(object, offset)];
962
963 simple_lock(&vm_page_bucket_lock);
964 for (mem = bucket->pages; mem != VM_PAGE_NULL; mem = mem->next) {
965 VM_PAGE_CHECK(mem);
966 if ((mem->object == object) && (mem->offset == offset))
967 break;
968 }
969 simple_unlock(&vm_page_bucket_lock);
970
971 return(mem);
972 }
973
974 /*
975 * vm_page_rename:
976 *
977 * Move the given memory entry from its
978 * current object to the specified target object/offset.
979 *
980 * The object must be locked.
981 */
982 void
983 vm_page_rename(
984 register vm_page_t mem,
985 register vm_object_t new_object,
986 vm_object_offset_t new_offset)
987 {
988 assert(mem->object != new_object);
989 /*
990 * Changes to mem->object require the page lock because
991 * the pageout daemon uses that lock to get the object.
992 */
993
994 XPR(XPR_VM_PAGE,
995 "vm_page_rename, new object 0x%X, offset 0x%X page 0x%X\n",
996 (integer_t)new_object, (integer_t)new_offset,
997 (integer_t)mem, 0,0);
998
999 vm_page_lock_queues();
1000 vm_page_remove(mem);
1001 vm_page_insert(mem, new_object, new_offset);
1002 vm_page_unlock_queues();
1003 }
1004
1005 /*
1006 * vm_page_init:
1007 *
1008 * Initialize the fields in a new page.
1009 * This takes a structure with random values and initializes it
1010 * so that it can be given to vm_page_release or vm_page_insert.
1011 */
1012 void
1013 vm_page_init(
1014 vm_page_t mem,
1015 ppnum_t phys_page)
1016 {
1017 *mem = vm_page_template;
1018 mem->phys_page = phys_page;
1019 }
1020
1021 /*
1022 * vm_page_grab_fictitious:
1023 *
1024 * Remove a fictitious page from the free list.
1025 * Returns VM_PAGE_NULL if there are no free pages.
1026 */
1027 int c_vm_page_grab_fictitious = 0;
1028 int c_vm_page_release_fictitious = 0;
1029 int c_vm_page_more_fictitious = 0;
1030
1031 vm_page_t
1032 vm_page_grab_fictitious(void)
1033 {
1034 register vm_page_t m;
1035
1036 m = (vm_page_t)zget(vm_page_zone);
1037 if (m) {
1038 vm_page_init(m, vm_page_fictitious_addr);
1039 m->fictitious = TRUE;
1040 }
1041
1042 c_vm_page_grab_fictitious++;
1043 return m;
1044 }
1045
1046 /*
1047 * vm_page_release_fictitious:
1048 *
1049 * Release a fictitious page to the free list.
1050 */
1051
1052 void
1053 vm_page_release_fictitious(
1054 register vm_page_t m)
1055 {
1056 assert(!m->free);
1057 assert(m->busy);
1058 assert(m->fictitious);
1059 assert(m->phys_page == vm_page_fictitious_addr);
1060
1061 c_vm_page_release_fictitious++;
1062
1063 if (m->free)
1064 panic("vm_page_release_fictitious");
1065 m->free = TRUE;
1066 zfree(vm_page_zone, (vm_offset_t)m);
1067 }
1068
1069 /*
1070 * vm_page_more_fictitious:
1071 *
1072 * Add more fictitious pages to the free list.
1073 * Allowed to block. This routine is way intimate
1074 * with the zones code, for several reasons:
1075 * 1. we need to carve some page structures out of physical
1076 * memory before zones work, so they _cannot_ come from
1077 * the zone_map.
1078 * 2. the zone needs to be collectable in order to prevent
1079 * growth without bound. These structures are used by
1080 * the device pager (by the hundreds and thousands), as
1081 * private pages for pageout, and as blocking pages for
1082 * pagein. Temporary bursts in demand should not result in
1083 * permanent allocation of a resource.
1084 * 3. To smooth allocation humps, we allocate single pages
1085 * with kernel_memory_allocate(), and cram them into the
1086 * zone. This also allows us to initialize the vm_page_t's
1087 * on the way into the zone, so that zget() always returns
1088 * an initialized structure. The zone free element pointer
1089 * and the free page pointer are both the first item in the
1090 * vm_page_t.
1091 * 4. By having the pages in the zone pre-initialized, we need
1092 * not keep 2 levels of lists. The garbage collector simply
1093 * scans our list, and reduces physical memory usage as it
1094 * sees fit.
1095 */
1096
1097 void vm_page_more_fictitious(void)
1098 {
1099 extern vm_map_t zone_map;
1100 register vm_page_t m;
1101 vm_offset_t addr;
1102 kern_return_t retval;
1103 int i;
1104
1105 c_vm_page_more_fictitious++;
1106
1107 /*
1108 * Allocate a single page from the zone_map. Do not wait if no physical
1109 * pages are immediately available, and do not zero the space. We need
1110 * our own blocking lock here to prevent having multiple,
1111 * simultaneous requests from piling up on the zone_map lock. Exactly
1112 * one (of our) threads should be potentially waiting on the map lock.
1113 * If winner is not vm-privileged, then the page allocation will fail,
1114 * and it will temporarily block here in the vm_page_wait().
1115 */
1116 mutex_lock(&vm_page_alloc_lock);
1117 /*
1118 * If another thread allocated space, just bail out now.
1119 */
1120 if (zone_free_count(vm_page_zone) > 5) {
1121 /*
1122 * The number "5" is a small number that is larger than the
1123 * number of fictitious pages that any single caller will
1124 * attempt to allocate. Otherwise, a thread will attempt to
1125 * acquire a fictitious page (vm_page_grab_fictitious), fail,
1126 * release all of the resources and locks already acquired,
1127 		 * and then call this routine. This routine would see the pages
1128 		 * that the caller just released, decline to allocate new space,
1129 		 * and the process would repeat indefinitely. The largest known number
1130 * of fictitious pages required in this manner is 2. 5 is
1131 * simply a somewhat larger number.
1132 */
1133 mutex_unlock(&vm_page_alloc_lock);
1134 return;
1135 }
1136
1137 if ((retval = kernel_memory_allocate(zone_map,
1138 &addr, PAGE_SIZE, VM_PROT_ALL,
1139 KMA_KOBJECT|KMA_NOPAGEWAIT)) != KERN_SUCCESS) {
1140 /*
1141 * No page was available. Tell the pageout daemon, drop the
1142 * lock to give another thread a chance at it, and
1143 * wait for the pageout daemon to make progress.
1144 */
1145 mutex_unlock(&vm_page_alloc_lock);
1146 vm_page_wait(THREAD_UNINT);
1147 return;
1148 }
1149 /*
1150 * Initialize as many vm_page_t's as will fit on this page. This
1151 * depends on the zone code disturbing ONLY the first item of
1152 * each zone element.
1153 */
1154 m = (vm_page_t)addr;
1155 for (i = PAGE_SIZE/sizeof(struct vm_page); i > 0; i--) {
1156 vm_page_init(m, vm_page_fictitious_addr);
1157 m->fictitious = TRUE;
1158 m++;
1159 }
1160 zcram(vm_page_zone, addr, PAGE_SIZE);
1161 mutex_unlock(&vm_page_alloc_lock);
1162 }
1163
1164 /*
1165 * vm_page_convert:
1166 *
1167 * Attempt to convert a fictitious page into a real page.
1168 */
1169
1170 boolean_t
1171 vm_page_convert(
1172 register vm_page_t m)
1173 {
1174 register vm_page_t real_m;
1175
1176 assert(m->busy);
1177 assert(m->fictitious);
1178 assert(!m->dirty);
1179
1180 real_m = vm_page_grab();
1181 if (real_m == VM_PAGE_NULL)
1182 return FALSE;
1183
1184 m->phys_page = real_m->phys_page;
1185 m->fictitious = FALSE;
1186 m->no_isync = TRUE;
1187
1188 vm_page_lock_queues();
1189 if (m->active)
1190 vm_page_active_count++;
1191 else if (m->inactive)
1192 vm_page_inactive_count++;
1193 vm_page_unlock_queues();
1194
1195 real_m->phys_page = vm_page_fictitious_addr;
1196 real_m->fictitious = TRUE;
1197
1198 vm_page_release_fictitious(real_m);
1199 return TRUE;
1200 }
1201
1202 /*
1203 * vm_pool_low():
1204 *
1205 * Return true if it is not likely that a non-vm_privileged thread
1206 * can get memory without blocking. Advisory only, since the
1207 * situation may change under us.
1208 */
1209 int
1210 vm_pool_low(void)
1211 {
1212 /* No locking, at worst we will fib. */
1213 return( vm_page_free_count < vm_page_free_reserved );
1214 }
1215
1216 /*
1217 * vm_page_grab:
1218 *
1219 * Remove a page from the free list.
1220 * Returns VM_PAGE_NULL if the free list is too small.
1221 */
1222
1223 unsigned long vm_page_grab_count = 0; /* measure demand */
1224
1225 vm_page_t
1226 vm_page_grab(void)
1227 {
1228 register vm_page_t mem;
1229
1230 mutex_lock(&vm_page_queue_free_lock);
1231 vm_page_grab_count++;
1232
1233 /*
1234 * Optionally produce warnings if the wire or gobble
1235 * counts exceed some threshold.
1236 */
1237 if (vm_page_wire_count_warning > 0
1238 && vm_page_wire_count >= vm_page_wire_count_warning) {
1239 printf("mk: vm_page_grab(): high wired page count of %d\n",
1240 vm_page_wire_count);
1241 assert(vm_page_wire_count < vm_page_wire_count_warning);
1242 }
1243 if (vm_page_gobble_count_warning > 0
1244 && vm_page_gobble_count >= vm_page_gobble_count_warning) {
1245 printf("mk: vm_page_grab(): high gobbled page count of %d\n",
1246 vm_page_gobble_count);
1247 assert(vm_page_gobble_count < vm_page_gobble_count_warning);
1248 }
1249
1250 /*
1251 * Only let privileged threads (involved in pageout)
1252 * dip into the reserved pool.
1253 */
1254
1255 if ((vm_page_free_count < vm_page_free_reserved) &&
1256 !current_thread()->vm_privilege) {
1257 mutex_unlock(&vm_page_queue_free_lock);
1258 mem = VM_PAGE_NULL;
1259 goto wakeup_pageout;
1260 }
1261
1262 while (vm_page_queue_free == VM_PAGE_NULL) {
1263 printf("vm_page_grab: no free pages, trouble expected...\n");
1264 mutex_unlock(&vm_page_queue_free_lock);
1265 VM_PAGE_WAIT();
1266 mutex_lock(&vm_page_queue_free_lock);
1267 }
1268
1269 if (--vm_page_free_count < vm_page_free_count_minimum)
1270 vm_page_free_count_minimum = vm_page_free_count;
1271 mem = vm_page_queue_free;
1272 vm_page_queue_free = (vm_page_t) mem->pageq.next;
1273 mem->free = FALSE;
1274 mem->no_isync = TRUE;
1275 mutex_unlock(&vm_page_queue_free_lock);
1276
1277 /*
1278 * Decide if we should poke the pageout daemon.
1279 * We do this if the free count is less than the low
1280 * water mark, or if the free count is less than the high
1281 * water mark (but above the low water mark) and the inactive
1282 * count is less than its target.
1283 *
1284 * We don't have the counts locked ... if they change a little,
1285 * it doesn't really matter.
1286 */
1287
1288 wakeup_pageout:
1289 if ((vm_page_free_count < vm_page_free_min) ||
1290 ((vm_page_free_count < vm_page_free_target) &&
1291 (vm_page_inactive_count < vm_page_inactive_target)))
1292 thread_wakeup((event_t) &vm_page_free_wanted);
1293
1294 // dbgLog(mem->phys_page, vm_page_free_count, vm_page_wire_count, 4); /* (TEST/DEBUG) */
1295
1296 return mem;
1297 }
1298
1299 /*
1300 * vm_page_release:
1301 *
1302 * Return a page to the free list.
1303 */
1304
1305 void
1306 vm_page_release(
1307 register vm_page_t mem)
1308 {
1309
1310 #if 0
1311 unsigned int pindex;
1312 phys_entry *physent;
1313
1314 physent = mapping_phys_lookup(mem->phys_page, &pindex); /* (BRINGUP) */
1315 if(physent->ppLink & ppN) { /* (BRINGUP) */
1316 panic("vm_page_release: already released - %08X %08X\n", mem, mem->phys_page);
1317 }
1318 physent->ppLink = physent->ppLink | ppN; /* (BRINGUP) */
1319 #endif
1320
1321 assert(!mem->private && !mem->fictitious);
1322
1323 // dbgLog(mem->phys_page, vm_page_free_count, vm_page_wire_count, 5); /* (TEST/DEBUG) */
1324
1325 mutex_lock(&vm_page_queue_free_lock);
1326 if (mem->free)
1327 panic("vm_page_release");
1328 mem->free = TRUE;
1329 mem->pageq.next = (queue_entry_t) vm_page_queue_free;
1330 vm_page_queue_free = mem;
1331 vm_page_free_count++;
1332
1333 /*
1334 * Check if we should wake up someone waiting for page.
1335 * But don't bother waking them unless they can allocate.
1336 *
1337 * We wakeup only one thread, to prevent starvation.
1338 * Because the scheduling system handles wait queues FIFO,
1339 * if we wakeup all waiting threads, one greedy thread
1340 * can starve multiple niceguy threads. When the threads
1341 	 *	all wake up, the greedy thread runs first, grabs the page,
1342 * and waits for another page. It will be the first to run
1343 * when the next page is freed.
1344 *
1345 * However, there is a slight danger here.
1346 * The thread we wake might not use the free page.
1347 * Then the other threads could wait indefinitely
1348 * while the page goes unused. To forestall this,
1349 * the pageout daemon will keep making free pages
1350 * as long as vm_page_free_wanted is non-zero.
1351 */
1352
1353 if ((vm_page_free_wanted > 0) &&
1354 (vm_page_free_count >= vm_page_free_reserved)) {
1355 vm_page_free_wanted--;
1356 thread_wakeup_one((event_t) &vm_page_free_count);
1357 }
1358
1359 mutex_unlock(&vm_page_queue_free_lock);
1360 }
1361
1362 #define VM_PAGEOUT_DEADLOCK_TIMEOUT 3
1363
1364 /*
1365 * vm_page_wait:
1366 *
1367 * Wait for a page to become available.
1368 * If there are plenty of free pages, then we don't sleep.
1369 *
1370 * Returns:
1371 * TRUE: There may be another page, try again
1372 * FALSE: We were interrupted out of our wait, don't try again
1373 */
1374
1375 boolean_t
1376 vm_page_wait(
1377 int interruptible )
1378 {
1379 /*
1380 * We can't use vm_page_free_reserved to make this
1381 * determination. Consider: some thread might
1382 * need to allocate two pages. The first allocation
1383 * succeeds, the second fails. After the first page is freed,
1384 * a call to vm_page_wait must really block.
1385 */
1386 uint64_t abstime;
1387 kern_return_t wait_result;
1388 kern_return_t kr;
1389 int need_wakeup = 0;
1390
1391 mutex_lock(&vm_page_queue_free_lock);
1392 if (vm_page_free_count < vm_page_free_target) {
1393 if (vm_page_free_wanted++ == 0)
1394 need_wakeup = 1;
1395 wait_result = assert_wait((event_t)&vm_page_free_count,
1396 interruptible);
1397 mutex_unlock(&vm_page_queue_free_lock);
1398 counter(c_vm_page_wait_block++);
1399
1400 if (need_wakeup)
1401 thread_wakeup((event_t)&vm_page_free_wanted);
1402
1403 if (wait_result == THREAD_WAITING) {
1404 clock_interval_to_absolutetime_interval(
1405 VM_PAGEOUT_DEADLOCK_TIMEOUT,
1406 NSEC_PER_SEC, &abstime);
1407 clock_absolutetime_interval_to_deadline(
1408 abstime, &abstime);
1409 thread_set_timer_deadline(abstime);
1410 wait_result = thread_block(THREAD_CONTINUE_NULL);
1411
1412 if(wait_result == THREAD_TIMED_OUT) {
1413 kr = vm_pageout_emergency_availability_request();
1414 return TRUE;
1415 } else {
1416 thread_cancel_timer();
1417 }
1418 }
1419
1420 return(wait_result == THREAD_AWAKENED);
1421 } else {
1422 mutex_unlock(&vm_page_queue_free_lock);
1423 return TRUE;
1424 }
1425 }
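/*
 * Typical caller pattern (a minimal sketch, mirroring the retry loop
 * used by vm_page_part_zero_fill below):
 *
 *	vm_page_t m;
 *
 *	while ((m = vm_page_grab()) == VM_PAGE_NULL)
 *		vm_page_wait(THREAD_UNINT);
 *
 * Interruptible callers should instead check the boolean result and
 * give up when vm_page_wait returns FALSE.
 */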
1426
1427 /*
1428 * vm_page_alloc:
1429 *
1430 * Allocate and return a memory cell associated
1431 * with this VM object/offset pair.
1432 *
1433 * Object must be locked.
1434 */
1435
1436 vm_page_t
1437 vm_page_alloc(
1438 vm_object_t object,
1439 vm_object_offset_t offset)
1440 {
1441 register vm_page_t mem;
1442
1443 mem = vm_page_grab();
1444 if (mem == VM_PAGE_NULL)
1445 return VM_PAGE_NULL;
1446
1447 vm_page_insert(mem, object, offset);
1448
1449 return(mem);
1450 }
1451
1452 counter(unsigned int c_laundry_pages_freed = 0;)
1453
1454 int vm_pagein_cluster_unused = 0;
1455 boolean_t vm_page_free_verify = FALSE;
1456 /*
1457 * vm_page_free:
1458 *
1459 * Returns the given page to the free list,
1460 * disassociating it with any VM object.
1461 *
1462 * Object and page queues must be locked prior to entry.
1463 */
1464 void
1465 vm_page_free(
1466 register vm_page_t mem)
1467 {
1468 vm_object_t object = mem->object;
1469
1470 assert(!mem->free);
1471 assert(!mem->cleaning);
1472 assert(!mem->pageout);
1473 assert(!vm_page_free_verify || pmap_verify_free(mem->phys_page));
1474
1475 if (mem->tabled)
1476 vm_page_remove(mem); /* clears tabled, object, offset */
1477 VM_PAGE_QUEUES_REMOVE(mem); /* clears active or inactive */
1478
1479 if (mem->clustered) {
1480 mem->clustered = FALSE;
1481 vm_pagein_cluster_unused++;
1482 }
1483
1484 if (mem->wire_count) {
1485 if (!mem->private && !mem->fictitious)
1486 vm_page_wire_count--;
1487 mem->wire_count = 0;
1488 assert(!mem->gobbled);
1489 } else if (mem->gobbled) {
1490 if (!mem->private && !mem->fictitious)
1491 vm_page_wire_count--;
1492 vm_page_gobble_count--;
1493 }
1494 mem->gobbled = FALSE;
1495
1496 if (mem->laundry) {
1497 extern int vm_page_laundry_min;
1498 if (!object->internal)
1499 vm_page_burst_count--;
1500 vm_page_laundry_count--;
1501 mem->laundry = FALSE; /* laundry is now clear */
1502 counter(++c_laundry_pages_freed);
1503 if (vm_page_laundry_count < vm_page_laundry_min) {
1504 vm_page_laundry_min = 0;
1505 thread_wakeup((event_t) &vm_page_laundry_count);
1506 }
1507 }
1508
1509 mem->discard_request = FALSE;
1510
1511 PAGE_WAKEUP(mem); /* clears wanted */
1512
1513 if (mem->absent)
1514 vm_object_absent_release(object);
1515
1516 /* Some of these may be unnecessary */
1517 mem->page_lock = 0;
1518 mem->unlock_request = 0;
1519 mem->busy = TRUE;
1520 mem->absent = FALSE;
1521 mem->error = FALSE;
1522 mem->dirty = FALSE;
1523 mem->precious = FALSE;
1524 mem->reference = FALSE;
1525
1526 mem->page_error = KERN_SUCCESS;
1527
1528 if (mem->private) {
1529 mem->private = FALSE;
1530 mem->fictitious = TRUE;
1531 mem->phys_page = vm_page_fictitious_addr;
1532 }
1533 if (mem->fictitious) {
1534 vm_page_release_fictitious(mem);
1535 } else {
1536 /* depends on the queues lock */
1537 if(mem->zero_fill) {
1538 vm_zf_count-=1;
1539 mem->zero_fill = FALSE;
1540 }
1541 vm_page_init(mem, mem->phys_page);
1542 vm_page_release(mem);
1543 }
1544 }
1545
1546
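/*
 *	vm_page_free_list:
 *
 *	Free a chain of pages linked through pageq.next in a single pass:
 *	per-page cleanup happens first, then the free-list lock is taken
 *	once to splice the whole batch onto vm_page_queue_free and issue
 *	the wakeups.  Fictitious pages are returned to their zone instead.
 *	Callers are expected to hold the page queues lock (note the
 *	"depends on the queues lock" comment below), as for vm_page_free.
 */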
1547 void
1548 vm_page_free_list(
1549 register vm_page_t mem)
1550 {
1551 register vm_page_t nxt;
1552 register vm_page_t first = NULL;
1553 register vm_page_t last;
1554 register int pg_count = 0;
1555
1556
1557 while (mem) {
1558 nxt = (vm_page_t)(mem->pageq.next);
1559
1560 if (mem->clustered)
1561 vm_pagein_cluster_unused++;
1562
1563 if (mem->laundry) {
1564 extern int vm_page_laundry_min;
1565
1566 if (!mem->object->internal)
1567 vm_page_burst_count--;
1568 vm_page_laundry_count--;
1569 counter(++c_laundry_pages_freed);
1570
1571 if (vm_page_laundry_count < vm_page_laundry_min) {
1572 vm_page_laundry_min = 0;
1573 thread_wakeup((event_t) &vm_page_laundry_count);
1574 }
1575 }
1576 mem->busy = TRUE;
1577
1578 PAGE_WAKEUP(mem); /* clears wanted */
1579
1580 if (mem->private)
1581 mem->fictitious = TRUE;
1582
1583 if (!mem->fictitious) {
1584 /* depends on the queues lock */
1585 if (mem->zero_fill)
1586 vm_zf_count -= 1;
1587 vm_page_init(mem, mem->phys_page);
1588
1589 mem->free = TRUE;
1590
1591 if (first == NULL)
1592 last = mem;
1593 mem->pageq.next = (queue_t) first;
1594 first = mem;
1595
1596 pg_count++;
1597 } else {
1598 mem->phys_page = vm_page_fictitious_addr;
1599 vm_page_release_fictitious(mem);
1600 }
1601 mem = nxt;
1602 }
1603 if (first) {
1604
1605 mutex_lock(&vm_page_queue_free_lock);
1606
1607 last->pageq.next = (queue_entry_t) vm_page_queue_free;
1608 vm_page_queue_free = first;
1609
1610 vm_page_free_count += pg_count;
1611
1612 if ((vm_page_free_wanted > 0) &&
1613 (vm_page_free_count >= vm_page_free_reserved)) {
1614 int available_pages;
1615
1616 available_pages = vm_page_free_count - vm_page_free_reserved;
1617
1618 if (available_pages >= vm_page_free_wanted) {
1619 vm_page_free_wanted = 0;
1620 thread_wakeup((event_t) &vm_page_free_count);
1621 } else {
1622 while (available_pages--) {
1623 vm_page_free_wanted--;
1624 thread_wakeup_one((event_t) &vm_page_free_count);
1625 }
1626 }
1627 }
1628 mutex_unlock(&vm_page_queue_free_lock);
1629 }
1630 }
1631
1632
1633 /*
1634 * vm_page_wire:
1635 *
1636 * Mark this page as wired down by yet
1637 * another map, removing it from paging queues
1638 * as necessary.
1639 *
1640 * The page's object and the page queues must be locked.
1641 */
1642 void
1643 vm_page_wire(
1644 register vm_page_t mem)
1645 {
1646
1647 // dbgLog(current_act(), mem->offset, mem->object, 1); /* (TEST/DEBUG) */
1648
1649 VM_PAGE_CHECK(mem);
1650
1651 if (mem->wire_count == 0) {
1652 VM_PAGE_QUEUES_REMOVE(mem);
1653 if (!mem->private && !mem->fictitious && !mem->gobbled)
1654 vm_page_wire_count++;
1655 if (mem->gobbled)
1656 vm_page_gobble_count--;
1657 mem->gobbled = FALSE;
1658 if(mem->zero_fill) {
1659 /* depends on the queues lock */
1660 vm_zf_count-=1;
1661 mem->zero_fill = FALSE;
1662 }
1663 }
1664 assert(!mem->gobbled);
1665 mem->wire_count++;
1666 }
1667
1668 /*
1669 * vm_page_gobble:
1670 *
1671 * Mark this page as consumed by the vm/ipc/xmm subsystems.
1672 *
1673 * Called only for freshly vm_page_grab()ed pages - w/ nothing locked.
1674 */
1675 void
1676 vm_page_gobble(
1677 register vm_page_t mem)
1678 {
1679 vm_page_lock_queues();
1680 VM_PAGE_CHECK(mem);
1681
1682 assert(!mem->gobbled);
1683 assert(mem->wire_count == 0);
1684
1685 if (!mem->gobbled && mem->wire_count == 0) {
1686 if (!mem->private && !mem->fictitious)
1687 vm_page_wire_count++;
1688 }
1689 vm_page_gobble_count++;
1690 mem->gobbled = TRUE;
1691 vm_page_unlock_queues();
1692 }
1693
1694 /*
1695 * vm_page_unwire:
1696 *
1697 * Release one wiring of this page, potentially
1698 * enabling it to be paged again.
1699 *
1700 * The page's object and the page queues must be locked.
1701 */
1702 void
1703 vm_page_unwire(
1704 register vm_page_t mem)
1705 {
1706
1707 // dbgLog(current_act(), mem->offset, mem->object, 0); /* (TEST/DEBUG) */
1708
1709 VM_PAGE_CHECK(mem);
1710 assert(mem->wire_count > 0);
1711
1712 if (--mem->wire_count == 0) {
1713 assert(!mem->private && !mem->fictitious);
1714 vm_page_wire_count--;
1715 queue_enter(&vm_page_queue_active, mem, vm_page_t, pageq);
1716 vm_page_active_count++;
1717 mem->active = TRUE;
1718 mem->reference = TRUE;
1719 }
1720 }
1721
1722 /*
1723 * vm_page_deactivate:
1724 *
1725 * Returns the given page to the inactive list,
1726 * indicating that no physical maps have access
1727 * to this page. [Used by the physical mapping system.]
1728 *
1729 * The page queues must be locked.
1730 */
1731 void
1732 vm_page_deactivate(
1733 register vm_page_t m)
1734 {
1735 VM_PAGE_CHECK(m);
1736
1737 // dbgLog(m->phys_page, vm_page_free_count, vm_page_wire_count, 6); /* (TEST/DEBUG) */
1738
1739 /*
1740 * This page is no longer very interesting. If it was
1741 * interesting (active or inactive/referenced), then we
1742 * clear the reference bit and (re)enter it in the
1743 * inactive queue. Note wired pages should not have
1744 * their reference bit cleared.
1745 */
1746 if (m->gobbled) { /* can this happen? */
1747 assert(m->wire_count == 0);
1748 if (!m->private && !m->fictitious)
1749 vm_page_wire_count--;
1750 vm_page_gobble_count--;
1751 m->gobbled = FALSE;
1752 }
1753 if (m->private || (m->wire_count != 0))
1754 return;
1755 if (m->active || (m->inactive && m->reference)) {
1756 if (!m->fictitious && !m->absent)
1757 pmap_clear_reference(m->phys_page);
1758 m->reference = FALSE;
1759 VM_PAGE_QUEUES_REMOVE(m);
1760 }
1761 if (m->wire_count == 0 && !m->inactive) {
1762 m->page_ticket = vm_page_ticket;
1763 vm_page_ticket_roll++;
1764
1765 if(vm_page_ticket_roll == VM_PAGE_TICKETS_IN_ROLL) {
1766 vm_page_ticket_roll = 0;
1767 if(vm_page_ticket == VM_PAGE_TICKET_ROLL_IDS)
1768 vm_page_ticket= 0;
1769 else
1770 vm_page_ticket++;
1771 }
1772
1773 if(m->zero_fill) {
1774 queue_enter(&vm_page_queue_zf, m, vm_page_t, pageq);
1775 } else {
1776 queue_enter(&vm_page_queue_inactive,
1777 m, vm_page_t, pageq);
1778 }
1779
1780 m->inactive = TRUE;
1781 if (!m->fictitious)
1782 vm_page_inactive_count++;
1783 }
1784 }
1785
1786 /*
1787 * vm_page_activate:
1788 *
1789 * Put the specified page on the active list (if appropriate).
1790 *
1791 * The page queues must be locked.
1792 */
1793
1794 void
1795 vm_page_activate(
1796 register vm_page_t m)
1797 {
1798 VM_PAGE_CHECK(m);
1799
1800 if (m->gobbled) {
1801 assert(m->wire_count == 0);
1802 if (!m->private && !m->fictitious)
1803 vm_page_wire_count--;
1804 vm_page_gobble_count--;
1805 m->gobbled = FALSE;
1806 }
1807 if (m->private)
1808 return;
1809
1810 if (m->inactive) {
1811 if (m->zero_fill) {
1812 queue_remove(&vm_page_queue_zf, m, vm_page_t, pageq);
1813 } else {
1814 queue_remove(&vm_page_queue_inactive,
1815 m, vm_page_t, pageq);
1816 }
1817 if (!m->fictitious)
1818 vm_page_inactive_count--;
1819 m->inactive = FALSE;
1820 }
1821 if (m->wire_count == 0) {
1822 if (m->active)
1823 panic("vm_page_activate: already active");
1824
1825 queue_enter(&vm_page_queue_active, m, vm_page_t, pageq);
1826 m->active = TRUE;
1827 m->reference = TRUE;
1828 if (!m->fictitious)
1829 vm_page_active_count++;
1830 }
1831 }
1832
1833 /*
1834 * vm_page_part_zero_fill:
1835 *
1836 * Zero-fill a part of the page.
1837 */
1838 void
1839 vm_page_part_zero_fill(
1840 vm_page_t m,
1841 vm_offset_t m_pa,
1842 vm_size_t len)
1843 {
1844 vm_page_t tmp;
1845
1846 VM_PAGE_CHECK(m);
1847 #ifdef PMAP_ZERO_PART_PAGE_IMPLEMENTED
1848 pmap_zero_part_page(m->phys_page, m_pa, len);
1849 #else
1850 while (1) {
1851 tmp = vm_page_grab();
1852 if (tmp == VM_PAGE_NULL) {
1853 vm_page_wait(THREAD_UNINT);
1854 continue;
1855 }
1856 break;
1857 }
1858 vm_page_zero_fill(tmp);
1859 if(m_pa != 0) {
1860 vm_page_part_copy(m, 0, tmp, 0, m_pa);
1861 }
1862 if((m_pa + len) < PAGE_SIZE) {
1863 vm_page_part_copy(m, m_pa + len, tmp,
1864 m_pa + len, PAGE_SIZE - (m_pa + len));
1865 }
1866 vm_page_copy(tmp,m);
1867 vm_page_lock_queues();
1868 vm_page_free(tmp);
1869 vm_page_unlock_queues();
1870 #endif
1871
1872 }
1873
1874 /*
1875 * vm_page_zero_fill:
1876 *
1877 * Zero-fill the specified page.
1878 */
1879 void
1880 vm_page_zero_fill(
1881 vm_page_t m)
1882 {
1883 XPR(XPR_VM_PAGE,
1884 "vm_page_zero_fill, object 0x%X offset 0x%X page 0x%X\n",
1885 (integer_t)m->object, (integer_t)m->offset, (integer_t)m, 0,0);
1886
1887 VM_PAGE_CHECK(m);
1888
1889 // dbgTrace(0xAEAEAEAE, m->phys_page, 0); /* (BRINGUP) */
1890 pmap_zero_page(m->phys_page);
1891 }
1892
1893 /*
1894 * vm_page_part_copy:
1895 *
1896 * copy part of one page to another
1897 */
1898
1899 void
1900 vm_page_part_copy(
1901 vm_page_t src_m,
1902 vm_offset_t src_pa,
1903 vm_page_t dst_m,
1904 vm_offset_t dst_pa,
1905 vm_size_t len)
1906 {
1907 VM_PAGE_CHECK(src_m);
1908 VM_PAGE_CHECK(dst_m);
1909
1910 pmap_copy_part_page(src_m->phys_page, src_pa,
1911 dst_m->phys_page, dst_pa, len);
1912 }
1913
1914 /*
1915 * vm_page_copy:
1916 *
1917 * Copy one page to another
1918 */
1919
1920 void
1921 vm_page_copy(
1922 vm_page_t src_m,
1923 vm_page_t dest_m)
1924 {
1925 XPR(XPR_VM_PAGE,
1926 "vm_page_copy, object 0x%X offset 0x%X to object 0x%X offset 0x%X\n",
1927 (integer_t)src_m->object, src_m->offset,
1928 (integer_t)dest_m->object, dest_m->offset,
1929 0);
1930
1931 VM_PAGE_CHECK(src_m);
1932 VM_PAGE_CHECK(dest_m);
1933
1934 pmap_copy_page(src_m->phys_page, dest_m->phys_page);
1935 }
1936
1937 /*
1938 * Currently, this is a primitive allocator that grabs
1939 * free pages from the system, sorts them by physical
1940 * address, then searches for a region large enough to
1941 * satisfy the user's request.
1942 *
1943 * Additional levels of effort:
1944 * + steal clean active/inactive pages
1945 * + force pageouts of dirty pages
1946 * + maintain a map of available physical
1947 * memory
1948 */
1949
1950 #define SET_NEXT_PAGE(m,n) ((m)->pageq.next = (struct queue_entry *) (n))
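/*
 * The free list is threaded through pageq.next; NEXT_PAGE (presumably
 * defined alongside the vm_page structure) reads that link and
 * SET_NEXT_PAGE above writes it.  The contiguous allocator below uses
 * the pair to splice pages between the free list and its private
 * sorted sub-list.
 */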
1951
1952 #if MACH_ASSERT
1953 int vm_page_verify_contiguous(
1954 vm_page_t pages,
1955 unsigned int npages);
1956 #endif /* MACH_ASSERT */
1957
1958 cpm_counter(unsigned int vpfls_pages_handled = 0;)
1959 cpm_counter(unsigned int vpfls_head_insertions = 0;)
1960 cpm_counter(unsigned int vpfls_tail_insertions = 0;)
1961 cpm_counter(unsigned int vpfls_general_insertions = 0;)
1962 cpm_counter(unsigned int vpfc_failed = 0;)
1963 cpm_counter(unsigned int vpfc_satisfied = 0;)
1964
1965
1966
1967 #if MACH_ASSERT
1968 /*
1969 * Check that the list of pages is ordered by
1970 * ascending physical address and has no holes.
1971 */
1972 int
1973 vm_page_verify_contiguous(
1974 vm_page_t pages,
1975 unsigned int npages)
1976 {
1977 register vm_page_t m;
1978 unsigned int page_count;
1979 ppnum_t prev_addr;
1980
1981 prev_addr = pages->phys_page;
1982 page_count = 1;
1983 for (m = NEXT_PAGE(pages); m != VM_PAGE_NULL; m = NEXT_PAGE(m)) {
1984 if (m->phys_page != prev_addr + 1) {
1985 printf("m 0x%x prev_addr 0x%x, current addr 0x%x\n",
1986 m, prev_addr, m->phys_page);
1987 printf("pages 0x%x page_count %u\n", pages, page_count);
1988 panic("vm_page_verify_contiguous: not contiguous!");
1989 }
1990 prev_addr = m->phys_page;
1991 ++page_count;
1992 }
1993 if (page_count != npages) {
1994 printf("pages 0x%x actual count 0x%x but requested 0x%x\n",
1995 pages, page_count, npages);
1996 panic("vm_page_verify_contiguous: count error");
1997 }
1998 return 1;
1999 }
2000 #endif /* MACH_ASSERT */
2001
2002
2003 /*
2004 * Find a region large enough to contain at least npages
2005 * of contiguous physical memory.
2006 *
2007 * Requirements:
2008 * - Called while holding vm_page_queue_free_lock.
2009 * - Doesn't respect vm_page_free_reserved; caller
2010 * must not ask for more pages than are legal to grab.
2011 *
2012 * Returns a pointer to a list of gobbled pages or VM_PAGE_NULL.
2013 *
2014 * Algorithm:
2015 * Loop over the free list, extracting one page at a time and
2016 * inserting those into a sorted sub-list. We stop as soon as
2017 * there's a contiguous range within the sorted list that can
2018 * satisfy the contiguous memory request. This contiguous sub-
2019 * list is chopped out of the sorted sub-list and the remainder
2020 * of the sorted sub-list is put back onto the beginning of the
2021 * free list.
2022 */
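/*
 * Worked example (illustrative only): with contig_pages == 2 and a
 * free list whose pages carry physical numbers 5, 9, 6 in that order,
 * page 5 seeds the sorted sub-list, page 9 is linked in after it as a
 * separate one-page run, and page 6 then falls between them, growing
 * the run that starts at 5 into the pair 5-6 and satisfying the
 * request.  Pages 5-6 are chopped out and returned, while page 9,
 * followed by whatever remained on the free list, becomes the new
 * head of vm_page_queue_free.
 */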
2023 static vm_page_t
2024 vm_page_find_contiguous(
2025 unsigned int contig_pages)
2026 {
2027 vm_page_t sort_list;
2028 vm_page_t *contfirstprev, contlast;
2029 vm_page_t m, m1;
2030 ppnum_t prevcontaddr;
2031 ppnum_t nextcontaddr;
2032 unsigned int npages;
2033
2034 #if MACH_ASSERT
2035 /*
2036 * Verify pages in the free list.
2037 */
2038 npages = 0;
2039 for (m = vm_page_queue_free; m != VM_PAGE_NULL; m = NEXT_PAGE(m))
2040 ++npages;
2041 if (npages != vm_page_free_count)
2042 panic("vm_sort_free_list: prelim: npages %u free_count %d",
2043 npages, vm_page_free_count);
2044 #endif /* MACH_ASSERT */
2045
2046 if (contig_pages == 0 || vm_page_queue_free == VM_PAGE_NULL)
2047 return VM_PAGE_NULL;
2048
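/*
 * Neighboring physical page numbers, clamped at the ends of the
 * ppnum_t range so the run bookkeeping below never wraps around
 * 0 or PPNUM_MAX.
 */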
2049 #define PPNUM_PREV(x) (((x) > 0) ? ((x) - 1) : 0)
2050 #define PPNUM_NEXT(x) (((x) < PPNUM_MAX) ? ((x) + 1) : PPNUM_MAX)
2051
2052 npages = 1;
2053 contfirstprev = &sort_list;
2054 contlast = sort_list = vm_page_queue_free;
2055 vm_page_queue_free = NEXT_PAGE(sort_list);
2056 SET_NEXT_PAGE(sort_list, VM_PAGE_NULL);
2057 prevcontaddr = PPNUM_PREV(sort_list->phys_page);
2058 nextcontaddr = PPNUM_NEXT(sort_list->phys_page);
2059
2060 while (npages < contig_pages &&
2061 (m = vm_page_queue_free) != VM_PAGE_NULL)
2062 {
2063 cpm_counter(++vpfls_pages_handled);
2064
2065 /* prepend to existing run? */
2066 if (m->phys_page == prevcontaddr)
2067 {
2068 vm_page_queue_free = NEXT_PAGE(m);
2069 cpm_counter(++vpfls_head_insertions);
2070 prevcontaddr = PPNUM_PREV(prevcontaddr);
2071 SET_NEXT_PAGE(m, *contfirstprev);
2072 *contfirstprev = m;
2073 npages++;
2074 continue; /* no tail expansion check needed */
2075 }
2076
2077 /* append to tail of existing run? */
2078 else if (m->phys_page == nextcontaddr)
2079 {
2080 vm_page_queue_free = NEXT_PAGE(m);
2081 cpm_counter(++vpfls_tail_insertions);
2082 nextcontaddr = PPNUM_NEXT(nextcontaddr);
2083 SET_NEXT_PAGE(m, NEXT_PAGE(contlast));
2084 SET_NEXT_PAGE(contlast, m);
2085 contlast = m;
2086 npages++;
2087 }
2088
2089 /* prepend to the very front of sorted list? */
2090 else if (m->phys_page < sort_list->phys_page)
2091 {
2092 vm_page_queue_free = NEXT_PAGE(m);
2093 cpm_counter(++vpfls_general_insertions);
2094 prevcontaddr = PPNUM_PREV(m->phys_page);
2095 nextcontaddr = PPNUM_NEXT(m->phys_page);
2096 SET_NEXT_PAGE(m, sort_list);
2097 contfirstprev = &sort_list;
2098 contlast = sort_list = m;
2099 npages = 1;
2100 }
2101
2102 else /* get to proper place for insertion */
2103 {
2104 if (m->phys_page < nextcontaddr)
2105 {
2106 prevcontaddr = PPNUM_PREV(sort_list->phys_page);
2107 nextcontaddr = PPNUM_NEXT(sort_list->phys_page);
2108 contfirstprev = &sort_list;
2109 contlast = sort_list;
2110 npages = 1;
2111 }
2112 for (m1 = NEXT_PAGE(contlast);
2113 npages < contig_pages &&
2114 m1 != VM_PAGE_NULL && m1->phys_page < m->phys_page;
2115 m1 = NEXT_PAGE(m1))
2116 {
2117 if (m1->phys_page != nextcontaddr) {
2118 prevcontaddr = PPNUM_PREV(m1->phys_page);
2119 contfirstprev = NEXT_PAGE_PTR(contlast);
2120 npages = 1;
2121 } else {
2122 npages++;
2123 }
2124 nextcontaddr = PPNUM_NEXT(m1->phys_page);
2125 contlast = m1;
2126 }
2127
2128 /*
2129 * We may actually already have enough.
2130 * This could happen if a previous prepend
2131 * joined up two runs to meet our needs.
2132 * If so, bail before we take the current
2133 * page off the free queue.
2134 */
2135 if (npages == contig_pages)
2136 break;
2137
2138 if (m->phys_page != nextcontaddr) {
2139 contfirstprev = NEXT_PAGE_PTR(contlast);
2140 prevcontaddr = PPNUM_PREV(m->phys_page);
2141 nextcontaddr = PPNUM_NEXT(m->phys_page);
2142 npages = 1;
2143 } else {
2144 nextcontaddr = PPNUM_NEXT(nextcontaddr);
2145 npages++;
2146 }
2147 vm_page_queue_free = NEXT_PAGE(m);
2148 cpm_counter(++vpfls_general_insertions);
2149 SET_NEXT_PAGE(m, NEXT_PAGE(contlast));
2150 SET_NEXT_PAGE(contlast, m);
2151 contlast = m;
2152 }
2153
2154 /* See how many pages are now contiguous after the insertion */
2155 for (m1 = NEXT_PAGE(m);
2156 npages < contig_pages &&
2157 m1 != VM_PAGE_NULL && m1->phys_page == nextcontaddr;
2158 m1 = NEXT_PAGE(m1))
2159 {
2160 nextcontaddr = PPNUM_NEXT(nextcontaddr);
2161 contlast = m1;
2162 npages++;
2163 }
2164 }
2165
2166 /* how did we do? */
2167 if (npages == contig_pages)
2168 {
2169 cpm_counter(++vpfc_satisfied);
2170
2171 /* remove the contiguous range from the sorted list */
2172 m = *contfirstprev;
2173 *contfirstprev = NEXT_PAGE(contlast);
2174 SET_NEXT_PAGE(contlast, VM_PAGE_NULL);
2175 assert(vm_page_verify_contiguous(m, npages));
2176
2177 /* inline vm_page_gobble() for each returned page */
2178 for (m1 = m; m1 != VM_PAGE_NULL; m1 = NEXT_PAGE(m1)) {
2179 assert(m1->free);
2180 assert(!m1->wanted);
2181 m1->free = FALSE;
2182 m1->no_isync = TRUE;
2183 m1->gobbled = TRUE;
2184 }
2185 vm_page_wire_count += npages;
2186 vm_page_gobble_count += npages;
2187 vm_page_free_count -= npages;
2188
2189 /* stick free list at the tail of the sorted list */
2190 while ((m1 = *contfirstprev) != VM_PAGE_NULL)
2191 contfirstprev = (vm_page_t *)&m1->pageq.next;
2192 *contfirstprev = vm_page_queue_free;
2193 }
2194
2195 vm_page_queue_free = sort_list;
2196 return m;
2197 }
2198
2199 /*
2200 * Allocate a list of contiguous, wired pages.
2201 */
2202 kern_return_t
2203 cpm_allocate(
2204 vm_size_t size,
2205 vm_page_t *list,
2206 boolean_t wire)
2207 {
2208 register vm_page_t m;
2209 vm_page_t *first_contig;
2210 vm_page_t free_list, pages;
2211 unsigned int npages, n1pages;
2212 int vm_pages_available;
2213 boolean_t wakeup;
2214
2215 if (size % page_size != 0)
2216 return KERN_INVALID_ARGUMENT;
2217
2218 vm_page_lock_queues();
2219 mutex_lock(&vm_page_queue_free_lock);
2220
2221 /*
2222 * Should also take active and inactive pages
2223 * into account... One day...
2224 */
2225 npages = size / page_size;
2226 vm_pages_available = vm_page_free_count - vm_page_free_reserved;
2227
2228 if (npages > vm_pages_available) {
2229 mutex_unlock(&vm_page_queue_free_lock);
2230 vm_page_unlock_queues();
2231 return KERN_RESOURCE_SHORTAGE;
2232 }
2233
2234 /*
2235 * Obtain a pointer to a subset of the free
2236 * list large enough to satisfy the request;
2237 * the region will be physically contiguous.
2238 */
2239 pages = vm_page_find_contiguous(npages);
2240
2241 /* adjust global freelist counts and determine need for wakeups */
2242 if (vm_page_free_count < vm_page_free_count_minimum)
2243 vm_page_free_count_minimum = vm_page_free_count;
2244
2245 wakeup = ((vm_page_free_count < vm_page_free_min) ||
2246 ((vm_page_free_count < vm_page_free_target) &&
2247 (vm_page_inactive_count < vm_page_inactive_target)));
2248
2249 mutex_unlock(&vm_page_queue_free_lock);
2250
2251 if (pages == VM_PAGE_NULL) {
2252 vm_page_unlock_queues();
2253 return KERN_NO_SPACE;
2254 }
2255
2256 /*
2257 * Walk the returned list, wiring the pages.
2258 */
2259 if (wire == TRUE)
2260 for (m = pages; m != VM_PAGE_NULL; m = NEXT_PAGE(m)) {
2261 /*
2262 * Essentially inlined vm_page_wire.
2263 */
2264 assert(!m->active);
2265 assert(!m->inactive);
2266 assert(!m->private);
2267 assert(!m->fictitious);
2268 assert(m->wire_count == 0);
2269 assert(m->gobbled);
2270 m->gobbled = FALSE;
2271 m->wire_count++;
2272 --vm_page_gobble_count;
2273 }
2274 vm_page_unlock_queues();
2275
2276 if (wakeup)
2277 thread_wakeup((event_t) &vm_page_free_wanted);
2278
2279 /*
2280 * The CPM pages should now be available and
2281 * ordered by ascending physical address.
2282 */
2283 assert(vm_page_verify_contiguous(pages, npages));
2284
2285 *list = pages;
2286 return KERN_SUCCESS;
2287 }
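/*
 * Usage sketch (illustrative; not part of this file's call graph):
 * a kernel client that needs npages of physically contiguous, wired
 * memory might call cpm_allocate() roughly as follows and then enter
 * the pages in a pmap itself.  The variable names are hypothetical.
 *
 *	vm_page_t	pages, m;
 *	kern_return_t	kr;
 *
 *	kr = cpm_allocate((vm_size_t)npages * page_size, &pages, TRUE);
 *	if (kr != KERN_SUCCESS)
 *		return kr;
 *	for (m = pages; m != VM_PAGE_NULL; m = NEXT_PAGE(m))
 *		... map m->phys_page wherever it is needed ...
 */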
2288
2289
2290 #include <mach_vm_debug.h>
2291 #if MACH_VM_DEBUG
2292
2293 #include <mach_debug/hash_info.h>
2294 #include <vm/vm_debug.h>
2295
2296 /*
2297 * Routine: vm_page_info
2298 * Purpose:
2299 * Return information about the global VP table (the vm_page hash buckets).
2300 * Fills the buffer with as much information as possible
2301 * and returns the desired size of the buffer.
2302 * Conditions:
2303 * Nothing locked. The caller should provide
2304 * possibly-pageable memory.
2305 */
2306
2307 unsigned int
2308 vm_page_info(
2309 hash_info_bucket_t *info,
2310 unsigned int count)
2311 {
2312 int i;
2313
2314 if (vm_page_bucket_count < count)
2315 count = vm_page_bucket_count;
2316
2317 for (i = 0; i < count; i++) {
2318 vm_page_bucket_t *bucket = &vm_page_buckets[i];
2319 unsigned int bucket_count = 0;
2320 vm_page_t m;
2321
2322 simple_lock(&vm_page_bucket_lock);
2323 for (m = bucket->pages; m != VM_PAGE_NULL; m = m->next)
2324 bucket_count++;
2325 simple_unlock(&vm_page_bucket_lock);
2326
2327 /* don't touch pageable memory while holding locks */
2328 info[i].hib_count = bucket_count;
2329 }
2330
2331 return vm_page_bucket_count;
2332 }
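/*
 * Usage sketch (illustrative): a debugging caller passes in an array
 * of hash_info_bucket_t and gets back the chain length of each bucket
 * it asked about, plus, via the return value, the total number of
 * buckets in the table.  The buffer size below is hypothetical.
 *
 *	hash_info_bucket_t	info[128];
 *	unsigned int		actual;
 *
 *	actual = vm_page_info(info, 128);
 *	if (actual > 128)
 *		only the first 128 buckets were filled in; a buffer
 *		of "actual" entries would cover the whole table.
 */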
2333 #endif /* MACH_VM_DEBUG */
2334
2335 #include <mach_kdb.h>
2336 #if MACH_KDB
2337
2338 #include <ddb/db_output.h>
2339 #include <vm/vm_print.h>
2340 #define printf kdbprintf
2341
2342 /*
2343 * Routine: vm_page_print [exported]
2344 */
2345 void
2346 vm_page_print(
2347 vm_page_t p)
2348 {
2349 extern int db_indent;
2350
2351 iprintf("page 0x%x\n", p);
2352
2353 db_indent += 2;
2354
2355 iprintf("object=0x%x", p->object);
2356 printf(", offset=0x%x", p->offset);
2357 printf(", wire_count=%d", p->wire_count);
2358
2359 iprintf("%sinactive, %sactive, %sgobbled, %slaundry, %sfree, %sref, %sdiscard\n",
2360 (p->inactive ? "" : "!"),
2361 (p->active ? "" : "!"),
2362 (p->gobbled ? "" : "!"),
2363 (p->laundry ? "" : "!"),
2364 (p->free ? "" : "!"),
2365 (p->reference ? "" : "!"),
2366 (p->discard_request ? "" : "!"));
2367 iprintf("%sbusy, %swanted, %stabled, %sfictitious, %sprivate, %sprecious\n",
2368 (p->busy ? "" : "!"),
2369 (p->wanted ? "" : "!"),
2370 (p->tabled ? "" : "!"),
2371 (p->fictitious ? "" : "!"),
2372 (p->private ? "" : "!"),
2373 (p->precious ? "" : "!"));
2374 iprintf("%sabsent, %serror, %sdirty, %scleaning, %spageout, %sclustered\n",
2375 (p->absent ? "" : "!"),
2376 (p->error ? "" : "!"),
2377 (p->dirty ? "" : "!"),
2378 (p->cleaning ? "" : "!"),
2379 (p->pageout ? "" : "!"),
2380 (p->clustered ? "" : "!"));
2381 iprintf("%slock_supplied, %soverwriting, %srestart, %sunusual\n",
2382 (p->lock_supplied ? "" : "!"),
2383 (p->overwriting ? "" : "!"),
2384 (p->restart ? "" : "!"),
2385 (p->unusual ? "" : "!"));
2386
2387 iprintf("phys_page=0x%x", p->phys_page);
2388 printf(", page_error=0x%x", p->page_error);
2389 printf(", page_lock=0x%x", p->page_lock);
2390 printf(", unlock_request=%d\n", p->unlock_request);
2391
2392 db_indent -= 2;
2393 }
2394 #endif /* MACH_KDB */