apple/xnu: osfmk/vm/vm_resident.c (blob 7fd4fc0e6e9aaebc1822a635ed9e854a7f440744)
1 /*
2 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * The contents of this file constitute Original Code as defined in and
7 * are subject to the Apple Public Source License Version 1.1 (the
8 * "License"). You may not use this file except in compliance with the
9 * License. Please obtain a copy of the License at
10 * http://www.apple.com/publicsource and read it before using this file.
11 *
12 * This Original Code and all software distributed under the License are
13 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
14 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
15 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
17 * License for the specific language governing rights and limitations
18 * under the License.
19 *
20 * @APPLE_LICENSE_HEADER_END@
21 */
22 /*
23 * @OSF_COPYRIGHT@
24 */
25 /*
26 * Mach Operating System
27 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
28 * All Rights Reserved.
29 *
30 * Permission to use, copy, modify and distribute this software and its
31 * documentation is hereby granted, provided that both the copyright
32 * notice and this permission notice appear in all copies of the
33 * software, derivative works or modified versions, and any portions
34 * thereof, and that both notices appear in supporting documentation.
35 *
36 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
37 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
38 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
39 *
40 * Carnegie Mellon requests users of this software to return to
41 *
42 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
43 * School of Computer Science
44 * Carnegie Mellon University
45 * Pittsburgh PA 15213-3890
46 *
47 * any improvements or extensions that they make and grant Carnegie Mellon
48 * the rights to redistribute these changes.
49 */
50 /*
51 */
52 /*
53 * File: vm/vm_page.c
54 * Author: Avadis Tevanian, Jr., Michael Wayne Young
55 *
56 * Resident memory management module.
57 */
58
59 #include <mach/vm_prot.h>
60 #include <mach/vm_statistics.h>
61 #include <kern/counters.h>
62 #include <kern/sched_prim.h>
63 #include <kern/task.h>
64 #include <kern/thread.h>
65 #include <kern/zalloc.h>
66 #include <kern/xpr.h>
67 #include <vm/pmap.h>
68 #include <vm/vm_init.h>
69 #include <vm/vm_map.h>
70 #include <vm/vm_page.h>
71 #include <vm/vm_pageout.h>
72 #include <vm/vm_kern.h> /* kernel_memory_allocate() */
73 #include <kern/misc_protos.h>
74 #include <zone_debug.h>
75 #include <vm/cpm.h>
76
77 /* Variables used to indicate the relative age of pages in the
78 * inactive list
79 */
80
81 int vm_page_ticket_roll = 0;
82 int vm_page_ticket = 0;
83 /*
84 * Associated with each page of user-allocatable memory is a
85 * page structure.
86 */
87
88 /*
89 * These variables record the values returned by vm_page_bootstrap,
90 * for debugging purposes. The implementation of pmap_steal_memory
91 * and pmap_startup here also uses them internally.
92 */
93
94 vm_offset_t virtual_space_start;
95 vm_offset_t virtual_space_end;
96 int vm_page_pages;
97
98 /*
99 * The vm_page_lookup() routine, which provides for fast
100 * (virtual memory object, offset) to page lookup, employs
101 * the following hash table. The vm_page_{insert,remove}
102 * routines install and remove associations in the table.
103 * [This table is often called the virtual-to-physical,
104 * or VP, table.]
105 */
106 typedef struct {
107 vm_page_t pages;
108 #if MACH_PAGE_HASH_STATS
109 int cur_count; /* current count */
110 int hi_count; /* high water mark */
111 #endif /* MACH_PAGE_HASH_STATS */
112 } vm_page_bucket_t;
113
114 vm_page_bucket_t *vm_page_buckets; /* Array of buckets */
115 unsigned int vm_page_bucket_count = 0; /* How big is array? */
116 unsigned int vm_page_hash_mask; /* Mask for hash function */
117 unsigned int vm_page_hash_shift; /* Shift for hash function */
118 decl_simple_lock_data(,vm_page_bucket_lock)
119
120 #if MACH_PAGE_HASH_STATS
121 /* This routine is for debugging only. It is intended to be called by
122 * hand by a developer using a kernel debugger. This routine prints
123 * out vm_page_hash table statistics to the kernel debug console.
124 */
125 void
126 hash_debug(void)
127 {
128 int i;
129 int numbuckets = 0;
130 int highsum = 0;
131 int maxdepth = 0;
132
133 for (i = 0; i < vm_page_bucket_count; i++) {
134 if (vm_page_buckets[i].hi_count) {
135 numbuckets++;
136 highsum += vm_page_buckets[i].hi_count;
137 if (vm_page_buckets[i].hi_count > maxdepth)
138 maxdepth = vm_page_buckets[i].hi_count;
139 }
140 }
141 printf("Total number of buckets: %d\n", vm_page_bucket_count);
142 printf("Number used buckets: %d = %d%%\n",
143 numbuckets, 100*numbuckets/vm_page_bucket_count);
144 printf("Number unused buckets: %d = %d%%\n",
145 vm_page_bucket_count - numbuckets,
146 100*(vm_page_bucket_count-numbuckets)/vm_page_bucket_count);
147 printf("Sum of bucket max depth: %d\n", highsum);
148 printf("Average bucket depth: %d.%02d\n",
149 highsum/vm_page_bucket_count,
150 (highsum%vm_page_bucket_count) * 100 / vm_page_bucket_count);
151 printf("Maximum bucket depth: %d\n", maxdepth);
152 }
153 #endif /* MACH_PAGE_HASH_STATS */
154
155 /*
156 * The virtual page size is currently implemented as a runtime
157 * variable, but is constant once initialized using vm_set_page_size.
158 * This initialization must be done in the machine-dependent
159 * bootstrap sequence, before calling other machine-independent
160 * initializations.
161 *
162 * All references to the virtual page size outside this
163 * module must use the PAGE_SIZE, PAGE_MASK and PAGE_SHIFT
164 * constants.
165 */
166 #ifndef PAGE_SIZE_FIXED
167 vm_size_t page_size = 4096;
168 vm_size_t page_mask = 4095;
169 int page_shift = 12;
170 #endif /* PAGE_SIZE_FIXED */
171
172 /*
173 * Resident page structures are initialized from
174 * a template (see vm_page_alloc).
175 *
176 * When adding a new field to the virtual memory
177 * object structure, be sure to add initialization
178 * (see vm_page_bootstrap).
179 */
180 struct vm_page vm_page_template;
181
182 /*
183 * Resident pages that represent real memory
184 * are allocated from a free list.
185 */
186 vm_page_t vm_page_queue_free;
187 vm_page_t vm_page_queue_fictitious;
188 decl_mutex_data(,vm_page_queue_free_lock)
189 unsigned int vm_page_free_wanted;
190 int vm_page_free_count;
191 int vm_page_fictitious_count;
192
193 unsigned int vm_page_free_count_minimum; /* debugging */
194
195 /*
196 * Occasionally, the virtual memory system uses
197 * resident page structures that do not refer to
198 * real pages, for example to leave a page with
199 * important state information in the VP table.
200 *
201 * These page structures are allocated the way
202 * most other kernel structures are.
203 */
204 zone_t vm_page_zone;
205 decl_mutex_data(,vm_page_alloc_lock)
206
207 /*
208 * Fictitious pages don't have a physical address,
209 * but we must initialize phys_addr to something.
210 * For debugging, this should be a strange value
211 * that the pmap module can recognize in assertions.
212 */
213 vm_offset_t vm_page_fictitious_addr = (vm_offset_t) -1;
214
215 /*
216 * Resident page structures are also chained on
217 * queues that are used by the page replacement
218 * system (pageout daemon). These queues are
219 * defined here, but are shared by the pageout
220 * module.
221 */
222 queue_head_t vm_page_queue_active;
223 queue_head_t vm_page_queue_inactive;
224 decl_mutex_data(,vm_page_queue_lock)
225 int vm_page_active_count;
226 int vm_page_inactive_count;
227 int vm_page_wire_count;
228 int vm_page_gobble_count = 0;
229 int vm_page_wire_count_warning = 0;
230 int vm_page_gobble_count_warning = 0;
231
232 /* the following fields are protected by the vm_page_queue_lock */
233 queue_head_t vm_page_queue_limbo;
234 int vm_page_limbo_count = 0; /* total pages in limbo */
235 int vm_page_limbo_real_count = 0; /* real pages in limbo */
236 int vm_page_pin_count = 0; /* number of pinned pages */
237
238 decl_simple_lock_data(,vm_page_preppin_lock)
239
240 /*
241 * Several page replacement parameters are also
242 * shared with this module, so that page allocation
243 * (done here in vm_page_alloc) can trigger the
244 * pageout daemon.
245 */
246 int vm_page_free_target = 0;
247 int vm_page_free_min = 0;
248 int vm_page_inactive_target = 0;
249 int vm_page_free_reserved = 0;
250 int vm_page_laundry_count = 0;
251
252 /*
253 * The VM system has a couple of heuristics for deciding
254 * that pages are "uninteresting" and should be placed
255 * on the inactive queue as likely candidates for replacement.
256 * These variables let the heuristics be controlled at run-time
257 * to make experimentation easier.
258 */
259
260 boolean_t vm_page_deactivate_hint = TRUE;
261
262 /*
263 * vm_set_page_size:
264 *
265 * Sets the page size, perhaps based upon the memory
266 * size. Must be called before any use of page-size
267 * dependent functions.
268 *
269 * Sets page_shift and page_mask from page_size.
270 */
271 void
272 vm_set_page_size(void)
273 {
274 #ifndef PAGE_SIZE_FIXED
275 page_mask = page_size - 1;
276
277 if ((page_mask & page_size) != 0)
278 panic("vm_set_page_size: page size not a power of two");
279
280 for (page_shift = 0; ; page_shift++)
281 if ((1 << page_shift) == page_size)
282 break;
283 #endif /* PAGE_SIZE_FIXED */
284 }
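/*
 * For illustration: with the default 4K page above, page_shift is 12
 * and page_mask is 0xFFF, so the usual page-arithmetic idioms reduce to
 *
 *	round_page(addr)  ==  (addr + page_mask) & ~page_mask
 *	trunc_page(addr)  ==  addr & ~page_mask
 *	atop(addr)        ==  addr >> page_shift
 */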
285
286 /*
287 * vm_page_bootstrap:
288 *
289 * Initializes the resident memory module.
290 *
291 * Allocates memory for the page cells, and
292 * for the object/offset-to-page hash table headers.
293 * Each page cell is initialized and placed on the free list.
294 * Returns the range of available kernel virtual memory.
295 */
296
297 void
298 vm_page_bootstrap(
299 vm_offset_t *startp,
300 vm_offset_t *endp)
301 {
302 register vm_page_t m;
303 int i;
304 unsigned int log1;
305 unsigned int log2;
306 unsigned int size;
307
308 /*
309 * Initialize the vm_page template.
310 */
311
312 m = &vm_page_template;
313 m->object = VM_OBJECT_NULL; /* reset later */
314 m->offset = 0; /* reset later */
315 m->wire_count = 0;
316
317 m->inactive = FALSE;
318 m->active = FALSE;
319 m->laundry = FALSE;
320 m->free = FALSE;
321 m->reference = FALSE;
322 m->pageout = FALSE;
323 m->dump_cleaning = FALSE;
324 m->list_req_pending = FALSE;
325
326 m->busy = TRUE;
327 m->wanted = FALSE;
328 m->tabled = FALSE;
329 m->fictitious = FALSE;
330 m->private = FALSE;
331 m->absent = FALSE;
332 m->error = FALSE;
333 m->dirty = FALSE;
334 m->cleaning = FALSE;
335 m->precious = FALSE;
336 m->clustered = FALSE;
337 m->lock_supplied = FALSE;
338 m->unusual = FALSE;
339 m->restart = FALSE;
340
341 m->phys_addr = 0; /* reset later */
342
343 m->page_lock = VM_PROT_NONE;
344 m->unlock_request = VM_PROT_NONE;
345 m->page_error = KERN_SUCCESS;
346
347 /*
348 * Initialize the page queues.
349 */
350
351 mutex_init(&vm_page_queue_free_lock, ETAP_VM_PAGEQ_FREE);
352 mutex_init(&vm_page_queue_lock, ETAP_VM_PAGEQ);
353 simple_lock_init(&vm_page_preppin_lock, ETAP_VM_PREPPIN);
354
355 vm_page_queue_free = VM_PAGE_NULL;
356 vm_page_queue_fictitious = VM_PAGE_NULL;
357 queue_init(&vm_page_queue_active);
358 queue_init(&vm_page_queue_inactive);
359 queue_init(&vm_page_queue_limbo);
360
361 vm_page_free_wanted = 0;
362
363 /*
364 * Steal memory for the map and zone subsystems.
365 */
366
367 vm_map_steal_memory();
368 zone_steal_memory();
369
370 /*
371 * Allocate (and initialize) the virtual-to-physical
372 * table hash buckets.
373 *
374 * The number of buckets should be a power of two to
375 * get a good hash function. The following computation
376 * chooses the first power of two that is greater
377 * than the number of physical pages in the system.
378 */
379
380 simple_lock_init(&vm_page_bucket_lock, ETAP_VM_BUCKET);
381
382 if (vm_page_bucket_count == 0) {
383 unsigned int npages = pmap_free_pages();
384
385 vm_page_bucket_count = 1;
386 while (vm_page_bucket_count < npages)
387 vm_page_bucket_count <<= 1;
388 }
389
390 vm_page_hash_mask = vm_page_bucket_count - 1;
391
392 /*
393 * Calculate object shift value for hashing algorithm:
394 * O = log2(sizeof(struct vm_object))
395 * B = log2(vm_page_bucket_count)
396 * hash shifts the object left by
397 * B/2 - O
398 */
399 size = vm_page_bucket_count;
400 for (log1 = 0; size > 1; log1++)
401 size /= 2;
402 size = sizeof(struct vm_object);
403 for (log2 = 0; size > 1; log2++)
404 size /= 2;
405 vm_page_hash_shift = log1/2 - log2 + 1;
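/*
 * Worked example (hypothetical sizes): a 16384-bucket table gives
 * log1 == 14, and a 128-byte struct vm_object gives log2 == 7, so
 * vm_page_hash_shift == 14/2 - 7 + 1 == 1.
 */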
406
407 if (vm_page_hash_mask & vm_page_bucket_count)
408 printf("vm_page_bootstrap: WARNING -- strange page hash\n");
409
410 vm_page_buckets = (vm_page_bucket_t *)
411 pmap_steal_memory(vm_page_bucket_count *
412 sizeof(vm_page_bucket_t));
413
414 for (i = 0; i < vm_page_bucket_count; i++) {
415 register vm_page_bucket_t *bucket = &vm_page_buckets[i];
416
417 bucket->pages = VM_PAGE_NULL;
418 #if MACH_PAGE_HASH_STATS
419 bucket->cur_count = 0;
420 bucket->hi_count = 0;
421 #endif /* MACH_PAGE_HASH_STATS */
422 }
423
424 /*
425 * Machine-dependent code allocates the resident page table.
426 * It uses vm_page_init to initialize the page frames.
427 * The code also returns to us the virtual space available
428 * to the kernel. We don't trust the pmap module
429 * to get the alignment right.
430 */
431
432 pmap_startup(&virtual_space_start, &virtual_space_end);
433 virtual_space_start = round_page(virtual_space_start);
434 virtual_space_end = trunc_page(virtual_space_end);
435
436 *startp = virtual_space_start;
437 *endp = virtual_space_end;
438
439 /*
440 * Compute the initial "wire" count.
441 * Up until now, the pages which have been set aside are not under
442 * the VM system's control, so although they aren't explicitly
443 * wired, they nonetheless can't be moved. At this moment,
444 * all VM managed pages are "free", courtesy of pmap_startup.
445 */
446 vm_page_wire_count = atop(mem_size) - vm_page_free_count; /* initial value */
447
448 printf("vm_page_bootstrap: %d free pages\n", vm_page_free_count);
449 vm_page_free_count_minimum = vm_page_free_count;
450 }
451
452 #ifndef MACHINE_PAGES
453 /*
454 * We implement pmap_steal_memory and pmap_startup with the help
455 * of two simpler functions, pmap_virtual_space and pmap_next_page.
456 */
457
458 vm_offset_t
459 pmap_steal_memory(
460 vm_size_t size)
461 {
462 vm_offset_t addr, vaddr, paddr;
463
464 /*
465 * Round the size up to a multiple of the pointer size.
466 */
467
468 size = (size + sizeof (void *) - 1) &~ (sizeof (void *) - 1);
469
470 /*
471 * If this is the first call to pmap_steal_memory,
472 * we have to initialize ourselves.
473 */
474
475 if (virtual_space_start == virtual_space_end) {
476 pmap_virtual_space(&virtual_space_start, &virtual_space_end);
477
478 /*
479 * The initial values must be aligned properly, and
480 * we don't trust the pmap module to do it right.
481 */
482
483 virtual_space_start = round_page(virtual_space_start);
484 virtual_space_end = trunc_page(virtual_space_end);
485 }
486
487 /*
488 * Allocate virtual memory for this request.
489 */
490
491 addr = virtual_space_start;
492 virtual_space_start += size;
493
494 kprintf("pmap_steal_memory: %08X - %08X; size=%08X\n", addr, virtual_space_start, size); /* (TEST/DEBUG) */
495
496 /*
497 * Allocate and map physical pages to back new virtual pages.
498 */
499
500 for (vaddr = round_page(addr);
501 vaddr < addr + size;
502 vaddr += PAGE_SIZE) {
503 if (!pmap_next_page(&paddr))
504 panic("pmap_steal_memory");
505
506 /*
507 * XXX Logically, these mappings should be wired,
508 * but some pmap modules barf if they are.
509 */
510
511 pmap_enter(kernel_pmap, vaddr, paddr,
512 VM_PROT_READ|VM_PROT_WRITE, FALSE);
513 /*
514 * Account for newly stolen memory
515 */
516 vm_page_wire_count++;
517
518 }
519
520 return addr;
521 }
522
523 void
524 pmap_startup(
525 vm_offset_t *startp,
526 vm_offset_t *endp)
527 {
528 unsigned int i, npages, pages_initialized;
529 vm_page_t pages;
530 vm_offset_t paddr;
531
532 /*
533 * We calculate how many page frames we will have
534 * and then allocate the page structures in one chunk.
535 */
536
537 npages = ((PAGE_SIZE * pmap_free_pages() +
538 (round_page(virtual_space_start) - virtual_space_start)) /
539 (PAGE_SIZE + sizeof *pages));
540
541 pages = (vm_page_t) pmap_steal_memory(npages * sizeof *pages);
542
543 /*
544 * Initialize the page frames.
545 */
546
547 for (i = 0, pages_initialized = 0; i < npages; i++) {
548 if (!pmap_next_page(&paddr))
549 break;
550
551 vm_page_init(&pages[i], paddr);
552 vm_page_pages++;
553 pages_initialized++;
554 }
555
556 /*
557 * Release pages in reverse order so that physical pages
558 * initially get allocated in ascending addresses. This keeps
559 * the devices (which must address physical memory) happy if
560 * they require several consecutive pages.
561 */
562
563 for (i = pages_initialized; i > 0; i--) {
564 vm_page_release(&pages[i - 1]);
565 }
566
567 /*
568 * We have to re-align virtual_space_start,
569 * because pmap_steal_memory has been using it.
570 */
571
572 virtual_space_start = round_page(virtual_space_start);
573
574 *startp = virtual_space_start;
575 *endp = virtual_space_end;
576 }
577 #endif /* MACHINE_PAGES */
578
579 /*
580 * Routine: vm_page_module_init
581 * Purpose:
582 * Second initialization pass, to be done after
583 * the basic VM system is ready.
584 */
585 void
586 vm_page_module_init(void)
587 {
588 vm_page_zone = zinit((vm_size_t) sizeof(struct vm_page),
589 0, PAGE_SIZE, "vm pages");
590
591 #if ZONE_DEBUG
592 zone_debug_disable(vm_page_zone);
593 #endif /* ZONE_DEBUG */
594
595 zone_change(vm_page_zone, Z_EXPAND, FALSE);
596 zone_change(vm_page_zone, Z_EXHAUST, TRUE);
597 zone_change(vm_page_zone, Z_FOREIGN, TRUE);
598
599 /*
600 * Adjust zone statistics to account for the real pages allocated
601 * in vm_page_create(). [Q: is this really what we want?]
602 */
603 vm_page_zone->count += vm_page_pages;
604 vm_page_zone->cur_size += vm_page_pages * vm_page_zone->elem_size;
605
606 mutex_init(&vm_page_alloc_lock, ETAP_VM_PAGE_ALLOC);
607 }
608
609 /*
610 * Routine: vm_page_create
611 * Purpose:
612 * After the VM system is up, machine-dependent code
613 * may stumble across more physical memory. For example,
614 * memory that it was reserving for a frame buffer.
615 * vm_page_create turns this memory into available pages.
616 */
617
618 void
619 vm_page_create(
620 vm_offset_t start,
621 vm_offset_t end)
622 {
623 vm_offset_t paddr;
624 vm_page_t m;
625
626 for (paddr = round_page(start);
627 paddr < trunc_page(end);
628 paddr += PAGE_SIZE) {
629 while ((m = (vm_page_t) vm_page_grab_fictitious())
630 == VM_PAGE_NULL)
631 vm_page_more_fictitious();
632
633 vm_page_init(m, paddr);
634 vm_page_pages++;
635 vm_page_release(m);
636 }
637 }
638
639 /*
640 * vm_page_hash:
641 *
642 * Distributes the object/offset key pair among hash buckets.
643 *
644 * NOTE: To get a good hash function, the bucket count should
645 * be a power of two.
646 */
647 #define vm_page_hash(object, offset) (\
648 ( ((natural_t)(vm_offset_t)object<<vm_page_hash_shift) + (natural_t)atop(offset))\
649 & vm_page_hash_mask)
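/*
 * Worked example (hypothetical values): with 1024 buckets
 * (vm_page_hash_mask == 0x3FF) and vm_page_hash_shift == 2, an object
 * at 0x1000 with offset 0x6000 hashes to
 *
 *	((0x1000 << 2) + atop(0x6000)) & 0x3FF == (0x4000 + 6) & 0x3FF == 6
 *
 * so nearby offsets within one object fall into nearby buckets.
 */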
650
651 /*
652 * vm_page_insert: [ internal use only ]
653 *
654 * Inserts the given mem entry into the object/offset-page
655 * table and the object page list.
656 *
657 * The object must be locked.
658 */
659
660 void
661 vm_page_insert(
662 register vm_page_t mem,
663 register vm_object_t object,
664 register vm_object_offset_t offset)
665 {
666 register vm_page_bucket_t *bucket;
667
668 XPR(XPR_VM_PAGE,
669 "vm_page_insert, object 0x%X offset 0x%X page 0x%X\n",
670 (integer_t)object, (integer_t)offset, (integer_t)mem, 0,0);
671
672 VM_PAGE_CHECK(mem);
673
674 if (mem->tabled)
675 panic("vm_page_insert");
676
677 assert(!object->internal || offset < object->size);
678
679 /* only insert "pageout" pages into "pageout" objects,
680 * and normal pages into normal objects */
681 assert(object->pageout == mem->pageout);
682
683 /*
684 * Record the object/offset pair in this page
685 */
686
687 mem->object = object;
688 mem->offset = offset;
689
690 /*
691 * Insert it into the object/offset hash table
692 */
693
694 bucket = &vm_page_buckets[vm_page_hash(object, offset)];
695 simple_lock(&vm_page_bucket_lock);
696 mem->next = bucket->pages;
697 bucket->pages = mem;
698 #if MACH_PAGE_HASH_STATS
699 if (++bucket->cur_count > bucket->hi_count)
700 bucket->hi_count = bucket->cur_count;
701 #endif /* MACH_PAGE_HASH_STATS */
702 simple_unlock(&vm_page_bucket_lock);
703
704 /*
705 * Now link into the object's list of backed pages.
706 */
707
708 queue_enter(&object->memq, mem, vm_page_t, listq);
709 mem->tabled = TRUE;
710
711 /*
712 * Show that the object has one more resident page.
713 */
714
715 object->resident_page_count++;
716 }
717
718 /*
719 * vm_page_replace:
720 *
721 * Exactly like vm_page_insert, except that we first
722 * remove any existing page at the given offset in object.
723 *
724 * The object and page queues must be locked.
725 */
726
727 void
728 vm_page_replace(
729 register vm_page_t mem,
730 register vm_object_t object,
731 register vm_object_offset_t offset)
732 {
733 register vm_page_bucket_t *bucket;
734
735 VM_PAGE_CHECK(mem);
736
737 if (mem->tabled)
738 panic("vm_page_replace");
739
740 /*
741 * Record the object/offset pair in this page
742 */
743
744 mem->object = object;
745 mem->offset = offset;
746
747 /*
748 * Insert it into the object/offset hash table,
749 * replacing any page that might have been there.
750 */
751
752 bucket = &vm_page_buckets[vm_page_hash(object, offset)];
753 simple_lock(&vm_page_bucket_lock);
754 if (bucket->pages) {
755 vm_page_t *mp = &bucket->pages;
756 register vm_page_t m = *mp;
757 do {
758 if (m->object == object && m->offset == offset) {
759 /*
760 * Remove page from bucket and from object,
761 * and return it to the free list.
762 */
763 *mp = m->next;
764 queue_remove(&object->memq, m, vm_page_t,
765 listq);
766 m->tabled = FALSE;
767 object->resident_page_count--;
768
769 /*
770 * Return page to the free list.
771 * Note the page is not tabled now, so this
772 * won't self-deadlock on the bucket lock.
773 */
774
775 vm_page_free(m);
776 break;
777 }
778 mp = &m->next;
779 } while ((m = *mp) != VM_PAGE_NULL);
780 mem->next = bucket->pages;
781 } else {
782 mem->next = VM_PAGE_NULL;
783 }
784 bucket->pages = mem;
785 simple_unlock(&vm_page_bucket_lock);
786
787 /*
788 * Now link into the object's list of backed pages.
789 */
790
791 queue_enter(&object->memq, mem, vm_page_t, listq);
792 mem->tabled = TRUE;
793
794 /*
795 * And show that the object has one more resident
796 * page.
797 */
798
799 object->resident_page_count++;
800 }
801
802 /*
803 * vm_page_remove: [ internal use only ]
804 *
805 * Removes the given mem entry from the object/offset-page
806 * table and the object page list.
807 *
808 * The object and page must be locked.
809 */
810
811 void
812 vm_page_remove(
813 register vm_page_t mem)
814 {
815 register vm_page_bucket_t *bucket;
816 register vm_page_t this;
817
818 XPR(XPR_VM_PAGE,
819 "vm_page_remove, object 0x%X offset 0x%X page 0x%X\n",
820 (integer_t)mem->object, (integer_t)mem->offset,
821 (integer_t)mem, 0,0);
822
823 assert(mem->tabled);
824 assert(!mem->cleaning);
825 VM_PAGE_CHECK(mem);
826
827 /*
828 * Remove from the object/offset hash table
829 */
830
831 bucket = &vm_page_buckets[vm_page_hash(mem->object, mem->offset)];
832 simple_lock(&vm_page_bucket_lock);
833 if ((this = bucket->pages) == mem) {
834 /* optimize for common case */
835
836 bucket->pages = mem->next;
837 } else {
838 register vm_page_t *prev;
839
840 for (prev = &this->next;
841 (this = *prev) != mem;
842 prev = &this->next)
843 continue;
844 *prev = this->next;
845 }
846 #if MACH_PAGE_HASH_STATS
847 bucket->cur_count--;
848 #endif /* MACH_PAGE_HASH_STATS */
849 simple_unlock(&vm_page_bucket_lock);
850
851 /*
852 * Now remove from the object's list of backed pages.
853 */
854
855 queue_remove(&mem->object->memq, mem, vm_page_t, listq);
856
857 /*
858 * And show that the object has one fewer resident
859 * page.
860 */
861
862 mem->object->resident_page_count--;
863
864 mem->tabled = FALSE;
865 mem->object = VM_OBJECT_NULL;
866 mem->offset = 0;
867 }
868
869 /*
870 * vm_page_lookup:
871 *
872 * Returns the page associated with the object/offset
873 * pair specified; if none is found, VM_PAGE_NULL is returned.
874 *
875 * The object must be locked. No side effects.
876 */
877
878 vm_page_t
879 vm_page_lookup(
880 register vm_object_t object,
881 register vm_object_offset_t offset)
882 {
883 register vm_page_t mem;
884 register vm_page_bucket_t *bucket;
885
886 /*
887 * Search the hash table for this object/offset pair
888 */
889
890 bucket = &vm_page_buckets[vm_page_hash(object, offset)];
891
892 simple_lock(&vm_page_bucket_lock);
893 for (mem = bucket->pages; mem != VM_PAGE_NULL; mem = mem->next) {
894 VM_PAGE_CHECK(mem);
895 if ((mem->object == object) && (mem->offset == offset))
896 break;
897 }
898 simple_unlock(&vm_page_bucket_lock);
899 return(mem);
900 }
901
902 /*
903 * vm_page_rename:
904 *
905 * Move the given memory entry from its
906 * current object to the specified target object/offset.
907 *
908 * The object must be locked.
909 */
910 void
911 vm_page_rename(
912 register vm_page_t mem,
913 register vm_object_t new_object,
914 vm_object_offset_t new_offset)
915 {
916 assert(mem->object != new_object);
917 /*
918 * Changes to mem->object require the page lock because
919 * the pageout daemon uses that lock to get the object.
920 */
921
922 XPR(XPR_VM_PAGE,
923 "vm_page_rename, new object 0x%X, offset 0x%X page 0x%X\n",
924 (integer_t)new_object, (integer_t)new_offset,
925 (integer_t)mem, 0,0);
926
927 vm_page_lock_queues();
928 vm_page_remove(mem);
929 vm_page_insert(mem, new_object, new_offset);
930 vm_page_unlock_queues();
931 }
932
933 /*
934 * vm_page_init:
935 *
936 * Initialize the fields in a new page.
937 * This takes a structure with random values and initializes it
938 * so that it can be given to vm_page_release or vm_page_insert.
939 */
940 void
941 vm_page_init(
942 vm_page_t mem,
943 vm_offset_t phys_addr)
944 {
945 *mem = vm_page_template;
946 mem->phys_addr = phys_addr;
947 }
948
949 /*
950 * vm_page_grab_fictitious:
951 *
952 * Remove a fictitious page from the free list.
953 * Returns VM_PAGE_NULL if there are no free pages.
954 */
955 int c_vm_page_grab_fictitious = 0;
956 int c_vm_page_release_fictitious = 0;
957 int c_vm_page_more_fictitious = 0;
958
959 vm_page_t
960 vm_page_grab_fictitious(void)
961 {
962 register vm_page_t m;
963
964 m = (vm_page_t)zget(vm_page_zone);
965 if (m) {
966 m->free = FALSE;
967 vm_page_init(m, vm_page_fictitious_addr);
968 m->fictitious = TRUE;
969 }
970
971 c_vm_page_grab_fictitious++;
972 return m;
973 }
974
975 /*
976 * vm_page_release_fictitious:
977 *
978 * Release a fictitious page to the free list.
979 */
980
981 void
982 vm_page_release_fictitious(
983 register vm_page_t m)
984 {
985 assert(!m->free);
986 assert(m->busy);
987 assert(m->fictitious);
988 assert(m->phys_addr == vm_page_fictitious_addr);
989
990 c_vm_page_release_fictitious++;
991
992 if (m->free)
993 panic("vm_page_release_fictitious");
994 m->free = TRUE;
995 zfree(vm_page_zone, (vm_offset_t)m);
996 }
997
998 /*
999 * vm_page_more_fictitious:
1000 *
1001 * Add more fictitious pages to the free list.
1002 * Allowed to block. This routine is way intimate
1003 * with the zones code, for several reasons:
1004 * 1. we need to carve some page structures out of physical
1005 * memory before zones work, so they _cannot_ come from
1006 * the zone_map.
1007 * 2. the zone needs to be collectable in order to prevent
1008 * growth without bound. These structures are used by
1009 * the device pager (by the hundreds and thousands), as
1010 * private pages for pageout, and as blocking pages for
1011 * pagein. Temporary bursts in demand should not result in
1012 * permanent allocation of a resource.
1013 * 3. To smooth allocation humps, we allocate single pages
1014 * with kernel_memory_allocate(), and cram them into the
1015 * zone. This also allows us to initialize the vm_page_t's
1016 * on the way into the zone, so that zget() always returns
1017 * an initialized structure. The zone free element pointer
1018 * and the free page pointer are both the first item in the
1019 * vm_page_t.
1020 * 4. By having the pages in the zone pre-initialized, we need
1021 * not keep 2 levels of lists. The garbage collector simply
1022 * scans our list, and reduces physical memory usage as it
1023 * sees fit.
1024 */
1025
1026 void vm_page_more_fictitious(void)
1027 {
1028 extern vm_map_t zone_map;
1029 register vm_page_t m;
1030 vm_offset_t addr;
1031 kern_return_t retval;
1032 int i;
1033
1034 c_vm_page_more_fictitious++;
1035
1036 /*
1037 * Allocate a single page from the zone_map. Do not wait if no physical
1038 * pages are immediately available, and do not zero the space. We need
1039 * our own blocking lock here to prevent having multiple,
1040 * simultaneous requests from piling up on the zone_map lock. Exactly
1041 * one (of our) threads should be potentially waiting on the map lock.
1042 * If winner is not vm-privileged, then the page allocation will fail,
1043 * and it will temporarily block here in the vm_page_wait().
1044 */
1045 mutex_lock(&vm_page_alloc_lock);
1046 /*
1047 * If another thread allocated space, just bail out now.
1048 */
1049 if (zone_free_count(vm_page_zone) > 5) {
1050 /*
1051 * The number "5" is a small number that is larger than the
1052 * number of fictitious pages that any single caller will
1053 * attempt to allocate. Otherwise, a thread will attempt to
1054 * acquire a fictitious page (vm_page_grab_fictitious), fail,
1055 * release all of the resources and locks already acquired,
1056 * and then call this routine. This routine finds the pages
1057 * that the caller released, and so fails to allocate new space.
1058 * The process repeats infinitely. The largest known number
1059 * of fictitious pages required in this manner is 2. 5 is
1060 * simply a somewhat larger number.
1061 */
1062 mutex_unlock(&vm_page_alloc_lock);
1063 return;
1064 }
1065
1066 if ((retval = kernel_memory_allocate(zone_map,
1067 &addr, PAGE_SIZE, VM_PROT_ALL,
1068 KMA_KOBJECT|KMA_NOPAGEWAIT)) != KERN_SUCCESS) {
1069 /*
1070 * No page was available. Tell the pageout daemon, drop the
1071 * lock to give another thread a chance at it, and
1072 * wait for the pageout daemon to make progress.
1073 */
1074 mutex_unlock(&vm_page_alloc_lock);
1075 vm_page_wait(THREAD_UNINT);
1076 return;
1077 }
1078 /*
1079 * Initialize as many vm_page_t's as will fit on this page. This
1080 * depends on the zone code disturbing ONLY the first item of
1081 * each zone element.
1082 */
1083 m = (vm_page_t)addr;
1084 for (i = PAGE_SIZE/sizeof(struct vm_page); i > 0; i--) {
1085 vm_page_init(m, vm_page_fictitious_addr);
1086 m->fictitious = TRUE;
1087 m++;
1088 }
1089 zcram(vm_page_zone, addr, PAGE_SIZE);
1090 mutex_unlock(&vm_page_alloc_lock);
1091 }
1092
1093 /*
1094 * vm_page_convert:
1095 *
1096 * Attempt to convert a fictitious page into a real page.
1097 */
1098
1099 boolean_t
1100 vm_page_convert(
1101 register vm_page_t m)
1102 {
1103 register vm_page_t real_m;
1104
1105 assert(m->busy);
1106 assert(m->fictitious);
1107 assert(!m->dirty);
1108
1109 real_m = vm_page_grab();
1110 if (real_m == VM_PAGE_NULL)
1111 return FALSE;
1112
1113 m->phys_addr = real_m->phys_addr;
1114 m->fictitious = FALSE;
1115
1116 vm_page_lock_queues();
1117 m->no_isync = TRUE;
1118 real_m->no_isync = FALSE;
1119 if (m->active)
1120 vm_page_active_count++;
1121 else if (m->inactive)
1122 vm_page_inactive_count++;
1123 vm_page_unlock_queues();
1124
1125 real_m->phys_addr = vm_page_fictitious_addr;
1126 real_m->fictitious = TRUE;
1127
1128 vm_page_release_fictitious(real_m);
1129 return TRUE;
1130 }
1131
1132 /*
1133 * vm_pool_low():
1134 *
1135 * Return true if it is not likely that a non-vm_privileged thread
1136 * can get memory without blocking. Advisory only, since the
1137 * situation may change under us.
1138 */
1139 int
1140 vm_pool_low(void)
1141 {
1142 /* No locking, at worst we will fib. */
1143 return( vm_page_free_count < vm_page_free_reserved );
1144 }
1145
1146 /*
1147 * vm_page_grab:
1148 *
1149 * Remove a page from the free list.
1150 * Returns VM_PAGE_NULL if the free list is too small.
1151 */
1152
1153 unsigned long vm_page_grab_count = 0; /* measure demand */
1154
1155 vm_page_t
1156 vm_page_grab(void)
1157 {
1158 register vm_page_t mem;
1159
1160 mutex_lock(&vm_page_queue_free_lock);
1161 vm_page_grab_count++;
1162
1163 /*
1164 * Optionally produce warnings if the wire or gobble
1165 * counts exceed some threshold.
1166 */
1167 if (vm_page_wire_count_warning > 0
1168 && vm_page_wire_count >= vm_page_wire_count_warning) {
1169 printf("mk: vm_page_grab(): high wired page count of %d\n",
1170 vm_page_wire_count);
1171 assert(vm_page_wire_count < vm_page_wire_count_warning);
1172 }
1173 if (vm_page_gobble_count_warning > 0
1174 && vm_page_gobble_count >= vm_page_gobble_count_warning) {
1175 printf("mk: vm_page_grab(): high gobbled page count of %d\n",
1176 vm_page_gobble_count);
1177 assert(vm_page_gobble_count < vm_page_gobble_count_warning);
1178 }
1179
1180 /*
1181 * Only let privileged threads (involved in pageout)
1182 * dip into the reserved pool.
1183 */
1184
1185 if ((vm_page_free_count < vm_page_free_reserved) &&
1186 !current_thread()->vm_privilege) {
1187 mutex_unlock(&vm_page_queue_free_lock);
1188 mem = VM_PAGE_NULL;
1189 goto wakeup_pageout;
1190 }
1191
1192 while (vm_page_queue_free == VM_PAGE_NULL) {
1193 printf("vm_page_grab: no free pages, trouble expected...\n");
1194 mutex_unlock(&vm_page_queue_free_lock);
1195 VM_PAGE_WAIT();
1196 mutex_lock(&vm_page_queue_free_lock);
1197 }
1198
1199 if (--vm_page_free_count < vm_page_free_count_minimum)
1200 vm_page_free_count_minimum = vm_page_free_count;
1201 mem = vm_page_queue_free;
1202 vm_page_queue_free = (vm_page_t) mem->pageq.next;
1203 mem->free = FALSE;
1204 mem->no_isync = TRUE;
1205 mutex_unlock(&vm_page_queue_free_lock);
1206
1207 /*
1208 * Decide if we should poke the pageout daemon.
1209 * We do this if the free count is less than the low
1210 * water mark, or if the free count is less than the high
1211 * water mark (but above the low water mark) and the inactive
1212 * count is less than its target.
1213 *
1214 * We don't have the counts locked ... if they change a little,
1215 * it doesn't really matter.
1216 */
1217
1218 wakeup_pageout:
1219 if ((vm_page_free_count < vm_page_free_min) ||
1220 ((vm_page_free_count < vm_page_free_target) &&
1221 (vm_page_inactive_count < vm_page_inactive_target)))
1222 thread_wakeup((event_t) &vm_page_free_wanted);
1223
1224 // dbgLog(mem->phys_addr, vm_page_free_count, vm_page_wire_count, 4); /* (TEST/DEBUG) */
1225
1226 return mem;
1227 }
1228
1229 /*
1230 * vm_page_release:
1231 *
1232 * Return a page to the free list.
1233 */
1234
1235 void
1236 vm_page_release(
1237 register vm_page_t mem)
1238 {
1239 assert(!mem->private && !mem->fictitious);
1240
1241 // dbgLog(mem->phys_addr, vm_page_free_count, vm_page_wire_count, 5); /* (TEST/DEBUG) */
1242
1243 mutex_lock(&vm_page_queue_free_lock);
1244 if (mem->free)
1245 panic("vm_page_release");
1246 mem->free = TRUE;
1247 mem->pageq.next = (queue_entry_t) vm_page_queue_free;
1248 vm_page_queue_free = mem;
1249 vm_page_free_count++;
1250
1251 /*
1252 * Check if we should wake up someone waiting for a page.
1253 * But don't bother waking them unless they can allocate.
1254 *
1255 * We wake up only one thread, to prevent starvation.
1256 * Because the scheduling system handles wait queues FIFO,
1257 * if we wake up all waiting threads, one greedy thread
1258 * can starve multiple niceguy threads. When the threads
1259 * all wake up, the greedy thread runs first, grabs the page,
1260 * and waits for another page. It will be the first to run
1261 * when the next page is freed.
1262 *
1263 * However, there is a slight danger here.
1264 * The thread we wake might not use the free page.
1265 * Then the other threads could wait indefinitely
1266 * while the page goes unused. To forestall this,
1267 * the pageout daemon will keep making free pages
1268 * as long as vm_page_free_wanted is non-zero.
1269 */
1270
1271 if ((vm_page_free_wanted > 0) &&
1272 (vm_page_free_count >= vm_page_free_reserved)) {
1273 vm_page_free_wanted--;
1274 thread_wakeup_one((event_t) &vm_page_free_count);
1275 }
1276
1277 mutex_unlock(&vm_page_queue_free_lock);
1278 }
1279
1280 /*
1281 * vm_page_wait:
1282 *
1283 * Wait for a page to become available.
1284 * If there are plenty of free pages, then we don't sleep.
1285 *
1286 * Returns:
1287 * TRUE: There may be another page, try again
1288 * FALSE: We were interrupted out of our wait, don't try again
1289 */
1290
1291 boolean_t
1292 vm_page_wait(
1293 int interruptible )
1294 {
1295 /*
1296 * We can't use vm_page_free_reserved to make this
1297 * determination. Consider: some thread might
1298 * need to allocate two pages. The first allocation
1299 * succeeds, the second fails. After the first page is freed,
1300 * a call to vm_page_wait must really block.
1301 */
1302 kern_return_t wait_result;
1303 int need_wakeup = 0;
1304
1305 mutex_lock(&vm_page_queue_free_lock);
1306 if (vm_page_free_count < vm_page_free_target) {
1307 if (vm_page_free_wanted++ == 0)
1308 need_wakeup = 1;
1309 assert_wait((event_t)&vm_page_free_count, interruptible);
1310 mutex_unlock(&vm_page_queue_free_lock);
1311 counter(c_vm_page_wait_block++);
1312
1313 if (need_wakeup)
1314 thread_wakeup((event_t)&vm_page_free_wanted);
1315 wait_result = thread_block((void (*)(void))0);
1316
1317 return(wait_result == THREAD_AWAKENED);
1318 } else {
1319 mutex_unlock(&vm_page_queue_free_lock);
1320 return TRUE;
1321 }
1322 }
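/*
 * A typical unprivileged caller loops over vm_page_grab() and
 * vm_page_wait(), as vm_page_part_zero_fill() does below:
 *
 *	while ((mem = vm_page_grab()) == VM_PAGE_NULL)
 *		vm_page_wait(THREAD_UNINT);
 */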
1323
1324 /*
1325 * vm_page_alloc:
1326 *
1327 * Allocate and return a memory cell associated
1328 * with this VM object/offset pair.
1329 *
1330 * Object must be locked.
1331 */
1332
1333 vm_page_t
1334 vm_page_alloc(
1335 vm_object_t object,
1336 vm_object_offset_t offset)
1337 {
1338 register vm_page_t mem;
1339
1340 mem = vm_page_grab();
1341 if (mem == VM_PAGE_NULL)
1342 return VM_PAGE_NULL;
1343
1344 vm_page_insert(mem, object, offset);
1345
1346 return(mem);
1347 }
1348
1349 counter(unsigned int c_laundry_pages_freed = 0;)
1350
1351 int vm_pagein_cluster_unused = 0;
1352 boolean_t vm_page_free_verify = FALSE;
1353 /*
1354 * vm_page_free:
1355 *
1356 * Returns the given page to the free list,
1357 * disassociating it from any VM object.
1358 *
1359 * Object and page queues must be locked prior to entry.
1360 */
1361 void
1362 vm_page_free(
1363 register vm_page_t mem)
1364 {
1365 vm_object_t object = mem->object;
1366
1367 assert(!mem->free);
1368 assert(!mem->cleaning);
1369 assert(!mem->pageout);
1370 assert(!vm_page_free_verify || pmap_verify_free(mem->phys_addr));
1371
1372 if (mem->tabled)
1373 vm_page_remove(mem); /* clears tabled, object, offset */
1374 VM_PAGE_QUEUES_REMOVE(mem); /* clears active or inactive */
1375
1376 if (mem->clustered) {
1377 mem->clustered = FALSE;
1378 vm_pagein_cluster_unused++;
1379 }
1380
1381 if (mem->wire_count) {
1382 if (!mem->private && !mem->fictitious)
1383 vm_page_wire_count--;
1384 mem->wire_count = 0;
1385 assert(!mem->gobbled);
1386 } else if (mem->gobbled) {
1387 if (!mem->private && !mem->fictitious)
1388 vm_page_wire_count--;
1389 vm_page_gobble_count--;
1390 }
1391 mem->gobbled = FALSE;
1392
1393 if (mem->laundry) {
1394 extern int vm_page_laundry_min;
1395 vm_page_laundry_count--;
1396 mem->laundry = FALSE; /* laundry is now clear */
1397 counter(++c_laundry_pages_freed);
1398 if (vm_page_laundry_count < vm_page_laundry_min) {
1399 vm_page_laundry_min = 0;
1400 thread_wakeup((event_t) &vm_page_laundry_count);
1401 }
1402 }
1403
1404 mem->discard_request = FALSE;
1405
1406 PAGE_WAKEUP(mem); /* clears wanted */
1407
1408 if (mem->absent)
1409 vm_object_absent_release(object);
1410
1411 /* Some of these may be unnecessary */
1412 mem->page_lock = 0;
1413 mem->unlock_request = 0;
1414 mem->busy = TRUE;
1415 mem->absent = FALSE;
1416 mem->error = FALSE;
1417 mem->dirty = FALSE;
1418 mem->precious = FALSE;
1419 mem->reference = FALSE;
1420
1421 mem->page_error = KERN_SUCCESS;
1422
1423 if (mem->private) {
1424 mem->private = FALSE;
1425 mem->fictitious = TRUE;
1426 mem->phys_addr = vm_page_fictitious_addr;
1427 }
1428 if (mem->fictitious) {
1429 vm_page_release_fictitious(mem);
1430 } else {
1431 vm_page_init(mem, mem->phys_addr);
1432 vm_page_release(mem);
1433 }
1434 }
1435
1436 /*
1437 * vm_page_wire:
1438 *
1439 * Mark this page as wired down by yet
1440 * another map, removing it from paging queues
1441 * as necessary.
1442 *
1443 * The page's object and the page queues must be locked.
1444 */
1445 void
1446 vm_page_wire(
1447 register vm_page_t mem)
1448 {
1449
1450 // dbgLog(current_act(), mem->offset, mem->object, 1); /* (TEST/DEBUG) */
1451
1452 VM_PAGE_CHECK(mem);
1453
1454 if (mem->wire_count == 0) {
1455 VM_PAGE_QUEUES_REMOVE(mem);
1456 if (!mem->private && !mem->fictitious && !mem->gobbled)
1457 vm_page_wire_count++;
1458 if (mem->gobbled)
1459 vm_page_gobble_count--;
1460 mem->gobbled = FALSE;
1461 }
1462 assert(!mem->gobbled);
1463 mem->wire_count++;
1464 }
1465
1466 /*
1467 * vm_page_gobble:
1468 *
1469 * Mark this page as consumed by the vm/ipc/xmm subsystems.
1470 *
1471 * Called only for freshly vm_page_grab()ed pages - w/ nothing locked.
1472 */
1473 void
1474 vm_page_gobble(
1475 register vm_page_t mem)
1476 {
1477 vm_page_lock_queues();
1478 VM_PAGE_CHECK(mem);
1479
1480 assert(!mem->gobbled);
1481 assert(mem->wire_count == 0);
1482
1483 if (!mem->gobbled && mem->wire_count == 0) {
1484 if (!mem->private && !mem->fictitious)
1485 vm_page_wire_count++;
1486 }
1487 vm_page_gobble_count++;
1488 mem->gobbled = TRUE;
1489 vm_page_unlock_queues();
1490 }
1491
1492 /*
1493 * vm_page_unwire:
1494 *
1495 * Release one wiring of this page, potentially
1496 * enabling it to be paged again.
1497 *
1498 * The page's object and the page queues must be locked.
1499 */
1500 void
1501 vm_page_unwire(
1502 register vm_page_t mem)
1503 {
1504
1505 // dbgLog(current_act(), mem->offset, mem->object, 0); /* (TEST/DEBUG) */
1506
1507 VM_PAGE_CHECK(mem);
1508 assert(mem->wire_count > 0);
1509
1510 if (--mem->wire_count == 0) {
1511 assert(!mem->private && !mem->fictitious);
1512 vm_page_wire_count--;
1513 queue_enter(&vm_page_queue_active, mem, vm_page_t, pageq);
1514 vm_page_active_count++;
1515 mem->active = TRUE;
1516 mem->reference = TRUE;
1517 }
1518 }
1519
1520 /*
1521 * vm_page_deactivate:
1522 *
1523 * Returns the given page to the inactive list,
1524 * indicating that no physical maps have access
1525 * to this page. [Used by the physical mapping system.]
1526 *
1527 * The page queues must be locked.
1528 */
1529 void
1530 vm_page_deactivate(
1531 register vm_page_t m)
1532 {
1533 VM_PAGE_CHECK(m);
1534
1535 // dbgLog(m->phys_addr, vm_page_free_count, vm_page_wire_count, 6); /* (TEST/DEBUG) */
1536
1537 /*
1538 * This page is no longer very interesting. If it was
1539 * interesting (active or inactive/referenced), then we
1540 * clear the reference bit and (re)enter it in the
1541 * inactive queue. Note wired pages should not have
1542 * their reference bit cleared.
1543 */
1544 if (m->gobbled) { /* can this happen? */
1545 assert(m->wire_count == 0);
1546 if (!m->private && !m->fictitious)
1547 vm_page_wire_count--;
1548 vm_page_gobble_count--;
1549 m->gobbled = FALSE;
1550 }
1551 if (m->private || (m->wire_count != 0))
1552 return;
1553 if (m->active || (m->inactive && m->reference)) {
1554 if (!m->fictitious && !m->absent)
1555 pmap_clear_reference(m->phys_addr);
1556 m->reference = FALSE;
1557 VM_PAGE_QUEUES_REMOVE(m);
1558 }
1559 if (m->wire_count == 0 && !m->inactive) {
1560 m->page_ticket = vm_page_ticket;
1561 vm_page_ticket_roll++;
1562
1563 if(vm_page_ticket_roll == VM_PAGE_TICKETS_IN_ROLL) {
1564 vm_page_ticket_roll = 0;
1565 if(vm_page_ticket == VM_PAGE_TICKET_ROLL_IDS)
1566 vm_page_ticket= 0;
1567 else
1568 vm_page_ticket++;
1569 }
1570
1571 queue_enter(&vm_page_queue_inactive, m, vm_page_t, pageq);
1572 m->inactive = TRUE;
1573 if (!m->fictitious)
1574 vm_page_inactive_count++;
1575 }
1576 }
1577
1578 /*
1579 * vm_page_activate:
1580 *
1581 * Put the specified page on the active list (if appropriate).
1582 *
1583 * The page queues must be locked.
1584 */
1585
1586 void
1587 vm_page_activate(
1588 register vm_page_t m)
1589 {
1590 VM_PAGE_CHECK(m);
1591
1592 if (m->gobbled) {
1593 assert(m->wire_count == 0);
1594 if (!m->private && !m->fictitious)
1595 vm_page_wire_count--;
1596 vm_page_gobble_count--;
1597 m->gobbled = FALSE;
1598 }
1599 if (m->private)
1600 return;
1601
1602 if (m->inactive) {
1603 queue_remove(&vm_page_queue_inactive, m, vm_page_t, pageq);
1604 if (!m->fictitious)
1605 vm_page_inactive_count--;
1606 m->inactive = FALSE;
1607 }
1608 if (m->wire_count == 0) {
1609 if (m->active)
1610 panic("vm_page_activate: already active");
1611
1612 queue_enter(&vm_page_queue_active, m, vm_page_t, pageq);
1613 m->active = TRUE;
1614 m->reference = TRUE;
1615 if (!m->fictitious)
1616 vm_page_active_count++;
1617 }
1618 }
1619
1620 /*
1621 * vm_page_part_zero_fill:
1622 *
1623 * Zero-fill a part of the page.
1624 */
1625 void
1626 vm_page_part_zero_fill(
1627 vm_page_t m,
1628 vm_offset_t m_pa,
1629 vm_size_t len)
1630 {
1631 vm_page_t tmp;
1632
1633 VM_PAGE_CHECK(m);
1634 #ifdef PMAP_ZERO_PART_PAGE_IMPLEMENTED
1635 pmap_zero_part_page(m->phys_addr, m_pa, len);
1636 #else
1637 while (1) {
1638 tmp = vm_page_grab();
1639 if (tmp == VM_PAGE_NULL) {
1640 vm_page_wait(THREAD_UNINT);
1641 continue;
1642 }
1643 break;
1644 }
1645 vm_page_zero_fill(tmp);
1646 if(m_pa != 0) {
1647 vm_page_part_copy(m, 0, tmp, 0, m_pa);
1648 }
1649 if((m_pa + len) < PAGE_SIZE) {
1650 vm_page_part_copy(m, m_pa + len, tmp,
1651 m_pa + len, PAGE_SIZE - (m_pa + len));
1652 }
1653 vm_page_copy(tmp,m);
1654 vm_page_lock_queues();
1655 vm_page_free(tmp);
1656 vm_page_unlock_queues();
1657 #endif
1658
1659 }
1660
1661 /*
1662 * vm_page_zero_fill:
1663 *
1664 * Zero-fill the specified page.
1665 */
1666 void
1667 vm_page_zero_fill(
1668 vm_page_t m)
1669 {
1670 XPR(XPR_VM_PAGE,
1671 "vm_page_zero_fill, object 0x%X offset 0x%X page 0x%X\n",
1672 (integer_t)m->object, (integer_t)m->offset, (integer_t)m, 0,0);
1673
1674 VM_PAGE_CHECK(m);
1675
1676 pmap_zero_page(m->phys_addr);
1677 }
1678
1679 /*
1680 * vm_page_part_copy:
1681 *
1682 * copy part of one page to another
1683 */
1684
1685 void
1686 vm_page_part_copy(
1687 vm_page_t src_m,
1688 vm_offset_t src_pa,
1689 vm_page_t dst_m,
1690 vm_offset_t dst_pa,
1691 vm_size_t len)
1692 {
1693 VM_PAGE_CHECK(src_m);
1694 VM_PAGE_CHECK(dst_m);
1695
1696 pmap_copy_part_page(src_m->phys_addr, src_pa,
1697 dst_m->phys_addr, dst_pa, len);
1698 }
1699
1700 /*
1701 * vm_page_copy:
1702 *
1703 * Copy one page to another
1704 */
1705
1706 void
1707 vm_page_copy(
1708 vm_page_t src_m,
1709 vm_page_t dest_m)
1710 {
1711 XPR(XPR_VM_PAGE,
1712 "vm_page_copy, object 0x%X offset 0x%X to object 0x%X offset 0x%X\n",
1713 (integer_t)src_m->object, src_m->offset,
1714 (integer_t)dest_m->object, dest_m->offset,
1715 0);
1716
1717 VM_PAGE_CHECK(src_m);
1718 VM_PAGE_CHECK(dest_m);
1719
1720 pmap_copy_page(src_m->phys_addr, dest_m->phys_addr);
1721 }
1722
1723 /*
1724 * Currently, this is a primitive allocator that grabs
1725 * free pages from the system, sorts them by physical
1726 * address, then searches for a region large enough to
1727 * satisfy the user's request.
1728 *
1729 * Additional levels of effort:
1730 * + steal clean active/inactive pages
1731 * + force pageouts of dirty pages
1732 * + maintain a map of available physical
1733 * memory
1734 */
1735
1736 #define SET_NEXT_PAGE(m,n) ((m)->pageq.next = (struct queue_entry *) (n))
1737
1738 #if MACH_ASSERT
1739 int vm_page_verify_contiguous(
1740 vm_page_t pages,
1741 unsigned int npages);
1742 #endif /* MACH_ASSERT */
1743
1744 cpm_counter(unsigned int vpfls_pages_handled = 0;)
1745 cpm_counter(unsigned int vpfls_head_insertions = 0;)
1746 cpm_counter(unsigned int vpfls_tail_insertions = 0;)
1747 cpm_counter(unsigned int vpfls_general_insertions = 0;)
1748 cpm_counter(unsigned int vpfc_failed = 0;)
1749 cpm_counter(unsigned int vpfc_satisfied = 0;)
1750
1751 /*
1752 * Sort free list by ascending physical address,
1753 * using a not-particularly-bright sort algorithm.
1754 * Caller holds vm_page_queue_free_lock.
1755 */
1756 static void
1757 vm_page_free_list_sort(void)
1758 {
1759 vm_page_t sort_list;
1760 vm_page_t sort_list_end;
1761 vm_page_t m, m1, *prev, next_m;
1762 vm_offset_t addr;
1763 #if MACH_ASSERT
1764 unsigned int npages;
1765 int old_free_count;
1766 #endif /* MACH_ASSERT */
1767
1768 #if MACH_ASSERT
1769 /*
1770 * Verify pages in the free list..
1771 */
1772 npages = 0;
1773 for (m = vm_page_queue_free; m != VM_PAGE_NULL; m = NEXT_PAGE(m))
1774 ++npages;
1775 if (npages != vm_page_free_count)
1776 panic("vm_sort_free_list: prelim: npages %d free_count %d",
1777 npages, vm_page_free_count);
1778 old_free_count = vm_page_free_count;
1779 #endif /* MACH_ASSERT */
1780
1781 sort_list = sort_list_end = vm_page_queue_free;
1782 m = NEXT_PAGE(vm_page_queue_free);
1783 SET_NEXT_PAGE(vm_page_queue_free, VM_PAGE_NULL);
1784 cpm_counter(vpfls_pages_handled = 0);
1785 while (m != VM_PAGE_NULL) {
1786 cpm_counter(++vpfls_pages_handled);
1787 next_m = NEXT_PAGE(m);
1788 if (m->phys_addr < sort_list->phys_addr) {
1789 cpm_counter(++vpfls_head_insertions);
1790 SET_NEXT_PAGE(m, sort_list);
1791 sort_list = m;
1792 } else if (m->phys_addr > sort_list_end->phys_addr) {
1793 cpm_counter(++vpfls_tail_insertions);
1794 SET_NEXT_PAGE(sort_list_end, m);
1795 SET_NEXT_PAGE(m, VM_PAGE_NULL);
1796 sort_list_end = m;
1797 } else {
1798 cpm_counter(++vpfls_general_insertions);
1799 /* general sorted list insertion */
1800 prev = &sort_list;
1801 for (m1=sort_list; m1!=VM_PAGE_NULL; m1=NEXT_PAGE(m1)) {
1802 if (m1->phys_addr > m->phys_addr) {
1803 if (*prev != m1)
1804 panic("vm_sort_free_list: ugh");
1805 SET_NEXT_PAGE(m, *prev);
1806 *prev = m;
1807 break;
1808 }
1809 prev = (vm_page_t *) &m1->pageq.next;
1810 }
1811 }
1812 m = next_m;
1813 }
1814
1815 #if MACH_ASSERT
1816 /*
1817 * Verify that pages are sorted into ascending order.
1818 */
1819 for (m = sort_list, npages = 0; m != VM_PAGE_NULL; m = NEXT_PAGE(m)) {
1820 if (m != sort_list &&
1821 m->phys_addr <= addr) {
1822 printf("m 0x%x addr 0x%x\n", m, addr);
1823 panic("vm_sort_free_list");
1824 }
1825 addr = m->phys_addr;
1826 ++npages;
1827 }
1828 if (old_free_count != vm_page_free_count)
1829 panic("vm_sort_free_list: old_free %d free_count %d",
1830 old_free_count, vm_page_free_count);
1831 if (npages != vm_page_free_count)
1832 panic("vm_sort_free_list: npages %d free_count %d",
1833 npages, vm_page_free_count);
1834 #endif /* MACH_ASSERT */
1835
1836 vm_page_queue_free = sort_list;
1837 }
1838
1839
1840 #if MACH_ASSERT
1841 /*
1842 * Check that the list of pages is ordered by
1843 * ascending physical address and has no holes.
1844 */
1845 int
1846 vm_page_verify_contiguous(
1847 vm_page_t pages,
1848 unsigned int npages)
1849 {
1850 register vm_page_t m;
1851 unsigned int page_count;
1852 vm_offset_t prev_addr;
1853
1854 prev_addr = pages->phys_addr;
1855 page_count = 1;
1856 for (m = NEXT_PAGE(pages); m != VM_PAGE_NULL; m = NEXT_PAGE(m)) {
1857 if (m->phys_addr != prev_addr + page_size) {
1858 printf("m 0x%x prev_addr 0x%x, current addr 0x%x\n",
1859 m, prev_addr, m->phys_addr);
1860 printf("pages 0x%x page_count %d\n", pages, page_count);
1861 panic("vm_page_verify_contiguous: not contiguous!");
1862 }
1863 prev_addr = m->phys_addr;
1864 ++page_count;
1865 }
1866 if (page_count != npages) {
1867 printf("pages 0x%x actual count 0x%x but requested 0x%x\n",
1868 pages, page_count, npages);
1869 panic("vm_page_verify_contiguous: count error");
1870 }
1871 return 1;
1872 }
1873 #endif /* MACH_ASSERT */
1874
1875
1876 /*
1877 * Find a region large enough to contain at least npages
1878 * of contiguous physical memory.
1879 *
1880 * Requirements:
1881 * - Called while holding vm_page_queue_free_lock.
1882 * - Doesn't respect vm_page_free_reserved; caller
1883 * must not ask for more pages than are legal to grab.
1884 *
1885 * Returns a pointer to a list of gobbled pages or VM_PAGE_NULL.
1886 *
1887 */
1888 static vm_page_t
1889 vm_page_find_contiguous(
1890 int npages)
1891 {
1892 vm_page_t m, *contig_prev, *prev_ptr;
1893 vm_offset_t prev_addr;
1894 unsigned int contig_npages;
1895 vm_page_t list;
1896
1897 if (npages < 1)
1898 return VM_PAGE_NULL;
1899
1900 prev_addr = vm_page_queue_free->phys_addr - (page_size + 1);
1901 prev_ptr = &vm_page_queue_free;
1902 for (m = vm_page_queue_free; m != VM_PAGE_NULL; m = NEXT_PAGE(m)) {
1903
1904 if (m->phys_addr != prev_addr + page_size) {
1905 /*
1906 * Whoops! Pages aren't contiguous. Start over.
1907 */
1908 contig_npages = 0;
1909 contig_prev = prev_ptr;
1910 }
1911
1912 if (++contig_npages == npages) {
1913 /*
1914 * Chop these pages out of the free list.
1915 * Mark them all as gobbled.
1916 */
1917 list = *contig_prev;
1918 *contig_prev = NEXT_PAGE(m);
1919 SET_NEXT_PAGE(m, VM_PAGE_NULL);
1920 for (m = list; m != VM_PAGE_NULL; m = NEXT_PAGE(m)) {
1921 assert(m->free);
1922 assert(!m->wanted);
1923 m->free = FALSE;
1924 m->gobbled = TRUE;
1925 }
1926 vm_page_free_count -= npages;
1927 if (vm_page_free_count < vm_page_free_count_minimum)
1928 vm_page_free_count_minimum = vm_page_free_count;
1929 vm_page_wire_count += npages;
1930 vm_page_gobble_count += npages;
1931 cpm_counter(++vpfc_satisfied);
1932 assert(vm_page_verify_contiguous(list, contig_npages));
1933 return list;
1934 }
1935
1936 assert(contig_npages < npages);
1937 prev_ptr = (vm_page_t *) &m->pageq.next;
1938 prev_addr = m->phys_addr;
1939 }
1940 cpm_counter(++vpfc_failed);
1941 return VM_PAGE_NULL;
1942 }
1943
1944 /*
1945 * Allocate a list of contiguous, wired pages.
1946 */
1947 kern_return_t
1948 cpm_allocate(
1949 vm_size_t size,
1950 vm_page_t *list,
1951 boolean_t wire)
1952 {
1953 register vm_page_t m;
1954 vm_page_t *first_contig;
1955 vm_page_t free_list, pages;
1956 unsigned int npages, n1pages;
1957 int vm_pages_available;
1958
1959 if (size % page_size != 0)
1960 return KERN_INVALID_ARGUMENT;
1961
1962 vm_page_lock_queues();
1963 mutex_lock(&vm_page_queue_free_lock);
1964
1965 /*
1966 * Should also take active and inactive pages
1967 * into account... One day...
1968 */
1969 vm_pages_available = vm_page_free_count - vm_page_free_reserved;
1970
1971 if (size > vm_pages_available * page_size) {
1972 mutex_unlock(&vm_page_queue_free_lock);
1973 return KERN_RESOURCE_SHORTAGE;
1974 }
1975
1976 vm_page_free_list_sort();
1977
1978 npages = size / page_size;
1979
1980 /*
1981 * Obtain a pointer to a subset of the free
1982 * list large enough to satisfy the request;
1983 * the region will be physically contiguous.
1984 */
1985 pages = vm_page_find_contiguous(npages);
1986 if (pages == VM_PAGE_NULL) {
1987 mutex_unlock(&vm_page_queue_free_lock);
1988 vm_page_unlock_queues();
1989 return KERN_NO_SPACE;
1990 }
1991
1992 mutex_unlock(&vm_page_queue_free_lock);
1993
1994 /*
1995 * Walk the returned list, wiring the pages.
1996 */
1997 if (wire == TRUE)
1998 for (m = pages; m != VM_PAGE_NULL; m = NEXT_PAGE(m)) {
1999 /*
2000 * Essentially inlined vm_page_wire.
2001 */
2002 assert(!m->active);
2003 assert(!m->inactive);
2004 assert(!m->private);
2005 assert(!m->fictitious);
2006 assert(m->wire_count == 0);
2007 assert(m->gobbled);
2008 m->gobbled = FALSE;
2009 m->wire_count++;
2010 --vm_page_gobble_count;
2011 }
2012 vm_page_unlock_queues();
2013
2014 /*
2015 * The CPM pages should now be available and
2016 * ordered by ascending physical address.
2017 */
2018 assert(vm_page_verify_contiguous(pages, npages));
2019
2020 *list = pages;
2021 return KERN_SUCCESS;
2022 }
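/*
 * Illustrative (hypothetical) caller: allocate four wired, physically
 * contiguous pages, e.g. for a DMA buffer:
 *
 *	vm_page_t pages;
 *	if (cpm_allocate(4 * PAGE_SIZE, &pages, TRUE) == KERN_SUCCESS) {
 *		... walk the chain via NEXT_PAGE(pages) ...
 *	}
 */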
2023
2024
2025 #include <mach_vm_debug.h>
2026 #if MACH_VM_DEBUG
2027
2028 #include <mach_debug/hash_info.h>
2029 #include <vm/vm_debug.h>
2030
2031 /*
2032 * Routine: vm_page_info
2033 * Purpose:
2034 * Return information about the global VP table.
2035 * Fills the buffer with as much information as possible
2036 * and returns the desired size of the buffer.
2037 * Conditions:
2038 * Nothing locked. The caller should provide
2039 * possibly-pageable memory.
2040 */
2041
2042 unsigned int
2043 vm_page_info(
2044 hash_info_bucket_t *info,
2045 unsigned int count)
2046 {
2047 int i;
2048
2049 if (vm_page_bucket_count < count)
2050 count = vm_page_bucket_count;
2051
2052 for (i = 0; i < count; i++) {
2053 vm_page_bucket_t *bucket = &vm_page_buckets[i];
2054 unsigned int bucket_count = 0;
2055 vm_page_t m;
2056
2057 simple_lock(&vm_page_bucket_lock);
2058 for (m = bucket->pages; m != VM_PAGE_NULL; m = m->next)
2059 bucket_count++;
2060 simple_unlock(&vm_page_bucket_lock);
2061
2062 /* don't touch pageable memory while holding locks */
2063 info[i].hib_count = bucket_count;
2064 }
2065
2066 return vm_page_bucket_count;
2067 }
2068 #endif /* MACH_VM_DEBUG */
2069
2070 #include <mach_kdb.h>
2071 #if MACH_KDB
2072
2073 #include <ddb/db_output.h>
2074 #include <vm/vm_print.h>
2075 #define printf kdbprintf
2076
2077 /*
2078 * Routine: vm_page_print [exported]
2079 */
2080 void
2081 vm_page_print(
2082 vm_page_t p)
2083 {
2084 extern db_indent;
2085
2086 iprintf("page 0x%x\n", p);
2087
2088 db_indent += 2;
2089
2090 iprintf("object=0x%x", p->object);
2091 printf(", offset=0x%x", p->offset);
2092 printf(", wire_count=%d", p->wire_count);
2093
2094 iprintf("%sinactive, %sactive, %sgobbled, %slaundry, %sfree, %sref, %sdiscard\n",
2095 (p->inactive ? "" : "!"),
2096 (p->active ? "" : "!"),
2097 (p->gobbled ? "" : "!"),
2098 (p->laundry ? "" : "!"),
2099 (p->free ? "" : "!"),
2100 (p->reference ? "" : "!"),
2101 (p->discard_request ? "" : "!"));
2102 iprintf("%sbusy, %swanted, %stabled, %sfictitious, %sprivate, %sprecious\n",
2103 (p->busy ? "" : "!"),
2104 (p->wanted ? "" : "!"),
2105 (p->tabled ? "" : "!"),
2106 (p->fictitious ? "" : "!"),
2107 (p->private ? "" : "!"),
2108 (p->precious ? "" : "!"));
2109 iprintf("%sabsent, %serror, %sdirty, %scleaning, %spageout, %sclustered\n",
2110 (p->absent ? "" : "!"),
2111 (p->error ? "" : "!"),
2112 (p->dirty ? "" : "!"),
2113 (p->cleaning ? "" : "!"),
2114 (p->pageout ? "" : "!"),
2115 (p->clustered ? "" : "!"));
2116 iprintf("%slock_supplied, %soverwriting, %srestart, %sunusual\n",
2117 (p->lock_supplied ? "" : "!"),
2118 (p->overwriting ? "" : "!"),
2119 (p->restart ? "" : "!"),
2120 (p->unusual ? "" : "!"));
2121
2122 iprintf("phys_addr=0x%x", p->phys_addr);
2123 printf(", page_error=0x%x", p->page_error);
2124 printf(", page_lock=0x%x", p->page_lock);
2125 printf(", unlock_request=%d\n", p->unlock_request);
2126
2127 db_indent -= 2;
2128 }
2129 #endif /* MACH_KDB */