/*
 * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */
/*
 *	File:	vm/vm_page.c
 *	Author:	Avadis Tevanian, Jr., Michael Wayne Young
 *
 *	Resident memory management module.
 */

#include <debug.h>

#include <mach/clock_types.h>
#include <mach/vm_prot.h>
#include <mach/vm_statistics.h>
#include <kern/counters.h>
#include <kern/sched_prim.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/zalloc.h>
#include <kern/xpr.h>
#include <vm/pmap.h>
#include <vm/vm_init.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_kern.h>			/* kernel_memory_allocate() */
#include <kern/misc_protos.h>
#include <zone_debug.h>
#include <vm/cpm.h>
#include <ppc/mappings.h>		/* (BRINGUP) */
#include <pexpert/pexpert.h>		/* (BRINGUP) */

#include <vm/vm_protos.h>

/*
 *	Variables used to indicate the relative age of pages in the
 *	inactive list
 */

unsigned int	vm_page_ticket_roll = 0;
unsigned int	vm_page_ticket = 0;
/*
 *	Associated with each page of user-allocatable memory is a
 *	page structure.
 */

/*
 *	These variables record the values returned by vm_page_bootstrap,
 *	for debugging purposes.  The implementation of pmap_steal_memory
 *	and pmap_startup here also uses them internally.
 */

vm_offset_t virtual_space_start;
vm_offset_t virtual_space_end;
int	vm_page_pages;

/*
 *	The vm_page_lookup() routine, which provides for fast
 *	(virtual memory object, offset) to page lookup, employs
 *	the following hash table.  The vm_page_{insert,remove}
 *	routines install and remove associations in the table.
 *	[This table is often called the virtual-to-physical,
 *	or VP, table.]
 */
typedef struct {
    vm_page_t	pages;
#if	MACH_PAGE_HASH_STATS
    int		cur_count;		/* current count */
    int		hi_count;		/* high water mark */
#endif	/* MACH_PAGE_HASH_STATS */
} vm_page_bucket_t;

vm_page_bucket_t *vm_page_buckets;		/* Array of buckets */
unsigned int	vm_page_bucket_count = 0;	/* How big is array? */
unsigned int	vm_page_hash_mask;		/* Mask for hash function */
unsigned int	vm_page_hash_shift;		/* Shift for hash function */
uint32_t	vm_page_bucket_hash;		/* Basic bucket hash */
decl_simple_lock_data(,vm_page_bucket_lock)

vm_page_t
vm_page_lookup_nohint(vm_object_t object, vm_object_offset_t offset);


#if	MACH_PAGE_HASH_STATS
/* This routine is only for debug.  It is intended to be called by
 * hand by a developer using a kernel debugger.  This routine prints
 * out vm_page_hash table statistics to the kernel debug console.
 */
void
hash_debug(void)
{
    int	i;
    int	numbuckets = 0;
    int	highsum = 0;
    int	maxdepth = 0;

    for (i = 0; i < vm_page_bucket_count; i++) {
        if (vm_page_buckets[i].hi_count) {
            numbuckets++;
            highsum += vm_page_buckets[i].hi_count;
            if (vm_page_buckets[i].hi_count > maxdepth)
                maxdepth = vm_page_buckets[i].hi_count;
        }
    }
    printf("Total number of buckets: %d\n", vm_page_bucket_count);
    printf("Number used buckets:     %d = %d%%\n",
        numbuckets, 100*numbuckets/vm_page_bucket_count);
    printf("Number unused buckets:   %d = %d%%\n",
        vm_page_bucket_count - numbuckets,
        100*(vm_page_bucket_count-numbuckets)/vm_page_bucket_count);
    printf("Sum of bucket max depth: %d\n", highsum);
    printf("Average bucket depth:    %d.%2d\n",
        highsum/vm_page_bucket_count,
        highsum%vm_page_bucket_count);
    printf("Maximum bucket depth:    %d\n", maxdepth);
}
#endif	/* MACH_PAGE_HASH_STATS */

/*
 *	The virtual page size is currently implemented as a runtime
 *	variable, but is constant once initialized using vm_set_page_size.
 *	This initialization must be done in the machine-dependent
 *	bootstrap sequence, before calling other machine-independent
 *	initializations.
 *
 *	All references to the virtual page size outside this
 *	module must use the PAGE_SIZE, PAGE_MASK and PAGE_SHIFT
 *	constants.
 */
vm_size_t	page_size  = PAGE_SIZE;
vm_size_t	page_mask  = PAGE_MASK;
int		page_shift = PAGE_SHIFT;

/*
 *	Resident page structures are initialized from
 *	a template (see vm_page_alloc).
 *
 *	When adding a new field to the virtual memory
 *	object structure, be sure to add initialization
 *	(see vm_page_bootstrap).
 */
struct vm_page	vm_page_template;

/*
 *	Resident pages that represent real memory
 *	are allocated from a free list.
 */
vm_page_t	vm_page_queue_free;
vm_page_t	vm_page_queue_fictitious;
unsigned int	vm_page_free_wanted;
unsigned int	vm_page_free_count;
unsigned int	vm_page_fictitious_count;

unsigned int	vm_page_free_count_minimum;	/* debugging */

/*
 *	Occasionally, the virtual memory system uses
 *	resident page structures that do not refer to
 *	real pages, for example to leave a page with
 *	important state information in the VP table.
 *
 *	These page structures are allocated the way
 *	most other kernel structures are.
 */
zone_t	vm_page_zone;
decl_mutex_data(,vm_page_alloc_lock)
unsigned int io_throttle_zero_fill;

/*
 *	Fictitious pages don't have a physical address,
 *	but we must initialize phys_page to something.
 *	For debugging, this should be a strange value
 *	that the pmap module can recognize in assertions.
 */
vm_offset_t vm_page_fictitious_addr = (vm_offset_t) -1;

/*
 *	Resident page structures are also chained on
 *	queues that are used by the page replacement
 *	system (pageout daemon).  These queues are
 *	defined here, but are shared by the pageout
 *	module.  The inactive queue is broken into
 *	inactive and zf for convenience, as the
 *	pageout daemon often assigns a higher
 *	affinity to zf pages.
 */
queue_head_t	vm_page_queue_active;
queue_head_t	vm_page_queue_inactive;
unsigned int	vm_page_active_count;
unsigned int	vm_page_inactive_count;
unsigned int	vm_page_wire_count;
unsigned int	vm_page_gobble_count = 0;
unsigned int	vm_page_wire_count_warning = 0;
unsigned int	vm_page_gobble_count_warning = 0;

unsigned int	vm_page_purgeable_count = 0;	/* # of pages purgeable now */
uint64_t	vm_page_purged_count = 0;	/* total count of purged pages */

/*
 *	Several page replacement parameters are also
 *	shared with this module, so that page allocation
 *	(done here in vm_page_alloc) can trigger the
 *	pageout daemon.
 */
unsigned int	vm_page_free_target = 0;
unsigned int	vm_page_free_min = 0;
unsigned int	vm_page_inactive_target = 0;
unsigned int	vm_page_free_reserved = 0;
unsigned int	vm_page_throttled_count = 0;

/*
 *	The VM system has a couple of heuristics for deciding
 *	that pages are "uninteresting" and should be placed
 *	on the inactive queue as likely candidates for replacement.
 *	These variables let the heuristics be controlled at run-time
 *	to make experimentation easier.
 */

boolean_t vm_page_deactivate_hint = TRUE;

/*
 *	vm_set_page_size:
 *
 *	Sets the page size, perhaps based upon the memory
 *	size.  Must be called before any use of page-size
 *	dependent functions.
 *
 *	Sets page_shift and page_mask from page_size.
 */
void
vm_set_page_size(void)
{
    page_mask = page_size - 1;

    if ((page_mask & page_size) != 0)
        panic("vm_set_page_size: page size not a power of two");

    for (page_shift = 0; ; page_shift++)
        if ((1U << page_shift) == page_size)
            break;
}
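
/*
 *	Note (illustration only, not from the original source): with the
 *	common page_size of 4096 the code above leaves page_mask = 0xFFF
 *	and page_shift = 12, so byte/page conversions reduce to a mask
 *	and a shift.
 */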

/*
 *	vm_page_bootstrap:
 *
 *	Initializes the resident memory module.
 *
 *	Allocates memory for the page cells, and
 *	for the object/offset-to-page hash table headers.
 *	Each page cell is initialized and placed on the free list.
 *	Returns the range of available kernel virtual memory.
 */

void
vm_page_bootstrap(
    vm_offset_t		*startp,
    vm_offset_t		*endp)
{
    register vm_page_t	m;
    unsigned int	i;
    unsigned int	log1;
    unsigned int	log2;
    unsigned int	size;

    /*
     *	Initialize the vm_page template.
     */

    m = &vm_page_template;
    m->object = VM_OBJECT_NULL;		/* reset later */
    m->offset = (vm_object_offset_t) -1;	/* reset later */
    m->wire_count = 0;

    m->pageq.next = NULL;
    m->pageq.prev = NULL;
    m->listq.next = NULL;
    m->listq.prev = NULL;

    m->inactive = FALSE;
    m->active = FALSE;
    m->laundry = FALSE;
    m->free = FALSE;
    m->no_isync = TRUE;
    m->reference = FALSE;
    m->pageout = FALSE;
    m->dump_cleaning = FALSE;
    m->list_req_pending = FALSE;

    m->busy = TRUE;
    m->wanted = FALSE;
    m->tabled = FALSE;
    m->fictitious = FALSE;
    m->private = FALSE;
    m->absent = FALSE;
    m->error = FALSE;
    m->dirty = FALSE;
    m->cleaning = FALSE;
    m->precious = FALSE;
    m->clustered = FALSE;
    m->lock_supplied = FALSE;
    m->unusual = FALSE;
    m->restart = FALSE;
    m->zero_fill = FALSE;
    m->encrypted = FALSE;

    m->phys_page = 0;		/* reset later */

    m->page_lock = VM_PROT_NONE;
    m->unlock_request = VM_PROT_NONE;
    m->page_error = KERN_SUCCESS;

    /*
     *	Initialize the page queues.
     */

    mutex_init(&vm_page_queue_free_lock, 0);
    mutex_init(&vm_page_queue_lock, 0);

    vm_page_queue_free = VM_PAGE_NULL;
    vm_page_queue_fictitious = VM_PAGE_NULL;
    queue_init(&vm_page_queue_active);
    queue_init(&vm_page_queue_inactive);
    queue_init(&vm_page_queue_zf);

    vm_page_free_wanted = 0;

    /*
     *	Steal memory for the map and zone subsystems.
     */

    vm_map_steal_memory();
    zone_steal_memory();
    /*
     *	Allocate (and initialize) the virtual-to-physical
     *	table hash buckets.
     *
     *	The number of buckets should be a power of two to
     *	get a good hash function.  The following computation
     *	chooses the first power of two that is greater
     *	than or equal to the number of physical pages in the system.
     */

    simple_lock_init(&vm_page_bucket_lock, 0);

    if (vm_page_bucket_count == 0) {
        unsigned int npages = pmap_free_pages();

        vm_page_bucket_count = 1;
        while (vm_page_bucket_count < npages)
            vm_page_bucket_count <<= 1;
    }
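    /*
     *	Illustration (assumed numbers): 100000 free pages would leave
     *	vm_page_bucket_count at 131072 (2^17), the first power of two
     *	to reach the page count.
     */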

    vm_page_hash_mask = vm_page_bucket_count - 1;

    /*
     *	Calculate object shift value for hashing algorithm:
     *		O = log2(sizeof(struct vm_object))
     *		B = log2(vm_page_bucket_count)
     *	hash shifts the object left by
     *		B/2 - O
     */
    size = vm_page_bucket_count;
    for (log1 = 0; size > 1; log1++)
        size /= 2;
    size = sizeof(struct vm_object);
    for (log2 = 0; size > 1; log2++)
        size /= 2;
    vm_page_hash_shift = log1/2 - log2 + 1;
    vm_page_bucket_hash = 1 << ((log1 + 1) >> 1);	/* Get (ceiling of sqrt of table size) */
    vm_page_bucket_hash |= 1 << ((log1 + 1) >> 2);	/* Get (ceiling of quadroot of table size) */
    vm_page_bucket_hash |= 1;				/* Low bit must always be set to ensure a unique series */
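    /*
     *	Worked example (illustrative values only): with 2^20 buckets
     *	(log1 = 20) and, say, a 128-byte struct vm_object (log2 = 7),
     *	vm_page_hash_shift = 20/2 - 7 + 1 = 4 and vm_page_bucket_hash
     *	gets bits 10, 5 and 0, i.e. 0x421.
     */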

    if (vm_page_hash_mask & vm_page_bucket_count)
        printf("vm_page_bootstrap: WARNING -- strange page hash\n");

    vm_page_buckets = (vm_page_bucket_t *)
        pmap_steal_memory(vm_page_bucket_count *
                          sizeof(vm_page_bucket_t));

    for (i = 0; i < vm_page_bucket_count; i++) {
        register vm_page_bucket_t *bucket = &vm_page_buckets[i];

        bucket->pages = VM_PAGE_NULL;
#if	MACH_PAGE_HASH_STATS
        bucket->cur_count = 0;
        bucket->hi_count = 0;
#endif	/* MACH_PAGE_HASH_STATS */
    }

    /*
     *	Machine-dependent code allocates the resident page table.
     *	It uses vm_page_init to initialize the page frames.
     *	The code also returns to us the virtual space available
     *	to the kernel.  We don't trust the pmap module
     *	to get the alignment right.
     */

    pmap_startup(&virtual_space_start, &virtual_space_end);
    virtual_space_start = round_page(virtual_space_start);
    virtual_space_end = trunc_page(virtual_space_end);

    *startp = virtual_space_start;
    *endp = virtual_space_end;

    /*
     *	Compute the initial "wire" count.
     *	Up until now, the pages which have been set aside are not under
     *	the VM system's control, so although they aren't explicitly
     *	wired, they nonetheless can't be moved.  At this moment,
     *	all VM managed pages are "free", courtesy of pmap_startup.
     */
    vm_page_wire_count = atop_64(max_mem) - vm_page_free_count;	/* initial value */
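    /*
     *	Illustration (values assumed, not measured): a 512MB machine
     *	with 4K pages has atop_64(max_mem) = 131072; if pmap_startup
     *	freed 120000 of them, the remaining 11072 pages stay accounted
     *	as wired here.
     */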

    printf("vm_page_bootstrap: %d free pages\n", vm_page_free_count);
    vm_page_free_count_minimum = vm_page_free_count;

    simple_lock_init(&vm_paging_lock, 0);
}

#ifndef	MACHINE_PAGES
/*
 *	We implement pmap_steal_memory and pmap_startup with the help
 *	of two simpler functions, pmap_virtual_space and pmap_next_page.
 */

void *
pmap_steal_memory(
    vm_size_t size)
{
    vm_offset_t addr, vaddr;
    ppnum_t phys_page;
    /*
     *	We round the size up to an integer multiple of sizeof (void *).
     */

    size = (size + sizeof (void *) - 1) &~ (sizeof (void *) - 1);
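    /* e.g. with 4-byte pointers, a request for 10 bytes rounds up to 12 */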

    /*
     *	If this is the first call to pmap_steal_memory,
     *	we have to initialize ourself.
     */

    if (virtual_space_start == virtual_space_end) {
        pmap_virtual_space(&virtual_space_start, &virtual_space_end);

        /*
         *	The initial values must be aligned properly, and
         *	we don't trust the pmap module to do it right.
         */

        virtual_space_start = round_page(virtual_space_start);
        virtual_space_end = trunc_page(virtual_space_end);
    }

    /*
     *	Allocate virtual memory for this request.
     */

    addr = virtual_space_start;
    virtual_space_start += size;

    kprintf("pmap_steal_memory: %08X - %08X; size=%08X\n", addr, virtual_space_start, size);	/* (TEST/DEBUG) */

    /*
     *	Allocate and map physical pages to back new virtual pages.
     */

    for (vaddr = round_page(addr);
         vaddr < addr + size;
         vaddr += PAGE_SIZE) {
        if (!pmap_next_page(&phys_page))
            panic("pmap_steal_memory");

        /*
         *	XXX Logically, these mappings should be wired,
         *	but some pmap modules barf if they are.
         */

        pmap_enter(kernel_pmap, vaddr, phys_page,
                   VM_PROT_READ|VM_PROT_WRITE,
                   VM_WIMG_USE_DEFAULT, FALSE);
        /*
         * Account for newly stolen memory
         */
        vm_page_wire_count++;

    }

    return (void *) addr;
}

void
pmap_startup(
    vm_offset_t *startp,
    vm_offset_t *endp)
{
    unsigned int i, npages, pages_initialized, fill, fillval;
    vm_page_t pages;
    ppnum_t phys_page;
    addr64_t tmpaddr;

    /*
     *	We calculate how many page frames we will have
     *	and then allocate the page structures in one chunk.
     */

    tmpaddr = (addr64_t)pmap_free_pages() * (addr64_t)PAGE_SIZE;	/* Get the amount of memory left */
    tmpaddr = tmpaddr + (addr64_t)(round_page_32(virtual_space_start) - virtual_space_start);	/* Account for any slop */
    npages = (unsigned int)(tmpaddr / (addr64_t)(PAGE_SIZE + sizeof(*pages)));	/* Figure how many pages fit, leaving room for their vm_page_ts */
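    /*
     *	Note: this solves npages * (PAGE_SIZE + sizeof(*pages)) <= tmpaddr,
     *	so the npages vm_page structs and the npages pages they describe
     *	both fit in the memory that remains.
     */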

    pages = (vm_page_t) pmap_steal_memory(npages * sizeof *pages);

    /*
     *	Initialize the page frames.
     */

    for (i = 0, pages_initialized = 0; i < npages; i++) {
        if (!pmap_next_page(&phys_page))
            break;

        vm_page_init(&pages[i], phys_page);
        vm_page_pages++;
        pages_initialized++;
    }

    /*
     * Release pages in reverse order so that physical pages
     * initially get allocated in ascending addresses. This keeps
     * the devices (which must address physical memory) happy if
     * they require several consecutive pages.
     */

    /*
     * Check if we want to initialize pages to a known value
     */

    fill = 0;						/* Assume no fill */
    if (PE_parse_boot_arg("fill", &fillval)) fill = 1;	/* Set fill */

    for (i = pages_initialized; i > 0; i--) {
        if(fill) fillPage(pages[i - 1].phys_page, fillval);	/* Fill the page with a known value if requested at boot */
        vm_page_release(&pages[i - 1]);
    }

#if 0
    {
        vm_page_t xx, xxo, xxl;
        int j, k, l;

        j = 0;						/* (BRINGUP) */
        xxl = 0;

        for(xx = vm_page_queue_free; xx; xxl = xx, xx = xx->pageq.next) {	/* (BRINGUP) */
            j++;					/* (BRINGUP) */
            if(j > vm_page_free_count) {		/* (BRINGUP) */
                panic("pmap_startup: too many pages, xx = %08X, xxl = %08X\n", xx, xxl);
            }

            l = vm_page_free_count - j;			/* (BRINGUP) */
            k = 0;					/* (BRINGUP) */

            if(((j - 1) & 0xFFFF) == 0) kprintf("checking number %d of %d\n", j, vm_page_free_count);

            for(xxo = xx->pageq.next; xxo; xxo = xxo->pageq.next) {	/* (BRINGUP) */
                k++;
                if(k > l) panic("pmap_startup: too many in secondary check %d %d\n", k, l);
                if((xx->phys_page & 0xFFFFFFFF) == (xxo->phys_page & 0xFFFFFFFF)) {	/* (BRINGUP) */
                    panic("pmap_startup: duplicate physaddr, xx = %08X, xxo = %08X\n", xx, xxo);
                }
            }
        }

        if(j != vm_page_free_count) {			/* (BRINGUP) */
            panic("pmap_startup: vm_page_free_count does not match, calc = %d, vm_page_free_count = %08X\n", j, vm_page_free_count);
        }
    }
#endif


    /*
     *	We have to re-align virtual_space_start,
     *	because pmap_steal_memory has been using it.
     */

    virtual_space_start = round_page_32(virtual_space_start);

    *startp = virtual_space_start;
    *endp = virtual_space_end;
}
#endif	/* MACHINE_PAGES */

/*
 *	Routine:	vm_page_module_init
 *	Purpose:
 *		Second initialization pass, to be done after
 *		the basic VM system is ready.
 */
void
vm_page_module_init(void)
{
    vm_page_zone = zinit((vm_size_t) sizeof(struct vm_page),
                         0, PAGE_SIZE, "vm pages");

#if	ZONE_DEBUG
    zone_debug_disable(vm_page_zone);
#endif	/* ZONE_DEBUG */

    zone_change(vm_page_zone, Z_EXPAND, FALSE);
    zone_change(vm_page_zone, Z_EXHAUST, TRUE);
    zone_change(vm_page_zone, Z_FOREIGN, TRUE);

    /*
     * Adjust zone statistics to account for the real pages allocated
     * in vm_page_create(). [Q: is this really what we want?]
     */
    vm_page_zone->count += vm_page_pages;
    vm_page_zone->cur_size += vm_page_pages * vm_page_zone->elem_size;

    mutex_init(&vm_page_alloc_lock, 0);
}

/*
 *	Routine:	vm_page_create
 *	Purpose:
 *		After the VM system is up, machine-dependent code
 *		may stumble across more physical memory.  For example,
 *		memory that it was reserving for a frame buffer.
 *		vm_page_create turns this memory into available pages.
 */

void
vm_page_create(
    ppnum_t start,
    ppnum_t end)
{
    ppnum_t phys_page;
    vm_page_t m;

    for (phys_page = start;
         phys_page < end;
         phys_page++) {
        while ((m = (vm_page_t) vm_page_grab_fictitious())
                == VM_PAGE_NULL)
            vm_page_more_fictitious();

        vm_page_init(m, phys_page);
        vm_page_pages++;
        vm_page_release(m);
    }
}

/*
 *	vm_page_hash:
 *
 *	Distributes the object/offset key pair among hash buckets.
 *
 *	NOTE:	The bucket count must be a power of 2
 */
#define vm_page_hash(object, offset) (\
	( (natural_t)((uint32_t)object * vm_page_bucket_hash) + ((uint32_t)atop_64(offset) ^ vm_page_bucket_hash))\
	 & vm_page_hash_mask)

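/*
 *	Note on the macro above: the object pointer is spread by the
 *	multiply with vm_page_bucket_hash (whose low bit is set, so the
 *	multiplier is odd and invertible mod 2^32), the page index of the
 *	offset is XORed in, and the sum is masked down to a bucket index
 *	in [0, vm_page_bucket_count).
 */
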
/*
 *	vm_page_insert:		[ internal use only ]
 *
 *	Inserts the given mem entry into the object/object-page
 *	table and object list.
 *
 *	The object must be locked.
 */

void
vm_page_insert(
    register vm_page_t		mem,
    register vm_object_t	object,
    register vm_object_offset_t	offset)
{
    register vm_page_bucket_t *bucket;

    XPR(XPR_VM_PAGE,
        "vm_page_insert, object 0x%X offset 0x%X page 0x%X\n",
        (integer_t)object, (integer_t)offset, (integer_t)mem, 0,0);

    VM_PAGE_CHECK(mem);
#if DEBUG
    _mutex_assert(&object->Lock, MA_OWNED);

    if (mem->tabled || mem->object != VM_OBJECT_NULL)
        panic("vm_page_insert: page %p for (obj=%p,off=0x%llx) "
              "already in (obj=%p,off=0x%llx)",
              mem, object, offset, mem->object, mem->offset);
#endif
    assert(!object->internal || offset < object->size);

    /* only insert "pageout" pages into "pageout" objects,
     * and normal pages into normal objects */
    assert(object->pageout == mem->pageout);

    assert(vm_page_lookup(object, offset) == VM_PAGE_NULL);

    /*
     *	Record the object/offset pair in this page
     */

    mem->object = object;
    mem->offset = offset;
    /*
     *	Insert it into the object/offset hash table
     */

    bucket = &vm_page_buckets[vm_page_hash(object, offset)];
    simple_lock(&vm_page_bucket_lock);
    mem->next = bucket->pages;
    bucket->pages = mem;
#if	MACH_PAGE_HASH_STATS
    if (++bucket->cur_count > bucket->hi_count)
        bucket->hi_count = bucket->cur_count;
#endif	/* MACH_PAGE_HASH_STATS */
    simple_unlock(&vm_page_bucket_lock);

    /*
     *	Now link into the object's list of backed pages.
     */

    VM_PAGE_INSERT(mem, object);
    mem->tabled = TRUE;

    /*
     *	Show that the object has one more resident page.
     */

    object->resident_page_count++;

    if (object->purgable == VM_OBJECT_PURGABLE_VOLATILE ||
        object->purgable == VM_OBJECT_PURGABLE_EMPTY) {
        vm_page_lock_queues();
        vm_page_purgeable_count++;
        vm_page_unlock_queues();
    }
}

/*
 *	vm_page_replace:
 *
 *	Exactly like vm_page_insert, except that we first
 *	remove any existing page at the given offset in object.
 *
 *	The object and page queues must be locked.
 */

void
vm_page_replace(
    register vm_page_t		mem,
    register vm_object_t	object,
    register vm_object_offset_t	offset)
{
    register vm_page_bucket_t *bucket;

    VM_PAGE_CHECK(mem);
#if DEBUG
    _mutex_assert(&object->Lock, MA_OWNED);
    _mutex_assert(&vm_page_queue_lock, MA_OWNED);

    if (mem->tabled || mem->object != VM_OBJECT_NULL)
        panic("vm_page_replace: page %p for (obj=%p,off=0x%llx) "
              "already in (obj=%p,off=0x%llx)",
              mem, object, offset, mem->object, mem->offset);
#endif
    /*
     *	Record the object/offset pair in this page
     */

    mem->object = object;
    mem->offset = offset;
    /*
     *	Insert it into the object/offset hash table,
     *	replacing any page that might have been there.
     */

    bucket = &vm_page_buckets[vm_page_hash(object, offset)];
    simple_lock(&vm_page_bucket_lock);
    if (bucket->pages) {
        vm_page_t *mp = &bucket->pages;
        register vm_page_t m = *mp;
        do {
            if (m->object == object && m->offset == offset) {
                /*
                 * Remove page from bucket and from object,
                 * and return it to the free list.
                 */
                *mp = m->next;
                VM_PAGE_REMOVE(m);
                m->tabled = FALSE;
                m->object = VM_OBJECT_NULL;
                m->offset = (vm_object_offset_t) -1;
                object->resident_page_count--;

                if (object->purgable == VM_OBJECT_PURGABLE_VOLATILE ||
                    object->purgable == VM_OBJECT_PURGABLE_EMPTY) {
                    assert(vm_page_purgeable_count > 0);
                    vm_page_purgeable_count--;
                }

                /*
                 * Return page to the free list.
                 * Note the page is not tabled now, so this
                 * won't self-deadlock on the bucket lock.
                 */

                vm_page_free(m);
                break;
            }
            mp = &m->next;
        } while ((m = *mp));
        mem->next = bucket->pages;
    } else {
        mem->next = VM_PAGE_NULL;
    }
    bucket->pages = mem;
    simple_unlock(&vm_page_bucket_lock);

    /*
     *	Now link into the object's list of backed pages.
     */

    VM_PAGE_INSERT(mem, object);
    mem->tabled = TRUE;

    /*
     *	And show that the object has one more resident
     *	page.
     */

    object->resident_page_count++;

    if (object->purgable == VM_OBJECT_PURGABLE_VOLATILE ||
        object->purgable == VM_OBJECT_PURGABLE_EMPTY) {
        vm_page_purgeable_count++;
    }
}

/*
 *	vm_page_remove:		[ internal use only ]
 *
 *	Removes the given mem entry from the object/offset-page
 *	table and the object page list.
 *
 *	The object and page queues must be locked.
 */

void
vm_page_remove(
    register vm_page_t	mem)
{
    register vm_page_bucket_t	*bucket;
    register vm_page_t	this;

    XPR(XPR_VM_PAGE,
        "vm_page_remove, object 0x%X offset 0x%X page 0x%X\n",
        (integer_t)mem->object, (integer_t)mem->offset,
        (integer_t)mem, 0,0);
#if DEBUG
    _mutex_assert(&vm_page_queue_lock, MA_OWNED);
    _mutex_assert(&mem->object->Lock, MA_OWNED);
#endif
    assert(mem->tabled);
    assert(!mem->cleaning);
    VM_PAGE_CHECK(mem);

    /*
     *	Remove from the object/offset hash table
     */

    bucket = &vm_page_buckets[vm_page_hash(mem->object, mem->offset)];
    simple_lock(&vm_page_bucket_lock);
    if ((this = bucket->pages) == mem) {
        /* optimize for common case */

        bucket->pages = mem->next;
    } else {
        register vm_page_t	*prev;

        for (prev = &this->next;
             (this = *prev) != mem;
             prev = &this->next)
            continue;
        *prev = this->next;
    }
#if	MACH_PAGE_HASH_STATS
    bucket->cur_count--;
#endif	/* MACH_PAGE_HASH_STATS */
    simple_unlock(&vm_page_bucket_lock);

    /*
     *	Now remove from the object's list of backed pages.
     */

    VM_PAGE_REMOVE(mem);

    /*
     *	And show that the object has one fewer resident
     *	page.
     */

    mem->object->resident_page_count--;

    if (mem->object->purgable == VM_OBJECT_PURGABLE_VOLATILE ||
        mem->object->purgable == VM_OBJECT_PURGABLE_EMPTY) {
        assert(vm_page_purgeable_count > 0);
        vm_page_purgeable_count--;
    }

    mem->tabled = FALSE;
    mem->object = VM_OBJECT_NULL;
    mem->offset = (vm_object_offset_t) -1;
}

/*
 *	vm_page_lookup:
 *
 *	Returns the page associated with the object/offset
 *	pair specified; if none is found, VM_PAGE_NULL is returned.
 *
 *	The object must be locked.  No side effects.
 */

unsigned long vm_page_lookup_hint = 0;
unsigned long vm_page_lookup_hint_next = 0;
unsigned long vm_page_lookup_hint_prev = 0;
unsigned long vm_page_lookup_hint_miss = 0;
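
/*
 *	Note: these counters are diagnostics for the memq_hint fast path
 *	below -- hits on the hinted page itself, hits on its list
 *	neighbors, and lookups that fell back to the hash even though a
 *	hint was present.
 */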

vm_page_t
vm_page_lookup(
    register vm_object_t	object,
    register vm_object_offset_t	offset)
{
    register vm_page_t	mem;
    register vm_page_bucket_t *bucket;
    queue_entry_t	qe;
#if 0
    _mutex_assert(&object->Lock, MA_OWNED);
#endif

    mem = object->memq_hint;
    if (mem != VM_PAGE_NULL) {
        assert(mem->object == object);
        if (mem->offset == offset) {
            vm_page_lookup_hint++;
            return mem;
        }
        qe = queue_next(&mem->listq);
        if (! queue_end(&object->memq, qe)) {
            vm_page_t	next_page;

            next_page = (vm_page_t) qe;
            assert(next_page->object == object);
            if (next_page->offset == offset) {
                vm_page_lookup_hint_next++;
                object->memq_hint = next_page; /* new hint */
                return next_page;
            }
        }
        qe = queue_prev(&mem->listq);
        if (! queue_end(&object->memq, qe)) {
            vm_page_t	prev_page;

            prev_page = (vm_page_t) qe;
            assert(prev_page->object == object);
            if (prev_page->offset == offset) {
                vm_page_lookup_hint_prev++;
                object->memq_hint = prev_page; /* new hint */
                return prev_page;
            }
        }
    }

    /*
     * Search the hash table for this object/offset pair
     */

    bucket = &vm_page_buckets[vm_page_hash(object, offset)];

    simple_lock(&vm_page_bucket_lock);
    for (mem = bucket->pages; mem != VM_PAGE_NULL; mem = mem->next) {
        VM_PAGE_CHECK(mem);
        if ((mem->object == object) && (mem->offset == offset))
            break;
    }
    simple_unlock(&vm_page_bucket_lock);

    if (mem != VM_PAGE_NULL) {
        if (object->memq_hint != VM_PAGE_NULL) {
            vm_page_lookup_hint_miss++;
        }
        assert(mem->object == object);
        object->memq_hint = mem;
    }

    return(mem);
}


vm_page_t
vm_page_lookup_nohint(
    vm_object_t		object,
    vm_object_offset_t	offset)
{
    register vm_page_t	mem;
    register vm_page_bucket_t *bucket;

#if 0
    _mutex_assert(&object->Lock, MA_OWNED);
#endif
    /*
     * Search the hash table for this object/offset pair
     */

    bucket = &vm_page_buckets[vm_page_hash(object, offset)];

    simple_lock(&vm_page_bucket_lock);
    for (mem = bucket->pages; mem != VM_PAGE_NULL; mem = mem->next) {
        VM_PAGE_CHECK(mem);
        if ((mem->object == object) && (mem->offset == offset))
            break;
    }
    simple_unlock(&vm_page_bucket_lock);

    return(mem);
}

/*
 *	vm_page_rename:
 *
 *	Move the given memory entry from its
 *	current object to the specified target object/offset.
 *
 *	The object must be locked.
 */
void
vm_page_rename(
    register vm_page_t	mem,
    register vm_object_t	new_object,
    vm_object_offset_t	new_offset)
{
    assert(mem->object != new_object);
    /*
     * ENCRYPTED SWAP:
     * The encryption key is based on the page's memory object
     * (aka "pager") and paging offset.  Moving the page to
     * another VM object changes its "pager" and "paging_offset"
     * so it has to be decrypted first.
     */
    if (mem->encrypted) {
        panic("vm_page_rename: page %p is encrypted\n", mem);
    }
    /*
     *	Changes to mem->object require the page lock because
     *	the pageout daemon uses that lock to get the object.
     */

    XPR(XPR_VM_PAGE,
        "vm_page_rename, new object 0x%X, offset 0x%X page 0x%X\n",
        (integer_t)new_object, (integer_t)new_offset,
        (integer_t)mem, 0,0);

    vm_page_lock_queues();
    vm_page_remove(mem);
    vm_page_insert(mem, new_object, new_offset);
    vm_page_unlock_queues();
}

/*
 *	vm_page_init:
 *
 *	Initialize the fields in a new page.
 *	This takes a structure with random values and initializes it
 *	so that it can be given to vm_page_release or vm_page_insert.
 */
void
vm_page_init(
    vm_page_t	mem,
    ppnum_t	phys_page)
{
    assert(phys_page);
    *mem = vm_page_template;
    mem->phys_page = phys_page;
}

/*
 *	vm_page_grab_fictitious:
 *
 *	Remove a fictitious page from the free list.
 *	Returns VM_PAGE_NULL if there are no free pages.
 */
int	c_vm_page_grab_fictitious = 0;
int	c_vm_page_release_fictitious = 0;
int	c_vm_page_more_fictitious = 0;

vm_page_t
vm_page_grab_fictitious(void)
{
    register vm_page_t m;

    m = (vm_page_t)zget(vm_page_zone);
    if (m) {
        vm_page_init(m, vm_page_fictitious_addr);
        m->fictitious = TRUE;
    }

    c_vm_page_grab_fictitious++;
    return m;
}

/*
 *	vm_page_release_fictitious:
 *
 *	Release a fictitious page to the free list.
 */

void
vm_page_release_fictitious(
    register vm_page_t m)
{
    assert(!m->free);
    assert(m->busy);
    assert(m->fictitious);
    assert(m->phys_page == vm_page_fictitious_addr);

    c_vm_page_release_fictitious++;
#if DEBUG
    if (m->free)
        panic("vm_page_release_fictitious");
#endif
    m->free = TRUE;
    zfree(vm_page_zone, m);
}

/*
 *	vm_page_more_fictitious:
 *
 *	Add more fictitious pages to the free list.
 *	Allowed to block. This routine is way intimate
 *	with the zones code, for several reasons:
 *	1. we need to carve some page structures out of physical
 *	   memory before zones work, so they _cannot_ come from
 *	   the zone_map.
 *	2. the zone needs to be collectable in order to prevent
 *	   growth without bound. These structures are used by
 *	   the device pager (by the hundreds and thousands), as
 *	   private pages for pageout, and as blocking pages for
 *	   pagein. Temporary bursts in demand should not result in
 *	   permanent allocation of a resource.
 *	3. To smooth allocation humps, we allocate single pages
 *	   with kernel_memory_allocate(), and cram them into the
 *	   zone. This also allows us to initialize the vm_page_t's
 *	   on the way into the zone, so that zget() always returns
 *	   an initialized structure. The zone free element pointer
 *	   and the free page pointer are both the first item in the
 *	   vm_page_t.
 *	4. By having the pages in the zone pre-initialized, we need
 *	   not keep 2 levels of lists. The garbage collector simply
 *	   scans our list, and reduces physical memory usage as it
 *	   sees fit.
 */

void vm_page_more_fictitious(void)
{
    register vm_page_t m;
    vm_offset_t addr;
    kern_return_t retval;
    int i;

    c_vm_page_more_fictitious++;

    /*
     * Allocate a single page from the zone_map. Do not wait if no physical
     * pages are immediately available, and do not zero the space. We need
     * our own blocking lock here to prevent having multiple,
     * simultaneous requests from piling up on the zone_map lock. Exactly
     * one (of our) threads should be potentially waiting on the map lock.
     * If winner is not vm-privileged, then the page allocation will fail,
     * and it will temporarily block here in the vm_page_wait().
     */
    mutex_lock(&vm_page_alloc_lock);
    /*
     * If another thread allocated space, just bail out now.
     */
    if (zone_free_count(vm_page_zone) > 5) {
        /*
         * The number "5" is a small number that is larger than the
         * number of fictitious pages that any single caller will
         * attempt to allocate. Otherwise, a thread will attempt to
         * acquire a fictitious page (vm_page_grab_fictitious), fail,
         * release all of the resources and locks already acquired,
         * and then call this routine. This routine finds the pages
         * that the caller released, so fails to allocate new space.
         * The process repeats infinitely. The largest known number
         * of fictitious pages required in this manner is 2. 5 is
         * simply a somewhat larger number.
         */
        mutex_unlock(&vm_page_alloc_lock);
        return;
    }

    retval = kernel_memory_allocate(zone_map,
                    &addr, PAGE_SIZE, VM_PROT_ALL,
                    KMA_KOBJECT|KMA_NOPAGEWAIT);
    if (retval != KERN_SUCCESS) {
        /*
         * No page was available. Tell the pageout daemon, drop the
         * lock to give another thread a chance at it, and
         * wait for the pageout daemon to make progress.
         */
        mutex_unlock(&vm_page_alloc_lock);
        vm_page_wait(THREAD_UNINT);
        return;
    }
    /*
     * Initialize as many vm_page_t's as will fit on this page. This
     * depends on the zone code disturbing ONLY the first item of
     * each zone element.
     */
    m = (vm_page_t)addr;
    for (i = PAGE_SIZE/sizeof(struct vm_page); i > 0; i--) {
        vm_page_init(m, vm_page_fictitious_addr);
        m->fictitious = TRUE;
        m++;
    }
    zcram(vm_page_zone, (void *) addr, PAGE_SIZE);
    mutex_unlock(&vm_page_alloc_lock);
}

/*
 *	vm_page_convert:
 *
 *	Attempt to convert a fictitious page into a real page.
 */

boolean_t
vm_page_convert(
    register vm_page_t m)
{
    register vm_page_t real_m;

    assert(m->busy);
    assert(m->fictitious);
    assert(!m->dirty);

    real_m = vm_page_grab();
    if (real_m == VM_PAGE_NULL)
        return FALSE;

    m->phys_page = real_m->phys_page;
    m->fictitious = FALSE;
    m->no_isync = TRUE;

    vm_page_lock_queues();
    if (m->active)
        vm_page_active_count++;
    else if (m->inactive)
        vm_page_inactive_count++;
    vm_page_unlock_queues();

    real_m->phys_page = vm_page_fictitious_addr;
    real_m->fictitious = TRUE;

    vm_page_release_fictitious(real_m);
    return TRUE;
}

/*
 *	vm_pool_low():
 *
 *	Return true if it is not likely that a non-vm_privileged thread
 *	can get memory without blocking.  Advisory only, since the
 *	situation may change under us.
 */
int
vm_pool_low(void)
{
    /* No locking, at worst we will fib. */
    return( vm_page_free_count < vm_page_free_reserved );
}

/*
 *	vm_page_grab:
 *
 *	Remove a page from the free list.
 *	Returns VM_PAGE_NULL if the free list is too small.
 */

unsigned long	vm_page_grab_count = 0;	/* measure demand */

vm_page_t
vm_page_grab(void)
{
    register vm_page_t	mem;

    mutex_lock(&vm_page_queue_free_lock);
    vm_page_grab_count++;

    /*
     *	Optionally produce warnings if the wire or gobble
     *	counts exceed some threshold.
     */
    if (vm_page_wire_count_warning > 0
        && vm_page_wire_count >= vm_page_wire_count_warning) {
        printf("mk: vm_page_grab(): high wired page count of %d\n",
            vm_page_wire_count);
        assert(vm_page_wire_count < vm_page_wire_count_warning);
    }
    if (vm_page_gobble_count_warning > 0
        && vm_page_gobble_count >= vm_page_gobble_count_warning) {
        printf("mk: vm_page_grab(): high gobbled page count of %d\n",
            vm_page_gobble_count);
        assert(vm_page_gobble_count < vm_page_gobble_count_warning);
    }

    /*
     *	Only let privileged threads (involved in pageout)
     *	dip into the reserved pool.
     */

    if ((vm_page_free_count < vm_page_free_reserved) &&
        !(current_thread()->options & TH_OPT_VMPRIV)) {
        mutex_unlock(&vm_page_queue_free_lock);
        mem = VM_PAGE_NULL;
        goto wakeup_pageout;
    }

    while (vm_page_queue_free == VM_PAGE_NULL) {
        mutex_unlock(&vm_page_queue_free_lock);
        VM_PAGE_WAIT();
        mutex_lock(&vm_page_queue_free_lock);
    }

    if (--vm_page_free_count < vm_page_free_count_minimum)
        vm_page_free_count_minimum = vm_page_free_count;
    mem = vm_page_queue_free;
    vm_page_queue_free = (vm_page_t) mem->pageq.next;
    mem->pageq.next = NULL;
    mem->pageq.prev = NULL;
    assert(mem->listq.next == NULL && mem->listq.prev == NULL);
    assert(mem->tabled == FALSE);
    assert(mem->object == VM_OBJECT_NULL);
    assert(!mem->laundry);
    mem->free = FALSE;
    mem->no_isync = TRUE;
    mutex_unlock(&vm_page_queue_free_lock);

    assert(pmap_verify_free(mem->phys_page));

    /*
     *	Decide if we should poke the pageout daemon.
     *	We do this if the free count is less than the low
     *	water mark, or if the free count is less than the high
     *	water mark (but above the low water mark) and the inactive
     *	count is less than its target.
     *
     *	We don't have the counts locked ... if they change a little,
     *	it doesn't really matter.
     */

wakeup_pageout:
    if ((vm_page_free_count < vm_page_free_min) ||
        ((vm_page_free_count < vm_page_free_target) &&
         (vm_page_inactive_count < vm_page_inactive_target)))
        thread_wakeup((event_t) &vm_page_free_wanted);

//	dbgLog(mem->phys_page, vm_page_free_count, vm_page_wire_count, 4);	/* (TEST/DEBUG) */

    return mem;
}

/*
 *	vm_page_release:
 *
 *	Return a page to the free list.
 */

void
vm_page_release(
    register vm_page_t	mem)
{

#if 0
    unsigned int pindex;
    phys_entry *physent;

    physent = mapping_phys_lookup(mem->phys_page, &pindex);	/* (BRINGUP) */
    if(physent->ppLink & ppN) {	/* (BRINGUP) */
        panic("vm_page_release: already released - %08X %08X\n", mem, mem->phys_page);
    }
    physent->ppLink = physent->ppLink | ppN;	/* (BRINGUP) */
#endif
    assert(!mem->private && !mem->fictitious);

//	dbgLog(mem->phys_page, vm_page_free_count, vm_page_wire_count, 5);	/* (TEST/DEBUG) */

    mutex_lock(&vm_page_queue_free_lock);
#if DEBUG
    if (mem->free)
        panic("vm_page_release");
#endif
    mem->free = TRUE;
    assert(!mem->laundry);
    assert(mem->object == VM_OBJECT_NULL);
    assert(mem->pageq.next == NULL &&
           mem->pageq.prev == NULL);
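    /*
     *	Note: the free list is a simple LIFO stack threaded through
     *	pageq.next; releasing pushes at the head, and vm_page_grab()
     *	pops from the head under the same free-list lock.
     */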
    mem->pageq.next = (queue_entry_t) vm_page_queue_free;
    vm_page_queue_free = mem;
    vm_page_free_count++;

    /*
     *	Check if we should wake up someone waiting for a page.
     *	But don't bother waking them unless they can allocate.
     *
     *	We wakeup only one thread, to prevent starvation.
     *	Because the scheduling system handles wait queues FIFO,
     *	if we wakeup all waiting threads, one greedy thread
     *	can starve multiple niceguy threads.  When the threads
     *	all wakeup, the greedy thread runs first, grabs the page,
     *	and waits for another page.  It will be the first to run
     *	when the next page is freed.
     *
     *	However, there is a slight danger here.
     *	The thread we wake might not use the free page.
     *	Then the other threads could wait indefinitely
     *	while the page goes unused.  To forestall this,
     *	the pageout daemon will keep making free pages
     *	as long as vm_page_free_wanted is non-zero.
     */
1487 | ||
1488 | if ((vm_page_free_wanted > 0) && | |
1489 | (vm_page_free_count >= vm_page_free_reserved)) { | |
1490 | vm_page_free_wanted--; | |
1491 | thread_wakeup_one((event_t) &vm_page_free_count); | |
1492 | } | |
1493 | ||
1494 | mutex_unlock(&vm_page_queue_free_lock); | |
1495 | } | |
1496 | ||
1c79356b A |
1497 | /* |
1498 | * vm_page_wait: | |
1499 | * | |
1500 | * Wait for a page to become available. | |
1501 | * If there are plenty of free pages, then we don't sleep. | |
1502 | * | |
1503 | * Returns: | |
1504 | * TRUE: There may be another page, try again | |
1505 | * FALSE: We were interrupted out of our wait, don't try again | |
1506 | */ | |
1507 | ||
1508 | boolean_t | |
1509 | vm_page_wait( | |
1510 | int interruptible ) | |
1511 | { | |
1512 | /* | |
1513 | * We can't use vm_page_free_reserved to make this | |
1514 | * determination. Consider: some thread might | |
1515 | * need to allocate two pages. The first allocation | |
1516 | * succeeds, the second fails. After the first page is freed, | |
1517 | * a call to vm_page_wait must really block. | |
1518 | */ | |
9bccf70c | 1519 | kern_return_t wait_result; |
9bccf70c | 1520 | int need_wakeup = 0; |
1c79356b A |
1521 | |
1522 | mutex_lock(&vm_page_queue_free_lock); | |
1523 | if (vm_page_free_count < vm_page_free_target) { | |
1524 | if (vm_page_free_wanted++ == 0) | |
0b4e3aa0 | 1525 | need_wakeup = 1; |
91447636 | 1526 | wait_result = assert_wait((event_t)&vm_page_free_count, interruptible); |
1c79356b A |
1527 | mutex_unlock(&vm_page_queue_free_lock); |
1528 | counter(c_vm_page_wait_block++); | |
0b4e3aa0 A |
1529 | |
1530 | if (need_wakeup) | |
1531 | thread_wakeup((event_t)&vm_page_free_wanted); | |
9bccf70c | 1532 | |
91447636 | 1533 | if (wait_result == THREAD_WAITING) |
9bccf70c A |
1534 | wait_result = thread_block(THREAD_CONTINUE_NULL); |
1535 | ||
1c79356b A |
1536 | return(wait_result == THREAD_AWAKENED); |
1537 | } else { | |
1538 | mutex_unlock(&vm_page_queue_free_lock); | |
1539 | return TRUE; | |
1540 | } | |
1541 | } | |
1542 | ||
1543 | /* | |
1544 | * vm_page_alloc: | |
1545 | * | |
1546 | * Allocate and return a memory cell associated | |
1547 | * with this VM object/offset pair. | |
1548 | * | |
1549 | * Object must be locked. | |
1550 | */ | |
1551 | ||
1552 | vm_page_t | |
1553 | vm_page_alloc( | |
1554 | vm_object_t object, | |
1555 | vm_object_offset_t offset) | |
1556 | { | |
1557 | register vm_page_t mem; | |
1558 | ||
91447636 A |
1559 | #if DEBUG |
1560 | _mutex_assert(&object->Lock, MA_OWNED); | |
1561 | #endif | |
1c79356b A |
1562 | mem = vm_page_grab(); |
1563 | if (mem == VM_PAGE_NULL) | |
1564 | return VM_PAGE_NULL; | |
1565 | ||
1566 | vm_page_insert(mem, object, offset); | |
1567 | ||
1568 | return(mem); | |
1569 | } | |
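/*
 * Illustrative usage (editor's sketch; "object" and "offset" are
 * hypothetical caller state): the object must be locked across the
 * call, and the lock must be dropped before waiting for memory:
 *
 *	vm_object_lock(object);
 *	while ((m = vm_page_alloc(object, offset)) == VM_PAGE_NULL) {
 *		vm_object_unlock(object);
 *		(void) vm_page_wait(THREAD_UNINT);
 *		vm_object_lock(object);
 *	}
 */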
1570 | ||
1c79356b A |
1571 | counter(unsigned int c_laundry_pages_freed = 0;) |
1572 | ||
1573 | int vm_pagein_cluster_unused = 0; | |
91447636 | 1574 | boolean_t vm_page_free_verify = TRUE; |
1c79356b A |
1575 | /* |
1576 | * vm_page_free: | |
1577 | * | |
1578 | * Returns the given page to the free list, | |
1579 | * disassociating it from any VM object.
1580 | * | |
1581 | * Object and page queues must be locked prior to entry. | |
1582 | */ | |
1583 | void | |
1584 | vm_page_free( | |
1585 | register vm_page_t mem) | |
1586 | { | |
1587 | vm_object_t object = mem->object; | |
1588 | ||
1589 | assert(!mem->free); | |
1590 | assert(!mem->cleaning); | |
1591 | assert(!mem->pageout); | |
91447636 A |
1592 | if (vm_page_free_verify && !mem->fictitious && !mem->private) { |
1593 | assert(pmap_verify_free(mem->phys_page)); | |
1594 | } | |
1595 | ||
1596 | #if DEBUG | |
1597 | if (mem->object) | |
1598 | _mutex_assert(&mem->object->Lock, MA_OWNED); | |
1599 | _mutex_assert(&vm_page_queue_lock, MA_OWNED); | |
1c79356b | 1600 | |
91447636 A |
1601 | if (mem->free) |
1602 | panic("vm_page_free: freeing page on free list\n"); | |
1603 | #endif | |
1c79356b A |
1604 | if (mem->tabled) |
1605 | vm_page_remove(mem); /* clears tabled, object, offset */ | |
1606 | VM_PAGE_QUEUES_REMOVE(mem); /* clears active or inactive */ | |
1607 | ||
1608 | if (mem->clustered) { | |
1609 | mem->clustered = FALSE; | |
1610 | vm_pagein_cluster_unused++; | |
1611 | } | |
1612 | ||
1613 | if (mem->wire_count) { | |
1614 | if (!mem->private && !mem->fictitious) | |
1615 | vm_page_wire_count--; | |
1616 | mem->wire_count = 0; | |
1617 | assert(!mem->gobbled); | |
1618 | } else if (mem->gobbled) { | |
1619 | if (!mem->private && !mem->fictitious) | |
1620 | vm_page_wire_count--; | |
1621 | vm_page_gobble_count--; | |
1622 | } | |
1623 | mem->gobbled = FALSE; | |
1624 | ||
1625 | if (mem->laundry) { | |
91447636 | 1626 | vm_pageout_throttle_up(mem); |
1c79356b | 1627 | counter(++c_laundry_pages_freed); |
1c79356b A |
1628 | } |
1629 | ||
1c79356b A |
1630 | PAGE_WAKEUP(mem); /* clears wanted */ |
1631 | ||
1632 | if (mem->absent) | |
1633 | vm_object_absent_release(object); | |
1634 | ||
0b4e3aa0 | 1635 | /* Some of these may be unnecessary */ |
1c79356b A |
1636 | mem->page_lock = 0; |
1637 | mem->unlock_request = 0; | |
1638 | mem->busy = TRUE; | |
1639 | mem->absent = FALSE; | |
1640 | mem->error = FALSE; | |
1641 | mem->dirty = FALSE; | |
1642 | mem->precious = FALSE; | |
1643 | mem->reference = FALSE; | |
91447636 | 1644 | mem->encrypted = FALSE; |
1c79356b A |
1645 | |
1646 | mem->page_error = KERN_SUCCESS; | |
1647 | ||
1648 | if (mem->private) { | |
1649 | mem->private = FALSE; | |
1650 | mem->fictitious = TRUE; | |
55e303ae | 1651 | mem->phys_page = vm_page_fictitious_addr; |
1c79356b A |
1652 | } |
1653 | if (mem->fictitious) { | |
1654 | vm_page_release_fictitious(mem); | |
1655 | } else { | |
9bccf70c A |
1656 | /* depends on the queues lock */ |
1657 | if(mem->zero_fill) { | |
1658 | vm_zf_count-=1; | |
1659 | mem->zero_fill = FALSE; | |
1660 | } | |
55e303ae | 1661 | vm_page_init(mem, mem->phys_page); |
1c79356b A |
1662 | vm_page_release(mem); |
1663 | } | |
1664 | } | |
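/*
 * Illustrative usage (editor's sketch): per the locking comment
 * above, both the object lock and the page queues lock are held
 * around the free:
 *
 *	vm_object_lock(object);
 *	vm_page_lock_queues();
 *	vm_page_free(m);
 *	vm_page_unlock_queues();
 *	vm_object_unlock(object);
 */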
1665 | ||
55e303ae A |
1666 | |
1667 | void | |
1668 | vm_page_free_list( | |
1669 | register vm_page_t mem) | |
1670 | { | |
91447636 | 1671 | register vm_page_t nxt; |
55e303ae | 1672 | register vm_page_t first = NULL; |
91447636 | 1673 | register vm_page_t last = VM_PAGE_NULL; |
55e303ae A |
1674 | register int pg_count = 0; |
1675 | ||
91447636 A |
1676 | #if DEBUG |
1677 | _mutex_assert(&vm_page_queue_lock, MA_OWNED); | |
1678 | #endif | |
55e303ae | 1679 | while (mem) { |
91447636 A |
1680 | #if DEBUG |
1681 | if (mem->tabled || mem->object) | |
1682 | panic("vm_page_free_list: freeing tabled page\n"); | |
1683 | if (mem->inactive || mem->active || mem->free) | |
1684 | panic("vm_page_free_list: freeing page on list\n"); | |
1685 | #endif | |
1686 | assert(mem->pageq.prev == NULL); | |
55e303ae A |
1687 | nxt = (vm_page_t)(mem->pageq.next); |
1688 | ||
1689 | if (mem->clustered) | |
1690 | vm_pagein_cluster_unused++; | |
1691 | ||
1692 | if (mem->laundry) { | |
91447636 | 1693 | vm_pageout_throttle_up(mem); |
55e303ae | 1694 | counter(++c_laundry_pages_freed); |
55e303ae A |
1695 | } |
1696 | mem->busy = TRUE; | |
1697 | ||
1698 | PAGE_WAKEUP(mem); /* clears wanted */ | |
1699 | ||
1700 | if (mem->private) | |
1701 | mem->fictitious = TRUE; | |
1702 | ||
1703 | if (!mem->fictitious) { | |
1704 | /* depends on the queues lock */ | |
1705 | if (mem->zero_fill) | |
1706 | vm_zf_count -= 1; | |
91447636 | 1707 | assert(!mem->laundry); |
55e303ae A |
1708 | vm_page_init(mem, mem->phys_page); |
1709 | ||
1710 | mem->free = TRUE; | |
1711 | ||
1712 | if (first == NULL) | |
1713 | last = mem; | |
1714 | mem->pageq.next = (queue_t) first; | |
1715 | first = mem; | |
1716 | ||
1717 | pg_count++; | |
1718 | } else { | |
1719 | mem->phys_page = vm_page_fictitious_addr; | |
1720 | vm_page_release_fictitious(mem); | |
1721 | } | |
1722 | mem = nxt; | |
1723 | } | |
1724 | if (first) { | |
1725 | ||
1726 | mutex_lock(&vm_page_queue_free_lock); | |
1727 | ||
1728 | last->pageq.next = (queue_entry_t) vm_page_queue_free; | |
1729 | vm_page_queue_free = first; | |
1730 | ||
1731 | vm_page_free_count += pg_count; | |
1732 | ||
1733 | if ((vm_page_free_wanted > 0) && | |
1734 | (vm_page_free_count >= vm_page_free_reserved)) { | |
91447636 | 1735 | unsigned int available_pages; |
55e303ae | 1736 | |
91447636 A |
1737 | if (vm_page_free_count >= vm_page_free_reserved) { |
1738 | available_pages = (vm_page_free_count | |
1739 | - vm_page_free_reserved); | |
1740 | } else { | |
1741 | available_pages = 0; | |
1742 | } | |
55e303ae A |
1743 | |
1744 | if (available_pages >= vm_page_free_wanted) { | |
1745 | vm_page_free_wanted = 0; | |
1746 | thread_wakeup((event_t) &vm_page_free_count); | |
1747 | } else { | |
1748 | while (available_pages--) { | |
1749 | vm_page_free_wanted--; | |
1750 | thread_wakeup_one((event_t) &vm_page_free_count); | |
1751 | } | |
1752 | } | |
1753 | } | |
1754 | mutex_unlock(&vm_page_queue_free_lock); | |
1755 | } | |
1756 | } | |
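/*
 * Illustrative usage (editor's sketch): pages are handed in as a
 * singly-linked batch through pageq.next, with pageq.prev NULL and
 * each page already untabled and off the paging queues, as the
 * DEBUG checks above enforce:
 *
 *	vm_page_t head = VM_PAGE_NULL;
 *
 *	m->pageq.next = (queue_entry_t) head;
 *	head = m;
 *	...
 *	vm_page_lock_queues();
 *	vm_page_free_list(head);
 *	vm_page_unlock_queues();
 */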
1757 | ||
1758 | ||
1c79356b A |
1759 | /* |
1760 | * vm_page_wire: | |
1761 | * | |
1762 | * Mark this page as wired down by yet | |
1763 | * another map, removing it from paging queues | |
1764 | * as necessary. | |
1765 | * | |
1766 | * The page's object and the page queues must be locked. | |
1767 | */ | |
1768 | void | |
1769 | vm_page_wire( | |
1770 | register vm_page_t mem) | |
1771 | { | |
1772 | ||
91447636 | 1773 | // dbgLog(current_thread(), mem->offset, mem->object, 1); /* (TEST/DEBUG) */ |
1c79356b A |
1774 | |
1775 | VM_PAGE_CHECK(mem); | |
91447636 A |
1776 | #if DEBUG |
1777 | if (mem->object) | |
1778 | _mutex_assert(&mem->object->Lock, MA_OWNED); | |
1779 | _mutex_assert(&vm_page_queue_lock, MA_OWNED); | |
1780 | #endif | |
1c79356b A |
1781 | if (mem->wire_count == 0) { |
1782 | VM_PAGE_QUEUES_REMOVE(mem); | |
1783 | if (!mem->private && !mem->fictitious && !mem->gobbled) | |
1784 | vm_page_wire_count++; | |
1785 | if (mem->gobbled) | |
1786 | vm_page_gobble_count--; | |
1787 | mem->gobbled = FALSE; | |
9bccf70c A |
1788 | if(mem->zero_fill) { |
1789 | /* depends on the queues lock */ | |
1790 | vm_zf_count-=1; | |
1791 | mem->zero_fill = FALSE; | |
1792 | } | |
91447636 A |
1793 | /* |
1794 | * ENCRYPTED SWAP: | |
1795 | * The page could be encrypted, but | |
1796 | * we don't have to decrypt it here
1797 | * because we don't guarantee that the | |
1798 | * data is actually valid at this point. | |
1799 | * The page will get decrypted in | |
1800 | * vm_fault_wire() if needed. | |
1801 | */ | |
1c79356b A |
1802 | } |
1803 | assert(!mem->gobbled); | |
1804 | mem->wire_count++; | |
1805 | } | |
1806 | ||
1807 | /* | |
1808 | * vm_page_gobble: | |
1809 | * | |
1810 | * Mark this page as consumed by the vm/ipc/xmm subsystems. | |
1811 | * | |
1812 | * Called only for freshly vm_page_grab()ed pages - w/ nothing locked. | |
1813 | */ | |
1814 | void | |
1815 | vm_page_gobble( | |
1816 | register vm_page_t mem) | |
1817 | { | |
1818 | vm_page_lock_queues(); | |
1819 | VM_PAGE_CHECK(mem); | |
1820 | ||
1821 | assert(!mem->gobbled); | |
1822 | assert(mem->wire_count == 0); | |
1823 | ||
1824 | if (!mem->gobbled && mem->wire_count == 0) { | |
1825 | if (!mem->private && !mem->fictitious) | |
1826 | vm_page_wire_count++; | |
1827 | } | |
1828 | vm_page_gobble_count++; | |
1829 | mem->gobbled = TRUE; | |
1830 | vm_page_unlock_queues(); | |
1831 | } | |
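/*
 * Illustrative usage (editor's sketch): gobbling accounts for a
 * freshly grabbed page with no locks held, per the comment above:
 *
 *	m = vm_page_grab();
 *	if (m != VM_PAGE_NULL)
 *		vm_page_gobble(m);
 */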
1832 | ||
1833 | /* | |
1834 | * vm_page_unwire: | |
1835 | * | |
1836 | * Release one wiring of this page, potentially | |
1837 | * enabling it to be paged again. | |
1838 | * | |
1839 | * The page's object and the page queues must be locked. | |
1840 | */ | |
1841 | void | |
1842 | vm_page_unwire( | |
1843 | register vm_page_t mem) | |
1844 | { | |
1845 | ||
91447636 | 1846 | // dbgLog(current_thread(), mem->offset, mem->object, 0); /* (TEST/DEBUG) */ |
1c79356b A |
1847 | |
1848 | VM_PAGE_CHECK(mem); | |
1849 | assert(mem->wire_count > 0); | |
91447636 A |
1850 | #if DEBUG |
1851 | if (mem->object) | |
1852 | _mutex_assert(&mem->object->Lock, MA_OWNED); | |
1853 | _mutex_assert(&vm_page_queue_lock, MA_OWNED); | |
1854 | #endif | |
1c79356b A |
1855 | if (--mem->wire_count == 0) { |
1856 | assert(!mem->private && !mem->fictitious); | |
1857 | vm_page_wire_count--; | |
91447636 A |
1858 | assert(!mem->laundry); |
1859 | assert(mem->object != kernel_object); | |
1860 | assert(mem->pageq.next == NULL && mem->pageq.prev == NULL); | |
1c79356b A |
1861 | queue_enter(&vm_page_queue_active, mem, vm_page_t, pageq); |
1862 | vm_page_active_count++; | |
1863 | mem->active = TRUE; | |
1864 | mem->reference = TRUE; | |
1865 | } | |
1866 | } | |
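/*
 * Illustrative pairing (editor's sketch): wiring and unwiring are
 * symmetric, and both require the object and page queues locks; the
 * elided middle is where the page is guaranteed resident:
 *
 *	vm_object_lock(object);
 *	vm_page_lock_queues();
 *	vm_page_wire(m);
 *	...
 *	vm_page_unwire(m);
 *	vm_page_unlock_queues();
 *	vm_object_unlock(object);
 */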
1867 | ||
1868 | /* | |
1869 | * vm_page_deactivate: | |
1870 | * | |
1871 | * Returns the given page to the inactive list, | |
1872 | * indicating that no physical maps have access | |
1873 | * to this page. [Used by the physical mapping system.] | |
1874 | * | |
1875 | * The page queues must be locked. | |
1876 | */ | |
1877 | void | |
1878 | vm_page_deactivate( | |
1879 | register vm_page_t m) | |
1880 | { | |
1881 | VM_PAGE_CHECK(m); | |
91447636 | 1882 | assert(m->object != kernel_object); |
1c79356b | 1883 | |
55e303ae | 1884 | // dbgLog(m->phys_page, vm_page_free_count, vm_page_wire_count, 6); /* (TEST/DEBUG) */ |
91447636 A |
1885 | #if DEBUG |
1886 | _mutex_assert(&vm_page_queue_lock, MA_OWNED); | |
1887 | #endif | |
1c79356b A |
1888 | /* |
1889 | * This page is no longer very interesting. If it was | |
1890 | * interesting (active or inactive/referenced), then we | |
1891 | * clear the reference bit and (re)enter it in the | |
1892 | * inactive queue. Note wired pages should not have | |
1893 | * their reference bit cleared. | |
1894 | */ | |
1895 | if (m->gobbled) { /* can this happen? */ | |
1896 | assert(m->wire_count == 0); | |
1897 | if (!m->private && !m->fictitious) | |
1898 | vm_page_wire_count--; | |
1899 | vm_page_gobble_count--; | |
1900 | m->gobbled = FALSE; | |
1901 | } | |
1902 | if (m->private || (m->wire_count != 0)) | |
1903 | return; | |
1904 | if (m->active || (m->inactive && m->reference)) { | |
1905 | if (!m->fictitious && !m->absent) | |
55e303ae | 1906 | pmap_clear_reference(m->phys_page); |
1c79356b A |
1907 | m->reference = FALSE; |
1908 | VM_PAGE_QUEUES_REMOVE(m); | |
1909 | } | |
1910 | if (m->wire_count == 0 && !m->inactive) { | |
0b4e3aa0 A |
1911 | m->page_ticket = vm_page_ticket; |
1912 | vm_page_ticket_roll++; | |
1913 | ||
1914 | if(vm_page_ticket_roll == VM_PAGE_TICKETS_IN_ROLL) { | |
1915 | vm_page_ticket_roll = 0; | |
1916 | if(vm_page_ticket == VM_PAGE_TICKET_ROLL_IDS) | |
1917 | vm_page_ticket= 0; | |
1918 | else | |
1919 | vm_page_ticket++; | |
1920 | } | |
1921 | ||
91447636 A |
1922 | assert(!m->laundry); |
1923 | assert(m->pageq.next == NULL && m->pageq.prev == NULL); | |
9bccf70c A |
1924 | if(m->zero_fill) { |
1925 | queue_enter(&vm_page_queue_zf, m, vm_page_t, pageq); | |
1926 | } else { | |
1927 | queue_enter(&vm_page_queue_inactive, | |
1928 | m, vm_page_t, pageq); | |
1929 | } | |
1930 | ||
1c79356b A |
1931 | m->inactive = TRUE; |
1932 | if (!m->fictitious) | |
1933 | vm_page_inactive_count++; | |
1934 | } | |
1935 | } | |
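/*
 * Illustrative usage (editor's sketch): the pageout machinery ages a
 * page onto the inactive queue with the page queues locked:
 *
 *	vm_page_lock_queues();
 *	vm_page_deactivate(m);
 *	vm_page_unlock_queues();
 */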
1936 | ||
1937 | /* | |
1938 | * vm_page_activate: | |
1939 | * | |
1940 | * Put the specified page on the active list (if appropriate). | |
1941 | * | |
1942 | * The page queues must be locked. | |
1943 | */ | |
1944 | ||
1945 | void | |
1946 | vm_page_activate( | |
1947 | register vm_page_t m) | |
1948 | { | |
1949 | VM_PAGE_CHECK(m); | |
91447636 A |
1950 | assert(m->object != kernel_object); |
1951 | #if DEBUG | |
1952 | _mutex_assert(&vm_page_queue_lock, MA_OWNED); | |
1953 | #endif | |
1c79356b A |
1954 | if (m->gobbled) { |
1955 | assert(m->wire_count == 0); | |
1956 | if (!m->private && !m->fictitious) | |
1957 | vm_page_wire_count--; | |
1958 | vm_page_gobble_count--; | |
1959 | m->gobbled = FALSE; | |
1960 | } | |
1961 | if (m->private) | |
1962 | return; | |
1963 | ||
1964 | if (m->inactive) { | |
91447636 | 1965 | assert(!m->laundry); |
9bccf70c A |
1966 | if (m->zero_fill) { |
1967 | queue_remove(&vm_page_queue_zf, m, vm_page_t, pageq); | |
1968 | } else { | |
1969 | queue_remove(&vm_page_queue_inactive, | |
1970 | m, vm_page_t, pageq); | |
1971 | } | |
91447636 A |
1972 | m->pageq.next = NULL; |
1973 | m->pageq.prev = NULL; | |
1c79356b A |
1974 | if (!m->fictitious) |
1975 | vm_page_inactive_count--; | |
1976 | m->inactive = FALSE; | |
1977 | } | |
1978 | if (m->wire_count == 0) { | |
91447636 | 1979 | #if DEBUG |
1c79356b A |
1980 | if (m->active) |
1981 | panic("vm_page_activate: already active"); | |
91447636 A |
1982 | #endif |
1983 | assert(!m->laundry); | |
1984 | assert(m->pageq.next == NULL && m->pageq.prev == NULL); | |
1c79356b A |
1985 | queue_enter(&vm_page_queue_active, m, vm_page_t, pageq); |
1986 | m->active = TRUE; | |
1987 | m->reference = TRUE; | |
1988 | if (!m->fictitious) | |
1989 | vm_page_active_count++; | |
1990 | } | |
1991 | } | |
1992 | ||
1993 | /* | |
1994 | * vm_page_part_zero_fill: | |
1995 | * | |
1996 | * Zero-fill a part of the page. | |
1997 | */ | |
1998 | void | |
1999 | vm_page_part_zero_fill( | |
2000 | vm_page_t m, | |
2001 | vm_offset_t m_pa, | |
2002 | vm_size_t len) | |
2003 | { | |
2004 | vm_page_t tmp; | |
2005 | ||
2006 | VM_PAGE_CHECK(m); | |
2007 | #ifdef PMAP_ZERO_PART_PAGE_IMPLEMENTED | |
55e303ae | 2008 | pmap_zero_part_page(m->phys_page, m_pa, len); |
1c79356b A |
2009 | #else |
2010 | while (1) { | |
2011 | tmp = vm_page_grab(); | |
2012 | if (tmp == VM_PAGE_NULL) { | |
2013 | vm_page_wait(THREAD_UNINT); | |
2014 | continue; | |
2015 | } | |
2016 | break; | |
2017 | } | |
2018 | vm_page_zero_fill(tmp); | |
2019 | if(m_pa != 0) { | |
2020 | vm_page_part_copy(m, 0, tmp, 0, m_pa); | |
2021 | } | |
2022 | if((m_pa + len) < PAGE_SIZE) { | |
2023 | vm_page_part_copy(m, m_pa + len, tmp, | |
2024 | m_pa + len, PAGE_SIZE - (m_pa + len)); | |
2025 | } | |
2026 | vm_page_copy(tmp,m); | |
2027 | vm_page_lock_queues(); | |
2028 | vm_page_free(tmp); | |
2029 | vm_page_unlock_queues(); | |
2030 | #endif | |
2031 | ||
2032 | } | |
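/*
 * Illustrative usage (editor's sketch): zero the second half of a
 * page while preserving bytes [0, PAGE_SIZE/2):
 *
 *	vm_page_part_zero_fill(m, PAGE_SIZE / 2, PAGE_SIZE / 2);
 */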
2033 | ||
2034 | /* | |
2035 | * vm_page_zero_fill: | |
2036 | * | |
2037 | * Zero-fill the specified page. | |
2038 | */ | |
2039 | void | |
2040 | vm_page_zero_fill( | |
2041 | vm_page_t m) | |
2042 | { | |
2043 | XPR(XPR_VM_PAGE, | |
2044 | "vm_page_zero_fill, object 0x%X offset 0x%X page 0x%X\n", | |
2045 | (integer_t)m->object, (integer_t)m->offset, (integer_t)m, 0,0); | |
2046 | ||
2047 | VM_PAGE_CHECK(m); | |
2048 | ||
55e303ae A |
2049 | // dbgTrace(0xAEAEAEAE, m->phys_page, 0); /* (BRINGUP) */ |
2050 | pmap_zero_page(m->phys_page); | |
1c79356b A |
2051 | } |
2052 | ||
2053 | /* | |
2054 | * vm_page_part_copy: | |
2055 | * | |
2056 | * Copy part of one page to another.
2057 | */ | |
2058 | ||
2059 | void | |
2060 | vm_page_part_copy( | |
2061 | vm_page_t src_m, | |
2062 | vm_offset_t src_pa, | |
2063 | vm_page_t dst_m, | |
2064 | vm_offset_t dst_pa, | |
2065 | vm_size_t len) | |
2066 | { | |
2067 | VM_PAGE_CHECK(src_m); | |
2068 | VM_PAGE_CHECK(dst_m); | |
2069 | ||
55e303ae A |
2070 | pmap_copy_part_page(src_m->phys_page, src_pa, |
2071 | dst_m->phys_page, dst_pa, len); | |
1c79356b A |
2072 | } |
2073 | ||
2074 | /* | |
2075 | * vm_page_copy: | |
2076 | * | |
2077 | * Copy one page to another | |
91447636 A |
2078 | * |
2079 | * ENCRYPTED SWAP: | |
2080 | * The source page should not be encrypted. The caller should | |
2081 | * make sure the page is decrypted first, if necessary. | |
1c79356b A |
2082 | */ |
2083 | ||
2084 | void | |
2085 | vm_page_copy( | |
2086 | vm_page_t src_m, | |
2087 | vm_page_t dest_m) | |
2088 | { | |
2089 | XPR(XPR_VM_PAGE, | |
2090 | "vm_page_copy, object 0x%X offset 0x%X to object 0x%X offset 0x%X\n", | |
2091 | (integer_t)src_m->object, src_m->offset, | |
2092 | (integer_t)dest_m->object, dest_m->offset, | |
2093 | 0); | |
2094 | ||
2095 | VM_PAGE_CHECK(src_m); | |
2096 | VM_PAGE_CHECK(dest_m); | |
2097 | ||
91447636 A |
2098 | /* |
2099 | * ENCRYPTED SWAP: | |
2100 | * The source page should not be encrypted at this point. | |
2101 | * The destination page will therefore not contain encrypted | |
2102 | * data after the copy. | |
2103 | */ | |
2104 | if (src_m->encrypted) { | |
2105 | panic("vm_page_copy: source page %p is encrypted\n", src_m); | |
2106 | } | |
2107 | dest_m->encrypted = FALSE; | |
2108 | ||
55e303ae | 2109 | pmap_copy_page(src_m->phys_page, dest_m->phys_page); |
1c79356b A |
2110 | } |
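/*
 * Illustrative usage (editor's sketch): per the ENCRYPTED SWAP note
 * above, the caller must ensure the source is decrypted first:
 *
 *	assert(!src_m->encrypted);
 *	vm_page_copy(src_m, dest_m);
 */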
2111 | ||
1c79356b A |
2112 | /* |
2113 | * Currently, this is a primitive allocator that grabs | |
2114 | * free pages from the system, sorts them by physical | |
2115 | * address, then searches for a region large enough to | |
2116 | * satisfy the user's request. | |
2117 | * | |
2118 | * Additional levels of effort: | |
2119 | * + steal clean active/inactive pages | |
2120 | * + force pageouts of dirty pages | |
2121 | * + maintain a map of available physical | |
2122 | * memory | |
2123 | */ | |
2124 | ||
1c79356b A |
2125 | #if MACH_ASSERT |
2126 | /* | |
2127 | * Check that the list of pages is ordered by | |
2128 | * ascending physical address and has no holes. | |
2129 | */ | |
91447636 A |
2130 | int vm_page_verify_contiguous( |
2131 | vm_page_t pages, | |
2132 | unsigned int npages); | |
2133 | ||
1c79356b A |
2134 | int |
2135 | vm_page_verify_contiguous( | |
2136 | vm_page_t pages, | |
2137 | unsigned int npages) | |
2138 | { | |
2139 | register vm_page_t m; | |
2140 | unsigned int page_count; | |
91447636 | 2141 | vm_offset_t prev_addr; |
1c79356b | 2142 | |
55e303ae | 2143 | prev_addr = pages->phys_page; |
1c79356b A |
2144 | page_count = 1; |
2145 | for (m = NEXT_PAGE(pages); m != VM_PAGE_NULL; m = NEXT_PAGE(m)) { | |
55e303ae | 2146 | if (m->phys_page != prev_addr + 1) { |
1c79356b | 2147 | printf("m 0x%x prev_addr 0x%x, current addr 0x%x\n", |
55e303ae | 2148 | m, prev_addr, m->phys_page); |
91447636 | 2149 | printf("pages 0x%x page_count %d\n", pages, page_count); |
1c79356b A |
2150 | panic("vm_page_verify_contiguous: not contiguous!"); |
2151 | } | |
55e303ae | 2152 | prev_addr = m->phys_page; |
1c79356b A |
2153 | ++page_count; |
2154 | } | |
2155 | if (page_count != npages) { | |
2156 | printf("pages 0x%x actual count 0x%x but requested 0x%x\n", | |
2157 | pages, page_count, npages); | |
2158 | panic("vm_page_verify_contiguous: count error"); | |
2159 | } | |
2160 | return 1; | |
2161 | } | |
2162 | #endif /* MACH_ASSERT */ | |
2163 | ||
2164 | ||
91447636 A |
2165 | cpm_counter(unsigned int vpfls_pages_handled = 0;) |
2166 | cpm_counter(unsigned int vpfls_head_insertions = 0;) | |
2167 | cpm_counter(unsigned int vpfls_tail_insertions = 0;) | |
2168 | cpm_counter(unsigned int vpfls_general_insertions = 0;) | |
2169 | cpm_counter(unsigned int vpfc_failed = 0;) | |
2170 | cpm_counter(unsigned int vpfc_satisfied = 0;) | |
2171 | ||
1c79356b A |
2172 | /* |
2173 | * Find a region large enough to contain at least npages | |
2174 | * of contiguous physical memory. | |
2175 | * | |
2176 | * Requirements: | |
2177 | * - Called while holding vm_page_queue_free_lock. | |
2178 | * - Doesn't respect vm_page_free_reserved; caller | |
2179 | * must not ask for more pages than are legal to grab. | |
2180 | * | |
2181 | * Returns a pointer to a list of gobbled pages or VM_PAGE_NULL. | |
2182 | * | |
e5568f75 A |
2183 | * Algorithm: |
2184 | * Loop over the free list, extracting one page at a time and | |
2185 | * inserting them into a sorted sub-list. We stop as soon as
2186 | * there's a contiguous range within the sorted list that can | |
2187 | * satisfy the contiguous memory request. This contiguous sub- | |
2188 | * list is chopped out of the sorted sub-list and the remainder | |
2189 | * of the sorted sub-list is put back onto the beginning of the | |
2190 | * free list. | |
1c79356b A |
2191 | */ |
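/*
 * Worked example (editor's illustration): with free pages at
 * physical page numbers 7, 3, 4, 9, 5 and contig_pages == 3, the
 * sorted sub-list grows as 7; 3,7; 3,4,7; 3,4,7,9; inserting 5
 * completes the run 3,4,5, which is chopped out and returned while
 * 7,9 is pushed back onto the free list.
 */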
2192 | static vm_page_t | |
2193 | vm_page_find_contiguous( | |
e5568f75 | 2194 | unsigned int contig_pages) |
1c79356b | 2195 | { |
e5568f75 A |
2196 | vm_page_t sort_list; |
2197 | vm_page_t *contfirstprev, contlast; | |
2198 | vm_page_t m, m1; | |
2199 | ppnum_t prevcontaddr; | |
2200 | ppnum_t nextcontaddr; | |
2201 | unsigned int npages; | |
2202 | ||
91447636 A |
2203 | m = NULL; |
2204 | #if DEBUG | |
2205 | _mutex_assert(&vm_page_queue_free_lock, MA_OWNED); | |
2206 | #endif | |
e5568f75 A |
2207 | #if MACH_ASSERT |
2208 | /* | |
2209 | * Verify pages in the free list.
2210 | */ | |
2211 | npages = 0; | |
2212 | for (m = vm_page_queue_free; m != VM_PAGE_NULL; m = NEXT_PAGE(m)) | |
2213 | ++npages; | |
2214 | if (npages != vm_page_free_count) | |
2215 | panic("vm_sort_free_list: prelim: npages %u free_count %d", | |
2216 | npages, vm_page_free_count); | |
2217 | #endif /* MACH_ASSERT */ | |
1c79356b | 2218 | |
e5568f75 | 2219 | if (contig_pages == 0 || vm_page_queue_free == VM_PAGE_NULL) |
1c79356b A |
2220 | return VM_PAGE_NULL; |
2221 | ||
91447636 A |
2222 | #define PPNUM_PREV(x) (((x) > 0) ? ((x) - 1) : 0) |
2223 | #define PPNUM_NEXT(x) (((x) < PPNUM_MAX) ? ((x) + 1) : PPNUM_MAX) | |
2224 | #define SET_NEXT_PAGE(m,n) ((m)->pageq.next = (struct queue_entry *) (n)) | |
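/*
 * Editor's note: PPNUM_PREV(0) == 0 and PPNUM_NEXT(PPNUM_MAX) ==
 * PPNUM_MAX, i.e. the neighbor computations saturate at the ends of
 * the physical page number range instead of wrapping around.
 */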
1c79356b | 2225 | |
e5568f75 A |
2226 | npages = 1; |
2227 | contfirstprev = &sort_list; | |
2228 | contlast = sort_list = vm_page_queue_free; | |
2229 | vm_page_queue_free = NEXT_PAGE(sort_list); | |
2230 | SET_NEXT_PAGE(sort_list, VM_PAGE_NULL); | |
2231 | prevcontaddr = PPNUM_PREV(sort_list->phys_page); | |
2232 | nextcontaddr = PPNUM_NEXT(sort_list->phys_page); | |
2233 | ||
2234 | while (npages < contig_pages && | |
2235 | (m = vm_page_queue_free) != VM_PAGE_NULL) | |
2236 | { | |
2237 | cpm_counter(++vpfls_pages_handled); | |
2238 | ||
2239 | /* prepend to existing run? */ | |
2240 | if (m->phys_page == prevcontaddr) | |
2241 | { | |
2242 | vm_page_queue_free = NEXT_PAGE(m); | |
2243 | cpm_counter(++vpfls_head_insertions); | |
2244 | prevcontaddr = PPNUM_PREV(prevcontaddr); | |
2245 | SET_NEXT_PAGE(m, *contfirstprev); | |
2246 | *contfirstprev = m; | |
2247 | npages++; | |
2248 | continue; /* no tail expansion check needed */ | |
2249 | } | |
2250 | ||
2251 | /* append to tail of existing run? */ | |
2252 | else if (m->phys_page == nextcontaddr) | |
2253 | { | |
2254 | vm_page_queue_free = NEXT_PAGE(m); | |
2255 | cpm_counter(++vpfls_tail_insertions); | |
2256 | nextcontaddr = PPNUM_NEXT(nextcontaddr); | |
2257 | SET_NEXT_PAGE(m, NEXT_PAGE(contlast)); | |
2258 | SET_NEXT_PAGE(contlast, m); | |
2259 | contlast = m; | |
2260 | npages++; | |
2261 | } | |
2262 | ||
2263 | /* prepend to the very front of sorted list? */ | |
2264 | else if (m->phys_page < sort_list->phys_page) | |
2265 | { | |
2266 | vm_page_queue_free = NEXT_PAGE(m); | |
2267 | cpm_counter(++vpfls_general_insertions); | |
2268 | prevcontaddr = PPNUM_PREV(m->phys_page); | |
2269 | nextcontaddr = PPNUM_NEXT(m->phys_page); | |
2270 | SET_NEXT_PAGE(m, sort_list); | |
2271 | contfirstprev = &sort_list; | |
2272 | contlast = sort_list = m; | |
2273 | npages = 1; | |
1c79356b A |
2274 | } |
2275 | ||
e5568f75 A |
2276 | else /* get to proper place for insertion */ |
2277 | { | |
2278 | if (m->phys_page < nextcontaddr) | |
2279 | { | |
2280 | prevcontaddr = PPNUM_PREV(sort_list->phys_page); | |
2281 | nextcontaddr = PPNUM_NEXT(sort_list->phys_page); | |
2282 | contfirstprev = &sort_list; | |
2283 | contlast = sort_list; | |
2284 | npages = 1; | |
2285 | } | |
2286 | for (m1 = NEXT_PAGE(contlast); | |
2287 | npages < contig_pages && | |
2288 | m1 != VM_PAGE_NULL && m1->phys_page < m->phys_page; | |
2289 | m1 = NEXT_PAGE(m1)) | |
2290 | { | |
2291 | if (m1->phys_page != nextcontaddr) { | |
2292 | prevcontaddr = PPNUM_PREV(m1->phys_page); | |
2293 | contfirstprev = NEXT_PAGE_PTR(contlast); | |
2294 | npages = 1; | |
2295 | } else { | |
2296 | npages++; | |
2297 | } | |
2298 | nextcontaddr = PPNUM_NEXT(m1->phys_page); | |
2299 | contlast = m1; | |
2300 | } | |
2301 | ||
1c79356b | 2302 | /* |
e5568f75 A |
2303 | * We may actually already have enough. |
2304 | * This could happen if a previous prepend | |
2305 | * joined up two runs to meet our needs. | |
2306 | * If so, bail before we take the current | |
2307 | * page off the free queue. | |
1c79356b | 2308 | */ |
e5568f75 A |
2309 | if (npages == contig_pages) |
2310 | break; | |
2311 | ||
91447636 A |
2312 | if (m->phys_page != nextcontaddr) |
2313 | { | |
e5568f75 A |
2314 | contfirstprev = NEXT_PAGE_PTR(contlast); |
2315 | prevcontaddr = PPNUM_PREV(m->phys_page); | |
2316 | nextcontaddr = PPNUM_NEXT(m->phys_page); | |
2317 | npages = 1; | |
2318 | } else { | |
2319 | nextcontaddr = PPNUM_NEXT(nextcontaddr); | |
2320 | npages++; | |
1c79356b | 2321 | } |
e5568f75 A |
2322 | vm_page_queue_free = NEXT_PAGE(m); |
2323 | cpm_counter(++vpfls_general_insertions); | |
2324 | SET_NEXT_PAGE(m, NEXT_PAGE(contlast)); | |
2325 | SET_NEXT_PAGE(contlast, m); | |
2326 | contlast = m; | |
2327 | } | |
2328 | ||
2329 | /* See how many pages are now contiguous after the insertion */ | |
2330 | for (m1 = NEXT_PAGE(m); | |
2331 | npages < contig_pages && | |
2332 | m1 != VM_PAGE_NULL && m1->phys_page == nextcontaddr; | |
2333 | m1 = NEXT_PAGE(m1)) | |
2334 | { | |
2335 | nextcontaddr = PPNUM_NEXT(nextcontaddr); | |
2336 | contlast = m1; | |
2337 | npages++; | |
1c79356b | 2338 | } |
e5568f75 | 2339 | } |
1c79356b | 2340 | |
e5568f75 A |
2341 | /* how did we do? */ |
2342 | if (npages == contig_pages) | |
2343 | { | |
2344 | cpm_counter(++vpfc_satisfied); | |
2345 | ||
2346 | /* remove the contiguous range from the sorted list */ | |
2347 | m = *contfirstprev; | |
2348 | *contfirstprev = NEXT_PAGE(contlast); | |
2349 | SET_NEXT_PAGE(contlast, VM_PAGE_NULL); | |
2350 | assert(vm_page_verify_contiguous(m, npages)); | |
2351 | ||
2352 | /* inline vm_page_gobble() for each returned page */ | |
2353 | for (m1 = m; m1 != VM_PAGE_NULL; m1 = NEXT_PAGE(m1)) { | |
2354 | assert(m1->free); | |
2355 | assert(!m1->wanted); | |
91447636 | 2356 | assert(!m1->laundry); |
e5568f75 A |
2357 | m1->free = FALSE; |
2358 | m1->no_isync = TRUE; | |
2359 | m1->gobbled = TRUE; | |
2360 | } | |
2361 | vm_page_wire_count += npages; | |
2362 | vm_page_gobble_count += npages; | |
2363 | vm_page_free_count -= npages; | |
2364 | ||
2365 | /* stick free list at the tail of the sorted list */ | |
2366 | while ((m1 = *contfirstprev) != VM_PAGE_NULL) | |
2367 | contfirstprev = (vm_page_t *)&m1->pageq.next; | |
2368 | *contfirstprev = vm_page_queue_free; | |
1c79356b | 2369 | } |
e5568f75 A |
2370 | |
2371 | vm_page_queue_free = sort_list; | |
2372 | return m; | |
1c79356b A |
2373 | } |
2374 | ||
2375 | /* | |
2376 | * Allocate a list of contiguous, wired pages. | |
2377 | */ | |
2378 | kern_return_t | |
2379 | cpm_allocate( | |
2380 | vm_size_t size, | |
2381 | vm_page_t *list, | |
2382 | boolean_t wire) | |
2383 | { | |
2384 | register vm_page_t m; | |
91447636 A |
2385 | vm_page_t pages; |
2386 | unsigned int npages; | |
2387 | unsigned int vm_pages_available; | |
e5568f75 | 2388 | boolean_t wakeup; |
1c79356b A |
2389 | |
2390 | if (size % page_size != 0) | |
2391 | return KERN_INVALID_ARGUMENT; | |
2392 | ||
2393 | vm_page_lock_queues(); | |
2394 | mutex_lock(&vm_page_queue_free_lock); | |
2395 | ||
2396 | /* | |
2397 | * Should also take active and inactive pages | |
2398 | * into account... One day... | |
2399 | */ | |
e5568f75 | 2400 | npages = size / page_size; |
1c79356b A |
2401 | vm_pages_available = vm_page_free_count - vm_page_free_reserved; |
2402 | ||
e5568f75 | 2403 | if (npages > vm_pages_available) { |
1c79356b | 2404 | mutex_unlock(&vm_page_queue_free_lock); |
e5568f75 | 2405 | vm_page_unlock_queues(); |
1c79356b A |
2406 | return KERN_RESOURCE_SHORTAGE; |
2407 | } | |
2408 | ||
1c79356b A |
2409 | /* |
2410 | * Obtain a pointer to a subset of the free | |
2411 | * list large enough to satisfy the request; | |
2412 | * the region will be physically contiguous. | |
2413 | */ | |
2414 | pages = vm_page_find_contiguous(npages); | |
e5568f75 A |
2415 | |
2416 | /* adjust global freelist counts and determine need for wakeups */ | |
2417 | if (vm_page_free_count < vm_page_free_count_minimum) | |
2418 | vm_page_free_count_minimum = vm_page_free_count; | |
2419 | ||
2420 | wakeup = ((vm_page_free_count < vm_page_free_min) || | |
2421 | ((vm_page_free_count < vm_page_free_target) && | |
2422 | (vm_page_inactive_count < vm_page_inactive_target))); | |
2423 | ||
2424 | mutex_unlock(&vm_page_queue_free_lock); | |
2425 | ||
1c79356b | 2426 | if (pages == VM_PAGE_NULL) { |
1c79356b A |
2427 | vm_page_unlock_queues(); |
2428 | return KERN_NO_SPACE; | |
2429 | } | |
2430 | ||
1c79356b A |
2431 | /* |
2432 | * Walk the returned list, wiring the pages. | |
2433 | */ | |
2434 | if (wire == TRUE) | |
2435 | for (m = pages; m != VM_PAGE_NULL; m = NEXT_PAGE(m)) { | |
2436 | /* | |
2437 | * Essentially inlined vm_page_wire. | |
2438 | */ | |
2439 | assert(!m->active); | |
2440 | assert(!m->inactive); | |
2441 | assert(!m->private); | |
2442 | assert(!m->fictitious); | |
2443 | assert(m->wire_count == 0); | |
2444 | assert(m->gobbled); | |
2445 | m->gobbled = FALSE; | |
2446 | m->wire_count++; | |
2447 | --vm_page_gobble_count; | |
2448 | } | |
2449 | vm_page_unlock_queues(); | |
2450 | ||
e5568f75 A |
2451 | if (wakeup) |
2452 | thread_wakeup((event_t) &vm_page_free_wanted); | |
2453 | ||
1c79356b A |
2454 | /* |
2455 | * The CPM pages should now be available and | |
2456 | * ordered by ascending physical address. | |
2457 | */ | |
2458 | assert(vm_page_verify_contiguous(pages, npages)); | |
2459 | ||
2460 | *list = pages; | |
2461 | return KERN_SUCCESS; | |
2462 | } | |
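/*
 * Illustrative caller (editor's sketch; use() stands in for the
 * caller's work): request four contiguous, wired pages and walk the
 * result in ascending physical order:
 *
 *	vm_page_t pages, m;
 *	kern_return_t kr;
 *
 *	kr = cpm_allocate(4 * PAGE_SIZE, &pages, TRUE);
 *	if (kr == KERN_SUCCESS) {
 *		for (m = pages; m != VM_PAGE_NULL; m = NEXT_PAGE(m))
 *			use(m);
 *	}
 */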
2463 | ||
2464 | ||
2465 | #include <mach_vm_debug.h> | |
2466 | #if MACH_VM_DEBUG | |
2467 | ||
2468 | #include <mach_debug/hash_info.h> | |
2469 | #include <vm/vm_debug.h> | |
2470 | ||
2471 | /* | |
2472 | * Routine: vm_page_info | |
2473 | * Purpose: | |
2474 | * Return information about the global VP table. | |
2475 | * Fills the buffer with as much information as possible | |
2476 | * and returns the desired size of the buffer. | |
2477 | * Conditions: | |
2478 | * Nothing locked. The caller should provide | |
2479 | * possibly-pageable memory. | |
2480 | */ | |
2481 | ||
2482 | unsigned int | |
2483 | vm_page_info( | |
2484 | hash_info_bucket_t *info, | |
2485 | unsigned int count) | |
2486 | { | |
91447636 | 2487 | unsigned int i; |
1c79356b A |
2488 | |
2489 | if (vm_page_bucket_count < count) | |
2490 | count = vm_page_bucket_count; | |
2491 | ||
2492 | for (i = 0; i < count; i++) { | |
2493 | vm_page_bucket_t *bucket = &vm_page_buckets[i]; | |
2494 | unsigned int bucket_count = 0; | |
2495 | vm_page_t m; | |
2496 | ||
2497 | simple_lock(&vm_page_bucket_lock); | |
2498 | for (m = bucket->pages; m != VM_PAGE_NULL; m = m->next) | |
2499 | bucket_count++; | |
2500 | simple_unlock(&vm_page_bucket_lock); | |
2501 | ||
2502 | /* don't touch pageable memory while holding locks */ | |
2503 | info[i].hib_count = bucket_count; | |
2504 | } | |
2505 | ||
2506 | return vm_page_bucket_count; | |
2507 | } | |
2508 | #endif /* MACH_VM_DEBUG */ | |
2509 | ||
2510 | #include <mach_kdb.h> | |
2511 | #if MACH_KDB | |
2512 | ||
2513 | #include <ddb/db_output.h> | |
2514 | #include <vm/vm_print.h> | |
2515 | #define printf kdbprintf | |
2516 | ||
2517 | /* | |
2518 | * Routine: vm_page_print [exported] | |
2519 | */ | |
2520 | void | |
2521 | vm_page_print( | |
91447636 | 2522 | db_addr_t db_addr) |
1c79356b | 2523 | { |
91447636 A |
2524 | vm_page_t p; |
2525 | ||
2526 | p = (vm_page_t) (long) db_addr; | |
1c79356b A |
2527 | |
2528 | iprintf("page 0x%x\n", p); | |
2529 | ||
2530 | db_indent += 2; | |
2531 | ||
2532 | iprintf("object=0x%x", p->object); | |
2533 | printf(", offset=0x%x", p->offset); | |
2534 | printf(", wire_count=%d", p->wire_count); | |
1c79356b | 2535 | |
91447636 | 2536 | iprintf("%sinactive, %sactive, %sgobbled, %slaundry, %sfree, %sref, %sencrypted\n", |
1c79356b A |
2537 | (p->inactive ? "" : "!"), |
2538 | (p->active ? "" : "!"), | |
2539 | (p->gobbled ? "" : "!"), | |
2540 | (p->laundry ? "" : "!"), | |
2541 | (p->free ? "" : "!"), | |
2542 | (p->reference ? "" : "!"), | |
91447636 | 2543 | (p->encrypted ? "" : "!")); |
1c79356b A |
2544 | iprintf("%sbusy, %swanted, %stabled, %sfictitious, %sprivate, %sprecious\n", |
2545 | (p->busy ? "" : "!"), | |
2546 | (p->wanted ? "" : "!"), | |
2547 | (p->tabled ? "" : "!"), | |
2548 | (p->fictitious ? "" : "!"), | |
2549 | (p->private ? "" : "!"), | |
2550 | (p->precious ? "" : "!")); | |
2551 | iprintf("%sabsent, %serror, %sdirty, %scleaning, %spageout, %sclustered\n", | |
2552 | (p->absent ? "" : "!"), | |
2553 | (p->error ? "" : "!"), | |
2554 | (p->dirty ? "" : "!"), | |
2555 | (p->cleaning ? "" : "!"), | |
2556 | (p->pageout ? "" : "!"), | |
2557 | (p->clustered ? "" : "!")); | |
0b4e3aa0 | 2558 | iprintf("%slock_supplied, %soverwriting, %srestart, %sunusual\n", |
1c79356b A |
2559 | (p->lock_supplied ? "" : "!"), |
2560 | (p->overwriting ? "" : "!"), | |
2561 | (p->restart ? "" : "!"), | |
0b4e3aa0 | 2562 | (p->unusual ? "" : "!")); |
1c79356b | 2563 | |
55e303ae | 2564 | iprintf("phys_page=0x%x", p->phys_page); |
1c79356b A |
2565 | printf(", page_error=0x%x", p->page_error); |
2566 | printf(", page_lock=0x%x", p->page_lock); | |
2567 | printf(", unlock_request=%d\n", p->unlock_request); | |
2568 | ||
2569 | db_indent -= 2; | |
2570 | } | |
2571 | #endif /* MACH_KDB */ |