/*
 * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */
/*
 *	File:	vm/vm_page.c
 *	Author:	Avadis Tevanian, Jr., Michael Wayne Young
 *
 *	Resident memory management module.
 */

#include <debug.h>

#include <mach/clock_types.h>
#include <mach/vm_prot.h>
#include <mach/vm_statistics.h>
#include <kern/counters.h>
#include <kern/sched_prim.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/zalloc.h>
#include <kern/xpr.h>
#include <vm/pmap.h>
#include <vm/vm_init.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_kern.h>			/* kernel_memory_allocate() */
#include <kern/misc_protos.h>
#include <zone_debug.h>
#include <vm/cpm.h>
#include <ppc/mappings.h>		/* (BRINGUP) */
#include <pexpert/pexpert.h>		/* (BRINGUP) */

#include <vm/vm_protos.h>

/* Variables used to indicate the relative age of pages in the
 * inactive list
 */

unsigned int	vm_page_ticket_roll = 0;
unsigned int	vm_page_ticket = 0;
/*
 * Associated with each page of user-allocatable memory is a
 * page structure.
 */

/*
 * These variables record the values returned by vm_page_bootstrap,
 * for debugging purposes.  The implementation of pmap_steal_memory
 * and pmap_startup here also uses them internally.
 */

vm_offset_t	virtual_space_start;
vm_offset_t	virtual_space_end;
int		vm_page_pages;

/*
 * The vm_page_lookup() routine, which provides for fast
 * (virtual memory object, offset) to page lookup, employs
 * the following hash table.  The vm_page_{insert,remove}
 * routines install and remove associations in the table.
 * [This table is often called the virtual-to-physical,
 * or VP, table.]
 */
typedef struct {
	vm_page_t	pages;
#if	MACH_PAGE_HASH_STATS
	int		cur_count;	/* current count */
	int		hi_count;	/* high water mark */
#endif	/* MACH_PAGE_HASH_STATS */
} vm_page_bucket_t;

vm_page_bucket_t *vm_page_buckets;		/* Array of buckets */
unsigned int	vm_page_bucket_count = 0;	/* How big is array? */
unsigned int	vm_page_hash_mask;		/* Mask for hash function */
unsigned int	vm_page_hash_shift;		/* Shift for hash function */
uint32_t	vm_page_bucket_hash;		/* Basic bucket hash */
decl_simple_lock_data(,vm_page_bucket_lock)

vm_page_t
vm_page_lookup_nohint(vm_object_t object, vm_object_offset_t offset);


#if	MACH_PAGE_HASH_STATS
/* This routine is only for debug.  It is intended to be called by
 * hand by a developer using a kernel debugger.  This routine prints
 * out vm_page_hash table statistics to the kernel debug console.
 */
void
hash_debug(void)
{
	int	i;
	int	numbuckets = 0;
	int	highsum = 0;
	int	maxdepth = 0;

	for (i = 0; i < vm_page_bucket_count; i++) {
		if (vm_page_buckets[i].hi_count) {
			numbuckets++;
			highsum += vm_page_buckets[i].hi_count;
			if (vm_page_buckets[i].hi_count > maxdepth)
				maxdepth = vm_page_buckets[i].hi_count;
		}
	}
	printf("Total number of buckets: %d\n", vm_page_bucket_count);
	printf("Number used buckets:     %d = %d%%\n",
		numbuckets, 100*numbuckets/vm_page_bucket_count);
	printf("Number unused buckets:   %d = %d%%\n",
		vm_page_bucket_count - numbuckets,
		100*(vm_page_bucket_count-numbuckets)/vm_page_bucket_count);
	printf("Sum of bucket max depth: %d\n", highsum);
	printf("Average bucket depth:    %d.%2d\n",
		highsum/vm_page_bucket_count,
		highsum%vm_page_bucket_count);
	printf("Maximum bucket depth:    %d\n", maxdepth);
}
#endif	/* MACH_PAGE_HASH_STATS */

/*
 * The virtual page size is currently implemented as a runtime
 * variable, but is constant once initialized using vm_set_page_size.
 * This initialization must be done in the machine-dependent
 * bootstrap sequence, before calling other machine-independent
 * initializations.
 *
 * All references to the virtual page size outside this
 * module must use the PAGE_SIZE, PAGE_MASK and PAGE_SHIFT
 * constants.
 */
vm_size_t	page_size  = PAGE_SIZE;
vm_size_t	page_mask  = PAGE_MASK;
int		page_shift = PAGE_SHIFT;

/*
 * Resident page structures are initialized from
 * a template (see vm_page_alloc).
 *
 * When adding a new field to the virtual memory
 * object structure, be sure to add initialization
 * (see vm_page_bootstrap).
 */
struct vm_page	vm_page_template;

/*
 * Resident pages that represent real memory
 * are allocated from a free list.
 */
vm_page_t	vm_page_queue_free;
vm_page_t	vm_page_queue_fictitious;
unsigned int	vm_page_free_wanted;
unsigned int	vm_page_free_count;
unsigned int	vm_page_fictitious_count;

unsigned int	vm_page_free_count_minimum;	/* debugging */

/*
 * Occasionally, the virtual memory system uses
 * resident page structures that do not refer to
 * real pages, for example to leave a page with
 * important state information in the VP table.
 *
 * These page structures are allocated the way
 * most other kernel structures are.
 */
zone_t		vm_page_zone;
decl_mutex_data(,vm_page_alloc_lock)
unsigned int	io_throttle_zero_fill;

/*
 * Fictitious pages don't have a physical address,
 * but we must initialize phys_page to something.
 * For debugging, this should be a strange value
 * that the pmap module can recognize in assertions.
 */
vm_offset_t vm_page_fictitious_addr = (vm_offset_t) -1;

/*
 * Resident page structures are also chained on
 * queues that are used by the page replacement
 * system (pageout daemon).  These queues are
 * defined here, but are shared by the pageout
 * module.  The inactive queue is broken into
 * inactive and zf for convenience, as the
 * pageout daemon often assigns a higher
 * affinity to zf pages.
 */
queue_head_t	vm_page_queue_active;
queue_head_t	vm_page_queue_inactive;
unsigned int	vm_page_active_count;
unsigned int	vm_page_inactive_count;
unsigned int	vm_page_wire_count;
unsigned int	vm_page_gobble_count = 0;
unsigned int	vm_page_wire_count_warning = 0;
unsigned int	vm_page_gobble_count_warning = 0;

unsigned int	vm_page_purgeable_count = 0;	/* # of pages purgeable now */
uint64_t	vm_page_purged_count = 0;	/* total count of purged pages */

ppnum_t		vm_lopage_poolstart = 0;
ppnum_t		vm_lopage_poolend = 0;
int		vm_lopage_poolsize = 0;
uint64_t	max_valid_dma_address = 0xffffffffffffffffULL;


/*
 * Several page replacement parameters are also
 * shared with this module, so that page allocation
 * (done here in vm_page_alloc) can trigger the
 * pageout daemon.
 */
unsigned int	vm_page_free_target = 0;
unsigned int	vm_page_free_min = 0;
unsigned int	vm_page_inactive_target = 0;
unsigned int	vm_page_free_reserved = 0;
unsigned int	vm_page_throttled_count = 0;

/*
 * The VM system has a couple of heuristics for deciding
 * that pages are "uninteresting" and should be placed
 * on the inactive queue as likely candidates for replacement.
 * These variables let the heuristics be controlled at run-time
 * to make experimentation easier.
 */

boolean_t vm_page_deactivate_hint = TRUE;

/*
 *	vm_set_page_size:
 *
 *	Sets the page size, perhaps based upon the memory
 *	size.  Must be called before any use of page-size
 *	dependent functions.
 *
 *	Sets page_shift and page_mask from page_size.
 */
void
vm_set_page_size(void)
{
	page_mask = page_size - 1;

	if ((page_mask & page_size) != 0)
		panic("vm_set_page_size: page size not a power of two");

	for (page_shift = 0; ; page_shift++)
		if ((1U << page_shift) == page_size)
			break;
}
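
/*
 * Worked example (illustrative only): with the common page_size of
 * 4096, page_mask becomes 0xFFF and the loop above stops at
 * page_shift == 12, since 1U << 12 == 4096.  A page_size that is not
 * a power of two (e.g. 6144) fails the (page_mask & page_size) check
 * and panics instead.
 */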

/*
 *	vm_page_bootstrap:
 *
 *	Initializes the resident memory module.
 *
 *	Allocates memory for the page cells, and
 *	for the object/offset-to-page hash table headers.
 *	Each page cell is initialized and placed on the free list.
 *	Returns the range of available kernel virtual memory.
 */

void
vm_page_bootstrap(
	vm_offset_t	*startp,
	vm_offset_t	*endp)
{
	register vm_page_t	m;
	unsigned int		i;
	unsigned int		log1;
	unsigned int		log2;
	unsigned int		size;

	/*
	 * Initialize the vm_page template.
	 */

	m = &vm_page_template;
	m->object = VM_OBJECT_NULL;		/* reset later */
	m->offset = (vm_object_offset_t) -1;	/* reset later */
	m->wire_count = 0;

	m->pageq.next = NULL;
	m->pageq.prev = NULL;
	m->listq.next = NULL;
	m->listq.prev = NULL;

	m->inactive = FALSE;
	m->active = FALSE;
	m->laundry = FALSE;
	m->free = FALSE;
	m->no_isync = TRUE;
	m->reference = FALSE;
	m->pageout = FALSE;
	m->dump_cleaning = FALSE;
	m->list_req_pending = FALSE;

	m->busy = TRUE;
	m->wanted = FALSE;
	m->tabled = FALSE;
	m->fictitious = FALSE;
	m->private = FALSE;
	m->absent = FALSE;
	m->error = FALSE;
	m->dirty = FALSE;
	m->cleaning = FALSE;
	m->precious = FALSE;
	m->clustered = FALSE;
	m->lock_supplied = FALSE;
	m->unusual = FALSE;
	m->restart = FALSE;
	m->zero_fill = FALSE;
	m->encrypted = FALSE;

	m->phys_page = 0;		/* reset later */

	m->page_lock = VM_PROT_NONE;
	m->unlock_request = VM_PROT_NONE;
	m->page_error = KERN_SUCCESS;

	/*
	 * Initialize the page queues.
	 */

	mutex_init(&vm_page_queue_free_lock, 0);
	mutex_init(&vm_page_queue_lock, 0);

	vm_page_queue_free = VM_PAGE_NULL;
	vm_page_queue_fictitious = VM_PAGE_NULL;
	queue_init(&vm_page_queue_active);
	queue_init(&vm_page_queue_inactive);
	queue_init(&vm_page_queue_zf);

	vm_page_free_wanted = 0;

	/*
	 * Steal memory for the map and zone subsystems.
	 */

	vm_map_steal_memory();
	zone_steal_memory();

	/*
	 * Allocate (and initialize) the virtual-to-physical
	 * table hash buckets.
	 *
	 * The number of buckets should be a power of two to
	 * get a good hash function.  The following computation
	 * chooses the first power of two that is greater
	 * than the number of physical pages in the system.
	 */

	simple_lock_init(&vm_page_bucket_lock, 0);

	if (vm_page_bucket_count == 0) {
		unsigned int npages = pmap_free_pages();

		vm_page_bucket_count = 1;
		while (vm_page_bucket_count < npages)
			vm_page_bucket_count <<= 1;
	}

	vm_page_hash_mask = vm_page_bucket_count - 1;

	/*
	 * Calculate object shift value for hashing algorithm:
	 *	O = log2(sizeof(struct vm_object))
	 *	B = log2(vm_page_bucket_count)
	 *	hash shifts the object left by
	 *	B/2 - O
	 */
	size = vm_page_bucket_count;
	for (log1 = 0; size > 1; log1++)
		size /= 2;
	size = sizeof(struct vm_object);
	for (log2 = 0; size > 1; log2++)
		size /= 2;
	vm_page_hash_shift = log1/2 - log2 + 1;

	vm_page_bucket_hash = 1 << ((log1 + 1) >> 1);	/* Get (ceiling of sqrt of table size) */
	vm_page_bucket_hash |= 1 << ((log1 + 1) >> 2);	/* Get (ceiling of quadroot of table size) */
	vm_page_bucket_hash |= 1;			/* The low bit must always be set to ensure a unique series */
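
	/*
	 * Worked example (illustrative; assumes a machine with 2^19
	 * physical pages and a 64-byte struct vm_object): the loops
	 * above leave log1 == 19 and log2 == 6, so vm_page_hash_shift
	 * becomes 19/2 - 6 + 1 == 4, and vm_page_bucket_hash is built
	 * from bits 10, 5 and 0, i.e. 0x421.
	 */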

	if (vm_page_hash_mask & vm_page_bucket_count)
		printf("vm_page_bootstrap: WARNING -- strange page hash\n");

	vm_page_buckets = (vm_page_bucket_t *)
		pmap_steal_memory(vm_page_bucket_count *
				  sizeof(vm_page_bucket_t));

	for (i = 0; i < vm_page_bucket_count; i++) {
		register vm_page_bucket_t *bucket = &vm_page_buckets[i];

		bucket->pages = VM_PAGE_NULL;
#if	MACH_PAGE_HASH_STATS
		bucket->cur_count = 0;
		bucket->hi_count = 0;
#endif	/* MACH_PAGE_HASH_STATS */
	}

	/*
	 * Machine-dependent code allocates the resident page table.
	 * It uses vm_page_init to initialize the page frames.
	 * The code also returns to us the virtual space available
	 * to the kernel.  We don't trust the pmap module
	 * to get the alignment right.
	 */

	pmap_startup(&virtual_space_start, &virtual_space_end);
	virtual_space_start = round_page(virtual_space_start);
	virtual_space_end = trunc_page(virtual_space_end);

	*startp = virtual_space_start;
	*endp = virtual_space_end;

	/*
	 * Compute the initial "wire" count.
	 * Up until now, the pages which have been set aside are not under
	 * the VM system's control, so although they aren't explicitly
	 * wired, they nonetheless can't be moved.  At this moment,
	 * all VM managed pages are "free", courtesy of pmap_startup.
	 */
	vm_page_wire_count = atop_64(max_mem) - vm_page_free_count;	/* initial value */

	printf("vm_page_bootstrap: %d free pages\n", vm_page_free_count);
	vm_page_free_count_minimum = vm_page_free_count;

	simple_lock_init(&vm_paging_lock, 0);
}

#ifndef	MACHINE_PAGES
/*
 * We implement pmap_steal_memory and pmap_startup with the help
 * of two simpler functions, pmap_virtual_space and pmap_next_page.
 */

void *
pmap_steal_memory(
	vm_size_t size)
{
	vm_offset_t	addr, vaddr;
	ppnum_t		phys_page;

	/*
	 * Round the requested size up to a multiple of the pointer size.
	 */

	size = (size + sizeof (void *) - 1) &~ (sizeof (void *) - 1);
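	/*
	 * (Illustrative arithmetic: with 4-byte pointers a request of
	 * 10 bytes becomes (10 + 3) & ~3 == 12; adding sizeof(void *) - 1
	 * and clearing the low bits rounds up to the next pointer-size
	 * boundary.)
	 */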

	/*
	 * If this is the first call to pmap_steal_memory,
	 * we have to initialize ourself.
	 */

	if (virtual_space_start == virtual_space_end) {
		pmap_virtual_space(&virtual_space_start, &virtual_space_end);

		/*
		 * The initial values must be aligned properly, and
		 * we don't trust the pmap module to do it right.
		 */

		virtual_space_start = round_page(virtual_space_start);
		virtual_space_end = trunc_page(virtual_space_end);
	}

	/*
	 * Allocate virtual memory for this request.
	 */

	addr = virtual_space_start;
	virtual_space_start += size;

	kprintf("pmap_steal_memory: %08X - %08X; size=%08X\n", addr, virtual_space_start, size);	/* (TEST/DEBUG) */

	/*
	 * Allocate and map physical pages to back new virtual pages.
	 */

	for (vaddr = round_page(addr);
	     vaddr < addr + size;
	     vaddr += PAGE_SIZE) {
		if (!pmap_next_page(&phys_page))
			panic("pmap_steal_memory");

		/*
		 * XXX Logically, these mappings should be wired,
		 * but some pmap modules barf if they are.
		 */

		pmap_enter(kernel_pmap, vaddr, phys_page,
			   VM_PROT_READ|VM_PROT_WRITE,
			   VM_WIMG_USE_DEFAULT, FALSE);
		/*
		 * Account for newly stolen memory
		 */
		vm_page_wire_count++;

	}

	return (void *) addr;
}

void
pmap_startup(
	vm_offset_t *startp,
	vm_offset_t *endp)
{
	unsigned int	i, npages, pages_initialized, fill, fillval;
	vm_page_t	pages;
	ppnum_t		phys_page;
	addr64_t	tmpaddr;
	unsigned int	num_of_lopages = 0;
	unsigned int	last_index;

	/*
	 * We calculate how many page frames we will have
	 * and then allocate the page structures in one chunk.
	 */

	tmpaddr = (addr64_t)pmap_free_pages() * (addr64_t)PAGE_SIZE;	/* Get the amount of memory left */
	tmpaddr = tmpaddr + (addr64_t)(round_page_32(virtual_space_start) - virtual_space_start);	/* Account for any slop */
	npages = (unsigned int)(tmpaddr / (addr64_t)(PAGE_SIZE + sizeof(*pages)));	/* Figure how many pages fit, leaving room for all the vm_page_ts */
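	/*
	 * The divisor above follows from the fact that each resident page
	 * costs PAGE_SIZE bytes of physical memory plus sizeof(struct vm_page)
	 * bytes for the vm_page_t that describes it, so the memory on hand
	 * can hold at most tmpaddr / (PAGE_SIZE + sizeof(*pages)) pages.
	 */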

	pages = (vm_page_t) pmap_steal_memory(npages * sizeof *pages);

	/*
	 * Initialize the page frames.
	 */
	for (i = 0, pages_initialized = 0; i < npages; i++) {
		if (!pmap_next_page(&phys_page))
			break;

		vm_page_init(&pages[i], phys_page);
		vm_page_pages++;
		pages_initialized++;
	}

	/*
	 * Check if we want to initialize pages to a known value
	 */
	fill = 0;						/* Assume no fill */
	if (PE_parse_boot_arg("fill", &fillval)) fill = 1;	/* Set fill */

	/*
	 * If vm_lopage_poolsize is non-zero, then we need to reserve
	 * a pool of pages whose addresses are less than 4G... this pool
	 * is used by drivers whose hardware can't DMA beyond 32 bits...
	 *
	 * Note that this assumes the page list is ascending and
	 * ordered with respect to the physical address.
	 */
	for (i = 0, num_of_lopages = vm_lopage_poolsize; num_of_lopages && i < pages_initialized; num_of_lopages--, i++) {
		vm_page_t m;

		m = &pages[i];

		if (m->phys_page >= (1 << (32 - PAGE_SHIFT)))
			panic("couldn't reserve the lopage pool: not enough lo pages\n");

		if (m->phys_page < vm_lopage_poolend)
			panic("couldn't reserve the lopage pool: page list out of order\n");

		vm_lopage_poolend = m->phys_page;

		if (vm_lopage_poolstart == 0)
			vm_lopage_poolstart = m->phys_page;
		else {
			if (m->phys_page < vm_lopage_poolstart)
				panic("couldn't reserve the lopage pool: page list out of order\n");
		}

		if (fill)
			fillPage(m->phys_page, fillval);	/* Fill the page with a known value if requested at boot */

		vm_page_release(m);
	}
	last_index = i;

	// -debug code remove
	if (2 == vm_himemory_mode) {
		// free low -> high so high is preferred
		for (i = last_index + 1; i <= pages_initialized; i++) {
			if(fill) fillPage(pages[i - 1].phys_page, fillval);	/* Fill the page with a known value if requested at boot */
			vm_page_release(&pages[i - 1]);
		}
	}
	else
	// debug code remove-

	/*
	 * Release pages in reverse order so that physical pages
	 * initially get allocated in ascending addresses.  This keeps
	 * the devices (which must address physical memory) happy if
	 * they require several consecutive pages.
	 */
	for (i = pages_initialized; i > last_index; i--) {
		if(fill) fillPage(pages[i - 1].phys_page, fillval);	/* Fill the page with a known value if requested at boot */
		vm_page_release(&pages[i - 1]);
	}

#if 0
	{
		vm_page_t xx, xxo, xxl;
		int j, k, l;

		j = 0;						/* (BRINGUP) */
		xxl = 0;

		for(xx = vm_page_queue_free; xx; xxl = xx, xx = xx->pageq.next) {	/* (BRINGUP) */
			j++;					/* (BRINGUP) */
			if(j > vm_page_free_count) {		/* (BRINGUP) */
				panic("pmap_startup: too many pages, xx = %08X, xxl = %08X\n", xx, xxl);
			}

			l = vm_page_free_count - j;		/* (BRINGUP) */
			k = 0;					/* (BRINGUP) */

			if(((j - 1) & 0xFFFF) == 0) kprintf("checking number %d of %d\n", j, vm_page_free_count);

			for(xxo = xx->pageq.next; xxo; xxo = xxo->pageq.next) {	/* (BRINGUP) */
				k++;
				if(k > l) panic("pmap_startup: too many in secondary check %d %d\n", k, l);
				if((xx->phys_page & 0xFFFFFFFF) == (xxo->phys_page & 0xFFFFFFFF)) {	/* (BRINGUP) */
					panic("pmap_startup: duplicate physaddr, xx = %08X, xxo = %08X\n", xx, xxo);
				}
			}
		}

		if(j != vm_page_free_count) {			/* (BRINGUP) */
			panic("pmap_startup: vm_page_free_count does not match, calc = %d, vm_page_free_count = %08X\n", j, vm_page_free_count);
		}
	}
#endif


	/*
	 * We have to re-align virtual_space_start,
	 * because pmap_steal_memory has been using it.
	 */

	virtual_space_start = round_page_32(virtual_space_start);

	*startp = virtual_space_start;
	*endp = virtual_space_end;
}
#endif	/* MACHINE_PAGES */

/*
 *	Routine:	vm_page_module_init
 *	Purpose:
 *		Second initialization pass, to be done after
 *		the basic VM system is ready.
 */
void
vm_page_module_init(void)
{
	vm_page_zone = zinit((vm_size_t) sizeof(struct vm_page),
			     0, PAGE_SIZE, "vm pages");

#if	ZONE_DEBUG
	zone_debug_disable(vm_page_zone);
#endif	/* ZONE_DEBUG */

	zone_change(vm_page_zone, Z_EXPAND, FALSE);
	zone_change(vm_page_zone, Z_EXHAUST, TRUE);
	zone_change(vm_page_zone, Z_FOREIGN, TRUE);

	/*
	 * Adjust zone statistics to account for the real pages allocated
	 * in vm_page_create(). [Q: is this really what we want?]
	 */
	vm_page_zone->count += vm_page_pages;
	vm_page_zone->cur_size += vm_page_pages * vm_page_zone->elem_size;

	mutex_init(&vm_page_alloc_lock, 0);
}

/*
 *	Routine:	vm_page_create
 *	Purpose:
 *		After the VM system is up, machine-dependent code
 *		may stumble across more physical memory, for example
 *		memory that it had been reserving for a frame buffer.
 *		vm_page_create turns this memory into available pages.
 */

void
vm_page_create(
	ppnum_t start,
	ppnum_t end)
{
	ppnum_t		phys_page;
	vm_page_t	m;

	for (phys_page = start;
	     phys_page < end;
	     phys_page++) {
		while ((m = (vm_page_t) vm_page_grab_fictitious())
			== VM_PAGE_NULL)
			vm_page_more_fictitious();

		vm_page_init(m, phys_page);
		vm_page_pages++;
		vm_page_release(m);
	}
}

/*
 *	vm_page_hash:
 *
 *	Distributes the object/offset key pair among hash buckets.
 *
 *	NOTE:	The bucket count must be a power of 2
 */
#define vm_page_hash(object, offset) (\
	( (natural_t)((uint32_t)object * vm_page_bucket_hash) + ((uint32_t)atop_64(offset) ^ vm_page_bucket_hash))\
	 & vm_page_hash_mask)
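
/*
 * Illustrative note: because vm_page_bucket_count is a power of 2,
 * masking with vm_page_hash_mask reduces the hash modulo the bucket
 * count, so a lookup indexes the table as in this sketch (the same
 * idiom the routines below use):
 *
 *	vm_page_bucket_t *bucket;
 *	vm_page_t mem;
 *
 *	bucket = &vm_page_buckets[vm_page_hash(object, offset)];
 *	for (mem = bucket->pages; mem != VM_PAGE_NULL; mem = mem->next)
 *		if (mem->object == object && mem->offset == offset)
 *			break;
 */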

/*
 *	vm_page_insert:		[ internal use only ]
 *
 *	Inserts the given mem entry into the object/object-page
 *	table and object list.
 *
 *	The object must be locked.
 */

void
vm_page_insert(
	register vm_page_t		mem,
	register vm_object_t		object,
	register vm_object_offset_t	offset)
{
	register vm_page_bucket_t *bucket;

	XPR(XPR_VM_PAGE,
		"vm_page_insert, object 0x%X offset 0x%X page 0x%X\n",
		(integer_t)object, (integer_t)offset, (integer_t)mem, 0,0);

	VM_PAGE_CHECK(mem);
#if DEBUG
	_mutex_assert(&object->Lock, MA_OWNED);

	if (mem->tabled || mem->object != VM_OBJECT_NULL)
		panic("vm_page_insert: page %p for (obj=%p,off=0x%llx) "
		      "already in (obj=%p,off=0x%llx)",
		      mem, object, offset, mem->object, mem->offset);
#endif
	assert(!object->internal || offset < object->size);

	/* only insert "pageout" pages into "pageout" objects,
	 * and normal pages into normal objects */
	assert(object->pageout == mem->pageout);

	assert(vm_page_lookup(object, offset) == VM_PAGE_NULL);

	/*
	 * Record the object/offset pair in this page
	 */

	mem->object = object;
	mem->offset = offset;

	/*
	 * Insert it into the object/offset hash table
	 */

	bucket = &vm_page_buckets[vm_page_hash(object, offset)];
	simple_lock(&vm_page_bucket_lock);
	mem->next = bucket->pages;
	bucket->pages = mem;
#if	MACH_PAGE_HASH_STATS
	if (++bucket->cur_count > bucket->hi_count)
		bucket->hi_count = bucket->cur_count;
#endif	/* MACH_PAGE_HASH_STATS */
	simple_unlock(&vm_page_bucket_lock);

	/*
	 * Now link into the object's list of backed pages.
	 */

	VM_PAGE_INSERT(mem, object);
	mem->tabled = TRUE;

	/*
	 * Show that the object has one more resident page.
	 */

	object->resident_page_count++;

	if (object->purgable == VM_OBJECT_PURGABLE_VOLATILE ||
	    object->purgable == VM_OBJECT_PURGABLE_EMPTY) {
		vm_page_lock_queues();
		vm_page_purgeable_count++;
		vm_page_unlock_queues();
	}
}

/*
 *	vm_page_replace:
 *
 *	Exactly like vm_page_insert, except that we first
 *	remove any existing page at the given offset in object.
 *
 *	The object and page queues must be locked.
 */

void
vm_page_replace(
	register vm_page_t		mem,
	register vm_object_t		object,
	register vm_object_offset_t	offset)
{
	vm_page_bucket_t *bucket;
	vm_page_t	 found_m = VM_PAGE_NULL;

	VM_PAGE_CHECK(mem);
#if DEBUG
	_mutex_assert(&object->Lock, MA_OWNED);
	_mutex_assert(&vm_page_queue_lock, MA_OWNED);

	if (mem->tabled || mem->object != VM_OBJECT_NULL)
		panic("vm_page_replace: page %p for (obj=%p,off=0x%llx) "
		      "already in (obj=%p,off=0x%llx)",
		      mem, object, offset, mem->object, mem->offset);
#endif
	/*
	 * Record the object/offset pair in this page
	 */

	mem->object = object;
	mem->offset = offset;

	/*
	 * Insert it into the object/offset hash table,
	 * replacing any page that might have been there.
	 */

	bucket = &vm_page_buckets[vm_page_hash(object, offset)];
	simple_lock(&vm_page_bucket_lock);

	if (bucket->pages) {
		vm_page_t *mp = &bucket->pages;
		register vm_page_t m = *mp;

		do {
			if (m->object == object && m->offset == offset) {
				/*
				 * Remove old page from hash list
				 */
				*mp = m->next;

				found_m = m;
				break;
			}
			mp = &m->next;
		} while ((m = *mp));

		mem->next = bucket->pages;
	} else {
		mem->next = VM_PAGE_NULL;
	}
	/*
	 * insert new page at head of hash list
	 */
	bucket->pages = mem;

	simple_unlock(&vm_page_bucket_lock);

	if (found_m) {
		/*
		 * there was already a page at the specified
		 * offset for this object... remove it from
		 * the object and free it back to the free list
		 */
		VM_PAGE_REMOVE(found_m);
		found_m->tabled = FALSE;

		found_m->object = VM_OBJECT_NULL;
		found_m->offset = (vm_object_offset_t) -1;
		object->resident_page_count--;

		if (object->purgable == VM_OBJECT_PURGABLE_VOLATILE ||
		    object->purgable == VM_OBJECT_PURGABLE_EMPTY) {
			assert(vm_page_purgeable_count > 0);
			vm_page_purgeable_count--;
		}

		/*
		 * Return page to the free list.
		 * Note the page is not tabled now
		 */
		vm_page_free(found_m);
	}
	/*
	 * Now link into the object's list of backed pages.
	 */

	VM_PAGE_INSERT(mem, object);
	mem->tabled = TRUE;

	/*
	 * And show that the object has one more resident
	 * page.
	 */

	object->resident_page_count++;

	if (object->purgable == VM_OBJECT_PURGABLE_VOLATILE ||
	    object->purgable == VM_OBJECT_PURGABLE_EMPTY) {
		vm_page_purgeable_count++;
	}
}

/*
 *	vm_page_remove:		[ internal use only ]
 *
 *	Removes the given mem entry from the object/offset-page
 *	table and the object page list.
 *
 *	The object and page queues must be locked.
 */

void
vm_page_remove(
	register vm_page_t	mem)
{
	register vm_page_bucket_t	*bucket;
	register vm_page_t		this;

	XPR(XPR_VM_PAGE,
		"vm_page_remove, object 0x%X offset 0x%X page 0x%X\n",
		(integer_t)mem->object, (integer_t)mem->offset,
		(integer_t)mem, 0,0);
#if DEBUG
	_mutex_assert(&vm_page_queue_lock, MA_OWNED);
	_mutex_assert(&mem->object->Lock, MA_OWNED);
#endif
	assert(mem->tabled);
	assert(!mem->cleaning);
	VM_PAGE_CHECK(mem);

	/*
	 * Remove from the object/offset hash table
	 */

	bucket = &vm_page_buckets[vm_page_hash(mem->object, mem->offset)];
	simple_lock(&vm_page_bucket_lock);
	if ((this = bucket->pages) == mem) {
		/* optimize for common case */

		bucket->pages = mem->next;
	} else {
		register vm_page_t	*prev;

		for (prev = &this->next;
		     (this = *prev) != mem;
		     prev = &this->next)
			continue;
		*prev = this->next;
	}
#if	MACH_PAGE_HASH_STATS
	bucket->cur_count--;
#endif	/* MACH_PAGE_HASH_STATS */
	simple_unlock(&vm_page_bucket_lock);

	/*
	 * Now remove from the object's list of backed pages.
	 */

	VM_PAGE_REMOVE(mem);

	/*
	 * And show that the object has one fewer resident
	 * page.
	 */

	mem->object->resident_page_count--;

	if (mem->object->purgable == VM_OBJECT_PURGABLE_VOLATILE ||
	    mem->object->purgable == VM_OBJECT_PURGABLE_EMPTY) {
		assert(vm_page_purgeable_count > 0);
		vm_page_purgeable_count--;
	}

	mem->tabled = FALSE;
	mem->object = VM_OBJECT_NULL;
	mem->offset = (vm_object_offset_t) -1;
}

/*
 *	vm_page_lookup:
 *
 *	Returns the page associated with the object/offset
 *	pair specified; if none is found, VM_PAGE_NULL is returned.
 *
 *	The object must be locked.  No side effects.
 */

unsigned long vm_page_lookup_hint = 0;
unsigned long vm_page_lookup_hint_next = 0;
unsigned long vm_page_lookup_hint_prev = 0;
unsigned long vm_page_lookup_hint_miss = 0;

vm_page_t
vm_page_lookup(
	register vm_object_t		object,
	register vm_object_offset_t	offset)
{
	register vm_page_t		mem;
	register vm_page_bucket_t	*bucket;
	queue_entry_t			qe;
#if 0
	_mutex_assert(&object->Lock, MA_OWNED);
#endif

	mem = object->memq_hint;
	if (mem != VM_PAGE_NULL) {
		assert(mem->object == object);
		if (mem->offset == offset) {
			vm_page_lookup_hint++;
			return mem;
		}
		qe = queue_next(&mem->listq);
		if (! queue_end(&object->memq, qe)) {
			vm_page_t	next_page;

			next_page = (vm_page_t) qe;
			assert(next_page->object == object);
			if (next_page->offset == offset) {
				vm_page_lookup_hint_next++;
				object->memq_hint = next_page;	/* new hint */
				return next_page;
			}
		}
		qe = queue_prev(&mem->listq);
		if (! queue_end(&object->memq, qe)) {
			vm_page_t	prev_page;

			prev_page = (vm_page_t) qe;
			assert(prev_page->object == object);
			if (prev_page->offset == offset) {
				vm_page_lookup_hint_prev++;
				object->memq_hint = prev_page;	/* new hint */
				return prev_page;
			}
		}
	}

	/*
	 * Search the hash table for this object/offset pair
	 */

	bucket = &vm_page_buckets[vm_page_hash(object, offset)];

	/*
	 * Since we hold the object lock, we are guaranteed that no
	 * new pages can be inserted into this object... this in turn
	 * guarantees that the page we're looking for can't exist
	 * if the bucket it hashes to is currently NULL even when looked
	 * at outside the scope of the hash bucket lock... this is a
	 * really cheap optimization to avoid taking the lock.
	 */
1117 | if (bucket->pages == VM_PAGE_NULL) { | |
1118 | return (VM_PAGE_NULL); | |
1119 | } | |
1c79356b | 1120 | simple_lock(&vm_page_bucket_lock); |
4452a7af | 1121 | |
1c79356b A |
1122 | for (mem = bucket->pages; mem != VM_PAGE_NULL; mem = mem->next) { |
1123 | VM_PAGE_CHECK(mem); | |
1124 | if ((mem->object == object) && (mem->offset == offset)) | |
1125 | break; | |
1126 | } | |
1127 | simple_unlock(&vm_page_bucket_lock); | |
55e303ae | 1128 | |
91447636 A |
1129 | if (mem != VM_PAGE_NULL) { |
1130 | if (object->memq_hint != VM_PAGE_NULL) { | |
1131 | vm_page_lookup_hint_miss++; | |
1132 | } | |
1133 | assert(mem->object == object); | |
1134 | object->memq_hint = mem; | |
1135 | } | |
1136 | ||
1137 | return(mem); | |
1138 | } | |
1139 | ||
1140 | ||
1141 | vm_page_t | |
1142 | vm_page_lookup_nohint( | |
1143 | vm_object_t object, | |
1144 | vm_object_offset_t offset) | |
1145 | { | |
1146 | register vm_page_t mem; | |
1147 | register vm_page_bucket_t *bucket; | |
1148 | ||
1149 | #if 0 | |
1150 | _mutex_assert(&object->Lock, MA_OWNED); | |
1151 | #endif | |
1152 | /* | |
1153 | * Search the hash table for this object/offset pair | |
1154 | */ | |
1155 | ||
1156 | bucket = &vm_page_buckets[vm_page_hash(object, offset)]; | |
1157 | ||
1158 | simple_lock(&vm_page_bucket_lock); | |
1159 | for (mem = bucket->pages; mem != VM_PAGE_NULL; mem = mem->next) { | |
1160 | VM_PAGE_CHECK(mem); | |
1161 | if ((mem->object == object) && (mem->offset == offset)) | |
1162 | break; | |
1163 | } | |
1164 | simple_unlock(&vm_page_bucket_lock); | |
1165 | ||
1c79356b A |
1166 | return(mem); |
1167 | } | |
1168 | ||
1169 | /* | |
1170 | * vm_page_rename: | |
1171 | * | |
1172 | * Move the given memory entry from its | |
1173 | * current object to the specified target object/offset. | |
1174 | * | |
1175 | * The object must be locked. | |
1176 | */ | |
1177 | void | |
1178 | vm_page_rename( | |
1179 | register vm_page_t mem, | |
1180 | register vm_object_t new_object, | |
1181 | vm_object_offset_t new_offset) | |
1182 | { | |
1183 | assert(mem->object != new_object); | |
91447636 A |
1184 | /* |
1185 | * ENCRYPTED SWAP: | |
1186 | * The encryption key is based on the page's memory object | |
1187 | * (aka "pager") and paging offset. Moving the page to | |
1188 | * another VM object changes its "pager" and "paging_offset" | |
1189 | * so it has to be decrypted first. | |
1190 | */ | |
1191 | if (mem->encrypted) { | |
1192 | panic("vm_page_rename: page %p is encrypted\n", mem); | |
1193 | } | |
1c79356b A |
1194 | /* |
1195 | * Changes to mem->object require the page lock because | |
1196 | * the pageout daemon uses that lock to get the object. | |
1197 | */ | |
1198 | ||
1199 | XPR(XPR_VM_PAGE, | |
1200 | "vm_page_rename, new object 0x%X, offset 0x%X page 0x%X\n", | |
1201 | (integer_t)new_object, (integer_t)new_offset, | |
1202 | (integer_t)mem, 0,0); | |
1203 | ||
1204 | vm_page_lock_queues(); | |
1205 | vm_page_remove(mem); | |
1206 | vm_page_insert(mem, new_object, new_offset); | |
1207 | vm_page_unlock_queues(); | |
1208 | } | |
1209 | ||
1210 | /* | |
1211 | * vm_page_init: | |
1212 | * | |
1213 | * Initialize the fields in a new page. | |
1214 | * This takes a structure with random values and initializes it | |
1215 | * so that it can be given to vm_page_release or vm_page_insert. | |
1216 | */ | |
1217 | void | |
1218 | vm_page_init( | |
1219 | vm_page_t mem, | |
55e303ae | 1220 | ppnum_t phys_page) |
1c79356b | 1221 | { |
91447636 | 1222 | assert(phys_page); |
1c79356b | 1223 | *mem = vm_page_template; |
55e303ae | 1224 | mem->phys_page = phys_page; |
1c79356b A |
1225 | } |
1226 | ||
1227 | /* | |
1228 | * vm_page_grab_fictitious: | |
1229 | * | |
1230 | * Remove a fictitious page from the free list. | |
1231 | * Returns VM_PAGE_NULL if there are no free pages. | |
1232 | */ | |
1233 | int c_vm_page_grab_fictitious = 0; | |
1234 | int c_vm_page_release_fictitious = 0; | |
1235 | int c_vm_page_more_fictitious = 0; | |
1236 | ||
1237 | vm_page_t | |
1238 | vm_page_grab_fictitious(void) | |
1239 | { | |
1240 | register vm_page_t m; | |
1241 | ||
1242 | m = (vm_page_t)zget(vm_page_zone); | |
1243 | if (m) { | |
1c79356b A |
1244 | vm_page_init(m, vm_page_fictitious_addr); |
1245 | m->fictitious = TRUE; | |
1c79356b A |
1246 | } |
1247 | ||
1248 | c_vm_page_grab_fictitious++; | |
1249 | return m; | |
1250 | } | |
1251 | ||
1252 | /* | |
1253 | * vm_page_release_fictitious: | |
1254 | * | |
1255 | * Release a fictitious page to the free list. | |
1256 | */ | |
1257 | ||
1258 | void | |
1259 | vm_page_release_fictitious( | |
1260 | register vm_page_t m) | |
1261 | { | |
1262 | assert(!m->free); | |
1263 | assert(m->busy); | |
1264 | assert(m->fictitious); | |
55e303ae | 1265 | assert(m->phys_page == vm_page_fictitious_addr); |
1c79356b A |
1266 | |
1267 | c_vm_page_release_fictitious++; | |
91447636 | 1268 | #if DEBUG |
1c79356b A |
1269 | if (m->free) |
1270 | panic("vm_page_release_fictitious"); | |
91447636 | 1271 | #endif |
1c79356b | 1272 | m->free = TRUE; |
91447636 | 1273 | zfree(vm_page_zone, m); |
1c79356b A |
1274 | } |
1275 | ||
1276 | /* | |
1277 | * vm_page_more_fictitious: | |
1278 | * | |
1279 | * Add more fictitious pages to the free list. | |
1280 | * Allowed to block. This routine is way intimate | |
1281 | * with the zones code, for several reasons: | |
1282 | * 1. we need to carve some page structures out of physical | |
1283 | * memory before zones work, so they _cannot_ come from | |
1284 | * the zone_map. | |
1285 | * 2. the zone needs to be collectable in order to prevent | |
1286 | * growth without bound. These structures are used by | |
1287 | * the device pager (by the hundreds and thousands), as | |
1288 | * private pages for pageout, and as blocking pages for | |
1289 | * pagein. Temporary bursts in demand should not result in | |
1290 | * permanent allocation of a resource. | |
1291 | * 3. To smooth allocation humps, we allocate single pages | |
1292 | * with kernel_memory_allocate(), and cram them into the | |
1293 | * zone. This also allows us to initialize the vm_page_t's | |
1294 | * on the way into the zone, so that zget() always returns | |
1295 | * an initialized structure. The zone free element pointer | |
1296 | * and the free page pointer are both the first item in the | |
1297 | * vm_page_t. | |
1298 | * 4. By having the pages in the zone pre-initialized, we need | |
1299 | * not keep 2 levels of lists. The garbage collector simply | |
1300 | * scans our list, and reduces physical memory usage as it | |
1301 | * sees fit. | |
1302 | */ | |
1303 | ||
1304 | void vm_page_more_fictitious(void) | |
1305 | { | |
1c79356b A |
1306 | register vm_page_t m; |
1307 | vm_offset_t addr; | |
1308 | kern_return_t retval; | |
1309 | int i; | |
1310 | ||
1311 | c_vm_page_more_fictitious++; | |
1312 | ||
1c79356b A |
1313 | /* |
1314 | * Allocate a single page from the zone_map. Do not wait if no physical | |
1315 | * pages are immediately available, and do not zero the space. We need | |
1316 | * our own blocking lock here to prevent having multiple, | |
1317 | * simultaneous requests from piling up on the zone_map lock. Exactly | |
1318 | * one (of our) threads should be potentially waiting on the map lock. | |
1319 | * If winner is not vm-privileged, then the page allocation will fail, | |
1320 | * and it will temporarily block here in the vm_page_wait(). | |
1321 | */ | |
1322 | mutex_lock(&vm_page_alloc_lock); | |
1323 | /* | |
1324 | * If another thread allocated space, just bail out now. | |
1325 | */ | |
1326 | if (zone_free_count(vm_page_zone) > 5) { | |
1327 | /* | |
1328 | * The number "5" is a small number that is larger than the | |
1329 | * number of fictitious pages that any single caller will | |
1330 | * attempt to allocate. Otherwise, a thread will attempt to | |
1331 | * acquire a fictitious page (vm_page_grab_fictitious), fail, | |
1332 | * release all of the resources and locks already acquired, | |
1333 | * and then call this routine. This routine finds the pages | |
1334 | * that the caller released, so fails to allocate new space. | |
1335 | * The process repeats infinitely. The largest known number | |
1336 | * of fictitious pages required in this manner is 2. 5 is | |
1337 | * simply a somewhat larger number. | |
1338 | */ | |
1339 | mutex_unlock(&vm_page_alloc_lock); | |
1340 | return; | |
1341 | } | |
1342 | ||
91447636 A |
1343 | retval = kernel_memory_allocate(zone_map, |
1344 | &addr, PAGE_SIZE, VM_PROT_ALL, | |
1345 | KMA_KOBJECT|KMA_NOPAGEWAIT); | |
1346 | if (retval != KERN_SUCCESS) { | |
1c79356b A |
1347 | /* |
1348 | * No page was available. Tell the pageout daemon, drop the | |
1349 | * lock to give another thread a chance at it, and | |
1350 | * wait for the pageout daemon to make progress. | |
1351 | */ | |
1352 | mutex_unlock(&vm_page_alloc_lock); | |
1353 | vm_page_wait(THREAD_UNINT); | |
1354 | return; | |
1355 | } | |
1356 | /* | |
1357 | * Initialize as many vm_page_t's as will fit on this page. This | |
1358 | * depends on the zone code disturbing ONLY the first item of | |
1359 | * each zone element. | |
1360 | */ | |
1361 | m = (vm_page_t)addr; | |
1362 | for (i = PAGE_SIZE/sizeof(struct vm_page); i > 0; i--) { | |
1363 | vm_page_init(m, vm_page_fictitious_addr); | |
1364 | m->fictitious = TRUE; | |
1365 | m++; | |
1366 | } | |
91447636 | 1367 | zcram(vm_page_zone, (void *) addr, PAGE_SIZE); |
1c79356b A |
1368 | mutex_unlock(&vm_page_alloc_lock); |
1369 | } | |
1370 | ||
1371 | /* | |
1372 | * vm_page_convert: | |
1373 | * | |
1374 | * Attempt to convert a fictitious page into a real page. | |
1375 | */ | |
1376 | ||
1377 | boolean_t | |
1378 | vm_page_convert( | |
1379 | register vm_page_t m) | |
1380 | { | |
1381 | register vm_page_t real_m; | |
1382 | ||
1383 | assert(m->busy); | |
1384 | assert(m->fictitious); | |
1385 | assert(!m->dirty); | |
1386 | ||
1387 | real_m = vm_page_grab(); | |
1388 | if (real_m == VM_PAGE_NULL) | |
1389 | return FALSE; | |
1390 | ||
55e303ae | 1391 | m->phys_page = real_m->phys_page; |
1c79356b | 1392 | m->fictitious = FALSE; |
765c9de3 | 1393 | m->no_isync = TRUE; |
1c79356b A |
1394 | |
1395 | vm_page_lock_queues(); | |
1396 | if (m->active) | |
1397 | vm_page_active_count++; | |
1398 | else if (m->inactive) | |
1399 | vm_page_inactive_count++; | |
1400 | vm_page_unlock_queues(); | |
1401 | ||
55e303ae | 1402 | real_m->phys_page = vm_page_fictitious_addr; |
1c79356b A |
1403 | real_m->fictitious = TRUE; |
1404 | ||
1405 | vm_page_release_fictitious(real_m); | |
1406 | return TRUE; | |
1407 | } | |
1408 | ||
1409 | /* | |
1410 | * vm_pool_low(): | |
1411 | * | |
1412 | * Return true if it is not likely that a non-vm_privileged thread | |
1413 | * can get memory without blocking. Advisory only, since the | |
1414 | * situation may change under us. | |
1415 | */ | |
1416 | int | |
1417 | vm_pool_low(void) | |
1418 | { | |
1419 | /* No locking, at worst we will fib. */ | |
1420 | return( vm_page_free_count < vm_page_free_reserved ); | |
1421 | } | |
1422 | ||
4452a7af A |
1423 | |
1424 | ||
1425 | /* | |
1426 | * this is an interface to support bring-up of drivers | |
1427 | * on platforms with physical memory > 4G... | |
1428 | */ | |
1429 | int vm_himemory_mode = 0; | |
1430 | ||
1431 | ||
1432 | /* | |
1433 | * this interface exists to support hardware controllers | |
1434 | * incapable of generating DMAs with more than 32 bits | |
1435 | * of address on platforms with physical memory > 4G... | |
1436 | */ | |
1437 | unsigned int vm_lopage_free_count = 0; | |
1438 | unsigned int vm_lopage_max_count = 0; | |
1439 | vm_page_t vm_lopage_queue_free = VM_PAGE_NULL; | |
1440 | ||
1441 | vm_page_t | |
1442 | vm_page_grablo(void) | |
1443 | { | |
1444 | register vm_page_t mem; | |
1445 | unsigned int vm_lopage_alloc_count; | |
1446 | ||
1447 | if (vm_lopage_poolsize == 0) | |
1448 | return (vm_page_grab()); | |
1449 | ||
1450 | mutex_lock(&vm_page_queue_free_lock); | |
1451 | ||
1452 | if ((mem = vm_lopage_queue_free) != VM_PAGE_NULL) { | |
1453 | ||
1454 | vm_lopage_queue_free = (vm_page_t) mem->pageq.next; | |
1455 | mem->pageq.next = NULL; | |
1456 | mem->pageq.prev = NULL; | |
1457 | mem->free = FALSE; | |
1458 | mem->no_isync = TRUE; | |
1459 | ||
1460 | vm_lopage_free_count--; | |
1461 | vm_lopage_alloc_count = (vm_lopage_poolend - vm_lopage_poolstart) - vm_lopage_free_count; | |
1462 | if (vm_lopage_alloc_count > vm_lopage_max_count) | |
1463 | vm_lopage_max_count = vm_lopage_alloc_count; | |
1464 | } | |
1465 | mutex_unlock(&vm_page_queue_free_lock); | |
1466 | ||
1467 | return (mem); | |
1468 | } | |
1469 | ||
1470 | ||
1471 | ||
1c79356b A |
1472 | /* |
1473 | * vm_page_grab: | |
1474 | * | |
1475 | * Remove a page from the free list. | |
1476 | * Returns VM_PAGE_NULL if the free list is too small. | |
1477 | */ | |

unsigned long	vm_page_grab_count = 0;	/* measure demand */

vm_page_t
vm_page_grab(void)
{
	register vm_page_t	mem;

	mutex_lock(&vm_page_queue_free_lock);
	vm_page_grab_count++;

	/*
	 *	Optionally produce warnings if the wire or gobble
	 *	counts exceed some threshold.
	 */
	if (vm_page_wire_count_warning > 0
	    && vm_page_wire_count >= vm_page_wire_count_warning) {
		printf("mk: vm_page_grab(): high wired page count of %d\n",
			vm_page_wire_count);
		assert(vm_page_wire_count < vm_page_wire_count_warning);
	}
	if (vm_page_gobble_count_warning > 0
	    && vm_page_gobble_count >= vm_page_gobble_count_warning) {
		printf("mk: vm_page_grab(): high gobbled page count of %d\n",
			vm_page_gobble_count);
		assert(vm_page_gobble_count < vm_page_gobble_count_warning);
	}

	/*
	 *	Only let privileged threads (involved in pageout)
	 *	dip into the reserved pool.
	 */

	if ((vm_page_free_count < vm_page_free_reserved) &&
	    !(current_thread()->options & TH_OPT_VMPRIV)) {
		mutex_unlock(&vm_page_queue_free_lock);
		mem = VM_PAGE_NULL;
		goto wakeup_pageout;
	}

	while (vm_page_queue_free == VM_PAGE_NULL) {
		mutex_unlock(&vm_page_queue_free_lock);
		VM_PAGE_WAIT();
		mutex_lock(&vm_page_queue_free_lock);
	}

	if (--vm_page_free_count < vm_page_free_count_minimum)
		vm_page_free_count_minimum = vm_page_free_count;
	mem = vm_page_queue_free;
	vm_page_queue_free = (vm_page_t) mem->pageq.next;
	mem->pageq.next = NULL;
	mem->pageq.prev = NULL;
	assert(mem->listq.next == NULL && mem->listq.prev == NULL);
	assert(mem->tabled == FALSE);
	assert(mem->object == VM_OBJECT_NULL);
	assert(!mem->laundry);
	mem->free = FALSE;
	mem->no_isync = TRUE;
	mutex_unlock(&vm_page_queue_free_lock);

	assert(pmap_verify_free(mem->phys_page));

	/*
	 *	Decide if we should poke the pageout daemon.
	 *	We do this if the free count is less than the low
	 *	water mark, or if the free count is less than the high
	 *	water mark (but above the low water mark) and the inactive
	 *	count is less than its target.
	 *
	 *	We don't have the counts locked ... if they change a little,
	 *	it doesn't really matter.
	 */

wakeup_pageout:
	if ((vm_page_free_count < vm_page_free_min) ||
	    ((vm_page_free_count < vm_page_free_target) &&
	     (vm_page_inactive_count < vm_page_inactive_target)))
		thread_wakeup((event_t) &vm_page_free_wanted);

//	dbgLog(mem->phys_page, vm_page_free_count, vm_page_wire_count, 4);	/* (TEST/DEBUG) */

	return mem;
}
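
/*
 * Illustrative sketch (not part of the original source): a typical
 * caller retries vm_page_grab() through vm_page_wait() until a page
 * becomes available, mirroring the grab/wait loop used by
 * vm_page_part_zero_fill() later in this file.  The KERN_ABORTED
 * return is this sketch's choice, not a requirement.
 *
 *	vm_page_t	m;
 *
 *	while ((m = vm_page_grab()) == VM_PAGE_NULL) {
 *		if (!vm_page_wait(THREAD_INTERRUPTIBLE))
 *			return KERN_ABORTED;	// interrupted out of the wait
 *	}
 */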

/*
 *	vm_page_release:
 *
 *	Return a page to the free list.
 */

void
vm_page_release(
	register vm_page_t	mem)
{
#if 0
	unsigned int	pindex;
	phys_entry	*physent;

	physent = mapping_phys_lookup(mem->phys_page, &pindex);	/* (BRINGUP) */
	if (physent->ppLink & ppN) {					/* (BRINGUP) */
		panic("vm_page_release: already released - %08X %08X\n", mem, mem->phys_page);
	}
	physent->ppLink = physent->ppLink | ppN;			/* (BRINGUP) */
#endif
	assert(!mem->private && !mem->fictitious);

//	dbgLog(mem->phys_page, vm_page_free_count, vm_page_wire_count, 5);	/* (TEST/DEBUG) */

	mutex_lock(&vm_page_queue_free_lock);
#if DEBUG
	if (mem->free)
		panic("vm_page_release");
#endif
	mem->free = TRUE;
	assert(!mem->laundry);
	assert(mem->object == VM_OBJECT_NULL);
	assert(mem->pageq.next == NULL &&
	       mem->pageq.prev == NULL);

	if (mem->phys_page <= vm_lopage_poolend && mem->phys_page >= vm_lopage_poolstart) {
		/*
		 * this exists to support hardware controllers
		 * incapable of generating DMAs with more than 32 bits
		 * of address on platforms with physical memory > 4G...
		 */
		mem->pageq.next = (queue_entry_t) vm_lopage_queue_free;
		vm_lopage_queue_free = mem;
		vm_lopage_free_count++;
	} else {
		mem->pageq.next = (queue_entry_t) vm_page_queue_free;
		vm_page_queue_free = mem;
		vm_page_free_count++;
		/*
		 *	Check if we should wake up someone waiting for a page.
		 *	But don't bother waking them unless they can allocate.
		 *
		 *	We wake up only one thread, to prevent starvation.
		 *	Because the scheduling system handles wait queues FIFO,
		 *	if we woke all waiting threads, one greedy thread
		 *	could starve multiple nice-guy threads.  When the threads
		 *	all wake up, the greedy thread runs first, grabs the page,
		 *	and waits for another page.  It will be the first to run
		 *	when the next page is freed.
		 *
		 *	However, there is a slight danger here.
		 *	The thread we wake might not use the free page.
		 *	Then the other threads could wait indefinitely
		 *	while the page goes unused.  To forestall this,
		 *	the pageout daemon will keep making free pages
		 *	as long as vm_page_free_wanted is non-zero.
		 */

		if ((vm_page_free_wanted > 0) &&
		    (vm_page_free_count >= vm_page_free_reserved)) {
			vm_page_free_wanted--;
			thread_wakeup_one((event_t) &vm_page_free_count);
		}
	}
	mutex_unlock(&vm_page_queue_free_lock);
}

/*
 *	vm_page_wait:
 *
 *	Wait for a page to become available.
 *	If there are plenty of free pages, then we don't sleep.
 *
 *	Returns:
 *		TRUE:  There may be another page, try again
 *		FALSE: We were interrupted out of our wait, don't try again
 */

boolean_t
vm_page_wait(
	int	interruptible )
{
	/*
	 *	We can't use vm_page_free_reserved to make this
	 *	determination.  Consider: some thread might
	 *	need to allocate two pages.  The first allocation
	 *	succeeds, the second fails.  After the first page is freed,
	 *	a call to vm_page_wait must really block.
	 */
	kern_return_t	wait_result;
	int		need_wakeup = 0;

	mutex_lock(&vm_page_queue_free_lock);
	if (vm_page_free_count < vm_page_free_target) {
		if (vm_page_free_wanted++ == 0)
			need_wakeup = 1;
		wait_result = assert_wait((event_t)&vm_page_free_count, interruptible);
		mutex_unlock(&vm_page_queue_free_lock);
		counter(c_vm_page_wait_block++);

		if (need_wakeup)
			thread_wakeup((event_t)&vm_page_free_wanted);

		if (wait_result == THREAD_WAITING)
			wait_result = thread_block(THREAD_CONTINUE_NULL);

		return (wait_result == THREAD_AWAKENED);
	} else {
		mutex_unlock(&vm_page_queue_free_lock);
		return TRUE;
	}
}

/*
 *	vm_page_alloc:
 *
 *	Allocate and return a memory cell associated
 *	with this VM object/offset pair.
 *
 *	Object must be locked.
 */

vm_page_t
vm_page_alloc(
	vm_object_t		object,
	vm_object_offset_t	offset)
{
	register vm_page_t	mem;

#if DEBUG
	_mutex_assert(&object->Lock, MA_OWNED);
#endif
	mem = vm_page_grab();
	if (mem == VM_PAGE_NULL)
		return VM_PAGE_NULL;

	vm_page_insert(mem, object, offset);

	return (mem);
}
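
/*
 * Illustrative sketch (assumption, not original code): since the
 * object must stay locked across vm_page_alloc(), a caller that is
 * willing to block drops the object lock around the wait and retries:
 *
 *	vm_object_lock(object);
 *	while ((m = vm_page_alloc(object, offset)) == VM_PAGE_NULL) {
 *		vm_object_unlock(object);
 *		VM_PAGE_WAIT();
 *		vm_object_lock(object);
 *	}
 */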


vm_page_t
vm_page_alloclo(
	vm_object_t		object,
	vm_object_offset_t	offset)
{
	register vm_page_t	mem;

#if DEBUG
	_mutex_assert(&object->Lock, MA_OWNED);
#endif
	mem = vm_page_grablo();
	if (mem == VM_PAGE_NULL)
		return VM_PAGE_NULL;

	vm_page_insert(mem, object, offset);

	return (mem);
}


counter(unsigned int c_laundry_pages_freed = 0;)

int vm_pagein_cluster_unused = 0;
boolean_t	vm_page_free_verify = TRUE;
/*
 *	vm_page_free:
 *
 *	Returns the given page to the free list,
 *	disassociating it from any VM object.
 *
 *	Object and page queues must be locked prior to entry.
 */
void
vm_page_free(
	register vm_page_t	mem)
{
	vm_object_t	object = mem->object;

	assert(!mem->free);
	assert(!mem->cleaning);
	assert(!mem->pageout);
	if (vm_page_free_verify && !mem->fictitious && !mem->private) {
		assert(pmap_verify_free(mem->phys_page));
	}

#if DEBUG
	if (mem->object)
		_mutex_assert(&mem->object->Lock, MA_OWNED);
	_mutex_assert(&vm_page_queue_lock, MA_OWNED);

	if (mem->free)
		panic("vm_page_free: freeing page on free list\n");
#endif
	if (mem->tabled)
		vm_page_remove(mem);	/* clears tabled, object, offset */
	VM_PAGE_QUEUES_REMOVE(mem);	/* clears active or inactive */

	if (mem->clustered) {
		mem->clustered = FALSE;
		vm_pagein_cluster_unused++;
	}

	if (mem->wire_count) {
		if (!mem->private && !mem->fictitious)
			vm_page_wire_count--;
		mem->wire_count = 0;
		assert(!mem->gobbled);
	} else if (mem->gobbled) {
		if (!mem->private && !mem->fictitious)
			vm_page_wire_count--;
		vm_page_gobble_count--;
	}
	mem->gobbled = FALSE;

	if (mem->laundry) {
		vm_pageout_throttle_up(mem);
		counter(++c_laundry_pages_freed);
	}

	PAGE_WAKEUP(mem);	/* clears wanted */

	if (mem->absent)
		vm_object_absent_release(object);

	/* Some of these may be unnecessary */
	mem->page_lock = 0;
	mem->unlock_request = 0;
	mem->busy = TRUE;
	mem->absent = FALSE;
	mem->error = FALSE;
	mem->dirty = FALSE;
	mem->precious = FALSE;
	mem->reference = FALSE;
	mem->encrypted = FALSE;

	mem->page_error = KERN_SUCCESS;

	if (mem->private) {
		mem->private = FALSE;
		mem->fictitious = TRUE;
		mem->phys_page = vm_page_fictitious_addr;
	}
	if (mem->fictitious) {
		vm_page_release_fictitious(mem);
	} else {
		/* depends on the queues lock */
		if (mem->zero_fill) {
			vm_zf_count -= 1;
			mem->zero_fill = FALSE;
		}
		vm_page_init(mem, mem->phys_page);
		vm_page_release(mem);
	}
}


void
vm_page_free_list(
	register vm_page_t	mem)
{
	register vm_page_t	nxt;
	register vm_page_t	first = NULL;
	register vm_page_t	last = VM_PAGE_NULL;
	register int		pg_count = 0;

#if DEBUG
	_mutex_assert(&vm_page_queue_lock, MA_OWNED);
#endif
	while (mem) {
#if DEBUG
		if (mem->tabled || mem->object)
			panic("vm_page_free_list: freeing tabled page\n");
		if (mem->inactive || mem->active || mem->free)
			panic("vm_page_free_list: freeing page on list\n");
#endif
		assert(mem->pageq.prev == NULL);
		nxt = (vm_page_t)(mem->pageq.next);

		if (mem->clustered)
			vm_pagein_cluster_unused++;

		if (mem->laundry) {
			vm_pageout_throttle_up(mem);
			counter(++c_laundry_pages_freed);
		}
		mem->busy = TRUE;

		PAGE_WAKEUP(mem);	/* clears wanted */

		if (mem->private)
			mem->fictitious = TRUE;

		if (!mem->fictitious) {
			/* depends on the queues lock */
			if (mem->zero_fill)
				vm_zf_count -= 1;
			assert(!mem->laundry);
			vm_page_init(mem, mem->phys_page);

			mem->free = TRUE;

			if (first == NULL)
				last = mem;
			mem->pageq.next = (queue_t) first;
			first = mem;

			pg_count++;
		} else {
			mem->phys_page = vm_page_fictitious_addr;
			vm_page_release_fictitious(mem);
		}
		mem = nxt;
	}
	if (first) {

		mutex_lock(&vm_page_queue_free_lock);

		last->pageq.next = (queue_entry_t) vm_page_queue_free;
		vm_page_queue_free = first;

		vm_page_free_count += pg_count;

		if ((vm_page_free_wanted > 0) &&
		    (vm_page_free_count >= vm_page_free_reserved)) {
			unsigned int	available_pages;

			if (vm_page_free_count >= vm_page_free_reserved) {
				available_pages = (vm_page_free_count
						   - vm_page_free_reserved);
			} else {
				available_pages = 0;
			}

			if (available_pages >= vm_page_free_wanted) {
				vm_page_free_wanted = 0;
				thread_wakeup((event_t) &vm_page_free_count);
			} else {
				while (available_pages--) {
					vm_page_free_wanted--;
					thread_wakeup_one((event_t) &vm_page_free_count);
				}
			}
		}
		mutex_unlock(&vm_page_queue_free_lock);
	}
}
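
/*
 * Illustrative only: callers build the input list by chaining pages
 * through pageq.next, leaving pageq.prev NULL (as asserted above).
 * "local_freeq" is a hypothetical local variable name used here for
 * the sketch.
 *
 *	vm_page_t	local_freeq = VM_PAGE_NULL;
 *
 *	mem->pageq.next = (queue_entry_t) local_freeq;
 *	local_freeq = mem;
 *	...
 *	vm_page_free_list(local_freeq);
 */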


/*
 *	vm_page_wire:
 *
 *	Mark this page as wired down by yet
 *	another map, removing it from paging queues
 *	as necessary.
 *
 *	The page's object and the page queues must be locked.
 */
void
vm_page_wire(
	register vm_page_t	mem)
{

//	dbgLog(current_thread(), mem->offset, mem->object, 1);	/* (TEST/DEBUG) */

	VM_PAGE_CHECK(mem);
#if DEBUG
	if (mem->object)
		_mutex_assert(&mem->object->Lock, MA_OWNED);
	_mutex_assert(&vm_page_queue_lock, MA_OWNED);
#endif
	if (mem->wire_count == 0) {
		VM_PAGE_QUEUES_REMOVE(mem);
		if (!mem->private && !mem->fictitious && !mem->gobbled)
			vm_page_wire_count++;
		if (mem->gobbled)
			vm_page_gobble_count--;
		mem->gobbled = FALSE;
		if (mem->zero_fill) {
			/* depends on the queues lock */
			vm_zf_count -= 1;
			mem->zero_fill = FALSE;
		}
		/*
		 * ENCRYPTED SWAP:
		 * The page could be encrypted, but we don't have to
		 * decrypt it here because we don't guarantee that the
		 * data is actually valid at this point.  The page will
		 * get decrypted in vm_fault_wire() if needed.
		 */
	}
	assert(!mem->gobbled);
	mem->wire_count++;
}
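
/*
 * Illustrative lock discipline (sketch, not original code): a caller
 * wiring a page takes the object lock and the page-queues lock first,
 * as the header comment above requires.
 *
 *	vm_object_lock(object);
 *	vm_page_lock_queues();
 *	vm_page_wire(m);
 *	vm_page_unlock_queues();
 *	vm_object_unlock(object);
 */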

/*
 *	vm_page_gobble:
 *
 *	Mark this page as consumed by the vm/ipc/xmm subsystems.
 *
 *	Called only for freshly vm_page_grab()ed pages - w/ nothing locked.
 */
void
vm_page_gobble(
	register vm_page_t	mem)
{
	vm_page_lock_queues();
	VM_PAGE_CHECK(mem);

	assert(!mem->gobbled);
	assert(mem->wire_count == 0);

	if (!mem->gobbled && mem->wire_count == 0) {
		if (!mem->private && !mem->fictitious)
			vm_page_wire_count++;
	}
	vm_page_gobble_count++;
	mem->gobbled = TRUE;
	vm_page_unlock_queues();
}

/*
 *	vm_page_unwire:
 *
 *	Release one wiring of this page, potentially
 *	enabling it to be paged again.
 *
 *	The page's object and the page queues must be locked.
 */
void
vm_page_unwire(
	register vm_page_t	mem)
{

//	dbgLog(current_thread(), mem->offset, mem->object, 0);	/* (TEST/DEBUG) */

	VM_PAGE_CHECK(mem);
	assert(mem->wire_count > 0);
#if DEBUG
	if (mem->object)
		_mutex_assert(&mem->object->Lock, MA_OWNED);
	_mutex_assert(&vm_page_queue_lock, MA_OWNED);
#endif
	if (--mem->wire_count == 0) {
		assert(!mem->private && !mem->fictitious);
		vm_page_wire_count--;
		assert(!mem->laundry);
		assert(mem->object != kernel_object);
		assert(mem->pageq.next == NULL && mem->pageq.prev == NULL);
		queue_enter(&vm_page_queue_active, mem, vm_page_t, pageq);
		vm_page_active_count++;
		mem->active = TRUE;
		mem->reference = TRUE;
	}
}

/*
 *	vm_page_deactivate:
 *
 *	Returns the given page to the inactive list,
 *	indicating that no physical maps have access
 *	to this page.  [Used by the physical mapping system.]
 *
 *	The page queues must be locked.
 */
void
vm_page_deactivate(
	register vm_page_t	m)
{
	VM_PAGE_CHECK(m);
	assert(m->object != kernel_object);

//	dbgLog(m->phys_page, vm_page_free_count, vm_page_wire_count, 6);	/* (TEST/DEBUG) */
#if DEBUG
	_mutex_assert(&vm_page_queue_lock, MA_OWNED);
#endif
	/*
	 *	This page is no longer very interesting.  If it was
	 *	interesting (active or inactive/referenced), then we
	 *	clear the reference bit and (re)enter it in the
	 *	inactive queue.  Note wired pages should not have
	 *	their reference bit cleared.
	 */
	if (m->gobbled) {		/* can this happen? */
		assert(m->wire_count == 0);
		if (!m->private && !m->fictitious)
			vm_page_wire_count--;
		vm_page_gobble_count--;
		m->gobbled = FALSE;
	}
	if (m->private || (m->wire_count != 0))
		return;
	if (m->active || (m->inactive && m->reference)) {
		if (!m->fictitious && !m->absent)
			pmap_clear_reference(m->phys_page);
		m->reference = FALSE;
		VM_PAGE_QUEUES_REMOVE(m);
	}
	if (m->wire_count == 0 && !m->inactive) {
		m->page_ticket = vm_page_ticket;
		vm_page_ticket_roll++;

		if (vm_page_ticket_roll == VM_PAGE_TICKETS_IN_ROLL) {
			vm_page_ticket_roll = 0;
			if (vm_page_ticket == VM_PAGE_TICKET_ROLL_IDS)
				vm_page_ticket = 0;
			else
				vm_page_ticket++;
		}

		assert(!m->laundry);
		assert(m->pageq.next == NULL && m->pageq.prev == NULL);
		if (m->zero_fill) {
			queue_enter(&vm_page_queue_zf, m, vm_page_t, pageq);
		} else {
			queue_enter(&vm_page_queue_inactive,
				    m, vm_page_t, pageq);
		}

		m->inactive = TRUE;
		if (!m->fictitious)
			vm_page_inactive_count++;
	}
}

/*
 *	vm_page_activate:
 *
 *	Put the specified page on the active list (if appropriate).
 *
 *	The page queues must be locked.
 */

void
vm_page_activate(
	register vm_page_t	m)
{
	VM_PAGE_CHECK(m);
	assert(m->object != kernel_object);
#if DEBUG
	_mutex_assert(&vm_page_queue_lock, MA_OWNED);
#endif
	if (m->gobbled) {
		assert(m->wire_count == 0);
		if (!m->private && !m->fictitious)
			vm_page_wire_count--;
		vm_page_gobble_count--;
		m->gobbled = FALSE;
	}
	if (m->private)
		return;

	if (m->inactive) {
		assert(!m->laundry);
		if (m->zero_fill) {
			queue_remove(&vm_page_queue_zf, m, vm_page_t, pageq);
		} else {
			queue_remove(&vm_page_queue_inactive,
				     m, vm_page_t, pageq);
		}
		m->pageq.next = NULL;
		m->pageq.prev = NULL;
		if (!m->fictitious)
			vm_page_inactive_count--;
		m->inactive = FALSE;
	}
	if (m->wire_count == 0) {
#if DEBUG
		if (m->active)
			panic("vm_page_activate: already active");
#endif
		assert(!m->laundry);
		assert(m->pageq.next == NULL && m->pageq.prev == NULL);
		queue_enter(&vm_page_queue_active, m, vm_page_t, pageq);
		m->active = TRUE;
		m->reference = TRUE;
		if (!m->fictitious)
			vm_page_active_count++;
	}
}

/*
 *	vm_page_part_zero_fill:
 *
 *	Zero-fill a part of the page.
 */
void
vm_page_part_zero_fill(
	vm_page_t	m,
	vm_offset_t	m_pa,
	vm_size_t	len)
{
	vm_page_t	tmp;

	VM_PAGE_CHECK(m);
#ifdef PMAP_ZERO_PART_PAGE_IMPLEMENTED
	pmap_zero_part_page(m->phys_page, m_pa, len);
#else
	while (1) {
		tmp = vm_page_grab();
		if (tmp == VM_PAGE_NULL) {
			vm_page_wait(THREAD_UNINT);
			continue;
		}
		break;
	}
	vm_page_zero_fill(tmp);
	if (m_pa != 0) {
		vm_page_part_copy(m, 0, tmp, 0, m_pa);
	}
	if ((m_pa + len) < PAGE_SIZE) {
		vm_page_part_copy(m, m_pa + len, tmp,
				  m_pa + len, PAGE_SIZE - (m_pa + len));
	}
	vm_page_copy(tmp, m);
	vm_page_lock_queues();
	vm_page_free(tmp);
	vm_page_unlock_queues();
#endif
}

/*
 *	vm_page_zero_fill:
 *
 *	Zero-fill the specified page.
 */
void
vm_page_zero_fill(
	vm_page_t	m)
{
	XPR(XPR_VM_PAGE,
	    "vm_page_zero_fill, object 0x%X offset 0x%X page 0x%X\n",
	    (integer_t)m->object, (integer_t)m->offset, (integer_t)m, 0, 0);

	VM_PAGE_CHECK(m);

//	dbgTrace(0xAEAEAEAE, m->phys_page, 0);	/* (BRINGUP) */
	pmap_zero_page(m->phys_page);
}

/*
 *	vm_page_part_copy:
 *
 *	Copy part of one page to another.
 */

void
vm_page_part_copy(
	vm_page_t	src_m,
	vm_offset_t	src_pa,
	vm_page_t	dst_m,
	vm_offset_t	dst_pa,
	vm_size_t	len)
{
	VM_PAGE_CHECK(src_m);
	VM_PAGE_CHECK(dst_m);

	pmap_copy_part_page(src_m->phys_page, src_pa,
			    dst_m->phys_page, dst_pa, len);
}

/*
 *	vm_page_copy:
 *
 *	Copy one page to another.
 *
 *	ENCRYPTED SWAP:
 *	The source page should not be encrypted.  The caller should
 *	make sure the page is decrypted first, if necessary.
 */

void
vm_page_copy(
	vm_page_t	src_m,
	vm_page_t	dest_m)
{
	XPR(XPR_VM_PAGE,
	    "vm_page_copy, object 0x%X offset 0x%X to object 0x%X offset 0x%X\n",
	    (integer_t)src_m->object, src_m->offset,
	    (integer_t)dest_m->object, dest_m->offset,
	    0);

	VM_PAGE_CHECK(src_m);
	VM_PAGE_CHECK(dest_m);

	/*
	 *	ENCRYPTED SWAP:
	 *	The source page should not be encrypted at this point.
	 *	The destination page will therefore not contain encrypted
	 *	data after the copy.
	 */
	if (src_m->encrypted) {
		panic("vm_page_copy: source page %p is encrypted\n", src_m);
	}
	dest_m->encrypted = FALSE;

	pmap_copy_page(src_m->phys_page, dest_m->phys_page);
}
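
/*
 * Illustrative caller-side precaution (sketch; assumes the
 * vm_page_decrypt() routine provided by the pageout module is
 * available to the caller): decrypt the source page before copying
 * so the panic above cannot fire.
 *
 *	if (src_m->encrypted)
 *		vm_page_decrypt(src_m, 0);
 *	vm_page_copy(src_m, dest_m);
 */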

/*
 *	Currently, this is a primitive allocator that grabs
 *	free pages from the system, sorts them by physical
 *	address, then searches for a region large enough to
 *	satisfy the user's request.
 *
 *	Additional levels of effort:
 *		+ steal clean active/inactive pages
 *		+ force pageouts of dirty pages
 *		+ maintain a map of available physical
 *		  memory
 */

#if MACH_ASSERT
/*
 *	Check that the list of pages is ordered by
 *	ascending physical address and has no holes.
 */
int	vm_page_verify_contiguous(
		vm_page_t	pages,
		unsigned int	npages);

int
vm_page_verify_contiguous(
	vm_page_t	pages,
	unsigned int	npages)
{
	register vm_page_t	m;
	unsigned int		page_count;
	vm_offset_t		prev_addr;

	prev_addr = pages->phys_page;
	page_count = 1;
	for (m = NEXT_PAGE(pages); m != VM_PAGE_NULL; m = NEXT_PAGE(m)) {
		if (m->phys_page != prev_addr + 1) {
			printf("m 0x%x prev_addr 0x%x, current addr 0x%x\n",
			       m, prev_addr, m->phys_page);
			printf("pages 0x%x page_count %d\n", pages, page_count);
			panic("vm_page_verify_contiguous: not contiguous!");
		}
		prev_addr = m->phys_page;
		++page_count;
	}
	if (page_count != npages) {
		printf("pages 0x%x actual count 0x%x but requested 0x%x\n",
		       pages, page_count, npages);
		panic("vm_page_verify_contiguous: count error");
	}
	return 1;
}
#endif	/* MACH_ASSERT */


cpm_counter(unsigned int	vpfls_pages_handled = 0;)
cpm_counter(unsigned int	vpfls_head_insertions = 0;)
cpm_counter(unsigned int	vpfls_tail_insertions = 0;)
cpm_counter(unsigned int	vpfls_general_insertions = 0;)
cpm_counter(unsigned int	vpfc_failed = 0;)
cpm_counter(unsigned int	vpfc_satisfied = 0;)

/*
 *	Find a region large enough to contain at least npages
 *	of contiguous physical memory.
 *
 *	Requirements:
 *	- Called while holding vm_page_queue_free_lock.
 *	- Doesn't respect vm_page_free_reserved; caller
 *	  must not ask for more pages than are legal to grab.
 *
 *	Returns a pointer to a list of gobbled pages or VM_PAGE_NULL.
 *
 *	Algorithm:
 *	Loop over the free list, extracting one page at a time and
 *	inserting those into a sorted sub-list.  We stop as soon as
 *	there's a contiguous range within the sorted list that can
 *	satisfy the contiguous memory request.  This contiguous sub-
 *	list is chopped out of the sorted sub-list and the remainder
 *	of the sorted sub-list is put back onto the beginning of the
 *	free list.
 */
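/*
 *	Worked example (illustrative, not from the original source):
 *	suppose the free list holds pages with physical numbers
 *	7, 3, 9, 4, 2 (in that order) and contig_pages == 3.  The loop
 *	peels pages off one at a time, growing a sorted sub-list:
 *	after 7, 3, 9 and 4 it holds 3-4-7-9 with current run {3,4},
 *	and prepending 2 extends the run to {2,3,4}, satisfying the
 *	request.  That run is unlinked and returned, and the leftover
 *	sorted pages (7, 9) go back onto the head of the free list.
 */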
static vm_page_t
vm_page_find_contiguous(
	unsigned int	contig_pages)
{
	vm_page_t	sort_list;
	vm_page_t	*contfirstprev, contlast;
	vm_page_t	m, m1;
	ppnum_t		prevcontaddr;
	ppnum_t		nextcontaddr;
	unsigned int	npages;

	m = NULL;
#if DEBUG
	_mutex_assert(&vm_page_queue_free_lock, MA_OWNED);
#endif
#if MACH_ASSERT
	/*
	 *	Verify pages in the free list...
	 */
	npages = 0;
	for (m = vm_page_queue_free; m != VM_PAGE_NULL; m = NEXT_PAGE(m))
		++npages;
	if (npages != vm_page_free_count)
		panic("vm_page_find_contiguous: prelim: npages %u free_count %d",
		      npages, vm_page_free_count);
#endif	/* MACH_ASSERT */

	if (contig_pages == 0 || vm_page_queue_free == VM_PAGE_NULL)
		return VM_PAGE_NULL;

#define PPNUM_PREV(x)		(((x) > 0) ? ((x) - 1) : 0)
#define PPNUM_NEXT(x)		(((x) < PPNUM_MAX) ? ((x) + 1) : PPNUM_MAX)
#define SET_NEXT_PAGE(m,n)	((m)->pageq.next = (struct queue_entry *) (n))

	npages = 1;
	contfirstprev = &sort_list;
	contlast = sort_list = vm_page_queue_free;
	vm_page_queue_free = NEXT_PAGE(sort_list);
	SET_NEXT_PAGE(sort_list, VM_PAGE_NULL);
	prevcontaddr = PPNUM_PREV(sort_list->phys_page);
	nextcontaddr = PPNUM_NEXT(sort_list->phys_page);

	while (npages < contig_pages &&
	       (m = vm_page_queue_free) != VM_PAGE_NULL)
	{
		cpm_counter(++vpfls_pages_handled);

		/* prepend to existing run? */
		if (m->phys_page == prevcontaddr)
		{
			vm_page_queue_free = NEXT_PAGE(m);
			cpm_counter(++vpfls_head_insertions);
			prevcontaddr = PPNUM_PREV(prevcontaddr);
			SET_NEXT_PAGE(m, *contfirstprev);
			*contfirstprev = m;
			npages++;
			continue; /* no tail expansion check needed */
		}

		/* append to tail of existing run? */
		else if (m->phys_page == nextcontaddr)
		{
			vm_page_queue_free = NEXT_PAGE(m);
			cpm_counter(++vpfls_tail_insertions);
			nextcontaddr = PPNUM_NEXT(nextcontaddr);
			SET_NEXT_PAGE(m, NEXT_PAGE(contlast));
			SET_NEXT_PAGE(contlast, m);
			contlast = m;
			npages++;
		}

		/* prepend to the very front of sorted list? */
		else if (m->phys_page < sort_list->phys_page)
		{
			vm_page_queue_free = NEXT_PAGE(m);
			cpm_counter(++vpfls_general_insertions);
			prevcontaddr = PPNUM_PREV(m->phys_page);
			nextcontaddr = PPNUM_NEXT(m->phys_page);
			SET_NEXT_PAGE(m, sort_list);
			contfirstprev = &sort_list;
			contlast = sort_list = m;
			npages = 1;
		}

		else /* get to proper place for insertion */
		{
			if (m->phys_page < nextcontaddr)
			{
				prevcontaddr = PPNUM_PREV(sort_list->phys_page);
				nextcontaddr = PPNUM_NEXT(sort_list->phys_page);
				contfirstprev = &sort_list;
				contlast = sort_list;
				npages = 1;
			}
			for (m1 = NEXT_PAGE(contlast);
			     npages < contig_pages &&
			     m1 != VM_PAGE_NULL && m1->phys_page < m->phys_page;
			     m1 = NEXT_PAGE(m1))
			{
				if (m1->phys_page != nextcontaddr) {
					prevcontaddr = PPNUM_PREV(m1->phys_page);
					contfirstprev = NEXT_PAGE_PTR(contlast);
					npages = 1;
				} else {
					npages++;
				}
				nextcontaddr = PPNUM_NEXT(m1->phys_page);
				contlast = m1;
			}

			/*
			 * We may actually already have enough.
			 * This could happen if a previous prepend
			 * joined up two runs to meet our needs.
			 * If so, bail before we take the current
			 * page off the free queue.
			 */
			if (npages == contig_pages)
				break;

			if (m->phys_page != nextcontaddr)
			{
				contfirstprev = NEXT_PAGE_PTR(contlast);
				prevcontaddr = PPNUM_PREV(m->phys_page);
				nextcontaddr = PPNUM_NEXT(m->phys_page);
				npages = 1;
			} else {
				nextcontaddr = PPNUM_NEXT(nextcontaddr);
				npages++;
			}
			vm_page_queue_free = NEXT_PAGE(m);
			cpm_counter(++vpfls_general_insertions);
			SET_NEXT_PAGE(m, NEXT_PAGE(contlast));
			SET_NEXT_PAGE(contlast, m);
			contlast = m;
		}

		/* See how many pages are now contiguous after the insertion */
		for (m1 = NEXT_PAGE(m);
		     npages < contig_pages &&
		     m1 != VM_PAGE_NULL && m1->phys_page == nextcontaddr;
		     m1 = NEXT_PAGE(m1))
		{
			nextcontaddr = PPNUM_NEXT(nextcontaddr);
			contlast = m1;
			npages++;
		}
	}

	/* how did we do? */
	if (npages == contig_pages)
	{
		cpm_counter(++vpfc_satisfied);

		/* remove the contiguous range from the sorted list */
		m = *contfirstprev;
		*contfirstprev = NEXT_PAGE(contlast);
		SET_NEXT_PAGE(contlast, VM_PAGE_NULL);
		assert(vm_page_verify_contiguous(m, npages));

		/* inline vm_page_gobble() for each returned page */
		for (m1 = m; m1 != VM_PAGE_NULL; m1 = NEXT_PAGE(m1)) {
			assert(m1->free);
			assert(!m1->wanted);
			assert(!m1->laundry);
			m1->free = FALSE;
			m1->no_isync = TRUE;
			m1->gobbled = TRUE;
		}
		vm_page_wire_count += npages;
		vm_page_gobble_count += npages;
		vm_page_free_count -= npages;

		/* stick free list at the tail of the sorted list */
		while ((m1 = *contfirstprev) != VM_PAGE_NULL)
			contfirstprev = (vm_page_t *) &m1->pageq.next;
		*contfirstprev = vm_page_queue_free;
	}

	vm_page_queue_free = sort_list;
	return m;
}

/*
 *	Allocate a list of contiguous, wired pages.
 */
kern_return_t
cpm_allocate(
	vm_size_t	size,
	vm_page_t	*list,
	boolean_t	wire)
{
	register vm_page_t	m;
	vm_page_t		pages;
	unsigned int		npages;
	unsigned int		vm_pages_available;
	boolean_t		wakeup;

	if (size % page_size != 0)
		return KERN_INVALID_ARGUMENT;

	vm_page_lock_queues();
	mutex_lock(&vm_page_queue_free_lock);

	/*
	 *	Should also take active and inactive pages
	 *	into account...  One day...
	 */
	npages = size / page_size;
	vm_pages_available = vm_page_free_count - vm_page_free_reserved;

	if (npages > vm_pages_available) {
		mutex_unlock(&vm_page_queue_free_lock);
		vm_page_unlock_queues();
		return KERN_RESOURCE_SHORTAGE;
	}

	/*
	 *	Obtain a pointer to a subset of the free
	 *	list large enough to satisfy the request;
	 *	the region will be physically contiguous.
	 */
	pages = vm_page_find_contiguous(npages);

	/* adjust global freelist counts and determine need for wakeups */
	if (vm_page_free_count < vm_page_free_count_minimum)
		vm_page_free_count_minimum = vm_page_free_count;

	wakeup = ((vm_page_free_count < vm_page_free_min) ||
		  ((vm_page_free_count < vm_page_free_target) &&
		   (vm_page_inactive_count < vm_page_inactive_target)));

	mutex_unlock(&vm_page_queue_free_lock);

	if (pages == VM_PAGE_NULL) {
		vm_page_unlock_queues();
		return KERN_NO_SPACE;
	}

	/*
	 *	Walk the returned list, wiring the pages.
	 */
	if (wire == TRUE)
		for (m = pages; m != VM_PAGE_NULL; m = NEXT_PAGE(m)) {
			/*
			 *	Essentially inlined vm_page_wire.
			 */
			assert(!m->active);
			assert(!m->inactive);
			assert(!m->private);
			assert(!m->fictitious);
			assert(m->wire_count == 0);
			assert(m->gobbled);
			m->gobbled = FALSE;
			m->wire_count++;
			--vm_page_gobble_count;
		}
	vm_page_unlock_queues();

	if (wakeup)
		thread_wakeup((event_t) &vm_page_free_wanted);

	/*
	 *	The CPM pages should now be available and
	 *	ordered by ascending physical address.
	 */
	assert(vm_page_verify_contiguous(pages, npages));

	*list = pages;
	return KERN_SUCCESS;
}
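
/*
 * Illustrative call (hypothetical caller, not original code): request
 * four wired, physically contiguous pages.  On success the returned
 * list is ordered by ascending physical address, as asserted above.
 *
 *	vm_page_t	pages;
 *	kern_return_t	kr;
 *
 *	kr = cpm_allocate(4 * PAGE_SIZE, &pages, TRUE);
 *	if (kr == KERN_SUCCESS)
 *		assert(vm_page_verify_contiguous(pages, 4));
 */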


#include <mach_vm_debug.h>
#if MACH_VM_DEBUG

#include <mach_debug/hash_info.h>
#include <vm/vm_debug.h>

/*
 *	Routine:	vm_page_info
 *	Purpose:
 *		Return information about the global VP table.
 *		Fills the buffer with as much information as possible
 *		and returns the desired size of the buffer.
 *	Conditions:
 *		Nothing locked.  The caller should provide
 *		possibly-pageable memory.
 */

unsigned int
vm_page_info(
	hash_info_bucket_t	*info,
	unsigned int		count)
{
	unsigned int	i;

	if (vm_page_bucket_count < count)
		count = vm_page_bucket_count;

	for (i = 0; i < count; i++) {
		vm_page_bucket_t	*bucket = &vm_page_buckets[i];
		unsigned int		bucket_count = 0;
		vm_page_t		m;

		simple_lock(&vm_page_bucket_lock);
		for (m = bucket->pages; m != VM_PAGE_NULL; m = m->next)
			bucket_count++;
		simple_unlock(&vm_page_bucket_lock);

		/* don't touch pageable memory while holding locks */
		info[i].hib_count = bucket_count;
	}

	return vm_page_bucket_count;
}
#endif	/* MACH_VM_DEBUG */

#include <mach_kdb.h>
#if MACH_KDB

#include <ddb/db_output.h>
#include <vm/vm_print.h>
#define	printf	kdbprintf

/*
 *	Routine:	vm_page_print		[exported]
 */
void
vm_page_print(
	db_addr_t	db_addr)
{
	vm_page_t	p;

	p = (vm_page_t) (long) db_addr;

	iprintf("page 0x%x\n", p);

	db_indent += 2;

	iprintf("object=0x%x", p->object);
	printf(", offset=0x%x", p->offset);
	printf(", wire_count=%d", p->wire_count);

	iprintf("%sinactive, %sactive, %sgobbled, %slaundry, %sfree, %sref, %sencrypted\n",
		(p->inactive ? "" : "!"),
		(p->active ? "" : "!"),
		(p->gobbled ? "" : "!"),
		(p->laundry ? "" : "!"),
		(p->free ? "" : "!"),
		(p->reference ? "" : "!"),
		(p->encrypted ? "" : "!"));
	iprintf("%sbusy, %swanted, %stabled, %sfictitious, %sprivate, %sprecious\n",
		(p->busy ? "" : "!"),
		(p->wanted ? "" : "!"),
		(p->tabled ? "" : "!"),
		(p->fictitious ? "" : "!"),
		(p->private ? "" : "!"),
		(p->precious ? "" : "!"));
	iprintf("%sabsent, %serror, %sdirty, %scleaning, %spageout, %sclustered\n",
		(p->absent ? "" : "!"),
		(p->error ? "" : "!"),
		(p->dirty ? "" : "!"),
		(p->cleaning ? "" : "!"),
		(p->pageout ? "" : "!"),
		(p->clustered ? "" : "!"));
	iprintf("%slock_supplied, %soverwriting, %srestart, %sunusual\n",
		(p->lock_supplied ? "" : "!"),
		(p->overwriting ? "" : "!"),
		(p->restart ? "" : "!"),
		(p->unusual ? "" : "!"));

	iprintf("phys_page=0x%x", p->phys_page);
	printf(", page_error=0x%x", p->page_error);
	printf(", page_lock=0x%x", p->page_lock);
	printf(", unlock_request=%d\n", p->unlock_request);

	db_indent -= 2;
}
#endif	/* MACH_KDB */