/*
 * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */
/*
 *      File:   vm/vm_page.h
 *      Author: Avadis Tevanian, Jr., Michael Wayne Young
 *      Date:   1985
 *
 *      Resident memory system definitions.
 */

#ifndef _VM_VM_PAGE_H_
#define _VM_VM_PAGE_H_

#include <debug.h>
#include <vm/vm_options.h>

#include <mach/boolean.h>
#include <mach/vm_prot.h>
#include <mach/vm_param.h>
#include <vm/vm_object.h>
#include <kern/queue.h>
#include <kern/lock.h>

#include <kern/macro_help.h>
#include <libkern/OSAtomic.h>

/*
 * VM_PAGE_MIN_SPECULATIVE_AGE_Q through VM_PAGE_MAX_SPECULATIVE_AGE_Q
 * represents a set of aging bins that are 'protected'...
 *
 * VM_PAGE_SPECULATIVE_AGED_Q is a list of the speculative pages that have
 * not yet been 'claimed' but have been aged out of the protective bins;
 * this occurs in vm_page_speculate when it advances to the next bin
 * and discovers that it is still occupied... at that point, all of the
 * pages in that bin are moved to the VM_PAGE_SPECULATIVE_AGED_Q.  the pages
 * in that bin are all guaranteed to have reached at least the maximum age
 * we allow for a protected page... they can be older if there is no
 * memory pressure to pull them from the bin, or there are no new speculative
 * pages being generated to push them out.
 * this list is the one that vm_pageout_scan will prefer when looking
 * for pages to move to the underweight free list.
 *
 * VM_PAGE_MAX_SPECULATIVE_AGE_Q * VM_PAGE_SPECULATIVE_Q_AGE_MS
 * defines the amount of time a speculative page is normally
 * allowed to live in the 'protected' state (i.e. not available
 * to be stolen if vm_pageout_scan is running and looking for
 * pages)... however, if the total number of speculative pages
 * in the protected state exceeds our limit (defined in vm_pageout.c)
 * and there are none available in VM_PAGE_SPECULATIVE_AGED_Q, then
 * vm_pageout_scan is allowed to steal pages from the protected
 * bucket even if they are underage.
 *
 * vm_pageout_scan is also allowed to pull pages from a protected
 * bin if the bin has reached the "age of consent" we've set.
 */
#define VM_PAGE_MAX_SPECULATIVE_AGE_Q   10
#define VM_PAGE_MIN_SPECULATIVE_AGE_Q   1
#define VM_PAGE_SPECULATIVE_AGED_Q      0

#define VM_PAGE_SPECULATIVE_Q_AGE_MS    500

struct vm_speculative_age_q {
        /*
         * memory queue for speculative pages via clustered pageins
         */
        queue_head_t    age_q;
        mach_timespec_t age_ts;
};
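
/*
 * Illustrative sketch (an addition, not original source): the nominal
 * 'protected' lifetime of a speculative page follows directly from the
 * constants above; with the defaults, 10 bins * 500ms = 5 seconds:
 *
 *      static inline uint32_t
 *      vm_page_speculative_protected_ms(void)
 *      {
 *              return (VM_PAGE_MAX_SPECULATIVE_AGE_Q *
 *                      vm_page_speculative_q_age_ms);
 *      }
 *
 * vm_page_speculative_q_age_ms (declared below) is the tunable runtime
 * value seeded from VM_PAGE_SPECULATIVE_Q_AGE_MS.
 */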

extern
struct vm_speculative_age_q     vm_page_queue_speculative[];

extern int                      speculative_steal_index;
extern int                      speculative_age_index;
extern unsigned int             vm_page_speculative_q_age_ms;


#define VM_PAGE_COMPRESSOR_COUNT        (compressor_object->resident_page_count)

/*
 *      Management of resident (logical) pages.
 *
 *      A small structure is kept for each resident
 *      page, indexed by page number.  Each structure
 *      is an element of several lists:
 *
 *              A hash table bucket used to quickly
 *              perform object/offset lookups
 *
 *              A list of all pages for a given object,
 *              so they can be quickly deactivated at
 *              time of deallocation.
 *
 *              An ordered list of pages due for pageout.
 *
 *      In addition, the structure contains the object
 *      and offset to which this page belongs (for pageout),
 *      and sundry status bits.
 *
 *      Fields in this structure are locked either by the lock on the
 *      object that the page belongs to (O) or by the lock on the page
 *      queues (P).  [Some fields require that both locks be held to
 *      change that field; holding either lock is sufficient to read.]
 */

struct vm_page {
        queue_chain_t   pageq;          /* queue info for FIFO */
                                        /* queue or free list (P) */

        queue_chain_t   listq;          /* all pages in same object (O) */
        struct vm_page  *next;          /* VP bucket link (O) */

        vm_object_t     object;         /* which object am I in (O&P) */
        vm_object_offset_t offset;      /* offset into that object (O,P) */

        /*
         * The following word of flags is protected
         * by the "page queues" lock.
         *
         * we use the 'wire_count' field to store the local
         * queue id if local queues are enabled...
         * see the comments at 'VM_PAGE_QUEUES_REMOVE' as to
         * why this is safe to do
         */
#define local_id wire_count
        unsigned int    wire_count:16,  /* how many wired down maps use me? (O&P) */
        /* boolean_t */ active:1,       /* page is in active list (P) */
                        inactive:1,     /* page is in inactive list (P) */
                        clean_queue:1,  /* page is in pre-cleaned list (P) */
                        local:1,        /* page is in one of the local queues (P) */
                        speculative:1,  /* page is in speculative list (P) */
                        throttled:1,    /* pager is not responding or doesn't exist (P) */
                        free:1,         /* page is on free list (P) */
                        pageout_queue:1,/* page is on queue for pageout (P) */
                        laundry:1,      /* page is being cleaned now (P) */
                        reference:1,    /* page has been used (P) */
                        gobbled:1,      /* page used internally (P) */
                        private:1,      /* Page should not be returned to
                                         * the free list (P) */
                        no_cache:1,     /* page is not to be cached and should
                                         * be reused ahead of other pages (P) */
                        xpmapped:1,     /* page has been entered into a pmap
                                         * with execute permission (P) */
                        __unused_pageq_bits:2;  /* 2 bits available here */

        ppnum_t         phys_page;      /* Physical address of page, passed
                                         * to pmap_enter (read-only) */

        /*
         * The following word of flags is protected
         * by the "VM object" lock.
         */
        unsigned int
        /* boolean_t */ busy:1,         /* page is in transit (O) */
                        wanted:1,       /* someone is waiting for page (O) */
                        tabled:1,       /* page is in VP table (O) */
                        hashed:1,       /* page is in vm_page_buckets[]
                                         * (O) + the bucket lock */
                        fictitious:1,   /* Physical page doesn't exist (O) */
        /*
         * IMPORTANT: the "pmapped" bit can be turned on while holding the
         * VM object "shared" lock.  See vm_fault_enter().
         * This is OK as long as it's the only bit in this bit field that
         * can be updated without holding the VM object "exclusive" lock.
         */
                        pmapped:1,      /* page has been entered at some
                                         * point into a pmap (O **shared**) */
                        wpmapped:1,     /* page has been entered at some
                                         * point into a pmap for write (O) */
                        pageout:1,      /* page wired & busy for pageout (O) */
                        absent:1,       /* Data has been requested, but is
                                         * not yet available (O) */
                        error:1,        /* Data manager was unable to provide
                                         * data due to error (O) */
                        dirty:1,        /* Page must be cleaned (O) */
                        cleaning:1,     /* Page clean has begun (O) */
                        precious:1,     /* Page is precious; data must be
                                         * returned even if clean (O) */
                        clustered:1,    /* page is not the faulted page (O) */
                        overwriting:1,  /* Request to unlock has been made
                                         * without having data. (O)
                                         * [See vm_fault_page_overwrite] */
                        restart:1,      /* Page was pushed higher in shadow
                                         * chain by copy_call-related pagers;
                                         * start again at top of chain */
                        unusual:1,      /* Page is absent, error, restart or
                                         * page locked */
                        encrypted:1,    /* encrypted for secure swap (O) */
                        encrypted_cleaning:1,   /* encrypting page */
                        cs_validated:1, /* code-signing: page was checked */
                        cs_tainted:1,   /* code-signing: page is tainted */
                        reusable:1,
                        lopage:1,
                        slid:1,
                        was_dirty:1,    /* was this page previously dirty? */
                        compressor:1,   /* page owned by compressor pool */
                        written_by_kernel:1,    /* page was written by kernel (i.e. decompressed) */
                        __unused_object_bits:5; /* 5 bits available here */

#if __LP64__
        unsigned int    __unused_padding;       /* Pad structure explicitly
                                                 * to 8-byte multiple for LP64 */
#endif
};
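
/*
 * Illustrative sketch (an addition, not original source): per the locking
 * comment above, a (P)-protected field is changed under the page queues
 * lock, while an (O)-protected field is changed under the owning object's
 * lock, e.g.:
 *
 *      vm_page_lockspin_queues();
 *      m->reference = TRUE;            // (P) field
 *      vm_page_unlock_queues();
 *
 *      vm_object_lock(m_object);       // the object 'm' belongs to
 *      SET_PAGE_DIRTY(m, FALSE);       // (O) field, via the macro below
 *      vm_object_unlock(m_object);
 */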

#define DEBUG_ENCRYPTED_SWAP 1
#if DEBUG_ENCRYPTED_SWAP
#define ASSERT_PAGE_DECRYPTED(page)                                     \
        MACRO_BEGIN                                                     \
        if ((page)->encrypted) {                                        \
                panic("VM page %p should not be encrypted here\n",      \
                      (page));                                          \
        }                                                               \
        MACRO_END
#else   /* DEBUG_ENCRYPTED_SWAP */
#define ASSERT_PAGE_DECRYPTED(page)     assert(!(page)->encrypted)
#endif  /* DEBUG_ENCRYPTED_SWAP */

typedef struct vm_page  *vm_page_t;


typedef struct vm_locks_array {
        char            pad  __attribute__ ((aligned (64)));   /* padding keeps each lock on its own cache line */
        lck_mtx_t       vm_page_queue_lock2 __attribute__ ((aligned (64)));
        lck_mtx_t       vm_page_queue_free_lock2 __attribute__ ((aligned (64)));
        char            pad2  __attribute__ ((aligned (64)));
} vm_locks_array_t;


#define VM_PAGE_WIRED(m)        ((!(m)->local && (m)->wire_count))
#define VM_PAGE_NULL            ((vm_page_t) 0)
#define NEXT_PAGE(m)            ((vm_page_t) (m)->pageq.next)
#define NEXT_PAGE_PTR(m)        ((vm_page_t *) &(m)->pageq.next)

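/*
 * Illustrative sketch (an addition): NEXT_PAGE() reuses the pageq link to
 * thread pages through a privately assembled, VM_PAGE_NULL-terminated
 * list, e.g.:
 *
 *      unsigned int wired = 0;
 *      vm_page_t m;
 *
 *      for (m = page_list; m != VM_PAGE_NULL; m = NEXT_PAGE(m)) {
 *              if (VM_PAGE_WIRED(m))
 *                      wired++;
 *      }
 *
 * where 'page_list' is a hypothetical list built up via NEXT_PAGE_PTR().
 */
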
/*
 * XXX  The unusual bit should not be necessary.  Most of the bit
 * XXX  fields above really want to be masks.
 */

/*
 *      For debugging, this macro can be defined to perform
 *      some useful check on a page structure.
 */

#define VM_PAGE_CHECK(mem)                      \
        MACRO_BEGIN                             \
        VM_PAGE_QUEUES_ASSERT(mem, 1);          \
        MACRO_END

/*      Page coloring:
 *
 *      The free page list is actually n lists, one per color,
 *      where the number of colors is a function of the machine's
 *      cache geometry set at system initialization.  To disable
 *      coloring, set vm_colors to 1 and vm_color_mask to 0.
 *      The boot-arg "colors" may be used to override vm_colors.
 *      Note that there is little harm in having more colors than needed.
 */

#define MAX_COLORS      128
#define DEFAULT_COLORS  32

extern
unsigned int    vm_colors;              /* must be in range 1..MAX_COLORS */
extern
unsigned int    vm_color_mask;          /* must be (vm_colors-1) */
extern
unsigned int    vm_cache_geometry_colors;       /* optimal #colors based on cache geometry */

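/*
 * Illustrative sketch (an addition): with vm_color_mask == (vm_colors - 1)
 * and vm_colors a power of two, a page's color is simply the low bits of
 * its physical page number, which indexes the per-color free queues
 * declared further below:
 *
 *      unsigned int color = m->phys_page & vm_color_mask;
 *      queue_head_t *free_q = &vm_page_queue_free[color];
 */
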
/*
 * Wired memory is a very limited resource and we can't let users exhaust it
 * and deadlock the entire system.  We enforce the following limits:
 *
 * vm_user_wire_limit (default: all memory minus vm_global_no_user_wire_amount)
 *      how much memory can be user-wired in one user task
 *
 * vm_global_user_wire_limit (default: same as vm_user_wire_limit)
 *      how much memory can be user-wired in all user tasks
 *
 * vm_global_no_user_wire_amount (default: VM_NOT_USER_WIREABLE)
 *      how much memory must remain user-unwired at any time
 */
#define VM_NOT_USER_WIREABLE (64*1024*1024)     /* 64MB */
extern
vm_map_size_t   vm_user_wire_limit;
extern
vm_map_size_t   vm_global_user_wire_limit;
extern
vm_map_size_t   vm_global_no_user_wire_amount;

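/*
 * Illustrative sketch (an addition, loosely modeled on the checks the VM
 * map wiring code performs): a user wire request of 'size' bytes would be
 * rejected when it pushes the task or the system past the limits above,
 * roughly:
 *
 *      if (size + ptoa_64(total_wired) > vm_global_user_wire_limit ||
 *          size + task_wired_size > vm_user_wire_limit) {
 *              return (KERN_RESOURCE_SHORTAGE);
 *      }
 *
 * 'total_wired' and 'task_wired_size' are hypothetical stand-ins for the
 * bookkeeping the wiring path actually consults.
 */
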
/*
 *      Each pageable resident page falls into one of three lists:
 *
 *      free
 *              Available for allocation now.  The free list is
 *              actually an array of lists, one per color.
 *      inactive
 *              Not referenced in any map, but still has an
 *              object/offset-page mapping, and may be dirty.
 *              This is the list of pages that should be
 *              paged out next.  There are actually two
 *              inactive lists, one for pages brought in from
 *              disk or other backing store, and another
 *              for "zero-filled" pages.  See vm_pageout_scan()
 *              for the distinction and usage.
 *      active
 *              A list of pages which have been placed in
 *              at least one physical map.  This list is
 *              ordered, in LRU-like fashion.
 */

#define VPL_LOCK_SPIN 1

struct vpl {
        unsigned int    vpl_count;
        unsigned int    vpl_internal_count;
        unsigned int    vpl_external_count;
        queue_head_t    vpl_queue;
#ifdef  VPL_LOCK_SPIN
        lck_spin_t      vpl_lock;
#else
        lck_mtx_t       vpl_lock;
        lck_mtx_ext_t   vpl_lock_ext;
#endif
};

struct vplq {
        union {
                char            cache_line_pad[128];
                struct vpl      vpl;
        } vpl_un;
};
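
/*
 * Illustrative sketch (an addition): each local queue lives in its own
 * 128-byte-padded vplq slot, so one CPU's queue never false-shares a cache
 * line with a neighbor's.  The access pattern used later in this file is:
 *
 *      struct vpl *lq = &vm_page_local_q[lid].vpl_un.vpl;
 *
 *      VPL_LOCK(&lq->vpl_lock);
 *      // ... operate on lq->vpl_queue and its counters ...
 *      VPL_UNLOCK(&lq->vpl_lock);
 */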
extern
unsigned int    vm_page_local_q_count;
extern
struct vplq     *vm_page_local_q;
extern
unsigned int    vm_page_local_q_soft_limit;
extern
unsigned int    vm_page_local_q_hard_limit;
extern
vm_locks_array_t vm_page_locks;

extern
queue_head_t    vm_page_queue_free[MAX_COLORS]; /* memory free queue */
extern
queue_head_t    vm_lopage_queue_free;           /* low memory free queue */
extern
queue_head_t    vm_page_queue_active;           /* active memory queue */
extern
queue_head_t    vm_page_queue_inactive;         /* inactive memory queue for normal pages */
extern
queue_head_t    vm_page_queue_cleaned;          /* clean-queue inactive memory */
extern
queue_head_t    vm_page_queue_anonymous;        /* inactive memory queue for anonymous pages */
extern
queue_head_t    vm_page_queue_throttled;        /* memory queue for throttled pageout pages */

extern
vm_offset_t     first_phys_addr;                /* physical address for first_page */
extern
vm_offset_t     last_phys_addr;                 /* physical address for last_page */

extern
unsigned int    vm_page_free_count;     /* How many pages are free? (sum of all colors) */
extern
unsigned int    vm_page_fictitious_count;       /* How many fictitious pages are free? */
extern
unsigned int    vm_page_active_count;   /* How many pages are active? */
extern
unsigned int    vm_page_inactive_count; /* How many pages are inactive? */
extern
unsigned int    vm_page_cleaned_count;  /* How many pages are in the clean queue? */
extern
unsigned int    vm_page_throttled_count;        /* How many inactives are throttled */
extern
unsigned int    vm_page_speculative_count;      /* How many speculative pages are unclaimed? */
extern unsigned int     vm_page_pageable_internal_count;
extern unsigned int     vm_page_pageable_external_count;
extern
unsigned int    vm_page_external_count; /* How many pages are file-backed? */
extern
unsigned int    vm_page_internal_count; /* How many pages are anonymous? */
extern
unsigned int    vm_page_wire_count;     /* How many pages are wired? */
extern
unsigned int    vm_page_wire_count_initial;     /* How many pages wired at startup */
extern
unsigned int    vm_page_free_target;    /* How many do we want free? */
extern
unsigned int    vm_page_free_min;       /* When to wakeup pageout */
extern
unsigned int    vm_page_throttle_limit; /* When to throttle new page creation */
extern
uint32_t        vm_page_creation_throttle;      /* When to throttle new page creation */
extern
unsigned int    vm_page_inactive_target;        /* How many do we want inactive? */
extern
unsigned int    vm_page_anonymous_min;  /* When it's ok to pre-clean */
extern
unsigned int    vm_page_inactive_min;   /* When to wakeup pageout */
extern
unsigned int    vm_page_free_reserved;  /* How many pages reserved to do pageout */
extern
unsigned int    vm_page_throttle_count; /* Count of page allocations throttled */
extern
unsigned int    vm_page_gobble_count;

#if DEVELOPMENT || DEBUG
extern
unsigned int    vm_page_speculative_used;
#endif

extern
unsigned int    vm_page_purgeable_count;        /* How many pages are purgeable now? */
extern
unsigned int    vm_page_purgeable_wired_count;  /* How many purgeable pages are wired now? */
extern
uint64_t        vm_page_purged_count;           /* How many pages got purged so far? */

extern unsigned int     vm_page_free_wanted;
                                /* how many threads are waiting for memory */

extern unsigned int     vm_page_free_wanted_privileged;
                                /* how many VM privileged threads are waiting for memory */

extern ppnum_t  vm_page_fictitious_addr;
                                /* (fake) phys_addr of fictitious pages */

extern ppnum_t  vm_page_guard_addr;
                                /* (fake) phys_addr of guard pages */


extern boolean_t        vm_page_deactivate_hint;

extern int              vm_compressor_mode;

/*
 * vm_himemory_mode:
 *      0 = all pages available (default)
 *      1 = disable high memory (cap max pages at 4GB)
 *      2 = prefer high memory
 */
extern int              vm_himemory_mode;

extern boolean_t        vm_lopage_needed;
extern uint32_t         vm_lopage_free_count;
extern uint32_t         vm_lopage_free_limit;
extern uint32_t         vm_lopage_lowater;
extern boolean_t        vm_lopage_refill;
extern uint64_t         max_valid_dma_address;
extern ppnum_t          max_valid_low_ppnum;

/*
 * Prototypes for functions exported by this module.
 */
extern void             vm_page_bootstrap(
                                        vm_offset_t     *startp,
                                        vm_offset_t     *endp);

extern void             vm_page_module_init(void);

extern void             vm_page_init_local_q(void);

extern void             vm_page_create(
                                        ppnum_t         start,
                                        ppnum_t         end);

extern vm_page_t        vm_page_lookup(
                                        vm_object_t             object,
                                        vm_object_offset_t      offset);

extern vm_page_t        vm_page_grab_fictitious(void);

extern vm_page_t        vm_page_grab_guard(void);

extern void             vm_page_release_fictitious(
                                        vm_page_t       page);

extern void             vm_page_more_fictitious(void);

extern int              vm_pool_low(void);

extern vm_page_t        vm_page_grab(void);

extern vm_page_t        vm_page_grablo(void);

extern void             vm_page_release(
                                        vm_page_t       page);

extern boolean_t        vm_page_wait(
                                        int             interruptible);

extern vm_page_t        vm_page_alloc(
                                        vm_object_t             object,
                                        vm_object_offset_t      offset);

extern vm_page_t        vm_page_alloclo(
                                        vm_object_t             object,
                                        vm_object_offset_t      offset);

extern vm_page_t        vm_page_alloc_guard(
                                        vm_object_t             object,
                                        vm_object_offset_t      offset);

extern void             vm_page_init(
                                        vm_page_t       page,
                                        ppnum_t         phys_page,
                                        boolean_t       lopage);

extern void             vm_page_free(
                                        vm_page_t       page);

extern void             vm_page_free_unlocked(
                                        vm_page_t       page,
                                        boolean_t       remove_from_hash);

extern void             vm_page_activate(
                                        vm_page_t       page);

extern void             vm_page_deactivate(
                                        vm_page_t       page);

extern void             vm_page_deactivate_internal(
                                        vm_page_t       page,
                                        boolean_t       clear_hw_reference);

extern void             vm_page_enqueue_cleaned(vm_page_t page);

extern void             vm_page_lru(
                                        vm_page_t       page);

extern void             vm_page_speculate(
                                        vm_page_t       page,
                                        boolean_t       new);

extern void             vm_page_speculate_ageit(
                                        struct vm_speculative_age_q *aq);

extern void             vm_page_reactivate_all_throttled(void);

extern void             vm_page_reactivate_local(uint32_t lid, boolean_t force, boolean_t nolocks);

extern void             vm_page_rename(
                                        vm_page_t               page,
                                        vm_object_t             new_object,
                                        vm_object_offset_t      new_offset,
                                        boolean_t               encrypted_ok);

extern void             vm_page_insert(
                                        vm_page_t               page,
                                        vm_object_t             object,
                                        vm_object_offset_t      offset);

extern void             vm_page_insert_internal(
                                        vm_page_t               page,
                                        vm_object_t             object,
                                        vm_object_offset_t      offset,
                                        boolean_t               queues_lock_held,
                                        boolean_t               insert_in_hash,
                                        boolean_t               batch_pmap_op);

extern void             vm_page_replace(
                                        vm_page_t               mem,
                                        vm_object_t             object,
                                        vm_object_offset_t      offset);

extern void             vm_page_remove(
                                        vm_page_t       page,
                                        boolean_t       remove_from_hash);

extern void             vm_page_zero_fill(
                                        vm_page_t       page);

extern void             vm_page_part_zero_fill(
                                        vm_page_t       m,
                                        vm_offset_t     m_pa,
                                        vm_size_t       len);

extern void             vm_page_copy(
                                        vm_page_t       src_page,
                                        vm_page_t       dest_page);

extern void             vm_page_part_copy(
                                        vm_page_t       src_m,
                                        vm_offset_t     src_pa,
                                        vm_page_t       dst_m,
                                        vm_offset_t     dst_pa,
                                        vm_size_t       len);

extern void             vm_page_wire(
                                        vm_page_t       page);

extern void             vm_page_unwire(
                                        vm_page_t       page,
                                        boolean_t       queueit);

extern void             vm_set_page_size(void);

extern void             vm_page_gobble(
                                        vm_page_t       page);

extern void             vm_page_validate_cs(vm_page_t   page);
extern void             vm_page_validate_cs_mapped(
                                        vm_page_t       page,
                                        const void      *kaddr);

extern void             vm_page_free_prepare_queues(
                                        vm_page_t       page);

extern void             vm_page_free_prepare_object(
                                        vm_page_t       page,
                                        boolean_t       remove_from_hash);

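/*
 * Illustrative sketch (an addition): a typical allocate/use/free cycle
 * built from the interfaces above, retrying on memory shortage via the
 * VM_PAGE_WAIT() macro defined later in this file:
 *
 *      vm_page_t m;
 *
 *      vm_object_lock(object);
 *      while ((m = vm_page_alloc(object, offset)) == VM_PAGE_NULL) {
 *              vm_object_unlock(object);
 *              VM_PAGE_WAIT();
 *              vm_object_lock(object);
 *      }
 *      vm_page_zero_fill(m);
 *      // ... use the page ...
 *      VM_PAGE_FREE(m);                // object still locked
 *      vm_object_unlock(object);
 */
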
#if CONFIG_JETSAM
extern void memorystatus_pages_update(unsigned int pages_avail);

#define VM_CHECK_MEMORYSTATUS do { \
        memorystatus_pages_update(      \
                vm_page_external_count + \
                vm_page_free_count +     \
                (VM_DYNAMIC_PAGING_ENABLED(memory_manager_default) ? 0 : vm_page_purgeable_count) \
                ); \
} while(0)

#else /* CONFIG_JETSAM */

extern void vm_pressure_response(void);

#define VM_CHECK_MEMORYSTATUS   vm_pressure_response()

#endif /* CONFIG_JETSAM */

/*
 *      Functions implemented as macros. m->wanted and m->busy are
 *      protected by the object lock.
 */

#define SET_PAGE_DIRTY(m, set_pmap_modified)                            \
                MACRO_BEGIN                                             \
                vm_page_t __page__ = (m);                               \
                __page__->dirty = TRUE;                                 \
                /* 'set_pmap_modified' is unused in this configuration */ \
                MACRO_END

#define PAGE_ASSERT_WAIT(m, interruptible)                      \
                (((m)->wanted = TRUE),                          \
                 assert_wait((event_t) (m), (interruptible)))

#define PAGE_SLEEP(o, m, interruptible)                         \
                (((m)->wanted = TRUE),                          \
                 thread_sleep_vm_object((o), (m), (interruptible)))

#define PAGE_WAKEUP_DONE(m)                                     \
                MACRO_BEGIN                                     \
                (m)->busy = FALSE;                              \
                if ((m)->wanted) {                              \
                        (m)->wanted = FALSE;                    \
                        thread_wakeup((event_t) (m));           \
                }                                               \
                MACRO_END

#define PAGE_WAKEUP(m)                                          \
                MACRO_BEGIN                                     \
                if ((m)->wanted) {                              \
                        (m)->wanted = FALSE;                    \
                        thread_wakeup((event_t) (m));           \
                }                                               \
                MACRO_END

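/*
 * Illustrative sketch (an addition): the busy/wanted handshake these
 * macros implement.  A thread that needs a busy page sleeps on it; the
 * owner wakes any waiters when it is done:
 *
 *      // waiter, object locked:
 *      while (m->busy) {
 *              PAGE_SLEEP(object, m, THREAD_UNINT);
 *      }
 *
 *      // owner, object locked, when finished with the page:
 *      PAGE_WAKEUP_DONE(m);    // clears busy and wakes waiters
 */
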
#define VM_PAGE_FREE(p)                         \
                MACRO_BEGIN                     \
                vm_page_free_unlocked(p, TRUE); \
                MACRO_END

#define VM_PAGE_GRAB_FICTITIOUS(M)                                      \
                MACRO_BEGIN                                             \
                while ((M = vm_page_grab_fictitious()) == VM_PAGE_NULL) \
                        vm_page_more_fictitious();                      \
                MACRO_END

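/*
 * Illustrative sketch (an addition): VM_PAGE_GRAB_FICTITIOUS() cannot fail;
 * it loops, replenishing the fictitious-page pool until a page is available:
 *
 *      vm_page_t m;
 *
 *      VM_PAGE_GRAB_FICTITIOUS(m);
 *      // ... use the fictitious page ...
 *      vm_page_release_fictitious(m);
 */
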
#define VM_PAGE_WAIT()          ((void)vm_page_wait(THREAD_UNINT))

#define vm_page_queue_lock              (vm_page_locks.vm_page_queue_lock2)
#define vm_page_queue_free_lock         (vm_page_locks.vm_page_queue_free_lock2)

#define vm_page_lock_queues()           lck_mtx_lock(&vm_page_queue_lock)
#define vm_page_unlock_queues()         lck_mtx_unlock(&vm_page_queue_lock)

#define vm_page_lockspin_queues()       lck_mtx_lock_spin(&vm_page_queue_lock)
#define vm_page_trylockspin_queues()    lck_mtx_try_lock_spin(&vm_page_queue_lock)
#define vm_page_lockconvert_queues()    lck_mtx_convert_spin(&vm_page_queue_lock)

#ifdef  VPL_LOCK_SPIN
#define VPL_LOCK_INIT(vlq, vpl_grp, vpl_attr) lck_spin_init(&vlq->vpl_lock, vpl_grp, vpl_attr)
#define VPL_LOCK(vpl) lck_spin_lock(vpl)
#define VPL_UNLOCK(vpl) lck_spin_unlock(vpl)
#else
#define VPL_LOCK_INIT(vlq, vpl_grp, vpl_attr) lck_mtx_init_ext(&vlq->vpl_lock, &vlq->vpl_lock_ext, vpl_grp, vpl_attr)
#define VPL_LOCK(vpl) lck_mtx_lock_spin(vpl)
#define VPL_UNLOCK(vpl) lck_mtx_unlock(vpl)
#endif

#if MACH_ASSERT
extern void vm_page_queues_assert(vm_page_t mem, int val);
#define VM_PAGE_QUEUES_ASSERT(mem, val) vm_page_queues_assert((mem), (val))
#else
#define VM_PAGE_QUEUES_ASSERT(mem, val)
#endif


/*
 * 'vm_fault_enter' will place newly created pages (zero-fill and COW) onto
 * the local queues if they exist... it's the only spot in the system where
 * we add pages to those queues... once on those queues, those pages can only
 * move to one of the global page queues or the free queues... they NEVER
 * move from local q to local q.
 * the 'local' state is stable when VM_PAGE_QUEUES_REMOVE is called since
 * we're behind the global vm_page_queue_lock at this point... we still need
 * to take the local lock in case this operation is being run on a different
 * CPU than the one that owns the local queue's identity, but we don't have
 * to worry about the page moving to a global queue or becoming wired while
 * we're grabbing the local lock since those operations would require the
 * global vm_page_queue_lock to be held, and we already own it.
 *
 * this is why it's safe to utilize the wire_count field in the vm_page_t as
 * the local_id... 'wired' and local are ALWAYS mutually exclusive conditions.
 */

#define VM_PAGE_QUEUES_REMOVE(mem)                              \
        MACRO_BEGIN                                             \
        boolean_t       was_pageable;                           \
                                                                \
        VM_PAGE_QUEUES_ASSERT(mem, 1);                          \
        assert(!mem->laundry);                                  \
/*                                                              \
 *      if (mem->pageout_queue)                                 \
 *              NOTE: VM_PAGE_QUEUES_REMOVE does not deal with removing pages from the pageout queue...  \
 *              the caller is responsible for determining if the page is on that queue, and if so, must  \
 *              either first remove it (it needs both the page queues lock and the object lock to do     \
 *              this via vm_pageout_steal_laundry), or avoid the call to VM_PAGE_QUEUES_REMOVE           \
 */                                                             \
        if (mem->local) {                                       \
                struct vpl      *lq;                            \
                assert(mem->object != kernel_object);           \
                assert(mem->object != compressor_object);       \
                assert(!mem->inactive && !mem->speculative);    \
                assert(!mem->active && !mem->throttled);        \
                assert(!mem->clean_queue);                      \
                assert(!mem->fictitious);                       \
                lq = &vm_page_local_q[mem->local_id].vpl_un.vpl; \
                VPL_LOCK(&lq->vpl_lock);                        \
                queue_remove(&lq->vpl_queue,                    \
                             mem, vm_page_t, pageq);            \
                mem->local = FALSE;                             \
                mem->local_id = 0;                              \
                lq->vpl_count--;                                \
                if (mem->object->internal) {                    \
                        lq->vpl_internal_count--;               \
                } else {                                        \
                        lq->vpl_external_count--;               \
                }                                               \
                VPL_UNLOCK(&lq->vpl_lock);                      \
                was_pageable = FALSE;                           \
        }                                                       \
                                                                \
        else if (mem->active) {                                 \
                assert(mem->object != kernel_object);           \
                assert(mem->object != compressor_object);       \
                assert(!mem->inactive && !mem->speculative);    \
                assert(!mem->clean_queue);                      \
                assert(!mem->throttled);                        \
                assert(!mem->fictitious);                       \
                queue_remove(&vm_page_queue_active,             \
                             mem, vm_page_t, pageq);            \
                mem->active = FALSE;                            \
                vm_page_active_count--;                         \
                was_pageable = TRUE;                            \
        }                                                       \
                                                                \
        else if (mem->inactive) {                               \
                assert(mem->object != kernel_object);           \
                assert(mem->object != compressor_object);       \
                assert(!mem->active && !mem->speculative);      \
                assert(!mem->throttled);                        \
                assert(!mem->fictitious);                       \
                vm_page_inactive_count--;                       \
                if (mem->clean_queue) {                         \
                        queue_remove(&vm_page_queue_cleaned,    \
                                     mem, vm_page_t, pageq);    \
                        mem->clean_queue = FALSE;               \
                        vm_page_cleaned_count--;                \
                } else {                                        \
                        if (mem->object->internal) {            \
                                queue_remove(&vm_page_queue_anonymous,  \
                                             mem, vm_page_t, pageq);    \
                                vm_page_anonymous_count--;      \
                        } else {                                \
                                queue_remove(&vm_page_queue_inactive,   \
                                             mem, vm_page_t, pageq);    \
                        }                                       \
                        vm_purgeable_q_advance_all();           \
                }                                               \
                mem->inactive = FALSE;                          \
                was_pageable = TRUE;                            \
        }                                                       \
                                                                \
        else if (mem->throttled) {                              \
                assert(mem->object != compressor_object);       \
                assert(!mem->active && !mem->inactive);         \
                assert(!mem->speculative);                      \
                assert(!mem->fictitious);                       \
                queue_remove(&vm_page_queue_throttled,          \
                             mem, vm_page_t, pageq);            \
                mem->throttled = FALSE;                         \
                vm_page_throttled_count--;                      \
                was_pageable = FALSE;                           \
        }                                                       \
                                                                \
        else if (mem->speculative) {                            \
                assert(mem->object != compressor_object);       \
                assert(!mem->active && !mem->inactive);         \
                assert(!mem->throttled);                        \
                assert(!mem->fictitious);                       \
                remque(&mem->pageq);                            \
                mem->speculative = FALSE;                       \
                vm_page_speculative_count--;                    \
                was_pageable = TRUE;                            \
        }                                                       \
                                                                \
        else if (mem->pageq.next || mem->pageq.prev) {          \
                was_pageable = FALSE;                           \
                panic("VM_PAGE_QUEUES_REMOVE: unmarked page on Q"); \
        } else {                                                \
                was_pageable = FALSE;                           \
        }                                                       \
                                                                \
        mem->pageq.next = NULL;                                 \
        mem->pageq.prev = NULL;                                 \
        VM_PAGE_QUEUES_ASSERT(mem, 0);                          \
        if (was_pageable) {                                     \
                if (mem->object->internal) {                    \
                        vm_page_pageable_internal_count--;      \
                } else {                                        \
                        vm_page_pageable_external_count--;      \
                }                                               \
        }                                                       \
        MACRO_END

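/*
 * Illustrative sketch (an addition): per the NOTE above, a correct caller
 * holds the page queues lock and deals with the pageout queue case itself
 * before invoking the macro:
 *
 *      vm_page_lockspin_queues();
 *      if (!m->pageout_queue) {
 *              VM_PAGE_QUEUES_REMOVE(m);
 *      }
 *      vm_page_unlock_queues();
 */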

#define VM_PAGE_ENQUEUE_INACTIVE(mem, first)                    \
        MACRO_BEGIN                                             \
        VM_PAGE_QUEUES_ASSERT(mem, 0);                          \
        assert(!mem->fictitious);                               \
        assert(!mem->laundry);                                  \
        assert(!mem->pageout_queue);                            \
        if (mem->object->internal) {                            \
                if (first == TRUE)                              \
                        queue_enter_first(&vm_page_queue_anonymous, mem, vm_page_t, pageq); \
                else                                            \
                        queue_enter(&vm_page_queue_anonymous, mem, vm_page_t, pageq);       \
                vm_page_anonymous_count++;                      \
                vm_page_pageable_internal_count++;              \
        } else {                                                \
                if (first == TRUE)                              \
                        queue_enter_first(&vm_page_queue_inactive, mem, vm_page_t, pageq);  \
                else                                            \
                        queue_enter(&vm_page_queue_inactive, mem, vm_page_t, pageq);        \
                vm_page_pageable_external_count++;              \
        }                                                       \
        mem->inactive = TRUE;                                   \
        vm_page_inactive_count++;                               \
        token_new_pagecount++;                                  \
        MACRO_END


#if DEVELOPMENT || DEBUG
#define VM_PAGE_SPECULATIVE_USED_ADD()                          \
        MACRO_BEGIN                                             \
        OSAddAtomic(1, &vm_page_speculative_used);              \
        MACRO_END
#else
#define VM_PAGE_SPECULATIVE_USED_ADD()
#endif


#define VM_PAGE_CONSUME_CLUSTERED(mem)                          \
        MACRO_BEGIN                                             \
        if (mem->clustered) {                                   \
                assert(mem->object);                            \
                mem->object->pages_used++;                      \
                mem->clustered = FALSE;                         \
                VM_PAGE_SPECULATIVE_USED_ADD();                 \
        }                                                       \
        MACRO_END



#define DW_vm_page_unwire               0x01
#define DW_vm_page_wire                 0x02
#define DW_vm_page_free                 0x04
#define DW_vm_page_activate             0x08
#define DW_vm_page_deactivate_internal  0x10
#define DW_vm_page_speculate            0x20
#define DW_vm_page_lru                  0x40
#define DW_vm_pageout_throttle_up       0x80
#define DW_PAGE_WAKEUP                  0x100
#define DW_clear_busy                   0x200
#define DW_clear_reference              0x400
#define DW_set_reference                0x800
#define DW_move_page                    0x1000
#define DW_VM_PAGE_QUEUES_REMOVE        0x2000
#define DW_enqueue_cleaned              0x4000

struct vm_page_delayed_work {
        vm_page_t       dw_m;
        int             dw_mask;
};

void vm_page_do_delayed_work(vm_object_t object, struct vm_page_delayed_work *dwp, int dw_count);

extern unsigned int vm_max_delayed_work_limit;

#define DEFAULT_DELAYED_WORK_LIMIT      32

#define DELAYED_WORK_LIMIT(max) ((vm_max_delayed_work_limit >= max ? max : vm_max_delayed_work_limit))

/*
 * vm_page_do_delayed_work may need to drop the object lock...
 * if it does, we need the pages it's looking at to
 * be held stable via the busy bit, so if busy isn't already
 * set, we need to set it and ask vm_page_do_delayed_work
 * to clear it and wakeup anyone that might have blocked on
 * it once we're done processing the page.
 */

#define VM_PAGE_ADD_DELAYED_WORK(dwp, mem, dw_cnt)              \
        MACRO_BEGIN                                             \
        if (mem->busy == FALSE) {                               \
                mem->busy = TRUE;                               \
                if ( !(dwp->dw_mask & DW_vm_page_free))         \
                        dwp->dw_mask |= (DW_clear_busy | DW_PAGE_WAKEUP); \
        }                                                       \
        dwp->dw_m = mem;                                        \
        dwp++;                                                  \
        dw_cnt++;                                               \
        MACRO_END

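/*
 * Illustrative sketch (an addition), modeled on the batching pattern the
 * pageout code uses with these helpers: queue up per-page work under the
 * object lock, then flush the batch in one call:
 *
 *      struct vm_page_delayed_work     dw_array[DEFAULT_DELAYED_WORK_LIMIT];
 *      struct vm_page_delayed_work     *dwp = &dw_array[0];
 *      int                             dw_count = 0;
 *      int                             dw_limit;
 *
 *      dw_limit = DELAYED_WORK_LIMIT(DEFAULT_DELAYED_WORK_LIMIT);
 *
 *      // for each page 'm' of interest, object locked:
 *      dwp->dw_mask = DW_vm_page_activate;
 *      VM_PAGE_ADD_DELAYED_WORK(dwp, m, dw_count);
 *      if (dw_count >= dw_limit) {
 *              vm_page_do_delayed_work(object, &dw_array[0], dw_count);
 *              dwp = &dw_array[0];
 *              dw_count = 0;
 *      }
 *      // ... and flush any remainder the same way before unlocking.
 */
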
extern vm_page_t vm_object_page_grab(vm_object_t);

#if VM_PAGE_BUCKETS_CHECK
extern void vm_page_buckets_check(void);
#endif /* VM_PAGE_BUCKETS_CHECK */

#endif  /* _VM_VM_PAGE_H_ */