/*
 * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */
/*
 *	File:	vm_object.h
 *	Author:	Avadis Tevanian, Jr., Michael Wayne Young
 *	Date:	1985
 *
 *	Virtual memory object module definitions.
 */

#ifndef _VM_VM_OBJECT_H_
#define _VM_VM_OBJECT_H_

#include <debug.h>
#include <mach_assert.h>
#include <mach_pagemap.h>
#include <task_swapper.h>

#include <mach/kern_return.h>
#include <mach/boolean.h>
#include <mach/memory_object_types.h>
#include <mach/port.h>
#include <mach/vm_prot.h>
#include <mach/vm_param.h>
#include <mach/machine/vm_types.h>
#include <kern/queue.h>
#include <kern/locks.h>
#include <kern/assert.h>
#include <kern/misc_protos.h>
#include <kern/macro_help.h>
#include <ipc/ipc_types.h>
#include <vm/pmap.h>

#include <vm/vm_external.h>

#include <vm/vm_options.h>
#include <vm/vm_page.h>

#if VM_OBJECT_TRACKING
#include <libkern/OSDebug.h>
#include <kern/btlog.h>
extern void vm_object_tracking_init(void);
extern boolean_t vm_object_tracking_inited;
extern btlog_t *vm_object_tracking_btlog;
#define VM_OBJECT_TRACKING_NUM_RECORDS  50000
#define VM_OBJECT_TRACKING_BTDEPTH      7
#define VM_OBJECT_TRACKING_OP_CREATED   1
#define VM_OBJECT_TRACKING_OP_MODIFIED  2
#define VM_OBJECT_TRACKING_OP_TRUESHARE 3
#endif /* VM_OBJECT_TRACKING */

struct vm_page;

/*
 *	Types defined:
 *
 *	vm_object_t		Virtual memory object.
 *	vm_object_fault_info_t	Used to determine cluster size.
 */

struct vm_object_fault_info {
	int             interruptible;
	uint32_t        user_tag;
	vm_size_t       cluster_size;
	vm_behavior_t   behavior;
	vm_map_offset_t lo_offset;
	vm_map_offset_t hi_offset;
	unsigned int
	/* boolean_t */ no_cache:1,
	/* boolean_t */ stealth:1,
	/* boolean_t */ io_sync:1,
	/* boolean_t */ cs_bypass:1,
	/* boolean_t */ pmap_cs_associated:1,
	/* boolean_t */ mark_zf_absent:1,
	/* boolean_t */ batch_pmap_op:1,
	/* boolean_t */ resilient_media:1,
	/* boolean_t */ no_copy_on_read:1,
	    __vm_object_fault_info_unused_bits:23;
	int             pmap_options;
};
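
/*
 * Illustrative sketch (an addition to this listing, not original XNU code):
 * a fault path might zero a vm_object_fault_info, describe the access, and
 * hand it to vm_object_cluster_size() (declared below) to size a read-ahead
 * cluster.  "object_size" is a hypothetical bound used only for the example.
 *
 *	struct vm_object_fault_info fault_info;
 *
 *	bzero(&fault_info, sizeof(fault_info));
 *	fault_info.interruptible = THREAD_UNINT;
 *	fault_info.behavior = VM_BEHAVIOR_SEQUENTIAL;
 *	fault_info.lo_offset = 0;
 *	fault_info.hi_offset = object_size;
 *	fault_info.cluster_size = 0;
 */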

#define vo_size                 vo_un1.vou_size
#define vo_cache_pages_to_scan  vo_un1.vou_cache_pages_to_scan
#define vo_shadow_offset        vo_un2.vou_shadow_offset
#define vo_cache_ts             vo_un2.vou_cache_ts
#define vo_owner                vo_un2.vou_owner

struct vm_object {
	/*
	 * on 64 bit systems we pack the pointers hung off the memq.
	 * those pointers have to be able to point back to the memq.
	 * the packed pointers are required to be on a 64 byte boundary
	 * which means 2 things for the vm_object...  (1) the memq
	 * struct has to be the first element of the structure so that
	 * we can control its alignment... (2) the vm_object must be
	 * aligned on a 64 byte boundary... for static vm_object's
	 * this is accomplished via the 'aligned' attribute... for
	 * vm_object's in the zone pool, this is accomplished by
	 * rounding the size of the vm_object element to the nearest
	 * 64 byte size before creating the zone.
	 */
	vm_page_queue_head_t    memq;           /* Resident memory - must be first */
	lck_rw_t                Lock;           /* Synchronization */

#if DEVELOPMENT || DEBUG
	thread_t                Lock_owner;
#endif
	union {
		vm_object_size_t vou_size;      /* Object size (only valid if internal) */
		int              vou_cache_pages_to_scan;       /* pages yet to be visited in an
		                                                 * external object in cache
		                                                 */
	} vo_un1;

	struct vm_page          *memq_hint;
	int                     ref_count;      /* Number of references */
	unsigned int            resident_page_count;
	                                        /* number of resident pages */
	unsigned int            wired_page_count;       /* number of wired pages
	                                                 * use VM_OBJECT_WIRED_PAGE_UPDATE macros to update */
	unsigned int            reusable_page_count;

	struct vm_object        *copy;          /* Object that should receive
	                                         * a copy of my changed pages,
	                                         * for copy_delay, or just the
	                                         * temporary object that
	                                         * shadows this object, for
	                                         * copy_call.
	                                         */
	struct vm_object        *shadow;        /* My shadow */
	memory_object_t         pager;          /* Where to get data */

	union {
		vm_object_offset_t vou_shadow_offset;   /* Offset into shadow */
		clock_sec_t        vou_cache_ts;        /* age of an external object
		                                         * present in cache
		                                         */
		task_t             vou_owner;           /* If the object is purgeable
		                                         * or has a "ledger_tag", this
		                                         * is the task that owns it.
		                                         */
	} vo_un2;

	vm_object_offset_t      paging_offset;  /* Offset into memory object */
	memory_object_control_t pager_control;  /* Where data comes back */

	memory_object_copy_strategy_t
	                        copy_strategy;  /* How to handle data copy */

#if __LP64__
	/*
	 * Some user processes (mostly VirtualMachine software) take a large
	 * number of UPLs (via IOMemoryDescriptors) to wire pages in large
	 * VM objects and overflow the 16-bit "activity_in_progress" counter.
	 * Since we never enforced any limit there, let's give them 32 bits
	 * for backwards compatibility's sake.
	 */
	unsigned int            paging_in_progress:16,
	                        __object1_unused_bits:16;
	unsigned int            activity_in_progress;
#else /* __LP64__ */
	/*
	 * On 32-bit platforms, enlarging "activity_in_progress" would increase
	 * the size of "struct vm_object". Since we don't know of any actual
	 * overflow of these counters on these platforms, let's keep the
	 * counters as 16-bit integers.
	 */
	unsigned short          paging_in_progress;
	unsigned short          activity_in_progress;
#endif /* __LP64__ */
	                                        /* The memory object ports are
	                                         * being used (e.g., for pagein
	                                         * or pageout) -- don't change
	                                         * any of these fields (i.e.,
	                                         * don't collapse, destroy or
	                                         * terminate)
	                                         */

	unsigned int
	/* boolean_t array */ all_wanted:11,    /* Bit array of "want to be
	                                         * awakened" notations.  See
	                                         * VM_OBJECT_EVENT_* items
	                                         * below */
	/* boolean_t */ pager_created:1,        /* Has pager been created? */
	/* boolean_t */ pager_initialized:1,    /* Are fields ready to use? */
	/* boolean_t */ pager_ready:1,          /* Will pager take requests? */

	/* boolean_t */ pager_trusted:1,        /* The pager for this object
	                                         * is trusted. This is true for
	                                         * all internal objects (backed
	                                         * by the default pager)
	                                         */
	/* boolean_t */ can_persist:1,          /* The kernel may keep the data
	                                         * for this object (and rights
	                                         * to the memory object) after
	                                         * all address map references
	                                         * are deallocated?
	                                         */
	/* boolean_t */ internal:1,             /* Created by the kernel (and
	                                         * therefore, managed by the
	                                         * default memory manager)
	                                         */
	/* boolean_t */ private:1,              /* magic device_pager object,
	                                         * holds private pages only */
	/* boolean_t */ pageout:1,              /* pageout object. contains
	                                         * private pages that refer to
	                                         * a real memory object. */
	/* boolean_t */ alive:1,                /* Not yet terminated */

	/* boolean_t */ purgable:2,             /* Purgable state.  See
	                                         * VM_PURGABLE_*
	                                         */
	/* boolean_t */ purgeable_only_by_kernel:1,
	/* boolean_t */ purgeable_when_ripe:1,  /* Purgeable when a token
	                                         * becomes ripe.
	                                         */
	/* boolean_t */ shadowed:1,             /* Shadow may exist */
	/* boolean_t */ true_share:1,
	                                        /* This object is mapped
	                                         * in more than one place
	                                         * and hence cannot be
	                                         * coalesced */
	/* boolean_t */ terminating:1,
	                                        /* Allows vm_object_lookup
	                                         * and vm_object_deallocate
	                                         * to special case their
	                                         * behavior when they are
	                                         * called as a result of
	                                         * page cleaning during
	                                         * object termination
	                                         */
	/* boolean_t */ named:1,                /* Enforces an internal
	                                         * naming convention: by
	                                         * calling the right routines
	                                         * for allocation and
	                                         * destruction, UBC references
	                                         * against the vm_object are
	                                         * checked.
	                                         */
	/* boolean_t */ shadow_severed:1,
	                                        /* When a permanent object
	                                         * backing a COW goes away
	                                         * unexpectedly.  This bit
	                                         * allows vm_fault to return
	                                         * an error rather than a
	                                         * zero filled page.
	                                         */
	/* boolean_t */ phys_contiguous:1,
	                                        /* Memory is wired and
	                                         * guaranteed physically
	                                         * contiguous.  However
	                                         * it is not device memory
	                                         * and obeys normal virtual
	                                         * memory rules w.r.t pmap
	                                         * access bits.
	                                         */
	/* boolean_t */ nophyscache:1,
	                                        /* When mapped at the
	                                         * pmap level, don't allow
	                                         * primary caching. (for
	                                         * I/O)
	                                         */
	/* boolean_t */ _object5_unused_bits:1;

	queue_chain_t           cached_list;    /* Attachment point for the
	                                         * list of objects cached as a
	                                         * result of their can_persist
	                                         * value
	                                         */
	/*
	 * the following fields are not protected by any locks
	 * they are updated via atomic compare and swap
	 */
	vm_object_offset_t      last_alloc;     /* last allocation offset */
	vm_offset_t             cow_hint;       /* last page present in     */
	                                        /* shadow but not in object */
	int                     sequential;     /* sequential access size */

	uint32_t                pages_created;
	uint32_t                pages_used;
	/* hold object lock when altering */
	unsigned int
	    wimg_bits:8,                /* cache WIMG bits */
	    code_signed:1,              /* pages are signed and should be
	                                 * validated; the signatures are stored
	                                 * with the pager */
	    transposed:1,               /* object was transposed with another */
	    mapping_in_progress:1,      /* pager being mapped/unmapped */
	    phantom_isssd:1,
	    volatile_empty:1,
	    volatile_fault:1,
	    all_reusable:1,
	    blocked_access:1,
	    set_cache_attr:1,
	    object_is_shared_cache:1,
	    purgeable_queue_type:2,
	    purgeable_queue_group:3,
	    io_tracking:1,
	    no_tag_update:1,            /*  */
#if CONFIG_SECLUDED_MEMORY
	    eligible_for_secluded:1,
	    can_grab_secluded:1,
#else /* CONFIG_SECLUDED_MEMORY */
	    __object3_unused_bits:2,
#endif /* CONFIG_SECLUDED_MEMORY */
#if VM_OBJECT_ACCESS_TRACKING
	    access_tracking:1,
#else /* VM_OBJECT_ACCESS_TRACKING */
	    __unused_access_tracking:1,
#endif /* VM_OBJECT_ACCESS_TRACKING */
	    vo_ledger_tag:3,
	    vo_no_footprint:1;

#if VM_OBJECT_ACCESS_TRACKING
	uint32_t        access_tracking_reads;
	uint32_t        access_tracking_writes;
#endif /* VM_OBJECT_ACCESS_TRACKING */

	uint8_t                 scan_collisions;
	uint8_t                 __object4_unused_bits[1];
	vm_tag_t                wire_tag;

#if CONFIG_PHANTOM_CACHE
	uint32_t                phantom_object_id;
#endif
#if CONFIG_IOSCHED || UPL_DEBUG
	queue_head_t            uplq;           /* List of outstanding upls */
#endif

#ifdef VM_PIP_DEBUG
	/*
	 * Keep track of the stack traces for the first holders
	 * of a "paging_in_progress" reference for this VM object.
	 */
#define VM_PIP_DEBUG_STACK_FRAMES 25    /* depth of each stack trace */
#define VM_PIP_DEBUG_MAX_REFS     10    /* track that many references */
	struct __pip_backtrace {
		void *pip_retaddr[VM_PIP_DEBUG_STACK_FRAMES];
	} pip_holders[VM_PIP_DEBUG_MAX_REFS];
#endif /* VM_PIP_DEBUG */

	queue_chain_t           objq;           /* object queue - currently used for purgable queues */
	queue_chain_t           task_objq;      /* objects owned by task - protected by task lock */

#if !VM_TAG_ACTIVE_UPDATE
	queue_chain_t           wired_objq;
#endif /* !VM_TAG_ACTIVE_UPDATE */

#if DEBUG
	void                    *purgeable_owner_bt[16];
	task_t                  vo_purgeable_volatilizer; /* who made it volatile? */
	void                    *purgeable_volatilizer_bt[16];
#endif /* DEBUG */
};
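
/*
 * Illustrative sketch (an addition to this listing, not original XNU code):
 * the alignment rule described in the comment at the top of struct vm_object.
 * A statically allocated vm_object must sit on a 64 byte boundary so that
 * the packed pointers hung off "memq" can be encoded; the 'aligned'
 * attribute accomplishes that:
 *
 *	static struct vm_object my_static_object
 *		__attribute__((aligned(64)));	// hypothetical name
 *
 * Zone-allocated vm_objects instead rely on the element size being rounded
 * up to a multiple of 64 bytes when the zone is created.
 */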

#define VM_OBJECT_PURGEABLE_FAULT_ERROR(object)				\
	((object)->volatile_fault &&					\
	 ((object)->purgable == VM_PURGABLE_VOLATILE ||			\
	  (object)->purgable == VM_PURGABLE_EMPTY))

#if VM_OBJECT_ACCESS_TRACKING
extern uint64_t vm_object_access_tracking_reads;
extern uint64_t vm_object_access_tracking_writes;
extern void vm_object_access_tracking(vm_object_t object,
    int *access_tracking,
    uint32_t *access_tracking_reads,
    uint32_t *access_tracking_writes);
#endif /* VM_OBJECT_ACCESS_TRACKING */

extern
vm_object_t     kernel_object;          /* the single kernel object */

extern
vm_object_t     compressor_object;      /* the single compressor object */

extern
unsigned int    vm_object_absent_max;   /* maximum number of absent pages
                                         * at a time for each object */

#define VM_MSYNC_INITIALIZED    0
#define VM_MSYNC_SYNCHRONIZING  1
#define VM_MSYNC_DONE           2


extern lck_grp_t  vm_map_lck_grp;
extern lck_attr_t vm_map_lck_attr;

#ifndef VM_TAG_ACTIVE_UPDATE
#error VM_TAG_ACTIVE_UPDATE
#endif

#if VM_TAG_ACTIVE_UPDATE
#define VM_OBJECT_WIRED_ENQUEUE(object) panic("VM_OBJECT_WIRED_ENQUEUE")
#define VM_OBJECT_WIRED_DEQUEUE(object) panic("VM_OBJECT_WIRED_DEQUEUE")
#else /* VM_TAG_ACTIVE_UPDATE */
#define VM_OBJECT_WIRED_ENQUEUE(object)					\
	MACRO_BEGIN							\
	lck_spin_lock_grp(&vm_objects_wired_lock, &vm_page_lck_grp_bucket); \
	assert(!(object)->wired_objq.next);				\
	assert(!(object)->wired_objq.prev);				\
	queue_enter(&vm_objects_wired, (object),			\
	    vm_object_t, wired_objq);					\
	lck_spin_unlock(&vm_objects_wired_lock);			\
	MACRO_END
#define VM_OBJECT_WIRED_DEQUEUE(object)					\
	MACRO_BEGIN							\
	if ((object)->wired_objq.next) {				\
		lck_spin_lock_grp(&vm_objects_wired_lock, &vm_page_lck_grp_bucket); \
		queue_remove(&vm_objects_wired, (object),		\
		    vm_object_t, wired_objq);				\
		lck_spin_unlock(&vm_objects_wired_lock);		\
	}								\
	MACRO_END
#endif /* VM_TAG_ACTIVE_UPDATE */

#define VM_OBJECT_WIRED(object, tag)					\
	MACRO_BEGIN							\
	assert(VM_KERN_MEMORY_NONE != (tag));				\
	assert(VM_KERN_MEMORY_NONE == (object)->wire_tag);		\
	(object)->wire_tag = (tag);					\
	if (!VM_TAG_ACTIVE_UPDATE) {					\
		VM_OBJECT_WIRED_ENQUEUE((object));			\
	}								\
	MACRO_END

#define VM_OBJECT_UNWIRED(object)					\
	MACRO_BEGIN							\
	if (!VM_TAG_ACTIVE_UPDATE) {					\
		VM_OBJECT_WIRED_DEQUEUE((object));			\
	}								\
	if (VM_KERN_MEMORY_NONE != (object)->wire_tag) {		\
		vm_tag_update_size((object)->wire_tag, -ptoa_64((object)->wired_page_count)); \
		(object)->wire_tag = VM_KERN_MEMORY_NONE;		\
	}								\
	MACRO_END

// These two macros start & end a C block
#define VM_OBJECT_WIRED_PAGE_UPDATE_START(object)			\
	MACRO_BEGIN							\
	{								\
		int64_t __wireddelta = 0; vm_tag_t __waswired = (object)->wire_tag;

#define VM_OBJECT_WIRED_PAGE_UPDATE_END(object, tag)			\
	if (__wireddelta) {						\
		boolean_t __overflow __assert_only =			\
		    os_add_overflow((object)->wired_page_count, __wireddelta, \
		    &(object)->wired_page_count);			\
		assert(!__overflow);					\
		if (!(object)->pageout && !(object)->no_tag_update) {	\
			if (__wireddelta > 0) {				\
				assert(VM_KERN_MEMORY_NONE != (tag));	\
				if (VM_KERN_MEMORY_NONE == __waswired) { \
					VM_OBJECT_WIRED((object), (tag)); \
				}					\
				vm_tag_update_size((object)->wire_tag, ptoa_64(__wireddelta)); \
			} else if (VM_KERN_MEMORY_NONE != __waswired) { \
				assert(VM_KERN_MEMORY_NONE != (object)->wire_tag); \
				vm_tag_update_size((object)->wire_tag, ptoa_64(__wireddelta)); \
				if (!(object)->wired_page_count) {	\
					VM_OBJECT_UNWIRED((object));	\
				}					\
			}						\
		}							\
	}								\
	}								\
	MACRO_END

#define VM_OBJECT_WIRED_PAGE_COUNT(object, delta)			\
	__wireddelta += delta;

#define VM_OBJECT_WIRED_PAGE_ADD(object, m)				\
	if (!(m)->vmp_private && !(m)->vmp_fictitious) __wireddelta++;

#define VM_OBJECT_WIRED_PAGE_REMOVE(object, m)				\
	if (!(m)->vmp_private && !(m)->vmp_fictitious) __wireddelta--;
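
/*
 * Illustrative usage sketch (an addition to this listing, not original XNU
 * code): the START/END pair brackets a C block in which per-page wire-count
 * changes accumulate in the hidden "__wireddelta" local; the END macro then
 * folds the delta into the object's wired_page_count and its vm_tag
 * accounting in one step.  "pages" and "npages" are hypothetical names.
 *
 *	vm_object_lock(object);
 *	VM_OBJECT_WIRED_PAGE_UPDATE_START(object);
 *	for (i = 0; i < npages; i++) {
 *		VM_OBJECT_WIRED_PAGE_ADD(object, pages[i]);
 *	}
 *	VM_OBJECT_WIRED_PAGE_UPDATE_END(object, VM_KERN_MEMORY_IOKIT);
 *	vm_object_unlock(object);
 */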


#define OBJECT_LOCK_SHARED      0
#define OBJECT_LOCK_EXCLUSIVE   1

extern lck_grp_t      vm_object_lck_grp;
extern lck_grp_attr_t vm_object_lck_grp_attr;
extern lck_attr_t     vm_object_lck_attr;
extern lck_attr_t     kernel_object_lck_attr;
extern lck_attr_t     compressor_object_lck_attr;

extern vm_object_t    vm_pageout_scan_wants_object;

extern void           vm_object_lock(vm_object_t);
extern boolean_t      vm_object_lock_try(vm_object_t);
extern boolean_t      _vm_object_lock_try(vm_object_t);
extern boolean_t      vm_object_lock_avoid(vm_object_t);
extern void           vm_object_lock_shared(vm_object_t);
extern boolean_t      vm_object_lock_yield_shared(vm_object_t);
extern boolean_t      vm_object_lock_try_shared(vm_object_t);
extern void           vm_object_unlock(vm_object_t);
extern boolean_t      vm_object_lock_upgrade(vm_object_t);

/*
 *	Object locking macros
 */

#define vm_object_lock_init(object)					\
	lck_rw_init(&(object)->Lock, &vm_object_lck_grp,		\
	    (((object) == kernel_object ||				\
	      (object) == vm_submap_object) ?				\
	     &kernel_object_lck_attr :					\
	     (((object) == compressor_object) ?				\
	      &compressor_object_lck_attr :				\
	      &vm_object_lck_attr)))
#define vm_object_lock_destroy(object)  lck_rw_destroy(&(object)->Lock, &vm_object_lck_grp)

#define vm_object_lock_try_scan(object) _vm_object_lock_try(object)

/*
 * CAUTION: the following vm_object_lock_assert_held*() macros merely
 * check if anyone is holding the lock, but the holder may not necessarily
 * be the caller...
 */
#if MACH_ASSERT || DEBUG
#define vm_object_lock_assert_held(object) \
	lck_rw_assert(&(object)->Lock, LCK_RW_ASSERT_HELD)
#define vm_object_lock_assert_shared(object) \
	lck_rw_assert(&(object)->Lock, LCK_RW_ASSERT_SHARED)
#define vm_object_lock_assert_exclusive(object) \
	lck_rw_assert(&(object)->Lock, LCK_RW_ASSERT_EXCLUSIVE)
#define vm_object_lock_assert_notheld(object) \
	lck_rw_assert(&(object)->Lock, LCK_RW_ASSERT_NOTHELD)
#else  /* MACH_ASSERT || DEBUG */
#define vm_object_lock_assert_held(object)
#define vm_object_lock_assert_shared(object)
#define vm_object_lock_assert_exclusive(object)
#define vm_object_lock_assert_notheld(object)
#endif /* MACH_ASSERT || DEBUG */
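
/*
 * Illustrative usage sketch (an addition to this listing, not original XNU
 * code): the common pattern of taking the lock shared for a read-only walk
 * and upgrading to exclusive before mutating.  With lck_rw locks, a failed
 * shared-to-exclusive upgrade drops the lock, so it must be re-taken:
 *
 *	vm_object_lock_shared(object);
 *	... read-only inspection of the object ...
 *	if (!vm_object_lock_upgrade(object)) {
 *		// upgrade failed: the lock was released; reacquire exclusive
 *		vm_object_lock(object);
 *	}
 *	vm_object_lock_assert_exclusive(object);
 *	... mutate the object ...
 *	vm_object_unlock(object);
 */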


/*
 *	Declare procedures that operate on VM objects.
 */

__private_extern__ void        vm_object_bootstrap(void);

__private_extern__ void        vm_object_init(void);

__private_extern__ void        vm_object_init_lck_grp(void);

__private_extern__ void        vm_object_reaper_init(void);

__private_extern__ vm_object_t vm_object_allocate(vm_object_size_t size);

__private_extern__ void        _vm_object_allocate(vm_object_size_t size,
                                                   vm_object_t object);

#if TASK_SWAPPER

__private_extern__ void vm_object_res_reference(
	vm_object_t object);
__private_extern__ void vm_object_res_deallocate(
	vm_object_t object);
#define VM_OBJ_RES_INCR(object) (object)->res_count++
#define VM_OBJ_RES_DECR(object) (object)->res_count--

#else  /* TASK_SWAPPER */

#define VM_OBJ_RES_INCR(object)
#define VM_OBJ_RES_DECR(object)
#define vm_object_res_reference(object)
#define vm_object_res_deallocate(object)

#endif /* TASK_SWAPPER */

#define vm_object_reference_locked(object)		\
	MACRO_BEGIN					\
	vm_object_t RLObject = (object);		\
	vm_object_lock_assert_exclusive(object);	\
	assert((RLObject)->ref_count > 0);		\
	(RLObject)->ref_count++;			\
	assert((RLObject)->ref_count > 1);		\
	vm_object_res_reference(RLObject);		\
	MACRO_END


#define vm_object_reference_shared(object)				\
	MACRO_BEGIN							\
	vm_object_t RLObject = (object);				\
	vm_object_lock_assert_shared(object);				\
	assert((RLObject)->ref_count > 0);				\
	OSAddAtomic(1, &(RLObject)->ref_count);				\
	assert((RLObject)->ref_count > 0);				\
	/* XXX we would need an atomic version of the following ... */	\
	vm_object_res_reference(RLObject);				\
	MACRO_END
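
/*
 * Illustrative usage sketch (an addition to this listing, not original XNU
 * code): choose the reference macro that matches the lock mode already held.
 * With the object lock held exclusive, a plain increment is safe; with it
 * held shared, other threads can race, so the atomic variant is required.
 *
 *	vm_object_lock(object);			// exclusive
 *	vm_object_reference_locked(object);
 *	vm_object_unlock(object);
 *
 *	vm_object_lock_shared(object);		// shared
 *	vm_object_reference_shared(object);
 *	vm_object_unlock(object);
 */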


__private_extern__ void vm_object_reference(
	vm_object_t object);

#if !MACH_ASSERT

#define vm_object_reference(object)			\
	MACRO_BEGIN					\
	vm_object_t RObject = (object);			\
	if (RObject) {					\
		vm_object_lock_shared(RObject);		\
		vm_object_reference_shared(RObject);	\
		vm_object_unlock(RObject);		\
	}						\
	MACRO_END

#endif /* MACH_ASSERT */

__private_extern__ void vm_object_deallocate(
	vm_object_t object);

__private_extern__ kern_return_t vm_object_release_name(
	vm_object_t object,
	int         flags);

__private_extern__ void vm_object_pmap_protect(
	vm_object_t        object,
	vm_object_offset_t offset,
	vm_object_size_t   size,
	pmap_t             pmap,
	vm_map_offset_t    pmap_start,
	vm_prot_t          prot);

__private_extern__ void vm_object_pmap_protect_options(
	vm_object_t        object,
	vm_object_offset_t offset,
	vm_object_size_t   size,
	pmap_t             pmap,
	vm_map_offset_t    pmap_start,
	vm_prot_t          prot,
	int                options);

__private_extern__ void vm_object_page_remove(
	vm_object_t        object,
	vm_object_offset_t start,
	vm_object_offset_t end);

__private_extern__ void vm_object_deactivate_pages(
	vm_object_t        object,
	vm_object_offset_t offset,
	vm_object_size_t   size,
	boolean_t          kill_page,
	boolean_t          reusable_page,
	struct pmap        *pmap,
	vm_map_offset_t    pmap_offset);

__private_extern__ void vm_object_reuse_pages(
	vm_object_t        object,
	vm_object_offset_t start_offset,
	vm_object_offset_t end_offset,
	boolean_t          allow_partial_reuse);

__private_extern__ uint64_t vm_object_purge(
	vm_object_t object,
	int         flags);

__private_extern__ kern_return_t vm_object_purgable_control(
	vm_object_t   object,
	vm_purgable_t control,
	int           *state);

__private_extern__ kern_return_t vm_object_get_page_counts(
	vm_object_t        object,
	vm_object_offset_t offset,
	vm_object_size_t   size,
	unsigned int       *resident_page_count,
	unsigned int       *dirty_page_count);

__private_extern__ boolean_t vm_object_coalesce(
	vm_object_t        prev_object,
	vm_object_t        next_object,
	vm_object_offset_t prev_offset,
	vm_object_offset_t next_offset,
	vm_object_size_t   prev_size,
	vm_object_size_t   next_size);

__private_extern__ boolean_t vm_object_shadow(
	vm_object_t        *object,
	vm_object_offset_t *offset,
	vm_object_size_t   length);

__private_extern__ void vm_object_collapse(
	vm_object_t        object,
	vm_object_offset_t offset,
	boolean_t          can_bypass);

__private_extern__ boolean_t vm_object_copy_quickly(
	vm_object_t        *_object,
	vm_object_offset_t src_offset,
	vm_object_size_t   size,
	boolean_t          *_src_needs_copy,
	boolean_t          *_dst_needs_copy);

__private_extern__ kern_return_t vm_object_copy_strategically(
	vm_object_t        src_object,
	vm_object_offset_t src_offset,
	vm_object_size_t   size,
	vm_object_t        *dst_object,
	vm_object_offset_t *dst_offset,
	boolean_t          *dst_needs_copy);

__private_extern__ kern_return_t vm_object_copy_slowly(
	vm_object_t        src_object,
	vm_object_offset_t src_offset,
	vm_object_size_t   size,
	boolean_t          interruptible,
	vm_object_t        *_result_object);

__private_extern__ vm_object_t vm_object_copy_delayed(
	vm_object_t        src_object,
	vm_object_offset_t src_offset,
	vm_object_size_t   size,
	boolean_t          src_object_shared);


__private_extern__ kern_return_t vm_object_destroy(
	vm_object_t   object,
	kern_return_t reason);

__private_extern__ void vm_object_pager_create(
	vm_object_t object);

__private_extern__ void vm_object_compressor_pager_create(
	vm_object_t object);

__private_extern__ void vm_object_page_map(
	vm_object_t        object,
	vm_object_offset_t offset,
	vm_object_size_t   size,
	vm_object_offset_t (*map_fn)
	    (void *, vm_object_offset_t),
	void               *map_fn_data);

__private_extern__ kern_return_t vm_object_upl_request(
	vm_object_t         object,
	vm_object_offset_t  offset,
	upl_size_t          size,
	upl_t               *upl,
	upl_page_info_t     *page_info,
	unsigned int        *count,
	upl_control_flags_t flags,
	vm_tag_t            tag);

__private_extern__ kern_return_t vm_object_transpose(
	vm_object_t      object1,
	vm_object_t      object2,
	vm_object_size_t transpose_size);

__private_extern__ boolean_t vm_object_sync(
	vm_object_t        object,
	vm_object_offset_t offset,
	vm_object_size_t   size,
	boolean_t          should_flush,
	boolean_t          should_return,
	boolean_t          should_iosync);

__private_extern__ kern_return_t vm_object_update(
	vm_object_t            object,
	vm_object_offset_t     offset,
	vm_object_size_t       size,
	vm_object_offset_t     *error_offset,
	int                    *io_errno,
	memory_object_return_t should_return,
	int                    flags,
	vm_prot_t              prot);

__private_extern__ kern_return_t vm_object_lock_request(
	vm_object_t            object,
	vm_object_offset_t     offset,
	vm_object_size_t       size,
	memory_object_return_t should_return,
	int                    flags,
	vm_prot_t              prot);


__private_extern__ vm_object_t vm_object_memory_object_associate(
	memory_object_t  pager,
	vm_object_t      object,
	vm_object_size_t size,
	boolean_t        check_named);


__private_extern__ void vm_object_cluster_size(
	vm_object_t            object,
	vm_object_offset_t     *start,
	vm_size_t              *length,
	vm_object_fault_info_t fault_info,
	uint32_t               *io_streaming);

__private_extern__ kern_return_t vm_object_populate_with_private(
	vm_object_t        object,
	vm_object_offset_t offset,
	ppnum_t            phys_page,
	vm_size_t          size);

__private_extern__ void vm_object_change_wimg_mode(
	vm_object_t  object,
	unsigned int wimg_mode);

extern kern_return_t adjust_vm_object_cache(
	vm_size_t oval,
	vm_size_t nval);

extern kern_return_t vm_object_page_op(
	vm_object_t        object,
	vm_object_offset_t offset,
	int                ops,
	ppnum_t            *phys_entry,
	int                *flags);

extern kern_return_t vm_object_range_op(
	vm_object_t        object,
	vm_object_offset_t offset_beg,
	vm_object_offset_t offset_end,
	int                ops,
	uint32_t           *range);


__private_extern__ void vm_object_reap_pages(
	vm_object_t object,
	int         reap_type);
#define REAP_REAP       0
#define REAP_TERMINATE  1
#define REAP_PURGEABLE  2
#define REAP_DATA_FLUSH 3

#if CONFIG_FREEZE

__private_extern__ uint32_t
vm_object_compressed_freezer_pageout(
	vm_object_t object, uint32_t dirty_budget);

__private_extern__ void
vm_object_compressed_freezer_done(
	void);

#endif /* CONFIG_FREEZE */

__private_extern__ void
vm_object_pageout(
	vm_object_t object);

#if CONFIG_IOSCHED
struct io_reprioritize_req {
	uint64_t      blkno;
	uint32_t      len;
	int           priority;
	struct vnode  *devvp;
	queue_chain_t io_reprioritize_list;
};
typedef struct io_reprioritize_req *io_reprioritize_req_t;

extern void vm_io_reprioritize_init(void);
#endif

/*
 * Event waiting handling
 */

#define VM_OBJECT_EVENT_INITIALIZED             0
#define VM_OBJECT_EVENT_PAGER_READY             1
#define VM_OBJECT_EVENT_PAGING_IN_PROGRESS      2
#define VM_OBJECT_EVENT_MAPPING_IN_PROGRESS     3
#define VM_OBJECT_EVENT_LOCK_IN_PROGRESS        4
#define VM_OBJECT_EVENT_UNCACHING               5
#define VM_OBJECT_EVENT_COPY_CALL               6
#define VM_OBJECT_EVENT_CACHING                 7
#define VM_OBJECT_EVENT_UNBLOCKED               8
#define VM_OBJECT_EVENT_PAGING_ONLY_IN_PROGRESS 9

#define VM_OBJECT_EVENT_MAX 10  /* 11 bits in "all_wanted", so 0->10 */

static __inline__ wait_result_t
vm_object_assert_wait(
	vm_object_t      object,
	int              event,
	wait_interrupt_t interruptible)
{
	wait_result_t wr;

	vm_object_lock_assert_exclusive(object);
	assert(event >= 0 && event <= VM_OBJECT_EVENT_MAX);

	object->all_wanted |= 1 << event;
	wr = assert_wait((event_t)((vm_offset_t)object + event),
	    interruptible);
	return wr;
}

static __inline__ wait_result_t
vm_object_wait(
	vm_object_t      object,
	int              event,
	wait_interrupt_t interruptible)
{
	wait_result_t wr;

	vm_object_assert_wait(object, event, interruptible);
	vm_object_unlock(object);
	wr = thread_block(THREAD_CONTINUE_NULL);
	return wr;
}

static __inline__ wait_result_t
thread_sleep_vm_object(
	vm_object_t      object,
	event_t          event,
	wait_interrupt_t interruptible)
{
	wait_result_t wr;

#if DEVELOPMENT || DEBUG
	if (object->Lock_owner != current_thread()) {
		panic("thread_sleep_vm_object: not owner - %p\n", object);
	}
	object->Lock_owner = 0;
#endif
	wr = lck_rw_sleep(&object->Lock,
	    LCK_SLEEP_PROMOTED_PRI,
	    event,
	    interruptible);
#if DEVELOPMENT || DEBUG
	object->Lock_owner = current_thread();
#endif
	return wr;
}

static __inline__ wait_result_t
vm_object_sleep(
	vm_object_t      object,
	int              event,
	wait_interrupt_t interruptible)
{
	wait_result_t wr;

	vm_object_lock_assert_exclusive(object);
	assert(event >= 0 && event <= VM_OBJECT_EVENT_MAX);

	object->all_wanted |= 1 << event;
	wr = thread_sleep_vm_object(object,
	    (event_t)((vm_offset_t)object + event),
	    interruptible);
	return wr;
}

static __inline__ void
vm_object_wakeup(
	vm_object_t object,
	int         event)
{
	vm_object_lock_assert_exclusive(object);
	assert(event >= 0 && event <= VM_OBJECT_EVENT_MAX);

	if (object->all_wanted & (1 << event)) {
		thread_wakeup((event_t)((vm_offset_t)object + event));
	}
	object->all_wanted &= ~(1 << event);
}

static __inline__ void
vm_object_set_wanted(
	vm_object_t object,
	int         event)
{
	vm_object_lock_assert_exclusive(object);
	assert(event >= 0 && event <= VM_OBJECT_EVENT_MAX);

	object->all_wanted |= (1 << event);
}

static __inline__ int
vm_object_wanted(
	vm_object_t object,
	int         event)
{
	vm_object_lock_assert_held(object);
	assert(event >= 0 && event <= VM_OBJECT_EVENT_MAX);

	return object->all_wanted & (1 << event);
}
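
/*
 * Illustrative usage sketch (an addition to this listing, not original XNU
 * code): a waiter records interest in an event bit and sleeps, dropping the
 * object lock; the waker flips the condition and wakes any waiters.  Both
 * sides hold the lock exclusive, and the waiter re-checks the condition in
 * a loop since the wakeup only signals that the state may have changed.
 *
 *	// waiter
 *	vm_object_lock(object);
 *	while (!object->pager_ready) {
 *		(void) vm_object_sleep(object,
 *		    VM_OBJECT_EVENT_PAGER_READY, THREAD_UNINT);
 *	}
 *	...
 *
 *	// waker
 *	vm_object_lock(object);
 *	object->pager_ready = TRUE;
 *	vm_object_wakeup(object, VM_OBJECT_EVENT_PAGER_READY);
 *	vm_object_unlock(object);
 */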

/*
 *	Routines implemented as macros
 */
#ifdef VM_PIP_DEBUG
#include <libkern/OSDebug.h>
#define VM_PIP_DEBUG_BEGIN(object)					\
	MACRO_BEGIN							\
	int pip = ((object)->paging_in_progress +			\
	    (object)->activity_in_progress);				\
	if (pip < VM_PIP_DEBUG_MAX_REFS) {				\
		(void) OSBacktrace(&(object)->pip_holders[pip].pip_retaddr[0], \
		    VM_PIP_DEBUG_STACK_FRAMES);				\
	}								\
	MACRO_END
#else  /* VM_PIP_DEBUG */
#define VM_PIP_DEBUG_BEGIN(object)
#endif /* VM_PIP_DEBUG */

#define vm_object_activity_begin(object)				\
	MACRO_BEGIN							\
	vm_object_lock_assert_exclusive((object));			\
	VM_PIP_DEBUG_BEGIN((object));					\
	(object)->activity_in_progress++;				\
	if ((object)->activity_in_progress == 0) {			\
		panic("vm_object_activity_begin(%p): overflow\n", (object));\
	}								\
	MACRO_END

#define vm_object_activity_end(object)					\
	MACRO_BEGIN							\
	vm_object_lock_assert_exclusive((object));			\
	if ((object)->activity_in_progress == 0) {			\
		panic("vm_object_activity_end(%p): underflow\n", (object));\
	}								\
	(object)->activity_in_progress--;				\
	if ((object)->paging_in_progress == 0 &&			\
	    (object)->activity_in_progress == 0)			\
		vm_object_wakeup((object),				\
		    VM_OBJECT_EVENT_PAGING_IN_PROGRESS);		\
	MACRO_END

#define vm_object_paging_begin(object)					\
	MACRO_BEGIN							\
	vm_object_lock_assert_exclusive((object));			\
	VM_PIP_DEBUG_BEGIN((object));					\
	(object)->paging_in_progress++;					\
	if ((object)->paging_in_progress == 0) {			\
		panic("vm_object_paging_begin(%p): overflow\n", (object));\
	}								\
	MACRO_END

#define vm_object_paging_end(object)					\
	MACRO_BEGIN							\
	vm_object_lock_assert_exclusive((object));			\
	if ((object)->paging_in_progress == 0) {			\
		panic("vm_object_paging_end(%p): underflow\n", (object));\
	}								\
	(object)->paging_in_progress--;					\
	if ((object)->paging_in_progress == 0) {			\
		vm_object_wakeup((object),				\
		    VM_OBJECT_EVENT_PAGING_ONLY_IN_PROGRESS);		\
		if ((object)->activity_in_progress == 0)		\
			vm_object_wakeup((object),			\
			    VM_OBJECT_EVENT_PAGING_IN_PROGRESS);	\
	}								\
	MACRO_END

#define vm_object_paging_wait(object, interruptible)			\
	MACRO_BEGIN							\
	vm_object_lock_assert_exclusive((object));			\
	while ((object)->paging_in_progress != 0 ||			\
	    (object)->activity_in_progress != 0) {			\
		wait_result_t _wr;					\
									\
		_wr = vm_object_sleep((object),				\
		    VM_OBJECT_EVENT_PAGING_IN_PROGRESS,			\
		    (interruptible));					\
									\
		/*XXX if ((interruptible) && (_wr != THREAD_AWAKENED))*/ \
		/*XXX break; */						\
	}								\
	MACRO_END

#define vm_object_paging_only_wait(object, interruptible)		\
	MACRO_BEGIN							\
	vm_object_lock_assert_exclusive((object));			\
	while ((object)->paging_in_progress != 0) {			\
		wait_result_t _wr;					\
									\
		_wr = vm_object_sleep((object),				\
		    VM_OBJECT_EVENT_PAGING_ONLY_IN_PROGRESS,		\
		    (interruptible));					\
									\
		/*XXX if ((interruptible) && (_wr != THREAD_AWAKENED))*/ \
		/*XXX break; */						\
	}								\
	MACRO_END
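
/*
 * Illustrative usage sketch (an addition to this listing, not original XNU
 * code): a paging_in_progress reference brackets a pager interaction so that
 * collapse, destroy, and terminate are held off; a teardown path drains both
 * counters with vm_object_paging_wait() before proceeding.
 *
 *	vm_object_lock(object);
 *	vm_object_paging_begin(object);
 *	vm_object_unlock(object);
 *	... issue the pager request, possibly blocking ...
 *	vm_object_lock(object);
 *	vm_object_paging_end(object);
 *	vm_object_unlock(object);
 *
 *	// teardown side
 *	vm_object_lock(object);
 *	vm_object_paging_wait(object, THREAD_UNINT);
 *	... now safe to collapse/terminate ...
 */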


#define vm_object_mapping_begin(object)					\
	MACRO_BEGIN							\
	vm_object_lock_assert_exclusive((object));			\
	assert(!(object)->mapping_in_progress);				\
	(object)->mapping_in_progress = TRUE;				\
	MACRO_END

#define vm_object_mapping_end(object)					\
	MACRO_BEGIN							\
	vm_object_lock_assert_exclusive((object));			\
	assert((object)->mapping_in_progress);				\
	(object)->mapping_in_progress = FALSE;				\
	vm_object_wakeup((object),					\
	    VM_OBJECT_EVENT_MAPPING_IN_PROGRESS);			\
	MACRO_END

#define vm_object_mapping_wait(object, interruptible)			\
	MACRO_BEGIN							\
	vm_object_lock_assert_exclusive((object));			\
	while ((object)->mapping_in_progress) {				\
		wait_result_t _wr;					\
									\
		_wr = vm_object_sleep((object),				\
		    VM_OBJECT_EVENT_MAPPING_IN_PROGRESS,		\
		    (interruptible));					\
		/*XXX if ((interruptible) && (_wr != THREAD_AWAKENED))*/ \
		/*XXX break; */						\
	}								\
	assert(!(object)->mapping_in_progress);				\
	MACRO_END


#define vm_object_round_page(x) (((vm_object_offset_t)(x) + PAGE_MASK) & ~((signed)PAGE_MASK))
#define vm_object_trunc_page(x) ((vm_object_offset_t)(x) & ~((signed)PAGE_MASK))
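
/*
 * Worked example (an addition to this listing, not original XNU code): with
 * 4 KB pages, PAGE_MASK is 0xFFF, so for an offset of 0x1234
 *
 *	vm_object_trunc_page(0x1234) == 0x1000
 *	vm_object_round_page(0x1234) == 0x2000
 *
 * i.e. trunc clears the low 12 bits and round bumps up to the next page
 * boundary (offsets that are already page-aligned are unchanged by round).
 */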

extern void vm_object_cache_add(vm_object_t);
extern void vm_object_cache_remove(vm_object_t);
extern int  vm_object_cache_evict(int, int);

#define VM_OBJECT_OWNER_DISOWNED ((task_t) -1)
#define VM_OBJECT_OWNER(object)						\
	((((object)->purgable == VM_PURGABLE_DENY &&			\
	   (object)->vo_ledger_tag == 0) ||				\
	  (object)->vo_owner == TASK_NULL)				\
	 ? TASK_NULL	/* not owned */					\
	 : (((object)->vo_owner == VM_OBJECT_OWNER_DISOWNED)		\
	    ? kernel_task	/* disowned -> kernel */		\
	    : (object)->vo_owner))	/* explicit owner */
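
/*
 * Illustrative usage sketch (an addition to this listing, not original XNU
 * code): resolving the accounting owner of a purgeable or ledger-tagged
 * object.  A disowned object is charged to the kernel; an unowned one to
 * nobody.
 *
 *	task_t owner;
 *
 *	vm_object_lock_assert_held(object);
 *	owner = VM_OBJECT_OWNER(object);
 *	if (owner != TASK_NULL) {
 *		... charge this object's pages to "owner"'s ledgers ...
 *	}
 */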

extern void vm_object_ledger_tag_ledgers(
	vm_object_t object,
	int         *ledger_idx_volatile,
	int         *ledger_idx_nonvolatile,
	int         *ledger_idx_volatile_compressed,
	int         *ledger_idx_nonvolatile_compressed,
	boolean_t   *do_footprint);
extern kern_return_t vm_object_ownership_change(
	vm_object_t object,
	int         new_ledger_tag,
	task_t      new_owner,
	int         new_ledger_flags,
	boolean_t   task_objq_locked);

#endif /* _VM_VM_OBJECT_H_ */