/*
 * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 *	Author:	Avadis Tevanian, Jr., Michael Wayne Young
 *
 *	Virtual memory object module definitions.
 */
#ifndef	_VM_VM_OBJECT_H_
#define _VM_VM_OBJECT_H_
#include <mach_pagemap.h>
#include <task_swapper.h>

#include <mach/kern_return.h>
#include <mach/boolean.h>
#include <mach/memory_object_types.h>
#include <mach/port.h>
#include <mach/vm_prot.h>
#include <mach/vm_param.h>
#include <mach/machine/vm_types.h>
#include <kern/queue.h>
#include <kern/lock.h>
#include <kern/locks.h>
#include <kern/assert.h>
#include <kern/misc_protos.h>
#include <kern/macro_help.h>
#include <ipc/ipc_types.h>
#if	MACH_PAGEMAP
#include <vm/vm_external.h>
#endif	/* MACH_PAGEMAP */
/*
 *	vm_object_t		Virtual memory object.
 *	vm_object_fault_info_t	Used to determine cluster size.
 */
struct vm_object_fault_info {
	vm_size_t		cluster_size;
	vm_behavior_t		behavior;
	vm_map_offset_t		lo_offset;
	vm_map_offset_t		hi_offset;
};
struct vm_object {
	queue_head_t		memq;		/* Resident memory */
	lck_rw_t		Lock;		/* Synchronization */

	vm_object_size_t	size;		/* Object size (only valid
						 * if internal)
						 */
	struct vm_page		*memq_hint;
	int			ref_count;	/* Number of references */
#if	TASK_SWAPPER
	int			res_count;	/* Residency references (swap)*/
#endif	/* TASK_SWAPPER */
	unsigned int		resident_page_count;
						/* number of resident pages */
	struct vm_object	*copy;		/* Object that should receive
						 * a copy of my changed pages,
						 * for copy_delay, or just the
						 * temporary object that
						 * shadows this object, for
						 * copy_call.
						 */
	struct vm_object	*shadow;	/* My shadow */
	vm_object_offset_t	shadow_offset;	/* Offset into shadow */

	memory_object_t		pager;		/* Where to get data */
	vm_object_offset_t	paging_offset;	/* Offset into memory object */
	memory_object_control_t	pager_control;	/* Where data comes back */

	memory_object_copy_strategy_t
				copy_strategy;	/* How to handle data copy */
	int			paging_in_progress;
						/* The memory object ports are
						 * being used (e.g., for pagein
						 * or pageout) -- don't change
						 * any of these fields (i.e.,
						 * don't collapse, destroy or
						 * terminate)
						 */
	unsigned int
	/* boolean_t array */	all_wanted:11,	/* Bit array of "want to be
						 * awakened" notations.  See
						 * VM_OBJECT_EVENT_* items
						 * below
						 */
	/* boolean_t */		pager_created:1, /* Has pager been created? */
	/* boolean_t */		pager_initialized:1, /* Are fields ready to use? */
	/* boolean_t */		pager_ready:1,	/* Will pager take requests? */

	/* boolean_t */		pager_trusted:1,/* The pager for this object
						 * is trusted.  This is true for
						 * all internal objects (backed
						 * by the default pager)
						 */
	/* boolean_t */		can_persist:1,	/* The kernel may keep the data
						 * for this object (and rights
						 * to the memory object) after
						 * all address map references
						 * are deallocated?
						 */
	/* boolean_t */		internal:1,	/* Created by the kernel (and
						 * therefore, managed by the
						 * default memory manager)
						 */
	/* boolean_t */		temporary:1,	/* Permanent objects may be
						 * changed externally by the
						 * memory manager, and changes
						 * made in memory must be
						 * reflected back to the memory
						 * manager.  Temporary objects
						 * lack both of these
						 * characteristics.
						 */
	/* boolean_t */		private:1,	/* magic device_pager object,
						 * holds private pages only */
	/* boolean_t */		pageout:1,	/* pageout object. contains
						 * private pages that refer to
						 * a real memory object. */
	/* boolean_t */		alive:1,	/* Not yet terminated */

	/* boolean_t */		purgable:2,	/* Purgable state.  See
						 * VM_PURGABLE_* items
						 */
	/* boolean_t */		shadowed:1,	/* Shadow may exist */
	/* boolean_t */		silent_overwrite:1,
						/* Allow full page overwrite
						 * without data_request if
						 * page is absent */
	/* boolean_t */		advisory_pageout:1,
						/* Instead of sending page
						 * via OOL, just notify
						 * pager that the kernel
						 * wants to discard it, page
						 * remains in object */
	/* boolean_t */		true_share:1,
						/* This object is mapped
						 * in more than one place
						 * and hence cannot be
						 * coalesced */
	/* boolean_t */		terminating:1,
						/* Allows vm_object_lookup
						 * and vm_object_deallocate
						 * to special case their
						 * behavior when they are
						 * called as a result of
						 * page cleaning during
						 * object termination
						 */
	/* boolean_t */		named:1,	/* Enforces an internal
						 * naming convention, by
						 * calling the right routines
						 * for allocation and
						 * destruction; UBC references
						 * against the vm_object are
						 * checked.
						 */
	/* boolean_t */		shadow_severed:1,
						/* When a permanent object
						 * backing a COW goes away
						 * unexpectedly.  This bit
						 * allows vm_fault to return
						 * an error rather than a
						 * zero filled page.
						 */
	/* boolean_t */		phys_contiguous:1,
						/* Memory is wired and
						 * guaranteed physically
						 * contiguous.  However
						 * it is not device memory
						 * and obeys normal virtual
						 * memory rules w.r.t pmap
						 * access bits.
						 */
	/* boolean_t */		nophyscache:1;
						/* When mapped at the
						 * pmap level, don't allow
						 * primary caching. (for
						 * I/O)
						 */
	queue_chain_t		cached_list;	/* Attachment point for the
						 * list of objects cached as a
						 * result of their can_persist
						 * value
						 */

	queue_head_t		msr_q;		/* memory object synchronise
						 * request queue
						 */

	/*
	 * the following fields are not protected by any locks;
	 * they are updated via atomic compare and swap
	 */
	vm_object_offset_t	last_alloc;	/* last allocation offset */
	int			sequential;	/* sequential access size */

	uint32_t		pages_created;
#if	MACH_PAGEMAP
	vm_external_map_t	existence_map;	/* bitmap of pages written to
						 * backing storage */
#endif	/* MACH_PAGEMAP */
	vm_offset_t		cow_hint;	/* last page present in
						 * shadow but not in object */
#if	MACH_ASSERT
	struct vm_object	*paging_object;	/* object which pages to be
						 * swapped out are temporarily
						 * put in current object
						 */
#endif
	/* hold object lock when altering */
	unsigned	int
		wimg_bits:8,		/* cache WIMG bits */
		code_signed:1,		/* pages are signed and should be
					   validated; the signatures are stored
					   with the pager */
		not_in_use:23;		/* for expansion */
#if	UPL_DEBUG
	queue_head_t		uplq;		/* List of outstanding upls */
#endif	/* UPL_DEBUG */
#ifdef	VM_PIP_DEBUG
	/*
	 * Keep track of the stack traces for the first holders
	 * of a "paging_in_progress" reference for this VM object.
	 */
#define VM_PIP_DEBUG_STACK_FRAMES	25	/* depth of each stack trace */
#define VM_PIP_DEBUG_MAX_REFS		10	/* track that many references */
	struct __pip_backtrace {
		void *pip_retaddr[VM_PIP_DEBUG_STACK_FRAMES];
	} pip_holders[VM_PIP_DEBUG_MAX_REFS];
#endif	/* VM_PIP_DEBUG */
	queue_chain_t		objq;		/* object queue - currently used for purgable queues */
};
#define VM_PAGE_REMOVE(page)						\
	MACRO_BEGIN							\
	vm_page_t __page = (page);					\
	vm_object_t __object = __page->object;				\
	if (__page == __object->memq_hint) {				\
		vm_page_t	__new_hint;				\
		queue_entry_t	__qe;					\
		__qe = queue_next(&__page->listq);			\
		if (queue_end(&__object->memq, __qe)) {			\
			__qe = queue_prev(&__page->listq);		\
			if (queue_end(&__object->memq, __qe)) {		\
				__qe = NULL;				\
			}						\
		}							\
		__new_hint = (vm_page_t) __qe;				\
		__object->memq_hint = __new_hint;			\
	}								\
	queue_remove(&__object->memq, __page, vm_page_t, listq);	\
	MACRO_END
#define VM_PAGE_INSERT(page, object)					\
	MACRO_BEGIN							\
	vm_page_t __page = (page);					\
	vm_object_t __object = (object);				\
	queue_enter(&__object->memq, __page, vm_page_t, listq);	\
	__object->memq_hint = __page;					\
	MACRO_END
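
/*
 * Illustrative sketch (not part of this interface): moving a resident page
 * from one object's memq to another while keeping memq_hint consistent.
 * "example_move_page" is a hypothetical helper; vm_page_t and its
 * "object"/"listq" fields are assumed from <vm/vm_page.h>, and both object
 * locks are assumed to be held exclusively by the caller.
 */
#if 0	/* example only */
static void
example_move_page(vm_page_t m, vm_object_t from, vm_object_t to)
{
	vm_object_lock_assert_exclusive(from);
	vm_object_lock_assert_exclusive(to);
	VM_PAGE_REMOVE(m);	/* repairs from->memq_hint if it pointed at m */
	m->object = to;
	VM_PAGE_INSERT(m, to);	/* enqueues m and makes it the new hint */
}
#endif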
__private_extern__
vm_object_t	kernel_object;		/* the single kernel object */

__private_extern__
unsigned int	vm_object_absent_max;	/* maximum number of absent pages
					 * at a time for each object */
# define VM_MSYNC_INITIALIZED			0
# define VM_MSYNC_SYNCHRONIZING		1
# define VM_MSYNC_DONE				2
struct msync_req {
	queue_chain_t		msr_q;		/* object request queue */
	queue_chain_t		req_q;		/* vm_msync request queue */
	unsigned int		flag;
	vm_object_offset_t	offset;
	vm_object_size_t	length;
	vm_object_t		object;		/* back pointer */
	decl_mutex_data(,	msync_req_lock)	/* Lock for this structure */
};
typedef struct msync_req	*msync_req_t;
#define MSYNC_REQ_NULL		((msync_req_t) 0)
/*
 * Macros to allocate and free msync_reqs
 */
#define msync_req_alloc(msr)						\
	MACRO_BEGIN							\
	(msr) = (msync_req_t)kalloc(sizeof(struct msync_req));		\
	mutex_init(&(msr)->msync_req_lock, 0);				\
	(msr)->flag = VM_MSYNC_INITIALIZED;				\
	MACRO_END
#define msync_req_free(msr)						\
	(kfree((msr), sizeof(struct msync_req)))

#define msr_lock(msr)		mutex_lock(&(msr)->msync_req_lock)
#define msr_unlock(msr)		mutex_unlock(&(msr)->msync_req_lock)
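
/*
 * Illustrative sketch (not part of this interface): a typical msync_req
 * lifecycle.  "example_msync_req_lifecycle" is a hypothetical helper; the
 * linking of the request onto the object's msr_q and the wait for the pager
 * are elided.
 */
#if 0	/* example only */
static void
example_msync_req_lifecycle(vm_object_t object, vm_object_offset_t offset,
			    vm_object_size_t length)
{
	msync_req_t msr;

	msync_req_alloc(msr);		/* kalloc + lock init + INITIALIZED */

	msr_lock(msr);
	msr->offset = offset;
	msr->length = length;
	msr->object = object;
	msr->flag = VM_MSYNC_SYNCHRONIZING;
	msr_unlock(msr);

	/* ... wait until the request reaches VM_MSYNC_DONE ... */

	msync_req_free(msr);
}
#endif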
/*
 *	Declare procedures that operate on VM objects.
 */
__private_extern__ void		vm_object_bootstrap(void) __attribute__((section("__TEXT, initcode")));

__private_extern__ void		vm_object_init(void);

__private_extern__ void		vm_object_init_lck_grp(void);

__private_extern__ void		vm_object_reaper_init(void);

__private_extern__ vm_object_t	vm_object_allocate(
					vm_object_size_t	size);
__private_extern__ void		_vm_object_allocate(
					vm_object_size_t	size,
					vm_object_t		object);
#if	TASK_SWAPPER

__private_extern__ void		vm_object_res_reference(
					vm_object_t		object);
__private_extern__ void		vm_object_res_deallocate(
					vm_object_t		object);
#define	VM_OBJ_RES_INCR(object)	(object)->res_count++
#define	VM_OBJ_RES_DECR(object)	(object)->res_count--

#else	/* TASK_SWAPPER */

#define	VM_OBJ_RES_INCR(object)
#define	VM_OBJ_RES_DECR(object)
#define vm_object_res_reference(object)
#define vm_object_res_deallocate(object)

#endif	/* TASK_SWAPPER */
#define vm_object_reference_locked(object)		\
	MACRO_BEGIN					\
	vm_object_t RLObject = (object);		\
	vm_object_lock_assert_exclusive(object);	\
	assert((RLObject)->ref_count > 0);		\
	(RLObject)->ref_count++;			\
	assert((RLObject)->ref_count > 1);		\
	vm_object_res_reference(RLObject);		\
	MACRO_END
#define vm_object_reference_shared(object)				\
	MACRO_BEGIN							\
	vm_object_t RLObject = (object);				\
	vm_object_lock_assert_shared(object);				\
	assert((RLObject)->ref_count > 0);				\
	OSAddAtomic(1, (SInt32 *)&(RLObject)->ref_count);		\
	assert((RLObject)->ref_count > 1);				\
	/* XXX we would need an atomic version of the following ... */	\
	vm_object_res_reference(RLObject);				\
	MACRO_END
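
/*
 * Illustrative sketch (not part of this interface): taking an extra
 * reference on an already-locked object.  With an exclusive lock,
 * vm_object_reference_locked() bumps ref_count directly; with only a
 * shared lock, vm_object_reference_shared() must do it atomically.
 * "example_take_reference" is a hypothetical helper.
 */
#if 0	/* example only */
static void
example_take_reference(vm_object_t object, boolean_t have_exclusive)
{
	if (have_exclusive) {
		vm_object_lock_assert_exclusive(object);
		vm_object_reference_locked(object);
	} else {
		vm_object_lock_assert_shared(object);
		vm_object_reference_shared(object);
	}
}
#endif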
__private_extern__ void		vm_object_reference(
					vm_object_t		object);
#if	!MACH_ASSERT
#define	vm_object_reference(object)			\
	MACRO_BEGIN					\
	vm_object_t RObject = (object);			\
	if (RObject) {					\
		vm_object_lock(RObject);		\
		vm_object_reference_locked(RObject);	\
		vm_object_unlock(RObject);		\
	}						\
	MACRO_END
#endif	/* MACH_ASSERT */
__private_extern__ void		vm_object_deallocate(
					vm_object_t		object);
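
/*
 * Illustrative sketch (not part of this interface): pairing allocation with
 * deallocation.  vm_object_allocate() returns an object holding one
 * reference; vm_object_deallocate() drops a reference and reclaims the
 * object when the last one goes away.  "example_object_lifetime" is a
 * hypothetical helper.
 */
#if 0	/* example only */
static void
example_object_lifetime(vm_object_size_t size)
{
	vm_object_t object;

	object = vm_object_allocate(size);	/* ref_count == 1 */
	/* ... map it, fill it, hand references to other subsystems ... */
	vm_object_deallocate(object);		/* drop our reference */
}
#endif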
__private_extern__ kern_return_t vm_object_release_name(
					vm_object_t		object,
					int			flags);
__private_extern__ void		vm_object_pmap_protect(
					vm_object_t		object,
					vm_object_offset_t	offset,
					vm_object_size_t	size,
					pmap_t			pmap,
					vm_map_offset_t		pmap_start,
					vm_prot_t		prot);
__private_extern__ void		vm_object_page_remove(
					vm_object_t		object,
					vm_object_offset_t	start,
					vm_object_offset_t	end);
__private_extern__ void		vm_object_deactivate_pages(
					vm_object_t		object,
					vm_object_offset_t	offset,
					vm_object_size_t	size,
					boolean_t		kill_page);
__private_extern__ unsigned int	vm_object_purge(
					vm_object_t		object);
__private_extern__ kern_return_t vm_object_purgable_control(
					vm_object_t		object,
					vm_purgable_t		control,
					int			*state);
__private_extern__ boolean_t	vm_object_coalesce(
					vm_object_t		prev_object,
					vm_object_t		next_object,
					vm_object_offset_t	prev_offset,
					vm_object_offset_t	next_offset,
					vm_object_size_t	prev_size,
					vm_object_size_t	next_size);
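
/*
 * Illustrative sketch (not part of this interface): vm_object_coalesce()
 * asks whether the range described by next_size can simply be absorbed into
 * prev_object so a map entry can be extended.  No pages move; only
 * bookkeeping is checked/extended.  "example_try_extend" is a hypothetical
 * helper, and passing a null next_object to mean "anonymous memory follows"
 * is an assumption about the usual caller pattern.
 */
#if 0	/* example only */
static boolean_t
example_try_extend(vm_object_t prev_object, vm_object_offset_t prev_offset,
		   vm_object_size_t prev_size, vm_object_size_t next_size)
{
	return vm_object_coalesce(prev_object, (vm_object_t) 0,
				  prev_offset, (vm_object_offset_t) 0,
				  prev_size, next_size);
}
#endif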
__private_extern__ boolean_t	vm_object_shadow(
					vm_object_t		*object,
					vm_object_offset_t	*offset,
					vm_object_size_t	length);
__private_extern__ void		vm_object_collapse(
					vm_object_t		object,
					vm_object_offset_t	offset,
					boolean_t		can_bypass);
__private_extern__ boolean_t	vm_object_copy_quickly(
					vm_object_t		*_object,
					vm_object_offset_t	src_offset,
					vm_object_size_t	size,
					boolean_t		*_src_needs_copy,
					boolean_t		*_dst_needs_copy);
__private_extern__ kern_return_t vm_object_copy_strategically(
					vm_object_t		src_object,
					vm_object_offset_t	src_offset,
					vm_object_size_t	size,
					vm_object_t		*dst_object,
					vm_object_offset_t	*dst_offset,
					boolean_t		*dst_needs_copy);
__private_extern__ kern_return_t vm_object_copy_slowly(
					vm_object_t		src_object,
					vm_object_offset_t	src_offset,
					vm_object_size_t	size,
					boolean_t		interruptible,
					vm_object_t		*_result_object);
__private_extern__ vm_object_t	vm_object_copy_delayed(
					vm_object_t		src_object,
					vm_object_offset_t	src_offset,
					vm_object_size_t	size,
					boolean_t		src_object_shared);
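
/*
 * Illustrative sketch (not part of this interface): the usual pattern for
 * copying a range of an object.  vm_object_copy_quickly() is the cheap
 * symmetric copy-on-write attempt; when it cannot be used, callers fall
 * back to vm_object_copy_strategically(), which consults the source
 * object's copy_strategy.  "example_copy_range" is a hypothetical helper.
 */
#if 0	/* example only */
static kern_return_t
example_copy_range(vm_object_t src_object, vm_object_offset_t src_offset,
		   vm_object_size_t size, vm_object_t *new_object,
		   vm_object_offset_t *new_offset)
{
	boolean_t src_needs_copy, dst_needs_copy;

	*new_object = src_object;
	*new_offset = src_offset;
	if (vm_object_copy_quickly(new_object, src_offset, size,
				   &src_needs_copy, &dst_needs_copy))
		return KERN_SUCCESS;

	return vm_object_copy_strategically(src_object, src_offset, size,
					    new_object, new_offset,
					    &dst_needs_copy);
}
#endif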
__private_extern__ kern_return_t vm_object_destroy(
					vm_object_t		object,
					kern_return_t		reason);
__private_extern__ void		vm_object_pager_create(
					vm_object_t		object);
__private_extern__ void		vm_object_page_map(
					vm_object_t		object,
					vm_object_offset_t	offset,
					vm_object_size_t	size,
					vm_object_offset_t	(*map_fn)
						(void *, vm_object_offset_t),
					void			*map_fn_data);
__private_extern__ kern_return_t vm_object_upl_request(
					vm_object_t		object,
					vm_object_offset_t	offset,
					upl_size_t		size,
					upl_t			*upl,
					upl_page_info_t		*page_info,
					unsigned int		*count,
					int			flags);
__private_extern__ kern_return_t vm_object_transpose(
					vm_object_t		object1,
					vm_object_t		object2,
					vm_object_size_t	transpose_size);
__private_extern__ boolean_t	vm_object_sync(
					vm_object_t		object,
					vm_object_offset_t	offset,
					vm_object_size_t	size,
					boolean_t		should_flush,
					boolean_t		should_return,
					boolean_t		should_iosync);
__private_extern__ kern_return_t vm_object_update(
					vm_object_t		object,
					vm_object_offset_t	offset,
					vm_object_size_t	size,
					vm_object_offset_t	*error_offset,
					int			*io_errno,
					memory_object_return_t	should_return,
					int			flags,
					vm_prot_t		prot);
__private_extern__ kern_return_t vm_object_lock_request(
					vm_object_t		object,
					vm_object_offset_t	offset,
					vm_object_size_t	size,
					memory_object_return_t	should_return,
					int			flags,
					vm_prot_t		prot);
__private_extern__ vm_object_t	vm_object_enter(
					memory_object_t		pager,
					vm_object_size_t	size,
					boolean_t		internal,
					boolean_t		init,
					boolean_t		check_named);
__private_extern__ void		vm_object_cluster_size(
					vm_object_t		object,
					vm_object_offset_t	*start,
					vm_size_t		*length,
					vm_object_fault_info_t	fault_info);
__private_extern__ kern_return_t vm_object_populate_with_private(
					vm_object_t		object,
					vm_object_offset_t	offset,
					ppnum_t			phys_page,
					vm_size_t		size);
extern kern_return_t	adjust_vm_object_cache(
				vm_size_t	oval,
				vm_size_t	nval);
extern kern_return_t	vm_object_page_op(
				vm_object_t		object,
				vm_object_offset_t	offset,
				int			ops,
				ppnum_t			*phys_entry,
				int			*flags);
extern kern_return_t	vm_object_range_op(
				vm_object_t		object,
				vm_object_offset_t	offset_beg,
				vm_object_offset_t	offset_end,
				int			ops,
				int			*range);
/*
 *	Event waiting handling
 */

#define	VM_OBJECT_EVENT_INITIALIZED		0
#define	VM_OBJECT_EVENT_PAGER_READY		1
#define	VM_OBJECT_EVENT_PAGING_IN_PROGRESS	2
#define	VM_OBJECT_EVENT_LOCK_IN_PROGRESS	4
#define	VM_OBJECT_EVENT_UNCACHING		5
#define	VM_OBJECT_EVENT_COPY_CALL		6
#define	VM_OBJECT_EVENT_CACHING			7
#define	vm_object_assert_wait(object, event, interruptible)		\
	(((object)->all_wanted |= 1 << (event)),			\
	 assert_wait((event_t)((vm_offset_t)(object)+(event)),(interruptible)))
#define	vm_object_wait(object, event, interruptible)			\
	(vm_object_assert_wait((object),(event),(interruptible)),	\
	 vm_object_unlock(object),					\
	 thread_block(THREAD_CONTINUE_NULL))
#define thread_sleep_vm_object(object, event, interruptible)		\
	lck_rw_sleep(&(object)->Lock, LCK_SLEEP_DEFAULT, (event_t)(event), (interruptible))
#define vm_object_sleep(object, event, interruptible)			\
	(((object)->all_wanted |= 1 << (event)),			\
	 thread_sleep_vm_object((object),				\
		((vm_offset_t)(object)+(event)), (interruptible)))
#define	vm_object_wakeup(object, event)					\
	MACRO_BEGIN							\
	if ((object)->all_wanted & (1 << (event)))			\
		thread_wakeup((event_t)((vm_offset_t)(object) + (event))); \
	(object)->all_wanted &= ~(1 << (event));			\
	MACRO_END
#define	vm_object_set_wanted(object, event)				\
	MACRO_BEGIN							\
	((object)->all_wanted |= (1 << (event)));			\
	MACRO_END
#define	vm_object_wanted(object, event)					\
	((object)->all_wanted & (1 << (event)))
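
/*
 * Illustrative sketch (not part of this interface): waiting until a pager
 * becomes ready.  vm_object_sleep() records the interest bit in all_wanted
 * and sleeps on the per-event address; whichever path sets pager_ready is
 * expected to call vm_object_wakeup(object, VM_OBJECT_EVENT_PAGER_READY).
 * "example_wait_for_pager" is a hypothetical helper; the object lock is
 * assumed held on entry, and THREAD_UNINT is assumed from the scheduler
 * headers.
 */
#if 0	/* example only */
static void
example_wait_for_pager(vm_object_t object)
{
	while (!object->pager_ready) {
		vm_object_sleep(object, VM_OBJECT_EVENT_PAGER_READY,
				THREAD_UNINT);
		/* lck_rw_sleep() reacquires the object lock before returning */
	}
}
#endif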
/*
 *	Routines implemented as macros
 */
#ifdef VM_PIP_DEBUG
#include <libkern/OSDebug.h>
#define VM_PIP_DEBUG_BEGIN(object)					\
	MACRO_BEGIN							\
	if ((object)->paging_in_progress < VM_PIP_DEBUG_MAX_REFS) {	\
		int pip = (object)->paging_in_progress;		\
		(void) OSBacktrace(&(object)->pip_holders[pip].pip_retaddr[0], \
				   VM_PIP_DEBUG_STACK_FRAMES);		\
	}								\
	MACRO_END
#else	/* VM_PIP_DEBUG */
#define VM_PIP_DEBUG_BEGIN(object)
#endif	/* VM_PIP_DEBUG */
#define		vm_object_paging_begin(object)				\
	MACRO_BEGIN							\
	vm_object_lock_assert_exclusive((object));			\
	assert((object)->paging_in_progress >= 0);			\
	VM_PIP_DEBUG_BEGIN((object));					\
	(object)->paging_in_progress++;					\
	MACRO_END
#define		vm_object_paging_end(object)				\
	MACRO_BEGIN							\
	vm_object_lock_assert_exclusive((object));			\
	assert((object)->paging_in_progress > 0);			\
	if (--(object)->paging_in_progress == 0) {			\
		vm_object_wakeup(object,				\
			VM_OBJECT_EVENT_PAGING_IN_PROGRESS);		\
	}								\
	MACRO_END
#define	vm_object_paging_wait(object, interruptible)			\
	MACRO_BEGIN							\
	vm_object_lock_assert_exclusive((object));			\
	while ((object)->paging_in_progress != 0) {			\
		wait_result_t  _wr;					\
		_wr = vm_object_sleep((object),				\
				VM_OBJECT_EVENT_PAGING_IN_PROGRESS,	\
				(interruptible));			\
		/*XXX if ((interruptible) && (_wr != THREAD_AWAKENED))*/\
		/*XXX break; */						\
	}								\
	MACRO_END
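
/*
 * Illustrative sketch (not part of this interface): bracketing a paging
 * operation.  vm_object_paging_begin()/vm_object_paging_end() keep the
 * object from being collapsed, terminated or destroyed while I/O is in
 * flight; the exclusive object lock is required around both calls.
 * "example_paging_bracket" is a hypothetical helper.
 */
#if 0	/* example only */
static void
example_paging_bracket(vm_object_t object)
{
	vm_object_lock(object);
	vm_object_paging_begin(object);
	vm_object_unlock(object);

	/* ... issue the pagein/pageout against object->pager ... */

	vm_object_lock(object);
	vm_object_paging_end(object);	/* wakes waiters when count hits 0 */
	vm_object_unlock(object);
}
#endif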
#define OBJECT_LOCK_SHARED	0
#define OBJECT_LOCK_EXCLUSIVE	1
extern lck_grp_t	vm_object_lck_grp;
extern lck_grp_attr_t	vm_object_lck_grp_attr;
extern lck_attr_t	vm_object_lck_attr;
extern lck_attr_t	kernel_object_lck_attr;

extern vm_object_t	vm_pageout_scan_wants_object;
extern void		vm_object_lock(vm_object_t);
extern boolean_t	vm_object_lock_try(vm_object_t);
extern void		vm_object_lock_shared(vm_object_t);
extern boolean_t	vm_object_lock_try_shared(vm_object_t);
/*
 *	Object locking macros
 */

#define vm_object_lock_init(object)					\
	lck_rw_init(&(object)->Lock, &vm_object_lck_grp,		\
		    (((object) == kernel_object ||			\
		      (object) == vm_submap_object) ?			\
		     &kernel_object_lck_attr :				\
		     &vm_object_lck_attr))
#define vm_object_lock_destroy(object)	lck_rw_destroy(&(object)->Lock, &vm_object_lck_grp)

#define vm_object_unlock(object)	lck_rw_done(&(object)->Lock)
#define vm_object_lock_upgrade(object)	lck_rw_lock_shared_to_exclusive(&(object)->Lock)
#define vm_object_lock_try_scan(object)	lck_rw_try_lock_exclusive(&(object)->Lock)
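
/*
 * Illustrative sketch (not part of this interface): taking the lock shared
 * for a read-mostly path and upgrading when a modification turns out to be
 * needed.  lck_rw_lock_shared_to_exclusive() returns FALSE when the shared
 * hold had to be dropped, in which case the lock must be retaken.
 * "example_lock_upgrade" is a hypothetical helper.
 */
#if 0	/* example only */
static void
example_lock_upgrade(vm_object_t object)
{
	vm_object_lock_shared(object);
	if (!vm_object_lock_upgrade(object)) {
		/* shared hold was lost; start over with an exclusive lock */
		vm_object_lock(object);
	}
	vm_object_lock_assert_exclusive(object);
	vm_object_unlock(object);
}
#endif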
/*
 * CAUTION: the following vm_object_lock_assert_held*() macros merely
 * check if anyone is holding the lock, but the holder may not necessarily
 * be the caller.
 */
#if	MACH_ASSERT || DEBUG
#define vm_object_lock_assert_held(object) \
	lck_rw_assert(&(object)->Lock, LCK_RW_ASSERT_HELD)
#define vm_object_lock_assert_shared(object) \
	lck_rw_assert(&(object)->Lock, LCK_RW_ASSERT_SHARED)
#define vm_object_lock_assert_exclusive(object) \
	lck_rw_assert(&(object)->Lock, LCK_RW_ASSERT_EXCLUSIVE)
#else	/* MACH_ASSERT || DEBUG */
#define vm_object_lock_assert_held(object)
#define vm_object_lock_assert_shared(object)
#define vm_object_lock_assert_exclusive(object)
#endif	/* MACH_ASSERT || DEBUG */
#define vm_object_round_page(x)	(((vm_object_offset_t)(x) + PAGE_MASK) & ~((signed)PAGE_MASK))
#define vm_object_trunc_page(x)	((vm_object_offset_t)(x) & ~((signed)PAGE_MASK))
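
/*
 * Illustrative sketch (not part of this interface): converting an arbitrary
 * byte range into the page-aligned range that covers it, using 64-bit
 * vm_object_offset_t arithmetic rather than the pointer-sized round_page().
 * "example_align_range" is a hypothetical helper; the numbers in the
 * comments assume 4 KB pages.
 */
#if 0	/* example only */
static void
example_align_range(vm_object_offset_t offset, vm_object_size_t size,
		    vm_object_offset_t *start, vm_object_offset_t *end)
{
	*start = vm_object_trunc_page(offset);		/* e.g. 0x1234 -> 0x1000 */
	*end   = vm_object_round_page(offset + size);	/* e.g. 0x2456 -> 0x3000 */
}
#endif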
#endif	/* _VM_VM_OBJECT_H_ */