/*
 * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 *	Author:	Avadis Tevanian, Jr., Michael Wayne Young
 *
 *	Virtual memory object module definitions.
 */
#ifndef	_VM_VM_OBJECT_H_
#define _VM_VM_OBJECT_H_

#include <mach_pagemap.h>
#include <task_swapper.h>

#include <mach/kern_return.h>
#include <mach/boolean.h>
#include <mach/memory_object_types.h>
#include <mach/port.h>
#include <mach/vm_prot.h>
#include <mach/vm_param.h>
#include <mach/machine/vm_types.h>
#include <kern/queue.h>
#include <kern/lock.h>
#include <kern/locks.h>
#include <kern/assert.h>
#include <kern/misc_protos.h>
#include <kern/macro_help.h>
#include <ipc/ipc_types.h>
#if	MACH_PAGEMAP
#include <vm/vm_external.h>
#endif	/* MACH_PAGEMAP */
/*
 *	vm_object_t		Virtual memory object.
 *	vm_object_fault_info_t	Used to determine cluster size.
 */
struct vm_object_fault_info {
	vm_size_t	cluster_size;
	vm_behavior_t	behavior;
	vm_map_offset_t	lo_offset;
	vm_map_offset_t	hi_offset;
};
struct vm_object {
	queue_head_t		memq;		/* Resident memory */
	lck_rw_t		Lock;		/* Synchronization */

	vm_object_size_t	size;		/* Object size (only valid ... */
	struct vm_page		*memq_hint;
	int			ref_count;	/* Number of references */
#if	TASK_SWAPPER
	int			res_count;	/* Residency references (swap)*/
#endif	/* TASK_SWAPPER */
	unsigned int		resident_page_count;
						/* number of resident pages */
	struct vm_object	*copy;		/* Object that should receive
						 * a copy of my changed pages,
						 * for copy_delay, or just the
						 * temporary object that
						 * shadows this object, for ... */
	struct vm_object	*shadow;	/* My shadow */
	vm_object_offset_t	shadow_offset;	/* Offset into shadow */

	memory_object_t		pager;		/* Where to get data */
	vm_object_offset_t	paging_offset;	/* Offset into memory object */
	memory_object_control_t	pager_control;	/* Where data comes back */

	memory_object_copy_strategy_t
				copy_strategy;	/* How to handle data copy */
	int			paging_in_progress;
						/* The memory object ports are
						 * being used (e.g., for pagein
						 * or pageout) -- don't change
						 * any of these fields (i.e.,
						 * don't collapse, destroy or ... */
	unsigned int
	/* boolean_t array */	all_wanted:11,	/* Bit array of "want to be
						 * awakened" notations.  See
						 * VM_OBJECT_EVENT_* items ... */
	/* boolean_t */		pager_created:1,	/* Has pager been created? */
	/* boolean_t */		pager_initialized:1,	/* Are fields ready to use? */
	/* boolean_t */		pager_ready:1,		/* Will pager take requests? */

	/* boolean_t */		pager_trusted:1,/* The pager for this object
						 * is trusted. This is true for
						 * all internal objects (backed
						 * by the default pager) */
	/* boolean_t */		can_persist:1,	/* The kernel may keep the data
						 * for this object (and rights
						 * to the memory object) after
						 * all address map references ... */
	/* boolean_t */		internal:1,	/* Created by the kernel (and
						 * therefore, managed by the
						 * default memory manager) */
	/* boolean_t */		temporary:1,	/* Permanent objects may be
						 * changed externally by the
						 * memory manager, and changes
						 * made in memory must be
						 * reflected back to the memory
						 * manager.  Temporary objects ... */
	/* boolean_t */		private:1,	/* magic device_pager object,
						 * holds private pages only */
	/* boolean_t */		pageout:1,	/* pageout object. contains
						 * private pages that refer to
						 * a real memory object. */
	/* boolean_t */		alive:1,	/* Not yet terminated */

	/* boolean_t */		purgable:2,	/* Purgable state.  See ... */

	/* boolean_t */		shadowed:1,	/* Shadow may exist */
	/* boolean_t */		silent_overwrite:1,
						/* Allow full page overwrite
						 * without data_request if ... */
	/* boolean_t */		advisory_pageout:1,
						/* Instead of sending page
						 * via OOL, just notify
						 * pager that the kernel
						 * wants to discard it, page
						 * remains in object */
	/* boolean_t */		true_share:1,
						/* This object is mapped
						 * in more than one place
						 * and hence cannot be ... */
	/* boolean_t */		terminating:1,
						/* Allows vm_object_lookup
						 * and vm_object_deallocate
						 * to special case their
						 * behavior when they are
						 * called as a result of
						 * page cleaning during ... */
	/* boolean_t */		named:1,	/* Enforces an internal
						 * naming convention, by
						 * calling the right routines ...
						 * destruction, UBC references
						 * against the vm_object are ... */
	/* boolean_t */		shadow_severed:1,
						/* When a permanent object
						 * backing a COW goes away
						 * unexpectedly.  This bit
						 * allows vm_fault to return
						 * an error rather than a ... */
	/* boolean_t */		phys_contiguous:1,
						/* Memory is wired and
						 * guaranteed physically
						 * contiguous.  However
						 * it is not device memory
						 * and obeys normal virtual
						 * memory rules w.r.t pmap ... */
	/* boolean_t */		nophyscache:1;
						/* When mapped at the
						 * pmap level, don't allow
						 * primary caching. (for ... */
	queue_chain_t		cached_list;	/* Attachment point for the
						 * list of objects cached as a
						 * result of their can_persist ... */

	queue_head_t		msr_q;		/* memory object synchronise ... */

	/*
	 * The following fields are not protected by any locks;
	 * they are updated via atomic compare and swap.
	 */
	vm_object_offset_t	last_alloc;	/* last allocation offset */
	int			sequential;	/* sequential access size */

	uint32_t		pages_created;
#if	MACH_PAGEMAP
	vm_external_map_t	existence_map;	/* bitmap of pages written to ... */
#endif	/* MACH_PAGEMAP */
	vm_offset_t		cow_hint;	/* last page present in
						 * shadow but not in object */
	struct vm_object	*paging_object;	/* object whose pages to be
						 * swapped out are temporarily
						 * put in the current object ... */

	/* hold object lock when altering */
	unsigned	int
		wimg_bits:8,	        /* cache WIMG bits */
		code_signed:1,		/* pages are signed and should be
					 * validated; the signatures are
					 * stored ... */
		mapping_in_progress:1,	/* pager being mapped/unmapped */
		not_in_use:22;		/* for expansion */

#if	UPL_DEBUG
	queue_head_t		uplq;		/* List of outstanding upls */
#endif /* UPL_DEBUG */
#if	VM_PIP_DEBUG
	/*
	 * Keep track of the stack traces for the first holders
	 * of a "paging_in_progress" reference for this VM object.
	 */
#define VM_PIP_DEBUG_STACK_FRAMES	25	/* depth of each stack trace */
#define VM_PIP_DEBUG_MAX_REFS		10	/* track that many references */
	struct __pip_backtrace {
		void *pip_retaddr[VM_PIP_DEBUG_STACK_FRAMES];
	} pip_holders[VM_PIP_DEBUG_MAX_REFS];
#endif	/* VM_PIP_DEBUG */

	queue_chain_t		objq;	/* object queue - currently used for purgable queues */
};
#define VM_PAGE_REMOVE(page)						\
	MACRO_BEGIN							\
	vm_page_t __page = (page);					\
	vm_object_t __object = __page->object;				\
	if (__page == __object->memq_hint) {				\
		vm_page_t	__new_hint;				\
		queue_entry_t	__qe;					\
		__qe = queue_next(&__page->listq);			\
		if (queue_end(&__object->memq, __qe)) {			\
			__qe = queue_prev(&__page->listq);		\
			if (queue_end(&__object->memq, __qe)) {		\
				__qe = NULL;				\
			}						\
		}							\
		__new_hint = (vm_page_t) __qe;				\
		__object->memq_hint = __new_hint;			\
	}								\
	queue_remove(&__object->memq, __page, vm_page_t, listq);	\
	MACRO_END

#define VM_PAGE_INSERT(page, object)					\
	MACRO_BEGIN							\
	vm_page_t __page = (page);					\
	vm_object_t __object = (object);				\
	queue_enter(&__object->memq, __page, vm_page_t, listq);	\
	__object->memq_hint = __page;					\
	MACRO_END
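
/*
 * Usage sketch (illustrative only; "mem" and "object" are hypothetical
 * variables): both macros assume the object lock is held for writing.
 * VM_PAGE_INSERT records the page as the new memq_hint; VM_PAGE_REMOVE
 * repairs the hint when the page being removed is the current hint.
 *
 *	vm_object_lock(object);
 *	VM_PAGE_INSERT(mem, object);	// mem becomes object->memq_hint
 *	...
 *	VM_PAGE_REMOVE(mem);		// hint moves to a neighboring page
 *	vm_object_unlock(object);
 */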
extern
vm_object_t	kernel_object;		/* the single kernel object */

extern
unsigned int	vm_object_absent_max;	/* maximum number of absent pages
					   at a time for each object */
# define	VM_MSYNC_INITIALIZED		0
# define	VM_MSYNC_SYNCHRONIZING		1
# define	VM_MSYNC_DONE			2

struct msync_req {
	queue_chain_t		msr_q;		/* object request queue */
	queue_chain_t		req_q;		/* vm_msync request queue */
	vm_object_offset_t	offset;
	vm_object_size_t	length;
	vm_object_t		object;		/* back pointer */
	decl_mutex_data(,	msync_req_lock)	/* Lock for this structure */
};

typedef struct msync_req	*msync_req_t;
#define MSYNC_REQ_NULL		((msync_req_t) 0)
/*
 *	Macros to allocate and free msync_reqs
 */
#define msync_req_alloc(msr)						\
	MACRO_BEGIN							\
	(msr) = (msync_req_t)kalloc(sizeof(struct msync_req));		\
	mutex_init(&(msr)->msync_req_lock, 0);				\
	(msr)->flag = VM_MSYNC_INITIALIZED;				\
	MACRO_END

#define msync_req_free(msr)						\
	(kfree((msr), sizeof(struct msync_req)))

#define msr_lock(msr)	mutex_lock(&(msr)->msync_req_lock)
#define msr_unlock(msr)	mutex_unlock(&(msr)->msync_req_lock)
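
/*
 * Usage sketch (illustrative only; "msr" is a hypothetical variable):
 * the expected life cycle of an msync_req, using only the macros above.
 *
 *	msync_req_t msr;
 *
 *	msync_req_alloc(msr);		// kalloc + mutex_init, flag = INITIALIZED
 *	msr_lock(msr);
 *	msr->flag = VM_MSYNC_SYNCHRONIZING;
 *	msr_unlock(msr);
 *	...
 *	msync_req_free(msr);		// kfree the request
 */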
/*
 *	Declare procedures that operate on VM objects.
 */
__private_extern__ void		vm_object_bootstrap(void) __attribute__((section("__TEXT, initcode")));

__private_extern__ void		vm_object_init(void);

__private_extern__ void		vm_object_init_lck_grp(void);

__private_extern__ void		vm_object_reaper_init(void);

__private_extern__ vm_object_t	vm_object_allocate(
					vm_object_size_t	size);

__private_extern__ void		_vm_object_allocate(
					vm_object_size_t	size,
					/* ... */);
#if	TASK_SWAPPER

__private_extern__ void		vm_object_res_reference(
					vm_object_t		object);
__private_extern__ void		vm_object_res_deallocate(
					vm_object_t		object);
#define	VM_OBJ_RES_INCR(object)	(object)->res_count++
#define	VM_OBJ_RES_DECR(object)	(object)->res_count--

#else	/* TASK_SWAPPER */

#define	VM_OBJ_RES_INCR(object)
#define	VM_OBJ_RES_DECR(object)
#define vm_object_res_reference(object)
#define vm_object_res_deallocate(object)

#endif	/* TASK_SWAPPER */
#define	vm_object_reference_locked(object)		\
	MACRO_BEGIN					\
	vm_object_t RLObject = (object);		\
	vm_object_lock_assert_exclusive(object);	\
	assert((RLObject)->ref_count > 0);		\
	(RLObject)->ref_count++;			\
	assert((RLObject)->ref_count > 1);		\
	vm_object_res_reference(RLObject);		\
	MACRO_END

#define	vm_object_reference_shared(object)				\
	MACRO_BEGIN							\
	vm_object_t RLObject = (object);				\
	vm_object_lock_assert_shared(object);				\
	assert((RLObject)->ref_count > 0);				\
	OSAddAtomic(1, (SInt32 *)&(RLObject)->ref_count);		\
	assert((RLObject)->ref_count > 1);				\
	/* XXX we would need an atomic version of the following ... */	\
	vm_object_res_reference(RLObject);				\
	MACRO_END
__private_extern__ void		vm_object_reference(
					vm_object_t	object);

#if	!MACH_ASSERT

#define	vm_object_reference(object)			\
	MACRO_BEGIN					\
	vm_object_t RObject = (object);			\
	if (RObject) {					\
		vm_object_lock(RObject);		\
		vm_object_reference_locked(RObject);	\
		vm_object_unlock(RObject);		\
	}						\
	MACRO_END

#endif	/* MACH_ASSERT */
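
/*
 * Usage sketch (illustrative only; "object" is a hypothetical variable):
 * taking an extra reference.  vm_object_reference() locks the object
 * itself; callers that already hold the lock use the _locked (exclusive
 * lock) or _shared (shared lock) variants above.
 *
 *	vm_object_lock(object);
 *	vm_object_reference_locked(object);	// ref_count++ under the lock
 *	vm_object_unlock(object);
 *	...
 *	vm_object_deallocate(object);		// drop the reference later
 */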
__private_extern__ void		vm_object_deallocate(
					/* ... */);

__private_extern__ kern_return_t vm_object_release_name(
					/* ... */);

__private_extern__ void		vm_object_pmap_protect(
					/* ... */
					vm_object_offset_t	offset,
					vm_object_size_t	size,
					/* ... */
					vm_map_offset_t		pmap_start,
					/* ... */);

__private_extern__ void		vm_object_page_remove(
					/* ... */
					vm_object_offset_t	start,
					vm_object_offset_t	end);
__private_extern__ void		vm_object_deactivate_pages(
					/* ... */
					vm_object_offset_t	offset,
					vm_object_size_t	size,
					boolean_t		kill_page);

__private_extern__ unsigned int	vm_object_purge(
					/* ... */);

__private_extern__ kern_return_t vm_object_purgable_control(
					/* ... */
					vm_purgable_t		control,
					/* ... */);
__private_extern__ boolean_t	vm_object_coalesce(
					vm_object_t		prev_object,
					vm_object_t		next_object,
					vm_object_offset_t	prev_offset,
					vm_object_offset_t	next_offset,
					vm_object_size_t	prev_size,
					vm_object_size_t	next_size);
__private_extern__ boolean_t	vm_object_shadow(
					/* ... */
					vm_object_offset_t	*offset,
					vm_object_size_t	length);

__private_extern__ void		vm_object_collapse(
					/* ... */
					vm_object_offset_t	offset,
					boolean_t		can_bypass);
__private_extern__ boolean_t	vm_object_copy_quickly(
					vm_object_t		*_object,
					vm_object_offset_t	src_offset,
					vm_object_size_t	size,
					boolean_t		*_src_needs_copy,
					boolean_t		*_dst_needs_copy);

__private_extern__ kern_return_t vm_object_copy_strategically(
					vm_object_t		src_object,
					vm_object_offset_t	src_offset,
					vm_object_size_t	size,
					vm_object_t		*dst_object,
					vm_object_offset_t	*dst_offset,
					boolean_t		*dst_needs_copy);

__private_extern__ kern_return_t vm_object_copy_slowly(
					vm_object_t		src_object,
					vm_object_offset_t	src_offset,
					vm_object_size_t	size,
					/* ... */
					vm_object_t		*_result_object);
__private_extern__ vm_object_t	vm_object_copy_delayed(
					vm_object_t		src_object,
					vm_object_offset_t	src_offset,
					vm_object_size_t	size,
					boolean_t		src_object_shared);

__private_extern__ kern_return_t vm_object_destroy(
					/* ... */
					kern_return_t		reason);
__private_extern__ void		vm_object_pager_create(
					/* ... */);

__private_extern__ void		vm_object_page_map(
					/* ... */
					vm_object_offset_t	offset,
					vm_object_size_t	size,
					vm_object_offset_t	(*map_fn)
						(void *, vm_object_offset_t),
					/* ... */);

__private_extern__ kern_return_t vm_object_upl_request(
					/* ... */
					vm_object_offset_t	offset,
					/* ... */
					upl_page_info_t		*page_info,
					/* ... */);

__private_extern__ kern_return_t vm_object_transpose(
					/* ... */
					vm_object_size_t	transpose_size);
__private_extern__ boolean_t	vm_object_sync(
					/* ... */
					vm_object_offset_t	offset,
					vm_object_size_t	size,
					boolean_t		should_flush,
					boolean_t		should_return,
					boolean_t		should_iosync);

__private_extern__ kern_return_t vm_object_update(
					/* ... */
					vm_object_offset_t	offset,
					vm_object_size_t	size,
					vm_object_offset_t	*error_offset,
					/* ... */
					memory_object_return_t	should_return,
					/* ... */);

__private_extern__ kern_return_t vm_object_lock_request(
					/* ... */
					vm_object_offset_t	offset,
					vm_object_size_t	size,
					memory_object_return_t	should_return,
					/* ... */);

__private_extern__ vm_object_t	vm_object_enter(
					memory_object_t		pager,
					vm_object_size_t	size,
					/* ... */
					boolean_t		check_named);
__private_extern__ void		vm_object_cluster_size(
					/* ... */
					vm_object_offset_t	*start,
					/* ... */
					vm_object_fault_info_t	fault_info);

__private_extern__ kern_return_t vm_object_populate_with_private(
					/* ... */
					vm_object_offset_t	offset,
					/* ... */);

extern kern_return_t		adjust_vm_object_cache(
					/* ... */);

extern kern_return_t		vm_object_page_op(
					/* ... */
					vm_object_offset_t	offset,
					/* ... */);

extern kern_return_t		vm_object_range_op(
					/* ... */
					vm_object_offset_t	offset_beg,
					vm_object_offset_t	offset_end,
					/* ... */);
/*
 *	Event waiting handling
 */

#define	VM_OBJECT_EVENT_INITIALIZED		0
#define	VM_OBJECT_EVENT_PAGER_READY		1
#define	VM_OBJECT_EVENT_PAGING_IN_PROGRESS	2
#define	VM_OBJECT_EVENT_MAPPING_IN_PROGRESS	3
#define	VM_OBJECT_EVENT_LOCK_IN_PROGRESS	4
#define	VM_OBJECT_EVENT_UNCACHING		5
#define	VM_OBJECT_EVENT_COPY_CALL		6
#define	VM_OBJECT_EVENT_CACHING			7
#define	vm_object_assert_wait(object, event, interruptible)		\
	(((object)->all_wanted |= 1 << (event)),			\
	 assert_wait((event_t)((vm_offset_t)(object)+(event)),(interruptible)))

#define	vm_object_wait(object, event, interruptible)			\
	(vm_object_assert_wait((object),(event),(interruptible)),	\
	vm_object_unlock(object),					\
	thread_block(THREAD_CONTINUE_NULL))

#define thread_sleep_vm_object(object, event, interruptible)		\
	lck_rw_sleep(&(object)->Lock, LCK_SLEEP_DEFAULT, (event_t)(event), (interruptible))

#define vm_object_sleep(object, event, interruptible)			\
	(((object)->all_wanted |= 1 << (event)),			\
	 thread_sleep_vm_object((object),				\
		((vm_offset_t)(object)+(event)), (interruptible)))

#define	vm_object_wakeup(object, event)					\
	MACRO_BEGIN							\
	if ((object)->all_wanted & (1 << (event)))			\
		thread_wakeup((event_t)((vm_offset_t)(object) + (event))); \
	(object)->all_wanted &= ~(1 << (event));			\
	MACRO_END

#define	vm_object_set_wanted(object, event)				\
	MACRO_BEGIN							\
	((object)->all_wanted |= (1 << (event)));			\
	MACRO_END

#define	vm_object_wanted(object, event)					\
	((object)->all_wanted & (1 << (event)))
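
/*
 * Usage sketch (illustrative only; "object" is a hypothetical variable):
 * the event protocol built on all_wanted.  A waiter marks the event as
 * wanted and sleeps on an address derived from the object; the thread
 * releasing the resource wakes any waiters for that event.  Both sides
 * hold the object lock.
 *
 *	// waiter, with the object locked:
 *	while (!object->pager_ready)
 *		vm_object_sleep(object, VM_OBJECT_EVENT_PAGER_READY,
 *				THREAD_UNINT);
 *
 *	// initializer, with the object locked:
 *	object->pager_ready = TRUE;
 *	vm_object_wakeup(object, VM_OBJECT_EVENT_PAGER_READY);
 */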
/*
 *	Routines implemented as macros
 */
#if	VM_PIP_DEBUG
#include <libkern/OSDebug.h>
#define VM_PIP_DEBUG_BEGIN(object)					\
	MACRO_BEGIN							\
	if ((object)->paging_in_progress < VM_PIP_DEBUG_MAX_REFS) {	\
		int pip = (object)->paging_in_progress;			\
		(void) OSBacktrace(&(object)->pip_holders[pip].pip_retaddr[0], \
				   VM_PIP_DEBUG_STACK_FRAMES);		\
	}								\
	MACRO_END
#else	/* VM_PIP_DEBUG */
#define VM_PIP_DEBUG_BEGIN(object)
#endif	/* VM_PIP_DEBUG */
#define		vm_object_paging_begin(object)				\
	MACRO_BEGIN							\
	vm_object_lock_assert_exclusive((object));			\
	assert((object)->paging_in_progress >= 0);			\
	VM_PIP_DEBUG_BEGIN((object));					\
	(object)->paging_in_progress++;					\
	MACRO_END

#define		vm_object_paging_end(object)				\
	MACRO_BEGIN							\
	vm_object_lock_assert_exclusive((object));			\
	assert((object)->paging_in_progress > 0);			\
	if (--(object)->paging_in_progress == 0) {			\
		vm_object_wakeup(object,				\
				 VM_OBJECT_EVENT_PAGING_IN_PROGRESS);	\
	}								\
	MACRO_END

#define		vm_object_paging_wait(object, interruptible)		\
	MACRO_BEGIN							\
	vm_object_lock_assert_exclusive((object));			\
	while ((object)->paging_in_progress != 0) {			\
		wait_result_t	_wr;					\
									\
		_wr = vm_object_sleep((object),				\
			VM_OBJECT_EVENT_PAGING_IN_PROGRESS,		\
			(interruptible));				\
									\
		/*XXX if ((interruptible) && (_wr != THREAD_AWAKENED))*/\
		/*XXX break; */						\
	}								\
	MACRO_END
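
/*
 * Usage sketch (illustrative only; "object" is a hypothetical variable):
 * bracketing a paging operation.  The counter keeps the object from being
 * collapsed or terminated while its pager is in use; the final
 * vm_object_paging_end() wakes anyone blocked in vm_object_paging_wait().
 *
 *	vm_object_lock(object);
 *	vm_object_paging_begin(object);	// paging_in_progress++
 *	vm_object_unlock(object);
 *	...				// issue the pagein/pageout
 *	vm_object_lock(object);
 *	vm_object_paging_end(object);	// may wake PAGING_IN_PROGRESS waiters
 *	vm_object_unlock(object);
 */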
#define vm_object_mapping_begin(object)					\
	MACRO_BEGIN							\
	vm_object_lock_assert_exclusive((object));			\
	assert(! (object)->mapping_in_progress);			\
	(object)->mapping_in_progress = TRUE;				\
	MACRO_END

#define vm_object_mapping_end(object)					\
	MACRO_BEGIN							\
	vm_object_lock_assert_exclusive((object));			\
	assert((object)->mapping_in_progress);				\
	(object)->mapping_in_progress = FALSE;				\
	vm_object_wakeup((object),					\
			 VM_OBJECT_EVENT_MAPPING_IN_PROGRESS);		\
	MACRO_END

#define vm_object_mapping_wait(object, interruptible)			\
	MACRO_BEGIN							\
	vm_object_lock_assert_exclusive((object));			\
	while ((object)->mapping_in_progress) {				\
		wait_result_t	_wr;					\
									\
		_wr = vm_object_sleep((object),				\
			VM_OBJECT_EVENT_MAPPING_IN_PROGRESS,		\
			(interruptible));				\
		/*XXX if ((interruptible) && (_wr != THREAD_AWAKENED))*/\
		/*XXX break; */						\
	}								\
	assert(!(object)->mapping_in_progress);				\
	MACRO_END
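
/*
 * Usage sketch (illustrative only; "object" is a hypothetical variable):
 * serializing pager map/unmap transitions.  Only one thread may have
 * mapping_in_progress set; others wait on the MAPPING_IN_PROGRESS event.
 *
 *	vm_object_lock(object);
 *	vm_object_mapping_wait(object, THREAD_UNINT);	// wait out another mapper
 *	vm_object_mapping_begin(object);
 *	vm_object_unlock(object);
 *	...				// map or unmap the pager
 *	vm_object_lock(object);
 *	vm_object_mapping_end(object);			// wakes waiting threads
 *	vm_object_unlock(object);
 */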
#define OBJECT_LOCK_SHARED	0
#define OBJECT_LOCK_EXCLUSIVE	1

extern lck_grp_t	vm_object_lck_grp;
extern lck_grp_attr_t	vm_object_lck_grp_attr;
extern lck_attr_t	vm_object_lck_attr;
extern lck_attr_t	kernel_object_lck_attr;

extern vm_object_t	vm_pageout_scan_wants_object;

extern void		vm_object_lock(vm_object_t);
extern boolean_t	vm_object_lock_try(vm_object_t);
extern void		vm_object_lock_shared(vm_object_t);
extern boolean_t	vm_object_lock_try_shared(vm_object_t);
/*
 *	Object locking macros
 */

#define vm_object_lock_init(object)					\
	lck_rw_init(&(object)->Lock, &vm_object_lck_grp,		\
		    (((object) == kernel_object ||			\
		      (object) == vm_submap_object) ?			\
		     &kernel_object_lck_attr :				\
		     &vm_object_lck_attr))
#define vm_object_lock_destroy(object)	lck_rw_destroy(&(object)->Lock, &vm_object_lck_grp)

#define vm_object_unlock(object)	lck_rw_done(&(object)->Lock)
#define vm_object_lock_upgrade(object)	lck_rw_lock_shared_to_exclusive(&(object)->Lock)
#define vm_object_lock_try_scan(object)	lck_rw_try_lock_exclusive(&(object)->Lock)
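
/*
 * Usage sketch (illustrative only; "object" and "must_modify" are
 * hypothetical): the object lock is a reader/writer lock.  A shared
 * holder that needs to modify the object can attempt an upgrade; if the
 * upgrade fails, the shared hold has been dropped and the lock must be
 * retaken exclusively.
 *
 *	vm_object_lock_shared(object);
 *	if (must_modify && !vm_object_lock_upgrade(object)) {
 *		// upgrade failed: shared lock was released, start over
 *		vm_object_lock(object);
 *	}
 *	...
 *	vm_object_unlock(object);
 */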
/*
 * CAUTION: the following vm_object_lock_assert_held*() macros merely
 * check if anyone is holding the lock, but the holder may not necessarily
 * be the caller.
 */
#if	MACH_ASSERT || DEBUG
#define vm_object_lock_assert_held(object) \
		lck_rw_assert(&(object)->Lock, LCK_RW_ASSERT_HELD)
#define vm_object_lock_assert_shared(object)	\
		lck_rw_assert(&(object)->Lock, LCK_RW_ASSERT_SHARED)
#define vm_object_lock_assert_exclusive(object) \
		lck_rw_assert(&(object)->Lock, LCK_RW_ASSERT_EXCLUSIVE)
#else	/* MACH_ASSERT || DEBUG */
#define vm_object_lock_assert_held(object)
#define vm_object_lock_assert_shared(object)
#define vm_object_lock_assert_exclusive(object)
#endif	/* MACH_ASSERT || DEBUG */
#define vm_object_round_page(x) (((vm_object_offset_t)(x) + PAGE_MASK) & ~((signed)PAGE_MASK))
#define vm_object_trunc_page(x) ((vm_object_offset_t)(x) & ~((signed)PAGE_MASK))
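
/*
 * Worked example (illustrative only): with 4 KB pages (PAGE_MASK == 0xFFF),
 * these macros bracket an offset to page boundaries:
 *
 *	vm_object_trunc_page(0x1234) == 0x1000
 *	vm_object_round_page(0x1234) == 0x2000
 *	vm_object_round_page(0x2000) == 0x2000	// already page aligned
 */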
#endif	/* _VM_VM_OBJECT_H_ */