/*
- * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved.
*
* @APPLE_LICENSE_HEADER_START@
*
#include <mach/memory_object_types.h>
#include <mach/port.h>
#include <mach/vm_prot.h>
+#include <mach/vm_param.h>
#include <mach/machine/vm_types.h>
#include <kern/queue.h>
#include <kern/lock.h>
#include <kern/assert.h>
+#include <kern/misc_protos.h>
#include <kern/macro_help.h>
#include <ipc/ipc_types.h>
#include <vm/pmap.h>
-#include <kern/misc_protos.h>
#if MACH_PAGEMAP
#include <vm/vm_external.h>
#endif /* MACH_PAGEMAP */
-typedef struct ipc_port * pager_request_t;
-#define PAGER_REQUEST_NULL ((pager_request_t) 0)
+struct vm_page;
/*
* Types defined:
*
* vm_object_t Virtual memory object.
- *
- * We use "struct ipc_port *" instead of "ipc_port_t"
- * to avoid include file circularities.
*/
-typedef unsigned long long vm_object_size_t;
-
-
struct vm_object {
queue_head_t memq; /* Resident memory */
decl_mutex_data(, Lock) /* Synchronization */
vm_object_size_t size; /* Object size (only valid
* if internal)
*/
- vm_object_size_t frozen_size; /* How much has been marked
- * copy-on-write (only
- * valid if copy_symmetric)
- */
+ struct vm_page *memq_hint;
int ref_count; /* Number of references */
#if TASK_SWAPPER
int res_count; /* Residency references (swap)*/
#endif /* TASK_SWAPPER */
struct vm_object *shadow; /* My shadow */
vm_object_offset_t shadow_offset; /* Offset into shadow */
- struct ipc_port *pager; /* Where to get data */
+ memory_object_t pager; /* Where to get data */
vm_object_offset_t paging_offset; /* Offset into memory object */
- pager_request_t pager_request; /* Where data comes back */
+ memory_object_control_t pager_control; /* Where data comes back */
memory_object_copy_strategy_t
copy_strategy; /* How to handle data copy */
* asserted.
*/
- unsigned int paging_in_progress;
+ int paging_in_progress;
/* The memory object ports are
* being used (e.g., for pagein
* or pageout) -- don't change
* a real memory object. */
/* boolean_t */ alive:1, /* Not yet terminated */
- /* boolean_t */ lock_in_progress:1,
- /* Is a multi-page lock
- * request in progress?
- */
- /* boolean_t */ lock_restart:1,
- /* Should lock request in
- * progress restart search?
+ /* boolean_t */ purgable:2, /* Purgable state. See
+ * VM_OBJECT_PURGABLE_*
+ * items below.
*/
/* boolean_t */ shadowed:1, /* Shadow may exist */
/* boolean_t */ silent_overwrite:1,
* an error rather than a
* zero filled page.
*/
- /* boolean_t */ phys_contiguous:1;
+ /* boolean_t */ phys_contiguous:1,
/* Memory is wired and
* guaranteed physically
* contiguous. However
* memory rules w.r.t pmap
* access bits.
*/
+ /* boolean_t */ nophyscache:1;
+ /* When mapped at the
+ * pmap level, don't allow
+ * primary caching. (for
+ * I/O)
+ */
request queue */
vm_object_offset_t last_alloc; /* last allocation offset */
+ vm_object_offset_t sequential; /* sequential access size */
vm_size_t cluster_size; /* size of paging cluster */
#if MACH_PAGEMAP
vm_external_map_t existence_map; /* bitmap of pages written to
* backing storage */
#endif /* MACH_PAGEMAP */
+ vm_offset_t cow_hint; /* last page present in */
+ /* shadow but not in object */
#if MACH_ASSERT
struct vm_object *paging_object; /* object in which pages to be
 * swapped out are temporarily
 * placed
 */
#endif
-#ifdef UBC_DEBUG
+ /* hold object lock when altering */
+ unsigned int
+ wimg_bits:8, /* cache WIMG bits */
+ not_in_use:24; /* for expansion */
+#ifdef UPL_DEBUG
queue_head_t uplq; /* List of outstanding upls */
-#endif /* UBC_DEBUG */
+#endif /* UPL_DEBUG */
+
+#ifdef VM_PIP_DEBUG
+/*
+ * Keep track of the stack traces for the first holders
+ * of a "paging_in_progress" reference for this VM object.
+ */
+#define VM_PIP_DEBUG_STACK_FRAMES 25 /* depth of each stack trace */
+#define VM_PIP_DEBUG_MAX_REFS 10 /* track that many references */
+ struct __pip_backtrace {
+ void *pip_retaddr[VM_PIP_DEBUG_STACK_FRAMES];
+ } pip_holders[VM_PIP_DEBUG_MAX_REFS];
+#endif /* VM_PIP_DEBUG */
};
-extern
+#define VM_PAGE_REMOVE(page) \
+ MACRO_BEGIN \
+ vm_page_t __page = (page); \
+ vm_object_t __object = __page->object; \
+ if (__page == __object->memq_hint) { \
+ vm_page_t __new_hint; \
+ queue_entry_t __qe; \
+ __qe = queue_next(&__page->listq); \
+ if (queue_end(&__object->memq, __qe)) { \
+ __qe = queue_prev(&__page->listq); \
+ if (queue_end(&__object->memq, __qe)) { \
+ __qe = NULL; \
+ } \
+ } \
+ __new_hint = (vm_page_t) __qe; \
+ __object->memq_hint = __new_hint; \
+ } \
+ queue_remove(&__object->memq, __page, vm_page_t, listq); \
+ MACRO_END
+
+#define VM_PAGE_INSERT(page, object) \
+ MACRO_BEGIN \
+ vm_page_t __page = (page); \
+ vm_object_t __object = (object); \
+ queue_enter(&__object->memq, __page, vm_page_t, listq); \
+ __object->memq_hint = __page; \
+ MACRO_END
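+
+/*
+ * Illustrative sketch (not part of this interface): callers manipulate an
+ * object's resident-page queue through these macros rather than touching
+ * "memq" directly, so that "memq_hint" stays consistent.  A page-move
+ * path might look like this, with both objects locked:
+ *
+ *	vm_page_t m;
+ *
+ *	VM_PAGE_REMOVE(m);		// unlink from m->object, repair hint
+ *	m->object = new_object;		// the macros don't touch back-pointers
+ *	VM_PAGE_INSERT(m, new_object);	// enqueue and become the new hint
+ *
+ * Note that VM_PAGE_REMOVE reads __page->object, so the page's back-pointer
+ * must still name the old object when it runs.
+ */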
+
+__private_extern__
vm_object_t kernel_object; /* the single kernel object */
-int vm_object_absent_max; /* maximum number of absent pages
+__private_extern__
+unsigned int vm_object_absent_max; /* maximum number of absent pages
at a time for each object */
#define VM_MSYNC_INITIALIZED 0
#define msync_req_alloc(msr) \
MACRO_BEGIN \
(msr) = (msync_req_t)kalloc(sizeof(struct msync_req)); \
- mutex_init(&(msr)->msync_req_lock, ETAP_VM_MSYNC); \
+ mutex_init(&(msr)->msync_req_lock, 0); \
(msr)->flag = VM_MSYNC_INITIALIZED; \
MACRO_END
#define msync_req_free(msr) \
- (kfree((vm_offset_t)(msr), sizeof(struct msync_req)))
+ (kfree((msr), sizeof(struct msync_req)))
#define msr_lock(msr) mutex_lock(&(msr)->msync_req_lock)
#define msr_unlock(msr) mutex_unlock(&(msr)->msync_req_lock)
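+
+/*
+ * Typical lifecycle, as a sketch: the msync path allocates a request,
+ * queues it on the object, and frees it once the synchronization
+ * completes.  Roughly:
+ *
+ *	msync_req_t msr;
+ *
+ *	msync_req_alloc(msr);	// kalloc's the struct, inits lock and flag
+ *	msr_lock(msr);
+ *	... fill in the request and queue it on the object's msr_q ...
+ *	msr_unlock(msr);
+ *	...			// later, once complete and dequeued
+ *	msync_req_free(msr);	// kfree's the struct
+ */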
* Declare procedures that operate on VM objects.
*/
-extern void vm_object_bootstrap(void);
+__private_extern__ void vm_object_bootstrap(void);
-extern void vm_object_init(void);
+__private_extern__ void vm_object_init(void);
-extern vm_object_t vm_object_allocate(
+__private_extern__ vm_object_t vm_object_allocate(
vm_object_size_t size);
-#if MACH_ASSERT
-extern void vm_object_reference(
+__private_extern__ void _vm_object_allocate(vm_object_size_t size,
+ vm_object_t object);
+
+#if TASK_SWAPPER
+
+__private_extern__ void vm_object_res_reference(
+ vm_object_t object);
+__private_extern__ void vm_object_res_deallocate(
+ vm_object_t object);
+#define VM_OBJ_RES_INCR(object) (object)->res_count++
+#define VM_OBJ_RES_DECR(object) (object)->res_count--
+
+#else /* TASK_SWAPPER */
+
+#define VM_OBJ_RES_INCR(object)
+#define VM_OBJ_RES_DECR(object)
+#define vm_object_res_reference(object)
+#define vm_object_res_deallocate(object)
+
+#endif /* TASK_SWAPPER */
+
+#define vm_object_reference_locked(object) \
+MACRO_BEGIN \
+ vm_object_t RLObject = (object); \
+ assert((RLObject)->ref_count > 0); \
+ (RLObject)->ref_count++; \
+ vm_object_res_reference(RLObject); \
+MACRO_END
+
+
+__private_extern__ void vm_object_reference(
vm_object_t object);
-#else /* MACH_ASSERT */
+
+#if !MACH_ASSERT
+
#define vm_object_reference(object) \
MACRO_BEGIN \
- vm_object_t Object = (object); \
- if (Object) { \
- vm_object_lock(Object); \
- Object->ref_count++; \
- vm_object_res_reference(Object); \
- vm_object_unlock(Object); \
+ vm_object_t RObject = (object); \
+ if (RObject) { \
+ vm_object_lock(RObject); \
+ vm_object_reference_locked(RObject); \
+ vm_object_unlock(RObject); \
} \
MACRO_END
+
#endif /* MACH_ASSERT */
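+
+/*
+ * Sketch of reference handling: with the object unlocked, a caller takes
+ * a reference via
+ *
+ *	vm_object_reference(object);
+ *
+ * while a caller that already holds the object lock uses
+ *
+ *	vm_object_lock(object);
+ *	vm_object_reference_locked(object);	// ref_count++, swap residency ref
+ *	vm_object_unlock(object);
+ *
+ * Every reference taken either way is eventually dropped with
+ * vm_object_deallocate().
+ */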
-extern void vm_object_deallocate(
+__private_extern__ void vm_object_deallocate(
vm_object_t object);
-extern void vm_object_pmap_protect(
+__private_extern__ kern_return_t vm_object_release_name(
+ vm_object_t object,
+ int flags);
+
+__private_extern__ void vm_object_pmap_protect(
vm_object_t object,
vm_object_offset_t offset,
- vm_size_t size,
+ vm_object_size_t size,
pmap_t pmap,
- vm_offset_t pmap_start,
+ vm_map_offset_t pmap_start,
vm_prot_t prot);
-extern void vm_object_page_remove(
+__private_extern__ void vm_object_page_remove(
vm_object_t object,
vm_object_offset_t start,
vm_object_offset_t end);
-extern boolean_t vm_object_coalesce(
+__private_extern__ void vm_object_deactivate_pages(
+ vm_object_t object,
+ vm_object_offset_t offset,
+ vm_object_size_t size,
+ boolean_t kill_page);
+
+__private_extern__ unsigned int vm_object_purge(
+ vm_object_t object);
+
+__private_extern__ kern_return_t vm_object_purgable_control(
+ vm_object_t object,
+ vm_purgable_t control,
+ int *state);
+
+__private_extern__ boolean_t vm_object_coalesce(
vm_object_t prev_object,
vm_object_t next_object,
vm_object_offset_t prev_offset,
vm_object_size_t prev_size,
vm_object_size_t next_size);
-extern boolean_t vm_object_shadow(
+__private_extern__ boolean_t vm_object_shadow(
vm_object_t *object,
vm_object_offset_t *offset,
vm_object_size_t length);
-extern void vm_object_collapse(
- vm_object_t object);
-
-extern vm_object_t vm_object_lookup(
- ipc_port_t port);
-
-extern ipc_port_t vm_object_name(
- vm_object_t object);
+__private_extern__ void vm_object_collapse(
+ vm_object_t object,
+ vm_object_offset_t offset,
+ boolean_t can_bypass);
-extern boolean_t vm_object_copy_quickly(
+__private_extern__ boolean_t vm_object_copy_quickly(
vm_object_t *_object,
vm_object_offset_t src_offset,
vm_object_size_t size,
boolean_t *_src_needs_copy,
boolean_t *_dst_needs_copy);
-extern kern_return_t vm_object_copy_strategically(
+__private_extern__ kern_return_t vm_object_copy_strategically(
vm_object_t src_object,
vm_object_offset_t src_offset,
vm_object_size_t size,
vm_object_offset_t *dst_offset,
boolean_t *dst_needs_copy);
-extern kern_return_t vm_object_copy_slowly(
+__private_extern__ kern_return_t vm_object_copy_slowly(
vm_object_t src_object,
vm_object_offset_t src_offset,
vm_object_size_t size,
int interruptible,
vm_object_t *_result_object);
-extern void vm_object_pager_create(
- vm_object_t object);
+__private_extern__ vm_object_t vm_object_copy_delayed(
+ vm_object_t src_object,
+ vm_object_offset_t src_offset,
+ vm_object_size_t size);
+
-extern void vm_object_destroy(
- ipc_port_t pager);
-extern void vm_object_pager_wakeup(
- ipc_port_t pager);
+__private_extern__ kern_return_t vm_object_destroy(
+ vm_object_t object,
+ kern_return_t reason);
-extern void vm_object_page_map(
+__private_extern__ void vm_object_pager_create(
+ vm_object_t object);
+
+__private_extern__ void vm_object_page_map(
vm_object_t object,
vm_object_offset_t offset,
vm_object_size_t size,
vm_object_offset_t (*map_fn)
 (void *, vm_object_offset_t),
void *map_fn_data);
-#if TASK_SWAPPER
-
-extern void vm_object_res_reference(
- vm_object_t object);
-extern void vm_object_res_deallocate(
- vm_object_t object);
-#define VM_OBJ_RES_INCR(object) (object)->res_count++
-#define VM_OBJ_RES_DECR(object) (object)->res_count--
+__private_extern__ kern_return_t vm_object_upl_request(
+ vm_object_t object,
+ vm_object_offset_t offset,
+ upl_size_t size,
+ upl_t *upl,
+ upl_page_info_t *page_info,
+ unsigned int *count,
+ int flags);
+
+__private_extern__ kern_return_t vm_object_transpose(
+ vm_object_t object1,
+ vm_object_t object2,
+ vm_object_size_t transpose_size);
+
+__private_extern__ boolean_t vm_object_sync(
+ vm_object_t object,
+ vm_object_offset_t offset,
+ vm_object_size_t size,
+ boolean_t should_flush,
+ boolean_t should_return,
+ boolean_t should_iosync);
-#else /* TASK_SWAPPER */
+__private_extern__ kern_return_t vm_object_update(
+ vm_object_t object,
+ vm_object_offset_t offset,
+ vm_object_size_t size,
+ vm_object_offset_t *error_offset,
+ int *io_errno,
+ memory_object_return_t should_return,
+ int flags,
+ vm_prot_t prot);
+
+__private_extern__ kern_return_t vm_object_lock_request(
+ vm_object_t object,
+ vm_object_offset_t offset,
+ vm_object_size_t size,
+ memory_object_return_t should_return,
+ int flags,
+ vm_prot_t prot);
-#define VM_OBJ_RES_INCR(object)
-#define VM_OBJ_RES_DECR(object)
-#define vm_object_res_reference(object)
-#define vm_object_res_deallocate(object)
-#endif /* TASK_SWAPPER */
-extern vm_object_t vm_object_enter(
- ipc_port_t pager,
+__private_extern__ vm_object_t vm_object_enter(
+ memory_object_t pager,
vm_object_size_t size,
boolean_t internal,
boolean_t init,
boolean_t check_named);
-extern vm_object_t vm_object_copy_delayed(
- vm_object_t src_object,
- vm_object_offset_t src_offset,
- vm_object_size_t size);
+/*
+ * Purgable object state.
+ */
+#define VM_OBJECT_NONPURGABLE 0 /* not a purgable object */
+#define VM_OBJECT_PURGABLE_NONVOLATILE 1 /* non-volatile purgable object */
+#define VM_OBJECT_PURGABLE_VOLATILE 2 /* volatile (but intact) purgable object */
+#define VM_OBJECT_PURGABLE_EMPTY 3 /* volatile purgable object that has been emptied */
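+
+/*
+ * Usage sketch, assuming the VM_PURGABLE_* request and state values from
+ * <mach/vm_purgable.h>: a caller marks an object volatile, and the
+ * pageout path may later empty it wholesale:
+ *
+ *	int state = VM_PURGABLE_VOLATILE;
+ *	kern_return_t kr;
+ *	unsigned int count;
+ *
+ *	kr = vm_object_purgable_control(object, VM_PURGABLE_SET_STATE, &state);
+ *	...
+ *	count = vm_object_purge(object);  // reclaims a volatile object's pages
+ *
+ * The VM_OBJECT_PURGABLE_* values above record the resulting state in the
+ * object's "purgable" bit-field.
+ */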
+
+__private_extern__ kern_return_t vm_object_populate_with_private(
+ vm_object_t object,
+ vm_object_offset_t offset,
+ ppnum_t phys_page,
+ vm_size_t size);
+
+extern kern_return_t adjust_vm_object_cache(
+ vm_size_t oval,
+ vm_size_t nval);
+
+extern kern_return_t vm_object_page_op(
+ vm_object_t object,
+ vm_object_offset_t offset,
+ int ops,
+ ppnum_t *phys_entry,
+ int *flags);
+
+extern kern_return_t vm_object_range_op(
+ vm_object_t object,
+ vm_object_offset_t offset_beg,
+ vm_object_offset_t offset_end,
+ int ops,
+ int *range);
/*
* Event waiting handling
#define VM_OBJECT_EVENT_CACHING 7
#define vm_object_assert_wait(object, event, interruptible) \
- MACRO_BEGIN \
- (object)->all_wanted |= 1 << (event); \
- assert_wait((event_t)((vm_offset_t)(object)+(event)),(interruptible)); \
- MACRO_END
+ (((object)->all_wanted |= 1 << (event)), \
+ assert_wait((event_t)((vm_offset_t)(object)+(event)),(interruptible)))
#define vm_object_wait(object, event, interruptible) \
- MACRO_BEGIN \
- vm_object_assert_wait((object),(event),(interruptible)); \
- vm_object_unlock(object); \
- thread_block((void (*)(void)) 0); \
- MACRO_END
+ (vm_object_assert_wait((object),(event),(interruptible)), \
+ vm_object_unlock(object), \
+ thread_block(THREAD_CONTINUE_NULL))
+
+#define thread_sleep_vm_object(object, event, interruptible) \
+ thread_sleep_mutex((event_t)(event), &(object)->Lock, (interruptible))
+
+#define vm_object_sleep(object, event, interruptible) \
+ (((object)->all_wanted |= 1 << (event)), \
+ thread_sleep_vm_object((object), \
+ ((vm_offset_t)(object)+(event)), (interruptible)))
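+
+/*
+ * Sketch of the event protocol (field names taken from the full structure
+ * definition): a waiter loops on its condition with the object locked;
+ * vm_object_sleep re-acquires the lock before returning (via
+ * thread_sleep_mutex), so the loop test is safe.  Whoever satisfies the
+ * condition wakes all waiters:
+ *
+ *	// waiter, object locked
+ *	while (!object->pager_ready)
+ *		vm_object_sleep(object, VM_OBJECT_EVENT_PAGER_READY,
+ *				THREAD_UNINT);
+ *
+ *	// pager setup path, object locked
+ *	object->pager_ready = TRUE;
+ *	vm_object_wakeup(object, VM_OBJECT_EVENT_PAGER_READY);
+ */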
#define vm_object_wakeup(object, event) \
MACRO_BEGIN \
/*
* Routines implemented as macros
*/
+#ifdef VM_PIP_DEBUG
+extern unsigned OSBacktrace(void **bt, unsigned maxAddrs);
+#define VM_PIP_DEBUG_BEGIN(object) \
+ MACRO_BEGIN \
+ if ((object)->paging_in_progress < VM_PIP_DEBUG_MAX_REFS) { \
+ int pip = (object)->paging_in_progress; \
+ (void) OSBacktrace(&(object)->pip_holders[pip].pip_retaddr[0], \
+ VM_PIP_DEBUG_STACK_FRAMES); \
+ } \
+ MACRO_END
+#else /* VM_PIP_DEBUG */
+#define VM_PIP_DEBUG_BEGIN(object)
+#endif /* VM_PIP_DEBUG */
#define vm_object_paging_begin(object) \
MACRO_BEGIN \
+ assert((object)->paging_in_progress >= 0); \
+ VM_PIP_DEBUG_BEGIN((object)); \
(object)->paging_in_progress++; \
MACRO_END
#define vm_object_paging_end(object) \
MACRO_BEGIN \
- assert((object)->paging_in_progress != 0); \
+ assert((object)->paging_in_progress > 0); \
if (--(object)->paging_in_progress == 0) { \
vm_object_wakeup(object, \
VM_OBJECT_EVENT_PAGING_IN_PROGRESS); \
#define vm_object_paging_wait(object, interruptible) \
MACRO_BEGIN \
while ((object)->paging_in_progress != 0) { \
- vm_object_wait( (object), \
+ wait_result_t _wr; \
+ \
+ _wr = vm_object_sleep((object), \
VM_OBJECT_EVENT_PAGING_IN_PROGRESS, \
(interruptible)); \
- vm_object_lock(object); \
\
- /*XXX if ((interruptible) && */ \
- /*XXX (current_thread()->wait_result != THREAD_AWAKENED))*/ \
+ /*XXX if ((interruptible) && (_wr != THREAD_AWAKENED))*/\
/*XXX break; */ \
} \
MACRO_END
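+
+/*
+ * Sketch of the paging bracket: a pagein/pageout path pins the object's
+ * pager association before dropping the lock to do I/O, e.g.:
+ *
+ *	vm_object_lock(object);
+ *	vm_object_paging_begin(object);	// also records a backtrace
+ *					// under VM_PIP_DEBUG
+ *	vm_object_unlock(object);
+ *	... issue the request against object->pager ...
+ *	vm_object_lock(object);
+ *	vm_object_paging_end(object);	// wakes vm_object_paging_wait'ers
+ *	vm_object_unlock(object);
+ *
+ * Termination and copy paths call vm_object_paging_wait() to drain all
+ * such in-flight operations first.
+ */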
* Object locking macros
*/
-#define vm_object_lock_init(object) mutex_init(&(object)->Lock, ETAP_VM_OBJ)
+#define vm_object_lock_init(object) mutex_init(&(object)->Lock, 0)
#define vm_object_lock(object) mutex_lock(&(object)->Lock)
#define vm_object_unlock(object) mutex_unlock(&(object)->Lock)
#define vm_object_lock_try(object) mutex_try(&(object)->Lock)
+#define vm_object_round_page(x) (((vm_object_offset_t)(x) + PAGE_MASK) & ~((signed)PAGE_MASK))
+#define vm_object_trunc_page(x) ((vm_object_offset_t)(x) & ~((signed)PAGE_MASK))
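+
+/*
+ * These mirror round_page()/trunc_page() but operate on 64-bit object
+ * offsets; the (signed) cast sign-extends the inverted mask so the high
+ * 32 bits survive the AND.  For example, with 4K pages (PAGE_MASK 0xFFF):
+ *
+ *	vm_object_trunc_page(0x1234) == 0x1000
+ *	vm_object_round_page(0x1234) == 0x2000
+ *	vm_object_round_page(0x1000) == 0x1000
+ */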
+
#endif /* _VM_VM_OBJECT_H_ */