/*
 * Return codes from vm_fault_page() describing why the fault could not be
 * (fully) serviced.  Stray unified-diff markers removed from this block;
 * VM_FAULT_FICTITIOUS_SHORTAGE is retained (matching the upstream XNU
 * header) so existing callers that test for it keep compiling.
 */
#define VM_FAULT_RETRY 1
#define VM_FAULT_INTERRUPTED 2
#define VM_FAULT_MEMORY_SHORTAGE 3
#define VM_FAULT_FICTITIOUS_SHORTAGE 4
#define VM_FAULT_MEMORY_ERROR 5
#define VM_FAULT_SUCCESS_NO_VM_PAGE 6 /* success but no VM page */
/*
 * Page fault handling based on vm_map (or entries therein).
 */
vm_object_offset_t first_offset,/* Offset into object */
vm_prot_t fault_type,           /* What access is requested */
boolean_t must_be_resident,     /* Must page be resident? */
boolean_t caller_lookup,        /* caller looked up page */
/* Modifies in place: */
vm_prot_t *protection,          /* Protection for mapping */
vm_page_t *result_page,         /* Page found, if successful */
/* Returns: */
vm_page_t *top_page,            /* Page in top object, if
                                 * not result_page. */
int *type_of_fault,             /* if non-zero, return COW, zero-filled, etc... */
kern_return_t *error_code,      /* code if page is in error */
boolean_t no_zero_fill,         /* don't fill absent pages */
boolean_t data_supply,          /* treat as data_supply */
vm_object_fault_info_t fault_info); /* map/offset/behavior context, replaces
                                     * the old interruptible/lo_offset/
                                     * hi_offset/behavior/map/vaddr args */
/*
 * NOTE(review): presumably releases the object lock / paging references
 * acquired during a vm_fault_page() call before the fault is retried or
 * abandoned — confirm exact semantics against the definition in vm_fault.c.
 */
extern void vm_fault_cleanup(
vm_object_t object,
vm_map_version_t *dst_version,
int interruptible);
+extern kern_return_t vm_fault_enter(
+ vm_page_t m,
+ pmap_t pmap,
+ vm_map_offset_t vaddr,
+ vm_prot_t prot,
+ vm_prot_t fault_type,
+ boolean_t wired,
+ boolean_t change_wiring,
+ boolean_t no_cache,
+ boolean_t cs_bypass,
+ boolean_t *need_retry,
+ int *type_of_fault);
+
#endif /* MACH_KERNEL_PRIVATE */
#endif /* KERNEL_PRIVATE */