diff --git a/osfmk/vm/vm_fault.h b/osfmk/vm/vm_fault.h
index d3be4ec450093949995bf190bac9b276880816cc..d6824c4fd7abf98a40235dc3fb22a569ea3e06d0 100644
--- a/osfmk/vm/vm_fault.h
+++ b/osfmk/vm/vm_fault.h
@@ -79,8 +79,8 @@ typedef       kern_return_t   vm_fault_return_t;
 #define VM_FAULT_RETRY                 1
 #define VM_FAULT_INTERRUPTED           2
 #define VM_FAULT_MEMORY_SHORTAGE       3
-#define VM_FAULT_FICTITIOUS_SHORTAGE   4
 #define VM_FAULT_MEMORY_ERROR          5
+#define VM_FAULT_SUCCESS_NO_VM_PAGE    6       /* success but no VM page */
 
 /*
  *     Page fault handling based on vm_map (or entries therein)
@@ -95,6 +95,8 @@ extern kern_return_t vm_fault(
                pmap_t          pmap,
                vm_map_offset_t pmap_addr);
 
+extern void vm_pre_fault(vm_map_offset_t);
+
 #ifdef MACH_KERNEL_PRIVATE
 
 #include <vm/vm_page.h>
@@ -113,14 +115,11 @@ extern vm_fault_return_t vm_fault_page(
                vm_object_offset_t first_offset,/* Offset into object */
                vm_prot_t       fault_type,     /* What access is requested */
                boolean_t       must_be_resident,/* Must page be resident? */
-               int             interruptible,/* how may fault be interrupted */
-               vm_map_offset_t lo_offset,      /* Map entry start */
-               vm_map_offset_t hi_offset,      /* Map entry end */
-               vm_behavior_t   behavior,       /* Expected paging behavior */
+               boolean_t       caller_lookup,  /* caller looked up page */
                /* Modifies in place: */
                vm_prot_t       *protection,    /* Protection for mapping */
-               /* Returns: */
                vm_page_t       *result_page,   /* Page found, if successful */
+               /* Returns: */
                vm_page_t       *top_page,      /* Page in top object, if
                                                 * not result_page.  */
                int             *type_of_fault, /* if non-zero, return COW, zero-filled, etc...
@@ -129,8 +128,7 @@ extern vm_fault_return_t vm_fault_page(
                kern_return_t   *error_code,    /* code if page is in error */
                boolean_t       no_zero_fill,   /* don't fill absent pages */
                boolean_t       data_supply,    /* treat as data_supply */
-               vm_map_t        map,
-               vm_map_offset_t vaddr);
+               vm_object_fault_info_t fault_info);
 
 extern void vm_fault_cleanup(
                vm_object_t     object,
@@ -139,8 +137,10 @@ extern void vm_fault_cleanup(
 extern kern_return_t vm_fault_wire(
                vm_map_t        map,
                vm_map_entry_t  entry,
+               vm_prot_t       prot,
                pmap_t          pmap,
-               vm_map_offset_t pmap_addr);
+               vm_map_offset_t pmap_addr,
+               ppnum_t         *physpage_p);
 
 extern void vm_fault_unwire(
                vm_map_t        map,
@@ -159,6 +159,27 @@ extern kern_return_t       vm_fault_copy(
                vm_map_version_t         *dst_version,
                int                     interruptible);
 
+extern kern_return_t vm_fault_enter(
+       vm_page_t m,
+       pmap_t pmap,
+       vm_map_offset_t vaddr,
+       vm_prot_t prot,
+       vm_prot_t fault_type,
+       boolean_t wired,
+       boolean_t change_wiring,
+       boolean_t no_cache,
+       boolean_t cs_bypass,
+       int       user_tag,
+       int       pmap_options,
+       boolean_t *need_retry,
+       int *type_of_fault);
+
+extern vm_offset_t kdp_lightweight_fault(
+               vm_map_t map,
+               vm_offset_t cur_target_addr,
+               uint32_t *fault_results);
+
+
 #endif /* MACH_KERNEL_PRIVATE */
 
 #endif /* KERNEL_PRIVATE */
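
For context, a minimal sketch (not part of the patch) of how a MACH_KERNEL_PRIVATE caller might use the revised vm_fault_wire() prototype, which now takes the access being wired for ("prot") and an optional out-parameter receiving the physical page number. The helper name wire_entry_example, its locking assumptions, and passing a non-NULL physpage_p are illustrative assumptions, not taken from this diff; it only compiles inside the xnu tree.

#include <mach/vm_prot.h>
#include <vm/vm_map.h>
#include <vm/vm_fault.h>

/*
 * Illustrative sketch only: wire one map entry for read/write access.
 * Assumes the caller has already looked up "entry" in "map" and holds
 * whatever map locking vm_fault_wire() expects of its callers.
 */
static kern_return_t
wire_entry_example(vm_map_t map, vm_map_entry_t entry)
{
	ppnum_t		physpage = 0;	/* receives the resulting physical page number */
	kern_return_t	kr;

	kr = vm_fault_wire(map,
			   entry,
			   VM_PROT_READ | VM_PROT_WRITE,	/* new "prot" argument */
			   map->pmap,
			   entry->vme_start,
			   &physpage);				/* new "physpage_p" argument */
	return kr;
}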