X-Git-Url: https://git.saurik.com/apple/xnu.git/blobdiff_plain/43866e378188c25dd1e2208016ab3cbeb086ae6c..7e41aa883dd258f888d0470250eead40a53ef1f5:/osfmk/vm/vm_fault.h

diff --git a/osfmk/vm/vm_fault.h b/osfmk/vm/vm_fault.h
index 65b60936e..d6824c4fd 100644
--- a/osfmk/vm/vm_fault.h
+++ b/osfmk/vm/vm_fault.h
@@ -1,16 +1,19 @@
 /*
- * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
  *
- * @APPLE_LICENSE_HEADER_START@
- *
- * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved.
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
  *
  * This file contains Original Code and/or Modifications of Original Code
  * as defined in and that are subject to the Apple Public Source License
  * Version 2.0 (the 'License'). You may not use this file except in
- * compliance with the License. Please obtain a copy of the License at
- * http://www.opensource.apple.com/apsl/ and read it before using this
- * file.
+ * compliance with the License. The rights granted to you under the License
+ * may not be used to create, or enable the creation or redistribution of,
+ * unlawful or unlicensed copies of an Apple operating system, or to
+ * circumvent, violate, or enable the circumvention or violation of, any
+ * terms of an Apple operating system software license agreement.
+ *
+ * Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this file.
  *
  * The Original Code and all software distributed under the License are
  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
@@ -20,7 +23,7 @@
  * Please see the License for the specific language governing rights and
  * limitations under the License.
  *
- * @APPLE_LICENSE_HEADER_END@
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
  */
 /*
  * @OSF_COPYRIGHT@
@@ -61,22 +64,44 @@
 #ifndef _VM_VM_FAULT_H_
 #define _VM_VM_FAULT_H_
 
+#include 
 #include 
 #include 
 #include 
 #include 
 #include 
-#include 
-#include 
-#include 
+
+#ifdef KERNEL_PRIVATE
 
 typedef kern_return_t vm_fault_return_t;
+
 #define VM_FAULT_SUCCESS                0
 #define VM_FAULT_RETRY                  1
 #define VM_FAULT_INTERRUPTED            2
 #define VM_FAULT_MEMORY_SHORTAGE        3
-#define VM_FAULT_FICTITIOUS_SHORTAGE    4
 #define VM_FAULT_MEMORY_ERROR           5
+#define VM_FAULT_SUCCESS_NO_VM_PAGE     6    /* success but no VM page */
+
+/*
+ * Page fault handling based on vm_map (or entries therein)
+ */
+
+extern kern_return_t vm_fault(
+        vm_map_t        map,
+        vm_map_offset_t vaddr,
+        vm_prot_t       fault_type,
+        boolean_t       change_wiring,
+        int             interruptible,
+        pmap_t          pmap,
+        vm_map_offset_t pmap_addr);
+
+extern void vm_pre_fault(vm_map_offset_t);
+
+#ifdef MACH_KERNEL_PRIVATE
+
+#include 
+#include 
+#include 
 
 extern void vm_fault_init(void);
 
@@ -90,14 +115,11 @@ extern vm_fault_return_t vm_fault_page(
         vm_object_offset_t first_offset,/* Offset into object */
         vm_prot_t       fault_type,     /* What access is requested */
         boolean_t       must_be_resident,/* Must page be resident? */
-        int             interruptible,  /* how may fault be interrupted */
-        vm_object_offset_t lo_offset,   /* Map entry start */
-        vm_object_offset_t hi_offset,   /* Map entry end */
-        vm_behavior_t   behavior,       /* Expected paging behavior */
+        boolean_t       caller_lookup,  /* caller looked up page */
         /* Modifies in place: */
         vm_prot_t       *protection,    /* Protection for mapping */
-        /* Returns: */
         vm_page_t       *result_page,   /* Page found, if successful */
+        /* Returns: */
         vm_page_t       *top_page,      /* Page in top object, if
                                          * not result_page. */
         int             *type_of_fault, /* if non-zero, return COW, zero-filled, etc...
@@ -106,46 +128,60 @@ extern vm_fault_return_t vm_fault_page(
         kern_return_t   *error_code,    /* code if page is in error */
         boolean_t       no_zero_fill,   /* don't fill absent pages */
         boolean_t       data_supply,    /* treat as data_supply */
-        vm_map_t        map,
-        vm_offset_t     vaddr);
+        vm_object_fault_info_t fault_info);
 
 extern void vm_fault_cleanup(
         vm_object_t     object,
         vm_page_t       top_page);
-/*
- * Page fault handling based on vm_map (or entries therein)
- */
-
-extern kern_return_t vm_fault(
-        vm_map_t        map,
-        vm_offset_t     vaddr,
-        vm_prot_t       fault_type,
-        boolean_t       change_wiring,
-        int             interruptible,
-        pmap_t          pmap,
-        vm_offset_t     pmap_addr);
 
 extern kern_return_t vm_fault_wire(
         vm_map_t        map,
         vm_map_entry_t  entry,
+        vm_prot_t       prot,
         pmap_t          pmap,
-        vm_offset_t     pmap_addr);
+        vm_map_offset_t pmap_addr,
+        ppnum_t         *physpage_p);
 
 extern void vm_fault_unwire(
         vm_map_t        map,
         vm_map_entry_t  entry,
         boolean_t       deallocate,
         pmap_t          pmap,
-        vm_offset_t     pmap_addr);
+        vm_map_offset_t pmap_addr);
 
 extern kern_return_t vm_fault_copy(
         vm_object_t     src_object,
         vm_object_offset_t src_offset,
-        vm_size_t       *src_size,      /* INOUT */
+        vm_map_size_t   *copy_size,     /* INOUT */
        	vm_object_t     dst_object,
         vm_object_offset_t dst_offset,
         vm_map_t        dst_map,
         vm_map_version_t *dst_version,
         int             interruptible);
 
+extern kern_return_t vm_fault_enter(
+        vm_page_t       m,
+        pmap_t          pmap,
+        vm_map_offset_t vaddr,
+        vm_prot_t       prot,
+        vm_prot_t       fault_type,
+        boolean_t       wired,
+        boolean_t       change_wiring,
+        boolean_t       no_cache,
+        boolean_t       cs_bypass,
+        int             user_tag,
+        int             pmap_options,
+        boolean_t       *need_retry,
+        int             *type_of_fault);
+
+extern vm_offset_t kdp_lightweight_fault(
+        vm_map_t        map,
+        vm_offset_t     cur_target_addr,
+        uint32_t        *fault_results);
+
+
+#endif /* MACH_KERNEL_PRIVATE */
+
+#endif /* KERNEL_PRIVATE */
+
 #endif /* _VM_VM_FAULT_H_ */
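Note on the reworked vm_fault() prototype in the diff above: the fault address and pmap_addr arguments are now vm_map_offset_t rather than vm_offset_t. The sketch below is a minimal, kernel-private illustration of how a caller might drive this entry point, in the spirit of the in-tree trap handlers; the helper name resolve_read_fault() and the exact include list are assumptions made for the example, not code taken from this change.

/*
 * Illustrative sketch only -- not part of this diff.  Resolve a user
 * read fault at `vaddr` in the current task's map via the reworked
 * vm_fault() entry point.  The helper name is hypothetical and the
 * include list is indicative.
 */
#include <mach/kern_return.h>
#include <mach/boolean.h>
#include <mach/vm_param.h>      /* PAGE_MASK */
#include <mach/vm_prot.h>
#include <kern/kern_types.h>    /* THREAD_ABORTSAFE */
#include <vm/vm_fault.h>
#include <vm/vm_map.h>

static kern_return_t
resolve_read_fault(vm_map_offset_t vaddr)
{
	vm_map_t map = current_map();   /* address map of the faulting task */

	return vm_fault(map,
	    vm_map_trunc_page(vaddr, PAGE_MASK), /* fault the enclosing page */
	    VM_PROT_READ,               /* access that triggered the fault */
	    FALSE,                      /* change_wiring: ordinary fault */
	    THREAD_ABORTSAFE,           /* interruptible */
	    (pmap_t)0,                  /* no alternate pmap; use map->pmap */
	    0);                         /* pmap_addr unused when pmap is null */
}

Passing a null pmap with a zero pmap_addr leaves the physical-map updates to the map's own pmap, which is the common case for ordinary (non-wiring) faults; wiring paths instead go through the vm_fault_wire()/vm_fault_unwire() declarations shown in the diff.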