/*
 * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 *      Author: Avadis Tevanian, Jr., Michael Wayne Young
 *
 *      Virtual memory map module definitions.
 */
#include <mach/mach_types.h>
#include <mach/kern_return.h>
#include <mach/boolean.h>
#include <mach/vm_types.h>
#include <mach/vm_prot.h>
#include <mach/vm_inherit.h>
#include <mach/vm_behavior.h>
#include <mach/vm_param.h>

#include <os/overflow.h>

#include <sys/cdefs.h>

#ifdef XNU_KERNEL_PRIVATE
#include <vm/vm_protos.h>
#endif /* XNU_KERNEL_PRIVATE */
extern void             vm_map_reference(vm_map_t       map);
extern vm_map_t         current_map(void);

/* Setup reserved areas in a new VM map */
extern kern_return_t    vm_map_exec(
        cpu_subtype_t           cpu_subtype);   /* some parameters elided */
#ifdef  MACH_KERNEL_PRIVATE

#include <task_swapper.h>
#include <mach_assert.h>

#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <kern/locks.h>
#include <kern/zalloc.h>
#include <kern/macro_help.h>

#include <kern/thread.h>
#include <os/refcnt.h>

#define current_map_fast()      (current_thread()->map)
#define current_map()           (current_map_fast())

#include <vm/vm_map_store.h>
/*
 *      vm_map_t                the high-level address map data structure.
 *      vm_map_entry_t          an entry in an address map.
 *      vm_map_version_t        a timestamp of a map, for use with vm_map_lookup
 *      vm_map_copy_t           represents memory copied from an address map,
 *                               used for inter-map copy operations
 */
typedef struct vm_map_entry     *vm_map_entry_t;
#define VM_MAP_ENTRY_NULL       ((vm_map_entry_t) NULL)
/*
 *      Type:           vm_map_object_t [internal use only]
 *
 *              The target of an address mapping, either a virtual
 *              memory object or a sub map (of the kernel map).
 */
typedef union vm_map_object {
        vm_object_t             vmo_object;     /* object object */
        vm_map_t                vmo_submap;     /* belongs to another map */
} vm_map_object_t;

#define named_entry_lock_init(object)   lck_mtx_init(&(object)->Lock, &vm_object_lck_grp, &vm_object_lck_attr)
#define named_entry_lock_destroy(object)        lck_mtx_destroy(&(object)->Lock, &vm_object_lck_grp)
#define named_entry_lock(object)                lck_mtx_lock(&(object)->Lock)
#define named_entry_unlock(object)              lck_mtx_unlock(&(object)->Lock)

#if VM_NAMED_ENTRY_LIST
extern queue_head_t vm_named_entry_list;
#endif /* VM_NAMED_ENTRY_LIST */
/*
 *      Type:           vm_named_entry_t [internal use only]
 *
 *              Description of a mapping to a memory cache object.
 *
 *              While the handle to this object is used as a means to map
 *              and pass around the right to map regions backed by pagers
 *              of all sorts, the named_entry itself is only manipulated
 *              by the kernel.  Named entries hold information on the
 *              right to map a region of a cached object.  Namely,
 *              the target cache object, the beginning and ending of the
 *              region to be mapped, and the permissions, (read, write)
 *              with which it can be mapped.
 */

struct vm_named_entry {
        decl_lck_mtx_data(, Lock);              /* Synchronization */
        union {
                vm_map_t        map;            /* map backing submap */
                vm_map_copy_t   copy;           /* a VM map copy */
        } backing;
        vm_object_offset_t      offset;         /* offset into object */
        vm_object_size_t        size;           /* size of region */
        vm_object_offset_t      data_offset;    /* offset to first byte of data */
        vm_prot_t               protection;     /* access permissions */
        int                     ref_count;      /* Number of references */
        unsigned int                            /* Is backing.xxx : */
        /* boolean_t */ is_object:1,            /* ... a VM object (wrapped in a VM map copy) */
        /* boolean_t */ internal:1,             /* ... an internal object */
        /* boolean_t */ is_sub_map:1,           /* ... a submap? */
        /* boolean_t */ is_copy:1;              /* ... a VM map copy */
#if VM_NAMED_ENTRY_LIST
        queue_chain_t           named_entry_list;
        int                     named_entry_alias;
        mach_port_t             named_entry_port;
#define NAMED_ENTRY_BT_DEPTH 16
        void                    *named_entry_bt[NAMED_ENTRY_BT_DEPTH];
#endif /* VM_NAMED_ENTRY_LIST */
};
/*
 *      Type:           vm_map_entry_t [internal use only]
 *
 *              A single mapping within an address map.
 *
 *              Address map entries consist of start and end addresses,
 *              a VM object (or sub map) and offset into that object,
 *              and user-exported inheritance and protection information.
 *              Control information for virtual copy operations is also
 *              stored in the address map entry.
 */

struct vm_map_links {
        struct vm_map_entry     *prev;          /* previous entry */
        struct vm_map_entry     *next;          /* next entry */
        vm_map_offset_t         start;          /* start address */
        vm_map_offset_t         end;            /* end address */
};
/*
 * FOOTPRINT ACCOUNTING:
 * The "memory footprint" is better described in the pmap layer.
 *
 * At the VM level, these 2 vm_map_entry_t fields are relevant:
 * iokit_mapped:
 *      For an "iokit_mapped" entry, we add the size of the entry to the
 *      footprint when the entry is entered into the map and we subtract that
 *      size when the entry is removed.  No other accounting should take place.
 *      "use_pmap" should be FALSE but is not taken into account.
 * use_pmap: (only when is_sub_map is FALSE)
 *      This indicates if we should ask the pmap layer to account for pages
 *      in this mapping.  If FALSE, we expect that another form of accounting
 *      is being used (e.g. "iokit_mapped" or the explicit accounting of
 *      non-volatile purgable memory).
 *
 * So the logic is mostly:
 * if entry->is_sub_map == TRUE
 *      anything in a submap does not count for the footprint
 * else if entry->iokit_mapped == TRUE
 *      footprint includes the entire virtual size of this entry
 * else if entry->use_pmap == FALSE
 *      tell pmap NOT to account for pages being pmap_enter()'d from this
 *      mapping (i.e. use "alternate accounting")
 * else
 *      pmap will account for pages being pmap_enter()'d from this mapping
 *      as it sees fit (only if anonymous, etc...)
 */
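/*
 * Illustrative sketch (not part of the original header): the footprint
 * decision described above, written out as code.  "entry" is assumed to be
 * a vm_map_entry_t with the bit-fields defined below; "footprint" is a
 * hypothetical accumulator.
 *
 *      if (entry->is_sub_map) {
 *              // nothing in a submap counts for the footprint
 *      } else if (entry->iokit_mapped) {
 *              // charge the entire virtual size of this entry
 *              footprint += entry->vme_end - entry->vme_start;
 *      } else if (!entry->use_pmap) {
 *              // "alternate accounting": tell pmap NOT to count these pages
 *      } else {
 *              // pmap accounts for pages as they are pmap_enter()'d
 *      }
 */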
struct vm_map_entry {
        struct vm_map_links     links;          /* links to other entries */
#define vme_prev                links.prev
#define vme_next                links.next
#define vme_start               links.start
#define vme_end                 links.end

        struct vm_map_store     store;
        union vm_map_object     vme_object;     /* object I point to */
        vm_object_offset_t      vme_offset;     /* offset into object */

        unsigned int
        /* boolean_t */ is_shared:1,    /* region is shared */
        /* boolean_t */ is_sub_map:1,   /* Is "object" a submap? */
        /* boolean_t */ in_transition:1, /* Entry being changed */
        /* boolean_t */ needs_wakeup:1, /* Waiters on in_transition */
        /* vm_behavior_t */ behavior:2, /* user paging behavior hint */
        /* behavior is not defined for submap type */
        /* boolean_t */ needs_copy:1,   /* object need to be copied? */

        /* Only in task maps: */
        /* vm_prot_t */ protection:3,   /* protection code */
        /* vm_prot_t */ max_protection:3, /* maximum protection */
        /* vm_inherit_t */ inheritance:2, /* inheritance */
        /* boolean_t */ use_pmap:1,     /*
                                         * use_pmap is overloaded:
                                         * if "is_sub_map":
                                         *      use a nested pmap?
                                         * else (i.e. if object):
                                         *      use pmap accounting
                                         *      for footprint?
                                         */
        /* boolean_t */ no_cache:1,     /* should new pages be cached? */
        /* boolean_t */ permanent:1,    /* mapping can not be removed */
        /* boolean_t */ superpage_size:1, /* use superpages of a certain size */
        /* boolean_t */ map_aligned:1,  /* align to map's page size */
        /* boolean_t */ zero_wired_pages:1, /* zero out the wired pages of
                                             * this entry if it is being deleted
                                             * without unwiring them */
        /* boolean_t */ used_for_jit:1,
        /* boolean_t */ pmap_cs_associated:1, /* pmap_cs will validate */
        /* boolean_t */ from_reserved_zone:1, /* Allocated from
                                               * kernel reserved zone    */

        /* iokit accounting: use the virtual size rather than resident size: */
        /* boolean_t */ iokit_acct:1,
        /* boolean_t */ vme_resilient_codesign:1,
        /* boolean_t */ vme_resilient_media:1,
        /* boolean_t */ vme_atomic:1, /* entry cannot be split/coalesced */
        /* boolean_t */ vme_no_copy_on_read:1,
        /* boolean_t */ translated_allow_execute:1; /* execute in translated processes */

        unsigned short          wired_count;    /* can be paged if = 0 */
        unsigned short          user_wired_count; /* for vm_wire */

#define MAP_ENTRY_CREATION_DEBUG (1)
#define MAP_ENTRY_INSERTION_DEBUG (1)

#if     MAP_ENTRY_CREATION_DEBUG
        struct vm_map_header    *vme_creation_maphdr;
        uintptr_t               vme_creation_bt[16];
#endif
#if     MAP_ENTRY_INSERTION_DEBUG
        vm_map_offset_t         vme_start_original;
        vm_map_offset_t         vme_end_original;
        uintptr_t               vme_insertion_bt[16];
#endif
};
#define VME_SUBMAP_PTR(entry)                   \
        (&((entry)->vme_object.vmo_submap))
#define VME_SUBMAP(entry)                                       \
        ((vm_map_t)((uintptr_t)0 + *VME_SUBMAP_PTR(entry)))
#define VME_OBJECT_PTR(entry)                   \
        (&((entry)->vme_object.vmo_object))
#define VME_OBJECT(entry)                                       \
        ((vm_object_t)((uintptr_t)0 + *VME_OBJECT_PTR(entry)))
#define VME_OFFSET(entry)                       \
        ((entry)->vme_offset & (vm_object_offset_t)~FOURK_PAGE_MASK)
#define VME_ALIAS_MASK (FOURK_PAGE_MASK)
#define VME_ALIAS(entry)                                        \
        ((unsigned int)((entry)->vme_offset & VME_ALIAS_MASK))
static inline void
VME_OBJECT_SET(
        vm_map_entry_t entry,
        vm_object_t object)
{
        entry->vme_object.vmo_object = object;
        if (object != VM_OBJECT_NULL && !object->internal) {
                entry->vme_resilient_media = FALSE;
        }
        entry->vme_resilient_codesign = FALSE;
        entry->used_for_jit = FALSE;
}
static inline void
VME_SUBMAP_SET(
        vm_map_entry_t entry,
        vm_map_t submap)
{
        entry->vme_object.vmo_submap = submap;
}
static inline void
VME_OFFSET_SET(
        vm_map_entry_t entry,
        vm_object_offset_t offset)
{
        unsigned int alias;

        alias = VME_ALIAS(entry);
        assert((offset & FOURK_PAGE_MASK) == 0);
        entry->vme_offset = offset | alias;
}
/*
 * The "alias" field can be updated while holding the VM map lock
 * "shared".  It's OK as long as it's the only field that can be
 * updated without the VM map "exclusive" lock.
 */
static inline void
VME_ALIAS_SET(
        vm_map_entry_t entry,
        int alias)
{
        vm_object_offset_t offset;

        offset = VME_OFFSET(entry);
        entry->vme_offset = offset | ((unsigned int)alias & VME_ALIAS_MASK);
}
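/*
 * Illustrative sketch (not part of the original header): the alias tag and
 * the object offset share the vme_offset field.  The offset must be
 * FOURK_PAGE aligned, so its low bits are free to hold the alias.  The
 * values below are hypothetical.
 *
 *      vm_map_entry_t entry = ...;
 *      VME_OFFSET_SET(entry, 0x10000);         // page-aligned offset
 *      VME_ALIAS_SET(entry, 5);                // stored in the low bits of vme_offset
 *      assert(VME_OFFSET(entry) == 0x10000);
 *      assert(VME_ALIAS(entry) == 5);
 */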
static inline void
VME_OBJECT_SHADOW(
        vm_map_entry_t entry,
        vm_object_size_t length)
{
        vm_object_t object;
        vm_object_offset_t offset;

        object = VME_OBJECT(entry);
        offset = VME_OFFSET(entry);
        vm_object_shadow(&object, &offset, length);
        if (object != VME_OBJECT(entry)) {
                VME_OBJECT_SET(entry, object);
                entry->use_pmap = TRUE;
        }
        if (offset != VME_OFFSET(entry)) {
                VME_OFFSET_SET(entry, offset);
        }
}
/*
 * Convenience macros for dealing with superpages
 * SUPERPAGE_NBASEPAGES is architecture dependent and defined in pmap.h
 */
#define SUPERPAGE_SIZE (PAGE_SIZE*SUPERPAGE_NBASEPAGES)
#define SUPERPAGE_MASK (-SUPERPAGE_SIZE)
#define SUPERPAGE_ROUND_DOWN(a) (a & SUPERPAGE_MASK)
#define SUPERPAGE_ROUND_UP(a) ((a + SUPERPAGE_SIZE-1) & SUPERPAGE_MASK)
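/*
 * Illustrative sketch (not part of the original header): rounding an
 * arbitrary address range to superpage boundaries with the macros above.
 * The variable names are hypothetical.
 *
 *      vm_map_offset_t start = SUPERPAGE_ROUND_DOWN(addr);
 *      vm_map_offset_t end   = SUPERPAGE_ROUND_UP(addr + len);
 *      // [start, end) now covers [addr, addr + len) with whole superpages
 */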
/*
 * wired_counts are unsigned short.  This value is used to safeguard
 * against any mishaps due to runaway user programs.
 */
#define MAX_WIRE_COUNT          65535
/*
 *      Type:           struct vm_map_header
 *
 *              Header for a vm_map and a vm_map_copy.
 */

struct vm_map_header {
        struct vm_map_links     links;          /* first, last, min, max */
        int                     nentries;       /* Number of entries */
        boolean_t               entries_pageable;
        /* are map entries pageable? */
#ifdef VM_MAP_STORE_USE_RB
        struct rb_head  rb_head_store;
#endif
        int                     page_shift;     /* page shift */
};

#define VM_MAP_HDR_PAGE_SHIFT(hdr) ((hdr)->page_shift)
#define VM_MAP_HDR_PAGE_SIZE(hdr) (1 << VM_MAP_HDR_PAGE_SHIFT((hdr)))
#define VM_MAP_HDR_PAGE_MASK(hdr) (VM_MAP_HDR_PAGE_SIZE((hdr)) - 1)
/*
 *      Type:           vm_map_t [exported; contents invisible]
 *
 *              An address map -- a directory relating valid
 *              regions of a task's address space to the corresponding
 *              virtual memory objects.
 *
 *              Maps are doubly-linked lists of map entries, sorted
 *              by address.  One hint is used to start
 *              searches again from the last successful search,
 *              insertion, or removal.  Another hint is used to
 *              quickly find free space.
 */
struct _vm_map {
        lck_rw_t                lock;           /* map lock */
        struct vm_map_header    hdr;            /* Map entry header */
#define min_offset              hdr.links.start /* start of range */
#define max_offset              hdr.links.end   /* end of range */
        pmap_t                  XNU_PTRAUTH_SIGNED_PTR("_vm_map.pmap") pmap;           /* Physical map */
        vm_map_size_t           size;           /* virtual size */
        vm_map_size_t           user_wire_limit;/* rlimit on user locked memory */
        vm_map_size_t           user_wire_size; /* current size of user locked memory in this map */

        vm_map_offset_t         vmmap_high_start;

        union {
                /*
                 * If map->disable_vmentry_reuse == TRUE:
                 * the end address of the highest allocated vm_map_entry_t.
                 */
                vm_map_offset_t         vmu1_highest_entry_end;
                /*
                 * For a nested VM map:
                 * the lowest address in this nested VM map that we would
                 * expect to be unnested under normal operation (i.e. for
                 * regular copy-on-write on DATA section).
                 */
                vm_map_offset_t         vmu1_lowest_unnestable_start;
        } vmu1;
#define highest_entry_end       vmu1.vmu1_highest_entry_end
#define lowest_unnestable_start vmu1.vmu1_lowest_unnestable_start

        decl_lck_mtx_data(, s_lock);                    /* Lock ref, res fields */
        lck_mtx_ext_t           s_lock_ext;
        vm_map_entry_t          hint;           /* hint for quick lookups */

        union {
                struct vm_map_links* vmmap_hole_hint;   /* hint for quick hole lookups */
                struct vm_map_corpse_footprint_header *vmmap_corpse_footprint;
        } vmmap_u_1;
#define hole_hint vmmap_u_1.vmmap_hole_hint
#define vmmap_corpse_footprint vmmap_u_1.vmmap_corpse_footprint

        union {
                vm_map_entry_t          _first_free;    /* First free space hint */
                struct vm_map_links*    _holes;         /* links all holes between entries */
        } f_s;                                          /* Union for free space data structures being used */
#define first_free              f_s._first_free
#define holes_list              f_s._holes

        struct os_refcnt        map_refcnt;     /* Reference count */

#if     TASK_SWAPPER
        int                     res_count;      /* Residence count (swap) */
        int                     sw_state;       /* Swap state */
#endif  /* TASK_SWAPPER */

        unsigned int
        /* boolean_t */ wait_for_space:1,         /* Should callers wait for space? */
        /* boolean_t */ wiring_required:1,        /* All memory wired? */
        /* boolean_t */ no_zero_fill:1,           /* No zero fill absent pages */
        /* boolean_t */ mapped_in_other_pmaps:1,  /* has this submap been mapped in maps that use a different pmap */
        /* boolean_t */ switch_protect:1,         /* Protect map from write faults while switched */
        /* boolean_t */ disable_vmentry_reuse:1,  /* All vm entries should keep using newer and higher addresses in the map */
        /* boolean_t */ map_disallow_data_exec:1, /* Disallow execution from data pages on exec-permissive architectures */
        /* boolean_t */ holelistenabled:1,
        /* boolean_t */ is_nested_map:1,
        /* boolean_t */ map_disallow_new_exec:1,  /* Disallow new executable code */
        /* boolean_t */ jit_entry_exists:1,
        /* boolean_t */ has_corpse_footprint:1,
        /* boolean_t */ terminated:1,
        /* boolean_t */ is_alien:1,               /* for platform simulation, i.e. PLATFORM_IOS on OSX */
        /* boolean_t */ cs_enforcement:1,         /* code-signing enforcement */
        /* boolean_t */ reserved_regions:1,       /* has reserved regions. The map size that userspace sees should ignore these. */
        /* reserved */ pad:16;
        unsigned int            timestamp;      /* Version number */
};
#define CAST_TO_VM_MAP_ENTRY(x) ((struct vm_map_entry *)(uintptr_t)(x))
#define vm_map_to_entry(map) CAST_TO_VM_MAP_ENTRY(&(map)->hdr.links)
#define vm_map_first_entry(map) ((map)->hdr.links.next)
#define vm_map_last_entry(map)  ((map)->hdr.links.prev)
#if     TASK_SWAPPER
/*
 *      VM map swap states.  There are no transition states.
 */
#define MAP_SW_IN        1      /* map is swapped in; residence count > 0 */
#define MAP_SW_OUT       2      /* map is out (res_count == 0) */
#endif  /* TASK_SWAPPER */
/*
 *      Type:           vm_map_version_t [exported; contents invisible]
 *
 *              Map versions may be used to quickly validate a previous
 *              lookup operation.
 *
 *              Because they are bulky objects, map versions are usually
 *              passed by reference.
 *
 *              Just a timestamp for the main map.
 */
typedef struct vm_map_version {
        unsigned int    main_timestamp;
} vm_map_version_t;
/*
 *      Type:           vm_map_copy_t [exported; contents invisible]
 *
 *              A map copy object represents a region of virtual memory
 *              that has been copied from an address map but is still
 *              in transit.
 *
 *              A map copy object may only be used by a single thread
 *              at a time.
 *
 *              There are three formats for map copy objects.
 *              The first is very similar to the main
 *              address map in structure, and as a result, some
 *              of the internal maintenance functions/macros can
 *              be used with either address maps or map copy objects.
 *
 *              The map copy object contains a header links
 *              entry onto which the other entries that represent
 *              the region are chained.
 *
 *              The second format is a single vm object.  This was used
 *              primarily in the pageout path - but is not currently used
 *              except for placeholder copy objects (see vm_map_copy_copy()).
 *
 *              The third format is a kernel buffer copy object - for data
 *              small enough that physical copies were the most efficient
 *              method. This method uses a zero-sized array unioned with
 *              other format-specific data in the 'c_u' member. This unsized
 *              array overlaps the other elements and allows us to use this
 *              extra structure space for physical memory copies. On 64-bit
 *              systems this saves ~64 bytes per vm_map_copy.
 */

struct vm_map_copy {
        int                     type;
#define VM_MAP_COPY_ENTRY_LIST          1
#define VM_MAP_COPY_OBJECT              2
#define VM_MAP_COPY_KERNEL_BUFFER       3
        vm_object_offset_t      offset;
        vm_map_size_t           size;
        union {
                struct vm_map_header                  hdr;    /* ENTRY_LIST */
                vm_object_t                           object; /* OBJECT */
                void *XNU_PTRAUTH_SIGNED_PTR("vm_map_copy.kdata") kdata;  /* KERNEL_BUFFER */
        } c_u;
};

#define cpy_hdr                 c_u.hdr
#define cpy_object              c_u.object
#define cpy_kdata               c_u.kdata

#define VM_MAP_COPY_PAGE_SHIFT(copy) ((copy)->cpy_hdr.page_shift)
#define VM_MAP_COPY_PAGE_SIZE(copy) (1 << VM_MAP_COPY_PAGE_SHIFT((copy)))
#define VM_MAP_COPY_PAGE_MASK(copy) (VM_MAP_COPY_PAGE_SIZE((copy)) - 1)
/*
 *      Useful macros for entry list copy objects
 */

#define vm_map_copy_to_entry(copy) CAST_TO_VM_MAP_ENTRY(&(copy)->cpy_hdr.links)
#define vm_map_copy_first_entry(copy)           \
                ((copy)->cpy_hdr.links.next)
#define vm_map_copy_last_entry(copy)            \
                ((copy)->cpy_hdr.links.prev)
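/*
 * Illustrative sketch (not part of the original header): walking the entries
 * of an ENTRY_LIST-format copy object with the macros above.  The loop shape
 * mirrors the map-entry list; "copy" is assumed to be a vm_map_copy_t of
 * type VM_MAP_COPY_ENTRY_LIST.
 *
 *      vm_map_entry_t entry;
 *      for (entry = vm_map_copy_first_entry(copy);
 *           entry != vm_map_copy_to_entry(copy);
 *           entry = entry->vme_next) {
 *              // process [entry->vme_start, entry->vme_end)
 *      }
 */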
extern kern_return_t
vm_map_copy_adjust_to_target(
        vm_map_copy_t           copy_map,
        vm_map_offset_t         offset,
        vm_map_copy_t           *target_copy_map_p,
        vm_map_offset_t         *overmap_start_p,
        vm_map_offset_t         *overmap_end_p,
        vm_map_offset_t         *trimmed_start_p);      /* some parameters elided */
/*
 *      Macros:         vm_map_lock, etc. [internal use only]
 *
 *              Perform locking on the data portion of a map.
 *      When multiple maps are to be locked, order by map address.
 *      (See vm_map.c::vm_remap())
 */

#define vm_map_lock_init(map)                                           \
        ((map)->timestamp = 0 ,                                         \
        lck_rw_init(&(map)->lock, &vm_map_lck_grp, &vm_map_lck_rw_attr))

#define vm_map_lock(map)                     \
        MACRO_BEGIN                          \
        DTRACE_VM(vm_map_lock_w);            \
        lck_rw_lock_exclusive(&(map)->lock); \
        MACRO_END

#define vm_map_unlock(map)          \
        MACRO_BEGIN                 \
        DTRACE_VM(vm_map_unlock_w); \
        (map)->timestamp++;         \
        lck_rw_done(&(map)->lock);  \
        MACRO_END

#define vm_map_lock_read(map)             \
        MACRO_BEGIN                       \
        DTRACE_VM(vm_map_lock_r);         \
        lck_rw_lock_shared(&(map)->lock); \
        MACRO_END

#define vm_map_unlock_read(map)     \
        MACRO_BEGIN                 \
        DTRACE_VM(vm_map_unlock_r); \
        lck_rw_done(&(map)->lock);  \
        MACRO_END

#define vm_map_lock_write_to_read(map)                 \
        MACRO_BEGIN                                    \
        DTRACE_VM(vm_map_lock_downgrade);              \
        (map)->timestamp++;                            \
        lck_rw_lock_exclusive_to_shared(&(map)->lock); \
        MACRO_END
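/*
 * Illustrative sketch (not part of the original header): the usual
 * lock pattern.  Write unlock (and downgrade) bump the map timestamp, which
 * is what vm_map_version_t consumers compare against; read lock/unlock do
 * not change it.
 *
 *      vm_map_lock(map);
 *      // ... modify map entries under the exclusive lock ...
 *      vm_map_unlock(map);             // bumps map->timestamp
 *
 *      vm_map_lock_read(map);
 *      // ... lookups under the shared lock; timestamp is unchanged ...
 *      vm_map_unlock_read(map);
 */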
__attribute__((always_inline))
int vm_map_lock_read_to_write(vm_map_t map);

__attribute__((always_inline))
boolean_t vm_map_try_lock(vm_map_t map);

__attribute__((always_inline))
boolean_t vm_map_try_lock_read(vm_map_t map);

int vm_self_region_page_shift(vm_map_t target_map);
int vm_self_region_page_shift_safely(vm_map_t target_map);
#if MACH_ASSERT || DEBUG
#define vm_map_lock_assert_held(map) \
        lck_rw_assert(&(map)->lock, LCK_RW_ASSERT_HELD)
#define vm_map_lock_assert_shared(map)  \
        lck_rw_assert(&(map)->lock, LCK_RW_ASSERT_SHARED)
#define vm_map_lock_assert_exclusive(map) \
        lck_rw_assert(&(map)->lock, LCK_RW_ASSERT_EXCLUSIVE)
#define vm_map_lock_assert_notheld(map) \
        lck_rw_assert(&(map)->lock, LCK_RW_ASSERT_NOTHELD)
#else  /* MACH_ASSERT || DEBUG */
#define vm_map_lock_assert_held(map)
#define vm_map_lock_assert_shared(map)
#define vm_map_lock_assert_exclusive(map)
#define vm_map_lock_assert_notheld(map)
#endif /* MACH_ASSERT || DEBUG */
/*
 *      Exported procedures that operate on vm_map_t.
 */

/* Initialize the module */
extern void             vm_map_init(void);

extern void             vm_kernel_reserved_entry_init(void);

/* Allocate a range in the specified virtual address map and
 * return the entry allocated for that range. */
extern kern_return_t vm_map_find_space(
        vm_map_address_t        *address,                               /* OUT */
        vm_map_offset_t         mask,
        vm_map_kernel_flags_t   vmk_flags,
        vm_map_entry_t          *o_entry);      /* OUT; some parameters elided */

/* flags for vm_map_find_space */
#define VM_MAP_FIND_LAST_FREE              0x01
extern void vm_map_clip_start(
        vm_map_entry_t  entry,
        vm_map_offset_t endaddr);       /* some parameters elided */
extern void vm_map_clip_end(
        vm_map_entry_t  entry,
        vm_map_offset_t endaddr);       /* some parameters elided */
extern boolean_t vm_map_entry_should_cow_for_true_share(
        vm_map_entry_t  entry);
/* Lookup the map entry containing the specified address in the given map */
extern boolean_t        vm_map_lookup_entry(
        vm_map_address_t        address,
        vm_map_entry_t          *entry);        /* OUT; some parameters elided */

extern void             vm_map_copy_remap(
        vm_map_entry_t          where,
        vm_map_offset_t         adjustment,
        vm_inherit_t            inheritance);   /* some parameters elided */
/* Find the VM object, offset, and protection for a given virtual address
 * in the specified map, assuming a page fault of the type specified. */
extern kern_return_t    vm_map_lookup_locked(
        vm_map_t                *var_map,                               /* IN/OUT */
        vm_map_address_t        vaddr,
        vm_prot_t               fault_type,
        int                     object_lock_type,
        vm_map_version_t        *out_version,                           /* OUT */
        vm_object_t             *object,                                /* OUT */
        vm_object_offset_t      *offset,                                /* OUT */
        vm_prot_t               *out_prot,                              /* OUT */
        boolean_t               *wired,                                 /* OUT */
        vm_object_fault_info_t  fault_info,                             /* OUT */
        vm_map_t                *real_map,                              /* OUT */
        bool                    *contended);                            /* OUT */
/* Verifies that the map has not changed since the given version. */
extern boolean_t        vm_map_verify(
        vm_map_version_t        *version);      /* REF; some parameters elided */
extern vm_map_entry_t   vm_map_entry_insert(
        vm_map_entry_t          insp_entry,
        vm_map_offset_t         start,
        vm_object_offset_t      offset,
        boolean_t               needs_copy,
        boolean_t               in_transition,
        vm_prot_t               cur_protection,
        vm_prot_t               max_protection,
        vm_behavior_t           behavior,
        vm_inherit_t            inheritance,
        unsigned short          wired_count,
        boolean_t               no_copy_on_read,
        unsigned int            superpage_size,
        boolean_t               clear_map_aligned,
        boolean_t               used_for_jit,
        boolean_t               translated_allow_execute);      /* some parameters elided */
/*
 *      Functions implemented as macros
 */
#define         vm_map_min(map) ((map)->min_offset)
/* Lowest valid address in a map */

#define         vm_map_max(map) ((map)->max_offset)
/* Highest valid address */

#define         vm_map_pmap(map)        ((map)->pmap)
/* Physical map associated
 * with this address map */
/*
 * Macros/functions for map residence counts and swapin/out of vm maps
 */

#if     TASK_SWAPPER
#if     MACH_ASSERT

/* Gain a reference to an existing map */
extern void             vm_map_reference(
        vm_map_t        map);
/* Lose a residence count */
extern void             vm_map_res_deallocate(
        vm_map_t        map);
/* Gain a residence count on a map */
extern void             vm_map_res_reference(
        vm_map_t        map);
/* Gain reference & residence counts to possibly swapped-out map */
extern void             vm_map_reference_swap(
        vm_map_t        map);
#else   /* MACH_ASSERT */

#define vm_map_reference(map)           \
MACRO_BEGIN                             \
        vm_map_t Map = (map);                    \
        if (Map) {                               \
                lck_mtx_lock(&Map->s_lock);      \
                /* ... */                        \
                os_ref_retain(&Map->map_refcnt); \
                lck_mtx_unlock(&Map->s_lock);    \
        }                                        \
MACRO_END

#define vm_map_res_reference(map)               \
MACRO_BEGIN                                     \
        vm_map_t Lmap = (map);          \
        if (Lmap->res_count == 0) {             \
                lck_mtx_unlock(&Lmap->s_lock);\
                /* ... */                       \
                vm_map_swapin(Lmap);            \
                lck_mtx_lock(&Lmap->s_lock);    \
                /* ... */                       \
                vm_map_unlock(Lmap);            \
        }                                       \
MACRO_END

#define vm_map_res_deallocate(map)              \
MACRO_BEGIN                                     \
        vm_map_t Map = (map);           \
        if (--Map->res_count == 0) {    \
                lck_mtx_unlock(&Map->s_lock);   \
                /* ... */                       \
                vm_map_swapout(Map);            \
                vm_map_unlock(Map);             \
                lck_mtx_lock(&Map->s_lock);     \
        }                                       \
MACRO_END

#define vm_map_reference_swap(map)      \
MACRO_BEGIN                             \
        vm_map_t Map = (map);           \
        lck_mtx_lock(&Map->s_lock);     \
        os_ref_retain(&Map->map_refcnt);\
        /* ... */                       \
        vm_map_res_reference(Map);      \
        lck_mtx_unlock(&Map->s_lock);   \
MACRO_END

#endif  /* MACH_ASSERT */
extern void             vm_map_swapin(
        vm_map_t        map);

extern void             vm_map_swapout(
        vm_map_t        map);

#else   /* TASK_SWAPPER */

#define vm_map_reference(map)                   \
MACRO_BEGIN                                     \
        vm_map_t Map = (map);                   \
        if (Map) {                              \
                lck_mtx_lock(&Map->s_lock);     \
                os_ref_retain(&Map->map_refcnt);\
                lck_mtx_unlock(&Map->s_lock);   \
        }                                       \
MACRO_END

#define vm_map_reference_swap(map)      vm_map_reference(map)
#define vm_map_res_reference(map)
#define vm_map_res_deallocate(map)

#endif  /* TASK_SWAPPER */
/*
 *      Submap object.  Must be used to create memory to be put
 *      in a submap by vm_map_submap.
 */
extern vm_object_t      vm_submap_object;
/*
 *      Wait and wakeup macros for in_transition map entries.
 */
#define vm_map_entry_wait(map, interruptible)           \
        ((map)->timestamp++ ,                           \
         lck_rw_sleep(&(map)->lock, LCK_SLEEP_EXCLUSIVE|LCK_SLEEP_PROMOTED_PRI, \
                                  (event_t)&(map)->hdr, interruptible))

#define vm_map_entry_wakeup(map)        \
        thread_wakeup((event_t)(&(map)->hdr))
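/*
 * Illustrative sketch (not part of the original header): the in_transition
 * hand-off these macros support.  A thread that finds an entry marked
 * in_transition records that it wants a wakeup and sleeps (the wait macro
 * drops the map lock); the thread that clears in_transition issues the
 * wakeup.  Field names match the vm_map_entry bit-fields above.
 *
 *      // waiter (holds the map lock exclusively):
 *      if (entry->in_transition) {
 *              entry->needs_wakeup = TRUE;
 *              vm_map_entry_wait(map, THREAD_UNINT);
 *              // ... re-lookup the entry after waking up ...
 *      }
 *
 *      // updater, once the entry is stable again:
 *      entry->in_transition = FALSE;
 *      if (entry->needs_wakeup) {
 *              entry->needs_wakeup = FALSE;
 *              vm_map_entry_wakeup(map);
 *      }
 */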
#define vm_map_ref_fast(map)                    \
        MACRO_BEGIN                                     \
        lck_mtx_lock(&map->s_lock);                     \
        /* ... */                                       \
        vm_map_res_reference(map);                      \
        lck_mtx_unlock(&map->s_lock);                   \
        MACRO_END

#define vm_map_dealloc_fast(map)                \
        MACRO_BEGIN                                     \
        int c;                                          \
        lck_mtx_lock(&map->s_lock);                     \
        c = --map->ref_count;                           \
        if (c > 0)                                      \
                vm_map_res_deallocate(map);             \
        lck_mtx_unlock(&map->s_lock);                   \
        if (c == 0)                                     \
                vm_map_destroy(map);                    \
        MACRO_END
/* simplify map entries */
extern void             vm_map_simplify_entry(
        vm_map_t        map,
        vm_map_entry_t  this_entry);
extern void             vm_map_simplify(
        vm_map_t        map,
        vm_map_offset_t start);
/* Move the information in a map copy object to a new map copy object */
extern vm_map_copy_t    vm_map_copy_copy(
        vm_map_copy_t   copy);

/* Create a copy object from an object. */
extern kern_return_t    vm_map_copyin_object(
        vm_object_t             object,
        vm_object_offset_t      offset,
        vm_object_size_t        size,
        vm_map_copy_t           *copy_result);                         /* OUT */

extern kern_return_t    vm_map_random_address_for_size(
        vm_map_t                map,
        vm_map_offset_t         *address,
        vm_map_size_t           size);
/* Enter a mapping */
extern kern_return_t    vm_map_enter(
        vm_map_offset_t         *address,
        vm_map_offset_t         mask,
        vm_map_kernel_flags_t   vmk_flags,
        vm_object_offset_t      offset,
        boolean_t               needs_copy,
        vm_prot_t               cur_protection,
        vm_prot_t               max_protection,
        vm_inherit_t            inheritance);   /* some parameters elided */

#ifdef __arm64__
extern kern_return_t    vm_map_enter_fourk(
        vm_map_offset_t         *address,
        vm_map_offset_t         mask,
        vm_map_kernel_flags_t   vmk_flags,
        vm_object_offset_t      offset,
        boolean_t               needs_copy,
        vm_prot_t               cur_protection,
        vm_prot_t               max_protection,
        vm_inherit_t            inheritance);   /* some parameters elided */
#endif /* __arm64__ */
/* XXX should go away - replaced with regular enter of contig object */
extern  kern_return_t   vm_map_enter_cpm(
        vm_map_address_t        *addr);         /* some parameters elided */
extern kern_return_t vm_map_remap(
        vm_map_t                target_map,
        vm_map_offset_t         *address,
        vm_map_offset_t         mask,
        vm_map_kernel_flags_t   vmk_flags,
        vm_map_offset_t         memory_address,
        vm_prot_t               *cur_protection,
        vm_prot_t               *max_protection,
        vm_inherit_t            inheritance);   /* some parameters elided */
/*
 * Read and write from a kernel buffer to a specified map.
 */
extern  kern_return_t   vm_map_write_user(
        vm_map_offset_t         dst_addr);      /* some parameters elided */

extern  kern_return_t   vm_map_read_user(
        vm_map_offset_t         src_addr);      /* some parameters elided */
/* Create a new task map using an existing task map as a template. */
extern vm_map_t         vm_map_fork(
        /* parameters elided */);

#define VM_MAP_FORK_SHARE_IF_INHERIT_NONE       0x00000001
#define VM_MAP_FORK_PRESERVE_PURGEABLE          0x00000002
#define VM_MAP_FORK_CORPSE_FOOTPRINT            0x00000004
/* Change inheritance */
extern kern_return_t    vm_map_inherit(
        vm_map_offset_t         start,
        vm_map_offset_t         end,
        vm_inherit_t            new_inheritance);       /* some parameters elided */
/* Add or remove machine-dependent attributes from map regions */
extern kern_return_t    vm_map_machine_attribute(
        vm_map_offset_t         start,
        vm_map_offset_t         end,
        vm_machine_attribute_t  attribute,
        vm_machine_attribute_val_t* value);     /* IN/OUT; some parameters elided */
extern kern_return_t    vm_map_msync(
        vm_map_address_t        address,
        vm_sync_t               sync_flags);    /* some parameters elided */
/* Set paging behavior */
extern kern_return_t    vm_map_behavior_set(
        vm_map_offset_t         start,
        vm_map_offset_t         end,
        vm_behavior_t           new_behavior);  /* some parameters elided */
extern kern_return_t vm_map_region(
        vm_map_offset_t         *address,
        vm_map_size_t           *size,
        vm_region_flavor_t       flavor,
        vm_region_info_t         info,
        mach_msg_type_number_t  *count,
        mach_port_t             *object_name);  /* some parameters elided */
extern kern_return_t vm_map_region_recurse_64(
        vm_map_offset_t         *address,
        vm_map_size_t           *size,
        natural_t               *nesting_depth,
        vm_region_submap_info_64_t info,
        mach_msg_type_number_t  *count);        /* some parameters elided */
extern kern_return_t vm_map_page_query_internal(
        vm_map_offset_t         offset);        /* some parameters elided */
extern kern_return_t vm_map_query_volatile(
        mach_vm_size_t  *volatile_virtual_size_p,
        mach_vm_size_t  *volatile_resident_size_p,
        mach_vm_size_t  *volatile_compressed_size_p,
        mach_vm_size_t  *volatile_pmap_size_p,
        mach_vm_size_t  *volatile_compressed_pmap_size_p);      /* some parameters elided */
extern kern_return_t    vm_map_submap(
        vm_map_offset_t         start,
        vm_map_offset_t         end,
        vm_map_offset_t         offset,
        boolean_t               use_pmap);      /* some parameters elided */
extern void vm_map_submap_pmap_clean(
        vm_map_offset_t start,
        vm_map_offset_t end,
        vm_map_offset_t offset);        /* some parameters elided */
/* Convert from a map entry port to a map */
extern vm_map_t convert_port_entry_to_map(
        /* parameters elided */);

/* Convert from a port to a vm_object */
extern vm_object_t convert_port_entry_to_object(
        /* parameters elided */);

extern kern_return_t vm_map_set_cache_attr(
        vm_map_offset_t         va);            /* some parameters elided */
/* definitions related to overriding the NX behavior */

#define VM_ABI_32       0x1
#define VM_ABI_64       0x2

extern int override_nx(vm_map_t map, uint32_t user_tag);
extern void vm_map_region_top_walk(
        vm_map_entry_t entry,
        vm_region_top_info_t top);
extern void vm_map_region_walk(
        vm_map_entry_t entry,
        vm_object_offset_t offset,
        vm_object_size_t range,
        vm_region_extended_info_t extended,
        boolean_t look_for_pages,
        mach_msg_type_number_t count);  /* some parameters elided */

extern void vm_map_copy_footprint_ledgers(
        /* parameters elided */);
extern void vm_map_copy_ledger(
        /* parameters elided */);
/*
 * Represents a single region of virtual address space that should be reserved
 * (pre-mapped) in a user address space.
 */
struct vm_reserved_region {
        /* ... */
        vm_map_offset_t vmrr_addr;
        vm_map_size_t   vmrr_size;
};

/*
 * Return back a machine-dependent array of address space regions that should be
 * reserved by the VM. This function is defined in the machine-dependent
 * machine_routines.c files.
 */
extern size_t ml_get_vm_reserved_regions(
        struct vm_reserved_region **regions);   /* some parameters elided */

#endif /* MACH_KERNEL_PRIVATE */
/* Create an empty map */
extern vm_map_t         vm_map_create(
        vm_map_offset_t         min_off,
        vm_map_offset_t         max_off,
        boolean_t               pageable);      /* some parameters elided */

extern vm_map_t vm_map_create_options(
        vm_map_offset_t         min_off,
        vm_map_offset_t         max_off);       /* some parameters elided */

#define VM_MAP_CREATE_PAGEABLE          0x00000001
#define VM_MAP_CREATE_CORPSE_FOOTPRINT  0x00000002
#define VM_MAP_CREATE_ALL_OPTIONS (VM_MAP_CREATE_PAGEABLE | \
                                   VM_MAP_CREATE_CORPSE_FOOTPRINT)
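/*
 * Illustrative sketch (not part of the original header): validating an
 * options word against the set of defined VM_MAP_CREATE_* flags before
 * handing it to vm_map_create_options().  The variable name is hypothetical.
 *
 *      int options = VM_MAP_CREATE_PAGEABLE | VM_MAP_CREATE_CORPSE_FOOTPRINT;
 *      assert((options & ~VM_MAP_CREATE_ALL_OPTIONS) == 0);
 */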
extern vm_map_size_t    vm_map_adjusted_size(vm_map_t map);

extern void             vm_map_disable_hole_optimization(vm_map_t map);

/* Get rid of a map */
extern void             vm_map_destroy(
        /* parameters elided */);

/* Lose a reference */
extern void             vm_map_deallocate(
        vm_map_t                map);

/* Lose a reference */
extern void             vm_map_inspect_deallocate(
        vm_map_inspect_t        map);

/* Lose a reference */
extern void             vm_map_read_deallocate(
        /* parameters elided */);

extern vm_map_t         vm_map_switch(
        vm_map_t                map);
/* Change protection */
extern kern_return_t    vm_map_protect(
        vm_map_offset_t         start,
        vm_map_offset_t         end);           /* some parameters elided */

/* Check protection */
extern boolean_t vm_map_check_protection(
        vm_map_offset_t         start,
        vm_map_offset_t         end,
        vm_prot_t               protection);    /* some parameters elided */

extern boolean_t vm_map_cs_enforcement(
        vm_map_t                map);
extern void vm_map_cs_enforcement_set(
        /* parameters elided */);
/* wire down a region */

#ifdef XNU_KERNEL_PRIVATE

extern kern_return_t    vm_map_wire_kernel(
        vm_map_offset_t         start,
        vm_map_offset_t         end,
        vm_prot_t               access_type,
        boolean_t               user_wire);     /* some parameters elided */

extern kern_return_t    vm_map_wire_and_extract_kernel(
        vm_map_offset_t         start,
        vm_prot_t               access_type,
        boolean_t               user_wire,
        ppnum_t                 *physpage_p);   /* some parameters elided */

/* kext exported versions */

extern kern_return_t    vm_map_wire_external(
        vm_map_offset_t         start,
        vm_map_offset_t         end,
        vm_prot_t               access_type,
        boolean_t               user_wire);     /* some parameters elided */

extern kern_return_t    vm_map_wire_and_extract_external(
        vm_map_offset_t         start,
        vm_prot_t               access_type,
        boolean_t               user_wire,
        ppnum_t                 *physpage_p);   /* some parameters elided */

#else /* XNU_KERNEL_PRIVATE */

extern kern_return_t    vm_map_wire(
        vm_map_offset_t         start,
        vm_map_offset_t         end,
        vm_prot_t               access_type,
        boolean_t               user_wire);     /* some parameters elided */

extern kern_return_t    vm_map_wire_and_extract(
        vm_map_offset_t         start,
        vm_prot_t               access_type,
        boolean_t               user_wire,
        ppnum_t                 *physpage_p);   /* some parameters elided */

#endif /* !XNU_KERNEL_PRIVATE */
/* unwire a region */
extern kern_return_t    vm_map_unwire(
        vm_map_offset_t         start,
        vm_map_offset_t         end,
        boolean_t               user_wire);     /* some parameters elided */
#ifdef XNU_KERNEL_PRIVATE

/* Enter a mapping of a memory object */
extern kern_return_t    vm_map_enter_mem_object(
        vm_map_offset_t         *address,
        vm_map_offset_t         mask,
        vm_map_kernel_flags_t   vmk_flags,
        vm_object_offset_t      offset,
        boolean_t               needs_copy,
        vm_prot_t               cur_protection,
        vm_prot_t               max_protection,
        vm_inherit_t            inheritance);   /* some parameters elided */

/* Enter a mapping of a memory object */
extern kern_return_t    vm_map_enter_mem_object_prefault(
        vm_map_offset_t         *address,
        vm_map_offset_t         mask,
        vm_map_kernel_flags_t   vmk_flags,
        vm_object_offset_t      offset,
        vm_prot_t               cur_protection,
        vm_prot_t               max_protection,
        upl_page_list_ptr_t     page_list,
        unsigned int            page_list_count);       /* some parameters elided */

/* Enter a mapping of a memory object */
extern kern_return_t    vm_map_enter_mem_object_control(
        vm_map_offset_t         *address,
        vm_map_offset_t         mask,
        vm_map_kernel_flags_t   vmk_flags,
        memory_object_control_t control,
        vm_object_offset_t      offset,
        boolean_t               needs_copy,
        vm_prot_t               cur_protection,
        vm_prot_t               max_protection,
        vm_inherit_t            inheritance);   /* some parameters elided */

extern kern_return_t    vm_map_terminate(
        vm_map_t                map);

#endif /* !XNU_KERNEL_PRIVATE */
/* Deallocate a region */
extern kern_return_t    vm_map_remove(
        vm_map_offset_t         start,
        vm_map_offset_t         end);           /* some parameters elided */

/* Deallocate a region when the map is already locked */
extern kern_return_t    vm_map_remove_locked(
        vm_map_offset_t     start,
        vm_map_offset_t     end);               /* some parameters elided */
/* Discard a copy without using it */
extern void             vm_map_copy_discard(
        vm_map_copy_t           copy);

/* Overwrite existing memory with a copy */
extern kern_return_t    vm_map_copy_overwrite(
        vm_map_address_t        dst_addr,
        vm_map_size_t           copy_size,
        boolean_t               interruptible); /* some parameters elided */

#define VM_MAP_COPY_OVERWRITE_OPTIMIZATION_THRESHOLD_PAGES      (3)
/* Returns TRUE if the size of the vm_map_copy equals the size parameter, FALSE otherwise */
extern boolean_t        vm_map_copy_validate_size(
        vm_map_size_t           *size);         /* some parameters elided */
/* Place a copy into a map */
extern kern_return_t    vm_map_copyout(
        vm_map_address_t        *dst_addr,                              /* OUT */
        vm_map_copy_t           copy);          /* some parameters elided */

extern kern_return_t vm_map_copyout_size(
        vm_map_address_t        *dst_addr,                              /* OUT */
        vm_map_size_t           copy_size);     /* some parameters elided */

extern kern_return_t    vm_map_copyout_internal(
        vm_map_address_t        *dst_addr,      /* OUT */
        vm_map_size_t           copy_size,
        boolean_t               consume_on_success,
        vm_prot_t               cur_protection,
        vm_prot_t               max_protection,
        vm_inherit_t            inheritance);   /* some parameters elided */
extern kern_return_t    vm_map_copyin(
        vm_map_address_t        src_addr,
        boolean_t               src_destroy,
        vm_map_copy_t           *copy_result);  /* OUT; some parameters elided */

extern kern_return_t    vm_map_copyin_common(
        vm_map_address_t        src_addr,
        boolean_t               src_destroy,
        boolean_t               src_volatile,
        vm_map_copy_t           *copy_result,                           /* OUT */
        boolean_t               use_maxprot);   /* some parameters elided */

#define VM_MAP_COPYIN_SRC_DESTROY       0x00000001
#define VM_MAP_COPYIN_USE_MAXPROT       0x00000002
#define VM_MAP_COPYIN_ENTRY_LIST        0x00000004
#define VM_MAP_COPYIN_PRESERVE_PURGEABLE 0x00000008
#define VM_MAP_COPYIN_ALL_FLAGS         0x0000000F
extern kern_return_t    vm_map_copyin_internal(
        vm_map_address_t        src_addr,
        vm_map_copy_t           *copy_result);  /* OUT; some parameters elided */
extern kern_return_t    vm_map_copy_extract(
        vm_map_address_t        src_addr,
        vm_prot_t               required_prot,
        vm_map_copy_t           *copy_result,   /* OUT */
        vm_prot_t               *cur_prot,      /* OUT */
        vm_prot_t               *max_prot,      /* OUT */
        vm_inherit_t            inheritance,
        vm_map_kernel_flags_t   vmk_flags);     /* some parameters elided */
extern void             vm_map_disable_NX(
        vm_map_t                map);

extern void             vm_map_disallow_data_exec(
        vm_map_t                map);

extern void             vm_map_set_64bit(
        vm_map_t                map);

extern void             vm_map_set_32bit(
        vm_map_t                map);

extern void             vm_map_set_jumbo(
        vm_map_t                map);

extern void             vm_map_set_jit_entitled(
        vm_map_t                map);

extern void             vm_map_set_max_addr(
        vm_map_t                map, vm_map_offset_t new_max_offset);

extern boolean_t        vm_map_has_hard_pagezero(
        vm_map_t                map,
        vm_map_offset_t         pagezero_size);

extern void             vm_commit_pagezero_status(vm_map_t      tmap);
#if 0 /* condition elided */
static inline boolean_t
vm_map_is_64bit(__unused vm_map_t map)
{
        /* ... */
}
#else
extern boolean_t        vm_map_is_64bit(
        vm_map_t                map);
#endif

extern kern_return_t    vm_map_raise_max_offset(
        vm_map_t        map,
        vm_map_offset_t new_max_offset);

extern kern_return_t    vm_map_raise_min_offset(
        vm_map_t        map,
        vm_map_offset_t new_min_offset);
#if !CONFIG_EMBEDDED
extern void vm_map_set_high_start(
        vm_map_t        map,
        vm_map_offset_t high_start);
#endif /* !CONFIG_EMBEDDED */

extern vm_map_offset_t  vm_compute_max_offset(
        /* parameters elided */);

extern void             vm_map_get_max_aslr_slide_section(
        vm_map_t        map,
        int64_t         *max_sections,
        int64_t         *section_size);

extern uint64_t         vm_map_get_max_aslr_slide_pages(
        vm_map_t        map);

extern uint64_t         vm_map_get_max_loader_aslr_slide_pages(
        vm_map_t        map);

extern void             vm_map_set_user_wire_limit(
        /* parameters elided */);
extern void vm_map_switch_protect(
        /* parameters elided */);

extern void vm_map_iokit_mapped_region(
        /* parameters elided */);

extern void vm_map_iokit_unmapped_region(
        /* parameters elided */);

extern boolean_t first_free_is_valid(vm_map_t);
extern int              vm_map_page_shift(
        vm_map_t                map);

extern vm_map_offset_t  vm_map_page_mask(
        vm_map_t                map);

extern int              vm_map_page_size(
        vm_map_t                map);

extern vm_map_offset_t  vm_map_round_page_mask(
        vm_map_offset_t         offset,
        vm_map_offset_t         mask);

extern vm_map_offset_t  vm_map_trunc_page_mask(
        vm_map_offset_t         offset,
        vm_map_offset_t         mask);

extern boolean_t        vm_map_page_aligned(
        vm_map_offset_t         offset,
        vm_map_offset_t         mask);
static inline bool
vm_map_range_overflows(vm_map_offset_t addr, vm_map_size_t size)
{
        vm_map_offset_t sum;
        return os_add_overflow(addr, size, &sum);
}

static inline bool
mach_vm_range_overflows(mach_vm_offset_t addr, mach_vm_size_t size)
{
        mach_vm_offset_t sum;
        return os_add_overflow(addr, size, &sum);
}
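/*
 * Illustrative sketch (not part of the original header): typical use of the
 * overflow helpers above to reject a user-supplied address/size pair before
 * computing "addr + size".  The surrounding code is hypothetical.
 *
 *      if (vm_map_range_overflows(addr, size)) {
 *              return KERN_INVALID_ARGUMENT;
 *      }
 *      vm_map_offset_t end = addr + size;      // now known not to wrap
 */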
#ifdef XNU_KERNEL_PRIVATE

#if XNU_TARGET_OS_OSX
extern void vm_map_mark_alien(vm_map_t map);
#endif /* XNU_TARGET_OS_OSX */

extern kern_return_t vm_map_page_info(
        vm_map_offset_t         offset,
        vm_page_info_flavor_t   flavor,
        vm_page_info_t          info,
        mach_msg_type_number_t  *count);        /* some parameters elided */
extern kern_return_t vm_map_page_range_info_internal(
        vm_map_offset_t         start_offset,
        vm_map_offset_t         end_offset,
        int                     effective_page_shift,
        vm_page_info_flavor_t   flavor,
        vm_page_info_t          info,
        mach_msg_type_number_t  *count);        /* some parameters elided */
#endif /* XNU_KERNEL_PRIVATE */
#ifdef  MACH_KERNEL_PRIVATE

/*
 *      Macros to invoke vm_map_copyin_common.  vm_map_copyin is the
 *      usual form; it handles a copyin based on the current protection
 *      (current protection == VM_PROT_NONE is a failure).
 *      vm_map_copyin_maxprot handles a copyin based on maximum possible
 *      access.  The difference is that a region with no current access
 *      BUT possible maximum access is rejected by vm_map_copyin(), but
 *      returned by vm_map_copyin_maxprot.
 */
#define vm_map_copyin(src_map, src_addr, len, src_destroy, copy_result) \
                vm_map_copyin_common(src_map, src_addr, len, src_destroy, \
                                        FALSE, copy_result, FALSE)

#define vm_map_copyin_maxprot(src_map, \
            src_addr, len, src_destroy, copy_result) \
                vm_map_copyin_common(src_map, src_addr, len, src_destroy, \
                                        FALSE, copy_result, TRUE)
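/*
 * Illustrative sketch (not part of the original header): both macros expand
 * to vm_map_copyin_common(); only the final use_maxprot argument differs.
 * A region that is currently VM_PROT_NONE but has a non-NONE maximum
 * protection fails the first call and can succeed with the second.
 *
 *      vm_map_copy_t copy;
 *      kern_return_t kr;
 *      kr = vm_map_copyin(src_map, src_addr, len, FALSE, &copy);          // current protection
 *      kr = vm_map_copyin_maxprot(src_map, src_addr, len, FALSE, &copy);  // maximum protection
 */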
/*
 * Internal macros for rounding and truncation of vm_map offsets and sizes
 */
#define VM_MAP_ROUND_PAGE(x, pgmask) (((vm_map_offset_t)(x) + (pgmask)) & ~((signed)(pgmask)))
#define VM_MAP_TRUNC_PAGE(x, pgmask) ((vm_map_offset_t)(x) & ~((signed)(pgmask)))

/*
 * Macros for rounding and truncation of vm_map offsets and sizes
 */
#define VM_MAP_PAGE_SHIFT(map) ((map) ? (map)->hdr.page_shift : PAGE_SHIFT)
#define VM_MAP_PAGE_SIZE(map) (1 << VM_MAP_PAGE_SHIFT((map)))
#define VM_MAP_PAGE_MASK(map) (VM_MAP_PAGE_SIZE((map)) - 1)
#define VM_MAP_PAGE_ALIGNED(x, pgmask) (((x) & (pgmask)) == 0)
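/*
 * Illustrative sketch (not part of the original header): rounding with the
 * map's own page size, which may differ from the kernel's PAGE_SIZE (e.g. a
 * 4K map on a 16K-page system).  Variable names are hypothetical.
 *
 *      vm_map_offset_t mask  = VM_MAP_PAGE_MASK(map);
 *      vm_map_offset_t start = VM_MAP_TRUNC_PAGE(addr, mask);
 *      vm_map_offset_t end   = VM_MAP_ROUND_PAGE(addr + size, mask);
 *      assert(VM_MAP_PAGE_ALIGNED(start, mask));
 */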
static inline bool
VM_MAP_IS_EXOTIC(
        vm_map_t map __unused)
{
#if __arm64__
        if (VM_MAP_PAGE_SHIFT(map) < PAGE_SHIFT ||
            pmap_is_exotic(map->pmap)) {
                return true;
        }
#endif /* __arm64__ */
        return false;
}

static inline bool
VM_MAP_IS_ALIEN(
        vm_map_t map __unused)
{
        /*
         * An "alien" process/task/map/pmap should mostly behave
         * as it currently would on iOS.
         */
#if XNU_TARGET_OS_OSX
        if (map->is_alien) {
                return true;
        }
        return false;
#else /* XNU_TARGET_OS_OSX */
        return true;
#endif /* XNU_TARGET_OS_OSX */
}
static inline bool
VM_MAP_POLICY_WX_FAIL(
        vm_map_t map __unused)
{
        if (VM_MAP_IS_ALIEN(map)) {
                /* ... */
        }
        /* ... */
}

static inline bool
VM_MAP_POLICY_WX_STRIP_X(
        vm_map_t map __unused)
{
        if (VM_MAP_IS_ALIEN(map)) {
                /* ... */
        }
        /* ... */
}

static inline bool
VM_MAP_POLICY_ALLOW_MULTIPLE_JIT(
        vm_map_t map __unused)
{
        if (VM_MAP_IS_ALIEN(map)) {
                /* ... */
        }
        /* ... */
}

static inline bool
VM_MAP_POLICY_ALLOW_JIT_RANDOM_ADDRESS(
        vm_map_t map)
{
        return VM_MAP_IS_ALIEN(map);
}

static inline bool
VM_MAP_POLICY_ALLOW_JIT_INHERIT(
        vm_map_t map __unused)
{
        if (VM_MAP_IS_ALIEN(map)) {
                /* ... */
        }
        /* ... */
}

static inline bool
VM_MAP_POLICY_ALLOW_JIT_SHARING(
        vm_map_t map __unused)
{
        if (VM_MAP_IS_ALIEN(map)) {
                /* ... */
        }
        /* ... */
}

static inline bool
VM_MAP_POLICY_ALLOW_JIT_COPY(
        vm_map_t map __unused)
{
        if (VM_MAP_IS_ALIEN(map)) {
                /* ... */
        }
        /* ... */
}

static inline bool
VM_MAP_POLICY_WRITABLE_SHARED_REGION(
        vm_map_t map __unused)
{
#if __x86_64__
        /* ... */
#else /* __x86_64__ */
        if (VM_MAP_IS_EXOTIC(map)) {
                /* ... */
        }
        /* ... */
#endif /* __x86_64__ */
}
static inline void
vm_prot_to_wimg(unsigned int prot, unsigned int *wimg)
{
        switch (prot) {
        case MAP_MEM_NOOP:                      break;
        case MAP_MEM_IO:                        *wimg = VM_WIMG_IO; break;
        case MAP_MEM_COPYBACK:                  *wimg = VM_WIMG_USE_DEFAULT; break;
        case MAP_MEM_INNERWBACK:                *wimg = VM_WIMG_INNERWBACK; break;
        case MAP_MEM_POSTED:                    *wimg = VM_WIMG_POSTED; break;
        case MAP_MEM_POSTED_REORDERED:          *wimg = VM_WIMG_POSTED_REORDERED; break;
        case MAP_MEM_POSTED_COMBINED_REORDERED: *wimg = VM_WIMG_POSTED_COMBINED_REORDERED; break;
        case MAP_MEM_WTHRU:                     *wimg = VM_WIMG_WTHRU; break;
        case MAP_MEM_WCOMB:                     *wimg = VM_WIMG_WCOMB; break;
        case MAP_MEM_RT:                        *wimg = VM_WIMG_RT; break;
        default:
                break;
        }
}

#endif /* MACH_KERNEL_PRIVATE */
#ifdef XNU_KERNEL_PRIVATE
extern kern_return_t vm_map_set_page_shift(vm_map_t map, int pageshift);
extern bool vm_map_is_exotic(vm_map_t map);
extern bool vm_map_is_alien(vm_map_t map);
#endif /* XNU_KERNEL_PRIVATE */

#define vm_map_round_page(x, pgmask) (((vm_map_offset_t)(x) + (pgmask)) & ~((signed)(pgmask)))
#define vm_map_trunc_page(x, pgmask) ((vm_map_offset_t)(x) & ~((signed)(pgmask)))
/*
 * Flags for vm_map_remove() and vm_map_delete()
 */
#define VM_MAP_REMOVE_NO_FLAGS          0x0
#define VM_MAP_REMOVE_KUNWIRE           0x1
#define VM_MAP_REMOVE_INTERRUPTIBLE     0x2
#define VM_MAP_REMOVE_WAIT_FOR_KWIRE    0x4
#define VM_MAP_REMOVE_SAVE_ENTRIES      0x8
#define VM_MAP_REMOVE_NO_PMAP_CLEANUP   0x10
#define VM_MAP_REMOVE_NO_MAP_ALIGN      0x20
#define VM_MAP_REMOVE_NO_UNNESTING      0x40
#define VM_MAP_REMOVE_IMMUTABLE         0x80
#define VM_MAP_REMOVE_GAPS_OK           0x100
/* Support for UPLs from vm_maps */

#ifdef XNU_KERNEL_PRIVATE

extern kern_return_t vm_map_get_upl(
        vm_map_t                target_map,
        vm_map_offset_t         map_offset,
        upl_page_info_array_t   page_info,
        unsigned int            *page_infoCnt,
        upl_control_flags_t     *flags,
        int                     force_data_sync);       /* some parameters elided */

#endif /* XNU_KERNEL_PRIVATE */
extern void
vm_map_sizes(vm_map_t map,
    vm_map_size_t * psize,
    vm_map_size_t * pfree,
    vm_map_size_t * plargest_free);
#if CONFIG_DYNAMIC_CODE_SIGNING
extern kern_return_t vm_map_sign(vm_map_t map,
    vm_map_offset_t start,
    vm_map_offset_t end);
#endif /* CONFIG_DYNAMIC_CODE_SIGNING */
extern kern_return_t vm_map_partial_reap(
        unsigned int *reclaimed_resident,
        unsigned int *reclaimed_compressed);    /* some parameters elided */
#if DEVELOPMENT || DEBUG

extern int vm_map_disconnect_page_mappings(
        /* parameters elided */);

extern kern_return_t vm_map_inject_error(vm_map_t map, vm_map_offset_t vaddr);

#endif /* DEVELOPMENT || DEBUG */
extern kern_return_t vm_map_freeze(
        unsigned int *purgeable_count,
        unsigned int *wired_count,
        unsigned int *clean_count,
        unsigned int *dirty_count,
        unsigned int dirty_budget,
        unsigned int *shared_count,
        int          *freezer_error_code,
        boolean_t    eval_only);                /* some parameters elided */

#define FREEZER_ERROR_GENERIC                   (-1)
#define FREEZER_ERROR_EXCESS_SHARED_MEMORY      (-2)
#define FREEZER_ERROR_LOW_PRIVATE_SHARED_RATIO  (-3)
#define FREEZER_ERROR_NO_COMPRESSOR_SPACE       (-4)
#define FREEZER_ERROR_NO_SWAP_SPACE             (-5)
/*
 * In some cases, we don't have a real VM object but still want to return a
 * unique ID (to avoid a memory region looking like shared memory), so build
 * a fake pointer based on the map's ledger and the index of the ledger being
 * reported.
 */
#define VM_OBJECT_ID_FAKE(map, ledger_id) ((uint32_t)(uintptr_t)VM_KERNEL_ADDRPERM((int*)((map)->pmap->ledger)+(ledger_id)))

#endif  /* KERNEL_PRIVATE */

#endif  /* _VM_VM_MAP_H_ */