/*
 * Copyright (c) 2009 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <vm/vm_map_store_ll.h>

/*
 *      first_free_is_valid_ll:
 *
 *      Sanity check: walk the entry list and verify that map->first_free
 *      points at the entry immediately preceding the first hole in the map.
 */
boolean_t
first_free_is_valid_ll( vm_map_t map )
{
        vm_map_entry_t  entry, next;

        entry = vm_map_to_entry(map);
        next = entry->vme_next;
        /* Advance while "next" begins where "entry" ends, i.e. no hole yet. */
        while (vm_map_trunc_page(next->vme_start,
            VM_MAP_PAGE_MASK(map)) ==
            vm_map_trunc_page(entry->vme_end,
            VM_MAP_PAGE_MASK(map)) ||
            (vm_map_trunc_page(next->vme_start,
            VM_MAP_PAGE_MASK(map)) ==
            vm_map_trunc_page(entry->vme_start,
            VM_MAP_PAGE_MASK(map)) &&
            next != vm_map_to_entry(map))) {
                entry = next;
                next = entry->vme_next;
                if (entry == vm_map_to_entry(map)) {
                        break;
                }
        }
        if (map->first_free != entry) {
                printf("Bad first_free for map %p: %p should be %p\n",
                    map, map->first_free, entry);
                return FALSE;
        }
        return TRUE;
}

/*
 *      UPDATE_FIRST_FREE_LL:
 *
 *      Updates the map->first_free pointer to the
 *      entry immediately before the first hole in the map.
 *      The map should be locked.
 */
#define UPDATE_FIRST_FREE_LL(map, new_first_free)                       \
        MACRO_BEGIN                                                     \
        if (map->disable_vmentry_reuse == FALSE) {                      \
                vm_map_t        UFF_map;                                \
                vm_map_entry_t  UFF_first_free;                         \
                vm_map_entry_t  UFF_next_entry;                         \
                                                                        \
                UFF_map = (map);                                        \
                UFF_first_free = (new_first_free);                      \
                UFF_next_entry = UFF_first_free->vme_next;              \
                while (vm_map_trunc_page(UFF_next_entry->vme_start,     \
                           VM_MAP_PAGE_MASK(UFF_map)) ==                \
                       vm_map_trunc_page(UFF_first_free->vme_end,       \
                           VM_MAP_PAGE_MASK(UFF_map)) ||                \
                       (vm_map_trunc_page(UFF_next_entry->vme_start,    \
                            VM_MAP_PAGE_MASK(UFF_map)) ==               \
                        vm_map_trunc_page(UFF_first_free->vme_start,    \
                            VM_MAP_PAGE_MASK(UFF_map)) &&               \
                        UFF_next_entry != vm_map_to_entry(UFF_map))) {  \
                        UFF_first_free = UFF_next_entry;                \
                        UFF_next_entry = UFF_first_free->vme_next;      \
                        if (UFF_first_free == vm_map_to_entry(UFF_map)) \
                                break;                                  \
                }                                                       \
                UFF_map->first_free = UFF_first_free;                   \
                assert(first_free_is_valid(UFF_map));                   \
        }                                                               \
        MACRO_END

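/*
 * Illustrative usage sketch (not part of the original sources): a caller
 * that holds the map lock and has just unlinked an entry might refresh the
 * first-free hint from the entry preceding the removed range, e.g.
 *
 *      UPDATE_FIRST_FREE_LL(map, entry->vme_prev);
 *
 * Here "map" and "entry" are hypothetical caller variables; the actual call
 * site in this file is update_first_free_ll() near the end of the file.
 */
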
#define _vm_map_entry_link_ll(hdr, after_where, entry)                  \
        MACRO_BEGIN                                                     \
        if (entry->map_aligned) {                                       \
                assert(VM_MAP_PAGE_ALIGNED((entry->vme_start),          \
                    VM_MAP_HDR_PAGE_MASK((hdr))));                      \
                assert(VM_MAP_PAGE_ALIGNED((entry->vme_end),            \
                    VM_MAP_HDR_PAGE_MASK((hdr))));                      \
        }                                                               \
        (hdr)->nentries++;                                              \
        (entry)->vme_prev = (after_where);                              \
        (entry)->vme_next = (after_where)->vme_next;                    \
        (entry)->vme_prev->vme_next = (entry)->vme_next->vme_prev = (entry); \
        MACRO_END

#define _vm_map_entry_unlink_ll(hdr, entry)                             \
        MACRO_BEGIN                                                     \
        (hdr)->nentries--;                                              \
        (entry)->vme_next->vme_prev = (entry)->vme_prev;                \
        (entry)->vme_prev->vme_next = (entry)->vme_next;                \
        MACRO_END

/*
 *      Macro:          vm_map_copy_insert
 *
 *      Description:
 *              Link a copy chain ("copy") into a map at the
 *              specified location (after "where").
 *
 *      Side effects:
 *              The copy chain is destroyed.
 *
 *      Warning:
 *              The arguments are evaluated multiple times.
 */
#define _vm_map_copy_insert_ll(map, where, copy)                        \
        MACRO_BEGIN                                                     \
        vm_map_t VMCI_map;                                              \
        vm_map_entry_t VMCI_where;                                      \
        vm_map_copy_t VMCI_copy;                                        \
                                                                        \
        VMCI_map = (map);                                               \
        VMCI_where = (where);                                           \
        VMCI_copy = (copy);                                             \
        ((VMCI_where->vme_next)->vme_prev = vm_map_copy_last_entry(VMCI_copy)) \
                ->vme_next = (VMCI_where->vme_next);                    \
        ((VMCI_where)->vme_next = vm_map_copy_first_entry(VMCI_copy))   \
                ->vme_prev = VMCI_where;                                \
        VMCI_map->hdr.nentries += VMCI_copy->cpy_hdr.nentries;          \
        update_first_free_ll(VMCI_map, VMCI_map->first_free);           \
        MACRO_END

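/*
 * Usage sketch (illustrative only, not from this file): splicing a copied
 * chain into a destination map after a known insertion point could look like
 *
 *      _vm_map_copy_insert_ll(dst_map, insertion_point, copy);
 *
 * where "dst_map", "insertion_point" and "copy" are hypothetical caller
 * variables.  Because the copy's entries are linked directly into the map's
 * list, the copy chain is destroyed, as the warning above states.
 */
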
void
vm_map_store_init_ll( __unused struct vm_map_header *hdr)
{
        return;
}

/*
 *      vm_map_lookup_entry_ll: [ internal use only ]
 *
 *      Use the linked list to find the map entry containing (or
 *      immediately preceding) the specified address
 *      in the given map; the entry is returned
 *      in the "entry" parameter.  The boolean
 *      result indicates whether the address is
 *      actually contained in the map.
 */
boolean_t
vm_map_store_lookup_entry_ll(
        vm_map_t                map,
        vm_map_offset_t         address,
        vm_map_entry_t          *entry)         /* OUT */
{
        vm_map_entry_t          cur;
        vm_map_entry_t          last;

        /*
         *      Start looking either from the head of the
         *      list, or from the hint.
         */
        cur = map->hint;

        if (cur == vm_map_to_entry(map)) {
                cur = cur->vme_next;
        }

        if (address >= cur->vme_start) {
                /*
                 *      Go from hint to end of list.
                 *
                 *      But first, make a quick check to see if
                 *      we are already looking at the entry we
                 *      want (which is usually the case).
                 *      Note also that we don't need to save the hint
                 *      here... it is the same hint (unless we are
                 *      at the header, in which case the hint didn't
                 *      buy us anything anyway).
                 */
                last = vm_map_to_entry(map);
                if ((cur != last) && (cur->vme_end > address)) {
                        *entry = cur;
                        return TRUE;
                }
        } else {
                /*
                 *      Go from start to hint, *inclusively*.
                 */
                last = cur->vme_next;
                cur = vm_map_first_entry(map);
        }

        /*
         *      Search linearly.
         */
        while (cur != last) {
                if (cur->vme_end > address) {
                        if (address >= cur->vme_start) {
                                /*
                                 *      Save this lookup for future
                                 *      hints, and return.
                                 */
                                *entry = cur;
                                SAVE_HINT_MAP_READ(map, cur);

                                return TRUE;
                        }
                        break;
                }
                cur = cur->vme_next;
        }

        /*
         *      The address is not in the map; return the entry that
         *      precedes it and remember it as the new hint.
         */
        *entry = cur->vme_prev;
        SAVE_HINT_MAP_READ(map, *entry);

        return FALSE;
}

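/*
 * Usage sketch (illustrative only): a caller holding the map lock might use
 * the lookup like this, with "map" and "addr" as hypothetical variables:
 *
 *      vm_map_entry_t entry;
 *
 *      if (vm_map_store_lookup_entry_ll(map, addr, &entry)) {
 *              ... "addr" lies inside "entry" ...
 *      } else {
 *              ... "entry" is the entry immediately preceding "addr",
 *                  or vm_map_to_entry(map) if no entry precedes it ...
 *      }
 */
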
/*
 *  vm_map_store_find_last_free_ll:
 *
 *  Finds and returns in O_ENTRY the entry *after* the last hole (if one exists) in MAP.
 *  Returns NULL if map is full and no hole can be found.
 */
void
vm_map_store_find_last_free_ll(
        vm_map_t        map,
        vm_map_entry_t  *o_entry)       /* OUT */
{
        vm_map_entry_t  entry, prev_entry;
        vm_map_offset_t end;

        entry = vm_map_to_entry(map);
        end = map->max_offset;

        /* Skip over contiguous entries from end of map until wraps around */
        while ((prev_entry = entry->vme_prev) != vm_map_to_entry(map)
            && (prev_entry->vme_end == end || prev_entry->vme_end > map->max_offset)) {
                entry = prev_entry;
                end = entry->vme_start;
        }

        if (entry != vm_map_to_entry(map) && entry->vme_start == map->min_offset) {
                /* Map is completely full, no holes exist */
                *o_entry = NULL;
                return;
        }

        *o_entry = entry;
}

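/*
 * Usage sketch (illustrative only): a top-down allocator might seed its
 * scan with the result, where "map" and "last_free" are hypothetical:
 *
 *      vm_map_entry_t last_free;
 *
 *      vm_map_store_find_last_free_ll(map, &last_free);
 *      if (last_free == NULL) {
 *              ... the map is completely full, fail the allocation ...
 *      }
 */
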
void
vm_map_store_entry_link_ll( struct vm_map_header *mapHdr, vm_map_entry_t after_where, vm_map_entry_t entry)
{
        _vm_map_entry_link_ll( mapHdr, after_where, entry);
}

void
vm_map_store_entry_unlink_ll( struct vm_map_header *mapHdr, vm_map_entry_t entry)
{
        _vm_map_entry_unlink_ll( mapHdr, entry);
}

void
vm_map_store_copy_reset_ll( vm_map_copy_t copy, __unused vm_map_entry_t entry, __unused int nentries)
{
        /* Detach all entries: reset the copy header to an empty circular list. */
        copy->cpy_hdr.nentries = 0;
        vm_map_copy_first_entry(copy) =
            vm_map_copy_last_entry(copy) =
            vm_map_copy_to_entry(copy);
}

void
update_first_free_ll( vm_map_t map, vm_map_entry_t new_first_free)
{
        /* When the hole list is in use, the first_free hint is not maintained here. */
        if (map->holelistenabled) {
                return;
        }

        UPDATE_FIRST_FREE_LL( map, new_first_free);
}