/*
- * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
+ * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_START@
- *
+ *
* This file contains Original Code and/or Modifications of Original Code
* as defined in and that are subject to the Apple Public Source License
* Version 2.0 (the 'License'). You may not use this file except in
* unlawful or unlicensed copies of an Apple operating system, or to
* circumvent, violate, or enable the circumvention or violation of, any
* terms of an Apple operating system software license agreement.
- *
+ *
* Please obtain a copy of the License at
* http://www.opensource.apple.com/apsl/ and read it before using this file.
- *
+ *
* The Original Code and all software distributed under the License are
* distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
* EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
* FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
* Please see the License for the specific language governing rights and
* limitations under the License.
- *
+ *
* @APPLE_OSREFERENCE_LICENSE_HEADER_END@
*/
/*
* @OSF_COPYRIGHT@
*/
-/*
+/*
* Mach Operating System
* Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
* All Rights Reserved.
- *
+ *
* Permission to use, copy, modify and distribute this software and its
* documentation is hereby granted, provided that both the copyright
* notice and this permission notice appear in all copies of the
* software, derivative works or modified versions, and any portions
* thereof, and that both notices appear in supporting documentation.
- *
+ *
* CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
* CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
* ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
- *
+ *
* Carnegie Mellon requests users of this software to return to
- *
+ *
* Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
* School of Computer Science
* Carnegie Mellon University
* Pittsburgh PA 15213-3890
- *
+ *
* any improvements or extensions that they make and grant Carnegie Mellon
* the rights to redistribute these changes.
*/
#include <vm/vm_pageout.h>
#include <kern/misc_protos.h>
#include <vm/cpm.h>
+#include <kern/ledger.h>
+#include <kern/bits.h>
+#include <kern/startup.h>
#include <string.h>
#include <libkern/OSDebug.h>
#include <libkern/crypto/sha2.h>
+#include <libkern/section_keywords.h>
#include <sys/kdebug.h>
#include <san/kasan.h>
* Variables exported by this module.
*/
-vm_map_t kernel_map;
-vm_map_t kernel_pageable_map;
-
-extern boolean_t vm_kernel_ready;
+SECURITY_READ_ONLY_LATE(vm_map_t) kernel_map;
+vm_map_t kernel_pageable_map;
/*
* Forward declarations for internal functions.
*/
extern kern_return_t kmem_alloc_pages(
- vm_object_t object,
- vm_object_offset_t offset,
- vm_object_size_t size);
+ vm_object_t object,
+ vm_object_offset_t offset,
+ vm_object_size_t size);
kern_return_t
kmem_alloc_contig(
- vm_map_t map,
- vm_offset_t *addrp,
- vm_size_t size,
- vm_offset_t mask,
- ppnum_t max_pnum,
- ppnum_t pnum_mask,
- int flags,
+ vm_map_t map,
+ vm_offset_t *addrp,
+ vm_size_t size,
+ vm_offset_t mask,
+ ppnum_t max_pnum,
+ ppnum_t pnum_mask,
+ kma_flags_t flags,
vm_tag_t tag)
{
- vm_object_t object;
- vm_object_offset_t offset;
- vm_map_offset_t map_addr;
- vm_map_offset_t map_mask;
- vm_map_size_t map_size, i;
- vm_map_entry_t entry;
- vm_page_t m, pages;
- kern_return_t kr;
-
- assert(VM_KERN_MEMORY_NONE != tag);
-
- if (map == VM_MAP_NULL || (flags & ~(KMA_KOBJECT | KMA_LOMEM | KMA_NOPAGEWAIT)))
+ vm_object_t object;
+ vm_object_offset_t offset;
+ vm_map_offset_t map_addr;
+ vm_map_offset_t map_mask;
+ vm_map_size_t map_size, i;
+ vm_map_entry_t entry;
+ vm_page_t m, pages;
+ kern_return_t kr;
+
+ assert(VM_KERN_MEMORY_NONE != tag);
+
+ if (map == VM_MAP_NULL || (flags & ~(KMA_KOBJECT | KMA_LOMEM | KMA_NOPAGEWAIT))) {
return KERN_INVALID_ARGUMENT;
+ }
map_size = vm_map_round_page(size,
- VM_MAP_PAGE_MASK(map));
+ VM_MAP_PAGE_MASK(map));
map_mask = (vm_map_offset_t)mask;
-
+
/* Check for zero allocation size (either directly or via overflow) */
if (map_size == 0) {
*addrp = 0;
}
kr = vm_map_find_space(map, &map_addr, map_size, map_mask, 0,
- VM_MAP_KERNEL_FLAGS_NONE, tag, &entry);
+ VM_MAP_KERNEL_FLAGS_NONE, tag, &entry);
if (KERN_SUCCESS != kr) {
vm_object_deallocate(object);
return kr;
if (kr != KERN_SUCCESS) {
vm_map_remove(map,
- vm_map_trunc_page(map_addr,
- VM_MAP_PAGE_MASK(map)),
- vm_map_round_page(map_addr + map_size,
- VM_MAP_PAGE_MASK(map)),
- 0);
+ vm_map_trunc_page(map_addr,
+ VM_MAP_PAGE_MASK(map)),
+ vm_map_round_page(map_addr + map_size,
+ VM_MAP_PAGE_MASK(map)),
+ VM_MAP_REMOVE_NO_FLAGS);
vm_object_deallocate(object);
*addrp = 0;
return kr;
m = pages;
pages = NEXT_PAGE(m);
*(NEXT_PAGE_PTR(m)) = VM_PAGE_NULL;
- m->busy = FALSE;
+ m->vmp_busy = FALSE;
vm_page_insert(m, object, offset + i);
}
vm_object_unlock(object);
kr = vm_map_wire_kernel(map,
- vm_map_trunc_page(map_addr,
- VM_MAP_PAGE_MASK(map)),
- vm_map_round_page(map_addr + map_size,
- VM_MAP_PAGE_MASK(map)),
- VM_PROT_DEFAULT, tag,
- FALSE);
+ vm_map_trunc_page(map_addr,
+ VM_MAP_PAGE_MASK(map)),
+ vm_map_round_page(map_addr + map_size,
+ VM_MAP_PAGE_MASK(map)),
+ VM_PROT_DEFAULT, tag,
+ FALSE);
if (kr != KERN_SUCCESS) {
if (object == kernel_object) {
vm_object_unlock(object);
}
vm_map_remove(map,
- vm_map_trunc_page(map_addr,
- VM_MAP_PAGE_MASK(map)),
- vm_map_round_page(map_addr + map_size,
- VM_MAP_PAGE_MASK(map)),
- 0);
+ vm_map_trunc_page(map_addr,
+ VM_MAP_PAGE_MASK(map)),
+ vm_map_round_page(map_addr + map_size,
+ VM_MAP_PAGE_MASK(map)),
+ VM_MAP_REMOVE_NO_FLAGS);
vm_object_deallocate(object);
return kr;
}
if (object == kernel_object) {
vm_map_simplify(map, map_addr);
- vm_tag_update_size(tag, map_size);
- }
+ vm_tag_update_size(tag, map_size);
+ }
*addrp = (vm_offset_t) map_addr;
assert((vm_map_offset_t) *addrp == map_addr);
kern_return_t
kernel_memory_allocate(
- vm_map_t map,
- vm_offset_t *addrp,
- vm_size_t size,
- vm_offset_t mask,
- int flags,
- vm_tag_t tag)
+ vm_map_t map,
+ vm_offset_t *addrp,
+ vm_size_t size,
+ vm_offset_t mask,
+ kma_flags_t flags,
+ vm_tag_t tag)
{
- vm_object_t object;
- vm_object_offset_t offset;
- vm_object_offset_t pg_offset;
- vm_map_entry_t entry = NULL;
- vm_map_offset_t map_addr, fill_start;
- vm_map_offset_t map_mask;
- vm_map_size_t map_size, fill_size;
- kern_return_t kr, pe_result;
- vm_page_t mem;
- vm_page_t guard_page_list = NULL;
- vm_page_t wired_page_list = NULL;
- int guard_page_count = 0;
- int wired_page_count = 0;
- int i;
- int vm_alloc_flags;
- vm_map_kernel_flags_t vmk_flags;
- vm_prot_t kma_prot;
-
- if (! vm_kernel_ready) {
+ vm_object_t object;
+ vm_object_offset_t offset;
+ vm_object_offset_t pg_offset;
+ vm_map_entry_t entry = NULL;
+ vm_map_offset_t map_addr, fill_start;
+ vm_map_offset_t map_mask;
+ vm_map_size_t map_size, fill_size;
+ kern_return_t kr, pe_result;
+ vm_page_t mem;
+ vm_page_t guard_page_list = NULL;
+ vm_page_t wired_page_list = NULL;
+ int guard_page_count = 0;
+ int wired_page_count = 0;
+ int vm_alloc_flags;
+ vm_map_kernel_flags_t vmk_flags;
+ vm_prot_t kma_prot;
+
+ if (startup_phase < STARTUP_SUB_KMEM) {
panic("kernel_memory_allocate: VM is not ready");
}
map_size = vm_map_round_page(size,
- VM_MAP_PAGE_MASK(map));
+ VM_MAP_PAGE_MASK(map));
map_mask = (vm_map_offset_t) mask;
vm_alloc_flags = 0; //VM_MAKE_TAG(tag);
* limit raised to 2GB with 128GB max physical limit,
* but scaled by installed memory above this
*/
- if ( !(flags & KMA_VAONLY) && map_size > MAX(1ULL<<31, sane_size/64)) {
- return KERN_RESOURCE_SHORTAGE;
- }
+ if (!(flags & (KMA_VAONLY | KMA_PAGEABLE)) &&
+ map_size > MAX(1ULL << 31, sane_size / 64)) {
+ return KERN_RESOURCE_SHORTAGE;
+ }
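+	/*
+	 * For example: with a sane_size of 128GB the scaled term is
+	 * 128GB / 64 = 2GB, so the 2GB floor governs; at 256GB the cap
+	 * grows to 4GB.  Oversized wired requests fail early with
+	 * KERN_RESOURCE_SHORTAGE instead of attempting to wire an
+	 * unreasonable fraction of physical memory.
+	 */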
/*
* Guard pages:
wired_page_count = (int) (fill_size / PAGE_SIZE_64);
assert(wired_page_count * PAGE_SIZE_64 == fill_size);
- for (i = 0; i < guard_page_count; i++) {
- for (;;) {
- mem = vm_page_grab_guard();
+#if DEBUG || DEVELOPMENT
+ VM_DEBUG_CONSTANT_EVENT(vm_kern_request, VM_KERN_REQUEST, DBG_FUNC_START,
+ size, 0, 0, 0);
+#endif
- if (mem != VM_PAGE_NULL)
- break;
- if (flags & KMA_NOPAGEWAIT) {
- kr = KERN_RESOURCE_SHORTAGE;
- goto out;
- }
- vm_page_more_fictitious();
+ for (int i = 0; i < guard_page_count; i++) {
+ mem = vm_page_grab_guard((flags & KMA_NOPAGEWAIT) == 0);
+ if (mem == VM_PAGE_NULL) {
+ kr = KERN_RESOURCE_SHORTAGE;
+ goto out;
}
- mem->snext = guard_page_list;
+ mem->vmp_snext = guard_page_list;
guard_page_list = mem;
}
- if (! (flags & KMA_VAONLY)) {
- for (i = 0; i < wired_page_count; i++) {
- uint64_t unavailable;
-
- for (;;) {
- if (flags & KMA_LOMEM)
- mem = vm_page_grablo();
- else
- mem = vm_page_grab();
-
- if (mem != VM_PAGE_NULL)
- break;
-
- if (flags & KMA_NOPAGEWAIT) {
- kr = KERN_RESOURCE_SHORTAGE;
- goto out;
- }
- if ((flags & KMA_LOMEM) && (vm_lopage_needed == TRUE)) {
- kr = KERN_RESOURCE_SHORTAGE;
- goto out;
- }
- unavailable = (vm_page_wire_count + vm_page_free_target) * PAGE_SIZE;
-
- if (unavailable > max_mem || map_size > (max_mem - unavailable)) {
- kr = KERN_RESOURCE_SHORTAGE;
- goto out;
- }
- VM_PAGE_WAIT();
+ if (!(flags & (KMA_VAONLY | KMA_PAGEABLE))) {
+ kr = vm_page_alloc_list(wired_page_count, flags,
+ &wired_page_list);
+ if (kr != KERN_SUCCESS) {
+ goto out;
}
- if (KMA_ZERO & flags) vm_page_zero_fill(mem);
- mem->snext = wired_page_list;
- wired_page_list = mem;
- }
}
/*
object = vm_object_allocate(map_size);
}
- if (flags & KMA_ATOMIC)
+ if (flags & KMA_ATOMIC) {
vmk_flags.vmkf_atomic_entry = TRUE;
+ }
+
+ if (flags & KMA_KHEAP) {
+ vm_alloc_flags |= VM_MAP_FIND_LAST_FREE;
+ }
kr = vm_map_find_space(map, &map_addr,
- fill_size, map_mask,
- vm_alloc_flags, vmk_flags, tag, &entry);
+ fill_size, map_mask,
+ vm_alloc_flags, vmk_flags, tag, &entry);
if (KERN_SUCCESS != kr) {
vm_object_deallocate(object);
goto out;
}
VME_OBJECT_SET(entry, object);
VME_OFFSET_SET(entry, offset);
-
- if (object != compressor_object)
+
+ if (!(flags & (KMA_COMPRESSOR | KMA_PAGEABLE))) {
entry->wired_count++;
+ }
- if (flags & KMA_PERMANENT)
+ if (flags & KMA_PERMANENT) {
entry->permanent = TRUE;
+ }
- if (object != kernel_object && object != compressor_object)
+ if (object != kernel_object && object != compressor_object) {
vm_object_reference(object);
+ }
vm_object_lock(object);
vm_map_unlock(map);
pg_offset = 0;
if (fill_start) {
- if (guard_page_list == NULL)
+ if (guard_page_list == NULL) {
panic("kernel_memory_allocate: guard_page_list == NULL");
+ }
mem = guard_page_list;
- guard_page_list = mem->snext;
- mem->snext = NULL;
+ guard_page_list = mem->vmp_snext;
+ mem->vmp_snext = NULL;
vm_page_insert(mem, object, offset + pg_offset);
- mem->busy = FALSE;
+ mem->vmp_busy = FALSE;
pg_offset += PAGE_SIZE_64;
}
}
#endif
- if (flags & KMA_VAONLY) {
+ if (flags & (KMA_VAONLY | KMA_PAGEABLE)) {
pg_offset = fill_start + fill_size;
} else {
- for (pg_offset = fill_start; pg_offset < fill_start + fill_size; pg_offset += PAGE_SIZE_64) {
- if (wired_page_list == NULL)
- panic("kernel_memory_allocate: wired_page_list == NULL");
-
- mem = wired_page_list;
- wired_page_list = mem->snext;
- mem->snext = NULL;
-
- assert(mem->wire_count == 0);
- assert(mem->vm_page_q_state == VM_PAGE_NOT_ON_Q);
-
- mem->vm_page_q_state = VM_PAGE_IS_WIRED;
- mem->wire_count++;
- if (__improbable(mem->wire_count == 0)) {
- panic("kernel_memory_allocate(%p): wire_count overflow",
- mem);
- }
+ for (pg_offset = fill_start; pg_offset < fill_start + fill_size; pg_offset += PAGE_SIZE_64) {
+ if (wired_page_list == NULL) {
+ panic("kernel_memory_allocate: wired_page_list == NULL");
+ }
- vm_page_insert_wired(mem, object, offset + pg_offset, tag);
+ mem = wired_page_list;
+ wired_page_list = mem->vmp_snext;
+ mem->vmp_snext = NULL;
- mem->busy = FALSE;
- mem->pmapped = TRUE;
- mem->wpmapped = TRUE;
+ assert(mem->vmp_wire_count == 0);
+ assert(mem->vmp_q_state == VM_PAGE_NOT_ON_Q);
- PMAP_ENTER_OPTIONS(kernel_pmap, map_addr + pg_offset, mem,
- kma_prot, VM_PROT_NONE, ((flags & KMA_KSTACK) ? VM_MEM_STACK : 0), TRUE,
- PMAP_OPTIONS_NOWAIT, pe_result);
+ mem->vmp_q_state = VM_PAGE_IS_WIRED;
+ mem->vmp_wire_count++;
+ if (__improbable(mem->vmp_wire_count == 0)) {
+ panic("kernel_memory_allocate(%p): wire_count overflow",
+ mem);
+ }
- if (pe_result == KERN_RESOURCE_SHORTAGE) {
- vm_object_unlock(object);
+ vm_page_insert_wired(mem, object, offset + pg_offset, tag);
- PMAP_ENTER(kernel_pmap, map_addr + pg_offset, mem,
- kma_prot, VM_PROT_NONE, ((flags & KMA_KSTACK) ? VM_MEM_STACK : 0), TRUE,
- pe_result);
+ mem->vmp_busy = FALSE;
+ mem->vmp_pmapped = TRUE;
+ mem->vmp_wpmapped = TRUE;
- vm_object_lock(object);
- }
+ PMAP_ENTER_OPTIONS(kernel_pmap, map_addr + pg_offset,
+ 0, /* fault_phys_offset */
+ mem,
+ kma_prot, VM_PROT_NONE, ((flags & KMA_KSTACK) ? VM_MEM_STACK : 0), TRUE,
+ PMAP_OPTIONS_NOWAIT, pe_result);
- assert(pe_result == KERN_SUCCESS);
+ if (pe_result == KERN_RESOURCE_SHORTAGE) {
+ vm_object_unlock(object);
- if (flags & KMA_NOENCRYPT) {
- bzero(CAST_DOWN(void *, (map_addr + pg_offset)), PAGE_SIZE);
+ PMAP_ENTER(kernel_pmap, map_addr + pg_offset, mem,
+ kma_prot, VM_PROT_NONE, ((flags & KMA_KSTACK) ? VM_MEM_STACK : 0), TRUE,
+ pe_result);
- pmap_set_noencrypt(VM_PAGE_GET_PHYS_PAGE(mem));
+ vm_object_lock(object);
+ }
+
+ assert(pe_result == KERN_SUCCESS);
+
+ if (flags & KMA_NOENCRYPT) {
+ bzero(CAST_DOWN(void *, (map_addr + pg_offset)), PAGE_SIZE);
+
+ pmap_set_noencrypt(VM_PAGE_GET_PHYS_PAGE(mem));
+ }
+ }
+ if (kernel_object == object) {
+ vm_tag_update_size(tag, fill_size);
}
- }
- if (kernel_object == object) vm_tag_update_size(tag, fill_size);
}
if ((fill_start + fill_size) < map_size) {
- if (guard_page_list == NULL)
+ if (guard_page_list == NULL) {
panic("kernel_memory_allocate: guard_page_list == NULL");
+ }
mem = guard_page_list;
- guard_page_list = mem->snext;
- mem->snext = NULL;
+ guard_page_list = mem->vmp_snext;
+ mem->vmp_snext = NULL;
vm_page_insert(mem, object, offset + pg_offset);
- mem->busy = FALSE;
+ mem->vmp_busy = FALSE;
}
- if (guard_page_list || wired_page_list)
+ if (guard_page_list || wired_page_list) {
panic("kernel_memory_allocate: non empty list\n");
+ }
- if (! (flags & KMA_VAONLY)) {
- vm_page_lockspin_queues();
- vm_page_wire_count += wired_page_count;
- vm_page_unlock_queues();
+ if (!(flags & (KMA_VAONLY | KMA_PAGEABLE))) {
+ vm_page_lockspin_queues();
+ vm_page_wire_count += wired_page_count;
+ vm_page_unlock_queues();
}
vm_object_unlock(object);
/*
 * now that the pages are wired, we no longer have to fear coalescing
*/
- if (object == kernel_object || object == compressor_object)
+ if (object == kernel_object || object == compressor_object) {
vm_map_simplify(map, map_addr);
- else
+ } else {
vm_object_deallocate(object);
+ }
+#if DEBUG || DEVELOPMENT
+ VM_DEBUG_CONSTANT_EVENT(vm_kern_request, VM_KERN_REQUEST, DBG_FUNC_END,
+ wired_page_count, 0, 0, 0);
+#endif
/*
* Return the memory, not zeroed.
*/
return KERN_SUCCESS;
out:
- if (guard_page_list)
+ if (guard_page_list) {
vm_page_free_list(guard_page_list, FALSE);
+ }
- if (wired_page_list)
+ if (wired_page_list) {
vm_page_free_list(wired_page_list, FALSE);
+ }
+#if DEBUG || DEVELOPMENT
+ VM_DEBUG_CONSTANT_EVENT(vm_kern_request, VM_KERN_REQUEST, DBG_FUNC_END,
+ wired_page_count, 0, 0, 0);
+#endif
return kr;
}
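+/*
+ * Illustrative caller (size and tag are placeholders for whatever the
+ * caller already has): a wired, zero-filled allocation backed by the
+ * kernel object, later released with kmem_free():
+ *
+ *	vm_offset_t addr;
+ *	kern_return_t kr;
+ *
+ *	kr = kernel_memory_allocate(kernel_map, &addr, size, 0,
+ *	    KMA_KOBJECT | KMA_ZERO, tag);
+ *	if (kr != KERN_SUCCESS) {
+ *		return kr;
+ *	}
+ *	...
+ *	kmem_free(kernel_map, addr, size);
+ */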
-kern_return_t
-kernel_memory_populate(
- vm_map_t map,
- vm_offset_t addr,
- vm_size_t size,
- int flags,
+void
+kernel_memory_populate_with_pages(
+ vm_map_t map,
+ vm_offset_t addr,
+ vm_size_t size,
+ vm_page_t page_list,
+ kma_flags_t flags,
vm_tag_t tag)
{
- vm_object_t object;
- vm_object_offset_t offset, pg_offset;
- kern_return_t kr, pe_result;
- vm_page_t mem;
- vm_page_t page_list = NULL;
- int page_count = 0;
- int i;
-
- page_count = (int) (size / PAGE_SIZE_64);
-
- assert((flags & (KMA_COMPRESSOR|KMA_KOBJECT)) != (KMA_COMPRESSOR|KMA_KOBJECT));
+ vm_object_t object;
+ kern_return_t pe_result;
+ vm_page_t mem;
+ int page_count = atop_64(size);
if (flags & KMA_COMPRESSOR) {
-
- pg_offset = page_count * PAGE_SIZE_64;
-
- do {
- for (;;) {
- mem = vm_page_grab();
-
- if (mem != VM_PAGE_NULL)
- break;
-
- VM_PAGE_WAIT();
- }
- if (KMA_ZERO & flags) vm_page_zero_fill(mem);
- mem->snext = page_list;
- page_list = mem;
-
- pg_offset -= PAGE_SIZE_64;
-
- kr = pmap_enter_options(kernel_pmap,
- addr + pg_offset, VM_PAGE_GET_PHYS_PAGE(mem),
- VM_PROT_READ | VM_PROT_WRITE, VM_PROT_NONE, 0, TRUE,
- PMAP_OPTIONS_INTERNAL, NULL);
- assert(kr == KERN_SUCCESS);
-
- } while (pg_offset);
-
- offset = addr;
- object = compressor_object;
-
- vm_object_lock(object);
-
- for (pg_offset = 0;
- pg_offset < size;
- pg_offset += PAGE_SIZE_64) {
-
- mem = page_list;
- page_list = mem->snext;
- mem->snext = NULL;
-
- vm_page_insert(mem, object, offset + pg_offset);
- assert(mem->busy);
-
- mem->busy = FALSE;
- mem->pmapped = TRUE;
- mem->wpmapped = TRUE;
- mem->vm_page_q_state = VM_PAGE_USED_BY_COMPRESSOR;
- }
- vm_object_unlock(object);
-
-#if KASAN
- if (map == compressor_map) {
- kasan_notify_address_nopoison(addr, size);
- } else {
- kasan_notify_address(addr, size);
- }
-#endif
- return KERN_SUCCESS;
+ panic("%s(%p,0x%llx,0x%llx,0x%x): KMA_COMPRESSOR", __func__,
+ map, (uint64_t) addr, (uint64_t) size, flags);
}
- for (i = 0; i < page_count; i++) {
- for (;;) {
- if (flags & KMA_LOMEM)
- mem = vm_page_grablo();
- else
- mem = vm_page_grab();
-
- if (mem != VM_PAGE_NULL)
- break;
-
- if (flags & KMA_NOPAGEWAIT) {
- kr = KERN_RESOURCE_SHORTAGE;
- goto out;
- }
- if ((flags & KMA_LOMEM) &&
- (vm_lopage_needed == TRUE)) {
- kr = KERN_RESOURCE_SHORTAGE;
- goto out;
- }
- VM_PAGE_WAIT();
- }
- if (KMA_ZERO & flags) vm_page_zero_fill(mem);
- mem->snext = page_list;
- page_list = mem;
- }
if (flags & KMA_KOBJECT) {
- offset = addr;
object = kernel_object;
vm_object_lock(object);
* take reference on object;
* unlock map;
*/
- panic("kernel_memory_populate(%p,0x%llx,0x%llx,0x%x): "
- "!KMA_KOBJECT",
- map, (uint64_t) addr, (uint64_t) size, flags);
+ panic("%s(%p,0x%llx,0x%llx,0x%x): !KMA_KOBJECT", __func__,
+ map, (uint64_t) addr, (uint64_t) size, flags);
}
- for (pg_offset = 0;
- pg_offset < size;
- pg_offset += PAGE_SIZE_64) {
-
- if (page_list == NULL)
- panic("kernel_memory_populate: page_list == NULL");
+ for (vm_object_offset_t pg_offset = 0;
+ pg_offset < size;
+ pg_offset += PAGE_SIZE_64) {
+ if (page_list == NULL) {
+ panic("%s: page_list too short", __func__);
+ }
mem = page_list;
- page_list = mem->snext;
- mem->snext = NULL;
-
- assert(mem->vm_page_q_state == VM_PAGE_NOT_ON_Q);
- mem->vm_page_q_state = VM_PAGE_IS_WIRED;
- mem->wire_count++;
- if (__improbable(mem->wire_count == 0)) {
- panic("kernel_memory_populate(%p): wire_count overflow",
- mem);
+ page_list = mem->vmp_snext;
+ mem->vmp_snext = NULL;
+
+ assert(mem->vmp_q_state == VM_PAGE_NOT_ON_Q);
+ mem->vmp_q_state = VM_PAGE_IS_WIRED;
+ mem->vmp_wire_count++;
+ if (mem->vmp_wire_count == 0) {
+ panic("%s(%p): wire_count overflow", __func__, mem);
}
- vm_page_insert_wired(mem, object, offset + pg_offset, tag);
+ vm_page_insert_wired(mem, object, addr + pg_offset, tag);
- mem->busy = FALSE;
- mem->pmapped = TRUE;
- mem->wpmapped = TRUE;
+ mem->vmp_busy = FALSE;
+ mem->vmp_pmapped = TRUE;
+ mem->vmp_wpmapped = TRUE;
- PMAP_ENTER_OPTIONS(kernel_pmap, addr + pg_offset, mem,
- VM_PROT_READ | VM_PROT_WRITE, VM_PROT_NONE,
- ((flags & KMA_KSTACK) ? VM_MEM_STACK : 0), TRUE,
- PMAP_OPTIONS_NOWAIT, pe_result);
+ PMAP_ENTER_OPTIONS(kernel_pmap, addr + pg_offset,
+ 0, /* fault_phys_offset */
+ mem,
+ VM_PROT_READ | VM_PROT_WRITE, VM_PROT_NONE,
+ ((flags & KMA_KSTACK) ? VM_MEM_STACK : 0), TRUE,
+ PMAP_OPTIONS_NOWAIT, pe_result);
if (pe_result == KERN_RESOURCE_SHORTAGE) {
-
vm_object_unlock(object);
PMAP_ENTER(kernel_pmap, addr + pg_offset, mem,
- VM_PROT_READ | VM_PROT_WRITE, VM_PROT_NONE,
- ((flags & KMA_KSTACK) ? VM_MEM_STACK : 0), TRUE,
- pe_result);
+ VM_PROT_READ | VM_PROT_WRITE, VM_PROT_NONE,
+ ((flags & KMA_KSTACK) ? VM_MEM_STACK : 0), TRUE,
+ pe_result);
vm_object_lock(object);
}
assert(pe_result == KERN_SUCCESS);
if (flags & KMA_NOENCRYPT) {
- bzero(CAST_DOWN(void *, (addr + pg_offset)), PAGE_SIZE);
+ __nosan_bzero(CAST_DOWN(void *, (addr + pg_offset)), PAGE_SIZE);
pmap_set_noencrypt(VM_PAGE_GET_PHYS_PAGE(mem));
}
}
- vm_page_lock_queues();
+ if (page_list) {
+ panic("%s: page_list too long", __func__);
+ }
+ vm_object_unlock(object);
+
+ vm_page_lockspin_queues();
vm_page_wire_count += page_count;
vm_page_unlock_queues();
-
- if (kernel_object == object) vm_tag_update_size(tag, size);
-
- vm_object_unlock(object);
+ vm_tag_update_size(tag, size);
#if KASAN
if (map == compressor_map) {
kasan_notify_address(addr, size);
}
#endif
- return KERN_SUCCESS;
+}
-out:
- if (page_list)
- vm_page_free_list(page_list, FALSE);
+kern_return_t
+kernel_memory_populate(
+ vm_map_t map,
+ vm_offset_t addr,
+ vm_size_t size,
+ kma_flags_t flags,
+ vm_tag_t tag)
+{
+ vm_object_t object;
+ vm_object_offset_t offset, pg_offset;
+ kern_return_t kr = KERN_SUCCESS;
+ vm_page_t mem;
+ vm_page_t page_list = NULL;
+ int page_count = atop_64(size);
+
+#if DEBUG || DEVELOPMENT
+ VM_DEBUG_CONSTANT_EVENT(vm_kern_request, VM_KERN_REQUEST, DBG_FUNC_START,
+ size, 0, 0, 0);
+#endif
+
+ assert((flags & (KMA_COMPRESSOR | KMA_KOBJECT)) != (KMA_COMPRESSOR | KMA_KOBJECT));
+
+ if (flags & KMA_COMPRESSOR) {
+ pg_offset = page_count * PAGE_SIZE_64;
+
+ do {
+ for (;;) {
+ mem = vm_page_grab();
+
+ if (mem != VM_PAGE_NULL) {
+ break;
+ }
+
+ VM_PAGE_WAIT();
+ }
+ if (KMA_ZERO & flags) {
+ vm_page_zero_fill(mem);
+ }
+ mem->vmp_snext = page_list;
+ page_list = mem;
+
+ pg_offset -= PAGE_SIZE_64;
+
+ kr = pmap_enter_options(kernel_pmap,
+ addr + pg_offset, VM_PAGE_GET_PHYS_PAGE(mem),
+ VM_PROT_READ | VM_PROT_WRITE, VM_PROT_NONE, 0, TRUE,
+ PMAP_OPTIONS_INTERNAL, NULL);
+ assert(kr == KERN_SUCCESS);
+ } while (pg_offset);
+
+ offset = addr;
+ object = compressor_object;
+
+ vm_object_lock(object);
+
+ for (pg_offset = 0;
+ pg_offset < size;
+ pg_offset += PAGE_SIZE_64) {
+ mem = page_list;
+ page_list = mem->vmp_snext;
+ mem->vmp_snext = NULL;
+
+ vm_page_insert(mem, object, offset + pg_offset);
+ assert(mem->vmp_busy);
+
+ mem->vmp_busy = FALSE;
+ mem->vmp_pmapped = TRUE;
+ mem->vmp_wpmapped = TRUE;
+ mem->vmp_q_state = VM_PAGE_USED_BY_COMPRESSOR;
+ }
+ vm_object_unlock(object);
+#if KASAN
+ if (map == compressor_map) {
+ kasan_notify_address_nopoison(addr, size);
+ } else {
+ kasan_notify_address(addr, size);
+ }
+#endif
+
+#if DEBUG || DEVELOPMENT
+ task_t task = current_task();
+ if (task != NULL) {
+ ledger_credit(task->ledger, task_ledgers.pages_grabbed_kern, page_count);
+ }
+#endif
+ } else {
+ kr = vm_page_alloc_list(page_count, flags, &page_list);
+ if (kr == KERN_SUCCESS) {
+ kernel_memory_populate_with_pages(map, addr, size,
+ page_list, flags, tag);
+ }
+ }
+
+#if DEBUG || DEVELOPMENT
+ VM_DEBUG_CONSTANT_EVENT(vm_kern_request, VM_KERN_REQUEST, DBG_FUNC_END,
+ page_count, 0, 0, 0);
+#endif
return kr;
}
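+/*
+ * Illustrative pairing (addr, size and tag are the caller's own): a
+ * range reserved with KMA_VAONLY can be backed later and have its
+ * pages released again without giving up the virtual range:
+ *
+ *	kernel_memory_allocate(kernel_map, &addr, size, 0,
+ *	    KMA_KOBJECT | KMA_VAONLY, tag);
+ *	...
+ *	kernel_memory_populate(kernel_map, addr, size, KMA_KOBJECT, tag);
+ *	...
+ *	kernel_memory_depopulate(kernel_map, addr, size, KMA_KOBJECT, tag);
+ */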
void
kernel_memory_depopulate(
- vm_map_t map,
- vm_offset_t addr,
- vm_size_t size,
- int flags)
+ vm_map_t map,
+ vm_offset_t addr,
+ vm_size_t size,
+ kma_flags_t flags,
+ vm_tag_t tag)
{
- vm_object_t object;
- vm_object_offset_t offset, pg_offset;
- vm_page_t mem;
- vm_page_t local_freeq = NULL;
+ vm_object_t object;
+ vm_object_offset_t offset, pg_offset;
+ vm_page_t mem;
+ vm_page_t local_freeq = NULL;
+ unsigned int pages_unwired;
- assert((flags & (KMA_COMPRESSOR|KMA_KOBJECT)) != (KMA_COMPRESSOR|KMA_KOBJECT));
+ assert((flags & (KMA_COMPRESSOR | KMA_KOBJECT)) != (KMA_COMPRESSOR | KMA_KOBJECT));
if (flags & KMA_COMPRESSOR) {
offset = addr;
} else {
offset = 0;
object = NULL;
- /*
- * If it's not the kernel object, we need to:
- * lock map;
- * lookup entry;
- * lock object;
- * unlock map;
- */
+ /*
+ * If it's not the kernel object, we need to:
+ * lock map;
+ * lookup entry;
+ * lock object;
+ * unlock map;
+ */
panic("kernel_memory_depopulate(%p,0x%llx,0x%llx,0x%x): "
- "!KMA_KOBJECT",
- map, (uint64_t) addr, (uint64_t) size, flags);
+ "!KMA_KOBJECT",
+ map, (uint64_t) addr, (uint64_t) size, flags);
}
pmap_protect(kernel_map->pmap, offset, offset + size, VM_PROT_NONE);
- for (pg_offset = 0;
- pg_offset < size;
- pg_offset += PAGE_SIZE_64) {
-
+ for (pg_offset = 0, pages_unwired = 0;
+ pg_offset < size;
+ pg_offset += PAGE_SIZE_64) {
mem = vm_page_lookup(object, offset + pg_offset);
assert(mem);
-
- if (mem->vm_page_q_state != VM_PAGE_USED_BY_COMPRESSOR)
+
+ if (mem->vmp_q_state != VM_PAGE_USED_BY_COMPRESSOR) {
pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(mem));
+ pages_unwired++;
+ }
- mem->busy = TRUE;
+ mem->vmp_busy = TRUE;
- assert(mem->tabled);
+ assert(mem->vmp_tabled);
vm_page_remove(mem, TRUE);
- assert(mem->busy);
+ assert(mem->vmp_busy);
- assert(mem->pageq.next == 0 && mem->pageq.prev == 0);
- assert((mem->vm_page_q_state == VM_PAGE_USED_BY_COMPRESSOR) ||
- (mem->vm_page_q_state == VM_PAGE_NOT_ON_Q));
+ assert(mem->vmp_pageq.next == 0 && mem->vmp_pageq.prev == 0);
+ assert((mem->vmp_q_state == VM_PAGE_USED_BY_COMPRESSOR) ||
+ (mem->vmp_q_state == VM_PAGE_IS_WIRED));
- mem->vm_page_q_state = VM_PAGE_NOT_ON_Q;
- mem->snext = local_freeq;
+ mem->vmp_q_state = VM_PAGE_NOT_ON_Q;
+ mem->vmp_snext = local_freeq;
local_freeq = mem;
}
vm_object_unlock(object);
- if (local_freeq)
+
+ if (local_freeq) {
vm_page_free_list(local_freeq, TRUE);
+ if (pages_unwired != 0) {
+ vm_page_lockspin_queues();
+ vm_page_wire_count -= pages_unwired;
+ vm_page_unlock_queues();
+ vm_tag_update_size(tag, -ptoa_64(pages_unwired));
+ }
+ }
}
/*
kern_return_t
kmem_alloc_external(
- vm_map_t map,
- vm_offset_t *addrp,
- vm_size_t size)
+ vm_map_t map,
+ vm_offset_t *addrp,
+ vm_size_t size)
{
- return (kmem_alloc(map, addrp, size, vm_tag_bt()));
+ return kmem_alloc(map, addrp, size, vm_tag_bt());
}
kern_return_t
kmem_alloc(
- vm_map_t map,
- vm_offset_t *addrp,
- vm_size_t size,
- vm_tag_t tag)
+ vm_map_t map,
+ vm_offset_t *addrp,
+ vm_size_t size,
+ vm_tag_t tag)
{
return kmem_alloc_flags(map, addrp, size, tag, 0);
}
kern_return_t
kmem_alloc_flags(
- vm_map_t map,
- vm_offset_t *addrp,
- vm_size_t size,
- vm_tag_t tag,
- int flags)
+ vm_map_t map,
+ vm_offset_t *addrp,
+ vm_size_t size,
+ vm_tag_t tag,
+ kma_flags_t flags)
{
kern_return_t kr = kernel_memory_allocate(map, addrp, size, 0, flags, tag);
- TRACE_MACHLEAKS(KMEM_ALLOC_CODE, KMEM_ALLOC_CODE_2, size, *addrp);
+ if (kr == KERN_SUCCESS) {
+ TRACE_MACHLEAKS(KMEM_ALLOC_CODE, KMEM_ALLOC_CODE_2, size, *addrp);
+ }
return kr;
}
*/
kern_return_t
kmem_realloc(
- vm_map_t map,
- vm_offset_t oldaddr,
- vm_size_t oldsize,
- vm_offset_t *newaddrp,
- vm_size_t newsize,
- vm_tag_t tag)
+ vm_map_t map,
+ vm_offset_t oldaddr,
+ vm_size_t oldsize,
+ vm_offset_t *newaddrp,
+ vm_size_t newsize,
+ vm_tag_t tag)
{
- vm_object_t object;
- vm_object_offset_t offset;
- vm_map_offset_t oldmapmin;
- vm_map_offset_t oldmapmax;
- vm_map_offset_t newmapaddr;
- vm_map_size_t oldmapsize;
- vm_map_size_t newmapsize;
- vm_map_entry_t oldentry;
- vm_map_entry_t newentry;
- vm_page_t mem;
- kern_return_t kr;
+ vm_object_t object;
+ vm_object_offset_t offset;
+ vm_map_offset_t oldmapmin;
+ vm_map_offset_t oldmapmax;
+ vm_map_offset_t newmapaddr;
+ vm_map_size_t oldmapsize;
+ vm_map_size_t newmapsize;
+ vm_map_entry_t oldentry;
+ vm_map_entry_t newentry;
+ vm_page_t mem;
+ kern_return_t kr;
oldmapmin = vm_map_trunc_page(oldaddr,
- VM_MAP_PAGE_MASK(map));
+ VM_MAP_PAGE_MASK(map));
oldmapmax = vm_map_round_page(oldaddr + oldsize,
- VM_MAP_PAGE_MASK(map));
+ VM_MAP_PAGE_MASK(map));
oldmapsize = oldmapmax - oldmapmin;
newmapsize = vm_map_round_page(newsize,
- VM_MAP_PAGE_MASK(map));
+ VM_MAP_PAGE_MASK(map));
if (newmapsize < newsize) {
/* overflow */
*newaddrp = 0;
vm_map_lock(map);
- if (!vm_map_lookup_entry(map, oldmapmin, &oldentry))
+ if (!vm_map_lookup_entry(map, oldmapmin, &oldentry)) {
panic("kmem_realloc");
+ }
object = VME_OBJECT(oldentry);
/*
/* attempt is made to realloc a kmem_alloc'd area */
vm_object_lock(object);
vm_map_unlock(map);
- if (object->vo_size != oldmapsize)
+ if (object->vo_size != oldmapsize) {
panic("kmem_realloc");
+ }
object->vo_size = newmapsize;
vm_object_unlock(object);
/* allocate the new pages while expanded portion of the */
/* object is still not mapped */
kmem_alloc_pages(object, vm_object_round_page(oldmapsize),
- vm_object_round_page(newmapsize-oldmapsize));
+ vm_object_round_page(newmapsize - oldmapsize));
/*
* Find space for the new region.
*/
kr = vm_map_find_space(map, &newmapaddr, newmapsize,
- (vm_map_offset_t) 0, 0,
- VM_MAP_KERNEL_FLAGS_NONE,
- tag,
- &newentry);
+ (vm_map_offset_t) 0, 0,
+ VM_MAP_KERNEL_FLAGS_NONE,
+ tag,
+ &newentry);
if (kr != KERN_SUCCESS) {
vm_object_lock(object);
- for(offset = oldmapsize;
+ for (offset = oldmapsize;
offset < newmapsize; offset += PAGE_SIZE) {
- if ((mem = vm_page_lookup(object, offset)) != VM_PAGE_NULL) {
+ if ((mem = vm_page_lookup(object, offset)) != VM_PAGE_NULL) {
VM_PAGE_FREE(mem);
}
}
VME_OFFSET_SET(newentry, 0);
assert(newentry->wired_count == 0);
-
+
/* add an extra reference in case we have someone doing an */
/* unexpected deallocate */
vm_object_reference(object);
vm_map_unlock(map);
kr = vm_map_wire_kernel(map, newmapaddr, newmapaddr + newmapsize,
- VM_PROT_DEFAULT, tag, FALSE);
+ VM_PROT_DEFAULT, tag, FALSE);
if (KERN_SUCCESS != kr) {
- vm_map_remove(map, newmapaddr, newmapaddr + newmapsize, 0);
+ vm_map_remove(map, newmapaddr, newmapaddr + newmapsize, VM_MAP_REMOVE_NO_FLAGS);
vm_object_lock(object);
- for(offset = oldsize; offset < newmapsize; offset += PAGE_SIZE) {
- if ((mem = vm_page_lookup(object, offset)) != VM_PAGE_NULL) {
+ for (offset = oldsize; offset < newmapsize; offset += PAGE_SIZE) {
+ if ((mem = vm_page_lookup(object, offset)) != VM_PAGE_NULL) {
VM_PAGE_FREE(mem);
}
}
object->vo_size = oldmapsize;
vm_object_unlock(object);
vm_object_deallocate(object);
- return (kr);
+ return kr;
}
vm_object_deallocate(object);
- if (kernel_object == object) vm_tag_update_size(tag, newmapsize);
+ if (kernel_object == object) {
+ vm_tag_update_size(tag, newmapsize);
+ }
*newaddrp = CAST_DOWN(vm_offset_t, newmapaddr);
return KERN_SUCCESS;
kern_return_t
kmem_alloc_kobject_external(
- vm_map_t map,
- vm_offset_t *addrp,
- vm_size_t size)
+ vm_map_t map,
+ vm_offset_t *addrp,
+ vm_size_t size)
{
- return (kmem_alloc_kobject(map, addrp, size, vm_tag_bt()));
+ return kmem_alloc_kobject(map, addrp, size, vm_tag_bt());
}
kern_return_t
kmem_alloc_kobject(
- vm_map_t map,
- vm_offset_t *addrp,
- vm_size_t size,
+ vm_map_t map,
+ vm_offset_t *addrp,
+ vm_size_t size,
vm_tag_t tag)
{
return kernel_memory_allocate(map, addrp, size, 0, KMA_KOBJECT, tag);
kern_return_t
kmem_alloc_aligned(
- vm_map_t map,
- vm_offset_t *addrp,
- vm_size_t size,
+ vm_map_t map,
+ vm_offset_t *addrp,
+ vm_size_t size,
vm_tag_t tag)
{
- if ((size & (size - 1)) != 0)
+ if ((size & (size - 1)) != 0) {
panic("kmem_alloc_aligned: size not aligned");
+ }
return kernel_memory_allocate(map, addrp, size, size - 1, KMA_KOBJECT, tag);
}
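+/*
+ * For example, kmem_alloc_aligned(kernel_map, &addr, 16 * 1024, tag)
+ * returns a 16KB region whose start is 16KB-aligned: the power-of-two
+ * size doubles as the alignment mask (size - 1), and any other size
+ * panics.
+ */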
kern_return_t
kmem_alloc_pageable_external(
- vm_map_t map,
- vm_offset_t *addrp,
- vm_size_t size)
+ vm_map_t map,
+ vm_offset_t *addrp,
+ vm_size_t size)
{
- return (kmem_alloc_pageable(map, addrp, size, vm_tag_bt()));
+ return kmem_alloc_pageable(map, addrp, size, vm_tag_bt());
}
kern_return_t
kmem_alloc_pageable(
- vm_map_t map,
- vm_offset_t *addrp,
- vm_size_t size,
+ vm_map_t map,
+ vm_offset_t *addrp,
+ vm_size_t size,
vm_tag_t tag)
{
vm_map_offset_t map_addr;
- vm_map_size_t map_size;
+ vm_map_size_t map_size;
kern_return_t kr;
#ifndef normal
map_addr = vm_map_min(map);
#endif
map_size = vm_map_round_page(size,
- VM_MAP_PAGE_MASK(map));
+ VM_MAP_PAGE_MASK(map));
if (map_size < size) {
/* overflow */
*addrp = 0;
}
kr = vm_map_enter(map, &map_addr, map_size,
- (vm_map_offset_t) 0,
- VM_FLAGS_ANYWHERE,
- VM_MAP_KERNEL_FLAGS_NONE,
- tag,
- VM_OBJECT_NULL, (vm_object_offset_t) 0, FALSE,
- VM_PROT_DEFAULT, VM_PROT_ALL, VM_INHERIT_DEFAULT);
-
- if (kr != KERN_SUCCESS)
+ (vm_map_offset_t) 0,
+ VM_FLAGS_ANYWHERE,
+ VM_MAP_KERNEL_FLAGS_NONE,
+ tag,
+ VM_OBJECT_NULL, (vm_object_offset_t) 0, FALSE,
+ VM_PROT_DEFAULT, VM_PROT_ALL, VM_INHERIT_DEFAULT);
+
+ if (kr != KERN_SUCCESS) {
return kr;
+ }
#if KASAN
kasan_notify_address(map_addr, map_size);
void
kmem_free(
- vm_map_t map,
- vm_offset_t addr,
- vm_size_t size)
+ vm_map_t map,
+ vm_offset_t addr,
+ vm_size_t size)
{
kern_return_t kr;
TRACE_MACHLEAKS(KMEM_FREE_CODE, KMEM_FREE_CODE_2, size, addr);
- if(size == 0) {
+ if (size == 0) {
#if MACH_ASSERT
- printf("kmem_free called with size==0 for map: %p with addr: 0x%llx\n",map,(uint64_t)addr);
+ printf("kmem_free called with size==0 for map: %p with addr: 0x%llx\n", map, (uint64_t)addr);
#endif
return;
}
kr = vm_map_remove(map,
- vm_map_trunc_page(addr,
- VM_MAP_PAGE_MASK(map)),
- vm_map_round_page(addr + size,
- VM_MAP_PAGE_MASK(map)),
- VM_MAP_REMOVE_KUNWIRE);
- if (kr != KERN_SUCCESS)
+ vm_map_trunc_page(addr,
+ VM_MAP_PAGE_MASK(map)),
+ vm_map_round_page(addr + size,
+ VM_MAP_PAGE_MASK(map)),
+ VM_MAP_REMOVE_KUNWIRE);
+ if (kr != KERN_SUCCESS) {
panic("kmem_free");
+ }
}
/*
kern_return_t
kmem_alloc_pages(
- vm_object_t object,
- vm_object_offset_t offset,
- vm_object_size_t size)
+ vm_object_t object,
+ vm_object_offset_t offset,
+ vm_object_size_t size)
{
- vm_object_size_t alloc_size;
+ vm_object_size_t alloc_size;
alloc_size = vm_object_round_page(size);
- vm_object_lock(object);
+ vm_object_lock(object);
while (alloc_size) {
- vm_page_t mem;
+ vm_page_t mem;
- /*
- * Allocate a page
- */
- while (VM_PAGE_NULL ==
- (mem = vm_page_alloc(object, offset))) {
- vm_object_unlock(object);
- VM_PAGE_WAIT();
- vm_object_lock(object);
- }
- mem->busy = FALSE;
+ /*
+ * Allocate a page
+ */
+ while (VM_PAGE_NULL ==
+ (mem = vm_page_alloc(object, offset))) {
+ vm_object_unlock(object);
+ VM_PAGE_WAIT();
+ vm_object_lock(object);
+ }
+ mem->vmp_busy = FALSE;
- alloc_size -= PAGE_SIZE;
- offset += PAGE_SIZE;
+ alloc_size -= PAGE_SIZE;
+ offset += PAGE_SIZE;
}
vm_object_unlock(object);
return KERN_SUCCESS;
*/
kern_return_t
kmem_suballoc(
- vm_map_t parent,
- vm_offset_t *addr,
- vm_size_t size,
- boolean_t pageable,
- int flags,
+ vm_map_t parent,
+ vm_offset_t *addr,
+ vm_size_t size,
+ boolean_t pageable,
+ int flags,
vm_map_kernel_flags_t vmk_flags,
vm_tag_t tag,
- vm_map_t *new_map)
+ vm_map_t *new_map)
{
- vm_map_t map;
- vm_map_offset_t map_addr;
- vm_map_size_t map_size;
- kern_return_t kr;
+ vm_map_t map;
+ vm_map_offset_t map_addr;
+ vm_map_size_t map_size;
+ kern_return_t kr;
map_size = vm_map_round_page(size,
- VM_MAP_PAGE_MASK(parent));
+ VM_MAP_PAGE_MASK(parent));
if (map_size < size) {
/* overflow */
*addr = 0;
vm_object_reference(vm_submap_object);
map_addr = ((flags & VM_FLAGS_ANYWHERE)
- ? vm_map_min(parent)
- : vm_map_trunc_page(*addr,
- VM_MAP_PAGE_MASK(parent)));
+ ? vm_map_min(parent)
+ : vm_map_trunc_page(*addr,
+ VM_MAP_PAGE_MASK(parent)));
kr = vm_map_enter(parent, &map_addr, map_size,
- (vm_map_offset_t) 0, flags, vmk_flags, tag,
- vm_submap_object, (vm_object_offset_t) 0, FALSE,
- VM_PROT_DEFAULT, VM_PROT_ALL, VM_INHERIT_DEFAULT);
+ (vm_map_offset_t) 0, flags, vmk_flags, tag,
+ vm_submap_object, (vm_object_offset_t) 0, FALSE,
+ VM_PROT_DEFAULT, VM_PROT_ALL, VM_INHERIT_DEFAULT);
if (kr != KERN_SUCCESS) {
vm_object_deallocate(vm_submap_object);
- return (kr);
+ return kr;
}
pmap_reference(vm_map_pmap(parent));
map = vm_map_create(vm_map_pmap(parent), map_addr, map_addr + map_size, pageable);
- if (map == VM_MAP_NULL)
- panic("kmem_suballoc: vm_map_create failed"); /* "can't happen" */
+ if (map == VM_MAP_NULL) {
+ panic("kmem_suballoc: vm_map_create failed"); /* "can't happen" */
+ }
/* inherit the parent map's page size */
vm_map_set_page_shift(map, VM_MAP_PAGE_SHIFT(parent));
/*
* See comment preceding vm_map_submap().
*/
- vm_map_remove(parent, map_addr, map_addr + map_size, VM_MAP_NO_FLAGS);
- vm_map_deallocate(map); /* also removes ref to pmap */
+ vm_map_remove(parent, map_addr, map_addr + map_size,
+ VM_MAP_REMOVE_NO_FLAGS);
+ vm_map_deallocate(map); /* also removes ref to pmap */
vm_object_deallocate(vm_submap_object);
- return (kr);
+ return kr;
}
*addr = CAST_DOWN(vm_offset_t, map_addr);
*new_map = map;
- return (KERN_SUCCESS);
+ return KERN_SUCCESS;
}
+/*
+ * The default percentage of memory that can be mlocked is scaled based on the total
+ * amount of memory in the system. These percentages are calculated
+ * offline and stored in this table. We index this table by
+ * log2(max_mem) - VM_USER_WIREABLE_MIN_CONFIG. We clamp this index in the range
+ * [0, sizeof(wire_limit_percents) / sizeof(vm_map_size_t))
+ *
+ * Note that these values were picked for macOS.
+ * If we ever have arm devices with very large memory configurations, we
+ * may want to revisit this, since the kernel overhead is smaller there
+ * due to the larger page size.
+ */
+
+/* Start scaling iff we're managing > 2^32 = 4GB of RAM. */
+#define VM_USER_WIREABLE_MIN_CONFIG 32
+static vm_map_size_t wire_limit_percents[] =
+{ 70, 73, 76, 79, 82, 85, 88, 91, 94, 97};
+
+/*
+ * Sets the default global user wire limit which limits the amount of
+ * memory that can be locked via mlock() based on the above algorithm.
+ * This can be overridden via a sysctl.
+ */
+static void
+kmem_set_user_wire_limits(void)
+{
+ uint64_t available_mem_log;
+ uint64_t max_wire_percent;
+ size_t wire_limit_percents_length = sizeof(wire_limit_percents) /
+ sizeof(vm_map_size_t);
+ vm_map_size_t limit;
+ uint64_t config_memsize = max_mem;
+#if defined(XNU_TARGET_OS_OSX)
+ config_memsize = max_mem_actual;
+#endif /* defined(XNU_TARGET_OS_OSX) */
+
+ available_mem_log = bit_floor(config_memsize);
+
+ if (available_mem_log < VM_USER_WIREABLE_MIN_CONFIG) {
+ available_mem_log = 0;
+ } else {
+ available_mem_log -= VM_USER_WIREABLE_MIN_CONFIG;
+ }
+ if (available_mem_log >= wire_limit_percents_length) {
+ available_mem_log = wire_limit_percents_length - 1;
+ }
+ max_wire_percent = wire_limit_percents[available_mem_log];
+
+ limit = config_memsize * max_wire_percent / 100;
+	/* Cap the number of non-lockable bytes at VM_NOT_USER_WIREABLE_MAX */
+ if (config_memsize - limit > VM_NOT_USER_WIREABLE_MAX) {
+ limit = config_memsize - VM_NOT_USER_WIREABLE_MAX;
+ }
+
+ vm_global_user_wire_limit = limit;
+ /* the default per task limit is the same as the global limit */
+ vm_per_task_user_wire_limit = limit;
+ vm_add_wire_count_over_global_limit = 0;
+ vm_add_wire_count_over_user_limit = 0;
+}
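+/*
+ * Worked example of the scaling above: a 16GB configuration has
+ * log2(config_memsize) = 34, giving table index 34 - 32 = 2 and a
+ * wireable fraction of 76%, i.e. a limit of roughly 12GB; configs of
+ * 4GB or less stay at index 0 (70%).  The final clamp only matters on
+ * very large configurations, where leaving more than
+ * VM_NOT_USER_WIREABLE_MAX bytes unlockable would be wasteful, so the
+ * limit is raised to config_memsize - VM_NOT_USER_WIREABLE_MAX.
+ */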
+
/*
* kmem_init:
* Initialize the kernel's virtual memory map, taking
* into account all memory allocated up to this time.
*/
+__startup_func
void
kmem_init(
- vm_offset_t start,
- vm_offset_t end)
+ vm_offset_t start,
+ vm_offset_t end)
{
vm_map_offset_t map_start;
vm_map_offset_t map_end;
vmk_flags.vmkf_no_pmap_check = TRUE;
map_start = vm_map_trunc_page(start,
- VM_MAP_PAGE_MASK(kernel_map));
+ VM_MAP_PAGE_MASK(kernel_map));
map_end = vm_map_round_page(end,
- VM_MAP_PAGE_MASK(kernel_map));
+ VM_MAP_PAGE_MASK(kernel_map));
-#if defined(__arm__) || defined(__arm64__)
- kernel_map = vm_map_create(pmap_kernel(),VM_MIN_KERNEL_AND_KEXT_ADDRESS,
- VM_MAX_KERNEL_ADDRESS, FALSE);
+#if defined(__arm__) || defined(__arm64__)
+ kernel_map = vm_map_create(pmap_kernel(), VM_MIN_KERNEL_AND_KEXT_ADDRESS,
+ VM_MAX_KERNEL_ADDRESS, FALSE);
/*
* Reserve virtual memory allocated up to this time.
*/
{
- unsigned int region_select = 0;
- vm_map_offset_t region_start;
- vm_map_size_t region_size;
+ unsigned int region_select = 0;
+ vm_map_offset_t region_start;
+ vm_map_size_t region_size;
vm_map_offset_t map_addr;
kern_return_t kr;
	while (pmap_virtual_region(region_select, &region_start, &region_size)) {
-
map_addr = region_start;
kr = vm_map_enter(kernel_map, &map_addr,
- vm_map_round_page(region_size,
- VM_MAP_PAGE_MASK(kernel_map)),
- (vm_map_offset_t) 0,
- VM_FLAGS_FIXED,
- vmk_flags,
- VM_KERN_MEMORY_NONE,
- VM_OBJECT_NULL,
- (vm_object_offset_t) 0, FALSE, VM_PROT_NONE, VM_PROT_NONE,
- VM_INHERIT_DEFAULT);
+ vm_map_round_page(region_size,
+ VM_MAP_PAGE_MASK(kernel_map)),
+ (vm_map_offset_t) 0,
+ VM_FLAGS_FIXED,
+ vmk_flags,
+ VM_KERN_MEMORY_NONE,
+ VM_OBJECT_NULL,
+ (vm_object_offset_t) 0, FALSE, VM_PROT_NONE, VM_PROT_NONE,
+ VM_INHERIT_DEFAULT);
if (kr != KERN_SUCCESS) {
panic("kmem_init(0x%llx,0x%llx): vm_map_enter(0x%llx,0x%llx) error 0x%x\n",
- (uint64_t) start, (uint64_t) end, (uint64_t) region_start,
- (uint64_t) region_size, kr);
- }
+ (uint64_t) start, (uint64_t) end, (uint64_t) region_start,
+ (uint64_t) region_size, kr);
+ }
region_select++;
- }
+ }
}
#else
- kernel_map = vm_map_create(pmap_kernel(),VM_MIN_KERNEL_AND_KEXT_ADDRESS,
- map_end, FALSE);
+ kernel_map = vm_map_create(pmap_kernel(), VM_MIN_KERNEL_AND_KEXT_ADDRESS,
+ map_end, FALSE);
/*
* Reserve virtual memory allocated up to this time.
*/
if (start != VM_MIN_KERNEL_AND_KEXT_ADDRESS) {
vm_map_offset_t map_addr;
kern_return_t kr;
-
+
vmk_flags = VM_MAP_KERNEL_FLAGS_NONE;
vmk_flags.vmkf_no_pmap_check = TRUE;
map_addr = VM_MIN_KERNEL_AND_KEXT_ADDRESS;
kr = vm_map_enter(kernel_map,
- &map_addr,
- (vm_map_size_t)(map_start - VM_MIN_KERNEL_AND_KEXT_ADDRESS),
- (vm_map_offset_t) 0,
- VM_FLAGS_FIXED,
- vmk_flags,
- VM_KERN_MEMORY_NONE,
- VM_OBJECT_NULL,
- (vm_object_offset_t) 0, FALSE,
- VM_PROT_NONE, VM_PROT_NONE,
- VM_INHERIT_DEFAULT);
-
+ &map_addr,
+ (vm_map_size_t)(map_start - VM_MIN_KERNEL_AND_KEXT_ADDRESS),
+ (vm_map_offset_t) 0,
+ VM_FLAGS_FIXED,
+ vmk_flags,
+ VM_KERN_MEMORY_NONE,
+ VM_OBJECT_NULL,
+ (vm_object_offset_t) 0, FALSE,
+ VM_PROT_NONE, VM_PROT_NONE,
+ VM_INHERIT_DEFAULT);
+
if (kr != KERN_SUCCESS) {
panic("kmem_init(0x%llx,0x%llx): vm_map_enter(0x%llx,0x%llx) error 0x%x\n",
- (uint64_t) start, (uint64_t) end,
- (uint64_t) VM_MIN_KERNEL_AND_KEXT_ADDRESS,
- (uint64_t) (map_start - VM_MIN_KERNEL_AND_KEXT_ADDRESS),
- kr);
- }
+ (uint64_t) start, (uint64_t) end,
+ (uint64_t) VM_MIN_KERNEL_AND_KEXT_ADDRESS,
+ (uint64_t) (map_start - VM_MIN_KERNEL_AND_KEXT_ADDRESS),
+ kr);
+ }
}
#endif
- /*
- * Set the default global user wire limit which limits the amount of
- * memory that can be locked via mlock(). We set this to the total
- * amount of memory that are potentially usable by a user app (max_mem)
- * minus a certain amount. This can be overridden via a sysctl.
- */
- vm_global_no_user_wire_amount = MIN(max_mem*20/100,
- VM_NOT_USER_WIREABLE);
- vm_global_user_wire_limit = max_mem - vm_global_no_user_wire_amount;
-
- /* the default per user limit is the same as the global limit */
- vm_user_wire_limit = vm_global_user_wire_limit;
+ kmem_set_user_wire_limits();
}
-
/*
* Routine: copyinmap
* Purpose:
*/
kern_return_t
copyinmap(
- vm_map_t map,
- vm_map_offset_t fromaddr,
- void *todata,
- vm_size_t length)
+ vm_map_t map,
+ vm_map_offset_t fromaddr,
+ void *todata,
+ vm_size_t length)
{
- kern_return_t kr = KERN_SUCCESS;
+ kern_return_t kr = KERN_SUCCESS;
vm_map_t oldmap;
- if (vm_map_pmap(map) == pmap_kernel())
- {
+ if (vm_map_pmap(map) == pmap_kernel()) {
/* assume a correct copy */
memcpy(todata, CAST_DOWN(void *, fromaddr), length);
- }
- else if (current_map() == map)
- {
- if (copyin(fromaddr, todata, length) != 0)
+ } else if (current_map() == map) {
+ if (copyin(fromaddr, todata, length) != 0) {
kr = KERN_INVALID_ADDRESS;
- }
- else
- {
+ }
+ } else {
vm_map_reference(map);
oldmap = vm_map_switch(map);
- if (copyin(fromaddr, todata, length) != 0)
+ if (copyin(fromaddr, todata, length) != 0) {
kr = KERN_INVALID_ADDRESS;
+ }
vm_map_switch(oldmap);
vm_map_deallocate(map);
}
* Routine: copyoutmap
* Purpose:
* Like copyout, except that toaddr is an address
- * in the specified VM map. This implementation
- * is incomplete; it handles the current user map
- * and the kernel map/submaps.
+ * in the specified VM map.
*/
kern_return_t
copyoutmap(
- vm_map_t map,
- void *fromdata,
- vm_map_address_t toaddr,
- vm_size_t length)
+ vm_map_t map,
+ void *fromdata,
+ vm_map_address_t toaddr,
+ vm_size_t length)
{
+ kern_return_t kr = KERN_SUCCESS;
+ vm_map_t oldmap;
+
if (vm_map_pmap(map) == pmap_kernel()) {
/* assume a correct copy */
memcpy(CAST_DOWN(void *, toaddr), fromdata, length);
- return KERN_SUCCESS;
+ } else if (current_map() == map) {
+ if (copyout(fromdata, toaddr, length) != 0) {
+ kr = KERN_INVALID_ADDRESS;
+ }
+ } else {
+ vm_map_reference(map);
+ oldmap = vm_map_switch(map);
+ if (copyout(fromdata, toaddr, length) != 0) {
+ kr = KERN_INVALID_ADDRESS;
+ }
+ vm_map_switch(oldmap);
+ vm_map_deallocate(map);
}
+ return kr;
+}
- if (current_map() != map)
- return KERN_NOT_SUPPORTED;
+/*
+ * Routine: copyoutmap_atomic{32, 64}
+ * Purpose:
+ * Like copyoutmap, except that the operation is atomic.
+ *		Takes a value rather than a *fromdata pointer.
+ */
+kern_return_t
+copyoutmap_atomic32(
+ vm_map_t map,
+ uint32_t value,
+ vm_map_address_t toaddr)
+{
+ kern_return_t kr = KERN_SUCCESS;
+ vm_map_t oldmap;
- if (copyout(fromdata, toaddr, length) != 0)
- return KERN_INVALID_ADDRESS;
+ if (vm_map_pmap(map) == pmap_kernel()) {
+ /* assume a correct toaddr */
+ *(uint32_t *)toaddr = value;
+ } else if (current_map() == map) {
+ if (copyout_atomic32(value, toaddr) != 0) {
+ kr = KERN_INVALID_ADDRESS;
+ }
+ } else {
+ vm_map_reference(map);
+ oldmap = vm_map_switch(map);
+ if (copyout_atomic32(value, toaddr) != 0) {
+ kr = KERN_INVALID_ADDRESS;
+ }
+ vm_map_switch(oldmap);
+ vm_map_deallocate(map);
+ }
+ return kr;
+}
- return KERN_SUCCESS;
+kern_return_t
+copyoutmap_atomic64(
+ vm_map_t map,
+ uint64_t value,
+ vm_map_address_t toaddr)
+{
+ kern_return_t kr = KERN_SUCCESS;
+ vm_map_t oldmap;
+
+ if (vm_map_pmap(map) == pmap_kernel()) {
+ /* assume a correct toaddr */
+ *(uint64_t *)toaddr = value;
+ } else if (current_map() == map) {
+ if (copyout_atomic64(value, toaddr) != 0) {
+ kr = KERN_INVALID_ADDRESS;
+ }
+ } else {
+ vm_map_reference(map);
+ oldmap = vm_map_switch(map);
+ if (copyout_atomic64(value, toaddr) != 0) {
+ kr = KERN_INVALID_ADDRESS;
+ }
+ vm_map_switch(oldmap);
+ vm_map_deallocate(map);
+ }
+ return kr;
}
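+/*
+ * Illustrative use (task_map, seqno and user_counter_addr are
+ * caller-held names): publish a 32-bit sequence number into another
+ * task's map without staging it through a kernel buffer:
+ *
+ *	kr = copyoutmap_atomic32(task_map, seqno, user_counter_addr);
+ *	if (kr == KERN_INVALID_ADDRESS) {
+ *		... the target page was unmapped or not writable ...
+ *	}
+ */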
/*
return;
}
- vm_offset_t sha_digest[SHA256_DIGEST_LENGTH/sizeof(vm_offset_t)];
+ vm_offset_t sha_digest[SHA256_DIGEST_LENGTH / sizeof(vm_offset_t)];
SHA256_CTX sha_ctx;
SHA256_Init(&sha_ctx);
{
vm_kernel_addrperm_external(addr, up_addr);
}
+
+void
+vm_packing_pointer_invalid(vm_offset_t ptr, vm_packing_params_t params)
+{
+ if (ptr & ((1ul << params.vmpp_shift) - 1)) {
+ panic("pointer %p can't be packed: low %d bits aren't 0",
+ (void *)ptr, params.vmpp_shift);
+ } else if (ptr <= params.vmpp_base) {
+ panic("pointer %p can't be packed: below base %p",
+ (void *)ptr, (void *)params.vmpp_base);
+ } else {
+ panic("pointer %p can't be packed: maximum encodable pointer is %p",
+ (void *)ptr, (void *)vm_packing_max_packable(params));
+ }
+}
+
+void
+vm_packing_verify_range(
+ const char *subsystem,
+ vm_offset_t min_address,
+ vm_offset_t max_address,
+ vm_packing_params_t params)
+{
+ if (min_address > max_address) {
+ panic("%s: %s range invalid min:%p > max:%p",
+ __func__, subsystem, (void *)min_address, (void *)max_address);
+ }
+
+ if (!params.vmpp_base_relative) {
+ return;
+ }
+
+ if (min_address <= params.vmpp_base) {
+ panic("%s: %s range invalid min:%p <= base:%p",
+ __func__, subsystem, (void *)min_address, (void *)params.vmpp_base);
+ }
+
+ if (max_address > vm_packing_max_packable(params)) {
+ panic("%s: %s range invalid max:%p >= max packable:%p",
+ __func__, subsystem, (void *)max_address,
+ (void *)vm_packing_max_packable(params));
+ }
+}
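+/*
+ * For example, a packing scheme with vmpp_shift == 6 can only encode
+ * 64-byte-aligned pointers: any pointer with nonzero low 6 bits trips
+ * the first panic above.  A base-relative scheme additionally requires
+ * every packable address to lie strictly above vmpp_base and at or
+ * below vm_packing_max_packable(params), which is exactly what
+ * vm_packing_verify_range() checks for a subsystem's
+ * [min_address, max_address] range.
+ */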