/*
- * Copyright (c) 2008 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 2008-2020 Apple Inc. All rights reserved.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_START@
- *
+ *
* This file contains Original Code and/or Modifications of Original Code
* as defined in and that are subject to the Apple Public Source License
* Version 2.0 (the 'License'). You may not use this file except in
* unlawful or unlicensed copies of an Apple operating system, or to
* circumvent, violate, or enable the circumvention or violation of, any
* terms of an Apple operating system software license agreement.
- *
+ *
* Please obtain a copy of the License at
* http://www.opensource.apple.com/apsl/ and read it before using this file.
- *
+ *
* The Original Code and all software distributed under the License are
* distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
* EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
* FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
* Please see the License for the specific language governing rights and
* limitations under the License.
- *
+ *
* @APPLE_OSREFERENCE_LICENSE_HEADER_END@
*/
#include <kern/kalloc.h>
#include <kern/queue.h>
+#include <vm/memory_object.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <vm/vm_pageout.h>
#include <vm/vm_protos.h>
-/*
- * APPLE SWAPFILE MEMORY PAGER
+/*
+ * APPLE SWAPFILE MEMORY PAGER
*
* This external memory manager (EMM) handles mappings of the swap files.
* Swap files are not regular files and are used solely to store contents of
* the disk blocks they were allocated. The "super-user" could see the
* contents of free blocks anyway, so this is not a new security issue but
* it may be perceived as one.
- * * ENCRYPTED SWAP:
- * When swap is encrypted, one does not expect to find any clear contents
- * in the swap files. Since unused blocks are not scrubbed, they could still
- * contain clear contents. If these contents are visible through a mapping
- * of the swap file, it makes it look like swap is not really encrypted.
- *
+ *
* We can't legitimately prevent a user process with appropriate privileges
* from mapping a swap file, but we can prevent it from accessing its actual
* contents.
void swapfile_pager_reference(memory_object_t mem_obj);
void swapfile_pager_deallocate(memory_object_t mem_obj);
kern_return_t swapfile_pager_init(memory_object_t mem_obj,
- memory_object_control_t control,
- memory_object_cluster_size_t pg_size);
+ memory_object_control_t control,
+ memory_object_cluster_size_t pg_size);
kern_return_t swapfile_pager_terminate(memory_object_t mem_obj);
kern_return_t swapfile_pager_data_request(memory_object_t mem_obj,
- memory_object_offset_t offset,
- memory_object_cluster_size_t length,
- vm_prot_t protection_required,
- memory_object_fault_info_t fault_info);
+ memory_object_offset_t offset,
+ memory_object_cluster_size_t length,
+ vm_prot_t protection_required,
+ memory_object_fault_info_t fault_info);
kern_return_t swapfile_pager_data_return(memory_object_t mem_obj,
- memory_object_offset_t offset,
- memory_object_cluster_size_t data_cnt,
- memory_object_offset_t *resid_offset,
- int *io_error,
- boolean_t dirty,
- boolean_t kernel_copy,
- int upl_flags);
+ memory_object_offset_t offset,
+ memory_object_cluster_size_t data_cnt,
+ memory_object_offset_t *resid_offset,
+ int *io_error,
+ boolean_t dirty,
+ boolean_t kernel_copy,
+ int upl_flags);
kern_return_t swapfile_pager_data_initialize(memory_object_t mem_obj,
- memory_object_offset_t offset,
- memory_object_cluster_size_t data_cnt);
+ memory_object_offset_t offset,
+ memory_object_cluster_size_t data_cnt);
kern_return_t swapfile_pager_data_unlock(memory_object_t mem_obj,
- memory_object_offset_t offset,
- memory_object_size_t size,
- vm_prot_t desired_access);
+ memory_object_offset_t offset,
+ memory_object_size_t size,
+ vm_prot_t desired_access);
kern_return_t swapfile_pager_synchronize(memory_object_t mem_obj,
- memory_object_offset_t offset,
- memory_object_size_t length,
- vm_sync_t sync_flags);
+ memory_object_offset_t offset,
+ memory_object_size_t length,
+ vm_sync_t sync_flags);
kern_return_t swapfile_pager_map(memory_object_t mem_obj,
- vm_prot_t prot);
+ vm_prot_t prot);
kern_return_t swapfile_pager_last_unmap(memory_object_t mem_obj);
/*
* These routines are invoked by VM via the memory_object_*() interfaces.
*/
const struct memory_object_pager_ops swapfile_pager_ops = {
- swapfile_pager_reference,
- swapfile_pager_deallocate,
- swapfile_pager_init,
- swapfile_pager_terminate,
- swapfile_pager_data_request,
- swapfile_pager_data_return,
- swapfile_pager_data_initialize,
- swapfile_pager_data_unlock,
- swapfile_pager_synchronize,
- swapfile_pager_map,
- swapfile_pager_last_unmap,
- "swapfile pager"
+ .memory_object_reference = swapfile_pager_reference,
+ .memory_object_deallocate = swapfile_pager_deallocate,
+ .memory_object_init = swapfile_pager_init,
+ .memory_object_terminate = swapfile_pager_terminate,
+ .memory_object_data_request = swapfile_pager_data_request,
+ .memory_object_data_return = swapfile_pager_data_return,
+ .memory_object_data_initialize = swapfile_pager_data_initialize,
+ .memory_object_data_unlock = swapfile_pager_data_unlock,
+ .memory_object_synchronize = swapfile_pager_synchronize,
+ .memory_object_map = swapfile_pager_map,
+ .memory_object_last_unmap = swapfile_pager_last_unmap,
+ .memory_object_data_reclaim = NULL,
+ .memory_object_pager_name = "swapfile pager"
};
/*
* the "swapfile" EMM.
*/
typedef struct swapfile_pager {
- struct ipc_object_header pager_header; /* fake ip_kotype() */
- memory_object_pager_ops_t pager_ops; /* == &swapfile_pager_ops */
- queue_chain_t pager_queue; /* next & prev pagers */
- unsigned int ref_count; /* reference count */
- boolean_t is_ready; /* is this pager ready ? */
- boolean_t is_mapped; /* is this pager mapped ? */
- memory_object_control_t pager_control; /* mem object control handle */
- struct vnode *swapfile_vnode;/* the swapfile's vnode */
+ /* mandatory generic header */
+ struct memory_object swp_pgr_hdr;
+
+ /* pager-specific data */
+ queue_chain_t pager_queue; /* next & prev pagers */
+ unsigned int ref_count; /* reference count */
+ boolean_t is_ready; /* is this pager ready ? */
+ boolean_t is_mapped; /* is this pager mapped ? */
+ struct vnode *swapfile_vnode;/* the swapfile's vnode */
} *swapfile_pager_t;
-#define SWAPFILE_PAGER_NULL ((swapfile_pager_t) NULL)
-#define pager_ikot pager_header.io_bits
+#define SWAPFILE_PAGER_NULL ((swapfile_pager_t) NULL)
/*
* List of memory objects managed by this EMM.
* The list is protected by the "swapfile_pager_lock" lock.
*/
-int swapfile_pager_count = 0; /* number of pagers */
-queue_head_t swapfile_pager_queue;
-decl_lck_mtx_data(,swapfile_pager_lock)
+int swapfile_pager_count = 0; /* number of pagers */
+queue_head_t swapfile_pager_queue = QUEUE_HEAD_INITIALIZER(swapfile_pager_queue);
+LCK_GRP_DECLARE(swapfile_pager_lck_grp, "swapfile pager");
+LCK_MTX_DECLARE(swapfile_pager_lock, &swapfile_pager_lck_grp);
/*
* Statistics & counters.
*/
int swapfile_pager_count_max = 0;
-
-lck_grp_t swapfile_pager_lck_grp;
-lck_grp_attr_t swapfile_pager_lck_grp_attr;
-lck_attr_t swapfile_pager_lck_attr;
-
-
/* internal prototypes */
swapfile_pager_t swapfile_pager_create(struct vnode *vp);
swapfile_pager_t swapfile_pager_lookup(memory_object_t mem_obj);
void swapfile_pager_dequeue(swapfile_pager_t pager);
void swapfile_pager_deallocate_internal(swapfile_pager_t pager,
- boolean_t locked);
+ boolean_t locked);
void swapfile_pager_terminate_internal(swapfile_pager_t pager);
#if DEBUG
int swapfile_pagerdebug = 0;
-#define PAGER_ALL 0xffffffff
-#define PAGER_INIT 0x00000001
-#define PAGER_PAGEIN 0x00000002
-
-#define PAGER_DEBUG(LEVEL, A) \
- MACRO_BEGIN \
- if ((swapfile_pagerdebug & LEVEL)==LEVEL) { \
- printf A; \
- } \
+#define PAGER_ALL 0xffffffff
+#define PAGER_INIT 0x00000001
+#define PAGER_PAGEIN 0x00000002
+
+#define PAGER_DEBUG(LEVEL, A) \
+ MACRO_BEGIN \
+ if ((swapfile_pagerdebug & LEVEL)==LEVEL) { \
+ printf A; \
+ } \
MACRO_END
#else
#define PAGER_DEBUG(LEVEL, A)
#endif
-void
-swapfile_pager_bootstrap(void)
-{
- lck_grp_attr_setdefault(&swapfile_pager_lck_grp_attr);
- lck_grp_init(&swapfile_pager_lck_grp, "swapfile pager", &swapfile_pager_lck_grp_attr);
- lck_attr_setdefault(&swapfile_pager_lck_attr);
- lck_mtx_init(&swapfile_pager_lock, &swapfile_pager_lck_grp, &swapfile_pager_lck_attr);
- queue_init(&swapfile_pager_queue);
-}
-
/*
* swapfile_pager_init()
*
*/
kern_return_t
swapfile_pager_init(
- memory_object_t mem_obj,
- memory_object_control_t control,
+ memory_object_t mem_obj,
+ memory_object_control_t control,
#if !DEBUG
__unused
#endif
memory_object_cluster_size_t pg_size)
{
- swapfile_pager_t pager;
- kern_return_t kr;
+ swapfile_pager_t pager;
+ kern_return_t kr;
memory_object_attr_info_data_t attributes;
PAGER_DEBUG(PAGER_ALL,
- ("swapfile_pager_init: %p, %p, %x\n",
- mem_obj, control, pg_size));
+ ("swapfile_pager_init: %p, %p, %x\n",
+ mem_obj, control, pg_size));
- if (control == MEMORY_OBJECT_CONTROL_NULL)
+ if (control == MEMORY_OBJECT_CONTROL_NULL) {
return KERN_INVALID_ARGUMENT;
+ }
pager = swapfile_pager_lookup(mem_obj);
memory_object_control_reference(control);
- pager->pager_control = control;
+ pager->swp_pgr_hdr.mo_control = control;
attributes.copy_strategy = MEMORY_OBJECT_COPY_DELAY;
attributes.cluster_size = (1 << (PAGE_SHIFT));
attributes.temporary = TRUE;
kr = memory_object_change_attributes(
- control,
- MEMORY_OBJECT_ATTRIBUTE_INFO,
- (memory_object_info_t) &attributes,
- MEMORY_OBJECT_ATTR_INFO_COUNT);
- if (kr != KERN_SUCCESS)
+ control,
+ MEMORY_OBJECT_ATTRIBUTE_INFO,
+ (memory_object_info_t) &attributes,
+ MEMORY_OBJECT_ATTR_INFO_COUNT);
+ if (kr != KERN_SUCCESS) {
panic("swapfile_pager_init: "
- "memory_object_change_attributes() failed");
+ "memory_object_change_attributes() failed");
+ }
return KERN_SUCCESS;
}
*/
kern_return_t
swapfile_pager_data_return(
- __unused memory_object_t mem_obj,
- __unused memory_object_offset_t offset,
- __unused memory_object_cluster_size_t data_cnt,
- __unused memory_object_offset_t *resid_offset,
- __unused int *io_error,
- __unused boolean_t dirty,
- __unused boolean_t kernel_copy,
- __unused int upl_flags)
+ __unused memory_object_t mem_obj,
+ __unused memory_object_offset_t offset,
+ __unused memory_object_cluster_size_t data_cnt,
+ __unused memory_object_offset_t *resid_offset,
+ __unused int *io_error,
+ __unused boolean_t dirty,
+ __unused boolean_t kernel_copy,
+ __unused int upl_flags)
{
panic("swapfile_pager_data_return: should never get called");
return KERN_FAILURE;
kern_return_t
swapfile_pager_data_initialize(
- __unused memory_object_t mem_obj,
- __unused memory_object_offset_t offset,
- __unused memory_object_cluster_size_t data_cnt)
+ __unused memory_object_t mem_obj,
+ __unused memory_object_offset_t offset,
+ __unused memory_object_cluster_size_t data_cnt)
{
panic("swapfile_pager_data_initialize: should never get called");
return KERN_FAILURE;
kern_return_t
swapfile_pager_data_unlock(
- __unused memory_object_t mem_obj,
- __unused memory_object_offset_t offset,
- __unused memory_object_size_t size,
- __unused vm_prot_t desired_access)
+ __unused memory_object_t mem_obj,
+ __unused memory_object_offset_t offset,
+ __unused memory_object_size_t size,
+ __unused vm_prot_t desired_access)
{
return KERN_FAILURE;
}
*
* Handles page-in requests from VM.
*/
-kern_return_t
+kern_return_t
swapfile_pager_data_request(
- memory_object_t mem_obj,
- memory_object_offset_t offset,
- memory_object_cluster_size_t length,
+ memory_object_t mem_obj,
+ memory_object_offset_t offset,
+ memory_object_cluster_size_t length,
#if !DEBUG
__unused
#endif
- vm_prot_t protection_required,
+ vm_prot_t protection_required,
__unused memory_object_fault_info_t mo_fault_info)
{
- swapfile_pager_t pager;
- memory_object_control_t mo_control;
- upl_t upl;
- int upl_flags;
- upl_size_t upl_size;
- upl_page_info_t *upl_pl = NULL;
- unsigned int pl_count;
- vm_object_t dst_object;
- kern_return_t kr, retval;
- vm_map_offset_t kernel_mapping;
- vm_offset_t dst_vaddr;
- char *dst_ptr;
- vm_offset_t cur_offset;
- vm_map_entry_t map_entry;
+ swapfile_pager_t pager;
+ memory_object_control_t mo_control;
+ upl_t upl;
+ int upl_flags;
+ upl_size_t upl_size;
+ upl_page_info_t *upl_pl = NULL;
+ unsigned int pl_count;
+ vm_object_t dst_object;
+ kern_return_t kr, retval;
+ vm_map_offset_t kernel_mapping;
+ vm_offset_t dst_vaddr;
+ char *dst_ptr;
+ vm_offset_t cur_offset;
+ vm_map_entry_t map_entry;
PAGER_DEBUG(PAGER_ALL, ("swapfile_pager_data_request: %p, %llx, %x, %x\n", mem_obj, offset, length, protection_required));
/*
* Gather in a UPL all the VM pages requested by VM.
*/
- mo_control = pager->pager_control;
+ mo_control = pager->swp_pgr_hdr.mo_control;
upl_size = length;
upl_flags =
- UPL_RET_ONLY_ABSENT |
- UPL_SET_LITE |
- UPL_NO_SYNC |
- UPL_CLEAN_IN_PLACE | /* triggers UPL_CLEAR_DIRTY */
- UPL_SET_INTERNAL;
+ UPL_RET_ONLY_ABSENT |
+ UPL_SET_LITE |
+ UPL_NO_SYNC |
+ UPL_CLEAN_IN_PLACE | /* triggers UPL_CLEAR_DIRTY */
+ UPL_SET_INTERNAL;
pl_count = 0;
kr = memory_object_upl_request(mo_control,
- offset, upl_size,
- &upl, NULL, NULL, upl_flags);
+ offset, upl_size,
+ &upl, NULL, NULL, upl_flags, VM_KERN_MEMORY_OSFMK);
if (kr != KERN_SUCCESS) {
retval = kr;
goto done;
* Reserve a virtual page in the kernel address space to map each
* destination physical page when it's its turn to be processed.
*/
- vm_object_reference(kernel_object); /* ref. for mapping */
+ vm_object_reference(kernel_object); /* ref. for mapping */
kr = vm_map_find_space(kernel_map,
- &kernel_mapping,
- PAGE_SIZE_64,
- 0,
- 0,
- &map_entry);
+ &kernel_mapping,
+ PAGE_SIZE_64,
+ 0,
+ 0,
+ VM_MAP_KERNEL_FLAGS_NONE,
+ VM_KERN_MEMORY_NONE,
+ &map_entry);
if (kr != KERN_SUCCESS) {
vm_object_deallocate(kernel_object);
retval = kr;
goto done;
}
- map_entry->object.vm_object = kernel_object;
- map_entry->offset = kernel_mapping - VM_MIN_KERNEL_ADDRESS;
+ VME_OBJECT_SET(map_entry, kernel_object);
+ VME_OFFSET_SET(map_entry, kernel_mapping - VM_MIN_KERNEL_ADDRESS);
vm_map_unlock(kernel_map);
dst_vaddr = CAST_DOWN(vm_offset_t, kernel_mapping);
dst_ptr = (char *) dst_vaddr;
* is "busy".
*/
dst_pnum = (ppnum_t)
- upl_phys_page(upl_pl, (int)(cur_offset / PAGE_SIZE));
+ upl_phys_page(upl_pl, (int)(cur_offset / PAGE_SIZE));
assert(dst_pnum != 0);
- pmap_enter(kernel_pmap,
- kernel_mapping,
- dst_pnum,
- VM_PROT_READ | VM_PROT_WRITE,
- dst_object->wimg_bits & VM_WIMG_MASK,
- TRUE);
+ retval = pmap_enter(kernel_pmap,
+ kernel_mapping,
+ dst_pnum,
+ VM_PROT_READ | VM_PROT_WRITE,
+ VM_PROT_NONE,
+ 0,
+ TRUE);
+
+ assert(retval == KERN_SUCCESS);
+
+ if (retval != KERN_SUCCESS) {
+ goto done;
+ }
memset(dst_ptr, '\0', PAGE_SIZE);
/* add an end-of-line to keep line counters happy */
- dst_ptr[PAGE_SIZE-1] = '\n';
-
+ dst_ptr[PAGE_SIZE - 1] = '\n';
+
/*
* Remove the pmap mapping of the destination page
* in the kernel.
*/
pmap_remove(kernel_pmap,
- (addr64_t) kernel_mapping,
- (addr64_t) (kernel_mapping + PAGE_SIZE_64));
-
+ (addr64_t) kernel_mapping,
+ (addr64_t) (kernel_mapping + PAGE_SIZE_64));
}
retval = KERN_SUCCESS;
upl_abort(upl, 0);
} else {
boolean_t empty;
- upl_commit_range(upl, 0, upl->size,
- UPL_COMMIT_CS_VALIDATED,
- upl_pl, pl_count, &empty);
+ assertf(page_aligned(upl->u_offset) && page_aligned(upl->u_size),
+ "upl %p offset 0x%llx size 0x%x",
+ upl, upl->u_offset, upl->u_size);
+ upl_commit_range(upl, 0, upl->u_size,
+ UPL_COMMIT_CS_VALIDATED,
+ upl_pl, pl_count, &empty);
}
/* and deallocate the UPL */
if (kernel_mapping != 0) {
/* clean up the mapping of the source and destination pages */
kr = vm_map_remove(kernel_map,
- kernel_mapping,
- kernel_mapping + PAGE_SIZE_64,
- VM_MAP_NO_FLAGS);
+ kernel_mapping,
+ kernel_mapping + PAGE_SIZE_64,
+ VM_MAP_REMOVE_NO_FLAGS);
assert(kr == KERN_SUCCESS);
kernel_mapping = 0;
dst_vaddr = 0;
*/
void
swapfile_pager_reference(
- memory_object_t mem_obj)
-{
- swapfile_pager_t pager;
+ memory_object_t mem_obj)
+{
+ swapfile_pager_t pager;
pager = swapfile_pager_lookup(mem_obj);
assert(!pager->is_mapped);
queue_remove(&swapfile_pager_queue,
- pager,
- swapfile_pager_t,
- pager_queue);
+ pager,
+ swapfile_pager_t,
+ pager_queue);
pager->pager_queue.next = NULL;
pager->pager_queue.prev = NULL;
-
+
swapfile_pager_count--;
}
}
/* trigger the destruction of the memory object */
- memory_object_destroy(pager->pager_control, 0);
+ memory_object_destroy(pager->swp_pgr_hdr.mo_control, 0);
}
/*
*/
void
swapfile_pager_deallocate_internal(
- swapfile_pager_t pager,
- boolean_t locked)
+ swapfile_pager_t pager,
+ boolean_t locked)
{
- if (! locked) {
+ if (!locked) {
lck_mtx_lock(&swapfile_pager_lock);
}
* pager structure.
*/
lck_mtx_unlock(&swapfile_pager_lock);
- if (pager->pager_control != MEMORY_OBJECT_CONTROL_NULL) {
- memory_object_control_deallocate(pager->pager_control);
- pager->pager_control = MEMORY_OBJECT_CONTROL_NULL;
+ if (pager->swp_pgr_hdr.mo_control != MEMORY_OBJECT_CONTROL_NULL) {
+ memory_object_control_deallocate(pager->swp_pgr_hdr.mo_control);
+ pager->swp_pgr_hdr.mo_control = MEMORY_OBJECT_CONTROL_NULL;
}
- kfree(pager, sizeof (*pager));
+ kfree(pager, sizeof(*pager));
pager = SWAPFILE_PAGER_NULL;
} else {
/* there are still plenty of references: keep going... */
*/
void
swapfile_pager_deallocate(
- memory_object_t mem_obj)
+ memory_object_t mem_obj)
{
- swapfile_pager_t pager;
+ swapfile_pager_t pager;
PAGER_DEBUG(PAGER_ALL, ("swapfile_pager_deallocate: %p\n", mem_obj));
pager = swapfile_pager_lookup(mem_obj);
#if !DEBUG
__unused
#endif
- memory_object_t mem_obj)
+ memory_object_t mem_obj)
{
PAGER_DEBUG(PAGER_ALL, ("swapfile_pager_terminate: %p\n", mem_obj));
*/
kern_return_t
swapfile_pager_synchronize(
- memory_object_t mem_obj,
- memory_object_offset_t offset,
- memory_object_size_t length,
- __unused vm_sync_t sync_flags)
+ __unused memory_object_t mem_obbj,
+ __unused memory_object_offset_t offset,
+ __unused memory_object_size_t length,
+ __unused vm_sync_t sync_flags)
{
- swapfile_pager_t pager;
-
- PAGER_DEBUG(PAGER_ALL, ("swapfile_pager_synchronize: %p\n", mem_obj));
-
- pager = swapfile_pager_lookup(mem_obj);
-
- memory_object_synchronize_completed(pager->pager_control,
- offset, length);
-
- return KERN_SUCCESS;
+ panic("swapfile_pager_synchronize: memory_object_synchronize no longer supported\n");
+ return KERN_FAILURE;
}
/*
*/
kern_return_t
swapfile_pager_map(
- memory_object_t mem_obj,
- __unused vm_prot_t prot)
+ memory_object_t mem_obj,
+ __unused vm_prot_t prot)
{
- swapfile_pager_t pager;
+ swapfile_pager_t pager;
PAGER_DEBUG(PAGER_ALL, ("swapfile_pager_map: %p\n", mem_obj));
*/
kern_return_t
swapfile_pager_last_unmap(
- memory_object_t mem_obj)
+ memory_object_t mem_obj)
{
- swapfile_pager_t pager;
+ swapfile_pager_t pager;
PAGER_DEBUG(PAGER_ALL,
- ("swapfile_pager_last_unmap: %p\n", mem_obj));
+ ("swapfile_pager_last_unmap: %p\n", mem_obj));
pager = swapfile_pager_lookup(mem_obj);
} else {
lck_mtx_unlock(&swapfile_pager_lock);
}
-
+
return KERN_SUCCESS;
}
*/
swapfile_pager_t
swapfile_pager_lookup(
- memory_object_t mem_obj)
+ memory_object_t mem_obj)
{
- swapfile_pager_t pager;
+ swapfile_pager_t pager;
- pager = (swapfile_pager_t) mem_obj;
- assert(pager->pager_ops == &swapfile_pager_ops);
+ assert(mem_obj->mo_pager_ops == &swapfile_pager_ops);
+ __IGNORE_WCASTALIGN(pager = (swapfile_pager_t) mem_obj);
assert(pager->ref_count > 0);
return pager;
}
swapfile_pager_t
swapfile_pager_create(
- struct vnode *vp)
+ struct vnode *vp)
{
- swapfile_pager_t pager, pager2;
- memory_object_control_t control;
- kern_return_t kr;
+ swapfile_pager_t pager, pager2;
+ memory_object_control_t control;
+ kern_return_t kr;
- pager = (swapfile_pager_t) kalloc(sizeof (*pager));
+ pager = (swapfile_pager_t) kalloc(sizeof(*pager));
if (pager == SWAPFILE_PAGER_NULL) {
return SWAPFILE_PAGER_NULL;
}
* we reserve the second word in the object for a fake ip_kotype
* setting - that will tell vm_map to use it as a memory object.
*/
- pager->pager_ops = &swapfile_pager_ops;
- pager->pager_ikot = IKOT_MEMORY_OBJECT;
+ pager->swp_pgr_hdr.mo_ikot = IKOT_MEMORY_OBJECT;
+ pager->swp_pgr_hdr.mo_pager_ops = &swapfile_pager_ops;
+ pager->swp_pgr_hdr.mo_control = MEMORY_OBJECT_CONTROL_NULL;
+
pager->is_ready = FALSE;/* not ready until it has a "name" */
- pager->ref_count = 1; /* setup reference */
+ pager->ref_count = 1; /* setup reference */
pager->is_mapped = FALSE;
- pager->pager_control = MEMORY_OBJECT_CONTROL_NULL;
pager->swapfile_vnode = vp;
-
+
lck_mtx_lock(&swapfile_pager_lock);
/* see if anyone raced us to create a pager for the same object */
queue_iterate(&swapfile_pager_queue,
- pager2,
- swapfile_pager_t,
- pager_queue) {
+ pager2,
+ swapfile_pager_t,
+ pager_queue) {
if (pager2->swapfile_vnode == vp) {
break;
}
}
- if (! queue_end(&swapfile_pager_queue,
- (queue_entry_t) pager2)) {
+ if (!queue_end(&swapfile_pager_queue,
+ (queue_entry_t) pager2)) {
/* while we hold the lock, transfer our setup ref to winner */
pager2->ref_count++;
/* we lost the race, down with the loser... */
lck_mtx_unlock(&swapfile_pager_lock);
pager->swapfile_vnode = NULL;
- kfree(pager, sizeof (*pager));
+ kfree(pager, sizeof(*pager));
/* ... and go with the winner */
pager = pager2;
/* let the winner make sure the pager gets ready */
/* enter new pager at the head of our list of pagers */
queue_enter_first(&swapfile_pager_queue,
- pager,
- swapfile_pager_t,
- pager_queue);
+ pager,
+ swapfile_pager_t,
+ pager_queue);
swapfile_pager_count++;
if (swapfile_pager_count > swapfile_pager_count_max) {
swapfile_pager_count_max = swapfile_pager_count;
lck_mtx_unlock(&swapfile_pager_lock);
kr = memory_object_create_named((memory_object_t) pager,
- 0,
- &control);
+ 0,
+ &control);
assert(kr == KERN_SUCCESS);
+ memory_object_mark_trusted(control);
+
lck_mtx_lock(&swapfile_pager_lock);
/* the new pager is now ready to be used */
pager->is_ready = TRUE;
swapfile_pager_setup(
struct vnode *vp)
{
- swapfile_pager_t pager;
+ swapfile_pager_t pager;
lck_mtx_lock(&swapfile_pager_lock);
queue_iterate(&swapfile_pager_queue,
- pager,
- swapfile_pager_t,
- pager_queue) {
+ pager,
+ swapfile_pager_t,
+ pager_queue) {
if (pager->swapfile_vnode == vp) {
break;
}
}
if (queue_end(&swapfile_pager_queue,
- (queue_entry_t) pager)) {
+ (queue_entry_t) pager)) {
/* no existing pager for this backing object */
pager = SWAPFILE_PAGER_NULL;
} else {
lck_mtx_lock(&swapfile_pager_lock);
while (!pager->is_ready) {
lck_mtx_sleep(&swapfile_pager_lock,
- LCK_SLEEP_DEFAULT,
- &pager->is_ready,
- THREAD_UNINT);
+ LCK_SLEEP_DEFAULT,
+ &pager->is_ready,
+ THREAD_UNINT);
}
lck_mtx_unlock(&swapfile_pager_lock);
return (memory_object_t) pager;
-}
+}
memory_object_control_t
swapfile_pager_control(
- memory_object_t mem_obj)
+ memory_object_t mem_obj)
{
- swapfile_pager_t pager;
+ swapfile_pager_t pager;
+ if (mem_obj == MEMORY_OBJECT_NULL ||
+ mem_obj->mo_pager_ops != &swapfile_pager_ops) {
+ return MEMORY_OBJECT_CONTROL_NULL;
+ }
pager = swapfile_pager_lookup(mem_obj);
-
- return pager->pager_control;
+ return pager->swp_pgr_hdr.mo_control;
}