/*
- * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_START@
- *
+ *
* This file contains Original Code and/or Modifications of Original Code
* as defined in and that are subject to the Apple Public Source License
* Version 2.0 (the 'License'). You may not use this file except in
* compliance with the License. The rights granted to you under the License
* may not be used to create, or enable the creation or redistribution of,
* unlawful or unlicensed copies of an Apple operating system, or to
* circumvent, violate, or enable the circumvention or violation of, any
* terms of an Apple operating system software license agreement.
- *
+ *
* Please obtain a copy of the License at
* http://www.opensource.apple.com/apsl/ and read it before using this file.
- *
+ *
* The Original Code and all software distributed under the License are
* distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
* EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
* INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
* Please see the License for the specific language governing rights and
* limitations under the License.
- *
+ *
* @APPLE_OSREFERENCE_LICENSE_HEADER_END@
*/
#include <vm/vm_pageout.h>
#include <vm/vm_protos.h>
#include <mach/sdt.h>
+#include <os/refcnt.h>
/* Device VM COMPONENT INTERFACES */
-/*
- * Device PAGER
+/*
+ * Device PAGER
*/
/* until component support available */
const struct memory_object_pager_ops device_pager_ops = {
- device_pager_reference,
- device_pager_deallocate,
- device_pager_init,
- device_pager_terminate,
- device_pager_data_request,
- device_pager_data_return,
- device_pager_data_initialize,
- device_pager_data_unlock,
- device_pager_synchronize,
- device_pager_map,
- device_pager_last_unmap,
- NULL, /* data_reclaim */
- "device pager"
+ .memory_object_reference = device_pager_reference,
+ .memory_object_deallocate = device_pager_deallocate,
+ .memory_object_init = device_pager_init,
+ .memory_object_terminate = device_pager_terminate,
+ .memory_object_data_request = device_pager_data_request,
+ .memory_object_data_return = device_pager_data_return,
+ .memory_object_data_initialize = device_pager_data_initialize,
+ .memory_object_data_unlock = device_pager_data_unlock,
+ .memory_object_synchronize = device_pager_synchronize,
+ .memory_object_map = device_pager_map,
+ .memory_object_last_unmap = device_pager_last_unmap,
+ .memory_object_data_reclaim = NULL,
+ .memory_object_backing_object = NULL,
+ .memory_object_pager_name = "device pager"
};
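/*
 * Illustrative sketch (not part of this change): the designated
 * initializers above bind each handler to its slot by name, so a
 * consumer can dispatch through the vtable without depending on
 * member order:
 *
 *	memory_object_t mo = (memory_object_t) device_object;
 *	if (mo->mo_pager_ops == &device_pager_ops) {
 *		mo->mo_pager_ops->memory_object_reference(mo);
 *	}
 */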
typedef uintptr_t device_port_t;
typedef struct device_pager {
	/* mandatory generic pager fields */
struct memory_object dev_pgr_hdr;
/* pager-specific data */
- lck_mtx_t lock;
- unsigned int ref_count; /* reference count */
+ lck_mtx_t lock;
+ struct os_refcnt ref_count; /* reference count */
device_port_t device_handle; /* device_handle */
- vm_size_t size;
- int flags;
- boolean_t is_mapped;
+ vm_size_t size;
+ int flags;
+ boolean_t is_mapped;
} *device_pager_t;
-lck_grp_t device_pager_lck_grp;
-lck_grp_attr_t device_pager_lck_grp_attr;
-lck_attr_t device_pager_lck_attr;
+LCK_GRP_DECLARE(device_pager_lck_grp, "device_pager");
+
+ZONE_DECLARE(device_pager_zone, "device node pager structures",
+ sizeof(struct device_pager), ZC_NONE);
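/*
 * Sketch, assuming the zone KPI above: ZONE_DECLARE registers the
 * zone at boot, so callers allocate and free directly and the old
 * zinit()-based device_pager_bootstrap() (removed below) is no
 * longer needed:
 *
 *	device_pager_t p = (device_pager_t) zalloc(device_pager_zone);
 *	...
 *	zfree(device_pager_zone, p);
 */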
-#define device_pager_lock_init(pager) \
- lck_mtx_init(&(pager)->lock, \
- &device_pager_lck_grp, \
- &device_pager_lck_attr)
-#define device_pager_lock_destroy(pager) \
+#define device_pager_lock_init(pager) \
+ lck_mtx_init(&(pager)->lock, &device_pager_lck_grp, LCK_ATTR_NULL)
+#define device_pager_lock_destroy(pager) \
lck_mtx_destroy(&(pager)->lock, &device_pager_lck_grp)
#define device_pager_lock(pager) lck_mtx_lock(&(pager)->lock)
#define device_pager_unlock(pager) lck_mtx_unlock(&(pager)->lock)
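/*
 * Usage sketch (illustrative): with LCK_GRP_DECLARE and LCK_ATTR_NULL
 * there are no lck_attr/lck_grp_attr globals left to set up; the
 * macros above give a plain mutex protocol:
 *
 *	device_pager_lock_init(pager);		// once, at create
 *	device_pager_lock(pager);
 *	pager->is_mapped = TRUE;
 *	device_pager_unlock(pager);
 *	device_pager_lock_destroy(pager);	// once, at free
 */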
device_pager_t
-device_pager_lookup( /* forward */
+device_pager_lookup( /* forward */
memory_object_t);
device_pager_t
-device_object_create(void); /* forward */
-
-zone_t device_pager_zone;
-
-
-#define DEVICE_PAGER_NULL ((device_pager_t) 0)
-
-
-#define MAX_DNODE 10000
-
+device_object_create(void); /* forward */
+#define DEVICE_PAGER_NULL ((device_pager_t) 0)
+#define MAX_DNODE 10000
-/*
- *
- */
-void
-device_pager_bootstrap(void)
-{
- vm_size_t size;
-
- size = (vm_size_t) sizeof(struct device_pager);
- device_pager_zone = zinit(size, (vm_size_t) MAX_DNODE*size,
- PAGE_SIZE, "device node pager structures");
- zone_change(device_pager_zone, Z_CALLERACCT, FALSE);
-
- lck_grp_attr_setdefault(&device_pager_lck_grp_attr);
- lck_grp_init(&device_pager_lck_grp, "device_pager", &device_pager_lck_grp_attr);
- lck_attr_setdefault(&device_pager_lck_attr);
-
- return;
-}
-
/*
*
*/
memory_object_t
device_pager_setup(
__unused memory_object_t device,
- uintptr_t device_handle,
- vm_size_t size,
- int flags)
+ uintptr_t device_handle,
+ vm_size_t size,
+ int flags)
{
- device_pager_t device_object;
+ device_pager_t device_object;
memory_object_control_t control;
- vm_object_t object;
+ vm_object_t object;
device_object = device_object_create();
- if (device_object == DEVICE_PAGER_NULL)
+ if (device_object == DEVICE_PAGER_NULL) {
panic("device_pager_setup: device_object_create() failed");
+ }
device_object->device_handle = device_handle;
device_object->size = size;
device_object->flags = flags;
memory_object_create_named((memory_object_t) device_object,
- size,
- &control);
+ size,
+ &control);
object = memory_object_control_to_vm_object(control);
+ memory_object_mark_trusted(control);
+
assert(object != VM_OBJECT_NULL);
vm_object_lock(object);
object->true_share = TRUE;
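/*
 * Caller-side sketch (hypothetical driver glue; "dev_handle" and
 * "len" are assumptions, not from this file): creating a pager for a
 * physically contiguous, write-combined device range might look like:
 *
 *	memory_object_t pager;
 *	pager = device_pager_setup(MEMORY_OBJECT_NULL,
 *	    (uintptr_t) dev_handle, round_page(len),
 *	    DEVICE_PAGER_CONTIGUOUS | VM_WIMG_WCOMB);
 */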
*/
kern_return_t
device_pager_populate_object(
- memory_object_t device,
- memory_object_offset_t offset,
- ppnum_t page_num,
- vm_size_t size)
+ memory_object_t device,
+ memory_object_offset_t offset,
+ ppnum_t page_num,
+ vm_size_t size)
{
- device_pager_t device_object;
- vm_object_t vm_object;
- kern_return_t kr;
- upl_t upl;
+ device_pager_t device_object;
+ vm_object_t vm_object;
+ kern_return_t kr;
+ upl_t upl;
device_object = device_pager_lookup(device);
- if(device_object == DEVICE_PAGER_NULL)
+ if (device_object == DEVICE_PAGER_NULL) {
return KERN_FAILURE;
+ }
vm_object = (vm_object_t)memory_object_control_to_vm_object(
- device_object->dev_pgr_hdr.mo_control);
- if(vm_object == NULL)
+ device_object->dev_pgr_hdr.mo_control);
+ if (vm_object == NULL) {
return KERN_FAILURE;
+ }
kr = vm_object_populate_with_private(
- vm_object, offset, page_num, size);
- if(kr != KERN_SUCCESS)
+ vm_object, offset, page_num, size);
+ if (kr != KERN_SUCCESS) {
return kr;
+ }
- if(!vm_object->phys_contiguous) {
+ if (!vm_object->phys_contiguous) {
unsigned int null_size = 0;
assert((upl_size_t) size == size);
- kr = vm_object_upl_request(vm_object,
- (vm_object_offset_t)offset,
- (upl_size_t) size, &upl, NULL,
- &null_size,
- (UPL_NO_SYNC | UPL_CLEAN_IN_PLACE),
- VM_KERN_MEMORY_NONE);
- if(kr != KERN_SUCCESS)
+ kr = vm_object_upl_request(vm_object,
+ (vm_object_offset_t)offset,
+ (upl_size_t) size, &upl, NULL,
+ &null_size,
+ (UPL_NO_SYNC | UPL_CLEAN_IN_PLACE),
+ VM_KERN_MEMORY_NONE);
+ if (kr != KERN_SUCCESS) {
panic("device_pager_populate_object: list_req failed");
+ }
upl_commit(upl, NULL, 0);
upl_deallocate(upl);
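/*
 * Sketch (hypothetical values): once the pager exists, the driver
 * hands it physical pages; "pnum" is a ppnum_t the driver already
 * owns:
 *
 *	kr = device_pager_populate_object(pager, offset, pnum,
 *	    PAGE_SIZE);
 */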
*/
device_pager_t
device_pager_lookup(
- memory_object_t mem_obj)
+ memory_object_t mem_obj)
{
- device_pager_t device_object;
+ device_pager_t device_object;
assert(mem_obj->mo_pager_ops == &device_pager_ops);
device_object = (device_pager_t)mem_obj;
- assert(device_object->ref_count > 0);
+ assert(os_ref_get_count(&device_object->ref_count) > 0);
return device_object;
}
*/
kern_return_t
device_pager_init(
- memory_object_t mem_obj,
- memory_object_control_t control,
+ memory_object_t mem_obj,
+ memory_object_control_t control,
__unused memory_object_cluster_size_t pg_size)
{
device_pager_t device_object;
kern_return_t kr;
memory_object_attr_info_data_t attributes;
- vm_object_t vm_object;
+ vm_object_t vm_object;
- if (control == MEMORY_OBJECT_CONTROL_NULL)
+ if (control == MEMORY_OBJECT_CONTROL_NULL) {
return KERN_INVALID_ARGUMENT;
+ }
device_object = device_pager_lookup(mem_obj);
vm_object = (vm_object_t)memory_object_control_to_vm_object(control);
vm_object_lock(vm_object);
vm_object->private = TRUE;
- if(device_object->flags & DEVICE_PAGER_CONTIGUOUS)
+ if (device_object->flags & DEVICE_PAGER_CONTIGUOUS) {
vm_object->phys_contiguous = TRUE;
- if(device_object->flags & DEVICE_PAGER_NOPHYSCACHE)
+ }
+ if (device_object->flags & DEVICE_PAGER_NOPHYSCACHE) {
vm_object->nophyscache = TRUE;
+ }
vm_object->wimg_bits = device_object->flags & VM_WIMG_MASK;
vm_object_unlock(vm_object);
attributes.temporary = TRUE;
kr = memory_object_change_attributes(
- control,
- MEMORY_OBJECT_ATTRIBUTE_INFO,
- (memory_object_info_t) &attributes,
- MEMORY_OBJECT_ATTR_INFO_COUNT);
- if (kr != KERN_SUCCESS)
+ control,
+ MEMORY_OBJECT_ATTRIBUTE_INFO,
+ (memory_object_info_t) &attributes,
+ MEMORY_OBJECT_ATTR_INFO_COUNT);
+ if (kr != KERN_SUCCESS) {
panic("device_pager_init: memory_object_change_attributes() failed");
+ }
- return(KERN_SUCCESS);
+ return KERN_SUCCESS;
}
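/*
 * Flags sketch: per the wimg_bits assignment above, the low bits of
 * "flags" carry a VM_WIMG_* cache mode and the remaining bits select
 * pager behavior, so callers compose both in one word:
 *
 *	int flags = DEVICE_PAGER_CONTIGUOUS | VM_WIMG_WCOMB;
 */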
/*
/*ARGSUSED6*/
kern_return_t
device_pager_data_return(
- memory_object_t mem_obj,
- memory_object_offset_t offset,
- memory_object_cluster_size_t data_cnt,
- __unused memory_object_offset_t *resid_offset,
- __unused int *io_error,
- __unused boolean_t dirty,
- __unused boolean_t kernel_copy,
- __unused int upl_flags)
+ memory_object_t mem_obj,
+ memory_object_offset_t offset,
+ memory_object_cluster_size_t data_cnt,
+ __unused memory_object_offset_t *resid_offset,
+ __unused int *io_error,
+ __unused boolean_t dirty,
+ __unused boolean_t kernel_copy,
+ __unused int upl_flags)
{
- device_pager_t device_object;
+ device_pager_t device_object;
device_object = device_pager_lookup(mem_obj);
- if (device_object == DEVICE_PAGER_NULL)
+ if (device_object == DEVICE_PAGER_NULL) {
panic("device_pager_data_return: lookup failed");
+ }
__IGNORE_WCASTALIGN(return device_data_action(device_object->device_handle,
- (ipc_port_t) device_object,
- VM_PROT_READ | VM_PROT_WRITE,
- offset, data_cnt));
+ (ipc_port_t) device_object,
+ VM_PROT_READ | VM_PROT_WRITE,
+ offset, data_cnt));
}
/*
*
*/
-kern_return_t
+kern_return_t
device_pager_data_request(
- memory_object_t mem_obj,
- memory_object_offset_t offset,
- memory_object_cluster_size_t length,
- __unused vm_prot_t protection_required,
- __unused memory_object_fault_info_t fault_info)
+ memory_object_t mem_obj,
+ memory_object_offset_t offset,
+ memory_object_cluster_size_t length,
+ __unused vm_prot_t protection_required,
+ __unused memory_object_fault_info_t fault_info)
{
- device_pager_t device_object;
+ device_pager_t device_object;
device_object = device_pager_lookup(mem_obj);
- if (device_object == DEVICE_PAGER_NULL)
+ if (device_object == DEVICE_PAGER_NULL) {
panic("device_pager_data_request: lookup failed");
+ }
__IGNORE_WCASTALIGN(device_data_action(device_object->device_handle,
- (ipc_port_t) device_object,
- VM_PROT_READ, offset, length));
+ (ipc_port_t) device_object,
+ VM_PROT_READ, offset, length));
return KERN_SUCCESS;
}
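/*
 * Note: device_data_action() is the device-side upcall (implemented
 * by the I/O Kit glue); the pager passes itself as the port so the
 * driver can answer with device_pager_populate_object(). Shape of
 * the call, as used above:
 *
 *	(void) device_data_action(handle, (ipc_port_t) pager,
 *	    VM_PROT_READ, offset, length);
 */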
*/
void
device_pager_reference(
- memory_object_t mem_obj)
-{
- device_pager_t device_object;
- unsigned int new_ref_count;
+ memory_object_t mem_obj)
+{
+ device_pager_t device_object;
device_object = device_pager_lookup(mem_obj);
- new_ref_count = hw_atomic_add(&device_object->ref_count, 1);
- assert(new_ref_count > 1);
+ os_ref_retain(&device_object->ref_count);
DTRACE_VM2(device_pager_reference,
- device_pager_t, device_object,
- unsigned int, device_object->ref_count);
-
+ device_pager_t, device_object,
+ unsigned int, os_ref_get_count(&device_object->ref_count));
}
/*
*/
void
device_pager_deallocate(
- memory_object_t mem_obj)
+ memory_object_t mem_obj)
{
- device_pager_t device_object;
- memory_object_control_t device_control;
- unsigned int ref_count;
+ device_pager_t device_object;
+ memory_object_control_t device_control;
device_object = device_pager_lookup(mem_obj);
- assert(device_object->ref_count > 0);
DTRACE_VM2(device_pager_deallocate,
- device_pager_t, device_object,
- unsigned int, device_object->ref_count);
+ device_pager_t, device_object,
+ unsigned int, os_ref_get_count(&device_object->ref_count));
- ref_count = hw_atomic_sub(&device_object->ref_count, 1);
+ os_ref_count_t ref_count = os_ref_release(&device_object->ref_count);
if (ref_count == 1) {
/*
*/
DTRACE_VM2(device_pager_destroy,
- device_pager_t, device_object,
- unsigned int, device_object->ref_count);
+ device_pager_t, device_object,
+ unsigned int, os_ref_get_count(&device_object->ref_count));
assert(device_object->is_mapped == FALSE);
if (device_object->device_handle != (device_port_t) NULL) {
* No more references: free the pager.
*/
DTRACE_VM2(device_pager_free,
- device_pager_t, device_object,
- unsigned int, device_object->ref_count);
+ device_pager_t, device_object,
+ unsigned int, os_ref_get_count(&device_object->ref_count));
device_pager_lock_destroy(device_object);
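/*
 * Teardown sketch: os_ref_release() returns the updated count, so
 * "ref_count == 1" means only the "named" reference taken by
 * memory_object_create_named() remains; destroying the VM object
 * then drops that last reference and re-enters this path at 0:
 *
 *	if (os_ref_release(&pager->ref_count) == 1) {
 *		memory_object_destroy(pager->dev_pgr_hdr.mo_control, 0);
 *	}
 */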
kern_return_t
device_pager_data_initialize(
- __unused memory_object_t mem_obj,
- __unused memory_object_offset_t offset,
- __unused memory_object_cluster_size_t data_cnt)
+ __unused memory_object_t mem_obj,
+ __unused memory_object_offset_t offset,
+ __unused memory_object_cluster_size_t data_cnt)
{
panic("device_pager_data_initialize");
return KERN_FAILURE;
kern_return_t
device_pager_data_unlock(
- __unused memory_object_t mem_obj,
- __unused memory_object_offset_t offset,
- __unused memory_object_size_t size,
- __unused vm_prot_t desired_access)
+ __unused memory_object_t mem_obj,
+ __unused memory_object_offset_t offset,
+ __unused memory_object_size_t size,
+ __unused vm_prot_t desired_access)
{
return KERN_FAILURE;
}
kern_return_t
device_pager_terminate(
- __unused memory_object_t mem_obj)
+ __unused memory_object_t mem_obj)
{
return KERN_SUCCESS;
}
*/
kern_return_t
device_pager_synchronize(
- __unused memory_object_t mem_obj,
- __unused memory_object_offset_t offset,
- __unused memory_object_size_t length,
- __unused vm_sync_t sync_flags)
+ __unused memory_object_t mem_obj,
+ __unused memory_object_offset_t offset,
+ __unused memory_object_size_t length,
+ __unused vm_sync_t sync_flags)
{
panic("device_pager_synchronize: memory_object_synchronize no longer supported\n");
return KERN_FAILURE;
*/
kern_return_t
device_pager_map(
- memory_object_t mem_obj,
- __unused vm_prot_t prot)
+ memory_object_t mem_obj,
+ __unused vm_prot_t prot)
{
- device_pager_t device_object;
+ device_pager_t device_object;
device_object = device_pager_lookup(mem_obj);
device_pager_lock(device_object);
- assert(device_object->ref_count > 0);
+ assert(os_ref_get_count(&device_object->ref_count) > 0);
if (device_object->is_mapped == FALSE) {
/*
* First mapping of this pager: take an extra reference
kern_return_t
device_pager_last_unmap(
- memory_object_t mem_obj)
+ memory_object_t mem_obj)
{
- device_pager_t device_object;
- boolean_t drop_ref;
+ device_pager_t device_object;
+ boolean_t drop_ref;
device_object = device_pager_lookup(mem_obj);
device_pager_lock(device_object);
- assert(device_object->ref_count > 0);
+ assert(os_ref_get_count(&device_object->ref_count) > 0);
if (device_object->is_mapped) {
device_object->is_mapped = FALSE;
drop_ref = TRUE;
device_pager_t device_object;
device_object = (struct device_pager *) zalloc(device_pager_zone);
- if (device_object == DEVICE_PAGER_NULL)
- return(DEVICE_PAGER_NULL);
+ if (device_object == DEVICE_PAGER_NULL) {
+ return DEVICE_PAGER_NULL;
+ }
- bzero(device_object, sizeof (*device_object));
+ bzero(device_object, sizeof(*device_object));
device_object->dev_pgr_hdr.mo_ikot = IKOT_MEMORY_OBJECT;
device_object->dev_pgr_hdr.mo_pager_ops = &device_pager_ops;
device_object->dev_pgr_hdr.mo_control = MEMORY_OBJECT_CONTROL_NULL;
device_pager_lock_init(device_object);
- device_object->ref_count = 1;
+ os_ref_init(&device_object->ref_count, NULL);
device_object->is_mapped = FALSE;
DTRACE_VM2(device_pager_create,
- device_pager_t, device_object,
- unsigned int, device_object->ref_count);
+ device_pager_t, device_object,
+ unsigned int, os_ref_get_count(&device_object->ref_count));
- return(device_object);
+ return device_object;
}
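/*
 * Minimal os_refcnt lifecycle (sketch against <os/refcnt.h>; "foo"
 * and "foo_zone" are hypothetical):
 *
 *	struct foo { struct os_refcnt ref; };
 *
 *	os_ref_init(&f->ref, NULL);		// count starts at 1
 *	os_ref_retain(&f->ref);			// +1; panics on resurrection
 *	if (os_ref_release(&f->ref) == 0) {	// returns the new count
 *		zfree(foo_zone, f);		// last reference is gone
 *	}
 */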
boolean_t