+/*
+ * Routine convert_memory_object_to_port:
+ * not implemented; this conversion always returns MACH_PORT_NULL.
+ */
+mach_port_t
+convert_memory_object_to_port(
+ __unused memory_object_t object)
+{
+ return (MACH_PORT_NULL);
+}
+
+
+/* Routine memory_object_reference */
+void memory_object_reference(
+ memory_object_t memory_object)
+{
+ (memory_object->mo_pager_ops->memory_object_reference)(
+ memory_object);
+}
+
+/* Routine memory_object_deallocate */
+void memory_object_deallocate(
+ memory_object_t memory_object)
+{
+ (memory_object->mo_pager_ops->memory_object_deallocate)(
+ memory_object);
+}
+
+
+/* Routine memory_object_init */
+kern_return_t memory_object_init
+(
+ memory_object_t memory_object,
+ memory_object_control_t memory_control,
+ memory_object_cluster_size_t memory_object_page_size
+)
+{
+ return (memory_object->mo_pager_ops->memory_object_init)(
+ memory_object,
+ memory_control,
+ memory_object_page_size);
+}
+
+/* Routine memory_object_terminate */
+kern_return_t memory_object_terminate
+(
+ memory_object_t memory_object
+)
+{
+ return (memory_object->mo_pager_ops->memory_object_terminate)(
+ memory_object);
+}
+
+/* Routine memory_object_data_request */
+kern_return_t memory_object_data_request
+(
+ memory_object_t memory_object,
+ memory_object_offset_t offset,
+ memory_object_cluster_size_t length,
+ vm_prot_t desired_access,
+ memory_object_fault_info_t fault_info
+)
+{
+ return (memory_object->mo_pager_ops->memory_object_data_request)(
+ memory_object,
+ offset,
+ length,
+ desired_access,
+ fault_info);
+}
+
+/* Routine memory_object_data_return */
+kern_return_t memory_object_data_return
+(
+ memory_object_t memory_object,
+ memory_object_offset_t offset,
+ memory_object_cluster_size_t size,
+ memory_object_offset_t *resid_offset,
+ int *io_error,
+ boolean_t dirty,
+ boolean_t kernel_copy,
+ int upl_flags
+)
+{
+ return (memory_object->mo_pager_ops->memory_object_data_return)(
+ memory_object,
+ offset,
+ size,
+ resid_offset,
+ io_error,
+ dirty,
+ kernel_copy,
+ upl_flags);
+}
+
+/* Routine memory_object_data_initialize */
+kern_return_t memory_object_data_initialize
+(
+ memory_object_t memory_object,
+ memory_object_offset_t offset,
+ memory_object_cluster_size_t size
+)
+{
+ return (memory_object->mo_pager_ops->memory_object_data_initialize)(
+ memory_object,
+ offset,
+ size);
+}
+
+/* Routine memory_object_data_unlock */
+kern_return_t memory_object_data_unlock
+(
+ memory_object_t memory_object,
+ memory_object_offset_t offset,
+ memory_object_size_t size,
+ vm_prot_t desired_access
+)
+{
+ return (memory_object->mo_pager_ops->memory_object_data_unlock)(
+ memory_object,
+ offset,
+ size,
+ desired_access);
+}
+
+/* Routine memory_object_synchronize */
+kern_return_t memory_object_synchronize
+(
+ memory_object_t memory_object,
+ memory_object_offset_t offset,
+ memory_object_size_t size,
+ vm_sync_t sync_flags
+)
+{
+ return (memory_object->mo_pager_ops->memory_object_synchronize)(
+ memory_object,
+ offset,
+ size,
+ sync_flags);
+}
+
+
+/*
+ * memory_object_map() is called by VM (in vm_map_enter() and its variants)
+ * each time a "named" VM object gets mapped directly or indirectly
+ * (copy-on-write mapping). A "named" VM object has an extra reference held
+ * by the pager to keep it alive until the pager decides that the
+ * memory object (and its VM object) can be reclaimed.
+ *
+ * VM calls memory_object_last_unmap() (in vm_object_deallocate()) when all
+ * the mappings of that memory object have been removed.
+ *
+ * For a given VM object, calls to memory_object_map() and memory_object_last_unmap()
+ * are serialized (through object->mapping_in_progress), to ensure that the
+ * pager gets a consistent view of the mapping status of the memory object.
+ *
+ * This allows the pager to keep track of how many times a memory object
+ * has been mapped and with which protections, to decide when it can be
+ * reclaimed.
+ */
+
+/* Routine memory_object_map */
+kern_return_t memory_object_map
+(
+ memory_object_t memory_object,
+ vm_prot_t prot
+)
+{
+ return (memory_object->mo_pager_ops->memory_object_map)(
+ memory_object,
+ prot);
+}
+
+/* Routine memory_object_last_unmap */
+kern_return_t memory_object_last_unmap
+(
+ memory_object_t memory_object
+)
+{
+ return (memory_object->mo_pager_ops->memory_object_last_unmap)(
+ memory_object);
+}
+
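+/*
+ * Illustrative sketch (not part of this file): the comment above describes
+ * how a pager can use memory_object_map() / memory_object_last_unmap() to
+ * track whether its memory object is currently mapped and with which
+ * protections.  Assuming a hypothetical pager that embeds
+ * "struct memory_object" as its first field (so memory_object_t can be cast
+ * to it), its ops might look roughly like this; "example_pager",
+ * "ep_header", "ep_is_mapped" and "ep_mapped_prot" are made-up names.
+ *
+ *     struct example_pager {
+ *         struct memory_object ep_header;      // must be first; holds mo_pager_ops
+ *         boolean_t            ep_is_mapped;   // TRUE while any mapping exists
+ *         vm_prot_t            ep_mapped_prot; // union of protections requested
+ *     };
+ *
+ *     kern_return_t
+ *     example_pager_map(memory_object_t mem_obj, vm_prot_t prot)
+ *     {
+ *         struct example_pager *pager = (struct example_pager *) mem_obj;
+ *
+ *         // Calls are serialized by VM via object->mapping_in_progress,
+ *         // so no additional locking is shown in this sketch.
+ *         pager->ep_is_mapped = TRUE;
+ *         pager->ep_mapped_prot |= prot;
+ *         return KERN_SUCCESS;
+ *     }
+ *
+ *     kern_return_t
+ *     example_pager_last_unmap(memory_object_t mem_obj)
+ *     {
+ *         struct example_pager *pager = (struct example_pager *) mem_obj;
+ *
+ *         // All mappings are gone; the pager may now decide to drop its
+ *         // "named" reference so the memory object can be reclaimed.
+ *         pager->ep_is_mapped = FALSE;
+ *         return KERN_SUCCESS;
+ *     }
+ */
+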
+/* Routine memory_object_data_reclaim */
+kern_return_t memory_object_data_reclaim
+(
+ memory_object_t memory_object,
+ boolean_t reclaim_backing_store
+)
+{
+ if (memory_object->mo_pager_ops->memory_object_data_reclaim == NULL)
+     return KERN_NOT_SUPPORTED;
+ return (memory_object->mo_pager_ops->memory_object_data_reclaim)(
+ memory_object,
+ reclaim_backing_store);
+}
+
+/* Routine memory_object_create */
+kern_return_t memory_object_create
+(
+ memory_object_default_t default_memory_manager,
+ vm_size_t new_memory_object_size,
+ memory_object_t *new_memory_object
+)
+{
+ return default_pager_memory_object_create(default_memory_manager,
+ new_memory_object_size,
+ new_memory_object);
+}
+
+/*
+ * Routine convert_port_to_upl:
+ * translate a kernel-object port of type IKOT_UPL into the UPL it
+ * represents, taking an extra reference on the UPL.  Returns NULL if
+ * the port is inactive or does not name a UPL.
+ */
+upl_t
+convert_port_to_upl(
+ ipc_port_t port)
+{
+ upl_t upl;
+
+ ip_lock(port);
+ if (!ip_active(port) || (ip_kotype(port) != IKOT_UPL)) {
+     ip_unlock(port);
+     return (upl_t)NULL;
+ }
+ upl = (upl_t) port->ip_kobject;
+ ip_unlock(port);
+
+ upl_lock(upl);
+ upl->ref_count += 1;
+ upl_unlock(upl);
+
+ return upl;
+}
+
+/*
+ * Routine convert_upl_to_port:
+ * not implemented; this conversion always returns MACH_PORT_NULL.
+ */
+mach_port_t
+convert_upl_to_port(
+ __unused upl_t upl)
+{
+ return MACH_PORT_NULL;
+}
+
+/*
+ * Routine upl_no_senders:
+ * no-senders notifications on UPL ports require no processing.
+ */
+__private_extern__ void
+upl_no_senders(
+ __unused ipc_port_t port,
+ __unused mach_port_mscount_t mscount)
+{
+ return;
+}