/*
 * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <sys/errno.h>

#include <mach/mach_types.h>
#include <mach/kern_return.h>
#include <mach/memory_object_control.h>
#include <mach/memory_object_types.h>
#include <mach/port.h>
#include <mach/policy.h>

#include <kern/kern_types.h>
#include <kern/ipc_kobject.h>
#include <kern/host.h>
#include <kern/thread.h>

#include <ipc/ipc_port.h>
#include <ipc/ipc_space.h>

#include <device/device_port.h>

#include <vm/memory_object.h>
#include <vm/vm_pageout.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_protos.h>

#include <os/refcnt.h>
/* Device VM COMPONENT INTERFACES */

/* until component support available */
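
/*
 * Operations vector for device-backed memory objects.  The VM layer reaches
 * these routines through dev_pgr_hdr.mo_pager_ops (set in
 * device_object_create() below); note that both data_request and data_return
 * simply forward the request to the owning device via device_data_action().
 */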
const struct memory_object_pager_ops device_pager_ops = {
	.memory_object_reference = device_pager_reference,
	.memory_object_deallocate = device_pager_deallocate,
	.memory_object_init = device_pager_init,
	.memory_object_terminate = device_pager_terminate,
	.memory_object_data_request = device_pager_data_request,
	.memory_object_data_return = device_pager_data_return,
	.memory_object_data_initialize = device_pager_data_initialize,
	.memory_object_data_unlock = device_pager_data_unlock,
	.memory_object_synchronize = device_pager_synchronize,
	.memory_object_map = device_pager_map,
	.memory_object_last_unmap = device_pager_last_unmap,
	.memory_object_data_reclaim = NULL,
	.memory_object_backing_object = NULL,
	.memory_object_pager_name = "device pager"
};
typedef uintptr_t device_port_t;
/*
 * The start of "struct device_pager" MUST match a "struct memory_object".
 */
typedef struct device_pager {
	/* mandatory generic header */
	struct memory_object dev_pgr_hdr;

	/* pager-specific data */
	lck_mtx_t       lock;
	device_port_t   device_handle;  /* device_handle */
	vm_size_t       size;
#if MEMORY_OBJECT_HAS_REFCOUNT
#define dev_pgr_hdr_ref dev_pgr_hdr.mo_ref
#else
	os_ref_atomic_t dev_pgr_hdr_ref;
#endif
	int             flags;
	boolean_t       is_mapped;
} *device_pager_t;
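
/*
 * Illustrative only (not part of the original source): since the pager is
 * passed around as a memory_object_t and cast back in device_pager_lookup(),
 * dev_pgr_hdr must sit at offset 0.  A compile-time check expressing that
 * invariant could look like:
 *
 *     _Static_assert(__builtin_offsetof(struct device_pager, dev_pgr_hdr) == 0,
 *         "dev_pgr_hdr must be the first member of struct device_pager");
 */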
__header_always_inline os_ref_count_t
device_pager_get_refcount(device_pager_t device_object)
{
	return os_ref_get_count_raw(&device_object->dev_pgr_hdr_ref);
}
LCK_GRP_DECLARE(device_pager_lck_grp, "device_pager");

ZONE_DECLARE(device_pager_zone, "device node pager structures",
    sizeof(struct device_pager), ZC_NONE);
#define device_pager_lock_init(pager) \
	lck_mtx_init(&(pager)->lock, &device_pager_lck_grp, LCK_ATTR_NULL)
#define device_pager_lock_destroy(pager) \
	lck_mtx_destroy(&(pager)->lock, &device_pager_lck_grp)
#define device_pager_lock(pager)        lck_mtx_lock(&(pager)->lock)
#define device_pager_unlock(pager)      lck_mtx_unlock(&(pager)->lock)
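
/*
 * Within this file the pager lock only serializes updates to "is_mapped";
 * device_pager_map() and device_pager_last_unmap() take it while adding or
 * dropping the extra "mapped" reference.
 */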
device_pager_t
device_pager_lookup(            /* forward */
	memory_object_t);

device_pager_t
device_object_create(void);     /* forward */

#define DEVICE_PAGER_NULL       ((device_pager_t) 0)

#define MAX_DNODE               10000
/*
 * device_pager_setup():  create a device pager for "device_handle" and
 * return it as a named memory object backed by a private VM object.
 */
memory_object_t
device_pager_setup(
	__unused memory_object_t device,
	uintptr_t       device_handle,
	vm_size_t       size,
	int             flags)
{
	device_pager_t  device_object;
	memory_object_control_t control;
	vm_object_t     object;

	device_object = device_object_create();
	if (device_object == DEVICE_PAGER_NULL) {
		panic("device_pager_setup: device_object_create() failed");
	}

	device_object->device_handle = device_handle;
	device_object->size = size;
	device_object->flags = flags;

	memory_object_create_named((memory_object_t) device_object,
	    size,
	    &control);
	object = memory_object_control_to_vm_object(control);

	memory_object_mark_trusted(control);

	assert(object != VM_OBJECT_NULL);
	vm_object_lock(object);
	object->true_share = TRUE;
	if (object->copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC) {
		object->copy_strategy = MEMORY_OBJECT_COPY_DELAY;
	}
	vm_object_unlock(object);

	return (memory_object_t)device_object;
}
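
/*
 * Typical use (sketch only; the caller-side names below are illustrative,
 * not taken from this file):  a driver-side mapping path creates the pager
 * once, then hands it physical pages as they become known.
 *
 *     memory_object_t pager;
 *
 *     pager = device_pager_setup(MEMORY_OBJECT_NULL, device_handle,
 *         mapping_size, DEVICE_PAGER_CONTIGUOUS);
 *     ...
 *     device_pager_populate_object(pager, offset, page_num, PAGE_SIZE);
 */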
/*
 * device_pager_populate_object():  enter the given physical page(s) into
 * the VM object backing the device pager.
 */
kern_return_t
device_pager_populate_object(
	memory_object_t         device,
	memory_object_offset_t  offset,
	ppnum_t                 page_num,
	vm_size_t               size)
{
	device_pager_t  device_object;
	vm_object_t     vm_object;
	kern_return_t   kr;
	upl_t           upl;

	device_object = device_pager_lookup(device);
	if (device_object == DEVICE_PAGER_NULL) {
		return KERN_FAILURE;
	}

	vm_object = (vm_object_t)memory_object_control_to_vm_object(
		device_object->dev_pgr_hdr.mo_control);
	if (vm_object == NULL) {
		return KERN_FAILURE;
	}

	kr = vm_object_populate_with_private(
		vm_object, offset, page_num, size);
	if (kr != KERN_SUCCESS) {
		return kr;
	}

	if (!vm_object->phys_contiguous) {
		unsigned int null_size = 0;
		assert((upl_size_t) size == size);
		kr = vm_object_upl_request(vm_object,
		    (vm_object_offset_t)offset,
		    (upl_size_t) size, &upl, NULL,
		    &null_size,
		    (UPL_NO_SYNC | UPL_CLEAN_IN_PLACE),
		    VM_KERN_MEMORY_NONE);
		if (kr != KERN_SUCCESS) {
			panic("device_pager_populate_object: list_req failed");
		}

		upl_commit(upl, NULL, 0);
		upl_deallocate(upl);
	}

	return kr;
}
/*
 * device_pager_lookup():  convert a memory object back to its device pager.
 */
device_pager_t
device_pager_lookup(
	memory_object_t mem_obj)
{
	device_pager_t  device_object;

	assert(mem_obj->mo_pager_ops == &device_pager_ops);
	device_object = (device_pager_t)mem_obj;
	assert(device_pager_get_refcount(device_object) > 0);
	return device_object;
}
/*
 * device_pager_init():  attach the memory object control and configure the
 * backing VM object for device memory (private, possibly contiguous,
 * possibly non-cacheable).
 */
kern_return_t
device_pager_init(
	memory_object_t mem_obj,
	memory_object_control_t control,
	__unused memory_object_cluster_size_t pg_size)
{
	device_pager_t  device_object;
	kern_return_t   kr;
	memory_object_attr_info_data_t  attributes;

	vm_object_t     vm_object;

	if (control == MEMORY_OBJECT_CONTROL_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	device_object = device_pager_lookup(mem_obj);

	memory_object_control_reference(control);
	device_object->dev_pgr_hdr.mo_control = control;

	/* The following settings should be done through an expanded change */
	/* attributes call */

	vm_object = (vm_object_t)memory_object_control_to_vm_object(control);
	vm_object_lock(vm_object);
	vm_object->private = TRUE;
	if (device_object->flags & DEVICE_PAGER_CONTIGUOUS) {
		vm_object->phys_contiguous = TRUE;
	}
	if (device_object->flags & DEVICE_PAGER_NOPHYSCACHE) {
		vm_object->nophyscache = TRUE;
	}

	vm_object->wimg_bits = device_object->flags & VM_WIMG_MASK;
	vm_object_unlock(vm_object);

	attributes.copy_strategy = MEMORY_OBJECT_COPY_DELAY;
	/* attributes.cluster_size = (1 << (CLUSTER_SHIFT + PAGE_SHIFT));*/
	attributes.cluster_size = (1 << (PAGE_SHIFT));
	attributes.may_cache_object = FALSE;
	attributes.temporary = TRUE;

	kr = memory_object_change_attributes(
		control,
		MEMORY_OBJECT_ATTRIBUTE_INFO,
		(memory_object_info_t) &attributes,
		MEMORY_OBJECT_ATTR_INFO_COUNT);
	if (kr != KERN_SUCCESS) {
		panic("device_pager_init: memory_object_change_attributes() failed");
	}

	return KERN_SUCCESS;
}
/*
 * device_pager_data_return():  forward pageout requests to the device.
 */
kern_return_t
device_pager_data_return(
	memory_object_t         mem_obj,
	memory_object_offset_t  offset,
	memory_object_cluster_size_t    data_cnt,
	__unused memory_object_offset_t *resid_offset,
	__unused int            *io_error,
	__unused boolean_t      dirty,
	__unused boolean_t      kernel_copy,
	__unused int            upl_flags)
{
	device_pager_t  device_object;

	device_object = device_pager_lookup(mem_obj);
	if (device_object == DEVICE_PAGER_NULL) {
		panic("device_pager_data_return: lookup failed");
	}

	__IGNORE_WCASTALIGN(return device_data_action(device_object->device_handle,
	    (ipc_port_t) device_object,
	    VM_PROT_READ | VM_PROT_WRITE,
	    offset, data_cnt));
}
/*
 * device_pager_data_request():  forward pagein requests to the device.
 */
kern_return_t
device_pager_data_request(
	memory_object_t         mem_obj,
	memory_object_offset_t  offset,
	memory_object_cluster_size_t    length,
	__unused vm_prot_t      protection_required,
	__unused memory_object_fault_info_t     fault_info)
{
	device_pager_t  device_object;

	device_object = device_pager_lookup(mem_obj);

	if (device_object == DEVICE_PAGER_NULL) {
		panic("device_pager_data_request: lookup failed");
	}

	__IGNORE_WCASTALIGN(device_data_action(device_object->device_handle,
	    (ipc_port_t) device_object,
	    VM_PROT_READ, offset, length));
	return KERN_SUCCESS;
}
/*
 * device_pager_reference():  take an additional reference on the pager.
 */
void
device_pager_reference(
	memory_object_t         mem_obj)
{
	device_pager_t          device_object;

	device_object = device_pager_lookup(mem_obj);
	os_ref_retain_raw(&device_object->dev_pgr_hdr_ref, NULL);
	DTRACE_VM2(device_pager_reference,
	    device_pager_t, device_object,
	    unsigned int, device_pager_get_refcount(device_object));
}
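
/*
 * Reference-count lifecycle, as implemented below: device_object_create()
 * starts the count at 1 (the "named" reference).  When a release brings the
 * count back down to 1, only that named reference remains, so the device is
 * closed and the VM object destroyed; tearing the object down eventually
 * drops the final reference, and at count 0 the pager itself is freed.
 */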
void
device_pager_deallocate(
	memory_object_t         mem_obj)
{
	device_pager_t          device_object;
	memory_object_control_t device_control;
	os_ref_count_t          ref_count;

	device_object = device_pager_lookup(mem_obj);

	DTRACE_VM2(device_pager_deallocate,
	    device_pager_t, device_object,
	    unsigned int, device_pager_get_refcount(device_object));

	ref_count = os_ref_release_raw(&device_object->dev_pgr_hdr_ref, NULL);

	if (ref_count == 1) {
		/*
		 * The last reference is our "named" reference.
		 * Close the device and "destroy" the VM object.
		 */
		DTRACE_VM2(device_pager_destroy,
		    device_pager_t, device_object,
		    unsigned int, device_pager_get_refcount(device_object));

		assert(device_object->is_mapped == FALSE);
		if (device_object->device_handle != (device_port_t) NULL) {
			device_close(device_object->device_handle);
			device_object->device_handle = (device_port_t) NULL;
		}
		device_control = device_object->dev_pgr_hdr.mo_control;
		memory_object_destroy(device_control, 0);
	} else if (ref_count == 0) {
		/*
		 * No more references: free the pager.
		 */
		DTRACE_VM2(device_pager_free,
		    device_pager_t, device_object,
		    unsigned int, device_pager_get_refcount(device_object));

		device_control = device_object->dev_pgr_hdr.mo_control;

		if (device_control != MEMORY_OBJECT_CONTROL_NULL) {
			memory_object_control_deallocate(device_control);
			device_object->dev_pgr_hdr.mo_control = MEMORY_OBJECT_CONTROL_NULL;
		}
		device_pager_lock_destroy(device_object);

		zfree(device_pager_zone, device_object);
	}
	return;
}
kern_return_t
device_pager_data_initialize(
	__unused memory_object_t                mem_obj,
	__unused memory_object_offset_t         offset,
	__unused memory_object_cluster_size_t   data_cnt)
{
	panic("device_pager_data_initialize");
	return KERN_FAILURE;
}
kern_return_t
device_pager_data_unlock(
	__unused memory_object_t        mem_obj,
	__unused memory_object_offset_t offset,
	__unused memory_object_size_t   size,
	__unused vm_prot_t              desired_access)
{
	return KERN_FAILURE;
}
kern_return_t
device_pager_terminate(
	__unused memory_object_t        mem_obj)
{
	return KERN_SUCCESS;
}
kern_return_t
device_pager_synchronize(
	__unused memory_object_t        mem_obj,
	__unused memory_object_offset_t offset,
	__unused memory_object_size_t   length,
	__unused vm_sync_t              sync_flags)
{
	panic("device_pager_synchronize: memory_object_synchronize no longer supported\n");
	return KERN_FAILURE;
}
/*
 * device_pager_map():  called on the first mapping of the memory object.
 */
kern_return_t
device_pager_map(
	memory_object_t         mem_obj,
	__unused vm_prot_t      prot)
{
	device_pager_t          device_object;

	device_object = device_pager_lookup(mem_obj);

	device_pager_lock(device_object);
	assert(device_pager_get_refcount(device_object) > 0);
	if (device_object->is_mapped == FALSE) {
		/*
		 * First mapping of this pager:  take an extra reference
		 * that will remain until all the mappings of this pager
		 * are removed.
		 */
		device_object->is_mapped = TRUE;
		device_pager_reference(mem_obj);
	}
	device_pager_unlock(device_object);

	return KERN_SUCCESS;
}
kern_return_t
device_pager_last_unmap(
	memory_object_t         mem_obj)
{
	device_pager_t          device_object;
	boolean_t               drop_ref;

	device_object = device_pager_lookup(mem_obj);

	device_pager_lock(device_object);
	assert(device_pager_get_refcount(device_object) > 0);
	if (device_object->is_mapped) {
		device_object->is_mapped = FALSE;
		drop_ref = TRUE;
	} else {
		drop_ref = FALSE;
	}
	device_pager_unlock(device_object);

	if (drop_ref) {
		/* release the extra reference taken in device_pager_map() */
		device_pager_deallocate(mem_obj);
	}

	return KERN_SUCCESS;
}
/*
 * device_object_create():  allocate and initialize a new device pager.
 */
device_pager_t
device_object_create(void)
{
	device_pager_t  device_object;

	device_object = (struct device_pager *) zalloc(device_pager_zone);
	if (device_object == DEVICE_PAGER_NULL) {
		return DEVICE_PAGER_NULL;
	}

	bzero(device_object, sizeof(*device_object));

	device_object->dev_pgr_hdr.mo_ikot = IKOT_MEMORY_OBJECT;
	device_object->dev_pgr_hdr.mo_pager_ops = &device_pager_ops;
	device_object->dev_pgr_hdr.mo_control = MEMORY_OBJECT_CONTROL_NULL;

	device_pager_lock_init(device_object);
	os_ref_init_raw(&device_object->dev_pgr_hdr_ref, NULL);
	device_object->is_mapped = FALSE;

	DTRACE_VM2(device_pager_create,
	    device_pager_t, device_object,
	    unsigned int, device_pager_get_refcount(device_object));

	return device_object;
}
boolean_t
is_device_pager_ops(const struct memory_object_pager_ops *pager_ops)
{
	if (pager_ops == &device_pager_ops) {
		return TRUE;
	}
	return FALSE;
}