/*
 * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
29 #include <sys/errno.h>
31 #include <mach/mach_types.h>
32 #include <mach/kern_return.h>
33 #include <mach/memory_object_control.h>
34 #include <mach/memory_object_types.h>
35 #include <mach/port.h>
36 #include <mach/policy.h>
38 #include <kern/kern_types.h>
39 #include <kern/ipc_kobject.h>
40 #include <kern/host.h>
41 #include <kern/thread.h>
42 #include <ipc/ipc_port.h>
43 #include <ipc/ipc_space.h>
44 #include <device/device_port.h>
45 #include <vm/memory_object.h>
46 #include <vm/vm_pageout.h>
47 #include <vm/vm_map.h>
48 #include <vm/vm_kern.h>
49 #include <vm/vm_pageout.h>
50 #include <vm/vm_protos.h>
/* Device VM COMPONENT INTERFACES */

/* until component support available */
66 const struct memory_object_pager_ops device_pager_ops
= {
67 device_pager_reference
,
68 device_pager_deallocate
,
70 device_pager_terminate
,
71 device_pager_data_request
,
72 device_pager_data_return
,
73 device_pager_data_initialize
,
74 device_pager_data_unlock
,
75 device_pager_synchronize
,
77 device_pager_last_unmap
,
81 typedef uintptr_t device_port_t
;
84 * The start of "struct device_pager" MUST match a "struct memory_object".
86 typedef struct device_pager
{
87 struct ipc_object_header pager_header
; /* fake ip_kotype() */
88 memory_object_pager_ops_t pager_ops
; /* == &device_pager_ops */
89 unsigned int ref_count
; /* reference count */
90 memory_object_control_t control_handle
; /* mem object's cntrl handle */
91 device_port_t device_handle
; /* device_handle */
96 #define pager_ikot pager_header.io_bits
100 device_pager_lookup( /* forward */
104 device_object_create(void); /* forward */
106 zone_t device_pager_zone
;
109 #define DEVICE_PAGER_NULL ((device_pager_t) 0)
112 #define MAX_DNODE 10000
122 device_pager_bootstrap(void)
124 register vm_size_t size
;
126 size
= (vm_size_t
) sizeof(struct device_pager
);
127 device_pager_zone
= zinit(size
, (vm_size_t
) MAX_DNODE
*size
,
128 PAGE_SIZE
, "device node pager structures");
138 __unused memory_object_t device
,
139 uintptr_t device_handle
,
143 device_pager_t device_object
;
145 device_object
= device_object_create();
146 if (device_object
== DEVICE_PAGER_NULL
)
147 panic("device_pager_setup: device_object_create() failed");
149 device_object
->device_handle
= device_handle
;
150 device_object
->size
= size
;
151 device_object
->flags
= flags
;
153 return((memory_object_t
)device_object
);
160 device_pager_populate_object(
161 memory_object_t device
,
162 memory_object_offset_t offset
,
166 device_pager_t device_object
;
167 vm_object_t vm_object
;
171 device_object
= device_pager_lookup(device
);
172 if(device_object
== DEVICE_PAGER_NULL
)
175 vm_object
= (vm_object_t
)memory_object_control_to_vm_object(
176 device_object
->control_handle
);
177 if(vm_object
== NULL
)
180 kr
= vm_object_populate_with_private(
181 vm_object
, offset
, page_num
, size
);
182 if(kr
!= KERN_SUCCESS
)
185 if(!vm_object
->phys_contiguous
) {
186 unsigned int null_size
= 0;
187 assert((upl_size_t
) size
== size
);
188 kr
= vm_object_upl_request(vm_object
,
189 (vm_object_offset_t
)offset
,
190 (upl_size_t
) size
, &upl
, NULL
,
192 (UPL_NO_SYNC
| UPL_CLEAN_IN_PLACE
));
193 if(kr
!= KERN_SUCCESS
)
194 panic("device_pager_populate_object: list_req failed");
196 upl_commit(upl
, NULL
, 0);
209 memory_object_t name
)
211 device_pager_t device_object
;
213 device_object
= (device_pager_t
)name
;
214 assert(device_object
->pager_ops
== &device_pager_ops
);
215 return (device_object
);
223 memory_object_t mem_obj
,
224 memory_object_control_t control
,
225 __unused memory_object_cluster_size_t pg_size
)
227 device_pager_t device_object
;
229 memory_object_attr_info_data_t attributes
;
231 vm_object_t vm_object
;
234 if (control
== MEMORY_OBJECT_CONTROL_NULL
)
235 return KERN_INVALID_ARGUMENT
;
237 device_object
= device_pager_lookup(mem_obj
);
239 memory_object_control_reference(control
);
240 device_object
->control_handle
= control
;
243 /* The following settings should be done through an expanded change */
244 /* attributes call */
246 vm_object
= (vm_object_t
)memory_object_control_to_vm_object(control
);
247 vm_object_lock(vm_object
);
248 vm_object
->private = TRUE
;
249 if(device_object
->flags
& DEVICE_PAGER_CONTIGUOUS
)
250 vm_object
->phys_contiguous
= TRUE
;
251 if(device_object
->flags
& DEVICE_PAGER_NOPHYSCACHE
)
252 vm_object
->nophyscache
= TRUE
;
254 vm_object
->wimg_bits
= device_object
->flags
& VM_WIMG_MASK
;
255 vm_object_unlock(vm_object
);
258 attributes
.copy_strategy
= MEMORY_OBJECT_COPY_DELAY
;
259 /* attributes.cluster_size = (1 << (CLUSTER_SHIFT + PAGE_SHIFT));*/
260 attributes
.cluster_size
= (1 << (PAGE_SHIFT
));
261 attributes
.may_cache_object
= FALSE
;
262 attributes
.temporary
= TRUE
;
264 kr
= memory_object_change_attributes(
266 MEMORY_OBJECT_ATTRIBUTE_INFO
,
267 (memory_object_info_t
) &attributes
,
268 MEMORY_OBJECT_ATTR_INFO_COUNT
);
269 if (kr
!= KERN_SUCCESS
)
270 panic("device_pager_init: memory_object_change_attributes() failed");
272 return(KERN_SUCCESS
);
280 device_pager_data_return(
281 memory_object_t mem_obj
,
282 memory_object_offset_t offset
,
283 memory_object_cluster_size_t data_cnt
,
284 __unused memory_object_offset_t
*resid_offset
,
285 __unused
int *io_error
,
286 __unused boolean_t dirty
,
287 __unused boolean_t kernel_copy
,
288 __unused
int upl_flags
)
290 device_pager_t device_object
;
292 device_object
= device_pager_lookup(mem_obj
);
293 if (device_object
== DEVICE_PAGER_NULL
)
294 panic("device_pager_data_return: lookup failed");
296 return device_data_action(device_object
->device_handle
,
297 (ipc_port_t
) device_object
,
298 VM_PROT_READ
| VM_PROT_WRITE
,
306 device_pager_data_request(
307 memory_object_t mem_obj
,
308 memory_object_offset_t offset
,
309 memory_object_cluster_size_t length
,
310 __unused vm_prot_t protection_required
,
311 __unused memory_object_fault_info_t fault_info
)
313 device_pager_t device_object
;
315 device_object
= device_pager_lookup(mem_obj
);
317 if (device_object
== DEVICE_PAGER_NULL
)
318 panic("device_pager_data_request: lookup failed");
320 device_data_action(device_object
->device_handle
,
321 (ipc_port_t
) device_object
,
322 VM_PROT_READ
, offset
, length
);
330 device_pager_reference(
331 memory_object_t mem_obj
)
333 device_pager_t device_object
;
334 unsigned int new_ref_count
;
336 device_object
= device_pager_lookup(mem_obj
);
337 new_ref_count
= hw_atomic_add(&device_object
->ref_count
, 1);
338 assert(new_ref_count
> 1);
345 device_pager_deallocate(
346 memory_object_t mem_obj
)
348 device_pager_t device_object
;
349 memory_object_control_t device_control
;
351 device_object
= device_pager_lookup(mem_obj
);
353 if (hw_atomic_sub(&device_object
->ref_count
, 1) == 0) {
354 if (device_object
->device_handle
!= (device_port_t
) NULL
) {
355 device_close(device_object
->device_handle
);
356 device_object
->device_handle
= (device_port_t
) NULL
;
358 device_control
= device_object
->control_handle
;
359 if (device_control
!= MEMORY_OBJECT_CONTROL_NULL
) {
361 * The VM object should already have been disconnected
362 * from the pager at this point.
363 * We still have to release the "memory object control"
366 assert(device_control
->moc_object
== VM_OBJECT_NULL
);
367 memory_object_control_deallocate(device_control
);
368 device_object
->control_handle
=
369 MEMORY_OBJECT_CONTROL_NULL
;
372 zfree(device_pager_zone
, device_object
);
378 device_pager_data_initialize(
379 __unused memory_object_t mem_obj
,
380 __unused memory_object_offset_t offset
,
381 __unused memory_object_cluster_size_t data_cnt
)
383 panic("device_pager_data_initialize");
388 device_pager_data_unlock(
389 __unused memory_object_t mem_obj
,
390 __unused memory_object_offset_t offset
,
391 __unused memory_object_size_t size
,
392 __unused vm_prot_t desired_access
)
398 device_pager_terminate(
399 __unused memory_object_t mem_obj
)
410 device_pager_synchronize(
411 memory_object_t mem_obj
,
412 memory_object_offset_t offset
,
413 memory_object_size_t length
,
414 __unused vm_sync_t sync_flags
)
416 device_pager_t device_object
;
418 device_object
= device_pager_lookup(mem_obj
);
420 memory_object_synchronize_completed(
421 device_object
->control_handle
, offset
, length
);
431 __unused memory_object_t mem_obj
,
432 __unused vm_prot_t prot
)
438 device_pager_last_unmap(
439 __unused memory_object_t mem_obj
)
450 device_object_create(void)
452 register device_pager_t device_object
;
454 device_object
= (struct device_pager
*) zalloc(device_pager_zone
);
455 if (device_object
== DEVICE_PAGER_NULL
)
456 return(DEVICE_PAGER_NULL
);
457 device_object
->pager_ops
= &device_pager_ops
;
458 device_object
->pager_ikot
= IKOT_MEMORY_OBJECT
;
459 device_object
->ref_count
= 1;
460 device_object
->control_handle
= MEMORY_OBJECT_CONTROL_NULL
;
463 return(device_object
);