/*
 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_OSREFERENCE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the
 * License may not be used to create, or enable the creation or
 * redistribution of, unlawful or unlicensed copies of an Apple operating
 * system, or to circumvent, violate, or enable the circumvention or
 * violation of, any terms of an Apple operating system software license
 * agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_OSREFERENCE_HEADER_END@
 */
31 #include <sys/errno.h>
33 #include <mach/mach_types.h>
34 #include <mach/kern_return.h>
35 #include <mach/memory_object_control.h>
36 #include <mach/memory_object_types.h>
37 #include <mach/port.h>
38 #include <mach/policy.h>
40 #include <kern/kern_types.h>
41 #include <kern/ipc_kobject.h>
42 #include <kern/host.h>
43 #include <kern/thread.h>
44 #include <ipc/ipc_port.h>
45 #include <ipc/ipc_space.h>
46 #include <device/device_port.h>
47 #include <vm/memory_object.h>
48 #include <vm/vm_pageout.h>
49 #include <vm/vm_map.h>
50 #include <vm/vm_kern.h>
51 #include <vm/vm_pageout.h>
52 #include <vm/vm_protos.h>
55 /* Device VM COMPONENT INTERFACES */
/*
 * until component support available
 *
 * device_pager_workaround's address doubles as the fake "pager ops"
 * tag stored in struct device_pager.pager; device_pager_lookup()
 * asserts against it to sanity-check incoming memory_object_t's.
 */
int	device_pager_workaround;

/* until component support available */
typedef int	device_port_t;
72 typedef struct device_pager
{
73 int *pager
; /* pager workaround pointer */
74 unsigned int pager_ikot
; /* fake ip_kotype() */
75 unsigned int ref_count
; /* reference count */
76 memory_object_control_t control_handle
; /* mem object's cntrl handle */
77 device_port_t device_handle
; /* device_handle */
86 device_pager_lookup( /* forward */
90 device_object_create(void); /* forward */
92 zone_t device_pager_zone
;
95 #define DEVICE_PAGER_NULL ((device_pager_t) 0)
98 #define MAX_DNODE 10000
108 device_pager_bootstrap(void)
110 register vm_size_t size
;
112 size
= (vm_size_t
) sizeof(struct device_pager
);
113 device_pager_zone
= zinit(size
, (vm_size_t
) MAX_DNODE
*size
,
114 PAGE_SIZE
, "device node pager structures");
124 __unused memory_object_t device
,
129 device_pager_t device_object
;
131 device_object
= device_object_create();
132 if (device_object
== DEVICE_PAGER_NULL
)
133 panic("device_pager_setup: device_object_create() failed");
135 device_object
->device_handle
= device_handle
;
136 device_object
->size
= size
;
137 device_object
->flags
= flags
;
139 return((memory_object_t
)device_object
);
146 device_pager_populate_object(
147 memory_object_t device
,
148 memory_object_offset_t offset
,
152 device_pager_t device_object
;
153 vm_object_t vm_object
;
157 device_object
= device_pager_lookup(device
);
158 if(device_object
== DEVICE_PAGER_NULL
)
161 vm_object
= (vm_object_t
)memory_object_control_to_vm_object(
162 device_object
->control_handle
);
163 if(vm_object
== NULL
)
166 kr
= vm_object_populate_with_private(
167 vm_object
, offset
, page_num
, size
);
168 if(kr
!= KERN_SUCCESS
)
171 if(!vm_object
->phys_contiguous
) {
173 kr
= vm_object_upl_request(vm_object
,
174 (vm_object_offset_t
)offset
, size
, &upl
, NULL
,
175 &null_size
, (UPL_NO_SYNC
| UPL_CLEAN_IN_PLACE
));
177 if(kr
!= KERN_SUCCESS
)
178 panic("device_pager_populate_object: list_req failed");
180 upl_commit(upl
, NULL
, 0);
193 memory_object_t name
)
195 device_pager_t device_object
;
197 device_object
= (device_pager_t
)name
;
198 assert(device_object
->pager
== &device_pager_workaround
);
199 return (device_object
);
207 memory_object_t mem_obj
,
208 memory_object_control_t control
,
209 __unused vm_size_t pg_size
)
211 device_pager_t device_object
;
213 memory_object_attr_info_data_t attributes
;
215 vm_object_t vm_object
;
218 if (control
== MEMORY_OBJECT_CONTROL_NULL
)
219 return KERN_INVALID_ARGUMENT
;
221 device_object
= device_pager_lookup(mem_obj
);
223 memory_object_control_reference(control
);
224 device_object
->control_handle
= control
;
227 /* The following settings should be done through an expanded change */
228 /* attributes call */
230 vm_object
= (vm_object_t
)memory_object_control_to_vm_object(control
);
231 vm_object_lock(vm_object
);
232 vm_object
->private = TRUE
;
233 if(device_object
->flags
& DEVICE_PAGER_CONTIGUOUS
)
234 vm_object
->phys_contiguous
= TRUE
;
235 if(device_object
->flags
& DEVICE_PAGER_NOPHYSCACHE
)
236 vm_object
->nophyscache
= TRUE
;
238 vm_object
->wimg_bits
= device_object
->flags
& VM_WIMG_MASK
;
239 vm_object_unlock(vm_object
);
242 attributes
.copy_strategy
= MEMORY_OBJECT_COPY_DELAY
;
243 /* attributes.cluster_size = (1 << (CLUSTER_SHIFT + PAGE_SHIFT));*/
244 attributes
.cluster_size
= (1 << (PAGE_SHIFT
));
245 attributes
.may_cache_object
= FALSE
;
246 attributes
.temporary
= TRUE
;
248 kr
= memory_object_change_attributes(
250 MEMORY_OBJECT_ATTRIBUTE_INFO
,
251 (memory_object_info_t
) &attributes
,
252 MEMORY_OBJECT_ATTR_INFO_COUNT
);
253 if (kr
!= KERN_SUCCESS
)
254 panic("device_pager_init: memory_object_change_attributes() failed");
256 return(KERN_SUCCESS
);
264 device_pager_data_return(
265 memory_object_t mem_obj
,
266 memory_object_offset_t offset
,
268 __unused boolean_t dirty
,
269 __unused boolean_t kernel_copy
,
270 __unused
int upl_flags
)
272 device_pager_t device_object
;
274 device_object
= device_pager_lookup(mem_obj
);
275 if (device_object
== DEVICE_PAGER_NULL
)
276 panic("device_pager_data_return: lookup failed");
278 return device_data_action(device_object
->device_handle
,
279 (ipc_port_t
) device_object
,
280 VM_PROT_READ
| VM_PROT_WRITE
,
288 device_pager_data_request(
289 memory_object_t mem_obj
,
290 memory_object_offset_t offset
,
292 __unused vm_prot_t protection_required
)
294 device_pager_t device_object
;
296 device_object
= device_pager_lookup(mem_obj
);
298 if (device_object
== DEVICE_PAGER_NULL
)
299 panic("device_pager_data_request: lookup failed");
301 device_data_action(device_object
->device_handle
,
302 (ipc_port_t
) device_object
,
303 VM_PROT_READ
, offset
, length
);
311 device_pager_reference(
312 memory_object_t mem_obj
)
314 device_pager_t device_object
;
315 unsigned int new_ref_count
;
317 device_object
= device_pager_lookup(mem_obj
);
318 new_ref_count
= hw_atomic_add(&device_object
->ref_count
, 1);
319 assert(new_ref_count
> 1);
326 device_pager_deallocate(
327 memory_object_t mem_obj
)
329 device_pager_t device_object
;
330 memory_object_control_t device_control
;
332 device_object
= device_pager_lookup(mem_obj
);
334 if (hw_atomic_sub(&device_object
->ref_count
, 1) == 0) {
335 if (device_object
->device_handle
!= (device_port_t
) NULL
) {
336 device_close(device_object
->device_handle
);
337 device_object
->device_handle
= (device_port_t
) NULL
;
339 device_control
= device_object
->control_handle
;
340 if (device_control
!= MEMORY_OBJECT_CONTROL_NULL
) {
342 * The VM object should already have been disconnected
343 * from the pager at this point.
344 * We still have to release the "memory object control"
347 assert(device_control
->object
== VM_OBJECT_NULL
);
348 memory_object_control_deallocate(device_control
);
349 device_object
->control_handle
=
350 MEMORY_OBJECT_CONTROL_NULL
;
353 zfree(device_pager_zone
, device_object
);
359 device_pager_data_initialize(
360 __unused memory_object_t mem_obj
,
361 __unused memory_object_offset_t offset
,
362 __unused vm_size_t data_cnt
)
364 panic("device_pager_data_initialize");
369 device_pager_data_unlock(
370 __unused memory_object_t mem_obj
,
371 __unused memory_object_offset_t offset
,
372 __unused vm_size_t size
,
373 __unused vm_prot_t desired_access
)
379 device_pager_terminate(
380 __unused memory_object_t mem_obj
)
391 device_pager_synchronize(
392 memory_object_t mem_obj
,
393 memory_object_offset_t offset
,
395 __unused vm_sync_t sync_flags
)
397 device_pager_t device_object
;
399 device_object
= device_pager_lookup(mem_obj
);
401 memory_object_synchronize_completed(
402 device_object
->control_handle
, offset
, length
);
412 __unused memory_object_t mem_obj
)
423 device_object_create()
425 register device_pager_t device_object
;
427 device_object
= (struct device_pager
*) zalloc(device_pager_zone
);
428 if (device_object
== DEVICE_PAGER_NULL
)
429 return(DEVICE_PAGER_NULL
);
430 device_object
->pager
= &device_pager_workaround
;
431 device_object
->pager_ikot
= IKOT_MEMORY_OBJECT
;
432 device_object
->ref_count
= 1;
433 device_object
->control_handle
= MEMORY_OBJECT_CONTROL_NULL
;
436 return(device_object
);