/*
 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
23 #include <sys/errno.h>
25 #include <mach/mach_types.h>
26 #include <mach/kern_return.h>
27 #include <mach/memory_object_control.h>
28 #include <mach/memory_object_types.h>
29 #include <mach/port.h>
30 #include <mach/policy.h>
32 #include <kern/kern_types.h>
33 #include <kern/ipc_kobject.h>
34 #include <kern/host.h>
35 #include <kern/thread.h>
36 #include <ipc/ipc_port.h>
37 #include <ipc/ipc_space.h>
38 #include <device/device_port.h>
39 #include <vm/memory_object.h>
40 #include <vm/vm_pageout.h>
41 #include <vm/vm_map.h>
42 #include <vm/vm_kern.h>
43 #include <vm/vm_pageout.h>
44 #include <vm/vm_protos.h>
/* Device VM COMPONENT INTERFACES */

/*
 * Until full memory-object component support is available, the address of
 * this variable is stored in each device pager's "pager" field and used by
 * device_pager_lookup() to assert that a memory object really is a device
 * pager (a fake vtable/type tag).
 */
int	device_pager_workaround;

/*
 * Stand-in for a real device port type until component support is
 * available; device handles are plain integers for now.
 */
typedef int	device_port_t;
64 typedef struct device_pager
{
65 int *pager
; /* pager workaround pointer */
66 unsigned int pager_ikot
; /* fake ip_kotype() */
67 unsigned int ref_count
; /* reference count */
68 memory_object_control_t control_handle
; /* mem object's cntrl handle */
69 device_port_t device_handle
; /* device_handle */
78 device_pager_lookup( /* forward */
82 device_object_create(void); /* forward */
84 zone_t device_pager_zone
;
87 #define DEVICE_PAGER_NULL ((device_pager_t) 0)
90 #define MAX_DNODE 10000
100 device_pager_bootstrap(void)
102 register vm_size_t size
;
104 size
= (vm_size_t
) sizeof(struct device_pager
);
105 device_pager_zone
= zinit(size
, (vm_size_t
) MAX_DNODE
*size
,
106 PAGE_SIZE
, "device node pager structures");
116 __unused memory_object_t device
,
121 device_pager_t device_object
;
123 device_object
= device_object_create();
124 if (device_object
== DEVICE_PAGER_NULL
)
125 panic("device_pager_setup: device_object_create() failed");
127 device_object
->device_handle
= device_handle
;
128 device_object
->size
= size
;
129 device_object
->flags
= flags
;
131 return((memory_object_t
)device_object
);
138 device_pager_populate_object(
139 memory_object_t device
,
140 memory_object_offset_t offset
,
144 device_pager_t device_object
;
145 vm_object_t vm_object
;
149 device_object
= device_pager_lookup(device
);
150 if(device_object
== DEVICE_PAGER_NULL
)
153 vm_object
= (vm_object_t
)memory_object_control_to_vm_object(
154 device_object
->control_handle
);
155 if(vm_object
== NULL
)
158 kr
= vm_object_populate_with_private(
159 vm_object
, offset
, page_num
, size
);
160 if(kr
!= KERN_SUCCESS
)
163 if(!vm_object
->phys_contiguous
) {
165 kr
= vm_object_upl_request(vm_object
,
166 (vm_object_offset_t
)offset
, size
, &upl
, NULL
,
167 &null_size
, (UPL_NO_SYNC
| UPL_CLEAN_IN_PLACE
));
169 if(kr
!= KERN_SUCCESS
)
170 panic("device_pager_populate_object: list_req failed");
172 upl_commit(upl
, NULL
, 0);
185 memory_object_t name
)
187 device_pager_t device_object
;
189 device_object
= (device_pager_t
)name
;
190 assert(device_object
->pager
== &device_pager_workaround
);
191 return (device_object
);
199 memory_object_t mem_obj
,
200 memory_object_control_t control
,
201 __unused vm_size_t pg_size
)
203 device_pager_t device_object
;
205 memory_object_attr_info_data_t attributes
;
207 vm_object_t vm_object
;
210 if (control
== MEMORY_OBJECT_CONTROL_NULL
)
211 return KERN_INVALID_ARGUMENT
;
213 device_object
= device_pager_lookup(mem_obj
);
215 memory_object_control_reference(control
);
216 device_object
->control_handle
= control
;
219 /* The following settings should be done through an expanded change */
220 /* attributes call */
222 vm_object
= (vm_object_t
)memory_object_control_to_vm_object(control
);
223 vm_object_lock(vm_object
);
224 vm_object
->private = TRUE
;
225 if(device_object
->flags
& DEVICE_PAGER_CONTIGUOUS
)
226 vm_object
->phys_contiguous
= TRUE
;
227 if(device_object
->flags
& DEVICE_PAGER_NOPHYSCACHE
)
228 vm_object
->nophyscache
= TRUE
;
230 vm_object
->wimg_bits
= device_object
->flags
& VM_WIMG_MASK
;
231 vm_object_unlock(vm_object
);
234 attributes
.copy_strategy
= MEMORY_OBJECT_COPY_DELAY
;
235 /* attributes.cluster_size = (1 << (CLUSTER_SHIFT + PAGE_SHIFT));*/
236 attributes
.cluster_size
= (1 << (PAGE_SHIFT
));
237 attributes
.may_cache_object
= FALSE
;
238 attributes
.temporary
= TRUE
;
240 kr
= memory_object_change_attributes(
242 MEMORY_OBJECT_ATTRIBUTE_INFO
,
243 (memory_object_info_t
) &attributes
,
244 MEMORY_OBJECT_ATTR_INFO_COUNT
);
245 if (kr
!= KERN_SUCCESS
)
246 panic("device_pager_init: memory_object_change_attributes() failed");
248 return(KERN_SUCCESS
);
256 device_pager_data_return(
257 memory_object_t mem_obj
,
258 memory_object_offset_t offset
,
260 __unused boolean_t dirty
,
261 __unused boolean_t kernel_copy
,
262 __unused
int upl_flags
)
264 device_pager_t device_object
;
266 device_object
= device_pager_lookup(mem_obj
);
267 if (device_object
== DEVICE_PAGER_NULL
)
268 panic("device_pager_data_return: lookup failed");
270 return device_data_action(device_object
->device_handle
,
271 (ipc_port_t
) device_object
,
272 VM_PROT_READ
| VM_PROT_WRITE
,
280 device_pager_data_request(
281 memory_object_t mem_obj
,
282 memory_object_offset_t offset
,
284 __unused vm_prot_t protection_required
)
286 device_pager_t device_object
;
288 device_object
= device_pager_lookup(mem_obj
);
290 if (device_object
== DEVICE_PAGER_NULL
)
291 panic("device_pager_data_request: lookup failed");
293 device_data_action(device_object
->device_handle
,
294 (ipc_port_t
) device_object
,
295 VM_PROT_READ
, offset
, length
);
303 device_pager_reference(
304 memory_object_t mem_obj
)
306 device_pager_t device_object
;
307 unsigned int new_ref_count
;
309 device_object
= device_pager_lookup(mem_obj
);
310 new_ref_count
= hw_atomic_add(&device_object
->ref_count
, 1);
311 assert(new_ref_count
> 1);
318 device_pager_deallocate(
319 memory_object_t mem_obj
)
321 device_pager_t device_object
;
322 memory_object_control_t device_control
;
324 device_object
= device_pager_lookup(mem_obj
);
326 if (hw_atomic_sub(&device_object
->ref_count
, 1) == 0) {
327 if (device_object
->device_handle
!= (device_port_t
) NULL
) {
328 device_close(device_object
->device_handle
);
329 device_object
->device_handle
= (device_port_t
) NULL
;
331 device_control
= device_object
->control_handle
;
332 if (device_control
!= MEMORY_OBJECT_CONTROL_NULL
) {
334 * The VM object should already have been disconnected
335 * from the pager at this point.
336 * We still have to release the "memory object control"
339 assert(device_control
->object
== VM_OBJECT_NULL
);
340 memory_object_control_deallocate(device_control
);
341 device_object
->control_handle
=
342 MEMORY_OBJECT_CONTROL_NULL
;
345 zfree(device_pager_zone
, device_object
);
351 device_pager_data_initialize(
352 __unused memory_object_t mem_obj
,
353 __unused memory_object_offset_t offset
,
354 __unused vm_size_t data_cnt
)
356 panic("device_pager_data_initialize");
361 device_pager_data_unlock(
362 __unused memory_object_t mem_obj
,
363 __unused memory_object_offset_t offset
,
364 __unused vm_size_t size
,
365 __unused vm_prot_t desired_access
)
371 device_pager_terminate(
372 __unused memory_object_t mem_obj
)
383 device_pager_synchronize(
384 memory_object_t mem_obj
,
385 memory_object_offset_t offset
,
387 __unused vm_sync_t sync_flags
)
389 device_pager_t device_object
;
391 device_object
= device_pager_lookup(mem_obj
);
393 memory_object_synchronize_completed(
394 device_object
->control_handle
, offset
, length
);
404 __unused memory_object_t mem_obj
)
415 device_object_create()
417 register device_pager_t device_object
;
419 device_object
= (struct device_pager
*) zalloc(device_pager_zone
);
420 if (device_object
== DEVICE_PAGER_NULL
)
421 return(DEVICE_PAGER_NULL
);
422 device_object
->pager
= &device_pager_workaround
;
423 device_object
->pager_ikot
= IKOT_MEMORY_OBJECT
;
424 device_object
->ref_count
= 1;
425 device_object
->control_handle
= MEMORY_OBJECT_CONTROL_NULL
;
428 return(device_object
);