/*
 * Copyright (c) 2000-2001 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * Copyright (c) 1999-2003 Apple Computer, Inc.  All Rights Reserved.
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
#include <sys/errno.h>
#include <kern/host.h>
#include <mach/mach_types.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_pageout.h>
#include <mach/kern_return.h>
#include <mach/memory_object_types.h>
#include <mach/port.h>
#include <mach/policy.h>
#include <ipc/ipc_port.h>
#include <ipc/ipc_space.h>
#include <kern/thread.h>
#include <device/device_port.h>
/* Device VM COMPONENT INTERFACES */

/* until component support available */
int		device_pager_workaround;

typedef int	device_port_t;
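
/*
 * A device pager is a fake memory object: the pager/pager_ikot fields
 * mimic the kobject layout expected by ip_kotype(), with pager pointing
 * at device_pager_workaround so device_pager_lookup() can sanity-check
 * that a memory_object_t really is a device pager.
 */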
typedef struct device_pager {
	int		*pager;			/* pager workaround pointer  */
	unsigned int	pager_ikot;		/* fake ip_kotype()          */
	unsigned int	ref_count;		/* reference count           */
	memory_object_control_t control_handle;	/* mem object's cntrl handle */
	device_port_t	device_handle;		/* device handle             */
	vm_size_t	size;
	int		flags;
} *device_pager_t;

/* forward declarations */
void		device_pager_bootstrap(void);

memory_object_t	device_pager_setup(
			memory_object_t, int, vm_size_t, int);

kern_return_t	device_pager_populate_object(
			memory_object_t, memory_object_offset_t,
			vm_offset_t, vm_size_t);

device_pager_t	device_pager_lookup(
			memory_object_t);

kern_return_t	device_pager_init(
			memory_object_t, memory_object_control_t, vm_size_t);

kern_return_t	device_pager_data_request(
			memory_object_t, memory_object_offset_t,
			vm_size_t, vm_prot_t);

kern_return_t	device_pager_data_return(
			memory_object_t, memory_object_offset_t,
			vm_size_t, boolean_t, boolean_t);

void		device_pager_reference(memory_object_t);

void		device_pager_deallocate(memory_object_t);

kern_return_t	device_pager_data_initialize(
			memory_object_t, memory_object_offset_t, vm_size_t);

kern_return_t	device_pager_data_unlock(
			memory_object_t, memory_object_offset_t,
			vm_size_t, vm_prot_t);

kern_return_t	device_pager_terminate(memory_object_t);

kern_return_t	device_pager_synchronize(
			memory_object_t, memory_object_offset_t,
			vm_offset_t, vm_sync_t);

kern_return_t	device_pager_unmap(memory_object_t);

device_pager_t	device_object_create(void);

zone_t		device_pager_zone;

#define DEVICE_PAGER_NULL	((device_pager_t) 0)

#define MAX_DNODE		10000
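
/*
 * device_pager_bootstrap:
 *	Create the zone from which device pager structures are allocated,
 *	sized for up to MAX_DNODE pagers.
 */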
void
device_pager_bootstrap(void)
{
	register vm_size_t	size;

	size = (vm_size_t) sizeof(struct device_pager);
	device_pager_zone = zinit(size, (vm_size_t) MAX_DNODE*size,
				PAGE_SIZE, "device node pager structures");

	return;
}
memory_object_t
device_pager_setup(
	memory_object_t	device,
	int		device_handle,
	vm_size_t	size,
	int		flags)
{
	device_pager_t	device_object;

	device_object = device_object_create();
	if (device_object == DEVICE_PAGER_NULL)
		panic("device_pager_setup: device_object_create() failed");

	device_object->device_handle = device_handle;
	device_object->size = size;
	device_object->flags = flags;

	return((memory_object_t)device_object);
}
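
/*
 * device_pager_populate_object:
 *	Enter the device's private (physical) pages into the VM object
 *	backed by this pager.  The pager must already have a control
 *	handle, i.e. the memory object must have been mapped so that
 *	device_pager_init() has run.
 *
 *	Illustrative sketch only; "dev_handle", "dev_size" and "base_page"
 *	are hypothetical names, not part of this interface:
 *
 *		memory_object_t pager;
 *
 *		pager = device_pager_setup((memory_object_t) NULL,
 *				dev_handle, dev_size, DEVICE_PAGER_CONTIGUOUS);
 *		... map the pager into a VM map, which initializes it ...
 *		(void) device_pager_populate_object(pager, 0,
 *				base_page, dev_size);
 */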
kern_return_t
device_pager_populate_object(
	memory_object_t		device,
	memory_object_offset_t	offset,
	vm_offset_t		page_num,
	vm_size_t		size)
{
	device_pager_t	device_object;
	vm_object_t	vm_object;
	kern_return_t	kr;
	upl_t		upl;

	device_object = device_pager_lookup(device);
	if(device_object == DEVICE_PAGER_NULL)
		return KERN_FAILURE;

	vm_object = (vm_object_t)memory_object_control_to_vm_object(
					device_object->control_handle);
	if(vm_object == NULL)
		return KERN_FAILURE;

	kr = vm_object_populate_with_private(
				vm_object, offset, page_num, size);
	if(kr != KERN_SUCCESS)
		return kr;

	if(!vm_object->phys_contiguous) {
		unsigned int null_size = 0;
		kr = vm_object_upl_request(vm_object,
			(vm_object_offset_t)offset, size, &upl, NULL,
			&null_size, (UPL_NO_SYNC | UPL_CLEAN_IN_PLACE));

		if(kr != KERN_SUCCESS)
			panic("device_pager_populate_object: list_req failed");

		upl_commit(upl, NULL);
		upl_deallocate(upl);
	}

	return kr;
}
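
/*
 * device_pager_lookup:
 *	Convert a memory_object_t back to the device_pager_t it really is,
 *	asserting on the workaround pointer as a sanity check.
 */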
device_pager_t
device_pager_lookup(
	memory_object_t	name)
{
	device_pager_t	device_object;

	device_object = (device_pager_t)name;
	assert(device_object->pager == &device_pager_workaround);
	return (device_object);
}
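
/*
 * device_pager_init:
 *	Attach the pager to its memory object control handle and mark the
 *	underlying VM object as a private device object with the caching
 *	attributes implied by the pager's flags.
 */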
kern_return_t
device_pager_init(memory_object_t mem_obj,
		memory_object_control_t control,
		vm_size_t pg_size)
{
	device_pager_t	device_object;
	kern_return_t	kr;
	memory_object_attr_info_data_t	attributes;

	vm_object_t	vm_object;

	if (control == MEMORY_OBJECT_CONTROL_NULL)
		return KERN_INVALID_ARGUMENT;

	device_object = device_pager_lookup(mem_obj);

	memory_object_control_reference(control);
	device_object->control_handle = control;

	/* The following settings should be done through an expanded change */
	/* attributes call */

	vm_object = (vm_object_t)memory_object_control_to_vm_object(control);
	vm_object_lock(vm_object);
	vm_object->private = TRUE;
	if(device_object->flags & DEVICE_PAGER_CONTIGUOUS)
		vm_object->phys_contiguous = TRUE;
	if(device_object->flags & DEVICE_PAGER_NOPHYSCACHE)
		vm_object->nophyscache = TRUE;

	vm_object->wimg_bits = device_object->flags & VM_WIMG_MASK;
	vm_object_unlock(vm_object);

	attributes.copy_strategy = MEMORY_OBJECT_COPY_DELAY;
	/* attributes.cluster_size = (1 << (CLUSTER_SHIFT + PAGE_SHIFT));*/
	attributes.cluster_size = (1 << (PAGE_SHIFT));
	attributes.may_cache_object = FALSE;
	attributes.temporary = TRUE;

	kr = memory_object_change_attributes(
			control,
			MEMORY_OBJECT_ATTRIBUTE_INFO,
			(memory_object_info_t) &attributes,
			MEMORY_OBJECT_ATTR_INFO_COUNT);
	if (kr != KERN_SUCCESS)
		panic("device_pager_init: memory_object_change_attributes() failed");

	return(KERN_SUCCESS);
}
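
/*
 * device_pager_data_return:
 *	Page-out path; forward the request to the device via
 *	device_data_action().
 */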
kern_return_t
device_pager_data_return(
	memory_object_t		mem_obj,
	memory_object_offset_t	offset,
	vm_size_t		data_cnt,
	boolean_t		dirty,
	boolean_t		kernel_copy)
{
	device_pager_t	device_object;

	device_object = device_pager_lookup(mem_obj);
	if (device_object == DEVICE_PAGER_NULL)
		panic("device_pager_data_return: lookup failed");

	return device_data_action(device_object->device_handle, device_object,
			VM_PROT_READ | VM_PROT_WRITE, offset, data_cnt);
}
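
/*
 * device_pager_data_request:
 *	Page-in path; ask the device to supply data for the given range via
 *	device_data_action().
 */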
kern_return_t
device_pager_data_request(
	memory_object_t		mem_obj,
	memory_object_offset_t	offset,
	vm_size_t		length,
	vm_prot_t		protection_required)
{
	device_pager_t	device_object;

	device_object = device_pager_lookup(mem_obj);

	if (device_object == DEVICE_PAGER_NULL)
		panic("device_pager_data_request: lookup failed");

	device_data_action(device_object->device_handle, device_object,
			VM_PROT_READ, offset, length);
	return KERN_SUCCESS;
}
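
/*
 * device_pager_reference:
 *	Take an additional reference on the pager.
 */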
void
device_pager_reference(
	memory_object_t		mem_obj)
{
	device_pager_t		device_object;
	unsigned int		new_ref_count;

	device_object = device_pager_lookup(mem_obj);
	new_ref_count = hw_atomic_add(&device_object->ref_count, 1);
	assert(new_ref_count > 1);
}
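
/*
 * device_pager_deallocate:
 *	Drop a reference; on the last release, close the device handle and
 *	free the pager structure back to its zone.
 */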
void
device_pager_deallocate(
	memory_object_t		mem_obj)
{
	device_pager_t	device_object;

	device_object = device_pager_lookup(mem_obj);

	if (hw_atomic_sub(&device_object->ref_count, 1) == 0) {
		if (device_object->device_handle != (device_port_t) NULL) {
			device_close(device_object->device_handle);
		}
		zfree(device_pager_zone, (vm_offset_t) device_object);
	}
	return;
}
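
/*
 * device_pager_data_initialize:
 *	Not supported for device pagers.
 */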
kern_return_t
device_pager_data_initialize(
	memory_object_t		mem_obj,
	memory_object_offset_t	offset,
	vm_size_t		data_cnt)
{
	return KERN_FAILURE;
}
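
/*
 * device_pager_data_unlock:
 *	Not supported for device pagers.
 */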
kern_return_t
device_pager_data_unlock(
	memory_object_t		mem_obj,
	memory_object_offset_t	offset,
	vm_size_t		size,
	vm_prot_t		desired_access)
{
	return KERN_FAILURE;
}
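
/*
 * device_pager_terminate:
 *	Nothing to do here; the pager is torn down through
 *	device_pager_deallocate().
 */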
kern_return_t
device_pager_terminate(
	memory_object_t	mem_obj)
{
	return KERN_SUCCESS;
}
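
/*
 * device_pager_synchronize:
 *	Device memory needs no flushing; report the sync as completed
 *	immediately.
 */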
kern_return_t
device_pager_synchronize(
	memory_object_t		mem_obj,
	memory_object_offset_t	offset,
	vm_offset_t		length,
	vm_sync_t		sync_flags)
{
	device_pager_t	device_object;

	device_object = device_pager_lookup(mem_obj);

	memory_object_synchronize_completed(
			device_object->control_handle, offset, length);

	return KERN_SUCCESS;
}
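
/*
 * device_pager_unmap:
 *	No action needed when the last mapping goes away.
 */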
kern_return_t
device_pager_unmap(
	memory_object_t	mem_obj)
{
	return KERN_SUCCESS;
}
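
/*
 * device_object_create:
 *	Allocate and initialize a new device pager structure from the zone.
 */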
device_pager_t
device_object_create()
{
	register device_pager_t	device_object;

	device_object = (struct device_pager *) zalloc(device_pager_zone);
	if (device_object == DEVICE_PAGER_NULL)
		return(DEVICE_PAGER_NULL);
	device_object->pager = &device_pager_workaround;
	device_object->pager_ikot = IKOT_MEMORY_OBJECT;
	device_object->ref_count = 1;
	device_object->control_handle = MEMORY_OBJECT_CONTROL_NULL;

	return(device_object);
}