/*
 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
#include <sys/errno.h>

#include <mach/mach_types.h>
#include <mach/kern_return.h>
#include <mach/memory_object_control.h>
#include <mach/memory_object_types.h>
#include <mach/port.h>
#include <mach/policy.h>

#include <kern/kern_types.h>
#include <kern/ipc_kobject.h>
#include <kern/host.h>
#include <kern/thread.h>

#include <ipc/ipc_port.h>
#include <ipc/ipc_space.h>

#include <device/device_port.h>

#include <vm/memory_object.h>
#include <vm/vm_pageout.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_protos.h>
/* Device VM COMPONENT INTERFACES */

/*
 * Device PAGER
 */

/* until component support available */
const struct memory_object_pager_ops device_pager_ops = {
	device_pager_reference,
	device_pager_deallocate,
	device_pager_init,
	device_pager_terminate,
	device_pager_data_request,
	device_pager_data_return,
	device_pager_data_initialize,
	device_pager_data_unlock,
	device_pager_synchronize,
	device_pager_unmap,
	"device pager"
};
typedef int	device_port_t;
/*
 * The start of "struct device_pager" MUST match a "struct memory_object".
 */
typedef struct device_pager {
	memory_object_pager_ops_t pager_ops;	/* == &device_pager_ops */
	unsigned int		pager_ikot;	/* fake ip_kotype() */
	unsigned int		ref_count;	/* reference count */
	memory_object_control_t	control_handle;	/* mem object's cntrl handle */
	device_port_t		device_handle;	/* device_handle */
	vm_size_t		size;
	int			flags;
} *device_pager_t;
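
/*
 * Editor's note (inference from the comment above and from
 * device_pager_lookup() below): the leading fields mirror the layout of
 * "struct memory_object" -- pager_ops first, then a slot that fakes
 * ip_kotype() -- so a device_pager_t can be handed out as a
 * memory_object_t and cast straight back on the way in.  The assert()
 * on pager_ops in device_pager_lookup() is what sanity-checks that
 * round trip.
 */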
device_pager_t device_pager_lookup(	/* forward */
	memory_object_t);

device_pager_t device_object_create(void);	/* forward */

zone_t	device_pager_zone;

#define DEVICE_PAGER_NULL	((device_pager_t) 0)

#define MAX_DNODE		10000
void
device_pager_bootstrap(void)
{
	register vm_size_t	size;

	size = (vm_size_t) sizeof(struct device_pager);
	device_pager_zone = zinit(size, (vm_size_t) MAX_DNODE*size,
				PAGE_SIZE, "device node pager structures");
}
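
/*
 * Editor's note: the zone is sized for at most MAX_DNODE (10000)
 * "struct device_pager" elements, and zinit()'s third argument makes
 * PAGE_SIZE the granularity in which the zone grows.
 */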
memory_object_t
device_pager_setup(
	__unused memory_object_t device,
	int		device_handle,
	vm_size_t	size,
	int		flags)
{
	device_pager_t	device_object;

	device_object = device_object_create();
	if (device_object == DEVICE_PAGER_NULL)
		panic("device_pager_setup: device_object_create() failed");

	device_object->device_handle = device_handle;
	device_object->size = size;
	device_object->flags = flags;

	return ((memory_object_t)device_object);
}
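
/*
 * Usage sketch (editor's, hypothetical -- "dev_handle", "base_page" and
 * "len" are illustrative caller state, not names from this file):
 *
 *	memory_object_t pager;
 *
 *	pager = device_pager_setup(MEMORY_OBJECT_NULL, dev_handle,
 *				   round_page(len), DEVICE_PAGER_CONTIGUOUS);
 *	...map the resulting memory object...
 *	device_pager_populate_object(pager, 0, base_page, round_page(len));
 *
 * device_object_create() starts the pager with one reference;
 * device_pager_deallocate() tears it down when the count hits zero.
 */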
kern_return_t
device_pager_populate_object(
	memory_object_t		device,
	memory_object_offset_t	offset,
	ppnum_t			page_num,
	vm_size_t		size)
{
	device_pager_t	device_object;
	vm_object_t	vm_object;
	kern_return_t	kr;
	upl_t		upl;

	device_object = device_pager_lookup(device);
	if (device_object == DEVICE_PAGER_NULL)
		return KERN_FAILURE;

	vm_object = (vm_object_t)memory_object_control_to_vm_object(
					device_object->control_handle);
	if (vm_object == NULL)
		return KERN_FAILURE;

	kr = vm_object_populate_with_private(
				vm_object, offset, page_num, size);
	if (kr != KERN_SUCCESS)
		return kr;

	if (!vm_object->phys_contiguous) {
		unsigned int null_size = 0;
		kr = vm_object_upl_request(vm_object,
				(vm_object_offset_t)offset, size, &upl, NULL,
				&null_size, (UPL_NO_SYNC | UPL_CLEAN_IN_PLACE));
		if (kr != KERN_SUCCESS)
			panic("device_pager_populate_object: list_req failed");

		upl_commit(upl, NULL, 0);
		upl_deallocate(upl);
	}

	return kr;
}
device_pager_t
device_pager_lookup(
	memory_object_t	name)
{
	device_pager_t	device_object;

	device_object = (device_pager_t)name;
	assert(device_object->pager_ops == &device_pager_ops);
	return (device_object);
}
kern_return_t
device_pager_init(
	memory_object_t		mem_obj,
	memory_object_control_t	control,
	__unused vm_size_t	pg_size)
{
	device_pager_t			device_object;
	kern_return_t			kr;
	memory_object_attr_info_data_t	attributes;
	vm_object_t			vm_object;

	if (control == MEMORY_OBJECT_CONTROL_NULL)
		return KERN_INVALID_ARGUMENT;

	device_object = device_pager_lookup(mem_obj);

	memory_object_control_reference(control);
	device_object->control_handle = control;

	/*
	 * The following settings should be done through an expanded
	 * change-attributes call.
	 */
	vm_object = (vm_object_t)memory_object_control_to_vm_object(control);
	vm_object_lock(vm_object);
	vm_object->private = TRUE;
	if (device_object->flags & DEVICE_PAGER_CONTIGUOUS)
		vm_object->phys_contiguous = TRUE;
	if (device_object->flags & DEVICE_PAGER_NOPHYSCACHE)
		vm_object->nophyscache = TRUE;
	vm_object->wimg_bits = device_object->flags & VM_WIMG_MASK;
	vm_object_unlock(vm_object);

	attributes.copy_strategy = MEMORY_OBJECT_COPY_DELAY;
	/* attributes.cluster_size = (1 << (CLUSTER_SHIFT + PAGE_SHIFT)); */
	attributes.cluster_size = (1 << PAGE_SHIFT);
	attributes.may_cache_object = FALSE;
	attributes.temporary = TRUE;

	kr = memory_object_change_attributes(
			control,
			MEMORY_OBJECT_ATTRIBUTE_INFO,
			(memory_object_info_t) &attributes,
			MEMORY_OBJECT_ATTR_INFO_COUNT);
	if (kr != KERN_SUCCESS)
		panic("device_pager_init: memory_object_change_attributes() failed");

	return (KERN_SUCCESS);
}
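
/*
 * Editor's gloss on the settings above (interpretation, not from the
 * original source): marking the object "private" tells the VM system
 * the pages are device-backed rather than pageable; the
 * DEVICE_PAGER_CONTIGUOUS / DEVICE_PAGER_NOPHYSCACHE flags and the
 * VM_WIMG_MASK bits forward the driver's contiguity and cache-mode
 * hints; and the change-attributes call selects
 * MEMORY_OBJECT_COPY_DELAY with object caching disabled.
 */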
kern_return_t
device_pager_data_return(
	memory_object_t			mem_obj,
	memory_object_offset_t		offset,
	vm_size_t			data_cnt,
	__unused memory_object_offset_t	*resid_offset,
	__unused int			*io_error,
	__unused boolean_t		dirty,
	__unused boolean_t		kernel_copy,
	__unused int			upl_flags)
{
	device_pager_t	device_object;

	device_object = device_pager_lookup(mem_obj);
	if (device_object == DEVICE_PAGER_NULL)
		panic("device_pager_data_return: lookup failed");

	return device_data_action(device_object->device_handle,
				(ipc_port_t) device_object,
				VM_PROT_READ | VM_PROT_WRITE,
				offset, data_cnt);
}
kern_return_t
device_pager_data_request(
	memory_object_t		mem_obj,
	memory_object_offset_t	offset,
	vm_size_t		length,
	__unused vm_prot_t	protection_required)
{
	device_pager_t	device_object;

	device_object = device_pager_lookup(mem_obj);
	if (device_object == DEVICE_PAGER_NULL)
		panic("device_pager_data_request: lookup failed");

	device_data_action(device_object->device_handle,
			(ipc_port_t) device_object,
			VM_PROT_READ, offset, length);

	return KERN_SUCCESS;
}
void
device_pager_reference(
	memory_object_t	mem_obj)
{
	device_pager_t	device_object;
	unsigned int	new_ref_count;

	device_object = device_pager_lookup(mem_obj);
	new_ref_count = hw_atomic_add(&device_object->ref_count, 1);
	assert(new_ref_count > 1);
}
void
device_pager_deallocate(
	memory_object_t	mem_obj)
{
	device_pager_t		device_object;
	memory_object_control_t	device_control;

	device_object = device_pager_lookup(mem_obj);

	if (hw_atomic_sub(&device_object->ref_count, 1) == 0) {
		if (device_object->device_handle != (device_port_t) NULL) {
			device_close(device_object->device_handle);
			device_object->device_handle = (device_port_t) NULL;
		}
		device_control = device_object->control_handle;
		if (device_control != MEMORY_OBJECT_CONTROL_NULL) {
			/*
			 * The VM object should already have been disconnected
			 * from the pager at this point.
			 * We still have to release the "memory object control"
			 * handle.
			 */
			assert(device_control->moc_object == VM_OBJECT_NULL);
			memory_object_control_deallocate(device_control);
			device_object->control_handle =
				MEMORY_OBJECT_CONTROL_NULL;
		}

		zfree(device_pager_zone, device_object);
	}
}
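
/*
 * Editor's note: the teardown order above is deliberate -- the device
 * handle is closed before the control handle is dropped, and the
 * assert() on moc_object encodes the in-line comment's expectation that
 * the VM object was already disconnected from the pager (presumably
 * during vm_object_terminate()) before the last reference went away.
 */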
kern_return_t
device_pager_data_initialize(
	__unused memory_object_t	mem_obj,
	__unused memory_object_offset_t	offset,
	__unused vm_size_t		data_cnt)
{
	panic("device_pager_data_initialize");
	return KERN_FAILURE;
}
kern_return_t
device_pager_data_unlock(
	__unused memory_object_t	mem_obj,
	__unused memory_object_offset_t	offset,
	__unused vm_size_t		size,
	__unused vm_prot_t		desired_access)
{
	return KERN_FAILURE;
}
kern_return_t
device_pager_terminate(
	__unused memory_object_t	mem_obj)
{
	return KERN_SUCCESS;
}
kern_return_t
device_pager_synchronize(
	memory_object_t		mem_obj,
	memory_object_offset_t	offset,
	vm_offset_t		length,
	__unused vm_sync_t	sync_flags)
{
	device_pager_t	device_object;

	device_object = device_pager_lookup(mem_obj);

	memory_object_synchronize_completed(
			device_object->control_handle, offset, length);

	return KERN_SUCCESS;
}
kern_return_t
device_pager_unmap(
	__unused memory_object_t	mem_obj)
{
	return KERN_SUCCESS;
}
device_pager_t
device_object_create(void)
{
	register device_pager_t	device_object;

	device_object = (struct device_pager *) zalloc(device_pager_zone);
	if (device_object == DEVICE_PAGER_NULL)
		return (DEVICE_PAGER_NULL);
	device_object->pager_ops = &device_pager_ops;
	device_object->pager_ikot = IKOT_MEMORY_OBJECT;
	device_object->ref_count = 1;
	device_object->control_handle = MEMORY_OBJECT_CONTROL_NULL;

	return (device_object);
}