]> git.saurik.com Git - apple/xnu.git/blame - osfmk/vm/device_vm.c
xnu-792.6.56.tar.gz
[apple/xnu.git] / osfmk / vm / device_vm.c
CommitLineData
0b4e3aa0 1/*
91447636 2 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
0b4e3aa0
A
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
ff6e181a
A
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. Please obtain a copy of the License at
10 * http://www.opensource.apple.com/apsl/ and read it before using this
11 * file.
0b4e3aa0 12 *
ff6e181a
A
13 * The Original Code and all software distributed under the License are
14 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
0b4e3aa0
A
15 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
16 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
ff6e181a
A
17 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
18 * Please see the License for the specific language governing rights and
19 * limitations under the License.
0b4e3aa0
A
20 *
21 * @APPLE_LICENSE_HEADER_END@
22 */
23
24#include <sys/errno.h>
91447636 25
0b4e3aa0 26#include <mach/mach_types.h>
0b4e3aa0 27#include <mach/kern_return.h>
91447636 28#include <mach/memory_object_control.h>
0b4e3aa0
A
29#include <mach/memory_object_types.h>
30#include <mach/port.h>
31#include <mach/policy.h>
91447636
A
32#include <mach/upl.h>
33#include <kern/kern_types.h>
34#include <kern/ipc_kobject.h>
35#include <kern/host.h>
36#include <kern/thread.h>
0b4e3aa0
A
37#include <ipc/ipc_port.h>
38#include <ipc/ipc_space.h>
0b4e3aa0 39#include <device/device_port.h>
91447636 40#include <vm/memory_object.h>
0b4e3aa0 41#include <vm/vm_pageout.h>
91447636
A
42#include <vm/vm_map.h>
43#include <vm/vm_kern.h>
44#include <vm/vm_pageout.h>
45#include <vm/vm_protos.h>
46
0b4e3aa0 47
0b4e3aa0
A
48/* Device VM COMPONENT INTERFACES */
49
50
51/*
52 * Device PAGER
53 */
54
55
56/* until component support available */
57
58
59
/* until component support available */
int device_pager_workaround;

typedef int device_port_t;

/*
 * A device pager instance.
 * NOTE(review): the first two fields look like they mimic an ipc_port's
 * layout ("pager" standing in for ip_kobject, "pager_ikot" for a fake
 * ip_kotype()) so the structure can be passed where a port is expected
 * until real component support is available — confirm against the
 * ipc_port layout before reordering fields.
 */
typedef struct device_pager {
	int		*pager;		/* pager workaround pointer */
	unsigned int	pager_ikot;	/* fake ip_kotype() */
	unsigned int	ref_count;	/* reference count */
	memory_object_control_t	control_handle;	/* mem object's cntrl handle */
	device_port_t	device_handle;	/* device_handle */
	vm_size_t	size;		/* object size recorded at setup */
	int		flags;		/* DEVICE_PAGER_* flags from setup */
} *device_pager_t;




device_pager_t
device_pager_lookup(		/* forward */
	memory_object_t);

device_pager_t
device_object_create(void);	/* forward */

/* zone from which all device_pager structures are allocated */
zone_t	device_pager_zone;


#define DEVICE_PAGER_NULL	((device_pager_t) 0)

/* upper bound on device pager structures backing the zone */
#define MAX_DNODE	10000
92
93
94
95
96
97/*
98 *
99 */
100void
101device_pager_bootstrap(void)
102{
103 register vm_size_t size;
104
105 size = (vm_size_t) sizeof(struct device_pager);
106 device_pager_zone = zinit(size, (vm_size_t) MAX_DNODE*size,
107 PAGE_SIZE, "device node pager structures");
108
109 return;
110}
111
112/*
113 *
114 */
115memory_object_t
116device_pager_setup(
91447636 117 __unused memory_object_t device,
0b4e3aa0
A
118 int device_handle,
119 vm_size_t size,
120 int flags)
121{
122 device_pager_t device_object;
123
124 device_object = device_object_create();
125 if (device_object == DEVICE_PAGER_NULL)
126 panic("device_pager_setup: device_object_create() failed");
127
128 device_object->device_handle = device_handle;
129 device_object->size = size;
9bccf70c 130 device_object->flags = flags;
0b4e3aa0
A
131
132 return((memory_object_t)device_object);
133}
134
135/*
136 *
137 */
/*
 * device_pager_populate_object:
 *	Enter the device's physical page(s) starting at page_num into the
 *	VM object backing this pager at the given offset.  For objects that
 *	are not physically contiguous, additionally push the range through
 *	a no-sync, clean-in-place UPL request + commit cycle.
 */
kern_return_t
device_pager_populate_object(
	memory_object_t		device,
	memory_object_offset_t	offset,
	ppnum_t			page_num,
	vm_size_t		size)
{
	device_pager_t	device_object;
	vm_object_t	vm_object;
	kern_return_t	kr;
	upl_t		upl;

	device_object = device_pager_lookup(device);
	if(device_object == DEVICE_PAGER_NULL)
		return KERN_FAILURE;

	/* resolve the pager's control handle to the underlying VM object */
	vm_object = (vm_object_t)memory_object_control_to_vm_object(
					device_object->control_handle);
	if(vm_object == NULL)
		return KERN_FAILURE;

	kr = vm_object_populate_with_private(
				vm_object, offset, page_num, size);
	if(kr != KERN_SUCCESS)
		return kr;

	if(!vm_object->phys_contiguous) {
		int null_size = 0;
		kr = vm_object_upl_request(vm_object,
			(vm_object_offset_t)offset, size, &upl, NULL,
			&null_size, (UPL_NO_SYNC | UPL_CLEAN_IN_PLACE));

		/* a failed UPL request here leaves the object in an
		 * inconsistent state, hence the panic */
		if(kr != KERN_SUCCESS)
			panic("device_pager_populate_object: list_req failed");

		upl_commit(upl, NULL, 0);
		upl_deallocate(upl);
	}


	return kr;
}
180
181/*
182 *
183 */
184device_pager_t
185device_pager_lookup(
186 memory_object_t name)
187{
188 device_pager_t device_object;
189
190 device_object = (device_pager_t)name;
191 assert(device_object->pager == &device_pager_workaround);
192 return (device_object);
193}
194
195/*
196 *
197 */
/*
 * device_pager_init:
 *	Memory-object initialization: attach the pager to its control
 *	handle, mark the backing VM object as private (device memory),
 *	propagate the contiguity / caching flags recorded at setup time,
 *	and set the object's attributes (copy-delay, non-cached, temporary).
 */
kern_return_t
device_pager_init(
	memory_object_t mem_obj,
	memory_object_control_t control,
	__unused vm_size_t pg_size)
{
	device_pager_t   device_object;
	kern_return_t   kr;
	memory_object_attr_info_data_t  attributes;

	vm_object_t	vm_object;


	if (control == MEMORY_OBJECT_CONTROL_NULL)
		return KERN_INVALID_ARGUMENT;

	device_object = device_pager_lookup(mem_obj);

	/* take a reference on the control handle; released at dealloc time */
	memory_object_control_reference(control);
	device_object->control_handle = control;


/* The following settings should be done through an expanded change */
/* attributes call */

	vm_object = (vm_object_t)memory_object_control_to_vm_object(control);
	vm_object_lock(vm_object);
	vm_object->private = TRUE;
	if(device_object->flags & DEVICE_PAGER_CONTIGUOUS)
		vm_object->phys_contiguous = TRUE;
	if(device_object->flags & DEVICE_PAGER_NOPHYSCACHE)
		vm_object->nophyscache = TRUE;

	/* low bits of the setup flags carry the WIMG caching attributes */
	vm_object->wimg_bits = device_object->flags & VM_WIMG_MASK;
	vm_object_unlock(vm_object);


	attributes.copy_strategy = MEMORY_OBJECT_COPY_DELAY;
	/* attributes.cluster_size = (1 << (CLUSTER_SHIFT + PAGE_SHIFT));*/
	attributes.cluster_size = (1 << (PAGE_SHIFT));
	attributes.may_cache_object = FALSE;
	attributes.temporary = TRUE;

	kr = memory_object_change_attributes(
					control,
					MEMORY_OBJECT_ATTRIBUTE_INFO,
					(memory_object_info_t) &attributes,
					MEMORY_OBJECT_ATTR_INFO_COUNT);
	if (kr != KERN_SUCCESS)
		panic("device_pager_init: memory_object_change_attributes() failed");

	return(KERN_SUCCESS);
}
251
252/*
253 *
254 */
91447636 255/*ARGSUSED6*/
0b4e3aa0
A
256kern_return_t
257device_pager_data_return(
91447636
A
258 memory_object_t mem_obj,
259 memory_object_offset_t offset,
260 vm_size_t data_cnt,
261 __unused boolean_t dirty,
262 __unused boolean_t kernel_copy,
263 __unused int upl_flags)
0b4e3aa0
A
264{
265 device_pager_t device_object;
266
267 device_object = device_pager_lookup(mem_obj);
268 if (device_object == DEVICE_PAGER_NULL)
269 panic("device_pager_data_return: lookup failed");
270
91447636
A
271 return device_data_action(device_object->device_handle,
272 (ipc_port_t) device_object,
273 VM_PROT_READ | VM_PROT_WRITE,
274 offset, data_cnt);
0b4e3aa0
A
275}
276
277/*
278 *
279 */
280kern_return_t
281device_pager_data_request(
282 memory_object_t mem_obj,
283 memory_object_offset_t offset,
284 vm_size_t length,
91447636 285 __unused vm_prot_t protection_required)
0b4e3aa0
A
286{
287 device_pager_t device_object;
288
289 device_object = device_pager_lookup(mem_obj);
290
291 if (device_object == DEVICE_PAGER_NULL)
292 panic("device_pager_data_request: lookup failed");
293
91447636
A
294 device_data_action(device_object->device_handle,
295 (ipc_port_t) device_object,
296 VM_PROT_READ, offset, length);
0b4e3aa0
A
297 return KERN_SUCCESS;
298}
299
300/*
301 *
302 */
303void
304device_pager_reference(
305 memory_object_t mem_obj)
306{
307 device_pager_t device_object;
9bccf70c 308 unsigned int new_ref_count;
0b4e3aa0
A
309
310 device_object = device_pager_lookup(mem_obj);
9bccf70c
A
311 new_ref_count = hw_atomic_add(&device_object->ref_count, 1);
312 assert(new_ref_count > 1);
0b4e3aa0
A
313}
314
315/*
316 *
317 */
/*
 * device_pager_deallocate:
 *	Drop one reference on the pager.  When the last reference goes
 *	away: close the device, release the memory object control handle,
 *	and free the pager structure back to its zone — in that order.
 */
void
device_pager_deallocate(
	memory_object_t		mem_obj)
{
	device_pager_t		device_object;
	memory_object_control_t	device_control;

	device_object = device_pager_lookup(mem_obj);

	if (hw_atomic_sub(&device_object->ref_count, 1) == 0) {
		/* last reference: tear everything down */
		if (device_object->device_handle != (device_port_t) NULL) {
			device_close(device_object->device_handle);
			device_object->device_handle = (device_port_t) NULL;
		}
		device_control = device_object->control_handle;
		if (device_control != MEMORY_OBJECT_CONTROL_NULL) {
			/*
			 * The VM object should already have been disconnected
			 * from the pager at this point.
			 * We still have to release the "memory object control"
			 * handle.
			 */
			assert(device_control->object == VM_OBJECT_NULL);
			memory_object_control_deallocate(device_control);
			device_object->control_handle =
				MEMORY_OBJECT_CONTROL_NULL;
		}

		zfree(device_pager_zone, device_object);
	}
	return;
}
350
351kern_return_t
352device_pager_data_initialize(
91447636
A
353 __unused memory_object_t mem_obj,
354 __unused memory_object_offset_t offset,
355 __unused vm_size_t data_cnt)
0b4e3aa0 356{
91447636 357 panic("device_pager_data_initialize");
0b4e3aa0
A
358 return KERN_FAILURE;
359}
360
361kern_return_t
362device_pager_data_unlock(
91447636
A
363 __unused memory_object_t mem_obj,
364 __unused memory_object_offset_t offset,
365 __unused vm_size_t size,
366 __unused vm_prot_t desired_access)
0b4e3aa0
A
367{
368 return KERN_FAILURE;
369}
370
91447636 371kern_return_t
0b4e3aa0 372device_pager_terminate(
91447636 373 __unused memory_object_t mem_obj)
0b4e3aa0
A
374{
375 return KERN_SUCCESS;
376}
377
378
379
380/*
381 *
382 */
383kern_return_t
384device_pager_synchronize(
385 memory_object_t mem_obj,
386 memory_object_offset_t offset,
387 vm_offset_t length,
91447636 388 __unused vm_sync_t sync_flags)
0b4e3aa0
A
389{
390 device_pager_t device_object;
391
392 device_object = device_pager_lookup(mem_obj);
393
394 memory_object_synchronize_completed(
395 device_object->control_handle, offset, length);
396
397 return KERN_SUCCESS;
398}
399
400/*
401 *
402 */
403kern_return_t
404device_pager_unmap(
91447636 405 __unused memory_object_t mem_obj)
0b4e3aa0
A
406{
407 return KERN_SUCCESS;
408}
409
410
411
412/*
413 *
414 */
415device_pager_t
416device_object_create()
417{
418 register device_pager_t device_object;
419
420 device_object = (struct device_pager *) zalloc(device_pager_zone);
421 if (device_object == DEVICE_PAGER_NULL)
422 return(DEVICE_PAGER_NULL);
423 device_object->pager = &device_pager_workaround;
424 device_object->pager_ikot = IKOT_MEMORY_OBJECT;
425 device_object->ref_count = 1;
426 device_object->control_handle = MEMORY_OBJECT_CONTROL_NULL;
427
428
429 return(device_object);
430}
431