]> git.saurik.com Git - apple/xnu.git/blame - osfmk/vm/device_vm.c
xnu-792.25.20.tar.gz
[apple/xnu.git] / osfmk / vm / device_vm.c
CommitLineData
0b4e3aa0 1/*
91447636 2 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
0b4e3aa0 3 *
6601e61a 4 * @APPLE_LICENSE_HEADER_START@
0b4e3aa0 5 *
6601e61a
A
6 * The contents of this file constitute Original Code as defined in and
7 * are subject to the Apple Public Source License Version 1.1 (the
8 * "License"). You may not use this file except in compliance with the
9 * License. Please obtain a copy of the License at
10 * http://www.apple.com/publicsource and read it before using this file.
8f6c56a5 11 *
6601e61a
A
12 * This Original Code and all software distributed under the License are
13 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
8f6c56a5
A
14 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
15 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
6601e61a
A
16 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
17 * License for the specific language governing rights and limitations
18 * under the License.
8f6c56a5 19 *
6601e61a 20 * @APPLE_LICENSE_HEADER_END@
0b4e3aa0
A
21 */
22
23#include <sys/errno.h>
91447636 24
0b4e3aa0 25#include <mach/mach_types.h>
0b4e3aa0 26#include <mach/kern_return.h>
91447636 27#include <mach/memory_object_control.h>
0b4e3aa0
A
28#include <mach/memory_object_types.h>
29#include <mach/port.h>
30#include <mach/policy.h>
91447636
A
31#include <mach/upl.h>
32#include <kern/kern_types.h>
33#include <kern/ipc_kobject.h>
34#include <kern/host.h>
35#include <kern/thread.h>
0b4e3aa0
A
36#include <ipc/ipc_port.h>
37#include <ipc/ipc_space.h>
0b4e3aa0 38#include <device/device_port.h>
91447636 39#include <vm/memory_object.h>
0b4e3aa0 40#include <vm/vm_pageout.h>
91447636
A
41#include <vm/vm_map.h>
42#include <vm/vm_kern.h>
43#include <vm/vm_pageout.h>
44#include <vm/vm_protos.h>
45
0b4e3aa0 46
0b4e3aa0
A
47/* Device VM COMPONENT INTERFACES */
48
49
50/*
51 * Device PAGER
52 */
53
54
55/* until component support available */
56
57
58
59/* until component support available */
0c530ab8
A
/*
 * Operations vector for the device pager.  Entry order must match the
 * field order of "struct memory_object_pager_ops"; do not reorder.
 */
const struct memory_object_pager_ops device_pager_ops = {
	device_pager_reference,
	device_pager_deallocate,
	device_pager_init,
	device_pager_terminate,
	device_pager_data_request,
	device_pager_data_return,
	device_pager_data_initialize,
	device_pager_data_unlock,
	device_pager_synchronize,
	device_pager_unmap,
	"device pager"		/* human-readable pager name */
};
0b4e3aa0
A
73
typedef int device_port_t;

/*
 * The start of "struct device_pager" MUST match a "struct memory_object"
 * so a device_pager_t can be handed out as a memory_object_t and cast
 * back (see device_pager_lookup()).
 */
typedef struct device_pager {
	memory_object_pager_ops_t pager_ops; /* == &device_pager_ops */
	unsigned int pager_ikot;	/* fake ip_kotype() */
	unsigned int ref_count;		/* reference count */
	memory_object_control_t control_handle; /* mem object's cntrl handle */
	device_port_t device_handle;	/* device_handle */
	vm_size_t size;			/* size of the backed region */
	int flags;			/* DEVICE_PAGER_* flags (see init) */
} *device_pager_t;
88
89
90
0b4e3aa0
A
91
/* Convert a memory_object_t back to the device pager it really is. */
device_pager_t
device_pager_lookup(		/* forward */
	memory_object_t);

/* Allocate and minimally initialize a new device pager. */
device_pager_t
device_object_create(void);	/* forward */

/* Zone from which all device pager structures are allocated. */
zone_t	device_pager_zone;


#define DEVICE_PAGER_NULL	((device_pager_t) 0)

/* Upper bound on the number of device pager structures in the zone. */
#define MAX_DNODE	10000
106
107
108
109
110
111/*
112 *
113 */
114void
115device_pager_bootstrap(void)
116{
117 register vm_size_t size;
118
119 size = (vm_size_t) sizeof(struct device_pager);
120 device_pager_zone = zinit(size, (vm_size_t) MAX_DNODE*size,
121 PAGE_SIZE, "device node pager structures");
122
123 return;
124}
125
126/*
127 *
128 */
129memory_object_t
130device_pager_setup(
91447636 131 __unused memory_object_t device,
0b4e3aa0
A
132 int device_handle,
133 vm_size_t size,
134 int flags)
135{
136 device_pager_t device_object;
137
138 device_object = device_object_create();
139 if (device_object == DEVICE_PAGER_NULL)
140 panic("device_pager_setup: device_object_create() failed");
141
142 device_object->device_handle = device_handle;
143 device_object->size = size;
9bccf70c 144 device_object->flags = flags;
0b4e3aa0
A
145
146 return((memory_object_t)device_object);
147}
148
149/*
150 *
151 */
152kern_return_t
153device_pager_populate_object(
154 memory_object_t device,
155 memory_object_offset_t offset,
55e303ae 156 ppnum_t page_num,
0b4e3aa0
A
157 vm_size_t size)
158{
159 device_pager_t device_object;
160 vm_object_t vm_object;
161 kern_return_t kr;
162 upl_t upl;
0b4e3aa0
A
163
164 device_object = device_pager_lookup(device);
165 if(device_object == DEVICE_PAGER_NULL)
166 return KERN_FAILURE;
167
168 vm_object = (vm_object_t)memory_object_control_to_vm_object(
169 device_object->control_handle);
170 if(vm_object == NULL)
171 return KERN_FAILURE;
172
173 kr = vm_object_populate_with_private(
55e303ae 174 vm_object, offset, page_num, size);
0b4e3aa0
A
175 if(kr != KERN_SUCCESS)
176 return kr;
177
178 if(!vm_object->phys_contiguous) {
0c530ab8 179 unsigned int null_size = 0;
0b4e3aa0
A
180 kr = vm_object_upl_request(vm_object,
181 (vm_object_offset_t)offset, size, &upl, NULL,
182 &null_size, (UPL_NO_SYNC | UPL_CLEAN_IN_PLACE));
183
184 if(kr != KERN_SUCCESS)
185 panic("device_pager_populate_object: list_req failed");
186
91447636 187 upl_commit(upl, NULL, 0);
0b4e3aa0
A
188 upl_deallocate(upl);
189 }
190
191
192 return kr;
193}
194
195/*
196 *
197 */
198device_pager_t
199device_pager_lookup(
200 memory_object_t name)
201{
202 device_pager_t device_object;
203
204 device_object = (device_pager_t)name;
0c530ab8 205 assert(device_object->pager_ops == &device_pager_ops);
0b4e3aa0
A
206 return (device_object);
207}
208
209/*
210 *
211 */
212kern_return_t
91447636
A
213device_pager_init(
214 memory_object_t mem_obj,
215 memory_object_control_t control,
216 __unused vm_size_t pg_size)
0b4e3aa0
A
217{
218 device_pager_t device_object;
219 kern_return_t kr;
220 memory_object_attr_info_data_t attributes;
221
222 vm_object_t vm_object;
223
224
225 if (control == MEMORY_OBJECT_CONTROL_NULL)
226 return KERN_INVALID_ARGUMENT;
227
228 device_object = device_pager_lookup(mem_obj);
229
230 memory_object_control_reference(control);
231 device_object->control_handle = control;
232
233
234/* The following settings should be done through an expanded change */
235/* attributes call */
236
237 vm_object = (vm_object_t)memory_object_control_to_vm_object(control);
238 vm_object_lock(vm_object);
239 vm_object->private = TRUE;
240 if(device_object->flags & DEVICE_PAGER_CONTIGUOUS)
241 vm_object->phys_contiguous = TRUE;
242 if(device_object->flags & DEVICE_PAGER_NOPHYSCACHE)
243 vm_object->nophyscache = TRUE;
9bccf70c
A
244
245 vm_object->wimg_bits = device_object->flags & VM_WIMG_MASK;
0b4e3aa0
A
246 vm_object_unlock(vm_object);
247
248
249 attributes.copy_strategy = MEMORY_OBJECT_COPY_DELAY;
250 /* attributes.cluster_size = (1 << (CLUSTER_SHIFT + PAGE_SHIFT));*/
251 attributes.cluster_size = (1 << (PAGE_SHIFT));
252 attributes.may_cache_object = FALSE;
253 attributes.temporary = TRUE;
254
255 kr = memory_object_change_attributes(
256 control,
257 MEMORY_OBJECT_ATTRIBUTE_INFO,
258 (memory_object_info_t) &attributes,
259 MEMORY_OBJECT_ATTR_INFO_COUNT);
260 if (kr != KERN_SUCCESS)
261 panic("device_pager_init: memory_object_change_attributes() failed");
262
263 return(KERN_SUCCESS);
264}
265
266/*
267 *
268 */
91447636 269/*ARGSUSED6*/
0b4e3aa0
A
270kern_return_t
271device_pager_data_return(
0c530ab8
A
272 memory_object_t mem_obj,
273 memory_object_offset_t offset,
274 vm_size_t data_cnt,
275 __unused memory_object_offset_t *resid_offset,
276 __unused int *io_error,
91447636
A
277 __unused boolean_t dirty,
278 __unused boolean_t kernel_copy,
279 __unused int upl_flags)
0b4e3aa0
A
280{
281 device_pager_t device_object;
282
283 device_object = device_pager_lookup(mem_obj);
284 if (device_object == DEVICE_PAGER_NULL)
285 panic("device_pager_data_return: lookup failed");
286
91447636
A
287 return device_data_action(device_object->device_handle,
288 (ipc_port_t) device_object,
289 VM_PROT_READ | VM_PROT_WRITE,
290 offset, data_cnt);
0b4e3aa0
A
291}
292
293/*
294 *
295 */
296kern_return_t
297device_pager_data_request(
298 memory_object_t mem_obj,
299 memory_object_offset_t offset,
300 vm_size_t length,
91447636 301 __unused vm_prot_t protection_required)
0b4e3aa0
A
302{
303 device_pager_t device_object;
304
305 device_object = device_pager_lookup(mem_obj);
306
307 if (device_object == DEVICE_PAGER_NULL)
308 panic("device_pager_data_request: lookup failed");
309
91447636
A
310 device_data_action(device_object->device_handle,
311 (ipc_port_t) device_object,
312 VM_PROT_READ, offset, length);
0b4e3aa0
A
313 return KERN_SUCCESS;
314}
315
316/*
317 *
318 */
319void
320device_pager_reference(
321 memory_object_t mem_obj)
322{
323 device_pager_t device_object;
9bccf70c 324 unsigned int new_ref_count;
0b4e3aa0
A
325
326 device_object = device_pager_lookup(mem_obj);
9bccf70c
A
327 new_ref_count = hw_atomic_add(&device_object->ref_count, 1);
328 assert(new_ref_count > 1);
0b4e3aa0
A
329}
330
331/*
332 *
333 */
/*
 * Drop one reference; on the final release, close the device, release
 * the memory-object-control handle, and free the pager structure.
 */
void
device_pager_deallocate(
	memory_object_t	mem_obj)
{
	device_pager_t		device_object;
	memory_object_control_t	device_control;

	device_object = device_pager_lookup(mem_obj);

	if (hw_atomic_sub(&device_object->ref_count, 1) == 0) {
		/* Last reference is gone: tear the pager down. */
		if (device_object->device_handle != (device_port_t) NULL) {
			device_close(device_object->device_handle);
			device_object->device_handle = (device_port_t) NULL;
		}
		device_control = device_object->control_handle;
		if (device_control != MEMORY_OBJECT_CONTROL_NULL) {
			/*
			 * The VM object should already have been disconnected
			 * from the pager at this point.
			 * We still have to release the "memory object control"
			 * handle.
			 */
			assert(device_control->moc_object == VM_OBJECT_NULL);
			memory_object_control_deallocate(device_control);
			device_object->control_handle =
				MEMORY_OBJECT_CONTROL_NULL;
		}

		zfree(device_pager_zone, device_object);
	}
	return;
}
366
/*
 * data_initialize is not expected for device-backed objects;
 * receiving one is treated as fatal.
 */
kern_return_t
device_pager_data_initialize(
	__unused memory_object_t	mem_obj,
	__unused memory_object_offset_t	offset,
	__unused vm_size_t		data_cnt)
{
	panic("device_pager_data_initialize");
	return KERN_FAILURE;	/* not reached */
}
376
/*
 * Unlock requests are not supported by the device pager;
 * always report failure.
 */
kern_return_t
device_pager_data_unlock(
	__unused memory_object_t	mem_obj,
	__unused memory_object_offset_t	offset,
	__unused vm_size_t		size,
	__unused vm_prot_t		desired_access)
{
	return KERN_FAILURE;
}
386
/*
 * Termination needs no work for a device pager; cleanup happens in
 * device_pager_deallocate() when the last reference is dropped.
 */
kern_return_t
device_pager_terminate(
	__unused memory_object_t	mem_obj)
{
	return KERN_SUCCESS;
}
393
394
395
396/*
397 *
398 */
399kern_return_t
400device_pager_synchronize(
401 memory_object_t mem_obj,
402 memory_object_offset_t offset,
403 vm_offset_t length,
91447636 404 __unused vm_sync_t sync_flags)
0b4e3aa0
A
405{
406 device_pager_t device_object;
407
408 device_object = device_pager_lookup(mem_obj);
409
410 memory_object_synchronize_completed(
411 device_object->control_handle, offset, length);
412
413 return KERN_SUCCESS;
414}
415
416/*
417 *
418 */
/*
 * Last-mapping-gone notification; the device pager takes no action.
 */
kern_return_t
device_pager_unmap(
	__unused memory_object_t	mem_obj)
{
	return KERN_SUCCESS;
}
425
426
427
428/*
429 *
430 */
431device_pager_t
432device_object_create()
433{
434 register device_pager_t device_object;
435
436 device_object = (struct device_pager *) zalloc(device_pager_zone);
437 if (device_object == DEVICE_PAGER_NULL)
438 return(DEVICE_PAGER_NULL);
0c530ab8 439 device_object->pager_ops = &device_pager_ops;
0b4e3aa0
A
440 device_object->pager_ikot = IKOT_MEMORY_OBJECT;
441 device_object->ref_count = 1;
442 device_object->control_handle = MEMORY_OBJECT_CONTROL_NULL;
443
444
445 return(device_object);
446}
447