/*
 * Copyright (c) 2000-2001 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved.
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */

#include <sys/errno.h>
#include <kern/host.h>
#include <mach/mach_types.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_pageout.h>
#include <mach/kern_return.h>
#include <mach/memory_object_types.h>
#include <mach/port.h>
#include <mach/policy.h>
#include <ipc/ipc_port.h>
#include <ipc/ipc_space.h>
#include <kern/thread.h>
#include <device/device_port.h>
#include <vm/vm_pageout.h>

/* Device VM COMPONENT INTERFACES */


/*
 * Device PAGER
 */


/* until component support available */



/* until component support available */
int	device_pager_workaround;

typedef int device_port_t;

/*
 * A device pager stands in for a real memory object.  The pager field
 * points at device_pager_workaround to identify the structure as a device
 * pager (see device_pager_lookup() and device_object_create()), and
 * pager_ikot fakes the ip_kotype() of a memory object port, until real
 * component support is available.
 */
typedef struct device_pager {
	int		*pager;			/* pager workaround pointer */
	unsigned int	pager_ikot;		/* fake ip_kotype() */
	unsigned int	ref_count;		/* reference count */
	memory_object_control_t	control_handle;	/* mem object's cntrl handle */
	device_port_t	device_handle;		/* device_handle */
	vm_size_t	size;
	int		flags;
} *device_pager_t;



void
device_pager_bootstrap(
	void);


memory_object_t
device_pager_setup(
	memory_object_t,
	int,
	vm_size_t,
	int);

device_pager_t
device_pager_lookup(
	memory_object_t);

kern_return_t
device_pager_init(
	memory_object_t,
	memory_object_control_t,
	vm_size_t);


kern_return_t
device_pager_data_request(
	memory_object_t,
	memory_object_offset_t,
	vm_size_t,
	vm_prot_t);

kern_return_t
device_pager_data_return(
	memory_object_t,
	memory_object_offset_t,
	vm_size_t,
	boolean_t,
	boolean_t);

void
device_pager_reference(
	memory_object_t);

void
device_pager_deallocate(
	memory_object_t);

kern_return_t
device_pager_data_initialize(
	memory_object_t,
	memory_object_offset_t,
	vm_size_t);

kern_return_t
device_pager_data_unlock(
	memory_object_t,
	memory_object_offset_t,
	vm_size_t,
	vm_prot_t);

kern_return_t
device_pager_terminate(
	memory_object_t);

kern_return_t
device_pager_synchronize(
	memory_object_t,
	memory_object_offset_t,
	vm_offset_t,
	vm_sync_t);

kern_return_t
device_pager_unmap(
	memory_object_t);

device_pager_t
device_object_create(void);

zone_t	device_pager_zone;


#define	DEVICE_PAGER_NULL	((device_pager_t) 0)


#define	MAX_DNODE	10000




/*
 * device_pager_bootstrap: create the zone from which device pager
 * structures are allocated.
 */
void
device_pager_bootstrap(void)
{
	register vm_size_t	size;

	size = (vm_size_t) sizeof(struct device_pager);
	device_pager_zone = zinit(size, (vm_size_t) MAX_DNODE*size,
			PAGE_SIZE, "device node pager structures");

	return;
}

/*
 * device_pager_setup: allocate a device pager for the given device handle
 * and hand it back to the caller as a memory object.
 */
memory_object_t
device_pager_setup(
	memory_object_t	device,
	int		device_handle,
	vm_size_t	size,
	int		flags)
{
	device_pager_t	device_object;

	device_object = device_object_create();
	if (device_object == DEVICE_PAGER_NULL)
		panic("device_pager_setup: device_object_create() failed");

	device_object->device_handle = device_handle;
	device_object->size = size;
	device_object->flags = flags;

	return((memory_object_t)device_object);
}

/*
 * device_pager_populate_object: enter the given physical page(s) into the
 * VM object backing this device pager as private, device-owned pages.
 */
kern_return_t
device_pager_populate_object(
	memory_object_t		device,
	memory_object_offset_t	offset,
	ppnum_t			page_num,
	vm_size_t		size)
{
	device_pager_t	device_object;
	vm_object_t	vm_object;
	kern_return_t	kr;
	upl_t		upl;
	ipc_port_t	previous;

	device_object = device_pager_lookup(device);
	if(device_object == DEVICE_PAGER_NULL)
		return KERN_FAILURE;

	vm_object = (vm_object_t)memory_object_control_to_vm_object(
					device_object->control_handle);
	if(vm_object == NULL)
		return KERN_FAILURE;

	kr = vm_object_populate_with_private(
			vm_object, offset, page_num, size);
	if(kr != KERN_SUCCESS)
		return kr;

	if(!vm_object->phys_contiguous) {
		int null_size = 0;
		kr = vm_object_upl_request(vm_object,
				(vm_object_offset_t)offset, size, &upl, NULL,
				&null_size, (UPL_NO_SYNC | UPL_CLEAN_IN_PLACE));

		if(kr != KERN_SUCCESS)
			panic("device_pager_populate_object: list_req failed");

		upl_commit(upl, NULL);
		upl_deallocate(upl);
	}


	return kr;
}

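/*
 * Illustrative sketch, not part of the original source: a hypothetical
 * kernel client would create a device pager with device_pager_setup(),
 * have the object mapped (which is when device_pager_init() runs and the
 * control handle is attached), and only then wire physical pages in with
 * device_pager_populate_object().  The device handle, size, flags and
 * page number below are made-up values, and DEVICE_PAGER_EXAMPLE is never
 * defined, so this block is not compiled.
 */
#ifdef DEVICE_PAGER_EXAMPLE
static void
device_pager_usage_sketch(void)
{
	memory_object_t	pager;
	vm_size_t	size = 4 * PAGE_SIZE;	/* hypothetical mapping size */
	ppnum_t		page_num = 0x1234;	/* hypothetical physical page */

	/* the first argument is unused by device_pager_setup(); handle 1 is illustrative */
	pager = device_pager_setup(MEMORY_OBJECT_NULL, 1, size,
			DEVICE_PAGER_CONTIGUOUS);

	/* ... the object is mapped here; device_pager_init() runs at that point ... */

	/* enter one physical page at offset 0 of the backing object */
	(void) device_pager_populate_object(pager, 0, page_num, PAGE_SIZE);
}
#endif	/* DEVICE_PAGER_EXAMPLE */
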
/*
 * device_pager_lookup: convert a memory object back into the device pager
 * that implements it.
 */
device_pager_t
device_pager_lookup(
	memory_object_t	name)
{
	device_pager_t	device_object;

	device_object = (device_pager_t)name;
	assert(device_object->pager == &device_pager_workaround);
	return (device_object);
}

/*
 * device_pager_init: attach the memory object control handle and set up
 * the backing VM object's attributes for device-backed memory.
 */
kern_return_t
device_pager_init(memory_object_t mem_obj,
		memory_object_control_t control,
		vm_size_t pg_size)
{
	device_pager_t	device_object;
	kern_return_t	kr;
	memory_object_attr_info_data_t	attributes;

	vm_object_t	vm_object;


	if (control == MEMORY_OBJECT_CONTROL_NULL)
		return KERN_INVALID_ARGUMENT;

	device_object = device_pager_lookup(mem_obj);

	memory_object_control_reference(control);
	device_object->control_handle = control;


/* The following settings should be done through an expanded change */
/* attributes call */

	vm_object = (vm_object_t)memory_object_control_to_vm_object(control);
	vm_object_lock(vm_object);
	vm_object->private = TRUE;
	if(device_object->flags & DEVICE_PAGER_CONTIGUOUS)
		vm_object->phys_contiguous = TRUE;
	if(device_object->flags & DEVICE_PAGER_NOPHYSCACHE)
		vm_object->nophyscache = TRUE;

	vm_object->wimg_bits = device_object->flags & VM_WIMG_MASK;
	vm_object_unlock(vm_object);


	attributes.copy_strategy = MEMORY_OBJECT_COPY_DELAY;
	/* attributes.cluster_size = (1 << (CLUSTER_SHIFT + PAGE_SHIFT));*/
	attributes.cluster_size = (1 << (PAGE_SHIFT));
	attributes.may_cache_object = FALSE;
	attributes.temporary = TRUE;

	kr = memory_object_change_attributes(
					control,
					MEMORY_OBJECT_ATTRIBUTE_INFO,
					(memory_object_info_t) &attributes,
					MEMORY_OBJECT_ATTR_INFO_COUNT);
	if (kr != KERN_SUCCESS)
		panic("device_pager_init: memory_object_change_attributes() failed");

	return(KERN_SUCCESS);
}

/*
 * device_pager_data_return: hand data back to the device via
 * device_data_action().
 */
kern_return_t
device_pager_data_return(
	memory_object_t		mem_obj,
	memory_object_offset_t	offset,
	vm_size_t		data_cnt,
	boolean_t		dirty,
	boolean_t		kernel_copy)
{
	device_pager_t	device_object;

	device_object = device_pager_lookup(mem_obj);
	if (device_object == DEVICE_PAGER_NULL)
		panic("device_pager_data_return: lookup failed");

	return device_data_action(device_object->device_handle, device_object,
			VM_PROT_READ | VM_PROT_WRITE, offset, data_cnt);
}

/*
 * device_pager_data_request: ask the device to supply data for a fault
 * via device_data_action().
 */
kern_return_t
device_pager_data_request(
	memory_object_t		mem_obj,
	memory_object_offset_t	offset,
	vm_size_t		length,
	vm_prot_t		protection_required)
{
	device_pager_t	device_object;

	device_object = device_pager_lookup(mem_obj);

	if (device_object == DEVICE_PAGER_NULL)
		panic("device_pager_data_request: lookup failed");

	device_data_action(device_object->device_handle, device_object,
			VM_PROT_READ, offset, length);
	return KERN_SUCCESS;
}

/*
 * device_pager_reference: take an additional reference on the pager.
 */
void
device_pager_reference(
	memory_object_t		mem_obj)
{
	device_pager_t		device_object;
	unsigned int		new_ref_count;

	device_object = device_pager_lookup(mem_obj);
	new_ref_count = hw_atomic_add(&device_object->ref_count, 1);
	assert(new_ref_count > 1);
}

/*
 * device_pager_deallocate: drop a reference; on the last release, close
 * the device handle and free the pager structure.
 */
void
device_pager_deallocate(
	memory_object_t		mem_obj)
{
	device_pager_t		device_object;

	device_object = device_pager_lookup(mem_obj);

	if (hw_atomic_sub(&device_object->ref_count, 1) == 0) {
		if (device_object->device_handle != (device_port_t) NULL) {
			device_close(device_object->device_handle);
		}
		zfree(device_pager_zone, (vm_offset_t) device_object);
	}
	return;
}

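/*
 * Illustrative sketch, not part of the original source: the pager's
 * lifetime is governed by ref_count.  device_object_create() starts the
 * count at 1, device_pager_reference() raises it, and each
 * device_pager_deallocate() drops it by one, closing the device handle
 * and freeing the structure when it reaches zero.  DEVICE_PAGER_EXAMPLE
 * is never defined, so this block is not compiled.
 */
#ifdef DEVICE_PAGER_EXAMPLE
static void
device_pager_refcount_sketch(memory_object_t pager)
{
	device_pager_reference(pager);	/* e.g. count goes 1 -> 2 */
	device_pager_deallocate(pager);	/* 2 -> 1 */
	device_pager_deallocate(pager);	/* 1 -> 0: device_close() + zfree() */
}
#endif	/* DEVICE_PAGER_EXAMPLE */
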
/*
 * device_pager_data_initialize, device_pager_data_unlock: not supported
 * for device-backed memory objects.
 */
kern_return_t
device_pager_data_initialize(
	memory_object_t		mem_obj,
	memory_object_offset_t	offset,
	vm_size_t		data_cnt)
{
	return KERN_FAILURE;
}

kern_return_t
device_pager_data_unlock(
	memory_object_t		mem_obj,
	memory_object_offset_t	offset,
	vm_size_t		size,
	vm_prot_t		desired_access)
{
	return KERN_FAILURE;
}

/*
 * device_pager_terminate: nothing to do for device-backed objects.
 */
kern_return_t
device_pager_terminate(
	memory_object_t	mem_obj)
{
	return KERN_SUCCESS;
}


/*
 * device_pager_synchronize: there is no backing store to flush for device
 * memory, so immediately report the synchronize as complete.
 */
kern_return_t
device_pager_synchronize(
	memory_object_t		mem_obj,
	memory_object_offset_t	offset,
	vm_offset_t		length,
	vm_sync_t		sync_flags)
{
	device_pager_t	device_object;

	device_object = device_pager_lookup(mem_obj);

	memory_object_synchronize_completed(
			device_object->control_handle, offset, length);

	return KERN_SUCCESS;
}

/*
 * device_pager_unmap: no action needed when the last mapping goes away.
 */
kern_return_t
device_pager_unmap(
	memory_object_t		mem_obj)
{
	return KERN_SUCCESS;
}


/*
 * device_object_create: allocate a device pager structure from the zone
 * and mark it as a (fake) memory object holding one reference.
 */
device_pager_t
device_object_create()
{
	register device_pager_t	device_object;

	device_object = (struct device_pager *) zalloc(device_pager_zone);
	if (device_object == DEVICE_PAGER_NULL)
		return(DEVICE_PAGER_NULL);
	device_object->pager = &device_pager_workaround;
	device_object->pager_ikot = IKOT_MEMORY_OBJECT;
	device_object->ref_count = 1;
	device_object->control_handle = MEMORY_OBJECT_CONTROL_NULL;


	return(device_object);
}