/*
 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_OSREFERENCE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the
 * License may not be used to create, or enable the creation or
 * redistribution of, unlawful or unlicensed copies of an Apple operating
 * system, or to circumvent, violate, or enable the circumvention or
 * violation of, any terms of an Apple operating system software license
 * agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_OSREFERENCE_HEADER_END@
 */
30
31 #include <sys/errno.h>
32
33 #include <mach/mach_types.h>
34 #include <mach/kern_return.h>
35 #include <mach/memory_object_control.h>
36 #include <mach/memory_object_types.h>
37 #include <mach/port.h>
38 #include <mach/policy.h>
39 #include <mach/upl.h>
40 #include <kern/kern_types.h>
41 #include <kern/ipc_kobject.h>
42 #include <kern/host.h>
43 #include <kern/thread.h>
44 #include <ipc/ipc_port.h>
45 #include <ipc/ipc_space.h>
46 #include <device/device_port.h>
47 #include <vm/memory_object.h>
48 #include <vm/vm_pageout.h>
49 #include <vm/vm_map.h>
50 #include <vm/vm_kern.h>
51 #include <vm/vm_pageout.h>
52 #include <vm/vm_protos.h>
53
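/*
 * The device pager is a minimal external memory manager that backs a
 * VM object with pages living in device memory.  The device supplies
 * physical page numbers through device_pager_populate_object(); page-in
 * and page-out requests are forwarded back to the device via
 * device_data_action().
 */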

/* Device VM COMPONENT INTERFACES */


/*
 * Device PAGER
 */


/*
 * Until component support is available, the address of this variable is
 * used as a tag: device_pager_lookup() checks a pager's "pager" field
 * against &device_pager_workaround to confirm it really is a device pager.
 */
int device_pager_workaround;

typedef int device_port_t;

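/*
 * A device pager is handed around cast as a memory_object_t (see
 * device_pager_lookup()) and, when calling into the device, as an
 * ipc_port_t (see device_pager_data_return()).  The leading "pager"
 * and "pager_ikot" fields exist to make those casts survivable: the
 * former tags the structure, the latter fakes ip_kotype().
 */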
typedef struct device_pager {
	int			*pager;		/* pager workaround pointer */
	unsigned int		pager_ikot;	/* fake ip_kotype() */
	unsigned int		ref_count;	/* reference count */
	memory_object_control_t	control_handle;	/* mem object's cntrl handle */
	device_port_t		device_handle;	/* device_handle */
	vm_size_t		size;
	int			flags;
} *device_pager_t;

device_pager_t
device_pager_lookup(		/* forward */
	memory_object_t);

device_pager_t
device_object_create(void);	/* forward */

zone_t	device_pager_zone;

#define DEVICE_PAGER_NULL	((device_pager_t) 0)

#define MAX_DNODE		10000

/*
 * device_pager_bootstrap:
 *
 * Initialize the zone from which device pager structures are allocated.
 */
void
device_pager_bootstrap(void)
{
	register vm_size_t size;

	size = (vm_size_t) sizeof(struct device_pager);
	device_pager_zone = zinit(size, (vm_size_t) MAX_DNODE*size,
			PAGE_SIZE, "device node pager structures");

	return;
}

/*
 * device_pager_setup:
 *
 * Create a device pager wrapping the given device handle.  The result
 * is returned as a memory_object_t suitable for mapping.
 */
memory_object_t
device_pager_setup(
	__unused memory_object_t device,
	int		device_handle,
	vm_size_t	size,
	int		flags)
{
	device_pager_t	device_object;

	device_object = device_object_create();
	if (device_object == DEVICE_PAGER_NULL)
		panic("device_pager_setup: device_object_create() failed");

	device_object->device_handle = device_handle;
	device_object->size = size;
	device_object->flags = flags;

	return ((memory_object_t)device_object);
}

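/*
 * Typical client flow (a sketch; the mapping step is performed by the
 * caller, outside this file):
 *
 *	pager = device_pager_setup(MEMORY_OBJECT_NULL, handle, size, flags);
 *	... map "pager" into a task's address space, which triggers
 *	    device_pager_init() ...
 *	device_pager_populate_object(pager, offset, page_num, page_size);
 */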
/*
 * device_pager_populate_object:
 *
 * Enter device pages, identified by physical page number, into the
 * VM object backing this pager.
 */
kern_return_t
device_pager_populate_object(
	memory_object_t		device,
	memory_object_offset_t	offset,
	ppnum_t			page_num,
	vm_size_t		size)
{
	device_pager_t	device_object;
	vm_object_t	vm_object;
	kern_return_t	kr;
	upl_t		upl;

	device_object = device_pager_lookup(device);
	if (device_object == DEVICE_PAGER_NULL)
		return KERN_FAILURE;

	vm_object = (vm_object_t)memory_object_control_to_vm_object(
		device_object->control_handle);
	if (vm_object == NULL)
		return KERN_FAILURE;

	kr = vm_object_populate_with_private(
		vm_object, offset, page_num, size);
	if (kr != KERN_SUCCESS)
		return kr;

	if (!vm_object->phys_contiguous) {
		int null_size = 0;
		kr = vm_object_upl_request(vm_object,
			(vm_object_offset_t)offset, size, &upl, NULL,
			&null_size, (UPL_NO_SYNC | UPL_CLEAN_IN_PLACE));

		if (kr != KERN_SUCCESS)
			panic("device_pager_populate_object: list_req failed");

		upl_commit(upl, NULL, 0);
		upl_deallocate(upl);
	}

	return kr;
}

/*
 * device_pager_lookup:
 *
 * Convert a memory_object_t back into the device pager it names,
 * asserting that it carries the device pager tag.
 */
device_pager_t
device_pager_lookup(
	memory_object_t	name)
{
	device_pager_t	device_object;

	device_object = (device_pager_t)name;
	assert(device_object->pager == &device_pager_workaround);
	return (device_object);
}

/*
 * device_pager_init:
 *
 * Called when the memory object is first set up: record the control
 * handle and push the pager's flags onto the backing VM object.
 */
kern_return_t
device_pager_init(
	memory_object_t		mem_obj,
	memory_object_control_t	control,
	__unused vm_size_t	pg_size)
{
	device_pager_t	device_object;
	kern_return_t	kr;
	memory_object_attr_info_data_t	attributes;

	vm_object_t	vm_object;

	if (control == MEMORY_OBJECT_CONTROL_NULL)
		return KERN_INVALID_ARGUMENT;

	device_object = device_pager_lookup(mem_obj);

	memory_object_control_reference(control);
	device_object->control_handle = control;

	/* The following settings should be done through an expanded change */
	/* attributes call */

	vm_object = (vm_object_t)memory_object_control_to_vm_object(control);
	vm_object_lock(vm_object);
	vm_object->private = TRUE;
	if (device_object->flags & DEVICE_PAGER_CONTIGUOUS)
		vm_object->phys_contiguous = TRUE;
	if (device_object->flags & DEVICE_PAGER_NOPHYSCACHE)
		vm_object->nophyscache = TRUE;

	vm_object->wimg_bits = device_object->flags & VM_WIMG_MASK;
	vm_object_unlock(vm_object);

	attributes.copy_strategy = MEMORY_OBJECT_COPY_DELAY;
	/* attributes.cluster_size = (1 << (CLUSTER_SHIFT + PAGE_SHIFT));*/
	attributes.cluster_size = (1 << (PAGE_SHIFT));
	attributes.may_cache_object = FALSE;
	attributes.temporary = TRUE;

	kr = memory_object_change_attributes(
			control,
			MEMORY_OBJECT_ATTRIBUTE_INFO,
			(memory_object_info_t) &attributes,
			MEMORY_OBJECT_ATTR_INFO_COUNT);
	if (kr != KERN_SUCCESS)
		panic("device_pager_init: memory_object_change_attributes() failed");

	return (KERN_SUCCESS);
}

/*
 * device_pager_data_return:
 *
 * Page-out path: forward the returned data to the device via
 * device_data_action().
 */
/*ARGSUSED6*/
kern_return_t
device_pager_data_return(
	memory_object_t		mem_obj,
	memory_object_offset_t	offset,
	vm_size_t		data_cnt,
	__unused boolean_t	dirty,
	__unused boolean_t	kernel_copy,
	__unused int		upl_flags)
{
	device_pager_t	device_object;

	device_object = device_pager_lookup(mem_obj);
	if (device_object == DEVICE_PAGER_NULL)
		panic("device_pager_data_return: lookup failed");

	return device_data_action(device_object->device_handle,
				(ipc_port_t) device_object,
				VM_PROT_READ | VM_PROT_WRITE,
				offset, data_cnt);
}

/*
 * device_pager_data_request:
 *
 * Page-in path: ask the device to supply the requested range.
 */
kern_return_t
device_pager_data_request(
	memory_object_t		mem_obj,
	memory_object_offset_t	offset,
	vm_size_t		length,
	__unused vm_prot_t	protection_required)
{
	device_pager_t	device_object;

	device_object = device_pager_lookup(mem_obj);

	if (device_object == DEVICE_PAGER_NULL)
		panic("device_pager_data_request: lookup failed");

	device_data_action(device_object->device_handle,
			(ipc_port_t) device_object,
			VM_PROT_READ, offset, length);
	return KERN_SUCCESS;
}

/*
 * device_pager_reference:
 *
 * Take an additional reference on the pager.
 */
void
device_pager_reference(
	memory_object_t	mem_obj)
{
	device_pager_t	device_object;
	unsigned int	new_ref_count;

	device_object = device_pager_lookup(mem_obj);
	new_ref_count = hw_atomic_add(&device_object->ref_count, 1);
	assert(new_ref_count > 1);
}

/*
 * device_pager_deallocate:
 *
 * Drop a reference; on the last release, close the device, release the
 * memory object control handle, and free the pager structure.
 */
void
device_pager_deallocate(
	memory_object_t	mem_obj)
{
	device_pager_t		device_object;
	memory_object_control_t	device_control;

	device_object = device_pager_lookup(mem_obj);

	if (hw_atomic_sub(&device_object->ref_count, 1) == 0) {
		if (device_object->device_handle != (device_port_t) NULL) {
			device_close(device_object->device_handle);
			device_object->device_handle = (device_port_t) NULL;
		}
		device_control = device_object->control_handle;
		if (device_control != MEMORY_OBJECT_CONTROL_NULL) {
			/*
			 * The VM object should already have been disconnected
			 * from the pager at this point.
			 * We still have to release the "memory object control"
			 * handle.
			 */
			assert(device_control->object == VM_OBJECT_NULL);
			memory_object_control_deallocate(device_control);
			device_object->control_handle =
				MEMORY_OBJECT_CONTROL_NULL;
		}

		zfree(device_pager_zone, device_object);
	}
	return;
}

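/*
 * device_pager_data_initialize is part of the memory object interface
 * but is never legitimately invoked on a device pager, so reaching it
 * is treated as a fatal error.
 */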
kern_return_t
device_pager_data_initialize(
	__unused memory_object_t	mem_obj,
	__unused memory_object_offset_t	offset,
	__unused vm_size_t		data_cnt)
{
	panic("device_pager_data_initialize");
	return KERN_FAILURE;
}

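/*
 * Unlock requests are not supported for device memory; the caller
 * always gets KERN_FAILURE.
 */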
kern_return_t
device_pager_data_unlock(
	__unused memory_object_t	mem_obj,
	__unused memory_object_offset_t	offset,
	__unused vm_size_t		size,
	__unused vm_prot_t		desired_access)
{
	return KERN_FAILURE;
}

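/*
 * Nothing to do at termination time; all cleanup happens when the last
 * reference is dropped in device_pager_deallocate().
 */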
kern_return_t
device_pager_terminate(
	__unused memory_object_t	mem_obj)
{
	return KERN_SUCCESS;
}


/*
 * device_pager_synchronize:
 *
 * There is nothing to flush for device memory, so simply report the
 * sync as complete.
 */
kern_return_t
device_pager_synchronize(
	memory_object_t		mem_obj,
	memory_object_offset_t	offset,
	vm_offset_t		length,
	__unused vm_sync_t	sync_flags)
{
	device_pager_t	device_object;

	device_object = device_pager_lookup(mem_obj);

	memory_object_synchronize_completed(
		device_object->control_handle, offset, length);

	return KERN_SUCCESS;
}

/*
 * device_pager_unmap:
 *
 * No action is needed when the last mapping goes away.
 */
kern_return_t
device_pager_unmap(
	__unused memory_object_t	mem_obj)
{
	return KERN_SUCCESS;
}


/*
 * device_object_create:
 *
 * Allocate and initialize a fresh device pager structure.
 */
device_pager_t
device_object_create(void)
{
	register device_pager_t	device_object;

	device_object = (struct device_pager *) zalloc(device_pager_zone);
	if (device_object == DEVICE_PAGER_NULL)
		return (DEVICE_PAGER_NULL);
	device_object->pager = &device_pager_workaround;
	device_object->pager_ikot = IKOT_MEMORY_OBJECT;
	device_object->ref_count = 1;
	device_object->control_handle = MEMORY_OBJECT_CONTROL_NULL;

	return (device_object);
}