/* osfmk/vm/device_vm.c (from xnu-1228.5.18) */
/*
 * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <sys/errno.h>

#include <mach/mach_types.h>
#include <mach/kern_return.h>
#include <mach/memory_object_control.h>
#include <mach/memory_object_types.h>
#include <mach/port.h>
#include <mach/policy.h>
#include <mach/upl.h>
#include <kern/kern_types.h>
#include <kern/ipc_kobject.h>
#include <kern/host.h>
#include <kern/thread.h>
#include <ipc/ipc_port.h>
#include <ipc/ipc_space.h>
#include <device/device_port.h>
#include <vm/memory_object.h>
#include <vm/vm_pageout.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_protos.h>


/* Device VM COMPONENT INTERFACES */

/*
 * Device PAGER
 */

/* until component support available */
const struct memory_object_pager_ops device_pager_ops = {
	device_pager_reference,
	device_pager_deallocate,
	device_pager_init,
	device_pager_terminate,
	device_pager_data_request,
	device_pager_data_return,
	device_pager_data_initialize,
	device_pager_data_unlock,
	device_pager_synchronize,
	device_pager_unmap,
	"device pager"
};
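
/*
 * Generic memory_object_*() operations dispatch through this vector.  A
 * minimal sketch of that dispatch (illustrative only; "mo_pager_ops" is
 * the assumed field name in "struct memory_object", whose layout the
 * pager below mirrors):
 */
#if 0
static void
example_reference(memory_object_t memory_object)
{
	/* vectors to device_pager_reference() for a device pager */
	(memory_object->mo_pager_ops->memory_object_reference)(memory_object);
}
#endif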

typedef int device_port_t;

/*
 * The start of "struct device_pager" MUST match a "struct memory_object".
 */
typedef struct device_pager {
	memory_object_pager_ops_t pager_ops;	/* == &device_pager_ops */
	unsigned int		pager_ikot;	/* fake ip_kotype() */
	unsigned int		ref_count;	/* reference count */
	memory_object_control_t	control_handle;	/* mem object's cntrl handle */
	device_port_t		device_handle;	/* device_handle */
	vm_size_t		size;
	int			flags;
} *device_pager_t;
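
/*
 * Because the layouts line up, a device_pager_t is handed out as a
 * memory_object_t and recovered again by a simple cast; see
 * device_pager_lookup() below, which asserts that pager_ops matches.
 */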

device_pager_t
device_pager_lookup(		/* forward */
	memory_object_t);

device_pager_t
device_object_create(void);	/* forward */

zone_t	device_pager_zone;

#define DEVICE_PAGER_NULL	((device_pager_t) 0)

#define MAX_DNODE		10000

/*
 * device_pager_bootstrap: create the zone from which device pagers
 * are allocated.
 */
void
device_pager_bootstrap(void)
{
	register vm_size_t	size;

	size = (vm_size_t) sizeof(struct device_pager);
	device_pager_zone = zinit(size, (vm_size_t) MAX_DNODE*size,
				PAGE_SIZE, "device node pager structures");

	return;
}

/*
 * device_pager_setup: wrap a device-backed range of "size" bytes in a
 * new pager and return it as a memory object.
 */
memory_object_t
device_pager_setup(
	__unused memory_object_t device,
	int		device_handle,
	vm_size_t	size,
	int		flags)
{
	device_pager_t	device_object;

	device_object = device_object_create();
	if (device_object == DEVICE_PAGER_NULL)
		panic("device_pager_setup: device_object_create() failed");

	device_object->device_handle = device_handle;
	device_object->size = size;
	device_object->flags = flags;

	return((memory_object_t)device_object);
}
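
/*
 * Usage sketch (hypothetical caller, such as a driver mapping device
 * memory; "my_device_handle" is an assumed, device-defined token and
 * not part of this file):
 */
#if 0
static memory_object_t
example_setup(int my_device_handle)
{
	/* wrap a four-page device range in a new pager */
	return device_pager_setup((memory_object_t) NULL,
				  my_device_handle,
				  4 * PAGE_SIZE,
				  DEVICE_PAGER_CONTIGUOUS);
}
#endif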

/*
 * device_pager_populate_object: enter "size" bytes of device memory,
 * starting at physical page "page_num", into the pager's VM object at
 * "offset".
 */
kern_return_t
device_pager_populate_object(
	memory_object_t		device,
	memory_object_offset_t	offset,
	ppnum_t			page_num,
	vm_size_t		size)
{
	device_pager_t	device_object;
	vm_object_t	vm_object;
	kern_return_t	kr;
	upl_t		upl;

	device_object = device_pager_lookup(device);
	if(device_object == DEVICE_PAGER_NULL)
		return KERN_FAILURE;

	vm_object = (vm_object_t)memory_object_control_to_vm_object(
					device_object->control_handle);
	if(vm_object == NULL)
		return KERN_FAILURE;

	kr = vm_object_populate_with_private(
				vm_object, offset, page_num, size);
	if(kr != KERN_SUCCESS)
		return kr;

	if(!vm_object->phys_contiguous) {
		unsigned int null_size = 0;
		kr = vm_object_upl_request(vm_object,
				(vm_object_offset_t)offset, size, &upl, NULL,
				&null_size, (UPL_NO_SYNC | UPL_CLEAN_IN_PLACE));
		if(kr != KERN_SUCCESS)
			panic("device_pager_populate_object: list_req failed");

		upl_commit(upl, NULL, 0);
		upl_deallocate(upl);
	}

	return kr;
}
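
/*
 * Sketch of backing an object page with device memory (hypothetical;
 * the pager and physical page number come from the caller):
 */
#if 0
static kern_return_t
example_populate(memory_object_t pager, ppnum_t phys_page)
{
	/* enter one physical page at byte offset 0 of the object */
	return device_pager_populate_object(pager, 0, phys_page, PAGE_SIZE);
}
#endif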

/*
 * device_pager_lookup: convert a memory object back to the device
 * pager that implements it.
 */
device_pager_t
device_pager_lookup(
	memory_object_t	name)
{
	device_pager_t	device_object;

	device_object = (device_pager_t)name;
	assert(device_object->pager_ops == &device_pager_ops);
	return (device_object);
}

/*
 * device_pager_init: attach the pager to its VM object and set the
 * object's attributes (private, contiguity, cacheability, WIMG bits)
 * from the pager's flags.
 */
kern_return_t
device_pager_init(
	memory_object_t		mem_obj,
	memory_object_control_t	control,
	__unused vm_size_t	pg_size)
{
	device_pager_t	device_object;
	kern_return_t	kr;
	memory_object_attr_info_data_t	attributes;
	vm_object_t	vm_object;

	if (control == MEMORY_OBJECT_CONTROL_NULL)
		return KERN_INVALID_ARGUMENT;

	device_object = device_pager_lookup(mem_obj);

	memory_object_control_reference(control);
	device_object->control_handle = control;

	/* The following settings should be done through an expanded change */
	/* attributes call */

	vm_object = (vm_object_t)memory_object_control_to_vm_object(control);
	vm_object_lock(vm_object);
	vm_object->private = TRUE;
	if(device_object->flags & DEVICE_PAGER_CONTIGUOUS)
		vm_object->phys_contiguous = TRUE;
	if(device_object->flags & DEVICE_PAGER_NOPHYSCACHE)
		vm_object->nophyscache = TRUE;

	vm_object->wimg_bits = device_object->flags & VM_WIMG_MASK;
	vm_object_unlock(vm_object);

	attributes.copy_strategy = MEMORY_OBJECT_COPY_DELAY;
	/* attributes.cluster_size = (1 << (CLUSTER_SHIFT + PAGE_SHIFT));*/
	attributes.cluster_size = (1 << (PAGE_SHIFT));
	attributes.may_cache_object = FALSE;
	attributes.temporary = TRUE;

	kr = memory_object_change_attributes(
					control,
					MEMORY_OBJECT_ATTRIBUTE_INFO,
					(memory_object_info_t) &attributes,
					MEMORY_OBJECT_ATTR_INFO_COUNT);
	if (kr != KERN_SUCCESS)
		panic("device_pager_init: memory_object_change_attributes() failed");

	return(KERN_SUCCESS);
}
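
/*
 * The low bits of the pager's flags double as the mapping's WIMG cache
 * mode (see the VM_WIMG_MASK line above).  A sketch of composing flags
 * for device_pager_setup() (VM_WIMG_IO is assumed here to be the
 * uncached I/O mode; illustrative only):
 */
#if 0
int example_flags = DEVICE_PAGER_CONTIGUOUS | (VM_WIMG_IO & VM_WIMG_MASK);
#endif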

/*
 * device_pager_data_return: push modified data back to the device by
 * forwarding the request to device_data_action().
 */
/*ARGSUSED6*/
kern_return_t
device_pager_data_return(
	memory_object_t			mem_obj,
	memory_object_offset_t		offset,
	vm_size_t			data_cnt,
	__unused memory_object_offset_t	*resid_offset,
	__unused int			*io_error,
	__unused boolean_t		dirty,
	__unused boolean_t		kernel_copy,
	__unused int			upl_flags)
{
	device_pager_t	device_object;

	device_object = device_pager_lookup(mem_obj);
	if (device_object == DEVICE_PAGER_NULL)
		panic("device_pager_data_return: lookup failed");

	return device_data_action(device_object->device_handle,
				  (ipc_port_t) device_object,
				  VM_PROT_READ | VM_PROT_WRITE,
				  offset, data_cnt);
}

/*
 * device_pager_data_request: satisfy a page-in request by asking the
 * device, via device_data_action(), to supply the data.
 */
kern_return_t
device_pager_data_request(
	memory_object_t		mem_obj,
	memory_object_offset_t	offset,
	vm_size_t		length,
	__unused vm_prot_t	protection_required,
	__unused memory_object_fault_info_t	fault_info)
{
	device_pager_t	device_object;

	device_object = device_pager_lookup(mem_obj);

	if (device_object == DEVICE_PAGER_NULL)
		panic("device_pager_data_request: lookup failed");

	device_data_action(device_object->device_handle,
			   (ipc_port_t) device_object,
			   VM_PROT_READ, offset, length);
	return KERN_SUCCESS;
}
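
/*
 * Note: device_data_action() is implemented by the device layer (IOKit
 * in practice, an assumption here rather than something this file
 * states).  The expected response is for the device side to push pages
 * into the object, e.g. via device_pager_populate_object() above.
 */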

/*
 * device_pager_reference: take an additional reference on the pager.
 */
void
device_pager_reference(
	memory_object_t		mem_obj)
{
	device_pager_t		device_object;
	unsigned int		new_ref_count;

	device_object = device_pager_lookup(mem_obj);
	new_ref_count = hw_atomic_add(&device_object->ref_count, 1);
	assert(new_ref_count > 1);
}

/*
 * device_pager_deallocate: drop a reference; on the last release,
 * close the device, release the memory object control handle and free
 * the pager.
 */
void
device_pager_deallocate(
	memory_object_t		mem_obj)
{
	device_pager_t		device_object;
	memory_object_control_t	device_control;

	device_object = device_pager_lookup(mem_obj);

	if (hw_atomic_sub(&device_object->ref_count, 1) == 0) {
		if (device_object->device_handle != (device_port_t) NULL) {
			device_close(device_object->device_handle);
			device_object->device_handle = (device_port_t) NULL;
		}
		device_control = device_object->control_handle;
		if (device_control != MEMORY_OBJECT_CONTROL_NULL) {
			/*
			 * The VM object should already have been disconnected
			 * from the pager at this point.
			 * We still have to release the "memory object control"
			 * handle.
			 */
			assert(device_control->moc_object == VM_OBJECT_NULL);
			memory_object_control_deallocate(device_control);
			device_object->control_handle =
				MEMORY_OBJECT_CONTROL_NULL;
		}

		zfree(device_pager_zone, device_object);
	}
	return;
}
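
/*
 * Lifetime summary: device_object_create() below starts the pager at
 * ref_count == 1; device_pager_reference() and device_pager_deallocate()
 * then pair up, and the final deallocate tears everything down.
 */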

/*
 * device_pager_data_initialize: not supported for device pagers.
 */
kern_return_t
device_pager_data_initialize(
	__unused memory_object_t	mem_obj,
	__unused memory_object_offset_t	offset,
	__unused vm_size_t		data_cnt)
{
	panic("device_pager_data_initialize");
	return KERN_FAILURE;
}

/*
 * device_pager_data_unlock: not supported for device pagers.
 */
kern_return_t
device_pager_data_unlock(
	__unused memory_object_t	mem_obj,
	__unused memory_object_offset_t	offset,
	__unused vm_size_t		size,
	__unused vm_prot_t		desired_access)
{
	return KERN_FAILURE;
}

/*
 * device_pager_terminate: nothing to do here; teardown happens in
 * device_pager_deallocate().
 */
kern_return_t
device_pager_terminate(
	__unused memory_object_t	mem_obj)
{
	return KERN_SUCCESS;
}

/*
 * device_pager_synchronize: no device-side work is required; just
 * report the synchronize as complete.
 */
kern_return_t
device_pager_synchronize(
	memory_object_t		mem_obj,
	memory_object_offset_t	offset,
	vm_offset_t		length,
	__unused vm_sync_t	sync_flags)
{
	device_pager_t	device_object;

	device_object = device_pager_lookup(mem_obj);

	memory_object_synchronize_completed(
			device_object->control_handle, offset, length);

	return KERN_SUCCESS;
}

/*
 * device_pager_unmap: nothing to do when the object is unmapped.
 */
kern_return_t
device_pager_unmap(
	__unused memory_object_t	mem_obj)
{
	return KERN_SUCCESS;
}

/*
 * device_object_create: allocate and initialize a new device pager
 * with a single reference.
 */
device_pager_t
device_object_create(void)
{
	register device_pager_t	device_object;

	device_object = (struct device_pager *) zalloc(device_pager_zone);
	if (device_object == DEVICE_PAGER_NULL)
		return(DEVICE_PAGER_NULL);
	device_object->pager_ops = &device_pager_ops;
	device_object->pager_ikot = IKOT_MEMORY_OBJECT;
	device_object->ref_count = 1;
	device_object->control_handle = MEMORY_OBJECT_CONTROL_NULL;

	return(device_object);
}