/*
 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_OSREFERENCE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the
 * License may not be used to create, or enable the creation or
 * redistribution of, unlawful or unlicensed copies of an Apple operating
 * system, or to circumvent, violate, or enable the circumvention or
 * violation of, any terms of an Apple operating system software license
 * agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_OSREFERENCE_HEADER_END@
 */

#include <sys/errno.h>

#include <mach/mach_types.h>
#include <mach/kern_return.h>
#include <mach/memory_object_control.h>
#include <mach/memory_object_types.h>
#include <mach/port.h>
#include <mach/policy.h>
#include <mach/upl.h>
#include <kern/kern_types.h>
#include <kern/ipc_kobject.h>
#include <kern/host.h>
#include <kern/thread.h>
#include <ipc/ipc_port.h>
#include <ipc/ipc_space.h>
#include <device/device_port.h>
#include <vm/memory_object.h>
#include <vm/vm_pageout.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_protos.h>


/* Device VM COMPONENT INTERFACES */

/*
 * Device PAGER
 */

/* until component support available */
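
/*
 * Vector of pager entry points; the VM layer reaches the device pager
 * through the pager_ops field at the head of each pager structure.
 */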
const struct memory_object_pager_ops device_pager_ops = {
	device_pager_reference,
	device_pager_deallocate,
	device_pager_init,
	device_pager_terminate,
	device_pager_data_request,
	device_pager_data_return,
	device_pager_data_initialize,
	device_pager_data_unlock,
	device_pager_synchronize,
	device_pager_unmap,
	"device pager"
};

typedef int device_port_t;

/*
 * The start of "struct device_pager" MUST match a "struct memory_object".
 */
typedef struct device_pager {
	memory_object_pager_ops_t pager_ops;	/* == &device_pager_ops */
	unsigned int		pager_ikot;	/* fake ip_kotype() */
	unsigned int		ref_count;	/* reference count */
	memory_object_control_t	control_handle;	/* mem object's cntrl handle */
	device_port_t		device_handle;	/* device_handle */
	vm_size_t		size;
	int			flags;
} *device_pager_t;
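
/*
 * Because the head of the structure matches "struct memory_object", a
 * device pager is handed to the VM system as a memory_object_t and
 * recovered with a simple cast; see device_pager_lookup() below.
 */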

device_pager_t
device_pager_lookup(		/* forward */
	memory_object_t);

device_pager_t
device_object_create(void);	/* forward */

zone_t	device_pager_zone;

#define DEVICE_PAGER_NULL	((device_pager_t) 0)

#define MAX_DNODE		10000

/*
 * Initialize the zone from which device pager structures are allocated.
 */
void
device_pager_bootstrap(void)
{
	register vm_size_t	size;

	size = (vm_size_t) sizeof(struct device_pager);
	device_pager_zone = zinit(size, (vm_size_t) MAX_DNODE*size,
				  PAGE_SIZE, "device node pager structures");

	return;
}

/*
 * Create and initialize a device pager for the given device handle,
 * covering "size" bytes.
 */
memory_object_t
device_pager_setup(
	__unused memory_object_t device,
	int		device_handle,
	vm_size_t	size,
	int		flags)
{
	device_pager_t	device_object;

	device_object = device_object_create();
	if (device_object == DEVICE_PAGER_NULL)
		panic("device_pager_setup: device_object_create() failed");

	device_object->device_handle = device_handle;
	device_object->size = size;
	device_object->flags = flags;

	return ((memory_object_t)device_object);
}
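
/*
 * Illustrative sketch only, kept out of the build: roughly how a kernel
 * client might create a device pager and back it with a physical page.
 * The pager must be mapped first (which drives device_pager_init() and
 * attaches a control handle) before device_pager_populate_object() can
 * succeed.  The device handle and page number below are hypothetical.
 */
#if 0
static void
example_device_pager_usage(void)
{
	memory_object_t	pager;
	kern_return_t	kr;

	/* create a pager for one page of device memory */
	pager = device_pager_setup(MEMORY_OBJECT_NULL,
				   /* hypothetical device handle */ 42,
				   PAGE_SIZE, DEVICE_PAGER_CONTIGUOUS);

	/* ... map the pager, so that device_pager_init() runs ... */

	/* back offset 0 with a hypothetical physical page number */
	kr = device_pager_populate_object(pager, 0,
					  /* page_num */ (ppnum_t) 0x1234,
					  PAGE_SIZE);
	assert(kr == KERN_SUCCESS);
}
#endif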

/*
 * Enter the given physical page into the VM object that this pager backs.
 */
kern_return_t
device_pager_populate_object(
	memory_object_t		device,
	memory_object_offset_t	offset,
	ppnum_t			page_num,
	vm_size_t		size)
{
	device_pager_t	device_object;
	vm_object_t	vm_object;
	kern_return_t	kr;
	upl_t		upl;

	device_object = device_pager_lookup(device);
	if (device_object == DEVICE_PAGER_NULL)
		return KERN_FAILURE;

	vm_object = (vm_object_t)memory_object_control_to_vm_object(
					device_object->control_handle);
	if (vm_object == NULL)
		return KERN_FAILURE;

	kr = vm_object_populate_with_private(
			vm_object, offset, page_num, size);
	if (kr != KERN_SUCCESS)
		return kr;

	if (!vm_object->phys_contiguous) {
		unsigned int null_size = 0;
		kr = vm_object_upl_request(vm_object,
				(vm_object_offset_t)offset, size, &upl, NULL,
				&null_size, (UPL_NO_SYNC | UPL_CLEAN_IN_PLACE));

		if (kr != KERN_SUCCESS)
			panic("device_pager_populate_object: list_req failed");

		upl_commit(upl, NULL, 0);
		upl_deallocate(upl);
	}

	return kr;
}

/*
 * Convert a memory_object_t back to the device pager that implements it.
 */
device_pager_t
device_pager_lookup(
	memory_object_t	name)
{
	device_pager_t	device_object;

	device_object = (device_pager_t)name;
	assert(device_object->pager_ops == &device_pager_ops);
	return (device_object);
}

/*
 * First-time initialization: attach the memory object control handle,
 * mark the underlying VM object "private", and set its attributes.
 */
kern_return_t
device_pager_init(
	memory_object_t mem_obj,
	memory_object_control_t control,
	__unused vm_size_t pg_size)
{
	device_pager_t			device_object;
	kern_return_t			kr;
	memory_object_attr_info_data_t	attributes;

	vm_object_t	vm_object;

	if (control == MEMORY_OBJECT_CONTROL_NULL)
		return KERN_INVALID_ARGUMENT;

	device_object = device_pager_lookup(mem_obj);

	memory_object_control_reference(control);
	device_object->control_handle = control;

	/* The following settings should be done through an expanded change */
	/* attributes call */

	vm_object = (vm_object_t)memory_object_control_to_vm_object(control);
	vm_object_lock(vm_object);
	vm_object->private = TRUE;
	if (device_object->flags & DEVICE_PAGER_CONTIGUOUS)
		vm_object->phys_contiguous = TRUE;
	if (device_object->flags & DEVICE_PAGER_NOPHYSCACHE)
		vm_object->nophyscache = TRUE;

	vm_object->wimg_bits = device_object->flags & VM_WIMG_MASK;
	vm_object_unlock(vm_object);

	attributes.copy_strategy = MEMORY_OBJECT_COPY_DELAY;
	/* attributes.cluster_size = (1 << (CLUSTER_SHIFT + PAGE_SHIFT));*/
	attributes.cluster_size = (1 << (PAGE_SHIFT));
	attributes.may_cache_object = FALSE;
	attributes.temporary = TRUE;

	kr = memory_object_change_attributes(
					control,
					MEMORY_OBJECT_ATTRIBUTE_INFO,
					(memory_object_info_t) &attributes,
					MEMORY_OBJECT_ATTR_INFO_COUNT);
	if (kr != KERN_SUCCESS)
		panic("device_pager_init: memory_object_change_attributes() failed");

	return (KERN_SUCCESS);
}

/*
 * Hand returned data back to the device via device_data_action().
 */
/*ARGSUSED6*/
kern_return_t
device_pager_data_return(
	memory_object_t			mem_obj,
	memory_object_offset_t		offset,
	vm_size_t			data_cnt,
	__unused memory_object_offset_t	*resid_offset,
	__unused int			*io_error,
	__unused boolean_t		dirty,
	__unused boolean_t		kernel_copy,
	__unused int			upl_flags)
{
	device_pager_t	device_object;

	device_object = device_pager_lookup(mem_obj);
	if (device_object == DEVICE_PAGER_NULL)
		panic("device_pager_data_return: lookup failed");

	return device_data_action(device_object->device_handle,
				  (ipc_port_t) device_object,
				  VM_PROT_READ | VM_PROT_WRITE,
				  offset, data_cnt);
}

/*
 * Satisfy a page-in request by forwarding it to the device via
 * device_data_action().
 */
kern_return_t
device_pager_data_request(
	memory_object_t		mem_obj,
	memory_object_offset_t	offset,
	vm_size_t		length,
	__unused vm_prot_t	protection_required)
{
	device_pager_t	device_object;

	device_object = device_pager_lookup(mem_obj);

	if (device_object == DEVICE_PAGER_NULL)
		panic("device_pager_data_request: lookup failed");

	device_data_action(device_object->device_handle,
			   (ipc_port_t) device_object,
			   VM_PROT_READ, offset, length);
	return KERN_SUCCESS;
}

/*
 * Take an additional reference on the pager.
 */
void
device_pager_reference(
	memory_object_t		mem_obj)
{
	device_pager_t		device_object;
	unsigned int		new_ref_count;

	device_object = device_pager_lookup(mem_obj);
	new_ref_count = hw_atomic_add(&device_object->ref_count, 1);
	assert(new_ref_count > 1);
}

/*
 * Release a reference; on the last release, close the device, drop the
 * memory object control handle, and free the pager structure.
 */
void
device_pager_deallocate(
	memory_object_t		mem_obj)
{
	device_pager_t		device_object;
	memory_object_control_t	device_control;

	device_object = device_pager_lookup(mem_obj);

	if (hw_atomic_sub(&device_object->ref_count, 1) == 0) {
		if (device_object->device_handle != (device_port_t) NULL) {
			device_close(device_object->device_handle);
			device_object->device_handle = (device_port_t) NULL;
		}
		device_control = device_object->control_handle;
		if (device_control != MEMORY_OBJECT_CONTROL_NULL) {
			/*
			 * The VM object should already have been disconnected
			 * from the pager at this point.
			 * We still have to release the "memory object control"
			 * handle.
			 */
			assert(device_control->moc_object == VM_OBJECT_NULL);
			memory_object_control_deallocate(device_control);
			device_object->control_handle =
				MEMORY_OBJECT_CONTROL_NULL;
		}

		zfree(device_pager_zone, device_object);
	}
	return;
}

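/*
 * Device pagers never initialize backing data; a call here is fatal.
 */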
kern_return_t
device_pager_data_initialize(
	__unused memory_object_t	mem_obj,
	__unused memory_object_offset_t	offset,
	__unused vm_size_t		data_cnt)
{
	panic("device_pager_data_initialize");
	return KERN_FAILURE;
}

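/*
 * Unlock requests are not supported by the device pager.
 */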
kern_return_t
device_pager_data_unlock(
	__unused memory_object_t	mem_obj,
	__unused memory_object_offset_t	offset,
	__unused vm_size_t		size,
	__unused vm_prot_t		desired_access)
{
	return KERN_FAILURE;
}

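/*
 * Nothing to clean up when the memory object is terminated.
 */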
kern_return_t
device_pager_terminate(
	__unused memory_object_t	mem_obj)
{
	return KERN_SUCCESS;
}

/*
 * Acknowledge a synchronize request immediately; the device pager has
 * nothing of its own to flush.
 */
kern_return_t
device_pager_synchronize(
	memory_object_t		mem_obj,
	memory_object_offset_t	offset,
	vm_offset_t		length,
	__unused vm_sync_t	sync_flags)
{
	device_pager_t	device_object;

	device_object = device_pager_lookup(mem_obj);

	memory_object_synchronize_completed(
			device_object->control_handle, offset, length);

	return KERN_SUCCESS;
}

/*
 * Called when the last mapping of the memory object goes away;
 * there is nothing to do here.
 */
kern_return_t
device_pager_unmap(
	__unused memory_object_t	mem_obj)
{
	return KERN_SUCCESS;
}

/*
 * Allocate and initialize a fresh device pager structure.
 */
device_pager_t
device_object_create(void)
{
	register device_pager_t	device_object;

	device_object = (struct device_pager *) zalloc(device_pager_zone);
	if (device_object == DEVICE_PAGER_NULL)
		return (DEVICE_PAGER_NULL);
	device_object->pager_ops = &device_pager_ops;
	device_object->pager_ikot = IKOT_MEMORY_OBJECT;
	device_object->ref_count = 1;
	device_object->control_handle = MEMORY_OBJECT_CONTROL_NULL;

	return (device_object);
}