/*
 * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <sys/errno.h>

#include <mach/mach_types.h>
#include <mach/kern_return.h>
#include <mach/memory_object_control.h>
#include <mach/memory_object_types.h>
#include <mach/port.h>
#include <mach/policy.h>
#include <mach/upl.h>
#include <kern/kern_types.h>
#include <kern/ipc_kobject.h>
#include <kern/host.h>
#include <kern/thread.h>
#include <ipc/ipc_port.h>
#include <ipc/ipc_space.h>
#include <device/device_port.h>
#include <vm/memory_object.h>
#include <vm/vm_pageout.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_protos.h>


/* Device VM COMPONENT INTERFACES */

/*
 * Device PAGER
 */

/* until component support available */

const struct memory_object_pager_ops device_pager_ops = {
	device_pager_reference,
	device_pager_deallocate,
	device_pager_init,
	device_pager_terminate,
	device_pager_data_request,
	device_pager_data_return,
	device_pager_data_initialize,
	device_pager_data_unlock,
	device_pager_synchronize,
	device_pager_map,
	device_pager_last_unmap,
	"device pager"
};
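
/*
 * Illustrative sketch, not part of the original source: the generic
 * memory-object layer reaches a concrete pager by indirecting through
 * its ops table, roughly as below.  The "mo_pager_ops" field and the
 * "memory_object_data_request" member name follow the initializer
 * order above, but treat the exact spellings as assumptions.
 */
#if 0
kern_return_t
example_data_request_dispatch(
	memory_object_t			mem_obj,
	memory_object_offset_t		offset,
	memory_object_cluster_size_t	length,
	vm_prot_t			desired_access,
	memory_object_fault_info_t	fault_info)
{
	/* for a device pager, mo_pager_ops == &device_pager_ops */
	return (mem_obj->mo_pager_ops->memory_object_data_request)(
		mem_obj, offset, length, desired_access, fault_info);
}
#endif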

typedef uintptr_t device_port_t;

/*
 * The start of "struct device_pager" MUST match a "struct memory_object".
 */
typedef struct device_pager {
	struct ipc_object_header pager_header;	/* fake ip_kotype() */
	memory_object_pager_ops_t pager_ops;	/* == &device_pager_ops */
	unsigned int		ref_count;	/* reference count */
	memory_object_control_t	control_handle;	/* mem object's cntrl handle */
	device_port_t		device_handle;	/* device_handle */
	vm_size_t		size;
	int			flags;
} *device_pager_t;

#define pager_ikot pager_header.io_bits

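/*
 * Illustrative only, not in the original source: because the layouts
 * overlap, the same pointer serves as both the generic handle and the
 * concrete pager; device_pager_lookup() below depends on exactly this.
 * "a_pager" is a hypothetical variable.
 */
#if 0
	memory_object_t	mem_obj = (memory_object_t) a_pager;
	device_pager_t	pager   = (device_pager_t) mem_obj;	/* same pointer */
#endif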

device_pager_t
device_pager_lookup(		/* forward */
	memory_object_t);

device_pager_t
device_object_create(void);	/* forward */

zone_t	device_pager_zone;

#define	DEVICE_PAGER_NULL	((device_pager_t) 0)

#define	MAX_DNODE		10000

/*
 * device_pager_bootstrap:
 *
 * Set up the zone from which device pager structures are allocated.
 */
void
device_pager_bootstrap(void)
{
	register vm_size_t size;

	size = (vm_size_t) sizeof(struct device_pager);
	device_pager_zone = zinit(size, (vm_size_t) MAX_DNODE * size,
				  PAGE_SIZE, "device node pager structures");

	return;
}

/*
 * device_pager_setup:
 *
 * Create a device pager for the given device handle and return it as
 * an abstract memory object.
 */
memory_object_t
device_pager_setup(
	__unused memory_object_t device,
	uintptr_t	device_handle,
	vm_size_t	size,
	int		flags)
{
	device_pager_t	device_object;

	device_object = device_object_create();
	if (device_object == DEVICE_PAGER_NULL)
		panic("device_pager_setup: device_object_create() failed");

	device_object->device_handle = device_handle;
	device_object->size = size;
	device_object->flags = flags;

	return ((memory_object_t) device_object);
}
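
/*
 * Illustrative sketch, not in the original source: a driver would
 * typically wrap a device memory range like this and then map the
 * resulting memory object into a task.  "dev_handle" and "region_size"
 * are hypothetical; DEVICE_PAGER_CONTIGUOUS comes from this file's own
 * flag handling in device_pager_init().
 */
#if 0
	memory_object_t	pager;

	pager = device_pager_setup(MEMORY_OBJECT_NULL,
				   (uintptr_t) dev_handle,
				   round_page(region_size),
				   DEVICE_PAGER_CONTIGUOUS);
#endif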

/*
 * device_pager_populate_object:
 *
 * Enter the given physical page(s) into the VM object backed by this
 * pager; called by the device driver to supply the memory the pager
 * "pages in".
 */
kern_return_t
device_pager_populate_object(
	memory_object_t		device,
	memory_object_offset_t	offset,
	ppnum_t			page_num,
	vm_size_t		size)
{
	device_pager_t	device_object;
	vm_object_t	vm_object;
	kern_return_t	kr;
	upl_t		upl;

	device_object = device_pager_lookup(device);
	if (device_object == DEVICE_PAGER_NULL)
		return KERN_FAILURE;

	vm_object = (vm_object_t) memory_object_control_to_vm_object(
		device_object->control_handle);
	if (vm_object == NULL)
		return KERN_FAILURE;

	kr = vm_object_populate_with_private(
		vm_object, offset, page_num, size);
	if (kr != KERN_SUCCESS)
		return kr;

	if (!vm_object->phys_contiguous) {
		unsigned int null_size = 0;
		assert((upl_size_t) size == size);
		kr = vm_object_upl_request(vm_object,
					   (vm_object_offset_t) offset,
					   (upl_size_t) size, &upl, NULL,
					   &null_size,
					   (UPL_NO_SYNC | UPL_CLEAN_IN_PLACE));
		if (kr != KERN_SUCCESS)
			panic("device_pager_populate_object: list_req failed");

		upl_commit(upl, NULL, 0);
		upl_deallocate(upl);
	}

	return kr;
}
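
/*
 * Illustrative sketch, not in the original source: once a driver has
 * resolved a device offset to a physical page, it hands that page to
 * the pager's backing object.  "pager" and "phys_page" are
 * hypothetical values from the surrounding driver context.
 */
#if 0
	kern_return_t	kr;

	kr = device_pager_populate_object(pager,
					  (memory_object_offset_t) 0,
					  phys_page,
					  PAGE_SIZE);
	if (kr != KERN_SUCCESS) {
		/* the page could not be entered into the backing object */
		return kr;
	}
#endif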

/*
 * device_pager_lookup:
 *
 * Convert an abstract memory object back into the concrete device
 * pager it fronts; see the layout note on "struct device_pager" above.
 */
device_pager_t
device_pager_lookup(
	memory_object_t	name)
{
	device_pager_t	device_object;

	device_object = (device_pager_t) name;
	assert(device_object->pager_ops == &device_pager_ops);
	return (device_object);
}

/*
 * device_pager_init:
 *
 * Wire the pager to its memory object control handle and set the
 * attributes of the backing VM object from the pager's flags.
 */
kern_return_t
device_pager_init(
	memory_object_t		mem_obj,
	memory_object_control_t	control,
	__unused memory_object_cluster_size_t pg_size)
{
	device_pager_t	device_object;
	kern_return_t	kr;
	memory_object_attr_info_data_t attributes;

	vm_object_t	vm_object;


	if (control == MEMORY_OBJECT_CONTROL_NULL)
		return KERN_INVALID_ARGUMENT;

	device_object = device_pager_lookup(mem_obj);

	memory_object_control_reference(control);
	device_object->control_handle = control;


	/* The following settings should be done through an expanded change */
	/* attributes call */

	vm_object = (vm_object_t) memory_object_control_to_vm_object(control);
	vm_object_lock(vm_object);
	vm_object->private = TRUE;
	if (device_object->flags & DEVICE_PAGER_CONTIGUOUS)
		vm_object->phys_contiguous = TRUE;
	if (device_object->flags & DEVICE_PAGER_NOPHYSCACHE)
		vm_object->nophyscache = TRUE;

	/* cache/coherency attributes (WIMG bits) for the object's pages */
	vm_object->wimg_bits = device_object->flags & VM_WIMG_MASK;
	vm_object_unlock(vm_object);


	attributes.copy_strategy = MEMORY_OBJECT_COPY_DELAY;
	/* attributes.cluster_size = (1 << (CLUSTER_SHIFT + PAGE_SHIFT)); */
	attributes.cluster_size = (1 << (PAGE_SHIFT));
	attributes.may_cache_object = FALSE;
	attributes.temporary = TRUE;

	kr = memory_object_change_attributes(
		control,
		MEMORY_OBJECT_ATTRIBUTE_INFO,
		(memory_object_info_t) &attributes,
		MEMORY_OBJECT_ATTR_INFO_COUNT);
	if (kr != KERN_SUCCESS)
		panic("device_pager_init: memory_object_change_attributes() failed");

	return (KERN_SUCCESS);
}

/*
 * device_pager_data_return:
 *
 * Hand dirty data back to the device, forwarded to the device driver
 * via device_data_action().
 */
/*ARGSUSED6*/
kern_return_t
device_pager_data_return(
	memory_object_t		mem_obj,
	memory_object_offset_t	offset,
	memory_object_cluster_size_t data_cnt,
	__unused memory_object_offset_t	*resid_offset,
	__unused int		*io_error,
	__unused boolean_t	dirty,
	__unused boolean_t	kernel_copy,
	__unused int		upl_flags)
{
	device_pager_t	device_object;

	device_object = device_pager_lookup(mem_obj);
	if (device_object == DEVICE_PAGER_NULL)
		panic("device_pager_data_return: lookup failed");

	return device_data_action(device_object->device_handle,
				  (ipc_port_t) device_object,
				  VM_PROT_READ | VM_PROT_WRITE,
				  offset, data_cnt);
}

/*
 * device_pager_data_request:
 *
 * Page-in request, forwarded to the device driver via
 * device_data_action(); the driver typically supplies the pages back
 * through device_pager_populate_object().
 */
kern_return_t
device_pager_data_request(
	memory_object_t		mem_obj,
	memory_object_offset_t	offset,
	memory_object_cluster_size_t length,
	__unused vm_prot_t	protection_required,
	__unused memory_object_fault_info_t fault_info)
{
	device_pager_t	device_object;

	device_object = device_pager_lookup(mem_obj);

	if (device_object == DEVICE_PAGER_NULL)
		panic("device_pager_data_request: lookup failed");

	device_data_action(device_object->device_handle,
			   (ipc_port_t) device_object,
			   VM_PROT_READ, offset, length);
	return KERN_SUCCESS;
}
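
/*
 * Illustrative sketch, not in the original source: one way a
 * driver-side device_data_action() implementation might look,
 * resolving the faulting range to a physical page and feeding it back
 * through device_pager_populate_object().  The function and helper
 * names ("example_device_data_action", "example_phys_page_for") are
 * hypothetical; the parameter shape mirrors this file's call sites.
 */
#if 0
kern_return_t
example_device_data_action(
	uintptr_t		device_handle,
	ipc_port_t		device_pager,
	__unused vm_prot_t	protection,
	vm_object_offset_t	offset,
	vm_size_t		size)
{
	ppnum_t	page_num;

	/* hypothetical helper: translate the offset to a physical page */
	page_num = example_phys_page_for(device_handle, offset);

	/* the pager was passed out as an ipc_port_t; cast it back */
	return device_pager_populate_object(
		(memory_object_t) device_pager, offset, page_num, size);
}
#endif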

/*
 * device_pager_reference:
 *
 * Take an additional reference on the pager.
 */
void
device_pager_reference(
	memory_object_t	mem_obj)
{
	device_pager_t	device_object;
	unsigned int	new_ref_count;

	device_object = device_pager_lookup(mem_obj);
	new_ref_count = hw_atomic_add(&device_object->ref_count, 1);
	assert(new_ref_count > 1);
}

/*
 * device_pager_deallocate:
 *
 * Drop a reference; on the last release, close the device handle,
 * release the memory object control handle, and free the pager.
 */
void
device_pager_deallocate(
	memory_object_t	mem_obj)
{
	device_pager_t		device_object;
	memory_object_control_t	device_control;

	device_object = device_pager_lookup(mem_obj);

	if (hw_atomic_sub(&device_object->ref_count, 1) == 0) {
		if (device_object->device_handle != (device_port_t) NULL) {
			device_close(device_object->device_handle);
			device_object->device_handle = (device_port_t) NULL;
		}
		device_control = device_object->control_handle;
		if (device_control != MEMORY_OBJECT_CONTROL_NULL) {
			/*
			 * The VM object should already have been disconnected
			 * from the pager at this point.
			 * We still have to release the "memory object control"
			 * handle.
			 */
			assert(device_control->moc_object == VM_OBJECT_NULL);
			memory_object_control_deallocate(device_control);
			device_object->control_handle =
				MEMORY_OBJECT_CONTROL_NULL;
		}

		zfree(device_pager_zone, device_object);
	}
	return;
}

kern_return_t
device_pager_data_initialize(
	__unused memory_object_t	mem_obj,
	__unused memory_object_offset_t	offset,
	__unused memory_object_cluster_size_t data_cnt)
{
	panic("device_pager_data_initialize");
	return KERN_FAILURE;
}

kern_return_t
device_pager_data_unlock(
	__unused memory_object_t	mem_obj,
	__unused memory_object_offset_t	offset,
	__unused memory_object_size_t	size,
	__unused vm_prot_t		desired_access)
{
	return KERN_FAILURE;
}

kern_return_t
device_pager_terminate(
	__unused memory_object_t	mem_obj)
{
	return KERN_SUCCESS;
}


/*
 * device_pager_synchronize:
 *
 * Nothing to flush for a device pager; just report the sync as
 * complete.
 */
kern_return_t
device_pager_synchronize(
	memory_object_t		mem_obj,
	memory_object_offset_t	offset,
	memory_object_size_t	length,
	__unused vm_sync_t	sync_flags)
{
	device_pager_t	device_object;

	device_object = device_pager_lookup(mem_obj);

	memory_object_synchronize_completed(
		device_object->control_handle, offset, length);

	return KERN_SUCCESS;
}

/*
 * device_pager_map / device_pager_last_unmap:
 *
 * No work is needed when a device memory object is mapped or loses
 * its last mapping.
 */
kern_return_t
device_pager_map(
	__unused memory_object_t mem_obj,
	__unused vm_prot_t	prot)
{
	return KERN_SUCCESS;
}

kern_return_t
device_pager_last_unmap(
	__unused memory_object_t mem_obj)
{
	return KERN_SUCCESS;
}


/*
 * device_object_create:
 *
 * Allocate and initialize a device pager structure with one reference.
 */
device_pager_t
device_object_create(void)
{
	register device_pager_t device_object;

	device_object = (struct device_pager *) zalloc(device_pager_zone);
	if (device_object == DEVICE_PAGER_NULL)
		return (DEVICE_PAGER_NULL);
	device_object->pager_ops = &device_pager_ops;
	device_object->pager_ikot = IKOT_MEMORY_OBJECT;
	device_object->ref_count = 1;
	device_object->control_handle = MEMORY_OBJECT_CONTROL_NULL;


	return (device_object);
}