/*
 * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <sys/errno.h>

#include <mach/mach_types.h>
#include <mach/kern_return.h>
#include <mach/memory_object_control.h>
#include <mach/memory_object_types.h>
#include <mach/port.h>
#include <mach/policy.h>
#include <mach/upl.h>
#include <kern/kern_types.h>
#include <kern/ipc_kobject.h>
#include <kern/host.h>
#include <kern/thread.h>
#include <ipc/ipc_port.h>
#include <ipc/ipc_space.h>
#include <device/device_port.h>
#include <vm/memory_object.h>
#include <vm/vm_pageout.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_protos.h>

/* Device VM COMPONENT INTERFACES */


/*
 * Device PAGER
 */


/* until component support available */
const struct memory_object_pager_ops device_pager_ops = {
	device_pager_reference,
	device_pager_deallocate,
	device_pager_init,
	device_pager_terminate,
	device_pager_data_request,
	device_pager_data_return,
	device_pager_data_initialize,
	device_pager_data_unlock,
	device_pager_synchronize,
	device_pager_map,
	device_pager_last_unmap,
	"device pager"
};
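
/*
 * The VM layer reaches the pager entry points listed here through this
 * ops vector (via the pager_ops field of "struct device_pager" below),
 * so the entries must stay in the order declared by
 * "struct memory_object_pager_ops".  The trailing string is the pager's
 * name, presumably for debugging output.
 */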

typedef int	device_port_t;

/*
 * The start of "struct device_pager" MUST match a "struct memory_object".
 */
typedef struct device_pager {
	memory_object_pager_ops_t pager_ops;	/* == &device_pager_ops */
	unsigned int		pager_ikot;	/* fake ip_kotype() */
	unsigned int		ref_count;	/* reference count */
	memory_object_control_t	control_handle;	/* mem object's cntrl handle */
	device_port_t		device_handle;	/* device_handle */
	vm_size_t		size;
	int			flags;
} *device_pager_t;
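
/*
 * Because the fields at the start of "struct device_pager" mirror
 * "struct memory_object", a device_pager_t can be handed to the VM
 * system as a memory_object_t and recovered later with a plain cast;
 * that is all device_pager_lookup() does, plus an assert on pager_ops
 * as a sanity check.
 */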

device_pager_t
device_pager_lookup(		/* forward */
	memory_object_t);

device_pager_t
device_object_create(void);	/* forward */

zone_t	device_pager_zone;


#define	DEVICE_PAGER_NULL	((device_pager_t) 0)

#define	MAX_DNODE		10000


/*
 *
 */
void
device_pager_bootstrap(void)
{
	register vm_size_t	size;

	size = (vm_size_t) sizeof(struct device_pager);
	device_pager_zone = zinit(size, (vm_size_t) MAX_DNODE * size,
				  PAGE_SIZE, "device node pager structures");

	return;
}
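
/*
 * device_pager_bootstrap() must run before any device pager is
 * created: it sets up the zone that device_object_create() (at the
 * bottom of this file) allocates pager structures from, sized for at
 * most MAX_DNODE of them.
 */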

/*
 *
 */
memory_object_t
device_pager_setup(
	__unused memory_object_t device,
	int		device_handle,
	vm_size_t	size,
	int		flags)
{
	device_pager_t	device_object;

	device_object = device_object_create();
	if (device_object == DEVICE_PAGER_NULL)
		panic("device_pager_setup: device_object_create() failed");

	device_object->device_handle = device_handle;
	device_object->size = size;
	device_object->flags = flags;

	return ((memory_object_t)device_object);
}
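
/*
 * Typical call sequence (an illustrative sketch, not code from this
 * file; "my_device_handle", "region_size" and "first_page" are
 * hypothetical): a driver wraps a range of device memory in a pager,
 * maps it, and then supplies the backing physical pages.
 *
 *	memory_object_t	pager;
 *	kern_return_t	kr;
 *
 *	pager = device_pager_setup(MEMORY_OBJECT_NULL,	(first arg is unused)
 *				   my_device_handle,
 *				   region_size,
 *				   DEVICE_PAGER_CONTIGUOUS);
 *	(map "pager" into a VM map; the VM system runs device_pager_init()
 *	 as part of that, giving the pager its control handle)
 *	kr = device_pager_populate_object(pager, 0, first_page, region_size);
 */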

/*
 *
 */
kern_return_t
device_pager_populate_object(
	memory_object_t		device,
	memory_object_offset_t	offset,
	ppnum_t			page_num,
	vm_size_t		size)
{
	device_pager_t	device_object;
	vm_object_t	vm_object;
	kern_return_t	kr;
	upl_t		upl;

	device_object = device_pager_lookup(device);
	if (device_object == DEVICE_PAGER_NULL)
		return KERN_FAILURE;

	vm_object = (vm_object_t)memory_object_control_to_vm_object(
					device_object->control_handle);
	if (vm_object == NULL)
		return KERN_FAILURE;

	kr = vm_object_populate_with_private(
				vm_object, offset, page_num, size);
	if (kr != KERN_SUCCESS)
		return kr;

	if (!vm_object->phys_contiguous) {
		unsigned int null_size = 0;

		kr = vm_object_upl_request(vm_object,
				(vm_object_offset_t)offset, size, &upl, NULL,
				&null_size, (UPL_NO_SYNC | UPL_CLEAN_IN_PLACE));
		if (kr != KERN_SUCCESS)
			panic("device_pager_populate_object: list_req failed");

		upl_commit(upl, NULL, 0);
		upl_deallocate(upl);
	}

	return kr;
}
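
/*
 * Flow of device_pager_populate_object() above: recover the pager from
 * the memory_object_t, get the VM object behind its control handle,
 * and enter the caller's physical page(s) with
 * vm_object_populate_with_private().  When the object is not
 * physically contiguous, a UPL is requested over the same range and
 * immediately committed, which appears to be there so the newly
 * entered pages are usable right away.
 */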

/*
 *
 */
device_pager_t
device_pager_lookup(
	memory_object_t	name)
{
	device_pager_t	device_object;

	device_object = (device_pager_t)name;
	assert(device_object->pager_ops == &device_pager_ops);
	return (device_object);
}

/*
 *
 */
kern_return_t
device_pager_init(
	memory_object_t		mem_obj,
	memory_object_control_t	control,
	__unused vm_size_t	pg_size)
{
	device_pager_t			device_object;
	kern_return_t			kr;
	memory_object_attr_info_data_t	attributes;
	vm_object_t			vm_object;

	if (control == MEMORY_OBJECT_CONTROL_NULL)
		return KERN_INVALID_ARGUMENT;

	device_object = device_pager_lookup(mem_obj);

	memory_object_control_reference(control);
	device_object->control_handle = control;

	/*
	 * The following settings should be done through an expanded
	 * change-attributes call.
	 */
	vm_object = (vm_object_t)memory_object_control_to_vm_object(control);
	vm_object_lock(vm_object);
	vm_object->private = TRUE;
	if (device_object->flags & DEVICE_PAGER_CONTIGUOUS)
		vm_object->phys_contiguous = TRUE;
	if (device_object->flags & DEVICE_PAGER_NOPHYSCACHE)
		vm_object->nophyscache = TRUE;

	vm_object->wimg_bits = device_object->flags & VM_WIMG_MASK;
	vm_object_unlock(vm_object);

	attributes.copy_strategy = MEMORY_OBJECT_COPY_DELAY;
	/* attributes.cluster_size = (1 << (CLUSTER_SHIFT + PAGE_SHIFT)); */
	attributes.cluster_size = (1 << (PAGE_SHIFT));
	attributes.may_cache_object = FALSE;
	attributes.temporary = TRUE;

	kr = memory_object_change_attributes(
					control,
					MEMORY_OBJECT_ATTRIBUTE_INFO,
					(memory_object_info_t) &attributes,
					MEMORY_OBJECT_ATTR_INFO_COUNT);
	if (kr != KERN_SUCCESS)
		panic("device_pager_init: memory_object_change_attributes() failed");

	return (KERN_SUCCESS);
}
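
/*
 * device_pager_init() is invoked by the VM layer when a memory object
 * control is first attached to this pager.  It takes a reference on
 * the control handle and then shapes the underlying VM object
 * directly: the object is marked private, the contiguity,
 * no-phys-cache and WIMG bits are taken from the flags passed to
 * device_pager_setup(), and the attributes select the delayed-copy
 * strategy with may_cache_object FALSE and temporary TRUE.
 */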

/*
 *
 */
/*ARGSUSED6*/
kern_return_t
device_pager_data_return(
	memory_object_t			mem_obj,
	memory_object_offset_t		offset,
	vm_size_t			data_cnt,
	__unused memory_object_offset_t	*resid_offset,
	__unused int			*io_error,
	__unused boolean_t		dirty,
	__unused boolean_t		kernel_copy,
	__unused int			upl_flags)
{
	device_pager_t	device_object;

	device_object = device_pager_lookup(mem_obj);
	if (device_object == DEVICE_PAGER_NULL)
		panic("device_pager_data_return: lookup failed");

	return device_data_action(device_object->device_handle,
				  (ipc_port_t) device_object,
				  VM_PROT_READ | VM_PROT_WRITE,
				  offset, data_cnt);
}

/*
 *
 */
kern_return_t
device_pager_data_request(
	memory_object_t		mem_obj,
	memory_object_offset_t	offset,
	vm_size_t		length,
	__unused vm_prot_t	protection_required,
	__unused memory_object_fault_info_t	fault_info)
{
	device_pager_t	device_object;

	device_object = device_pager_lookup(mem_obj);

	if (device_object == DEVICE_PAGER_NULL)
		panic("device_pager_data_request: lookup failed");

	device_data_action(device_object->device_handle,
			   (ipc_port_t) device_object,
			   VM_PROT_READ, offset, length);
	return KERN_SUCCESS;
}
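
/*
 * Both device_pager_data_return() and device_pager_data_request()
 * above hand the work to device_data_action(), which is implemented
 * outside this file by the device layer.  The only differences are the
 * protection bits passed and the handling of the result: data_return
 * propagates device_data_action()'s return value, while data_request
 * ignores it and always reports KERN_SUCCESS.
 */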

/*
 *
 */
void
device_pager_reference(
	memory_object_t		mem_obj)
{
	device_pager_t		device_object;
	unsigned int		new_ref_count;

	device_object = device_pager_lookup(mem_obj);
	new_ref_count = hw_atomic_add(&device_object->ref_count, 1);
	assert(new_ref_count > 1);
}

/*
 *
 */
void
device_pager_deallocate(
	memory_object_t		mem_obj)
{
	device_pager_t		device_object;
	memory_object_control_t	device_control;

	device_object = device_pager_lookup(mem_obj);

	if (hw_atomic_sub(&device_object->ref_count, 1) == 0) {
		if (device_object->device_handle != (device_port_t) NULL) {
			device_close(device_object->device_handle);
			device_object->device_handle = (device_port_t) NULL;
		}
		device_control = device_object->control_handle;
		if (device_control != MEMORY_OBJECT_CONTROL_NULL) {
			/*
			 * The VM object should already have been disconnected
			 * from the pager at this point.
			 * We still have to release the "memory object control"
			 * handle.
			 */
			assert(device_control->moc_object == VM_OBJECT_NULL);
			memory_object_control_deallocate(device_control);
			device_object->control_handle =
				MEMORY_OBJECT_CONTROL_NULL;
		}

		zfree(device_pager_zone, device_object);
	}
	return;
}
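
/*
 * Reference counting: device_pager_reference() and
 * device_pager_deallocate() manipulate ref_count with
 * hw_atomic_add()/hw_atomic_sub(), so no lock is taken.  When the last
 * reference is dropped, teardown proceeds in order: close the device
 * handle, release the memory object control handle (the VM object must
 * already have been disconnected, hence the moc_object assert), and
 * return the structure to device_pager_zone.
 */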

kern_return_t
device_pager_data_initialize(
	__unused memory_object_t	mem_obj,
	__unused memory_object_offset_t	offset,
	__unused vm_size_t		data_cnt)
{
	panic("device_pager_data_initialize");
	return KERN_FAILURE;
}

kern_return_t
device_pager_data_unlock(
	__unused memory_object_t	mem_obj,
	__unused memory_object_offset_t	offset,
	__unused vm_size_t		size,
	__unused vm_prot_t		desired_access)
{
	return KERN_FAILURE;
}

kern_return_t
device_pager_terminate(
	__unused memory_object_t	mem_obj)
{
	return KERN_SUCCESS;
}


/*
 *
 */
kern_return_t
device_pager_synchronize(
	memory_object_t		mem_obj,
	memory_object_offset_t	offset,
	vm_offset_t		length,
	__unused vm_sync_t	sync_flags)
{
	device_pager_t	device_object;

	device_object = device_pager_lookup(mem_obj);

	memory_object_synchronize_completed(
			device_object->control_handle, offset, length);

	return KERN_SUCCESS;
}
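
/*
 * The device pager keeps no dirty data of its own to flush, so
 * device_pager_synchronize() simply reports the entire range as done
 * via memory_object_synchronize_completed().
 */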

/*
 *
 */
kern_return_t
device_pager_map(
	__unused memory_object_t	mem_obj,
	__unused vm_prot_t		prot)
{
	return KERN_SUCCESS;
}

kern_return_t
device_pager_last_unmap(
	__unused memory_object_t	mem_obj)
{
	return KERN_SUCCESS;
}


/*
 *
 */
device_pager_t
device_object_create(void)
{
	register device_pager_t	device_object;

	device_object = (struct device_pager *) zalloc(device_pager_zone);
	if (device_object == DEVICE_PAGER_NULL)
		return (DEVICE_PAGER_NULL);
	device_object->pager_ops = &device_pager_ops;
	device_object->pager_ikot = IKOT_MEMORY_OBJECT;
	device_object->ref_count = 1;
	device_object->control_handle = MEMORY_OBJECT_CONTROL_NULL;

	return (device_object);
}
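
/*
 * device_object_create() returns a pager holding a single reference
 * for the caller.  pager_ikot is set to IKOT_MEMORY_OBJECT so the
 * structure passes for a kernel IPC object of the expected type (the
 * field is described above as a fake ip_kotype()).
 */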