/*
 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <sys/errno.h>

#include <mach/mach_types.h>
#include <mach/kern_return.h>
#include <mach/memory_object_control.h>
#include <mach/memory_object_types.h>
#include <mach/port.h>
#include <mach/policy.h>
#include <mach/upl.h>
#include <kern/kern_types.h>
#include <kern/ipc_kobject.h>
#include <kern/host.h>
#include <kern/thread.h>
#include <ipc/ipc_port.h>
#include <ipc/ipc_space.h>
#include <device/device_port.h>
#include <vm/memory_object.h>
#include <vm/vm_pageout.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_protos.h>


/* Device VM COMPONENT INTERFACES */


/*
 * Device PAGER
 */


/*
 * Until memory-object component support is available, this dummy
 * variable's address is stored in each device pager and used by
 * device_pager_lookup() to recognize device pager objects.
 */
int	device_pager_workaround;

typedef int device_port_t;

typedef struct device_pager {
	int			*pager;		/* pager workaround pointer */
	unsigned int		pager_ikot;	/* fake ip_kotype() */
	unsigned int		ref_count;	/* reference count */
	memory_object_control_t	control_handle;	/* mem object's cntrl handle */
	device_port_t		device_handle;	/* handle passed to device_pager_setup() */
	vm_size_t		size;		/* size of the device memory range */
	int			flags;		/* DEVICE_PAGER_* flags */
} *device_pager_t;

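/*
 * The "pager" and "pager_ikot" fields at the top of struct device_pager
 * allow a device_pager_t to be handed out as a memory_object_t and, in
 * the device_data_action() calls below, to be cast to an ipc_port_t
 * whose apparent ip_kotype() is IKOT_MEMORY_OBJECT.  device_pager_lookup()
 * relies on "pager" pointing at device_pager_workaround to recognize a
 * device pager.
 */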


device_pager_t
device_pager_lookup(		/* forward */
	memory_object_t);

device_pager_t
device_object_create(void);	/* forward */

zone_t	device_pager_zone;


#define DEVICE_PAGER_NULL	((device_pager_t) 0)


#define MAX_DNODE		10000



/*
 * device_pager_bootstrap:
 *
 * Initialize the zone from which device pager structures are
 * allocated (at most MAX_DNODE of them).
 */
void
device_pager_bootstrap(void)
{
	register vm_size_t size;

	size = (vm_size_t) sizeof(struct device_pager);
	device_pager_zone = zinit(size, (vm_size_t) MAX_DNODE*size,
				PAGE_SIZE, "device node pager structures");

	return;
}

/*
 * device_pager_setup:
 *
 * Create a device pager for the given device handle and return it
 * as a memory object.
 */
memory_object_t
device_pager_setup(
	__unused memory_object_t device,
	int		device_handle,
	vm_size_t	size,
	int		flags)
{
	device_pager_t	device_object;

	device_object = device_object_create();
	if (device_object == DEVICE_PAGER_NULL)
		panic("device_pager_setup: device_object_create() failed");

	device_object->device_handle = device_handle;
	device_object->size = size;
	device_object->flags = flags;

	return((memory_object_t)device_object);
}
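
/*
 * Illustrative sketch only: the function and identifiers below
 * (device_pager_example, dev_handle, first_page, len) are hypothetical
 * and appear nowhere else in the kernel.  It shows the calling sequence
 * a device-memory client is assumed to follow: create the pager, then
 * hand it the physical pages that back the range.
 */
#if 0
static memory_object_t
device_pager_example(
	int		dev_handle,	/* hypothetical device handle */
	ppnum_t		first_page,	/* first backing physical page */
	vm_size_t	len)
{
	memory_object_t	pager;

	/* create a pager covering "len" bytes of contiguous device memory */
	pager = device_pager_setup(MEMORY_OBJECT_NULL, dev_handle,
				   len, DEVICE_PAGER_CONTIGUOUS);

	/* enter the physical pages into the pager's VM object */
	(void) device_pager_populate_object(pager, 0, first_page, len);

	return pager;
}
#endif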

/*
 * device_pager_populate_object:
 *
 * Enter the device's physical pages, starting at "page_num", into the
 * VM object backing the pager for the range [offset, offset + size).
 */
kern_return_t
device_pager_populate_object(
	memory_object_t		device,
	memory_object_offset_t	offset,
	ppnum_t			page_num,
	vm_size_t		size)
{
	device_pager_t	device_object;
	vm_object_t	vm_object;
	kern_return_t	kr;
	upl_t		upl;

	device_object = device_pager_lookup(device);
	if(device_object == DEVICE_PAGER_NULL)
		return KERN_FAILURE;

	vm_object = (vm_object_t)memory_object_control_to_vm_object(
					device_object->control_handle);
	if(vm_object == NULL)
		return KERN_FAILURE;

	kr = vm_object_populate_with_private(
				vm_object, offset, page_num, size);
	if(kr != KERN_SUCCESS)
		return kr;

	if(!vm_object->phys_contiguous) {
		int	null_size = 0;
		kr = vm_object_upl_request(vm_object,
				(vm_object_offset_t)offset, size, &upl, NULL,
				&null_size, (UPL_NO_SYNC | UPL_CLEAN_IN_PLACE));

		if(kr != KERN_SUCCESS)
			panic("device_pager_populate_object: list_req failed");

		upl_commit(upl, NULL, 0);
		upl_deallocate(upl);
	}


	return kr;
}

/*
 * device_pager_lookup:
 *
 * Convert a memory object back into a device pager, asserting that it
 * really is one.
 */
device_pager_t
device_pager_lookup(
	memory_object_t	name)
{
	device_pager_t	device_object;

	device_object = (device_pager_t)name;
	assert(device_object->pager == &device_pager_workaround);
	return (device_object);
}

/*
 * device_pager_init:
 *
 * Attach the pager to its memory object control handle and mark the
 * underlying VM object as privately (device) backed.
 */
kern_return_t
device_pager_init(
	memory_object_t		mem_obj,
	memory_object_control_t	control,
	__unused vm_size_t	pg_size)
{
	device_pager_t	device_object;
	kern_return_t	kr;
	memory_object_attr_info_data_t	attributes;

	vm_object_t	vm_object;


	if (control == MEMORY_OBJECT_CONTROL_NULL)
		return KERN_INVALID_ARGUMENT;

	device_object = device_pager_lookup(mem_obj);

	memory_object_control_reference(control);
	device_object->control_handle = control;


	/*
	 * The following settings should be done through an expanded
	 * memory_object_change_attributes() call.
	 */

	vm_object = (vm_object_t)memory_object_control_to_vm_object(control);
	vm_object_lock(vm_object);
	vm_object->private = TRUE;
	if(device_object->flags & DEVICE_PAGER_CONTIGUOUS)
		vm_object->phys_contiguous = TRUE;
	if(device_object->flags & DEVICE_PAGER_NOPHYSCACHE)
		vm_object->nophyscache = TRUE;

	vm_object->wimg_bits = device_object->flags & VM_WIMG_MASK;
	vm_object_unlock(vm_object);


	attributes.copy_strategy = MEMORY_OBJECT_COPY_DELAY;
	/* attributes.cluster_size = (1 << (CLUSTER_SHIFT + PAGE_SHIFT));*/
	attributes.cluster_size = (1 << (PAGE_SHIFT));
	attributes.may_cache_object = FALSE;
	attributes.temporary = TRUE;

	kr = memory_object_change_attributes(
					control,
					MEMORY_OBJECT_ATTRIBUTE_INFO,
					(memory_object_info_t) &attributes,
					MEMORY_OBJECT_ATTR_INFO_COUNT);
	if (kr != KERN_SUCCESS)
		panic("device_pager_init: memory_object_change_attributes() failed");

	return(KERN_SUCCESS);
}
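
/*
 * A note on the settings made above: "private" marks the object as one
 * whose pages are device pages with physical addresses supplied through
 * device_pager_populate_object() rather than ordinary pageable memory;
 * DEVICE_PAGER_CONTIGUOUS and DEVICE_PAGER_NOPHYSCACHE are reflected in
 * the object's phys_contiguous and nophyscache flags; and the low-order
 * VM_WIMG_MASK bits of the pager flags become the object's wimg_bits,
 * the caching attributes used when its pages are mapped.  The
 * memory_object_change_attributes() call then keeps the object out of
 * the VM object cache and gives it the delayed copy strategy.
 */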

/*
 * device_pager_data_return:
 *
 * Hand data being paged out back to the device via device_data_action().
 */
/*ARGSUSED6*/
kern_return_t
device_pager_data_return(
	memory_object_t		mem_obj,
	memory_object_offset_t	offset,
	vm_size_t		data_cnt,
	__unused boolean_t	dirty,
	__unused boolean_t	kernel_copy,
	__unused int		upl_flags)
{
	device_pager_t	device_object;

	device_object = device_pager_lookup(mem_obj);
	if (device_object == DEVICE_PAGER_NULL)
		panic("device_pager_data_return: lookup failed");

	return device_data_action(device_object->device_handle,
				(ipc_port_t) device_object,
				VM_PROT_READ | VM_PROT_WRITE,
				offset, data_cnt);
}

/*
 * device_pager_data_request:
 *
 * Page-in request: ask the device, via device_data_action(), to supply
 * the data for the given range.
 */
kern_return_t
device_pager_data_request(
	memory_object_t		mem_obj,
	memory_object_offset_t	offset,
	vm_size_t		length,
	__unused vm_prot_t	protection_required)
{
	device_pager_t	device_object;

	device_object = device_pager_lookup(mem_obj);

	if (device_object == DEVICE_PAGER_NULL)
		panic("device_pager_data_request: lookup failed");

	device_data_action(device_object->device_handle,
				(ipc_port_t) device_object,
				VM_PROT_READ, offset, length);
	return KERN_SUCCESS;
}
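
/*
 * The device side of this interface lives outside osfmk: the
 * device_data_action() callout is implemented by the device's driver
 * (in practice IOKit's memory-descriptor code), which is expected to
 * satisfy the request by entering the backing physical pages with
 * device_pager_populate_object().  That is an observation about the
 * current callers rather than something enforced here.
 */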

/*
 * device_pager_reference:
 *
 * Take an additional reference on the device pager.
 */
void
device_pager_reference(
	memory_object_t		mem_obj)
{
	device_pager_t		device_object;
	unsigned int		new_ref_count;

	device_object = device_pager_lookup(mem_obj);
	new_ref_count = hw_atomic_add(&device_object->ref_count, 1);
	assert(new_ref_count > 1);
}

/*
 * device_pager_deallocate:
 *
 * Drop a reference on the device pager; on the last release, close the
 * device, release the memory object control handle and free the pager.
 */
void
device_pager_deallocate(
	memory_object_t		mem_obj)
{
	device_pager_t		device_object;
	memory_object_control_t	device_control;

	device_object = device_pager_lookup(mem_obj);

	if (hw_atomic_sub(&device_object->ref_count, 1) == 0) {
		if (device_object->device_handle != (device_port_t) NULL) {
			device_close(device_object->device_handle);
			device_object->device_handle = (device_port_t) NULL;
		}
		device_control = device_object->control_handle;
		if (device_control != MEMORY_OBJECT_CONTROL_NULL) {
			/*
			 * The VM object should already have been disconnected
			 * from the pager at this point.
			 * We still have to release the "memory object control"
			 * handle.
			 */
			assert(device_control->object == VM_OBJECT_NULL);
			memory_object_control_deallocate(device_control);
			device_object->control_handle =
				MEMORY_OBJECT_CONTROL_NULL;
		}

		zfree(device_pager_zone, device_object);
	}
	return;
}

/* not expected to be called for a device pager */
kern_return_t
device_pager_data_initialize(
	__unused memory_object_t	mem_obj,
	__unused memory_object_offset_t	offset,
	__unused vm_size_t		data_cnt)
{
	panic("device_pager_data_initialize");
	return KERN_FAILURE;
}

kern_return_t
device_pager_data_unlock(
	__unused memory_object_t	mem_obj,
	__unused memory_object_offset_t	offset,
	__unused vm_size_t		size,
	__unused vm_prot_t		desired_access)
{
	return KERN_FAILURE;
}

kern_return_t
device_pager_terminate(
	__unused memory_object_t	mem_obj)
{
	return KERN_SUCCESS;
}


/*
 * device_pager_synchronize:
 *
 * Immediately report the synchronize request as complete.
 */
kern_return_t
device_pager_synchronize(
	memory_object_t		mem_obj,
	memory_object_offset_t	offset,
	vm_offset_t		length,
	__unused vm_sync_t	sync_flags)
{
	device_pager_t	device_object;

	device_object = device_pager_lookup(mem_obj);

	memory_object_synchronize_completed(
			device_object->control_handle, offset, length);

	return KERN_SUCCESS;
}

/*
 * device_pager_unmap:
 *
 * Nothing to do.
 */
kern_return_t
device_pager_unmap(
	__unused memory_object_t	mem_obj)
{
	return KERN_SUCCESS;
}


/*
 * device_object_create:
 *
 * Allocate and initialize a new device pager structure.
 */
device_pager_t
device_object_create(void)
{
	register device_pager_t	device_object;

	device_object = (struct device_pager *) zalloc(device_pager_zone);
	if (device_object == DEVICE_PAGER_NULL)
		return(DEVICE_PAGER_NULL);
	device_object->pager = &device_pager_workaround;
	device_object->pager_ikot = IKOT_MEMORY_OBJECT;
	device_object->ref_count = 1;
	device_object->control_handle = MEMORY_OBJECT_CONTROL_NULL;


	return(device_object);
}
