osfmk/vm/device_vm.c
/*
 * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <sys/errno.h>

#include <mach/mach_types.h>
#include <mach/kern_return.h>
#include <mach/memory_object_control.h>
#include <mach/memory_object_types.h>
#include <mach/port.h>
#include <mach/policy.h>
#include <mach/upl.h>
#include <kern/kern_types.h>
#include <kern/ipc_kobject.h>
#include <kern/host.h>
#include <kern/thread.h>
#include <ipc/ipc_port.h>
#include <ipc/ipc_space.h>
#include <device/device_port.h>
#include <vm/memory_object.h>
#include <vm/vm_pageout.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_protos.h>
#include <mach/sdt.h>
#include <os/refcnt.h>


/* Device VM COMPONENT INTERFACES */

/*
 * Device PAGER
 */

/* until component support available */
const struct memory_object_pager_ops device_pager_ops = {
    .memory_object_reference = device_pager_reference,
    .memory_object_deallocate = device_pager_deallocate,
    .memory_object_init = device_pager_init,
    .memory_object_terminate = device_pager_terminate,
    .memory_object_data_request = device_pager_data_request,
    .memory_object_data_return = device_pager_data_return,
    .memory_object_data_initialize = device_pager_data_initialize,
    .memory_object_data_unlock = device_pager_data_unlock,
    .memory_object_synchronize = device_pager_synchronize,
    .memory_object_map = device_pager_map,
    .memory_object_last_unmap = device_pager_last_unmap,
    .memory_object_data_reclaim = NULL,
    .memory_object_pager_name = "device pager"
};
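
/*
 * The VM layer does not call these routines directly: the generic
 * memory_object_*() entry points dispatch through this mo_pager_ops
 * table, so a page-in on a device-backed object, for example, ends up
 * in device_pager_data_request() below.
 */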

typedef uintptr_t device_port_t;

/*
 * The start of "struct device_pager" MUST match a "struct memory_object".
 */
typedef struct device_pager {
    /* mandatory generic header */
    struct memory_object dev_pgr_hdr;

    /* pager-specific data */
    lck_mtx_t        lock;
    struct os_refcnt ref_count;     /* reference count */
    device_port_t    device_handle; /* opaque handle passed to device_data_action() */
    vm_size_t        size;
    int              flags;
    boolean_t        is_mapped;
} *device_pager_t;
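
/*
 * Because device_pager_lookup() simply casts a memory_object_t to a
 * device_pager_t, the generic header must live at offset 0.  A minimal
 * compile-time check along these lines could enforce that (an
 * illustrative sketch, not part of the original file):
 *
 *    _Static_assert(__builtin_offsetof(struct device_pager, dev_pgr_hdr) == 0,
 *        "dev_pgr_hdr must be the first member of struct device_pager");
 */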

LCK_GRP_DECLARE(device_pager_lck_grp, "device_pager");

ZONE_DECLARE(device_pager_zone, "device node pager structures",
    sizeof(struct device_pager), ZC_NONE);

#define device_pager_lock_init(pager) \
    lck_mtx_init(&(pager)->lock, &device_pager_lck_grp, LCK_ATTR_NULL)
#define device_pager_lock_destroy(pager) \
    lck_mtx_destroy(&(pager)->lock, &device_pager_lck_grp)
#define device_pager_lock(pager)        lck_mtx_lock(&(pager)->lock)
#define device_pager_unlock(pager)      lck_mtx_unlock(&(pager)->lock)

device_pager_t
device_pager_lookup(            /* forward */
    memory_object_t);

device_pager_t
device_object_create(void);     /* forward */

#define DEVICE_PAGER_NULL       ((device_pager_t) 0)

#define MAX_DNODE               10000


/*
 * device_pager_setup:
 *
 * Create a device pager for the given device handle, create the named
 * VM object that it backs, and hand the pager back to the caller as a
 * memory_object_t for the caller to map.
 */
memory_object_t
device_pager_setup(
    __unused memory_object_t device,
    uintptr_t       device_handle,
    vm_size_t       size,
    int             flags)
{
    device_pager_t  device_object;
    memory_object_control_t control;
    vm_object_t     object;

    device_object = device_object_create();
    if (device_object == DEVICE_PAGER_NULL) {
        panic("device_pager_setup: device_object_create() failed");
    }

    device_object->device_handle = device_handle;
    device_object->size = size;
    device_object->flags = flags;

    memory_object_create_named((memory_object_t) device_object,
        size,
        &control);
    object = memory_object_control_to_vm_object(control);

    memory_object_mark_trusted(control);

    assert(object != VM_OBJECT_NULL);
    vm_object_lock(object);
    object->true_share = TRUE;
    if (object->copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC) {
        object->copy_strategy = MEMORY_OBJECT_COPY_DELAY;
    }
    vm_object_unlock(object);

    return (memory_object_t)device_object;
}

/*
 * device_pager_populate_object:
 *
 * Enter the caller-supplied physical page(s) into the pager's VM object
 * as "private" pages backing the given offset.
 */
kern_return_t
device_pager_populate_object(
    memory_object_t         device,
    memory_object_offset_t  offset,
    ppnum_t                 page_num,
    vm_size_t               size)
{
    device_pager_t  device_object;
    vm_object_t     vm_object;
    kern_return_t   kr;
    upl_t           upl;

    device_object = device_pager_lookup(device);
    if (device_object == DEVICE_PAGER_NULL) {
        return KERN_FAILURE;
    }

    vm_object = (vm_object_t)memory_object_control_to_vm_object(
        device_object->dev_pgr_hdr.mo_control);
    if (vm_object == NULL) {
        return KERN_FAILURE;
    }

    kr = vm_object_populate_with_private(
        vm_object, offset, page_num, size);
    if (kr != KERN_SUCCESS) {
        return kr;
    }

    if (!vm_object->phys_contiguous) {
        unsigned int null_size = 0;
        assert((upl_size_t) size == size);
        kr = vm_object_upl_request(vm_object,
            (vm_object_offset_t)offset,
            (upl_size_t) size, &upl, NULL,
            &null_size,
            (UPL_NO_SYNC | UPL_CLEAN_IN_PLACE),
            VM_KERN_MEMORY_NONE);
        if (kr != KERN_SUCCESS) {
            panic("device_pager_populate_object: list_req failed");
        }

        upl_commit(upl, NULL, 0);
        upl_deallocate(upl);
    }

    return kr;
}
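
/*
 * Illustrative usage (a sketch, not taken from this file): a kernel
 * caller holding an opaque device handle and a physical page number
 * could expose one page of device memory through the device pager
 * roughly as follows; "dev_handle" and "phys_page" are hypothetical
 * placeholders supplied by the caller, which would then map the
 * returned pager into an address space.
 *
 *    memory_object_t pager;
 *    kern_return_t   kr;
 *
 *    pager = device_pager_setup(MEMORY_OBJECT_NULL, dev_handle,
 *        PAGE_SIZE, DEVICE_PAGER_CONTIGUOUS);
 *    kr = device_pager_populate_object(pager, 0, phys_page, PAGE_SIZE);
 */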

/*
 * device_pager_lookup:
 *
 * Convert a memory_object_t back into the device_pager_t that embeds it.
 */
device_pager_t
device_pager_lookup(
    memory_object_t mem_obj)
{
    device_pager_t  device_object;

    assert(mem_obj->mo_pager_ops == &device_pager_ops);
    device_object = (device_pager_t)mem_obj;
    assert(os_ref_get_count(&device_object->ref_count) > 0);
    return device_object;
}

/*
 * device_pager_init:
 *
 * Attach the pager to its memory object control port and apply the
 * VM object attributes implied by the pager's flags.
 */
kern_return_t
device_pager_init(
    memory_object_t         mem_obj,
    memory_object_control_t control,
    __unused memory_object_cluster_size_t pg_size)
{
    device_pager_t  device_object;
    kern_return_t   kr;
    memory_object_attr_info_data_t attributes;
    vm_object_t     vm_object;

    if (control == MEMORY_OBJECT_CONTROL_NULL) {
        return KERN_INVALID_ARGUMENT;
    }

    device_object = device_pager_lookup(mem_obj);

    memory_object_control_reference(control);
    device_object->dev_pgr_hdr.mo_control = control;

    /*
     * The following settings should be done through an expanded
     * change-attributes call.
     */
    vm_object = (vm_object_t)memory_object_control_to_vm_object(control);
    vm_object_lock(vm_object);
    vm_object->private = TRUE;
    if (device_object->flags & DEVICE_PAGER_CONTIGUOUS) {
        vm_object->phys_contiguous = TRUE;
    }
    if (device_object->flags & DEVICE_PAGER_NOPHYSCACHE) {
        vm_object->nophyscache = TRUE;
    }

    vm_object->wimg_bits = device_object->flags & VM_WIMG_MASK;
    vm_object_unlock(vm_object);

    attributes.copy_strategy = MEMORY_OBJECT_COPY_DELAY;
    /* attributes.cluster_size = (1 << (CLUSTER_SHIFT + PAGE_SHIFT)); */
    attributes.cluster_size = (1 << (PAGE_SHIFT));
    attributes.may_cache_object = FALSE;
    attributes.temporary = TRUE;

    kr = memory_object_change_attributes(
        control,
        MEMORY_OBJECT_ATTRIBUTE_INFO,
        (memory_object_info_t) &attributes,
        MEMORY_OBJECT_ATTR_INFO_COUNT);
    if (kr != KERN_SUCCESS) {
        panic("device_pager_init: memory_object_change_attributes() failed");
    }

    return KERN_SUCCESS;
}

/*
 * device_pager_data_return:
 *
 * Hand data being returned (paged out) back to the device, via
 * device_data_action().
 */
/*ARGSUSED6*/
kern_return_t
device_pager_data_return(
    memory_object_t         mem_obj,
    memory_object_offset_t  offset,
    memory_object_cluster_size_t data_cnt,
    __unused memory_object_offset_t *resid_offset,
    __unused int            *io_error,
    __unused boolean_t      dirty,
    __unused boolean_t      kernel_copy,
    __unused int            upl_flags)
{
    device_pager_t  device_object;

    device_object = device_pager_lookup(mem_obj);
    if (device_object == DEVICE_PAGER_NULL) {
        panic("device_pager_data_return: lookup failed");
    }

    __IGNORE_WCASTALIGN(return device_data_action(device_object->device_handle,
        (ipc_port_t) device_object,
        VM_PROT_READ | VM_PROT_WRITE,
        offset, data_cnt));
}

/*
 * device_pager_data_request:
 *
 * Satisfy a page-in request by asking the device, via device_data_action(),
 * to provide the data for the given range.
 */
kern_return_t
device_pager_data_request(
    memory_object_t         mem_obj,
    memory_object_offset_t  offset,
    memory_object_cluster_size_t length,
    __unused vm_prot_t      protection_required,
    __unused memory_object_fault_info_t fault_info)
{
    device_pager_t  device_object;

    device_object = device_pager_lookup(mem_obj);

    if (device_object == DEVICE_PAGER_NULL) {
        panic("device_pager_data_request: lookup failed");
    }

    __IGNORE_WCASTALIGN(device_data_action(device_object->device_handle,
        (ipc_port_t) device_object,
        VM_PROT_READ, offset, length));
    return KERN_SUCCESS;
}

/*
 * device_pager_reference:
 *
 * Take an additional reference on the pager.
 */
void
device_pager_reference(
    memory_object_t mem_obj)
{
    device_pager_t  device_object;

    device_object = device_pager_lookup(mem_obj);
    os_ref_retain(&device_object->ref_count);
    DTRACE_VM2(device_pager_reference,
        device_pager_t, device_object,
        unsigned int, os_ref_get_count(&device_object->ref_count));
}

/*
 * device_pager_deallocate:
 *
 * Release a reference on the pager.  When only the "named" reference is
 * left, close the device and destroy the VM object; when the last
 * reference goes away, free the pager structure itself.
 */
void
device_pager_deallocate(
    memory_object_t mem_obj)
{
    device_pager_t          device_object;
    memory_object_control_t device_control;

    device_object = device_pager_lookup(mem_obj);

    DTRACE_VM2(device_pager_deallocate,
        device_pager_t, device_object,
        unsigned int, os_ref_get_count(&device_object->ref_count));

    os_ref_count_t ref_count = os_ref_release(&device_object->ref_count);

    if (ref_count == 1) {
        /*
         * The last reference is our "named" reference.
         * Close the device and "destroy" the VM object.
         */

        DTRACE_VM2(device_pager_destroy,
            device_pager_t, device_object,
            unsigned int, os_ref_get_count(&device_object->ref_count));

        assert(device_object->is_mapped == FALSE);
        if (device_object->device_handle != (device_port_t) NULL) {
            device_close(device_object->device_handle);
            device_object->device_handle = (device_port_t) NULL;
        }
        device_control = device_object->dev_pgr_hdr.mo_control;
        memory_object_destroy(device_control, 0);
    } else if (ref_count == 0) {
        /*
         * No more references: free the pager.
         */
        DTRACE_VM2(device_pager_free,
            device_pager_t, device_object,
            unsigned int, os_ref_get_count(&device_object->ref_count));

        device_pager_lock_destroy(device_object);

        zfree(device_pager_zone, device_object);
    }
    return;
}

kern_return_t
device_pager_data_initialize(
    __unused memory_object_t        mem_obj,
    __unused memory_object_offset_t offset,
    __unused memory_object_cluster_size_t data_cnt)
{
    panic("device_pager_data_initialize");
    return KERN_FAILURE;
}

kern_return_t
device_pager_data_unlock(
    __unused memory_object_t        mem_obj,
    __unused memory_object_offset_t offset,
    __unused memory_object_size_t   size,
    __unused vm_prot_t              desired_access)
{
    return KERN_FAILURE;
}

kern_return_t
device_pager_terminate(
    __unused memory_object_t        mem_obj)
{
    return KERN_SUCCESS;
}


/*
 * device_pager_synchronize:
 *
 * memory_object_synchronize() is no longer supported; this entry point
 * should never be reached.
 */
kern_return_t
device_pager_synchronize(
    __unused memory_object_t        mem_obj,
    __unused memory_object_offset_t offset,
    __unused memory_object_size_t   length,
    __unused vm_sync_t              sync_flags)
{
    panic("device_pager_synchronize: memory_object_synchronize no longer supported\n");
    return KERN_FAILURE;
}

/*
 * device_pager_map:
 *
 * A mapping of the pager's memory object is being created.  On the first
 * mapping, take an extra reference that lasts until the last mapping is
 * removed (see device_pager_last_unmap()).
 */
kern_return_t
device_pager_map(
    memory_object_t mem_obj,
    __unused vm_prot_t      prot)
{
    device_pager_t  device_object;

    device_object = device_pager_lookup(mem_obj);

    device_pager_lock(device_object);
    assert(os_ref_get_count(&device_object->ref_count) > 0);
    if (device_object->is_mapped == FALSE) {
        /*
         * First mapping of this pager: take an extra reference
         * that will remain until all the mappings of this pager
         * are removed.
         */
        device_object->is_mapped = TRUE;
        device_pager_reference(mem_obj);
    }
    device_pager_unlock(device_object);

    return KERN_SUCCESS;
}

/*
 * device_pager_last_unmap:
 *
 * The last mapping of the pager's memory object is gone: drop the extra
 * reference taken in device_pager_map().
 */
kern_return_t
device_pager_last_unmap(
    memory_object_t mem_obj)
{
    device_pager_t  device_object;
    boolean_t       drop_ref;

    device_object = device_pager_lookup(mem_obj);

    device_pager_lock(device_object);
    assert(os_ref_get_count(&device_object->ref_count) > 0);
    if (device_object->is_mapped) {
        device_object->is_mapped = FALSE;
        drop_ref = TRUE;
    } else {
        drop_ref = FALSE;
    }
    device_pager_unlock(device_object);

    if (drop_ref) {
        device_pager_deallocate(mem_obj);
    }

    return KERN_SUCCESS;
}
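
/*
 * Reference lifecycle, as implemented above: device_object_create() hands
 * out the pager with a single reference; the first device_pager_map()
 * adds one that device_pager_last_unmap() drops again; and
 * device_pager_deallocate() closes the device and destroys the VM object
 * once only the "named" reference remains, then frees the pager when the
 * count reaches zero.
 */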

/*
 * device_object_create:
 *
 * Allocate and initialize a new device pager with a single reference.
 */
device_pager_t
device_object_create(void)
{
    device_pager_t  device_object;

    device_object = (struct device_pager *) zalloc(device_pager_zone);
    if (device_object == DEVICE_PAGER_NULL) {
        return DEVICE_PAGER_NULL;
    }

    bzero(device_object, sizeof(*device_object));

    device_object->dev_pgr_hdr.mo_ikot = IKOT_MEMORY_OBJECT;
    device_object->dev_pgr_hdr.mo_pager_ops = &device_pager_ops;
    device_object->dev_pgr_hdr.mo_control = MEMORY_OBJECT_CONTROL_NULL;

    device_pager_lock_init(device_object);
    os_ref_init(&device_object->ref_count, NULL);
    device_object->is_mapped = FALSE;

    DTRACE_VM2(device_pager_create,
        device_pager_t, device_object,
        unsigned int, os_ref_get_count(&device_object->ref_count));

    return device_object;
}

boolean_t
is_device_pager_ops(const struct memory_object_pager_ops *pager_ops)
{
    if (pager_ops == &device_pager_ops) {
        return TRUE;
    }
    return FALSE;
}