/*
 * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <sys/errno.h>

#include <mach/mach_types.h>
#include <mach/kern_return.h>
#include <mach/memory_object_control.h>
#include <mach/memory_object_types.h>
#include <mach/port.h>
#include <mach/policy.h>
#include <mach/upl.h>
#include <kern/kern_types.h>
#include <kern/ipc_kobject.h>
#include <kern/host.h>
#include <kern/thread.h>
#include <ipc/ipc_port.h>
#include <ipc/ipc_space.h>
#include <device/device_port.h>
#include <vm/memory_object.h>
#include <vm/vm_pageout.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_protos.h>
#include <mach/sdt.h>
#include <os/refcnt.h>

/* Device VM COMPONENT INTERFACES */

/*
 * Device PAGER
 */

/* until component support available */
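/*
 * Dispatch table for the device pager: the Mach VM layer invokes the
 * memory_object_* operations on a device-backed object through these
 * entry points, all of which are implemented below.
 */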
const struct memory_object_pager_ops device_pager_ops = {
	.memory_object_reference = device_pager_reference,
	.memory_object_deallocate = device_pager_deallocate,
	.memory_object_init = device_pager_init,
	.memory_object_terminate = device_pager_terminate,
	.memory_object_data_request = device_pager_data_request,
	.memory_object_data_return = device_pager_data_return,
	.memory_object_data_initialize = device_pager_data_initialize,
	.memory_object_data_unlock = device_pager_data_unlock,
	.memory_object_synchronize = device_pager_synchronize,
	.memory_object_map = device_pager_map,
	.memory_object_last_unmap = device_pager_last_unmap,
	.memory_object_data_reclaim = NULL,
	.memory_object_backing_object = NULL,
	.memory_object_pager_name = "device pager"
};

typedef uintptr_t device_port_t;

/*
 * The start of "struct device_pager" MUST match a "struct memory_object".
 */
typedef struct device_pager {
	/* mandatory generic header */
	struct memory_object dev_pgr_hdr;

	/* pager-specific data */
	lck_mtx_t       lock;
	device_port_t   device_handle;  /* opaque handle for the backing device */
	vm_size_t       size;
#if MEMORY_OBJECT_HAS_REFCOUNT
#define dev_pgr_hdr_ref dev_pgr_hdr.mo_ref
#else
	os_ref_atomic_t dev_pgr_hdr_ref;
#endif
	int             flags;
	boolean_t       is_mapped;
} *device_pager_t;
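
/*
 * Reference-counting note (see device_pager_reference/deallocate below):
 * a pager is created with a single "named" reference, device_pager_map()
 * adds one reference that is held while any mapping of the pager exists,
 * and device_pager_deallocate() destroys the backing VM object when the
 * count drops to 1 and frees the pager when it reaches 0.
 */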

__header_always_inline os_ref_count_t
device_pager_get_refcount(device_pager_t device_object)
{
	return os_ref_get_count_raw(&device_object->dev_pgr_hdr_ref);
}

LCK_GRP_DECLARE(device_pager_lck_grp, "device_pager");

ZONE_DECLARE(device_pager_zone, "device node pager structures",
    sizeof(struct device_pager), ZC_NONE);

#define device_pager_lock_init(pager) \
	lck_mtx_init(&(pager)->lock, &device_pager_lck_grp, LCK_ATTR_NULL)
#define device_pager_lock_destroy(pager) \
	lck_mtx_destroy(&(pager)->lock, &device_pager_lck_grp)
#define device_pager_lock(pager) lck_mtx_lock(&(pager)->lock)
#define device_pager_unlock(pager) lck_mtx_unlock(&(pager)->lock)

device_pager_t
device_pager_lookup(            /* forward */
	memory_object_t);

device_pager_t
device_object_create(void);     /* forward */

#define DEVICE_PAGER_NULL       ((device_pager_t) 0)

#define MAX_DNODE               10000

/*
 * device_pager_setup: create a device-backed memory object (pager) for
 * the given device handle, size and flags, and set up the VM object
 * that backs it.
 */
memory_object_t
device_pager_setup(
	__unused memory_object_t device,
	uintptr_t       device_handle,
	vm_size_t       size,
	int             flags)
{
	device_pager_t  device_object;
	memory_object_control_t control;
	vm_object_t     object;

	device_object = device_object_create();
	if (device_object == DEVICE_PAGER_NULL) {
		panic("device_pager_setup: device_object_create() failed");
	}

	device_object->device_handle = device_handle;
	device_object->size = size;
	device_object->flags = flags;

	memory_object_create_named((memory_object_t) device_object,
	    size,
	    &control);
	object = memory_object_control_to_vm_object(control);

	memory_object_mark_trusted(control);

	assert(object != VM_OBJECT_NULL);
	vm_object_lock(object);
	object->true_share = TRUE;
	if (object->copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC) {
		object->copy_strategy = MEMORY_OBJECT_COPY_DELAY;
	}
	vm_object_unlock(object);

	return (memory_object_t)device_object;
}

/*
 * device_pager_populate_object: insert the given physical page(s) into
 * the VM object backing this pager, starting at "offset".
 */
kern_return_t
device_pager_populate_object(
	memory_object_t         device,
	memory_object_offset_t  offset,
	ppnum_t                 page_num,
	vm_size_t               size)
{
	device_pager_t  device_object;
	vm_object_t     vm_object;
	kern_return_t   kr;
	upl_t           upl;

	device_object = device_pager_lookup(device);
	if (device_object == DEVICE_PAGER_NULL) {
		return KERN_FAILURE;
	}

	vm_object = (vm_object_t)memory_object_control_to_vm_object(
		device_object->dev_pgr_hdr.mo_control);
	if (vm_object == NULL) {
		return KERN_FAILURE;
	}

	kr = vm_object_populate_with_private(
		vm_object, offset, page_num, size);
	if (kr != KERN_SUCCESS) {
		return kr;
	}

	if (!vm_object->phys_contiguous) {
		unsigned int null_size = 0;
		assert((upl_size_t) size == size);
		kr = vm_object_upl_request(vm_object,
		    (vm_object_offset_t)offset,
		    (upl_size_t) size, &upl, NULL,
		    &null_size,
		    (UPL_NO_SYNC | UPL_CLEAN_IN_PLACE),
		    VM_KERN_MEMORY_NONE);
		if (kr != KERN_SUCCESS) {
			panic("device_pager_populate_object: list_req failed");
		}

		upl_commit(upl, NULL, 0);
		upl_deallocate(upl);
	}

	return kr;
}
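
/*
 * Illustrative sketch (not from this file): a kernel client exposing device
 * memory through the VM system would typically create the pager and then
 * hand it physical pages, roughly:
 *
 *	memory_object_t pager;
 *
 *	pager = device_pager_setup(MEMORY_OBJECT_NULL, dev_handle,
 *	    round_page(len), DEVICE_PAGER_CONTIGUOUS);
 *	(void) device_pager_populate_object(pager, 0, base_ppnum,
 *	    round_page(len));
 *
 * "dev_handle", "base_ppnum" and "len" are placeholder values the caller
 * would obtain from its device; the real callers live outside this file.
 */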

/*
 * device_pager_lookup: convert a memory_object_t back to its device_pager_t,
 * verifying that it really is a device pager.
 */
device_pager_t
device_pager_lookup(
	memory_object_t mem_obj)
{
	device_pager_t  device_object;

	assert(mem_obj->mo_pager_ops == &device_pager_ops);
	device_object = (device_pager_t)mem_obj;
	assert(device_pager_get_refcount(device_object) > 0);
	return device_object;
}

/*
 * device_pager_init: first contact from the VM layer; record the memory
 * object control and configure the backing VM object's attributes.
 */
kern_return_t
device_pager_init(
	memory_object_t mem_obj,
	memory_object_control_t control,
	__unused memory_object_cluster_size_t pg_size)
{
	device_pager_t  device_object;
	kern_return_t   kr;
	memory_object_attr_info_data_t attributes;
	vm_object_t     vm_object;

	if (control == MEMORY_OBJECT_CONTROL_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	device_object = device_pager_lookup(mem_obj);

	memory_object_control_reference(control);
	device_object->dev_pgr_hdr.mo_control = control;

	/*
	 * The following settings should be done through an expanded
	 * change attributes call.
	 */
	vm_object = (vm_object_t)memory_object_control_to_vm_object(control);
	vm_object_lock(vm_object);
	vm_object->private = TRUE;
	if (device_object->flags & DEVICE_PAGER_CONTIGUOUS) {
		vm_object->phys_contiguous = TRUE;
	}
	if (device_object->flags & DEVICE_PAGER_NOPHYSCACHE) {
		vm_object->nophyscache = TRUE;
	}

	vm_object->wimg_bits = device_object->flags & VM_WIMG_MASK;
	vm_object_unlock(vm_object);

	attributes.copy_strategy = MEMORY_OBJECT_COPY_DELAY;
	/* attributes.cluster_size = (1 << (CLUSTER_SHIFT + PAGE_SHIFT));*/
	attributes.cluster_size = (1 << (PAGE_SHIFT));
	attributes.may_cache_object = FALSE;
	attributes.temporary = TRUE;

	kr = memory_object_change_attributes(
		control,
		MEMORY_OBJECT_ATTRIBUTE_INFO,
		(memory_object_info_t) &attributes,
		MEMORY_OBJECT_ATTR_INFO_COUNT);
	if (kr != KERN_SUCCESS) {
		panic("device_pager_init: memory_object_change_attributes() failed");
	}

	return KERN_SUCCESS;
}

/*
 * device_pager_data_return: pages are being returned (written back) to the
 * pager; forward the request to the device via device_data_action().
 */
/*ARGSUSED6*/
kern_return_t
device_pager_data_return(
	memory_object_t         mem_obj,
	memory_object_offset_t  offset,
	memory_object_cluster_size_t data_cnt,
	__unused memory_object_offset_t *resid_offset,
	__unused int            *io_error,
	__unused boolean_t      dirty,
	__unused boolean_t      kernel_copy,
	__unused int            upl_flags)
{
	device_pager_t  device_object;

	device_object = device_pager_lookup(mem_obj);
	if (device_object == DEVICE_PAGER_NULL) {
		panic("device_pager_data_return: lookup failed");
	}

	__IGNORE_WCASTALIGN(return device_data_action(device_object->device_handle,
	    (ipc_port_t) device_object,
	    VM_PROT_READ | VM_PROT_WRITE,
	    offset, data_cnt));
}

/*
 * device_pager_data_request: a fault occurred on a device-backed object;
 * forward the faulting range to the device via device_data_action().
 */
kern_return_t
device_pager_data_request(
	memory_object_t         mem_obj,
	memory_object_offset_t  offset,
	memory_object_cluster_size_t length,
	__unused vm_prot_t      protection_required,
	__unused memory_object_fault_info_t fault_info)
{
	device_pager_t  device_object;

	device_object = device_pager_lookup(mem_obj);

	if (device_object == DEVICE_PAGER_NULL) {
		panic("device_pager_data_request: lookup failed");
	}

	__IGNORE_WCASTALIGN(device_data_action(device_object->device_handle,
	    (ipc_port_t) device_object,
	    VM_PROT_READ, offset, length));
	return KERN_SUCCESS;
}

/*
 * device_pager_reference: take an additional reference on the pager.
 */
void
device_pager_reference(
	memory_object_t         mem_obj)
{
	device_pager_t          device_object;

	device_object = device_pager_lookup(mem_obj);
	os_ref_retain_raw(&device_object->dev_pgr_hdr_ref, NULL);
	DTRACE_VM2(device_pager_reference,
	    device_pager_t, device_object,
	    unsigned int, device_pager_get_refcount(device_object));
}

/*
 * device_pager_deallocate: release a reference; tear down the backing VM
 * object when only the "named" reference remains, and free the pager once
 * the last reference is gone.
 */
void
device_pager_deallocate(
	memory_object_t         mem_obj)
{
	device_pager_t          device_object;
	memory_object_control_t device_control;
	os_ref_count_t          ref_count;

	device_object = device_pager_lookup(mem_obj);

	DTRACE_VM2(device_pager_deallocate,
	    device_pager_t, device_object,
	    unsigned int, device_pager_get_refcount(device_object));

	ref_count = os_ref_release_raw(&device_object->dev_pgr_hdr_ref, NULL);

	if (ref_count == 1) {
		/*
		 * The last reference is our "named" reference.
		 * Close the device and "destroy" the VM object.
		 */

		DTRACE_VM2(device_pager_destroy,
		    device_pager_t, device_object,
		    unsigned int, device_pager_get_refcount(device_object));

		assert(device_object->is_mapped == FALSE);
		if (device_object->device_handle != (device_port_t) NULL) {
			device_close(device_object->device_handle);
			device_object->device_handle = (device_port_t) NULL;
		}
		device_control = device_object->dev_pgr_hdr.mo_control;
		memory_object_destroy(device_control, 0);
	} else if (ref_count == 0) {
		/*
		 * No more references: free the pager.
		 */
		DTRACE_VM2(device_pager_free,
		    device_pager_t, device_object,
		    unsigned int, device_pager_get_refcount(device_object));

		device_control = device_object->dev_pgr_hdr.mo_control;

		if (device_control != MEMORY_OBJECT_CONTROL_NULL) {
			memory_object_control_deallocate(device_control);
			device_object->dev_pgr_hdr.mo_control = MEMORY_OBJECT_CONTROL_NULL;
		}
		device_pager_lock_destroy(device_object);

		zfree(device_pager_zone, device_object);
	}
	return;
}

kern_return_t
device_pager_data_initialize(
	__unused memory_object_t        mem_obj,
	__unused memory_object_offset_t offset,
	__unused memory_object_cluster_size_t data_cnt)
{
	panic("device_pager_data_initialize");
	return KERN_FAILURE;
}

kern_return_t
device_pager_data_unlock(
	__unused memory_object_t        mem_obj,
	__unused memory_object_offset_t offset,
	__unused memory_object_size_t   size,
	__unused vm_prot_t              desired_access)
{
	return KERN_FAILURE;
}

kern_return_t
device_pager_terminate(
	__unused memory_object_t        mem_obj)
{
	return KERN_SUCCESS;
}


/*
 * device_pager_synchronize: memory_object_synchronize is no longer
 * supported, so this entry point should never be reached.
 */
kern_return_t
device_pager_synchronize(
	__unused memory_object_t        mem_obj,
	__unused memory_object_offset_t offset,
	__unused memory_object_size_t   length,
	__unused vm_sync_t              sync_flags)
{
	panic("device_pager_synchronize: memory_object_synchronize no longer supported\n");
	return KERN_FAILURE;
}

/*
 * device_pager_map: a mapping of this pager is being created; the first
 * mapping takes an extra reference that is held until the last unmap.
 */
kern_return_t
device_pager_map(
	memory_object_t         mem_obj,
	__unused vm_prot_t      prot)
{
	device_pager_t  device_object;

	device_object = device_pager_lookup(mem_obj);

	device_pager_lock(device_object);
	assert(device_pager_get_refcount(device_object) > 0);
	if (device_object->is_mapped == FALSE) {
		/*
		 * First mapping of this pager: take an extra reference
		 * that will remain until all the mappings of this pager
		 * are removed.
		 */
		device_object->is_mapped = TRUE;
		device_pager_reference(mem_obj);
	}
	device_pager_unlock(device_object);

	return KERN_SUCCESS;
}

kern_return_t
device_pager_last_unmap(
	memory_object_t         mem_obj)
{
	device_pager_t  device_object;
	boolean_t       drop_ref;

	device_object = device_pager_lookup(mem_obj);

	device_pager_lock(device_object);
	assert(device_pager_get_refcount(device_object) > 0);
	if (device_object->is_mapped) {
		device_object->is_mapped = FALSE;
		drop_ref = TRUE;
	} else {
		drop_ref = FALSE;
	}
	device_pager_unlock(device_object);

	if (drop_ref) {
		/* release the mapping reference taken in device_pager_map() */
		device_pager_deallocate(mem_obj);
	}

	return KERN_SUCCESS;
}


/*
 * device_object_create: allocate and initialize a new device pager
 * structure, returned with a single reference.
 */
device_pager_t
device_object_create(void)
{
	device_pager_t  device_object;

	device_object = (struct device_pager *) zalloc(device_pager_zone);
	if (device_object == DEVICE_PAGER_NULL) {
		return DEVICE_PAGER_NULL;
	}

	bzero(device_object, sizeof(*device_object));

	device_object->dev_pgr_hdr.mo_ikot = IKOT_MEMORY_OBJECT;
	device_object->dev_pgr_hdr.mo_pager_ops = &device_pager_ops;
	device_object->dev_pgr_hdr.mo_control = MEMORY_OBJECT_CONTROL_NULL;

	device_pager_lock_init(device_object);
	os_ref_init_raw(&device_object->dev_pgr_hdr_ref, NULL);
	device_object->is_mapped = FALSE;

	DTRACE_VM2(device_pager_create,
	    device_pager_t, device_object,
	    unsigned int, device_pager_get_refcount(device_object));

	return device_object;
}

boolean_t
is_device_pager_ops(const struct memory_object_pager_ops *pager_ops)
{
	if (pager_ops == &device_pager_ops) {
		return TRUE;
	}
	return FALSE;
}