/* osfmk/vm/device_vm.c (apple/xnu, xnu-6153.81.5) */
/*
 * Copyright (c) 2000-2019 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <sys/errno.h>

#include <mach/mach_types.h>
#include <mach/kern_return.h>
#include <mach/memory_object_control.h>
#include <mach/memory_object_types.h>
#include <mach/port.h>
#include <mach/policy.h>
#include <mach/upl.h>
#include <kern/kern_types.h>
#include <kern/ipc_kobject.h>
#include <kern/host.h>
#include <kern/thread.h>
#include <ipc/ipc_port.h>
#include <ipc/ipc_space.h>
#include <device/device_port.h>
#include <vm/memory_object.h>
#include <vm/vm_pageout.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_protos.h>
#include <mach/sdt.h>
#include <os/refcnt.h>


/* Device VM COMPONENT INTERFACES */


/*
 * Device PAGER
 */


/* until component support available */
const struct memory_object_pager_ops device_pager_ops = {
	.memory_object_reference = device_pager_reference,
	.memory_object_deallocate = device_pager_deallocate,
	.memory_object_init = device_pager_init,
	.memory_object_terminate = device_pager_terminate,
	.memory_object_data_request = device_pager_data_request,
	.memory_object_data_return = device_pager_data_return,
	.memory_object_data_initialize = device_pager_data_initialize,
	.memory_object_data_unlock = device_pager_data_unlock,
	.memory_object_synchronize = device_pager_synchronize,
	.memory_object_map = device_pager_map,
	.memory_object_last_unmap = device_pager_last_unmap,
	.memory_object_data_reclaim = NULL,
	.memory_object_pager_name = "device pager"
};

typedef uintptr_t device_port_t;

/*
 * The start of "struct device_pager" MUST match a "struct memory_object".
 */
typedef struct device_pager {
	/* mandatory generic header */
	struct memory_object   dev_pgr_hdr;

	/* pager-specific data */
	lck_mtx_t              lock;
	struct os_refcnt       ref_count;      /* reference count */
	device_port_t          device_handle;  /* device_handle */
	vm_size_t              size;
	int                    flags;
	boolean_t              is_mapped;
} *device_pager_t;
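
/*
 * Illustrative sketch (editor's note, not part of the original source):
 * because a device_pager_t is handed to the VM layer as a memory_object_t,
 * the generic header above must be the very first member of the structure.
 * A compile-time check of that invariant could look like:
 *
 *	_Static_assert(__builtin_offsetof(struct device_pager, dev_pgr_hdr) == 0,
 *	    "struct device_pager must start with its memory_object header");
 */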

lck_grp_t       device_pager_lck_grp;
lck_grp_attr_t  device_pager_lck_grp_attr;
lck_attr_t      device_pager_lck_attr;

#define device_pager_lock_init(pager) \
	lck_mtx_init(&(pager)->lock, \
	    &device_pager_lck_grp, \
	    &device_pager_lck_attr)
#define device_pager_lock_destroy(pager) \
	lck_mtx_destroy(&(pager)->lock, &device_pager_lck_grp)
#define device_pager_lock(pager) lck_mtx_lock(&(pager)->lock)
#define device_pager_unlock(pager) lck_mtx_unlock(&(pager)->lock)

device_pager_t
device_pager_lookup(            /* forward */
	memory_object_t);

device_pager_t
device_object_create(void);     /* forward */

zone_t  device_pager_zone;


#define DEVICE_PAGER_NULL       ((device_pager_t) 0)

/* upper bound on the number of device pager structures backing the zone */
#define MAX_DNODE               10000

/*
 * Initialize the zone and the lock group/attributes used for device
 * pager structures.
 */
void
device_pager_bootstrap(void)
{
	vm_size_t size;

	size = (vm_size_t) sizeof(struct device_pager);
	device_pager_zone = zinit(size, (vm_size_t) MAX_DNODE * size,
	    PAGE_SIZE, "device node pager structures");
	zone_change(device_pager_zone, Z_CALLERACCT, FALSE);

	lck_grp_attr_setdefault(&device_pager_lck_grp_attr);
	lck_grp_init(&device_pager_lck_grp, "device_pager", &device_pager_lck_grp_attr);
	lck_attr_setdefault(&device_pager_lck_attr);

	return;
}

/*
 * Create a device pager for the given device handle, create the named
 * VM object that backs it, and mark that object as truly shared so it
 * is not copied symmetrically.
 */
memory_object_t
device_pager_setup(
	__unused memory_object_t device,
	uintptr_t       device_handle,
	vm_size_t       size,
	int             flags)
{
	device_pager_t  device_object;
	memory_object_control_t control;
	vm_object_t     object;

	device_object = device_object_create();
	if (device_object == DEVICE_PAGER_NULL) {
		panic("device_pager_setup: device_object_create() failed");
	}

	device_object->device_handle = device_handle;
	device_object->size = size;
	device_object->flags = flags;

	memory_object_create_named((memory_object_t) device_object,
	    size,
	    &control);
	object = memory_object_control_to_vm_object(control);

	memory_object_mark_trusted(control);

	assert(object != VM_OBJECT_NULL);
	vm_object_lock(object);
	object->true_share = TRUE;
	if (object->copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC) {
		object->copy_strategy = MEMORY_OBJECT_COPY_DELAY;
	}
	vm_object_unlock(object);

	return (memory_object_t)device_object;
}

/*
 * Enter the physical page "page_num" into the pager's backing VM object
 * at "offset".  For objects that are not physically contiguous, a UPL
 * covering the range is requested and immediately committed.
 */
kern_return_t
device_pager_populate_object(
	memory_object_t         device,
	memory_object_offset_t  offset,
	ppnum_t                 page_num,
	vm_size_t               size)
{
	device_pager_t  device_object;
	vm_object_t     vm_object;
	kern_return_t   kr;
	upl_t           upl;

	device_object = device_pager_lookup(device);
	if (device_object == DEVICE_PAGER_NULL) {
		return KERN_FAILURE;
	}

	vm_object = (vm_object_t)memory_object_control_to_vm_object(
		device_object->dev_pgr_hdr.mo_control);
	if (vm_object == NULL) {
		return KERN_FAILURE;
	}

	kr = vm_object_populate_with_private(
		vm_object, offset, page_num, size);
	if (kr != KERN_SUCCESS) {
		return kr;
	}

	if (!vm_object->phys_contiguous) {
		unsigned int null_size = 0;
		assert((upl_size_t) size == size);
		kr = vm_object_upl_request(vm_object,
		    (vm_object_offset_t)offset,
		    (upl_size_t) size, &upl, NULL,
		    &null_size,
		    (UPL_NO_SYNC | UPL_CLEAN_IN_PLACE),
		    VM_KERN_MEMORY_NONE);
		if (kr != KERN_SUCCESS) {
			panic("device_pager_populate_object: list_req failed");
		}

		upl_commit(upl, NULL, 0);
		upl_deallocate(upl);
	}

	return kr;
}
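
/*
 * Usage sketch (editor's illustration, not part of the original source).
 * A kernel caller such as the IOKit memory-descriptor code would use the
 * two routines above roughly as follows; "my_device_handle", "region_size",
 * "offset" and "phys_page" are hypothetical names:
 *
 *	memory_object_t pager;
 *	kern_return_t kr;
 *
 *	// Create the pager; the handle is echoed back to device_data_action()
 *	// on every page-in/page-out, and the flags are consumed by
 *	// device_pager_init() when the VM layer first attaches the object.
 *	pager = device_pager_setup(MEMORY_OBJECT_NULL, my_device_handle,
 *	    round_page(region_size), DEVICE_PAGER_CONTIGUOUS);
 *
 *	// Map "pager" into a task, then enter physical pages as they become
 *	// known, one range at a time:
 *	kr = device_pager_populate_object(pager, offset, phys_page, PAGE_SIZE);
 */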

/*
 * Convert a memory object that is known to be a device pager back into a
 * device_pager_t, sanity-checking its pager ops and reference count.
 */
device_pager_t
device_pager_lookup(
	memory_object_t mem_obj)
{
	device_pager_t  device_object;

	assert(mem_obj->mo_pager_ops == &device_pager_ops);
	device_object = (device_pager_t)mem_obj;
	assert(os_ref_get_count(&device_object->ref_count) > 0);
	return device_object;
}

/*
 * First contact from the VM layer: record the memory object control,
 * transfer the pager's flags (contiguity, caching, WIMG bits) onto the
 * backing VM object, and set the object's paging attributes.
 */
kern_return_t
device_pager_init(
	memory_object_t mem_obj,
	memory_object_control_t control,
	__unused memory_object_cluster_size_t pg_size)
{
	device_pager_t  device_object;
	kern_return_t   kr;
	memory_object_attr_info_data_t  attributes;

	vm_object_t     vm_object;

	if (control == MEMORY_OBJECT_CONTROL_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	device_object = device_pager_lookup(mem_obj);

	memory_object_control_reference(control);
	device_object->dev_pgr_hdr.mo_control = control;

	/*
	 * The following settings should be done through an expanded
	 * change-attributes call.
	 */
	vm_object = (vm_object_t)memory_object_control_to_vm_object(control);
	vm_object_lock(vm_object);
	vm_object->private = TRUE;
	if (device_object->flags & DEVICE_PAGER_CONTIGUOUS) {
		vm_object->phys_contiguous = TRUE;
	}
	if (device_object->flags & DEVICE_PAGER_NOPHYSCACHE) {
		vm_object->nophyscache = TRUE;
	}

	vm_object->wimg_bits = device_object->flags & VM_WIMG_MASK;
	vm_object_unlock(vm_object);

	attributes.copy_strategy = MEMORY_OBJECT_COPY_DELAY;
	/* attributes.cluster_size = (1 << (CLUSTER_SHIFT + PAGE_SHIFT));*/
	attributes.cluster_size = (1 << (PAGE_SHIFT));
	attributes.may_cache_object = FALSE;
	attributes.temporary = TRUE;

	kr = memory_object_change_attributes(
		control,
		MEMORY_OBJECT_ATTRIBUTE_INFO,
		(memory_object_info_t) &attributes,
		MEMORY_OBJECT_ATTR_INFO_COUNT);
	if (kr != KERN_SUCCESS) {
		panic("device_pager_init: memory_object_change_attributes() failed");
	}

	return KERN_SUCCESS;
}
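
/*
 * Flag handling sketch (editor's note, not part of the original source):
 * the "flags" word given to device_pager_setup() is interpreted above as
 *
 *	DEVICE_PAGER_CONTIGUOUS  -> vm_object->phys_contiguous = TRUE
 *	DEVICE_PAGER_NOPHYSCACHE -> vm_object->nophyscache = TRUE
 *	flags & VM_WIMG_MASK     -> vm_object->wimg_bits (cache mode)
 *
 * so a caller wanting, say, uncached device memory might pass something
 * like (DEVICE_PAGER_CONTIGUOUS | VM_WIMG_IO), assuming the chosen WIMG
 * value fits within VM_WIMG_MASK as it does elsewhere in the VM code.
 */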

/*
 * Handle a data-return (pageout) request by forwarding it to the device
 * via device_data_action().
 */
/*ARGSUSED6*/
kern_return_t
device_pager_data_return(
	memory_object_t                 mem_obj,
	memory_object_offset_t          offset,
	memory_object_cluster_size_t    data_cnt,
	__unused memory_object_offset_t *resid_offset,
	__unused int                    *io_error,
	__unused boolean_t              dirty,
	__unused boolean_t              kernel_copy,
	__unused int                    upl_flags)
{
	device_pager_t  device_object;

	device_object = device_pager_lookup(mem_obj);
	if (device_object == DEVICE_PAGER_NULL) {
		panic("device_pager_data_return: lookup failed");
	}

	__IGNORE_WCASTALIGN(return device_data_action(device_object->device_handle,
	    (ipc_port_t) device_object,
	    VM_PROT_READ | VM_PROT_WRITE,
	    offset, data_cnt));
}

/*
 * Handle a page-in request by forwarding it to the device via
 * device_data_action().
 */
kern_return_t
device_pager_data_request(
	memory_object_t         mem_obj,
	memory_object_offset_t  offset,
	memory_object_cluster_size_t length,
	__unused vm_prot_t      protection_required,
	__unused memory_object_fault_info_t fault_info)
{
	device_pager_t  device_object;

	device_object = device_pager_lookup(mem_obj);

	if (device_object == DEVICE_PAGER_NULL) {
		panic("device_pager_data_request: lookup failed");
	}

	__IGNORE_WCASTALIGN(device_data_action(device_object->device_handle,
	    (ipc_port_t) device_object,
	    VM_PROT_READ, offset, length));
	return KERN_SUCCESS;
}

/*
 * Take an additional reference on the pager.
 */
void
device_pager_reference(
	memory_object_t         mem_obj)
{
	device_pager_t          device_object;

	device_object = device_pager_lookup(mem_obj);
	os_ref_retain(&device_object->ref_count);
	DTRACE_VM2(device_pager_reference,
	    device_pager_t, device_object,
	    unsigned int, os_ref_get_count(&device_object->ref_count));
}

/*
 * Release one reference on the pager.  When only the "named" reference
 * remains, close the device and destroy the backing VM object; when the
 * last reference is gone, free the pager structure itself.
 */
void
device_pager_deallocate(
	memory_object_t         mem_obj)
{
	device_pager_t          device_object;
	memory_object_control_t device_control;

	device_object = device_pager_lookup(mem_obj);

	DTRACE_VM2(device_pager_deallocate,
	    device_pager_t, device_object,
	    unsigned int, os_ref_get_count(&device_object->ref_count));

	os_ref_count_t ref_count = os_ref_release(&device_object->ref_count);

	if (ref_count == 1) {
		/*
		 * The last reference is our "named" reference.
		 * Close the device and "destroy" the VM object.
		 */

		DTRACE_VM2(device_pager_destroy,
		    device_pager_t, device_object,
		    unsigned int, os_ref_get_count(&device_object->ref_count));

		assert(device_object->is_mapped == FALSE);
		if (device_object->device_handle != (device_port_t) NULL) {
			device_close(device_object->device_handle);
			device_object->device_handle = (device_port_t) NULL;
		}
		device_control = device_object->dev_pgr_hdr.mo_control;
		memory_object_destroy(device_control, 0);
	} else if (ref_count == 0) {
		/*
		 * No more references: free the pager.
		 */
		DTRACE_VM2(device_pager_free,
		    device_pager_t, device_object,
		    unsigned int, os_ref_get_count(&device_object->ref_count));

		device_pager_lock_destroy(device_object);

		zfree(device_pager_zone, device_object);
	}
	return;
}

kern_return_t
device_pager_data_initialize(
	__unused memory_object_t                mem_obj,
	__unused memory_object_offset_t         offset,
	__unused memory_object_cluster_size_t   data_cnt)
{
	panic("device_pager_data_initialize");
	return KERN_FAILURE;
}

kern_return_t
device_pager_data_unlock(
	__unused memory_object_t        mem_obj,
	__unused memory_object_offset_t offset,
	__unused memory_object_size_t   size,
	__unused vm_prot_t              desired_access)
{
	return KERN_FAILURE;
}

kern_return_t
device_pager_terminate(
	__unused memory_object_t        mem_obj)
{
	return KERN_SUCCESS;
}


/*
 * memory_object_synchronize is no longer supported; any call is fatal.
 */
kern_return_t
device_pager_synchronize(
	__unused memory_object_t        mem_obj,
	__unused memory_object_offset_t offset,
	__unused memory_object_size_t   length,
	__unused vm_sync_t              sync_flags)
{
	panic("device_pager_synchronize: memory_object_synchronize no longer supported\n");
	return KERN_FAILURE;
}

/*
 * Account for a new mapping of this pager: the first mapping takes an
 * extra reference that is dropped by device_pager_last_unmap().
 */
kern_return_t
device_pager_map(
	memory_object_t         mem_obj,
	__unused vm_prot_t      prot)
{
	device_pager_t          device_object;

	device_object = device_pager_lookup(mem_obj);

	device_pager_lock(device_object);
	assert(os_ref_get_count(&device_object->ref_count) > 0);
	if (device_object->is_mapped == FALSE) {
		/*
		 * First mapping of this pager: take an extra reference
		 * that will remain until all the mappings of this pager
		 * are removed.
		 */
		device_object->is_mapped = TRUE;
		device_pager_reference(mem_obj);
	}
	device_pager_unlock(device_object);

	return KERN_SUCCESS;
}

/*
 * The last mapping of this pager is gone: drop the extra reference taken
 * by device_pager_map().
 */
kern_return_t
device_pager_last_unmap(
	memory_object_t         mem_obj)
{
	device_pager_t          device_object;
	boolean_t               drop_ref;

	device_object = device_pager_lookup(mem_obj);

	device_pager_lock(device_object);
	assert(os_ref_get_count(&device_object->ref_count) > 0);
	if (device_object->is_mapped) {
		device_object->is_mapped = FALSE;
		drop_ref = TRUE;
	} else {
		drop_ref = FALSE;
	}
	device_pager_unlock(device_object);

	if (drop_ref) {
		device_pager_deallocate(mem_obj);
	}

	return KERN_SUCCESS;
}
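
/*
 * Reference lifecycle sketch (editor's note, not part of the original
 * source), tying device_pager_map()/device_pager_last_unmap() above to
 * device_pager_deallocate():
 *
 *	device_object_create()          ref_count == 1  (the "named" reference)
 *	device_pager_map(), first map   ref_count == 2  (extra mapping reference)
 *	device_pager_last_unmap()       ref_count == 1  (mapping reference dropped:
 *	                                                 device closed, backing VM
 *	                                                 object destroyed)
 *	VM object termination           ref_count == 0  (named reference dropped:
 *	                                                 pager structure freed)
 */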


/*
 * Allocate and initialize a fresh device pager structure from the zone.
 */
device_pager_t
device_object_create(void)
{
	device_pager_t  device_object;

	device_object = (struct device_pager *) zalloc(device_pager_zone);
	if (device_object == DEVICE_PAGER_NULL) {
		return DEVICE_PAGER_NULL;
	}

	bzero(device_object, sizeof(*device_object));

	device_object->dev_pgr_hdr.mo_ikot = IKOT_MEMORY_OBJECT;
	device_object->dev_pgr_hdr.mo_pager_ops = &device_pager_ops;
	device_object->dev_pgr_hdr.mo_control = MEMORY_OBJECT_CONTROL_NULL;

	device_pager_lock_init(device_object);
	os_ref_init(&device_object->ref_count, NULL);
	device_object->is_mapped = FALSE;

	DTRACE_VM2(device_pager_create,
	    device_pager_t, device_object,
	    unsigned int, os_ref_get_count(&device_object->ref_count));

	return device_object;
}

boolean_t
is_device_pager_ops(const struct memory_object_pager_ops *pager_ops)
{
	if (pager_ops == &device_pager_ops) {
		return TRUE;
	}
	return FALSE;
}