/*
 * Copyright (c) 2008-2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <mach/kern_return.h>
#include <mach/memory_object_control.h>
#include <mach/upl.h>

#include <kern/ipc_kobject.h>
#include <kern/kalloc.h>
#include <kern/queue.h>

#include <vm/memory_object.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <vm/vm_pageout.h>
#include <vm/vm_protos.h>

/*
 * APPLE SWAPFILE MEMORY PAGER
 *
 * This external memory manager (EMM) handles mappings of the swap files.
 * Swap files are not regular files and are used solely to store contents of
 * anonymous memory mappings while not resident in memory.
 * There's no valid reason to map a swap file.  This just puts extra burden
 * on the system, is potentially a security issue and is not reliable since
 * the contents can change at any time with pageout operations.
 * Here are some of the issues with mapping a swap file.
 * * PERFORMANCE:
 *   Each page in the swap file belongs to an anonymous memory object.  Mapping
 *   the swap file makes those pages also accessible via a vnode memory
 *   object and each page can now be resident twice.
 * * SECURITY:
 *   Mapping a swap file allows access to other processes' memory.  Swap files
 *   are only accessible by the "root" super-user, who can already access any
 *   process's memory, so this is not a real issue but if permissions on the
 *   swap file got changed, it could become one.
 *   Swap files are not "zero-filled" on creation, so until their contents are
 *   overwritten with pageout operations, they still contain whatever was on
 *   the disk blocks they were allocated from.  The "super-user" could see the
 *   contents of free blocks anyway, so this is not a new security issue but
 *   it may be perceived as one.
 *
 * We can't legitimately prevent a user process with appropriate privileges
 * from mapping a swap file, but we can prevent it from accessing its actual
 * contents.
 * This pager mostly handles page-in requests (from memory_object_data_request())
 * for swap file mappings and just returns bogus data.
 * Pageouts are not handled, so mmap() has to make sure it does not allow
 * writable (i.e. MAP_SHARED and PROT_WRITE) mappings of swap files.
 */
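
/*
 * Illustration (hypothetical user-space sketch, not part of this file):
 * even a process that manages to map a swap file (read-only) only ever
 * sees the bogus page contents synthesized by this pager:
 *
 *	int fd = open("/private/var/vm/swapfile0", O_RDONLY);	// path is illustrative
 *	char *p = mmap(NULL, getpagesize(), PROT_READ, MAP_PRIVATE, fd, 0);
 *	assert(p[0] == '\0');			// zero-filled by the pager
 *	assert(p[getpagesize() - 1] == '\n');	// see swapfile_pager_data_request()
 */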

/* forward declarations */
void swapfile_pager_reference(memory_object_t mem_obj);
void swapfile_pager_deallocate(memory_object_t mem_obj);
kern_return_t swapfile_pager_init(memory_object_t mem_obj,
    memory_object_control_t control,
    memory_object_cluster_size_t pg_size);
kern_return_t swapfile_pager_terminate(memory_object_t mem_obj);
kern_return_t swapfile_pager_data_request(memory_object_t mem_obj,
    memory_object_offset_t offset,
    memory_object_cluster_size_t length,
    vm_prot_t protection_required,
    memory_object_fault_info_t fault_info);
kern_return_t swapfile_pager_data_return(memory_object_t mem_obj,
    memory_object_offset_t offset,
    memory_object_cluster_size_t data_cnt,
    memory_object_offset_t *resid_offset,
    int *io_error,
    boolean_t dirty,
    boolean_t kernel_copy,
    int upl_flags);
kern_return_t swapfile_pager_data_initialize(memory_object_t mem_obj,
    memory_object_offset_t offset,
    memory_object_cluster_size_t data_cnt);
kern_return_t swapfile_pager_data_unlock(memory_object_t mem_obj,
    memory_object_offset_t offset,
    memory_object_size_t size,
    vm_prot_t desired_access);
kern_return_t swapfile_pager_synchronize(memory_object_t mem_obj,
    memory_object_offset_t offset,
    memory_object_size_t length,
    vm_sync_t sync_flags);
kern_return_t swapfile_pager_map(memory_object_t mem_obj,
    vm_prot_t prot);
kern_return_t swapfile_pager_last_unmap(memory_object_t mem_obj);

/*
 * Vector of VM operations for this EMM.
 * These routines are invoked by VM via the memory_object_*() interfaces.
 */
const struct memory_object_pager_ops swapfile_pager_ops = {
	.memory_object_reference = swapfile_pager_reference,
	.memory_object_deallocate = swapfile_pager_deallocate,
	.memory_object_init = swapfile_pager_init,
	.memory_object_terminate = swapfile_pager_terminate,
	.memory_object_data_request = swapfile_pager_data_request,
	.memory_object_data_return = swapfile_pager_data_return,
	.memory_object_data_initialize = swapfile_pager_data_initialize,
	.memory_object_data_unlock = swapfile_pager_data_unlock,
	.memory_object_synchronize = swapfile_pager_synchronize,
	.memory_object_map = swapfile_pager_map,
	.memory_object_last_unmap = swapfile_pager_last_unmap,
	.memory_object_data_reclaim = NULL,
	.memory_object_backing_object = NULL,
	.memory_object_pager_name = "swapfile pager"
};

/*
 * The "swapfile_pager" describes a memory object backed by
 * the "swapfile" EMM.
 */
typedef struct swapfile_pager {
	/* mandatory generic header */
	struct memory_object swp_pgr_hdr;

	/* pager-specific data */
	queue_chain_t pager_queue;      /* next & prev pagers */
#if MEMORY_OBJECT_HAS_REFCOUNT
#define swp_pgr_hdr_ref swp_pgr_hdr.mo_ref
#else
	os_ref_atomic_t swp_pgr_hdr_ref;        /* reference count */
#endif
	bool is_ready;                  /* is this pager ready ? */
	bool is_mapped;                 /* is this pager mapped ? */
	struct vnode *swapfile_vnode;   /* the swapfile's vnode */
} *swapfile_pager_t;
#define SWAPFILE_PAGER_NULL     ((swapfile_pager_t) NULL)

/*
 * List of memory objects managed by this EMM.
 * The list is protected by the "swapfile_pager_lock" lock.
 */
int swapfile_pager_count = 0;           /* number of pagers */
queue_head_t swapfile_pager_queue = QUEUE_HEAD_INITIALIZER(swapfile_pager_queue);
LCK_GRP_DECLARE(swapfile_pager_lck_grp, "swapfile pager");
LCK_MTX_DECLARE(swapfile_pager_lock, &swapfile_pager_lck_grp);

/*
 * Statistics & counters.
 */
int swapfile_pager_count_max = 0;

/* internal prototypes */
swapfile_pager_t swapfile_pager_create(struct vnode *vp);
swapfile_pager_t swapfile_pager_lookup(memory_object_t mem_obj);
void swapfile_pager_dequeue(swapfile_pager_t pager);
void swapfile_pager_deallocate_internal(swapfile_pager_t pager,
    boolean_t locked);
void swapfile_pager_terminate_internal(swapfile_pager_t pager);


#if DEBUG
int swapfile_pagerdebug = 0;
#define PAGER_ALL               0xffffffff
#define PAGER_INIT              0x00000001
#define PAGER_PAGEIN            0x00000002

#define PAGER_DEBUG(LEVEL, A)                                           \
	MACRO_BEGIN                                                     \
	if ((swapfile_pagerdebug & (LEVEL)) == (LEVEL)) {               \
	        printf A;                                               \
	}                                                               \
	MACRO_END
#else
#define PAGER_DEBUG(LEVEL, A)
#endif
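
/*
 * Usage sketch (DEBUG kernels only; the assignment below is illustrative):
 * set "swapfile_pagerdebug" to a mask of the PAGER_* bits above to enable
 * the corresponding printf tracing, e.g.:
 *
 *	swapfile_pagerdebug = PAGER_INIT | PAGER_PAGEIN;
 *	PAGER_DEBUG(PAGER_PAGEIN, ("paging in at offset 0x%llx\n", offset));
 */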


/*
 * swapfile_pager_init()
 *
 * Initializes the memory object and makes it ready to be used and mapped.
 */
kern_return_t
swapfile_pager_init(
	memory_object_t mem_obj,
	memory_object_control_t control,
#if !DEBUG
	__unused
#endif
	memory_object_cluster_size_t pg_size)
{
	swapfile_pager_t pager;
	kern_return_t kr;
	memory_object_attr_info_data_t attributes;

	PAGER_DEBUG(PAGER_ALL,
	    ("swapfile_pager_init: %p, %p, %x\n",
	    mem_obj, control, pg_size));

	if (control == MEMORY_OBJECT_CONTROL_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	pager = swapfile_pager_lookup(mem_obj);

	memory_object_control_reference(control);

	pager->swp_pgr_hdr.mo_control = control;

	attributes.copy_strategy = MEMORY_OBJECT_COPY_DELAY;
	attributes.cluster_size = (1 << (PAGE_SHIFT));
	attributes.may_cache_object = FALSE;
	attributes.temporary = TRUE;

	kr = memory_object_change_attributes(
		control,
		MEMORY_OBJECT_ATTRIBUTE_INFO,
		(memory_object_info_t) &attributes,
		MEMORY_OBJECT_ATTR_INFO_COUNT);
	if (kr != KERN_SUCCESS) {
		panic("swapfile_pager_init: "
		    "memory_object_change_attributes() failed");
	}

	return KERN_SUCCESS;
}

/*
 * swapfile_pager_data_return()
 *
 * Handles page-out requests from VM.  This should never happen since
 * the pages provided by this EMM are not supposed to be dirty or dirtied
 * and VM should simply discard the contents and reclaim the pages if it
 * needs to.
 */
kern_return_t
swapfile_pager_data_return(
	__unused memory_object_t mem_obj,
	__unused memory_object_offset_t offset,
	__unused memory_object_cluster_size_t data_cnt,
	__unused memory_object_offset_t *resid_offset,
	__unused int *io_error,
	__unused boolean_t dirty,
	__unused boolean_t kernel_copy,
	__unused int upl_flags)
{
	panic("swapfile_pager_data_return: should never get called");
	return KERN_FAILURE;
}

kern_return_t
swapfile_pager_data_initialize(
	__unused memory_object_t mem_obj,
	__unused memory_object_offset_t offset,
	__unused memory_object_cluster_size_t data_cnt)
{
	panic("swapfile_pager_data_initialize: should never get called");
	return KERN_FAILURE;
}

kern_return_t
swapfile_pager_data_unlock(
	__unused memory_object_t mem_obj,
	__unused memory_object_offset_t offset,
	__unused memory_object_size_t size,
	__unused vm_prot_t desired_access)
{
	return KERN_FAILURE;
}

/*
 * swapfile_pager_data_request()
 *
 * Handles page-in requests from VM.
 */
kern_return_t
swapfile_pager_data_request(
	memory_object_t mem_obj,
	memory_object_offset_t offset,
	memory_object_cluster_size_t length,
#if !DEBUG
	__unused
#endif
	vm_prot_t protection_required,
	__unused memory_object_fault_info_t mo_fault_info)
{
	swapfile_pager_t pager;
	memory_object_control_t mo_control;
	upl_t upl;
	int upl_flags;
	upl_size_t upl_size;
	upl_page_info_t *upl_pl = NULL;
	unsigned int pl_count;
	vm_object_t dst_object;
	kern_return_t kr, retval;
	vm_map_offset_t kernel_mapping;
	vm_offset_t dst_vaddr;
	char *dst_ptr;
	vm_offset_t cur_offset;
	vm_map_entry_t map_entry;

	PAGER_DEBUG(PAGER_ALL, ("swapfile_pager_data_request: %p, %llx, %x, %x\n", mem_obj, offset, length, protection_required));

	kernel_mapping = 0;
	upl = NULL;
	upl_pl = NULL;

	pager = swapfile_pager_lookup(mem_obj);
	assert(pager->is_ready);
	assert(os_ref_get_count_raw(&pager->swp_pgr_hdr_ref) > 1); /* pager is alive and mapped */

	PAGER_DEBUG(PAGER_PAGEIN, ("swapfile_pager_data_request: %p, %llx, %x, %x, pager %p\n", mem_obj, offset, length, protection_required, pager));

	/*
	 * Gather in a UPL all the VM pages requested by VM.
	 */
	mo_control = pager->swp_pgr_hdr.mo_control;

	upl_size = length;
	upl_flags =
	    UPL_RET_ONLY_ABSENT |
	    UPL_SET_LITE |
	    UPL_NO_SYNC |
	    UPL_CLEAN_IN_PLACE |        /* triggers UPL_CLEAR_DIRTY */
	    UPL_SET_INTERNAL;
	pl_count = 0;
	kr = memory_object_upl_request(mo_control,
	    offset, upl_size,
	    &upl, NULL, NULL, upl_flags, VM_KERN_MEMORY_OSFMK);
	if (kr != KERN_SUCCESS) {
		retval = kr;
		goto done;
	}
	dst_object = memory_object_control_to_vm_object(mo_control);
	assert(dst_object != VM_OBJECT_NULL);

	/*
	 * Reserve a virtual page in the kernel address space to map each
	 * destination physical page when its turn comes to be processed.
	 */
	vm_object_reference(kernel_object);     /* ref. for mapping */
	kr = vm_map_find_space(kernel_map,
	    &kernel_mapping,
	    PAGE_SIZE_64,
	    0,
	    0,
	    VM_MAP_KERNEL_FLAGS_NONE,
	    VM_KERN_MEMORY_NONE,
	    &map_entry);
	if (kr != KERN_SUCCESS) {
		vm_object_deallocate(kernel_object);
		retval = kr;
		goto done;
	}
	VME_OBJECT_SET(map_entry, kernel_object);
	VME_OFFSET_SET(map_entry, kernel_mapping - VM_MIN_KERNEL_ADDRESS);
	vm_map_unlock(kernel_map);
	dst_vaddr = CAST_DOWN(vm_offset_t, kernel_mapping);
	dst_ptr = (char *) dst_vaddr;

	/*
	 * Fill in the contents of the pages requested by VM.
	 */
	upl_pl = UPL_GET_INTERNAL_PAGE_LIST(upl);
	pl_count = length / PAGE_SIZE;
	for (cur_offset = 0; cur_offset < length; cur_offset += PAGE_SIZE) {
		ppnum_t dst_pnum;

		if (!upl_page_present(upl_pl, (int)(cur_offset / PAGE_SIZE))) {
			/* this page is not in the UPL: skip it */
			continue;
		}

		/*
		 * Establish an explicit pmap mapping of the destination
		 * physical page.
		 * We can't do a regular VM mapping because the VM page
		 * is "busy".
		 */
		dst_pnum = (ppnum_t)
		    upl_phys_page(upl_pl, (int)(cur_offset / PAGE_SIZE));
		assert(dst_pnum != 0);
		retval = pmap_enter(kernel_pmap,
		    kernel_mapping,
		    dst_pnum,
		    VM_PROT_READ | VM_PROT_WRITE,
		    VM_PROT_NONE,
		    0,
		    TRUE);

		assert(retval == KERN_SUCCESS);

		if (retval != KERN_SUCCESS) {
			goto done;
		}

		memset(dst_ptr, '\0', PAGE_SIZE);
		/* add an end-of-line to keep line counters happy */
		dst_ptr[PAGE_SIZE - 1] = '\n';

		/*
		 * Remove the pmap mapping of the destination page
		 * in the kernel.
		 */
		pmap_remove(kernel_pmap,
		    (addr64_t) kernel_mapping,
		    (addr64_t) (kernel_mapping + PAGE_SIZE_64));
	}

	retval = KERN_SUCCESS;
done:
	if (upl != NULL) {
		/* clean up the UPL */

		/*
		 * The pages are currently dirty because we've just been
		 * writing on them, but as far as we're concerned, they're
		 * clean since they contain their "original" contents as
		 * provided by us, the pager.
		 * Tell the UPL to mark them "clean".
		 */
		upl_clear_dirty(upl, TRUE);

		/* abort or commit the UPL */
		if (retval != KERN_SUCCESS) {
			upl_abort(upl, 0);
		} else {
			boolean_t empty;
			assertf(page_aligned(upl->u_offset) && page_aligned(upl->u_size),
			    "upl %p offset 0x%llx size 0x%x",
			    upl, upl->u_offset, upl->u_size);
			upl_commit_range(upl, 0, upl->u_size,
			    UPL_COMMIT_CS_VALIDATED,
			    upl_pl, pl_count, &empty);
		}

		/* and deallocate the UPL */
		upl_deallocate(upl);
		upl = NULL;
	}
	if (kernel_mapping != 0) {
		/* clean up the kernel mapping of the destination pages */
		kr = vm_map_remove(kernel_map,
		    kernel_mapping,
		    kernel_mapping + PAGE_SIZE_64,
		    VM_MAP_REMOVE_NO_FLAGS);
		assert(kr == KERN_SUCCESS);
		kernel_mapping = 0;
		dst_vaddr = 0;
	}

	return retval;
}

/*
 * swapfile_pager_reference()
 *
 * Get a reference on this memory object.
 * For external usage only.  Assumes that the initial reference count is not 0,
 * i.e. one should not "revive" a dead pager this way.
 */
void
swapfile_pager_reference(
	memory_object_t mem_obj)
{
	swapfile_pager_t pager;

	pager = swapfile_pager_lookup(mem_obj);

	lck_mtx_lock(&swapfile_pager_lock);
	os_ref_retain_locked_raw(&pager->swp_pgr_hdr_ref, NULL);
	lck_mtx_unlock(&swapfile_pager_lock);
}


/*
 * swapfile_pager_dequeue:
 *
 * Removes a pager from the list of pagers.
 *
 * The caller must hold "swapfile_pager_lock".
 */
void
swapfile_pager_dequeue(
	swapfile_pager_t pager)
{
	assert(!pager->is_mapped);

	queue_remove(&swapfile_pager_queue,
	    pager,
	    swapfile_pager_t,
	    pager_queue);
	pager->pager_queue.next = NULL;
	pager->pager_queue.prev = NULL;

	swapfile_pager_count--;
}

/*
 * swapfile_pager_terminate_internal:
 *
 * Trigger the asynchronous termination of the memory object associated
 * with this pager.
 * When the memory object is terminated, there will be one more call
 * to memory_object_deallocate() (i.e. swapfile_pager_deallocate())
 * to finish the clean up.
 *
 * "swapfile_pager_lock" should not be held by the caller.
 * We don't need the lock because the pager has already been removed from
 * the pagers' list and is now ours exclusively.
 */
void
swapfile_pager_terminate_internal(
	swapfile_pager_t pager)
{
	assert(pager->is_ready);
	assert(!pager->is_mapped);

	if (pager->swapfile_vnode != NULL) {
		pager->swapfile_vnode = NULL;
	}

	/* trigger the destruction of the memory object */
	memory_object_destroy(pager->swp_pgr_hdr.mo_control, 0);
}

/*
 * swapfile_pager_deallocate_internal()
 *
 * Release a reference on this pager and free it when the last
 * reference goes away.
 * Can be called with swapfile_pager_lock held or not, but always returns
 * with it unlocked.
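 *
 * Reference convention (a sketch of what the code below relies on):
 *   count > 1  : external references and/or the "mapped" reference remain;
 *                nothing to do beyond dropping the reference.
 *   count == 1 : only the "named" reference is left; dequeue the pager and
 *                trigger the termination of its memory object.
 *   count == 0 : the existence reference is gone; release the control port
 *                reference and free the pager structure.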
 */
void
swapfile_pager_deallocate_internal(
	swapfile_pager_t pager,
	boolean_t locked)
{
	os_ref_count_t ref_count;

	if (!locked) {
		lck_mtx_lock(&swapfile_pager_lock);
	}

	/* drop a reference on this pager */
	ref_count = os_ref_release_locked_raw(&pager->swp_pgr_hdr_ref, NULL);

	if (ref_count == 1) {
		/*
		 * Only the "named" reference is left, which means that
		 * no one is really holding on to this pager anymore.
		 * Terminate it.
		 */
		swapfile_pager_dequeue(pager);
		/* the pager is all ours: no need for the lock now */
		lck_mtx_unlock(&swapfile_pager_lock);
		swapfile_pager_terminate_internal(pager);
	} else if (ref_count == 0) {
		/*
		 * Dropped the existence reference; the memory object has
		 * been terminated.  Do some final cleanup and release the
		 * pager structure.
		 */
		lck_mtx_unlock(&swapfile_pager_lock);
		if (pager->swp_pgr_hdr.mo_control != MEMORY_OBJECT_CONTROL_NULL) {
			memory_object_control_deallocate(pager->swp_pgr_hdr.mo_control);
			pager->swp_pgr_hdr.mo_control = MEMORY_OBJECT_CONTROL_NULL;
		}
		kfree(pager, sizeof(*pager));
		pager = SWAPFILE_PAGER_NULL;
	} else {
		/* there are still plenty of references: keep going... */
		lck_mtx_unlock(&swapfile_pager_lock);
	}

	/* caution: lock is not held on return... */
}

/*
 * swapfile_pager_deallocate()
 *
 * Release a reference on this pager and free it when the last
 * reference goes away.
 */
void
swapfile_pager_deallocate(
	memory_object_t mem_obj)
{
	swapfile_pager_t pager;

	PAGER_DEBUG(PAGER_ALL, ("swapfile_pager_deallocate: %p\n", mem_obj));
	pager = swapfile_pager_lookup(mem_obj);
	swapfile_pager_deallocate_internal(pager, FALSE);
}

/*
 * swapfile_pager_terminate()
 */
kern_return_t
swapfile_pager_terminate(
#if !DEBUG
	__unused
#endif
	memory_object_t mem_obj)
{
	PAGER_DEBUG(PAGER_ALL, ("swapfile_pager_terminate: %p\n", mem_obj));

	return KERN_SUCCESS;
}

/*
 * swapfile_pager_synchronize()
 */
kern_return_t
swapfile_pager_synchronize(
	__unused memory_object_t mem_obj,
	__unused memory_object_offset_t offset,
	__unused memory_object_size_t length,
	__unused vm_sync_t sync_flags)
{
	panic("swapfile_pager_synchronize: memory_object_synchronize no longer supported\n");
	return KERN_FAILURE;
}

/*
 * swapfile_pager_map()
 *
 * This allows VM to let us, the EMM, know that this memory object
 * is currently mapped one or more times.  This is called by VM each time
 * the memory object gets mapped and we take one extra reference on the
 * memory object to account for all its mappings.
 */
kern_return_t
swapfile_pager_map(
	memory_object_t mem_obj,
	__unused vm_prot_t prot)
{
	swapfile_pager_t pager;

	PAGER_DEBUG(PAGER_ALL, ("swapfile_pager_map: %p\n", mem_obj));

	pager = swapfile_pager_lookup(mem_obj);

	lck_mtx_lock(&swapfile_pager_lock);
	assert(pager->is_ready);
	assert(os_ref_get_count_raw(&pager->swp_pgr_hdr_ref) > 0); /* pager is alive */
	if (pager->is_mapped == FALSE) {
		/*
		 * First mapping of this pager: take an extra reference
		 * that will remain until all the mappings of this pager
		 * are removed.
		 */
		pager->is_mapped = TRUE;
		os_ref_retain_locked_raw(&pager->swp_pgr_hdr_ref, NULL);
	}
	lck_mtx_unlock(&swapfile_pager_lock);

	return KERN_SUCCESS;
}

/*
 * swapfile_pager_last_unmap()
 *
 * This is called by VM when this memory object is no longer mapped anywhere.
 */
kern_return_t
swapfile_pager_last_unmap(
	memory_object_t mem_obj)
{
	swapfile_pager_t pager;

	PAGER_DEBUG(PAGER_ALL,
	    ("swapfile_pager_last_unmap: %p\n", mem_obj));

	pager = swapfile_pager_lookup(mem_obj);

	lck_mtx_lock(&swapfile_pager_lock);
	if (pager->is_mapped) {
		/*
		 * All the mappings are gone, so let go of the one extra
		 * reference that represents all the mappings of this pager.
		 */
		pager->is_mapped = FALSE;
		swapfile_pager_deallocate_internal(pager, TRUE);
		/* caution: deallocate_internal() released the lock ! */
	} else {
		lck_mtx_unlock(&swapfile_pager_lock);
	}

	return KERN_SUCCESS;
}

/*
 * swapfile_pager_lookup()
 */
swapfile_pager_t
swapfile_pager_lookup(
	memory_object_t mem_obj)
{
	swapfile_pager_t pager;

	assert(mem_obj->mo_pager_ops == &swapfile_pager_ops);
	__IGNORE_WCASTALIGN(pager = (swapfile_pager_t) mem_obj);
	assert(os_ref_get_count_raw(&pager->swp_pgr_hdr_ref) > 0);
	return pager;
}

swapfile_pager_t
swapfile_pager_create(
	struct vnode *vp)
{
	swapfile_pager_t pager, pager2;
	memory_object_control_t control;
	kern_return_t kr;

	pager = (swapfile_pager_t) kalloc(sizeof(*pager));
	if (pager == SWAPFILE_PAGER_NULL) {
		return SWAPFILE_PAGER_NULL;
	}

	/*
	 * The vm_map call takes both named entry ports and raw memory
	 * objects in the same parameter.  We need to make sure that
	 * vm_map does not see this object as a named entry port.  So,
	 * we reserve the second word in the object for a fake ip_kotype
	 * setting - that will tell vm_map to use it as a memory object.
	 */
	pager->swp_pgr_hdr.mo_ikot = IKOT_MEMORY_OBJECT;
	pager->swp_pgr_hdr.mo_pager_ops = &swapfile_pager_ops;
	pager->swp_pgr_hdr.mo_control = MEMORY_OBJECT_CONTROL_NULL;

	pager->is_ready = FALSE;        /* not ready until it has a "name" */
	os_ref_init_raw(&pager->swp_pgr_hdr_ref, NULL); /* setup reference */
	pager->is_mapped = FALSE;
	pager->swapfile_vnode = vp;

	lck_mtx_lock(&swapfile_pager_lock);
	/* see if anyone raced us to create a pager for the same object */
	queue_iterate(&swapfile_pager_queue,
	    pager2,
	    swapfile_pager_t,
	    pager_queue) {
		if (pager2->swapfile_vnode == vp) {
			break;
		}
	}
	if (!queue_end(&swapfile_pager_queue,
	    (queue_entry_t) pager2)) {
		/* while we hold the lock, transfer our setup ref to winner */
		os_ref_retain_locked_raw(&pager2->swp_pgr_hdr_ref, NULL);
		/* we lost the race, down with the loser... */
		lck_mtx_unlock(&swapfile_pager_lock);
		pager->swapfile_vnode = NULL;
		kfree(pager, sizeof(*pager));
		/* ... and go with the winner */
		pager = pager2;
		/* let the winner make sure the pager gets ready */
		return pager;
	}

	/* enter new pager at the head of our list of pagers */
	queue_enter_first(&swapfile_pager_queue,
	    pager,
	    swapfile_pager_t,
	    pager_queue);
	swapfile_pager_count++;
	if (swapfile_pager_count > swapfile_pager_count_max) {
		swapfile_pager_count_max = swapfile_pager_count;
	}
	lck_mtx_unlock(&swapfile_pager_lock);

	kr = memory_object_create_named((memory_object_t) pager,
	    0,
	    &control);
	assert(kr == KERN_SUCCESS);

	memory_object_mark_trusted(control);

	lck_mtx_lock(&swapfile_pager_lock);
	/* the new pager is now ready to be used */
	pager->is_ready = TRUE;
	lck_mtx_unlock(&swapfile_pager_lock);

	/* wakeup anyone waiting for this pager to be ready */
	thread_wakeup(&pager->is_ready);

	return pager;
}

/*
 * swapfile_pager_setup()
 *
 * Provide the caller with a memory object backed by the provided
 * swap file's vnode.  If such a memory object already exists, re-use it,
 * otherwise create a new memory object.
 */
memory_object_t
swapfile_pager_setup(
	struct vnode *vp)
{
	swapfile_pager_t pager;

	lck_mtx_lock(&swapfile_pager_lock);

	queue_iterate(&swapfile_pager_queue,
	    pager,
	    swapfile_pager_t,
	    pager_queue) {
		if (pager->swapfile_vnode == vp) {
			break;
		}
	}
	if (queue_end(&swapfile_pager_queue,
	    (queue_entry_t) pager)) {
		/* no existing pager for this backing object */
		pager = SWAPFILE_PAGER_NULL;
	} else {
		/* make sure pager doesn't disappear */
		os_ref_retain_raw(&pager->swp_pgr_hdr_ref, NULL);
	}

	lck_mtx_unlock(&swapfile_pager_lock);

	if (pager == SWAPFILE_PAGER_NULL) {
		pager = swapfile_pager_create(vp);
		if (pager == SWAPFILE_PAGER_NULL) {
			return MEMORY_OBJECT_NULL;
		}
	}

	lck_mtx_lock(&swapfile_pager_lock);
	while (!pager->is_ready) {
		lck_mtx_sleep(&swapfile_pager_lock,
		    LCK_SLEEP_DEFAULT,
		    &pager->is_ready,
		    THREAD_UNINT);
	}
	lck_mtx_unlock(&swapfile_pager_lock);

	return (memory_object_t) pager;
}

memory_object_control_t
swapfile_pager_control(
	memory_object_t mem_obj)
{
	swapfile_pager_t pager;

	if (mem_obj == MEMORY_OBJECT_NULL ||
	    mem_obj->mo_pager_ops != &swapfile_pager_ops) {
		return MEMORY_OBJECT_CONTROL_NULL;
	}
	pager = swapfile_pager_lookup(mem_obj);
	return pager->swp_pgr_hdr.mo_control;
}