/*
 * Copyright (c) 2008-2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <mach/kern_return.h>
#include <mach/memory_object_control.h>

#include <kern/ipc_kobject.h>
#include <kern/kalloc.h>
#include <kern/queue.h>

#include <vm/memory_object.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <vm/vm_pageout.h>
#include <vm/vm_protos.h>
/*
 * APPLE SWAPFILE MEMORY PAGER
 *
 * This external memory manager (EMM) handles mappings of the swap files.
 * Swap files are not regular files and are used solely to store the contents
 * of anonymous memory mappings while they are not resident in memory.
 * There's no valid reason to map a swap file.  This just puts extra burden
 * on the system, is potentially a security issue and is not reliable since
 * the contents can change at any time with pageout operations.
 * Here are some of the issues with mapping a swap file:
 *
 * Each page in the swap file belongs to an anonymous memory object.  Mapping
 * the swap file makes those pages also accessible via a vnode memory
 * object and each page can now be resident twice.
 *
 * Mapping a swap file allows access to other processes' memory.  Swap files
 * are only accessible by the "root" super-user, who can already access any
 * process's memory, so this is not a real issue, but if permissions on the
 * swap file got changed, it could become one.
 *
 * Swap files are not "zero-filled" on creation, so until their contents are
 * overwritten with pageout operations, they still contain whatever was on
 * the disk blocks they were allocated from.  The "super-user" could see the
 * contents of free blocks anyway, so this is not a new security issue, but
 * it may be perceived as one.
 *
 * We can't legitimately prevent a user process with appropriate privileges
 * from mapping a swap file, but we can prevent it from accessing its actual
 * contents.
 * This pager mostly handles page-in requests (from memory_object_data_request())
 * for swap file mappings and just returns bogus data.
 * Pageouts are not handled, so mmap() has to make sure it does not allow
 * writable (i.e. MAP_SHARED and PROT_WRITE) mappings of swap files.
 */
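/*
 * Illustrative sketch only (not part of this pager): the kind of guard the
 * BSD mmap() path could apply before allowing a swap-file mapping.  The
 * vnode_isswap() predicate, the "flags"/"maxprot" variables and the error
 * code returned are assumptions for illustration, not a copy of the actual
 * mmap() code.
 *
 *	if (vnode_isswap(vp)) {
 *		if ((flags & MAP_SHARED) && (maxprot & VM_PROT_WRITE)) {
 *			return EPERM;   // no writable shared mappings of swap files
 *		}
 *		// read-only mappings get backed by this swapfile pager,
 *		// which hands back zeroed pages (see swapfile_pager_data_request()).
 *	}
 */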
/* forward declarations */
void swapfile_pager_reference(memory_object_t mem_obj);
void swapfile_pager_deallocate(memory_object_t mem_obj);
kern_return_t swapfile_pager_init(memory_object_t mem_obj,
    memory_object_control_t control,
    memory_object_cluster_size_t pg_size);
kern_return_t swapfile_pager_terminate(memory_object_t mem_obj);
kern_return_t swapfile_pager_data_request(memory_object_t mem_obj,
    memory_object_offset_t offset,
    memory_object_cluster_size_t length,
    vm_prot_t protection_required,
    memory_object_fault_info_t fault_info);
kern_return_t swapfile_pager_data_return(memory_object_t mem_obj,
    memory_object_offset_t offset,
    memory_object_cluster_size_t data_cnt,
    memory_object_offset_t *resid_offset,
    int *io_error,
    boolean_t dirty,
    boolean_t kernel_copy,
    int upl_flags);
kern_return_t swapfile_pager_data_initialize(memory_object_t mem_obj,
    memory_object_offset_t offset,
    memory_object_cluster_size_t data_cnt);
kern_return_t swapfile_pager_data_unlock(memory_object_t mem_obj,
    memory_object_offset_t offset,
    memory_object_size_t size,
    vm_prot_t desired_access);
kern_return_t swapfile_pager_synchronize(memory_object_t mem_obj,
    memory_object_offset_t offset,
    memory_object_size_t length,
    vm_sync_t sync_flags);
kern_return_t swapfile_pager_map(memory_object_t mem_obj,
    vm_prot_t prot);
kern_return_t swapfile_pager_last_unmap(memory_object_t mem_obj);
/*
 * Vector of VM operations for this EMM.
 * These routines are invoked by VM via the memory_object_*() interfaces.
 */
const struct memory_object_pager_ops swapfile_pager_ops = {
    .memory_object_reference = swapfile_pager_reference,
    .memory_object_deallocate = swapfile_pager_deallocate,
    .memory_object_init = swapfile_pager_init,
    .memory_object_terminate = swapfile_pager_terminate,
    .memory_object_data_request = swapfile_pager_data_request,
    .memory_object_data_return = swapfile_pager_data_return,
    .memory_object_data_initialize = swapfile_pager_data_initialize,
    .memory_object_data_unlock = swapfile_pager_data_unlock,
    .memory_object_synchronize = swapfile_pager_synchronize,
    .memory_object_map = swapfile_pager_map,
    .memory_object_last_unmap = swapfile_pager_last_unmap,
    .memory_object_data_reclaim = NULL,
    .memory_object_backing_object = NULL,
    .memory_object_pager_name = "swapfile pager"
};
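/*
 * Illustrative sketch only: VM does not call the functions above directly;
 * it goes through the generic memory_object_*() wrappers, which dispatch via
 * the pager_ops vector stored in the memory object header.  Roughly
 * (simplified approximation, not copied from the generic dispatch code):
 *
 *	kern_return_t
 *	memory_object_data_request(memory_object_t mo,
 *	    memory_object_offset_t offset,
 *	    memory_object_cluster_size_t length,
 *	    vm_prot_t prot,
 *	    memory_object_fault_info_t fault_info)
 *	{
 *		return mo->mo_pager_ops->memory_object_data_request(
 *			mo, offset, length, prot, fault_info);
 *	}
 *
 * For this pager, mo_pager_ops points at swapfile_pager_ops above.
 */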
/*
 * The "swapfile_pager" describes a memory object backed by
 * the "swapfile" EMM.
 */
typedef struct swapfile_pager {
    /* mandatory generic header */
    struct memory_object   swp_pgr_hdr;

    /* pager-specific data */
    queue_chain_t          pager_queue;     /* next & prev pagers */
#if MEMORY_OBJECT_HAS_REFCOUNT
#define swp_pgr_hdr_ref        swp_pgr_hdr.mo_ref
#else
    os_ref_atomic_t        swp_pgr_hdr_ref; /* reference count */
#endif
    bool                   is_ready;        /* is this pager ready ? */
    bool                   is_mapped;       /* is this pager mapped ? */
    struct vnode           *swapfile_vnode; /* the swapfile's vnode */
} *swapfile_pager_t;
#define SWAPFILE_PAGER_NULL     ((swapfile_pager_t) NULL)
/*
 * List of memory objects managed by this EMM.
 * The list is protected by the "swapfile_pager_lock" lock.
 */
int swapfile_pager_count = 0;           /* number of pagers */
queue_head_t swapfile_pager_queue = QUEUE_HEAD_INITIALIZER(swapfile_pager_queue);

LCK_GRP_DECLARE(swapfile_pager_lck_grp, "swapfile pager");
LCK_MTX_DECLARE(swapfile_pager_lock, &swapfile_pager_lck_grp);

/*
 * Statistics & counters.
 */
int swapfile_pager_count_max = 0;
/* internal prototypes */
swapfile_pager_t swapfile_pager_create(struct vnode *vp);
swapfile_pager_t swapfile_pager_lookup(memory_object_t mem_obj);
void swapfile_pager_dequeue(swapfile_pager_t pager);
void swapfile_pager_deallocate_internal(swapfile_pager_t pager,
    boolean_t locked);
void swapfile_pager_terminate_internal(swapfile_pager_t pager);
#if SWAPFILE_PAGER_DEBUG
int swapfile_pagerdebug = 0;
#define PAGER_ALL               0xffffffff
#define PAGER_INIT              0x00000001
#define PAGER_PAGEIN            0x00000002

#define PAGER_DEBUG(LEVEL, A)                                           \
	MACRO_BEGIN                                                     \
	if ((swapfile_pagerdebug & LEVEL) == LEVEL) {                   \
	        printf A;                                               \
	}                                                               \
	MACRO_END
#else
#define PAGER_DEBUG(LEVEL, A)
#endif
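/*
 * Usage sketch: when built with SWAPFILE_PAGER_DEBUG, tracing is enabled by
 * setting bits in swapfile_pagerdebug (for example from a kernel debugger):
 *
 *	swapfile_pagerdebug = PAGER_INIT | PAGER_PAGEIN;
 *	...
 *	PAGER_DEBUG(PAGER_PAGEIN,
 *	    ("page-in: offset 0x%llx length 0x%x\n", offset, length));
 *
 * Without SWAPFILE_PAGER_DEBUG, PAGER_DEBUG() expands to nothing, so the
 * calls below cost nothing in release builds.
 */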
/*
 * swapfile_pager_init()
 *
 * Initializes the memory object and makes it ready to be used and mapped.
 */
kern_return_t
swapfile_pager_init(
    memory_object_t         mem_obj,
    memory_object_control_t control,
#if !DEBUG
    __unused
#endif
    memory_object_cluster_size_t pg_size)
{
    swapfile_pager_t                pager;
    kern_return_t                   kr;
    memory_object_attr_info_data_t  attributes;

    PAGER_DEBUG(PAGER_ALL,
        ("swapfile_pager_init: %p, %p, %x\n",
        mem_obj, control, pg_size));

    if (control == MEMORY_OBJECT_CONTROL_NULL) {
        return KERN_INVALID_ARGUMENT;
    }

    pager = swapfile_pager_lookup(mem_obj);

    memory_object_control_reference(control);

    pager->swp_pgr_hdr.mo_control = control;

    attributes.copy_strategy = MEMORY_OBJECT_COPY_DELAY;
    attributes.cluster_size = (1 << (PAGE_SHIFT));
    attributes.may_cache_object = FALSE;
    attributes.temporary = TRUE;

    kr = memory_object_change_attributes(
        control,
        MEMORY_OBJECT_ATTRIBUTE_INFO,
        (memory_object_info_t) &attributes,
        MEMORY_OBJECT_ATTR_INFO_COUNT);
    if (kr != KERN_SUCCESS) {
        panic("swapfile_pager_init: "
            "memory_object_change_attributes() failed");
    }

    return KERN_SUCCESS;
}
/*
 * swapfile_pager_data_return()
 *
 * Handles page-out requests from VM.  This should never happen since
 * the pages provided by this EMM are not supposed to be dirty or dirtied
 * and VM should simply discard the contents and reclaim the pages if it
 * needs to.
 */
kern_return_t
swapfile_pager_data_return(
    __unused memory_object_t               mem_obj,
    __unused memory_object_offset_t        offset,
    __unused memory_object_cluster_size_t  data_cnt,
    __unused memory_object_offset_t        *resid_offset,
    __unused int                           *io_error,
    __unused boolean_t                     dirty,
    __unused boolean_t                     kernel_copy,
    __unused int                           upl_flags)
{
    panic("swapfile_pager_data_return: should never get called");
    return KERN_FAILURE;
}

kern_return_t
swapfile_pager_data_initialize(
    __unused memory_object_t               mem_obj,
    __unused memory_object_offset_t        offset,
    __unused memory_object_cluster_size_t  data_cnt)
{
    panic("swapfile_pager_data_initialize: should never get called");
    return KERN_FAILURE;
}

kern_return_t
swapfile_pager_data_unlock(
    __unused memory_object_t        mem_obj,
    __unused memory_object_offset_t offset,
    __unused memory_object_size_t   size,
    __unused vm_prot_t              desired_access)
{
    return KERN_FAILURE;
}
/*
 * swapfile_pager_data_request()
 *
 * Handles page-in requests from VM.
 */
kern_return_t
swapfile_pager_data_request(
    memory_object_t         mem_obj,
    memory_object_offset_t  offset,
    memory_object_cluster_size_t    length,
#if !DEBUG
    __unused
#endif
    vm_prot_t               protection_required,
    __unused memory_object_fault_info_t mo_fault_info)
{
    swapfile_pager_t        pager;
    memory_object_control_t mo_control;
    upl_t                   upl;
    int                     upl_flags;
    upl_size_t              upl_size;
    upl_page_info_t         *upl_pl = NULL;
    unsigned int            pl_count;
    vm_object_t             dst_object;
    kern_return_t           kr, retval;
    vm_map_offset_t         kernel_mapping;
    vm_offset_t             dst_vaddr;
    char                    *dst_ptr;
    vm_offset_t             cur_offset;
    vm_map_entry_t          map_entry;

    PAGER_DEBUG(PAGER_ALL, ("swapfile_pager_data_request: %p, %llx, %x, %x\n", mem_obj, offset, length, protection_required));

    kernel_mapping = 0;
    upl = NULL;
    upl_pl = NULL;

    pager = swapfile_pager_lookup(mem_obj);
    assert(pager->is_ready);
    assert(os_ref_get_count_raw(&pager->swp_pgr_hdr_ref) > 1); /* pager is alive and mapped */

    PAGER_DEBUG(PAGER_PAGEIN, ("swapfile_pager_data_request: %p, %llx, %x, %x, pager %p\n", mem_obj, offset, length, protection_required, pager));

    /*
     * Gather in a UPL all the VM pages requested by VM.
     */
    mo_control = pager->swp_pgr_hdr.mo_control;

    upl_size = length;
    upl_flags =
        UPL_RET_ONLY_ABSENT |
        UPL_SET_LITE |
        UPL_NO_SYNC |
        UPL_CLEAN_IN_PLACE |    /* triggers UPL_CLEAR_DIRTY */
        UPL_SET_INTERNAL;
    pl_count = 0;
    kr = memory_object_upl_request(mo_control,
        offset, upl_size,
        &upl, NULL, NULL, upl_flags, VM_KERN_MEMORY_OSFMK);
    if (kr != KERN_SUCCESS) {
        retval = kr;
        goto done;
    }
    dst_object = memory_object_control_to_vm_object(mo_control);
    assert(dst_object != VM_OBJECT_NULL);

    /*
     * Reserve a virtual page in the kernel address space to map each
     * destination physical page when it's its turn to be processed.
     */
    vm_object_reference(kernel_object);     /* ref. for mapping */
    kr = vm_map_find_space(kernel_map,
        &kernel_mapping,
        PAGE_SIZE_64,
        0,
        0,
        VM_MAP_KERNEL_FLAGS_NONE,
        VM_KERN_MEMORY_NONE,
        &map_entry);
    if (kr != KERN_SUCCESS) {
        vm_object_deallocate(kernel_object);
        retval = kr;
        goto done;
    }
    VME_OBJECT_SET(map_entry, kernel_object);
    VME_OFFSET_SET(map_entry, kernel_mapping - VM_MIN_KERNEL_ADDRESS);
    vm_map_unlock(kernel_map);
    dst_vaddr = CAST_DOWN(vm_offset_t, kernel_mapping);
    dst_ptr = (char *) dst_vaddr;

    /*
     * Fill in the contents of the pages requested by VM.
     */
    upl_pl = UPL_GET_INTERNAL_PAGE_LIST(upl);
    pl_count = length / PAGE_SIZE;
    for (cur_offset = 0; cur_offset < length; cur_offset += PAGE_SIZE) {
        ppnum_t dst_pnum;

        if (!upl_page_present(upl_pl, (int)(cur_offset / PAGE_SIZE))) {
            /* this page is not in the UPL: skip it */
            continue;
        }

        /*
         * Establish an explicit pmap mapping of the destination
         * physical page.
         * We can't do a regular VM mapping because the VM page
         * is "busy".
         */
        dst_pnum = (ppnum_t)
            upl_phys_page(upl_pl, (int)(cur_offset / PAGE_SIZE));
        assert(dst_pnum != 0);
        retval = pmap_enter(kernel_pmap,
            kernel_mapping,
            dst_pnum,
            VM_PROT_READ | VM_PROT_WRITE,
            VM_PROT_NONE,
            0,
            TRUE);
        assert(retval == KERN_SUCCESS);

        if (retval != KERN_SUCCESS) {
            goto done;
        }

        memset(dst_ptr, '\0', PAGE_SIZE);
        /* add an end-of-line to keep line counters happy */
        dst_ptr[PAGE_SIZE - 1] = '\n';

        /*
         * Remove the pmap mapping of the destination page
         * in the kernel.
         */
        pmap_remove(kernel_pmap,
            (addr64_t) kernel_mapping,
            (addr64_t) (kernel_mapping + PAGE_SIZE_64));
    }

    retval = KERN_SUCCESS;
done:
    if (upl != NULL) {
        /* clean up the UPL */

        /*
         * The pages are currently dirty because we've just been
         * writing on them, but as far as we're concerned, they're
         * clean since they contain their "original" contents as
         * provided by us, the pager.
         * Tell the UPL to mark them "clean".
         */
        upl_clear_dirty(upl, TRUE);

        /* abort or commit the UPL */
        if (retval != KERN_SUCCESS) {
            upl_abort(upl, 0);
        } else {
            boolean_t empty;

            assertf(page_aligned(upl->u_offset) && page_aligned(upl->u_size),
                "upl %p offset 0x%llx size 0x%x",
                upl, upl->u_offset, upl->u_size);
            upl_commit_range(upl, 0, upl->u_size,
                UPL_COMMIT_CS_VALIDATED,
                upl_pl, pl_count, &empty);
        }

        /* and deallocate the UPL */
        upl_deallocate(upl);
        upl = NULL;
    }
    if (kernel_mapping != 0) {
        /* clean up the mapping of the source and destination pages */
        kr = vm_map_remove(kernel_map,
            kernel_mapping,
            kernel_mapping + PAGE_SIZE_64,
            VM_MAP_REMOVE_NO_FLAGS);
        assert(kr == KERN_SUCCESS);
        kernel_mapping = 0;
        dst_vaddr = 0;
    }

    return retval;
}
/*
 * swapfile_pager_reference()
 *
 * Get a reference on this memory object.
 * For external usage only.  Assumes that the initial reference count is not 0,
 * i.e. one should not "revive" a dead pager this way.
 */
void
swapfile_pager_reference(
    memory_object_t         mem_obj)
{
    swapfile_pager_t        pager;

    pager = swapfile_pager_lookup(mem_obj);

    lck_mtx_lock(&swapfile_pager_lock);
    os_ref_retain_locked_raw(&pager->swp_pgr_hdr_ref, NULL);
    lck_mtx_unlock(&swapfile_pager_lock);
}
/*
 * swapfile_pager_dequeue:
 *
 * Removes a pager from the list of pagers.
 *
 * The caller must hold "swapfile_pager_lock".
 */
void
swapfile_pager_dequeue(
    swapfile_pager_t pager)
{
    assert(!pager->is_mapped);

    queue_remove(&swapfile_pager_queue,
        pager,
        swapfile_pager_t,
        pager_queue);
    pager->pager_queue.next = NULL;
    pager->pager_queue.prev = NULL;

    swapfile_pager_count--;
}
/*
 * swapfile_pager_terminate_internal:
 *
 * Trigger the asynchronous termination of the memory object associated
 * with this pager.
 * When the memory object is terminated, there will be one more call
 * to memory_object_deallocate() (i.e. swapfile_pager_deallocate())
 * to finish the clean up.
 *
 * "swapfile_pager_lock" should not be held by the caller.
 * We don't need the lock because the pager has already been removed from
 * the pagers' list and is now ours exclusively.
 */
void
swapfile_pager_terminate_internal(
    swapfile_pager_t pager)
{
    assert(pager->is_ready);
    assert(!pager->is_mapped);

    if (pager->swapfile_vnode != NULL) {
        pager->swapfile_vnode = NULL;
    }

    /* trigger the destruction of the memory object */
    memory_object_destroy(pager->swp_pgr_hdr.mo_control, 0);
}
/*
 * swapfile_pager_deallocate_internal()
 *
 * Release a reference on this pager and free it when the last
 * reference goes away.
 * Can be called with swapfile_pager_lock held or not, but always returns
 * with it unlocked.
 */
void
swapfile_pager_deallocate_internal(
    swapfile_pager_t        pager,
    boolean_t               locked)
{
    os_ref_count_t ref_count;

    if (!locked) {
        lck_mtx_lock(&swapfile_pager_lock);
    }

    /* drop a reference on this pager */
    ref_count = os_ref_release_locked_raw(&pager->swp_pgr_hdr_ref, NULL);

    if (ref_count == 1) {
        /*
         * Only the "named" reference is left, which means that
         * no one is really holding on to this pager anymore.
         * Terminate it.
         */
        swapfile_pager_dequeue(pager);
        /* the pager is all ours: no need for the lock now */
        lck_mtx_unlock(&swapfile_pager_lock);
        swapfile_pager_terminate_internal(pager);
    } else if (ref_count == 0) {
        /*
         * Dropped the existence reference; the memory object has
         * been terminated.  Do some final cleanup and release the
         * pager structure.
         */
        lck_mtx_unlock(&swapfile_pager_lock);
        if (pager->swp_pgr_hdr.mo_control != MEMORY_OBJECT_CONTROL_NULL) {
            memory_object_control_deallocate(pager->swp_pgr_hdr.mo_control);
            pager->swp_pgr_hdr.mo_control = MEMORY_OBJECT_CONTROL_NULL;
        }
        kfree(pager, sizeof(*pager));
        pager = SWAPFILE_PAGER_NULL;
    } else {
        /* there are still plenty of references: keep going... */
        lck_mtx_unlock(&swapfile_pager_lock);
    }

    /* caution: lock is not held on return... */
}
/*
 * swapfile_pager_deallocate()
 *
 * Release a reference on this pager and free it when the last
 * reference goes away.
 */
void
swapfile_pager_deallocate(
    memory_object_t         mem_obj)
{
    swapfile_pager_t        pager;

    PAGER_DEBUG(PAGER_ALL, ("swapfile_pager_deallocate: %p\n", mem_obj));
    pager = swapfile_pager_lookup(mem_obj);
    swapfile_pager_deallocate_internal(pager, FALSE);
}
/*
 * swapfile_pager_terminate()
 */
kern_return_t
swapfile_pager_terminate(
#if !DEBUG
    __unused
#endif
    memory_object_t mem_obj)
{
    PAGER_DEBUG(PAGER_ALL, ("swapfile_pager_terminate: %p\n", mem_obj));

    return KERN_SUCCESS;
}
/*
 * swapfile_pager_synchronize()
 *
 * This should never be called since memory_object_synchronize() is no
 * longer supported.
 */
kern_return_t
swapfile_pager_synchronize(
    __unused memory_object_t        mem_obj,
    __unused memory_object_offset_t offset,
    __unused memory_object_size_t   length,
    __unused vm_sync_t              sync_flags)
{
    panic("swapfile_pager_synchronize: memory_object_synchronize no longer supported\n");
    return KERN_FAILURE;
}
/*
 * swapfile_pager_map()
 *
 * This allows VM to let us, the EMM, know that this memory object
 * is currently mapped one or more times.  This is called by VM each time
 * the memory object gets mapped, and we take one extra reference on the
 * memory object to account for all its mappings.
 */
kern_return_t
swapfile_pager_map(
    memory_object_t         mem_obj,
    __unused vm_prot_t      prot)
{
    swapfile_pager_t        pager;

    PAGER_DEBUG(PAGER_ALL, ("swapfile_pager_map: %p\n", mem_obj));

    pager = swapfile_pager_lookup(mem_obj);

    lck_mtx_lock(&swapfile_pager_lock);
    assert(pager->is_ready);
    assert(os_ref_get_count_raw(&pager->swp_pgr_hdr_ref) > 0); /* pager is alive */
    if (pager->is_mapped == FALSE) {
        /*
         * First mapping of this pager: take an extra reference
         * that will remain until all the mappings of this pager
         * are removed.
         */
        pager->is_mapped = TRUE;
        os_ref_retain_locked_raw(&pager->swp_pgr_hdr_ref, NULL);
    }
    lck_mtx_unlock(&swapfile_pager_lock);

    return KERN_SUCCESS;
}
/*
 * swapfile_pager_last_unmap()
 *
 * This is called by VM when this memory object is no longer mapped anywhere.
 */
kern_return_t
swapfile_pager_last_unmap(
    memory_object_t         mem_obj)
{
    swapfile_pager_t        pager;

    PAGER_DEBUG(PAGER_ALL,
        ("swapfile_pager_last_unmap: %p\n", mem_obj));

    pager = swapfile_pager_lookup(mem_obj);

    lck_mtx_lock(&swapfile_pager_lock);
    if (pager->is_mapped) {
        /*
         * All the mappings are gone, so let go of the one extra
         * reference that represents all the mappings of this pager.
         */
        pager->is_mapped = FALSE;
        swapfile_pager_deallocate_internal(pager, TRUE);
        /* caution: deallocate_internal() released the lock ! */
    } else {
        lck_mtx_unlock(&swapfile_pager_lock);
    }

    return KERN_SUCCESS;
}
/*
 * swapfile_pager_lookup()
 */
swapfile_pager_t
swapfile_pager_lookup(
    memory_object_t mem_obj)
{
    swapfile_pager_t        pager;

    assert(mem_obj->mo_pager_ops == &swapfile_pager_ops);
    __IGNORE_WCASTALIGN(pager = (swapfile_pager_t) mem_obj);
    assert(os_ref_get_count_raw(&pager->swp_pgr_hdr_ref) > 0);
    return pager;
}
/*
 * swapfile_pager_create()
 */
swapfile_pager_t
swapfile_pager_create(
    struct vnode *vp)
{
    swapfile_pager_t        pager, pager2;
    memory_object_control_t control;
    kern_return_t           kr;

    pager = (swapfile_pager_t) kalloc(sizeof(*pager));
    if (pager == SWAPFILE_PAGER_NULL) {
        return SWAPFILE_PAGER_NULL;
    }

    /*
     * The vm_map call takes both named entry ports and raw memory
     * objects in the same parameter.  We need to make sure that
     * vm_map does not see this object as a named entry port.  So,
     * we reserve the second word in the object for a fake ip_kotype
     * setting - that will tell vm_map to use it as a memory object.
     */
    pager->swp_pgr_hdr.mo_ikot = IKOT_MEMORY_OBJECT;
    pager->swp_pgr_hdr.mo_pager_ops = &swapfile_pager_ops;
    pager->swp_pgr_hdr.mo_control = MEMORY_OBJECT_CONTROL_NULL;

    pager->is_ready = FALSE;/* not ready until it has a "name" */
    os_ref_init_raw(&pager->swp_pgr_hdr_ref, NULL); /* setup reference */
    pager->is_mapped = FALSE;
    pager->swapfile_vnode = vp;

    lck_mtx_lock(&swapfile_pager_lock);
    /* see if anyone raced us to create a pager for the same object */
    queue_iterate(&swapfile_pager_queue,
        pager2,
        swapfile_pager_t,
        pager_queue) {
        if (pager2->swapfile_vnode == vp) {
            break;
        }
    }
    if (!queue_end(&swapfile_pager_queue,
        (queue_entry_t) pager2)) {
        /* while we hold the lock, transfer our setup ref to winner */
        os_ref_retain_locked_raw(&pager2->swp_pgr_hdr_ref, NULL);
        /* we lost the race, down with the loser... */
        lck_mtx_unlock(&swapfile_pager_lock);
        pager->swapfile_vnode = NULL;
        kfree(pager, sizeof(*pager));
        /* ... and go with the winner */
        pager = pager2;
        /* let the winner make sure the pager gets ready */
        return pager;
    }

    /* enter new pager at the head of our list of pagers */
    queue_enter_first(&swapfile_pager_queue,
        pager,
        swapfile_pager_t,
        pager_queue);
    swapfile_pager_count++;
    if (swapfile_pager_count > swapfile_pager_count_max) {
        swapfile_pager_count_max = swapfile_pager_count;
    }
    lck_mtx_unlock(&swapfile_pager_lock);

    kr = memory_object_create_named((memory_object_t) pager,
        0,
        &control);
    assert(kr == KERN_SUCCESS);

    memory_object_mark_trusted(control);

    lck_mtx_lock(&swapfile_pager_lock);
    /* the new pager is now ready to be used */
    pager->is_ready = TRUE;
    lck_mtx_unlock(&swapfile_pager_lock);

    /* wakeup anyone waiting for this pager to be ready */
    thread_wakeup(&pager->is_ready);

    return pager;
}
/*
 * swapfile_pager_setup()
 *
 * Provide the caller with a memory object backed by the provided
 * swap-file vnode.  If such a memory object already exists, re-use it,
 * otherwise create a new memory object.
 */
memory_object_t
swapfile_pager_setup(
    struct vnode *vp)
{
    swapfile_pager_t        pager;

    lck_mtx_lock(&swapfile_pager_lock);

    queue_iterate(&swapfile_pager_queue,
        pager,
        swapfile_pager_t,
        pager_queue) {
        if (pager->swapfile_vnode == vp) {
            break;
        }
    }
    if (queue_end(&swapfile_pager_queue,
        (queue_entry_t) pager)) {
        /* no existing pager for this backing object */
        pager = SWAPFILE_PAGER_NULL;
    } else {
        /* make sure pager doesn't disappear */
        os_ref_retain_raw(&pager->swp_pgr_hdr_ref, NULL);
    }

    lck_mtx_unlock(&swapfile_pager_lock);

    if (pager == SWAPFILE_PAGER_NULL) {
        pager = swapfile_pager_create(vp);
        if (pager == SWAPFILE_PAGER_NULL) {
            return MEMORY_OBJECT_NULL;
        }
    }

    lck_mtx_lock(&swapfile_pager_lock);
    while (!pager->is_ready) {
        lck_mtx_sleep(&swapfile_pager_lock,
            LCK_SLEEP_DEFAULT,
            &pager->is_ready,
            THREAD_UNINT);
    }
    lck_mtx_unlock(&swapfile_pager_lock);

    return (memory_object_t) pager;
}
memory_object_control_t
swapfile_pager_control(
    memory_object_t mem_obj)
{
    swapfile_pager_t        pager;

    if (mem_obj == MEMORY_OBJECT_NULL ||
        mem_obj->mo_pager_ops != &swapfile_pager_ops) {
        return MEMORY_OBJECT_CONTROL_NULL;
    }
    pager = swapfile_pager_lookup(mem_obj);
    return pager->swp_pgr_hdr.mo_control;
}
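/*
 * Usage sketch (hypothetical caller, e.g. the mmap() path for a swap-file
 * vnode): get the swapfile pager's memory object and its control object, and
 * map that instead of the vnode's real contents.  The error handling and the
 * way the control object is consumed here are assumptions for illustration.
 *
 *	memory_object_t         pager;
 *	memory_object_control_t control;
 *
 *	pager = swapfile_pager_setup(vp);
 *	if (pager == MEMORY_OBJECT_NULL) {
 *		return ENOMEM;                  // hypothetical error path
 *	}
 *	control = swapfile_pager_control(pager);
 *	// ... hand "control" to the VM mapping code instead of the
 *	// vnode pager's control object ...
 *	memory_object_deallocate(pager);        // drop the setup reference
 */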