/*
 * Copyright (c) 2019 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <mach/kern_return.h>
#include <mach/memory_object_control.h>
#include <mach/upl.h>

#include <kern/ipc_kobject.h>
#include <kern/kalloc.h>
#include <kern/queue.h>

#include <vm/memory_object.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <vm/vm_pageout.h>
#include <vm/vm_protos.h>

/*
 * APPLE SWAPFILE MEMORY PAGER
 *
 * This external memory manager (EMM) handles mappings of the swap files.
 * Swap files are not regular files and are used solely to store the contents
 * of anonymous memory mappings while they are not resident in memory.
 * There's no valid reason to map a swap file.  Doing so just puts an extra
 * burden on the system, is potentially a security issue, and is not reliable
 * since the contents can change at any time with pageout operations.
 * Here are some of the issues with mapping a swap file:
 * * PERFORMANCE:
 *   Each page in the swap file belongs to an anonymous memory object.
 *   Mapping the swap file makes those pages also accessible via a vnode
 *   memory object, so each page can now be resident twice.
 * * SECURITY:
 *   Mapping a swap file allows access to other processes' memory.  Swap
 *   files are only accessible by the "root" super-user, who can already
 *   access any process's memory, so this is not a real issue, but if the
 *   permissions on the swap file were changed, it could become one.
 *   Swap files are not "zero-filled" on creation, so until their contents
 *   are overwritten by pageout operations, they still contain whatever was
 *   on the disk blocks they were allocated from.  The super-user could see
 *   the contents of free disk blocks anyway, so this is not a new security
 *   issue, but it may be perceived as one.
 *
 * We can't legitimately prevent a user process with appropriate privileges
 * from mapping a swap file, but we can prevent it from accessing its actual
 * contents.
 * This pager mostly handles page-in requests (from memory_object_data_request())
 * for swap file mappings and just returns bogus data.
 * Pageouts are not handled, so mmap() has to make sure it does not allow
 * writable (i.e. MAP_SHARED and PROT_WRITE) mappings of swap files; a sketch
 * of such a check follows.
 */
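
/*
 * Illustration only, not part of this pager: a minimal sketch of the
 * mmap()-side check described above, assuming BSD-style mmap() arguments
 * and a vnode_isswap() predicate (both are assumptions here; the actual
 * enforcement lives in the BSD mmap() code and its exact shape may differ):
 *
 *      if (vnode_isswap(vp) &&
 *          (prot & PROT_WRITE) &&
 *          (flags & MAP_SHARED)) {
 *              return EPERM;   // no writable shared mappings of swap files
 *      }
 */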

/* forward declarations */
void swapfile_pager_reference(memory_object_t mem_obj);
void swapfile_pager_deallocate(memory_object_t mem_obj);
kern_return_t swapfile_pager_init(memory_object_t mem_obj,
        memory_object_control_t control,
        memory_object_cluster_size_t pg_size);
kern_return_t swapfile_pager_terminate(memory_object_t mem_obj);
kern_return_t swapfile_pager_data_request(memory_object_t mem_obj,
        memory_object_offset_t offset,
        memory_object_cluster_size_t length,
        vm_prot_t protection_required,
        memory_object_fault_info_t fault_info);
kern_return_t swapfile_pager_data_return(memory_object_t mem_obj,
        memory_object_offset_t offset,
        memory_object_cluster_size_t data_cnt,
        memory_object_offset_t *resid_offset,
        int *io_error,
        boolean_t dirty,
        boolean_t kernel_copy,
        int upl_flags);
kern_return_t swapfile_pager_data_initialize(memory_object_t mem_obj,
        memory_object_offset_t offset,
        memory_object_cluster_size_t data_cnt);
kern_return_t swapfile_pager_data_unlock(memory_object_t mem_obj,
        memory_object_offset_t offset,
        memory_object_size_t size,
        vm_prot_t desired_access);
kern_return_t swapfile_pager_synchronize(memory_object_t mem_obj,
        memory_object_offset_t offset,
        memory_object_size_t length,
        vm_sync_t sync_flags);
kern_return_t swapfile_pager_map(memory_object_t mem_obj,
        vm_prot_t prot);
kern_return_t swapfile_pager_last_unmap(memory_object_t mem_obj);

/*
 * Vector of VM operations for this EMM.
 * These routines are invoked by VM via the memory_object_*() interfaces.
 */
const struct memory_object_pager_ops swapfile_pager_ops = {
        .memory_object_reference = swapfile_pager_reference,
        .memory_object_deallocate = swapfile_pager_deallocate,
        .memory_object_init = swapfile_pager_init,
        .memory_object_terminate = swapfile_pager_terminate,
        .memory_object_data_request = swapfile_pager_data_request,
        .memory_object_data_return = swapfile_pager_data_return,
        .memory_object_data_initialize = swapfile_pager_data_initialize,
        .memory_object_data_unlock = swapfile_pager_data_unlock,
        .memory_object_synchronize = swapfile_pager_synchronize,
        .memory_object_map = swapfile_pager_map,
        .memory_object_last_unmap = swapfile_pager_last_unmap,
        .memory_object_data_reclaim = NULL,
        .memory_object_pager_name = "swapfile pager"
};
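
/*
 * For orientation, a sketch of how VM reaches these routines: the generic
 * memory_object_*() entry points dispatch through the pager's ops vector,
 * roughly like this (simplified; see osfmk/vm/memory_object.c for the
 * real thing):
 *
 *      kern_return_t
 *      memory_object_data_request(memory_object_t mo, ...)
 *      {
 *              return mo->mo_pager_ops->memory_object_data_request(mo, ...);
 *      }
 */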

/*
 * The "swapfile_pager" describes a memory object backed by
 * the "swapfile" EMM.
 */
typedef struct swapfile_pager {
        /* mandatory generic header */
        struct memory_object swp_pgr_hdr;

        /* pager-specific data */
        queue_chain_t pager_queue;      /* next & prev pagers */
        unsigned int ref_count;         /* reference count */
        boolean_t is_ready;             /* is this pager ready? */
        boolean_t is_mapped;            /* is this pager mapped? */
        struct vnode *swapfile_vnode;   /* the swapfile's vnode */
} *swapfile_pager_t;
#define SWAPFILE_PAGER_NULL ((swapfile_pager_t) NULL)

/*
 * List of memory objects managed by this EMM.
 * The list is protected by the "swapfile_pager_lock" lock.
 */
int swapfile_pager_count = 0;   /* number of pagers */
queue_head_t swapfile_pager_queue;
decl_lck_mtx_data(, swapfile_pager_lock);

/*
 * Statistics & counters.
 */
int swapfile_pager_count_max = 0;


lck_grp_t swapfile_pager_lck_grp;
lck_grp_attr_t swapfile_pager_lck_grp_attr;
lck_attr_t swapfile_pager_lck_attr;


/* internal prototypes */
swapfile_pager_t swapfile_pager_create(struct vnode *vp);
swapfile_pager_t swapfile_pager_lookup(memory_object_t mem_obj);
void swapfile_pager_dequeue(swapfile_pager_t pager);
void swapfile_pager_deallocate_internal(swapfile_pager_t pager,
        boolean_t locked);
void swapfile_pager_terminate_internal(swapfile_pager_t pager);


#if DEBUG
int swapfile_pagerdebug = 0;
#define PAGER_ALL    0xffffffff
#define PAGER_INIT   0x00000001
#define PAGER_PAGEIN 0x00000002

#define PAGER_DEBUG(LEVEL, A)                                   \
        MACRO_BEGIN                                             \
        if ((swapfile_pagerdebug & (LEVEL)) == (LEVEL)) {       \
                printf A;                                       \
        }                                                       \
        MACRO_END
#else
#define PAGER_DEBUG(LEVEL, A)
#endif
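
/*
 * Usage sketch (DEBUG kernels only): set "swapfile_pagerdebug" to a mask
 * of the PAGER_* bits above, e.g. from a debugger, to enable tracing:
 *
 *      swapfile_pagerdebug = PAGER_INIT | PAGER_PAGEIN;
 *      PAGER_DEBUG(PAGER_PAGEIN, ("page-in at offset 0x%llx\n", offset));
 *
 * The extra parentheses around the printf arguments are required because
 * the macro passes them through to printf as a single macro parameter.
 */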


void
swapfile_pager_bootstrap(void)
{
        lck_grp_attr_setdefault(&swapfile_pager_lck_grp_attr);
        lck_grp_init(&swapfile_pager_lck_grp, "swapfile pager", &swapfile_pager_lck_grp_attr);
        lck_attr_setdefault(&swapfile_pager_lck_attr);
        lck_mtx_init(&swapfile_pager_lock, &swapfile_pager_lck_grp, &swapfile_pager_lck_attr);
        queue_init(&swapfile_pager_queue);
}

/*
 * swapfile_pager_init()
 *
 * Initializes the memory object and makes it ready to be used and mapped.
 */
kern_return_t
swapfile_pager_init(
        memory_object_t mem_obj,
        memory_object_control_t control,
#if !DEBUG
        __unused
#endif
        memory_object_cluster_size_t pg_size)
{
        swapfile_pager_t pager;
        kern_return_t kr;
        memory_object_attr_info_data_t attributes;

        PAGER_DEBUG(PAGER_ALL,
            ("swapfile_pager_init: %p, %p, %x\n",
            mem_obj, control, pg_size));

        if (control == MEMORY_OBJECT_CONTROL_NULL) {
                return KERN_INVALID_ARGUMENT;
        }

        pager = swapfile_pager_lookup(mem_obj);

        memory_object_control_reference(control);

        pager->swp_pgr_hdr.mo_control = control;

        attributes.copy_strategy = MEMORY_OBJECT_COPY_DELAY;
        attributes.cluster_size = (1 << (PAGE_SHIFT));
        attributes.may_cache_object = FALSE;
        attributes.temporary = TRUE;

        kr = memory_object_change_attributes(
                control,
                MEMORY_OBJECT_ATTRIBUTE_INFO,
                (memory_object_info_t) &attributes,
                MEMORY_OBJECT_ATTR_INFO_COUNT);
        if (kr != KERN_SUCCESS) {
                panic("swapfile_pager_init: "
                    "memory_object_change_attributes() failed");
        }

        return KERN_SUCCESS;
}

/*
 * swapfile_pager_data_return()
 *
 * Handles page-out requests from VM.  This should never happen since
 * the pages provided by this EMM are not supposed to be dirty or dirtied
 * and VM should simply discard the contents and reclaim the pages if it
 * needs to.
 */
kern_return_t
swapfile_pager_data_return(
        __unused memory_object_t mem_obj,
        __unused memory_object_offset_t offset,
        __unused memory_object_cluster_size_t data_cnt,
        __unused memory_object_offset_t *resid_offset,
        __unused int *io_error,
        __unused boolean_t dirty,
        __unused boolean_t kernel_copy,
        __unused int upl_flags)
{
        panic("swapfile_pager_data_return: should never get called");
        return KERN_FAILURE;
}

/*
 * swapfile_pager_data_initialize()
 *
 * Never expected to be called for this pager; see the panic below.
 */
kern_return_t
swapfile_pager_data_initialize(
        __unused memory_object_t mem_obj,
        __unused memory_object_offset_t offset,
        __unused memory_object_cluster_size_t data_cnt)
{
        panic("swapfile_pager_data_initialize: should never get called");
        return KERN_FAILURE;
}

/*
 * swapfile_pager_data_unlock()
 *
 * Memory object lock requests are not supported by this pager;
 * fail the request.
 */
kern_return_t
swapfile_pager_data_unlock(
        __unused memory_object_t mem_obj,
        __unused memory_object_offset_t offset,
        __unused memory_object_size_t size,
        __unused vm_prot_t desired_access)
{
        return KERN_FAILURE;
}

/*
 * swapfile_pager_data_request()
 *
 * Handles page-in requests from VM.
 */
kern_return_t
swapfile_pager_data_request(
        memory_object_t mem_obj,
        memory_object_offset_t offset,
        memory_object_cluster_size_t length,
#if !DEBUG
        __unused
#endif
        vm_prot_t protection_required,
        __unused memory_object_fault_info_t mo_fault_info)
{
        swapfile_pager_t pager;
        memory_object_control_t mo_control;
        upl_t upl;
        int upl_flags;
        upl_size_t upl_size;
        upl_page_info_t *upl_pl = NULL;
        unsigned int pl_count;
        vm_object_t dst_object;
        kern_return_t kr, retval;
        vm_map_offset_t kernel_mapping;
        vm_offset_t dst_vaddr;
        char *dst_ptr;
        vm_offset_t cur_offset;
        vm_map_entry_t map_entry;

        PAGER_DEBUG(PAGER_ALL, ("swapfile_pager_data_request: %p, %llx, %x, %x\n", mem_obj, offset, length, protection_required));

        kernel_mapping = 0;
        upl = NULL;
        upl_pl = NULL;

        pager = swapfile_pager_lookup(mem_obj);
        assert(pager->is_ready);
        assert(pager->ref_count > 1); /* pager is alive and mapped */

        PAGER_DEBUG(PAGER_PAGEIN, ("swapfile_pager_data_request: %p, %llx, %x, %x, pager %p\n", mem_obj, offset, length, protection_required, pager));

        /*
         * Gather in a UPL all the VM pages requested by VM.
         */
        mo_control = pager->swp_pgr_hdr.mo_control;

        upl_size = length;
        upl_flags =
            UPL_RET_ONLY_ABSENT |
            UPL_SET_LITE |
            UPL_NO_SYNC |
            UPL_CLEAN_IN_PLACE |        /* triggers UPL_CLEAR_DIRTY */
            UPL_SET_INTERNAL;
        pl_count = 0;
        kr = memory_object_upl_request(mo_control,
            offset, upl_size,
            &upl, NULL, NULL, upl_flags, VM_KERN_MEMORY_OSFMK);
        if (kr != KERN_SUCCESS) {
                retval = kr;
                goto done;
        }
        dst_object = mo_control->moc_object;
        assert(dst_object != VM_OBJECT_NULL);


        /*
         * Reserve a virtual page in the kernel address space to map each
         * destination physical page when its turn comes to be processed.
         */
        vm_object_reference(kernel_object);     /* ref. for mapping */
        kr = vm_map_find_space(kernel_map,
            &kernel_mapping,
            PAGE_SIZE_64,
            0,
            0,
            VM_MAP_KERNEL_FLAGS_NONE,
            VM_KERN_MEMORY_NONE,
            &map_entry);
        if (kr != KERN_SUCCESS) {
                vm_object_deallocate(kernel_object);
                retval = kr;
                goto done;
        }
        VME_OBJECT_SET(map_entry, kernel_object);
        VME_OFFSET_SET(map_entry, kernel_mapping - VM_MIN_KERNEL_ADDRESS);
        vm_map_unlock(kernel_map);
        dst_vaddr = CAST_DOWN(vm_offset_t, kernel_mapping);
        dst_ptr = (char *) dst_vaddr;

        /*
         * Fill in the contents of the pages requested by VM.
         */
        upl_pl = UPL_GET_INTERNAL_PAGE_LIST(upl);
        pl_count = length / PAGE_SIZE;
        for (cur_offset = 0; cur_offset < length; cur_offset += PAGE_SIZE) {
                ppnum_t dst_pnum;

                if (!upl_page_present(upl_pl, (int)(cur_offset / PAGE_SIZE))) {
                        /* this page is not in the UPL: skip it */
                        continue;
                }

                /*
                 * Establish an explicit pmap mapping of the destination
                 * physical page.
                 * We can't do a regular VM mapping because the VM page
                 * is "busy".
                 */
                dst_pnum = (ppnum_t)
                    upl_phys_page(upl_pl, (int)(cur_offset / PAGE_SIZE));
                assert(dst_pnum != 0);
                retval = pmap_enter(kernel_pmap,
                    kernel_mapping,
                    dst_pnum,
                    VM_PROT_READ | VM_PROT_WRITE,
                    VM_PROT_NONE,
                    0,
                    TRUE);

                assert(retval == KERN_SUCCESS);

                if (retval != KERN_SUCCESS) {
                        goto done;
                }

                memset(dst_ptr, '\0', PAGE_SIZE);
                /* add an end-of-line to keep line counters happy */
                dst_ptr[PAGE_SIZE - 1] = '\n';

                /*
                 * Remove the pmap mapping of the destination page
                 * in the kernel.
                 */
                pmap_remove(kernel_pmap,
                    (addr64_t) kernel_mapping,
                    (addr64_t) (kernel_mapping + PAGE_SIZE_64));
        }

        retval = KERN_SUCCESS;
done:
        if (upl != NULL) {
                /* clean up the UPL */

                /*
                 * The pages are currently dirty because we've just been
                 * writing on them, but as far as we're concerned, they're
                 * clean since they contain their "original" contents as
                 * provided by us, the pager.
                 * Tell the UPL to mark them "clean".
                 */
                upl_clear_dirty(upl, TRUE);

                /* abort or commit the UPL */
                if (retval != KERN_SUCCESS) {
                        upl_abort(upl, 0);
                } else {
                        boolean_t empty;
                        upl_commit_range(upl, 0, upl->size,
                            UPL_COMMIT_CS_VALIDATED,
                            upl_pl, pl_count, &empty);
                }

                /* and deallocate the UPL */
                upl_deallocate(upl);
                upl = NULL;
        }
        if (kernel_mapping != 0) {
                /* clean up the mapping of the source and destination pages */
                kr = vm_map_remove(kernel_map,
                    kernel_mapping,
                    kernel_mapping + PAGE_SIZE_64,
                    VM_MAP_REMOVE_NO_FLAGS);
                assert(kr == KERN_SUCCESS);
                kernel_mapping = 0;
                dst_vaddr = 0;
        }

        return retval;
}
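
/*
 * Net effect, as a sketch: a process that maps a swap file read-only and
 * touches a page gets it filled by the routine above, so it reads a page
 * of zeroes with a trailing newline rather than the real swapped-out data.
 * Roughly (userland pseudo-usage, assuming "fd" is an open swap file):
 *
 *      char *p = mmap(NULL, PAGE_SIZE, PROT_READ, MAP_PRIVATE, fd, 0);
 *      // p[0..PAGE_SIZE-2] == '\0', p[PAGE_SIZE-1] == '\n'
 */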

/*
 * swapfile_pager_reference()
 *
 * Get a reference on this memory object.
 * For external usage only.  Assumes that the initial reference count is
 * not 0, i.e. one should not "revive" a dead pager this way.
 */
void
swapfile_pager_reference(
        memory_object_t mem_obj)
{
        swapfile_pager_t pager;

        pager = swapfile_pager_lookup(mem_obj);

        lck_mtx_lock(&swapfile_pager_lock);
        assert(pager->ref_count > 0);
        pager->ref_count++;
        lck_mtx_unlock(&swapfile_pager_lock);
}


/*
 * swapfile_pager_dequeue:
 *
 * Removes a pager from the list of pagers.
 *
 * The caller must hold "swapfile_pager_lock".
 */
void
swapfile_pager_dequeue(
        swapfile_pager_t pager)
{
        assert(!pager->is_mapped);

        queue_remove(&swapfile_pager_queue,
            pager,
            swapfile_pager_t,
            pager_queue);
        pager->pager_queue.next = NULL;
        pager->pager_queue.prev = NULL;

        swapfile_pager_count--;
}

/*
 * swapfile_pager_terminate_internal:
 *
 * Trigger the asynchronous termination of the memory object associated
 * with this pager.
 * When the memory object is terminated, there will be one more call
 * to memory_object_deallocate() (i.e. swapfile_pager_deallocate())
 * to finish the clean up.
 *
 * "swapfile_pager_lock" should not be held by the caller.
 * We don't need the lock because the pager has already been removed from
 * the pagers' list and is now ours exclusively.
 */
void
swapfile_pager_terminate_internal(
        swapfile_pager_t pager)
{
        assert(pager->is_ready);
        assert(!pager->is_mapped);

        if (pager->swapfile_vnode != NULL) {
                pager->swapfile_vnode = NULL;
        }

        /* trigger the destruction of the memory object */
        memory_object_destroy(pager->swp_pgr_hdr.mo_control, 0);
}

/*
 * swapfile_pager_deallocate_internal()
 *
 * Release a reference on this pager and free it when the last
 * reference goes away.
 * Can be called with swapfile_pager_lock held or not, but always returns
 * with it unlocked.
 */
void
swapfile_pager_deallocate_internal(
        swapfile_pager_t pager,
        boolean_t locked)
{
        if (!locked) {
                lck_mtx_lock(&swapfile_pager_lock);
        }

        /* drop a reference on this pager */
        pager->ref_count--;

        if (pager->ref_count == 1) {
                /*
                 * Only the "named" reference is left, which means that
                 * no one is really holding on to this pager anymore.
                 * Terminate it.
                 */
                swapfile_pager_dequeue(pager);
                /* the pager is all ours: no need for the lock now */
                lck_mtx_unlock(&swapfile_pager_lock);
                swapfile_pager_terminate_internal(pager);
        } else if (pager->ref_count == 0) {
                /*
                 * Dropped the existence reference; the memory object has
                 * been terminated.  Do some final cleanup and release the
                 * pager structure.
                 */
                lck_mtx_unlock(&swapfile_pager_lock);
                if (pager->swp_pgr_hdr.mo_control != MEMORY_OBJECT_CONTROL_NULL) {
                        memory_object_control_deallocate(pager->swp_pgr_hdr.mo_control);
                        pager->swp_pgr_hdr.mo_control = MEMORY_OBJECT_CONTROL_NULL;
                }
                kfree(pager, sizeof(*pager));
                pager = SWAPFILE_PAGER_NULL;
        } else {
                /* there are still plenty of references: keep going... */
                lck_mtx_unlock(&swapfile_pager_lock);
        }

        /* caution: lock is not held on return... */
}
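
/*
 * Reference model, summarized from the code in this file: the winner of
 * swapfile_pager_create() starts with a single setup reference, each
 * outside user adds one via swapfile_pager_reference(), and the first
 * mapping adds one more (see swapfile_pager_map()).  In
 * swapfile_pager_deallocate_internal(), dropping to 1 means only the
 * "named" reference is left and the pager gets terminated; dropping to 0
 * means the memory object is gone and the structure is freed.
 */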

/*
 * swapfile_pager_deallocate()
 *
 * Release a reference on this pager and free it when the last
 * reference goes away.
 */
void
swapfile_pager_deallocate(
        memory_object_t mem_obj)
{
        swapfile_pager_t pager;

        PAGER_DEBUG(PAGER_ALL, ("swapfile_pager_deallocate: %p\n", mem_obj));
        pager = swapfile_pager_lookup(mem_obj);
        swapfile_pager_deallocate_internal(pager, FALSE);
}

/*
 * swapfile_pager_terminate()
 *
 * Called by VM when the memory object is terminated; there is nothing
 * extra for this pager to clean up here.
 */
kern_return_t
swapfile_pager_terminate(
#if !DEBUG
        __unused
#endif
        memory_object_t mem_obj)
{
        PAGER_DEBUG(PAGER_ALL, ("swapfile_pager_terminate: %p\n", mem_obj));

        return KERN_SUCCESS;
}

/*
 * swapfile_pager_synchronize()
 *
 * memory_object_synchronize() is obsolete and should never reach a pager;
 * see the panic below.
 */
kern_return_t
swapfile_pager_synchronize(
        __unused memory_object_t mem_obj,
        __unused memory_object_offset_t offset,
        __unused memory_object_size_t length,
        __unused vm_sync_t sync_flags)
{
        panic("swapfile_pager_synchronize: memory_object_synchronize no longer supported\n");
        return KERN_FAILURE;
}

/*
 * swapfile_pager_map()
 *
 * This allows VM to let us, the EMM, know that this memory object
 * is currently mapped one or more times.  This is called by VM each time
 * the memory object gets mapped, and we take one extra reference on the
 * memory object to account for all its mappings.
 */
kern_return_t
swapfile_pager_map(
        memory_object_t mem_obj,
        __unused vm_prot_t prot)
{
        swapfile_pager_t pager;

        PAGER_DEBUG(PAGER_ALL, ("swapfile_pager_map: %p\n", mem_obj));

        pager = swapfile_pager_lookup(mem_obj);

        lck_mtx_lock(&swapfile_pager_lock);
        assert(pager->is_ready);
        assert(pager->ref_count > 0); /* pager is alive */
        if (pager->is_mapped == FALSE) {
                /*
                 * First mapping of this pager: take an extra reference
                 * that will remain until all the mappings of this pager
                 * are removed.
                 */
                pager->is_mapped = TRUE;
                pager->ref_count++;
        }
        lck_mtx_unlock(&swapfile_pager_lock);

        return KERN_SUCCESS;
}

/*
 * swapfile_pager_last_unmap()
 *
 * This is called by VM when this memory object is no longer mapped anywhere.
 */
kern_return_t
swapfile_pager_last_unmap(
        memory_object_t mem_obj)
{
        swapfile_pager_t pager;

        PAGER_DEBUG(PAGER_ALL,
            ("swapfile_pager_last_unmap: %p\n", mem_obj));

        pager = swapfile_pager_lookup(mem_obj);

        lck_mtx_lock(&swapfile_pager_lock);
        if (pager->is_mapped) {
                /*
                 * All the mappings are gone, so let go of the one extra
                 * reference that represents all the mappings of this pager.
                 */
                pager->is_mapped = FALSE;
                swapfile_pager_deallocate_internal(pager, TRUE);
                /* caution: deallocate_internal() released the lock! */
        } else {
                lck_mtx_unlock(&swapfile_pager_lock);
        }

        return KERN_SUCCESS;
}


/*
 * swapfile_pager_lookup()
 *
 * Convert from a memory object to the "swapfile_pager" structure that
 * embeds it, asserting that the object really belongs to this EMM.
 */
swapfile_pager_t
swapfile_pager_lookup(
        memory_object_t mem_obj)
{
        swapfile_pager_t pager;

        assert(mem_obj->mo_pager_ops == &swapfile_pager_ops);
        __IGNORE_WCASTALIGN(pager = (swapfile_pager_t) mem_obj);
        assert(pager->ref_count > 0);
        return pager;
}

/*
 * swapfile_pager_create()
 *
 * Allocate and set up a new pager for the given swapfile vnode.
 * If another thread raced us and created a pager for the same vnode,
 * free ours and return the winner's instead.
 */
swapfile_pager_t
swapfile_pager_create(
        struct vnode *vp)
{
        swapfile_pager_t pager, pager2;
        memory_object_control_t control;
        kern_return_t kr;

        pager = (swapfile_pager_t) kalloc(sizeof(*pager));
        if (pager == SWAPFILE_PAGER_NULL) {
                return SWAPFILE_PAGER_NULL;
        }

        /*
         * The vm_map call takes both named entry ports and raw memory
         * objects in the same parameter.  We need to make sure that
         * vm_map does not see this object as a named entry port.  So,
         * we reserve the second word in the object for a fake ip_kotype
         * setting - that will tell vm_map to use it as a memory object.
         */
        pager->swp_pgr_hdr.mo_ikot = IKOT_MEMORY_OBJECT;
        pager->swp_pgr_hdr.mo_pager_ops = &swapfile_pager_ops;
        pager->swp_pgr_hdr.mo_control = MEMORY_OBJECT_CONTROL_NULL;

        pager->is_ready = FALSE;        /* not ready until it has a "name" */
        pager->ref_count = 1;           /* setup reference */
        pager->is_mapped = FALSE;
        pager->swapfile_vnode = vp;

        lck_mtx_lock(&swapfile_pager_lock);
        /* see if anyone raced us to create a pager for the same object */
        queue_iterate(&swapfile_pager_queue,
            pager2,
            swapfile_pager_t,
            pager_queue) {
                if (pager2->swapfile_vnode == vp) {
                        break;
                }
        }
        if (!queue_end(&swapfile_pager_queue,
            (queue_entry_t) pager2)) {
                /* while we hold the lock, transfer our setup ref to winner */
                pager2->ref_count++;
                /* we lost the race, down with the loser... */
                lck_mtx_unlock(&swapfile_pager_lock);
                pager->swapfile_vnode = NULL;
                kfree(pager, sizeof(*pager));
                /* ... and go with the winner */
                pager = pager2;
                /* let the winner make sure the pager gets ready */
                return pager;
        }

        /* enter new pager at the head of our list of pagers */
        queue_enter_first(&swapfile_pager_queue,
            pager,
            swapfile_pager_t,
            pager_queue);
        swapfile_pager_count++;
        if (swapfile_pager_count > swapfile_pager_count_max) {
                swapfile_pager_count_max = swapfile_pager_count;
        }
        lck_mtx_unlock(&swapfile_pager_lock);

        kr = memory_object_create_named((memory_object_t) pager,
            0,
            &control);
        assert(kr == KERN_SUCCESS);

        memory_object_mark_trusted(control);

        lck_mtx_lock(&swapfile_pager_lock);
        /* the new pager is now ready to be used */
        pager->is_ready = TRUE;
        lck_mtx_unlock(&swapfile_pager_lock);

        /* wakeup anyone waiting for this pager to be ready */
        thread_wakeup(&pager->is_ready);

        return pager;
}

/*
 * swapfile_pager_setup()
 *
 * Provide the caller with a memory object backed by the "swapfile" EMM
 * for the given swapfile vnode.  If such a memory object already exists,
 * re-use it, otherwise create a new one.
 */
memory_object_t
swapfile_pager_setup(
        struct vnode *vp)
{
        swapfile_pager_t pager;

        lck_mtx_lock(&swapfile_pager_lock);

        queue_iterate(&swapfile_pager_queue,
            pager,
            swapfile_pager_t,
            pager_queue) {
                if (pager->swapfile_vnode == vp) {
                        break;
                }
        }
        if (queue_end(&swapfile_pager_queue,
            (queue_entry_t) pager)) {
                /* no existing pager for this backing vnode */
                pager = SWAPFILE_PAGER_NULL;
        } else {
                /* make sure pager doesn't disappear */
                pager->ref_count++;
        }

        lck_mtx_unlock(&swapfile_pager_lock);

        if (pager == SWAPFILE_PAGER_NULL) {
                pager = swapfile_pager_create(vp);
                if (pager == SWAPFILE_PAGER_NULL) {
                        return MEMORY_OBJECT_NULL;
                }
        }

        lck_mtx_lock(&swapfile_pager_lock);
        while (!pager->is_ready) {
                lck_mtx_sleep(&swapfile_pager_lock,
                    LCK_SLEEP_DEFAULT,
                    &pager->is_ready,
                    THREAD_UNINT);
        }
        lck_mtx_unlock(&swapfile_pager_lock);

        return (memory_object_t) pager;
}

/*
 * swapfile_pager_control()
 *
 * Return the memory object control for this pager, or NULL if the
 * memory object does not belong to this EMM.
 */
memory_object_control_t
swapfile_pager_control(
        memory_object_t mem_obj)
{
        swapfile_pager_t pager;

        if (mem_obj == MEMORY_OBJECT_NULL ||
            mem_obj->mo_pager_ops != &swapfile_pager_ops) {
                return MEMORY_OBJECT_CONTROL_NULL;
        }
        pager = swapfile_pager_lookup(mem_obj);
        return pager->swp_pgr_hdr.mo_control;
}
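
/*
 * Usage sketch: the BSD mmap() path is the expected caller.  Roughly
 * (simplified; the actual code lives in bsd/kern/kern_mman.c and may
 * differ in detail), when the vnode being mapped is a swap file it
 * substitutes this pager for the regular vnode pager:
 *
 *      if (vnode_isswap(vp)) {
 *              pager = swapfile_pager_setup(vp);
 *              if (pager != MEMORY_OBJECT_NULL) {
 *                      control = swapfile_pager_control(pager);
 *              }
 *      } else {
 *              control = ubc_getobject(vp, UBC_FLAGS_NONE);
 *      }
 */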