/*
 * Copyright (c) 2008 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <mach/kern_return.h>
#include <mach/memory_object_control.h>
#include <mach/upl.h>

#include <kern/ipc_kobject.h>
#include <kern/kalloc.h>
#include <kern/queue.h>

#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <vm/vm_pageout.h>
#include <vm/vm_protos.h>

/*
 * APPLE SWAPFILE MEMORY PAGER
 *
 * This external memory manager (EMM) handles mappings of the swap files.
 * Swap files are not regular files and are used solely to store contents of
 * anonymous memory mappings while not resident in memory.
 * There's no valid reason to map a swap file.  This just puts extra burden
 * on the system, is potentially a security issue and is not reliable since
 * the contents can change at any time with pageout operations.
 * Here are some of the issues with mapping a swap file.
 * * PERFORMANCE:
 *   Each page in the swap file belongs to an anonymous memory object.
 *   Mapping the swap file makes those pages also accessible via a vnode
 *   memory object and each page can now be resident twice.
 * * SECURITY:
 *   Mapping a swap file allows access to other processes' memory.  Swap
 *   files are only accessible by the "root" super-user, who can already
 *   access any process's memory, so this is not a real issue, but if the
 *   permissions on the swap file were changed, it could become one.
 *   Swap files are not "zero-filled" on creation, so until their contents
 *   are overwritten with pageout operations, they still contain whatever
 *   was on the disk blocks they were allocated from.  The "super-user"
 *   could see the contents of free blocks anyway, so this is not a new
 *   security issue, but it may be perceived as one.
 * * ENCRYPTED SWAP:
 *   When swap is encrypted, one does not expect to find any clear contents
 *   in the swap files.  Since unused blocks are not scrubbed, they could
 *   still contain clear contents.  If these contents are visible through
 *   a mapping of the swap file, it makes it look like swap is not really
 *   encrypted.
 *
 * We can't legitimately prevent a user process with appropriate privileges
 * from mapping a swap file, but we can prevent it from accessing its actual
 * contents.
 * This pager mostly handles page-in requests (from memory_object_data_request())
 * for swap file mappings and just returns bogus data.
 * Pageouts are not handled, so mmap() has to make sure it does not allow
 * writable (i.e. MAP_SHARED and PROT_WRITE) mappings of swap files.
 */
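
/*
 * Illustrative sketch, not part of this file: how the mmap() path could
 * route a swap-file vnode to this pager.  The surrounding checks and the
 * vnode_isswap() test shown here are assumptions for the example, not
 * necessarily the exact code in bsd/kern/kern_mman.c.
 */
#if 0 /* example only */
	if (vnode_isswap(vp)) {
		/* writable mappings of swap files must be refused up front... */
		if (prot & PROT_WRITE) {
			return EPERM;
		}
		/* ...and read-only mappings get this pager's bogus data */
		pager = swapfile_pager_setup(vp);
		if (pager == MEMORY_OBJECT_NULL) {
			return ENOMEM;
		}
	}
#endif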

/* forward declarations */
void swapfile_pager_reference(memory_object_t mem_obj);
void swapfile_pager_deallocate(memory_object_t mem_obj);
kern_return_t swapfile_pager_init(memory_object_t mem_obj,
				  memory_object_control_t control,
				  memory_object_cluster_size_t pg_size);
kern_return_t swapfile_pager_terminate(memory_object_t mem_obj);
kern_return_t swapfile_pager_data_request(memory_object_t mem_obj,
					  memory_object_offset_t offset,
					  memory_object_cluster_size_t length,
					  vm_prot_t protection_required,
					  memory_object_fault_info_t fault_info);
kern_return_t swapfile_pager_data_return(memory_object_t mem_obj,
					 memory_object_offset_t offset,
					 memory_object_cluster_size_t data_cnt,
					 memory_object_offset_t *resid_offset,
					 int *io_error,
					 boolean_t dirty,
					 boolean_t kernel_copy,
					 int upl_flags);
kern_return_t swapfile_pager_data_initialize(memory_object_t mem_obj,
					     memory_object_offset_t offset,
					     memory_object_cluster_size_t data_cnt);
kern_return_t swapfile_pager_data_unlock(memory_object_t mem_obj,
					 memory_object_offset_t offset,
					 memory_object_size_t size,
					 vm_prot_t desired_access);
kern_return_t swapfile_pager_synchronize(memory_object_t mem_obj,
					 memory_object_offset_t offset,
					 memory_object_size_t length,
					 vm_sync_t sync_flags);
kern_return_t swapfile_pager_map(memory_object_t mem_obj,
				 vm_prot_t prot);
kern_return_t swapfile_pager_last_unmap(memory_object_t mem_obj);

/*
 * Vector of VM operations for this EMM.
 * These routines are invoked by VM via the memory_object_*() interfaces.
 */
const struct memory_object_pager_ops swapfile_pager_ops = {
	swapfile_pager_reference,
	swapfile_pager_deallocate,
	swapfile_pager_init,
	swapfile_pager_terminate,
	swapfile_pager_data_request,
	swapfile_pager_data_return,
	swapfile_pager_data_initialize,
	swapfile_pager_data_unlock,
	swapfile_pager_synchronize,
	swapfile_pager_map,
	swapfile_pager_last_unmap,
	NULL, /* data_reclaim */
	"swapfile pager"
};
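
/*
 * Illustrative sketch, not part of this file: the generic memory_object_*()
 * entry points dispatch through this ops vector.  The actual forwarding code
 * lives in osfmk/vm/memory_object.c; the "mo_pager_ops" field name below is
 * an assumption for the example.
 */
#if 0 /* example only */
kern_return_t
memory_object_data_request(
	memory_object_t			memory_object,
	memory_object_offset_t		offset,
	memory_object_cluster_size_t	length,
	vm_prot_t			desired_access,
	memory_object_fault_info_t	fault_info)
{
	/* for a swapfile pager, this lands in swapfile_pager_data_request() */
	return (memory_object->mo_pager_ops->memory_object_data_request)(
		memory_object, offset, length, desired_access, fault_info);
}
#endif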

/*
 * The "swapfile_pager" describes a memory object backed by
 * the "swapfile" EMM.
 */
typedef struct swapfile_pager {
	struct ipc_object_header pager_header;	/* fake ip_kotype() */
	memory_object_pager_ops_t pager_ops;	/* == &swapfile_pager_ops */
	queue_chain_t		pager_queue;	/* next & prev pagers */
	unsigned int		ref_count;	/* reference count */
	boolean_t		is_ready;	/* is this pager ready ? */
	boolean_t		is_mapped;	/* is this pager mapped ? */
	memory_object_control_t pager_control;	/* mem object control handle */
	struct vnode		*swapfile_vnode;/* the swapfile's vnode */
} *swapfile_pager_t;
#define SWAPFILE_PAGER_NULL	((swapfile_pager_t) NULL)
#define pager_ikot pager_header.io_bits

/*
 * List of memory objects managed by this EMM.
 * The list is protected by the "swapfile_pager_lock" lock.
 */
int swapfile_pager_count = 0;		/* number of pagers */
queue_head_t swapfile_pager_queue;
decl_lck_mtx_data(,swapfile_pager_lock)

/*
 * Statistics & counters.
 */
int swapfile_pager_count_max = 0;


lck_grp_t		swapfile_pager_lck_grp;
lck_grp_attr_t		swapfile_pager_lck_grp_attr;
lck_attr_t		swapfile_pager_lck_attr;


/* internal prototypes */
swapfile_pager_t swapfile_pager_create(struct vnode *vp);
swapfile_pager_t swapfile_pager_lookup(memory_object_t mem_obj);
void swapfile_pager_dequeue(swapfile_pager_t pager);
void swapfile_pager_deallocate_internal(swapfile_pager_t pager,
					boolean_t locked);
void swapfile_pager_terminate_internal(swapfile_pager_t pager);


#if DEBUG
int swapfile_pagerdebug = 0;
#define PAGER_ALL		0xffffffff
#define PAGER_INIT		0x00000001
#define PAGER_PAGEIN		0x00000002

#define PAGER_DEBUG(LEVEL, A)						\
	MACRO_BEGIN							\
	if ((swapfile_pagerdebug & LEVEL)==LEVEL) {			\
		printf A;						\
	}								\
	MACRO_END
#else
#define PAGER_DEBUG(LEVEL, A)
#endif


void
swapfile_pager_bootstrap(void)
{
	lck_grp_attr_setdefault(&swapfile_pager_lck_grp_attr);
	lck_grp_init(&swapfile_pager_lck_grp, "swapfile pager", &swapfile_pager_lck_grp_attr);
	lck_attr_setdefault(&swapfile_pager_lck_attr);
	lck_mtx_init(&swapfile_pager_lock, &swapfile_pager_lck_grp, &swapfile_pager_lck_attr);
	queue_init(&swapfile_pager_queue);
}

/*
 * swapfile_pager_init()
 *
 * Initialize the memory object and make it ready to be used and mapped.
 */
kern_return_t
swapfile_pager_init(
	memory_object_t		mem_obj,
	memory_object_control_t	control,
#if !DEBUG
	__unused
#endif
	memory_object_cluster_size_t pg_size)
{
	swapfile_pager_t	pager;
	kern_return_t		kr;
	memory_object_attr_info_data_t attributes;

	PAGER_DEBUG(PAGER_ALL,
		    ("swapfile_pager_init: %p, %p, %x\n",
		     mem_obj, control, pg_size));

	if (control == MEMORY_OBJECT_CONTROL_NULL)
		return KERN_INVALID_ARGUMENT;

	pager = swapfile_pager_lookup(mem_obj);

	memory_object_control_reference(control);

	pager->pager_control = control;

	attributes.copy_strategy = MEMORY_OBJECT_COPY_DELAY;
	attributes.cluster_size = (1 << (PAGE_SHIFT));
	attributes.may_cache_object = FALSE;
	attributes.temporary = TRUE;

	kr = memory_object_change_attributes(
		control,
		MEMORY_OBJECT_ATTRIBUTE_INFO,
		(memory_object_info_t) &attributes,
		MEMORY_OBJECT_ATTR_INFO_COUNT);
	if (kr != KERN_SUCCESS)
		panic("swapfile_pager_init: "
		      "memory_object_change_attributes() failed");

	return KERN_SUCCESS;
}

/*
 * swapfile_pager_data_return()
 *
 * Handles page-out requests from VM.  This should never happen since
 * the pages provided by this EMM are not supposed to be dirty or dirtied
 * and VM should simply discard the contents and reclaim the pages if it
 * needs to.
 */
kern_return_t
swapfile_pager_data_return(
	__unused memory_object_t	mem_obj,
	__unused memory_object_offset_t	offset,
	__unused memory_object_cluster_size_t	data_cnt,
	__unused memory_object_offset_t	*resid_offset,
	__unused int			*io_error,
	__unused boolean_t		dirty,
	__unused boolean_t		kernel_copy,
	__unused int			upl_flags)
{
	panic("swapfile_pager_data_return: should never get called");
	return KERN_FAILURE;
}

kern_return_t
swapfile_pager_data_initialize(
	__unused memory_object_t	mem_obj,
	__unused memory_object_offset_t	offset,
	__unused memory_object_cluster_size_t	data_cnt)
{
	panic("swapfile_pager_data_initialize: should never get called");
	return KERN_FAILURE;
}

kern_return_t
swapfile_pager_data_unlock(
	__unused memory_object_t	mem_obj,
	__unused memory_object_offset_t	offset,
	__unused memory_object_size_t	size,
	__unused vm_prot_t		desired_access)
{
	return KERN_FAILURE;
}

/*
 * swapfile_pager_data_request()
 *
 * Handles page-in requests from VM.
 */
kern_return_t
swapfile_pager_data_request(
	memory_object_t		mem_obj,
	memory_object_offset_t	offset,
	memory_object_cluster_size_t	length,
#if !DEBUG
	__unused
#endif
	vm_prot_t		protection_required,
	__unused memory_object_fault_info_t mo_fault_info)
{
	swapfile_pager_t	pager;
	memory_object_control_t	mo_control;
	upl_t			upl;
	int			upl_flags;
	upl_size_t		upl_size;
	upl_page_info_t		*upl_pl = NULL;
	unsigned int		pl_count;
	vm_object_t		dst_object;
	kern_return_t		kr, retval;
	vm_map_offset_t		kernel_mapping;
	vm_offset_t		dst_vaddr;
	char			*dst_ptr;
	vm_offset_t		cur_offset;
	vm_map_entry_t		map_entry;

	PAGER_DEBUG(PAGER_ALL, ("swapfile_pager_data_request: %p, %llx, %x, %x\n", mem_obj, offset, length, protection_required));

	kernel_mapping = 0;
	upl = NULL;
	upl_pl = NULL;

	pager = swapfile_pager_lookup(mem_obj);
	assert(pager->is_ready);
	assert(pager->ref_count > 1); /* pager is alive and mapped */

	PAGER_DEBUG(PAGER_PAGEIN, ("swapfile_pager_data_request: %p, %llx, %x, %x, pager %p\n", mem_obj, offset, length, protection_required, pager));

	/*
	 * Gather in a UPL all the VM pages requested by VM.
	 */
	mo_control = pager->pager_control;

	upl_size = length;
	upl_flags =
		UPL_RET_ONLY_ABSENT |
		UPL_SET_LITE |
		UPL_NO_SYNC |
		UPL_CLEAN_IN_PLACE |	/* triggers UPL_CLEAR_DIRTY */
		UPL_SET_INTERNAL;
	pl_count = 0;
	kr = memory_object_upl_request(mo_control,
				       offset, upl_size,
				       &upl, NULL, NULL, upl_flags);
	if (kr != KERN_SUCCESS) {
		retval = kr;
		goto done;
	}
	dst_object = mo_control->moc_object;
	assert(dst_object != VM_OBJECT_NULL);


	/*
	 * Reserve a virtual page in the kernel address space to map each
	 * destination physical page when it's its turn to be processed.
	 */
	vm_object_reference(kernel_object);	/* ref. for mapping */
	kr = vm_map_find_space(kernel_map,
			       &kernel_mapping,
			       PAGE_SIZE_64,
			       0,
			       0,
			       &map_entry);
	if (kr != KERN_SUCCESS) {
		vm_object_deallocate(kernel_object);
		retval = kr;
		goto done;
	}
	map_entry->object.vm_object = kernel_object;
	map_entry->offset = kernel_mapping - VM_MIN_KERNEL_ADDRESS;
	vm_map_unlock(kernel_map);
	dst_vaddr = CAST_DOWN(vm_offset_t, kernel_mapping);
	dst_ptr = (char *) dst_vaddr;

	/*
	 * Fill in the contents of the pages requested by VM.
	 */
	upl_pl = UPL_GET_INTERNAL_PAGE_LIST(upl);
	pl_count = length / PAGE_SIZE;
	for (cur_offset = 0; cur_offset < length; cur_offset += PAGE_SIZE) {
		ppnum_t dst_pnum;

		if (!upl_page_present(upl_pl, (int)(cur_offset / PAGE_SIZE))) {
			/* this page is not in the UPL: skip it */
			continue;
		}

		/*
		 * Establish an explicit pmap mapping of the destination
		 * physical page.
		 * We can't do a regular VM mapping because the VM page
		 * is "busy".
		 */
		dst_pnum = (ppnum_t)
			upl_phys_page(upl_pl, (int)(cur_offset / PAGE_SIZE));
		assert(dst_pnum != 0);
		pmap_enter(kernel_pmap,
			   kernel_mapping,
			   dst_pnum,
			   VM_PROT_READ | VM_PROT_WRITE,
			   0,
			   TRUE);

		memset(dst_ptr, '\0', PAGE_SIZE);
		/* add an end-of-line to keep line counters happy */
		dst_ptr[PAGE_SIZE-1] = '\n';

		/*
		 * Remove the pmap mapping of the destination page
		 * in the kernel.
		 */
		pmap_remove(kernel_pmap,
			    (addr64_t) kernel_mapping,
			    (addr64_t) (kernel_mapping + PAGE_SIZE_64));

	}

	retval = KERN_SUCCESS;
done:
	if (upl != NULL) {
		/* clean up the UPL */

		/*
		 * The pages are currently dirty because we've just been
		 * writing on them, but as far as we're concerned, they're
		 * clean since they contain their "original" contents as
		 * provided by us, the pager.
		 * Tell the UPL to mark them "clean".
		 */
		upl_clear_dirty(upl, TRUE);

		/* abort or commit the UPL */
		if (retval != KERN_SUCCESS) {
			upl_abort(upl, 0);
		} else {
			boolean_t empty;
			upl_commit_range(upl, 0, upl->size,
					 UPL_COMMIT_CS_VALIDATED,
					 upl_pl, pl_count, &empty);
		}

		/* and deallocate the UPL */
		upl_deallocate(upl);
		upl = NULL;
	}
	if (kernel_mapping != 0) {
		/* clean up the mapping of the source and destination pages */
		kr = vm_map_remove(kernel_map,
				   kernel_mapping,
				   kernel_mapping + PAGE_SIZE_64,
				   VM_MAP_NO_FLAGS);
		assert(kr == KERN_SUCCESS);
		kernel_mapping = 0;
		dst_vaddr = 0;
	}

	return retval;
}
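
/*
 * Illustrative sketch, not part of this file: what a user process would
 * observe when reading a read-only mapping of a swap file.  Every page
 * materialized by swapfile_pager_data_request() is zero-filled except for
 * a trailing newline, so the mapping never exposes real swapped-out data.
 * The swap file path below is an assumption for the example.
 */
#if 0 /* example only, user-space code */
#include <fcntl.h>
#include <sys/mman.h>

	int fd = open("/private/var/vm/swapfile0", O_RDONLY); /* hypothetical path */
	char *p = mmap(NULL, 4096, PROT_READ, MAP_PRIVATE, fd, 0);
	/* expected: p[0] through p[4094] are '\0' and p[4095] is '\n' */
#endif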

/*
 * swapfile_pager_reference()
 *
 * Get a reference on this memory object.
 * For external usage only.  Assumes that the initial reference count is
 * not 0, i.e. one should not "revive" a dead pager this way.
 */
void
swapfile_pager_reference(
	memory_object_t		mem_obj)
{
	swapfile_pager_t	pager;

	pager = swapfile_pager_lookup(mem_obj);

	lck_mtx_lock(&swapfile_pager_lock);
	assert(pager->ref_count > 0);
	pager->ref_count++;
	lck_mtx_unlock(&swapfile_pager_lock);
}


/*
 * swapfile_pager_dequeue:
 *
 * Removes a pager from the list of pagers.
 *
 * The caller must hold "swapfile_pager_lock".
 */
void
swapfile_pager_dequeue(
	swapfile_pager_t pager)
{
	assert(!pager->is_mapped);

	queue_remove(&swapfile_pager_queue,
		     pager,
		     swapfile_pager_t,
		     pager_queue);
	pager->pager_queue.next = NULL;
	pager->pager_queue.prev = NULL;

	swapfile_pager_count--;
}

/*
 * swapfile_pager_terminate_internal:
 *
 * Trigger the asynchronous termination of the memory object associated
 * with this pager.
 * When the memory object is terminated, there will be one more call
 * to memory_object_deallocate() (i.e. swapfile_pager_deallocate())
 * to finish the clean up.
 *
 * "swapfile_pager_lock" should not be held by the caller.
 * We don't need the lock because the pager has already been removed from
 * the pagers' list and is now ours exclusively.
 */
void
swapfile_pager_terminate_internal(
	swapfile_pager_t pager)
{
	assert(pager->is_ready);
	assert(!pager->is_mapped);

	if (pager->swapfile_vnode != NULL) {
		pager->swapfile_vnode = NULL;
	}

	/* trigger the destruction of the memory object */
	memory_object_destroy(pager->pager_control, 0);
}

/*
 * swapfile_pager_deallocate_internal()
 *
 * Release a reference on this pager and free it when the last
 * reference goes away.
 * Can be called with swapfile_pager_lock held or not but always returns
 * with it unlocked.
 */
void
swapfile_pager_deallocate_internal(
	swapfile_pager_t	pager,
	boolean_t		locked)
{
	if (! locked) {
		lck_mtx_lock(&swapfile_pager_lock);
	}

	/* drop a reference on this pager */
	pager->ref_count--;

	if (pager->ref_count == 1) {
		/*
		 * Only the "named" reference is left, which means that
		 * no one is really holding on to this pager anymore.
		 * Terminate it.
		 */
		swapfile_pager_dequeue(pager);
		/* the pager is all ours: no need for the lock now */
		lck_mtx_unlock(&swapfile_pager_lock);
		swapfile_pager_terminate_internal(pager);
	} else if (pager->ref_count == 0) {
		/*
		 * Dropped the existence reference; the memory object has
		 * been terminated.  Do some final cleanup and release the
		 * pager structure.
		 */
		lck_mtx_unlock(&swapfile_pager_lock);
		if (pager->pager_control != MEMORY_OBJECT_CONTROL_NULL) {
			memory_object_control_deallocate(pager->pager_control);
			pager->pager_control = MEMORY_OBJECT_CONTROL_NULL;
		}
		kfree(pager, sizeof (*pager));
		pager = SWAPFILE_PAGER_NULL;
	} else {
		/* there are still plenty of references: keep going... */
		lck_mtx_unlock(&swapfile_pager_lock);
	}

	/* caution: lock is not held on return... */
}
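
/*
 * Note on reference counting (a summary of the code above; the exact
 * origin of the "named" reference is an assumption): a pager starts with
 * one "setup" reference from swapfile_pager_create(), VM takes a "named"
 * reference when the memory object is set up, and swapfile_pager_map()
 * takes one more reference that stands for all of the pager's mappings.
 * Once ref_count falls back to 1, only the "named" reference remains and
 * the pager is terminated; the final drop to 0 frees the structure.
 */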

/*
 * swapfile_pager_deallocate()
 *
 * Release a reference on this pager and free it when the last
 * reference goes away.
 */
void
swapfile_pager_deallocate(
	memory_object_t		mem_obj)
{
	swapfile_pager_t	pager;

	PAGER_DEBUG(PAGER_ALL, ("swapfile_pager_deallocate: %p\n", mem_obj));
	pager = swapfile_pager_lookup(mem_obj);
	swapfile_pager_deallocate_internal(pager, FALSE);
}

/*
 *
 */
kern_return_t
swapfile_pager_terminate(
#if !DEBUG
	__unused
#endif
	memory_object_t	mem_obj)
{
	PAGER_DEBUG(PAGER_ALL, ("swapfile_pager_terminate: %p\n", mem_obj));

	return KERN_SUCCESS;
}

/*
 *
 */
kern_return_t
swapfile_pager_synchronize(
	memory_object_t		mem_obj,
	memory_object_offset_t	offset,
	memory_object_size_t	length,
	__unused vm_sync_t	sync_flags)
{
	swapfile_pager_t	pager;

	PAGER_DEBUG(PAGER_ALL, ("swapfile_pager_synchronize: %p\n", mem_obj));

	pager = swapfile_pager_lookup(mem_obj);

	memory_object_synchronize_completed(pager->pager_control,
					    offset, length);

	return KERN_SUCCESS;
}

/*
 * swapfile_pager_map()
 *
 * This allows VM to let us, the EMM, know that this memory object
 * is currently mapped one or more times.  This is called by VM each time
 * the memory object gets mapped and we take one extra reference on the
 * memory object to account for all its mappings.
 */
kern_return_t
swapfile_pager_map(
	memory_object_t		mem_obj,
	__unused vm_prot_t	prot)
{
	swapfile_pager_t	pager;

	PAGER_DEBUG(PAGER_ALL, ("swapfile_pager_map: %p\n", mem_obj));

	pager = swapfile_pager_lookup(mem_obj);

	lck_mtx_lock(&swapfile_pager_lock);
	assert(pager->is_ready);
	assert(pager->ref_count > 0); /* pager is alive */
	if (pager->is_mapped == FALSE) {
		/*
		 * First mapping of this pager: take an extra reference
		 * that will remain until all the mappings of this pager
		 * are removed.
		 */
		pager->is_mapped = TRUE;
		pager->ref_count++;
	}
	lck_mtx_unlock(&swapfile_pager_lock);

	return KERN_SUCCESS;
}

/*
 * swapfile_pager_last_unmap()
 *
 * This is called by VM when this memory object is no longer mapped anywhere.
 */
kern_return_t
swapfile_pager_last_unmap(
	memory_object_t		mem_obj)
{
	swapfile_pager_t	pager;

	PAGER_DEBUG(PAGER_ALL,
		    ("swapfile_pager_last_unmap: %p\n", mem_obj));

	pager = swapfile_pager_lookup(mem_obj);

	lck_mtx_lock(&swapfile_pager_lock);
	if (pager->is_mapped) {
		/*
		 * All the mappings are gone, so let go of the one extra
		 * reference that represents all the mappings of this pager.
		 */
		pager->is_mapped = FALSE;
		swapfile_pager_deallocate_internal(pager, TRUE);
		/* caution: deallocate_internal() released the lock ! */
	} else {
		lck_mtx_unlock(&swapfile_pager_lock);
	}

	return KERN_SUCCESS;
}


/*
 *
 */
swapfile_pager_t
swapfile_pager_lookup(
	memory_object_t	mem_obj)
{
	swapfile_pager_t	pager;

	pager = (swapfile_pager_t) mem_obj;
	assert(pager->pager_ops == &swapfile_pager_ops);
	assert(pager->ref_count > 0);
	return pager;
}

swapfile_pager_t
swapfile_pager_create(
	struct vnode *vp)
{
	swapfile_pager_t	pager, pager2;
	memory_object_control_t	control;
	kern_return_t		kr;

	pager = (swapfile_pager_t) kalloc(sizeof (*pager));
	if (pager == SWAPFILE_PAGER_NULL) {
		return SWAPFILE_PAGER_NULL;
	}

	/*
	 * The vm_map call takes both named entry ports and raw memory
	 * objects in the same parameter.  We need to make sure that
	 * vm_map does not see this object as a named entry port.  So,
	 * we reserve the second word in the object for a fake ip_kotype
	 * setting - that will tell vm_map to use it as a memory object.
	 */
	pager->pager_ops = &swapfile_pager_ops;
	pager->pager_ikot = IKOT_MEMORY_OBJECT;
	pager->is_ready = FALSE;	/* not ready until it has a "name" */
	pager->ref_count = 1;		/* setup reference */
	pager->is_mapped = FALSE;
	pager->pager_control = MEMORY_OBJECT_CONTROL_NULL;
	pager->swapfile_vnode = vp;

	lck_mtx_lock(&swapfile_pager_lock);
	/* see if anyone raced us to create a pager for the same object */
	queue_iterate(&swapfile_pager_queue,
		      pager2,
		      swapfile_pager_t,
		      pager_queue) {
		if (pager2->swapfile_vnode == vp) {
			break;
		}
	}
	if (! queue_end(&swapfile_pager_queue,
			(queue_entry_t) pager2)) {
		/* while we hold the lock, transfer our setup ref to winner */
		pager2->ref_count++;
		/* we lost the race, down with the loser... */
		lck_mtx_unlock(&swapfile_pager_lock);
		pager->swapfile_vnode = NULL;
		kfree(pager, sizeof (*pager));
		/* ... and go with the winner */
		pager = pager2;
		/* let the winner make sure the pager gets ready */
		return pager;
	}

	/* enter new pager at the head of our list of pagers */
	queue_enter_first(&swapfile_pager_queue,
			  pager,
			  swapfile_pager_t,
			  pager_queue);
	swapfile_pager_count++;
	if (swapfile_pager_count > swapfile_pager_count_max) {
		swapfile_pager_count_max = swapfile_pager_count;
	}
	lck_mtx_unlock(&swapfile_pager_lock);

	kr = memory_object_create_named((memory_object_t) pager,
					0,
					&control);
	assert(kr == KERN_SUCCESS);

	lck_mtx_lock(&swapfile_pager_lock);
	/* the new pager is now ready to be used */
	pager->is_ready = TRUE;
	lck_mtx_unlock(&swapfile_pager_lock);

	/* wakeup anyone waiting for this pager to be ready */
	thread_wakeup(&pager->is_ready);

	return pager;
}

/*
 * swapfile_pager_setup()
 *
 * Provide the caller with a memory object backed by the "swapfile" EMM
 * for the given swap file vnode.  If such a memory object already exists,
 * re-use it, otherwise create a new memory object.
 */
memory_object_t
swapfile_pager_setup(
	struct vnode *vp)
{
	swapfile_pager_t	pager;

	lck_mtx_lock(&swapfile_pager_lock);

	queue_iterate(&swapfile_pager_queue,
		      pager,
		      swapfile_pager_t,
		      pager_queue) {
		if (pager->swapfile_vnode == vp) {
			break;
		}
	}
	if (queue_end(&swapfile_pager_queue,
		      (queue_entry_t) pager)) {
		/* no existing pager for this backing object */
		pager = SWAPFILE_PAGER_NULL;
	} else {
		/* make sure pager doesn't disappear */
		pager->ref_count++;
	}

	lck_mtx_unlock(&swapfile_pager_lock);

	if (pager == SWAPFILE_PAGER_NULL) {
		pager = swapfile_pager_create(vp);
		if (pager == SWAPFILE_PAGER_NULL) {
			return MEMORY_OBJECT_NULL;
		}
	}

	lck_mtx_lock(&swapfile_pager_lock);
	while (!pager->is_ready) {
		lck_mtx_sleep(&swapfile_pager_lock,
			      LCK_SLEEP_DEFAULT,
			      &pager->is_ready,
			      THREAD_UNINT);
	}
	lck_mtx_unlock(&swapfile_pager_lock);

	return (memory_object_t) pager;
}
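
/*
 * Design note: swapfile_pager_create() publishes the new pager on
 * swapfile_pager_queue before it is fully set up, so concurrent callers of
 * swapfile_pager_setup() can find it early.  The "is_ready" flag plus the
 * lck_mtx_sleep()/thread_wakeup() pair above make those callers wait until
 * memory_object_create_named() has completed before the pager is handed out.
 */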

memory_object_control_t
swapfile_pager_control(
	memory_object_t	mem_obj)
{
	swapfile_pager_t	pager;

	pager = swapfile_pager_lookup(mem_obj);

	return pager->pager_control;
}