/*
 * Copyright (c) 2008 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <mach/kern_return.h>
#include <mach/memory_object_control.h>
#include <mach/upl.h>

#include <kern/ipc_kobject.h>
#include <kern/kalloc.h>
#include <kern/queue.h>

#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <vm/vm_pageout.h>
#include <vm/vm_protos.h>


/*
 * APPLE SWAPFILE MEMORY PAGER
 *
 * This external memory manager (EMM) handles mappings of the swap files.
 * Swap files are not regular files and are used solely to store contents of
 * anonymous memory mappings while not resident in memory.
 * There's no valid reason to map a swap file.  Doing so just puts an extra
 * burden on the system, is a potential security issue and is not reliable,
 * since the contents can change at any time with pageout operations.
 * Here are some of the issues with mapping a swap file:
 * * PERFORMANCE:
 *   Each page in the swap file belongs to an anonymous memory object.
 *   Mapping the swap file makes those pages also accessible via a vnode
 *   memory object, so each page can then be resident twice.
 * * SECURITY:
 *   Mapping a swap file allows access to other processes' memory.  Swap
 *   files are only accessible by the "root" super-user, who can already
 *   access any process's memory, so this is not a real issue, but if the
 *   permissions on the swap file were changed, it could become one.
 *   Swap files are not "zero-filled" on creation, so until their contents
 *   are overwritten with pageout operations, they still contain whatever
 *   was on the disk blocks they were allocated from.  The super-user could
 *   see the contents of free blocks anyway, so this is not a new security
 *   issue, but it may be perceived as one.
 * * ENCRYPTED SWAP:
 *   When swap is encrypted, one does not expect to find any clear contents
 *   in the swap files.  Since unused blocks are not scrubbed, they could
 *   still contain clear contents.  If these contents are visible through a
 *   mapping of the swap file, it makes it look like swap is not really
 *   encrypted.
 *
 * We can't legitimately prevent a user process with appropriate privileges
 * from mapping a swap file, but we can prevent it from accessing its actual
 * contents.
 * This pager mostly handles page-in requests (from
 * memory_object_data_request()) for swap file mappings and just returns
 * bogus data.
 * Pageouts are not handled, so mmap() has to make sure it does not allow
 * writable (i.e. MAP_SHARED and PROT_WRITE) mappings of swap files.
 */
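
/*
 * For illustration, the mmap() guard mentioned above could look roughly
 * like the sketch below.  This is only a hedged sketch, not the actual
 * BSD kern_mman.c code; it assumes a vnode_isswap()-style predicate for
 * identifying swap file vnodes:
 *
 *     if (vnode_isswap(vp) &&
 *         (flags & MAP_SHARED) && (prot & PROT_WRITE))
 *             return EPERM;   // no writable mappings of swap files
 *
 * Read-only mappings then fall through to this pager, which satisfies
 * page-ins with bogus (zero-filled) data instead of the swap file's
 * real contents.
 */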

/* forward declarations */
void swapfile_pager_reference(memory_object_t mem_obj);
void swapfile_pager_deallocate(memory_object_t mem_obj);
kern_return_t swapfile_pager_init(memory_object_t mem_obj,
    memory_object_control_t control,
    memory_object_cluster_size_t pg_size);
kern_return_t swapfile_pager_terminate(memory_object_t mem_obj);
kern_return_t swapfile_pager_data_request(memory_object_t mem_obj,
    memory_object_offset_t offset,
    memory_object_cluster_size_t length,
    vm_prot_t protection_required,
    memory_object_fault_info_t fault_info);
kern_return_t swapfile_pager_data_return(memory_object_t mem_obj,
    memory_object_offset_t offset,
    memory_object_cluster_size_t data_cnt,
    memory_object_offset_t *resid_offset,
    int *io_error,
    boolean_t dirty,
    boolean_t kernel_copy,
    int upl_flags);
kern_return_t swapfile_pager_data_initialize(memory_object_t mem_obj,
    memory_object_offset_t offset,
    memory_object_cluster_size_t data_cnt);
kern_return_t swapfile_pager_data_unlock(memory_object_t mem_obj,
    memory_object_offset_t offset,
    memory_object_size_t size,
    vm_prot_t desired_access);
kern_return_t swapfile_pager_synchronize(memory_object_t mem_obj,
    memory_object_offset_t offset,
    memory_object_size_t length,
    vm_sync_t sync_flags);
kern_return_t swapfile_pager_map(memory_object_t mem_obj,
    vm_prot_t prot);
kern_return_t swapfile_pager_last_unmap(memory_object_t mem_obj);

/*
 * Vector of VM operations for this EMM.
 * These routines are invoked by VM via the memory_object_*() interfaces.
 */
const struct memory_object_pager_ops swapfile_pager_ops = {
    swapfile_pager_reference,
    swapfile_pager_deallocate,
    swapfile_pager_init,
    swapfile_pager_terminate,
    swapfile_pager_data_request,
    swapfile_pager_data_return,
    swapfile_pager_data_initialize,
    swapfile_pager_data_unlock,
    swapfile_pager_synchronize,
    swapfile_pager_map,
    swapfile_pager_last_unmap,
    NULL, /* data_reclaim */
    "swapfile pager"
};

/*
 * The "swapfile_pager" describes a memory object backed by
 * the "swapfile" EMM.
 */
typedef struct swapfile_pager {
    struct ipc_object_header  pager_header;   /* fake ip_kotype() */
    memory_object_pager_ops_t pager_ops;      /* == &swapfile_pager_ops */
    queue_chain_t             pager_queue;    /* next & prev pagers */
    unsigned int              ref_count;      /* reference count */
    boolean_t                 is_ready;       /* is this pager ready? */
    boolean_t                 is_mapped;      /* is this pager mapped? */
    memory_object_control_t   pager_control;  /* mem object control handle */
    struct vnode              *swapfile_vnode; /* the swapfile's vnode */
} *swapfile_pager_t;
#define SWAPFILE_PAGER_NULL ((swapfile_pager_t) NULL)
#define pager_ikot pager_header.io_bits

/*
 * List of memory objects managed by this EMM.
 * The list is protected by the "swapfile_pager_lock" lock.
 */
int swapfile_pager_count = 0; /* number of pagers */
queue_head_t swapfile_pager_queue;
decl_lck_mtx_data(,swapfile_pager_lock)

/*
 * Statistics & counters.
 */
int swapfile_pager_count_max = 0;


lck_grp_t swapfile_pager_lck_grp;
lck_grp_attr_t swapfile_pager_lck_grp_attr;
lck_attr_t swapfile_pager_lck_attr;


/* internal prototypes */
swapfile_pager_t swapfile_pager_create(struct vnode *vp);
swapfile_pager_t swapfile_pager_lookup(memory_object_t mem_obj);
void swapfile_pager_dequeue(swapfile_pager_t pager);
void swapfile_pager_deallocate_internal(swapfile_pager_t pager,
    boolean_t locked);
void swapfile_pager_terminate_internal(swapfile_pager_t pager);


#if DEBUG
int swapfile_pagerdebug = 0;
#define PAGER_ALL    0xffffffff
#define PAGER_INIT   0x00000001
#define PAGER_PAGEIN 0x00000002

#define PAGER_DEBUG(LEVEL, A)                               \
    MACRO_BEGIN                                             \
    if ((swapfile_pagerdebug & LEVEL) == LEVEL) {           \
        printf A;                                           \
    }                                                       \
    MACRO_END
#else
#define PAGER_DEBUG(LEVEL, A)
#endif


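/*
 * swapfile_pager_bootstrap()
 *
 * Set up the global state of this EMM: the lock group and mutex that
 * protect the list of swapfile pagers, and the (initially empty) list
 * itself.  Expected to run once, during VM startup, before any swapfile
 * pager can be created.
 */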
void
swapfile_pager_bootstrap(void)
{
    lck_grp_attr_setdefault(&swapfile_pager_lck_grp_attr);
    lck_grp_init(&swapfile_pager_lck_grp, "swapfile pager", &swapfile_pager_lck_grp_attr);
    lck_attr_setdefault(&swapfile_pager_lck_attr);
    lck_mtx_init(&swapfile_pager_lock, &swapfile_pager_lck_grp, &swapfile_pager_lck_attr);
    queue_init(&swapfile_pager_queue);
}

/*
 * swapfile_pager_init()
 *
 * Initialize the memory object and make it ready to be used and mapped.
 */
kern_return_t
swapfile_pager_init(
    memory_object_t mem_obj,
    memory_object_control_t control,
#if !DEBUG
    __unused
#endif
    memory_object_cluster_size_t pg_size)
{
    swapfile_pager_t pager;
    kern_return_t kr;
    memory_object_attr_info_data_t attributes;

    PAGER_DEBUG(PAGER_ALL,
        ("swapfile_pager_init: %p, %p, %x\n",
         mem_obj, control, pg_size));

    if (control == MEMORY_OBJECT_CONTROL_NULL)
        return KERN_INVALID_ARGUMENT;

    pager = swapfile_pager_lookup(mem_obj);

    memory_object_control_reference(control);

    pager->pager_control = control;

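    /*
     * The attributes below keep this object's pages transient:
     * MEMORY_OBJECT_COPY_DELAY is the usual lazy copy-on-write
     * strategy, while "may_cache_object == FALSE" and
     * "temporary == TRUE" tell VM not to cache the object and that
     * its contents need not be preserved.
     */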
    attributes.copy_strategy = MEMORY_OBJECT_COPY_DELAY;
    attributes.cluster_size = (1 << (PAGE_SHIFT));
    attributes.may_cache_object = FALSE;
    attributes.temporary = TRUE;

    kr = memory_object_change_attributes(
        control,
        MEMORY_OBJECT_ATTRIBUTE_INFO,
        (memory_object_info_t) &attributes,
        MEMORY_OBJECT_ATTR_INFO_COUNT);
    if (kr != KERN_SUCCESS)
        panic("swapfile_pager_init: "
            "memory_object_change_attributes() failed");

    return KERN_SUCCESS;
}

/*
 * swapfile_pager_data_return()
 *
 * Handles page-out requests from VM.  This should never happen since
 * the pages provided by this EMM are not supposed to be dirty or dirtied
 * and VM should simply discard the contents and reclaim the pages if it
 * needs to.
 */
kern_return_t
swapfile_pager_data_return(
    __unused memory_object_t mem_obj,
    __unused memory_object_offset_t offset,
    __unused memory_object_cluster_size_t data_cnt,
    __unused memory_object_offset_t *resid_offset,
    __unused int *io_error,
    __unused boolean_t dirty,
    __unused boolean_t kernel_copy,
    __unused int upl_flags)
{
    panic("swapfile_pager_data_return: should never get called");
    return KERN_FAILURE;
}

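/*
 * swapfile_pager_data_initialize()
 *
 * Handles the initialization of "precious" pages.  This pager provides
 * no precious pages, so this should never get called.
 */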
kern_return_t
swapfile_pager_data_initialize(
    __unused memory_object_t mem_obj,
    __unused memory_object_offset_t offset,
    __unused memory_object_cluster_size_t data_cnt)
{
    panic("swapfile_pager_data_initialize: should never get called");
    return KERN_FAILURE;
}

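/*
 * swapfile_pager_data_unlock()
 *
 * Called by VM when it wants more access to a page than the pager
 * granted.  We never grant more than read-only access, so simply fail
 * the request.
 */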
kern_return_t
swapfile_pager_data_unlock(
    __unused memory_object_t mem_obj,
    __unused memory_object_offset_t offset,
    __unused memory_object_size_t size,
    __unused vm_prot_t desired_access)
{
    return KERN_FAILURE;
}

/*
 * swapfile_pager_data_request()
 *
 * Handles page-in requests from VM.
 */
kern_return_t
swapfile_pager_data_request(
    memory_object_t mem_obj,
    memory_object_offset_t offset,
    memory_object_cluster_size_t length,
#if !DEBUG
    __unused
#endif
    vm_prot_t protection_required,
    __unused memory_object_fault_info_t mo_fault_info)
{
    swapfile_pager_t pager;
    memory_object_control_t mo_control;
    upl_t upl;
    int upl_flags;
    upl_size_t upl_size;
    upl_page_info_t *upl_pl = NULL;
    unsigned int pl_count;
    vm_object_t dst_object;
    kern_return_t kr, retval;
    vm_map_offset_t kernel_mapping;
    vm_offset_t dst_vaddr;
    char *dst_ptr;
    vm_offset_t cur_offset;
    vm_map_entry_t map_entry;

    PAGER_DEBUG(PAGER_ALL, ("swapfile_pager_data_request: %p, %llx, %x, %x\n", mem_obj, offset, length, protection_required));

    kernel_mapping = 0;
    upl = NULL;
    upl_pl = NULL;

    pager = swapfile_pager_lookup(mem_obj);
    assert(pager->is_ready);
    assert(pager->ref_count > 1); /* pager is alive and mapped */

    PAGER_DEBUG(PAGER_PAGEIN, ("swapfile_pager_data_request: %p, %llx, %x, %x, pager %p\n", mem_obj, offset, length, protection_required, pager));

    /*
     * Gather in a UPL all the VM pages requested by VM.
     */
    mo_control = pager->pager_control;

    upl_size = length;
    upl_flags =
        UPL_RET_ONLY_ABSENT |
        UPL_SET_LITE |
        UPL_NO_SYNC |
        UPL_CLEAN_IN_PLACE | /* triggers UPL_CLEAR_DIRTY */
        UPL_SET_INTERNAL;
    pl_count = 0;
    kr = memory_object_upl_request(mo_control,
        offset, upl_size,
        &upl, NULL, NULL, upl_flags);
    if (kr != KERN_SUCCESS) {
        retval = kr;
        goto done;
    }
    dst_object = mo_control->moc_object;
    assert(dst_object != VM_OBJECT_NULL);


    /*
     * Reserve a virtual page in the kernel address space to map each
     * destination physical page as it is processed.
     */
    vm_object_reference(kernel_object); /* ref. for mapping */
    kr = vm_map_find_space(kernel_map,
        &kernel_mapping,
        PAGE_SIZE_64,
        0,
        0,
        &map_entry);
    if (kr != KERN_SUCCESS) {
        vm_object_deallocate(kernel_object);
        retval = kr;
        goto done;
    }
    map_entry->object.vm_object = kernel_object;
    map_entry->offset = kernel_mapping - VM_MIN_KERNEL_ADDRESS;
    vm_map_unlock(kernel_map);
    dst_vaddr = CAST_DOWN(vm_offset_t, kernel_mapping);
    dst_ptr = (char *) dst_vaddr;

    /*
     * Fill in the contents of the pages requested by VM.
     */
    upl_pl = UPL_GET_INTERNAL_PAGE_LIST(upl);
    pl_count = length / PAGE_SIZE;
    for (cur_offset = 0; cur_offset < length; cur_offset += PAGE_SIZE) {
        ppnum_t dst_pnum;

        if (!upl_page_present(upl_pl, (int)(cur_offset / PAGE_SIZE))) {
            /* this page is not in the UPL: skip it */
            continue;
        }

        /*
         * Establish an explicit pmap mapping of the destination
         * physical page.
         * We can't do a regular VM mapping because the VM page
         * is "busy".
         */
        dst_pnum = (ppnum_t)
            upl_phys_page(upl_pl, (int)(cur_offset / PAGE_SIZE));
        assert(dst_pnum != 0);
        pmap_enter(kernel_pmap,
            kernel_mapping,
            dst_pnum,
            VM_PROT_READ | VM_PROT_WRITE,
            VM_PROT_NONE,
            0,
            TRUE);

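        /*
         * The page is deliberately filled with zeroes rather than
         * the swap file's actual contents: this is the "bogus data"
         * promised in the header comment above.
         */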
        memset(dst_ptr, '\0', PAGE_SIZE);
        /* add an end-of-line to keep line counters happy */
        dst_ptr[PAGE_SIZE - 1] = '\n';

        /*
         * Remove the pmap mapping of the destination page
         * in the kernel.
         */
        pmap_remove(kernel_pmap,
            (addr64_t) kernel_mapping,
            (addr64_t) (kernel_mapping + PAGE_SIZE_64));

    }

    retval = KERN_SUCCESS;
done:
    if (upl != NULL) {
        /* clean up the UPL */

        /*
         * The pages are currently dirty because we've just been
         * writing on them, but as far as we're concerned, they're
         * clean since they contain their "original" contents as
         * provided by us, the pager.
         * Tell the UPL to mark them "clean".
         */
        upl_clear_dirty(upl, TRUE);

        /* abort or commit the UPL */
        if (retval != KERN_SUCCESS) {
            upl_abort(upl, 0);
        } else {
            boolean_t empty;
            upl_commit_range(upl, 0, upl->size,
                UPL_COMMIT_CS_VALIDATED,
                upl_pl, pl_count, &empty);
        }

        /* and deallocate the UPL */
        upl_deallocate(upl);
        upl = NULL;
    }
    if (kernel_mapping != 0) {
        /* clean up the mapping of the destination pages */
        kr = vm_map_remove(kernel_map,
            kernel_mapping,
            kernel_mapping + PAGE_SIZE_64,
            VM_MAP_NO_FLAGS);
        assert(kr == KERN_SUCCESS);
        kernel_mapping = 0;
        dst_vaddr = 0;
    }

    return retval;
}

/*
 * swapfile_pager_reference()
 *
 * Get a reference on this memory object.
 * For external usage only.  Assumes that the initial reference count is
 * not 0, i.e. one should not "revive" a dead pager this way.
 */
void
swapfile_pager_reference(
    memory_object_t mem_obj)
{
    swapfile_pager_t pager;

    pager = swapfile_pager_lookup(mem_obj);

    lck_mtx_lock(&swapfile_pager_lock);
    assert(pager->ref_count > 0);
    pager->ref_count++;
    lck_mtx_unlock(&swapfile_pager_lock);
}


/*
 * swapfile_pager_dequeue:
 *
 * Removes a pager from the list of pagers.
 *
 * The caller must hold "swapfile_pager_lock".
 */
void
swapfile_pager_dequeue(
    swapfile_pager_t pager)
{
    assert(!pager->is_mapped);

    queue_remove(&swapfile_pager_queue,
        pager,
        swapfile_pager_t,
        pager_queue);
    pager->pager_queue.next = NULL;
    pager->pager_queue.prev = NULL;

    swapfile_pager_count--;
}

/*
 * swapfile_pager_terminate_internal:
 *
 * Trigger the asynchronous termination of the memory object associated
 * with this pager.
 * When the memory object is terminated, there will be one more call
 * to memory_object_deallocate() (i.e. swapfile_pager_deallocate())
 * to finish the clean up.
 *
 * "swapfile_pager_lock" should not be held by the caller.
 * We don't need the lock because the pager has already been removed from
 * the pagers' list and is now ours exclusively.
 */
void
swapfile_pager_terminate_internal(
    swapfile_pager_t pager)
{
    assert(pager->is_ready);
    assert(!pager->is_mapped);

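    /*
     * Sever the association with the swapfile's vnode.  The pager never
     * took a reference on the vnode, so there is nothing to release here.
     */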
    if (pager->swapfile_vnode != NULL) {
        pager->swapfile_vnode = NULL;
    }

    /* trigger the destruction of the memory object */
    memory_object_destroy(pager->pager_control, 0);
}

/*
 * swapfile_pager_deallocate_internal()
 *
 * Release a reference on this pager and free it when the last
 * reference goes away.
 * Can be called with swapfile_pager_lock held or not, but always returns
 * with it unlocked.
 */
void
swapfile_pager_deallocate_internal(
    swapfile_pager_t pager,
    boolean_t locked)
{
    if (!locked) {
        lck_mtx_lock(&swapfile_pager_lock);
    }

    /* drop a reference on this pager */
    pager->ref_count--;

    if (pager->ref_count == 1) {
        /*
         * Only the "named" reference is left, which means that
         * no one is really holding on to this pager anymore.
         * Terminate it.
         */
        swapfile_pager_dequeue(pager);
        /* the pager is all ours: no need for the lock now */
        lck_mtx_unlock(&swapfile_pager_lock);
        swapfile_pager_terminate_internal(pager);
    } else if (pager->ref_count == 0) {
        /*
         * Dropped the existence reference; the memory object has
         * been terminated.  Do some final cleanup and release the
         * pager structure.
         */
        lck_mtx_unlock(&swapfile_pager_lock);
        if (pager->pager_control != MEMORY_OBJECT_CONTROL_NULL) {
            memory_object_control_deallocate(pager->pager_control);
            pager->pager_control = MEMORY_OBJECT_CONTROL_NULL;
        }
        kfree(pager, sizeof(*pager));
        pager = SWAPFILE_PAGER_NULL;
    } else {
        /* there are still plenty of references: keep going... */
        lck_mtx_unlock(&swapfile_pager_lock);
    }

    /* caution: lock is not held on return... */
}

/*
 * swapfile_pager_deallocate()
 *
 * Release a reference on this pager and free it when the last
 * reference goes away.
 */
void
swapfile_pager_deallocate(
    memory_object_t mem_obj)
{
    swapfile_pager_t pager;

    PAGER_DEBUG(PAGER_ALL, ("swapfile_pager_deallocate: %p\n", mem_obj));
    pager = swapfile_pager_lookup(mem_obj);
    swapfile_pager_deallocate_internal(pager, FALSE);
}

/*
 * swapfile_pager_terminate()
 *
 * Called by VM when the memory object is terminated.  Nothing to do here;
 * the real cleanup happens in swapfile_pager_deallocate_internal().
 */
kern_return_t
swapfile_pager_terminate(
#if !DEBUG
    __unused
#endif
    memory_object_t mem_obj)
{
    PAGER_DEBUG(PAGER_ALL, ("swapfile_pager_terminate: %p\n", mem_obj));

    return KERN_SUCCESS;
}

/*
 * swapfile_pager_synchronize()
 *
 * Called by VM to synchronize the object's contents with its backing
 * store.  There is nothing to flush, so just report the synchronization
 * as completed.
 */
kern_return_t
swapfile_pager_synchronize(
    memory_object_t mem_obj,
    memory_object_offset_t offset,
    memory_object_size_t length,
    __unused vm_sync_t sync_flags)
{
    swapfile_pager_t pager;

    PAGER_DEBUG(PAGER_ALL, ("swapfile_pager_synchronize: %p\n", mem_obj));

    pager = swapfile_pager_lookup(mem_obj);

    memory_object_synchronize_completed(pager->pager_control,
        offset, length);

    return KERN_SUCCESS;
}

/*
 * swapfile_pager_map()
 *
 * This allows VM to let us, the EMM, know that this memory object
 * is currently mapped one or more times.  This is called by VM each time
 * the memory object gets mapped and we take one extra reference on the
 * memory object to account for all its mappings.
 */
kern_return_t
swapfile_pager_map(
    memory_object_t mem_obj,
    __unused vm_prot_t prot)
{
    swapfile_pager_t pager;

    PAGER_DEBUG(PAGER_ALL, ("swapfile_pager_map: %p\n", mem_obj));

    pager = swapfile_pager_lookup(mem_obj);

    lck_mtx_lock(&swapfile_pager_lock);
    assert(pager->is_ready);
    assert(pager->ref_count > 0); /* pager is alive */
    if (pager->is_mapped == FALSE) {
        /*
         * First mapping of this pager: take an extra reference
         * that will remain until all the mappings of this pager
         * are removed.
         */
        pager->is_mapped = TRUE;
        pager->ref_count++;
    }
    lck_mtx_unlock(&swapfile_pager_lock);

    return KERN_SUCCESS;
}

/*
 * swapfile_pager_last_unmap()
 *
 * This is called by VM when this memory object is no longer mapped anywhere.
 */
kern_return_t
swapfile_pager_last_unmap(
    memory_object_t mem_obj)
{
    swapfile_pager_t pager;

    PAGER_DEBUG(PAGER_ALL,
        ("swapfile_pager_last_unmap: %p\n", mem_obj));

    pager = swapfile_pager_lookup(mem_obj);

    lck_mtx_lock(&swapfile_pager_lock);
    if (pager->is_mapped) {
        /*
         * All the mappings are gone, so let go of the one extra
         * reference that represents all the mappings of this pager.
         */
        pager->is_mapped = FALSE;
        swapfile_pager_deallocate_internal(pager, TRUE);
        /* caution: deallocate_internal() released the lock! */
    } else {
        lck_mtx_unlock(&swapfile_pager_lock);
    }

    return KERN_SUCCESS;
}


/*
 * swapfile_pager_lookup()
 *
 * Convert from a memory object to the swapfile_pager that implements it,
 * sanity-checking that the object really is one of ours.
 */
swapfile_pager_t
swapfile_pager_lookup(
    memory_object_t mem_obj)
{
    swapfile_pager_t pager;

    pager = (swapfile_pager_t) mem_obj;
    assert(pager->pager_ops == &swapfile_pager_ops);
    assert(pager->ref_count > 0);
    return pager;
}

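/*
 * swapfile_pager_create()
 *
 * Allocate and initialize a new pager for the given swap file vnode,
 * handling the case where another thread races us to create a pager
 * for the same vnode.
 */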
swapfile_pager_t
swapfile_pager_create(
    struct vnode *vp)
{
    swapfile_pager_t pager, pager2;
    memory_object_control_t control;
    kern_return_t kr;

    pager = (swapfile_pager_t) kalloc(sizeof(*pager));
    if (pager == SWAPFILE_PAGER_NULL) {
        return SWAPFILE_PAGER_NULL;
    }

    /*
     * The vm_map call takes both named entry ports and raw memory
     * objects in the same parameter.  We need to make sure that
     * vm_map does not see this object as a named entry port.  So,
     * we reserve the second word in the object for a fake ip_kotype
     * setting - that will tell vm_map to use it as a memory object.
     */
    pager->pager_ops = &swapfile_pager_ops;
    pager->pager_ikot = IKOT_MEMORY_OBJECT;
    pager->is_ready = FALSE; /* not ready until it has a "name" */
    pager->ref_count = 1;    /* setup reference */
    pager->is_mapped = FALSE;
    pager->pager_control = MEMORY_OBJECT_CONTROL_NULL;
    pager->swapfile_vnode = vp;

    lck_mtx_lock(&swapfile_pager_lock);
    /* see if anyone raced us to create a pager for the same object */
    queue_iterate(&swapfile_pager_queue,
        pager2,
        swapfile_pager_t,
        pager_queue) {
        if (pager2->swapfile_vnode == vp) {
            break;
        }
    }
    if (!queue_end(&swapfile_pager_queue,
        (queue_entry_t) pager2)) {
        /* while we hold the lock, transfer our setup ref to winner */
        pager2->ref_count++;
        /* we lost the race, down with the loser... */
        lck_mtx_unlock(&swapfile_pager_lock);
        pager->swapfile_vnode = NULL;
        kfree(pager, sizeof(*pager));
        /* ... and go with the winner */
        pager = pager2;
        /* let the winner make sure the pager gets ready */
        return pager;
    }

    /* enter new pager at the head of our list of pagers */
    queue_enter_first(&swapfile_pager_queue,
        pager,
        swapfile_pager_t,
        pager_queue);
    swapfile_pager_count++;
    if (swapfile_pager_count > swapfile_pager_count_max) {
        swapfile_pager_count_max = swapfile_pager_count;
    }
    lck_mtx_unlock(&swapfile_pager_lock);

    kr = memory_object_create_named((memory_object_t) pager,
        0,
        &control);
    assert(kr == KERN_SUCCESS);

    lck_mtx_lock(&swapfile_pager_lock);
    /* the new pager is now ready to be used */
    pager->is_ready = TRUE;
    lck_mtx_unlock(&swapfile_pager_lock);

    /* wakeup anyone waiting for this pager to be ready */
    thread_wakeup(&pager->is_ready);

    return pager;
}

/*
 * swapfile_pager_setup()
 *
 * Provide the caller with a memory object backed by the provided
 * swap file vnode.  If such a memory object already exists, re-use it,
 * otherwise create a new memory object.
 */
memory_object_t
swapfile_pager_setup(
    struct vnode *vp)
{
    swapfile_pager_t pager;

    lck_mtx_lock(&swapfile_pager_lock);

    queue_iterate(&swapfile_pager_queue,
        pager,
        swapfile_pager_t,
        pager_queue) {
        if (pager->swapfile_vnode == vp) {
            break;
        }
    }
    if (queue_end(&swapfile_pager_queue,
        (queue_entry_t) pager)) {
        /* no existing pager for this backing vnode */
        pager = SWAPFILE_PAGER_NULL;
    } else {
        /* make sure pager doesn't disappear */
        pager->ref_count++;
    }

    lck_mtx_unlock(&swapfile_pager_lock);

    if (pager == SWAPFILE_PAGER_NULL) {
        pager = swapfile_pager_create(vp);
        if (pager == SWAPFILE_PAGER_NULL) {
            return MEMORY_OBJECT_NULL;
        }
    }

    lck_mtx_lock(&swapfile_pager_lock);
    while (!pager->is_ready) {
        lck_mtx_sleep(&swapfile_pager_lock,
            LCK_SLEEP_DEFAULT,
            &pager->is_ready,
            THREAD_UNINT);
    }
    lck_mtx_unlock(&swapfile_pager_lock);

    return (memory_object_t) pager;
}

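/*
 * swapfile_pager_control()
 *
 * Return the memory object control handle associated with this pager.
 */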
memory_object_control_t
swapfile_pager_control(
    memory_object_t mem_obj)
{
    swapfile_pager_t pager;

    pager = swapfile_pager_lookup(mem_obj);

    return pager->pager_control;
}