/*
 * Copyright (c) 2008 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <mach/kern_return.h>
#include <mach/memory_object_control.h>
#include <mach/upl.h>

#include <kern/ipc_kobject.h>
#include <kern/kalloc.h>
#include <kern/queue.h>
#include <os/refcnt.h>

#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <vm/vm_pageout.h>
#include <vm/vm_protos.h>


/*
 * APPLE SWAPFILE MEMORY PAGER
 *
 * This external memory manager (EMM) handles mappings of the swap files.
 * Swap files are not regular files and are used solely to store contents of
 * anonymous memory mappings while not resident in memory.
 * There's no valid reason to map a swap file.  This just puts extra burden
 * on the system, is potentially a security issue and is not reliable since
 * the contents can change at any time with pageout operations.
 * Here are some of the issues with mapping a swap file.
 * * PERFORMANCE:
 *   Each page in the swap file belongs to an anonymous memory object.
 *   Mapping the swap file makes those pages also accessible via a vnode
 *   memory object and each page can now be resident twice.
 * * SECURITY:
 *   Mapping a swap file allows access to other processes' memory.  Swap
 *   files are only accessible by the "root" super-user, who can already
 *   access any process's memory, so this is not a real issue, but if the
 *   permissions on the swap file were changed, it could become one.
 *   Swap files are not "zero-filled" on creation, so until their contents
 *   are overwritten with pageout operations, they still contain whatever
 *   was previously stored on the disk blocks they were allocated from.
 *   The "super-user" could see the contents of free blocks anyway, so this
 *   is not a new security issue, but it may be perceived as one.
 *
 * We can't legitimately prevent a user process with appropriate privileges
 * from mapping a swap file, but we can prevent it from accessing its actual
 * contents.
 * This pager mostly handles page-in requests (from memory_object_data_request())
 * for swap file mappings and just returns bogus data.
 * Pageouts are not handled, so mmap() has to make sure it does not allow
 * writable (i.e. MAP_SHARED and PROT_WRITE) mappings of swap files; a sketch
 * of that expected integration follows this comment.
 */
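
/*
 * Illustrative sketch only (not code from this file): a caller such as the
 * mmap() path is expected to route swap-file vnodes to this pager and to
 * refuse writable shared mappings.  vnode_isswap(), the error codes and
 * the exact call site are assumptions here; swapfile_pager_setup() and
 * swapfile_pager_control() are the real entry points defined below.
 *
 *      if (vnode_isswap(vp)) {
 *              if ((flags & MAP_SHARED) && (prot & PROT_WRITE)) {
 *                      return EPERM;   // no writable mappings of swap files
 *              }
 *              pager = swapfile_pager_setup(vp);
 *              if (pager == MEMORY_OBJECT_NULL) {
 *                      return ENOMEM;
 *              }
 *              control = swapfile_pager_control(pager);
 *              // ... enter the mapping through "control" instead of the
 *              // regular vnode pager, so page-ins return bogus data ...
 *      }
 */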

/* forward declarations */
void swapfile_pager_reference(memory_object_t mem_obj);
void swapfile_pager_deallocate(memory_object_t mem_obj);
kern_return_t swapfile_pager_init(memory_object_t mem_obj,
    memory_object_control_t control,
    memory_object_cluster_size_t pg_size);
kern_return_t swapfile_pager_terminate(memory_object_t mem_obj);
kern_return_t swapfile_pager_data_request(memory_object_t mem_obj,
    memory_object_offset_t offset,
    memory_object_cluster_size_t length,
    vm_prot_t protection_required,
    memory_object_fault_info_t fault_info);
kern_return_t swapfile_pager_data_return(memory_object_t mem_obj,
    memory_object_offset_t offset,
    memory_object_cluster_size_t data_cnt,
    memory_object_offset_t *resid_offset,
    int *io_error,
    boolean_t dirty,
    boolean_t kernel_copy,
    int upl_flags);
kern_return_t swapfile_pager_data_initialize(memory_object_t mem_obj,
    memory_object_offset_t offset,
    memory_object_cluster_size_t data_cnt);
kern_return_t swapfile_pager_data_unlock(memory_object_t mem_obj,
    memory_object_offset_t offset,
    memory_object_size_t size,
    vm_prot_t desired_access);
kern_return_t swapfile_pager_synchronize(memory_object_t mem_obj,
    memory_object_offset_t offset,
    memory_object_size_t length,
    vm_sync_t sync_flags);
kern_return_t swapfile_pager_map(memory_object_t mem_obj,
    vm_prot_t prot);
kern_return_t swapfile_pager_last_unmap(memory_object_t mem_obj);

/*
 * Vector of VM operations for this EMM.
 * These routines are invoked by VM via the memory_object_*() interfaces.
 */
const struct memory_object_pager_ops swapfile_pager_ops = {
    swapfile_pager_reference,
    swapfile_pager_deallocate,
    swapfile_pager_init,
    swapfile_pager_terminate,
    swapfile_pager_data_request,
    swapfile_pager_data_return,
    swapfile_pager_data_initialize,
    swapfile_pager_data_unlock,
    swapfile_pager_synchronize,
    swapfile_pager_map,
    swapfile_pager_last_unmap,
    NULL, /* data_reclaim */
    "swapfile pager"
};
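
/*
 * Dispatch sketch (an assumption based on the memory_object convention,
 * not code from this file): VM reaches the routines above indirectly
 * through this ops vector, along the lines of
 *
 *      kr = mem_obj->mo_pager_ops->memory_object_data_request(
 *              mem_obj, offset, length, prot, fault_info);
 *
 * so storing &swapfile_pager_ops in a pager's header is what binds that
 * memory object to this EMM.
 */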

/*
 * The "swapfile_pager" describes a memory object backed by
 * the "swapfile" EMM.
 */
typedef struct swapfile_pager {
    /* mandatory generic header */
    struct memory_object swp_pgr_hdr;

    /* pager-specific data */
    queue_chain_t pager_queue;      /* next & prev pagers */
    struct os_refcnt ref_count;     /* reference count */
    boolean_t is_ready;             /* is this pager ready ? */
    boolean_t is_mapped;            /* is this pager mapped ? */
    struct vnode *swapfile_vnode;   /* the swapfile's vnode */
} *swapfile_pager_t;
#define SWAPFILE_PAGER_NULL ((swapfile_pager_t) NULL)

/*
 * List of memory objects managed by this EMM.
 * The list is protected by the "swapfile_pager_lock" lock.
 */
int swapfile_pager_count = 0;   /* number of pagers */
queue_head_t swapfile_pager_queue;
decl_lck_mtx_data(, swapfile_pager_lock)

/*
 * Statistics & counters.
 */
int swapfile_pager_count_max = 0;


lck_grp_t swapfile_pager_lck_grp;
lck_grp_attr_t swapfile_pager_lck_grp_attr;
lck_attr_t swapfile_pager_lck_attr;


/* internal prototypes */
swapfile_pager_t swapfile_pager_create(struct vnode *vp);
swapfile_pager_t swapfile_pager_lookup(memory_object_t mem_obj);
void swapfile_pager_dequeue(swapfile_pager_t pager);
void swapfile_pager_deallocate_internal(swapfile_pager_t pager,
    boolean_t locked);
void swapfile_pager_terminate_internal(swapfile_pager_t pager);


#if DEBUG
int swapfile_pagerdebug = 0;
#define PAGER_ALL    0xffffffff
#define PAGER_INIT   0x00000001
#define PAGER_PAGEIN 0x00000002

#define PAGER_DEBUG(LEVEL, A)                                   \
    MACRO_BEGIN                                                 \
    if ((swapfile_pagerdebug & (LEVEL)) == (LEVEL)) {           \
            printf A;                                           \
    }                                                           \
    MACRO_END
#else
#define PAGER_DEBUG(LEVEL, A)
#endif
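
/*
 * Example (illustrative): on a DEBUG kernel, setting
 *
 *      swapfile_pagerdebug = PAGER_INIT | PAGER_PAGEIN;
 *
 * (e.g. from the debugger) enables the PAGER_DEBUG() printf()s on the
 * initialization and page-in paths only; PAGER_ALL enables all of them.
 */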

void
swapfile_pager_bootstrap(void)
{
    lck_grp_attr_setdefault(&swapfile_pager_lck_grp_attr);
    lck_grp_init(&swapfile_pager_lck_grp, "swapfile pager", &swapfile_pager_lck_grp_attr);
    lck_attr_setdefault(&swapfile_pager_lck_attr);
    lck_mtx_init(&swapfile_pager_lock, &swapfile_pager_lck_grp, &swapfile_pager_lck_attr);
    queue_init(&swapfile_pager_queue);
}

/*
 * swapfile_pager_init()
 *
 * Initializes the memory object and makes it ready to be used and mapped.
 */
kern_return_t
swapfile_pager_init(
    memory_object_t mem_obj,
    memory_object_control_t control,
#if !DEBUG
    __unused
#endif
    memory_object_cluster_size_t pg_size)
{
    swapfile_pager_t pager;
    kern_return_t kr;
    memory_object_attr_info_data_t attributes;

    PAGER_DEBUG(PAGER_ALL,
        ("swapfile_pager_init: %p, %p, %x\n",
        mem_obj, control, pg_size));

    if (control == MEMORY_OBJECT_CONTROL_NULL) {
        return KERN_INVALID_ARGUMENT;
    }

    pager = swapfile_pager_lookup(mem_obj);

    memory_object_control_reference(control);

    pager->swp_pgr_hdr.mo_control = control;

    attributes.copy_strategy = MEMORY_OBJECT_COPY_DELAY;
    attributes.cluster_size = (1 << (PAGE_SHIFT));
    attributes.may_cache_object = FALSE;
    attributes.temporary = TRUE;

    kr = memory_object_change_attributes(
        control,
        MEMORY_OBJECT_ATTRIBUTE_INFO,
        (memory_object_info_t) &attributes,
        MEMORY_OBJECT_ATTR_INFO_COUNT);
    if (kr != KERN_SUCCESS) {
        panic("swapfile_pager_init: "
            "memory_object_change_attributes() failed");
    }

    return KERN_SUCCESS;
}

/*
 * swapfile_pager_data_return()
 *
 * Handles page-out requests from VM.  This should never happen since
 * the pages provided by this EMM are not supposed to be dirty or dirtied
 * and VM should simply discard the contents and reclaim the pages if it
 * needs to.
 */
kern_return_t
swapfile_pager_data_return(
    __unused memory_object_t mem_obj,
    __unused memory_object_offset_t offset,
    __unused memory_object_cluster_size_t data_cnt,
    __unused memory_object_offset_t *resid_offset,
    __unused int *io_error,
    __unused boolean_t dirty,
    __unused boolean_t kernel_copy,
    __unused int upl_flags)
{
    panic("swapfile_pager_data_return: should never get called");
    return KERN_FAILURE;
}

kern_return_t
swapfile_pager_data_initialize(
    __unused memory_object_t mem_obj,
    __unused memory_object_offset_t offset,
    __unused memory_object_cluster_size_t data_cnt)
{
    panic("swapfile_pager_data_initialize: should never get called");
    return KERN_FAILURE;
}

kern_return_t
swapfile_pager_data_unlock(
    __unused memory_object_t mem_obj,
    __unused memory_object_offset_t offset,
    __unused memory_object_size_t size,
    __unused vm_prot_t desired_access)
{
    return KERN_FAILURE;
}

/*
 * swapfile_pager_data_request()
 *
 * Handles page-in requests from VM.
 */
kern_return_t
swapfile_pager_data_request(
    memory_object_t mem_obj,
    memory_object_offset_t offset,
    memory_object_cluster_size_t length,
#if !DEBUG
    __unused
#endif
    vm_prot_t protection_required,
    __unused memory_object_fault_info_t mo_fault_info)
{
    swapfile_pager_t pager;
    memory_object_control_t mo_control;
    upl_t upl;
    int upl_flags;
    upl_size_t upl_size;
    upl_page_info_t *upl_pl = NULL;
    unsigned int pl_count;
    vm_object_t dst_object;
    kern_return_t kr, retval;
    vm_map_offset_t kernel_mapping;
    vm_offset_t dst_vaddr;
    char *dst_ptr;
    vm_offset_t cur_offset;
    vm_map_entry_t map_entry;

    PAGER_DEBUG(PAGER_ALL, ("swapfile_pager_data_request: %p, %llx, %x, %x\n", mem_obj, offset, length, protection_required));

    kernel_mapping = 0;
    upl = NULL;
    upl_pl = NULL;

    pager = swapfile_pager_lookup(mem_obj);
    assert(pager->is_ready);
    assert(os_ref_get_count(&pager->ref_count) > 1); /* pager is alive and mapped */

    PAGER_DEBUG(PAGER_PAGEIN, ("swapfile_pager_data_request: %p, %llx, %x, %x, pager %p\n", mem_obj, offset, length, protection_required, pager));

    /*
     * Gather in a UPL all the VM pages requested by VM.
     */
    mo_control = pager->swp_pgr_hdr.mo_control;

    upl_size = length;
    upl_flags =
        UPL_RET_ONLY_ABSENT |
        UPL_SET_LITE |
        UPL_NO_SYNC |
        UPL_CLEAN_IN_PLACE |    /* triggers UPL_CLEAR_DIRTY */
        UPL_SET_INTERNAL;
    pl_count = 0;
    kr = memory_object_upl_request(mo_control,
        offset, upl_size,
        &upl, NULL, NULL, upl_flags, VM_KERN_MEMORY_OSFMK);
    if (kr != KERN_SUCCESS) {
        retval = kr;
        goto done;
    }
    dst_object = mo_control->moc_object;
    assert(dst_object != VM_OBJECT_NULL);


    /*
     * Reserve a virtual page in the kernel address space to map each
     * destination physical page when it's its turn to be processed.
     */
    vm_object_reference(kernel_object); /* ref. for mapping */
    kr = vm_map_find_space(kernel_map,
        &kernel_mapping,
        PAGE_SIZE_64,
        0,
        0,
        VM_MAP_KERNEL_FLAGS_NONE,
        VM_KERN_MEMORY_NONE,
        &map_entry);
    if (kr != KERN_SUCCESS) {
        vm_object_deallocate(kernel_object);
        retval = kr;
        goto done;
    }
    VME_OBJECT_SET(map_entry, kernel_object);
    VME_OFFSET_SET(map_entry, kernel_mapping - VM_MIN_KERNEL_ADDRESS);
    vm_map_unlock(kernel_map);
    dst_vaddr = CAST_DOWN(vm_offset_t, kernel_mapping);
    dst_ptr = (char *) dst_vaddr;

    /*
     * Fill in the contents of the pages requested by VM.
     */
    upl_pl = UPL_GET_INTERNAL_PAGE_LIST(upl);
    pl_count = length / PAGE_SIZE;
    for (cur_offset = 0; cur_offset < length; cur_offset += PAGE_SIZE) {
        ppnum_t dst_pnum;

        if (!upl_page_present(upl_pl, (int)(cur_offset / PAGE_SIZE))) {
            /* this page is not in the UPL: skip it */
            continue;
        }

        /*
         * Establish an explicit pmap mapping of the destination
         * physical page.
         * We can't do a regular VM mapping because the VM page
         * is "busy".
         */
        dst_pnum = (ppnum_t)
            upl_phys_page(upl_pl, (int)(cur_offset / PAGE_SIZE));
        assert(dst_pnum != 0);
        retval = pmap_enter(kernel_pmap,
            kernel_mapping,
            dst_pnum,
            VM_PROT_READ | VM_PROT_WRITE,
            VM_PROT_NONE,
            0,
            TRUE);

        assert(retval == KERN_SUCCESS);

        if (retval != KERN_SUCCESS) {
            goto done;
        }

        memset(dst_ptr, '\0', PAGE_SIZE);
        /* add an end-of-line to keep line counters happy */
        dst_ptr[PAGE_SIZE - 1] = '\n';

        /*
         * Remove the pmap mapping of the destination page
         * in the kernel.
         */
        pmap_remove(kernel_pmap,
            (addr64_t) kernel_mapping,
            (addr64_t) (kernel_mapping + PAGE_SIZE_64));
    }

    retval = KERN_SUCCESS;
done:
    if (upl != NULL) {
        /* clean up the UPL */

        /*
         * The pages are currently dirty because we've just been
         * writing on them, but as far as we're concerned, they're
         * clean since they contain their "original" contents as
         * provided by us, the pager.
         * Tell the UPL to mark them "clean".
         */
        upl_clear_dirty(upl, TRUE);

        /* abort or commit the UPL */
        if (retval != KERN_SUCCESS) {
            upl_abort(upl, 0);
        } else {
            boolean_t empty;
            upl_commit_range(upl, 0, upl->size,
                UPL_COMMIT_CS_VALIDATED,
                upl_pl, pl_count, &empty);
        }

        /* and deallocate the UPL */
        upl_deallocate(upl);
        upl = NULL;
    }
    if (kernel_mapping != 0) {
        /* clean up the mapping of the source and destination pages */
        kr = vm_map_remove(kernel_map,
            kernel_mapping,
            kernel_mapping + PAGE_SIZE_64,
            VM_MAP_REMOVE_NO_FLAGS);
        assert(kr == KERN_SUCCESS);
        kernel_mapping = 0;
        dst_vaddr = 0;
    }

    return retval;
}
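
/*
 * Net effect of the above, for illustration: every read fault on a mapped
 * swap file is satisfied with a page of zeroes whose last byte is '\n',
 * regardless of the swapfile's real contents.  A user-space sketch
 * (assuming the read-only mapping was permitted and "swapfd" is an open
 * descriptor for a swap file):
 *
 *      char *p = mmap(NULL, PAGE_SIZE, PROT_READ, MAP_PRIVATE, swapfd, 0);
 *      assert(p[0] == '\0' && p[PAGE_SIZE - 1] == '\n');
 */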

/*
 * swapfile_pager_reference()
 *
 * Get a reference on this memory object.
 * For external usage only.  Assumes that the initial reference count is
 * not 0, i.e. one should not "revive" a dead pager this way.
 */
void
swapfile_pager_reference(
    memory_object_t mem_obj)
{
    swapfile_pager_t pager;

    pager = swapfile_pager_lookup(mem_obj);

    lck_mtx_lock(&swapfile_pager_lock);
    os_ref_retain_locked(&pager->ref_count);
    lck_mtx_unlock(&swapfile_pager_lock);
}


/*
 * swapfile_pager_dequeue:
 *
 * Removes a pager from the list of pagers.
 *
 * The caller must hold "swapfile_pager_lock".
 */
void
swapfile_pager_dequeue(
    swapfile_pager_t pager)
{
    assert(!pager->is_mapped);

    queue_remove(&swapfile_pager_queue,
        pager,
        swapfile_pager_t,
        pager_queue);
    pager->pager_queue.next = NULL;
    pager->pager_queue.prev = NULL;

    swapfile_pager_count--;
}

/*
 * swapfile_pager_terminate_internal:
 *
 * Trigger the asynchronous termination of the memory object associated
 * with this pager.
 * When the memory object is terminated, there will be one more call
 * to memory_object_deallocate() (i.e. swapfile_pager_deallocate())
 * to finish the clean up.
 *
 * "swapfile_pager_lock" should not be held by the caller.
 * We don't need the lock because the pager has already been removed from
 * the pagers' list and is now ours exclusively.
 */
void
swapfile_pager_terminate_internal(
    swapfile_pager_t pager)
{
    assert(pager->is_ready);
    assert(!pager->is_mapped);

    if (pager->swapfile_vnode != NULL) {
        pager->swapfile_vnode = NULL;
    }

    /* trigger the destruction of the memory object */
    memory_object_destroy(pager->swp_pgr_hdr.mo_control, 0);
}

/*
 * swapfile_pager_deallocate_internal()
 *
 * Release a reference on this pager and free it when the last
 * reference goes away.
 * Can be called with swapfile_pager_lock held or not but always returns
 * with it unlocked.
 */
void
swapfile_pager_deallocate_internal(
    swapfile_pager_t pager,
    boolean_t locked)
{
    if (!locked) {
        lck_mtx_lock(&swapfile_pager_lock);
    }

    /* drop a reference on this pager */
    os_ref_count_t refcount = os_ref_release_locked(&pager->ref_count);

    if (refcount == 1) {
        /*
         * Only the "named" reference is left, which means that
         * no one is really holding on to this pager anymore.
         * Terminate it.
         */
        swapfile_pager_dequeue(pager);
        /* the pager is all ours: no need for the lock now */
        lck_mtx_unlock(&swapfile_pager_lock);
        swapfile_pager_terminate_internal(pager);
    } else if (refcount == 0) {
        /*
         * Dropped the existence reference; the memory object has
         * been terminated.  Do some final cleanup and release the
         * pager structure.
         */
        lck_mtx_unlock(&swapfile_pager_lock);
        if (pager->swp_pgr_hdr.mo_control != MEMORY_OBJECT_CONTROL_NULL) {
            memory_object_control_deallocate(pager->swp_pgr_hdr.mo_control);
            pager->swp_pgr_hdr.mo_control = MEMORY_OBJECT_CONTROL_NULL;
        }
        kfree(pager, sizeof(*pager));
        pager = SWAPFILE_PAGER_NULL;
    } else {
        /* there are still plenty of references: keep going... */
        lck_mtx_unlock(&swapfile_pager_lock);
    }

    /* caution: lock is not held on return... */
}
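
/*
 * Reference-count lifecycle, summarized from the code above and the
 * comments in it (a reading aid, not new behavior):
 *
 *	- swapfile_pager_create() starts the count at 1, and the VM side
 *	  ends up holding the long-lived "named" reference;
 *	- swapfile_pager_setup() takes an extra reference for its caller
 *	  when it reuses an existing pager;
 *	- swapfile_pager_map() takes one extra reference on the first
 *	  mapping, released by swapfile_pager_last_unmap();
 *	- when the count drops to 1, only the "named" reference is left,
 *	  so the pager is dequeued and its memory object terminated;
 *	- when the count drops to 0, the pager structure is freed.
 */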

/*
 * swapfile_pager_deallocate()
 *
 * Release a reference on this pager and free it when the last
 * reference goes away.
 */
void
swapfile_pager_deallocate(
    memory_object_t mem_obj)
{
    swapfile_pager_t pager;

    PAGER_DEBUG(PAGER_ALL, ("swapfile_pager_deallocate: %p\n", mem_obj));
    pager = swapfile_pager_lookup(mem_obj);
    swapfile_pager_deallocate_internal(pager, FALSE);
}

/*
 *
 */
kern_return_t
swapfile_pager_terminate(
#if !DEBUG
    __unused
#endif
    memory_object_t mem_obj)
{
    PAGER_DEBUG(PAGER_ALL, ("swapfile_pager_terminate: %p\n", mem_obj));

    return KERN_SUCCESS;
}

/*
 *
 */
kern_return_t
swapfile_pager_synchronize(
    __unused memory_object_t mem_obj,
    __unused memory_object_offset_t offset,
    __unused memory_object_size_t length,
    __unused vm_sync_t sync_flags)
{
    panic("swapfile_pager_synchronize: memory_object_synchronize no longer supported\n");
    return KERN_FAILURE;
}

/*
 * swapfile_pager_map()
 *
 * This allows VM to let us, the EMM, know that this memory object
 * is currently mapped one or more times.  This is called by VM each time
 * the memory object gets mapped and we take one extra reference on the
 * memory object to account for all its mappings.
 */
kern_return_t
swapfile_pager_map(
    memory_object_t mem_obj,
    __unused vm_prot_t prot)
{
    swapfile_pager_t pager;

    PAGER_DEBUG(PAGER_ALL, ("swapfile_pager_map: %p\n", mem_obj));

    pager = swapfile_pager_lookup(mem_obj);

    lck_mtx_lock(&swapfile_pager_lock);
    assert(pager->is_ready);
    assert(os_ref_get_count(&pager->ref_count) > 0); /* pager is alive */
    if (pager->is_mapped == FALSE) {
        /*
         * First mapping of this pager: take an extra reference
         * that will remain until all the mappings of this pager
         * are removed.
         */
        pager->is_mapped = TRUE;
        os_ref_retain_locked(&pager->ref_count);
    }
    lck_mtx_unlock(&swapfile_pager_lock);

    return KERN_SUCCESS;
}

/*
 * swapfile_pager_last_unmap()
 *
 * This is called by VM when this memory object is no longer mapped anywhere.
 */
kern_return_t
swapfile_pager_last_unmap(
    memory_object_t mem_obj)
{
    swapfile_pager_t pager;

    PAGER_DEBUG(PAGER_ALL,
        ("swapfile_pager_last_unmap: %p\n", mem_obj));

    pager = swapfile_pager_lookup(mem_obj);

    lck_mtx_lock(&swapfile_pager_lock);
    if (pager->is_mapped) {
        /*
         * All the mappings are gone, so let go of the one extra
         * reference that represents all the mappings of this pager.
         */
        pager->is_mapped = FALSE;
        swapfile_pager_deallocate_internal(pager, TRUE);
        /* caution: deallocate_internal() released the lock ! */
    } else {
        lck_mtx_unlock(&swapfile_pager_lock);
    }

    return KERN_SUCCESS;
}


/*
 *
 */
swapfile_pager_t
swapfile_pager_lookup(
    memory_object_t mem_obj)
{
    swapfile_pager_t pager;

    assert(mem_obj->mo_pager_ops == &swapfile_pager_ops);
    __IGNORE_WCASTALIGN(pager = (swapfile_pager_t) mem_obj);
    assert(os_ref_get_count(&pager->ref_count) > 0);
    return pager;
}

swapfile_pager_t
swapfile_pager_create(
    struct vnode *vp)
{
    swapfile_pager_t pager, pager2;
    memory_object_control_t control;
    kern_return_t kr;

    pager = (swapfile_pager_t) kalloc(sizeof(*pager));
    if (pager == SWAPFILE_PAGER_NULL) {
        return SWAPFILE_PAGER_NULL;
    }

    /*
     * The vm_map call takes both named entry ports and raw memory
     * objects in the same parameter.  We need to make sure that
     * vm_map does not see this object as a named entry port.  So,
     * we reserve the second word in the object for a fake ip_kotype
     * setting - that will tell vm_map to use it as a memory object.
     */
    pager->swp_pgr_hdr.mo_ikot = IKOT_MEMORY_OBJECT;
    pager->swp_pgr_hdr.mo_pager_ops = &swapfile_pager_ops;
    pager->swp_pgr_hdr.mo_control = MEMORY_OBJECT_CONTROL_NULL;

    pager->is_ready = FALSE;        /* not ready until it has a "name" */
    os_ref_init(&pager->ref_count, NULL); /* setup reference */
    pager->is_mapped = FALSE;
    pager->swapfile_vnode = vp;

    lck_mtx_lock(&swapfile_pager_lock);
    /* see if anyone raced us to create a pager for the same object */
    queue_iterate(&swapfile_pager_queue,
        pager2,
        swapfile_pager_t,
        pager_queue) {
        if (pager2->swapfile_vnode == vp) {
            break;
        }
    }
    if (!queue_end(&swapfile_pager_queue,
        (queue_entry_t) pager2)) {
        /* while we hold the lock, transfer our setup ref to winner */
        os_ref_retain_locked(&pager2->ref_count);
        /* we lost the race, down with the loser... */
        lck_mtx_unlock(&swapfile_pager_lock);
        pager->swapfile_vnode = NULL;
        kfree(pager, sizeof(*pager));
        /* ... and go with the winner */
        pager = pager2;
        /* let the winner make sure the pager gets ready */
        return pager;
    }

    /* enter new pager at the head of our list of pagers */
    queue_enter_first(&swapfile_pager_queue,
        pager,
        swapfile_pager_t,
        pager_queue);
    swapfile_pager_count++;
    if (swapfile_pager_count > swapfile_pager_count_max) {
        swapfile_pager_count_max = swapfile_pager_count;
    }
    lck_mtx_unlock(&swapfile_pager_lock);

    kr = memory_object_create_named((memory_object_t) pager,
        0,
        &control);
    assert(kr == KERN_SUCCESS);

    lck_mtx_lock(&swapfile_pager_lock);
    /* the new pager is now ready to be used */
    pager->is_ready = TRUE;
    lck_mtx_unlock(&swapfile_pager_lock);

    /* wakeup anyone waiting for this pager to be ready */
    thread_wakeup(&pager->is_ready);

    return pager;
}

/*
 * swapfile_pager_setup()
 *
 * Provide the caller with a memory object backed by the swapfile pager
 * for the given vnode "vp".  If such a memory object already exists,
 * re-use it, otherwise create a new memory object.
 */
memory_object_t
swapfile_pager_setup(
    struct vnode *vp)
{
    swapfile_pager_t pager;

    lck_mtx_lock(&swapfile_pager_lock);

    queue_iterate(&swapfile_pager_queue,
        pager,
        swapfile_pager_t,
        pager_queue) {
        if (pager->swapfile_vnode == vp) {
            break;
        }
    }
    if (queue_end(&swapfile_pager_queue,
        (queue_entry_t) pager)) {
        /* no existing pager for this backing object */
        pager = SWAPFILE_PAGER_NULL;
    } else {
        /* make sure pager doesn't disappear */
        os_ref_retain_locked(&pager->ref_count);
    }

    lck_mtx_unlock(&swapfile_pager_lock);

    if (pager == SWAPFILE_PAGER_NULL) {
        pager = swapfile_pager_create(vp);
        if (pager == SWAPFILE_PAGER_NULL) {
            return MEMORY_OBJECT_NULL;
        }
    }

    lck_mtx_lock(&swapfile_pager_lock);
    while (!pager->is_ready) {
        lck_mtx_sleep(&swapfile_pager_lock,
            LCK_SLEEP_DEFAULT,
            &pager->is_ready,
            THREAD_UNINT);
    }
    lck_mtx_unlock(&swapfile_pager_lock);

    return (memory_object_t) pager;
}
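
/*
 * Usage sketch (hypothetical caller; everything here other than
 * swapfile_pager_setup(), swapfile_pager_control() and
 * memory_object_deallocate() is an assumption):
 *
 *      memory_object_t pager;
 *      memory_object_control_t control;
 *
 *      pager = swapfile_pager_setup(vp);       // holds a reference for us
 *      if (pager != MEMORY_OBJECT_NULL) {
 *              control = swapfile_pager_control(pager);
 *              // ... enter the mapping through "control" ...
 *              memory_object_deallocate(pager);    // drop our reference
 *      }
 */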

memory_object_control_t
swapfile_pager_control(
    memory_object_t mem_obj)
{
    swapfile_pager_t pager;

    if (mem_obj == MEMORY_OBJECT_NULL ||
        mem_obj->mo_pager_ops != &swapfile_pager_ops) {
        return MEMORY_OBJECT_CONTROL_NULL;
    }
    pager = swapfile_pager_lookup(mem_obj);
    return pager->swp_pgr_hdr.mo_control;
}