/*
 * Copyright (c) 2008 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <mach/kern_return.h>
#include <mach/memory_object_control.h>
#include <mach/upl.h>

#include <kern/ipc_kobject.h>
#include <kern/kalloc.h>
#include <kern/queue.h>

#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <vm/vm_pageout.h>
#include <vm/vm_protos.h>

/*
 * APPLE SWAPFILE MEMORY PAGER
 *
 * This external memory manager (EMM) handles mappings of swap files.
 * Swap files are not regular files and are used solely to store contents of
 * anonymous memory mappings while not resident in memory.
 * There's no valid reason to map a swap file.  This just puts extra burden
 * on the system, is potentially a security issue and is not reliable since
 * the contents can change at any time with pageout operations.
 * Here are some of the issues with mapping a swap file.
 * * PERFORMANCE:
 *   Each page in the swap file belongs to an anonymous memory object.
 *   Mapping the swap file makes those pages also accessible via a vnode
 *   memory object and each page can now be resident twice.
 * * SECURITY:
 *   Mapping a swap file allows access to other processes' memory.  Swap files
 *   are only accessible by the "root" super-user, who can already access any
 *   process's memory, so this is not a real issue, but if permissions on the
 *   swap file got changed, it could become one.
 *   Swap files are not "zero-filled" on creation, so until their contents are
 *   overwritten with pageout operations, they still contain whatever was on
 *   the disk blocks they were allocated.  The "super-user" could see the
 *   contents of free blocks anyway, so this is not a new security issue, but
 *   it may be perceived as one.
 *
 * We can't legitimately prevent a user process with appropriate privileges
 * from mapping a swap file, but we can prevent it from accessing its actual
 * contents.
 * This pager mostly handles page-in requests (from memory_object_data_request())
 * for swap file mappings and just returns bogus data.
 * Pageouts are not handled, so mmap() has to make sure it does not allow
 * writable (i.e. MAP_SHARED and PROT_WRITE) mappings of swap files.
 */
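
/*
 * Illustrative user-space sketch of the behavior described above; this is
 * an assumption for illustration only (the swap file path varies by
 * configuration) and is not part of this file's build.  Even a privileged
 * process that maps a swap file read-only only ever sees the bogus page
 * contents produced by this pager: zeroes with a trailing newline (see
 * swapfile_pager_data_request() below), never swapped-out anonymous memory.
 */
#if 0
#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

static void
map_swapfile_sketch(void)
{
	/* hypothetical swap file path */
	int fd = open("/private/var/vm/swapfile0", O_RDONLY);
	if (fd < 0)
		return;
	/* a PROT_WRITE + MAP_SHARED mapping would be refused by mmap() */
	char *p = mmap(NULL, 4096, PROT_READ, MAP_PRIVATE, fd, 0);
	if (p != MAP_FAILED) {
		/* prints "0 10": all zeroes, except a final '\n' per page */
		printf("%d %d\n", p[0], p[4095]);
		munmap(p, 4096);
	}
	close(fd);
}
#endif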

/* forward declarations */
void swapfile_pager_reference(memory_object_t mem_obj);
void swapfile_pager_deallocate(memory_object_t mem_obj);
kern_return_t swapfile_pager_init(memory_object_t mem_obj,
	memory_object_control_t control,
	memory_object_cluster_size_t pg_size);
kern_return_t swapfile_pager_terminate(memory_object_t mem_obj);
kern_return_t swapfile_pager_data_request(memory_object_t mem_obj,
	memory_object_offset_t offset,
	memory_object_cluster_size_t length,
	vm_prot_t protection_required,
	memory_object_fault_info_t fault_info);
kern_return_t swapfile_pager_data_return(memory_object_t mem_obj,
	memory_object_offset_t offset,
	memory_object_cluster_size_t data_cnt,
	memory_object_offset_t *resid_offset,
	int *io_error,
	boolean_t dirty,
	boolean_t kernel_copy,
	int upl_flags);
kern_return_t swapfile_pager_data_initialize(memory_object_t mem_obj,
	memory_object_offset_t offset,
	memory_object_cluster_size_t data_cnt);
kern_return_t swapfile_pager_data_unlock(memory_object_t mem_obj,
	memory_object_offset_t offset,
	memory_object_size_t size,
	vm_prot_t desired_access);
kern_return_t swapfile_pager_synchronize(memory_object_t mem_obj,
	memory_object_offset_t offset,
	memory_object_size_t length,
	vm_sync_t sync_flags);
kern_return_t swapfile_pager_map(memory_object_t mem_obj,
	vm_prot_t prot);
kern_return_t swapfile_pager_last_unmap(memory_object_t mem_obj);

/*
 * Vector of VM operations for this EMM.
 * These routines are invoked by VM via the memory_object_*() interfaces.
 */
const struct memory_object_pager_ops swapfile_pager_ops = {
	swapfile_pager_reference,
	swapfile_pager_deallocate,
	swapfile_pager_init,
	swapfile_pager_terminate,
	swapfile_pager_data_request,
	swapfile_pager_data_return,
	swapfile_pager_data_initialize,
	swapfile_pager_data_unlock,
	swapfile_pager_synchronize,
	swapfile_pager_map,
	swapfile_pager_last_unmap,
	NULL, /* data_reclaim */
	"swapfile pager"
};
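
/*
 * For reference, an assumption paraphrasing VM's dispatch path (see
 * memory_object_data_request() in osfmk/vm/memory_object.c): VM reaches
 * this pager through the vector above, roughly as
 *
 *	(mem_obj->mo_pager_ops->memory_object_data_request)(
 *		mem_obj, offset, length, desired_access, fault_info);
 *
 * which is how a fault on a swap-file mapping ends up in
 * swapfile_pager_data_request() below.
 */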

/*
 * The "swapfile_pager" describes a memory object backed by
 * the "swapfile" EMM.
 */
typedef struct swapfile_pager {
	/* mandatory generic header */
	struct memory_object swp_pgr_hdr;

	/* pager-specific data */
	queue_chain_t pager_queue;	/* next & prev pagers */
	unsigned int ref_count;		/* reference count */
	boolean_t is_ready;		/* is this pager ready ? */
	boolean_t is_mapped;		/* is this pager mapped ? */
	struct vnode *swapfile_vnode;	/* the swapfile's vnode */
} *swapfile_pager_t;
#define SWAPFILE_PAGER_NULL	((swapfile_pager_t) NULL)

/*
 * List of memory objects managed by this EMM.
 * The list is protected by the "swapfile_pager_lock" lock.
 */
int swapfile_pager_count = 0;	/* number of pagers */
queue_head_t swapfile_pager_queue;
decl_lck_mtx_data(,swapfile_pager_lock)

/*
 * Statistics & counters.
 */
int swapfile_pager_count_max = 0;


lck_grp_t swapfile_pager_lck_grp;
lck_grp_attr_t swapfile_pager_lck_grp_attr;
lck_attr_t swapfile_pager_lck_attr;


/* internal prototypes */
swapfile_pager_t swapfile_pager_create(struct vnode *vp);
swapfile_pager_t swapfile_pager_lookup(memory_object_t mem_obj);
void swapfile_pager_dequeue(swapfile_pager_t pager);
void swapfile_pager_deallocate_internal(swapfile_pager_t pager,
	boolean_t locked);
void swapfile_pager_terminate_internal(swapfile_pager_t pager);


#if DEBUG
int swapfile_pagerdebug = 0;
#define PAGER_ALL	0xffffffff
#define PAGER_INIT	0x00000001
#define PAGER_PAGEIN	0x00000002

#define PAGER_DEBUG(LEVEL, A) \
	MACRO_BEGIN \
	if ((swapfile_pagerdebug & LEVEL) == LEVEL) { \
		printf A; \
	} \
	MACRO_END
#else
#define PAGER_DEBUG(LEVEL, A)
#endif


void
swapfile_pager_bootstrap(void)
{
	lck_grp_attr_setdefault(&swapfile_pager_lck_grp_attr);
	lck_grp_init(&swapfile_pager_lck_grp, "swapfile pager", &swapfile_pager_lck_grp_attr);
	lck_attr_setdefault(&swapfile_pager_lck_attr);
	lck_mtx_init(&swapfile_pager_lock, &swapfile_pager_lck_grp, &swapfile_pager_lck_attr);
	queue_init(&swapfile_pager_queue);
}

/*
 * swapfile_pager_init()
 *
 * Initializes the memory object and makes it ready to be used and mapped.
 */
kern_return_t
swapfile_pager_init(
	memory_object_t mem_obj,
	memory_object_control_t control,
#if !DEBUG
	__unused
#endif
	memory_object_cluster_size_t pg_size)
{
	swapfile_pager_t pager;
	kern_return_t kr;
	memory_object_attr_info_data_t attributes;

	PAGER_DEBUG(PAGER_ALL,
	    ("swapfile_pager_init: %p, %p, %x\n",
	    mem_obj, control, pg_size));

	if (control == MEMORY_OBJECT_CONTROL_NULL)
		return KERN_INVALID_ARGUMENT;

	pager = swapfile_pager_lookup(mem_obj);

	memory_object_control_reference(control);

	pager->swp_pgr_hdr.mo_control = control;

	attributes.copy_strategy = MEMORY_OBJECT_COPY_DELAY;
	attributes.cluster_size = (1 << (PAGE_SHIFT));
	attributes.may_cache_object = FALSE;
	attributes.temporary = TRUE;

	kr = memory_object_change_attributes(
		control,
		MEMORY_OBJECT_ATTRIBUTE_INFO,
		(memory_object_info_t) &attributes,
		MEMORY_OBJECT_ATTR_INFO_COUNT);
	if (kr != KERN_SUCCESS)
		panic("swapfile_pager_init: "
		    "memory_object_change_attributes() failed");

	return KERN_SUCCESS;
}

/*
 * swapfile_pager_data_return()
 *
 * Handles page-out requests from VM. This should never happen since
 * the pages provided by this EMM are not supposed to be dirty or dirtied
 * and VM should simply discard the contents and reclaim the pages if it
 * needs to.
 */
kern_return_t
swapfile_pager_data_return(
	__unused memory_object_t mem_obj,
	__unused memory_object_offset_t offset,
	__unused memory_object_cluster_size_t data_cnt,
	__unused memory_object_offset_t *resid_offset,
	__unused int *io_error,
	__unused boolean_t dirty,
	__unused boolean_t kernel_copy,
	__unused int upl_flags)
{
	panic("swapfile_pager_data_return: should never get called");
	return KERN_FAILURE;
}

kern_return_t
swapfile_pager_data_initialize(
	__unused memory_object_t mem_obj,
	__unused memory_object_offset_t offset,
	__unused memory_object_cluster_size_t data_cnt)
{
	panic("swapfile_pager_data_initialize: should never get called");
	return KERN_FAILURE;
}

kern_return_t
swapfile_pager_data_unlock(
	__unused memory_object_t mem_obj,
	__unused memory_object_offset_t offset,
	__unused memory_object_size_t size,
	__unused vm_prot_t desired_access)
{
	return KERN_FAILURE;
}

/*
 * swapfile_pager_data_request()
 *
 * Handles page-in requests from VM.
 */
kern_return_t
swapfile_pager_data_request(
	memory_object_t mem_obj,
	memory_object_offset_t offset,
	memory_object_cluster_size_t length,
#if !DEBUG
	__unused
#endif
	vm_prot_t protection_required,
	__unused memory_object_fault_info_t mo_fault_info)
{
	swapfile_pager_t pager;
	memory_object_control_t mo_control;
	upl_t upl;
	int upl_flags;
	upl_size_t upl_size;
	upl_page_info_t *upl_pl = NULL;
	unsigned int pl_count;
	vm_object_t dst_object;
	kern_return_t kr, retval;
	vm_map_offset_t kernel_mapping;
	vm_offset_t dst_vaddr;
	char *dst_ptr;
	vm_offset_t cur_offset;
	vm_map_entry_t map_entry;

	PAGER_DEBUG(PAGER_ALL, ("swapfile_pager_data_request: %p, %llx, %x, %x\n", mem_obj, offset, length, protection_required));

	kernel_mapping = 0;
	upl = NULL;
	upl_pl = NULL;

	pager = swapfile_pager_lookup(mem_obj);
	assert(pager->is_ready);
	assert(pager->ref_count > 1); /* pager is alive and mapped */

	PAGER_DEBUG(PAGER_PAGEIN, ("swapfile_pager_data_request: %p, %llx, %x, %x, pager %p\n", mem_obj, offset, length, protection_required, pager));

	/*
	 * Gather in a UPL all the VM pages requested by VM.
	 */
	mo_control = pager->swp_pgr_hdr.mo_control;

	upl_size = length;
	upl_flags =
		UPL_RET_ONLY_ABSENT |
		UPL_SET_LITE |
		UPL_NO_SYNC |
		UPL_CLEAN_IN_PLACE |	/* triggers UPL_CLEAR_DIRTY */
		UPL_SET_INTERNAL;
	pl_count = 0;
	kr = memory_object_upl_request(mo_control,
	    offset, upl_size,
	    &upl, NULL, NULL, upl_flags, VM_KERN_MEMORY_OSFMK);
	if (kr != KERN_SUCCESS) {
		retval = kr;
		goto done;
	}
	dst_object = mo_control->moc_object;
	assert(dst_object != VM_OBJECT_NULL);


	/*
	 * Reserve a virtual page in the kernel address space to map each
	 * destination physical page when it's its turn to be processed.
	 */
	vm_object_reference(kernel_object); /* ref. for mapping */
	kr = vm_map_find_space(kernel_map,
	    &kernel_mapping,
	    PAGE_SIZE_64,
	    0,
	    0,
	    VM_MAP_KERNEL_FLAGS_NONE,
	    VM_KERN_MEMORY_NONE,
	    &map_entry);
	if (kr != KERN_SUCCESS) {
		vm_object_deallocate(kernel_object);
		retval = kr;
		goto done;
	}
	VME_OBJECT_SET(map_entry, kernel_object);
	VME_OFFSET_SET(map_entry, kernel_mapping - VM_MIN_KERNEL_ADDRESS);
	vm_map_unlock(kernel_map);
	dst_vaddr = CAST_DOWN(vm_offset_t, kernel_mapping);
	dst_ptr = (char *) dst_vaddr;

	/*
	 * Fill in the contents of the pages requested by VM.
	 */
	upl_pl = UPL_GET_INTERNAL_PAGE_LIST(upl);
	pl_count = length / PAGE_SIZE;
	for (cur_offset = 0; cur_offset < length; cur_offset += PAGE_SIZE) {
		ppnum_t dst_pnum;

		if (!upl_page_present(upl_pl, (int)(cur_offset / PAGE_SIZE))) {
			/* this page is not in the UPL: skip it */
			continue;
		}

		/*
		 * Establish an explicit pmap mapping of the destination
		 * physical page.
		 * We can't do a regular VM mapping because the VM page
		 * is "busy".
		 */
		dst_pnum = (ppnum_t)
			upl_phys_page(upl_pl, (int)(cur_offset / PAGE_SIZE));
		assert(dst_pnum != 0);
		retval = pmap_enter(kernel_pmap,
		    kernel_mapping,
		    dst_pnum,
		    VM_PROT_READ | VM_PROT_WRITE,
		    VM_PROT_NONE,
		    0,
		    TRUE);

		assert(retval == KERN_SUCCESS);

		if (retval != KERN_SUCCESS) {
			goto done;
		}

		memset(dst_ptr, '\0', PAGE_SIZE);
		/* add an end-of-line to keep line counters happy */
		dst_ptr[PAGE_SIZE-1] = '\n';

		/*
		 * Remove the pmap mapping of the destination page
		 * in the kernel.
		 */
		pmap_remove(kernel_pmap,
		    (addr64_t) kernel_mapping,
		    (addr64_t) (kernel_mapping + PAGE_SIZE_64));

	}

	retval = KERN_SUCCESS;
done:
	if (upl != NULL) {
		/* clean up the UPL */

		/*
		 * The pages are currently dirty because we've just been
		 * writing on them, but as far as we're concerned, they're
		 * clean since they contain their "original" contents as
		 * provided by us, the pager.
		 * Tell the UPL to mark them "clean".
		 */
		upl_clear_dirty(upl, TRUE);

		/* abort or commit the UPL */
		if (retval != KERN_SUCCESS) {
			upl_abort(upl, 0);
		} else {
			boolean_t empty;
			upl_commit_range(upl, 0, upl->size,
			    UPL_COMMIT_CS_VALIDATED,
			    upl_pl, pl_count, &empty);
		}

		/* and deallocate the UPL */
		upl_deallocate(upl);
		upl = NULL;
	}
	if (kernel_mapping != 0) {
		/* clean up the mapping of the source and destination pages */
		kr = vm_map_remove(kernel_map,
		    kernel_mapping,
		    kernel_mapping + PAGE_SIZE_64,
		    VM_MAP_REMOVE_NO_FLAGS);
		assert(kr == KERN_SUCCESS);
		kernel_mapping = 0;
		dst_vaddr = 0;
	}

	return retval;
}

/*
 * swapfile_pager_reference()
 *
 * Get a reference on this memory object.
 * For external usage only. Assumes that the initial reference count is not 0,
 * i.e. one should not "revive" a dead pager this way.
 */
void
swapfile_pager_reference(
	memory_object_t mem_obj)
{
	swapfile_pager_t pager;

	pager = swapfile_pager_lookup(mem_obj);

	lck_mtx_lock(&swapfile_pager_lock);
	assert(pager->ref_count > 0);
	pager->ref_count++;
	lck_mtx_unlock(&swapfile_pager_lock);
}


/*
 * swapfile_pager_dequeue:
 *
 * Removes a pager from the list of pagers.
 *
 * The caller must hold "swapfile_pager_lock".
 */
void
swapfile_pager_dequeue(
	swapfile_pager_t pager)
{
	assert(!pager->is_mapped);

	queue_remove(&swapfile_pager_queue,
	    pager,
	    swapfile_pager_t,
	    pager_queue);
	pager->pager_queue.next = NULL;
	pager->pager_queue.prev = NULL;

	swapfile_pager_count--;
}

/*
 * swapfile_pager_terminate_internal:
 *
 * Trigger the asynchronous termination of the memory object associated
 * with this pager.
 * When the memory object is terminated, there will be one more call
 * to memory_object_deallocate() (i.e. swapfile_pager_deallocate())
 * to finish the clean up.
 *
 * "swapfile_pager_lock" should not be held by the caller.
 * We don't need the lock because the pager has already been removed from
 * the pagers' list and is now ours exclusively.
 */
void
swapfile_pager_terminate_internal(
	swapfile_pager_t pager)
{
	assert(pager->is_ready);
	assert(!pager->is_mapped);

	if (pager->swapfile_vnode != NULL) {
		pager->swapfile_vnode = NULL;
	}

	/* trigger the destruction of the memory object */
	memory_object_destroy(pager->swp_pgr_hdr.mo_control, 0);
}

/*
 * swapfile_pager_deallocate_internal()
 *
 * Release a reference on this pager and free it when the last
 * reference goes away.
 * Can be called with swapfile_pager_lock held or not but always returns
 * with it unlocked.
 */
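/*
 * Reference-count states, as handled below:
 *   ref_count >  1: other references and/or mappings still exist;
 *   ref_count == 1: only the "named" reference is left; dequeue the pager
 *                   and trigger the termination of its memory object;
 *   ref_count == 0: the existence reference has been dropped after
 *                   termination; release the control and free the pager.
 */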
void
swapfile_pager_deallocate_internal(
	swapfile_pager_t pager,
	boolean_t locked)
{
	if (!locked) {
		lck_mtx_lock(&swapfile_pager_lock);
	}

	/* drop a reference on this pager */
	pager->ref_count--;

	if (pager->ref_count == 1) {
		/*
		 * Only the "named" reference is left, which means that
		 * no one is really holding on to this pager anymore.
		 * Terminate it.
		 */
		swapfile_pager_dequeue(pager);
		/* the pager is all ours: no need for the lock now */
		lck_mtx_unlock(&swapfile_pager_lock);
		swapfile_pager_terminate_internal(pager);
	} else if (pager->ref_count == 0) {
		/*
		 * Dropped the existence reference; the memory object has
		 * been terminated. Do some final cleanup and release the
		 * pager structure.
		 */
		lck_mtx_unlock(&swapfile_pager_lock);
		if (pager->swp_pgr_hdr.mo_control != MEMORY_OBJECT_CONTROL_NULL) {
			memory_object_control_deallocate(pager->swp_pgr_hdr.mo_control);
			pager->swp_pgr_hdr.mo_control = MEMORY_OBJECT_CONTROL_NULL;
		}
		kfree(pager, sizeof (*pager));
		pager = SWAPFILE_PAGER_NULL;
	} else {
		/* there are still plenty of references: keep going... */
		lck_mtx_unlock(&swapfile_pager_lock);
	}

	/* caution: lock is not held on return... */
}

/*
 * swapfile_pager_deallocate()
 *
 * Release a reference on this pager and free it when the last
 * reference goes away.
 */
void
swapfile_pager_deallocate(
	memory_object_t mem_obj)
{
	swapfile_pager_t pager;

	PAGER_DEBUG(PAGER_ALL, ("swapfile_pager_deallocate: %p\n", mem_obj));
	pager = swapfile_pager_lookup(mem_obj);
	swapfile_pager_deallocate_internal(pager, FALSE);
}

/*
 * swapfile_pager_terminate()
 */
kern_return_t
swapfile_pager_terminate(
#if !DEBUG
	__unused
#endif
	memory_object_t mem_obj)
{
	PAGER_DEBUG(PAGER_ALL, ("swapfile_pager_terminate: %p\n", mem_obj));

	return KERN_SUCCESS;
}

/*
 * swapfile_pager_synchronize()
 */
kern_return_t
swapfile_pager_synchronize(
	__unused memory_object_t mem_obj,
	__unused memory_object_offset_t offset,
	__unused memory_object_size_t length,
	__unused vm_sync_t sync_flags)
{
	panic("swapfile_pager_synchronize: memory_object_synchronize no longer supported\n");
	return KERN_FAILURE;
}

/*
 * swapfile_pager_map()
 *
 * This allows VM to let us, the EMM, know that this memory object
 * is currently mapped one or more times. This is called by VM each time
 * the memory object gets mapped and we take one extra reference on the
 * memory object to account for all its mappings.
 */
kern_return_t
swapfile_pager_map(
	memory_object_t mem_obj,
	__unused vm_prot_t prot)
{
	swapfile_pager_t pager;

	PAGER_DEBUG(PAGER_ALL, ("swapfile_pager_map: %p\n", mem_obj));

	pager = swapfile_pager_lookup(mem_obj);

	lck_mtx_lock(&swapfile_pager_lock);
	assert(pager->is_ready);
	assert(pager->ref_count > 0); /* pager is alive */
	if (pager->is_mapped == FALSE) {
		/*
		 * First mapping of this pager: take an extra reference
		 * that will remain until all the mappings of this pager
		 * are removed.
		 */
		pager->is_mapped = TRUE;
		pager->ref_count++;
	}
	lck_mtx_unlock(&swapfile_pager_lock);

	return KERN_SUCCESS;
}

/*
 * swapfile_pager_last_unmap()
 *
 * This is called by VM when this memory object is no longer mapped anywhere.
 */
kern_return_t
swapfile_pager_last_unmap(
	memory_object_t mem_obj)
{
	swapfile_pager_t pager;

	PAGER_DEBUG(PAGER_ALL,
	    ("swapfile_pager_last_unmap: %p\n", mem_obj));

	pager = swapfile_pager_lookup(mem_obj);

	lck_mtx_lock(&swapfile_pager_lock);
	if (pager->is_mapped) {
		/*
		 * All the mappings are gone, so let go of the one extra
		 * reference that represents all the mappings of this pager.
		 */
		pager->is_mapped = FALSE;
		swapfile_pager_deallocate_internal(pager, TRUE);
		/* caution: deallocate_internal() released the lock ! */
	} else {
		lck_mtx_unlock(&swapfile_pager_lock);
	}

	return KERN_SUCCESS;
}


/*
 * swapfile_pager_lookup()
 */
swapfile_pager_t
swapfile_pager_lookup(
	memory_object_t mem_obj)
{
	swapfile_pager_t pager;

	assert(mem_obj->mo_pager_ops == &swapfile_pager_ops);
	__IGNORE_WCASTALIGN(pager = (swapfile_pager_t) mem_obj);
	assert(pager->ref_count > 0);
	return pager;
}

swapfile_pager_t
swapfile_pager_create(
	struct vnode *vp)
{
	swapfile_pager_t pager, pager2;
	memory_object_control_t control;
	kern_return_t kr;

	pager = (swapfile_pager_t) kalloc(sizeof (*pager));
	if (pager == SWAPFILE_PAGER_NULL) {
		return SWAPFILE_PAGER_NULL;
	}

	/*
	 * The vm_map call takes both named entry ports and raw memory
	 * objects in the same parameter. We need to make sure that
	 * vm_map does not see this object as a named entry port. So,
	 * we reserve the second word in the object for a fake ip_kotype
	 * setting - that will tell vm_map to use it as a memory object.
	 */
	pager->swp_pgr_hdr.mo_ikot = IKOT_MEMORY_OBJECT;
	pager->swp_pgr_hdr.mo_pager_ops = &swapfile_pager_ops;
	pager->swp_pgr_hdr.mo_control = MEMORY_OBJECT_CONTROL_NULL;

	pager->is_ready = FALSE;	/* not ready until it has a "name" */
	pager->ref_count = 1;		/* setup reference */
	pager->is_mapped = FALSE;
	pager->swapfile_vnode = vp;

	lck_mtx_lock(&swapfile_pager_lock);
	/* see if anyone raced us to create a pager for the same object */
	queue_iterate(&swapfile_pager_queue,
	    pager2,
	    swapfile_pager_t,
	    pager_queue) {
		if (pager2->swapfile_vnode == vp) {
			break;
		}
	}
	if (!queue_end(&swapfile_pager_queue,
	    (queue_entry_t) pager2)) {
		/* while we hold the lock, transfer our setup ref to winner */
		pager2->ref_count++;
		/* we lost the race, down with the loser... */
		lck_mtx_unlock(&swapfile_pager_lock);
		pager->swapfile_vnode = NULL;
		kfree(pager, sizeof (*pager));
		/* ... and go with the winner */
		pager = pager2;
		/* let the winner make sure the pager gets ready */
		return pager;
	}

	/* enter new pager at the head of our list of pagers */
	queue_enter_first(&swapfile_pager_queue,
	    pager,
	    swapfile_pager_t,
	    pager_queue);
	swapfile_pager_count++;
	if (swapfile_pager_count > swapfile_pager_count_max) {
		swapfile_pager_count_max = swapfile_pager_count;
	}
	lck_mtx_unlock(&swapfile_pager_lock);

	kr = memory_object_create_named((memory_object_t) pager,
	    0,
	    &control);
	assert(kr == KERN_SUCCESS);

	lck_mtx_lock(&swapfile_pager_lock);
	/* the new pager is now ready to be used */
	pager->is_ready = TRUE;
	lck_mtx_unlock(&swapfile_pager_lock);

	/* wakeup anyone waiting for this pager to be ready */
	thread_wakeup(&pager->is_ready);

	return pager;
}

/*
 * swapfile_pager_setup()
 *
 * Provide the caller with a memory object backed by the provided
 * swap file's vnode. If such a memory object already exists, re-use it,
 * otherwise create a new memory object.
 */
memory_object_t
swapfile_pager_setup(
	struct vnode *vp)
{
	swapfile_pager_t pager;

	lck_mtx_lock(&swapfile_pager_lock);

	queue_iterate(&swapfile_pager_queue,
	    pager,
	    swapfile_pager_t,
	    pager_queue) {
		if (pager->swapfile_vnode == vp) {
			break;
		}
	}
	if (queue_end(&swapfile_pager_queue,
	    (queue_entry_t) pager)) {
		/* no existing pager for this backing vnode */
		pager = SWAPFILE_PAGER_NULL;
	} else {
		/* make sure pager doesn't disappear */
		pager->ref_count++;
	}

	lck_mtx_unlock(&swapfile_pager_lock);

	if (pager == SWAPFILE_PAGER_NULL) {
		pager = swapfile_pager_create(vp);
		if (pager == SWAPFILE_PAGER_NULL) {
			return MEMORY_OBJECT_NULL;
		}
	}

	lck_mtx_lock(&swapfile_pager_lock);
	while (!pager->is_ready) {
		lck_mtx_sleep(&swapfile_pager_lock,
		    LCK_SLEEP_DEFAULT,
		    &pager->is_ready,
		    THREAD_UNINT);
	}
	lck_mtx_unlock(&swapfile_pager_lock);

	return (memory_object_t) pager;
}

memory_object_control_t
swapfile_pager_control(
	memory_object_t mem_obj)
{
	swapfile_pager_t pager;

	if (mem_obj == MEMORY_OBJECT_NULL ||
	    mem_obj->mo_pager_ops != &swapfile_pager_ops) {
		return MEMORY_OBJECT_CONTROL_NULL;
	}
	pager = swapfile_pager_lookup(mem_obj);
	return pager->swp_pgr_hdr.mo_control;
}
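
/*
 * Illustrative usage sketch; an assumption for illustration, not code from
 * this file.  It shows how a caller holding a swap file's vnode obtains the
 * pager's memory object and its control port via the two routines above.
 */
#if 0
static void
swapfile_pager_usage_sketch(struct vnode *swap_vp)
{
	memory_object_t pager;
	memory_object_control_t control;

	/* finds an existing pager for this vnode, or creates a new one */
	pager = swapfile_pager_setup(swap_vp);
	if (pager == MEMORY_OBJECT_NULL)
		return;
	/* the control identifies the VM object backed by this pager */
	control = swapfile_pager_control(pager);
	(void) control;
}
#endif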