 * Copyright (c) 2000-2016 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
30 #include <sys/fcntl.h>
31 #include <sys/vnode_internal.h>
32 #include <sys/vnode.h>
33 #include <sys/kauth.h>
34 #include <sys/mount_internal.h>
35 #include <sys/buf_internal.h>
36 #include <kern/debug.h>
37 #include <kern/kalloc.h>
38 #include <sys/cprotect.h>
40 #include <vm/vm_protos.h>
41 #include <vm/vm_pageout.h>
/*
 * Forward declarations for the swap-file backing interface consumed by the
 * VM compressor/swap layer.
 * NOTE(review): this extraction is garbled — statements are split across
 * lines and original line numbers are fused into the text; tokens are kept
 * byte-identical below.
 */
/* Create/truncate and open the swap file at 'path'; result vnode via *vp. */
43 void vm_swapfile_open(const char *path
, vnode_t
*vp
);
/* Close and unlink a swap file. 'path' is the path string's address passed
 * as a uint64_t (cast back for unlink — see vm_swapfile_close below). */
44 void vm_swapfile_close(uint64_t path
, vnode_t vp
);
/* Grow the swap file to *size bytes; *pin optionally requests pinning. */
45 int vm_swapfile_preallocate(vnode_t vp
, uint64_t *size
, boolean_t
*pin
);
/* Device block size of the volume backing the swap vnode. */
46 uint64_t vm_swapfile_get_blksize(vnode_t vp
);
/* Preferred transfer (f_iosize) size of the backing volume. */
47 uint64_t vm_swapfile_get_transfer_size(vnode_t vp
);
/* Page 'npages' pages in or out (per 'flags') at 'offset' in the swap file. */
48 int vm_swapfile_io(vnode_t vp
, uint64_t offset
, uint64_t start
, int npages
, int flags
);
/* Synchronous raw write of 'size' bytes from 'buf' at 'offset'. */
49 int vm_record_file_write(struct vnode
*vp
, uint64_t offset
, char *buf
, int size
);
/*
 * vm_swapfile_open():
 * Create (or truncate) and open the swap file at 'path' for read/write with
 * owner-only permissions, using the kernel vfs context. On success the open
 * vnode is returned through *vp.
 * NOTE(review): several interior lines (return type, 'error' declaration,
 * braces, early returns) are missing from this extraction; comments annotate
 * only the visible fragments.
 */
53 vm_swapfile_open(const char *path
, vnode_t
*vp
)
/* All swap-file VFS operations run in the kernel's own context. */
56 vfs_context_t ctx
= vfs_context_kernel();
/* O_CREAT|O_TRUNC + FREAD|FWRITE, mode 0600 (S_IRUSR|S_IWUSR). */
58 if ((error
= vnode_open(path
, (O_CREAT
| O_TRUNC
| FREAD
| FWRITE
), S_IRUSR
| S_IWUSR
, 0, vp
, ctx
))) {
59 printf("Failed to open swap file %d\n", error
);
65 * If MNT_IOFLAGS_NOSWAP is set, opening the swap file should fail.
66 * To avoid a race on the mount we only make this check after creating the
/* Mount forbids swap: undo the create by closing and unlinking the file. */
69 if ((*vp
)->v_mount
->mnt_kern_flag
& MNTK_NOSWAP
) {
/* path pointer is smuggled through a uint64_t per the close() signature. */
71 vm_swapfile_close((uint64_t)path
, *vp
);
80 vm_swapfile_get_blksize(vnode_t vp
)
82 return ((uint64_t)vfs_devblocksize(vnode_mount(vp
)));
86 vm_swapfile_get_transfer_size(vnode_t vp
)
88 return((uint64_t)vp
->v_mount
->mnt_vfsstat
.f_iosize
);
/*
 * unlink1() lives in the VFS syscall layer; declared locally here (rather
 * than via a shared header) so vm_swapfile_close() can remove the swap file
 * by path. NOTE(review): parameter names are absent in the original — the
 * args appear to be (context, start vnode, path address, segment, flags);
 * confirm against the definition.
 */
91 int unlink1(vfs_context_t
, vnode_t
, user_addr_t
, enum uio_seg
, int);
/*
 * vm_swapfile_close():
 * Close the swap vnode, then unlink the file named by 'path_addr' (a kernel
 * string address carried in a uint64_t). Unlink failure is only reported on
 * DEVELOPMENT/DEBUG builds.
 * NOTE(review): interior lines (return type, 'error' declaration, the
 * trailing unlink1 arguments, braces) are missing from this extraction.
 */
94 vm_swapfile_close(uint64_t path_addr
, vnode_t vp
)
96 vfs_context_t context
= vfs_context_kernel();
/* Drop the vnode reference taken at open time. */
100 vnode_close(vp
, 0, context
);
/* Remove the file by path; address is re-cast to user_addr_t for unlink1. */
102 error
= unlink1(context
, NULLVP
, CAST_USER_ADDR_T(path_addr
),
/* Diagnostic only on development/debug kernels; failure is non-fatal. */
105 #if DEVELOPMENT || DEBUG
107 printf("%s : unlink of %s failed with error %d", __FUNCTION__
,
108 (char *)path_addr
, error
);
/*
 * vm_swapfile_preallocate():
 * Grow the swap file to *size bytes (without zero-filling), verify the new
 * size, optionally pin the file's blocks in place (FIOPINSWAP), and mark
 * the vnode as a swap file (VSWAP).
 * NOTE(review): interior lines (return type, 'error' declaration, braces,
 * error-path control flow) are missing from this extraction.
 */
113 vm_swapfile_preallocate(vnode_t vp
, uint64_t *size
, boolean_t
*pin
)
116 uint64_t file_size
= 0;
117 vfs_context_t ctx
= NULL
;
120 ctx
= vfs_context_kernel();
/* IO_NOZEROFILL: extend the file without writing zeroes to the new range. */
122 error
= vnode_setsize(vp
, *size
, IO_NOZEROFILL
, ctx
);
125 printf("vnode_setsize for swap files failed: %d\n", error
);
/* Read the size back to confirm the filesystem honored the request. */
129 error
= vnode_size(vp
, (off_t
*) &file_size
, ctx
);
132 printf("vnode_size (new file) for swap file failed: %d\n", error
);
135 assert(file_size
== *size
);
/* Caller asked for pinning: request it via the FIOPINSWAP ioctl. */
137 if (pin
!= NULL
&& *pin
!= FALSE
) {
138 error
= VNOP_IOCTL(vp
, FIOPINSWAP
, NULL
, 0, ctx
);
141 printf("pin for swap files failed: %d, file_size = %lld\n", error
, file_size
);
142 /* this is not fatal, carry on with files wherever they landed */
/* Flag the vnode as swap backing so the rest of VFS treats it specially. */
149 SET(vp
->v_flag
, VSWAP
);
/*
 * vm_record_file_write():
 * Synchronously write 'size' bytes from kernel buffer 'buf' to the vnode at
 * byte 'offset' via vn_rdwr(), in the kernel vfs context.
 * NOTE(review): the return type, declarations of 'ctx'/'error', and the
 * return statement are missing from this extraction.
 */
157 vm_record_file_write(vnode_t vp
, uint64_t offset
, char *buf
, int size
)
162 ctx
= vfs_context_kernel();
/* UIO_SYSSPACE: buffer is kernel memory; IO_NODELOCKED: caller handles
 * any vnode locking. Residual-count pointer is NULL — full write assumed. */
164 error
= vn_rdwr(UIO_WRITE
, vp
, (caddr_t
)buf
, size
, offset
,
165 UIO_SYSSPACE
, IO_NODELOCKED
, vfs_context_ucred(ctx
), (int *) 0, vfs_context_proc(ctx
));
/*
 * vm_swapfile_io():
 * Move 'npages' pages between memory at 'start' and the swap file at byte
 * 'offset'. SWAP_READ in 'flags' selects pagein; otherwise pageout. The
 * primary path builds a UPL over the buffer and uses vnode_pagein/pageout;
 * a fallback path (visible at the bottom) uses plain vn_rdwr().
 * NOTE(review): this extraction is missing many interior lines — the
 * vm_map_create_upl argument list, the vnode_pagein/vnode_pageout calls
 * themselves, conditional-compilation markers, and the return — so the
 * comments below annotate only the visible fragments.
 */
173 vm_swapfile_io(vnode_t vp
, uint64_t offset
, uint64_t start
, int npages
, int flags
)
/* Total transfer size in bytes. */
176 uint64_t io_size
= npages
* PAGE_SIZE_64
;
178 kern_return_t kr
= KERN_SUCCESS
;
180 unsigned int count
= 0;
181 upl_control_flags_t upl_create_flags
= 0;
182 int upl_control_flags
= 0;
183 upl_size_t upl_size
= 0;
/* Lite UPL with internal page list, tagged for the OSFMK memory accounting
 * bucket. */
185 upl_create_flags
= UPL_SET_INTERNAL
| UPL_SET_LITE
186 | UPL_MEMORY_TAG_MAKE(VM_KERN_MEMORY_OSFMK
);
/* Synchronous I/O; the ENCRYPTED variant is presumably gated on encrypted
 * swap configuration (conditional lines missing here — confirm). */
189 upl_control_flags
= UPL_IOSYNC
| UPL_PAGING_ENCRYPTED
;
191 upl_control_flags
= UPL_IOSYNC
;
/* For writes (pageout) the UPL copies data out of the address space. */
193 if ((flags
& SWAP_READ
) == FALSE
) {
194 upl_create_flags
|= UPL_COPYOUT_FROM
;
/* Wrap [start, start+io_size) of kernel_map in a UPL (argument lines
 * missing from this view). */
198 kr
= vm_map_create_upl( kernel_map
,
/* Anything short of a full-size UPL is unrecoverable for swap I/O. */
206 if (kr
!= KERN_SUCCESS
|| (upl_size
!= io_size
)) {
207 panic("vm_map_create_upl failed with %d\n", kr
);
/* Pagein path (SWAP_READ). */
210 if (flags
& SWAP_READ
) {
216 upl_control_flags
| UPL_IGNORE_VALID_PAGE_CHECK
,
220 printf("vm_swapfile_io: vnode_pagein failed with %d (vp: %p, offset: 0x%llx, size:%llu)\n", error
, vp
, offset
, io_size
);
222 printf("vm_swapfile_io: vnode_pagein failed with %d.\n", error
);
/* Pageout-path failure diagnostics. */
236 printf("vm_swapfile_io: vnode_pageout failed with %d (vp: %p, offset: 0x%llx, size:%llu)\n", error
, vp
, offset
, io_size
);
238 printf("vm_swapfile_io: vnode_pageout failed with %d.\n", error
);
/* Fallback path: plain synchronous vn_rdwr of the whole range. */
246 ctx
= vfs_context_kernel();
248 error
= vn_rdwr((flags
& SWAP_READ
) ? UIO_READ
: UIO_WRITE
, vp
, (caddr_t
)start
, io_size
, offset
,
/* IO_NOCACHE keeps swap data out of the buffer cache; IO_SWAP_DISPATCH
 * routes it through the swap dispatch path. */
249 UIO_SYSSPACE
, IO_SYNC
| IO_NODELOCKED
| IO_UNIT
| IO_NOCACHE
| IO_SWAP_DISPATCH
, vfs_context_ucred(ctx
), (int *) 0, vfs_context_proc(ctx
));
252 printf("vn_rdwr: Swap I/O failed with %d\n", error
);
/* Maximum number of extents sent to the device in one unmap/trim ioctl. */
259 #define MAX_BATCH_TO_TRIM 256
261 #define ROUTE_ONLY 0x10 /* if corestorage is present, tell it to just pass */
262 /* the DKIOUNMAP command through w/o acting on it */
263 /* this is used by the compressed swap system to reclaim empty space */
/*
 * vnode_trim_list():
 * Issue TRIM/UNMAP for each logical range in the 'tl' list of a vnode.
 * Each range is mapped to physical extents via VNOP_BLOCKMAP, extents are
 * batched (up to MAX_BATCH_TO_TRIM) and sent to the underlying device with
 * DKIOCUNMAP — or _DKIOCCSUNMAP when CoreStorage unmap is supported, with
 * ROUTE_ONLY optionally asking CoreStorage to pass the command through.
 * NOTE(review): this extraction is missing interior lines (declarations of
 * 'error'/'devvp'/'trim_index'/'trimmed'/'unmap', the trim-list walk, error
 * gotos, and the return) — comments annotate only the visible fragments.
 */
266 u_int32_t
vnode_trim_list (vnode_t vp
, struct trim_list
*tl
, boolean_t route_only
)
270 u_int32_t blocksize
= 0;
272 dk_extent_t
*extents
;
274 _dk_cs_unmap_t cs_unmap
;
/* Bail out when the backing device does not support unmap at all. */
276 if ( !(vp
->v_mount
->mnt_ioflags
& MNT_IOFLAGS_UNMAP_SUPPORTED
))
283 * Get the underlying device vnode and physical block size
285 devvp
= vp
->v_mount
->mnt_devvp
;
286 blocksize
= vp
->v_mount
->mnt_devblocksize
;
/* One reusable batch buffer of extents for all ioctl rounds. */
288 extents
= kalloc(sizeof(dk_extent_t
) * MAX_BATCH_TO_TRIM
);
/* Prepare the CoreStorage or plain unmap request around that buffer. */
290 if (vp
->v_mount
->mnt_ioflags
& MNT_IOFLAGS_CSUNMAP_SUPPORTED
) {
291 memset (&cs_unmap
, 0, sizeof(_dk_cs_unmap_t
));
292 cs_unmap
.extents
= extents
;
294 if (route_only
== TRUE
)
295 cs_unmap
.options
= ROUTE_ONLY
;
297 memset (&unmap
, 0, sizeof(dk_unmap_t
));
298 unmap
.extents
= extents
;
302 daddr64_t io_blockno
; /* Block number corresponding to the start of the extent */
303 size_t io_bytecount
; /* Number of bytes in current extent for the specified range */
305 size_t remaining_length
;
306 off_t current_offset
;
/* Start of the current trim-list entry's logical range. */
308 current_offset
= tl
->tl_offset
;
309 remaining_length
= tl
->tl_length
;
313 * We may not get the entire range from tl_offset -> tl_offset+tl_length in a single
314 * extent from the blockmap call. Keep looping/going until we are sure we've hit
315 * the whole range or if we encounter an error.
317 while (trimmed
< tl
->tl_length
) {
319 * VNOP_BLOCKMAP will tell us the logical to physical block number mapping for the
320 * specified offset. It returns blocks in contiguous chunks, so if the logical range is
321 * broken into multiple extents, it must be called multiple times, increasing the offset
322 * in each call to ensure that the entire range is covered.
324 error
= VNOP_BLOCKMAP (vp
, current_offset
, remaining_length
,
325 &io_blockno
, &io_bytecount
, NULL
, VNODE_READ
, NULL
);
/* Record this extent in device-byte units (block number * block size). */
331 extents
[trim_index
].offset
= (uint64_t) io_blockno
* (u_int64_t
) blocksize
;
332 extents
[trim_index
].length
= io_bytecount
;
/* Batch full: flush it to the device before collecting more extents. */
336 if (trim_index
== MAX_BATCH_TO_TRIM
) {
338 if (vp
->v_mount
->mnt_ioflags
& MNT_IOFLAGS_CSUNMAP_SUPPORTED
) {
339 cs_unmap
.extentsCount
= trim_index
;
340 error
= VNOP_IOCTL(devvp
, _DKIOCCSUNMAP
, (caddr_t
)&cs_unmap
, 0, vfs_context_kernel());
342 unmap
.extentsCount
= trim_index
;
343 error
= VNOP_IOCTL(devvp
, DKIOCUNMAP
, (caddr_t
)&unmap
, 0, vfs_context_kernel());
/* Advance through the logical range by the extent just recorded. */
350 trimmed
+= io_bytecount
;
351 current_offset
+= io_bytecount
;
352 remaining_length
-= io_bytecount
;
/* Flush any partially filled final batch. */
357 if (vp
->v_mount
->mnt_ioflags
& MNT_IOFLAGS_CSUNMAP_SUPPORTED
) {
358 cs_unmap
.extentsCount
= trim_index
;
359 error
= VNOP_IOCTL(devvp
, _DKIOCCSUNMAP
, (caddr_t
)&cs_unmap
, 0, vfs_context_kernel());
361 unmap
.extentsCount
= trim_index
;
362 error
= VNOP_IOCTL(devvp
, DKIOCUNMAP
, (caddr_t
)&unmap
, 0, vfs_context_kernel());
/* Release the extent batch buffer (size must match the kalloc above). */
366 kfree(extents
, sizeof(dk_extent_t
) * MAX_BATCH_TO_TRIM
);