2 * Copyright (c) 2000-2013 Apple Inc. All rights reserved.
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
30 #include <sys/fcntl.h>
31 #include <sys/vnode_internal.h>
32 #include <sys/vnode.h>
33 #include <sys/kauth.h>
34 #include <sys/mount_internal.h>
35 #include <sys/buf_internal.h>
36 #include <kern/debug.h>
37 #include <kern/kalloc.h>
38 #include <sys/cprotect.h>
40 #include <vm/vm_protos.h>
41 #include <vm/vm_pageout.h>
44 void vm_swapfile_open(const char *path
, vnode_t
*vp
);
45 void vm_swapfile_close(uint64_t path
, vnode_t vp
);
46 int vm_swapfile_preallocate(vnode_t vp
, uint64_t *size
, boolean_t
*pin
);
47 uint64_t vm_swapfile_get_blksize(vnode_t vp
);
48 uint64_t vm_swapfile_get_transfer_size(vnode_t vp
);
49 int vm_swapfile_io(vnode_t vp
, uint64_t offset
, uint64_t start
, int npages
, int flags
);
50 int vm_record_file_write(struct vnode
*vp
, uint64_t offset
, char *buf
, int size
);
/*
 * vm_swapfile_open(): create (or truncate) and open the swap file named
 * by 'path' for read/write, returning the opened vnode through *vp.
 *
 * NOTE(review): incomplete extraction — the return type, the
 * declaration of 'error', and the function's tail (error path /
 * return) are not visible in this chunk.
 */
54 vm_swapfile_open(const char *path
, vnode_t
*vp
)
57 vfs_context_t ctx
= vfs_context_current();
/*
 * O_CREAT|O_TRUNC always starts from an empty file; mode
 * S_IRUSR|S_IWUSR keeps the swap file owner-only readable/writable.
 */
59 if ((error
= vnode_open(path
, (O_CREAT
| O_TRUNC
| FREAD
| FWRITE
), S_IRUSR
| S_IWUSR
, 0, vp
, ctx
))) {
60 printf("Failed to open swap file %d\n", error
);
69 vm_swapfile_get_blksize(vnode_t vp
)
71 return ((uint64_t)vfs_devblocksize(vnode_mount(vp
)));
75 vm_swapfile_get_transfer_size(vnode_t vp
)
77 return((uint64_t)vp
->v_mount
->mnt_vfsstat
.f_iosize
);
80 int unlink1(vfs_context_t
, vnode_t
, user_addr_t
, enum uio_seg
, int);
/*
 * vm_swapfile_close(): close the swap file's vnode, then unlink the
 * file from the filesystem.  'path_addr' is the address of the
 * NUL-terminated path string, carried as a uint64_t and converted
 * back via CAST_USER_ADDR_T.
 *
 * NOTE(review): incomplete extraction — the return type, the 'error'
 * declaration, the trailing unlink1() arguments and the function tail
 * are not visible in this chunk.
 */
83 vm_swapfile_close(uint64_t path_addr
, vnode_t vp
)
85 vfs_context_t context
= vfs_context_current();
89 vnode_close(vp
, 0, context
);
/* Remove the on-disk file; NULLVP means resolve from the path alone. */
91 error
= unlink1(context
, NULLVP
, CAST_USER_ADDR_T(path_addr
),
94 #if DEVELOPMENT || DEBUG
/* On DEVELOPMENT/DEBUG builds, log a failed unlink for diagnosis. */
96 printf("%s : unlink of %s failed with error %d", __FUNCTION__
,
97 (char *)path_addr
, error
);
/*
 * vm_swapfile_preallocate(): grow the swap file to *size bytes
 * (without zero-filling), verify the resulting on-disk size, and —
 * when 'pin' is non-NULL and true — ask HFS to pin the file's blocks.
 *
 * NOTE(review): incomplete extraction — the return type, the 'error'
 * declaration, several if(error)/goto lines, the 'done' path and
 * closing braces are not visible in this chunk.
 */
102 vm_swapfile_preallocate(vnode_t vp
, uint64_t *size
, boolean_t
*pin
)
105 uint64_t file_size
= 0;
106 vfs_context_t ctx
= NULL
;
109 ctx
= vfs_context_current();
/* Disabled content-protection handling, kept for reference (rdar://11771612). */
113 #if 0 // <rdar://11771612>
115 if ((error
= cp_vnode_setclass(vp
, PROTECTION_CLASS_F
))) {
116 if(config_protect_bug
) {
117 printf("swap protection class set failed with %d\n", error
);
119 panic("swap protection class set failed with %d\n", error
);
123 /* initialize content protection keys manually */
124 if ((error
= cp_handle_vnop(vp
, CP_WRITE_ACCESS
, 0)) != 0) {
125 printf("Content Protection key failure on swap: %d\n", error
);
/* IO_NOZEROFILL: allocate the space but skip zeroing it. */
132 error
= vnode_setsize(vp
, *size
, IO_NOZEROFILL
, ctx
);
135 printf("vnode_setsize for swap files failed: %d\n", error
);
/* Re-read the size to confirm the allocation took effect. */
139 error
= vnode_size(vp
, (off_t
*) &file_size
, ctx
);
142 printf("vnode_size (new file) for swap file failed: %d\n", error
);
145 assert(file_size
== *size
);
/* Pinning is only attempted on HFS-backed swap files (asserted below). */
147 if (pin
!= NULL
&& *pin
!= FALSE
) {
149 assert(vnode_tag(vp
) == VT_HFS
);
151 error
= hfs_pin_vnode(VTOHFS(vp
), vp
, HFS_PIN_IT
| HFS_DATALESS_PIN
, NULL
, ctx
);
154 printf("hfs_pin_vnode for swap files failed: %d\n", error
);
155 /* this is not fatal, carry on with files wherever they landed */
/* Mark the vnode as a swap file so the rest of the system treats it specially. */
162 SET(vp
->v_flag
, VSWAP
);
/*
 * vm_record_file_write(): synchronously write 'size' bytes from 'buf'
 * to vnode 'vp' at byte 'offset', using the kernel VFS context.
 *
 * NOTE(review): incomplete extraction — the declarations of 'ctx' and
 * 'error' and the return statement are not visible in this chunk.
 */
170 vm_record_file_write(vnode_t vp
, uint64_t offset
, char *buf
, int size
)
175 ctx
= vfs_context_kernel();
/* IO_NODELOCKED: the caller is responsible for any vnode locking. */
177 error
= vn_rdwr(UIO_WRITE
, vp
, (caddr_t
)buf
, size
, offset
,
178 UIO_SYSSPACE
, IO_NODELOCKED
, vfs_context_ucred(ctx
), (int *) 0, vfs_context_proc(ctx
));
/*
 * vm_swapfile_io(): move 'npages' pages between the swap file ('vp'
 * at byte 'offset') and kernel memory at address 'start'.  SWAP_READ
 * in 'flags' selects a pagein; otherwise a pageout.  One path wraps
 * the memory in a UPL and uses vnode_pagein/vnode_pageout; another
 * uses vn_rdwr directly.
 *
 * NOTE(review): incomplete extraction — the 'error' declaration, the
 * conditional-compilation boundaries selecting between the UPL and
 * vn_rdwr paths, the full vnode_pagein/vnode_pageout argument lists,
 * and the function tail are not visible in this chunk.
 */
186 vm_swapfile_io(vnode_t vp
, uint64_t offset
, uint64_t start
, int npages
, int flags
)
189 uint64_t io_size
= npages
* PAGE_SIZE_64
;
191 kern_return_t kr
= KERN_SUCCESS
;
193 unsigned int count
= 0;
194 upl_control_flags_t upl_create_flags
= 0;
195 int upl_control_flags
= 0;
196 upl_size_t upl_size
= 0;
/* Build a lite, internal UPL, tagged for OSFMK memory accounting. */
198 upl_create_flags
= UPL_SET_INTERNAL
| UPL_SET_LITE
199 | UPL_MEMORY_TAG_MAKE(VM_KERN_MEMORY_OSFMK
);
/*
 * NOTE(review): the #if/#else selecting between these two
 * assignments is not visible; one variant additionally sets
 * UPL_PAGING_ENCRYPTED.
 */
202 upl_control_flags
= UPL_IOSYNC
| UPL_PAGING_ENCRYPTED
;
204 upl_control_flags
= UPL_IOSYNC
;
/* For writes (pageout), data must be copied out of the source pages. */
206 if ((flags
& SWAP_READ
) == FALSE
) {
207 upl_create_flags
|= UPL_COPYOUT_FROM
;
211 kr
= vm_map_create_upl( kernel_map
,
/* A short UPL means the whole request could not be mapped — fatal. */
219 if (kr
!= KERN_SUCCESS
|| (upl_size
!= io_size
)) {
220 panic("vm_map_create_upl failed with %d\n", kr
);
223 if (flags
& SWAP_READ
) {
229 upl_control_flags
| UPL_IGNORE_VALID_PAGE_CHECK
,
233 printf("vm_swapfile_io: vnode_pagein failed with %d (vp: %p, offset: 0x%llx, size:%llu)\n", error
, vp
, offset
, io_size
);
235 printf("vm_swapfile_io: vnode_pagein failed with %d.\n", error
);
249 printf("vm_swapfile_io: vnode_pageout failed with %d (vp: %p, offset: 0x%llx, size:%llu)\n", error
, vp
, offset
, io_size
);
251 printf("vm_swapfile_io: vnode_pageout failed with %d.\n", error
);
/* vn_rdwr path: synchronous, uncached, unit I/O dispatched as swap traffic. */
259 ctx
= vfs_context_kernel();
261 error
= vn_rdwr((flags
& SWAP_READ
) ? UIO_READ
: UIO_WRITE
, vp
, (caddr_t
)start
, io_size
, offset
,
262 UIO_SYSSPACE
, IO_SYNC
| IO_NODELOCKED
| IO_UNIT
| IO_NOCACHE
| IO_SWAP_DISPATCH
, vfs_context_ucred(ctx
), (int *) 0, vfs_context_proc(ctx
));
265 printf("vn_rdwr: Swap I/O failed with %d\n", error
);
272 #define MAX_BATCH_TO_TRIM 256
274 #define ROUTE_ONLY 0x10 /* if corestorage is present, tell it to just pass */
275 /* the DKIOUNMAP command through w/o acting on it */
276 /* this is used by the compressed swap system to reclaim empty space */
/*
 * vnode_trim_list(): issue unmap (TRIM) ioctls to the underlying
 * device for every extent backing the logical ranges in trim_list
 * 'tl'.  Uses _DKIOCCSUNMAP when the mount reports CoreStorage unmap
 * support (with ROUTE_ONLY pass-through when 'route_only' is TRUE),
 * otherwise plain DKIOCUNMAP.  Extents are batched, at most
 * MAX_BATCH_TO_TRIM per ioctl.
 *
 * NOTE(review): incomplete extraction — the declarations of 'error',
 * 'trim_index', 'trimmed', 'devvp' and 'unmap', the outer loop over
 * the trim_list, error/early-exit paths, the return statement and
 * closing braces are not visible in this chunk.
 */
279 u_int32_t
vnode_trim_list (vnode_t vp
, struct trim_list
*tl
, boolean_t route_only
)
283 u_int32_t blocksize
= 0;
285 dk_extent_t
*extents
;
287 _dk_cs_unmap_t cs_unmap
;
/* Nothing to do unless the device advertises unmap support. */
289 if ( !(vp
->v_mount
->mnt_ioflags
& MNT_IOFLAGS_UNMAP_SUPPORTED
))
296 * Get the underlying device vnode and physical block size
298 devvp
= vp
->v_mount
->mnt_devvp
;
299 blocksize
= vp
->v_mount
->mnt_devblocksize
;
/* One reusable batch of extent descriptors; freed at the end. */
301 extents
= kalloc(sizeof(dk_extent_t
) * MAX_BATCH_TO_TRIM
);
/* Pick the ioctl flavor up front: CoreStorage unmap vs. plain unmap. */
303 if (vp
->v_mount
->mnt_ioflags
& MNT_IOFLAGS_CSUNMAP_SUPPORTED
) {
304 memset (&cs_unmap
, 0, sizeof(_dk_cs_unmap_t
));
305 cs_unmap
.extents
= extents
;
307 if (route_only
== TRUE
)
308 cs_unmap
.options
= ROUTE_ONLY
;
310 memset (&unmap
, 0, sizeof(dk_unmap_t
));
311 unmap
.extents
= extents
;
315 daddr64_t io_blockno
; /* Block number corresponding to the start of the extent */
316 size_t io_bytecount
; /* Number of bytes in current extent for the specified range */
318 size_t remaining_length
;
319 off_t current_offset
;
321 current_offset
= tl
->tl_offset
;
322 remaining_length
= tl
->tl_length
;
326 * We may not get the entire range from tl_offset -> tl_offset+tl_length in a single
327 * extent from the blockmap call. Keep looping/going until we are sure we've hit
328 * the whole range or if we encounter an error.
330 while (trimmed
< tl
->tl_length
) {
332 * VNOP_BLOCKMAP will tell us the logical to physical block number mapping for the
333 * specified offset. It returns blocks in contiguous chunks, so if the logical range is
334 * broken into multiple extents, it must be called multiple times, increasing the offset
335 * in each call to ensure that the entire range is covered.
337 error
= VNOP_BLOCKMAP (vp
, current_offset
, remaining_length
,
338 &io_blockno
, &io_bytecount
, NULL
, VNODE_READ
, NULL
);
/* Convert the device block number to a byte offset for the extent. */
344 extents
[trim_index
].offset
= (uint64_t) io_blockno
* (u_int64_t
) blocksize
;
345 extents
[trim_index
].length
= io_bytecount
;
/* Batch full: flush the accumulated extents to the device now. */
349 if (trim_index
== MAX_BATCH_TO_TRIM
) {
351 if (vp
->v_mount
->mnt_ioflags
& MNT_IOFLAGS_CSUNMAP_SUPPORTED
) {
352 cs_unmap
.extentsCount
= trim_index
;
353 error
= VNOP_IOCTL(devvp
, _DKIOCCSUNMAP
, (caddr_t
)&cs_unmap
, 0, vfs_context_kernel());
355 unmap
.extentsCount
= trim_index
;
356 error
= VNOP_IOCTL(devvp
, DKIOCUNMAP
, (caddr_t
)&unmap
, 0, vfs_context_kernel());
363 trimmed
+= io_bytecount
;
364 current_offset
+= io_bytecount
;
365 remaining_length
-= io_bytecount
;
/* Flush any partial batch left over after the loop. */
370 if (vp
->v_mount
->mnt_ioflags
& MNT_IOFLAGS_CSUNMAP_SUPPORTED
) {
371 cs_unmap
.extentsCount
= trim_index
;
372 error
= VNOP_IOCTL(devvp
, _DKIOCCSUNMAP
, (caddr_t
)&cs_unmap
, 0, vfs_context_kernel());
374 unmap
.extentsCount
= trim_index
;
375 error
= VNOP_IOCTL(devvp
, DKIOCUNMAP
, (caddr_t
)&unmap
, 0, vfs_context_kernel());
379 kfree(extents
, sizeof(dk_extent_t
) * MAX_BATCH_TO_TRIM
);