#include <stdbool.h>
+#include <vfs/vfs_disk_conditioner.h>
+
#if 0
#undef KERNEL_DEBUG
#define KERNEL_DEBUG KERNEL_DEBUG_CONSTANT
#define WRITE_BEHIND 1
#define WRITE_BEHIND_SSD 1
+#if CONFIG_EMBEDDED
+#define PREFETCH 1
+#define PREFETCH_SSD 1
+uint32_t speculative_prefetch_max = (2048 * 1024); /* maximum bytes in a speculative read-ahead */
+uint32_t speculative_prefetch_max_iosize = (512 * 1024); /* maximum I/O size to use in a speculative read-ahead */
+#else
#define PREFETCH 3
#define PREFETCH_SSD 2
uint32_t speculative_prefetch_max = (MAX_UPL_SIZE_BYTES * 3); /* maximum bytes in a speculative read-ahead */
uint32_t speculative_prefetch_max_iosize = (512 * 1024); /* maximum I/O size to use in a speculative read-ahead on SSDs */
+#endif
#define IO_SCALE(vp, base) (vp->v_mount->mnt_ioscale * (base))
#define MAX_CLUSTER_SIZE(vp) (cluster_max_io_size(vp->v_mount, CL_WRITE))
-#define MAX_PREFETCH(vp, size, is_ssd) (size * IO_SCALE(vp, ((is_ssd && !ignore_is_ssd) ? PREFETCH_SSD : PREFETCH)))
+#define MAX_PREFETCH(vp, size, is_ssd) (size * IO_SCALE(vp, ((is_ssd) ? PREFETCH_SSD : PREFETCH)))
-int ignore_is_ssd = 0;
int speculative_reads_disabled = 0;
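/*
 * A minimal sketch of how the constants above combine; the helper name is
 * hypothetical.  MAX_PREFETCH() scales the base cluster I/O size by
 * mnt_ioscale and by PREFETCH or PREFETCH_SSD, and callers then clamp the
 * result against speculative_prefetch_max, mirroring the call sites later
 * in this file.
 */
static uint32_t
example_prefetch_ceiling(uint32_t base_io_size, uint32_t ioscale, bool is_ssd)
{
	uint32_t max_prefetch;

	/* same expansion as MAX_PREFETCH(vp, size, is_ssd) */
	max_prefetch = base_io_size * ioscale * (is_ssd ? PREFETCH_SSD : PREFETCH);

	/* callers cap the ceiling at the global limit */
	if (max_prefetch > speculative_prefetch_max)
		max_prefetch = speculative_prefetch_max;

	return max_prefetch;
}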
/*
size_t io_size;
int (*bootcache_check_fn)(dev_t device, u_int64_t blkno) = bootcache_contains_block;
- if (bootcache_check_fn) {
- if (VNOP_BLOCKMAP(vp, f_offset, PAGE_SIZE, &blkno, &io_size, NULL, VNODE_READ, NULL))
+ if (bootcache_check_fn && vp->v_mount && vp->v_mount->mnt_devvp) {
+ if (VNOP_BLOCKMAP(vp, f_offset, PAGE_SIZE, &blkno, &io_size, NULL, VNODE_READ | VNODE_BLOCKMAP_NO_TRACK, NULL))
return(0);
if (io_size == 0)
} else {
max_cluster_size = MAX_CLUSTER_SIZE(vp);
- if ((vp->v_mount->mnt_kern_flag & MNTK_SSD) && !ignore_is_ssd)
+ if (disk_conditioner_mount_is_ssd(vp->v_mount))
scale = WRITE_THROTTLE_SSD;
else
scale = WRITE_THROTTLE;
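/*
 * disk_conditioner_mount_is_ssd() replaces the raw
 * (mnt_kern_flag & MNTK_SSD) checks throughout this file.  Routing the
 * query through <vfs/vfs_disk_conditioner.h> lets the disk conditioner
 * report a conditioned device type (e.g. simulating spinning media on an
 * SSD) instead of the physical one, which is presumably also why the
 * ignore_is_ssd override is deleted above.
 */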
* Create a UPL to lock the pages in the cache whilst the
* write is in progress.
*/
- ubc_create_upl(vp, f_offset, non_rounded_size, &cached_upl,
- NULL, UPL_SET_LITE);
+ ubc_create_upl_kernel(vp, f_offset, non_rounded_size, &cached_upl,
+ NULL, UPL_SET_LITE, VM_KERN_MEMORY_FILE);
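/*
 * A sketch of the pattern behind the ubc_create_upl_kernel() conversions
 * in this diff: the _kernel variant takes an explicit vm_tag_t so the
 * pages backing file-cache UPLs are attributed to VM_KERN_MEMORY_FILE in
 * kernel memory accounting.  The wrapper name is hypothetical; the
 * signature follows the call sites in this file.
 */
static kern_return_t
example_create_file_upl(vnode_t vp, off_t f_offset, int size,
    upl_t *uplp, upl_page_info_t **plp, int upl_flags)
{
	return ubc_create_upl_kernel(vp, f_offset, size, uplp, plp,
	    upl_flags, VM_KERN_MEMORY_FILE);
}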
/*
* Attach this UPL to the other UPL so that we can find it
return;
}
- max_prefetch = MAX_PREFETCH(vp, cluster_max_io_size(vp->v_mount, CL_READ), (vp->v_mount->mnt_kern_flag & MNTK_SSD));
+ max_prefetch = MAX_PREFETCH(vp, cluster_max_io_size(vp->v_mount, CL_READ), disk_conditioner_mount_is_ssd(vp->v_mount));
if (max_prefetch > speculative_prefetch_max)
max_prefetch = speculative_prefetch_max;
pages_in_pl = 0;
upl_size = upl_needed_size;
upl_flags = UPL_FILE_IO | UPL_COPYOUT_FROM | UPL_NO_SYNC |
- UPL_CLEAN_IN_PLACE | UPL_SET_INTERNAL | UPL_SET_LITE | UPL_SET_IO_WIRE
- | UPL_MEMORY_TAG_MAKE(VM_KERN_MEMORY_FILE);
+ UPL_CLEAN_IN_PLACE | UPL_SET_INTERNAL | UPL_SET_LITE | UPL_SET_IO_WIRE;
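/*
 * The same tag migration recurs at every vm_map_get_upl() and
 * vm_map_create_upl() call in this diff: the
 * UPL_MEMORY_TAG_MAKE(VM_KERN_MEMORY_FILE) bits come out of upl_flags
 * above, and the tag goes to the mapping call below as an explicit
 * argument, so the flags word no longer carries accounting state.
 */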
kret = vm_map_get_upl(map,
(vm_map_offset_t)(iov_base & ~((user_addr_t)PAGE_MASK)),
NULL,
&pages_in_pl,
&upl_flags,
+ VM_KERN_MEMORY_FILE,
force_data_sync);
if (kret != KERN_SUCCESS) {
pages_in_pl = 0;
upl_size = upl_needed_size;
upl_flags = UPL_FILE_IO | UPL_COPYOUT_FROM | UPL_NO_SYNC |
- UPL_CLEAN_IN_PLACE | UPL_SET_INTERNAL | UPL_SET_LITE | UPL_SET_IO_WIRE
- | UPL_MEMORY_TAG_MAKE(VM_KERN_MEMORY_FILE);
+ UPL_CLEAN_IN_PLACE | UPL_SET_INTERNAL | UPL_SET_LITE | UPL_SET_IO_WIRE;
vm_map_t map = UIO_SEG_IS_USER_SPACE(uio->uio_segflg) ? current_map() : kernel_map;
kret = vm_map_get_upl(map,
(vm_map_offset_t)(iov_base & ~((user_addr_t)PAGE_MASK)),
- &upl_size, &upl[cur_upl], NULL, &pages_in_pl, &upl_flags, 0);
+ &upl_size, &upl[cur_upl], NULL, &pages_in_pl, &upl_flags, VM_KERN_MEMORY_FILE, 0);
if (kret != KERN_SUCCESS) {
/*
* The UPL_WILL_MODIFY flag lets the UPL subsystem know
* that we intend to modify these pages.
*/
- kret = ubc_create_upl(vp,
+ kret = ubc_create_upl_kernel(vp,
upl_f_offset,
upl_size,
&upl,
&pl,
- UPL_SET_LITE | (( uio!=NULL && (uio->uio_flags & UIO_FLAGS_IS_COMPRESSED_FILE)) ? 0 : UPL_WILL_MODIFY));
+ UPL_SET_LITE | ((uio != NULL && (uio->uio_flags & UIO_FLAGS_IS_COMPRESSED_FILE)) ? 0 : UPL_WILL_MODIFY),
+ VM_KERN_MEMORY_FILE);
if (kret != KERN_SUCCESS)
panic("cluster_write_copy: failed to get pagelist");
if (retval == 0) {
int cl_index;
int ret_cluster_try_push;
+ int do_zeroing = 1;
- io_size += start_offset;
+
+ io_size += start_offset;
+
- if ((upl_f_offset + io_size) >= newEOF && (u_int)io_size < upl_size) {
- /*
+ /* Restrict the EOF tail zeroing on APFS: skip it when the write shrinks the file */
+ if ((vnode_tag(vp) == VT_APFS) && (newEOF < oldEOF)) {
+ do_zeroing = 0;
+ }
+
+ if (do_zeroing && (upl_f_offset + io_size) >= newEOF && (u_int)io_size < upl_size) {
+
+ /*
* if we're extending the file with this write
* we'll zero fill the rest of the page so that
* if the file gets extended again in such a way as to leave a
 * hole starting at this EOF, we'll have zeros in the correct spot
*/
- cluster_zero(upl, io_size, upl_size - io_size, NULL);
+ cluster_zero(upl, io_size, upl_size - io_size, NULL);
}
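/*
 * A worked example with hypothetical values: for a 4 KB page, a 2560-byte
 * write at start_offset 512 gives io_size = 3072 after the addition above;
 * if that write lands at the new EOF, bytes [3072, 4096) of the page are
 * zeroed so a later hole that begins at this EOF reads back as zeros.
 */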
/*
* release the upl now if we hold one since...
n = 0;
if (n == 0) {
- if (vp->v_mount->mnt_kern_flag & MNTK_SSD)
+ if (disk_conditioner_mount_is_ssd(vp->v_mount))
n = WRITE_BEHIND_SSD;
else
n = WRITE_BEHIND;
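/*
 * Note that WRITE_BEHIND and WRITE_BEHIND_SSD are both defined as 1 at the
 * top of this excerpt, so this branch currently selects the same
 * write-behind depth either way; the split is presumably kept so the two
 * device classes can be tuned independently.
 */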
bflag |= CL_ENCRYPTED;
max_io_size = cluster_max_io_size(vp->v_mount, CL_READ);
- max_prefetch = MAX_PREFETCH(vp, max_io_size, (vp->v_mount->mnt_kern_flag & MNTK_SSD));
+ max_prefetch = MAX_PREFETCH(vp, max_io_size, disk_conditioner_mount_is_ssd(vp->v_mount));
max_rd_size = max_prefetch;
last_request_offset = uio->uio_offset + io_req_size;
KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 33)) | DBG_FUNC_START,
upl, (int)upl_f_offset, upl_size, start_offset, 0);
- kret = ubc_create_upl(vp,
+ kret = ubc_create_upl_kernel(vp,
upl_f_offset,
upl_size,
&upl,
&pl,
- UPL_FILE_IO | UPL_SET_LITE);
+ UPL_FILE_IO | UPL_SET_LITE,
+ VM_KERN_MEMORY_FILE);
if (kret != KERN_SUCCESS)
panic("cluster_read_copy: failed to get pagelist");
for (force_data_sync = 0; force_data_sync < 3; force_data_sync++) {
pages_in_pl = 0;
upl_size = upl_needed_size;
- upl_flags = UPL_FILE_IO | UPL_NO_SYNC | UPL_SET_INTERNAL | UPL_SET_LITE | UPL_SET_IO_WIRE
- | UPL_MEMORY_TAG_MAKE(VM_KERN_MEMORY_FILE);
+ upl_flags = UPL_FILE_IO | UPL_NO_SYNC | UPL_SET_INTERNAL | UPL_SET_LITE | UPL_SET_IO_WIRE;
if (no_zero_fill)
upl_flags |= UPL_NOZEROFILL;
if (force_data_sync)
kret = vm_map_create_upl(map,
(vm_map_offset_t)(iov_base & ~((user_addr_t)PAGE_MASK)),
- &upl_size, &upl, NULL, &pages_in_pl, &upl_flags);
+ &upl_size, &upl, NULL, &pages_in_pl, &upl_flags, VM_KERN_MEMORY_FILE);
if (kret != KERN_SUCCESS) {
KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 72)) | DBG_FUNC_END,
pages_in_pl = 0;
upl_size = upl_needed_size;
- upl_flags = UPL_FILE_IO | UPL_NO_SYNC | UPL_CLEAN_IN_PLACE | UPL_SET_INTERNAL | UPL_SET_LITE | UPL_SET_IO_WIRE
- | UPL_MEMORY_TAG_MAKE(VM_KERN_MEMORY_FILE);
+ upl_flags = UPL_FILE_IO | UPL_NO_SYNC | UPL_CLEAN_IN_PLACE | UPL_SET_INTERNAL | UPL_SET_LITE | UPL_SET_IO_WIRE;
KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 92)) | DBG_FUNC_START,
vm_map_t map = UIO_SEG_IS_USER_SPACE(uio->uio_segflg) ? current_map() : kernel_map;
kret = vm_map_get_upl(map,
(vm_map_offset_t)(iov_base & ~((user_addr_t)PAGE_MASK)),
- &upl_size, &upl[cur_upl], NULL, &pages_in_pl, &upl_flags, 0);
+ &upl_size, &upl[cur_upl], NULL, &pages_in_pl, &upl_flags, VM_KERN_MEMORY_FILE, 0);
KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 92)) | DBG_FUNC_END,
(int)upl_offset, upl_size, io_size, kret, 0);
else
upl_size = (u_int32_t)iov_len;
- upl_flags = UPL_QUERY_OBJECT_TYPE | UPL_MEMORY_TAG_MAKE(VM_KERN_MEMORY_FILE);
+ upl_flags = UPL_QUERY_OBJECT_TYPE;
vm_map_t map = UIO_SEG_IS_USER_SPACE(uio->uio_segflg) ? current_map() : kernel_map;
if ((vm_map_get_upl(map,
(vm_map_offset_t)(iov_base & ~((user_addr_t)PAGE_MASK)),
- &upl_size, &upl, NULL, NULL, &upl_flags, 0)) != KERN_SUCCESS) {
+ &upl_size, &upl, NULL, NULL, &upl_flags, VM_KERN_MEMORY_FILE, 0)) != KERN_SUCCESS) {
/*
* the user app must have passed in an invalid address
*/
max_io_size = cluster_max_io_size(vp->v_mount, CL_READ);
- if ((vp->v_mount->mnt_kern_flag & MNTK_SSD) && !ignore_is_ssd) {
+#if CONFIG_EMBEDDED
+ if (max_io_size > speculative_prefetch_max_iosize)
+ max_io_size = speculative_prefetch_max_iosize;
+#else
+ if (disk_conditioner_mount_is_ssd(vp->v_mount)) {
if (max_io_size > speculative_prefetch_max_iosize)
max_io_size = speculative_prefetch_max_iosize;
}
+#endif
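/*
 * A minimal sketch of the clamp above (helper name hypothetical, MIN from
 * <sys/param.h>): embedded configs cap the speculative I/O size
 * unconditionally, while other configs cap it only when the mount reports
 * an SSD.
 */
static u_int32_t
example_clamp_prefetch_iosize(u_int32_t max_io_size, bool is_ssd)
{
#if CONFIG_EMBEDDED
	(void)is_ssd;	/* the clamp applies to every device class */
	return MIN(max_io_size, speculative_prefetch_max_iosize);
#else
	return is_ssd ? MIN(max_io_size, speculative_prefetch_max_iosize)
	    : max_io_size;
#endif
}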
KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 60)) | DBG_FUNC_START,
(int)f_offset, resid, (int)filesize, 0, 0);
KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 61)) | DBG_FUNC_START,
upl, (int)upl_f_offset, upl_size, start_offset, 0);
- kret = ubc_create_upl(vp,
+ kret = ubc_create_upl_kernel(vp,
upl_f_offset,
upl_size,
&upl,
&pl,
- UPL_RET_ONLY_ABSENT | UPL_SET_LITE);
+ UPL_RET_ONLY_ABSENT | UPL_SET_LITE,
+ VM_KERN_MEMORY_FILE);
if (kret != KERN_SUCCESS)
return(retval);
issued_io = 0;
else
upl_flags = UPL_COPYOUT_FROM | UPL_RET_ONLY_DIRTY | UPL_SET_LITE;
- kret = ubc_create_upl(vp,
+ kret = ubc_create_upl_kernel(vp,
upl_f_offset,
upl_size,
&upl,
&pl,
- upl_flags);
+ upl_flags,
+ VM_KERN_MEMORY_FILE);
if (kret != KERN_SUCCESS)
panic("cluster_push: failed to get pagelist");
*/
upl_flags |= UPL_FILE_IO;
}
- kret = ubc_create_upl(vp,
+ kret = ubc_create_upl_kernel(vp,
uio->uio_offset & ~PAGE_MASK_64,
PAGE_SIZE,
&upl,
&pl,
- upl_flags);
+ upl_flags,
+ VM_KERN_MEMORY_FILE);
if (kret != KERN_SUCCESS)
return(EINVAL);