/*
- * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <kern/zalloc.h>
-#include <kern/kalloc.h>
#include <libkern/libkern.h>
#include <vm/vnode_pager.h>
#include <kern/assert.h>
#include <sys/kdebug.h>
+#include <nfs/nfs_conf.h>
#include <nfs/rpcv2.h>
#include <nfs/nfsproto.h>
#include <nfs/nfs.h>
#include <vfs/vfs_disk_conditioner.h>
void
-vnode_pager_throttle()
+vnode_pager_throttle(void)
{
struct uthread *ut;
set_tier.extents = &extent;
set_tier.extentsCount = 1;
- set_tier.tier = priority;
+ set_tier.tier = (uint8_t)priority;
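+ /* Issue DKIOCSETTIER to the underlying device to apply the requested tier to this extent */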
error = VNOP_IOCTL(devvp, DKIOCSETTIER, (caddr_t)&set_tier, 0, vfs_context_kernel());
return;
isize = (int)size;
+ /*
+ * This call is non-blocking and does not ever fail but it can
+ * only be made when there is other explicit synchronization
+ * with reclaiming of the vnode which, in this path, is provided
+ * by the paging in progress counter.
+ *
+ * In addition, this may also be entered via explicit ubc_msync
+ * calls or vm_swapfile_io where the existing iocount provides
+ * the necessary synchronization. Ideally we would not take an
+ * additional iocount here in the cases where an explicit iocount
+ * has already been taken but this call doesn't cause a deadlock
+ * as other forms of vnode_get* might if this thread has already
+ * taken an iocount.
+ */
+ error = vnode_getalways_from_pager(vp);
+ if (error != 0) {
+ /* This can't happen */
+ panic("vnode_getalways returned %d for vp %p", error, vp);
+ }
+
if (isize <= 0) {
result = PAGER_ERROR;
error_ret = EINVAL;
* of its pages
*/
for (offset = upl_offset; isize; isize -= PAGE_SIZE, offset += PAGE_SIZE) {
-#if NFSCLIENT
+#if CONFIG_NFS_CLIENT
if (vp->v_tag == VT_NFS) {
/* check with nfs if page is OK to drop */
error = nfs_buf_page_inval(vp, (off_t)f_offset);
} else
-#endif
+#endif /* CONFIG_NFS_CLIENT */
{
blkno = ubc_offtoblk(vp, (off_t)f_offset);
error = buf_invalblkno(vp, blkno, 0);
* Note we must not sleep here if the buffer is busy - that is
* a lock inversion which causes deadlock.
*/
-#if NFSCLIENT
+#if CONFIG_NFS_CLIENT
if (vp->v_tag == VT_NFS) {
/* check with nfs if page is OK to drop */
error = nfs_buf_page_inval(vp, (off_t)f_offset);
} else
-#endif
+#endif /* CONFIG_NFS_CLIENT */
{
blkno = ubc_offtoblk(vp, (off_t)f_offset);
error = buf_invalblkno(vp, blkno, 0);
pg_index += num_of_pages;
}
out:
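+ /* Release the iocount taken above with vnode_getalways_from_pager() */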
+ vnode_put_from_pager(vp);
+
if (errorp) {
*errorp = error_ret;
}
ignore_valid_page_check = 1;
}
+ /*
+ * This call is non-blocking and does not ever fail but it can
+ * only be made when there is other explicit synchronization
+ * with reclaiming of the vnode which, in this path, is provided
+ * by the paging in progress counter.
+ *
+ * In addition, this may also be entered via vm_swapfile_io
+ * where the existing iocount provides the necessary synchronization.
+ * Ideally we would not take an additional iocount here in the cases
+ * where an explicit iocount has already been taken but this call
+ * doesn't cause a deadlock as other forms of vnode_get* might if
+ * this thread has already taken an iocount.
+ */
+ error = vnode_getalways_from_pager(vp);
+ if (error != 0) {
+ /* This can't happen */
+ panic("vnode_getalways returned %d for vp %p", error, vp);
+ }
+
if (UBCINFOEXISTS(vp) == 0) {
result = PAGER_ERROR;
error = PAGER_ERROR;
*/
if ((error = VNOP_PAGEIN(vp, NULL, upl_offset, (off_t)f_offset,
size, flags, vfs_context_current()))) {
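+ /* Record the pagein error on the current thread before reporting the failure to the pager */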
+ set_thread_pagein_error(current_thread(), error);
result = PAGER_ERROR;
error = PAGER_ERROR;
}
ubc_upl_abort_range(upl, (upl_offset_t) xoff, xsize, UPL_ABORT_FREE_ON_EMPTY | UPL_ABORT_ERROR);
}
}
+ set_thread_pagein_error(current_thread(), error);
result = PAGER_ERROR;
error = PAGER_ERROR;
}
}
}
out:
+ vnode_put_from_pager(vp);
+
if (errorp) {
*errorp = result;
}