/*
 * Copyright (c) 2000-2014 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Rick Macklem at The University of Guelph.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)nfs_bio.c	8.9 (Berkeley) 3/30/95
 * FreeBSD-Id: nfs_bio.c,v 1.44 1997/09/10 19:52:25 phk Exp $
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/proc_internal.h>
#include <sys/kauth.h>
#include <sys/malloc.h>
#include <sys/vnode.h>
#include <sys/dirent.h>
#include <sys/mount_internal.h>
#include <sys/kernel.h>
#include <sys/ubc_internal.h>
#include <sys/uio_internal.h>
#include <sys/kpi_mbuf.h>

#include <sys/vmparam.h>

#include <kern/clock.h>
#include <libkern/OSAtomic.h>
#include <kern/kalloc.h>
#include <kern/thread_call.h>

#include <nfs/rpcv2.h>
#include <nfs/nfsproto.h>
#include <nfs/nfs.h>
#include <nfs/nfs_gss.h>
#include <nfs/nfsmount.h>
#include <nfs/nfsnode.h>
#include <sys/buf_internal.h>

kern_return_t thread_terminate(thread_t); /* XXX */
#define NFSBUFHASH(np, lbn)	\
	(&nfsbufhashtbl[((long)(np) / sizeof(*(np)) + (int)(lbn)) & nfsbufhash])
LIST_HEAD(nfsbufhashhead, nfsbuf) *nfsbufhashtbl;
struct nfsbuffreehead nfsbuffree, nfsbuffreemeta, nfsbufdelwri;
u_long nfsbufhash;
int nfsbufcnt, nfsbufmin, nfsbufmax, nfsbufmetacnt, nfsbufmetamax;
int nfsbuffreecnt, nfsbuffreemetacnt, nfsbufdelwricnt, nfsneedbuffer;
int nfs_nbdwrite;
int nfs_buf_timer_on = 0;
thread_t nfsbufdelwrithd = NULL;

lck_grp_t *nfs_buf_lck_grp;
lck_mtx_t *nfs_buf_mutex;

#define NFSBUF_FREE_PERIOD	30	/* seconds */
#define NFSBUF_LRU_STALE	120
#define NFSBUF_META_STALE	240
/* number of nfsbufs nfs_buf_freeup() should attempt to free from nfsbuffree list */
#define LRU_TO_FREEUP			6
/* number of nfsbufs nfs_buf_freeup() should attempt to free from nfsbuffreemeta list */
#define META_TO_FREEUP			3
/* total number of nfsbufs nfs_buf_freeup() should attempt to free */
#define TOTAL_TO_FREEUP			(LRU_TO_FREEUP+META_TO_FREEUP)
/* fraction of nfsbufs nfs_buf_freeup() should attempt to free from nfsbuffree list when called from timer */
#define LRU_FREEUP_FRAC_ON_TIMER	8
/* fraction of nfsbufs nfs_buf_freeup() should attempt to free from nfsbuffreemeta list when called from timer */
#define META_FREEUP_FRAC_ON_TIMER	16
/* fraction of total nfsbufs that nfsbuffreecnt should exceed before bothering to call nfs_buf_freeup() */
#define LRU_FREEUP_MIN_FRAC		4
/* fraction of total nfsbufs that nfsbuffreemetacnt should exceed before bothering to call nfs_buf_freeup() */
#define META_FREEUP_MIN_FRAC		2

#define NFS_BUF_FREEUP() \
	do { \
		/* only call nfs_buf_freeup() if it has work to do: */ \
		if (((nfsbuffreecnt > nfsbufcnt/LRU_FREEUP_MIN_FRAC) || \
		    (nfsbuffreemetacnt > nfsbufcnt/META_FREEUP_MIN_FRAC)) && \
		    ((nfsbufcnt - TOTAL_TO_FREEUP) > nfsbufmin)) \
			nfs_buf_freeup(0); \
	} while (0)
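/*
 * Worked example of the thresholds above (illustrative numbers, not from
 * the original source): with nfsbufcnt == 1024, NFS_BUF_FREEUP() only calls
 * nfs_buf_freeup(0) once more than 1024/LRU_FREEUP_MIN_FRAC == 256 regular
 * buffers (or 1024/META_FREEUP_MIN_FRAC == 512 meta buffers) sit idle on the
 * free lists, and only if freeing TOTAL_TO_FREEUP (9) buffers would still
 * leave more than nfsbufmin allocated.
 */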
/*
 * Initialize nfsbuf lists
 */
void
nfs_nbinit(void)
{
	nfs_buf_lck_grp = lck_grp_alloc_init("nfs_buf", LCK_GRP_ATTR_NULL);
	nfs_buf_mutex = lck_mtx_alloc_init(nfs_buf_lck_grp, LCK_ATTR_NULL);

	nfsbufcnt = nfsbufmetacnt =
	nfsbuffreecnt = nfsbuffreemetacnt = nfsbufdelwricnt = 0;
	/* size nfsbufmax to cover at most half sane_size (w/default buf size) */
	nfsbufmax = (sane_size >> PAGE_SHIFT) / (2 * (NFS_RWSIZE >> PAGE_SHIFT));
	nfsbufmetamax = nfsbufmax / 4;

	nfsbufhashtbl = hashinit(nfsbufmax/4, M_TEMP, &nfsbufhash);
	TAILQ_INIT(&nfsbuffree);
	TAILQ_INIT(&nfsbuffreemeta);
	TAILQ_INIT(&nfsbufdelwri);
}
/*
 * Check periodically for stale/unused nfs bufs
 */
void
nfs_buf_timer(__unused void *param0, __unused void *param1)
{
	nfs_buf_freeup(1);

	lck_mtx_lock(nfs_buf_mutex);
	if (nfsbufcnt <= nfsbufmin) {
		nfs_buf_timer_on = 0;
		lck_mtx_unlock(nfs_buf_mutex);
		return;
	}
	lck_mtx_unlock(nfs_buf_mutex);

	nfs_interval_timer_start(nfs_buf_timer_call,
		NFSBUF_FREE_PERIOD * 1000);
}
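/*
 * Note on the timer above: it rearms itself every NFSBUF_FREE_PERIOD seconds
 * and disarms (nfs_buf_timer_on = 0) once the pool has shrunk back to
 * nfsbufmin; nfs_buf_get() restarts it when allocations push the pool above
 * that floor again.
 */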
/*
 * try to free up some excess, unused nfsbufs
 */
void
nfs_buf_freeup(int timer)
{
	struct nfsbuf *fbp;
	struct timeval now;
	int count;
	struct nfsbuffreehead nfsbuffreeup;

	TAILQ_INIT(&nfsbuffreeup);

	lck_mtx_lock(nfs_buf_mutex);

	microuptime(&now);

	FSDBG(320, nfsbufcnt, nfsbuffreecnt, nfsbuffreemetacnt, 0);

	count = timer ? nfsbuffreecnt/LRU_FREEUP_FRAC_ON_TIMER : LRU_TO_FREEUP;
	while ((nfsbufcnt > nfsbufmin) && (count-- > 0)) {
		fbp = TAILQ_FIRST(&nfsbuffree);
		if (!fbp)
			break;
		if (fbp->nb_refs)
			break;
		if (NBUFSTAMPVALID(fbp) &&
		    (fbp->nb_timestamp + (2*NFSBUF_LRU_STALE)) > now.tv_sec)
			break;
		nfs_buf_remfree(fbp);
		/* disassociate buffer from any nfsnode */
		if (fbp->nb_np) {
			if (fbp->nb_vnbufs.le_next != NFSNOLIST) {
				LIST_REMOVE(fbp, nb_vnbufs);
				fbp->nb_vnbufs.le_next = NFSNOLIST;
			}
			fbp->nb_np = NULL;
		}
		LIST_REMOVE(fbp, nb_hash);
		TAILQ_INSERT_TAIL(&nfsbuffreeup, fbp, nb_free);
		nfsbufcnt--;
	}

	count = timer ? nfsbuffreemetacnt/META_FREEUP_FRAC_ON_TIMER : META_TO_FREEUP;
	while ((nfsbufcnt > nfsbufmin) && (count-- > 0)) {
		fbp = TAILQ_FIRST(&nfsbuffreemeta);
		if (!fbp)
			break;
		if (fbp->nb_refs)
			break;
		if (NBUFSTAMPVALID(fbp) &&
		    (fbp->nb_timestamp + (2*NFSBUF_META_STALE)) > now.tv_sec)
			break;
		nfs_buf_remfree(fbp);
		/* disassociate buffer from any nfsnode */
		if (fbp->nb_np) {
			if (fbp->nb_vnbufs.le_next != NFSNOLIST) {
				LIST_REMOVE(fbp, nb_vnbufs);
				fbp->nb_vnbufs.le_next = NFSNOLIST;
			}
			fbp->nb_np = NULL;
		}
		LIST_REMOVE(fbp, nb_hash);
		TAILQ_INSERT_TAIL(&nfsbuffreeup, fbp, nb_free);
		nfsbufcnt--;
		nfsbufmetacnt--;
	}

	FSDBG(320, nfsbufcnt, nfsbuffreecnt, nfsbuffreemetacnt, 0);

	lck_mtx_unlock(nfs_buf_mutex);

	while ((fbp = TAILQ_FIRST(&nfsbuffreeup))) {
		TAILQ_REMOVE(&nfsbuffreeup, fbp, nb_free);
		/* nuke any creds */
		if (IS_VALID_CRED(fbp->nb_rcred))
			kauth_cred_unref(&fbp->nb_rcred);
		if (IS_VALID_CRED(fbp->nb_wcred))
			kauth_cred_unref(&fbp->nb_wcred);
		/* if buf was NB_META, dump buffer */
		if (ISSET(fbp->nb_flags, NB_META) && fbp->nb_data)
			kfree(fbp->nb_data, fbp->nb_bufsize);
		FREE(fbp, M_TEMP);
	}
}
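/*
 * Design note on nfs_buf_freeup(): the list surgery (unhashing victims and
 * collecting them on the local nfsbuffreeup list) happens under
 * nfs_buf_mutex, while the cred releases and kfree()/FREE() calls are
 * deferred until after the mutex is dropped, keeping the lock hold time
 * short.
 */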
/*
 * remove a buffer from the freelist
 * (must be called with nfs_buf_mutex held)
 */
void
nfs_buf_remfree(struct nfsbuf *bp)
{
	if (bp->nb_free.tqe_next == NFSNOLIST)
		panic("nfsbuf not on free list");
	if (ISSET(bp->nb_flags, NB_DELWRI)) {
		nfsbufdelwricnt--;
		TAILQ_REMOVE(&nfsbufdelwri, bp, nb_free);
	} else if (ISSET(bp->nb_flags, NB_META)) {
		nfsbuffreemetacnt--;
		TAILQ_REMOVE(&nfsbuffreemeta, bp, nb_free);
	} else {
		nfsbuffreecnt--;
		TAILQ_REMOVE(&nfsbuffree, bp, nb_free);
	}
	bp->nb_free.tqe_next = NFSNOLIST;
}
/*
 * check for existence of nfsbuf in cache
 */
boolean_t
nfs_buf_is_incore(nfsnode_t np, daddr64_t blkno)
{
	boolean_t rv;

	lck_mtx_lock(nfs_buf_mutex);
	if (nfs_buf_incore(np, blkno))
		rv = TRUE;
	else
		rv = FALSE;
	lck_mtx_unlock(nfs_buf_mutex);
	return (rv);
}
/*
 * return incore buffer (must be called with nfs_buf_mutex held)
 */
struct nfsbuf *
nfs_buf_incore(nfsnode_t np, daddr64_t blkno)
{
	/* Search hash chain */
	struct nfsbuf * bp = NFSBUFHASH(np, blkno)->lh_first;
	for (; bp != NULL; bp = bp->nb_hash.le_next)
		if ((bp->nb_lblkno == blkno) && (bp->nb_np == np)) {
			if (!ISSET(bp->nb_flags, NB_INVAL)) {
				FSDBG(547, bp, blkno, bp->nb_flags, bp->nb_np);
				return (bp);
			}
		}
	return (NULL);
}
/*
 * Check if it's OK to drop a page.
 *
 * Called by vnode_pager() on pageout request of non-dirty page.
 * We need to make sure that it's not part of a delayed write.
 * If it is, we can't let the VM drop it because we may need it
 * later when/if we need to write the data (again).
 */
int
nfs_buf_page_inval(vnode_t vp, off_t offset)
{
	struct nfsmount *nmp = VTONMP(vp);
	struct nfsbuf *bp;
	int error = 0;

	if (nfs_mount_gone(nmp))
		return (ENXIO);

	lck_mtx_lock(nfs_buf_mutex);
	bp = nfs_buf_incore(VTONFS(vp), (daddr64_t)(offset / nmp->nm_biosize));
	if (!bp)
		goto out;
	FSDBG(325, bp, bp->nb_flags, bp->nb_dirtyoff, bp->nb_dirtyend);
	if (ISSET(bp->nb_lflags, NBL_BUSY)) {
		error = EBUSY;
		goto out;
	}
	/*
	 * If there's a dirty range in the buffer, check to
	 * see if this page intersects with the dirty range.
	 * If it does, we can't let the pager drop the page.
	 */
	if (bp->nb_dirtyend > 0) {
		int start = offset - NBOFF(bp);
		if ((bp->nb_dirtyend > start) &&
		    (bp->nb_dirtyoff < (start + PAGE_SIZE))) {
			/*
			 * Before returning the bad news, move the
			 * buffer to the start of the delwri list and
			 * give the list a push to try to flush the
			 * buffer out.
			 */
			error = EBUSY;
			nfs_buf_remfree(bp);
			TAILQ_INSERT_HEAD(&nfsbufdelwri, bp, nb_free);
			nfsbufdelwricnt++;
			nfs_buf_delwri_push(1);
		}
	}
out:
	lck_mtx_unlock(nfs_buf_mutex);
	return (error);
}
/*
 * set up the UPL for a buffer
 * (must NOT be called with nfs_buf_mutex held)
 */
int
nfs_buf_upl_setup(struct nfsbuf *bp)
{
	kern_return_t kret;
	upl_t upl;
	int upl_flags;

	if (ISSET(bp->nb_flags, NB_PAGELIST))
		return (0);

	upl_flags = UPL_PRECIOUS;
	if (!ISSET(bp->nb_flags, NB_READ)) {
		/*
		 * We're doing a "write", so we intend to modify
		 * the pages we're gathering.
		 */
		upl_flags |= UPL_WILL_MODIFY;
	}
	kret = ubc_create_upl(NFSTOV(bp->nb_np), NBOFF(bp), bp->nb_bufsize,
				&upl, NULL, upl_flags);
	if (kret == KERN_INVALID_ARGUMENT) {
		/* vm object probably doesn't exist any more */
		bp->nb_pagelist = NULL;
		return (EINVAL);
	}
	if (kret != KERN_SUCCESS) {
		printf("nfs_buf_upl_setup(): failed to get pagelist %d\n", kret);
		bp->nb_pagelist = NULL;
		return (EIO);
	}

	FSDBG(538, bp, NBOFF(bp), bp->nb_bufsize, bp->nb_np);

	bp->nb_pagelist = upl;
	SET(bp->nb_flags, NB_PAGELIST);
	return (0);
}
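/*
 * Note (informal): UPL_PRECIOUS asks the VM system to treat the gathered
 * pages as the authoritative copy of the data while the buffer holds them,
 * and UPL_WILL_MODIFY additionally warns the VM that the caller intends to
 * dirty the pages (the write path), so any copy strategy can be resolved
 * up front.
 */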
/*
 * update buffer's valid/dirty info from UBC
 * (must NOT be called with nfs_buf_mutex held)
 */
void
nfs_buf_upl_check(struct nfsbuf *bp)
{
	upl_page_info_t *pl;
	off_t filesize, fileoffset;
	int i, npages;

	if (!ISSET(bp->nb_flags, NB_PAGELIST))
		return;

	npages = round_page_32(bp->nb_bufsize) / PAGE_SIZE;
	filesize = ubc_getsize(NFSTOV(bp->nb_np));
	fileoffset = NBOFF(bp);
	if (fileoffset < filesize)
		SET(bp->nb_flags, NB_CACHE);
	else
		CLR(bp->nb_flags, NB_CACHE);

	pl = ubc_upl_pageinfo(bp->nb_pagelist);
	bp->nb_valid = bp->nb_dirty = 0;

	for (i = 0; i < npages; i++, fileoffset += PAGE_SIZE_64) {
		/* anything beyond the end of the file is not valid or dirty */
		if (fileoffset >= filesize)
			break;
		if (!upl_valid_page(pl, i)) {
			CLR(bp->nb_flags, NB_CACHE);
			continue;
		}
		NBPGVALID_SET(bp, i);
		if (upl_dirty_page(pl, i))
			NBPGDIRTY_SET(bp, i);
	}
	fileoffset = NBOFF(bp);
	if (ISSET(bp->nb_flags, NB_CACHE)) {
		bp->nb_validoff = 0;
		bp->nb_validend = bp->nb_bufsize;
		if (fileoffset + bp->nb_validend > filesize)
			bp->nb_validend = filesize - fileoffset;
	} else {
		bp->nb_validoff = bp->nb_validend = -1;
	}
	FSDBG(539, bp, fileoffset, bp->nb_valid, bp->nb_dirty);
	FSDBG(539, bp->nb_validoff, bp->nb_validend, bp->nb_dirtyoff, bp->nb_dirtyend);
}
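/*
 * As the loop above shows, nb_valid and nb_dirty are per-page bitmaps:
 * bit i describes page i of the buffer (NBPGVALID_SET/NBPGDIRTY_SET),
 * while (nb_validoff, nb_validend) summarize the same information as a
 * single byte range.
 */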
/*
 * make sure that a buffer is mapped
 * (must NOT be called with nfs_buf_mutex held)
 */
int
nfs_buf_map(struct nfsbuf *bp)
{
	kern_return_t kret;

	if (bp->nb_data)
		return (0);
	if (!ISSET(bp->nb_flags, NB_PAGELIST))
		return (EINVAL);

	kret = ubc_upl_map(bp->nb_pagelist, (vm_offset_t *)&(bp->nb_data));
	if (kret != KERN_SUCCESS)
		panic("nfs_buf_map: ubc_upl_map() failed with (%d)", kret);
	if (bp->nb_data == 0)
		panic("ubc_upl_map mapped 0");
	FSDBG(540, bp, bp->nb_flags, NBOFF(bp), bp->nb_data);
	return (0);
}
/*
 * normalize an nfsbuf's valid range
 *
 * the read/write code guarantees that we'll always have a valid
 * region that is an integral number of pages.  If either end
 * of the valid range isn't page-aligned, it gets corrected
 * here as we extend the valid range through all of the
 * contiguous valid pages.
 */
void
nfs_buf_normalize_valid_range(nfsnode_t np, struct nfsbuf *bp)
{
	int pg, npg;

	/* pull validoff back to start of contiguous valid page range */
	pg = bp->nb_validoff/PAGE_SIZE;
	while (pg >= 0 && NBPGVALID(bp, pg))
		pg--;
	bp->nb_validoff = (pg+1) * PAGE_SIZE;
	/* push validend forward to end of contiguous valid page range */
	npg = bp->nb_bufsize/PAGE_SIZE;
	pg = bp->nb_validend/PAGE_SIZE;
	while (pg < npg && NBPGVALID(bp, pg))
		pg++;
	bp->nb_validend = pg * PAGE_SIZE;
	if (NBOFF(bp) + bp->nb_validend > (off_t)np->n_size)
		bp->nb_validend = np->n_size % bp->nb_bufsize;
}
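/*
 * Illustrative example (values invented for clarity): with 4K pages and an
 * 8-page buffer whose pages 2-5 are valid, a byte range of [0x2800, 0x5000)
 * is widened here to the page-aligned range [0x2000, 0x6000) covering the
 * whole contiguous run of valid pages.
 */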
/*
 * process some entries on the delayed write queue
 * (must be called with nfs_buf_mutex held)
 */
void
nfs_buf_delwri_service(void)
{
	struct nfsbuf *bp;
	nfsnode_t np;
	int error, i = 0;

	while (i < 8 && (bp = TAILQ_FIRST(&nfsbufdelwri)) != NULL) {
		np = bp->nb_np;
		nfs_buf_remfree(bp);
		nfs_buf_refget(bp);
		while ((error = nfs_buf_acquire(bp, 0, 0, 0)) == EAGAIN);
		nfs_buf_refrele(bp);
		if (error)
			break;
		if (!bp->nb_np) {
			/* buffer is no longer valid */
			nfs_buf_drop(bp);
			continue;
		}
		if (ISSET(bp->nb_flags, NB_NEEDCOMMIT))
			nfs_buf_check_write_verifier(np, bp);
		if (ISSET(bp->nb_flags, NB_NEEDCOMMIT)) {
			/* put buffer at end of delwri list */
			TAILQ_INSERT_TAIL(&nfsbufdelwri, bp, nb_free);
			nfsbufdelwricnt++;
			nfs_buf_drop(bp);
			lck_mtx_unlock(nfs_buf_mutex);
			nfs_flushcommits(np, 1);
		} else {
			SET(bp->nb_flags, NB_ASYNC);
			lck_mtx_unlock(nfs_buf_mutex);
			nfs_buf_write(bp);
		}
		i++;
		lck_mtx_lock(nfs_buf_mutex);
	}
}
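/*
 * Note: nfs_buf_delwri_service() handles at most 8 buffers per call, and a
 * buffer still flagged NB_NEEDCOMMIT is requeued at the tail and pushed
 * through nfs_flushcommits() rather than rewritten in full.
 */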
/*
 * thread to service the delayed write queue when asked
 */
void
nfs_buf_delwri_thread(__unused void *arg, __unused wait_result_t wr)
{
	struct timespec ts = { 30, 0 };
	int error = 0;

	lck_mtx_lock(nfs_buf_mutex);
	while (!error) {
		nfs_buf_delwri_service();
		error = msleep(&nfsbufdelwrithd, nfs_buf_mutex, 0, "nfsbufdelwri", &ts);
	}
	nfsbufdelwrithd = NULL;
	lck_mtx_unlock(nfs_buf_mutex);
	thread_terminate(nfsbufdelwrithd);
}
/*
 * try to push out some delayed/uncommitted writes
 * ("locked" indicates whether nfs_buf_mutex is already held)
 */
void
nfs_buf_delwri_push(int locked)
{
	if (TAILQ_EMPTY(&nfsbufdelwri))
		return;
	if (!locked)
		lck_mtx_lock(nfs_buf_mutex);
	/* wake up the delayed write service thread */
	if (nfsbufdelwrithd)
		wakeup(&nfsbufdelwrithd);
	else if (kernel_thread_start(nfs_buf_delwri_thread, NULL, &nfsbufdelwrithd) == KERN_SUCCESS)
		thread_deallocate(nfsbufdelwrithd);
	/* otherwise, try to do some of the work ourselves */
	if (!nfsbufdelwrithd)
		nfs_buf_delwri_service();
	if (!locked)
		lck_mtx_unlock(nfs_buf_mutex);
}
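/*
 * Design note: kernel_thread_start() returns with an extra reference on the
 * new thread in nfsbufdelwrithd; the immediate thread_deallocate() drops
 * that reference while the thread keeps running, idling in msleep() above
 * until work arrives or its 30-second timeout expires.
 */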
/*
 * Returns errno on error, 0 otherwise.
 * Any buffer is returned in *bpp.
 *
 * If NBLK_ONLYVALID is set, only return buffer if found in cache.
 * If NBLK_NOWAIT is set, don't wait for the buffer if it's marked BUSY.
 *
 * Check for existence of buffer in cache.
 * Or attempt to reuse a buffer from one of the free lists.
 * Or allocate a new buffer if we haven't already hit max allocation.
 * Or wait for a free buffer.
 *
 * If available buffer found, prepare it, and return it.
 *
 * If the calling process is interrupted by a signal for
 * an interruptible mount point, return EINTR.
 */
int
nfs_buf_get(
	nfsnode_t np,
	daddr64_t blkno,
	uint32_t size,
	thread_t thd,
	int flags,
	struct nfsbuf **bpp)
{
	vnode_t vp = NFSTOV(np);
	struct nfsmount *nmp = VTONMP(vp);
	struct nfsbuf *bp;
	uint32_t bufsize;
	int slpflag = PCATCH;
	int operation = (flags & NBLK_OPMASK);
	int error = 0;
	struct timespec ts;

	FSDBG_TOP(541, np, blkno, size, flags);
	*bpp = NULL;

	bufsize = size;
	if (bufsize > NFS_MAXBSIZE)
		panic("nfs_buf_get: buffer larger than NFS_MAXBSIZE requested");

	if (nfs_mount_gone(nmp)) {
		FSDBG_BOT(541, np, blkno, 0, ENXIO);
		return (ENXIO);
	}

	if (!UBCINFOEXISTS(vp)) {
		operation = NBLK_META;
	} else if (bufsize < (uint32_t)nmp->nm_biosize) {
		/* reg files should always have biosize blocks */
		bufsize = nmp->nm_biosize;
	}

	/* if NBLK_WRITE, check for too many delayed/uncommitted writes */
	if ((operation == NBLK_WRITE) && (nfs_nbdwrite > NFS_A_LOT_OF_DELAYED_WRITES)) {
		FSDBG_TOP(542, np, blkno, nfs_nbdwrite, NFS_A_LOT_OF_DELAYED_WRITES);

		/* poke the delwri list */
		nfs_buf_delwri_push(0);

		/* sleep to let other threads run... */
		tsleep(&nfs_nbdwrite, PCATCH, "nfs_nbdwrite", 1);
		FSDBG_BOT(542, np, blkno, nfs_nbdwrite, NFS_A_LOT_OF_DELAYED_WRITES);
	}
loop:
	lck_mtx_lock(nfs_buf_mutex);

	/* wait for any buffer invalidation/flushing to complete */
	while (np->n_bflag & NBINVALINPROG) {
		np->n_bflag |= NBINVALWANT;
		ts.tv_sec = 2;
		ts.tv_nsec = 0;
		msleep(&np->n_bflag, nfs_buf_mutex, slpflag, "nfs_buf_get_invalwait", &ts);
		if ((error = nfs_sigintr(VTONMP(vp), NULL, thd, 0))) {
			lck_mtx_unlock(nfs_buf_mutex);
			FSDBG_BOT(541, np, blkno, 0, error);
			return (error);
		}
		if (np->n_bflag & NBINVALINPROG)
			slpflag = 0;
	}
	/* check for existence of nfsbuf in cache */
	if ((bp = nfs_buf_incore(np, blkno))) {
		/* if busy, set wanted and wait */
		if (ISSET(bp->nb_lflags, NBL_BUSY)) {
			if (flags & NBLK_NOWAIT) {
				lck_mtx_unlock(nfs_buf_mutex);
				FSDBG_BOT(541, np, blkno, bp, 0xbcbcbcbc);
				return (0);
			}
			FSDBG_TOP(543, np, blkno, bp, bp->nb_flags);
			SET(bp->nb_lflags, NBL_WANTED);

			ts.tv_sec = 2;
			ts.tv_nsec = 0;
			msleep(bp, nfs_buf_mutex, slpflag|(PRIBIO+1)|PDROP,
				"nfsbufget", (slpflag == PCATCH) ? NULL : &ts);
			slpflag = 0;
			FSDBG_BOT(543, np, blkno, bp, bp->nb_flags);
			if ((error = nfs_sigintr(VTONMP(vp), NULL, thd, 0))) {
				FSDBG_BOT(541, np, blkno, 0, error);
				return (error);
			}
			goto loop;
		}
		if (bp->nb_bufsize != bufsize)
			panic("nfsbuf size mismatch");
		SET(bp->nb_lflags, NBL_BUSY);
		SET(bp->nb_flags, NB_CACHE);
		nfs_buf_remfree(bp);
		/* additional paranoia: */
		if (ISSET(bp->nb_flags, NB_PAGELIST))
			panic("pagelist buffer was not busy");
		goto buffer_setup;
	}
	if (flags & NBLK_ONLYVALID) {
		lck_mtx_unlock(nfs_buf_mutex);
		FSDBG_BOT(541, np, blkno, 0, 0x0000cace);
		return (0);
	}
	/*
	 * where to get a free buffer:
	 * - if meta and maxmeta reached, must reuse meta
	 * - alloc new if we haven't reached min bufs
	 * - if free lists are NOT empty
	 *   - if free list is stale, use it
	 *   - else if freemeta list is stale, use it
	 *   - else if max bufs allocated, use least-time-to-stale
	 * - alloc new if we haven't reached max allowed
	 * - start clearing out delwri list and try again
	 */
	if ((operation == NBLK_META) && (nfsbufmetacnt >= nfsbufmetamax)) {
		/* if we've hit max meta buffers, must reuse a meta buffer */
		bp = TAILQ_FIRST(&nfsbuffreemeta);
	} else if ((nfsbufcnt > nfsbufmin) &&
	    (!TAILQ_EMPTY(&nfsbuffree) || !TAILQ_EMPTY(&nfsbuffreemeta))) {
		/* try to pull an nfsbuf off a free list */
		struct nfsbuf *lrubp, *metabp;
		struct timeval now;
		microuptime(&now);

		/* if the next LRU or META buffer is invalid or stale, use it */
		lrubp = TAILQ_FIRST(&nfsbuffree);
		if (lrubp && (!NBUFSTAMPVALID(lrubp) ||
		    ((lrubp->nb_timestamp + NFSBUF_LRU_STALE) < now.tv_sec)))
			bp = lrubp;
		metabp = TAILQ_FIRST(&nfsbuffreemeta);
		if (!bp && metabp && (!NBUFSTAMPVALID(metabp) ||
		    ((metabp->nb_timestamp + NFSBUF_META_STALE) < now.tv_sec)))
			bp = metabp;

		if (!bp && (nfsbufcnt >= nfsbufmax)) {
			/* we've already allocated all bufs, so */
			/* choose the buffer that'll go stale first */
			if (!metabp)
				bp = lrubp;
			else if (!lrubp)
				bp = metabp;
			else {
				int32_t lru_stale_time, meta_stale_time;
				lru_stale_time = lrubp->nb_timestamp + NFSBUF_LRU_STALE;
				meta_stale_time = metabp->nb_timestamp + NFSBUF_META_STALE;
				if (lru_stale_time <= meta_stale_time)
					bp = lrubp;
				else
					bp = metabp;
			}
		}
	}

	if (bp) {
		/* we have a buffer to reuse */
		FSDBG(544, np, blkno, bp, bp->nb_flags);
		nfs_buf_remfree(bp);
		if (ISSET(bp->nb_flags, NB_DELWRI))
			panic("nfs_buf_get: delwri");
		SET(bp->nb_lflags, NBL_BUSY);
		/* disassociate buffer from previous nfsnode */
		if (bp->nb_np) {
			if (bp->nb_vnbufs.le_next != NFSNOLIST) {
				LIST_REMOVE(bp, nb_vnbufs);
				bp->nb_vnbufs.le_next = NFSNOLIST;
			}
			bp->nb_np = NULL;
		}
		LIST_REMOVE(bp, nb_hash);
		/* nuke any creds we're holding */
		if (IS_VALID_CRED(bp->nb_rcred))
			kauth_cred_unref(&bp->nb_rcred);
		if (IS_VALID_CRED(bp->nb_wcred))
			kauth_cred_unref(&bp->nb_wcred);
		/* if buf will no longer be NB_META, dump old buffer */
		if (operation == NBLK_META) {
			if (!ISSET(bp->nb_flags, NB_META))
				nfsbufmetacnt++;
		} else if (ISSET(bp->nb_flags, NB_META)) {
			if (bp->nb_data) {
				kfree(bp->nb_data, bp->nb_bufsize);
				bp->nb_data = NULL;
			}
			nfsbufmetacnt--;
		}
		/* re-init buf fields */
		bp->nb_error = 0;
		bp->nb_validoff = bp->nb_validend = -1;
		bp->nb_dirtyoff = bp->nb_dirtyend = 0;
		bp->nb_valid = 0;
		bp->nb_dirty = 0;
	} else {
		/* no buffer to reuse */
		if ((nfsbufcnt < nfsbufmax) &&
		    ((operation != NBLK_META) || (nfsbufmetacnt < nfsbufmetamax))) {
			/* just alloc a new one */
			MALLOC(bp, struct nfsbuf *, sizeof(struct nfsbuf), M_TEMP, M_WAITOK);
			if (!bp) {
				lck_mtx_unlock(nfs_buf_mutex);
				FSDBG_BOT(541, np, blkno, 0, error);
				return (ENOMEM);
			}
			nfsbufcnt++;

			/*
			 * If any excess bufs, make sure the timer
			 * is running to free them up later.
			 */
			if (nfsbufcnt > nfsbufmin && !nfs_buf_timer_on) {
				nfs_buf_timer_on = 1;
				nfs_interval_timer_start(nfs_buf_timer_call,
					NFSBUF_FREE_PERIOD * 1000);
			}

			if (operation == NBLK_META)
				nfsbufmetacnt++;
			/* init nfsbuf */
			bzero(bp, sizeof(*bp));
			bp->nb_free.tqe_next = NFSNOLIST;
			bp->nb_validoff = bp->nb_validend = -1;
			FSDBG(545, np, blkno, bp, 0);
		} else {
			/* too many bufs... wait for buffers to free up */
			FSDBG_TOP(546, np, blkno, nfsbufcnt, nfsbufmax);

			/* poke the delwri list */
			nfs_buf_delwri_push(1);

			nfsneedbuffer = 1;
			msleep(&nfsneedbuffer, nfs_buf_mutex, PCATCH|PDROP, "nfsbufget", NULL);
			FSDBG_BOT(546, np, blkno, nfsbufcnt, nfsbufmax);
			if ((error = nfs_sigintr(VTONMP(vp), NULL, thd, 0))) {
				FSDBG_BOT(541, np, blkno, 0, error);
				return (error);
			}
			goto loop;
		}
	}

	/* set up nfsbuf */
	SET(bp->nb_lflags, NBL_BUSY);
	bp->nb_flags = 0;
	bp->nb_lblkno = blkno;
	/* insert buf in hash */
	LIST_INSERT_HEAD(NFSBUFHASH(np, blkno), bp, nb_hash);
	/* associate buffer with new nfsnode */
	bp->nb_np = np;
	LIST_INSERT_HEAD(&np->n_cleanblkhd, bp, nb_vnbufs);

buffer_setup:

	/* unlock hash/free lists now */
	lck_mtx_unlock(nfs_buf_mutex);

	switch (operation) {
	case NBLK_META:
		SET(bp->nb_flags, NB_META);
		if ((bp->nb_bufsize != bufsize) && bp->nb_data) {
			kfree(bp->nb_data, bp->nb_bufsize);
			bp->nb_data = NULL;
			bp->nb_validoff = bp->nb_validend = -1;
			bp->nb_dirtyoff = bp->nb_dirtyend = 0;
			bp->nb_valid = 0;
			bp->nb_dirty = 0;
			CLR(bp->nb_flags, NB_CACHE);
		}
		if (!bp->nb_data)
			bp->nb_data = kalloc(bufsize);
		if (!bp->nb_data) {
			/* Ack! couldn't allocate the data buffer! */
			/* clean up buffer and return error */
			lck_mtx_lock(nfs_buf_mutex);
			LIST_REMOVE(bp, nb_vnbufs);
			bp->nb_vnbufs.le_next = NFSNOLIST;
			bp->nb_np = NULL;
			/* invalidate usage timestamp to allow immediate freeing */
			NBUFSTAMPINVALIDATE(bp);
			if (bp->nb_free.tqe_next != NFSNOLIST)
				panic("nfsbuf on freelist");
			TAILQ_INSERT_HEAD(&nfsbuffree, bp, nb_free);
			nfsbuffreecnt++;
			lck_mtx_unlock(nfs_buf_mutex);
			FSDBG_BOT(541, np, blkno, 0xb00, ENOMEM);
			return (ENOMEM);
		}
		bp->nb_bufsize = bufsize;
		break;

	case NBLK_READ:
	case NBLK_WRITE:
		/*
		 * Set or clear NB_READ now to let the UPL subsystem know
		 * if we intend to modify the pages or not.
		 */
		if (operation == NBLK_READ) {
			SET(bp->nb_flags, NB_READ);
		} else {
			CLR(bp->nb_flags, NB_READ);
		}
		if (bufsize < PAGE_SIZE)
			bufsize = PAGE_SIZE;
		bp->nb_bufsize = bufsize;
		bp->nb_validoff = bp->nb_validend = -1;

		if (UBCINFOEXISTS(vp)) {
			/* set up upl */
			if (nfs_buf_upl_setup(bp)) {
				/* unable to create upl */
				/* vm object must no longer exist */
				/* clean up buffer and return error */
				lck_mtx_lock(nfs_buf_mutex);
				LIST_REMOVE(bp, nb_vnbufs);
				bp->nb_vnbufs.le_next = NFSNOLIST;
				bp->nb_np = NULL;
				/* invalidate usage timestamp to allow immediate freeing */
				NBUFSTAMPINVALIDATE(bp);
				if (bp->nb_free.tqe_next != NFSNOLIST)
					panic("nfsbuf on freelist");
				TAILQ_INSERT_HEAD(&nfsbuffree, bp, nb_free);
				nfsbuffreecnt++;
				lck_mtx_unlock(nfs_buf_mutex);
				FSDBG_BOT(541, np, blkno, 0x2bc, EIO);
				return (EIO);
			}
			nfs_buf_upl_check(bp);
		}
		break;

	default:
		panic("nfs_buf_get: %d unknown operation", operation);
	}

	*bpp = bp;

	FSDBG_BOT(541, np, blkno, bp, bp->nb_flags);

	return (0);
}
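/*
 * Typical call pattern (sketch, based on the read path later in this file):
 *
 *	error = nfs_buf_get(np, lbn, biosize, thd, NBLK_READ, &bp);
 *
 * On success the returned buffer is BUSY and must eventually be handed
 * back via nfs_buf_release() (directly or through nfs_buf_iodone()).
 */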
/*
 * release a buffer
 */
void
nfs_buf_release(struct nfsbuf *bp, int freeup)
{
	nfsnode_t np = bp->nb_np;
	vnode_t vp;
	struct timeval now;
	int wakeup_needbuffer, wakeup_buffer, wakeup_nbdwrite;

	FSDBG_TOP(548, bp, NBOFF(bp), bp->nb_flags, bp->nb_data);
	FSDBG(548, bp->nb_validoff, bp->nb_validend, bp->nb_dirtyoff, bp->nb_dirtyend);
	FSDBG(548, bp->nb_valid, 0, bp->nb_dirty, 0);

	vp = np ? NFSTOV(np) : NULL;
	if (vp && UBCINFOEXISTS(vp) && bp->nb_bufsize) {
		int upl_flags = 0, rv;
		upl_t upl;
		uint32_t i;

		if (!ISSET(bp->nb_flags, NB_PAGELIST) && !ISSET(bp->nb_flags, NB_INVAL)) {
			rv = nfs_buf_upl_setup(bp);
			if (rv)
				printf("nfs_buf_release: upl create failed %d\n", rv);
			else
				nfs_buf_upl_check(bp);
		}
		upl = bp->nb_pagelist;
		if (!upl)
			goto pagelist_cleanup_done;
		if (bp->nb_data) {
			if (ubc_upl_unmap(upl) != KERN_SUCCESS)
				panic("ubc_upl_unmap failed");
			bp->nb_data = NULL;
		}
		/*
		 * Abort the pages on error or: if this is an invalid or
		 * non-needcommit nocache buffer AND no pages are dirty.
		 */
		if (ISSET(bp->nb_flags, NB_ERROR) || (!bp->nb_dirty && (ISSET(bp->nb_flags, NB_INVAL) ||
		    (ISSET(bp->nb_flags, NB_NOCACHE) && !ISSET(bp->nb_flags, (NB_NEEDCOMMIT | NB_DELWRI)))))) {
			if (ISSET(bp->nb_flags, (NB_READ | NB_INVAL | NB_NOCACHE)))
				upl_flags = UPL_ABORT_DUMP_PAGES;
			else
				upl_flags = 0;
			ubc_upl_abort(upl, upl_flags);
			goto pagelist_cleanup_done;
		}
		for (i = 0; i <= (bp->nb_bufsize - 1)/PAGE_SIZE; i++) {
			if (!NBPGVALID(bp, i))
				ubc_upl_abort_range(upl,
					i*PAGE_SIZE, PAGE_SIZE,
					UPL_ABORT_DUMP_PAGES |
					UPL_ABORT_FREE_ON_EMPTY);
			else {
				if (NBPGDIRTY(bp, i))
					upl_flags = UPL_COMMIT_SET_DIRTY;
				else
					upl_flags = UPL_COMMIT_CLEAR_DIRTY;

				if (!ISSET(bp->nb_flags, (NB_NEEDCOMMIT | NB_DELWRI)))
					upl_flags |= UPL_COMMIT_CLEAR_PRECIOUS;

				ubc_upl_commit_range(upl,
					i*PAGE_SIZE, PAGE_SIZE,
					upl_flags |
					UPL_COMMIT_INACTIVATE |
					UPL_COMMIT_FREE_ON_EMPTY);
			}
		}
pagelist_cleanup_done:
		/* invalidate any pages past EOF */
		if (NBOFF(bp) + bp->nb_bufsize > (off_t)(np->n_size)) {
			off_t start, end;
			start = trunc_page_64(np->n_size) + PAGE_SIZE_64;
			end = trunc_page_64(NBOFF(bp) + bp->nb_bufsize);
			if (start < NBOFF(bp))
				start = NBOFF(bp);
			if (end > start) {
				if ((rv = ubc_msync(vp, start, end, NULL, UBC_INVALIDATE)))
					printf("nfs_buf_release(): ubc_msync failed!, error %d\n", rv);
			}
		}
		CLR(bp->nb_flags, NB_PAGELIST);
		bp->nb_pagelist = NULL;
	}
	lck_mtx_lock(nfs_buf_mutex);

	wakeup_needbuffer = wakeup_buffer = wakeup_nbdwrite = 0;

	/* Wake up any processes waiting for any buffer to become free. */
	if (nfsneedbuffer) {
		nfsneedbuffer = 0;
		wakeup_needbuffer = 1;
	}
	/* Wake up any processes waiting for _this_ buffer to become free. */
	if (ISSET(bp->nb_lflags, NBL_WANTED)) {
		CLR(bp->nb_lflags, NBL_WANTED);
		wakeup_buffer = 1;
	}

	/* If it's non-needcommit nocache, or an error, mark it invalid. */
	if (ISSET(bp->nb_flags, NB_ERROR) ||
	    (ISSET(bp->nb_flags, NB_NOCACHE) && !ISSET(bp->nb_flags, (NB_NEEDCOMMIT | NB_DELWRI))))
		SET(bp->nb_flags, NB_INVAL);

	if ((bp->nb_bufsize <= 0) || ISSET(bp->nb_flags, NB_INVAL)) {
		/* If it's invalid or empty, dissociate it from its nfsnode */
		if (bp->nb_vnbufs.le_next != NFSNOLIST) {
			LIST_REMOVE(bp, nb_vnbufs);
			bp->nb_vnbufs.le_next = NFSNOLIST;
		}
		bp->nb_np = NULL;
		/* if this was a delayed write, wakeup anyone */
		/* waiting for delayed writes to complete */
		if (ISSET(bp->nb_flags, NB_DELWRI)) {
			CLR(bp->nb_flags, NB_DELWRI);
			nfs_nbdwrite--;
			wakeup_nbdwrite = 1;
		}
		/* invalidate usage timestamp to allow immediate freeing */
		NBUFSTAMPINVALIDATE(bp);
		/* put buffer at head of free list */
		if (bp->nb_free.tqe_next != NFSNOLIST)
			panic("nfsbuf on freelist");
		SET(bp->nb_flags, NB_INVAL);
		if (ISSET(bp->nb_flags, NB_META)) {
			TAILQ_INSERT_HEAD(&nfsbuffreemeta, bp, nb_free);
			nfsbuffreemetacnt++;
		} else {
			TAILQ_INSERT_HEAD(&nfsbuffree, bp, nb_free);
			nfsbuffreecnt++;
		}
	} else if (ISSET(bp->nb_flags, NB_DELWRI)) {
		/* put buffer at end of delwri list */
		if (bp->nb_free.tqe_next != NFSNOLIST)
			panic("nfsbuf on freelist");
		TAILQ_INSERT_TAIL(&nfsbufdelwri, bp, nb_free);
		nfsbufdelwricnt++;
		freeup = 0;
	} else {
		/* update usage timestamp */
		microuptime(&now);
		bp->nb_timestamp = now.tv_sec;
		/* put buffer at end of free list */
		if (bp->nb_free.tqe_next != NFSNOLIST)
			panic("nfsbuf on freelist");
		if (ISSET(bp->nb_flags, NB_META)) {
			TAILQ_INSERT_TAIL(&nfsbuffreemeta, bp, nb_free);
			nfsbuffreemetacnt++;
		} else {
			TAILQ_INSERT_TAIL(&nfsbuffree, bp, nb_free);
			nfsbuffreecnt++;
		}
	}

	/* Unlock the buffer. */
	CLR(bp->nb_flags, (NB_ASYNC | NB_STABLE));
	CLR(bp->nb_lflags, NBL_BUSY);

	FSDBG_BOT(548, bp, NBOFF(bp), bp->nb_flags, bp->nb_data);

	lck_mtx_unlock(nfs_buf_mutex);

	if (wakeup_needbuffer)
		wakeup(&nfsneedbuffer);
	if (wakeup_buffer)
		wakeup(bp);
	if (wakeup_nbdwrite)
		wakeup(&nfs_nbdwrite);
	if (freeup)
		NFS_BUF_FREEUP();
}
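/*
 * Note the free-list placement policy above: invalid/empty buffers go to
 * the head of their free list with an invalidated timestamp (immediate
 * reuse), delayed-write buffers go to the tail of nfsbufdelwri, and
 * still-valid buffers go to the tail of their free list with a fresh
 * timestamp so cached data survives as long as possible.
 */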
/*
 * Wait for operations on the buffer to complete.
 * When they do, extract and return the I/O's error value.
 */
int
nfs_buf_iowait(struct nfsbuf *bp)
{
	FSDBG_TOP(549, bp, NBOFF(bp), bp->nb_flags, bp->nb_error);

	lck_mtx_lock(nfs_buf_mutex);

	while (!ISSET(bp->nb_flags, NB_DONE))
		msleep(bp, nfs_buf_mutex, PRIBIO + 1, "nfs_buf_iowait", NULL);

	lck_mtx_unlock(nfs_buf_mutex);

	FSDBG_BOT(549, bp, NBOFF(bp), bp->nb_flags, bp->nb_error);

	/* check for interruption of I/O, then errors. */
	if (ISSET(bp->nb_flags, NB_EINTR)) {
		CLR(bp->nb_flags, NB_EINTR);
		return (EINTR);
	} else if (ISSET(bp->nb_flags, NB_ERROR))
		return (bp->nb_error ? bp->nb_error : EIO);
	return (0);
}
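/*
 * Error mapping above: an interrupted I/O (NB_EINTR) yields EINTR, a failed
 * I/O yields nb_error (or EIO if no specific error was recorded), and a
 * clean completion yields 0.
 */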
/*
 * Mark I/O complete on a buffer.
 */
void
nfs_buf_iodone(struct nfsbuf *bp)
{
	FSDBG_TOP(550, bp, NBOFF(bp), bp->nb_flags, bp->nb_error);

	if (ISSET(bp->nb_flags, NB_DONE))
		panic("nfs_buf_iodone already");

	if (!ISSET(bp->nb_flags, NB_READ)) {
		CLR(bp->nb_flags, NB_WRITEINPROG);
		/*
		 * vnode_writedone() takes care of waking up
		 * any throttled write operations
		 */
		vnode_writedone(NFSTOV(bp->nb_np));
		nfs_node_lock_force(bp->nb_np);
		bp->nb_np->n_numoutput--;
		nfs_node_unlock(bp->nb_np);
	}
	if (ISSET(bp->nb_flags, NB_ASYNC)) {	/* if async, release it */
		SET(bp->nb_flags, NB_DONE);	/* note that it's done */
		nfs_buf_release(bp, 1);
	} else {				/* or just wakeup the buffer */
		lck_mtx_lock(nfs_buf_mutex);
		SET(bp->nb_flags, NB_DONE);	/* note that it's done */
		CLR(bp->nb_lflags, NBL_WANTED);
		lck_mtx_unlock(nfs_buf_mutex);
		wakeup(bp);
	}

	FSDBG_BOT(550, bp, NBOFF(bp), bp->nb_flags, bp->nb_error);
}
void
nfs_buf_write_delayed(struct nfsbuf *bp)
{
	nfsnode_t np = bp->nb_np;

	FSDBG_TOP(551, bp, NBOFF(bp), bp->nb_flags, 0);
	FSDBG(551, bp, bp->nb_dirtyoff, bp->nb_dirtyend, bp->nb_dirty);

	/*
	 * If the block hasn't been seen before:
	 *	(1) Mark it as having been seen,
	 *	(2) Make sure it's on its node's correct block list,
	 */
	if (!ISSET(bp->nb_flags, NB_DELWRI)) {
		SET(bp->nb_flags, NB_DELWRI);
		/* move to dirty list */
		lck_mtx_lock(nfs_buf_mutex);
		nfs_nbdwrite++;
		if (bp->nb_vnbufs.le_next != NFSNOLIST)
			LIST_REMOVE(bp, nb_vnbufs);
		LIST_INSERT_HEAD(&np->n_dirtyblkhd, bp, nb_vnbufs);
		lck_mtx_unlock(nfs_buf_mutex);
	}

	/*
	 * If the vnode has "too many" write operations in progress
	 * wait for them to finish the IO
	 */
	vnode_waitforwrites(NFSTOV(np), VNODE_ASYNC_THROTTLE, 0, 0, "nfs_buf_write_delayed");

	/* the file is in a modified state, so make sure the flag's set */
	nfs_node_lock_force(np);
	np->n_flag |= NMODIFIED;
	nfs_node_unlock(np);

	/*
	 * If we have too many delayed write buffers,
	 * just fall back to doing the async write.
	 */
	if (nfs_nbdwrite < 0)
		panic("nfs_buf_write_delayed: Negative nfs_nbdwrite");
	if (nfs_nbdwrite > NFS_A_LOT_OF_DELAYED_WRITES) {
		/* issue async write */
		SET(bp->nb_flags, NB_ASYNC);
		nfs_buf_write(bp);
		FSDBG_BOT(551, bp, NBOFF(bp), bp->nb_flags, bp->nb_error);
		return;
	}

	/* Otherwise, the "write" is done, so mark and release the buffer. */
	SET(bp->nb_flags, NB_DONE);
	nfs_buf_release(bp, 1);
	FSDBG_BOT(551, bp, NBOFF(bp), bp->nb_flags, 0);
}
/*
 * Check that a "needcommit" buffer can still be committed.
 * If the write verifier has changed, we need to clear the
 * needcommit flag.
 */
void
nfs_buf_check_write_verifier(nfsnode_t np, struct nfsbuf *bp)
{
	struct nfsmount *nmp;

	if (!ISSET(bp->nb_flags, NB_NEEDCOMMIT))
		return;

	nmp = NFSTONMP(np);
	if (nfs_mount_gone(nmp))
		return;
	if (!ISSET(bp->nb_flags, NB_STALEWVERF) && (bp->nb_verf == nmp->nm_verf))
		return;

	/* write verifier changed, clear commit/wverf flags */
	CLR(bp->nb_flags, (NB_NEEDCOMMIT | NB_STALEWVERF));
	bp->nb_verf = 0;
	nfs_node_lock_force(np);
	np->n_needcommitcnt--;
	CHECK_NEEDCOMMITCNT(np);
	nfs_node_unlock(np);
}
/*
 * add a reference to a buffer so it doesn't disappear while being used
 * (must be called with nfs_buf_mutex held)
 */
void
nfs_buf_refget(struct nfsbuf *bp)
{
	bp->nb_refs++;
}

/*
 * release a reference on a buffer
 * (must be called with nfs_buf_mutex held)
 */
void
nfs_buf_refrele(struct nfsbuf *bp)
{
	bp->nb_refs--;
}
/*
 * mark a particular buffer as BUSY
 * (must be called with nfs_buf_mutex held)
 */
errno_t
nfs_buf_acquire(struct nfsbuf *bp, int flags, int slpflag, int slptimeo)
{
	errno_t error;
	struct timespec ts;

	if (ISSET(bp->nb_lflags, NBL_BUSY)) {
		/*
		 * since the lck_mtx_lock may block, the buffer
		 * may become BUSY, so we need to recheck for
		 * a NOWAIT request
		 */
		if (flags & NBAC_NOWAIT)
			return (EBUSY);
		SET(bp->nb_lflags, NBL_WANTED);

		ts.tv_sec = (slptimeo/100);
		/* the hz value is 100; which leads to 10ms */
		ts.tv_nsec = (slptimeo % 100) * 10 * NSEC_PER_USEC * 1000;

		error = msleep(bp, nfs_buf_mutex, slpflag | (PRIBIO + 1),
			"nfs_buf_acquire", &ts);
		if (error)
			return (error);
		return (EAGAIN);
	}
	if (flags & NBAC_REMOVE)
		nfs_buf_remfree(bp);
	SET(bp->nb_lflags, NBL_BUSY);

	return (0);
}
/*
 * simply drop the BUSY status of a buffer
 * (must be called with nfs_buf_mutex held)
 */
void
nfs_buf_drop(struct nfsbuf *bp)
{
	int need_wakeup = 0;

	if (!ISSET(bp->nb_lflags, NBL_BUSY))
		panic("nfs_buf_drop: buffer not busy!");
	if (ISSET(bp->nb_lflags, NBL_WANTED)) {
		/* delay the actual wakeup until after we clear NBL_BUSY */
		need_wakeup = 1;
	}
	/* Unlock the buffer. */
	CLR(bp->nb_lflags, (NBL_BUSY | NBL_WANTED));

	if (need_wakeup)
		wakeup(bp);
}
/*
 * prepare for iterating over an nfsnode's buffer list
 * this lock protects the queue manipulation
 * (must be called with nfs_buf_mutex held)
 */
int
nfs_buf_iterprepare(nfsnode_t np, struct nfsbuflists *iterheadp, int flags)
{
	struct nfsbuflists *listheadp;

	if (flags & NBI_DIRTY)
		listheadp = &np->n_dirtyblkhd;
	else
		listheadp = &np->n_cleanblkhd;

	if ((flags & NBI_NOWAIT) && (np->n_bufiterflags & NBI_ITER)) {
		LIST_INIT(iterheadp);
		return (EWOULDBLOCK);
	}

	while (np->n_bufiterflags & NBI_ITER) {
		np->n_bufiterflags |= NBI_ITERWANT;
		msleep(&np->n_bufiterflags, nfs_buf_mutex, 0, "nfs_buf_iterprepare", NULL);
	}
	if (LIST_EMPTY(listheadp)) {
		LIST_INIT(iterheadp);
		return (EINVAL);
	}
	np->n_bufiterflags |= NBI_ITER;

	iterheadp->lh_first = listheadp->lh_first;
	listheadp->lh_first->nb_vnbufs.le_prev = &iterheadp->lh_first;
	LIST_INIT(listheadp);

	return (0);
}
/*
 * clean up after iterating over an nfsnode's buffer list
 * this lock protects the queue manipulation
 * (must be called with nfs_buf_mutex held)
 */
void
nfs_buf_itercomplete(nfsnode_t np, struct nfsbuflists *iterheadp, int flags)
{
	struct nfsbuflists * listheadp;
	struct nfsbuf *bp;

	if (flags & NBI_DIRTY)
		listheadp = &np->n_dirtyblkhd;
	else
		listheadp = &np->n_cleanblkhd;

	while (!LIST_EMPTY(iterheadp)) {
		bp = LIST_FIRST(iterheadp);
		LIST_REMOVE(bp, nb_vnbufs);
		LIST_INSERT_HEAD(listheadp, bp, nb_vnbufs);
	}

	np->n_bufiterflags &= ~NBI_ITER;
	if (np->n_bufiterflags & NBI_ITERWANT) {
		np->n_bufiterflags &= ~NBI_ITERWANT;
		wakeup(&np->n_bufiterflags);
	}
}
/*
 * Read an NFS buffer for a file.
 */
int
nfs_buf_read(struct nfsbuf *bp)
{
	int error = 0;
	nfsnode_t np;
	thread_t thd;
	kauth_cred_t cred;

	np = bp->nb_np;
	cred = bp->nb_rcred;
	if (IS_VALID_CRED(cred))
		kauth_cred_ref(cred);
	thd = ISSET(bp->nb_flags, NB_ASYNC) ? NULL : current_thread();

	/* sanity checks */
	if (!ISSET(bp->nb_flags, NB_READ))
		panic("nfs_buf_read: !NB_READ");
	if (ISSET(bp->nb_flags, NB_DONE))
		CLR(bp->nb_flags, NB_DONE);

	NFS_BUF_MAP(bp);

	OSAddAtomic64(1, &nfsstats.read_bios);

	error = nfs_buf_read_rpc(bp, thd, cred);
	/*
	 * For async I/O, the callbacks will finish up the
	 * read.  Otherwise, the read has already been finished.
	 */

	if (IS_VALID_CRED(cred))
		kauth_cred_unref(&cred);
	return (error);
}
/*
 * finish the reading of a buffer
 */
void
nfs_buf_read_finish(struct nfsbuf *bp)
{
	nfsnode_t np = bp->nb_np;
	struct nfsmount *nmp;

	if (!ISSET(bp->nb_flags, NB_ERROR)) {
		/* update valid range */
		bp->nb_validoff = 0;
		bp->nb_validend = bp->nb_endio;
		if (bp->nb_endio < (int)bp->nb_bufsize) {
			/*
			 * The read may be short because we have unflushed writes
			 * that are extending the file size and the reads hit the
			 * (old) EOF on the server.  So, just make sure nb_validend
			 * correctly tracks EOF.
			 * Note that the missing data should have already been zeroed
			 * in nfs_buf_read_rpc_finish().
			 */
			off_t boff = NBOFF(bp);
			if ((off_t)np->n_size >= (boff + bp->nb_bufsize))
				bp->nb_validend = bp->nb_bufsize;
			else if ((off_t)np->n_size >= boff)
				bp->nb_validend = np->n_size - boff;
			else
				bp->nb_validend = 0;
		}
		if ((nmp = NFSTONMP(np)) && (nmp->nm_vers == NFS_VER2) &&
		    ((NBOFF(bp) + bp->nb_validend) > 0x100000000LL))
			bp->nb_validend = 0x100000000LL - NBOFF(bp);
		bp->nb_valid = (1 << (round_page_32(bp->nb_validend) / PAGE_SIZE)) - 1;
		if (bp->nb_validend & PAGE_MASK) {
			/* zero-fill remainder of last page */
			bzero(bp->nb_data + bp->nb_validend, PAGE_SIZE - (bp->nb_validend & PAGE_MASK));
		}
	}
	nfs_buf_iodone(bp);
}
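/*
 * Example of the nb_valid computation above (illustrative, 4K pages):
 * nb_validend == 0x2800 rounds up to 0x3000 == 3 pages, so
 * nb_valid = (1 << 3) - 1 = 0b111, marking pages 0-2 valid; the tail of
 * page 2 beyond offset 0x2800 has just been zero-filled.
 */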
/*
 * initiate the NFS READ RPC(s) for a buffer
 */
int
nfs_buf_read_rpc(struct nfsbuf *bp, thread_t thd, kauth_cred_t cred)
{
	struct nfsmount *nmp;
	nfsnode_t np = bp->nb_np;
	int error = 0, nfsvers, async;
	int offset, nrpcs;
	uint32_t nmrsize, length, len;
	off_t boff;
	struct nfsreq *req;
	struct nfsreq_cbinfo cb;

	nmp = NFSTONMP(np);
	if (nfs_mount_gone(nmp)) {
		bp->nb_error = error = ENXIO;
		SET(bp->nb_flags, NB_ERROR);
		nfs_buf_iodone(bp);
		return (error);
	}
	nfsvers = nmp->nm_vers;
	nmrsize = nmp->nm_rsize;

	boff = NBOFF(bp);
	offset = 0;
	length = bp->nb_bufsize;

	if (nfsvers == NFS_VER2) {
		if (boff > 0xffffffffLL) {
			bp->nb_error = error = EFBIG;
			SET(bp->nb_flags, NB_ERROR);
			nfs_buf_iodone(bp);
			return (error);
		}
		if ((boff + length - 1) > 0xffffffffLL)
			length = 0x100000000LL - boff;
	}

	/* Note: Can only do async I/O if nfsiods are configured. */
	async = (bp->nb_flags & NB_ASYNC);
	cb.rcb_func = async ? nfs_buf_read_rpc_finish : NULL;
	cb.rcb_bp = bp;

	bp->nb_offio = bp->nb_endio = 0;
	bp->nb_rpcs = nrpcs = (length + nmrsize - 1) / nmrsize;
	if (async && (nrpcs > 1)) {
		SET(bp->nb_flags, NB_MULTASYNCRPC);
	} else {
		CLR(bp->nb_flags, NB_MULTASYNCRPC);
	}

	while (length > 0) {
		if (ISSET(bp->nb_flags, NB_ERROR)) {
			error = bp->nb_error;
			break;
		}
		len = (length > nmrsize) ? nmrsize : length;
		cb.rcb_args[0] = offset;
		cb.rcb_args[1] = len;
		if (nmp->nm_vers >= NFS_VER4)
			cb.rcb_args[2] = nmp->nm_stategenid;
		req = NULL;
		error = nmp->nm_funcs->nf_read_rpc_async(np, boff + offset, len, thd, cred, &cb, &req);
		if (error)
			break;
		offset += len;
		length -= len;
		if (async)
			continue;
		nfs_buf_read_rpc_finish(req);
		if (ISSET(bp->nb_flags, NB_ERROR)) {
			error = bp->nb_error;
			break;
		}
	}

	if (length > 0) {
		/*
		 * Something bad happened while trying to send the RPC(s).
		 * Wait for any outstanding requests to complete.
		 */
		bp->nb_error = error;
		SET(bp->nb_flags, NB_ERROR);
		if (ISSET(bp->nb_flags, NB_MULTASYNCRPC)) {
			nrpcs = (length + nmrsize - 1) / nmrsize;
			lck_mtx_lock(nfs_buf_mutex);
			bp->nb_rpcs -= nrpcs;
			if (bp->nb_rpcs == 0) {
				/* No RPCs left, so the buffer's done */
				lck_mtx_unlock(nfs_buf_mutex);
				nfs_buf_iodone(bp);
			} else {
				/* wait for the last RPC to mark it done */
				while (bp->nb_rpcs > 0)
					msleep(&bp->nb_rpcs, nfs_buf_mutex, 0,
						"nfs_buf_read_rpc_cancel", NULL);
				lck_mtx_unlock(nfs_buf_mutex);
			}
		} else {
			nfs_buf_iodone(bp);
		}
	}

	return (error);
}
/*
 * finish up an NFS READ RPC on a buffer
 */
void
nfs_buf_read_rpc_finish(struct nfsreq *req)
{
	struct nfsmount *nmp;
	size_t rlen;
	struct nfsreq_cbinfo cb;
	struct nfsbuf *bp;
	int error = 0, nfsvers, offset, length, eof = 0, multasyncrpc, finished;
	void *wakeme = NULL;
	struct nfsreq *rreq = NULL;
	nfsnode_t np;
	thread_t thd;
	kauth_cred_t cred;
	uio_t auio;
	char uio_buf [ UIO_SIZEOF(1) ];

finish:
	np = req->r_np;
	thd = req->r_thread;
	cred = req->r_cred;
	if (IS_VALID_CRED(cred))
		kauth_cred_ref(cred);
	cb = req->r_callback;
	bp = cb.rcb_bp;
	if (cb.rcb_func) /* take an extra reference on the nfsreq in case we want to resend it later due to grace error */
		nfs_request_ref(req, 0);

	nmp = NFSTONMP(np);
	if (nfs_mount_gone(nmp)) {
		SET(bp->nb_flags, NB_ERROR);
		bp->nb_error = error = ENXIO;
	}
	if (error || ISSET(bp->nb_flags, NB_ERROR)) {
		/* just drop it */
		nfs_request_async_cancel(req);
		goto out;
	}

	nfsvers = nmp->nm_vers;
	offset = cb.rcb_args[0];
	rlen = length = cb.rcb_args[1];

	auio = uio_createwithbuffer(1, NBOFF(bp) + offset, UIO_SYSSPACE,
		UIO_READ, &uio_buf, sizeof(uio_buf));
	uio_addiov(auio, CAST_USER_ADDR_T(bp->nb_data + offset), length);

	/* finish the RPC */
	error = nmp->nm_funcs->nf_read_rpc_async_finish(np, req, auio, &rlen, &eof);
	if ((error == EINPROGRESS) && cb.rcb_func) {
		/* async request restarted */
		if (cb.rcb_func)
			nfs_request_rele(req);
		if (IS_VALID_CRED(cred))
			kauth_cred_unref(&cred);
		return;
	}
	if ((nmp->nm_vers >= NFS_VER4) && nfs_mount_state_error_should_restart(error) && !ISSET(bp->nb_flags, NB_ERROR)) {
		lck_mtx_lock(&nmp->nm_lock);
		if ((error != NFSERR_OLD_STATEID) && (error != NFSERR_GRACE) && (cb.rcb_args[2] == nmp->nm_stategenid)) {
			NP(np, "nfs_buf_read_rpc_finish: error %d @ 0x%llx, 0x%x 0x%x, initiating recovery",
				error, NBOFF(bp)+offset, cb.rcb_args[2], nmp->nm_stategenid);
			nfs_need_recover(nmp, error);
		}
		lck_mtx_unlock(&nmp->nm_lock);
		if (np->n_flag & NREVOKE) {
			error = EIO;
		} else {
			if (error == NFSERR_GRACE) {
				if (cb.rcb_func) {
					/*
					 * For an async I/O request, handle a grace delay just like
					 * jukebox errors.  Set the resend time and queue it up.
					 */
					struct timeval now;
					if (req->r_nmrep.nmc_mhead) {
						mbuf_freem(req->r_nmrep.nmc_mhead);
						req->r_nmrep.nmc_mhead = NULL;
					}
					req->r_error = 0;
					microuptime(&now);
					lck_mtx_lock(&req->r_mtx);
					req->r_resendtime = now.tv_sec + 2;
					req->r_xid = 0;		// get a new XID
					req->r_flags |= R_RESTART;
					req->r_start = 0;
					nfs_asyncio_resend(req);
					lck_mtx_unlock(&req->r_mtx);
					if (IS_VALID_CRED(cred))
						kauth_cred_unref(&cred);
					/* Note: nfsreq reference taken will be dropped later when finished */
					return;
				}
				/* otherwise, just pause a couple seconds and retry */
				tsleep(&nmp->nm_state, (PZERO-1), "nfsgrace", 2*hz);
			}
			if (!(error = nfs_mount_state_wait_for_recovery(nmp))) {
				rlen = 0;
				goto readagain;
			}
		}
	}
	if (error) {
		SET(bp->nb_flags, NB_ERROR);
		bp->nb_error = error;
		goto out;
	}

	if ((rlen > 0) && (bp->nb_endio < (offset + (int)rlen)))
		bp->nb_endio = offset + rlen;

	if ((nfsvers == NFS_VER2) || eof || (rlen == 0)) {
		/* zero out the remaining data (up to EOF) */
		off_t rpcrem, eofrem, rem;
		rpcrem = (length - rlen);
		eofrem = np->n_size - (NBOFF(bp) + offset + rlen);
		rem = (rpcrem < eofrem) ? rpcrem : eofrem;
		if (rem > 0)
			bzero(bp->nb_data + offset + rlen, rem);
	} else if (((int)rlen < length) && !ISSET(bp->nb_flags, NB_ERROR)) {
		/*
		 * short read
		 *
		 * We haven't hit EOF and we didn't get all the data
		 * requested, so we need to issue another read for the rest.
		 * (Don't bother if the buffer already hit an error.)
		 */
readagain:
		offset += rlen;
		length -= rlen;
		cb.rcb_args[0] = offset;
		cb.rcb_args[1] = length;
		if (nmp->nm_vers >= NFS_VER4)
			cb.rcb_args[2] = nmp->nm_stategenid;
		error = nmp->nm_funcs->nf_read_rpc_async(np, NBOFF(bp) + offset, length, thd, cred, &cb, &rreq);
		if (!error) {
			if (IS_VALID_CRED(cred))
				kauth_cred_unref(&cred);
			if (!cb.rcb_func) {
				/* if !async we'll need to wait for this RPC to finish */
				req = rreq;
				rreq = NULL;
				goto finish;
			}
			nfs_request_rele(req);
			/*
			 * We're done here.
			 * Outstanding RPC count is unchanged.
			 * Callback will be called when RPC is done.
			 */
			return;
		}
		SET(bp->nb_flags, NB_ERROR);
		bp->nb_error = error;
	}

out:
	if (cb.rcb_func)
		nfs_request_rele(req);
	if (IS_VALID_CRED(cred))
		kauth_cred_unref(&cred);

	/*
	 * Decrement outstanding RPC count on buffer
	 * and call nfs_buf_read_finish on last RPC.
	 *
	 * (Note: when there are multiple async RPCs issued for a
	 * buffer we need nfs_buffer_mutex to avoid problems when
	 * aborting a partially-initiated set of RPCs)
	 */

	multasyncrpc = ISSET(bp->nb_flags, NB_MULTASYNCRPC);
	if (multasyncrpc)
		lck_mtx_lock(nfs_buf_mutex);

	bp->nb_rpcs--;
	finished = (bp->nb_rpcs == 0);

	if (multasyncrpc)
		lck_mtx_unlock(nfs_buf_mutex);

	if (finished) {
		if (multasyncrpc)
			wakeme = &bp->nb_rpcs;
		nfs_buf_read_finish(bp);
		if (wakeme)
			wakeup(wakeme);
	}
}
/*
 * Do buffer readahead.
 * Initiate async I/O to read buffers not in cache.
 */
int
nfs_buf_readahead(nfsnode_t np, int ioflag, daddr64_t *rabnp, daddr64_t lastrabn, thread_t thd, kauth_cred_t cred)
{
	struct nfsmount *nmp = NFSTONMP(np);
	struct nfsbuf *bp;
	int error = 0;
	uint32_t nra;

	if (nfs_mount_gone(nmp))
		return (ENXIO);
	if (nmp->nm_readahead <= 0)
		return (0);
	if (*rabnp > lastrabn)
		return (0);

	for (nra = 0; (nra < nmp->nm_readahead) && (*rabnp <= lastrabn); nra++, *rabnp = *rabnp + 1) {
		/* check if block exists and is valid. */
		if ((*rabnp * nmp->nm_biosize) >= (off_t)np->n_size) {
			/* stop reading ahead if we're beyond EOF */
			*rabnp = lastrabn;
			break;
		}
		error = nfs_buf_get(np, *rabnp, nmp->nm_biosize, thd, NBLK_READ|NBLK_NOWAIT, &bp);
		if (error)
			break;
		nfs_node_lock_force(np);
		np->n_lastrahead = *rabnp;
		nfs_node_unlock(np);
		if (!bp)
			continue;
		if ((ioflag & IO_NOCACHE) && ISSET(bp->nb_flags, NB_CACHE) &&
		    !bp->nb_dirty && !ISSET(bp->nb_flags, (NB_DELWRI|NB_NCRDAHEAD))) {
			CLR(bp->nb_flags, NB_CACHE);
			bp->nb_valid = 0;
			bp->nb_validoff = bp->nb_validend = -1;
		}
		if ((bp->nb_dirtyend <= 0) && !bp->nb_dirty &&
		    !ISSET(bp->nb_flags, (NB_CACHE|NB_DELWRI))) {
			SET(bp->nb_flags, (NB_READ|NB_ASYNC));
			if (ioflag & IO_NOCACHE)
				SET(bp->nb_flags, NB_NCRDAHEAD);
			if (!IS_VALID_CRED(bp->nb_rcred) && IS_VALID_CRED(cred)) {
				kauth_cred_ref(cred);
				bp->nb_rcred = cred;
			}
			if ((error = nfs_buf_read(bp)))
				break;
			continue;
		}
		nfs_buf_release(bp, 1);
	}
	return (error);
}
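/*
 * Note: each readahead block is fetched with NBLK_READ|NBLK_NOWAIT, so a
 * busy buffer is simply skipped rather than waited on, and the loop stops
 * early once *rabnp would reach a block past EOF.
 */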
/*
 * NFS buffer I/O for reading files.
 */
int
nfs_bioread(nfsnode_t np, uio_t uio, int ioflag, vfs_context_t ctx)
{
	vnode_t vp = NFSTOV(np);
	struct nfsbuf *bp = NULL;
	struct nfsmount *nmp = VTONMP(vp);
	daddr64_t lbn, rabn = 0, lastrabn, maxrabn = -1;
	off_t diff;
	int error = 0, n = 0, on = 0;
	int nfsvers, biosize, modified, readaheads = 0;
	thread_t thd;
	kauth_cred_t cred;
	int64_t io_resid;

	FSDBG_TOP(514, np, uio_offset(uio), uio_resid(uio), ioflag);

	nfsvers = nmp->nm_vers;
	biosize = nmp->nm_biosize;
	thd = vfs_context_thread(ctx);
	cred = vfs_context_ucred(ctx);

	if (vnode_vtype(vp) != VREG) {
		printf("nfs_bioread: type %x unexpected\n", vnode_vtype(vp));
		FSDBG_BOT(514, np, 0xd1e0016, 0, EINVAL);
		return (EINVAL);
	}
	/*
	 * For NFS, cache consistency can only be maintained approximately.
	 * Although RFC1094 does not specify the criteria, the following is
	 * believed to be compatible with the reference port.
	 *
	 * If the file has changed since the last read RPC or you have
	 * written to the file, you may have lost data cache consistency
	 * with the server.  So, check for a change, and flush all of the
	 * file's data out of the cache.
	 * NB: This implies that cache data can be read when up to
	 * NFS_MAXATTRTIMO seconds out of date.  If you find that you
	 * need current attributes, nfs_getattr() can be forced to fetch
	 * new attributes (via NATTRINVALIDATE() or NGA_UNCACHED).
	 */

	if (ISSET(np->n_flag, NUPDATESIZE))
		nfs_data_update_size(np, 0);

	if ((error = nfs_node_lock(np))) {
		FSDBG_BOT(514, np, 0xd1e0222, 0, error);
		return (error);
	}

	if (np->n_flag & NNEEDINVALIDATE) {
		np->n_flag &= ~NNEEDINVALIDATE;
		nfs_node_unlock(np);
		error = nfs_vinvalbuf(vp, V_SAVE|V_IGNORE_WRITEERR, ctx, 1);
		if (!error)
			error = nfs_node_lock(np);
		if (error) {
			FSDBG_BOT(514, np, 0xd1e0322, 0, error);
			return (error);
		}
	}

	modified = (np->n_flag & NMODIFIED);
	nfs_node_unlock(np);
	/* nfs_getattr() will check changed and purge caches */
	error = nfs_getattr(np, NULL, ctx, modified ? NGA_UNCACHED : NGA_CACHED);
	if (error) {
		FSDBG_BOT(514, np, 0xd1e0004, 0, error);
		return (error);
	}

	if (uio_resid(uio) == 0) {
		FSDBG_BOT(514, np, 0xd1e0001, 0, 0);
		return (0);
	}
	if (uio_offset(uio) < 0) {
		FSDBG_BOT(514, np, 0xd1e0002, 0, EINVAL);
		return (EINVAL);
	}
	/*
	 * set up readahead - which may be limited by:
	 * + current request length (for IO_NOCACHE)
	 * + readahead setting
	 * + file size
	 */
	if (nmp->nm_readahead > 0) {
		off_t end = uio_offset(uio) + uio_resid(uio);
		if (end > (off_t)np->n_size)
			end = np->n_size;
		rabn = uio_offset(uio) / biosize;
		maxrabn = (end - 1) / biosize;
		nfs_node_lock_force(np);
		if (!(ioflag & IO_NOCACHE) &&
		    (!rabn || (rabn == np->n_lastread) || (rabn == (np->n_lastread+1)))) {
			maxrabn += nmp->nm_readahead;
			if ((maxrabn * biosize) >= (off_t)np->n_size)
				maxrabn = ((off_t)np->n_size - 1)/biosize;
		}
		if (maxrabn < np->n_lastrahead)
			np->n_lastrahead = -1;
		if (rabn < np->n_lastrahead)
			rabn = np->n_lastrahead + 1;
		nfs_node_unlock(np);
	}
	do {

		nfs_data_lock(np, NFS_DATA_LOCK_SHARED);
		lbn = uio_offset(uio) / biosize;

		/*
		 * Copy directly from any cached pages without grabbing the bufs.
		 * (If we are NOCACHE and we've issued readahead requests, we need
		 * to grab the NB_NCRDAHEAD bufs to drop them.)
		 */
		if ((!(ioflag & IO_NOCACHE) || !readaheads) &&
		    ((uio->uio_segflg == UIO_USERSPACE32 ||
		      uio->uio_segflg == UIO_USERSPACE64 ||
		      uio->uio_segflg == UIO_USERSPACE))) {
			io_resid = uio_resid(uio);
			diff = np->n_size - uio_offset(uio);
			if (diff < io_resid)
				io_resid = diff;
			if (io_resid > 0) {
				int count = (io_resid > INT_MAX) ? INT_MAX : io_resid;
				error = cluster_copy_ubc_data(vp, uio, &count, 0);
				if (error) {
					nfs_data_unlock(np);
					FSDBG_BOT(514, np, uio_offset(uio), 0xcacefeed, error);
					return (error);
				}
			}
			/* count any biocache reads that we just copied directly */
			if (lbn != (uio_offset(uio)/biosize)) {
				OSAddAtomic64((uio_offset(uio)/biosize) - lbn, &nfsstats.biocache_reads);
				FSDBG(514, np, 0xcacefeed, uio_offset(uio), error);
			}
		}

		lbn = uio_offset(uio) / biosize;
		on = uio_offset(uio) % biosize;
		nfs_node_lock_force(np);
		np->n_lastread = (uio_offset(uio) - 1) / biosize;
		nfs_node_unlock(np);

		if ((uio_resid(uio) <= 0) || (uio_offset(uio) >= (off_t)np->n_size)) {
			nfs_data_unlock(np);
			FSDBG_BOT(514, np, uio_offset(uio), uio_resid(uio), 0xaaaaaaaa);
			return (0);
		}
		/* adjust readahead block number, if necessary */
		if (rabn < lbn)
			rabn = lbn;
		lastrabn = MIN(maxrabn, lbn + nmp->nm_readahead);
		if (rabn <= lastrabn) { /* start readaheads */
			error = nfs_buf_readahead(np, ioflag, &rabn, lastrabn, thd, cred);
			if (error) {
				nfs_data_unlock(np);
				FSDBG_BOT(514, np, 0xd1e000b, 1, error);
				return (error);
			}
			readaheads = 1;
		}

		OSAddAtomic64(1, &nfsstats.biocache_reads);
		/*
		 * If the block is in the cache and has the required data
		 * in a valid region, just copy it out.
		 * Otherwise, get the block and write back/read in,
		 * as required.
		 */
again:
		io_resid = uio_resid(uio);
		n = (io_resid > (biosize - on)) ? (biosize - on) : io_resid;
		diff = np->n_size - uio_offset(uio);
		if (diff < n)
			n = diff;

		error = nfs_buf_get(np, lbn, biosize, thd, NBLK_READ, &bp);
		if (error) {
			nfs_data_unlock(np);
			FSDBG_BOT(514, np, 0xd1e000c, 0, error);
			return (error);
		}

		if ((ioflag & IO_NOCACHE) && ISSET(bp->nb_flags, NB_CACHE)) {
			/*
			 * IO_NOCACHE found a cached buffer.
			 * Flush the buffer if it's dirty.
			 * Invalidate the data if it wasn't just read
			 * in as part of a "nocache readahead".
			 */
			if (bp->nb_dirty || (bp->nb_dirtyend > 0)) {
				/* so write the buffer out and try again */
				SET(bp->nb_flags, NB_NOCACHE);
				goto flushbuffer;
			}
			if (ISSET(bp->nb_flags, NB_NCRDAHEAD)) {
				CLR(bp->nb_flags, NB_NCRDAHEAD);
				SET(bp->nb_flags, NB_NOCACHE);
			}
		}
		/* if any pages are valid... */
		if (bp->nb_valid) {
			/* ...check for any invalid pages in the read range */
			int pg, firstpg, lastpg, dirtypg;
			dirtypg = firstpg = lastpg = -1;
			pg = on/PAGE_SIZE;
			while (pg <= (on + n - 1)/PAGE_SIZE) {
				if (!NBPGVALID(bp, pg)) {
					if (firstpg < 0)
						firstpg = pg;
					lastpg = pg;
				} else if (firstpg >= 0 && dirtypg < 0 && NBPGDIRTY(bp, pg))
					dirtypg = pg;
				pg++;
			}

			/* if there are no invalid pages, we're all set */
			if (firstpg < 0) {
				if (bp->nb_validoff < 0) {
					/* valid range isn't set up, so */
					/* set it to what we know is valid */
					bp->nb_validoff = trunc_page(on);
					bp->nb_validend = round_page(on+n);
					nfs_buf_normalize_valid_range(np, bp);
				}
				goto buffer_ready;
			}

			/* there are invalid pages in the read range */
			if (((dirtypg > firstpg) && (dirtypg < lastpg)) ||
			    (((firstpg*PAGE_SIZE) < bp->nb_dirtyend) && (((lastpg+1)*PAGE_SIZE) > bp->nb_dirtyoff))) {
				/* there are also dirty page(s) (or range) in the read range, */
				/* so write the buffer out and try again */
flushbuffer:
				CLR(bp->nb_flags, (NB_DONE | NB_ERROR | NB_INVAL));
				SET(bp->nb_flags, NB_ASYNC);
				if (!IS_VALID_CRED(bp->nb_wcred)) {
					kauth_cred_ref(cred);
					bp->nb_wcred = cred;
				}
				error = nfs_buf_write(bp);
				if (error) {
					nfs_data_unlock(np);
					FSDBG_BOT(514, np, 0xd1e000d, 0, error);
					return (error);
				}
				goto again;
			}
			if (!bp->nb_dirty && bp->nb_dirtyend <= 0 &&
			    (lastpg - firstpg + 1) > (biosize/PAGE_SIZE)/2) {
				/* we need to read in more than half the buffer and the */
				/* buffer's not dirty, so just fetch the whole buffer */
				bp->nb_valid = 0;
			} else {
				/* read the page range in */
				uio_t auio;
				char uio_buf[ UIO_SIZEOF(1) ];

				NFS_BUF_MAP(bp);
				auio = uio_createwithbuffer(1, (NBOFF(bp) + firstpg * PAGE_SIZE_64),
					UIO_SYSSPACE, UIO_READ, &uio_buf[0], sizeof(uio_buf));
				if (!auio) {
					error = ENOMEM;
				} else {
					uio_addiov(auio, CAST_USER_ADDR_T(bp->nb_data + (firstpg * PAGE_SIZE)),
						((lastpg - firstpg + 1) * PAGE_SIZE));
					error = nfs_read_rpc(np, auio, ctx);
				}
				if (error) {
					if (ioflag & IO_NOCACHE)
						SET(bp->nb_flags, NB_NOCACHE);
					nfs_buf_release(bp, 1);
					nfs_data_unlock(np);
					FSDBG_BOT(514, np, 0xd1e000e, 0, error);
					return (error);
				}
				/* Make sure that the valid range is set to cover this read. */
				bp->nb_validoff = trunc_page_32(on);
				bp->nb_validend = round_page_32(on+n);
				nfs_buf_normalize_valid_range(np, bp);
				if (uio_resid(auio) > 0) {
					/* if short read, must have hit EOF, */
					/* so zero the rest of the range */
					bzero(CAST_DOWN(caddr_t, uio_curriovbase(auio)), uio_resid(auio));
				}
				/* mark the pages (successfully read) as valid */
				for (pg = firstpg; pg <= lastpg; pg++)
					NBPGVALID_SET(bp, pg);
			}
		}
		/* if no pages are valid, read the whole block */
		if (!bp->nb_valid) {
			if (!IS_VALID_CRED(bp->nb_rcred) && IS_VALID_CRED(cred)) {
				kauth_cred_ref(cred);
				bp->nb_rcred = cred;
			}
			SET(bp->nb_flags, NB_READ);
			CLR(bp->nb_flags, (NB_DONE | NB_ERROR | NB_INVAL));
			error = nfs_buf_read(bp);
			if (ioflag & IO_NOCACHE)
				SET(bp->nb_flags, NB_NOCACHE);
			if (error) {
				nfs_data_unlock(np);
				nfs_buf_release(bp, 1);
				FSDBG_BOT(514, np, 0xd1e000f, 0, error);
				return (error);
			}
		}
buffer_ready:
		/* validate read range against valid range and clip */
		if (bp->nb_validend > 0) {
			diff = (on >= bp->nb_validend) ? 0 : (bp->nb_validend - on);
			if (diff < n)
				n = diff;
		}
		if (n > 0) {
			NFS_BUF_MAP(bp);
			error = uiomove(bp->nb_data + on, n, uio);
		}

		nfs_buf_release(bp, 1);
		nfs_data_unlock(np);
		nfs_node_lock_force(np);
		np->n_lastread = (uio_offset(uio) - 1) / biosize;
		nfs_node_unlock(np);
	} while (error == 0 && uio_resid(uio) > 0 && n > 0);

	FSDBG_BOT(514, np, uio_offset(uio), uio_resid(uio), error);
	return (error);
}
/*
 * limit the number of outstanding async I/O writes
 */
int
nfs_async_write_start(struct nfsmount *nmp)
{
	int error = 0, slpflag = NMFLAG(nmp, INTR) ? PCATCH : 0;
	struct timespec ts = {1, 0};

	if (nfs_max_async_writes <= 0)
		return (0);
	lck_mtx_lock(&nmp->nm_lock);
	while ((nfs_max_async_writes > 0) && (nmp->nm_asyncwrites >= nfs_max_async_writes)) {
		if ((error = nfs_sigintr(nmp, NULL, current_thread(), 1)))
			break;
		msleep(&nmp->nm_asyncwrites, &nmp->nm_lock, slpflag|(PZERO-1), "nfsasyncwrites", &ts);
		slpflag = 0;
	}
	if (!error)
		nmp->nm_asyncwrites++;
	lck_mtx_unlock(&nmp->nm_lock);
	return (error);
}

void
nfs_async_write_done(struct nfsmount *nmp)
{
	if (nmp->nm_asyncwrites <= 0)
		return;
	lck_mtx_lock(&nmp->nm_lock);
	if (nmp->nm_asyncwrites-- >= nfs_max_async_writes)
		wakeup(&nmp->nm_asyncwrites);
	lck_mtx_unlock(&nmp->nm_lock);
}
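/*
 * Usage sketch (illustrative only; the actual call sites appear in
 * nfs_buf_write_rpc() below): callers bracket each async write RPC
 * with the throttle, roughly
 *
 *	if (async && ((error = nfs_async_write_start(nmp))))
 *		break;
 *	error = nmp->nm_funcs->nf_write_rpc_async(...);
 *	if (error && async)
 *		nfs_async_write_done(nmp);
 *
 * so nm_asyncwrites stays bounded by nfs_max_async_writes, and a
 * failed send immediately releases the slot it reserved.
 */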
/*
 * write (or commit) the given NFS buffer
 *
 * Commit the buffer if we can.
 * Write out any dirty range.
 * If any dirty pages remain, write them out.
 *
 * For async requests, all the work beyond sending the initial
 * write RPC is handled in the RPC callback(s).
 */
int
nfs_buf_write(struct nfsbuf *bp)
{
	int error = 0, oldflags, async;
	nfsnode_t np;
	thread_t thd;
	kauth_cred_t cred;
	proc_t p = current_proc();
	int iomode, doff, dend, firstpg, lastpg;
	uint32_t pagemask;

	FSDBG_TOP(553, bp, NBOFF(bp), bp->nb_flags, 0);

	if (!ISSET(bp->nb_lflags, NBL_BUSY))
		panic("nfs_buf_write: buffer is not busy???");

	np = bp->nb_np;
	async = ISSET(bp->nb_flags, NB_ASYNC);
	oldflags = bp->nb_flags;

	CLR(bp->nb_flags, (NB_READ|NB_DONE|NB_ERROR|NB_DELWRI));
	if (ISSET(oldflags, NB_DELWRI)) {
		lck_mtx_lock(nfs_buf_mutex);
		nfs_nbdwrite--;
		NFSBUFCNTCHK();
		lck_mtx_unlock(nfs_buf_mutex);
		wakeup(&nfs_nbdwrite);
	}

	/* move to clean list */
	if (ISSET(oldflags, (NB_ASYNC|NB_DELWRI))) {
		lck_mtx_lock(nfs_buf_mutex);
		if (bp->nb_vnbufs.le_next != NFSNOLIST)
			LIST_REMOVE(bp, nb_vnbufs);
		LIST_INSERT_HEAD(&np->n_cleanblkhd, bp, nb_vnbufs);
		lck_mtx_unlock(nfs_buf_mutex);
	}
	nfs_node_lock_force(np);
	np->n_numoutput++;
	nfs_node_unlock(np);
	vnode_startwrite(NFSTOV(np));

	if (p && p->p_stats)
		OSIncrementAtomicLong(&p->p_stats->p_ru.ru_oublock);

	cred = bp->nb_wcred;
	if (!IS_VALID_CRED(cred) && ISSET(bp->nb_flags, NB_READ))
		cred = bp->nb_rcred;	/* shouldn't really happen, but... */
	if (IS_VALID_CRED(cred))
		kauth_cred_ref(cred);
	thd = async ? NULL : current_thread();

	/* We need to make sure the pages are locked before doing I/O. */
	if (!ISSET(bp->nb_flags, NB_META)) {
		if (UBCINFOEXISTS(NFSTOV(np))) {
			if (!ISSET(bp->nb_flags, NB_PAGELIST)) {
				error = nfs_buf_upl_setup(bp);
				if (error) {
					printf("nfs_buf_write: upl create failed %d\n", error);
					SET(bp->nb_flags, NB_ERROR);
					bp->nb_error = error = EIO;
					nfs_buf_iodone(bp);
					goto out;
				}
				nfs_buf_upl_check(bp);
			}
		} else {
			/* We should never be in nfs_buf_write() with no UBCINFO. */
			printf("nfs_buf_write: ubcinfo already gone\n");
			SET(bp->nb_flags, NB_ERROR);
			bp->nb_error = error = EIO;
			nfs_buf_iodone(bp);
			goto out;
		}
	}

	/* If NB_NEEDCOMMIT is set, a commit RPC may do the trick. */
	if (ISSET(bp->nb_flags, NB_NEEDCOMMIT))
		nfs_buf_check_write_verifier(np, bp);
	if (ISSET(bp->nb_flags, NB_NEEDCOMMIT)) {
		struct nfsmount *nmp = NFSTONMP(np);
		if (nfs_mount_gone(nmp)) {
			SET(bp->nb_flags, NB_ERROR);
			bp->nb_error = error = EIO;
			nfs_buf_iodone(bp);
			goto out;
		}
		SET(bp->nb_flags, NB_WRITEINPROG);
		error = nmp->nm_funcs->nf_commit_rpc(np, NBOFF(bp) + bp->nb_dirtyoff,
		    bp->nb_dirtyend - bp->nb_dirtyoff, bp->nb_wcred, bp->nb_verf);
		CLR(bp->nb_flags, NB_WRITEINPROG);
		if (error) {
			if (error != NFSERR_STALEWRITEVERF) {
				SET(bp->nb_flags, NB_ERROR);
				bp->nb_error = error;
			}
			nfs_buf_iodone(bp);
			goto out;
		}
		bp->nb_dirtyoff = bp->nb_dirtyend = 0;
		CLR(bp->nb_flags, NB_NEEDCOMMIT);
		nfs_node_lock_force(np);
		np->n_needcommitcnt--;
		CHECK_NEEDCOMMITCNT(np);
		nfs_node_unlock(np);
	}
	if (!error && (bp->nb_dirtyend > 0)) {
		/* sanity check the dirty range */
		if (NBOFF(bp) + bp->nb_dirtyend > (off_t) np->n_size) {
			bp->nb_dirtyend = np->n_size - NBOFF(bp);
			if (bp->nb_dirtyoff >= bp->nb_dirtyend)
				bp->nb_dirtyoff = bp->nb_dirtyend = 0;
		}
	}
	if (!error && (bp->nb_dirtyend > 0)) {
		/* there's a dirty range that needs to be written out */
		NFS_BUF_MAP(bp);

		doff = bp->nb_dirtyoff;
		dend = bp->nb_dirtyend;

		/* if doff page is dirty, move doff to start of page */
		if (NBPGDIRTY(bp, doff / PAGE_SIZE))
			doff -= doff & PAGE_MASK;
		/* try to expand write range to include preceding dirty pages */
		if (!(doff & PAGE_MASK))
			while ((doff > 0) && NBPGDIRTY(bp, (doff - 1) / PAGE_SIZE))
				doff -= PAGE_SIZE;
		/* if dend page is dirty, move dend to start of next page */
		if ((dend & PAGE_MASK) && NBPGDIRTY(bp, dend / PAGE_SIZE))
			dend = round_page_32(dend);
		/* try to expand write range to include trailing dirty pages */
		if (!(dend & PAGE_MASK))
			while ((dend < (int)bp->nb_bufsize) && NBPGDIRTY(bp, dend / PAGE_SIZE))
				dend += PAGE_SIZE;
		/* make sure to keep dend clipped to EOF */
		if ((NBOFF(bp) + dend) > (off_t) np->n_size)
			dend = np->n_size - NBOFF(bp);
		/* calculate range of complete pages being written */
		firstpg = round_page_32(doff) / PAGE_SIZE;
		lastpg = (trunc_page_32(dend) - 1) / PAGE_SIZE;
		/* calculate mask for that page range */
		pagemask = ((1 << (lastpg + 1)) - 1) & ~((1 << firstpg) - 1);
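		/*
		 * Worked example (illustrative values only): with 4K pages,
		 * doff = 0x1800 and dend = 0x4000 give firstpg = 2 and
		 * lastpg = 3, so pagemask = ((1 << 4) - 1) & ~((1 << 2) - 1)
		 * = 0xf & ~0x3 = 0xc, i.e. bits set for pages 2 and 3 --
		 * exactly the pages the dirty range covers completely.
		 */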
		/*
		 * compare page mask to nb_dirty; if there are other dirty pages
		 * then write FILESYNC; otherwise, write UNSTABLE if async and
		 * not needcommit/stable; otherwise write FILESYNC
		 */
		if (bp->nb_dirty & ~pagemask)
			iomode = NFS_WRITE_FILESYNC;
		else if ((bp->nb_flags & (NB_ASYNC | NB_NEEDCOMMIT | NB_STABLE)) == NB_ASYNC)
			iomode = NFS_WRITE_UNSTABLE;
		else
			iomode = NFS_WRITE_FILESYNC;

		/* write the whole contiguous dirty range */
		bp->nb_offio = doff;
		bp->nb_endio = dend;

		OSAddAtomic64(1, &nfsstats.write_bios);

		SET(bp->nb_flags, NB_WRITEINPROG);
		error = nfs_buf_write_rpc(bp, iomode, thd, cred);
		/*
		 * For async I/O, the callbacks will finish up the
		 * write and push out any dirty pages.  Otherwise,
		 * the write has already been finished and any dirty
		 * pages pushed out.
		 */
	} else {
		if (!error && bp->nb_dirty) /* write out any dirty pages */
			error = nfs_buf_write_dirty_pages(bp, thd, cred);
		nfs_buf_iodone(bp);
	}
	/* note: bp is still valid only for !async case */
out:
	if (!error && !async) {
		error = nfs_buf_iowait(bp);
		/* move to clean list */
		if (oldflags & NB_DELWRI) {
			lck_mtx_lock(nfs_buf_mutex);
			if (bp->nb_vnbufs.le_next != NFSNOLIST)
				LIST_REMOVE(bp, nb_vnbufs);
			LIST_INSERT_HEAD(&np->n_cleanblkhd, bp, nb_vnbufs);
			lck_mtx_unlock(nfs_buf_mutex);
		}
		FSDBG_BOT(553, bp, NBOFF(bp), bp->nb_flags, error);
		nfs_buf_release(bp, 1);
		/* check if we need to invalidate (and we can) */
		if ((np->n_flag & NNEEDINVALIDATE) &&
		    !(np->n_bflag & (NBINVALINPROG|NBFLUSHINPROG))) {
			int invalidate = 0;
			nfs_node_lock_force(np);
			if (np->n_flag & NNEEDINVALIDATE) {
				invalidate = 1;
				np->n_flag &= ~NNEEDINVALIDATE;
			}
			nfs_node_unlock(np);
			if (invalidate) {
				/*
				 * There was a write error and we need to
				 * invalidate attrs and flush buffers in
				 * order to sync up with the server.
				 * (if this write was extending the file,
				 * we may no longer know the correct size)
				 *
				 * But we couldn't call vinvalbuf while holding
				 * the buffer busy.  So we call vinvalbuf() after
				 * releasing the buffer.
				 */
				nfs_vinvalbuf2(NFSTOV(np), V_SAVE|V_IGNORE_WRITEERR, thd, cred, 1);
			}
		}
	}

	if (IS_VALID_CRED(cred))
		kauth_cred_unref(&cred);
	return (error);
}
/*
 * finish the writing of a buffer
 */
void
nfs_buf_write_finish(struct nfsbuf *bp, thread_t thd, kauth_cred_t cred)
{
	nfsnode_t np = bp->nb_np;
	int error = (bp->nb_flags & NB_ERROR) ? bp->nb_error : 0;
	int firstpg, lastpg;
	uint32_t pagemask;

	if ((error == EINTR) || (error == ERESTART)) {
		CLR(bp->nb_flags, NB_ERROR);
		SET(bp->nb_flags, NB_EINTR);
	}

	if (!error) {
		/* calculate range of complete pages being written */
		firstpg = round_page_32(bp->nb_offio) / PAGE_SIZE;
		lastpg = (trunc_page_32(bp->nb_endio) - 1) / PAGE_SIZE;
		/* calculate mask for that page range written */
		pagemask = ((1 << (lastpg + 1)) - 1) & ~((1 << firstpg) - 1);
		/* clear dirty bits for pages we've written */
		bp->nb_dirty &= ~pagemask;
	}

	/* manage needcommit state */
	if (!error && (bp->nb_commitlevel == NFS_WRITE_UNSTABLE)) {
		if (!ISSET(bp->nb_flags, NB_NEEDCOMMIT)) {
			nfs_node_lock_force(np);
			np->n_needcommitcnt++;
			nfs_node_unlock(np);
			SET(bp->nb_flags, NB_NEEDCOMMIT);
		}
		/* make sure nb_dirtyoff/nb_dirtyend reflect actual range written */
		bp->nb_dirtyoff = bp->nb_offio;
		bp->nb_dirtyend = bp->nb_endio;
	} else if (ISSET(bp->nb_flags, NB_NEEDCOMMIT)) {
		nfs_node_lock_force(np);
		np->n_needcommitcnt--;
		CHECK_NEEDCOMMITCNT(np);
		nfs_node_unlock(np);
		CLR(bp->nb_flags, NB_NEEDCOMMIT);
	}

	CLR(bp->nb_flags, NB_WRITEINPROG);

	/*
	 * For an unstable write, the buffer is still treated as dirty until
	 * a commit (or stable (re)write) is performed.  Buffers needing only
	 * a commit are marked with the NB_DELWRI and NB_NEEDCOMMIT flags.
	 *
	 * If the write was interrupted we set NB_EINTR.  Don't set NB_ERROR
	 * because that would cause the buffer to be dropped.  The buffer is
	 * still valid and simply needs to be written again.
	 */
	if ((error == EINTR) || (error == ERESTART) || (!error && (bp->nb_flags & NB_NEEDCOMMIT))) {
		CLR(bp->nb_flags, NB_INVAL);
		if (!ISSET(bp->nb_flags, NB_DELWRI)) {
			SET(bp->nb_flags, NB_DELWRI);
			lck_mtx_lock(nfs_buf_mutex);
			nfs_nbdwrite++;
			NFSBUFCNTCHK();
			lck_mtx_unlock(nfs_buf_mutex);
		}
		/*
		 * Since for the NB_ASYNC case, we've reassigned the buffer to the
		 * clean list, we have to reassign it back to the dirty one. Ugh.
		 */
		if (ISSET(bp->nb_flags, NB_ASYNC)) {
			/* move to dirty list */
			lck_mtx_lock(nfs_buf_mutex);
			if (bp->nb_vnbufs.le_next != NFSNOLIST)
				LIST_REMOVE(bp, nb_vnbufs);
			LIST_INSERT_HEAD(&np->n_dirtyblkhd, bp, nb_vnbufs);
			lck_mtx_unlock(nfs_buf_mutex);
		}
	} else {
		/* either there's an error or we don't need to commit */
		if (error) {
			/*
			 * There was a write error and we need to invalidate
			 * attrs and flush buffers in order to sync up with the
			 * server.  (if this write was extending the file, we
			 * may no longer know the correct size)
			 *
			 * But we can't call vinvalbuf while holding this
			 * buffer busy.  Set a flag to do it after releasing
			 * the buffer.
			 */
			nfs_node_lock_force(np);
			np->n_error = error;
			np->n_flag |= (NWRITEERR | NNEEDINVALIDATE);
			NATTRINVALIDATE(np);
			nfs_node_unlock(np);
		}
		/* clear the dirty range */
		bp->nb_dirtyoff = bp->nb_dirtyend = 0;
	}

	if (!error && bp->nb_dirty)
		nfs_buf_write_dirty_pages(bp, thd, cred);
	nfs_buf_iodone(bp);
}
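/*
 * Lifecycle sketch (restating the surrounding code, illustrative):
 * an async buffer typically goes
 *
 *	nfs_buf_write(bp)                 iomode = NFS_WRITE_UNSTABLE
 *	  -> nfs_buf_write_rpc(bp, ...)   WRITE RPC(s) sent
 *	  -> nfs_buf_write_finish(bp)     NB_DELWRI|NB_NEEDCOMMIT set
 *	  ...
 *	nfs_flushcommits(np, ...)         COMMIT RPC covers dirty range
 *
 * so the data reaches the server quickly but only becomes stable
 * (and the buffer truly clean) once a commit succeeds under the
 * same write verifier.
 */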
/*
 * write out any pages marked dirty in a buffer
 *
 * We do use unstable writes and follow up with a commit.
 * If we catch the write verifier changing, we'll restart
 * and do the writes filesync.
 */
int
nfs_buf_write_dirty_pages(struct nfsbuf *bp, thread_t thd, kauth_cred_t cred)
{
	nfsnode_t np = bp->nb_np;
	struct nfsmount *nmp = NFSTONMP(np);
	int error = 0, commit, iomode, iomode2, len, pg, count, npages, off;
	uint32_t dirty = bp->nb_dirty;
	uint64_t wverf;
	uio_t auio;
	char uio_buf [ UIO_SIZEOF(1) ];

	if (!bp->nb_dirty)
		return (0);

	/* there are pages marked dirty that need to be written out */
	OSAddAtomic64(1, &nfsstats.write_bios);
	NFS_BUF_MAP(bp);
	SET(bp->nb_flags, NB_WRITEINPROG);
	npages = bp->nb_bufsize / PAGE_SIZE;
	iomode = NFS_WRITE_UNSTABLE;

	auio = uio_createwithbuffer(1, 0, UIO_SYSSPACE, UIO_WRITE,
	    &uio_buf, sizeof(uio_buf));

again:
	dirty = bp->nb_dirty;
	wverf = bp->nb_verf;
	commit = NFS_WRITE_FILESYNC;
	for (pg = 0; pg < npages; pg++) {
		if (!NBPGDIRTY(bp, pg))
			continue;
		count = 1;
		while (((pg + count) < npages) && NBPGDIRTY(bp, pg + count))
			count++;
		/* write count pages starting with page pg */
		off = pg * PAGE_SIZE;
		len = count * PAGE_SIZE;
		/* clip writes to EOF */
		if (NBOFF(bp) + off + len > (off_t) np->n_size)
			len -= (NBOFF(bp) + off + len) - np->n_size;
		if (len > 0) {
			iomode2 = iomode;
			uio_reset(auio, NBOFF(bp) + off, UIO_SYSSPACE, UIO_WRITE);
			uio_addiov(auio, CAST_USER_ADDR_T(bp->nb_data + off), len);
			error = nfs_write_rpc2(np, auio, thd, cred, &iomode2, &bp->nb_verf);
			if (error)
				break;
			if (iomode2 < commit) /* Retain the lowest commitment level returned. */
				commit = iomode2;
			if ((commit != NFS_WRITE_FILESYNC) && (wverf != bp->nb_verf)) {
				/* verifier changed, redo all the writes filesync */
				iomode = NFS_WRITE_FILESYNC;
				goto again;
			}
		}
		/* clear dirty bits */
		while (count--) {
			dirty &= ~(1 << pg);
			if (count) /* leave pg on last page */
				pg++;
		}
	}
	CLR(bp->nb_flags, NB_WRITEINPROG);

	if (!error && (commit != NFS_WRITE_FILESYNC)) {
		error = nmp->nm_funcs->nf_commit_rpc(np, NBOFF(bp), bp->nb_bufsize, cred, wverf);
		if (error == NFSERR_STALEWRITEVERF) {
			/* verifier changed, so we need to restart all the writes */
			iomode = NFS_WRITE_FILESYNC;
			goto again;
		}
	}
	if (!error) {
		bp->nb_dirty = dirty;
	} else {
		SET(bp->nb_flags, NB_ERROR);
		bp->nb_error = error;
	}
	return (error);
}
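/*
 * Example run of the page loop above (illustrative values only):
 * for an 8-page buffer with nb_dirty = 0b01101100, the loop issues
 * one UNSTABLE write for pages 2-3 and another for pages 5-6, then
 * a single commit for the whole buffer range.  If either reply
 * carries a new write verifier, everything is redone FILESYNC.
 */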
/*
 * initiate the NFS WRITE RPC(s) for a buffer
 */
int
nfs_buf_write_rpc(struct nfsbuf *bp, int iomode, thread_t thd, kauth_cred_t cred)
{
	struct nfsmount *nmp;
	nfsnode_t np = bp->nb_np;
	int error = 0, nfsvers, async;
	int64_t nrpcs;
	uint32_t nmwsize, length, len;
	struct nfsreq *req;
	struct nfsreq_cbinfo cb;
	uio_t auio;
	char uio_buf [ UIO_SIZEOF(1) ];
	off_t offset;

	nmp = NFSTONMP(np);
	if (nfs_mount_gone(nmp)) {
		bp->nb_error = error = ENXIO;
		SET(bp->nb_flags, NB_ERROR);
		nfs_buf_iodone(bp);
		return (error);
	}
	nfsvers = nmp->nm_vers;
	nmwsize = nmp->nm_wsize;

	offset = bp->nb_offio;
	length = bp->nb_endio - bp->nb_offio;

	/* Note: Can only do async I/O if nfsiods are configured. */
	async = (bp->nb_flags & NB_ASYNC) && (NFSIOD_MAX > 0);
	bp->nb_commitlevel = NFS_WRITE_FILESYNC;
	cb.rcb_func = async ? nfs_buf_write_rpc_finish : NULL;
	cb.rcb_bp = bp;

	if ((nfsvers == NFS_VER2) && ((NBOFF(bp) + bp->nb_endio) > 0xffffffffLL)) {
		bp->nb_error = error = EFBIG;
		SET(bp->nb_flags, NB_ERROR);
		nfs_buf_iodone(bp);
		return (error);
	}

	auio = uio_createwithbuffer(1, NBOFF(bp) + offset, UIO_SYSSPACE,
	    UIO_WRITE, &uio_buf, sizeof(uio_buf));
	uio_addiov(auio, CAST_USER_ADDR_T(bp->nb_data + offset), length);

	bp->nb_rpcs = nrpcs = (length + nmwsize - 1) / nmwsize;
	if (async && (nrpcs > 1)) {
		SET(bp->nb_flags, NB_MULTASYNCRPC);
	} else {
		CLR(bp->nb_flags, NB_MULTASYNCRPC);
	}

	while (length > 0) {
		if (ISSET(bp->nb_flags, NB_ERROR)) {
			error = bp->nb_error;
			break;
		}
		len = (length > nmwsize) ? nmwsize : length;
		cb.rcb_args[0] = offset;
		cb.rcb_args[1] = len;
		if (nmp->nm_vers >= NFS_VER4)
			cb.rcb_args[2] = nmp->nm_stategenid;
		if (async && ((error = nfs_async_write_start(nmp))))
			break;
		req = NULL;
		error = nmp->nm_funcs->nf_write_rpc_async(np, auio, len, thd, cred,
		    iomode, &cb, &req);
		if (error) {
			if (async)
				nfs_async_write_done(nmp);
			break;
		}
		offset += len;
		length -= len;
		if (async)
			continue;
		nfs_buf_write_rpc_finish(req);
	}

	if (length > 0) {
		/*
		 * Something bad happened while trying to send the RPCs.
		 * Wait for any outstanding requests to complete.
		 */
		bp->nb_error = error;
		SET(bp->nb_flags, NB_ERROR);
		if (ISSET(bp->nb_flags, NB_MULTASYNCRPC)) {
			nrpcs = (length + nmwsize - 1) / nmwsize;
			lck_mtx_lock(nfs_buf_mutex);
			bp->nb_rpcs -= nrpcs;
			if (bp->nb_rpcs == 0) {
				/* No RPCs left, so the buffer's done */
				lck_mtx_unlock(nfs_buf_mutex);
				nfs_buf_write_finish(bp, thd, cred);
			} else {
				/* wait for the last RPC to mark it done */
				while (bp->nb_rpcs > 0)
					msleep(&bp->nb_rpcs, nfs_buf_mutex, 0,
					    "nfs_buf_write_rpc_cancel", NULL);
				lck_mtx_unlock(nfs_buf_mutex);
			}
		} else {
			nfs_buf_write_finish(bp, thd, cred);
		}
		/* It may have just been an interrupt... that's OK */
		if (!ISSET(bp->nb_flags, NB_ERROR))
			error = 0;
	}

	return (error);
}
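/*
 * Sizing example (illustrative values only): with nb_offio = 0,
 * nb_endio = 96K and nm_wsize = 32K, nb_rpcs = (96K + 32K - 1)/32K
 * = 3, so three WRITE RPCs are sent and NB_MULTASYNCRPC is set for
 * the async case; the last callback to decrement nb_rpcs to zero
 * is the one that runs nfs_buf_write_finish().
 */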
/*
 * finish up an NFS WRITE RPC on a buffer
 */
void
nfs_buf_write_rpc_finish(struct nfsreq *req)
{
	int error = 0, nfsvers, offset, length, multasyncrpc, finished;
	int committed = NFS_WRITE_FILESYNC;
	uint64_t wverf = 0;
	size_t rlen;
	void *wakeme = NULL;
	struct nfsreq_cbinfo cb;
	struct nfsreq *wreq = NULL;
	struct nfsbuf *bp;
	struct nfsmount *nmp;
	nfsnode_t np;
	thread_t thd;
	kauth_cred_t cred;
	uio_t auio;
	char uio_buf [ UIO_SIZEOF(1) ];

finish:
	np = req->r_np;
	thd = req->r_thread;
	cred = req->r_cred;
	if (IS_VALID_CRED(cred))
		kauth_cred_ref(cred);
	cb = req->r_callback;
	bp = cb.rcb_bp;
	if (cb.rcb_func) /* take an extra reference on the nfsreq in case we want to resend it later due to grace error */
		nfs_request_ref(req, 0);

	nmp = NFSTONMP(np);
	if (nfs_mount_gone(nmp)) {
		SET(bp->nb_flags, NB_ERROR);
		bp->nb_error = error = ENXIO;
	}
	if (error || ISSET(bp->nb_flags, NB_ERROR)) {
		/* just skip processing the RPC */
		nfs_request_async_cancel(req);
		goto out;
	}
	nfsvers = nmp->nm_vers;

	offset = cb.rcb_args[0];
	rlen = length = cb.rcb_args[1];

	/* finish the RPC */
	error = nmp->nm_funcs->nf_write_rpc_async_finish(np, req, &committed, &rlen, &wverf);
	if ((error == EINPROGRESS) && cb.rcb_func) {
		/* async request restarted */
		if (cb.rcb_func)
			nfs_request_rele(req);
		if (IS_VALID_CRED(cred))
			kauth_cred_unref(&cred);
		return;
	}
	if ((nmp->nm_vers >= NFS_VER4) && nfs_mount_state_error_should_restart(error) && !ISSET(bp->nb_flags, NB_ERROR)) {
		lck_mtx_lock(&nmp->nm_lock);
		if ((error != NFSERR_OLD_STATEID) && (error != NFSERR_GRACE) && (cb.rcb_args[2] == nmp->nm_stategenid)) {
			NP(np, "nfs_buf_write_rpc_finish: error %d @ 0x%llx, 0x%x 0x%x, initiating recovery",
			    error, NBOFF(bp)+offset, cb.rcb_args[2], nmp->nm_stategenid);
			nfs_need_recover(nmp, error);
		}
		lck_mtx_unlock(&nmp->nm_lock);
		if (np->n_flag & NREVOKE) {
			error = EIO;
		} else {
			if (error == NFSERR_GRACE) {
				if (cb.rcb_func) {
					/*
					 * For an async I/O request, handle a grace delay just like
					 * jukebox errors.  Set the resend time and queue it up.
					 */
					struct timeval now;
					if (req->r_nmrep.nmc_mhead) {
						mbuf_freem(req->r_nmrep.nmc_mhead);
						req->r_nmrep.nmc_mhead = NULL;
					}
					microuptime(&now);
					lck_mtx_lock(&req->r_mtx);
					req->r_resendtime = now.tv_sec + 2;
					req->r_xid = 0;		// get a new XID
					req->r_flags |= R_RESTART;
					req->r_start = 0;
					nfs_asyncio_resend(req);
					lck_mtx_unlock(&req->r_mtx);
					if (IS_VALID_CRED(cred))
						kauth_cred_unref(&cred);
					/* Note: nfsreq reference taken will be dropped later when finished */
					return;
				}
				/* otherwise, just pause a couple seconds and retry */
				tsleep(&nmp->nm_state, (PZERO-1), "nfsgrace", 2*hz);
			}
			if (!(error = nfs_mount_state_wait_for_recovery(nmp))) {
				rlen = 0;
				goto writeagain;
			}
		}
		SET(bp->nb_flags, NB_ERROR);
		bp->nb_error = error;
	}
	if (error || (nfsvers == NFS_VER2))
		goto out;
	if (rlen <= 0) {
		SET(bp->nb_flags, NB_ERROR);
		bp->nb_error = error = EIO;
		goto out;
	}

	/* save lowest commit level returned */
	if (committed < bp->nb_commitlevel)
		bp->nb_commitlevel = committed;

	/* check the write verifier */
	if (!bp->nb_verf) {
		bp->nb_verf = wverf;
	} else if (bp->nb_verf != wverf) {
		/* verifier changed, so buffer will need to be rewritten */
		bp->nb_flags |= NB_STALEWVERF;
		bp->nb_commitlevel = NFS_WRITE_UNSTABLE;
		bp->nb_verf = wverf;
	}
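	/*
	 * Verifier note (illustrative): the write verifier is an opaque
	 * value the server changes when it reboots.  If an earlier reply
	 * carried verifier 0xAAAA and this one carries 0xBBBB, any
	 * uncommitted data may have been lost on the server, so
	 * NB_STALEWVERF forces the buffer to be rewritten rather than
	 * merely committed.
	 */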
	/*
	 * check for a short write
	 *
	 * If the server didn't write all the data, then we
	 * need to issue another write for the rest of it.
	 * (Don't bother if the buffer hit an error or stale wverf.)
	 */
	if (((int)rlen < length) && !(bp->nb_flags & (NB_STALEWVERF|NB_ERROR))) {
writeagain:
		offset += rlen;
		length -= rlen;

		auio = uio_createwithbuffer(1, NBOFF(bp) + offset, UIO_SYSSPACE,
		    UIO_WRITE, &uio_buf, sizeof(uio_buf));
		uio_addiov(auio, CAST_USER_ADDR_T(bp->nb_data + offset), length);

		cb.rcb_args[0] = offset;
		cb.rcb_args[1] = length;
		if (nmp->nm_vers >= NFS_VER4)
			cb.rcb_args[2] = nmp->nm_stategenid;

		// XXX iomode should really match the original request
		error = nmp->nm_funcs->nf_write_rpc_async(np, auio, length, thd, cred,
		    NFS_WRITE_FILESYNC, &cb, &wreq);
		if (!error) {
			if (IS_VALID_CRED(cred))
				kauth_cred_unref(&cred);
			if (!cb.rcb_func) {
				/* if !async we'll need to wait for this RPC to finish */
				req = wreq;
				wreq = NULL;
				goto finish;
			}
			nfs_request_rele(req);
			/*
			 * We're done here.
			 * Outstanding RPC count is unchanged.
			 * Callback will be called when RPC is done.
			 */
			return;
		}
		SET(bp->nb_flags, NB_ERROR);
		bp->nb_error = error;
	}

out:
	if (cb.rcb_func) {
		nfs_async_write_done(nmp);
		nfs_request_rele(req);
	}
	/*
	 * Decrement outstanding RPC count on buffer
	 * and call nfs_buf_write_finish on last RPC.
	 *
	 * (Note: when there are multiple async RPCs issued for a
	 * buffer we need nfs_buffer_mutex to avoid problems when
	 * aborting a partially-initiated set of RPCs)
	 */
	multasyncrpc = ISSET(bp->nb_flags, NB_MULTASYNCRPC);
	if (multasyncrpc)
		lck_mtx_lock(nfs_buf_mutex);

	bp->nb_rpcs--;
	finished = (bp->nb_rpcs == 0);

	if (multasyncrpc)
		lck_mtx_unlock(nfs_buf_mutex);

	if (finished) {
		if (multasyncrpc)
			wakeme = &bp->nb_rpcs;
		nfs_buf_write_finish(bp, thd, cred);
		if (wakeme)
			wakeup(wakeme);
	}

	if (IS_VALID_CRED(cred))
		kauth_cred_unref(&cred);
}
/*
 * Send commit(s) for the given node's "needcommit" buffers
 */
int
nfs_flushcommits(nfsnode_t np, int nowait)
{
	struct nfsmount *nmp;
	struct nfsbuf *bp, *prevlbp, *lbp;
	struct nfsbuflists blist, commitlist;
	int error = 0, retv, wcred_set, flags, dirty;
	u_quad_t off, endoff, toff;
	uint64_t wverf;
	u_int32_t count;
	kauth_cred_t wcred = NULL;

	FSDBG_TOP(557, np, 0, 0, 0);

	/*
	 * A nb_flags == (NB_DELWRI | NB_NEEDCOMMIT) block has been written to the
	 * server, but has not been committed to stable storage on the server
	 * yet. The byte range is worked out for as many nfsbufs as we can handle
	 * and the commit rpc is done.
	 */
	if (!LIST_EMPTY(&np->n_dirtyblkhd)) {
		error = nfs_node_lock(np);
		if (error)
			goto done;
		np->n_flag |= NMODIFIED;
		nfs_node_unlock(np);
	}

	off = (u_quad_t)-1;
	endoff = 0;
	wcred_set = 0;
	LIST_INIT(&commitlist);

	nmp = NFSTONMP(np);
	if (nfs_mount_gone(nmp)) {
		error = ENXIO;
		goto done;
	}
	if (nmp->nm_vers == NFS_VER2) {
		error = EINVAL;
		goto done;
	}

	flags = NBI_DIRTY;
	if (nowait)
		flags |= NBI_NOWAIT;
	lck_mtx_lock(nfs_buf_mutex);
	wverf = nmp->nm_verf;
	if (!nfs_buf_iterprepare(np, &blist, flags)) {
		while ((bp = LIST_FIRST(&blist))) {
			LIST_REMOVE(bp, nb_vnbufs);
			LIST_INSERT_HEAD(&np->n_dirtyblkhd, bp, nb_vnbufs);
			error = nfs_buf_acquire(bp, NBAC_NOWAIT, 0, 0);
			if (error)
				continue;
			if (ISSET(bp->nb_flags, NB_NEEDCOMMIT))
				nfs_buf_check_write_verifier(np, bp);
			if (((bp->nb_flags & (NB_DELWRI | NB_NEEDCOMMIT)) != (NB_DELWRI | NB_NEEDCOMMIT)) ||
			    (bp->nb_verf != wverf)) {
				nfs_buf_drop(bp);
				continue;
			}
			nfs_buf_remfree(bp);

			/* buffer UPLs will be grabbed *in order* below */

			FSDBG(557, bp, bp->nb_flags, bp->nb_valid, bp->nb_dirty);
			FSDBG(557, bp->nb_validoff, bp->nb_validend,
			    bp->nb_dirtyoff, bp->nb_dirtyend);

			/*
			 * Work out if all buffers are using the same cred
			 * so we can deal with them all with one commit.
			 *
			 * Note: creds in bp's must be obtained by kauth_cred_ref
			 * on the same original cred in order for them to be equal.
			 */
			if (wcred_set == 0) {
				wcred = bp->nb_wcred;
				if (!IS_VALID_CRED(wcred))
					panic("nfs: needcommit w/out wcred");
				wcred_set = 1;
			} else if ((wcred_set == 1) && wcred != bp->nb_wcred) {
				wcred_set = -1;
			}
			SET(bp->nb_flags, NB_WRITEINPROG);

			/*
			 * Add this buffer to the list of buffers we are committing.
			 * Buffers are inserted into the list in ascending order so that
			 * we can take the UPLs in order after the list is complete.
			 */
			prevlbp = NULL;
			LIST_FOREACH(lbp, &commitlist, nb_vnbufs) {
				if (bp->nb_lblkno < lbp->nb_lblkno)
					break;
				prevlbp = lbp;
			}
			LIST_REMOVE(bp, nb_vnbufs);
			if (prevlbp)
				LIST_INSERT_AFTER(prevlbp, bp, nb_vnbufs);
			else
				LIST_INSERT_HEAD(&commitlist, bp, nb_vnbufs);

			/* update commit range start, end */
			toff = NBOFF(bp) + bp->nb_dirtyoff;
			if (toff < off)
				off = toff;
			toff += (u_quad_t)(bp->nb_dirtyend - bp->nb_dirtyoff);
			if (toff > endoff)
				endoff = toff;
		}
		nfs_buf_itercomplete(np, &blist, NBI_DIRTY);
	}
	lck_mtx_unlock(nfs_buf_mutex);

	if (LIST_EMPTY(&commitlist)) {
		error = ENOBUFS;
		goto done;
	}

	/*
	 * We need a UPL to prevent others from accessing the buffers during
	 * our commit RPC(s).
	 *
	 * We used to also check for dirty pages here; if there were any we'd
	 * abort the commit and force the entire buffer to be written again.
	 * Instead of doing that, we just go ahead and commit the dirty range,
	 * and then leave the buffer around with dirty pages that will be
	 * written out later.
	 */
	LIST_FOREACH(bp, &commitlist, nb_vnbufs) {
		if (!ISSET(bp->nb_flags, NB_PAGELIST)) {
			retv = nfs_buf_upl_setup(bp);
			if (retv) {
				/* Unable to create the UPL, the VM object probably no longer exists. */
				printf("nfs_flushcommits: upl create failed %d\n", retv);
				bp->nb_valid = bp->nb_dirty = 0;
			}
		}
		nfs_buf_upl_check(bp);
	}

	/*
	 * Commit data on the server, as required.
	 * If all bufs are using the same wcred, then use that with
	 * one call for all of them, otherwise commit each one
	 * separately.
	 */
	if (wcred_set == 1) {
		/*
		 * Note, it's possible the commit range could be >2^32-1.
		 * If it is, we'll send one commit that covers the whole file.
		 */
		if ((endoff - off) > 0xffffffff)
			count = 0;
		else
			count = (endoff - off);
		retv = nmp->nm_funcs->nf_commit_rpc(np, off, count, wcred, wverf);
	} else {
		retv = 0;
		LIST_FOREACH(bp, &commitlist, nb_vnbufs) {
			toff = NBOFF(bp) + bp->nb_dirtyoff;
			count = bp->nb_dirtyend - bp->nb_dirtyoff;
			retv = nmp->nm_funcs->nf_commit_rpc(np, toff, count, bp->nb_wcred, wverf);
			if (retv)
				break;
		}
	}

	/*
	 * Now, either mark the blocks I/O done or mark the
	 * blocks dirty, depending on whether the commit
	 * succeeded.
	 */
	while ((bp = LIST_FIRST(&commitlist))) {
		LIST_REMOVE(bp, nb_vnbufs);
		FSDBG(557, bp, retv, bp->nb_flags, bp->nb_dirty);
		nfs_node_lock_force(np);
		CLR(bp->nb_flags, (NB_NEEDCOMMIT | NB_WRITEINPROG));
		np->n_needcommitcnt--;
		CHECK_NEEDCOMMITCNT(np);
		nfs_node_unlock(np);

		if (retv) {
			/* move back to dirty list */
			lck_mtx_lock(nfs_buf_mutex);
			LIST_INSERT_HEAD(&np->n_dirtyblkhd, bp, nb_vnbufs);
			lck_mtx_unlock(nfs_buf_mutex);
			nfs_buf_release(bp, 1);
			continue;
		}

		nfs_node_lock_force(np);
		np->n_numoutput++;
		nfs_node_unlock(np);
		vnode_startwrite(NFSTOV(np));
		if (ISSET(bp->nb_flags, NB_DELWRI)) {
			lck_mtx_lock(nfs_buf_mutex);
			nfs_nbdwrite--;
			NFSBUFCNTCHK();
			lck_mtx_unlock(nfs_buf_mutex);
			wakeup(&nfs_nbdwrite);
		}
		CLR(bp->nb_flags, (NB_READ|NB_DONE|NB_ERROR|NB_DELWRI));
		/* if block still has dirty pages, we don't want it to */
		/* be released in nfs_buf_iodone().  So, don't set NB_ASYNC. */
		if (!(dirty = bp->nb_dirty))
			SET(bp->nb_flags, NB_ASYNC);
		else
			CLR(bp->nb_flags, NB_ASYNC);

		/* move to clean list */
		lck_mtx_lock(nfs_buf_mutex);
		LIST_INSERT_HEAD(&np->n_cleanblkhd, bp, nb_vnbufs);
		lck_mtx_unlock(nfs_buf_mutex);

		bp->nb_dirtyoff = bp->nb_dirtyend = 0;

		nfs_buf_iodone(bp);
		if (dirty) {
			/* throw it back in as a delayed write buffer */
			CLR(bp->nb_flags, NB_DONE);
			nfs_buf_write_delayed(bp);
		}
	}

done:
	FSDBG_BOT(557, np, 0, 0, error);
	return (error);
}
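/*
 * Coalescing example (illustrative values only): three needcommit
 * buffers with dirty ranges [4K,8K), [64K,96K) and [128K,132K) and
 * a common wcred produce a single COMMIT with off = 4K and
 * count = endoff - off = 128K.  Had the range exceeded 2^32-1,
 * count = 0 would ask the server to commit the whole file instead.
 */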
/*
 * Flush all the blocks associated with a vnode.
 * Walk through the buffer pool and push any dirty pages
 * associated with the vnode.
 */
int
nfs_flush(nfsnode_t np, int waitfor, thread_t thd, int ignore_writeerr)
{
	struct nfsbuf *bp;
	struct nfsbuflists blist;
	struct nfsmount *nmp = NFSTONMP(np);
	int error = 0, error2, slptimeo = 0, slpflag = 0;
	int nfsvers, flags, passone = 1;

	FSDBG_TOP(517, np, waitfor, ignore_writeerr, 0);

	if (nfs_mount_gone(nmp)) {
		error = ENXIO;
		goto out;
	}
	nfsvers = nmp->nm_vers;
	if (NMFLAG(nmp, INTR))
		slpflag = PCATCH;

	if (!LIST_EMPTY(&np->n_dirtyblkhd)) {
		nfs_node_lock_force(np);
		np->n_flag |= NMODIFIED;
		nfs_node_unlock(np);
	}

	lck_mtx_lock(nfs_buf_mutex);
	while (np->n_bflag & NBFLUSHINPROG) {
		np->n_bflag |= NBFLUSHWANT;
		error = msleep(&np->n_bflag, nfs_buf_mutex, slpflag, "nfs_flush", NULL);
		if ((error && (error != EWOULDBLOCK)) ||
		    ((error = nfs_sigintr(NFSTONMP(np), NULL, thd, 0)))) {
			lck_mtx_unlock(nfs_buf_mutex);
			goto out;
		}
	}
	np->n_bflag |= NBFLUSHINPROG;

	/*
	 * On the first pass, start async/unstable writes on all
	 * delayed write buffers.  Then wait for all writes to complete
	 * and call nfs_flushcommits() to commit any uncommitted buffers.
	 * On all subsequent passes, start STABLE writes on any remaining
	 * dirty buffers.  Then wait for all writes to complete.
	 */
again:
	FSDBG(518, LIST_FIRST(&np->n_dirtyblkhd), np->n_flag, 0, 0);
	if (!NFSTONMP(np)) {
		lck_mtx_unlock(nfs_buf_mutex);
		error = ENXIO;
		goto done;
	}

	/* Start/do any write(s) that are required. */
	if (!nfs_buf_iterprepare(np, &blist, NBI_DIRTY)) {
		while ((bp = LIST_FIRST(&blist))) {
			LIST_REMOVE(bp, nb_vnbufs);
			LIST_INSERT_HEAD(&np->n_dirtyblkhd, bp, nb_vnbufs);
			flags = (passone || !(waitfor == MNT_WAIT || waitfor == MNT_DWAIT)) ? NBAC_NOWAIT : 0;
			if (flags != NBAC_NOWAIT)
				nfs_buf_refget(bp);
			while ((error = nfs_buf_acquire(bp, flags, slpflag, slptimeo))) {
				FSDBG(524, bp, flags, bp->nb_lflags, bp->nb_flags);
				if (error == EBUSY)
					break;
				if (error) {
					error2 = nfs_sigintr(NFSTONMP(np), NULL, thd, 0);
					if (error2) {
						if (flags != NBAC_NOWAIT)
							nfs_buf_refrele(bp);
						nfs_buf_itercomplete(np, &blist, NBI_DIRTY);
						lck_mtx_unlock(nfs_buf_mutex);
						error = error2;
						goto done;
					}
					if (slpflag == PCATCH) {
						slpflag = 0;
						slptimeo = 2 * hz;
					}
				}
			}
			if (flags != NBAC_NOWAIT)
				nfs_buf_refrele(bp);
			if (error == EBUSY)
				continue;
			if (!bp->nb_np) {
				/* buffer is no longer valid */
				nfs_buf_drop(bp);
				continue;
			}
			if (ISSET(bp->nb_flags, NB_NEEDCOMMIT))
				nfs_buf_check_write_verifier(np, bp);
			if (!ISSET(bp->nb_flags, NB_DELWRI)) {
				/* buffer is no longer dirty */
				nfs_buf_drop(bp);
				continue;
			}
			FSDBG(525, bp, passone, bp->nb_lflags, bp->nb_flags);
			if ((passone || !(waitfor == MNT_WAIT || waitfor == MNT_DWAIT)) &&
			    ISSET(bp->nb_flags, NB_NEEDCOMMIT)) {
				nfs_buf_drop(bp);
				continue;
			}
			nfs_buf_remfree(bp);
			lck_mtx_unlock(nfs_buf_mutex);
			if (ISSET(bp->nb_flags, NB_ERROR)) {
				nfs_node_lock_force(np);
				np->n_error = bp->nb_error ? bp->nb_error : EIO;
				np->n_flag |= NWRITEERR;
				nfs_node_unlock(np);
				nfs_buf_release(bp, 1);
				lck_mtx_lock(nfs_buf_mutex);
				continue;
			}
			SET(bp->nb_flags, NB_ASYNC);
			if (!passone) {
				/* NB_STABLE forces this to be written FILESYNC */
				SET(bp->nb_flags, NB_STABLE);
			}
			nfs_buf_write(bp);
			lck_mtx_lock(nfs_buf_mutex);
		}
		nfs_buf_itercomplete(np, &blist, NBI_DIRTY);
	}
	lck_mtx_unlock(nfs_buf_mutex);

	if (waitfor == MNT_WAIT || waitfor == MNT_DWAIT) {
		while ((error = vnode_waitforwrites(NFSTOV(np), 0, slpflag, slptimeo, "nfsflush"))) {
			error2 = nfs_sigintr(NFSTONMP(np), NULL, thd, 0);
			if (error2) {
				error = error2;
				goto done;
			}
			if (slpflag == PCATCH) {
				slpflag = 0;
				slptimeo = 2 * hz;
			}
		}
	}

	if (nfsvers != NFS_VER2) {
		/* loop while it looks like there are still buffers to be */
		/* committed and nfs_flushcommits() seems to be handling them. */
		while (np->n_needcommitcnt)
			if (nfs_flushcommits(np, 0))
				break;
	}

	if (passone) {
		passone = 0;
		if (!LIST_EMPTY(&np->n_dirtyblkhd)) {
			nfs_node_lock_force(np);
			np->n_flag |= NMODIFIED;
			nfs_node_unlock(np);
		}
		lck_mtx_lock(nfs_buf_mutex);
		goto again;
	}

	if (waitfor == MNT_WAIT || waitfor == MNT_DWAIT) {
		if (!LIST_EMPTY(&np->n_dirtyblkhd)) {
			nfs_node_lock_force(np);
			np->n_flag |= NMODIFIED;
			nfs_node_unlock(np);
		}
		lck_mtx_lock(nfs_buf_mutex);
		if (!LIST_EMPTY(&np->n_dirtyblkhd))
			goto again;
		lck_mtx_unlock(nfs_buf_mutex);
		nfs_node_lock_force(np);
		/*
		 * OK, it looks like there are no dirty blocks.  If we have no
		 * writes in flight and no one in the write code, we can clear
		 * the modified flag.  In order to make sure we see the latest
		 * attributes and size, we also invalidate the attributes and
		 * advance the attribute cache XID to guarantee that attributes
		 * newer than our clearing of NMODIFIED will get loaded next.
		 * (If we don't do this, it's possible for the flush's final
		 * write/commit (xid1) to be executed in parallel with a subsequent
		 * getattr request (xid2).  The getattr could return attributes
		 * from *before* the write/commit completed but the stale attributes
		 * would be preferred because of the xid ordering.)
		 */
		if (!np->n_wrbusy && !np->n_numoutput) {
			np->n_flag &= ~NMODIFIED;
			NATTRINVALIDATE(np);
			nfs_get_xid(&np->n_xid);
		}
	} else {
		nfs_node_lock_force(np);
	}

	FSDBG(526, np->n_flag, np->n_error, 0, 0);
	if (!ignore_writeerr && (np->n_flag & NWRITEERR)) {
		error = np->n_error;
		np->n_flag &= ~NWRITEERR;
	}
	nfs_node_unlock(np);
done:
	lck_mtx_lock(nfs_buf_mutex);
	flags = np->n_bflag;
	np->n_bflag &= ~(NBFLUSHINPROG|NBFLUSHWANT);
	lck_mtx_unlock(nfs_buf_mutex);
	if (flags & NBFLUSHWANT)
		wakeup(&np->n_bflag);
out:
	FSDBG_BOT(517, np, error, ignore_writeerr, 0);
	return (error);
}
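/*
 * Pass-structure sketch (restating the logic above, illustrative):
 *
 *	pass 1:  async UNSTABLE writes for every delayed-write buffer,
 *	         wait (for MNT_WAIT/MNT_DWAIT), then nfs_flushcommits()
 *	pass 2+: anything still dirty is rewritten with NB_STABLE
 *	         (FILESYNC) until n_dirtyblkhd drains
 *
 * so a typical flush costs one round of WRITEs plus one COMMIT, with
 * stable rewrites reserved for stragglers.
 */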
/*
 * Flush out and invalidate all buffers associated with a vnode.
 * Called with the underlying object locked.
 */
int
nfs_vinvalbuf_internal(
	nfsnode_t np,
	int flags,
	thread_t thd,
	kauth_cred_t cred,
	int slpflag,
	int slptimeo)
{
	struct nfsbuf *bp;
	struct nfsbuflists blist;
	int list, error = 0;

	if (flags & V_SAVE) {
		if ((error = nfs_flush(np, MNT_WAIT, thd, (flags & V_IGNORE_WRITEERR))))
			return (error);
	}

	lck_mtx_lock(nfs_buf_mutex);
	for (;;) {
		list = NBI_CLEAN;
		if (nfs_buf_iterprepare(np, &blist, list)) {
			list = NBI_DIRTY;
			if (nfs_buf_iterprepare(np, &blist, list))
				break;
		}
		while ((bp = LIST_FIRST(&blist))) {
			LIST_REMOVE(bp, nb_vnbufs);
			if (list == NBI_CLEAN)
				LIST_INSERT_HEAD(&np->n_cleanblkhd, bp, nb_vnbufs);
			else
				LIST_INSERT_HEAD(&np->n_dirtyblkhd, bp, nb_vnbufs);
			nfs_buf_refget(bp);
			while ((error = nfs_buf_acquire(bp, NBAC_REMOVE, slpflag, slptimeo))) {
				FSDBG(556, np, bp, NBOFF(bp), bp->nb_flags);
				if (error != EAGAIN) {
					FSDBG(554, np, bp, -1, error);
					nfs_buf_refrele(bp);
					nfs_buf_itercomplete(np, &blist, list);
					lck_mtx_unlock(nfs_buf_mutex);
					return (error);
				}
			}
			nfs_buf_refrele(bp);
			FSDBG(554, np, bp, NBOFF(bp), bp->nb_flags);
			lck_mtx_unlock(nfs_buf_mutex);
			if ((flags & V_SAVE) && UBCINFOEXISTS(NFSTOV(np)) && bp->nb_np &&
			    (NBOFF(bp) < (off_t)np->n_size)) {
				/* extra paranoia: make sure we're not */
				/* somehow leaving any dirty data around */
				int mustwrite = 0;
				int end = (NBOFF(bp) + bp->nb_bufsize > (off_t)np->n_size) ?
				    ((off_t)np->n_size - NBOFF(bp)) : bp->nb_bufsize;
				if (!ISSET(bp->nb_flags, NB_PAGELIST)) {
					error = nfs_buf_upl_setup(bp);
					if (error == EINVAL) {
						/* vm object must no longer exist */
						/* hopefully we don't need to do */
						/* anything for this buffer */
					} else if (error)
						printf("nfs_vinvalbuf: upl setup failed %d\n", error);
					bp->nb_valid = bp->nb_dirty = 0;
				}
				nfs_buf_upl_check(bp);
				/* check for any dirty data before the EOF */
				if ((bp->nb_dirtyend > 0) && (bp->nb_dirtyoff < end)) {
					/* clip dirty range to EOF */
					if (bp->nb_dirtyend > end) {
						bp->nb_dirtyend = end;
						if (bp->nb_dirtyoff >= bp->nb_dirtyend)
							bp->nb_dirtyoff = bp->nb_dirtyend = 0;
					}
					if ((bp->nb_dirtyend > 0) && (bp->nb_dirtyoff < end))
						mustwrite++;
				}
				bp->nb_dirty &= (1 << (round_page_32(end)/PAGE_SIZE)) - 1;
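				/*
				 * Mask example (illustrative values only): with
				 * 4K pages and end = 0x1800, round_page_32(end)
				 * / PAGE_SIZE = 2, so the mask (1 << 2) - 1 =
				 * 0x3 keeps dirty bits only for pages 0 and 1;
				 * dirty pages wholly past the EOF are discarded.
				 */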
				if (bp->nb_dirty)
					mustwrite++;
				/* also make sure we'll have a credential to do the write */
				if (mustwrite && !IS_VALID_CRED(bp->nb_wcred) && !IS_VALID_CRED(cred)) {
					printf("nfs_vinvalbuf: found dirty buffer with no write creds\n");
					mustwrite = 0;
				}
				if (mustwrite) {
					FSDBG(554, np, bp, 0xd00dee, bp->nb_flags);
					if (!ISSET(bp->nb_flags, NB_PAGELIST))
						panic("nfs_vinvalbuf: dirty buffer without upl");
					/* gotta write out dirty data before invalidating */
					/* (NB_STABLE indicates that data writes should be FILESYNC) */
					/* (NB_NOCACHE indicates buffer should be discarded) */
					CLR(bp->nb_flags, (NB_DONE | NB_ERROR | NB_INVAL | NB_ASYNC));
					SET(bp->nb_flags, NB_STABLE | NB_NOCACHE);
					if (!IS_VALID_CRED(bp->nb_wcred)) {
						kauth_cred_ref(cred);
						bp->nb_wcred = cred;
					}
					error = nfs_buf_write(bp);
					// Note: bp has been released
					if (error) {
						FSDBG(554, bp, 0xd00dee, 0xbad, error);
						nfs_node_lock_force(np);
						if ((error != EINTR) && (error != ERESTART)) {
							np->n_error = error;
							np->n_flag |= NWRITEERR;
							/*
							 * There was a write error and we need to
							 * invalidate attrs to sync with server.
							 * (if this write was extending the file,
							 * we may no longer know the correct size)
							 */
							NATTRINVALIDATE(np);
						}
						nfs_node_unlock(np);
						if ((error == EINTR) || (error == ERESTART)) {
							/*
							 * Abort on EINTR.  If we don't, we could
							 * be stuck in this loop forever because
							 * the buffer will continue to stay dirty.
							 */
							lck_mtx_lock(nfs_buf_mutex);
							nfs_buf_itercomplete(np, &blist, list);
							lck_mtx_unlock(nfs_buf_mutex);
							return (error);
						}
						error = 0;
					}
					lck_mtx_lock(nfs_buf_mutex);
					continue;
				}
			}
			SET(bp->nb_flags, NB_INVAL);
			// hold off on FREEUPs until we're done here
			nfs_buf_release(bp, 0);
			lck_mtx_lock(nfs_buf_mutex);
		}
		nfs_buf_itercomplete(np, &blist, list);
	}
	if (!LIST_EMPTY(&(np)->n_dirtyblkhd) || !LIST_EMPTY(&(np)->n_cleanblkhd))
		panic("nfs_vinvalbuf: flush/inval failed");
	lck_mtx_unlock(nfs_buf_mutex);
	nfs_node_lock_force(np);
	if (!(flags & V_SAVE))
		np->n_flag &= ~NMODIFIED;
	if (vnode_vtype(NFSTOV(np)) == VREG)
		np->n_lastrahead = -1;
	nfs_node_unlock(np);
	NFS_BUF_FREEUP();
	return (0);
}
/*
 * Flush and invalidate all dirty buffers.  If another process is already
 * doing the flush, just wait for completion.
 */
int
nfs_vinvalbuf(vnode_t vp, int flags, vfs_context_t ctx, int intrflg)
{
	return nfs_vinvalbuf2(vp, flags, vfs_context_thread(ctx), vfs_context_ucred(ctx), intrflg);
}
int
nfs_vinvalbuf2(vnode_t vp, int flags, thread_t thd, kauth_cred_t cred, int intrflg)
{
	nfsnode_t np = VTONFS(vp);
	struct nfsmount *nmp = VTONMP(vp);
	int error, slpflag, slptimeo, nflags, retry = 0;
	int ubcflags = UBC_PUSHALL | UBC_SYNC | UBC_INVALIDATE;
	struct timespec ts = { 2, 0 };
	off_t size;

	FSDBG_TOP(554, np, flags, intrflg, 0);

	/*
	 * If the mount is gone, there's no sense trying to write
	 * anything and hanging while attempting the I/O.
	 */
	if (nfs_mount_gone(nmp)) {
		flags &= ~V_SAVE;
		ubcflags &= ~UBC_PUSHALL;
	}

	if (nmp && !NMFLAG(nmp, INTR))
		intrflg = 0;
	if (intrflg) {
		slpflag = PCATCH;
		slptimeo = 2 * hz;
	} else {
		slpflag = 0;
		slptimeo = 0;
	}

	/* First wait for any other process doing a flush to complete. */
	lck_mtx_lock(nfs_buf_mutex);
	while (np->n_bflag & NBINVALINPROG) {
		np->n_bflag |= NBINVALWANT;
		msleep(&np->n_bflag, nfs_buf_mutex, slpflag, "nfs_vinvalbuf", &ts);
		if ((error = nfs_sigintr(VTONMP(vp), NULL, thd, 0))) {
			lck_mtx_unlock(nfs_buf_mutex);
			return (error);
		}
		if (np->n_bflag & NBINVALINPROG)
			slpflag = 0;
	}
	np->n_bflag |= NBINVALINPROG;
	lck_mtx_unlock(nfs_buf_mutex);

	/* Now, flush as required. */
again:
	error = nfs_vinvalbuf_internal(np, flags, thd, cred, slpflag, 0);
	while (error) {
		FSDBG(554, np, 0, 0, error);
		if ((error = nfs_sigintr(VTONMP(vp), NULL, thd, 0)))
			goto done;
		error = nfs_vinvalbuf_internal(np, flags, thd, cred, 0, slptimeo);
	}

	/* get the pages out of vm also */
	if (UBCINFOEXISTS(vp) && (size = ubc_getsize(vp)))
		if ((error = ubc_msync(vp, 0, size, NULL, ubcflags))) {
			if (error == EINVAL)
				panic("nfs_vinvalbuf(): ubc_msync failed!, error %d", error);
			if (retry++ < 10) { /* retry invalidating a few times */
				if (retry > 1 || error == ENXIO)
					ubcflags &= ~UBC_PUSHALL;
				goto again;
			}
			/* if we've retried enough, give up and accept the failure */
			printf("nfs_vinvalbuf(): ubc_msync failed!, error %d\n", error);
			error = 0;
		}
done:
	lck_mtx_lock(nfs_buf_mutex);
	nflags = np->n_bflag;
	np->n_bflag &= ~(NBINVALINPROG|NBINVALWANT);
	lck_mtx_unlock(nfs_buf_mutex);
	if (nflags & NBINVALWANT)
		wakeup(&np->n_bflag);

	FSDBG_BOT(554, np, flags, intrflg, error);
	return (error);
}
/*
 * Wait for any busy buffers to complete.
 */
void
nfs_wait_bufs(nfsnode_t np)
{
	struct nfsbuf *bp;
	struct nfsbuflists blist;
	int error = 0;

	lck_mtx_lock(nfs_buf_mutex);
	if (!nfs_buf_iterprepare(np, &blist, NBI_CLEAN)) {
		while ((bp = LIST_FIRST(&blist))) {
			LIST_REMOVE(bp, nb_vnbufs);
			LIST_INSERT_HEAD(&np->n_cleanblkhd, bp, nb_vnbufs);
			nfs_buf_refget(bp);
			while ((error = nfs_buf_acquire(bp, 0, 0, 0))) {
				if (error != EAGAIN) {
					nfs_buf_refrele(bp);
					nfs_buf_itercomplete(np, &blist, NBI_CLEAN);
					lck_mtx_unlock(nfs_buf_mutex);
					return;
				}
			}
			nfs_buf_refrele(bp);
			nfs_buf_drop(bp);
		}
		nfs_buf_itercomplete(np, &blist, NBI_CLEAN);
	}
	if (!nfs_buf_iterprepare(np, &blist, NBI_DIRTY)) {
		while ((bp = LIST_FIRST(&blist))) {
			LIST_REMOVE(bp, nb_vnbufs);
			LIST_INSERT_HEAD(&np->n_dirtyblkhd, bp, nb_vnbufs);
			nfs_buf_refget(bp);
			while ((error = nfs_buf_acquire(bp, 0, 0, 0))) {
				if (error != EAGAIN) {
					nfs_buf_refrele(bp);
					nfs_buf_itercomplete(np, &blist, NBI_DIRTY);
					lck_mtx_unlock(nfs_buf_mutex);
					return;
				}
			}
			nfs_buf_refrele(bp);
			nfs_buf_drop(bp);
		}
		nfs_buf_itercomplete(np, &blist, NBI_DIRTY);
	}
	lck_mtx_unlock(nfs_buf_mutex);
}
/*
 * Add an async I/O request to the mount's async I/O queue and make
 * sure that an nfsiod will service it.
 */
void
nfs_asyncio_finish(struct nfsreq *req)
{
	struct nfsmount *nmp;
	struct nfsiod *niod;
	int started = 0;

	FSDBG_TOP(552, nmp, 0, 0, 0);
again:
	nmp = req->r_nmp;
	if (nmp == NULL)
		return;

	lck_mtx_lock(nfsiod_mutex);
	niod = nmp->nm_niod;

	/* grab an nfsiod if we don't have one already */
	if (!niod) {
		niod = TAILQ_FIRST(&nfsiodfree);
		if (niod) {
			TAILQ_REMOVE(&nfsiodfree, niod, niod_link);
			TAILQ_INSERT_TAIL(&nfsiodwork, niod, niod_link);
			niod->niod_nmp = nmp;
		} else if (((nfsiod_thread_count < NFSIOD_MAX) || (nfsiod_thread_count <= 0)) && (started < 4)) {
			/*
			 * Try starting a new thread.
			 * We may try a couple times if other callers
			 * get the new threads before we do.
			 */
			lck_mtx_unlock(nfsiod_mutex);
			started++;
			if (!nfsiod_start())
				goto again;
			lck_mtx_lock(nfsiod_mutex);
		}
	}

	if (req->r_achain.tqe_next == NFSREQNOLIST)
		TAILQ_INSERT_TAIL(&nmp->nm_iodq, req, r_achain);

	/* If this mount doesn't already have an nfsiod working on it... */
	if (!nmp->nm_niod) {
		if (niod) { /* give it the nfsiod we just grabbed */
			nmp->nm_niod = niod;
			lck_mtx_unlock(nfsiod_mutex);
			wakeup(niod);
		} else if (nfsiod_thread_count > 0) {
			/* just queue it up on nfsiod mounts queue if needed */
			if (nmp->nm_iodlink.tqe_next == NFSNOLIST)
				TAILQ_INSERT_TAIL(&nfsiodmounts, nmp, nm_iodlink);
			lck_mtx_unlock(nfsiod_mutex);
		} else {
			printf("nfs_asyncio(): no nfsiods? %d %d (%d)\n", nfsiod_thread_count, NFSIOD_MAX, started);
			lck_mtx_unlock(nfsiod_mutex);
			/* we have no other option but to be persistent */
			started = 0;
			goto again;
		}
	} else {
		lck_mtx_unlock(nfsiod_mutex);
	}

	FSDBG_BOT(552, nmp, 0, 0, 0);
}
/*
 * queue up async I/O request for resend
 */
void
nfs_asyncio_resend(struct nfsreq *req)
{
	struct nfsmount *nmp = req->r_nmp;

	if (nfs_mount_gone(nmp))
		return;

	nfs_gss_clnt_rpcdone(req);
	lck_mtx_lock(&nmp->nm_lock);
	if (!(req->r_flags & R_RESENDQ)) {
		TAILQ_INSERT_TAIL(&nmp->nm_resendq, req, r_rchain);
		req->r_flags |= R_RESENDQ;
	}
	nfs_mount_sock_thread_wake(nmp);
	lck_mtx_unlock(&nmp->nm_lock);
}
/*
 * Read directory data into a buffer.
 *
 * Buffer will be filled (unless EOF is hit).
 * Buffers after this one may also be completely/partially filled.
 */
int
nfs_buf_readdir(struct nfsbuf *bp, vfs_context_t ctx)
{
	nfsnode_t np = bp->nb_np;
	struct nfsmount *nmp = NFSTONMP(np);
	int error = 0;

	if (nfs_mount_gone(nmp))
		return (ENXIO);

	if (nmp->nm_vers < NFS_VER4)
		error = nfs3_readdir_rpc(np, bp, ctx);
	else
		error = nfs4_readdir_rpc(np, bp, ctx);

	if (error && (error != NFSERR_DIRBUFDROPPED)) {
		SET(bp->nb_flags, NB_ERROR);
		bp->nb_error = error;
	}
	return (error);
}