/*
 * Copyright (c) 2000-2009 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
/*
 * Copyright (c) 1989, 1993
 *    The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Rick Macklem at The University of Guelph.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *    This product includes software developed by the University of
 *    California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *    @(#)nfs_bio.c    8.9 (Berkeley) 3/30/95
 * FreeBSD-Id: nfs_bio.c,v 1.44 1997/09/10 19:52:25 phk Exp $
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/proc_internal.h>
#include <sys/kauth.h>
#include <sys/malloc.h>
#include <sys/vnode.h>
#include <sys/dirent.h>
#include <sys/mount_internal.h>
#include <sys/kernel.h>
#include <sys/ubc_internal.h>
#include <sys/uio_internal.h>

#include <sys/vmparam.h>

#include <kern/clock.h>
#include <libkern/OSAtomic.h>
#include <kern/kalloc.h>
#include <kern/thread_call.h>

#include <nfs/rpcv2.h>
#include <nfs/nfsproto.h>
#include <nfs/nfs.h>
#include <nfs/nfs_gss.h>
#include <nfs/nfsmount.h>
#include <nfs/nfsnode.h>
#include <sys/buf_internal.h>
#include <libkern/OSAtomic.h>
kern_return_t thread_terminate(thread_t); /* XXX */

#define NFSBUFHASH(np, lbn) \
    (&nfsbufhashtbl[((long)(np) / sizeof(*(np)) + (int)(lbn)) & nfsbufhash])
LIST_HEAD(nfsbufhashhead, nfsbuf) *nfsbufhashtbl;
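/*
 * Note on NFSBUFHASH (observation on the code above, not from the original
 * comments): it hashes on the nfsnode pointer plus the logical block number.
 * Dividing the pointer by sizeof(*np) strips low bits that are identical for
 * all heap-allocated nfsnodes, so consecutive blocks of the same file fall
 * into consecutive chains.  For example, with a 256-entry table from
 * hashinit() (nfsbufhash == 255), block N of a file maps to chain
 * ((np / sizeof(*np) + N) & 255).
 */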
struct nfsbuffreehead nfsbuffree, nfsbuffreemeta, nfsbufdelwri;
u_long nfsbufhash;
int nfsbufcnt, nfsbufmin, nfsbufmax, nfsbufmetacnt, nfsbufmetamax;
int nfsbuffreecnt, nfsbuffreemetacnt, nfsbufdelwricnt, nfsneedbuffer;
int nfs_nbdwrite;
int nfs_buf_timer_on = 0;
thread_t nfsbufdelwrithd = NULL;

lck_grp_t *nfs_buf_lck_grp;
lck_mtx_t *nfs_buf_mutex;
#define NFSBUF_FREE_PERIOD    30    /* seconds */
#define NFSBUF_LRU_STALE    120
#define NFSBUF_META_STALE    240

/* number of nfsbufs nfs_buf_freeup() should attempt to free from nfsbuffree list */
#define LRU_TO_FREEUP    6
/* number of nfsbufs nfs_buf_freeup() should attempt to free from nfsbuffreemeta list */
#define META_TO_FREEUP    3
/* total number of nfsbufs nfs_buf_freeup() should attempt to free */
#define TOTAL_TO_FREEUP    (LRU_TO_FREEUP+META_TO_FREEUP)
/* fraction of nfsbufs nfs_buf_freeup() should attempt to free from nfsbuffree list when called from timer */
#define LRU_FREEUP_FRAC_ON_TIMER    8
/* fraction of nfsbufs nfs_buf_freeup() should attempt to free from nfsbuffreemeta list when called from timer */
#define META_FREEUP_FRAC_ON_TIMER    16
/* fraction of total nfsbufs that nfsbuffreecnt should exceed before bothering to call nfs_buf_freeup() */
#define LRU_FREEUP_MIN_FRAC        4
/* fraction of total nfsbufs that nfsbuffreemetacnt should exceed before bothering to call nfs_buf_freeup() */
#define META_FREEUP_MIN_FRAC        2
#define NFS_BUF_FREEUP() \
    do { \
        /* only call nfs_buf_freeup() if it has work to do: */ \
        if (((nfsbuffreecnt > nfsbufcnt/LRU_FREEUP_MIN_FRAC) || \
             (nfsbuffreemetacnt > nfsbufcnt/META_FREEUP_MIN_FRAC)) && \
            ((nfsbufcnt - TOTAL_TO_FREEUP) > nfsbufmin)) \
            nfs_buf_freeup(0); \
    } while (0)
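/*
 * A worked example of the NFS_BUF_FREEUP() thresholds (illustrative numbers,
 * not constants from this file): with nfsbufcnt == 1024 and nfsbufmin == 128,
 * a freeup pass is attempted only once more than 1024/4 == 256 regular
 * buffers (or 1024/2 == 512 meta buffers) sit on the free lists, and only if
 * freeing TOTAL_TO_FREEUP (9) buffers would still leave more than nfsbufmin
 * allocated.
 */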
/*
 * Initialize nfsbuf lists
 */
void
nfs_nbinit(void)
{
    nfs_buf_lck_grp = lck_grp_alloc_init("nfs_buf", LCK_GRP_ATTR_NULL);
    nfs_buf_mutex = lck_mtx_alloc_init(nfs_buf_lck_grp, LCK_ATTR_NULL);

    nfsbufcnt = nfsbufmetacnt =
    nfsbuffreecnt = nfsbuffreemetacnt = nfsbufdelwricnt = 0;
    /* size nfsbufmax to cover at most half sane_size (w/default buf size) */
    nfsbufmax = (sane_size >> PAGE_SHIFT) / (2 * (NFS_RWSIZE >> PAGE_SHIFT));
    nfsbufmetamax = nfsbufmax / 4;

    nfsbufhashtbl = hashinit(nfsbufmax/4, M_TEMP, &nfsbufhash);
    TAILQ_INIT(&nfsbuffree);
    TAILQ_INIT(&nfsbuffreemeta);
    TAILQ_INIT(&nfsbufdelwri);
}
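/*
 * Sizing arithmetic check (illustrative, assuming 4K pages and a 32K
 * NFS_RWSIZE): with sane_size == 512MB there are 131072 pages, each buffer
 * covers 8 pages, and nfsbufmax == 131072 / (2 * 8) == 8192 buffers, i.e.
 * 8192 * 32K == 256MB -- half of sane_size, as the comment above intends.
 */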
/*
 * Check periodically for stale/unused nfs bufs
 */
void
nfs_buf_timer(__unused void *param0, __unused void *param1)
{
    nfs_buf_freeup(1);

    lck_mtx_lock(nfs_buf_mutex);
    if (nfsbufcnt <= nfsbufmin) {
        /* back down to the minimum; no need to keep the timer running */
        nfs_buf_timer_on = 0;
        lck_mtx_unlock(nfs_buf_mutex);
        return;
    }
    lck_mtx_unlock(nfs_buf_mutex);

    nfs_interval_timer_start(nfs_buf_timer_call,
        NFSBUF_FREE_PERIOD * 1000);
}
/*
 * try to free up some excess, unused nfsbufs
 */
void
nfs_buf_freeup(int timer)
{
    struct nfsbuf *fbp;
    struct timeval now;
    int count;
    struct nfsbuffreehead nfsbuffreeup;

    TAILQ_INIT(&nfsbuffreeup);

    lck_mtx_lock(nfs_buf_mutex);

    microuptime(&now);

    FSDBG(320, nfsbufcnt, nfsbuffreecnt, nfsbuffreemetacnt, 0);

    count = timer ? nfsbuffreecnt/LRU_FREEUP_FRAC_ON_TIMER : LRU_TO_FREEUP;
    while ((nfsbufcnt > nfsbufmin) && (count-- > 0)) {
        fbp = TAILQ_FIRST(&nfsbuffree);
        if (!fbp)
            break;
        if (NBUFSTAMPVALID(fbp) &&
            (fbp->nb_timestamp + (2*NFSBUF_LRU_STALE)) > now.tv_sec)
            break;
        nfs_buf_remfree(fbp);
        /* disassociate buffer from any nfsnode */
        if (fbp->nb_vnbufs.le_next != NFSNOLIST) {
            LIST_REMOVE(fbp, nb_vnbufs);
            fbp->nb_vnbufs.le_next = NFSNOLIST;
        }
        LIST_REMOVE(fbp, nb_hash);
        TAILQ_INSERT_TAIL(&nfsbuffreeup, fbp, nb_free);
        nfsbufcnt--;
    }

    count = timer ? nfsbuffreemetacnt/META_FREEUP_FRAC_ON_TIMER : META_TO_FREEUP;
    while ((nfsbufcnt > nfsbufmin) && (count-- > 0)) {
        fbp = TAILQ_FIRST(&nfsbuffreemeta);
        if (!fbp)
            break;
        if (NBUFSTAMPVALID(fbp) &&
            (fbp->nb_timestamp + (2*NFSBUF_META_STALE)) > now.tv_sec)
            break;
        nfs_buf_remfree(fbp);
        /* disassociate buffer from any nfsnode */
        if (fbp->nb_vnbufs.le_next != NFSNOLIST) {
            LIST_REMOVE(fbp, nb_vnbufs);
            fbp->nb_vnbufs.le_next = NFSNOLIST;
        }
        LIST_REMOVE(fbp, nb_hash);
        TAILQ_INSERT_TAIL(&nfsbuffreeup, fbp, nb_free);
        nfsbufcnt--;
        nfsbufmetacnt--;
    }

    FSDBG(320, nfsbufcnt, nfsbuffreecnt, nfsbuffreemetacnt, 0);

    lck_mtx_unlock(nfs_buf_mutex);

    while ((fbp = TAILQ_FIRST(&nfsbuffreeup))) {
        TAILQ_REMOVE(&nfsbuffreeup, fbp, nb_free);
        /* nuke any creds */
        if (IS_VALID_CRED(fbp->nb_rcred))
            kauth_cred_unref(&fbp->nb_rcred);
        if (IS_VALID_CRED(fbp->nb_wcred))
            kauth_cred_unref(&fbp->nb_wcred);
        /* if buf was NB_META, dump buffer */
        if (ISSET(fbp->nb_flags, NB_META) && fbp->nb_data)
            kfree(fbp->nb_data, fbp->nb_bufsize);
        FREE(fbp, M_TEMP);
    }
}
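/*
 * Note the two-phase structure above: buffers are unlinked from the global
 * lists and collected on a local queue while nfs_buf_mutex is held, and the
 * credential drops and kfree()s happen only after the mutex is released,
 * since those operations may block or take other locks.
 */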
/*
 * remove a buffer from the freelist
 * (must be called with nfs_buf_mutex held)
 */
void
nfs_buf_remfree(struct nfsbuf *bp)
{
    if (bp->nb_free.tqe_next == NFSNOLIST)
        panic("nfsbuf not on free list");
    if (ISSET(bp->nb_flags, NB_DELWRI)) {
        nfsbufdelwricnt--;
        TAILQ_REMOVE(&nfsbufdelwri, bp, nb_free);
    } else if (ISSET(bp->nb_flags, NB_META)) {
        nfsbuffreemetacnt--;
        TAILQ_REMOVE(&nfsbuffreemeta, bp, nb_free);
    } else {
        nfsbuffreecnt--;
        TAILQ_REMOVE(&nfsbuffree, bp, nb_free);
    }
    bp->nb_free.tqe_next = NFSNOLIST;
}
/*
 * check for existence of nfsbuf in cache
 */
boolean_t
nfs_buf_is_incore(nfsnode_t np, daddr64_t blkno)
{
    boolean_t rv;
    lck_mtx_lock(nfs_buf_mutex);
    if (nfs_buf_incore(np, blkno))
        rv = TRUE;
    else
        rv = FALSE;
    lck_mtx_unlock(nfs_buf_mutex);
    return (rv);
}
/*
 * return incore buffer (must be called with nfs_buf_mutex held)
 */
struct nfsbuf *
nfs_buf_incore(nfsnode_t np, daddr64_t blkno)
{
    /* Search hash chain */
    struct nfsbuf * bp = NFSBUFHASH(np, blkno)->lh_first;
    for (; bp != NULL; bp = bp->nb_hash.le_next)
        if ((bp->nb_lblkno == blkno) && (bp->nb_np == np)) {
            if (!ISSET(bp->nb_flags, NB_INVAL)) {
                FSDBG(547, bp, blkno, bp->nb_flags, bp->nb_np);
                return (bp);
            }
        }
    return (NULL);
}
/*
 * Check if it's OK to drop a page.
 *
 * Called by vnode_pager() on pageout request of non-dirty page.
 * We need to make sure that it's not part of a delayed write.
 * If it is, we can't let the VM drop it because we may need it
 * later when/if we need to write the data (again).
 */
int
nfs_buf_page_inval(vnode_t vp, off_t offset)
{
    struct nfsmount *nmp = VTONMP(vp);
    struct nfsbuf *bp;
    int error = 0;

    if (!nmp)
        return (ENXIO);

    lck_mtx_lock(nfs_buf_mutex);
    bp = nfs_buf_incore(VTONFS(vp), (daddr64_t)(offset / nmp->nm_biosize));
    if (!bp)
        goto out;
    FSDBG(325, bp, bp->nb_flags, bp->nb_dirtyoff, bp->nb_dirtyend);
    if (ISSET(bp->nb_lflags, NBL_BUSY)) {
        error = EBUSY;
        goto out;
    }
    /*
     * If there's a dirty range in the buffer, check to
     * see if this page intersects with the dirty range.
     * If it does, we can't let the pager drop the page.
     */
    if (bp->nb_dirtyend > 0) {
        int start = offset - NBOFF(bp);
        if ((bp->nb_dirtyend > start) &&
            (bp->nb_dirtyoff < (start + PAGE_SIZE))) {
            /*
             * Before returning the bad news, move the
             * buffer to the start of the delwri list and
             * give the list a push to try to flush the
             * buffer out.
             */
            error = EBUSY;
            nfs_buf_remfree(bp);
            TAILQ_INSERT_HEAD(&nfsbufdelwri, bp, nb_free);
            nfsbufdelwricnt++;
            nfs_buf_delwri_push(1);
        }
    }
out:
    lck_mtx_unlock(nfs_buf_mutex);
    return (error);
}
/*
 * set up the UPL for a buffer
 * (must NOT be called with nfs_buf_mutex held)
 */
int
nfs_buf_upl_setup(struct nfsbuf *bp)
{
    kern_return_t kret;
    upl_t upl;
    int upl_flags;

    if (ISSET(bp->nb_flags, NB_PAGELIST))
        return (0);

    upl_flags = UPL_PRECIOUS;
    if (!ISSET(bp->nb_flags, NB_READ)) {
        /*
         * We're doing a "write", so we intend to modify
         * the pages we're gathering.
         */
        upl_flags |= UPL_WILL_MODIFY;
    }
    kret = ubc_create_upl(NFSTOV(bp->nb_np), NBOFF(bp), bp->nb_bufsize,
                &upl, NULL, upl_flags);
    if (kret == KERN_INVALID_ARGUMENT) {
        /* vm object probably doesn't exist any more */
        bp->nb_pagelist = NULL;
        return (EINVAL);
    }
    if (kret != KERN_SUCCESS) {
        printf("nfs_buf_upl_setup(): failed to get pagelist %d\n", kret);
        bp->nb_pagelist = NULL;
        return (EIO);
    }

    FSDBG(538, bp, NBOFF(bp), bp->nb_bufsize, bp->nb_np);

    bp->nb_pagelist = upl;
    SET(bp->nb_flags, NB_PAGELIST);
    return (0);
}
/*
 * update buffer's valid/dirty info from UBC
 * (must NOT be called with nfs_buf_mutex held)
 */
void
nfs_buf_upl_check(struct nfsbuf *bp)
{
    upl_page_info_t *pl;
    off_t filesize, fileoffset;
    int i, npages;

    if (!ISSET(bp->nb_flags, NB_PAGELIST))
        return;

    npages = round_page_32(bp->nb_bufsize) / PAGE_SIZE;
    filesize = ubc_getsize(NFSTOV(bp->nb_np));
    fileoffset = NBOFF(bp);
    if (fileoffset < filesize)
        SET(bp->nb_flags, NB_CACHE);
    else
        CLR(bp->nb_flags, NB_CACHE);

    pl = ubc_upl_pageinfo(bp->nb_pagelist);
    bp->nb_valid = bp->nb_dirty = 0;

    for (i=0; i < npages; i++, fileoffset += PAGE_SIZE_64) {
        /* anything beyond the end of the file is not valid or dirty */
        if (fileoffset >= filesize)
            break;
        if (!upl_valid_page(pl, i)) {
            CLR(bp->nb_flags, NB_CACHE);
            continue;
        }
        NBPGVALID_SET(bp, i);
        if (upl_dirty_page(pl, i))
            NBPGDIRTY_SET(bp, i);
    }
    fileoffset = NBOFF(bp);
    if (ISSET(bp->nb_flags, NB_CACHE)) {
        bp->nb_validoff = 0;
        bp->nb_validend = bp->nb_bufsize;
        if (fileoffset + bp->nb_validend > filesize)
            bp->nb_validend = filesize - fileoffset;
    } else {
        bp->nb_validoff = bp->nb_validend = -1;
    }
    FSDBG(539, bp, fileoffset, bp->nb_valid, bp->nb_dirty);
    FSDBG(539, bp->nb_validoff, bp->nb_validend, bp->nb_dirtyoff, bp->nb_dirtyend);
}
/*
 * make sure that a buffer is mapped
 * (must NOT be called with nfs_buf_mutex held)
 */
int
nfs_buf_map(struct nfsbuf *bp)
{
    kern_return_t kret;

    if (bp->nb_data)
        return (0);
    if (!ISSET(bp->nb_flags, NB_PAGELIST))
        return (EINVAL);

    kret = ubc_upl_map(bp->nb_pagelist, (vm_offset_t *)&(bp->nb_data));
    if (kret != KERN_SUCCESS)
        panic("nfs_buf_map: ubc_upl_map() failed with (%d)", kret);
    if (bp->nb_data == 0)
        panic("ubc_upl_map mapped 0");
    FSDBG(540, bp, bp->nb_flags, NBOFF(bp), bp->nb_data);
    return (0);
}
/*
 * normalize an nfsbuf's valid range
 *
 * the read/write code guarantees that we'll always have a valid
 * region that is an integral number of pages.  If either end
 * of the valid range isn't page-aligned, it gets corrected
 * here as we extend the valid range through all of the
 * contiguous valid pages.
 */
void
nfs_buf_normalize_valid_range(nfsnode_t np, struct nfsbuf *bp)
{
    int pg, npg;
    /* pull validoff back to start of contiguous valid page range */
    pg = bp->nb_validoff/PAGE_SIZE;
    while (pg >= 0 && NBPGVALID(bp, pg))
        pg--;
    bp->nb_validoff = (pg+1) * PAGE_SIZE;
    /* push validend forward to end of contiguous valid page range */
    npg = bp->nb_bufsize/PAGE_SIZE;
    pg = bp->nb_validend/PAGE_SIZE;
    while (pg < npg && NBPGVALID(bp, pg))
        pg++;
    bp->nb_validend = pg * PAGE_SIZE;
    /* clip to EOF */
    if (NBOFF(bp) + bp->nb_validend > (off_t)np->n_size)
        bp->nb_validend = np->n_size % bp->nb_bufsize;
}
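/*
 * Worked example (illustrative): with 4K pages and a 32K buffer whose pages
 * 1-3 are valid, a caller-supplied valid range of [0x1800, 0x2800) normalizes
 * to validoff == 0x1000 (start of page 1) and validend == 0x4000 (end of
 * page 3), since the scans above extend both ends through the contiguous run
 * of valid pages.
 */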
/*
 * process some entries on the delayed write queue
 * (must be called with nfs_buf_mutex held)
 */
void
nfs_buf_delwri_service(void)
{
    struct nfsbuf *bp;
    nfsnode_t np;
    int error, i = 0;

    while (i < 8 && (bp = TAILQ_FIRST(&nfsbufdelwri)) != NULL) {
        np = bp->nb_np;
        nfs_buf_remfree(bp);
        nfs_buf_refget(bp);
        while ((error = nfs_buf_acquire(bp, 0, 0, 0)) == EAGAIN);
        nfs_buf_refrele(bp);
        if (error)
            break;
        if (!bp->nb_np) {
            /* buffer is no longer valid */
            nfs_buf_drop(bp);
            continue;
        }
        if (ISSET(bp->nb_flags, NB_NEEDCOMMIT))
            nfs_buf_check_write_verifier(np, bp);
        if (ISSET(bp->nb_flags, NB_NEEDCOMMIT)) {
            /* put buffer at end of delwri list */
            TAILQ_INSERT_TAIL(&nfsbufdelwri, bp, nb_free);
            nfsbufdelwricnt++;
            nfs_buf_drop(bp);
            lck_mtx_unlock(nfs_buf_mutex);
            nfs_flushcommits(np, 1);
        } else {
            SET(bp->nb_flags, NB_ASYNC);
            lck_mtx_unlock(nfs_buf_mutex);
            nfs_buf_write(bp);
        }
        i++;
        lck_mtx_lock(nfs_buf_mutex);
    }
}
/*
 * thread to service the delayed write queue when asked
 */
void
nfs_buf_delwri_thread(__unused void *arg, __unused wait_result_t wr)
{
    struct timespec ts = { 30, 0 };
    int error = 0;

    lck_mtx_lock(nfs_buf_mutex);
    while (!error) {
        nfs_buf_delwri_service();
        error = msleep(&nfsbufdelwrithd, nfs_buf_mutex, 0, "nfsbufdelwri", &ts);
    }
    nfsbufdelwrithd = NULL;
    lck_mtx_unlock(nfs_buf_mutex);
    /* terminate the calling thread (the global pointer was just cleared) */
    thread_terminate(current_thread());
}
/*
 * try to push out some delayed/uncommitted writes
 * ("locked" indicates whether nfs_buf_mutex is already held)
 */
void
nfs_buf_delwri_push(int locked)
{
    if (TAILQ_EMPTY(&nfsbufdelwri))
        return;
    if (!locked)
        lck_mtx_lock(nfs_buf_mutex);
    /* wake up the delayed write service thread */
    if (nfsbufdelwrithd)
        wakeup(&nfsbufdelwrithd);
    else if (kernel_thread_start(nfs_buf_delwri_thread, NULL, &nfsbufdelwrithd) == KERN_SUCCESS)
        thread_deallocate(nfsbufdelwrithd);
    /* otherwise, try to do some of the work ourselves */
    if (!nfsbufdelwrithd)
        nfs_buf_delwri_service();
    if (!locked)
        lck_mtx_unlock(nfs_buf_mutex);
}
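/*
 * Design note (reading of the code above): pushing the delwri queue prefers
 * to hand the work to the dedicated service thread -- waking it if it exists,
 * or starting it on first use via kernel_thread_start().  Only when no thread
 * could be started does the caller service the queue itself, since the caller
 * may be on a latency-sensitive path or holding other locks.
 */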
/*
 * Get an nfs buffer.
 *
 * Returns errno on error, 0 otherwise.
 * Any buffer is returned in *bpp.
 *
 * If NBLK_ONLYVALID is set, only return buffer if found in cache.
 * If NBLK_NOWAIT is set, don't wait for the buffer if it's marked BUSY.
 *
 * Check for existence of buffer in cache.
 * Or attempt to reuse a buffer from one of the free lists.
 * Or allocate a new buffer if we haven't already hit max allocation.
 * Or wait for a free buffer.
 *
 * If available buffer found, prepare it, and return it.
 *
 * If the calling process is interrupted by a signal for
 * an interruptible mount point, return EINTR.
 */
int
nfs_buf_get(
    nfsnode_t np,
    daddr64_t blkno,
    uint32_t size,
    thread_t thd,
    int flags,
    struct nfsbuf **bpp)
{
    vnode_t vp = NFSTOV(np);
    struct nfsmount *nmp = VTONMP(vp);
    struct nfsbuf *bp;
    uint32_t bufsize;
    int slpflag = PCATCH;
    int operation = (flags & NBLK_OPMASK);
    int error = 0;
    struct timespec ts;

    FSDBG_TOP(541, np, blkno, size, flags);
    *bpp = NULL;

    bufsize = size;
    if (bufsize > NFS_MAXBSIZE)
        panic("nfs_buf_get: buffer larger than NFS_MAXBSIZE requested");

    if (!nmp) {
        FSDBG_BOT(541, np, blkno, 0, ENXIO);
        return (ENXIO);
    }

    if (!UBCINFOEXISTS(vp)) {
        operation = NBLK_META;
    } else if (bufsize < (uint32_t)nmp->nm_biosize) {
        /* reg files should always have biosize blocks */
        bufsize = nmp->nm_biosize;
    }

    /* if NBLK_WRITE, check for too many delayed/uncommitted writes */
    if ((operation == NBLK_WRITE) && (nfs_nbdwrite > NFS_A_LOT_OF_DELAYED_WRITES)) {
        FSDBG_TOP(542, np, blkno, nfs_nbdwrite, NFS_A_LOT_OF_DELAYED_WRITES);

        /* poke the delwri list */
        nfs_buf_delwri_push(0);

        /* sleep to let other threads run... */
        tsleep(&nfs_nbdwrite, PCATCH, "nfs_nbdwrite", 1);
        FSDBG_BOT(542, np, blkno, nfs_nbdwrite, NFS_A_LOT_OF_DELAYED_WRITES);
    }
loop:
    lck_mtx_lock(nfs_buf_mutex);

    /* check for existence of nfsbuf in cache */
    if ((bp = nfs_buf_incore(np, blkno))) {
        /* if busy, set wanted and wait */
        if (ISSET(bp->nb_lflags, NBL_BUSY)) {
            if (flags & NBLK_NOWAIT) {
                lck_mtx_unlock(nfs_buf_mutex);
                FSDBG_BOT(541, np, blkno, bp, 0xbcbcbcbc);
                return (0);
            }
            FSDBG_TOP(543, np, blkno, bp, bp->nb_flags);
            SET(bp->nb_lflags, NBL_WANTED);

            ts.tv_sec = 2;
            ts.tv_nsec = 0;
            msleep(bp, nfs_buf_mutex, slpflag|(PRIBIO+1)|PDROP,
                "nfsbufget", (slpflag == PCATCH) ? NULL : &ts);
            slpflag = 0;
            FSDBG_BOT(543, np, blkno, bp, bp->nb_flags);
            if ((error = nfs_sigintr(VTONMP(vp), NULL, thd, 0))) {
                FSDBG_BOT(541, np, blkno, 0, error);
                return (error);
            }
            goto loop;
        }
        if (bp->nb_bufsize != bufsize)
            panic("nfsbuf size mismatch");
        SET(bp->nb_lflags, NBL_BUSY);
        SET(bp->nb_flags, NB_CACHE);
        nfs_buf_remfree(bp);
        /* additional paranoia: */
        if (ISSET(bp->nb_flags, NB_PAGELIST))
            panic("pagelist buffer was not busy");
        goto buffer_setup;
    }

    if (flags & NBLK_ONLYVALID) {
        lck_mtx_unlock(nfs_buf_mutex);
        FSDBG_BOT(541, np, blkno, 0, 0x0000cace);
        return (0);
    }
    /*
     * where to get a free buffer:
     * - if meta and maxmeta reached, must reuse meta
     * - alloc new if we haven't reached min bufs
     * - if free lists are NOT empty
     *   - if free list is stale, use it
     *   - else if freemeta list is stale, use it
     *   - else if max bufs allocated, use least-time-to-stale
     * - alloc new if we haven't reached max allowed
     * - start clearing out delwri list and try again
     */

    bp = NULL;
    if ((operation == NBLK_META) && (nfsbufmetacnt >= nfsbufmetamax)) {
        /* if we've hit max meta buffers, must reuse a meta buffer */
        bp = TAILQ_FIRST(&nfsbuffreemeta);
    } else if ((nfsbufcnt > nfsbufmin) &&
        (!TAILQ_EMPTY(&nfsbuffree) || !TAILQ_EMPTY(&nfsbuffreemeta))) {
        /* try to pull an nfsbuf off a free list */
        struct nfsbuf *lrubp, *metabp;
        struct timeval now;
        microuptime(&now);

        /* if the next LRU or META buffer is invalid or stale, use it */
        lrubp = TAILQ_FIRST(&nfsbuffree);
        if (lrubp && (!NBUFSTAMPVALID(lrubp) ||
            ((lrubp->nb_timestamp + NFSBUF_LRU_STALE) < now.tv_sec)))
            bp = lrubp;
        metabp = TAILQ_FIRST(&nfsbuffreemeta);
        if (!bp && metabp && (!NBUFSTAMPVALID(metabp) ||
            ((metabp->nb_timestamp + NFSBUF_META_STALE) < now.tv_sec)))
            bp = metabp;

        if (!bp && (nfsbufcnt >= nfsbufmax)) {
            /* we've already allocated all bufs, so */
            /* choose the buffer that'll go stale first */
            if (!metabp)
                bp = lrubp;
            else if (!lrubp)
                bp = metabp;
            else {
                int32_t lru_stale_time, meta_stale_time;
                lru_stale_time = lrubp->nb_timestamp + NFSBUF_LRU_STALE;
                meta_stale_time = metabp->nb_timestamp + NFSBUF_META_STALE;
                if (lru_stale_time <= meta_stale_time)
                    bp = lrubp;
                else
                    bp = metabp;
            }
        }
    }
    if (bp) {
        /* we have a buffer to reuse */
        FSDBG(544, np, blkno, bp, bp->nb_flags);
        nfs_buf_remfree(bp);
        if (ISSET(bp->nb_flags, NB_DELWRI))
            panic("nfs_buf_get: delwri");
        SET(bp->nb_lflags, NBL_BUSY);
        /* disassociate buffer from previous nfsnode */
        if (bp->nb_np) {
            if (bp->nb_vnbufs.le_next != NFSNOLIST) {
                LIST_REMOVE(bp, nb_vnbufs);
                bp->nb_vnbufs.le_next = NFSNOLIST;
            }
            bp->nb_np = NULL;
        }
        LIST_REMOVE(bp, nb_hash);
        /* nuke any creds we're holding */
        if (IS_VALID_CRED(bp->nb_rcred))
            kauth_cred_unref(&bp->nb_rcred);
        if (IS_VALID_CRED(bp->nb_wcred))
            kauth_cred_unref(&bp->nb_wcred);
        /* if buf will no longer be NB_META, dump old buffer */
        if (operation == NBLK_META) {
            if (!ISSET(bp->nb_flags, NB_META))
                nfsbufmetacnt++;
        } else if (ISSET(bp->nb_flags, NB_META)) {
            if (bp->nb_data) {
                kfree(bp->nb_data, bp->nb_bufsize);
                bp->nb_data = NULL;
            }
            nfsbufmetacnt--;
        }
        /* re-init buf fields */
        bp->nb_error = 0;
        bp->nb_validoff = bp->nb_validend = -1;
        bp->nb_dirtyoff = bp->nb_dirtyend = 0;
        bp->nb_valid = 0;
        bp->nb_dirty = 0;
    } else {
        /* no buffer to reuse */
        if ((nfsbufcnt < nfsbufmax) &&
            ((operation != NBLK_META) || (nfsbufmetacnt < nfsbufmetamax))) {
            /* just alloc a new one */
            MALLOC(bp, struct nfsbuf *, sizeof(struct nfsbuf), M_TEMP, M_WAITOK);
            if (!bp) {
                lck_mtx_unlock(nfs_buf_mutex);
                FSDBG_BOT(541, np, blkno, 0, error);
                return (ENOMEM);
            }
            nfsbufcnt++;

            /*
             * If any excess bufs, make sure the timer
             * is running to free them up later.
             */
            if (nfsbufcnt > nfsbufmin && !nfs_buf_timer_on) {
                nfs_buf_timer_on = 1;
                nfs_interval_timer_start(nfs_buf_timer_call,
                    NFSBUF_FREE_PERIOD * 1000);
            }

            if (operation == NBLK_META)
                nfsbufmetacnt++;
            /* init nfsbuf */
            bzero(bp, sizeof(*bp));
            bp->nb_free.tqe_next = NFSNOLIST;
            bp->nb_validoff = bp->nb_validend = -1;
            FSDBG(545, np, blkno, bp, 0);
        } else {
            /* too many bufs... wait for buffers to free up */
            FSDBG_TOP(546, np, blkno, nfsbufcnt, nfsbufmax);

            /* poke the delwri list */
            nfs_buf_delwri_push(1);

            nfsneedbuffer = 1;
            msleep(&nfsneedbuffer, nfs_buf_mutex, PCATCH|PDROP, "nfsbufget", NULL);
            FSDBG_BOT(546, np, blkno, nfsbufcnt, nfsbufmax);
            if ((error = nfs_sigintr(VTONMP(vp), NULL, thd, 0))) {
                FSDBG_BOT(541, np, blkno, 0, error);
                return (error);
            }
            goto loop;
        }
    }
    /* setup nfsbuf */
    SET(bp->nb_lflags, NBL_BUSY);
    bp->nb_flags = 0;
    bp->nb_lblkno = blkno;
    /* insert buf in hash */
    LIST_INSERT_HEAD(NFSBUFHASH(np, blkno), bp, nb_hash);
    /* associate buffer with new nfsnode */
    bp->nb_np = np;
    LIST_INSERT_HEAD(&np->n_cleanblkhd, bp, nb_vnbufs);

buffer_setup:

    /* unlock hash lock */
    lck_mtx_unlock(nfs_buf_mutex);

    switch (operation) {
    case NBLK_META:
        SET(bp->nb_flags, NB_META);
        if ((bp->nb_bufsize != bufsize) && bp->nb_data) {
            kfree(bp->nb_data, bp->nb_bufsize);
            bp->nb_data = NULL;
            bp->nb_validoff = bp->nb_validend = -1;
            bp->nb_dirtyoff = bp->nb_dirtyend = 0;
            bp->nb_valid = 0;
            bp->nb_dirty = 0;
            CLR(bp->nb_flags, NB_CACHE);
        }
        if (!bp->nb_data)
            bp->nb_data = kalloc(bufsize);
        if (!bp->nb_data) {
            /* Ack! couldn't allocate the data buffer! */
            /* clean up buffer and return error */
            lck_mtx_lock(nfs_buf_mutex);
            LIST_REMOVE(bp, nb_vnbufs);
            bp->nb_vnbufs.le_next = NFSNOLIST;
            bp->nb_np = NULL;
            /* invalidate usage timestamp to allow immediate freeing */
            NBUFSTAMPINVALIDATE(bp);
            if (bp->nb_free.tqe_next != NFSNOLIST)
                panic("nfsbuf on freelist");
            TAILQ_INSERT_HEAD(&nfsbuffree, bp, nb_free);
            nfsbuffreecnt++;
            lck_mtx_unlock(nfs_buf_mutex);
            FSDBG_BOT(541, np, blkno, 0xb00, ENOMEM);
            return (ENOMEM);
        }
        bp->nb_bufsize = bufsize;
        break;

    case NBLK_READ:
    case NBLK_WRITE:
        /*
         * Set or clear NB_READ now to let the UPL subsystem know
         * if we intend to modify the pages or not.
         */
        if (operation == NBLK_READ) {
            SET(bp->nb_flags, NB_READ);
        } else {
            CLR(bp->nb_flags, NB_READ);
        }
        if (bufsize < PAGE_SIZE)
            bufsize = PAGE_SIZE;
        bp->nb_bufsize = bufsize;
        bp->nb_validoff = bp->nb_validend = -1;

        if (UBCINFOEXISTS(vp)) {
            /* set up upl */
            if (nfs_buf_upl_setup(bp)) {
                /* unable to create upl */
                /* vm object must no longer exist */
                /* clean up buffer and return error */
                lck_mtx_lock(nfs_buf_mutex);
                LIST_REMOVE(bp, nb_vnbufs);
                bp->nb_vnbufs.le_next = NFSNOLIST;
                bp->nb_np = NULL;
                /* invalidate usage timestamp to allow immediate freeing */
                NBUFSTAMPINVALIDATE(bp);
                if (bp->nb_free.tqe_next != NFSNOLIST)
                    panic("nfsbuf on freelist");
                TAILQ_INSERT_HEAD(&nfsbuffree, bp, nb_free);
                nfsbuffreecnt++;
                lck_mtx_unlock(nfs_buf_mutex);
                FSDBG_BOT(541, np, blkno, 0x2bc, EIO);
                return (EIO);
            }
            nfs_buf_upl_check(bp);
        }
        break;

    default:
        panic("nfs_buf_get: %d unknown operation", operation);
    }

    *bpp = bp;

    FSDBG_BOT(541, np, blkno, bp, bp->nb_flags);

    return (0);
}
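/*
 * Typical usage (a sketch mirroring the readahead path later in this file,
 * not a new API):
 *
 *    struct nfsbuf *bp;
 *    error = nfs_buf_get(np, lbn, nmp->nm_biosize, thd, NBLK_READ, &bp);
 *    if (!error && bp) {
 *        ... fill or consume bp->nb_data ...
 *        nfs_buf_release(bp, 1);
 *    }
 */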
void
nfs_buf_release(struct nfsbuf *bp, int freeup)
{
    nfsnode_t np = bp->nb_np;
    vnode_t vp;
    struct timeval now;
    int wakeup_needbuffer, wakeup_buffer, wakeup_nbdwrite;

    FSDBG_TOP(548, bp, NBOFF(bp), bp->nb_flags, bp->nb_data);
    FSDBG(548, bp->nb_validoff, bp->nb_validend, bp->nb_dirtyoff, bp->nb_dirtyend);
    FSDBG(548, bp->nb_valid, 0, bp->nb_dirty, 0);

    vp = np ? NFSTOV(np) : NULL;
    if (vp && UBCINFOEXISTS(vp) && bp->nb_bufsize) {
        int upl_flags, rv, i;
        upl_t upl;

        if (!ISSET(bp->nb_flags, NB_PAGELIST) && !ISSET(bp->nb_flags, NB_INVAL)) {
            rv = nfs_buf_upl_setup(bp);
            if (rv)
                printf("nfs_buf_release: upl create failed %d\n", rv);
            else
                nfs_buf_upl_check(bp);
        }
        upl = bp->nb_pagelist;
        if (!upl)
            goto pagelist_cleanup_done;
        if (bp->nb_data) {
            if (ubc_upl_unmap(upl) != KERN_SUCCESS)
                panic("ubc_upl_unmap failed");
            bp->nb_data = NULL;
        }
        /*
         * Abort the pages on error or: if this is an invalid or
         * non-needcommit nocache buffer AND no pages are dirty.
         */
        if (ISSET(bp->nb_flags, NB_ERROR) || (!bp->nb_dirty && (ISSET(bp->nb_flags, NB_INVAL) ||
            (ISSET(bp->nb_flags, NB_NOCACHE) && !ISSET(bp->nb_flags, (NB_NEEDCOMMIT | NB_DELWRI)))))) {
            if (ISSET(bp->nb_flags, (NB_READ | NB_INVAL | NB_NOCACHE)))
                upl_flags = UPL_ABORT_DUMP_PAGES;
            else
                upl_flags = 0;
            ubc_upl_abort(upl, upl_flags);
            goto pagelist_cleanup_done;
        }
        for (i=0; i <= (bp->nb_bufsize - 1)/PAGE_SIZE; i++) {
            if (!NBPGVALID(bp, i))
                ubc_upl_abort_range(upl,
                    i*PAGE_SIZE, PAGE_SIZE,
                    UPL_ABORT_DUMP_PAGES |
                    UPL_ABORT_FREE_ON_EMPTY);
            else {
                if (NBPGDIRTY(bp, i))
                    upl_flags = UPL_COMMIT_SET_DIRTY;
                else
                    upl_flags = UPL_COMMIT_CLEAR_DIRTY;

                if (!ISSET(bp->nb_flags, (NB_NEEDCOMMIT | NB_DELWRI)))
                    upl_flags |= UPL_COMMIT_CLEAR_PRECIOUS;

                ubc_upl_commit_range(upl,
                    i*PAGE_SIZE, PAGE_SIZE,
                    upl_flags |
                    UPL_COMMIT_INACTIVATE |
                    UPL_COMMIT_FREE_ON_EMPTY);
            }
        }
pagelist_cleanup_done:
        /* invalidate any pages past EOF */
        if (NBOFF(bp) + bp->nb_bufsize > (off_t)(np->n_size)) {
            off_t start, end;
            start = trunc_page_64(np->n_size) + PAGE_SIZE_64;
            end = trunc_page_64(NBOFF(bp) + bp->nb_bufsize);
            if (start < NBOFF(bp))
                start = NBOFF(bp);
            if (end > start) {
                if (!(rv = ubc_sync_range(vp, start, end, UBC_INVALIDATE)))
                    printf("nfs_buf_release(): ubc_sync_range failed!\n");
            }
        }
        CLR(bp->nb_flags, NB_PAGELIST);
        bp->nb_pagelist = NULL;
    }

    lck_mtx_lock(nfs_buf_mutex);

    wakeup_needbuffer = wakeup_buffer = wakeup_nbdwrite = 0;

    /* Wake up any processes waiting for any buffer to become free. */
    if (nfsneedbuffer) {
        nfsneedbuffer = 0;
        wakeup_needbuffer = 1;
    }
    /* Wake up any processes waiting for _this_ buffer to become free. */
    if (ISSET(bp->nb_lflags, NBL_WANTED)) {
        CLR(bp->nb_lflags, NBL_WANTED);
        wakeup_buffer = 1;
    }

    /* If it's non-needcommit nocache, or an error, mark it invalid. */
    if (ISSET(bp->nb_flags, NB_ERROR) ||
        (ISSET(bp->nb_flags, NB_NOCACHE) && !ISSET(bp->nb_flags, (NB_NEEDCOMMIT | NB_DELWRI))))
        SET(bp->nb_flags, NB_INVAL);

    if ((bp->nb_bufsize <= 0) || ISSET(bp->nb_flags, NB_INVAL)) {
        /* If it's invalid or empty, dissociate it from its nfsnode */
        if (bp->nb_vnbufs.le_next != NFSNOLIST) {
            LIST_REMOVE(bp, nb_vnbufs);
            bp->nb_vnbufs.le_next = NFSNOLIST;
        }
        bp->nb_np = NULL;
        /* if this was a delayed write, wakeup anyone */
        /* waiting for delayed writes to complete */
        if (ISSET(bp->nb_flags, NB_DELWRI)) {
            CLR(bp->nb_flags, NB_DELWRI);
            nfs_nbdwrite--;
            wakeup_nbdwrite = 1;
        }
        /* invalidate usage timestamp to allow immediate freeing */
        NBUFSTAMPINVALIDATE(bp);
        /* put buffer at head of free list */
        if (bp->nb_free.tqe_next != NFSNOLIST)
            panic("nfsbuf on freelist");
        SET(bp->nb_flags, NB_INVAL);
        if (ISSET(bp->nb_flags, NB_META)) {
            TAILQ_INSERT_HEAD(&nfsbuffreemeta, bp, nb_free);
            nfsbuffreemetacnt++;
        } else {
            TAILQ_INSERT_HEAD(&nfsbuffree, bp, nb_free);
            nfsbuffreecnt++;
        }
    } else if (ISSET(bp->nb_flags, NB_DELWRI)) {
        /* put buffer at end of delwri list */
        if (bp->nb_free.tqe_next != NFSNOLIST)
            panic("nfsbuf on freelist");
        TAILQ_INSERT_TAIL(&nfsbufdelwri, bp, nb_free);
        nfsbufdelwricnt++;
        freeup = 0;
    } else {
        /* update usage timestamp */
        microuptime(&now);
        bp->nb_timestamp = now.tv_sec;
        /* put buffer at end of free list */
        if (bp->nb_free.tqe_next != NFSNOLIST)
            panic("nfsbuf on freelist");
        if (ISSET(bp->nb_flags, NB_META)) {
            TAILQ_INSERT_TAIL(&nfsbuffreemeta, bp, nb_free);
            nfsbuffreemetacnt++;
        } else {
            TAILQ_INSERT_TAIL(&nfsbuffree, bp, nb_free);
            nfsbuffreecnt++;
        }
    }

    /* Unlock the buffer. */
    CLR(bp->nb_flags, (NB_ASYNC | NB_STABLE));
    CLR(bp->nb_lflags, NBL_BUSY);

    FSDBG_BOT(548, bp, NBOFF(bp), bp->nb_flags, bp->nb_data);

    lck_mtx_unlock(nfs_buf_mutex);

    if (wakeup_needbuffer)
        wakeup(&nfsneedbuffer);
    if (wakeup_buffer)
        wakeup(bp);
    if (wakeup_nbdwrite)
        wakeup(&nfs_nbdwrite);
    if (freeup)
        NFS_BUF_FREEUP();
}
/*
 * Wait for operations on the buffer to complete.
 * When they do, extract and return the I/O's error value.
 */
int
nfs_buf_iowait(struct nfsbuf *bp)
{
    FSDBG_TOP(549, bp, NBOFF(bp), bp->nb_flags, bp->nb_error);

    lck_mtx_lock(nfs_buf_mutex);

    while (!ISSET(bp->nb_flags, NB_DONE))
        msleep(bp, nfs_buf_mutex, PRIBIO + 1, "nfs_buf_iowait", NULL);

    lck_mtx_unlock(nfs_buf_mutex);

    FSDBG_BOT(549, bp, NBOFF(bp), bp->nb_flags, bp->nb_error);

    /* check for interruption of I/O, then errors. */
    if (ISSET(bp->nb_flags, NB_EINTR)) {
        CLR(bp->nb_flags, NB_EINTR);
        return (EINTR);
    } else if (ISSET(bp->nb_flags, NB_ERROR))
        return (bp->nb_error ? bp->nb_error : EIO);
    return (0);
}
/*
 * Mark I/O complete on a buffer.
 */
void
nfs_buf_iodone(struct nfsbuf *bp)
{
    FSDBG_TOP(550, bp, NBOFF(bp), bp->nb_flags, bp->nb_error);

    if (ISSET(bp->nb_flags, NB_DONE))
        panic("nfs_buf_iodone already");

    if (!ISSET(bp->nb_flags, NB_READ)) {
        CLR(bp->nb_flags, NB_WRITEINPROG);
        /*
         * vnode_writedone() takes care of waking up
         * any throttled write operations
         */
        vnode_writedone(NFSTOV(bp->nb_np));
        nfs_node_lock_force(bp->nb_np);
        bp->nb_np->n_numoutput--;
        nfs_node_unlock(bp->nb_np);
    }
    if (ISSET(bp->nb_flags, NB_ASYNC)) {    /* if async, release it */
        SET(bp->nb_flags, NB_DONE);        /* note that it's done */
        nfs_buf_release(bp, 1);
    } else {                /* or just wakeup the buffer */
        lck_mtx_lock(nfs_buf_mutex);
        SET(bp->nb_flags, NB_DONE);        /* note that it's done */
        CLR(bp->nb_lflags, NBL_WANTED);
        lck_mtx_unlock(nfs_buf_mutex);
        wakeup(bp);
    }

    FSDBG_BOT(550, bp, NBOFF(bp), bp->nb_flags, bp->nb_error);
}
void
nfs_buf_write_delayed(struct nfsbuf *bp)
{
    nfsnode_t np = bp->nb_np;

    FSDBG_TOP(551, bp, NBOFF(bp), bp->nb_flags, 0);
    FSDBG(551, bp, bp->nb_dirtyoff, bp->nb_dirtyend, bp->nb_dirty);

    /*
     * If the block hasn't been seen before:
     *    (1) Mark it as having been seen,
     *    (2) Make sure it's on its node's correct block list,
     */
    if (!ISSET(bp->nb_flags, NB_DELWRI)) {
        SET(bp->nb_flags, NB_DELWRI);
        /* move to dirty list */
        lck_mtx_lock(nfs_buf_mutex);
        nfs_nbdwrite++;
        if (bp->nb_vnbufs.le_next != NFSNOLIST)
            LIST_REMOVE(bp, nb_vnbufs);
        LIST_INSERT_HEAD(&np->n_dirtyblkhd, bp, nb_vnbufs);
        lck_mtx_unlock(nfs_buf_mutex);
    }

    /*
     * If the vnode has "too many" write operations in progress
     * wait for them to finish the IO
     */
    vnode_waitforwrites(NFSTOV(np), VNODE_ASYNC_THROTTLE, 0, 0, "nfs_buf_write_delayed");

    /* the file is in a modified state, so make sure the flag's set */
    nfs_node_lock_force(np);
    np->n_flag |= NMODIFIED;
    nfs_node_unlock(np);

    /*
     * If we have too many delayed write buffers,
     * just fall back to doing the async write.
     */
    if (nfs_nbdwrite < 0)
        panic("nfs_buf_write_delayed: Negative nfs_nbdwrite");
    if (nfs_nbdwrite > NFS_A_LOT_OF_DELAYED_WRITES) {
        /* issue async write */
        SET(bp->nb_flags, NB_ASYNC);
        nfs_buf_write(bp);
        FSDBG_BOT(551, bp, NBOFF(bp), bp->nb_flags, bp->nb_error);
        return;
    }

    /* Otherwise, the "write" is done, so mark and release the buffer. */
    SET(bp->nb_flags, NB_DONE);
    nfs_buf_release(bp, 1);
    FSDBG_BOT(551, bp, NBOFF(bp), bp->nb_flags, 0);
    return;
}
/*
 * Check that a "needcommit" buffer can still be committed.
 * If the write verifier has changed, we need to clear
 * the needcommit flag.
 */
void
nfs_buf_check_write_verifier(nfsnode_t np, struct nfsbuf *bp)
{
    struct nfsmount *nmp;

    if (!ISSET(bp->nb_flags, NB_NEEDCOMMIT))
        return;

    nmp = NFSTONMP(np);
    if (!nmp)
        return;
    if (!ISSET(bp->nb_flags, NB_STALEWVERF) && (bp->nb_verf == nmp->nm_verf))
        return;

    /* write verifier changed, clear commit/wverf flags */
    CLR(bp->nb_flags, (NB_NEEDCOMMIT | NB_STALEWVERF));
    bp->nb_verf = 0;
    nfs_node_lock_force(np);
    np->n_needcommitcnt--;
    CHECK_NEEDCOMMITCNT(np);
    nfs_node_unlock(np);
}
/*
 * add a reference to a buffer so it doesn't disappear while being used
 * (must be called with nfs_buf_mutex held)
 */
void
nfs_buf_refget(struct nfsbuf *bp)
{
    bp->nb_refs++;
}
/*
 * release a reference on a buffer
 * (must be called with nfs_buf_mutex held)
 */
void
nfs_buf_refrele(struct nfsbuf *bp)
{
    bp->nb_refs--;
}
/*
 * mark a particular buffer as BUSY
 * (must be called with nfs_buf_mutex held)
 */
errno_t
nfs_buf_acquire(struct nfsbuf *bp, int flags, int slpflag, int slptimeo)
{
    errno_t error;
    struct timespec ts;

    if (ISSET(bp->nb_lflags, NBL_BUSY)) {
        /*
         * since the lck_mtx_lock may block, the buffer
         * may become BUSY, so we need to recheck for
         * a NOWAIT request
         */
        if (flags & NBAC_NOWAIT)
            return (EBUSY);
        SET(bp->nb_lflags, NBL_WANTED);

        ts.tv_sec = (slptimeo/100);
        /* the hz value is 100; which leads to 10ms */
        ts.tv_nsec = (slptimeo % 100) * 10 * NSEC_PER_USEC * 1000;

        error = msleep(bp, nfs_buf_mutex, slpflag | (PRIBIO + 1),
                "nfs_buf_acquire", &ts);
        if (error)
            return (error);
        return (EAGAIN);
    }
    if (flags & NBAC_REMOVE)
        nfs_buf_remfree(bp);
    SET(bp->nb_lflags, NBL_BUSY);

    return (0);
}
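/*
 * A quick check of the slptimeo conversion above (hz == 100, so one tick is
 * 10ms): slptimeo == 250 ticks yields ts.tv_sec == 2 and
 * ts.tv_nsec == 50 * 10 * 1000 * NSEC_PER_USEC == 0.5s, i.e. 2.5s total.
 */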
/*
 * simply drop the BUSY status of a buffer
 * (must be called with nfs_buf_mutex held)
 */
void
nfs_buf_drop(struct nfsbuf *bp)
{
    int need_wakeup = 0;

    if (!ISSET(bp->nb_lflags, NBL_BUSY))
        panic("nfs_buf_drop: buffer not busy!");
    if (ISSET(bp->nb_lflags, NBL_WANTED)) {
        /* delay the actual wakeup until after we clear NBL_BUSY */
        need_wakeup = 1;
    }
    /* Unlock the buffer. */
    CLR(bp->nb_lflags, (NBL_BUSY | NBL_WANTED));

    if (need_wakeup)
        wakeup(bp);
}
/*
 * prepare for iterating over an nfsnode's buffer list
 * this lock protects the queue manipulation
 * (must be called with nfs_buf_mutex held)
 */
int
nfs_buf_iterprepare(nfsnode_t np, struct nfsbuflists *iterheadp, int flags)
{
    struct nfsbuflists *listheadp;

    if (flags & NBI_DIRTY)
        listheadp = &np->n_dirtyblkhd;
    else
        listheadp = &np->n_cleanblkhd;

    if ((flags & NBI_NOWAIT) && (np->n_bufiterflags & NBI_ITER)) {
        LIST_INIT(iterheadp);
        return (EWOULDBLOCK);
    }

    while (np->n_bufiterflags & NBI_ITER) {
        np->n_bufiterflags |= NBI_ITERWANT;
        msleep(&np->n_bufiterflags, nfs_buf_mutex, 0, "nfs_buf_iterprepare", NULL);
    }
    if (LIST_EMPTY(listheadp)) {
        LIST_INIT(iterheadp);
        return (EINVAL);
    }
    np->n_bufiterflags |= NBI_ITER;

    iterheadp->lh_first = listheadp->lh_first;
    listheadp->lh_first->nb_vnbufs.le_prev = &iterheadp->lh_first;
    LIST_INIT(listheadp);

    return (0);
}
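/*
 * Note on the iteration scheme: rather than walking the node's list in place
 * while other threads may insert or remove entries, the whole list is moved
 * onto the caller's iterheadp (and the node's list reinitialized);
 * nfs_buf_itercomplete() below moves whatever remains back.  The
 * NBI_ITER/NBI_ITERWANT bits serialize concurrent iterators.
 */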
/*
 * clean up after iterating over an nfsnode's buffer list
 * this lock protects the queue manipulation
 * (must be called with nfs_buf_mutex held)
 */
void
nfs_buf_itercomplete(nfsnode_t np, struct nfsbuflists *iterheadp, int flags)
{
    struct nfsbuflists * listheadp;
    struct nfsbuf *bp;

    if (flags & NBI_DIRTY)
        listheadp = &np->n_dirtyblkhd;
    else
        listheadp = &np->n_cleanblkhd;

    while (!LIST_EMPTY(iterheadp)) {
        bp = LIST_FIRST(iterheadp);
        LIST_REMOVE(bp, nb_vnbufs);
        LIST_INSERT_HEAD(listheadp, bp, nb_vnbufs);
    }

    np->n_bufiterflags &= ~NBI_ITER;
    if (np->n_bufiterflags & NBI_ITERWANT) {
        np->n_bufiterflags &= ~NBI_ITERWANT;
        wakeup(&np->n_bufiterflags);
    }
}
/*
 * Read an NFS buffer for a file.
 */
int
nfs_buf_read(struct nfsbuf *bp)
{
    int error = 0;
    thread_t thd;
    kauth_cred_t cred;

    cred = bp->nb_rcred;
    if (IS_VALID_CRED(cred))
        kauth_cred_ref(cred);
    thd = ISSET(bp->nb_flags, NB_ASYNC) ? NULL : current_thread();

    /* sanity checks */
    if (!ISSET(bp->nb_flags, NB_READ))
        panic("nfs_buf_read: !NB_READ");
    if (ISSET(bp->nb_flags, NB_DONE))
        CLR(bp->nb_flags, NB_DONE);

    OSAddAtomic(1, &nfsstats.read_bios);

    error = nfs_buf_read_rpc(bp, thd, cred);
    /*
     * For async I/O, the callbacks will finish up the
     * read.  Otherwise, the read has already been finished.
     */

    if (IS_VALID_CRED(cred))
        kauth_cred_unref(&cred);
    return (error);
}
/*
 * finish the reading of a buffer
 */
void
nfs_buf_read_finish(struct nfsbuf *bp)
{
    nfsnode_t np = bp->nb_np;
    struct nfsmount *nmp;

    if (!ISSET(bp->nb_flags, NB_ERROR)) {
        /* update valid range */
        bp->nb_validoff = 0;
        bp->nb_validend = bp->nb_endio;
        if (bp->nb_endio < (int)bp->nb_bufsize) {
            /*
             * The read may be short because we have unflushed writes
             * that are extending the file size and the reads hit the
             * (old) EOF on the server.  So, just make sure nb_validend
             * correctly tracks EOF.
             * Note that the missing data should have already been zeroed
             * in nfs_buf_read_rpc_finish().
             */
            off_t boff = NBOFF(bp);
            if ((off_t)np->n_size >= (boff + bp->nb_bufsize))
                bp->nb_validend = bp->nb_bufsize;
            else if ((off_t)np->n_size >= boff)
                bp->nb_validend = np->n_size - boff;
            else
                bp->nb_validend = 0;
        }
        if ((nmp = NFSTONMP(np)) && (nmp->nm_vers == NFS_VER2) &&
            ((NBOFF(bp) + bp->nb_validend) > 0x100000000LL))
            bp->nb_validend = 0x100000000LL - NBOFF(bp);
        bp->nb_valid = (1 << (round_page_32(bp->nb_validend) / PAGE_SIZE)) - 1;
        if (bp->nb_validend & PAGE_MASK) {
            /* zero-fill remainder of last page */
            bzero(bp->nb_data + bp->nb_validend, bp->nb_bufsize - bp->nb_validend);
        }
    }
    nfs_buf_iodone(bp);
}
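/*
 * The nb_valid computation above builds a bitmap with one bit per page.
 * For example (4K pages): nb_validend == 0x1800 rounds up to 0x2000, i.e.
 * 2 pages, giving (1 << 2) - 1 == 0x3, marking pages 0 and 1 valid.
 */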
/*
 * initiate the NFS READ RPC(s) for a buffer
 */
int
nfs_buf_read_rpc(struct nfsbuf *bp, thread_t thd, kauth_cred_t cred)
{
    struct nfsmount *nmp;
    nfsnode_t np = bp->nb_np;
    int error = 0, nfsvers, async;
    int offset, nrpcs;
    uint32_t nmrsize, length, len;
    off_t boff;
    struct nfsreq *req;
    struct nfsreq_cbinfo cb;

    nmp = NFSTONMP(np);
    if (!nmp) {
        bp->nb_error = error = ENXIO;
        SET(bp->nb_flags, NB_ERROR);
        nfs_buf_iodone(bp);
        return (error);
    }
    nfsvers = nmp->nm_vers;
    nmrsize = nmp->nm_rsize;

    boff = NBOFF(bp);
    offset = 0;
    length = bp->nb_bufsize;

    if (nfsvers == NFS_VER2) {
        if (boff > 0xffffffffLL) {
            bp->nb_error = error = EFBIG;
            SET(bp->nb_flags, NB_ERROR);
            nfs_buf_iodone(bp);
            return (error);
        }
        if ((boff + length - 1) > 0xffffffffLL)
            length = 0x100000000LL - boff;
    }

    /* Note: Can only do async I/O if nfsiods are configured. */
    async = (bp->nb_flags & NB_ASYNC);
    cb.rcb_func = async ? nfs_buf_read_rpc_finish : NULL;
    cb.rcb_bp = bp;

    bp->nb_offio = bp->nb_endio = 0;
    bp->nb_rpcs = nrpcs = (length + nmrsize - 1) / nmrsize;
    if (async && (nrpcs > 1)) {
        SET(bp->nb_flags, NB_MULTASYNCRPC);
    } else {
        CLR(bp->nb_flags, NB_MULTASYNCRPC);
    }

    while (length > 0) {
        if (ISSET(bp->nb_flags, NB_ERROR)) {
            error = bp->nb_error;
            break;
        }
        len = (length > nmrsize) ? nmrsize : length;
        cb.rcb_args[0] = offset;
        cb.rcb_args[1] = len;
        if (nmp->nm_vers >= NFS_VER4)
            cb.rcb_args[2] = nmp->nm_stategenid;
        req = NULL;
        error = nmp->nm_funcs->nf_read_rpc_async(np, boff + offset, len, thd, cred, &cb, &req);
        if (error)
            break;
        offset += len;
        length -= len;
        if (async)
            continue;
        nfs_buf_read_rpc_finish(req);
        if (ISSET(bp->nb_flags, NB_ERROR)) {
            error = bp->nb_error;
            break;
        }
    }

    if (length > 0) {
        /*
         * Something bad happened while trying to send the RPC(s).
         * Wait for any outstanding requests to complete.
         */
        bp->nb_error = error;
        SET(bp->nb_flags, NB_ERROR);
        if (ISSET(bp->nb_flags, NB_MULTASYNCRPC)) {
            nrpcs = (length + nmrsize - 1) / nmrsize;
            lck_mtx_lock(nfs_buf_mutex);
            bp->nb_rpcs -= nrpcs;
            if (bp->nb_rpcs == 0) {
                /* No RPCs left, so the buffer's done */
                lck_mtx_unlock(nfs_buf_mutex);
                nfs_buf_iodone(bp);
            } else {
                /* wait for the last RPC to mark it done */
                while (bp->nb_rpcs > 0)
                    msleep(&bp->nb_rpcs, nfs_buf_mutex, 0,
                        "nfs_buf_read_rpc_cancel", NULL);
                lck_mtx_unlock(nfs_buf_mutex);
            }
        } else {
            nfs_buf_iodone(bp);
        }
    }

    return (error);
}
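/*
 * Chunking example: with nm_rsize == 32K and a 128K buffer,
 * nb_rpcs == (131072 + 32767) / 32768 == 4, so four READ RPCs are issued at
 * offsets 0, 32K, 64K and 96K; for async I/O the four
 * nfs_buf_read_rpc_finish() callbacks collectively complete the buffer.
 */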
/*
 * finish up an NFS READ RPC on a buffer
 */
void
nfs_buf_read_rpc_finish(struct nfsreq *req)
{
    struct nfsmount *nmp;
    size_t rlen;
    struct nfsreq_cbinfo cb;
    struct nfsbuf *bp;
    int error = 0, nfsvers, offset, length, eof = 0, multasyncrpc, finished;
    void *wakeme = NULL;
    struct nfsreq *rreq = NULL;
    nfsnode_t np;
    thread_t thd;
    kauth_cred_t cred;
    uio_t auio;
    char uio_buf [ UIO_SIZEOF(1) ];

finish:
    np = req->r_np;
    thd = req->r_thread;
    cred = req->r_cred;
    if (IS_VALID_CRED(cred))
        kauth_cred_ref(cred);
    cb = req->r_callback;
    bp = cb.rcb_bp;

    nmp = NFSTONMP(np);
    if (!nmp) {
        SET(bp->nb_flags, NB_ERROR);
        bp->nb_error = error = ENXIO;
    }
    if (error || ISSET(bp->nb_flags, NB_ERROR)) {
        /* just drop it */
        nfs_request_async_cancel(req);
        goto out;
    }

    nfsvers = nmp->nm_vers;
    offset = cb.rcb_args[0];
    rlen = length = cb.rcb_args[1];

    auio = uio_createwithbuffer(1, NBOFF(bp) + offset, UIO_SYSSPACE,
        UIO_READ, &uio_buf, sizeof(uio_buf));
    uio_addiov(auio, CAST_USER_ADDR_T(bp->nb_data + offset), length);

    /* finish the RPC */
    error = nmp->nm_funcs->nf_read_rpc_async_finish(np, req, auio, &rlen, &eof);
    if ((error == EINPROGRESS) && cb.rcb_func) {
        /* async request restarted */
        if (IS_VALID_CRED(cred))
            kauth_cred_unref(&cred);
        return;
    }
    if ((nmp->nm_vers >= NFS_VER4) && nfs_mount_state_error_should_restart(error) && !ISSET(bp->nb_flags, NB_ERROR)) {
        lck_mtx_lock(&nmp->nm_lock);
        if ((error != NFSERR_GRACE) && (cb.rcb_args[2] == nmp->nm_stategenid) && !(nmp->nm_state & NFSSTA_RECOVER)) {
            printf("nfs_buf_read_rpc_finish: error %d, initiating recovery\n", error);
            nmp->nm_state |= NFSSTA_RECOVER;
            nfs_mount_sock_thread_wake(nmp);
        }
        lck_mtx_unlock(&nmp->nm_lock);
        if (error == NFSERR_GRACE)
            tsleep(&nmp->nm_state, (PZERO-1), "nfsgrace", 2*hz);
        if (!(error = nfs_mount_state_wait_for_recovery(nmp))) {
            rlen = 0;
            goto readagain;
        }
    }
    if (error) {
        SET(bp->nb_flags, NB_ERROR);
        bp->nb_error = error;
        goto out;
    }

    if ((rlen > 0) && (bp->nb_endio < (offset + (int)rlen)))
        bp->nb_endio = offset + rlen;

    if ((nfsvers == NFS_VER2) || eof || (rlen == 0)) {
        /* zero out the remaining data (up to EOF) */
        off_t rpcrem, eofrem, rem;
        rpcrem = (length - rlen);
        eofrem = np->n_size - (NBOFF(bp) + offset + rlen);
        rem = (rpcrem < eofrem) ? rpcrem : eofrem;
        if (rem > 0)
            bzero(bp->nb_data + offset + rlen, rem);
    } else if (((int)rlen < length) && !ISSET(bp->nb_flags, NB_ERROR)) {
        /*
         * short read
         *
         * We haven't hit EOF and we didn't get all the data
         * requested, so we need to issue another read for the rest.
         * (Don't bother if the buffer already hit an error.)
         */
readagain:
        offset += rlen;
        length -= rlen;
        cb.rcb_args[0] = offset;
        cb.rcb_args[1] = length;
        if (nmp->nm_vers >= NFS_VER4)
            cb.rcb_args[2] = nmp->nm_stategenid;
        error = nmp->nm_funcs->nf_read_rpc_async(np, NBOFF(bp) + offset, length, thd, cred, &cb, &rreq);
        if (!error) {
            if (IS_VALID_CRED(cred))
                kauth_cred_unref(&cred);
            if (!cb.rcb_func) {
                /* if !async we'll need to wait for this RPC to finish */
                req = rreq;
                rreq = NULL;
                goto finish;
            }
            /*
             * We're done here.
             * Outstanding RPC count is unchanged.
             * Callback will be called when RPC is done.
             */
            return;
        }
        SET(bp->nb_flags, NB_ERROR);
        bp->nb_error = error;
    }

out:
    if (IS_VALID_CRED(cred))
        kauth_cred_unref(&cred);

    /*
     * Decrement outstanding RPC count on buffer
     * and call nfs_buf_read_finish on last RPC.
     *
     * (Note: when there are multiple async RPCs issued for a
     * buffer we need nfs_buffer_mutex to avoid problems when
     * aborting a partially-initiated set of RPCs)
     */

    multasyncrpc = ISSET(bp->nb_flags, NB_MULTASYNCRPC);
    if (multasyncrpc)
        lck_mtx_lock(nfs_buf_mutex);

    bp->nb_rpcs--;
    finished = (bp->nb_rpcs == 0);

    if (multasyncrpc)
        lck_mtx_unlock(nfs_buf_mutex);

    if (finished) {
        if (multasyncrpc)
            wakeme = &bp->nb_rpcs;
        nfs_buf_read_finish(bp);
        if (wakeme)
            wakeup(wakeme);
    }
}
/*
 * Do buffer readahead.
 * Initiate async I/O to read buffers not in cache.
 */
int
nfs_buf_readahead(nfsnode_t np, int ioflag, daddr64_t *rabnp, daddr64_t lastrabn, thread_t thd, kauth_cred_t cred)
{
    struct nfsmount *nmp = NFSTONMP(np);
    struct nfsbuf *bp;
    int error = 0;
    uint32_t nra;

    if (!nmp)
        return (ENXIO);
    if (nmp->nm_readahead <= 0)
        return (0);
    if (*rabnp > lastrabn)
        return (0);

    for (nra = 0; (nra < nmp->nm_readahead) && (*rabnp <= lastrabn); nra++, *rabnp = *rabnp + 1) {
        /* check if block exists and is valid. */
        if ((*rabnp * nmp->nm_biosize) >= (off_t)np->n_size) {
            /* stop reading ahead if we're beyond EOF */
            *rabnp = lastrabn;
            break;
        }
        error = nfs_buf_get(np, *rabnp, nmp->nm_biosize, thd, NBLK_READ|NBLK_NOWAIT, &bp);
        if (error)
            break;
        nfs_node_lock_force(np);
        np->n_lastrahead = *rabnp;
        nfs_node_unlock(np);
        if (!bp)
            continue;
        if ((ioflag & IO_NOCACHE) && ISSET(bp->nb_flags, NB_CACHE) &&
            !bp->nb_dirty && !ISSET(bp->nb_flags, (NB_DELWRI|NB_NCRDAHEAD))) {
            CLR(bp->nb_flags, NB_CACHE);
            bp->nb_valid = 0;
            bp->nb_validoff = bp->nb_validend = -1;
        }
        if ((bp->nb_dirtyend <= 0) && !bp->nb_dirty &&
            !ISSET(bp->nb_flags, (NB_CACHE|NB_DELWRI))) {
            SET(bp->nb_flags, (NB_READ|NB_ASYNC));
            if (ioflag & IO_NOCACHE)
                SET(bp->nb_flags, NB_NCRDAHEAD);
            if (!IS_VALID_CRED(bp->nb_rcred) && IS_VALID_CRED(cred)) {
                kauth_cred_ref(cred);
                bp->nb_rcred = cred;
            }
            if ((error = nfs_buf_read(bp)))
                break;
        } else {
            nfs_buf_release(bp, 1);
        }
    }
    return (error);
}
/*
 * NFS buffer I/O for reading files.
 */
int
nfs_bioread(nfsnode_t np, uio_t uio, int ioflag, vfs_context_t ctx)
{
    vnode_t vp = NFSTOV(np);
    struct nfsbuf *bp = NULL;
    struct nfs_vattr nvattr;
    struct nfsmount *nmp = VTONMP(vp);
    daddr64_t lbn, rabn = 0, lastrabn, maxrabn = -1;
    off_t diff;
    int error = 0, n = 0, on = 0;
    int nfsvers, biosize, modified, readaheads = 0;
    thread_t thd;
    kauth_cred_t cred;
    int64_t io_resid;

    FSDBG_TOP(514, np, uio_offset(uio), uio_resid(uio), ioflag);

    nfsvers = nmp->nm_vers;
    biosize = nmp->nm_biosize;
    thd = vfs_context_thread(ctx);
    cred = vfs_context_ucred(ctx);

    if (vnode_vtype(vp) != VREG) {
        printf("nfs_bioread: type %x unexpected\n", vnode_vtype(vp));
        FSDBG_BOT(514, np, 0xd1e0016, 0, EINVAL);
        return (EINVAL);
    }

    /*
     * For NFS, cache consistency can only be maintained approximately.
     * Although RFC1094 does not specify the criteria, the following is
     * believed to be compatible with the reference port.
     *
     * If the file has changed since the last read RPC or you have
     * written to the file, you may have lost data cache consistency
     * with the server.  So, check for a change, and flush all of the
     * file's data out of the cache.
     * NB: This implies that cache data can be read when up to
     * NFS_MAXATTRTIMO seconds out of date.  If you find that you
     * need current attributes, nfs_getattr() can be forced to fetch
     * new attributes (via NATTRINVALIDATE() or NGA_UNCACHED).
     */

    if (ISSET(np->n_flag, NUPDATESIZE))
        nfs_data_update_size(np, 0);

    if ((error = nfs_node_lock(np))) {
        FSDBG_BOT(514, np, 0xd1e0222, 0, error);
        return (error);
    }

    if (np->n_flag & NNEEDINVALIDATE) {
        np->n_flag &= ~NNEEDINVALIDATE;
        nfs_node_unlock(np);
        error = nfs_vinvalbuf(vp, V_SAVE|V_IGNORE_WRITEERR, ctx, 1);
        if (!error)
            error = nfs_node_lock(np);
        if (error) {
            FSDBG_BOT(514, np, 0xd1e0322, 0, error);
            return (error);
        }
    }

    modified = (np->n_flag & NMODIFIED);
    nfs_node_unlock(np);
    /* nfs_getattr() will check changed and purge caches */
    error = nfs_getattr(np, &nvattr, ctx, modified ? NGA_UNCACHED : NGA_CACHED);
    if (error) {
        FSDBG_BOT(514, np, 0xd1e0004, 0, error);
        return (error);
    }

    if (uio_resid(uio) == 0) {
        FSDBG_BOT(514, np, 0xd1e0001, 0, 0);
        return (0);
    }
    if (uio_offset(uio) < 0) {
        FSDBG_BOT(514, np, 0xd1e0002, 0, EINVAL);
        return (EINVAL);
    }

    /*
     * set up readahead - which may be limited by:
     * + current request length (for IO_NOCACHE)
     * + readahead setting
     * + file size
     */
    if (nmp->nm_readahead > 0) {
        off_t end = uio_offset(uio) + uio_resid(uio);
        if (end > (off_t)np->n_size)
            end = np->n_size;
        rabn = uio_offset(uio) / biosize;
        maxrabn = (end - 1) / biosize;
        nfs_node_lock_force(np);
        if (!(ioflag & IO_NOCACHE) &&
            (!rabn || (rabn == np->n_lastread) || (rabn == (np->n_lastread+1)))) {
            maxrabn += nmp->nm_readahead;
            if ((maxrabn * biosize) >= (off_t)np->n_size)
                maxrabn = ((off_t)np->n_size - 1)/biosize;
        }
        if (maxrabn < np->n_lastrahead)
            np->n_lastrahead = -1;
        if (rabn < np->n_lastrahead)
            rabn = np->n_lastrahead + 1;
        nfs_node_unlock(np);
    }

    do {

        nfs_data_lock(np, NFS_DATA_LOCK_SHARED);
        lbn = uio_offset(uio) / biosize;

        /*
         * Copy directly from any cached pages without grabbing the bufs.
         * (If we are NOCACHE and we've issued readahead requests, we need
         * to grab the NB_NCRDAHEAD bufs to drop them.)
         */
        if ((!(ioflag & IO_NOCACHE) || !readaheads) &&
            ((uio->uio_segflg == UIO_USERSPACE32 ||
              uio->uio_segflg == UIO_USERSPACE64 ||
              uio->uio_segflg == UIO_USERSPACE))) {
            io_resid = uio_resid(uio);
            diff = np->n_size - uio_offset(uio);
            if (diff < io_resid)
                io_resid = diff;
            if (io_resid > 0) {
                int count = (io_resid > INT_MAX) ? INT_MAX : io_resid;
                error = cluster_copy_ubc_data(vp, uio, &count, 0);
                if (error) {
                    nfs_data_unlock(np);
                    FSDBG_BOT(514, np, uio_offset(uio), 0xcacefeed, error);
                    return (error);
                }
            }
            /* count any biocache reads that we just copied directly */
            if (lbn != (uio_offset(uio)/biosize)) {
                OSAddAtomic((uio_offset(uio)/biosize) - lbn, &nfsstats.biocache_reads);
                FSDBG(514, np, 0xcacefeed, uio_offset(uio), error);
            }
        }

        lbn = uio_offset(uio) / biosize;
        on = uio_offset(uio) % biosize;
        nfs_node_lock_force(np);
        np->n_lastread = (uio_offset(uio) - 1) / biosize;
        nfs_node_unlock(np);

        /* adjust readahead block number, if necessary */
        if (rabn < lbn)
            rabn = lbn;
        lastrabn = MIN(maxrabn, lbn + nmp->nm_readahead);
        if (rabn <= lastrabn) { /* start readaheads */
            error = nfs_buf_readahead(np, ioflag, &rabn, lastrabn, thd, cred);
            if (error) {
                nfs_data_unlock(np);
                FSDBG_BOT(514, np, 0xd1e000b, 1, error);
                return (error);
            }
            readaheads = 1;
        }

        if ((uio_resid(uio) <= 0) || (uio_offset(uio) >= (off_t)np->n_size)) {
            nfs_data_unlock(np);
            FSDBG_BOT(514, np, uio_offset(uio), uio_resid(uio), 0xaaaaaaaa);
            return (0);
        }

        OSAddAtomic(1, &nfsstats.biocache_reads);

        /*
         * If the block is in the cache and has the required data
         * in a valid region, just copy it out.
         * Otherwise, get the block and write back/read in,
         * as required.
         */
again:
        io_resid = uio_resid(uio);
        n = (io_resid > (biosize - on)) ? (biosize - on) : io_resid;
        diff = np->n_size - uio_offset(uio);
        if (diff < n)
            n = diff;

        error = nfs_buf_get(np, lbn, biosize, thd, NBLK_READ, &bp);
        if (error) {
            nfs_data_unlock(np);
            FSDBG_BOT(514, np, 0xd1e000c, 0, error);
            return (error);
        }

        if ((ioflag & IO_NOCACHE) && ISSET(bp->nb_flags, NB_CACHE)) {
            /*
             * IO_NOCACHE found a cached buffer.
             * Flush the buffer if it's dirty.
             * Invalidate the data if it wasn't just read
             * in as part of a "nocache readahead".
             */
            if (bp->nb_dirty || (bp->nb_dirtyend > 0)) {
                /* so write the buffer out and try again */
                SET(bp->nb_flags, NB_NOCACHE);
                goto flushbuffer;
            }
            if (ISSET(bp->nb_flags, NB_NCRDAHEAD)) {
                CLR(bp->nb_flags, NB_NCRDAHEAD);
                SET(bp->nb_flags, NB_NOCACHE);
            }
        }

        /* if any pages are valid... */
        if (bp->nb_valid) {
            /* ...check for any invalid pages in the read range */
            int pg, firstpg, lastpg, dirtypg;
            dirtypg = firstpg = lastpg = -1;
            pg = on/PAGE_SIZE;
            while (pg <= (on + n - 1)/PAGE_SIZE) {
                if (!NBPGVALID(bp, pg)) {
                    if (firstpg < 0)
                        firstpg = pg;
                    lastpg = pg;
                } else if (firstpg >= 0 && dirtypg < 0 && NBPGDIRTY(bp, pg))
                    dirtypg = pg;
                pg++;
            }

            /* if there are no invalid pages, we're all set */
            if (firstpg < 0) {
                if (bp->nb_validoff < 0) {
                    /* valid range isn't set up, so */
                    /* set it to what we know is valid */
                    bp->nb_validoff = trunc_page(on);
                    bp->nb_validend = round_page(on+n);
                    nfs_buf_normalize_valid_range(np, bp);
                }
                goto buffer_ready;
            }

            /* there are invalid pages in the read range */
            if (((dirtypg > firstpg) && (dirtypg < lastpg)) ||
                (((firstpg*PAGE_SIZE) < bp->nb_dirtyend) && (((lastpg+1)*PAGE_SIZE) > bp->nb_dirtyoff))) {
                /* there are also dirty page(s) (or range) in the read range, */
                /* so write the buffer out and try again */
flushbuffer:
                CLR(bp->nb_flags, (NB_DONE | NB_ERROR | NB_INVAL));
                SET(bp->nb_flags, NB_ASYNC);
                if (!IS_VALID_CRED(bp->nb_wcred)) {
                    kauth_cred_ref(cred);
                    bp->nb_wcred = cred;
                }
                error = nfs_buf_write(bp);
                if (error) {
                    nfs_data_unlock(np);
                    FSDBG_BOT(514, np, 0xd1e000d, 0, error);
                    return (error);
                }
                goto again;
            }
            if (!bp->nb_dirty && bp->nb_dirtyend <= 0 &&
                (lastpg - firstpg + 1) > (biosize/PAGE_SIZE)/2) {
                /* we need to read in more than half the buffer and the */
                /* buffer's not dirty, so just fetch the whole buffer */
                bp->nb_valid = 0;
            } else {
                /* read the page range in */
                uio_t auio;
                char uio_buf[ UIO_SIZEOF(1) ];

                auio = uio_createwithbuffer(1, (NBOFF(bp) + firstpg * PAGE_SIZE_64),
                    UIO_SYSSPACE, UIO_READ, &uio_buf[0], sizeof(uio_buf));
                uio_addiov(auio, CAST_USER_ADDR_T(bp->nb_data + (firstpg * PAGE_SIZE)),
                    ((lastpg - firstpg + 1) * PAGE_SIZE));
                error = nfs_read_rpc(np, auio, ctx);
                if (error) {
                    if (ioflag & IO_NOCACHE)
                        SET(bp->nb_flags, NB_NOCACHE);
                    nfs_buf_release(bp, 1);
                    nfs_data_unlock(np);
                    FSDBG_BOT(514, np, 0xd1e000e, 0, error);
                    return (error);
                }
                /* Make sure that the valid range is set to cover this read. */
                bp->nb_validoff = trunc_page_32(on);
                bp->nb_validend = round_page_32(on+n);
                nfs_buf_normalize_valid_range(np, bp);
                if (uio_resid(auio) > 0) {
                    /* if short read, must have hit EOF, */
                    /* so zero the rest of the range */
                    bzero(CAST_DOWN(caddr_t, uio_curriovbase(auio)), uio_resid(auio));
                }
                /* mark the pages (successfully read) as valid */
                for (pg=firstpg; pg <= lastpg; pg++)
                    NBPGVALID_SET(bp, pg);
            }
        }
        /* if no pages are valid, read the whole block */
        if (!bp->nb_valid) {
            if (!IS_VALID_CRED(bp->nb_rcred) && IS_VALID_CRED(cred)) {
                kauth_cred_ref(cred);
                bp->nb_rcred = cred;
            }
            SET(bp->nb_flags, NB_READ);
            CLR(bp->nb_flags, (NB_DONE | NB_ERROR | NB_INVAL));
            error = nfs_buf_read(bp);
            if (ioflag & IO_NOCACHE)
                SET(bp->nb_flags, NB_NOCACHE);
            if (error) {
                nfs_data_unlock(np);
                nfs_buf_release(bp, 1);
                FSDBG_BOT(514, np, 0xd1e000f, 0, error);
                return (error);
            }
        }
buffer_ready:
        /* validate read range against valid range and clip */
        if (bp->nb_validend > 0) {
            diff = (on >= bp->nb_validend) ? 0 : (bp->nb_validend - on);
            if (diff < n)
                n = diff;
        }
        if (n > 0)
            error = uiomove(bp->nb_data + on, n, uio);

        nfs_buf_release(bp, 1);
        nfs_data_unlock(np);
        nfs_node_lock_force(np);
        np->n_lastread = (uio_offset(uio) - 1) / biosize;
        nfs_node_unlock(np);
    } while (error == 0 && uio_resid(uio) > 0 && n > 0);
    FSDBG_BOT(514, np, uio_offset(uio), uio_resid(uio), error);
    return (error);
}
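/*
 * Readahead window example (illustrative numbers): with biosize == 32K,
 * nm_readahead == 4, and a sequential 64K read at offset 0, rabn starts at
 * block 0 and maxrabn is (65536 - 1)/32768 == 1, extended by nm_readahead to
 * 5 (clipped at EOF), so up to four blocks beyond the request are fetched
 * asynchronously before the copy loop begins.
 */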
/*
 * limit the number of outstanding async I/O writes
 */
int
nfs_async_write_start(struct nfsmount *nmp)
{
    int error = 0, slpflag = (nmp->nm_flag & NFSMNT_INT) ? PCATCH : 0;
    struct timespec ts = {1, 0};

    if (nfs_max_async_writes <= 0)
        return (0);
    lck_mtx_lock(&nmp->nm_lock);
    while ((nfs_max_async_writes > 0) && (nmp->nm_asyncwrites >= nfs_max_async_writes)) {
        if ((error = nfs_sigintr(nmp, NULL, current_thread(), 1)))
            break;
        msleep(&nmp->nm_asyncwrites, &nmp->nm_lock, slpflag|(PZERO-1), "nfsasyncwrites", &ts);
        slpflag = 0;
    }
    if (!error)
        nmp->nm_asyncwrites++;
    lck_mtx_unlock(&nmp->nm_lock);
    return (error);
}
void
nfs_async_write_done(struct nfsmount *nmp)
{
    if (nmp->nm_asyncwrites <= 0)
        return;
    lck_mtx_lock(&nmp->nm_lock);
    if (nmp->nm_asyncwrites-- >= nfs_max_async_writes)
        wakeup(&nmp->nm_asyncwrites);
    lck_mtx_unlock(&nmp->nm_lock);
}
/*
 * write (or commit) the given NFS buffer
 *
 * Commit the buffer if we can.
 * Write out any dirty range.
 * If any dirty pages remain, write them out.
 * Otherwise mark it done.
 *
 * For async requests, all the work beyond sending the initial
 * write RPC is handled in the RPC callback(s).
 */
int
nfs_buf_write(struct nfsbuf *bp)
{
	int error = 0, oldflags, async;
	nfsnode_t np;
	thread_t thd;
	kauth_cred_t cred;
	proc_t p = current_proc();
	int iomode, doff, dend, firstpg, lastpg;
	uint32_t pagemask;

	FSDBG_TOP(553, bp, NBOFF(bp), bp->nb_flags, 0);

	if (!ISSET(bp->nb_lflags, NBL_BUSY))
		panic("nfs_buf_write: buffer is not busy???");

	np = bp->nb_np;
	async = ISSET(bp->nb_flags, NB_ASYNC);
	oldflags = bp->nb_flags;

	CLR(bp->nb_flags, (NB_READ|NB_DONE|NB_ERROR|NB_DELWRI));
	if (ISSET(oldflags, NB_DELWRI)) {
		lck_mtx_lock(nfs_buf_mutex);
		nfs_nbdwrite--;
		NFSBUFCNTCHK();
		lck_mtx_unlock(nfs_buf_mutex);
		wakeup(&nfs_nbdwrite);
	}

	/* move to clean list */
	if (ISSET(oldflags, (NB_ASYNC|NB_DELWRI))) {
		lck_mtx_lock(nfs_buf_mutex);
		if (bp->nb_vnbufs.le_next != NFSNOLIST)
			LIST_REMOVE(bp, nb_vnbufs);
		LIST_INSERT_HEAD(&np->n_cleanblkhd, bp, nb_vnbufs);
		lck_mtx_unlock(nfs_buf_mutex);
	}
	nfs_node_lock_force(np);
	np->n_numoutput++;
	nfs_node_unlock(np);
	vnode_startwrite(NFSTOV(np));

	if (p && p->p_stats)
		OSIncrementAtomicLong(&p->p_stats->p_ru.ru_oublock);

	cred = bp->nb_wcred;
	if (!IS_VALID_CRED(cred) && ISSET(bp->nb_flags, NB_READ))
		cred = bp->nb_rcred;  /* shouldn't really happen, but... */
	if (IS_VALID_CRED(cred))
		kauth_cred_ref(cred);
	thd = async ? NULL : current_thread();

	/* We need to make sure the pages are locked before doing I/O. */
	if (!ISSET(bp->nb_flags, NB_META) && UBCINFOEXISTS(NFSTOV(np))) {
		if (!ISSET(bp->nb_flags, NB_PAGELIST)) {
			error = nfs_buf_upl_setup(bp);
			if (error) {
				printf("nfs_buf_write: upl create failed %d\n", error);
				SET(bp->nb_flags, NB_ERROR);
				bp->nb_error = error = EIO;
				nfs_buf_iodone(bp);
				goto out;
			}
			nfs_buf_upl_check(bp);
		}
	}

	/* If NB_NEEDCOMMIT is set, a commit RPC may do the trick. */
	if (ISSET(bp->nb_flags, NB_NEEDCOMMIT))
		nfs_buf_check_write_verifier(np, bp);
	if (ISSET(bp->nb_flags, NB_NEEDCOMMIT)) {
		struct nfsmount *nmp = NFSTONMP(np);
		if (!nmp) {
			SET(bp->nb_flags, NB_ERROR);
			bp->nb_error = error = EIO;
			nfs_buf_iodone(bp);
			goto out;
		}
		SET(bp->nb_flags, NB_WRITEINPROG);
		error = nmp->nm_funcs->nf_commit_rpc(np, NBOFF(bp) + bp->nb_dirtyoff,
				bp->nb_dirtyend - bp->nb_dirtyoff, bp->nb_wcred);
		CLR(bp->nb_flags, NB_WRITEINPROG);
		if (error) {
			if (error != NFSERR_STALEWRITEVERF) {
				SET(bp->nb_flags, NB_ERROR);
				bp->nb_error = error;
			}
			nfs_buf_iodone(bp);
			goto out;
		}
		bp->nb_dirtyoff = bp->nb_dirtyend = 0;
		CLR(bp->nb_flags, NB_NEEDCOMMIT);
		nfs_node_lock_force(np);
		np->n_needcommitcnt--;
		CHECK_NEEDCOMMITCNT(np);
		nfs_node_unlock(np);
	}
	if (!error && (bp->nb_dirtyend > 0)) {
		/* sanity check the dirty range */
		if (NBOFF(bp) + bp->nb_dirtyend > (off_t) np->n_size) {
			bp->nb_dirtyend = np->n_size - NBOFF(bp);
			if (bp->nb_dirtyoff >= bp->nb_dirtyend)
				bp->nb_dirtyoff = bp->nb_dirtyend = 0;
		}
	}
	if (!error && (bp->nb_dirtyend > 0)) {
		/* there's a dirty range that needs to be written out */
		NFS_BUF_MAP(bp);

		doff = bp->nb_dirtyoff;
		dend = bp->nb_dirtyend;

		/* if doff page is dirty, move doff to start of page */
		if (NBPGDIRTY(bp, doff / PAGE_SIZE))
			doff -= doff & PAGE_MASK;
		/* try to expand write range to include preceding dirty pages */
		if (!(doff & PAGE_MASK))
			while ((doff > 0) && NBPGDIRTY(bp, (doff - 1) / PAGE_SIZE))
				doff -= PAGE_SIZE;
		/* if dend page is dirty, move dend to start of next page */
		if ((dend & PAGE_MASK) && NBPGDIRTY(bp, dend / PAGE_SIZE))
			dend = round_page_32(dend);
		/* try to expand write range to include trailing dirty pages */
		if (!(dend & PAGE_MASK))
			while ((dend < (int)bp->nb_bufsize) && NBPGDIRTY(bp, dend / PAGE_SIZE))
				dend += PAGE_SIZE;
		/* make sure to keep dend clipped to EOF */
		if ((NBOFF(bp) + dend) > (off_t) np->n_size)
			dend = np->n_size - NBOFF(bp);
		/* calculate range of complete pages being written */
		firstpg = round_page_32(doff) / PAGE_SIZE;
		lastpg = (trunc_page_32(dend) - 1) / PAGE_SIZE;
		/* calculate mask for that page range */
		pagemask = ((1 << (lastpg + 1)) - 1) & ~((1 << firstpg) - 1);
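		/*
		 * e.g. with 4K pages, if doff/dend span complete pages 1..3,
		 * pagemask = ((1 << 4) - 1) & ~((1 << 1) - 1) = 0x0e,
		 * i.e. bits set for pages 1, 2, and 3.
		 */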
		/*
		 * compare page mask to nb_dirty; if there are other dirty pages
		 * then write FILESYNC; otherwise, write UNSTABLE if async and
		 * not needcommit/stable; otherwise write FILESYNC
		 */
		if (bp->nb_dirty & ~pagemask)
			iomode = NFS_WRITE_FILESYNC;
		else if ((bp->nb_flags & (NB_ASYNC | NB_NEEDCOMMIT | NB_STABLE)) == NB_ASYNC)
			iomode = NFS_WRITE_UNSTABLE;
		else
			iomode = NFS_WRITE_FILESYNC;
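		/*
		 * (UNSTABLE lets the server reply before the data reaches
		 * stable storage; nfs_buf_write_finish() will then mark the
		 * buffer NB_NEEDCOMMIT so a later COMMIT makes it permanent.)
		 */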
		/* write the whole contiguous dirty range */
		bp->nb_offio = doff;
		bp->nb_endio = dend;

		OSAddAtomic(1, &nfsstats.write_bios);

		SET(bp->nb_flags, NB_WRITEINPROG);
		error = nfs_buf_write_rpc(bp, iomode, thd, cred);
		/*
		 * For async I/O, the callbacks will finish up the
		 * write and push out any dirty pages.  Otherwise,
		 * the write has already been finished and any dirty
		 * pages pushed out.
		 */
	} else {
		if (!error && bp->nb_dirty) /* write out any dirty pages */
			error = nfs_buf_write_dirty_pages(bp, thd, cred);
		nfs_buf_iodone(bp);
	}
	/* note: bp is still valid only for !async case */
out:
	if (!error && !async) {
		error = nfs_buf_iowait(bp);
		/* move to clean list */
		if (oldflags & NB_DELWRI) {
			lck_mtx_lock(nfs_buf_mutex);
			if (bp->nb_vnbufs.le_next != NFSNOLIST)
				LIST_REMOVE(bp, nb_vnbufs);
			LIST_INSERT_HEAD(&np->n_cleanblkhd, bp, nb_vnbufs);
			lck_mtx_unlock(nfs_buf_mutex);
		}
		FSDBG_BOT(553, bp, NBOFF(bp), bp->nb_flags, error);
		nfs_buf_release(bp, 1);
		/* check if we need to invalidate (and we can) */
		if ((np->n_flag & NNEEDINVALIDATE) &&
		    !(np->n_bflag & (NBINVALINPROG|NBFLUSHINPROG))) {
			int invalidate = 0;
			nfs_node_lock_force(np);
			if (np->n_flag & NNEEDINVALIDATE) {
				invalidate = 1;
				np->n_flag &= ~NNEEDINVALIDATE;
			}
			nfs_node_unlock(np);
			if (invalidate) {
				/*
				 * There was a write error and we need to
				 * invalidate attrs and flush buffers in
				 * order to sync up with the server.
				 * (if this write was extending the file,
				 * we may no longer know the correct size)
				 *
				 * But we couldn't call vinvalbuf while holding
				 * the buffer busy.  So we call vinvalbuf() after
				 * releasing the buffer.
				 */
				nfs_vinvalbuf2(NFSTOV(np), V_SAVE|V_IGNORE_WRITEERR, thd, cred, 1);
			}
		}
	}

	if (IS_VALID_CRED(cred))
		kauth_cred_unref(&cred);
	return (error);
}
/*
 * finish the writing of a buffer
 */
void
nfs_buf_write_finish(struct nfsbuf *bp, thread_t thd, kauth_cred_t cred)
{
	nfsnode_t np = bp->nb_np;
	int error = (bp->nb_flags & NB_ERROR) ? bp->nb_error : 0;
	int firstpg, lastpg;
	uint32_t pagemask;

	if ((error == EINTR) || (error == ERESTART)) {
		CLR(bp->nb_flags, NB_ERROR);
		SET(bp->nb_flags, NB_EINTR);
	}

	if (!error) {
		/* calculate range of complete pages being written */
		firstpg = round_page_32(bp->nb_offio) / PAGE_SIZE;
		lastpg = (trunc_page_32(bp->nb_endio) - 1) / PAGE_SIZE;
		/* calculate mask for that page range written */
		pagemask = ((1 << (lastpg + 1)) - 1) & ~((1 << firstpg) - 1);
		/* clear dirty bits for pages we've written */
		bp->nb_dirty &= ~pagemask;
	}

	/* manage needcommit state */
	if (!error && (bp->nb_commitlevel == NFS_WRITE_UNSTABLE)) {
		if (!ISSET(bp->nb_flags, NB_NEEDCOMMIT)) {
			nfs_node_lock_force(np);
			np->n_needcommitcnt++;
			nfs_node_unlock(np);
			SET(bp->nb_flags, NB_NEEDCOMMIT);
		}
		/* make sure nb_dirtyoff/nb_dirtyend reflect actual range written */
		bp->nb_dirtyoff = bp->nb_offio;
		bp->nb_dirtyend = bp->nb_endio;
	} else if (ISSET(bp->nb_flags, NB_NEEDCOMMIT)) {
		nfs_node_lock_force(np);
		np->n_needcommitcnt--;
		CHECK_NEEDCOMMITCNT(np);
		nfs_node_unlock(np);
		CLR(bp->nb_flags, NB_NEEDCOMMIT);
	}

	CLR(bp->nb_flags, NB_WRITEINPROG);

	/*
	 * For an unstable write, the buffer is still treated as dirty until
	 * a commit (or stable (re)write) is performed.  Buffers needing only
	 * a commit are marked with the NB_DELWRI and NB_NEEDCOMMIT flags.
	 *
	 * If the write was interrupted we set NB_EINTR.  Don't set NB_ERROR
	 * because that would cause the buffer to be dropped.  The buffer is
	 * still valid and simply needs to be written again.
	 */
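	/*
	 * (In effect: a successful UNSTABLE write leaves the buffer marked
	 * NB_DELWRI|NB_NEEDCOMMIT on the dirty list, so nfs_flushcommits()
	 * can later turn it into a COMMIT RPC instead of rewriting the data.)
	 */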
	if ((error == EINTR) || (error == ERESTART) || (!error && (bp->nb_flags & NB_NEEDCOMMIT))) {
		CLR(bp->nb_flags, NB_INVAL);
		if (!ISSET(bp->nb_flags, NB_DELWRI)) {
			SET(bp->nb_flags, NB_DELWRI);
			lck_mtx_lock(nfs_buf_mutex);
			nfs_nbdwrite++;
			NFSBUFCNTCHK();
			lck_mtx_unlock(nfs_buf_mutex);
		}
		/*
		 * Since for the NB_ASYNC case, we've reassigned the buffer to the
		 * clean list, we have to reassign it back to the dirty one. Ugh.
		 */
		if (ISSET(bp->nb_flags, NB_ASYNC)) {
			/* move to dirty list */
			lck_mtx_lock(nfs_buf_mutex);
			if (bp->nb_vnbufs.le_next != NFSNOLIST)
				LIST_REMOVE(bp, nb_vnbufs);
			LIST_INSERT_HEAD(&np->n_dirtyblkhd, bp, nb_vnbufs);
			lck_mtx_unlock(nfs_buf_mutex);
		}
	} else {
		/* either there's an error or we don't need to commit */
		if (error) {
			/*
			 * There was a write error and we need to invalidate
			 * attrs and flush buffers in order to sync up with the
			 * server.  (if this write was extending the file, we
			 * may no longer know the correct size)
			 *
			 * But we can't call vinvalbuf while holding this
			 * buffer busy.  Set a flag to do it after releasing
			 * the buffer.
			 */
			nfs_node_lock_force(np);
			np->n_error = error;
			np->n_flag |= (NWRITEERR | NNEEDINVALIDATE);
			NATTRINVALIDATE(np);
			nfs_node_unlock(np);
		}
		/* clear the dirty range */
		bp->nb_dirtyoff = bp->nb_dirtyend = 0;
	}

	if (!error && bp->nb_dirty)
		nfs_buf_write_dirty_pages(bp, thd, cred);
	nfs_buf_iodone(bp);
}
/*
 * write out any pages marked dirty in a buffer
 *
 * We do use unstable writes and follow up with a commit.
 * If we catch the write verifier changing, we'll restart
 * and do the writes filesync.
 */
int
nfs_buf_write_dirty_pages(struct nfsbuf *bp, thread_t thd, kauth_cred_t cred)
{
	nfsnode_t np = bp->nb_np;
	struct nfsmount *nmp = NFSTONMP(np);
	int error = 0, commit, iomode, iomode2, len, pg, count, npages, off;
	uint32_t dirty = bp->nb_dirty;
	uint64_t wverf;
	uio_t auio;
	char uio_buf[ UIO_SIZEOF(1) ];
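
	/*
	 * (nb_dirty is a per-page dirty bitmap for the buffer: bit N set
	 * means page N still holds modified data; the loop below clears
	 * each bit with "dirty &= ~(1 << pg)" once page pg is written.)
	 */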
	if (!bp->nb_dirty)
		return (0);

	/* there are pages marked dirty that need to be written out */
	OSAddAtomic(1, &nfsstats.write_bios);
	NFS_BUF_MAP(bp);
	SET(bp->nb_flags, NB_WRITEINPROG);
	npages = bp->nb_bufsize / PAGE_SIZE;
	iomode = NFS_WRITE_UNSTABLE;

	auio = uio_createwithbuffer(1, 0, UIO_SYSSPACE, UIO_WRITE,
		&uio_buf, sizeof(uio_buf));

again:
	dirty = bp->nb_dirty;
	wverf = bp->nb_verf;
	commit = NFS_WRITE_FILESYNC;
	for (pg = 0; pg < npages; pg++) {
		if (!NBPGDIRTY(bp, pg))
			continue;
		count = 1;
		while (((pg + count) < npages) && NBPGDIRTY(bp, pg + count))
			count++;
		/* write count pages starting with page pg */
		off = pg * PAGE_SIZE;
		len = count * PAGE_SIZE;
		/* clip writes to EOF */
		if (NBOFF(bp) + off + len > (off_t) np->n_size)
			len -= (NBOFF(bp) + off + len) - np->n_size;
		if (len > 0) {
			iomode2 = iomode;
			uio_reset(auio, NBOFF(bp) + off, UIO_SYSSPACE, UIO_WRITE);
			uio_addiov(auio, CAST_USER_ADDR_T(bp->nb_data + off), len);
			error = nfs_write_rpc2(np, auio, thd, cred, &iomode2, &bp->nb_verf);
			if (error)
				break;
			if (iomode2 < commit) /* Retain the lowest commitment level returned. */
				commit = iomode2;
			if ((commit != NFS_WRITE_FILESYNC) && (wverf != bp->nb_verf)) {
				/* verifier changed, redo all the writes filesync */
				iomode = NFS_WRITE_FILESYNC;
				goto again;
			}
		}
		/* clear dirty bits */
		while (count--) {
			dirty &= ~(1 << pg);
			if (count) /* leave pg on last page */
				pg++;
		}
	}
	CLR(bp->nb_flags, NB_WRITEINPROG);

	if (!error && (commit != NFS_WRITE_FILESYNC)) {
		error = nmp->nm_funcs->nf_commit_rpc(np, NBOFF(bp), bp->nb_bufsize, cred);
		if (error == NFSERR_STALEWRITEVERF) {
			/* verifier changed, so we need to restart all the writes */
			iomode = NFS_WRITE_FILESYNC;
			goto again;
		}
	}
	if (!error) {
		bp->nb_dirty = dirty;
	} else {
		SET(bp->nb_flags, NB_ERROR);
		bp->nb_error = error;
	}
	return (error);
}
/*
 * initiate the NFS WRITE RPC(s) for a buffer
 */
int
nfs_buf_write_rpc(struct nfsbuf *bp, int iomode, thread_t thd, kauth_cred_t cred)
{
	struct nfsmount *nmp;
	nfsnode_t np = bp->nb_np;
	int error = 0, nfsvers, async;
	int offset, nrpcs;
	uint32_t nmwsize, length, len;
	struct nfsreq *req;
	struct nfsreq_cbinfo cb;
	uio_t auio;
	char uio_buf[ UIO_SIZEOF(1) ];

	nmp = NFSTONMP(np);
	if (!nmp) {
		bp->nb_error = error = ENXIO;
		SET(bp->nb_flags, NB_ERROR);
		nfs_buf_iodone(bp);
		return (error);
	}
	nfsvers = nmp->nm_vers;
	nmwsize = nmp->nm_wsize;

	offset = bp->nb_offio;
	length = bp->nb_endio - bp->nb_offio;

	/* Note: Can only do async I/O if nfsiods are configured. */
	async = (bp->nb_flags & NB_ASYNC) && (NFSIOD_MAX > 0);
	bp->nb_commitlevel = NFS_WRITE_FILESYNC;
	cb.rcb_func = async ? nfs_buf_write_rpc_finish : NULL;
	cb.rcb_bp = bp;

	if ((nfsvers == NFS_VER2) && ((NBOFF(bp) + bp->nb_endio) > 0xffffffffLL)) {
		bp->nb_error = error = EFBIG;
		SET(bp->nb_flags, NB_ERROR);
		nfs_buf_iodone(bp);
		return (error);
	}

	auio = uio_createwithbuffer(1, NBOFF(bp) + offset, UIO_SYSSPACE,
		UIO_WRITE, &uio_buf, sizeof(uio_buf));
	uio_addiov(auio, CAST_USER_ADDR_T(bp->nb_data + offset), length);

	bp->nb_rpcs = nrpcs = (length + nmwsize - 1) / nmwsize;
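	/*
	 * e.g. a 64K dirty range with a 32K server write size (nm_wsize)
	 * yields nrpcs = (65536 + 32767) / 32768 = 2 WRITE RPCs.
	 */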
	if (async && (nrpcs > 1)) {
		SET(bp->nb_flags, NB_MULTASYNCRPC);
	} else {
		CLR(bp->nb_flags, NB_MULTASYNCRPC);
	}

	while (length > 0) {
		if (ISSET(bp->nb_flags, NB_ERROR)) {
			error = bp->nb_error;
			break;
		}
		len = (length > nmwsize) ? nmwsize : length;
		cb.rcb_args[0] = offset;
		cb.rcb_args[1] = len;
		if (nmp->nm_vers >= NFS_VER4)
			cb.rcb_args[2] = nmp->nm_stategenid;
		if (async && ((error = nfs_async_write_start(nmp))))
			break;
		req = NULL;
		error = nmp->nm_funcs->nf_write_rpc_async(np, auio, len, thd, cred,
				iomode, &cb, &req);
		if (error) {
			if (async)
				nfs_async_write_done(nmp);
			break;
		}
		offset += len;
		length -= len;
		if (async)
			continue;
		nfs_buf_write_rpc_finish(req);
	}

	if (length > 0) {
		/*
		 * Something bad happened while trying to send the RPCs.
		 * Wait for any outstanding requests to complete.
		 */
		bp->nb_error = error;
		SET(bp->nb_flags, NB_ERROR);
		if (ISSET(bp->nb_flags, NB_MULTASYNCRPC)) {
			nrpcs = (length + nmwsize - 1) / nmwsize;
			lck_mtx_lock(nfs_buf_mutex);
			bp->nb_rpcs -= nrpcs;
			if (bp->nb_rpcs == 0) {
				/* No RPCs left, so the buffer's done */
				lck_mtx_unlock(nfs_buf_mutex);
				nfs_buf_write_finish(bp, thd, cred);
			} else {
				/* wait for the last RPC to mark it done */
				while (bp->nb_rpcs > 0)
					msleep(&bp->nb_rpcs, nfs_buf_mutex, 0,
						"nfs_buf_write_rpc_cancel", NULL);
				lck_mtx_unlock(nfs_buf_mutex);
			}
		} else {
			nfs_buf_write_finish(bp, thd, cred);
		}
	}

	return (error);
}
/*
 * finish up an NFS WRITE RPC on a buffer
 */
void
nfs_buf_write_rpc_finish(struct nfsreq *req)
{
	int error = 0, nfsvers, offset, length, multasyncrpc, finished;
	int committed = NFS_WRITE_FILESYNC;
	uint64_t wverf = 0;
	size_t rlen;
	void *wakeme = NULL;
	struct nfsreq_cbinfo cb;
	struct nfsreq *wreq = NULL;
	struct nfsbuf *bp;
	struct nfsmount *nmp;
	nfsnode_t np;
	thread_t thd;
	kauth_cred_t cred;
	uio_t auio;
	char uio_buf[ UIO_SIZEOF(1) ];

finish:
	np = req->r_np;
	thd = req->r_thread;
	cred = req->r_cred;
	if (IS_VALID_CRED(cred))
		kauth_cred_ref(cred);
	cb = req->r_callback;
	bp = cb.rcb_bp;

	nmp = NFSTONMP(np);
	if (!nmp) {
		SET(bp->nb_flags, NB_ERROR);
		bp->nb_error = error = ENXIO;
	}
	if (error || ISSET(bp->nb_flags, NB_ERROR)) {
		/* just drop it */
		nfs_request_async_cancel(req);
		goto out;
	}
	nfsvers = nmp->nm_vers;

	offset = cb.rcb_args[0];
	rlen = length = cb.rcb_args[1];

	/* finish the RPC */
	error = nmp->nm_funcs->nf_write_rpc_async_finish(np, req, &committed, &rlen, &wverf);
	if ((error == EINPROGRESS) && cb.rcb_func) {
		/* async request restarted */
		if (IS_VALID_CRED(cred))
			kauth_cred_unref(&cred);
		return;
	}
	if ((nmp->nm_vers >= NFS_VER4) && nfs_mount_state_error_should_restart(error) && !ISSET(bp->nb_flags, NB_ERROR)) {
		lck_mtx_lock(&nmp->nm_lock);
		if ((error != NFSERR_GRACE) && (cb.rcb_args[2] == nmp->nm_stategenid) && !(nmp->nm_state & NFSSTA_RECOVER)) {
			printf("nfs_buf_write_rpc_finish: error %d, initiating recovery\n", error);
			nmp->nm_state |= NFSSTA_RECOVER;
			nfs_mount_sock_thread_wake(nmp);
		}
		lck_mtx_unlock(&nmp->nm_lock);
		if (error == NFSERR_GRACE)
			tsleep(&nmp->nm_state, (PZERO-1), "nfsgrace", 2*hz);
		if (!(error = nfs_mount_state_wait_for_recovery(nmp))) {
			rlen = 0;
			goto writeagain;
		}
	}
	if (error) {
		SET(bp->nb_flags, NB_ERROR);
		bp->nb_error = error;
	}
	if (error || (nfsvers == NFS_VER2))
		goto out;
	if (rlen <= 0) {
		SET(bp->nb_flags, NB_ERROR);
		bp->nb_error = error = EIO;
		goto out;
	}

	/* save lowest commit level returned */
	if (committed < bp->nb_commitlevel)
		bp->nb_commitlevel = committed;

	/* check the write verifier */
	if (!bp->nb_verf) {
		bp->nb_verf = wverf;
	} else if (bp->nb_verf != wverf) {
		/* verifier changed, so buffer will need to be rewritten */
		bp->nb_flags |= NB_STALEWVERF;
		bp->nb_commitlevel = NFS_WRITE_UNSTABLE;
		bp->nb_verf = wverf;
	}

	/*
	 * check for a short write
	 *
	 * If the server didn't write all the data, then we
	 * need to issue another write for the rest of it.
	 * (Don't bother if the buffer hit an error or stale wverf.)
	 */
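	/*
	 * (e.g. if the server accepted only 16K of a 32K WRITE, rlen is 16K
	 * and the code below reissues a WRITE for the remaining bytes
	 * starting at offset + rlen.)
	 */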
	if (((int)rlen < length) && !(bp->nb_flags & (NB_STALEWVERF|NB_ERROR))) {
writeagain:
		offset += rlen;
		length -= rlen;

		auio = uio_createwithbuffer(1, NBOFF(bp) + offset, UIO_SYSSPACE,
				UIO_WRITE, &uio_buf, sizeof(uio_buf));
		uio_addiov(auio, CAST_USER_ADDR_T(bp->nb_data + offset), length);

		cb.rcb_args[0] = offset;
		cb.rcb_args[1] = length;
		if (nmp->nm_vers >= NFS_VER4)
			cb.rcb_args[2] = nmp->nm_stategenid;

		// XXX iomode should really match the original request
		error = nmp->nm_funcs->nf_write_rpc_async(np, auio, length, thd, cred,
				NFS_WRITE_FILESYNC, &cb, &wreq);
		if (!error) {
			if (IS_VALID_CRED(cred))
				kauth_cred_unref(&cred);
			if (!cb.rcb_func) {
				/* if !async we'll need to wait for this RPC to finish */
				req = wreq;
				wreq = NULL;
				goto finish;
			}
			/*
			 * We're done here.
			 * Outstanding RPC count is unchanged.
			 * Callback will be called when RPC is done.
			 */
			return;
		}
		SET(bp->nb_flags, NB_ERROR);
		bp->nb_error = error;
	}

out:
	if (cb.rcb_func)
		nfs_async_write_done(nmp);
	/*
	 * Decrement outstanding RPC count on buffer
	 * and call nfs_buf_write_finish on last RPC.
	 *
	 * (Note: when there are multiple async RPCs issued for a
	 * buffer we need nfs_buffer_mutex to avoid problems when
	 * aborting a partially-initiated set of RPCs)
	 */
	multasyncrpc = ISSET(bp->nb_flags, NB_MULTASYNCRPC);
	if (multasyncrpc)
		lck_mtx_lock(nfs_buf_mutex);

	bp->nb_rpcs--;
	finished = (bp->nb_rpcs == 0);

	if (multasyncrpc)
		lck_mtx_unlock(nfs_buf_mutex);

	if (finished) {
		if (multasyncrpc)
			wakeme = &bp->nb_rpcs;
		nfs_buf_write_finish(bp, thd, cred);
		if (wakeme)
			wakeup(wakeme);
	}

	if (IS_VALID_CRED(cred))
		kauth_cred_unref(&cred);
}
/*
 * Send commit(s) for the given node's "needcommit" buffers
 */
int
nfs_flushcommits(nfsnode_t np, int nowait)
{
	struct nfsmount *nmp;
	struct nfsbuf *bp, *prevlbp, *lbp;
	struct nfsbuflists blist, commitlist;
	int error = 0, retv, wcred_set, flags, dirty;
	u_quad_t off, endoff, toff;
	uint32_t count;
	kauth_cred_t wcred = NULL;

	FSDBG_TOP(557, np, 0, 0, 0);

	/*
	 * A nb_flags == (NB_DELWRI | NB_NEEDCOMMIT) block has been written to the
	 * server, but has not been committed to stable storage on the server
	 * yet. The byte range is worked out for as many nfsbufs as we can handle
	 * and the commit rpc is done.
	 */
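	/*
	 * (e.g. two adjacent needcommit buffers covering bytes 0-65535 both
	 * land on commitlist and, if written under the same credential, are
	 * covered by a single COMMIT for that whole range.)
	 */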
	if (!LIST_EMPTY(&np->n_dirtyblkhd)) {
		error = nfs_node_lock(np);
		if (error)
			goto done;
		np->n_flag |= NMODIFIED;
		nfs_node_unlock(np);
	}

	off = (u_quad_t)-1;
	endoff = 0;
	wcred_set = 0;
	LIST_INIT(&commitlist);

	nmp = NFSTONMP(np);
	if (!nmp) {
		error = ENXIO;
		goto done;
	}
	if (nmp->nm_vers == NFS_VER2) {
		error = EINVAL;
		goto done;
	}

	flags = NBI_DIRTY;
	if (nowait)
		flags |= NBI_NOWAIT;
	lck_mtx_lock(nfs_buf_mutex);
	if (!nfs_buf_iterprepare(np, &blist, flags)) {
		while ((bp = LIST_FIRST(&blist))) {
			LIST_REMOVE(bp, nb_vnbufs);
			LIST_INSERT_HEAD(&np->n_dirtyblkhd, bp, nb_vnbufs);
			error = nfs_buf_acquire(bp, NBAC_NOWAIT, 0, 0);
			if (error)
				continue;
			if (ISSET(bp->nb_flags, NB_NEEDCOMMIT))
				nfs_buf_check_write_verifier(np, bp);
			if (((bp->nb_flags & (NB_DELWRI | NB_NEEDCOMMIT))
				!= (NB_DELWRI | NB_NEEDCOMMIT))) {
				nfs_buf_drop(bp);
				continue;
			}
			nfs_buf_remfree(bp);

			/* buffer UPLs will be grabbed *in order* below */

			FSDBG(557, bp, bp->nb_flags, bp->nb_valid, bp->nb_dirty);
			FSDBG(557, bp->nb_validoff, bp->nb_validend,
				bp->nb_dirtyoff, bp->nb_dirtyend);

			/*
			 * Work out if all buffers are using the same cred
			 * so we can deal with them all with one commit.
			 *
			 * Note: creds in bp's must be obtained by kauth_cred_ref
			 * on the same original cred in order for them to be equal.
			 */
			if (wcred_set == 0) {
				wcred = bp->nb_wcred;
				if (!IS_VALID_CRED(wcred))
					panic("nfs: needcommit w/out wcred");
				wcred_set = 1;
			} else if ((wcred_set == 1) && wcred != bp->nb_wcred) {
				wcred_set = -1;
			}
			SET(bp->nb_flags, NB_WRITEINPROG);

			/*
			 * Add this buffer to the list of buffers we are committing.
			 * Buffers are inserted into the list in ascending order so that
			 * we can take the UPLs in order after the list is complete.
			 */
			prevlbp = NULL;
			LIST_FOREACH(lbp, &commitlist, nb_vnbufs) {
				if (bp->nb_lblkno < lbp->nb_lblkno)
					break;
				prevlbp = lbp;
			}
			LIST_REMOVE(bp, nb_vnbufs);
			if (prevlbp)
				LIST_INSERT_AFTER(prevlbp, bp, nb_vnbufs);
			else
				LIST_INSERT_HEAD(&commitlist, bp, nb_vnbufs);

			/* update commit range start, end */
			toff = NBOFF(bp) + bp->nb_dirtyoff;
			if (toff < off)
				off = toff;
			toff += (u_quad_t)(bp->nb_dirtyend - bp->nb_dirtyoff);
			if (toff > endoff)
				endoff = toff;
		}
		nfs_buf_itercomplete(np, &blist, NBI_DIRTY);
	}
	lck_mtx_unlock(nfs_buf_mutex);

	if (LIST_EMPTY(&commitlist)) {
		error = ENOBUFS;
		goto done;
	}

	/*
	 * We need a UPL to prevent others from accessing the buffers during
	 * our commit RPC(s).
	 *
	 * We used to also check for dirty pages here; if there were any we'd
	 * abort the commit and force the entire buffer to be written again.
	 * Instead of doing that, we just go ahead and commit the dirty range,
	 * and then leave the buffer around with dirty pages that will be
	 * written out later.
	 */
	LIST_FOREACH(bp, &commitlist, nb_vnbufs) {
		if (!ISSET(bp->nb_flags, NB_PAGELIST)) {
			retv = nfs_buf_upl_setup(bp);
			if (retv) {
				/* Unable to create the UPL, the VM object probably no longer exists. */
				printf("nfs_flushcommits: upl create failed %d\n", retv);
				bp->nb_valid = bp->nb_dirty = 0;
			}
		}
		nfs_buf_upl_check(bp);
	}

	/*
	 * Commit data on the server, as required.
	 * If all bufs are using the same wcred, then use that with
	 * one call for all of them, otherwise commit each one
	 * separately.
	 */
	if (wcred_set == 1) {
		/*
		 * Note, it's possible the commit range could be >2^32-1.
		 * If it is, we'll send one commit that covers the whole file.
		 */
		if ((endoff - off) > 0xffffffff)
			count = 0;
		else
			count = (endoff - off);
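		/*
		 * (A count of 0 in the COMMIT request asks the server to
		 * commit everything from the given offset to end of file.)
		 */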
		retv = nmp->nm_funcs->nf_commit_rpc(np, off, count, wcred);
	} else {
		retv = 0;
		LIST_FOREACH(bp, &commitlist, nb_vnbufs) {
			toff = NBOFF(bp) + bp->nb_dirtyoff;
			count = bp->nb_dirtyend - bp->nb_dirtyoff;
			retv = nmp->nm_funcs->nf_commit_rpc(np, toff, count, bp->nb_wcred);
			if (retv)
				break;
		}
	}

	/*
	 * Now, either mark the blocks I/O done or mark the
	 * blocks dirty, depending on whether the commit
	 * succeeded.
	 */
	while ((bp = LIST_FIRST(&commitlist))) {
		LIST_REMOVE(bp, nb_vnbufs);
		FSDBG(557, bp, retv, bp->nb_flags, bp->nb_dirty);
		nfs_node_lock_force(np);
		CLR(bp->nb_flags, (NB_NEEDCOMMIT | NB_WRITEINPROG));
		np->n_needcommitcnt--;
		CHECK_NEEDCOMMITCNT(np);
		nfs_node_unlock(np);

		if (retv) {
			/* move back to dirty list */
			lck_mtx_lock(nfs_buf_mutex);
			LIST_INSERT_HEAD(&np->n_dirtyblkhd, bp, nb_vnbufs);
			lck_mtx_unlock(nfs_buf_mutex);
			nfs_buf_release(bp, 1);
			continue;
		}

		nfs_node_lock_force(np);
		np->n_numoutput++;
		nfs_node_unlock(np);
		vnode_startwrite(NFSTOV(np));
		if (ISSET(bp->nb_flags, NB_DELWRI)) {
			lck_mtx_lock(nfs_buf_mutex);
			nfs_nbdwrite--;
			NFSBUFCNTCHK();
			lck_mtx_unlock(nfs_buf_mutex);
			wakeup(&nfs_nbdwrite);
		}
		CLR(bp->nb_flags, (NB_READ|NB_DONE|NB_ERROR|NB_DELWRI));
		/* if block still has dirty pages, we don't want it to */
		/* be released in nfs_buf_iodone().  So, don't set NB_ASYNC. */
		if (!(dirty = bp->nb_dirty))
			SET(bp->nb_flags, NB_ASYNC);
		else
			CLR(bp->nb_flags, NB_ASYNC);

		/* move to clean list */
		lck_mtx_lock(nfs_buf_mutex);
		LIST_INSERT_HEAD(&np->n_cleanblkhd, bp, nb_vnbufs);
		lck_mtx_unlock(nfs_buf_mutex);

		bp->nb_dirtyoff = bp->nb_dirtyend = 0;

		nfs_buf_iodone(bp);
		if (dirty) {
			/* throw it back in as a delayed write buffer */
			CLR(bp->nb_flags, NB_DONE);
			nfs_buf_write_delayed(bp);
		}
	}

done:
	FSDBG_BOT(557, np, 0, 0, error);
	return (error);
}
/*
 * Flush all the blocks associated with a vnode.
 * Walk through the buffer pool and push any dirty pages
 * associated with the vnode.
 */
int
nfs_flush(nfsnode_t np, int waitfor, thread_t thd, int ignore_writeerr)
{
	struct nfsbuf *bp;
	struct nfsbuflists blist;
	struct nfsmount *nmp = NFSTONMP(np);
	int error = 0, error2, slptimeo = 0, slpflag = 0;
	int nfsvers, flags, passone = 1;

	FSDBG_TOP(517, np, waitfor, ignore_writeerr, 0);

	if (!nmp) {
		error = ENXIO;
		goto out;
	}
	nfsvers = nmp->nm_vers;
	if (nmp->nm_flag & NFSMNT_INT)
		slpflag = PCATCH;

	if (!LIST_EMPTY(&np->n_dirtyblkhd)) {
		nfs_node_lock_force(np);
		np->n_flag |= NMODIFIED;
		nfs_node_unlock(np);
	}

	lck_mtx_lock(nfs_buf_mutex);
	while (np->n_bflag & NBFLUSHINPROG) {
		np->n_bflag |= NBFLUSHWANT;
		msleep(&np->n_bflag, nfs_buf_mutex, slpflag, "nfs_flush", NULL);
		if ((error = nfs_sigintr(NFSTONMP(np), NULL, thd, 0))) {
			lck_mtx_unlock(nfs_buf_mutex);
			goto out;
		}
	}
	np->n_bflag |= NBFLUSHINPROG;

	/*
	 * On the first pass, start async/unstable writes on all
	 * delayed write buffers.  Then wait for all writes to complete
	 * and call nfs_flushcommits() to commit any uncommitted buffers.
	 * On all subsequent passes, start STABLE writes on any remaining
	 * dirty buffers.  Then wait for all writes to complete.
	 */
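	/*
	 * (So the common case is two passes: pass one pushes everything
	 * asynchronously/UNSTABLE and then commits it; a second pass runs
	 * only if dirty buffers remain, rewriting them NB_STABLE/FILESYNC.)
	 */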
again:
	FSDBG(518, LIST_FIRST(&np->n_dirtyblkhd), np->n_flag, 0, 0);
	if (!NFSTONMP(np)) {
		lck_mtx_unlock(nfs_buf_mutex);
		error = ENXIO;
		goto done;
	}

	/* Start/do any write(s) that are required. */
	if (!nfs_buf_iterprepare(np, &blist, NBI_DIRTY)) {
		while ((bp = LIST_FIRST(&blist))) {
			LIST_REMOVE(bp, nb_vnbufs);
			LIST_INSERT_HEAD(&np->n_dirtyblkhd, bp, nb_vnbufs);
			flags = (passone || !(waitfor == MNT_WAIT || waitfor == MNT_DWAIT)) ? NBAC_NOWAIT : 0;
			if (flags != NBAC_NOWAIT)
				nfs_buf_refget(bp);
			while ((error = nfs_buf_acquire(bp, flags, slpflag, slptimeo))) {
				FSDBG(524, bp, flags, bp->nb_lflags, bp->nb_flags);
				if (error == EBUSY)
					break;
				if (error) {
					error2 = nfs_sigintr(NFSTONMP(np), NULL, thd, 0);
					if (error2) {
						if (flags != NBAC_NOWAIT)
							nfs_buf_refrele(bp);
						nfs_buf_itercomplete(np, &blist, NBI_DIRTY);
						lck_mtx_unlock(nfs_buf_mutex);
						error = error2;
						goto done;
					}
					if (slpflag == PCATCH) {
						slpflag = 0;
						slptimeo = 2 * hz;
					}
				}
			}
			if (flags != NBAC_NOWAIT)
				nfs_buf_refrele(bp);
			if (error == EBUSY)
				continue;
			if (!bp->nb_np) {
				/* buffer is no longer valid */
				nfs_buf_drop(bp);
				continue;
			}
			if (ISSET(bp->nb_flags, NB_NEEDCOMMIT))
				nfs_buf_check_write_verifier(np, bp);
			if (!ISSET(bp->nb_flags, NB_DELWRI)) {
				/* buffer is no longer dirty */
				nfs_buf_drop(bp);
				continue;
			}
			FSDBG(525, bp, passone, bp->nb_lflags, bp->nb_flags);
			if ((passone || !(waitfor == MNT_WAIT || waitfor == MNT_DWAIT)) &&
			    ISSET(bp->nb_flags, NB_NEEDCOMMIT)) {
				nfs_buf_drop(bp);
				continue;
			}
			nfs_buf_remfree(bp);
			lck_mtx_unlock(nfs_buf_mutex);
			if (ISSET(bp->nb_flags, NB_ERROR)) {
				nfs_node_lock_force(np);
				np->n_error = bp->nb_error ? bp->nb_error : EIO;
				np->n_flag |= NWRITEERR;
				nfs_node_unlock(np);
				nfs_buf_release(bp, 1);
				lck_mtx_lock(nfs_buf_mutex);
				continue;
			}
			SET(bp->nb_flags, NB_ASYNC);
			if (!passone) {
				/* NB_STABLE forces this to be written FILESYNC */
				SET(bp->nb_flags, NB_STABLE);
			}
			nfs_buf_write(bp);
			lck_mtx_lock(nfs_buf_mutex);
		}
		nfs_buf_itercomplete(np, &blist, NBI_DIRTY);
	}
	lck_mtx_unlock(nfs_buf_mutex);

	if (waitfor == MNT_WAIT || waitfor == MNT_DWAIT) {
		while ((error = vnode_waitforwrites(NFSTOV(np), 0, slpflag, slptimeo, "nfsflush"))) {
			error2 = nfs_sigintr(NFSTONMP(np), NULL, thd, 0);
			if (error2) {
				error = error2;
				goto done;
			}
			if (slpflag == PCATCH) {
				slpflag = 0;
				slptimeo = 2 * hz;
			}
		}
	}

	if (nfsvers != NFS_VER2) {
		/* loop while it looks like there are still buffers to be */
		/* commited and nfs_flushcommits() seems to be handling them. */
		while (np->n_needcommitcnt)
			if (nfs_flushcommits(np, 0))
				break;
	}

	if (passone) {
		passone = 0;
		if (!LIST_EMPTY(&np->n_dirtyblkhd)) {
			nfs_node_lock_force(np);
			np->n_flag |= NMODIFIED;
			nfs_node_unlock(np);
		}
		lck_mtx_lock(nfs_buf_mutex);
		goto again;
	}

	if (waitfor == MNT_WAIT || waitfor == MNT_DWAIT) {
		if (!LIST_EMPTY(&np->n_dirtyblkhd)) {
			nfs_node_lock_force(np);
			np->n_flag |= NMODIFIED;
			nfs_node_unlock(np);
		}
		lck_mtx_lock(nfs_buf_mutex);
		if (!LIST_EMPTY(&np->n_dirtyblkhd))
			goto again;
		lck_mtx_unlock(nfs_buf_mutex);
		nfs_node_lock_force(np);
		/*
		 * OK, it looks like there are no dirty blocks.  If we have no
		 * writes in flight and no one in the write code, we can clear
		 * the modified flag.  In order to make sure we see the latest
		 * attributes and size, we also invalidate the attributes and
		 * advance the attribute cache XID to guarantee that attributes
		 * newer than our clearing of NMODIFIED will get loaded next.
		 * (If we don't do this, it's possible for the flush's final
		 * write/commit (xid1) to be executed in parallel with a subsequent
		 * getattr request (xid2).  The getattr could return attributes
		 * from *before* the write/commit completed but the stale attributes
		 * would be preferred because of the xid ordering.)
		 */
		if (!np->n_wrbusy && !np->n_numoutput) {
			np->n_flag &= ~NMODIFIED;
			NATTRINVALIDATE(np);
			nfs_get_xid(&np->n_xid);
		}
	} else {
		nfs_node_lock_force(np);
	}

	FSDBG(526, np->n_flag, np->n_error, 0, 0);
	if (!ignore_writeerr && (np->n_flag & NWRITEERR)) {
		error = np->n_error;
		np->n_flag &= ~NWRITEERR;
	}
	nfs_node_unlock(np);
done:
	lck_mtx_lock(nfs_buf_mutex);
	flags = np->n_bflag;
	np->n_bflag &= ~(NBFLUSHINPROG|NBFLUSHWANT);
	lck_mtx_unlock(nfs_buf_mutex);
	if (flags & NBFLUSHWANT)
		wakeup(&np->n_bflag);
out:
	FSDBG_BOT(517, np, error, ignore_writeerr, 0);
	return (error);
}
/*
 * Flush out and invalidate all buffers associated with a vnode.
 * Called with the underlying object locked.
 */
int
nfs_vinvalbuf_internal(
	nfsnode_t np,
	int flags,
	thread_t thd,
	kauth_cred_t cred,
	int slpflag,
	int slptimeo)
{
	struct nfsbuf *bp;
	struct nfsbuflists blist;
	int list, error = 0;

	if (flags & V_SAVE) {
		if ((error = nfs_flush(np, MNT_WAIT, thd, (flags & V_IGNORE_WRITEERR))))
			return (error);
	}

	lck_mtx_lock(nfs_buf_mutex);
	for (;;) {
		list = NBI_CLEAN;
		if (nfs_buf_iterprepare(np, &blist, list)) {
			list = NBI_DIRTY;
			if (nfs_buf_iterprepare(np, &blist, list))
				break;
		}
		while ((bp = LIST_FIRST(&blist))) {
			LIST_REMOVE(bp, nb_vnbufs);
			if (list == NBI_CLEAN)
				LIST_INSERT_HEAD(&np->n_cleanblkhd, bp, nb_vnbufs);
			else
				LIST_INSERT_HEAD(&np->n_dirtyblkhd, bp, nb_vnbufs);
			nfs_buf_refget(bp);
			while ((error = nfs_buf_acquire(bp, NBAC_REMOVE, slpflag, slptimeo))) {
				FSDBG(556, np, bp, NBOFF(bp), bp->nb_flags);
				if (error != EAGAIN) {
					FSDBG(554, np, bp, -1, error);
					nfs_buf_refrele(bp);
					nfs_buf_itercomplete(np, &blist, list);
					lck_mtx_unlock(nfs_buf_mutex);
					return (error);
				}
			}
			nfs_buf_refrele(bp);
			FSDBG(554, np, bp, NBOFF(bp), bp->nb_flags);
			lck_mtx_unlock(nfs_buf_mutex);
			if ((flags & V_SAVE) && UBCINFOEXISTS(NFSTOV(np)) && bp->nb_np &&
			    (NBOFF(bp) < (off_t)np->n_size)) {
				/* extra paranoia: make sure we're not */
				/* somehow leaving any dirty data around */
				int mustwrite = 0;
				int end = (NBOFF(bp) + bp->nb_bufsize > (off_t)np->n_size) ?
				    ((off_t)np->n_size - NBOFF(bp)) : bp->nb_bufsize;
				if (!ISSET(bp->nb_flags, NB_PAGELIST)) {
					error = nfs_buf_upl_setup(bp);
					if (error == EINVAL) {
						/* vm object must no longer exist */
						/* hopefully we don't need to do */
						/* anything for this buffer */
					} else if (error)
						printf("nfs_vinvalbuf: upl setup failed %d\n", error);
					bp->nb_valid = bp->nb_dirty = 0;
				}
				nfs_buf_upl_check(bp);
				/* check for any dirty data before the EOF */
				if ((bp->nb_dirtyend > 0) && (bp->nb_dirtyoff < end)) {
					/* clip dirty range to EOF */
					if (bp->nb_dirtyend > end) {
						bp->nb_dirtyend = end;
						if (bp->nb_dirtyoff >= bp->nb_dirtyend)
							bp->nb_dirtyoff = bp->nb_dirtyend = 0;
					}
					if ((bp->nb_dirtyend > 0) && (bp->nb_dirtyoff < end))
						mustwrite++;
				}
				bp->nb_dirty &= (1 << (round_page_32(end)/PAGE_SIZE)) - 1;
				if (bp->nb_dirty)
					mustwrite++;
				/* also make sure we'll have a credential to do the write */
				if (mustwrite && !IS_VALID_CRED(bp->nb_wcred) && !IS_VALID_CRED(cred)) {
					printf("nfs_vinvalbuf: found dirty buffer with no write creds\n");
					mustwrite = 0;
				}
				if (mustwrite) {
					FSDBG(554, np, bp, 0xd00dee, bp->nb_flags);
					if (!ISSET(bp->nb_flags, NB_PAGELIST))
						panic("nfs_vinvalbuf: dirty buffer without upl");
					/* gotta write out dirty data before invalidating */
					/* (NB_STABLE indicates that data writes should be FILESYNC) */
					/* (NB_NOCACHE indicates buffer should be discarded) */
					CLR(bp->nb_flags, (NB_DONE | NB_ERROR | NB_INVAL | NB_ASYNC));
					SET(bp->nb_flags, NB_STABLE | NB_NOCACHE);
					if (!IS_VALID_CRED(bp->nb_wcred)) {
						kauth_cred_ref(cred);
						bp->nb_wcred = cred;
					}
					error = nfs_buf_write(bp);
					// Note: bp has been released
					if (error) {
						FSDBG(554, bp, 0xd00dee, 0xbad, error);
						nfs_node_lock_force(np);
						np->n_error = error;
						np->n_flag |= NWRITEERR;
						/*
						 * There was a write error and we need to
						 * invalidate attrs to sync with server.
						 * (if this write was extending the file,
						 * we may no longer know the correct size)
						 */
						NATTRINVALIDATE(np);
						nfs_node_unlock(np);
						if (error == EINTR) {
							/*
							 * Abort on EINTR.  If we don't, we could
							 * be stuck in this loop forever because
							 * the buffer will continue to stay dirty.
							 */
							lck_mtx_lock(nfs_buf_mutex);
							nfs_buf_itercomplete(np, &blist, list);
							lck_mtx_unlock(nfs_buf_mutex);
							return (error);
						}
						error = 0;
					}
					lck_mtx_lock(nfs_buf_mutex);
					continue;
				}
			}
			SET(bp->nb_flags, NB_INVAL);
			// hold off on FREEUPs until we're done here
			nfs_buf_release(bp, 0);
			lck_mtx_lock(nfs_buf_mutex);
		}
		nfs_buf_itercomplete(np, &blist, list);
	}
	if (!LIST_EMPTY(&(np)->n_dirtyblkhd) || !LIST_EMPTY(&(np)->n_cleanblkhd))
		panic("nfs_vinvalbuf: flush/inval failed");
	lck_mtx_unlock(nfs_buf_mutex);
	nfs_node_lock_force(np);
	if (!(flags & V_SAVE))
		np->n_flag &= ~NMODIFIED;
	if (vnode_vtype(NFSTOV(np)) == VREG)
		np->n_lastrahead = -1;
	nfs_node_unlock(np);
	NFS_BUF_FREEUP();
	return (0);
}
/*
 * Flush and invalidate all dirty buffers. If another process is already
 * doing the flush, just wait for completion.
 */
int
nfs_vinvalbuf(vnode_t vp, int flags, vfs_context_t ctx, int intrflg)
{
	return nfs_vinvalbuf2(vp, flags, vfs_context_thread(ctx), vfs_context_ucred(ctx), intrflg);
}

int
nfs_vinvalbuf2(vnode_t vp, int flags, thread_t thd, kauth_cred_t cred, int intrflg)
{
	nfsnode_t np = VTONFS(vp);
	struct nfsmount *nmp = VTONMP(vp);
	int error, rv, slpflag, slptimeo, nflags;
	off_t size;

	FSDBG_TOP(554, np, flags, intrflg, 0);

	if (nmp && !(nmp->nm_flag & NFSMNT_INT))
		intrflg = 0;
	if (intrflg) {
		slpflag = PCATCH;
		slptimeo = 2 * hz;
	} else {
		slpflag = 0;
		slptimeo = 0;
	}

	/* First wait for any other process doing a flush to complete. */
	lck_mtx_lock(nfs_buf_mutex);
	while (np->n_bflag & NBINVALINPROG) {
		np->n_bflag |= NBINVALWANT;
		msleep(&np->n_bflag, nfs_buf_mutex, slpflag, "nfs_vinvalbuf", NULL);
		if ((error = nfs_sigintr(VTONMP(vp), NULL, thd, 0))) {
			lck_mtx_unlock(nfs_buf_mutex);
			return (error);
		}
	}
	np->n_bflag |= NBINVALINPROG;
	lck_mtx_unlock(nfs_buf_mutex);

	/* Now, flush as required. */
	error = nfs_vinvalbuf_internal(np, flags, thd, cred, slpflag, 0);
	while (error) {
		FSDBG(554, np, 0, 0, error);
		if ((error = nfs_sigintr(VTONMP(vp), NULL, thd, 0)))
			goto done;
		error = nfs_vinvalbuf_internal(np, flags, thd, cred, 0, slptimeo);
	}

	/* get the pages out of vm also */
	if (UBCINFOEXISTS(vp) && (size = ubc_getsize(vp)))
		if (!(rv = ubc_sync_range(vp, 0, size, UBC_PUSHALL | UBC_SYNC | UBC_INVALIDATE)))
			panic("nfs_vinvalbuf(): ubc_sync_range failed!");
done:
	lck_mtx_lock(nfs_buf_mutex);
	nflags = np->n_bflag;
	np->n_bflag &= ~(NBINVALINPROG|NBINVALWANT);
	lck_mtx_unlock(nfs_buf_mutex);
	if (nflags & NBINVALWANT)
		wakeup(&np->n_bflag);

	FSDBG_BOT(554, np, flags, intrflg, error);
	return (error);
}
/*
 * Add an async I/O request to the mount's async I/O queue and make
 * sure that an nfsiod will service it.
 */
void
nfs_asyncio_finish(struct nfsreq *req)
{
	struct nfsmount *nmp;
	struct nfsiod *niod;
	int started = 0;

	FSDBG_TOP(552, nmp, 0, 0, 0);
again:
	if (((nmp = req->r_nmp)) == NULL)
		return;
	lck_mtx_lock(nfsiod_mutex);
	niod = nmp->nm_niod;

	/* grab an nfsiod if we don't have one already */
	if (!niod) {
		niod = TAILQ_FIRST(&nfsiodfree);
		if (niod) {
			TAILQ_REMOVE(&nfsiodfree, niod, niod_link);
			TAILQ_INSERT_TAIL(&nfsiodwork, niod, niod_link);
			niod->niod_nmp = nmp;
		} else if (((nfsiod_thread_count < NFSIOD_MAX) || (nfsiod_thread_count <= 0)) && (started < 4)) {
			/*
			 * Try starting a new thread.
			 * We may try a couple times if other callers
			 * get the new threads before we do.
			 */
			lck_mtx_unlock(nfsiod_mutex);
			started++;
			if (!nfsiod_start())
				goto again;
			lck_mtx_lock(nfsiod_mutex);
		}
	}

	if (req->r_achain.tqe_next == NFSREQNOLIST)
		TAILQ_INSERT_TAIL(&nmp->nm_iodq, req, r_achain);

	/* If this mount doesn't already have an nfsiod working on it... */
	if (!nmp->nm_niod) {
		if (niod) { /* give it the nfsiod we just grabbed */
			nmp->nm_niod = niod;
			lck_mtx_unlock(nfsiod_mutex);
			wakeup(niod);
		} else if (nfsiod_thread_count > 0) {
			/* just queue it up on nfsiod mounts queue */
			TAILQ_INSERT_TAIL(&nfsiodmounts, nmp, nm_iodlink);
			lck_mtx_unlock(nfsiod_mutex);
		} else {
			printf("nfs_asyncio(): no nfsiods? %d %d (%d)\n", nfsiod_thread_count, NFSIOD_MAX, started);
			lck_mtx_unlock(nfsiod_mutex);
			/* we have no other option but to be persistent */
			started = 0;
			goto again;
		}
	} else {
		lck_mtx_unlock(nfsiod_mutex);
	}

	FSDBG_BOT(552, nmp, 0, 0, 0);
}
/*
 * queue up async I/O request for resend
 */
void
nfs_asyncio_resend(struct nfsreq *req)
{
	struct nfsmount *nmp = req->r_nmp;

	if (!nmp)
		return;
	nfs_gss_clnt_rpcdone(req);
	lck_mtx_lock(&nmp->nm_lock);
	if (!(req->r_flags & R_RESENDQ)) {
		TAILQ_INSERT_TAIL(&nmp->nm_resendq, req, r_rchain);
		req->r_flags |= R_RESENDQ;
	}
	nfs_mount_sock_thread_wake(nmp);
	lck_mtx_unlock(&nmp->nm_lock);
}
/*
 * Read directory data into a buffer.
 *
 * Buffer will be filled (unless EOF is hit).
 * Buffers after this one may also be completely/partially filled.
 */
int
nfs_buf_readdir(struct nfsbuf *bp, vfs_context_t ctx)
{
	nfsnode_t np = bp->nb_np;
	struct nfsmount *nmp = NFSTONMP(np);
	int error = 0;

	if (!nmp)
		return (ENXIO);

	if (nmp->nm_vers < NFS_VER4)
		error = nfs3_readdir_rpc(np, bp, ctx);
	else
		error = nfs4_readdir_rpc(np, bp, ctx);

	if (error && (error != NFSERR_DIRBUFDROPPED)) {
		SET(bp->nb_flags, NB_ERROR);
		bp->nb_error = error;
	}
	return (error);
}