/*
 * Copyright (c) 2000-2009 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Rick Macklem at The University of Guelph.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)nfs_bio.c	8.9 (Berkeley) 3/30/95
 * FreeBSD-Id: nfs_bio.c,v 1.44 1997/09/10 19:52:25 phk Exp $
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/proc_internal.h>
#include <sys/kauth.h>
#include <sys/malloc.h>
#include <sys/vnode.h>
#include <sys/dirent.h>
#include <sys/mount_internal.h>
#include <sys/kernel.h>
#include <sys/ubc_internal.h>
#include <sys/uio_internal.h>

#include <sys/vm.h>
#include <sys/vmparam.h>

#include <sys/time.h>
#include <kern/clock.h>
#include <libkern/OSAtomic.h>
#include <kern/kalloc.h>
#include <kern/thread_call.h>

#include <nfs/rpcv2.h>
#include <nfs/nfsproto.h>
#include <nfs/nfs.h>
#include <nfs/nfs_gss.h>
#include <nfs/nfsmount.h>
#include <nfs/nfsnode.h>
#include <sys/buf_internal.h>
#include <libkern/OSAtomic.h>

kern_return_t thread_terminate(thread_t); /* XXX */

#define NFSBUFHASH(np, lbn)	\
	(&nfsbufhashtbl[((long)(np) / sizeof(*(np)) + (int)(lbn)) & nfsbufhash])
LIST_HEAD(nfsbufhashhead, nfsbuf) *nfsbufhashtbl;
struct nfsbuffreehead nfsbuffree, nfsbuffreemeta, nfsbufdelwri;
u_long nfsbufhash;
int nfsbufcnt, nfsbufmin, nfsbufmax, nfsbufmetacnt, nfsbufmetamax;
int nfsbuffreecnt, nfsbuffreemetacnt, nfsbufdelwricnt, nfsneedbuffer;
int nfs_nbdwrite;
int nfs_buf_timer_on = 0;
thread_t nfsbufdelwrithd = NULL;

lck_grp_t *nfs_buf_lck_grp;
lck_mtx_t *nfs_buf_mutex;

#define NFSBUF_FREE_PERIOD	30	/* seconds */
#define NFSBUF_LRU_STALE	120
#define NFSBUF_META_STALE	240

/* number of nfsbufs nfs_buf_freeup() should attempt to free from nfsbuffree list */
#define LRU_TO_FREEUP			6
/* number of nfsbufs nfs_buf_freeup() should attempt to free from nfsbuffreemeta list */
#define META_TO_FREEUP			3
/* total number of nfsbufs nfs_buf_freeup() should attempt to free */
#define TOTAL_TO_FREEUP			(LRU_TO_FREEUP+META_TO_FREEUP)
/* fraction of nfsbufs nfs_buf_freeup() should attempt to free from nfsbuffree list when called from timer */
#define LRU_FREEUP_FRAC_ON_TIMER	8
/* fraction of nfsbufs nfs_buf_freeup() should attempt to free from nfsbuffreemeta list when called from timer */
#define META_FREEUP_FRAC_ON_TIMER	16
/* fraction of total nfsbufs that nfsbuffreecnt should exceed before bothering to call nfs_buf_freeup() */
#define LRU_FREEUP_MIN_FRAC		4
/* fraction of total nfsbufs that nfsbuffreemetacnt should exceed before bothering to call nfs_buf_freeup() */
#define META_FREEUP_MIN_FRAC		2

#define NFS_BUF_FREEUP() \
	do { \
		/* only call nfs_buf_freeup() if it has work to do: */ \
		if (((nfsbuffreecnt > nfsbufcnt/LRU_FREEUP_MIN_FRAC) || \
		     (nfsbuffreemetacnt > nfsbufcnt/META_FREEUP_MIN_FRAC)) && \
		    ((nfsbufcnt - TOTAL_TO_FREEUP) > nfsbufmin)) \
			nfs_buf_freeup(0); \
	} while (0)
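
/*
 * Worked example of the NFS_BUF_FREEUP() thresholds (illustrative numbers
 * only): with nfsbufcnt = 1000 and nfsbufmin = 128, nfs_buf_freeup(0) is
 * called only if nfsbuffreecnt > 250 (1000/LRU_FREEUP_MIN_FRAC) or
 * nfsbuffreemetacnt > 500 (1000/META_FREEUP_MIN_FRAC), and only while
 * freeing TOTAL_TO_FREEUP (9) buffers would still leave more than nfsbufmin.
 */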

/*
 * Initialize nfsbuf lists
 */
void
nfs_nbinit(void)
{
	nfs_buf_lck_grp = lck_grp_alloc_init("nfs_buf", LCK_GRP_ATTR_NULL);
	nfs_buf_mutex = lck_mtx_alloc_init(nfs_buf_lck_grp, LCK_ATTR_NULL);

	nfsbufcnt = nfsbufmetacnt =
	nfsbuffreecnt = nfsbuffreemetacnt = nfsbufdelwricnt = 0;
	nfsbufmin = 128;
	/* size nfsbufmax to cover at most half sane_size (w/default buf size) */
	nfsbufmax = (sane_size >> PAGE_SHIFT) / (2 * (NFS_RWSIZE >> PAGE_SHIFT));
	nfsbufmetamax = nfsbufmax / 4;
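	/*
	 * Example (illustrative numbers only, assuming 4KB pages and a
	 * 32KB NFS_RWSIZE): with sane_size = 512MB there are 131072 pages,
	 * so nfsbufmax = 131072 / (2 * 8) = 8192 buffers (8192 * 32KB =
	 * 256MB, half of sane_size), and nfsbufmetamax = 2048.
	 */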
	nfsneedbuffer = 0;
	nfs_nbdwrite = 0;

	nfsbufhashtbl = hashinit(nfsbufmax/4, M_TEMP, &nfsbufhash);
	TAILQ_INIT(&nfsbuffree);
	TAILQ_INIT(&nfsbuffreemeta);
	TAILQ_INIT(&nfsbufdelwri);

}

/*
 * Check periodically for stale/unused nfs bufs
 */
void
nfs_buf_timer(__unused void *param0, __unused void *param1)
{
	nfs_buf_freeup(1);

	lck_mtx_lock(nfs_buf_mutex);
	if (nfsbufcnt <= nfsbufmin) {
		nfs_buf_timer_on = 0;
		lck_mtx_unlock(nfs_buf_mutex);
		return;
	}
	lck_mtx_unlock(nfs_buf_mutex);

	nfs_interval_timer_start(nfs_buf_timer_call,
		NFSBUF_FREE_PERIOD * 1000);
}

/*
 * try to free up some excess, unused nfsbufs
 */
void
nfs_buf_freeup(int timer)
{
	struct nfsbuf *fbp;
	struct timeval now;
	int count;
	struct nfsbuffreehead nfsbuffreeup;

	TAILQ_INIT(&nfsbuffreeup);

	lck_mtx_lock(nfs_buf_mutex);

	microuptime(&now);

	FSDBG(320, nfsbufcnt, nfsbuffreecnt, nfsbuffreemetacnt, 0);

	count = timer ? nfsbuffreecnt/LRU_FREEUP_FRAC_ON_TIMER : LRU_TO_FREEUP;
	while ((nfsbufcnt > nfsbufmin) && (count-- > 0)) {
		fbp = TAILQ_FIRST(&nfsbuffree);
		if (!fbp)
			break;
		if (fbp->nb_refs)
			break;
		if (NBUFSTAMPVALID(fbp) &&
		    (fbp->nb_timestamp + (2*NFSBUF_LRU_STALE)) > now.tv_sec)
			break;
		nfs_buf_remfree(fbp);
		/* disassociate buffer from any nfsnode */
		if (fbp->nb_np) {
			if (fbp->nb_vnbufs.le_next != NFSNOLIST) {
				LIST_REMOVE(fbp, nb_vnbufs);
				fbp->nb_vnbufs.le_next = NFSNOLIST;
			}
			fbp->nb_np = NULL;
		}
		LIST_REMOVE(fbp, nb_hash);
		TAILQ_INSERT_TAIL(&nfsbuffreeup, fbp, nb_free);
		nfsbufcnt--;
	}

	count = timer ? nfsbuffreemetacnt/META_FREEUP_FRAC_ON_TIMER : META_TO_FREEUP;
	while ((nfsbufcnt > nfsbufmin) && (count-- > 0)) {
		fbp = TAILQ_FIRST(&nfsbuffreemeta);
		if (!fbp)
			break;
		if (fbp->nb_refs)
			break;
		if (NBUFSTAMPVALID(fbp) &&
		    (fbp->nb_timestamp + (2*NFSBUF_META_STALE)) > now.tv_sec)
			break;
		nfs_buf_remfree(fbp);
		/* disassociate buffer from any nfsnode */
		if (fbp->nb_np) {
			if (fbp->nb_vnbufs.le_next != NFSNOLIST) {
				LIST_REMOVE(fbp, nb_vnbufs);
				fbp->nb_vnbufs.le_next = NFSNOLIST;
			}
			fbp->nb_np = NULL;
		}
		LIST_REMOVE(fbp, nb_hash);
		TAILQ_INSERT_TAIL(&nfsbuffreeup, fbp, nb_free);
		nfsbufcnt--;
		nfsbufmetacnt--;
	}

	FSDBG(320, nfsbufcnt, nfsbuffreecnt, nfsbuffreemetacnt, 0);
	NFSBUFCNTCHK();

	lck_mtx_unlock(nfs_buf_mutex);

	while ((fbp = TAILQ_FIRST(&nfsbuffreeup))) {
		TAILQ_REMOVE(&nfsbuffreeup, fbp, nb_free);
		/* nuke any creds */
		if (IS_VALID_CRED(fbp->nb_rcred))
			kauth_cred_unref(&fbp->nb_rcred);
		if (IS_VALID_CRED(fbp->nb_wcred))
			kauth_cred_unref(&fbp->nb_wcred);
		/* if buf was NB_META, dump buffer */
		if (ISSET(fbp->nb_flags, NB_META) && fbp->nb_data)
			kfree(fbp->nb_data, fbp->nb_bufsize);
		FREE(fbp, M_TEMP);
	}

}

/*
 * remove a buffer from the freelist
 * (must be called with nfs_buf_mutex held)
 */
void
nfs_buf_remfree(struct nfsbuf *bp)
{
	if (bp->nb_free.tqe_next == NFSNOLIST)
		panic("nfsbuf not on free list");
	if (ISSET(bp->nb_flags, NB_DELWRI)) {
		nfsbufdelwricnt--;
		TAILQ_REMOVE(&nfsbufdelwri, bp, nb_free);
	} else if (ISSET(bp->nb_flags, NB_META)) {
		nfsbuffreemetacnt--;
		TAILQ_REMOVE(&nfsbuffreemeta, bp, nb_free);
	} else {
		nfsbuffreecnt--;
		TAILQ_REMOVE(&nfsbuffree, bp, nb_free);
	}
	bp->nb_free.tqe_next = NFSNOLIST;
	NFSBUFCNTCHK();
}

/*
 * check for existence of nfsbuf in cache
 */
boolean_t
nfs_buf_is_incore(nfsnode_t np, daddr64_t blkno)
{
	boolean_t rv;
	lck_mtx_lock(nfs_buf_mutex);
	if (nfs_buf_incore(np, blkno))
		rv = TRUE;
	else
		rv = FALSE;
	lck_mtx_unlock(nfs_buf_mutex);
	return (rv);
}

/*
 * return incore buffer (must be called with nfs_buf_mutex held)
 */
struct nfsbuf *
nfs_buf_incore(nfsnode_t np, daddr64_t blkno)
{
	/* Search hash chain */
	struct nfsbuf * bp = NFSBUFHASH(np, blkno)->lh_first;
	for (; bp != NULL; bp = bp->nb_hash.le_next)
		if ((bp->nb_lblkno == blkno) && (bp->nb_np == np)) {
			if (!ISSET(bp->nb_flags, NB_INVAL)) {
				FSDBG(547, bp, blkno, bp->nb_flags, bp->nb_np);
				return (bp);
			}
		}
	return (NULL);
}

/*
 * Check if it's OK to drop a page.
 *
 * Called by vnode_pager() on pageout request of non-dirty page.
 * We need to make sure that it's not part of a delayed write.
 * If it is, we can't let the VM drop it because we may need it
 * later when/if we need to write the data (again).
 */
int
nfs_buf_page_inval(vnode_t vp, off_t offset)
{
	struct nfsmount *nmp = VTONMP(vp);
	struct nfsbuf *bp;
	int error = 0;

	if (!nmp)
		return (ENXIO);

	lck_mtx_lock(nfs_buf_mutex);
	bp = nfs_buf_incore(VTONFS(vp), (daddr64_t)(offset / nmp->nm_biosize));
	if (!bp)
		goto out;
	FSDBG(325, bp, bp->nb_flags, bp->nb_dirtyoff, bp->nb_dirtyend);
	if (ISSET(bp->nb_lflags, NBL_BUSY)) {
		error = EBUSY;
		goto out;
	}
	/*
	 * If there's a dirty range in the buffer, check to
	 * see if this page intersects with the dirty range.
	 * If it does, we can't let the pager drop the page.
	 */
	if (bp->nb_dirtyend > 0) {
		int start = offset - NBOFF(bp);
		if ((bp->nb_dirtyend > start) &&
		    (bp->nb_dirtyoff < (start + PAGE_SIZE))) {
			/*
			 * Before returning the bad news, move the
			 * buffer to the start of the delwri list and
			 * give the list a push to try to flush the
			 * buffer out.
			 */
			error = EBUSY;
			nfs_buf_remfree(bp);
			TAILQ_INSERT_HEAD(&nfsbufdelwri, bp, nb_free);
			nfsbufdelwricnt++;
			nfs_buf_delwri_push(1);
		}
	}
out:
	lck_mtx_unlock(nfs_buf_mutex);
	return (error);
}
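
/*
 * Worked example for nfs_buf_page_inval() (illustrative numbers only):
 * for a buffer at file offset 0 with dirty range [4096, 12288) and a 4KB
 * pageout request at offset 8192, start = 8192, and since nb_dirtyend
 * (12288) > 8192 and nb_dirtyoff (4096) < 8192 + PAGE_SIZE, the page
 * overlaps the dirty range and EBUSY is returned so the VM keeps the page.
 */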

/*
 * set up the UPL for a buffer
 * (must NOT be called with nfs_buf_mutex held)
 */
int
nfs_buf_upl_setup(struct nfsbuf *bp)
{
	kern_return_t kret;
	upl_t upl;
	int upl_flags;

	if (ISSET(bp->nb_flags, NB_PAGELIST))
		return (0);

	upl_flags = UPL_PRECIOUS;
	if (!ISSET(bp->nb_flags, NB_READ)) {
		/*
		 * We're doing a "write", so we intend to modify
		 * the pages we're gathering.
		 */
		upl_flags |= UPL_WILL_MODIFY;
	}
	kret = ubc_create_upl(NFSTOV(bp->nb_np), NBOFF(bp), bp->nb_bufsize,
				&upl, NULL, upl_flags);
	if (kret == KERN_INVALID_ARGUMENT) {
		/* vm object probably doesn't exist any more */
		bp->nb_pagelist = NULL;
		return (EINVAL);
	}
	if (kret != KERN_SUCCESS) {
		printf("nfs_buf_upl_setup(): failed to get pagelist %d\n", kret);
		bp->nb_pagelist = NULL;
		return (EIO);
	}

	FSDBG(538, bp, NBOFF(bp), bp->nb_bufsize, bp->nb_np);

	bp->nb_pagelist = upl;
	SET(bp->nb_flags, NB_PAGELIST);
	return (0);
}

/*
 * update buffer's valid/dirty info from UBC
 * (must NOT be called with nfs_buf_mutex held)
 */
void
nfs_buf_upl_check(struct nfsbuf *bp)
{
	upl_page_info_t *pl;
	off_t filesize, fileoffset;
	int i, npages;

	if (!ISSET(bp->nb_flags, NB_PAGELIST))
		return;

	npages = round_page_32(bp->nb_bufsize) / PAGE_SIZE;
	filesize = ubc_getsize(NFSTOV(bp->nb_np));
	fileoffset = NBOFF(bp);
	if (fileoffset < filesize)
		SET(bp->nb_flags, NB_CACHE);
	else
		CLR(bp->nb_flags, NB_CACHE);

	pl = ubc_upl_pageinfo(bp->nb_pagelist);
	bp->nb_valid = bp->nb_dirty = 0;

	for (i=0; i < npages; i++, fileoffset += PAGE_SIZE_64) {
		/* anything beyond the end of the file is not valid or dirty */
		if (fileoffset >= filesize)
			break;
		if (!upl_valid_page(pl, i)) {
			CLR(bp->nb_flags, NB_CACHE);
			continue;
		}
		NBPGVALID_SET(bp,i);
		if (upl_dirty_page(pl, i))
			NBPGDIRTY_SET(bp, i);
	}
	fileoffset = NBOFF(bp);
	if (ISSET(bp->nb_flags, NB_CACHE)) {
		bp->nb_validoff = 0;
		bp->nb_validend = bp->nb_bufsize;
		if (fileoffset + bp->nb_validend > filesize)
			bp->nb_validend = filesize - fileoffset;
	} else {
		bp->nb_validoff = bp->nb_validend = -1;
	}
	FSDBG(539, bp, fileoffset, bp->nb_valid, bp->nb_dirty);
	FSDBG(539, bp->nb_validoff, bp->nb_validend, bp->nb_dirtyoff, bp->nb_dirtyend);
}

/*
 * make sure that a buffer is mapped
 * (must NOT be called with nfs_buf_mutex held)
 */
int
nfs_buf_map(struct nfsbuf *bp)
{
	kern_return_t kret;

	if (bp->nb_data)
		return (0);
	if (!ISSET(bp->nb_flags, NB_PAGELIST))
		return (EINVAL);

	kret = ubc_upl_map(bp->nb_pagelist, (vm_offset_t *)&(bp->nb_data));
	if (kret != KERN_SUCCESS)
		panic("nfs_buf_map: ubc_upl_map() failed with (%d)", kret);
	if (bp->nb_data == 0)
		panic("ubc_upl_map mapped 0");
	FSDBG(540, bp, bp->nb_flags, NBOFF(bp), bp->nb_data);
	return (0);
}

/*
 * normalize an nfsbuf's valid range
 *
 * the read/write code guarantees that we'll always have a valid
 * region that is an integral number of pages.  If either end
 * of the valid range isn't page-aligned, it gets corrected
 * here as we extend the valid range through all of the
 * contiguous valid pages.
 */
void
nfs_buf_normalize_valid_range(nfsnode_t np, struct nfsbuf *bp)
{
	int pg, npg;
	/* pull validoff back to start of contiguous valid page range */
	pg = bp->nb_validoff/PAGE_SIZE;
	while (pg >= 0 && NBPGVALID(bp,pg))
		pg--;
	bp->nb_validoff = (pg+1) * PAGE_SIZE;
	/* push validend forward to end of contiguous valid page range */
	npg = bp->nb_bufsize/PAGE_SIZE;
	pg = bp->nb_validend/PAGE_SIZE;
	while (pg < npg && NBPGVALID(bp,pg))
		pg++;
	bp->nb_validend = pg * PAGE_SIZE;
	/* clip to EOF */
	if (NBOFF(bp) + bp->nb_validend > (off_t)np->n_size)
		bp->nb_validend = np->n_size % bp->nb_bufsize;
}
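
/*
 * Worked example for nfs_buf_normalize_valid_range() (illustrative numbers
 * only, 4KB pages): if nb_validoff/nb_validend come in as 5000/9000 and
 * pages 0-2 of the buffer are marked valid, validoff is pulled back to 0
 * and validend is pushed forward to 12288, covering the whole run of
 * contiguous valid pages.
 */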

/*
 * process some entries on the delayed write queue
 * (must be called with nfs_buf_mutex held)
 */
void
nfs_buf_delwri_service(void)
{
	struct nfsbuf *bp;
	nfsnode_t np;
	int error, i = 0;

	while (i < 8 && (bp = TAILQ_FIRST(&nfsbufdelwri)) != NULL) {
		np = bp->nb_np;
		nfs_buf_remfree(bp);
		nfs_buf_refget(bp);
		while ((error = nfs_buf_acquire(bp, 0, 0, 0)) == EAGAIN);
		nfs_buf_refrele(bp);
		if (error)
			break;
		if (!bp->nb_np) {
			/* buffer is no longer valid */
			nfs_buf_drop(bp);
			continue;
		}
		if (ISSET(bp->nb_flags, NB_NEEDCOMMIT))
			nfs_buf_check_write_verifier(np, bp);
		if (ISSET(bp->nb_flags, NB_NEEDCOMMIT)) {
			/* put buffer at end of delwri list */
			TAILQ_INSERT_TAIL(&nfsbufdelwri, bp, nb_free);
			nfsbufdelwricnt++;
			nfs_buf_drop(bp);
			lck_mtx_unlock(nfs_buf_mutex);
			nfs_flushcommits(np, 1);
		} else {
			SET(bp->nb_flags, NB_ASYNC);
			lck_mtx_unlock(nfs_buf_mutex);
			nfs_buf_write(bp);
		}
		i++;
		lck_mtx_lock(nfs_buf_mutex);
	}
}

/*
 * thread to service the delayed write queue when asked
 */
void
nfs_buf_delwri_thread(__unused void *arg, __unused wait_result_t wr)
{
	struct timespec ts = { 30, 0 };
	int error = 0;

	lck_mtx_lock(nfs_buf_mutex);
	while (!error) {
		nfs_buf_delwri_service();
		error = msleep(&nfsbufdelwrithd, nfs_buf_mutex, 0, "nfsbufdelwri", &ts);
	}
	nfsbufdelwrithd = NULL;
	lck_mtx_unlock(nfs_buf_mutex);
	thread_terminate(nfsbufdelwrithd);
}

/*
 * try to push out some delayed/uncommitted writes
 * ("locked" indicates whether nfs_buf_mutex is already held)
 */
void
nfs_buf_delwri_push(int locked)
{
	if (TAILQ_EMPTY(&nfsbufdelwri))
		return;
	if (!locked)
		lck_mtx_lock(nfs_buf_mutex);
	/* wake up the delayed write service thread */
	if (nfsbufdelwrithd)
		wakeup(&nfsbufdelwrithd);
	else if (kernel_thread_start(nfs_buf_delwri_thread, NULL, &nfsbufdelwrithd) == KERN_SUCCESS)
		thread_deallocate(nfsbufdelwrithd);
	/* otherwise, try to do some of the work ourselves */
	if (!nfsbufdelwrithd)
		nfs_buf_delwri_service();
	if (!locked)
		lck_mtx_unlock(nfs_buf_mutex);
}

/*
 * Get an nfs buffer.
 *
 * Returns errno on error, 0 otherwise.
 * Any buffer is returned in *bpp.
 *
 * If NBLK_ONLYVALID is set, only return buffer if found in cache.
 * If NBLK_NOWAIT is set, don't wait for the buffer if it's marked BUSY.
 *
 * Check for existence of buffer in cache.
 * Or attempt to reuse a buffer from one of the free lists.
 * Or allocate a new buffer if we haven't already hit max allocation.
 * Or wait for a free buffer.
 *
 * If available buffer found, prepare it, and return it.
 *
 * If the calling process is interrupted by a signal for
 * an interruptible mount point, return EINTR.
 */
int
nfs_buf_get(
	nfsnode_t np,
	daddr64_t blkno,
	uint32_t size,
	thread_t thd,
	int flags,
	struct nfsbuf **bpp)
{
	vnode_t vp = NFSTOV(np);
	struct nfsmount *nmp = VTONMP(vp);
	struct nfsbuf *bp;
	uint32_t bufsize;
	int slpflag = PCATCH;
	int operation = (flags & NBLK_OPMASK);
	int error = 0;
	struct timespec ts;

	FSDBG_TOP(541, np, blkno, size, flags);
	*bpp = NULL;

	bufsize = size;
	if (bufsize > NFS_MAXBSIZE)
		panic("nfs_buf_get: buffer larger than NFS_MAXBSIZE requested");

	if (!nmp) {
		FSDBG_BOT(541, np, blkno, 0, ENXIO);
		return (ENXIO);
	}

	if (!UBCINFOEXISTS(vp)) {
		operation = NBLK_META;
	} else if (bufsize < (uint32_t)nmp->nm_biosize) {
		/* reg files should always have biosize blocks */
		bufsize = nmp->nm_biosize;
	}

	/* if NBLK_WRITE, check for too many delayed/uncommitted writes */
	if ((operation == NBLK_WRITE) && (nfs_nbdwrite > NFS_A_LOT_OF_DELAYED_WRITES)) {
		FSDBG_TOP(542, np, blkno, nfs_nbdwrite, NFS_A_LOT_OF_DELAYED_WRITES);

		/* poke the delwri list */
		nfs_buf_delwri_push(0);

		/* sleep to let other threads run... */
		tsleep(&nfs_nbdwrite, PCATCH, "nfs_nbdwrite", 1);
		FSDBG_BOT(542, np, blkno, nfs_nbdwrite, NFS_A_LOT_OF_DELAYED_WRITES);
	}

loop:
	lck_mtx_lock(nfs_buf_mutex);

	/* check for existence of nfsbuf in cache */
	if ((bp = nfs_buf_incore(np, blkno))) {
		/* if busy, set wanted and wait */
		if (ISSET(bp->nb_lflags, NBL_BUSY)) {
			if (flags & NBLK_NOWAIT) {
				lck_mtx_unlock(nfs_buf_mutex);
				FSDBG_BOT(541, np, blkno, bp, 0xbcbcbcbc);
				return (0);
			}
			FSDBG_TOP(543, np, blkno, bp, bp->nb_flags);
			SET(bp->nb_lflags, NBL_WANTED);

			ts.tv_sec = 2;
			ts.tv_nsec = 0;
			msleep(bp, nfs_buf_mutex, slpflag|(PRIBIO+1)|PDROP,
					"nfsbufget", (slpflag == PCATCH) ? NULL : &ts);
			slpflag = 0;
			FSDBG_BOT(543, np, blkno, bp, bp->nb_flags);
			if ((error = nfs_sigintr(VTONMP(vp), NULL, thd, 0))) {
				FSDBG_BOT(541, np, blkno, 0, error);
				return (error);
			}
			goto loop;
		}
		if (bp->nb_bufsize != bufsize)
			panic("nfsbuf size mismatch");
		SET(bp->nb_lflags, NBL_BUSY);
		SET(bp->nb_flags, NB_CACHE);
		nfs_buf_remfree(bp);
		/* additional paranoia: */
		if (ISSET(bp->nb_flags, NB_PAGELIST))
			panic("pagelist buffer was not busy");
		goto buffer_setup;
	}

	if (flags & NBLK_ONLYVALID) {
		lck_mtx_unlock(nfs_buf_mutex);
		FSDBG_BOT(541, np, blkno, 0, 0x0000cace);
		return (0);
	}

	/*
	 * where to get a free buffer:
	 * - if meta and maxmeta reached, must reuse meta
	 * - alloc new if we haven't reached min bufs
	 * - if free lists are NOT empty
	 *   - if free list is stale, use it
	 *   - else if freemeta list is stale, use it
	 *   - else if max bufs allocated, use least-time-to-stale
	 * - alloc new if we haven't reached max allowed
	 * - start clearing out delwri list and try again
	 */

	if ((operation == NBLK_META) && (nfsbufmetacnt >= nfsbufmetamax)) {
		/* if we've hit max meta buffers, must reuse a meta buffer */
		bp = TAILQ_FIRST(&nfsbuffreemeta);
	} else if ((nfsbufcnt > nfsbufmin) &&
	    (!TAILQ_EMPTY(&nfsbuffree) || !TAILQ_EMPTY(&nfsbuffreemeta))) {
		/* try to pull an nfsbuf off a free list */
		struct nfsbuf *lrubp, *metabp;
		struct timeval now;
		microuptime(&now);

		/* if the next LRU or META buffer is invalid or stale, use it */
		lrubp = TAILQ_FIRST(&nfsbuffree);
		if (lrubp && (!NBUFSTAMPVALID(lrubp) ||
		    ((lrubp->nb_timestamp + NFSBUF_LRU_STALE) < now.tv_sec)))
			bp = lrubp;
		metabp = TAILQ_FIRST(&nfsbuffreemeta);
		if (!bp && metabp && (!NBUFSTAMPVALID(metabp) ||
		    ((metabp->nb_timestamp + NFSBUF_META_STALE) < now.tv_sec)))
			bp = metabp;

		if (!bp && (nfsbufcnt >= nfsbufmax)) {
			/* we've already allocated all bufs, so */
			/* choose the buffer that'll go stale first */
			if (!metabp)
				bp = lrubp;
			else if (!lrubp)
				bp = metabp;
			else {
				int32_t lru_stale_time, meta_stale_time;
				lru_stale_time = lrubp->nb_timestamp + NFSBUF_LRU_STALE;
				meta_stale_time = metabp->nb_timestamp + NFSBUF_META_STALE;
				if (lru_stale_time <= meta_stale_time)
					bp = lrubp;
				else
					bp = metabp;
			}
		}
	}

	if (bp) {
		/* we have a buffer to reuse */
		FSDBG(544, np, blkno, bp, bp->nb_flags);
		nfs_buf_remfree(bp);
		if (ISSET(bp->nb_flags, NB_DELWRI))
			panic("nfs_buf_get: delwri");
		SET(bp->nb_lflags, NBL_BUSY);
		/* disassociate buffer from previous nfsnode */
		if (bp->nb_np) {
			if (bp->nb_vnbufs.le_next != NFSNOLIST) {
				LIST_REMOVE(bp, nb_vnbufs);
				bp->nb_vnbufs.le_next = NFSNOLIST;
			}
			bp->nb_np = NULL;
		}
		LIST_REMOVE(bp, nb_hash);
		/* nuke any creds we're holding */
		if (IS_VALID_CRED(bp->nb_rcred))
			kauth_cred_unref(&bp->nb_rcred);
		if (IS_VALID_CRED(bp->nb_wcred))
			kauth_cred_unref(&bp->nb_wcred);
		/* if buf will no longer be NB_META, dump old buffer */
		if (operation == NBLK_META) {
			if (!ISSET(bp->nb_flags, NB_META))
				nfsbufmetacnt++;
		} else if (ISSET(bp->nb_flags, NB_META)) {
			if (bp->nb_data) {
				kfree(bp->nb_data, bp->nb_bufsize);
				bp->nb_data = NULL;
			}
			nfsbufmetacnt--;
		}
		/* re-init buf fields */
		bp->nb_error = 0;
		bp->nb_validoff = bp->nb_validend = -1;
		bp->nb_dirtyoff = bp->nb_dirtyend = 0;
		bp->nb_valid = 0;
		bp->nb_dirty = 0;
		bp->nb_verf = 0;
	} else {
		/* no buffer to reuse */
		if ((nfsbufcnt < nfsbufmax) &&
		    ((operation != NBLK_META) || (nfsbufmetacnt < nfsbufmetamax))) {
			/* just alloc a new one */
			MALLOC(bp, struct nfsbuf *, sizeof(struct nfsbuf), M_TEMP, M_WAITOK);
			if (!bp) {
				lck_mtx_unlock(nfs_buf_mutex);
				FSDBG_BOT(541, np, blkno, 0, error);
				return (ENOMEM);
			}
			nfsbufcnt++;

			/*
			 * If any excess bufs, make sure the timer
			 * is running to free them up later.
			 */
			if (nfsbufcnt > nfsbufmin && !nfs_buf_timer_on) {
				nfs_buf_timer_on = 1;
				nfs_interval_timer_start(nfs_buf_timer_call,
					NFSBUF_FREE_PERIOD * 1000);
			}

			if (operation == NBLK_META)
				nfsbufmetacnt++;
			NFSBUFCNTCHK();
			/* init nfsbuf */
			bzero(bp, sizeof(*bp));
			bp->nb_free.tqe_next = NFSNOLIST;
			bp->nb_validoff = bp->nb_validend = -1;
			FSDBG(545, np, blkno, bp, 0);
		} else {
			/* too many bufs... wait for buffers to free up */
			FSDBG_TOP(546, np, blkno, nfsbufcnt, nfsbufmax);

			/* poke the delwri list */
			nfs_buf_delwri_push(1);

			nfsneedbuffer = 1;
			msleep(&nfsneedbuffer, nfs_buf_mutex, PCATCH|PDROP, "nfsbufget", NULL);
			FSDBG_BOT(546, np, blkno, nfsbufcnt, nfsbufmax);
			if ((error = nfs_sigintr(VTONMP(vp), NULL, thd, 0))) {
				FSDBG_BOT(541, np, blkno, 0, error);
				return (error);
			}
			goto loop;
		}
	}

	/* set up nfsbuf */
	SET(bp->nb_lflags, NBL_BUSY);
	bp->nb_flags = 0;
	bp->nb_lblkno = blkno;
	/* insert buf in hash */
	LIST_INSERT_HEAD(NFSBUFHASH(np, blkno), bp, nb_hash);
	/* associate buffer with new nfsnode */
	bp->nb_np = np;
	LIST_INSERT_HEAD(&np->n_cleanblkhd, bp, nb_vnbufs);

buffer_setup:

	/* unlock hash */
	lck_mtx_unlock(nfs_buf_mutex);

	switch (operation) {
	case NBLK_META:
		SET(bp->nb_flags, NB_META);
		if ((bp->nb_bufsize != bufsize) && bp->nb_data) {
			kfree(bp->nb_data, bp->nb_bufsize);
			bp->nb_data = NULL;
			bp->nb_validoff = bp->nb_validend = -1;
			bp->nb_dirtyoff = bp->nb_dirtyend = 0;
			bp->nb_valid = 0;
			bp->nb_dirty = 0;
			CLR(bp->nb_flags, NB_CACHE);
		}
		if (!bp->nb_data)
			bp->nb_data = kalloc(bufsize);
		if (!bp->nb_data) {
			/* Ack! couldn't allocate the data buffer! */
			/* clean up buffer and return error */
			lck_mtx_lock(nfs_buf_mutex);
			LIST_REMOVE(bp, nb_vnbufs);
			bp->nb_vnbufs.le_next = NFSNOLIST;
			bp->nb_np = NULL;
			/* invalidate usage timestamp to allow immediate freeing */
			NBUFSTAMPINVALIDATE(bp);
			if (bp->nb_free.tqe_next != NFSNOLIST)
				panic("nfsbuf on freelist");
			TAILQ_INSERT_HEAD(&nfsbuffree, bp, nb_free);
			nfsbuffreecnt++;
			lck_mtx_unlock(nfs_buf_mutex);
			FSDBG_BOT(541, np, blkno, 0xb00, ENOMEM);
			return (ENOMEM);
		}
		bp->nb_bufsize = bufsize;
		break;

	case NBLK_READ:
	case NBLK_WRITE:
		/*
		 * Set or clear NB_READ now to let the UPL subsystem know
		 * if we intend to modify the pages or not.
		 */
		if (operation == NBLK_READ) {
			SET(bp->nb_flags, NB_READ);
		} else {
			CLR(bp->nb_flags, NB_READ);
		}
		if (bufsize < PAGE_SIZE)
			bufsize = PAGE_SIZE;
		bp->nb_bufsize = bufsize;
		bp->nb_validoff = bp->nb_validend = -1;

		if (UBCINFOEXISTS(vp)) {
			/* set up upl */
			if (nfs_buf_upl_setup(bp)) {
				/* unable to create upl */
				/* vm object must no longer exist */
				/* clean up buffer and return error */
				lck_mtx_lock(nfs_buf_mutex);
				LIST_REMOVE(bp, nb_vnbufs);
				bp->nb_vnbufs.le_next = NFSNOLIST;
				bp->nb_np = NULL;
				/* invalidate usage timestamp to allow immediate freeing */
				NBUFSTAMPINVALIDATE(bp);
				if (bp->nb_free.tqe_next != NFSNOLIST)
					panic("nfsbuf on freelist");
				TAILQ_INSERT_HEAD(&nfsbuffree, bp, nb_free);
				nfsbuffreecnt++;
				lck_mtx_unlock(nfs_buf_mutex);
				FSDBG_BOT(541, np, blkno, 0x2bc, EIO);
				return (EIO);
			}
			nfs_buf_upl_check(bp);
		}
		break;

	default:
		panic("nfs_buf_get: %d unknown operation", operation);
	}

	*bpp = bp;

	FSDBG_BOT(541, np, blkno, bp, bp->nb_flags);

	return (0);
}
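
/*
 * Typical nfs_buf_get()/nfs_buf_release() calling pattern (minimal sketch,
 * error handling elided; the real callers are the read/write and readdir
 * paths):
 *
 *	struct nfsbuf *bp = NULL;
 *	error = nfs_buf_get(np, lbn, nmp->nm_biosize, thd, NBLK_READ, &bp);
 *	if (!error && bp) {
 *		... fill or consume bp->nb_data ...
 *		nfs_buf_release(bp, 1);
 *	}
 */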

void
nfs_buf_release(struct nfsbuf *bp, int freeup)
{
	nfsnode_t np = bp->nb_np;
	vnode_t vp;
	struct timeval now;
	int wakeup_needbuffer, wakeup_buffer, wakeup_nbdwrite;

	FSDBG_TOP(548, bp, NBOFF(bp), bp->nb_flags, bp->nb_data);
	FSDBG(548, bp->nb_validoff, bp->nb_validend, bp->nb_dirtyoff, bp->nb_dirtyend);
	FSDBG(548, bp->nb_valid, 0, bp->nb_dirty, 0);

	vp = np ? NFSTOV(np) : NULL;
	if (vp && UBCINFOEXISTS(vp) && bp->nb_bufsize) {
		int upl_flags, rv;
		upl_t upl;
		uint32_t i;

		if (!ISSET(bp->nb_flags, NB_PAGELIST) && !ISSET(bp->nb_flags, NB_INVAL)) {
			rv = nfs_buf_upl_setup(bp);
			if (rv)
				printf("nfs_buf_release: upl create failed %d\n", rv);
			else
				nfs_buf_upl_check(bp);
		}
		upl = bp->nb_pagelist;
		if (!upl)
			goto pagelist_cleanup_done;
		if (bp->nb_data) {
			if (ubc_upl_unmap(upl) != KERN_SUCCESS)
				panic("ubc_upl_unmap failed");
			bp->nb_data = NULL;
		}
		/*
		 * Abort the pages on error or: if this is an invalid or
		 * non-needcommit nocache buffer AND no pages are dirty.
		 */
		if (ISSET(bp->nb_flags, NB_ERROR) || (!bp->nb_dirty && (ISSET(bp->nb_flags, NB_INVAL) ||
		    (ISSET(bp->nb_flags, NB_NOCACHE) && !ISSET(bp->nb_flags, (NB_NEEDCOMMIT | NB_DELWRI)))))) {
			if (ISSET(bp->nb_flags, (NB_READ | NB_INVAL | NB_NOCACHE)))
				upl_flags = UPL_ABORT_DUMP_PAGES;
			else
				upl_flags = 0;
			ubc_upl_abort(upl, upl_flags);
			goto pagelist_cleanup_done;
		}
		for (i=0; i <= (bp->nb_bufsize - 1)/PAGE_SIZE; i++) {
			if (!NBPGVALID(bp,i))
				ubc_upl_abort_range(upl,
					i*PAGE_SIZE, PAGE_SIZE,
					UPL_ABORT_DUMP_PAGES |
					UPL_ABORT_FREE_ON_EMPTY);
			else {
				if (NBPGDIRTY(bp,i))
					upl_flags = UPL_COMMIT_SET_DIRTY;
				else
					upl_flags = UPL_COMMIT_CLEAR_DIRTY;

				if (!ISSET(bp->nb_flags, (NB_NEEDCOMMIT | NB_DELWRI)))
					upl_flags |= UPL_COMMIT_CLEAR_PRECIOUS;

				ubc_upl_commit_range(upl,
					i*PAGE_SIZE, PAGE_SIZE,
					upl_flags |
					UPL_COMMIT_INACTIVATE |
					UPL_COMMIT_FREE_ON_EMPTY);
			}
		}
pagelist_cleanup_done:
		/* invalidate any pages past EOF */
		if (NBOFF(bp) + bp->nb_bufsize > (off_t)(np->n_size)) {
			off_t start, end;
			start = trunc_page_64(np->n_size) + PAGE_SIZE_64;
			end = trunc_page_64(NBOFF(bp) + bp->nb_bufsize);
			if (start < NBOFF(bp))
				start = NBOFF(bp);
			if (end > start) {
				if (!(rv = ubc_sync_range(vp, start, end, UBC_INVALIDATE)))
					printf("nfs_buf_release(): ubc_sync_range failed!\n");
			}
		}
		CLR(bp->nb_flags, NB_PAGELIST);
		bp->nb_pagelist = NULL;
	}

	lck_mtx_lock(nfs_buf_mutex);

	wakeup_needbuffer = wakeup_buffer = wakeup_nbdwrite = 0;

	/* Wake up any processes waiting for any buffer to become free. */
	if (nfsneedbuffer) {
		nfsneedbuffer = 0;
		wakeup_needbuffer = 1;
	}
	/* Wake up any processes waiting for _this_ buffer to become free. */
	if (ISSET(bp->nb_lflags, NBL_WANTED)) {
		CLR(bp->nb_lflags, NBL_WANTED);
		wakeup_buffer = 1;
	}

	/* If it's non-needcommit nocache, or an error, mark it invalid. */
	if (ISSET(bp->nb_flags, NB_ERROR) ||
	    (ISSET(bp->nb_flags, NB_NOCACHE) && !ISSET(bp->nb_flags, (NB_NEEDCOMMIT | NB_DELWRI))))
		SET(bp->nb_flags, NB_INVAL);

	if ((bp->nb_bufsize <= 0) || ISSET(bp->nb_flags, NB_INVAL)) {
		/* If it's invalid or empty, dissociate it from its nfsnode */
		if (bp->nb_vnbufs.le_next != NFSNOLIST) {
			LIST_REMOVE(bp, nb_vnbufs);
			bp->nb_vnbufs.le_next = NFSNOLIST;
		}
		bp->nb_np = NULL;
		/* if this was a delayed write, wakeup anyone */
		/* waiting for delayed writes to complete */
		if (ISSET(bp->nb_flags, NB_DELWRI)) {
			CLR(bp->nb_flags, NB_DELWRI);
			nfs_nbdwrite--;
			NFSBUFCNTCHK();
			wakeup_nbdwrite = 1;
		}
		/* invalidate usage timestamp to allow immediate freeing */
		NBUFSTAMPINVALIDATE(bp);
		/* put buffer at head of free list */
		if (bp->nb_free.tqe_next != NFSNOLIST)
			panic("nfsbuf on freelist");
		SET(bp->nb_flags, NB_INVAL);
		if (ISSET(bp->nb_flags, NB_META)) {
			TAILQ_INSERT_HEAD(&nfsbuffreemeta, bp, nb_free);
			nfsbuffreemetacnt++;
		} else {
			TAILQ_INSERT_HEAD(&nfsbuffree, bp, nb_free);
			nfsbuffreecnt++;
		}
	} else if (ISSET(bp->nb_flags, NB_DELWRI)) {
		/* put buffer at end of delwri list */
		if (bp->nb_free.tqe_next != NFSNOLIST)
			panic("nfsbuf on freelist");
		TAILQ_INSERT_TAIL(&nfsbufdelwri, bp, nb_free);
		nfsbufdelwricnt++;
		freeup = 0;
	} else {
		/* update usage timestamp */
		microuptime(&now);
		bp->nb_timestamp = now.tv_sec;
		/* put buffer at end of free list */
		if (bp->nb_free.tqe_next != NFSNOLIST)
			panic("nfsbuf on freelist");
		if (ISSET(bp->nb_flags, NB_META)) {
			TAILQ_INSERT_TAIL(&nfsbuffreemeta, bp, nb_free);
			nfsbuffreemetacnt++;
		} else {
			TAILQ_INSERT_TAIL(&nfsbuffree, bp, nb_free);
			nfsbuffreecnt++;
		}
	}

	NFSBUFCNTCHK();

	/* Unlock the buffer. */
	CLR(bp->nb_flags, (NB_ASYNC | NB_STABLE));
	CLR(bp->nb_lflags, NBL_BUSY);

	FSDBG_BOT(548, bp, NBOFF(bp), bp->nb_flags, bp->nb_data);

	lck_mtx_unlock(nfs_buf_mutex);

	if (wakeup_needbuffer)
		wakeup(&nfsneedbuffer);
	if (wakeup_buffer)
		wakeup(bp);
	if (wakeup_nbdwrite)
		wakeup(&nfs_nbdwrite);
	if (freeup)
		NFS_BUF_FREEUP();
}

/*
 * Wait for operations on the buffer to complete.
 * When they do, extract and return the I/O's error value.
 */
int
nfs_buf_iowait(struct nfsbuf *bp)
{
	FSDBG_TOP(549, bp, NBOFF(bp), bp->nb_flags, bp->nb_error);

	lck_mtx_lock(nfs_buf_mutex);

	while (!ISSET(bp->nb_flags, NB_DONE))
		msleep(bp, nfs_buf_mutex, PRIBIO + 1, "nfs_buf_iowait", NULL);

	lck_mtx_unlock(nfs_buf_mutex);

	FSDBG_BOT(549, bp, NBOFF(bp), bp->nb_flags, bp->nb_error);

	/* check for interruption of I/O, then errors. */
	if (ISSET(bp->nb_flags, NB_EINTR)) {
		CLR(bp->nb_flags, NB_EINTR);
		return (EINTR);
	} else if (ISSET(bp->nb_flags, NB_ERROR))
		return (bp->nb_error ? bp->nb_error : EIO);
	return (0);
}

/*
 * Mark I/O complete on a buffer.
 */
void
nfs_buf_iodone(struct nfsbuf *bp)
{

	FSDBG_TOP(550, bp, NBOFF(bp), bp->nb_flags, bp->nb_error);

	if (ISSET(bp->nb_flags, NB_DONE))
		panic("nfs_buf_iodone already");

	if (!ISSET(bp->nb_flags, NB_READ)) {
		CLR(bp->nb_flags, NB_WRITEINPROG);
		/*
		 * vnode_writedone() takes care of waking up
		 * any throttled write operations
		 */
		vnode_writedone(NFSTOV(bp->nb_np));
		nfs_node_lock_force(bp->nb_np);
		bp->nb_np->n_numoutput--;
		nfs_node_unlock(bp->nb_np);
	}
	if (ISSET(bp->nb_flags, NB_ASYNC)) {	/* if async, release it */
		SET(bp->nb_flags, NB_DONE);		/* note that it's done */
		nfs_buf_release(bp, 1);
	} else {					/* or just wakeup the buffer */
		lck_mtx_lock(nfs_buf_mutex);
		SET(bp->nb_flags, NB_DONE);		/* note that it's done */
		CLR(bp->nb_lflags, NBL_WANTED);
		lck_mtx_unlock(nfs_buf_mutex);
		wakeup(bp);
	}

	FSDBG_BOT(550, bp, NBOFF(bp), bp->nb_flags, bp->nb_error);
}

void
nfs_buf_write_delayed(struct nfsbuf *bp)
{
	nfsnode_t np = bp->nb_np;

	FSDBG_TOP(551, bp, NBOFF(bp), bp->nb_flags, 0);
	FSDBG(551, bp, bp->nb_dirtyoff, bp->nb_dirtyend, bp->nb_dirty);

	/*
	 * If the block hasn't been seen before:
	 *	(1) Mark it as having been seen,
	 *	(2) Make sure it's on its node's correct block list,
	 */
	if (!ISSET(bp->nb_flags, NB_DELWRI)) {
		SET(bp->nb_flags, NB_DELWRI);
		/* move to dirty list */
		lck_mtx_lock(nfs_buf_mutex);
		nfs_nbdwrite++;
		NFSBUFCNTCHK();
		if (bp->nb_vnbufs.le_next != NFSNOLIST)
			LIST_REMOVE(bp, nb_vnbufs);
		LIST_INSERT_HEAD(&np->n_dirtyblkhd, bp, nb_vnbufs);
		lck_mtx_unlock(nfs_buf_mutex);
	}

	/*
	 * If the vnode has "too many" write operations in progress
	 * wait for them to finish the IO
	 */
	vnode_waitforwrites(NFSTOV(np), VNODE_ASYNC_THROTTLE, 0, 0, "nfs_buf_write_delayed");

	/* the file is in a modified state, so make sure the flag's set */
	nfs_node_lock_force(np);
	np->n_flag |= NMODIFIED;
	nfs_node_unlock(np);

	/*
	 * If we have too many delayed write buffers,
	 * just fall back to doing the async write.
	 */
	if (nfs_nbdwrite < 0)
		panic("nfs_buf_write_delayed: Negative nfs_nbdwrite");
	if (nfs_nbdwrite > NFS_A_LOT_OF_DELAYED_WRITES) {
		/* issue async write */
		SET(bp->nb_flags, NB_ASYNC);
		nfs_buf_write(bp);
		FSDBG_BOT(551, bp, NBOFF(bp), bp->nb_flags, bp->nb_error);
		return;
	}

	/* Otherwise, the "write" is done, so mark and release the buffer. */
	SET(bp->nb_flags, NB_DONE);
	nfs_buf_release(bp, 1);
	FSDBG_BOT(551, bp, NBOFF(bp), bp->nb_flags, 0);
	return;
}

/*
 * Check that a "needcommit" buffer can still be committed.
 * If the write verifier has changed, we need to clear the
 * needcommit flag.
 */
void
nfs_buf_check_write_verifier(nfsnode_t np, struct nfsbuf *bp)
{
	struct nfsmount *nmp;

	if (!ISSET(bp->nb_flags, NB_NEEDCOMMIT))
		return;

	nmp = NFSTONMP(np);
	if (!nmp)
		return;
	if (!ISSET(bp->nb_flags, NB_STALEWVERF) && (bp->nb_verf == nmp->nm_verf))
		return;

	/* write verifier changed, clear commit/wverf flags */
	CLR(bp->nb_flags, (NB_NEEDCOMMIT | NB_STALEWVERF));
	bp->nb_verf = 0;
	nfs_node_lock_force(np);
	np->n_needcommitcnt--;
	CHECK_NEEDCOMMITCNT(np);
	nfs_node_unlock(np);
}

/*
 * add a reference to a buffer so it doesn't disappear while being used
 * (must be called with nfs_buf_mutex held)
 */
void
nfs_buf_refget(struct nfsbuf *bp)
{
	bp->nb_refs++;
}
/*
 * release a reference on a buffer
 * (must be called with nfs_buf_mutex held)
 */
void
nfs_buf_refrele(struct nfsbuf *bp)
{
	bp->nb_refs--;
}

/*
 * mark a particular buffer as BUSY
 * (must be called with nfs_buf_mutex held)
 */
errno_t
nfs_buf_acquire(struct nfsbuf *bp, int flags, int slpflag, int slptimeo)
{
	errno_t error;
	struct timespec ts;

	if (ISSET(bp->nb_lflags, NBL_BUSY)) {
		/*
		 * since the lck_mtx_lock may block, the buffer
		 * may become BUSY, so we need to recheck for
		 * a NOWAIT request
		 */
		if (flags & NBAC_NOWAIT)
			return (EBUSY);
		SET(bp->nb_lflags, NBL_WANTED);

		ts.tv_sec = (slptimeo/100);
		/* the hz value is 100; which leads to 10ms */
		ts.tv_nsec = (slptimeo % 100) * 10 * NSEC_PER_USEC * 1000;

		error = msleep(bp, nfs_buf_mutex, slpflag | (PRIBIO + 1),
			"nfs_buf_acquire", &ts);
		if (error)
			return (error);
		return (EAGAIN);
	}
	if (flags & NBAC_REMOVE)
		nfs_buf_remfree(bp);
	SET(bp->nb_lflags, NBL_BUSY);

	return (0);
}

/*
 * simply drop the BUSY status of a buffer
 * (must be called with nfs_buf_mutex held)
 */
void
nfs_buf_drop(struct nfsbuf *bp)
{
	int need_wakeup = 0;

	if (!ISSET(bp->nb_lflags, NBL_BUSY))
		panic("nfs_buf_drop: buffer not busy!");
	if (ISSET(bp->nb_lflags, NBL_WANTED)) {
		/* delay the actual wakeup until after we clear NBL_BUSY */
		need_wakeup = 1;
	}
	/* Unlock the buffer. */
	CLR(bp->nb_lflags, (NBL_BUSY | NBL_WANTED));

	if (need_wakeup)
		wakeup(bp);
}
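
/*
 * Typical acquire/drop pattern (minimal sketch; nfs_buf_delwri_service()
 * above is a real example):
 *
 *	lck_mtx_lock(nfs_buf_mutex);
 *	nfs_buf_refget(bp);
 *	while ((error = nfs_buf_acquire(bp, 0, 0, 0)) == EAGAIN)
 *		;
 *	nfs_buf_refrele(bp);
 *	if (!error) {
 *		... use the buffer ...
 *		nfs_buf_drop(bp);
 *	}
 *	lck_mtx_unlock(nfs_buf_mutex);
 */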

/*
 * prepare for iterating over an nfsnode's buffer list
 * this lock protects the queue manipulation
 * (must be called with nfs_buf_mutex held)
 */
int
nfs_buf_iterprepare(nfsnode_t np, struct nfsbuflists *iterheadp, int flags)
{
	struct nfsbuflists *listheadp;

	if (flags & NBI_DIRTY)
		listheadp = &np->n_dirtyblkhd;
	else
		listheadp = &np->n_cleanblkhd;

	if ((flags & NBI_NOWAIT) && (np->n_bufiterflags & NBI_ITER)) {
		LIST_INIT(iterheadp);
		return(EWOULDBLOCK);
	}

	while (np->n_bufiterflags & NBI_ITER) {
		np->n_bufiterflags |= NBI_ITERWANT;
		msleep(&np->n_bufiterflags, nfs_buf_mutex, 0, "nfs_buf_iterprepare", NULL);
	}
	if (LIST_EMPTY(listheadp)) {
		LIST_INIT(iterheadp);
		return(EINVAL);
	}
	np->n_bufiterflags |= NBI_ITER;

	iterheadp->lh_first = listheadp->lh_first;
	listheadp->lh_first->nb_vnbufs.le_prev = &iterheadp->lh_first;
	LIST_INIT(listheadp);

	return(0);
}

/*
 * clean up after iterating over an nfsnode's buffer list
 * this lock protects the queue manipulation
 * (must be called with nfs_buf_mutex held)
 */
void
nfs_buf_itercomplete(nfsnode_t np, struct nfsbuflists *iterheadp, int flags)
{
	struct nfsbuflists * listheadp;
	struct nfsbuf *bp;

	if (flags & NBI_DIRTY)
		listheadp = &np->n_dirtyblkhd;
	else
		listheadp = &np->n_cleanblkhd;

	while (!LIST_EMPTY(iterheadp)) {
		bp = LIST_FIRST(iterheadp);
		LIST_REMOVE(bp, nb_vnbufs);
		LIST_INSERT_HEAD(listheadp, bp, nb_vnbufs);
	}

	np->n_bufiterflags &= ~NBI_ITER;
	if (np->n_bufiterflags & NBI_ITERWANT) {
		np->n_bufiterflags &= ~NBI_ITERWANT;
		wakeup(&np->n_bufiterflags);
	}
}
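
/*
 * Typical iteration pattern (minimal sketch; callers elsewhere in the NFS
 * code use it to walk a node's buffer lists safely):
 *
 *	struct nfsbuflists blist;
 *
 *	lck_mtx_lock(nfs_buf_mutex);
 *	if (!nfs_buf_iterprepare(np, &blist, NBI_DIRTY)) {
 *		while ((bp = LIST_FIRST(&blist))) {
 *			LIST_REMOVE(bp, nb_vnbufs);
 *			LIST_INSERT_HEAD(&np->n_dirtyblkhd, bp, nb_vnbufs);
 *			... inspect or acquire bp ...
 *		}
 *		nfs_buf_itercomplete(np, &blist, NBI_DIRTY);
 *	}
 *	lck_mtx_unlock(nfs_buf_mutex);
 */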
1435 | ||
1c79356b A |
1436 | |
1437 | /* | |
2d21ac55 | 1438 | * Read an NFS buffer for a file. |
1c79356b A |
1439 | */ |
1440 | int | |
2d21ac55 | 1441 | nfs_buf_read(struct nfsbuf *bp) |
1c79356b | 1442 | { |
2d21ac55 A |
1443 | int error = 0; |
1444 | nfsnode_t np; | |
1445 | thread_t thd; | |
1446 | kauth_cred_t cred; | |
55e303ae | 1447 | |
2d21ac55 A |
1448 | np = bp->nb_np; |
1449 | cred = bp->nb_rcred; | |
1450 | if (IS_VALID_CRED(cred)) | |
1451 | kauth_cred_ref(cred); | |
1452 | thd = ISSET(bp->nb_flags, NB_ASYNC) ? NULL : current_thread(); | |
1c79356b | 1453 | |
2d21ac55 A |
1454 | /* sanity checks */ |
1455 | if (!ISSET(bp->nb_flags, NB_READ)) | |
1456 | panic("nfs_buf_read: !NB_READ"); | |
1457 | if (ISSET(bp->nb_flags, NB_DONE)) | |
1458 | CLR(bp->nb_flags, NB_DONE); | |
91447636 | 1459 | |
2d21ac55 | 1460 | NFS_BUF_MAP(bp); |
0c530ab8 | 1461 | |
b0d623f7 | 1462 | OSAddAtomic(1, &nfsstats.read_bios); |
2d21ac55 A |
1463 | |
1464 | error = nfs_buf_read_rpc(bp, thd, cred); | |
1c79356b | 1465 | /* |
2d21ac55 A |
1466 | * For async I/O, the callbacks will finish up the |
1467 | * read. Otherwise, the read has already been finished. | |
1c79356b | 1468 | */ |
2d21ac55 A |
1469 | |
1470 | if (IS_VALID_CRED(cred)) | |
1471 | kauth_cred_unref(&cred); | |
1472 | return (error); | |
1473 | } | |
1474 | ||
1475 | /* | |
1476 | * finish the reading of a buffer | |
1477 | */ | |
1478 | void | |
1479 | nfs_buf_read_finish(struct nfsbuf *bp) | |
1480 | { | |
1481 | nfsnode_t np = bp->nb_np; | |
1482 | struct nfsmount *nmp; | |
1483 | ||
1484 | if (!ISSET(bp->nb_flags, NB_ERROR)) { | |
1485 | /* update valid range */ | |
1486 | bp->nb_validoff = 0; | |
1487 | bp->nb_validend = bp->nb_endio; | |
b0d623f7 | 1488 | if (bp->nb_endio < (int)bp->nb_bufsize) { |
2d21ac55 A |
1489 | /* |
1490 | * The read may be short because we have unflushed writes | |
1491 | * that are extending the file size and the reads hit the | |
1492 | * (old) EOF on the server. So, just make sure nb_validend | |
1493 | * correctly tracks EOF. | |
1494 | * Note that the missing data should have already been zeroed | |
1495 | * in nfs_buf_read_rpc_finish(). | |
1496 | */ | |
1497 | off_t boff = NBOFF(bp); | |
1498 | if ((off_t)np->n_size >= (boff + bp->nb_bufsize)) | |
1499 | bp->nb_validend = bp->nb_bufsize; | |
1500 | else if ((off_t)np->n_size >= boff) | |
1501 | bp->nb_validend = np->n_size - boff; | |
1502 | else | |
1503 | bp->nb_validend = 0; | |
91447636 | 1504 | } |
2d21ac55 A |
1505 | if ((nmp = NFSTONMP(np)) && (nmp->nm_vers == NFS_VER2) && |
1506 | ((NBOFF(bp) + bp->nb_validend) > 0x100000000LL)) | |
1507 | bp->nb_validend = 0x100000000LL - NBOFF(bp); | |
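		/* nb_valid is a bitmap of valid pages: set one bit for each page covered by [0, nb_validend) */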
1508 | bp->nb_valid = (1 << (round_page_32(bp->nb_validend) / PAGE_SIZE)) - 1; | |
1509 | if (bp->nb_validend & PAGE_MASK) { | |
1510 | /* zero-fill remainder of last page */ | |
1511 | bzero(bp->nb_data + bp->nb_validend, bp->nb_bufsize - bp->nb_validend); | |
91447636 | 1512 | } |
2d21ac55 A |
1513 | } |
1514 | nfs_buf_iodone(bp); | |
1515 | } | |
1516 | ||
1517 | /* | |
1518 | * initiate the NFS READ RPC(s) for a buffer | |
1519 | */ | |
1520 | int | |
1521 | nfs_buf_read_rpc(struct nfsbuf *bp, thread_t thd, kauth_cred_t cred) | |
1522 | { | |
1523 | struct nfsmount *nmp; | |
1524 | nfsnode_t np = bp->nb_np; | |
1525 | int error = 0, nfsvers, async; | |
b0d623f7 A |
1526 | int offset, nrpcs; |
1527 | uint32_t nmrsize, length, len; | |
2d21ac55 A |
1528 | off_t boff; |
1529 | struct nfsreq *req; | |
1530 | struct nfsreq_cbinfo cb; | |
1531 | ||
1532 | nmp = NFSTONMP(np); | |
1533 | if (!nmp) { | |
1534 | bp->nb_error = error = ENXIO; | |
1535 | SET(bp->nb_flags, NB_ERROR); | |
1536 | nfs_buf_iodone(bp); | |
1537 | return (error); | |
1538 | } | |
1539 | nfsvers = nmp->nm_vers; | |
1540 | nmrsize = nmp->nm_rsize; | |
1541 | ||
1542 | boff = NBOFF(bp); | |
1543 | offset = 0; | |
1544 | length = bp->nb_bufsize; | |
1545 | ||
1546 | if (nfsvers == NFS_VER2) { | |
1547 | if (boff > 0xffffffffLL) { | |
1548 | bp->nb_error = error = EFBIG; | |
1549 | SET(bp->nb_flags, NB_ERROR); | |
1550 | nfs_buf_iodone(bp); | |
91447636 A |
1551 | return (error); |
1552 | } | |
2d21ac55 A |
1553 | if ((boff + length - 1) > 0xffffffffLL) |
1554 | length = 0x100000000LL - boff; | |
91447636 A |
1555 | } |
1556 | ||
2d21ac55 A |
1557 | /* Note: Can only do async I/O if nfsiods are configured. */ |
1558 | async = (bp->nb_flags & NB_ASYNC); | |
1559 | cb.rcb_func = async ? nfs_buf_read_rpc_finish : NULL; | |
1560 | cb.rcb_bp = bp; | |
1561 | ||
1562 | bp->nb_offio = bp->nb_endio = 0; | |
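	/* split the read into (at most) nmrsize-sized RPCs; nb_rpcs tracks how many are still outstanding */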
1563 | bp->nb_rpcs = nrpcs = (length + nmrsize - 1) / nmrsize; | |
1564 | if (async && (nrpcs > 1)) { | |
1565 | SET(bp->nb_flags, NB_MULTASYNCRPC); | |
1566 | } else { | |
1567 | CLR(bp->nb_flags, NB_MULTASYNCRPC); | |
1c79356b | 1568 | } |
1c79356b | 1569 | |
2d21ac55 A |
1570 | while (length > 0) { |
1571 | if (ISSET(bp->nb_flags, NB_ERROR)) { | |
1572 | error = bp->nb_error; | |
91447636 | 1573 | break; |
2d21ac55 A |
1574 | } |
1575 | len = (length > nmrsize) ? nmrsize : length; | |
1576 | cb.rcb_args[0] = offset; | |
1577 | cb.rcb_args[1] = len; | |
b0d623f7 A |
1578 | if (nmp->nm_vers >= NFS_VER4) |
1579 | cb.rcb_args[2] = nmp->nm_stategenid; | |
2d21ac55 A |
1580 | req = NULL; |
1581 | error = nmp->nm_funcs->nf_read_rpc_async(np, boff + offset, len, thd, cred, &cb, &req); | |
1582 | if (error) | |
1c79356b | 1583 | break; |
2d21ac55 A |
1584 | offset += len; |
1585 | length -= len; | |
1586 | if (async) | |
1587 | continue; | |
1588 | nfs_buf_read_rpc_finish(req); | |
1589 | if (ISSET(bp->nb_flags, NB_ERROR)) { | |
1590 | error = bp->nb_error; | |
1591 | break; | |
1592 | } | |
1593 | } | |
55e303ae | 1594 | |
2d21ac55 | 1595 | if (length > 0) { |
55e303ae | 1596 | /* |
2d21ac55 A |
1597 | * Something bad happened while trying to send the RPC(s). |
1598 | * Wait for any outstanding requests to complete. | |
55e303ae | 1599 | */ |
2d21ac55 A |
1600 | bp->nb_error = error; |
1601 | SET(bp->nb_flags, NB_ERROR); | |
1602 | if (ISSET(bp->nb_flags, NB_MULTASYNCRPC)) { | |
1603 | nrpcs = (length + nmrsize - 1) / nmrsize; | |
1604 | lck_mtx_lock(nfs_buf_mutex); | |
1605 | bp->nb_rpcs -= nrpcs; | |
1606 | if (bp->nb_rpcs == 0) { | |
1607 | /* No RPCs left, so the buffer's done */ | |
1608 | lck_mtx_unlock(nfs_buf_mutex); | |
1609 | nfs_buf_iodone(bp); | |
1610 | } else { | |
1611 | /* wait for the last RPC to mark it done */ | |
1612 | while (bp->nb_rpcs > 0) | |
1613 | msleep(&bp->nb_rpcs, nfs_buf_mutex, 0, | |
1614 | "nfs_buf_read_rpc_cancel", NULL); | |
1615 | lck_mtx_unlock(nfs_buf_mutex); | |
55e303ae | 1616 | } |
2d21ac55 A |
1617 | } else { |
1618 | nfs_buf_iodone(bp); | |
55e303ae | 1619 | } |
2d21ac55 | 1620 | } |
55e303ae | 1621 | |
2d21ac55 A |
1622 | return (error); |
1623 | } | |
1c79356b | 1624 | |
2d21ac55 A |
1625 | /* |
1626 | * finish up an NFS READ RPC on a buffer | |
1627 | */ | |
1628 | void | |
1629 | nfs_buf_read_rpc_finish(struct nfsreq *req) | |
1630 | { | |
1631 | struct nfsmount *nmp; | |
1632 | size_t rlen; | |
1633 | struct nfsreq_cbinfo cb; | |
1634 | struct nfsbuf *bp; | |
1635 | int error = 0, nfsvers, offset, length, eof = 0, multasyncrpc, finished; | |
1636 | void *wakeme = NULL; | |
1637 | struct nfsreq *rreq = NULL; | |
1638 | nfsnode_t np; | |
1639 | thread_t thd; | |
1640 | kauth_cred_t cred; | |
b0d623f7 A |
1641 | uio_t auio; |
1642 | char uio_buf [ UIO_SIZEOF(1) ]; | |
2d21ac55 A |
1643 | |
1644 | finish: | |
1645 | np = req->r_np; | |
1646 | thd = req->r_thread; | |
1647 | cred = req->r_cred; | |
1648 | if (IS_VALID_CRED(cred)) | |
1649 | kauth_cred_ref(cred); | |
1650 | cb = req->r_callback; | |
1651 | bp = cb.rcb_bp; | |
1652 | ||
1653 | nmp = NFSTONMP(np); | |
1654 | if (!nmp) { | |
1655 | SET(bp->nb_flags, NB_ERROR); | |
1656 | bp->nb_error = error = ENXIO; | |
1657 | } | |
1658 | if (error || ISSET(bp->nb_flags, NB_ERROR)) { | |
1659 | /* just drop it */ | |
1660 | nfs_request_async_cancel(req); | |
1661 | goto out; | |
1662 | } | |
1663 | ||
1664 | nfsvers = nmp->nm_vers; | |
1665 | offset = cb.rcb_args[0]; | |
1666 | rlen = length = cb.rcb_args[1]; | |
1667 | ||
b0d623f7 A |
1668 | auio = uio_createwithbuffer(1, NBOFF(bp) + offset, UIO_SYSSPACE, |
1669 | UIO_READ, &uio_buf, sizeof(uio_buf)); | |
1670 | uio_addiov(auio, CAST_USER_ADDR_T(bp->nb_data + offset), length); | |
2d21ac55 A |
1671 | |
1672 | /* finish the RPC */ | |
b0d623f7 | 1673 | error = nmp->nm_funcs->nf_read_rpc_async_finish(np, req, auio, &rlen, &eof); |
2d21ac55 A |
1674 | if ((error == EINPROGRESS) && cb.rcb_func) { |
1675 | /* async request restarted */ | |
1676 | if (IS_VALID_CRED(cred)) | |
1677 | kauth_cred_unref(&cred); | |
1678 | return; | |
1679 | } | |
b0d623f7 A |
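	/* NFSv4: errors indicating lost state trigger mount recovery; retry the read once recovery completes */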
1680 | if ((nmp->nm_vers >= NFS_VER4) && nfs_mount_state_error_should_restart(error) && !ISSET(bp->nb_flags, NB_ERROR)) { |
1681 | lck_mtx_lock(&nmp->nm_lock); | |
1682 | if ((error != NFSERR_GRACE) && (cb.rcb_args[2] == nmp->nm_stategenid) && !(nmp->nm_state & NFSSTA_RECOVER)) { | |
1683 | printf("nfs_buf_read_rpc_finish: error %d, initiating recovery\n", error); | |
1684 | nmp->nm_state |= NFSSTA_RECOVER; | |
1685 | nfs_mount_sock_thread_wake(nmp); | |
1686 | } | |
1687 | lck_mtx_unlock(&nmp->nm_lock); | |
1688 | if (error == NFSERR_GRACE) | |
1689 | tsleep(&nmp->nm_state, (PZERO-1), "nfsgrace", 2*hz); | |
1690 | if (!(error = nfs_mount_state_wait_for_recovery(nmp))) { | |
1691 | rlen = 0; | |
1692 | goto readagain; | |
1693 | } | |
1694 | } | |
2d21ac55 A |
1695 | if (error) { |
1696 | SET(bp->nb_flags, NB_ERROR); | |
1697 | bp->nb_error = error; | |
1698 | goto out; | |
1699 | } | |
1700 | ||
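	/* extend nb_endio to cover the data returned by this RPC */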
1701 | if ((rlen > 0) && (bp->nb_endio < (offset + (int)rlen))) | |
1702 | bp->nb_endio = offset + rlen; | |
1703 | ||
1704 | if ((nfsvers == NFS_VER2) || eof || (rlen == 0)) { | |
1705 | /* zero out the remaining data (up to EOF) */ | |
1706 | off_t rpcrem, eofrem, rem; | |
1707 | rpcrem = (length - rlen); | |
1708 | eofrem = np->n_size - (NBOFF(bp) + offset + rlen); | |
1709 | rem = (rpcrem < eofrem) ? rpcrem : eofrem; | |
1710 | if (rem > 0) | |
1711 | bzero(bp->nb_data + offset + rlen, rem); | |
1712 | } else if (((int)rlen < length) && !ISSET(bp->nb_flags, NB_ERROR)) { | |
1713 | /* | |
1714 | * short read | |
1715 | * | |
1716 | * We haven't hit EOF and we didn't get all the data | |
1717 | * requested, so we need to issue another read for the rest. | |
1718 | * (Don't bother if the buffer already hit an error.) | |
1719 | */ | |
b0d623f7 | 1720 | readagain: |
2d21ac55 A |
1721 | offset += rlen; |
1722 | length -= rlen; | |
1723 | cb.rcb_args[0] = offset; | |
1724 | cb.rcb_args[1] = length; | |
b0d623f7 A |
1725 | if (nmp->nm_vers >= NFS_VER4) |
1726 | cb.rcb_args[2] = nmp->nm_stategenid; | |
1727 | error = nmp->nm_funcs->nf_read_rpc_async(np, NBOFF(bp) + offset, length, thd, cred, &cb, &rreq); | |
2d21ac55 A |
1728 | if (!error) { |
1729 | if (IS_VALID_CRED(cred)) | |
1730 | kauth_cred_unref(&cred); | |
1731 | if (!cb.rcb_func) { | |
1732 | /* if !async we'll need to wait for this RPC to finish */ | |
1733 | req = rreq; | |
b0d623f7 | 1734 | rreq = NULL; |
2d21ac55 A |
1735 | goto finish; |
1736 | } | |
1737 | /* | |
1738 | * We're done here. | |
1739 | * Outstanding RPC count is unchanged. | |
1740 | * Callback will be called when RPC is done. | |
1741 | */ | |
1742 | return; | |
1743 | } | |
1744 | SET(bp->nb_flags, NB_ERROR); | |
1745 | bp->nb_error = error; | |
1746 | } | |
1747 | ||
1748 | out: | |
1749 | if (IS_VALID_CRED(cred)) | |
1750 | kauth_cred_unref(&cred); | |
1751 | ||
1752 | /* | |
1753 | * Decrement outstanding RPC count on buffer | |
1754 | * and call nfs_buf_read_finish on last RPC. | |
1755 | * | |
1756 | * (Note: when there are multiple async RPCs issued for a | |
1757 | * buffer we need nfs_buf_mutex to avoid problems when | |
1758 | * aborting a partially-initiated set of RPCs) | |
1759 | */ | |
1760 | ||
1761 | multasyncrpc = ISSET(bp->nb_flags, NB_MULTASYNCRPC); | |
1762 | if (multasyncrpc) | |
1763 | lck_mtx_lock(nfs_buf_mutex); | |
1764 | ||
1765 | bp->nb_rpcs--; | |
1766 | finished = (bp->nb_rpcs == 0); | |
1767 | ||
1768 | if (multasyncrpc) | |
1769 | lck_mtx_unlock(nfs_buf_mutex); | |
1770 | ||
1771 | if (finished) { | |
1772 | if (multasyncrpc) | |
1773 | wakeme = &bp->nb_rpcs; | |
1774 | nfs_buf_read_finish(bp); | |
1775 | if (wakeme) | |
1776 | wakeup(wakeme); | |
1777 | } | |
1778 | } | |
1779 | ||
1780 | /* | |
1781 | * Do buffer readahead. | |
1782 | * Initiate async I/O to read buffers not in cache. | |
1783 | */ | |
b0d623f7 | 1784 | int |
2d21ac55 A |
1785 | nfs_buf_readahead(nfsnode_t np, int ioflag, daddr64_t *rabnp, daddr64_t lastrabn, thread_t thd, kauth_cred_t cred) |
1786 | { | |
1787 | struct nfsmount *nmp = NFSTONMP(np); | |
1788 | struct nfsbuf *bp; | |
1789 | int error = 0, nra; | |
1790 | ||
1791 | if (!nmp) | |
1792 | return (ENXIO); | |
1793 | if (nmp->nm_readahead <= 0) | |
1794 | return (0); | |
1795 | if (*rabnp > lastrabn) | |
1796 | return (0); | |
1797 | ||
1798 | for (nra = 0; (nra < nmp->nm_readahead) && (*rabnp <= lastrabn); nra++, *rabnp = *rabnp + 1) { | |
1799 | /* check if block exists and is valid. */ | |
b0d623f7 A |
1800 | if ((*rabnp * nmp->nm_biosize) >= (off_t)np->n_size) { |
1801 | /* stop reading ahead if we're beyond EOF */ | |
1802 | *rabnp = lastrabn; | |
1803 | break; | |
1804 | } | |
2d21ac55 A |
1805 | error = nfs_buf_get(np, *rabnp, nmp->nm_biosize, thd, NBLK_READ|NBLK_NOWAIT, &bp); |
1806 | if (error) | |
1807 | break; | |
b0d623f7 A |
1808 | nfs_node_lock_force(np); |
1809 | np->n_lastrahead = *rabnp; | |
1810 | nfs_node_unlock(np); | |
2d21ac55 A |
1811 | if (!bp) |
1812 | continue; | |
1813 | if ((ioflag & IO_NOCACHE) && ISSET(bp->nb_flags, NB_CACHE) && | |
1814 | !bp->nb_dirty && !ISSET(bp->nb_flags, (NB_DELWRI|NB_NCRDAHEAD))) { | |
1815 | CLR(bp->nb_flags, NB_CACHE); | |
1816 | bp->nb_valid = 0; | |
1817 | bp->nb_validoff = bp->nb_validend = -1; | |
1818 | } | |
1819 | if ((bp->nb_dirtyend <= 0) && !bp->nb_dirty && | |
1820 | !ISSET(bp->nb_flags, (NB_CACHE|NB_DELWRI))) { | |
1821 | SET(bp->nb_flags, (NB_READ|NB_ASYNC)); | |
1822 | if (ioflag & IO_NOCACHE) | |
1823 | SET(bp->nb_flags, NB_NCRDAHEAD); | |
1824 | if (!IS_VALID_CRED(bp->nb_rcred) && IS_VALID_CRED(cred)) { | |
1825 | kauth_cred_ref(cred); | |
1826 | bp->nb_rcred = cred; | |
1827 | } | |
1828 | if ((error = nfs_buf_read(bp))) | |
1829 | break; | |
1830 | continue; | |
1831 | } | |
1832 | nfs_buf_release(bp, 1); | |
1833 | } | |
1834 | return (error); | |
1835 | } | |
1836 | ||
1837 | /* | |
b0d623f7 | 1838 | * NFS buffer I/O for reading files. |
2d21ac55 A |
1839 | */ |
1840 | int | |
b0d623f7 | 1841 | nfs_bioread(nfsnode_t np, uio_t uio, int ioflag, vfs_context_t ctx) |
2d21ac55 A |
1842 | { |
1843 | vnode_t vp = NFSTOV(np); | |
1844 | struct nfsbuf *bp = NULL; | |
1845 | struct nfs_vattr nvattr; | |
1846 | struct nfsmount *nmp = VTONMP(vp); | |
b0d623f7 | 1847 | daddr64_t lbn, rabn = 0, lastrabn, maxrabn = -1; |
2d21ac55 A |
1848 | off_t diff; |
1849 | int error = 0, n = 0, on = 0; | |
b0d623f7 | 1850 | int nfsvers, biosize, modified, readaheads = 0; |
2d21ac55 A |
1851 | thread_t thd; |
1852 | kauth_cred_t cred; | |
b0d623f7 | 1853 | int64_t io_resid; |
2d21ac55 | 1854 | |
b0d623f7 | 1855 | FSDBG_TOP(514, np, uio_offset(uio), uio_resid(uio), ioflag); |
2d21ac55 A |
1856 | |
1857 | nfsvers = nmp->nm_vers; | |
1858 | biosize = nmp->nm_biosize; | |
1859 | thd = vfs_context_thread(ctx); | |
1860 | cred = vfs_context_ucred(ctx); | |
1861 | ||
b0d623f7 A |
1862 | if (vnode_vtype(vp) != VREG) { |
1863 | printf("nfs_bioread: type %x unexpected\n", vnode_vtype(vp)); | |
2d21ac55 A |
1864 | FSDBG_BOT(514, np, 0xd1e0016, 0, EINVAL); |
1865 | return (EINVAL); | |
1866 | } | |
1867 | ||
1868 | /* | |
b0d623f7 | 1869 | * For NFS, cache consistency can only be maintained approximately. |
2d21ac55 A |
1870 | * Although RFC1094 does not specify the criteria, the following is |
1871 | * believed to be compatible with the reference port. | |
b0d623f7 A |
1872 | * |
1873 | * If the file has changed since the last read RPC or you have | |
1874 | * written to the file, you may have lost data cache consistency | |
1875 | * with the server. So, check for a change, and flush all of the | |
1876 | * file's data out of the cache. | |
2d21ac55 | 1877 | * NB: This implies that cache data can be read when up to |
b0d623f7 A |
1878 | * NFS_MAXATTRTIMO seconds out of date. If you find that you |
1879 | * need current attributes, nfs_getattr() can be forced to fetch | |
1880 | * new attributes (via NATTRINVALIDATE() or NGA_UNCACHED). | |
2d21ac55 A |
1881 | */ |
1882 | ||
1883 | if (ISSET(np->n_flag, NUPDATESIZE)) | |
1884 | nfs_data_update_size(np, 0); | |
1885 | ||
b0d623f7 | 1886 | if ((error = nfs_node_lock(np))) { |
2d21ac55 A |
1887 | FSDBG_BOT(514, np, 0xd1e0222, 0, error); |
1888 | return (error); | |
1889 | } | |
1890 | ||
1891 | if (np->n_flag & NNEEDINVALIDATE) { | |
1892 | np->n_flag &= ~NNEEDINVALIDATE; | |
b0d623f7 A |
1893 | nfs_node_unlock(np); |
1894 | error = nfs_vinvalbuf(vp, V_SAVE|V_IGNORE_WRITEERR, ctx, 1); | |
1895 | if (!error) | |
1896 | error = nfs_node_lock(np); | |
1897 | if (error) { | |
2d21ac55 A |
1898 | FSDBG_BOT(514, np, 0xd1e0322, 0, error); |
1899 | return (error); | |
1900 | } | |
1901 | } | |
1902 | ||
b0d623f7 A |
1903 | modified = (np->n_flag & NMODIFIED); |
1904 | nfs_node_unlock(np); | |
1905 | /* nfs_getattr() will check changed and purge caches */ | |
1906 | error = nfs_getattr(np, &nvattr, ctx, modified ? NGA_UNCACHED : NGA_CACHED); | |
1907 | if (error) { | |
1908 | FSDBG_BOT(514, np, 0xd1e0004, 0, error); | |
1909 | return (error); | |
2d21ac55 A |
1910 | } |
1911 | ||
b0d623f7 A |
1912 | if (uio_resid(uio) == 0) { |
1913 | FSDBG_BOT(514, np, 0xd1e0001, 0, 0); | |
1914 | return (0); | |
1915 | } | |
1916 | if (uio_offset(uio) < 0) { | |
1917 | FSDBG_BOT(514, np, 0xd1e0002, 0, EINVAL); | |
1918 | return (EINVAL); | |
1919 | } | |
2d21ac55 | 1920 | |
b0d623f7 A |
1921 | /* |
1922 | * set up readahead - which may be limited by: | |
1923 | * + current request length (for IO_NOCACHE) | |
1924 | * + readahead setting | |
1925 | * + file size | |
1926 | */ | |
1927 | if (nmp->nm_readahead > 0) { | |
1928 | off_t end = uio_offset(uio) + uio_resid(uio); | |
1929 | if (end > (off_t)np->n_size) | |
1930 | end = np->n_size; | |
1931 | rabn = uio_offset(uio) / biosize; | |
1932 | maxrabn = (end - 1) / biosize; | |
1933 | nfs_node_lock_force(np); | |
1934 | if (!(ioflag & IO_NOCACHE) && | |
1935 | (!rabn || (rabn == np->n_lastread) || (rabn == (np->n_lastread+1)))) { | |
1936 | maxrabn += nmp->nm_readahead; | |
1937 | if ((maxrabn * biosize) >= (off_t)np->n_size) | |
1938 | maxrabn = ((off_t)np->n_size - 1)/biosize; | |
1939 | } | |
1940 | if (maxrabn < np->n_lastrahead) | |
1941 | np->n_lastrahead = -1; | |
1942 | if (rabn < np->n_lastrahead) | |
1943 | rabn = np->n_lastrahead + 1; | |
1944 | nfs_node_unlock(np); | |
1945 | } else { | |
1946 | rabn = maxrabn = 0; | |
2d21ac55 A |
1947 | } |
1948 | ||
1949 | do { | |
1950 | ||
b0d623f7 A |
1951 | nfs_data_lock(np, NFS_DATA_LOCK_SHARED); |
1952 | lbn = uio_offset(uio) / biosize; | |
2d21ac55 A |
1953 | |
1954 | /* | |
1955 | * Copy directly from any cached pages without grabbing the bufs. | |
b0d623f7 A |
1956 | * (If we are NOCACHE and we've issued readahead requests, we need |
1957 | * to grab the NB_NCRDAHEAD bufs to drop them.) | |
2d21ac55 | 1958 | */ |
b0d623f7 A |
1959 | if ((!(ioflag & IO_NOCACHE) || !readaheads) && |
1960 | ((uio->uio_segflg == UIO_USERSPACE32 || | |
1961 | uio->uio_segflg == UIO_USERSPACE64 || | |
1962 | uio->uio_segflg == UIO_USERSPACE))) { | |
1963 | io_resid = uio_resid(uio); | |
1964 | diff = np->n_size - uio_offset(uio); | |
2d21ac55 A |
1965 | if (diff < io_resid) |
1966 | io_resid = diff; | |
1967 | if (io_resid > 0) { | |
b0d623f7 A |
1968 | int count = (io_resid > INT_MAX) ? INT_MAX : io_resid; |
1969 | error = cluster_copy_ubc_data(vp, uio, &count, 0); | |
91447636 | 1970 | if (error) { |
2d21ac55 | 1971 | nfs_data_unlock(np); |
b0d623f7 | 1972 | FSDBG_BOT(514, np, uio_offset(uio), 0xcacefeed, error); |
91447636 A |
1973 | return (error); |
1974 | } | |
2d21ac55 A |
1975 | } |
1976 | /* count any biocache reads that we just copied directly */ | |
b0d623f7 A |
1977 | if (lbn != (uio_offset(uio)/biosize)) { |
1978 | OSAddAtomic((uio_offset(uio)/biosize) - lbn, &nfsstats.biocache_reads); | |
1979 | FSDBG(514, np, 0xcacefeed, uio_offset(uio), error); | |
2d21ac55 A |
1980 | } |
1981 | } | |
1982 | ||
b0d623f7 A |
1983 | lbn = uio_offset(uio) / biosize; |
1984 | on = uio_offset(uio) % biosize; | |
1985 | nfs_node_lock_force(np); | |
1986 | np->n_lastread = (uio_offset(uio) - 1) / biosize; | |
1987 | nfs_node_unlock(np); | |
2d21ac55 A |
1988 | |
1989 | /* adjust readahead block number, if necessary */ | |
1990 | if (rabn < lbn) | |
1991 | rabn = lbn; | |
1992 | lastrabn = MIN(maxrabn, lbn + nmp->nm_readahead); | |
1993 | if (rabn <= lastrabn) { /* start readaheads */ | |
1994 | error = nfs_buf_readahead(np, ioflag, &rabn, lastrabn, thd, cred); | |
1995 | if (error) { | |
1996 | nfs_data_unlock(np); | |
1997 | FSDBG_BOT(514, np, 0xd1e000b, 1, error); | |
1998 | return (error); | |
55e303ae | 1999 | } |
b0d623f7 | 2000 | readaheads = 1; |
1c79356b A |
2001 | } |
2002 | ||
b0d623f7 | 2003 | if ((uio_resid(uio) <= 0) || (uio_offset(uio) >= (off_t)np->n_size)) { |
2d21ac55 | 2004 | nfs_data_unlock(np); |
b0d623f7 | 2005 | FSDBG_BOT(514, np, uio_offset(uio), uio_resid(uio), 0xaaaaaaaa); |
55e303ae A |
2006 | return (0); |
2007 | } | |
2008 | ||
b0d623f7 | 2009 | OSAddAtomic(1, &nfsstats.biocache_reads); |
55e303ae | 2010 | |
1c79356b A |
2011 | /* |
2012 | * If the block is in the cache and has the required data | |
2013 | * in a valid region, just copy it out. | |
2014 | * Otherwise, get the block and write back/read in, | |
2015 | * as required. | |
2016 | */ | |
2017 | again: | |
b0d623f7 A |
2018 | io_resid = uio_resid(uio); |
2019 | n = (io_resid > (biosize - on)) ? (biosize - on) : io_resid; | |
2020 | diff = np->n_size - uio_offset(uio); | |
1c79356b A |
2021 | if (diff < n) |
2022 | n = diff; | |
55e303ae | 2023 | |
2d21ac55 | 2024 | error = nfs_buf_get(np, lbn, biosize, thd, NBLK_READ, &bp); |
91447636 | 2025 | if (error) { |
2d21ac55 A |
2026 | nfs_data_unlock(np); |
2027 | FSDBG_BOT(514, np, 0xd1e000c, 0, error); | |
2028 | return (error); | |
2029 | } | |
2030 | ||
2031 | if ((ioflag & IO_NOCACHE) && ISSET(bp->nb_flags, NB_CACHE)) { | |
2032 | /* | |
2033 | * IO_NOCACHE found a cached buffer. | |
2034 | * Flush the buffer if it's dirty. | |
2035 | * Invalidate the data if it wasn't just read | |
2036 | * in as part of a "nocache readahead". | |
2037 | */ | |
2038 | if (bp->nb_dirty || (bp->nb_dirtyend > 0)) { | |
2039 | /* so write the buffer out and try again */ | |
2040 | SET(bp->nb_flags, NB_NOCACHE); | |
2041 | goto flushbuffer; | |
2042 | } | |
b0d623f7 | 2043 | if (ISSET(bp->nb_flags, NB_NCRDAHEAD)) { |
2d21ac55 | 2044 | CLR(bp->nb_flags, NB_NCRDAHEAD); |
b0d623f7 | 2045 | SET(bp->nb_flags, NB_NOCACHE); |
2d21ac55 | 2046 | } |
55e303ae A |
2047 | } |
2048 | ||
2049 | /* if any pages are valid... */ | |
2050 | if (bp->nb_valid) { | |
2051 | /* ...check for any invalid pages in the read range */ | |
2052 | int pg, firstpg, lastpg, dirtypg; | |
2053 | dirtypg = firstpg = lastpg = -1; | |
2054 | pg = on/PAGE_SIZE; | |
2055 | while (pg <= (on + n - 1)/PAGE_SIZE) { | |
2056 | if (!NBPGVALID(bp,pg)) { | |
2057 | if (firstpg < 0) | |
2058 | firstpg = pg; | |
2059 | lastpg = pg; | |
2060 | } else if (firstpg >= 0 && dirtypg < 0 && NBPGDIRTY(bp,pg)) | |
2061 | dirtypg = pg; | |
2062 | pg++; | |
2063 | } | |
2064 | ||
2065 | /* if there are no invalid pages, we're all set */ | |
2066 | if (firstpg < 0) { | |
2067 | if (bp->nb_validoff < 0) { | |
2068 | /* valid range isn't set up, so */ | |
2069 | /* set it to what we know is valid */ | |
91447636 A |
2070 | bp->nb_validoff = trunc_page(on); |
2071 | bp->nb_validend = round_page(on+n); | |
55e303ae A |
2072 | nfs_buf_normalize_valid_range(np, bp); |
2073 | } | |
2074 | goto buffer_ready; | |
2075 | } | |
2076 | ||
2077 | /* there are invalid pages in the read range */ | |
2d21ac55 A |
2078 | if (((dirtypg > firstpg) && (dirtypg < lastpg)) || |
2079 | (((firstpg*PAGE_SIZE) < bp->nb_dirtyend) && (((lastpg+1)*PAGE_SIZE) > bp->nb_dirtyoff))) { | |
2080 | /* there are also dirty page(s) (or range) in the read range, */ | |
55e303ae | 2081 | /* so write the buffer out and try again */ |
2d21ac55 | 2082 | flushbuffer: |
55e303ae A |
2083 | CLR(bp->nb_flags, (NB_DONE | NB_ERROR | NB_INVAL)); |
2084 | SET(bp->nb_flags, NB_ASYNC); | |
2d21ac55 | 2085 | if (!IS_VALID_CRED(bp->nb_wcred)) { |
91447636 A |
2086 | kauth_cred_ref(cred); |
2087 | bp->nb_wcred = cred; | |
2088 | } | |
55e303ae A |
2089 | error = nfs_buf_write(bp); |
2090 | if (error) { | |
2d21ac55 A |
2091 | nfs_data_unlock(np); |
2092 | FSDBG_BOT(514, np, 0xd1e000d, 0, error); | |
55e303ae A |
2093 | return (error); |
2094 | } | |
1c79356b A |
2095 | goto again; |
2096 | } | |
55e303ae | 2097 | if (!bp->nb_dirty && bp->nb_dirtyend <= 0 && |
2d21ac55 | 2098 | (lastpg - firstpg + 1) > (biosize/PAGE_SIZE)/2) { |
55e303ae A |
2099 | /* we need to read in more than half the buffer and the */ |
2100 | /* buffer's not dirty, so just fetch the whole buffer */ | |
2101 | bp->nb_valid = 0; | |
2102 | } else { | |
2103 | /* read the page range in */ | |
91447636 A |
2104 | uio_t auio; |
2105 | char uio_buf[ UIO_SIZEOF(1) ]; | |
2106 | ||
55e303ae | 2107 | NFS_BUF_MAP(bp); |
2d21ac55 | 2108 | auio = uio_createwithbuffer(1, (NBOFF(bp) + firstpg * PAGE_SIZE_64), |
91447636 A |
2109 | UIO_SYSSPACE, UIO_READ, &uio_buf[0], sizeof(uio_buf)); |
2110 | if (!auio) { | |
2111 | error = ENOMEM; | |
2112 | } else { | |
b0d623f7 | 2113 | uio_addiov(auio, CAST_USER_ADDR_T(bp->nb_data + (firstpg * PAGE_SIZE)), |
91447636 | 2114 | ((lastpg - firstpg + 1) * PAGE_SIZE)); |
2d21ac55 | 2115 | error = nfs_read_rpc(np, auio, ctx); |
91447636 | 2116 | } |
55e303ae | 2117 | if (error) { |
2d21ac55 | 2118 | if (ioflag & IO_NOCACHE) |
91447636 | 2119 | SET(bp->nb_flags, NB_NOCACHE); |
483a1d10 | 2120 | nfs_buf_release(bp, 1); |
2d21ac55 A |
2121 | nfs_data_unlock(np); |
2122 | FSDBG_BOT(514, np, 0xd1e000e, 0, error); | |
55e303ae A |
2123 | return (error); |
2124 | } | |
2125 | /* Make sure that the valid range is set to cover this read. */ | |
2126 | bp->nb_validoff = trunc_page_32(on); | |
2127 | bp->nb_validend = round_page_32(on+n); | |
2128 | nfs_buf_normalize_valid_range(np, bp); | |
91447636 | 2129 | if (uio_resid(auio) > 0) { |
55e303ae A |
2130 | /* if short read, must have hit EOF, */ |
2131 | /* so zero the rest of the range */ | |
91447636 | 2132 | bzero(CAST_DOWN(caddr_t, uio_curriovbase(auio)), uio_resid(auio)); |
55e303ae A |
2133 | } |
2134 | /* mark the pages (successfully read) as valid */ | |
2135 | for (pg=firstpg; pg <= lastpg; pg++) | |
2136 | NBPGVALID_SET(bp,pg); | |
2137 | } | |
1c79356b | 2138 | } |
55e303ae A |
2139 | /* if no pages are valid, read the whole block */ |
2140 | if (!bp->nb_valid) { | |
2d21ac55 A |
2141 | if (!IS_VALID_CRED(bp->nb_rcred) && IS_VALID_CRED(cred)) { |
2142 | kauth_cred_ref(cred); | |
2143 | bp->nb_rcred = cred; | |
2144 | } | |
55e303ae A |
2145 | SET(bp->nb_flags, NB_READ); |
2146 | CLR(bp->nb_flags, (NB_DONE | NB_ERROR | NB_INVAL)); | |
2d21ac55 | 2147 | error = nfs_buf_read(bp); |
b0d623f7 A |
2148 | if (ioflag & IO_NOCACHE) |
2149 | SET(bp->nb_flags, NB_NOCACHE); | |
55e303ae | 2150 | if (error) { |
2d21ac55 | 2151 | nfs_data_unlock(np); |
483a1d10 | 2152 | nfs_buf_release(bp, 1); |
2d21ac55 | 2153 | FSDBG_BOT(514, np, 0xd1e000f, 0, error); |
55e303ae A |
2154 | return (error); |
2155 | } | |
2156 | } | |
2157 | buffer_ready: | |
55e303ae A |
2158 | /* validate read range against valid range and clip */ |
2159 | if (bp->nb_validend > 0) { | |
2160 | diff = (on >= bp->nb_validend) ? 0 : (bp->nb_validend - on); | |
2161 | if (diff < n) | |
2162 | n = diff; | |
2163 | } | |
55e303ae | 2164 | if (n > 0) { |
b0d623f7 A |
2165 | NFS_BUF_MAP(bp); |
2166 | error = uiomove(bp->nb_data + on, n, uio); | |
55e303ae | 2167 | } |
2d21ac55 | 2168 | |
2d21ac55 A |
2169 | nfs_buf_release(bp, 1); |
2170 | nfs_data_unlock(np); | |
b0d623f7 A |
2171 | nfs_node_lock_force(np); |
2172 | np->n_lastread = (uio_offset(uio) - 1) / biosize; | |
2173 | nfs_node_unlock(np); | |
2174 | } while (error == 0 && uio_resid(uio) > 0 && n > 0); | |
2175 | FSDBG_BOT(514, np, uio_offset(uio), uio_resid(uio), error); | |
1c79356b A |
2176 | return (error); |
2177 | } | |
2178 | ||
2d21ac55 A |
2179 | /* |
2180 | * limit the number of outstanding async I/O writes | |
2181 | */ | |
b0d623f7 | 2182 | int |
2d21ac55 A |
2183 | nfs_async_write_start(struct nfsmount *nmp) |
2184 | { | |
2185 | int error = 0, slpflag = (nmp->nm_flag & NFSMNT_INT) ? PCATCH : 0; | |
2186 | struct timespec ts = {1, 0}; | |
2187 | ||
2188 | if (nfs_max_async_writes <= 0) | |
2189 | return (0); | |
2190 | lck_mtx_lock(&nmp->nm_lock); | |
36401178 | 2191 | while ((nfs_max_async_writes > 0) && (nmp->nm_asyncwrites >= nfs_max_async_writes)) { |
2d21ac55 A |
2192 | if ((error = nfs_sigintr(nmp, NULL, current_thread(), 1))) |
2193 | break; | |
36401178 A |
2194 | msleep(&nmp->nm_asyncwrites, &nmp->nm_lock, slpflag|(PZERO-1), "nfsasyncwrites", &ts); |
2195 | slpflag = 0; | |
2d21ac55 A |
2196 | } |
2197 | if (!error) | |
2198 | nmp->nm_asyncwrites++; | |
2199 | lck_mtx_unlock(&nmp->nm_lock); | |
2200 | return (error); | |
2201 | } | |
b0d623f7 | 2202 | void |
2d21ac55 A |
2203 | nfs_async_write_done(struct nfsmount *nmp) |
2204 | { | |
2205 | if (nmp->nm_asyncwrites <= 0) | |
2206 | return; | |
2207 | lck_mtx_lock(&nmp->nm_lock); | |
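	/* if we were at (or over) the limit, wake up any writers waiting in nfs_async_write_start() */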
2208 | if (nmp->nm_asyncwrites-- >= nfs_max_async_writes) | |
2209 | wakeup(&nmp->nm_asyncwrites); | |
2210 | lck_mtx_unlock(&nmp->nm_lock); | |
2211 | } | |
fa4905b1 | 2212 | |
1c79356b | 2213 | /* |
2d21ac55 A |
2214 | * write (or commit) the given NFS buffer |
2215 | * | |
2216 | * Commit the buffer if we can. | |
2217 | * Write out any dirty range. | |
2218 | * If any dirty pages remain, write them out. | |
2219 | * Mark buffer done. | |
2220 | * | |
2221 | * For async requests, all the work beyond sending the initial | |
2222 | * write RPC is handled in the RPC callback(s). | |
1c79356b A |
2223 | */ |
2224 | int | |
2d21ac55 | 2225 | nfs_buf_write(struct nfsbuf *bp) |
1c79356b | 2226 | { |
2d21ac55 A |
2227 | int error = 0, oldflags, async; |
2228 | nfsnode_t np; | |
2229 | thread_t thd; | |
91447636 | 2230 | kauth_cred_t cred; |
2d21ac55 A |
2231 | proc_t p = current_proc(); |
2232 | int iomode, doff, dend, firstpg, lastpg; | |
2233 | uint32_t pagemask; | |
91447636 | 2234 | |
2d21ac55 | 2235 | FSDBG_TOP(553, bp, NBOFF(bp), bp->nb_flags, 0); |
91447636 | 2236 | |
2d21ac55 A |
2237 | if (!ISSET(bp->nb_lflags, NBL_BUSY)) |
2238 | panic("nfs_buf_write: buffer is not busy???"); | |
91447636 | 2239 | |
2d21ac55 A |
2240 | np = bp->nb_np; |
2241 | async = ISSET(bp->nb_flags, NB_ASYNC); | |
2242 | oldflags = bp->nb_flags; | |
91447636 | 2243 | |
2d21ac55 A |
2244 | CLR(bp->nb_flags, (NB_READ|NB_DONE|NB_ERROR|NB_DELWRI)); |
2245 | if (ISSET(oldflags, NB_DELWRI)) { | |
2246 | lck_mtx_lock(nfs_buf_mutex); | |
2247 | nfs_nbdwrite--; | |
2248 | NFSBUFCNTCHK(); | |
2249 | lck_mtx_unlock(nfs_buf_mutex); | |
2250 | wakeup(&nfs_nbdwrite); | |
91447636 | 2251 | } |
2d21ac55 A |
2252 | |
2253 | /* move to clean list */ | |
2254 | if (ISSET(oldflags, (NB_ASYNC|NB_DELWRI))) { | |
2255 | lck_mtx_lock(nfs_buf_mutex); | |
2256 | if (bp->nb_vnbufs.le_next != NFSNOLIST) | |
2257 | LIST_REMOVE(bp, nb_vnbufs); | |
2258 | LIST_INSERT_HEAD(&np->n_cleanblkhd, bp, nb_vnbufs); | |
2259 | lck_mtx_unlock(nfs_buf_mutex); | |
1c79356b | 2260 | } |
b0d623f7 A |
2261 | nfs_node_lock_force(np); |
2262 | np->n_numoutput++; | |
2263 | nfs_node_unlock(np); | |
2d21ac55 | 2264 | vnode_startwrite(NFSTOV(np)); |
0c530ab8 | 2265 | |
2d21ac55 | 2266 | if (p && p->p_stats) |
b0d623f7 | 2267 | OSIncrementAtomicLong(&p->p_stats->p_ru.ru_oublock); |
0c530ab8 | 2268 | |
2d21ac55 A |
2269 | cred = bp->nb_wcred; |
2270 | if (!IS_VALID_CRED(cred) && ISSET(bp->nb_flags, NB_READ)) | |
2271 | cred = bp->nb_rcred; /* shouldn't really happen, but... */ | |
2272 | if (IS_VALID_CRED(cred)) | |
2273 | kauth_cred_ref(cred); | |
2274 | thd = async ? NULL : current_thread(); | |
2275 | ||
2276 | /* We need to make sure the pages are locked before doing I/O. */ | |
2277 | if (!ISSET(bp->nb_flags, NB_META) && UBCINFOEXISTS(NFSTOV(np))) { | |
2278 | if (!ISSET(bp->nb_flags, NB_PAGELIST)) { | |
2279 | error = nfs_buf_upl_setup(bp); | |
55e303ae | 2280 | if (error) { |
2d21ac55 A |
2281 | printf("nfs_buf_write: upl create failed %d\n", error); |
2282 | SET(bp->nb_flags, NB_ERROR); | |
2283 | bp->nb_error = error = EIO; | |
2284 | nfs_buf_iodone(bp); | |
2285 | goto out; | |
55e303ae | 2286 | } |
2d21ac55 | 2287 | nfs_buf_upl_check(bp); |
1c79356b A |
2288 | } |
2289 | } | |
55e303ae | 2290 | |
2d21ac55 A |
2291 | /* If NB_NEEDCOMMIT is set, a commit RPC may do the trick. */ |
2292 | if (ISSET(bp->nb_flags, NB_NEEDCOMMIT)) | |
2293 | nfs_buf_check_write_verifier(np, bp); | |
2294 | if (ISSET(bp->nb_flags, NB_NEEDCOMMIT)) { | |
2295 | struct nfsmount *nmp = NFSTONMP(np); | |
2296 | if (!nmp) { | |
2297 | SET(bp->nb_flags, NB_ERROR); | |
2298 | bp->nb_error = error = EIO; | |
2299 | nfs_buf_iodone(bp); | |
2300 | goto out; | |
2301 | } | |
2302 | SET(bp->nb_flags, NB_WRITEINPROG); | |
2303 | error = nmp->nm_funcs->nf_commit_rpc(np, NBOFF(bp) + bp->nb_dirtyoff, | |
2304 | bp->nb_dirtyend - bp->nb_dirtyoff, bp->nb_wcred); | |
2305 | CLR(bp->nb_flags, NB_WRITEINPROG); | |
2306 | if (error) { | |
2307 | if (error != NFSERR_STALEWRITEVERF) { | |
2308 | SET(bp->nb_flags, NB_ERROR); | |
2309 | bp->nb_error = error; | |
55e303ae | 2310 | } |
2d21ac55 A |
2311 | nfs_buf_iodone(bp); |
2312 | goto out; | |
2313 | } | |
2314 | bp->nb_dirtyoff = bp->nb_dirtyend = 0; | |
2315 | CLR(bp->nb_flags, NB_NEEDCOMMIT); | |
b0d623f7 | 2316 | nfs_node_lock_force(np); |
2d21ac55 A |
2317 | np->n_needcommitcnt--; |
2318 | CHECK_NEEDCOMMITCNT(np); | |
b0d623f7 | 2319 | nfs_node_unlock(np); |
2d21ac55 A |
2320 | } |
2321 | if (!error && (bp->nb_dirtyend > 0)) { | |
2322 | /* sanity check the dirty range */ | |
2323 | if (NBOFF(bp) + bp->nb_dirtyend > (off_t) np->n_size) { | |
2324 | bp->nb_dirtyend = np->n_size - NBOFF(bp); | |
2325 | if (bp->nb_dirtyoff >= bp->nb_dirtyend) | |
2326 | bp->nb_dirtyoff = bp->nb_dirtyend = 0; | |
55e303ae | 2327 | } |
91447636 | 2328 | } |
2d21ac55 A |
2329 | if (!error && (bp->nb_dirtyend > 0)) { |
2330 | /* there's a dirty range that needs to be written out */ | |
2331 | NFS_BUF_MAP(bp); | |
2332 | ||
2333 | doff = bp->nb_dirtyoff; | |
2334 | dend = bp->nb_dirtyend; | |
2335 | ||
2336 | /* if doff page is dirty, move doff to start of page */ | |
2337 | if (NBPGDIRTY(bp, doff / PAGE_SIZE)) | |
2338 | doff -= doff & PAGE_MASK; | |
2339 | /* try to expand write range to include preceding dirty pages */ | |
2340 | if (!(doff & PAGE_MASK)) | |
2341 | while ((doff > 0) && NBPGDIRTY(bp, (doff - 1) / PAGE_SIZE)) | |
2342 | doff -= PAGE_SIZE; | |
2343 | /* if dend page is dirty, move dend to start of next page */ | |
2344 | if ((dend & PAGE_MASK) && NBPGDIRTY(bp, dend / PAGE_SIZE)) | |
2345 | dend = round_page_32(dend); | |
2346 | /* try to expand write range to include trailing dirty pages */ | |
2347 | if (!(dend & PAGE_MASK)) | |
b0d623f7 | 2348 | while ((dend < (int)bp->nb_bufsize) && NBPGDIRTY(bp, dend / PAGE_SIZE)) |
2d21ac55 A |
2349 | dend += PAGE_SIZE; |
2350 | /* make sure to keep dend clipped to EOF */ | |
2351 | if ((NBOFF(bp) + dend) > (off_t) np->n_size) | |
2352 | dend = np->n_size - NBOFF(bp); | |
2353 | /* calculate range of complete pages being written */ | |
2354 | firstpg = round_page_32(doff) / PAGE_SIZE; | |
2355 | lastpg = (trunc_page_32(dend) - 1) / PAGE_SIZE; | |
2356 | /* calculate mask for that page range */ | |
2357 | pagemask = ((1 << (lastpg + 1)) - 1) & ~((1 << firstpg) - 1); | |
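		/* e.g. firstpg 1, lastpg 3 -> pagemask 0b1110: bits set for pages 1-3 */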
91447636 | 2358 | |
fa4905b1 | 2359 | /* |
2d21ac55 A |
2360 | * compare page mask to nb_dirty; if there are other dirty pages |
2361 | * then write FILESYNC; otherwise, write UNSTABLE if async and | |
2362 | * not needcommit/stable; otherwise write FILESYNC | |
fa4905b1 | 2363 | */ |
2d21ac55 A |
2364 | if (bp->nb_dirty & ~pagemask) |
2365 | iomode = NFS_WRITE_FILESYNC; | |
2366 | else if ((bp->nb_flags & (NB_ASYNC | NB_NEEDCOMMIT | NB_STABLE)) == NB_ASYNC) | |
2367 | iomode = NFS_WRITE_UNSTABLE; | |
2368 | else | |
2369 | iomode = NFS_WRITE_FILESYNC; | |
55e303ae | 2370 | |
2d21ac55 A |
2371 | /* write the whole contiguous dirty range */ |
2372 | bp->nb_offio = doff; | |
2373 | bp->nb_endio = dend; | |
55e303ae | 2374 | |
b0d623f7 | 2375 | OSAddAtomic(1, &nfsstats.write_bios); |
55e303ae | 2376 | |
2d21ac55 A |
2377 | SET(bp->nb_flags, NB_WRITEINPROG); |
2378 | error = nfs_buf_write_rpc(bp, iomode, thd, cred); | |
55e303ae | 2379 | /* |
2d21ac55 A |
2380 | * For async I/O, the callbacks will finish up the |
2381 | * write and push out any dirty pages. Otherwise, | |
2382 | * the write has already been finished and any dirty | |
2383 | * pages pushed out. | |
55e303ae | 2384 | */ |
2d21ac55 A |
2385 | } else { |
2386 | if (!error && bp->nb_dirty) /* write out any dirty pages */ | |
2387 | error = nfs_buf_write_dirty_pages(bp, thd, cred); | |
2388 | nfs_buf_iodone(bp); | |
2389 | } | |
2390 | /* note: bp is still valid only for !async case */ | |
2391 | out: | |
2392 | if (!async) { | |
2393 | error = nfs_buf_iowait(bp); | |
2394 | /* move to clean list */ | |
2395 | if (oldflags & NB_DELWRI) { | |
2396 | lck_mtx_lock(nfs_buf_mutex); | |
2397 | if (bp->nb_vnbufs.le_next != NFSNOLIST) | |
2398 | LIST_REMOVE(bp, nb_vnbufs); | |
2399 | LIST_INSERT_HEAD(&np->n_cleanblkhd, bp, nb_vnbufs); | |
2400 | lck_mtx_unlock(nfs_buf_mutex); | |
2401 | } | |
2402 | FSDBG_BOT(553, bp, NBOFF(bp), bp->nb_flags, error); | |
2403 | nfs_buf_release(bp, 1); | |
2404 | /* check if we need to invalidate (and we can) */ | |
2405 | if ((np->n_flag & NNEEDINVALIDATE) && | |
2406 | !(np->n_bflag & (NBINVALINPROG|NBFLUSHINPROG))) { | |
2407 | int invalidate = 0; | |
b0d623f7 | 2408 | nfs_node_lock_force(np); |
2d21ac55 A |
2409 | if (np->n_flag & NNEEDINVALIDATE) { |
2410 | invalidate = 1; | |
2411 | np->n_flag &= ~NNEEDINVALIDATE; | |
55e303ae | 2412 | } |
b0d623f7 | 2413 | nfs_node_unlock(np); |
2d21ac55 A |
2414 | if (invalidate) { |
2415 | /* | |
2416 | * There was a write error and we need to | |
2417 | * invalidate attrs and flush buffers in | |
2418 | * order to sync up with the server. | |
2419 | * (if this write was extending the file, | |
2420 | * we may no longer know the correct size) | |
2421 | * | |
2422 | * But we couldn't call vinvalbuf while holding | |
2423 | * the buffer busy. So we call vinvalbuf() after | |
2424 | * releasing the buffer. | |
2425 | */ | |
2426 | nfs_vinvalbuf2(NFSTOV(np), V_SAVE|V_IGNORE_WRITEERR, thd, cred, 1); | |
55e303ae | 2427 | } |
55e303ae | 2428 | } |
2d21ac55 A |
2429 | } |
2430 | ||
2431 | if (IS_VALID_CRED(cred)) | |
2432 | kauth_cred_unref(&cred); | |
2433 | return (error); | |
2434 | } | |
55e303ae | 2435 | |
2d21ac55 A |
2436 | /* |
2437 | * finish the writing of a buffer | |
2438 | */ | |
2439 | void | |
2440 | nfs_buf_write_finish(struct nfsbuf *bp, thread_t thd, kauth_cred_t cred) | |
2441 | { | |
2442 | nfsnode_t np = bp->nb_np; | |
2443 | int error = (bp->nb_flags & NB_ERROR) ? bp->nb_error : 0; | |
2444 | int firstpg, lastpg; | |
2445 | uint32_t pagemask; | |
2446 | ||
2447 | if ((error == EINTR) || (error == ERESTART)) { | |
2448 | CLR(bp->nb_flags, NB_ERROR); | |
2449 | SET(bp->nb_flags, NB_EINTR); | |
2450 | } | |
2451 | ||
2452 | if (!error) { | |
2453 | /* calculate range of complete pages being written */ | |
2454 | firstpg = round_page_32(bp->nb_offio) / PAGE_SIZE; | |
2455 | lastpg = (trunc_page_32(bp->nb_endio) - 1) / PAGE_SIZE; | |
2456 | /* calculate mask for that page range written */ | |
2457 | pagemask = ((1 << (lastpg + 1)) - 1) & ~((1 << firstpg) - 1); | |
2458 | /* clear dirty bits for pages we've written */ | |
2459 | bp->nb_dirty &= ~pagemask; | |
2460 | } | |
2461 | ||
2462 | /* manage needcommit state */ | |
2463 | if (!error && (bp->nb_commitlevel == NFS_WRITE_UNSTABLE)) { | |
2464 | if (!ISSET(bp->nb_flags, NB_NEEDCOMMIT)) { | |
b0d623f7 | 2465 | nfs_node_lock_force(np); |
2d21ac55 | 2466 | np->n_needcommitcnt++; |
b0d623f7 | 2467 | nfs_node_unlock(np); |
2d21ac55 A |
2468 | SET(bp->nb_flags, NB_NEEDCOMMIT); |
2469 | } | |
2470 | /* make sure nb_dirtyoff/nb_dirtyend reflect actual range written */ | |
2471 | bp->nb_dirtyoff = bp->nb_offio; | |
2472 | bp->nb_dirtyend = bp->nb_endio; | |
2473 | } else if (ISSET(bp->nb_flags, NB_NEEDCOMMIT)) { | |
b0d623f7 | 2474 | nfs_node_lock_force(np); |
2d21ac55 A |
2475 | np->n_needcommitcnt--; |
2476 | CHECK_NEEDCOMMITCNT(np); | |
b0d623f7 | 2477 | nfs_node_unlock(np); |
2d21ac55 A |
2478 | CLR(bp->nb_flags, NB_NEEDCOMMIT); |
2479 | } | |
2480 | ||
2481 | CLR(bp->nb_flags, NB_WRITEINPROG); | |
2482 | ||
2483 | /* | |
2484 | * For an unstable write, the buffer is still treated as dirty until | |
2485 | * a commit (or stable (re)write) is performed. Buffers needing only | |
2486 | * a commit are marked with the NB_DELWRI and NB_NEEDCOMMIT flags. | |
2487 | * | |
2488 | * If the write was interrupted we set NB_EINTR. Don't set NB_ERROR | |
2489 | * because that would cause the buffer to be dropped. The buffer is | |
2490 | * still valid and simply needs to be written again. | |
2491 | */ | |
2492 | if ((error == EINTR) || (error == ERESTART) || (!error && (bp->nb_flags & NB_NEEDCOMMIT))) { | |
2493 | CLR(bp->nb_flags, NB_INVAL); | |
2494 | if (!ISSET(bp->nb_flags, NB_DELWRI)) { | |
2495 | SET(bp->nb_flags, NB_DELWRI); | |
2496 | lck_mtx_lock(nfs_buf_mutex); | |
2497 | nfs_nbdwrite++; | |
2498 | NFSBUFCNTCHK(); | |
2499 | lck_mtx_unlock(nfs_buf_mutex); | |
2500 | } | |
fa4905b1 | 2501 | /* |
2d21ac55 A |
2502 | * Since for the NB_ASYNC case, we've reassigned the buffer to the |
2503 | * clean list, we have to reassign it back to the dirty one. Ugh. | |
fa4905b1 | 2504 | */ |
2d21ac55 A |
2505 | if (ISSET(bp->nb_flags, NB_ASYNC)) { |
2506 | /* move to dirty list */ | |
2507 | lck_mtx_lock(nfs_buf_mutex); | |
2508 | if (bp->nb_vnbufs.le_next != NFSNOLIST) | |
2509 | LIST_REMOVE(bp, nb_vnbufs); | |
2510 | LIST_INSERT_HEAD(&np->n_dirtyblkhd, bp, nb_vnbufs); | |
2511 | lck_mtx_unlock(nfs_buf_mutex); | |
2512 | } | |
2513 | } else { | |
2514 | /* either there's an error or we don't need to commit */ | |
2515 | if (error) { | |
2516 | /* | |
2517 | * There was a write error and we need to invalidate | |
2518 | * attrs and flush buffers in order to sync up with the | |
2519 | * server. (if this write was extending the file, we | |
2520 | * may no longer know the correct size) | |
2521 | * | |
2522 | * But we can't call vinvalbuf while holding this | |
2523 | * buffer busy. Set a flag to do it after releasing | |
2524 | * the buffer. | |
2525 | */ | |
b0d623f7 | 2526 | nfs_node_lock_force(np); |
2d21ac55 A |
2527 | np->n_error = error; |
2528 | np->n_flag |= (NWRITEERR | NNEEDINVALIDATE); | |
2529 | NATTRINVALIDATE(np); | |
b0d623f7 | 2530 | nfs_node_unlock(np); |
2d21ac55 A |
2531 | } |
2532 | /* clear the dirty range */ | |
2533 | bp->nb_dirtyoff = bp->nb_dirtyend = 0; | |
2534 | } | |
55e303ae | 2535 | |
2d21ac55 A |
2536 | if (!error && bp->nb_dirty) |
2537 | nfs_buf_write_dirty_pages(bp, thd, cred); | |
2538 | nfs_buf_iodone(bp); | |
2539 | } | |
fa4905b1 | 2540 | |
2d21ac55 A |
2541 | /* |
2542 | * write out any pages marked dirty in a buffer | |
2543 | * | |
2544 | * We do use unstable writes and follow up with a commit. | |
2545 | * If we catch the write verifier changing, we'll restart and | |
2546 | * redo the writes FILESYNC. | |
2547 | */ | |
2548 | int | |
2549 | nfs_buf_write_dirty_pages(struct nfsbuf *bp, thread_t thd, kauth_cred_t cred) | |
2550 | { | |
2551 | nfsnode_t np = bp->nb_np; | |
2552 | struct nfsmount *nmp = NFSTONMP(np); | |
2553 | int error = 0, commit, iomode, iomode2, len, pg, count, npages, off; | |
2554 | uint32_t dirty = bp->nb_dirty; | |
2555 | uint64_t wverf; | |
b0d623f7 A |
2556 | uio_t auio; |
2557 | char uio_buf [ UIO_SIZEOF(1) ]; | |
55e303ae | 2558 | |
2d21ac55 A |
2559 | if (!bp->nb_dirty) |
2560 | return (0); | |
2561 | ||
2562 | /* there are pages marked dirty that need to be written out */ | |
b0d623f7 | 2563 | OSAddAtomic(1, &nfsstats.write_bios); |
2d21ac55 A |
2564 | NFS_BUF_MAP(bp); |
2565 | SET(bp->nb_flags, NB_WRITEINPROG); | |
2566 | npages = bp->nb_bufsize / PAGE_SIZE; | |
2567 | iomode = NFS_WRITE_UNSTABLE; | |
2568 | ||
b0d623f7 A |
2569 | auio = uio_createwithbuffer(1, 0, UIO_SYSSPACE, UIO_WRITE, |
2570 | &uio_buf, sizeof(uio_buf)); | |
2d21ac55 A |
2571 | |
2572 | again: | |
2573 | dirty = bp->nb_dirty; | |
2574 | wverf = bp->nb_verf; | |
2575 | commit = NFS_WRITE_FILESYNC; | |
2576 | for (pg = 0; pg < npages; pg++) { | |
2577 | if (!NBPGDIRTY(bp, pg)) | |
2578 | continue; | |
2579 | count = 1; | |
2580 | while (((pg + count) < npages) && NBPGDIRTY(bp, pg + count)) | |
2581 | count++; | |
2582 | /* write count pages starting with page pg */ | |
2583 | off = pg * PAGE_SIZE; | |
2584 | len = count * PAGE_SIZE; | |
2585 | /* clip writes to EOF */ | |
2586 | if (NBOFF(bp) + off + len > (off_t) np->n_size) | |
2587 | len -= (NBOFF(bp) + off + len) - np->n_size; | |
2588 | if (len > 0) { | |
2589 | iomode2 = iomode; | |
b0d623f7 A |
2590 | uio_reset(auio, NBOFF(bp) + off, UIO_SYSSPACE, UIO_WRITE); |
2591 | uio_addiov(auio, CAST_USER_ADDR_T(bp->nb_data + off), len); | |
2592 | error = nfs_write_rpc2(np, auio, thd, cred, &iomode2, &bp->nb_verf); | |
2d21ac55 A |
2593 | if (error) |
2594 | break; | |
2595 | if (iomode2 < commit) /* Retain the lowest commitment level returned. */ | |
2596 | commit = iomode2; | |
2597 | if ((commit != NFS_WRITE_FILESYNC) && (wverf != bp->nb_verf)) { | |
2598 | /* verifier changed, redo all the writes filesync */ | |
2599 | iomode = NFS_WRITE_FILESYNC; | |
2600 | goto again; | |
fa4905b1 A |
2601 | } |
2602 | } | |
2d21ac55 A |
2603 | /* clear dirty bits */ |
2604 | while (count--) { | |
2605 | dirty &= ~(1 << pg); | |
2606 | if (count) /* leave pg on last page */ | |
2607 | pg++; | |
2608 | } | |
2609 | } | |
2610 | CLR(bp->nb_flags, NB_WRITEINPROG); | |
2611 | ||
2612 | if (!error && (commit != NFS_WRITE_FILESYNC)) { | |
2613 | error = nmp->nm_funcs->nf_commit_rpc(np, NBOFF(bp), bp->nb_bufsize, cred); | |
2614 | if (error == NFSERR_STALEWRITEVERF) { | |
2615 | /* verifier changed, so we need to restart all the writes */ | |
2616 | iomode = NFS_WRITE_FILESYNC; | |
2617 | goto again; | |
2618 | } | |
2619 | } | |
2620 | if (!error) { | |
2621 | bp->nb_dirty = dirty; | |
2622 | } else { | |
2623 | SET(bp->nb_flags, NB_ERROR); | |
2624 | bp->nb_error = error; | |
2625 | } | |
2626 | return (error); | |
2627 | } | |
2628 | ||
2629 | /* | |
2630 | * initiate the NFS WRITE RPC(s) for a buffer | |
2631 | */ | |
2632 | int | |
2633 | nfs_buf_write_rpc(struct nfsbuf *bp, int iomode, thread_t thd, kauth_cred_t cred) | |
2634 | { | |
2635 | struct nfsmount *nmp; | |
2636 | nfsnode_t np = bp->nb_np; | |
2637 | int error = 0, nfsvers, async; | |
b0d623f7 A |
2638 | int offset, nrpcs; |
2639 | uint32_t nmwsize, length, len; | |
2d21ac55 A |
2640 | struct nfsreq *req; |
2641 | struct nfsreq_cbinfo cb; | |
b0d623f7 A |
2642 | uio_t auio; |
2643 | char uio_buf [ UIO_SIZEOF(1) ]; | |
2d21ac55 A |
2644 | |
2645 | nmp = NFSTONMP(np); | |
2646 | if (!nmp) { | |
2647 | bp->nb_error = error = ENXIO; | |
2648 | SET(bp->nb_flags, NB_ERROR); | |
2649 | nfs_buf_iodone(bp); | |
2650 | return (error); | |
2651 | } | |
2652 | nfsvers = nmp->nm_vers; | |
2653 | nmwsize = nmp->nm_wsize; | |
2654 | ||
2655 | offset = bp->nb_offio; | |
2656 | length = bp->nb_endio - bp->nb_offio; | |
2657 | ||
2658 | /* Note: Can only do async I/O if nfsiods are configured. */ | |
2659 | async = (bp->nb_flags & NB_ASYNC) && (NFSIOD_MAX > 0); | |
2660 | bp->nb_commitlevel = NFS_WRITE_FILESYNC; | |
2661 | cb.rcb_func = async ? nfs_buf_write_rpc_finish : NULL; | |
2662 | cb.rcb_bp = bp; | |
2663 | ||
2664 | if ((nfsvers == NFS_VER2) && ((NBOFF(bp) + bp->nb_endio) > 0xffffffffLL)) { | |
2665 | bp->nb_error = error = EFBIG; | |
2666 | SET(bp->nb_flags, NB_ERROR); | |
2667 | nfs_buf_iodone(bp); | |
2668 | return (error); | |
2669 | } | |
2670 | ||
b0d623f7 A |
2671 | auio = uio_createwithbuffer(1, NBOFF(bp) + offset, UIO_SYSSPACE, |
2672 | UIO_WRITE, &uio_buf, sizeof(uio_buf)); | |
2673 | uio_addiov(auio, CAST_USER_ADDR_T(bp->nb_data + offset), length); | |
2d21ac55 A |
2674 | |
2675 | bp->nb_rpcs = nrpcs = (length + nmwsize - 1) / nmwsize; | |
2676 | if (async && (nrpcs > 1)) { | |
2677 | SET(bp->nb_flags, NB_MULTASYNCRPC); | |
2678 | } else { | |
2679 | CLR(bp->nb_flags, NB_MULTASYNCRPC); | |
2680 | } | |
2681 | ||
2682 | while (length > 0) { | |
2683 | if (ISSET(bp->nb_flags, NB_ERROR)) { | |
2684 | error = bp->nb_error; | |
2685 | break; | |
2686 | } | |
2687 | len = (length > nmwsize) ? nmwsize : length; | |
2688 | cb.rcb_args[0] = offset; | |
2689 | cb.rcb_args[1] = len; | |
b0d623f7 A |
2690 | if (nmp->nm_vers >= NFS_VER4) |
2691 | cb.rcb_args[2] = nmp->nm_stategenid; | |
2d21ac55 A |
2692 | if (async && ((error = nfs_async_write_start(nmp)))) |
2693 | break; | |
2694 | req = NULL; | |
b0d623f7 | 2695 | error = nmp->nm_funcs->nf_write_rpc_async(np, auio, len, thd, cred, |
2d21ac55 A |
2696 | iomode, &cb, &req); |
2697 | if (error) { | |
2698 | if (async) | |
2699 | nfs_async_write_done(nmp); | |
2700 | break; | |
2701 | } | |
2702 | offset += len; | |
2703 | length -= len; | |
2704 | if (async) | |
2705 | continue; | |
2706 | nfs_buf_write_rpc_finish(req); | |
2707 | } | |
2708 | ||
2709 | if (length > 0) { | |
fa4905b1 | 2710 | /* |
2d21ac55 A |
2711 | * Something bad happened while trying to send the RPCs. |
2712 | * Wait for any outstanding requests to complete. | |
fa4905b1 | 2713 | */ |
2d21ac55 A |
2714 | bp->nb_error = error; |
2715 | SET(bp->nb_flags, NB_ERROR); | |
2716 | if (ISSET(bp->nb_flags, NB_MULTASYNCRPC)) { | |
2717 | nrpcs = (length + nmwsize - 1) / nmwsize; | |
2718 | lck_mtx_lock(nfs_buf_mutex); | |
2719 | bp->nb_rpcs -= nrpcs; | |
2720 | if (bp->nb_rpcs == 0) { | |
2721 | /* No RPCs left, so the buffer's done */ | |
2722 | lck_mtx_unlock(nfs_buf_mutex); | |
2723 | nfs_buf_write_finish(bp, thd, cred); | |
2724 | } else { | |
2725 | /* wait for the last RPC to mark it done */ | |
2726 | while (bp->nb_rpcs > 0) | |
2727 | msleep(&bp->nb_rpcs, nfs_buf_mutex, 0, | |
2728 | "nfs_buf_write_rpc_cancel", NULL); | |
2729 | lck_mtx_unlock(nfs_buf_mutex); | |
fa4905b1 | 2730 | } |
2d21ac55 A |
2731 | } else { |
2732 | nfs_buf_write_finish(bp, thd, cred); | |
2733 | } | |
2734 | } | |
55e303ae | 2735 | |
2d21ac55 A |
2736 | return (error); |
2737 | } | |
2738 | ||
2739 | /* | |
2740 | * finish up an NFS WRITE RPC on a buffer | |
2741 | */ | |
2742 | void | |
2743 | nfs_buf_write_rpc_finish(struct nfsreq *req) | |
2744 | { | |
2745 | int error = 0, nfsvers, offset, length, multasyncrpc, finished; | |
2746 | int committed = NFS_WRITE_FILESYNC; | |
2747 | uint64_t wverf = 0; | |
2748 | size_t rlen; | |
2749 | void *wakeme = NULL; | |
2750 | struct nfsreq_cbinfo cb; | |
2751 | struct nfsreq *wreq = NULL; | |
2752 | struct nfsbuf *bp; | |
2753 | struct nfsmount *nmp; | |
2754 | nfsnode_t np; | |
2755 | thread_t thd; | |
2756 | kauth_cred_t cred; | |
b0d623f7 A |
2757 | uio_t auio; |
2758 | char uio_buf [ UIO_SIZEOF(1) ]; | |
2d21ac55 A |
2759 | |
2760 | finish: | |
2761 | np = req->r_np; | |
2762 | thd = req->r_thread; | |
2763 | cred = req->r_cred; | |
2764 | if (IS_VALID_CRED(cred)) | |
2765 | kauth_cred_ref(cred); | |
2766 | cb = req->r_callback; | |
2767 | bp = cb.rcb_bp; | |
2768 | ||
2769 | nmp = NFSTONMP(np); | |
2770 | if (!nmp) { | |
2771 | SET(bp->nb_flags, NB_ERROR); | |
2772 | bp->nb_error = error = ENXIO; | |
2773 | } | |
2774 | if (error || ISSET(bp->nb_flags, NB_ERROR)) { | |
2775 | /* just drop it */ | |
2776 | nfs_request_async_cancel(req); | |
2777 | goto out; | |
2778 | } | |
2779 | nfsvers = nmp->nm_vers; | |
2780 | ||
2781 | offset = cb.rcb_args[0]; | |
2782 | rlen = length = cb.rcb_args[1]; | |
2783 | ||
2784 | /* finish the RPC */ | |
2785 | error = nmp->nm_funcs->nf_write_rpc_async_finish(np, req, &committed, &rlen, &wverf); | |
2786 | if ((error == EINPROGRESS) && cb.rcb_func) { | |
2787 | /* async request restarted */ | |
2788 | if (IS_VALID_CRED(cred)) | |
2789 | kauth_cred_unref(&cred); | |
2790 | return; | |
2791 | } | |
b0d623f7 A |
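	/* NFSv4: errors indicating lost state trigger mount recovery; retry the write once recovery completes */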
2792 | if ((nmp->nm_vers >= NFS_VER4) && nfs_mount_state_error_should_restart(error) && !ISSET(bp->nb_flags, NB_ERROR)) { |
2793 | lck_mtx_lock(&nmp->nm_lock); | |
2794 | if ((error != NFSERR_GRACE) && (cb.rcb_args[2] == nmp->nm_stategenid) && !(nmp->nm_state & NFSSTA_RECOVER)) { | |
2795 | printf("nfs_buf_write_rpc_finish: error %d, initiating recovery\n", error); | |
2796 | nmp->nm_state |= NFSSTA_RECOVER; | |
2797 | nfs_mount_sock_thread_wake(nmp); | |
2798 | } | |
2799 | lck_mtx_unlock(&nmp->nm_lock); | |
2800 | if (error == NFSERR_GRACE) | |
2801 | tsleep(&nmp->nm_state, (PZERO-1), "nfsgrace", 2*hz); | |
2802 | if (!(error = nfs_mount_state_wait_for_recovery(nmp))) { | |
2803 | rlen = 0; | |
2804 | goto writeagain; | |
2805 | } | |
2806 | } | |
2d21ac55 A |
2807 | if (error) { |
2808 | SET(bp->nb_flags, NB_ERROR); | |
2809 | bp->nb_error = error; | |
2810 | } | |
2811 | if (error || (nfsvers == NFS_VER2)) | |
2812 | goto out; | |
2813 | if (rlen <= 0) { | |
2814 | SET(bp->nb_flags, NB_ERROR); | |
2815 | bp->nb_error = error = EIO; | |
2816 | goto out; | |
2817 | } | |
2818 | ||
2819 | /* save lowest commit level returned */ | |
2820 | if (committed < bp->nb_commitlevel) | |
2821 | bp->nb_commitlevel = committed; | |
2822 | ||
2823 | /* check the write verifier */ | |
2824 | if (!bp->nb_verf) { | |
2825 | bp->nb_verf = wverf; | |
2826 | } else if (bp->nb_verf != wverf) { | |
2827 | /* verifier changed, so buffer will need to be rewritten */ | |
2828 | bp->nb_flags |= NB_STALEWVERF; | |
2829 | bp->nb_commitlevel = NFS_WRITE_UNSTABLE; | |
2830 | bp->nb_verf = wverf; | |
2831 | } | |
2832 | ||
2833 | /* | |
2834 | * check for a short write | |
2835 | * | |
2836 | * If the server didn't write all the data, then we | |
2837 | * need to issue another write for the rest of it. | |
2838 | * (Don't bother if the buffer hit an error or stale wverf.) | |
2839 | */ | |
2840 | if (((int)rlen < length) && !(bp->nb_flags & (NB_STALEWVERF|NB_ERROR))) { | |
b0d623f7 | 2841 | writeagain: |
2d21ac55 A |
2842 | offset += rlen; |
2843 | length -= rlen; | |
2844 | ||
b0d623f7 A |
2845 | auio = uio_createwithbuffer(1, NBOFF(bp) + offset, UIO_SYSSPACE, |
2846 | UIO_WRITE, &uio_buf, sizeof(uio_buf)); | |
2847 | uio_addiov(auio, CAST_USER_ADDR_T(bp->nb_data + offset), length); | |
55e303ae | 2848 | |
2d21ac55 A |
2849 | cb.rcb_args[0] = offset; |
2850 | cb.rcb_args[1] = length; | |
b0d623f7 A |
2851 | if (nmp->nm_vers >= NFS_VER4) |
2852 | cb.rcb_args[2] = nmp->nm_stategenid; | |
55e303ae | 2853 | |
b0d623f7 A |
2854 | // XXX iomode should really match the original request |
2855 | error = nmp->nm_funcs->nf_write_rpc_async(np, auio, length, thd, cred, | |
2d21ac55 A |
2856 | NFS_WRITE_FILESYNC, &cb, &wreq); |
2857 | if (!error) { | |
2858 | if (IS_VALID_CRED(cred)) | |
2859 | kauth_cred_unref(&cred); | |
2860 | if (!cb.rcb_func) { | |
2861 | /* if !async we'll need to wait for this RPC to finish */ | |
2862 | req = wreq; | |
b0d623f7 | 2863 | wreq = NULL; |
2d21ac55 | 2864 | goto finish; |
fa4905b1 | 2865 | } |
2d21ac55 A |
2866 | /* |
2867 | * We're done here. | |
2868 | * Outstanding RPC count is unchanged. | |
2869 | * Callback will be called when RPC is done. | |
2870 | */ | |
2871 | return; | |
fa4905b1 | 2872 | } |
2d21ac55 A |
2873 | SET(bp->nb_flags, NB_ERROR); |
2874 | bp->nb_error = error; | |
2875 | } | |
55e303ae | 2876 | |
2d21ac55 A |
2877 | out: |
2878 | if (cb.rcb_func) | |
2879 | nfs_async_write_done(nmp); | |
2880 | /* | |
2881 | * Decrement outstanding RPC count on buffer | |
2882 | * and call nfs_buf_write_finish on last RPC. | |
2883 | * | |
2884 | * (Note: when there are multiple async RPCs issued for a | |
2885 | * buffer we need nfs_buf_mutex to avoid problems when | |
2886 | * aborting a partially-initiated set of RPCs) | |
2887 | */ | |
2888 | multasyncrpc = ISSET(bp->nb_flags, NB_MULTASYNCRPC); | |
2889 | if (multasyncrpc) | |
2890 | lck_mtx_lock(nfs_buf_mutex); | |
2891 | ||
2892 | bp->nb_rpcs--; | |
2893 | finished = (bp->nb_rpcs == 0); | |
55e303ae | 2894 | |
2d21ac55 A |
2895 | if (multasyncrpc) |
2896 | lck_mtx_unlock(nfs_buf_mutex); | |
2897 | ||
2898 | if (finished) { | |
2899 | if (multasyncrpc) | |
2900 | wakeme = &bp->nb_rpcs; | |
2901 | nfs_buf_write_finish(bp, thd, cred); | |
2902 | if (wakeme) | |
2903 | wakeup(wakeme); | |
2904 | } | |
2905 | ||
2906 | if (IS_VALID_CRED(cred)) | |
2907 | kauth_cred_unref(&cred); | |
2908 | } | |
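/*
 * Illustrative sketch, not XNU code: the short-write path above resends only
 * the tail of the buffer, advancing `offset` and shrinking `length` by the
 * number of bytes the server actually accepted (`rlen`).  The same resume
 * pattern reduced to plain POSIX write(2) in userspace; the helper name
 * write_fully is hypothetical.
 *
 *   #include <errno.h>
 *   #include <unistd.h>
 *
 *   // Keep writing until every byte of buf[0..len) has been accepted,
 *   // resuming from the current offset after each short write.
 *   static int write_fully(int fd, const char *buf, size_t len)
 *   {
 *       size_t off = 0;
 *       while (off < len) {
 *           ssize_t n = write(fd, buf + off, len - off);
 *           if (n < 0) {
 *               if (errno == EINTR)
 *                   continue;         // retry interrupted writes
 *               return -1;            // hard error; caller inspects errno
 *           }
 *           off += (size_t)n;         // short write: advance and send the rest
 *       }
 *       return 0;
 *   }
 */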
2909 | ||
2910 | /* | |
2911 | * Send commit(s) for the given node's "needcommit" buffers | |
2912 | */ | |
2913 | int | |
2914 | nfs_flushcommits(nfsnode_t np, int nowait) | |
2915 | { | |
2916 | struct nfsmount *nmp; | |
b0d623f7 | 2917 | struct nfsbuf *bp, *prevlbp, *lbp; |
2d21ac55 A |
2918 | struct nfsbuflists blist, commitlist; |
2919 | int error = 0, retv, wcred_set, flags, dirty; | |
2920 | u_quad_t off, endoff, toff; | |
2921 | u_int32_t count; | |
2922 | kauth_cred_t wcred = NULL; | |
2923 | ||
2924 | FSDBG_TOP(557, np, 0, 0, 0); | |
2925 | ||
2926 | /* | |
2927 | * A nb_flags == (NB_DELWRI | NB_NEEDCOMMIT) block has been written to the | |
2928 | * server, but has not been committed to stable storage on the server | |
2929 | * yet. The byte range is worked out for as many nfsbufs as we can handle | |
2930 | * and the commit rpc is done. | |
2931 | */ | |
2932 | if (!LIST_EMPTY(&np->n_dirtyblkhd)) { | |
b0d623f7 | 2933 | error = nfs_node_lock(np); |
2d21ac55 A |
2934 | if (error) |
2935 | goto done; | |
1c79356b | 2936 | np->n_flag |= NMODIFIED; |
b0d623f7 | 2937 | nfs_node_unlock(np); |
2d21ac55 | 2938 | } |
1c79356b | 2939 | |
2d21ac55 A |
2940 | off = (u_quad_t)-1; |
2941 | endoff = 0; | |
2942 | wcred_set = 0; | |
2943 | LIST_INIT(&commitlist); | |
2944 | ||
2945 | nmp = NFSTONMP(np); | |
2946 | if (!nmp) { | |
2947 | error = ENXIO; | |
2948 | goto done; | |
2949 | } | |
2950 | if (nmp->nm_vers == NFS_VER2) { | |
2951 | error = EINVAL; | |
2952 | goto done; | |
2953 | } | |
2954 | ||
2955 | flags = NBI_DIRTY; | |
2956 | if (nowait) | |
2957 | flags |= NBI_NOWAIT; | |
2958 | lck_mtx_lock(nfs_buf_mutex); | |
2959 | if (!nfs_buf_iterprepare(np, &blist, flags)) { | |
2960 | while ((bp = LIST_FIRST(&blist))) { | |
2961 | LIST_REMOVE(bp, nb_vnbufs); | |
2962 | LIST_INSERT_HEAD(&np->n_dirtyblkhd, bp, nb_vnbufs); | |
2963 | error = nfs_buf_acquire(bp, NBAC_NOWAIT, 0, 0); | |
2964 | if (error) | |
2965 | continue; | |
2966 | if (ISSET(bp->nb_flags, NB_NEEDCOMMIT)) | |
2967 | nfs_buf_check_write_verifier(np, bp); | |
2968 | if (((bp->nb_flags & (NB_DELWRI | NB_NEEDCOMMIT)) | |
2969 | != (NB_DELWRI | NB_NEEDCOMMIT))) { | |
2970 | nfs_buf_drop(bp); | |
2971 | continue; | |
2972 | } | |
2973 | nfs_buf_remfree(bp); | |
b0d623f7 A |
2974 | |
2975 | /* buffer UPLs will be grabbed *in order* below */ | |
2d21ac55 A |
2976 | |
2977 | FSDBG(557, bp, bp->nb_flags, bp->nb_valid, bp->nb_dirty); | |
2978 | FSDBG(557, bp->nb_validoff, bp->nb_validend, | |
2979 | bp->nb_dirtyoff, bp->nb_dirtyend); | |
55e303ae | 2980 | |
2d21ac55 A |
2981 | /* |
2982 | * Work out if all buffers are using the same cred | |
2983 | * so we can deal with them all with one commit. | |
2984 | * | |
2985 | * Note: creds in bp's must be obtained by kauth_cred_ref | |
2986 | * on the same original cred in order for them to be equal. | |
2987 | */ | |
2988 | if (wcred_set == 0) { | |
2989 | wcred = bp->nb_wcred; | |
2990 | if (!IS_VALID_CRED(wcred)) | |
2991 | panic("nfs: needcommit w/out wcred"); | |
2992 | wcred_set = 1; | |
2993 | } else if ((wcred_set == 1) && wcred != bp->nb_wcred) { | |
2994 | wcred_set = -1; | |
2995 | } | |
2996 | SET(bp->nb_flags, NB_WRITEINPROG); | |
2997 | ||
2998 | /* | |
b0d623f7 A |
2999 | * Add this buffer to the list of buffers we are committing. |
3000 | * Buffers are inserted into the list in ascending order so that | |
3001 | * we can take the UPLs in order after the list is complete. | |
2d21ac55 | 3002 | */ |
b0d623f7 A |
3003 | prevlbp = NULL; |
3004 | LIST_FOREACH(lbp, &commitlist, nb_vnbufs) { | |
3005 | if (bp->nb_lblkno < lbp->nb_lblkno) | |
3006 | break; | |
3007 | prevlbp = lbp; | |
3008 | } | |
2d21ac55 | 3009 | LIST_REMOVE(bp, nb_vnbufs); |
b0d623f7 A |
3010 | if (prevlbp) |
3011 | LIST_INSERT_AFTER(prevlbp, bp, nb_vnbufs); | |
3012 | else | |
3013 | LIST_INSERT_HEAD(&commitlist, bp, nb_vnbufs); | |
3014 | ||
3015 | /* update commit range start, end */ | |
2d21ac55 A |
3016 | toff = NBOFF(bp) + bp->nb_dirtyoff; |
3017 | if (toff < off) | |
3018 | off = toff; | |
3019 | toff += (u_quad_t)(bp->nb_dirtyend - bp->nb_dirtyoff); | |
3020 | if (toff > endoff) | |
3021 | endoff = toff; | |
3022 | } | |
3023 | nfs_buf_itercomplete(np, &blist, NBI_DIRTY); | |
3024 | } | |
3025 | lck_mtx_unlock(nfs_buf_mutex); | |
3026 | ||
3027 | if (LIST_EMPTY(&commitlist)) { | |
3028 | error = ENOBUFS; | |
3029 | goto done; | |
3030 | } | |
3031 | ||
b0d623f7 A |
3032 | /* |
3033 | * We need a UPL to prevent others from accessing the buffers during | |
3034 | * our commit RPC(s). | |
3035 | * | |
3036 | * We used to also check for dirty pages here; if there were any we'd | |
3037 | * abort the commit and force the entire buffer to be written again. | |
3038 | * Instead of doing that, we just go ahead and commit the dirty range, | |
3039 | * and then leave the buffer around with dirty pages that will be | |
3040 | * written out later. | |
3041 | */ | |
3042 | LIST_FOREACH(bp, &commitlist, nb_vnbufs) { | |
3043 | if (!ISSET(bp->nb_flags, NB_PAGELIST)) { | |
3044 | retv = nfs_buf_upl_setup(bp); | |
3045 | if (retv) { | |
3046 | /* Unable to create the UPL, the VM object probably no longer exists. */ | |
3047 | printf("nfs_flushcommits: upl create failed %d\n", retv); | |
3048 | bp->nb_valid = bp->nb_dirty = 0; | |
3049 | } | |
3050 | } | |
3051 | nfs_buf_upl_check(bp); | |
3052 | } | |
3053 | ||
2d21ac55 A |
3054 | /* |
3055 | * Commit data on the server, as required. | |
3056 | * If all bufs are using the same wcred, then use that with | |
3057 | * one call for all of them, otherwise commit each one | |
3058 | * separately. | |
3059 | */ | |
3060 | if (wcred_set == 1) { | |
3061 | /* | |
3062 | * Note, it's possible the commit range could be >2^32-1. | |
3063 | * If it is, we'll send one commit that covers the whole file. | |
3064 | */ | |
3065 | if ((endoff - off) > 0xffffffff) | |
3066 | count = 0; | |
3067 | else | |
3068 | count = (endoff - off); | |
3069 | retv = nmp->nm_funcs->nf_commit_rpc(np, off, count, wcred); | |
3070 | } else { | |
3071 | retv = 0; | |
3072 | LIST_FOREACH(bp, &commitlist, nb_vnbufs) { | |
3073 | toff = NBOFF(bp) + bp->nb_dirtyoff; | |
3074 | count = bp->nb_dirtyend - bp->nb_dirtyoff; | |
3075 | retv = nmp->nm_funcs->nf_commit_rpc(np, toff, count, bp->nb_wcred); | |
3076 | if (retv) | |
3077 | break; | |
55e303ae | 3078 | } |
2d21ac55 A |
3079 | } |
3080 | ||
3081 | /* | |
3082 | * Now, either mark the blocks I/O done or mark the | |
3083 | * blocks dirty, depending on whether the commit | |
3084 | * succeeded. | |
3085 | */ | |
3086 | while ((bp = LIST_FIRST(&commitlist))) { | |
3087 | LIST_REMOVE(bp, nb_vnbufs); | |
3088 | FSDBG(557, bp, retv, bp->nb_flags, bp->nb_dirty); | |
b0d623f7 | 3089 | nfs_node_lock_force(np); |
2d21ac55 A |
3090 | CLR(bp->nb_flags, (NB_NEEDCOMMIT | NB_WRITEINPROG)); |
3091 | np->n_needcommitcnt--; | |
3092 | CHECK_NEEDCOMMITCNT(np); | |
b0d623f7 | 3093 | nfs_node_unlock(np); |
2d21ac55 A |
3094 | |
3095 | if (retv) { | |
3096 | /* move back to dirty list */ | |
3097 | lck_mtx_lock(nfs_buf_mutex); | |
3098 | LIST_INSERT_HEAD(&np->n_dirtyblkhd, bp, nb_vnbufs); | |
3099 | lck_mtx_unlock(nfs_buf_mutex); | |
3100 | nfs_buf_release(bp, 1); | |
3101 | continue; | |
1c79356b | 3102 | } |
2d21ac55 | 3103 | |
b0d623f7 A |
3104 | nfs_node_lock_force(np); |
3105 | np->n_numoutput++; | |
3106 | nfs_node_unlock(np); | |
2d21ac55 A |
3107 | vnode_startwrite(NFSTOV(np)); |
3108 | if (ISSET(bp->nb_flags, NB_DELWRI)) { | |
3109 | lck_mtx_lock(nfs_buf_mutex); | |
3110 | nfs_nbdwrite--; | |
3111 | NFSBUFCNTCHK(); | |
3112 | lck_mtx_unlock(nfs_buf_mutex); | |
3113 | wakeup(&nfs_nbdwrite); | |
1c79356b | 3114 | } |
2d21ac55 A |
3115 | CLR(bp->nb_flags, (NB_READ|NB_DONE|NB_ERROR|NB_DELWRI)); |
3116 | /* if block still has dirty pages, we don't want it to */ | |
3117 | /* be released in nfs_buf_iodone(). So, don't set NB_ASYNC. */ | |
3118 | if (!(dirty = bp->nb_dirty)) | |
3119 | SET(bp->nb_flags, NB_ASYNC); | |
3120 | else | |
3121 | CLR(bp->nb_flags, NB_ASYNC); | |
1c79356b | 3122 | |
2d21ac55 A |
3123 | /* move to clean list */ |
3124 | lck_mtx_lock(nfs_buf_mutex); | |
3125 | LIST_INSERT_HEAD(&np->n_cleanblkhd, bp, nb_vnbufs); | |
3126 | lck_mtx_unlock(nfs_buf_mutex); | |
3127 | ||
3128 | bp->nb_dirtyoff = bp->nb_dirtyend = 0; | |
3129 | ||
3130 | nfs_buf_iodone(bp); | |
3131 | if (dirty) { | |
3132 | /* throw it back in as a delayed write buffer */ | |
3133 | CLR(bp->nb_flags, NB_DONE); | |
3134 | nfs_buf_write_delayed(bp); | |
55e303ae | 3135 | } |
2d21ac55 | 3136 | } |
1c79356b | 3137 | |
2d21ac55 A |
3138 | done: |
3139 | FSDBG_BOT(557, np, 0, 0, error); | |
3140 | return (error); | |
3141 | } | |
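/*
 * Illustrative sketch, not XNU code: when every buffer shares one write
 * credential, nfs_flushcommits() sends a single COMMIT covering all of them.
 * `off` is the lowest dirty offset, `endoff` the highest dirty end, and a
 * span wider than 32 bits is collapsed to count == 0, which the commit RPC
 * treats as "the whole file".  The range arithmetic in isolation; the struct
 * and function names are hypothetical.
 *
 *   #include <stddef.h>
 *   #include <stdint.h>
 *
 *   struct dirty_range { uint64_t start; uint64_t end; };
 *
 *   // Compute the offset/count of one commit covering every dirty range;
 *   // count == 0 means the span does not fit in 32 bits (commit whole file).
 *   static void commit_span(const struct dirty_range *r, size_t n,
 *                           uint64_t *off, uint32_t *count)
 *   {
 *       if (n == 0) { *off = 0; *count = 0; return; }
 *       uint64_t lo = r[0].start, hi = r[0].end;
 *       for (size_t i = 1; i < n; i++) {
 *           if (r[i].start < lo) lo = r[i].start;
 *           if (r[i].end   > hi) hi = r[i].end;
 *       }
 *       *off = lo;
 *       *count = ((hi - lo) > 0xffffffffULL) ? 0 : (uint32_t)(hi - lo);
 *   }
 */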
3142 | ||
3143 | /* | |
3144 | * Flush all the blocks associated with a vnode. | |
3145 | * Walk through the buffer pool and push any dirty pages | |
3146 | * associated with the vnode. | |
3147 | */ | |
3148 | int | |
3149 | nfs_flush(nfsnode_t np, int waitfor, thread_t thd, int ignore_writeerr) | |
3150 | { | |
3151 | struct nfsbuf *bp; | |
3152 | struct nfsbuflists blist; | |
3153 | struct nfsmount *nmp = NFSTONMP(np); | |
3154 | int error = 0, error2, slptimeo = 0, slpflag = 0; | |
3155 | int nfsvers, flags, passone = 1; | |
3156 | ||
3157 | FSDBG_TOP(517, np, waitfor, ignore_writeerr, 0); | |
3158 | ||
3159 | if (!nmp) { | |
3160 | error = ENXIO; | |
3161 | goto out; | |
3162 | } | |
3163 | nfsvers = nmp->nm_vers; | |
3164 | if (nmp->nm_flag & NFSMNT_INT) | |
3165 | slpflag = PCATCH; | |
3166 | ||
3167 | if (!LIST_EMPTY(&np->n_dirtyblkhd)) { | |
b0d623f7 | 3168 | nfs_node_lock_force(np); |
2d21ac55 | 3169 | np->n_flag |= NMODIFIED; |
b0d623f7 | 3170 | nfs_node_unlock(np); |
2d21ac55 A |
3171 | } |
3172 | ||
3173 | lck_mtx_lock(nfs_buf_mutex); | |
3174 | while (np->n_bflag & NBFLUSHINPROG) { | |
3175 | np->n_bflag |= NBFLUSHWANT; | |
b0d623f7 A |
3176 | msleep(&np->n_bflag, nfs_buf_mutex, slpflag, "nfs_flush", NULL); |
3177 | if ((error = nfs_sigintr(NFSTONMP(np), NULL, thd, 0))) { | |
2d21ac55 A |
3178 | lck_mtx_unlock(nfs_buf_mutex); |
3179 | goto out; | |
3180 | } | |
3181 | } | |
3182 | np->n_bflag |= NBFLUSHINPROG; | |
3183 | ||
3184 | /* | |
3185 | * On the first pass, start async/unstable writes on all | |
3186 | * delayed write buffers. Then wait for all writes to complete | |
3187 | * and call nfs_flushcommits() to commit any uncommitted buffers. | |
3188 | * On all subsequent passes, start STABLE writes on any remaining | |
3189 | * dirty buffers. Then wait for all writes to complete. | |
3190 | */ | |
3191 | again: | |
3192 | FSDBG(518, LIST_FIRST(&np->n_dirtyblkhd), np->n_flag, 0, 0); | |
3193 | if (!NFSTONMP(np)) { | |
3194 | lck_mtx_unlock(nfs_buf_mutex); | |
3195 | error = ENXIO; | |
3196 | goto done; | |
3197 | } | |
3198 | ||
3199 | /* Start/do any write(s) that are required. */ | |
3200 | if (!nfs_buf_iterprepare(np, &blist, NBI_DIRTY)) { | |
3201 | while ((bp = LIST_FIRST(&blist))) { | |
3202 | LIST_REMOVE(bp, nb_vnbufs); | |
3203 | LIST_INSERT_HEAD(&np->n_dirtyblkhd, bp, nb_vnbufs); | |
b0d623f7 | 3204 | flags = (passone || !(waitfor == MNT_WAIT || waitfor == MNT_DWAIT)) ? NBAC_NOWAIT : 0; |
2d21ac55 A |
3205 | if (flags != NBAC_NOWAIT) |
3206 | nfs_buf_refget(bp); | |
3207 | while ((error = nfs_buf_acquire(bp, flags, slpflag, slptimeo))) { | |
3208 | FSDBG(524, bp, flags, bp->nb_lflags, bp->nb_flags); | |
3209 | if (error == EBUSY) | |
3210 | break; | |
3211 | if (error) { | |
3212 | error2 = nfs_sigintr(NFSTONMP(np), NULL, thd, 0); | |
3213 | if (error2) { | |
3214 | if (flags != NBAC_NOWAIT) | |
3215 | nfs_buf_refrele(bp); | |
3216 | nfs_buf_itercomplete(np, &blist, NBI_DIRTY); | |
3217 | lck_mtx_unlock(nfs_buf_mutex); | |
3218 | error = error2; | |
3219 | goto done; | |
3220 | } | |
3221 | if (slpflag == PCATCH) { | |
3222 | slpflag = 0; | |
3223 | slptimeo = 2 * hz; | |
3224 | } | |
3225 | } | |
3226 | } | |
3227 | if (flags != NBAC_NOWAIT) | |
3228 | nfs_buf_refrele(bp); | |
3229 | if (error == EBUSY) | |
3230 | continue; | |
3231 | if (!bp->nb_np) { | |
3232 | /* buffer is no longer valid */ | |
3233 | nfs_buf_drop(bp); | |
3234 | continue; | |
3235 | } | |
3236 | if (ISSET(bp->nb_flags, NB_NEEDCOMMIT)) | |
3237 | nfs_buf_check_write_verifier(np, bp); | |
3238 | if (!ISSET(bp->nb_flags, NB_DELWRI)) { | |
3239 | /* buffer is no longer dirty */ | |
3240 | nfs_buf_drop(bp); | |
3241 | continue; | |
3242 | } | |
3243 | FSDBG(525, bp, passone, bp->nb_lflags, bp->nb_flags); | |
b0d623f7 | 3244 | if ((passone || !(waitfor == MNT_WAIT || waitfor == MNT_DWAIT)) && |
2d21ac55 A |
3245 | ISSET(bp->nb_flags, NB_NEEDCOMMIT)) { |
3246 | nfs_buf_drop(bp); | |
3247 | continue; | |
3248 | } | |
3249 | nfs_buf_remfree(bp); | |
3250 | lck_mtx_unlock(nfs_buf_mutex); | |
3251 | if (ISSET(bp->nb_flags, NB_ERROR)) { | |
b0d623f7 | 3252 | nfs_node_lock_force(np); |
2d21ac55 A |
3253 | np->n_error = bp->nb_error ? bp->nb_error : EIO; |
3254 | np->n_flag |= NWRITEERR; | |
b0d623f7 | 3255 | nfs_node_unlock(np); |
2d21ac55 A |
3256 | nfs_buf_release(bp, 1); |
3257 | lck_mtx_lock(nfs_buf_mutex); | |
3258 | continue; | |
3259 | } | |
3260 | SET(bp->nb_flags, NB_ASYNC); | |
3261 | if (!passone) { | |
3262 | /* NB_STABLE forces this to be written FILESYNC */ | |
3263 | SET(bp->nb_flags, NB_STABLE); | |
3264 | } | |
3265 | nfs_buf_write(bp); | |
3266 | lck_mtx_lock(nfs_buf_mutex); | |
3267 | } | |
3268 | nfs_buf_itercomplete(np, &blist, NBI_DIRTY); | |
3269 | } | |
3270 | lck_mtx_unlock(nfs_buf_mutex); | |
3271 | ||
b0d623f7 | 3272 | if (waitfor == MNT_WAIT || waitfor == MNT_DWAIT) { |
2d21ac55 A |
3273 | while ((error = vnode_waitforwrites(NFSTOV(np), 0, slpflag, slptimeo, "nfsflush"))) { |
3274 | error2 = nfs_sigintr(NFSTONMP(np), NULL, thd, 0); | |
3275 | if (error2) { | |
3276 | error = error2; | |
3277 | goto done; | |
3278 | } | |
3279 | if (slpflag == PCATCH) { | |
3280 | slpflag = 0; | |
3281 | slptimeo = 2 * hz; | |
55e303ae | 3282 | } |
2d21ac55 A |
3283 | } |
3284 | } | |
55e303ae | 3285 | |
2d21ac55 A |
3286 | if (nfsvers != NFS_VER2) { |
3287 | /* loop while it looks like there are still buffers to be */ | |
3288 | /* committed and nfs_flushcommits() seems to be handling them. */ | |
3289 | while (np->n_needcommitcnt) | |
3290 | if (nfs_flushcommits(np, 0)) | |
3291 | break; | |
3292 | } | |
55e303ae | 3293 | |
2d21ac55 A |
3294 | if (passone) { |
3295 | passone = 0; | |
3296 | if (!LIST_EMPTY(&np->n_dirtyblkhd)) { | |
b0d623f7 | 3297 | nfs_node_lock_force(np); |
2d21ac55 | 3298 | np->n_flag |= NMODIFIED; |
b0d623f7 | 3299 | nfs_node_unlock(np); |
2d21ac55 A |
3300 | } |
3301 | lck_mtx_lock(nfs_buf_mutex); | |
3302 | goto again; | |
3303 | } | |
55e303ae | 3304 | |
b0d623f7 | 3305 | if (waitfor == MNT_WAIT || waitfor == MNT_DWAIT) { |
2d21ac55 | 3306 | if (!LIST_EMPTY(&np->n_dirtyblkhd)) { |
b0d623f7 | 3307 | nfs_node_lock_force(np); |
2d21ac55 | 3308 | np->n_flag |= NMODIFIED; |
b0d623f7 | 3309 | nfs_node_unlock(np); |
2d21ac55 A |
3310 | } |
3311 | lck_mtx_lock(nfs_buf_mutex); | |
3312 | if (!LIST_EMPTY(&np->n_dirtyblkhd)) | |
3313 | goto again; | |
3314 | lck_mtx_unlock(nfs_buf_mutex); | |
b0d623f7 A |
3315 | nfs_node_lock_force(np); |
3316 | /* | |
3317 | * OK, it looks like there are no dirty blocks. If we have no | |
3318 | * writes in flight and no one in the write code, we can clear | |
3319 | * the modified flag. In order to make sure we see the latest | |
3320 | * attributes and size, we also invalidate the attributes and | |
3321 | * advance the attribute cache XID to guarantee that attributes | |
3322 | * newer than our clearing of NMODIFIED will get loaded next. | |
3323 | * (If we don't do this, it's possible for the flush's final | |
3324 | * write/commit (xid1) to be executed in parallel with a subsequent | |
3325 | * getattr request (xid2). The getattr could return attributes | |
3326 | * from *before* the write/commit completed but the stale attributes | |
3327 | * would be preferred because of the xid ordering.) | |
3328 | */ | |
3329 | if (!np->n_wrbusy && !np->n_numoutput) { | |
2d21ac55 | 3330 | np->n_flag &= ~NMODIFIED; |
b0d623f7 A |
3331 | NATTRINVALIDATE(np); |
3332 | nfs_get_xid(&np->n_xid); | |
3333 | } | |
2d21ac55 | 3334 | } else { |
b0d623f7 | 3335 | nfs_node_lock_force(np); |
0c530ab8 A |
3336 | } |
3337 | ||
2d21ac55 A |
3338 | FSDBG(526, np->n_flag, np->n_error, 0, 0); |
3339 | if (!ignore_writeerr && (np->n_flag & NWRITEERR)) { | |
3340 | error = np->n_error; | |
3341 | np->n_flag &= ~NWRITEERR; | |
3342 | } | |
b0d623f7 | 3343 | nfs_node_unlock(np); |
2d21ac55 A |
3344 | done: |
3345 | lck_mtx_lock(nfs_buf_mutex); | |
3346 | flags = np->n_bflag; | |
3347 | np->n_bflag &= ~(NBFLUSHINPROG|NBFLUSHWANT); | |
3348 | lck_mtx_unlock(nfs_buf_mutex); | |
3349 | if (flags & NBFLUSHWANT) | |
3350 | wakeup(&np->n_bflag); | |
3351 | out: | |
3352 | FSDBG_BOT(517, np, error, ignore_writeerr, 0); | |
0c530ab8 | 3353 | return (error); |
1c79356b A |
3354 | } |
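/*
 * Illustrative sketch, not XNU code: nfs_flush() above is a multi-pass loop.
 * Pass one starts async/unstable writes and then commits whatever the server
 * is still only caching; later passes force FILESYNC (stable) writes on
 * anything left dirty.  The control flow with locking and iteration details
 * stripped out; all names here are hypothetical.
 *
 *   enum { BUF_DIRTY = 1, BUF_NEEDCOMMIT = 2 };
 *
 *   struct fbuf { int flags; };
 *
 *   static void flush_all(struct fbuf *bufs, int n,
 *                         void (*write_unstable)(struct fbuf *),
 *                         void (*write_stable)(struct fbuf *),
 *                         void (*commit)(struct fbuf *))
 *   {
 *       // pass one: unstable writes, then commit the uncommitted data
 *       for (int i = 0; i < n; i++)
 *           if (bufs[i].flags & BUF_DIRTY)
 *               write_unstable(&bufs[i]);    // may leave BUF_NEEDCOMMIT set
 *       for (int i = 0; i < n; i++)
 *           if (bufs[i].flags & BUF_NEEDCOMMIT)
 *               commit(&bufs[i]);
 *       // later passes: anything still dirty goes out stable (FILESYNC)
 *       for (int i = 0; i < n; i++)
 *           if (bufs[i].flags & BUF_DIRTY)
 *               write_stable(&bufs[i]);
 *   }
 */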
3355 | ||
1c79356b | 3356 | /* |
55e303ae A |
3357 | * Flush out and invalidate all buffers associated with a vnode. |
3358 | * Called with the underlying object locked. | |
1c79356b | 3359 | */ |
b0d623f7 | 3360 | int |
91447636 | 3361 | nfs_vinvalbuf_internal( |
2d21ac55 | 3362 | nfsnode_t np, |
91447636 | 3363 | int flags, |
2d21ac55 | 3364 | thread_t thd, |
91447636 | 3365 | kauth_cred_t cred, |
91447636 A |
3366 | int slpflag, |
3367 | int slptimeo) | |
1c79356b | 3368 | { |
55e303ae | 3369 | struct nfsbuf *bp; |
91447636 A |
3370 | struct nfsbuflists blist; |
3371 | int list, error = 0; | |
9bccf70c | 3372 | |
55e303ae | 3373 | if (flags & V_SAVE) { |
2d21ac55 | 3374 | if ((error = nfs_flush(np, MNT_WAIT, thd, (flags & V_IGNORE_WRITEERR)))) |
55e303ae | 3375 | return (error); |
9bccf70c A |
3376 | } |
3377 | ||
91447636 | 3378 | lck_mtx_lock(nfs_buf_mutex); |
55e303ae | 3379 | for (;;) { |
91447636 A |
3380 | list = NBI_CLEAN; |
3381 | if (nfs_buf_iterprepare(np, &blist, list)) { | |
3382 | list = NBI_DIRTY; | |
3383 | if (nfs_buf_iterprepare(np, &blist, list)) | |
3384 | break; | |
3385 | } | |
3386 | while ((bp = LIST_FIRST(&blist))) { | |
3387 | LIST_REMOVE(bp, nb_vnbufs); | |
3388 | if (list == NBI_CLEAN) | |
3389 | LIST_INSERT_HEAD(&np->n_cleanblkhd, bp, nb_vnbufs); | |
3390 | else | |
3391 | LIST_INSERT_HEAD(&np->n_dirtyblkhd, bp, nb_vnbufs); | |
3392 | nfs_buf_refget(bp); | |
3393 | while ((error = nfs_buf_acquire(bp, NBAC_REMOVE, slpflag, slptimeo))) { | |
2d21ac55 | 3394 | FSDBG(556, np, bp, NBOFF(bp), bp->nb_flags); |
91447636 | 3395 | if (error != EAGAIN) { |
2d21ac55 | 3396 | FSDBG(554, np, bp, -1, error); |
91447636 A |
3397 | nfs_buf_refrele(bp); |
3398 | nfs_buf_itercomplete(np, &blist, list); | |
3399 | lck_mtx_unlock(nfs_buf_mutex); | |
55e303ae A |
3400 | return (error); |
3401 | } | |
55e303ae | 3402 | } |
91447636 | 3403 | nfs_buf_refrele(bp); |
2d21ac55 | 3404 | FSDBG(554, np, bp, NBOFF(bp), bp->nb_flags); |
91447636 | 3405 | lck_mtx_unlock(nfs_buf_mutex); |
2d21ac55 | 3406 | if ((flags & V_SAVE) && UBCINFOEXISTS(NFSTOV(np)) && bp->nb_np && |
91447636 | 3407 | (NBOFF(bp) < (off_t)np->n_size)) { |
2d21ac55 | 3408 | /* extra paranoia: make sure we're not */ |
55e303ae A |
3409 | /* somehow leaving any dirty data around */ |
3410 | int mustwrite = 0; | |
91447636 A |
3411 | int end = (NBOFF(bp) + bp->nb_bufsize > (off_t)np->n_size) ? |
3412 | ((off_t)np->n_size - NBOFF(bp)) : bp->nb_bufsize; | |
55e303ae A |
3413 | if (!ISSET(bp->nb_flags, NB_PAGELIST)) { |
3414 | error = nfs_buf_upl_setup(bp); | |
3415 | if (error == EINVAL) { | |
3416 | /* vm object must no longer exist */ | |
3417 | /* hopefully we don't need to do */ | |
3418 | /* anything for this buffer */ | |
3419 | } else if (error) | |
91447636 | 3420 | printf("nfs_vinvalbuf: upl setup failed %d\n", error); |
55e303ae A |
3421 | bp->nb_valid = bp->nb_dirty = 0; |
3422 | } | |
3423 | nfs_buf_upl_check(bp); | |
3424 | /* check for any dirty data before the EOF */ | |
2d21ac55 | 3425 | if ((bp->nb_dirtyend > 0) && (bp->nb_dirtyoff < end)) { |
55e303ae | 3426 | /* clip dirty range to EOF */ |
2d21ac55 | 3427 | if (bp->nb_dirtyend > end) { |
55e303ae | 3428 | bp->nb_dirtyend = end; |
2d21ac55 A |
3429 | if (bp->nb_dirtyoff >= bp->nb_dirtyend) |
3430 | bp->nb_dirtyoff = bp->nb_dirtyend = 0; | |
3431 | } | |
3432 | if ((bp->nb_dirtyend > 0) && (bp->nb_dirtyoff < end)) | |
3433 | mustwrite++; | |
55e303ae A |
3434 | } |
3435 | bp->nb_dirty &= (1 << (round_page_32(end)/PAGE_SIZE)) - 1; | |
2d21ac55 A |
3436 | if (bp->nb_dirty) |
3437 | mustwrite++; | |
91447636 | 3438 | /* also make sure we'll have a credential to do the write */ |
0c530ab8 | 3439 | if (mustwrite && !IS_VALID_CRED(bp->nb_wcred) && !IS_VALID_CRED(cred)) { |
91447636 A |
3440 | printf("nfs_vinvalbuf: found dirty buffer with no write creds\n"); |
3441 | mustwrite = 0; | |
3442 | } | |
55e303ae | 3443 | if (mustwrite) { |
2d21ac55 | 3444 | FSDBG(554, np, bp, 0xd00dee, bp->nb_flags); |
55e303ae A |
3445 | if (!ISSET(bp->nb_flags, NB_PAGELIST)) |
3446 | panic("nfs_vinvalbuf: dirty buffer without upl"); | |
3447 | /* gotta write out dirty data before invalidating */ | |
3448 | /* (NB_STABLE indicates that data writes should be FILESYNC) */ | |
3449 | /* (NB_NOCACHE indicates buffer should be discarded) */ | |
3450 | CLR(bp->nb_flags, (NB_DONE | NB_ERROR | NB_INVAL | NB_ASYNC)); | |
3451 | SET(bp->nb_flags, NB_STABLE | NB_NOCACHE); | |
0c530ab8 | 3452 | if (!IS_VALID_CRED(bp->nb_wcred)) { |
91447636 A |
3453 | kauth_cred_ref(cred); |
3454 | bp->nb_wcred = cred; | |
3455 | } | |
55e303ae A |
3456 | error = nfs_buf_write(bp); |
3457 | // Note: bp has been released | |
3458 | if (error) { | |
3459 | FSDBG(554, bp, 0xd00dee, 0xbad, error); | |
b0d623f7 | 3460 | nfs_node_lock_force(np); |
55e303ae A |
3461 | np->n_error = error; |
3462 | np->n_flag |= NWRITEERR; | |
91447636 A |
3463 | /* |
3464 | * There was a write error and we need to | |
3465 | * invalidate attrs to sync with server. | |
3466 | * (if this write was extending the file, | |
3467 | * we may no longer know the correct size) | |
3468 | */ | |
3469 | NATTRINVALIDATE(np); | |
b0d623f7 A |
3470 | nfs_node_unlock(np); |
3471 | if (error == EINTR) { | |
3472 | /* | |
3473 | * Abort on EINTR. If we don't, we could | |
3474 | * be stuck in this loop forever because | |
3475 | * the buffer will continue to stay dirty. | |
3476 | */ | |
3477 | lck_mtx_lock(nfs_buf_mutex); | |
3478 | nfs_buf_itercomplete(np, &blist, list); | |
3479 | lck_mtx_unlock(nfs_buf_mutex); | |
3480 | return (error); | |
3481 | } | |
55e303ae A |
3482 | error = 0; |
3483 | } | |
91447636 A |
3484 | lck_mtx_lock(nfs_buf_mutex); |
3485 | continue; | |
55e303ae A |
3486 | } |
3487 | } | |
3488 | SET(bp->nb_flags, NB_INVAL); | |
91447636 | 3489 | // hold off on FREEUPs until we're done here |
483a1d10 | 3490 | nfs_buf_release(bp, 0); |
91447636 | 3491 | lck_mtx_lock(nfs_buf_mutex); |
55e303ae | 3492 | } |
91447636 | 3493 | nfs_buf_itercomplete(np, &blist, list); |
55e303ae | 3494 | } |
2d21ac55 A |
3495 | if (!LIST_EMPTY(&(np)->n_dirtyblkhd) || !LIST_EMPTY(&(np)->n_cleanblkhd)) |
3496 | panic("nfs_vinvalbuf: flush/inval failed"); | |
91447636 | 3497 | lck_mtx_unlock(nfs_buf_mutex); |
b0d623f7 A |
3498 | nfs_node_lock_force(np); |
3499 | if (!(flags & V_SAVE)) | |
2d21ac55 | 3500 | np->n_flag &= ~NMODIFIED; |
b0d623f7 A |
3501 | if (vnode_vtype(NFSTOV(np)) == VREG) |
3502 | np->n_lastrahead = -1; | |
3503 | nfs_node_unlock(np); | |
483a1d10 | 3504 | NFS_BUF_FREEUP(); |
55e303ae | 3505 | return (0); |
1c79356b A |
3506 | } |
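/*
 * Illustrative sketch, not XNU code: the V_SAVE path above first clips a
 * buffer's dirty byte range to the file's EOF and then masks nb_dirty so
 * only pages starting below the page-rounded EOF stay marked dirty.  The
 * bitmap masking in isolation, assuming 4 KiB pages and a 32-bit per-page
 * dirty bitmap; the names are hypothetical.
 *
 *   #include <stdint.h>
 *
 *   #define SKETCH_PAGE_SIZE 4096u
 *
 *   // Round `end` up to a page boundary, like round_page_32().
 *   static uint32_t round_page_sketch(uint32_t end)
 *   {
 *       return (end + SKETCH_PAGE_SIZE - 1) & ~(SKETCH_PAGE_SIZE - 1);
 *   }
 *
 *   // Keep only the dirty bits for pages that lie before `end` in the buffer.
 *   static uint32_t clip_dirty_pages(uint32_t dirty, uint32_t end)
 *   {
 *       uint32_t npages = round_page_sketch(end) / SKETCH_PAGE_SIZE;
 *       if (npages >= 32)
 *           return dirty;                // whole bitmap is still in range
 *       return dirty & ((1u << npages) - 1);
 *   }
 */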
3507 | ||
55e303ae | 3508 | |
1c79356b A |
3509 | /* |
3510 | * Flush and invalidate all dirty buffers. If another process is already | |
3511 | * doing the flush, just wait for completion. | |
3512 | */ | |
3513 | int | |
2d21ac55 A |
3514 | nfs_vinvalbuf(vnode_t vp, int flags, vfs_context_t ctx, int intrflg) |
3515 | { | |
3516 | return nfs_vinvalbuf2(vp, flags, vfs_context_thread(ctx), vfs_context_ucred(ctx), intrflg); | |
3517 | } | |
3518 | ||
3519 | int | |
3520 | nfs_vinvalbuf2(vnode_t vp, int flags, thread_t thd, kauth_cred_t cred, int intrflg) | |
1c79356b | 3521 | { |
2d21ac55 A |
3522 | nfsnode_t np = VTONFS(vp); |
3523 | struct nfsmount *nmp = VTONMP(vp); | |
3524 | int error, rv, slpflag, slptimeo, nflags; | |
91447636 | 3525 | off_t size; |
1c79356b | 3526 | |
2d21ac55 | 3527 | FSDBG_TOP(554, np, flags, intrflg, 0); |
55e303ae | 3528 | |
2d21ac55 | 3529 | if (nmp && !(nmp->nm_flag & NFSMNT_INT)) |
1c79356b A |
3530 | intrflg = 0; |
3531 | if (intrflg) { | |
3532 | slpflag = PCATCH; | |
3533 | slptimeo = 2 * hz; | |
3534 | } else { | |
3535 | slpflag = 0; | |
3536 | slptimeo = 0; | |
3537 | } | |
1c79356b | 3538 | |
2d21ac55 A |
3539 | /* First wait for any other process doing a flush to complete. */ |
3540 | lck_mtx_lock(nfs_buf_mutex); | |
3541 | while (np->n_bflag & NBINVALINPROG) { | |
3542 | np->n_bflag |= NBINVALWANT; | |
b0d623f7 A |
3543 | msleep(&np->n_bflag, nfs_buf_mutex, slpflag, "nfs_vinvalbuf", NULL); |
3544 | if ((error = nfs_sigintr(VTONMP(vp), NULL, thd, 0))) { | |
2d21ac55 | 3545 | lck_mtx_unlock(nfs_buf_mutex); |
55e303ae | 3546 | return (error); |
1c79356b | 3547 | } |
1c79356b | 3548 | } |
2d21ac55 A |
3549 | np->n_bflag |= NBINVALINPROG; |
3550 | lck_mtx_unlock(nfs_buf_mutex); | |
3551 | ||
3552 | /* Now, flush as required. */ | |
3553 | error = nfs_vinvalbuf_internal(np, flags, thd, cred, slpflag, 0); | |
3554 | while (error) { | |
3555 | FSDBG(554, np, 0, 0, error); | |
3556 | if ((error = nfs_sigintr(VTONMP(vp), NULL, thd, 0))) | |
3557 | goto done; | |
3558 | error = nfs_vinvalbuf_internal(np, flags, thd, cred, 0, slptimeo); | |
1c79356b | 3559 | } |
2d21ac55 A |
3560 | |
3561 | /* get the pages out of vm also */ | |
3562 | if (UBCINFOEXISTS(vp) && (size = ubc_getsize(vp))) | |
3563 | if (!(rv = ubc_sync_range(vp, 0, size, UBC_PUSHALL | UBC_SYNC | UBC_INVALIDATE))) | |
91447636 | 3564 | panic("nfs_vinvalbuf(): ubc_sync_range failed!"); |
2d21ac55 A |
3565 | done: |
3566 | lck_mtx_lock(nfs_buf_mutex); | |
3567 | nflags = np->n_bflag; | |
3568 | np->n_bflag &= ~(NBINVALINPROG|NBINVALWANT); | |
3569 | lck_mtx_unlock(nfs_buf_mutex); | |
3570 | if (nflags & NBINVALWANT) | |
3571 | wakeup(&np->n_bflag); | |
91447636 | 3572 | |
2d21ac55 A |
3573 | FSDBG_BOT(554, np, flags, intrflg, error); |
3574 | return (error); | |
1c79356b A |
3575 | } |
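/*
 * Illustrative sketch, not XNU code: nfs_vinvalbuf2() serializes invalidation
 * with the NBINVALINPROG/NBINVALWANT pair; the first caller sets INPROG,
 * later callers set WANT and sleep on n_bflag, and the finisher clears both
 * flags and wakes any waiters.  The same single-owner handshake expressed
 * with pthreads; the names INPROG/WANT and the helpers are hypothetical.
 *
 *   #include <pthread.h>
 *
 *   #define INPROG 0x1
 *   #define WANT   0x2
 *
 *   static pthread_mutex_t m = PTHREAD_MUTEX_INITIALIZER;
 *   static pthread_cond_t  c = PTHREAD_COND_INITIALIZER;
 *   static int state;
 *
 *   static void begin_exclusive(void)      // like taking NBINVALINPROG
 *   {
 *       pthread_mutex_lock(&m);
 *       while (state & INPROG) {           // someone else is already flushing
 *           state |= WANT;
 *           pthread_cond_wait(&c, &m);
 *       }
 *       state |= INPROG;
 *       pthread_mutex_unlock(&m);
 *   }
 *
 *   static void end_exclusive(void)        // like the done: cleanup above
 *   {
 *       pthread_mutex_lock(&m);
 *       int waiters = state & WANT;
 *       state &= ~(INPROG | WANT);
 *       pthread_mutex_unlock(&m);
 *       if (waiters)
 *           pthread_cond_broadcast(&c);    // analogous to wakeup(&np->n_bflag)
 *   }
 */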
3576 | ||
3577 | /* | |
2d21ac55 A |
3578 | * Add an async I/O request to the mount's async I/O queue and make |
3579 | * sure that an nfsiod will service it. | |
1c79356b | 3580 | */ |
2d21ac55 A |
3581 | void |
3582 | nfs_asyncio_finish(struct nfsreq *req) | |
1c79356b A |
3583 | { |
3584 | struct nfsmount *nmp; | |
2d21ac55 A |
3585 | struct nfsiod *niod; |
3586 | int started = 0; | |
1c79356b | 3587 | |
2d21ac55 | 3588 | FSDBG_TOP(552, nmp, 0, 0, 0); |
1c79356b | 3589 | again: |
2d21ac55 A |
3590 | if (((nmp = req->r_nmp)) == NULL) |
3591 | return; | |
3592 | lck_mtx_lock(nfsiod_mutex); | |
3593 | niod = nmp->nm_niod; | |
3594 | ||
3595 | /* grab an nfsiod if we don't have one already */ | |
3596 | if (!niod) { | |
3597 | niod = TAILQ_FIRST(&nfsiodfree); | |
3598 | if (niod) { | |
3599 | TAILQ_REMOVE(&nfsiodfree, niod, niod_link); | |
3600 | TAILQ_INSERT_TAIL(&nfsiodwork, niod, niod_link); | |
3601 | niod->niod_nmp = nmp; | |
3602 | } else if (((nfsiod_thread_count < NFSIOD_MAX) || (nfsiod_thread_count <= 0)) && (started < 4)) { | |
1c79356b | 3603 | /* |
2d21ac55 A |
3604 | * Try starting a new thread. |
3605 | * We may try a couple times if other callers | |
3606 | * get the new threads before we do. | |
1c79356b | 3607 | */ |
2d21ac55 A |
3608 | lck_mtx_unlock(nfsiod_mutex); |
3609 | started++; | |
3610 | if (!nfsiod_start()) | |
3611 | goto again; | |
3612 | lck_mtx_lock(nfsiod_mutex); | |
1c79356b | 3613 | } |
91447636 | 3614 | } |
55e303ae | 3615 | |
2d21ac55 A |
3616 | if (req->r_achain.tqe_next == NFSREQNOLIST) |
3617 | TAILQ_INSERT_TAIL(&nmp->nm_iodq, req, r_achain); | |
3618 | ||
3619 | /* If this mount doesn't already have an nfsiod working on it... */ | |
3620 | if (!nmp->nm_niod) { | |
3621 | if (niod) { /* give it the nfsiod we just grabbed */ | |
3622 | nmp->nm_niod = niod; | |
3623 | lck_mtx_unlock(nfsiod_mutex); | |
3624 | wakeup(niod); | |
3625 | } else if (nfsiod_thread_count > 0) { | |
3626 | /* just queue it up on nfsiod mounts queue */ | |
3627 | TAILQ_INSERT_TAIL(&nfsiodmounts, nmp, nm_iodlink); | |
3628 | lck_mtx_unlock(nfsiod_mutex); | |
3629 | } else { | |
3630 | printf("nfs_asyncio(): no nfsiods? %d %d (%d)\n", nfsiod_thread_count, NFSIOD_MAX, started); | |
3631 | lck_mtx_unlock(nfsiod_mutex); | |
3632 | /* we have no other option but to be persistent */ | |
3633 | started = 0; | |
3634 | goto again; | |
1c79356b | 3635 | } |
2d21ac55 A |
3636 | } else { |
3637 | lck_mtx_unlock(nfsiod_mutex); | |
1c79356b A |
3638 | } |
3639 | ||
2d21ac55 A |
3640 | FSDBG_BOT(552, nmp, 0, 0, 0); |
3641 | } | |
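/*
 * Illustrative sketch, not XNU code: the nfsiod hand-off above tries, in
 * order, to reuse an idle worker thread, to start a new one (retrying a few
 * times because other callers may claim the freshly started threads first),
 * and otherwise to queue the mount for whatever workers already exist.  The
 * retry shape in isolation; the struct and callback names are hypothetical.
 *
 *   #include <stdbool.h>
 *
 *   struct worker;
 *
 *   // Find or create a worker, giving up after max_starts creation attempts.
 *   // Returns NULL when the caller should fall back to queueing the work.
 *   static struct worker *get_worker(int max_starts,
 *                                    struct worker *(*idle_pop)(void),
 *                                    bool (*start_one)(void))
 *   {
 *       int started = 0;
 *       for (;;) {
 *           struct worker *w = idle_pop();
 *           if (w)
 *               return w;                  // reuse an idle worker
 *           if (started >= max_starts || !start_one())
 *               return NULL;
 *           started++;                     // a new thread was started, but another
 *                                          // caller may grab it, so look again
 *       }
 *   }
 */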
1c79356b | 3642 | |
2d21ac55 A |
3643 | /* |
3644 | * queue up async I/O request for resend | |
3645 | */ | |
3646 | void | |
3647 | nfs_asyncio_resend(struct nfsreq *req) | |
3648 | { | |
3649 | struct nfsmount *nmp = req->r_nmp; | |
1c79356b | 3650 | |
2d21ac55 A |
3651 | if (!nmp) |
3652 | return; | |
3653 | nfs_gss_clnt_rpcdone(req); | |
3654 | lck_mtx_lock(&nmp->nm_lock); | |
b0d623f7 | 3655 | if (!(req->r_flags & R_RESENDQ)) { |
2d21ac55 A |
3656 | TAILQ_INSERT_TAIL(&nmp->nm_resendq, req, r_rchain); |
3657 | req->r_flags |= R_RESENDQ; | |
1c79356b | 3658 | } |
2d21ac55 A |
3659 | nfs_mount_sock_thread_wake(nmp); |
3660 | lck_mtx_unlock(&nmp->nm_lock); | |
1c79356b A |
3661 | } |
3662 | ||
3663 | /* | |
b0d623f7 A |
3664 | * Read directory data into a buffer. |
3665 | * | |
3666 | * Buffer will be filled (unless EOF is hit). | |
3667 | * Buffers after this one may also be completely/partially filled. | |
1c79356b A |
3668 | */ |
3669 | int | |
2d21ac55 | 3670 | nfs_buf_readdir(struct nfsbuf *bp, vfs_context_t ctx) |
1c79356b | 3671 | { |
b0d623f7 A |
3672 | nfsnode_t np = bp->nb_np; |
3673 | struct nfsmount *nmp = NFSTONMP(np); | |
3674 | int error = 0; | |
1c79356b | 3675 | |
b0d623f7 A |
3676 | if (!nmp) |
3677 | return (ENXIO); | |
55e303ae | 3678 | |
b0d623f7 A |
3679 | if (nmp->nm_vers < NFS_VER4) |
3680 | error = nfs3_readdir_rpc(np, bp, ctx); | |
3681 | else | |
3682 | error = nfs4_readdir_rpc(np, bp, ctx); | |
3683 | ||
3684 | if (error && (error != NFSERR_DIRBUFDROPPED)) { | |
55e303ae A |
3685 | SET(bp->nb_flags, NB_ERROR); |
3686 | bp->nb_error = error; | |
1c79356b | 3687 | } |
1c79356b A |
3688 | return (error); |
3689 | } |