1/*
2 * Copyright (c) 2000-2019 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
29/*
30 * Copyright (c) 1989, 1993
31 * The Regents of the University of California. All rights reserved.
32 *
33 * This code is derived from software contributed to Berkeley by
34 * Rick Macklem at The University of Guelph.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 * 1. Redistributions of source code must retain the above copyright
40 * notice, this list of conditions and the following disclaimer.
41 * 2. Redistributions in binary form must reproduce the above copyright
42 * notice, this list of conditions and the following disclaimer in the
43 * documentation and/or other materials provided with the distribution.
44 * 3. All advertising materials mentioning features or use of this software
45 * must display the following acknowledgement:
46 * This product includes software developed by the University of
47 * California, Berkeley and its contributors.
48 * 4. Neither the name of the University nor the names of its contributors
49 * may be used to endorse or promote products derived from this software
50 * without specific prior written permission.
51 *
52 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
53 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
54 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
55 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
56 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
57 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
58 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
59 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
60 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
61 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
62 * SUCH DAMAGE.
63 *
64 * @(#)nfs_vnops.c 8.16 (Berkeley) 5/27/95
65 * FreeBSD-Id: nfs_vnops.c,v 1.72 1997/11/07 09:20:48 phk Exp $
66 */
67
68#include <nfs/nfs_conf.h>
69#if CONFIG_NFS_CLIENT
70
71/*
72 * vnode op calls for Sun NFS version 2 and 3
73 */
74#include <sys/param.h>
75#include <sys/kernel.h>
76#include <sys/systm.h>
77#include <sys/resourcevar.h>
78#include <sys/proc_internal.h>
79#include <sys/kauth.h>
80#include <sys/mount_internal.h>
81#include <sys/malloc.h>
82#include <sys/kpi_mbuf.h>
83#include <sys/conf.h>
84#include <sys/vnode_internal.h>
85#include <sys/dirent.h>
86#include <sys/fcntl.h>
87#include <sys/lockf.h>
88#include <sys/ubc_internal.h>
89#include <sys/attr.h>
90#include <sys/signalvar.h>
91#include <sys/uio_internal.h>
92#include <sys/xattr.h>
93
94#include <vfs/vfs_support.h>
95
96#include <sys/vm.h>
97
98#include <sys/time.h>
99#include <kern/clock.h>
100#include <libkern/OSAtomic.h>
101
102#include <miscfs/fifofs/fifo.h>
103#include <miscfs/specfs/specdev.h>
104
105#include <nfs/rpcv2.h>
106#include <nfs/nfsproto.h>
107#include <nfs/nfs.h>
108#include <nfs/nfsnode.h>
109#include <nfs/nfs_gss.h>
110#include <nfs/nfsmount.h>
111#include <nfs/nfs_lock.h>
112#include <nfs/xdr_subs.h>
113#include <nfs/nfsm_subs.h>
114
115#include <net/if.h>
116#include <netinet/in.h>
117#include <netinet/in_var.h>
118
119#include <vm/vm_kern.h>
120#include <vm/vm_pageout.h>
121
122#include <kern/task.h>
123#include <kern/sched_prim.h>
124
125#define NFS_VNOP_DBG(...) NFS_DBG(NFS_FAC_VNOP, 7, ## __VA_ARGS__)
126#define DEFAULT_READLINK_NOCACHE 0
127
128/*
129 * NFS vnode ops
130 */
131int nfs_vnop_lookup(struct vnop_lookup_args *);
132int nfsspec_vnop_read(struct vnop_read_args *);
133int nfsspec_vnop_write(struct vnop_write_args *);
134int nfsspec_vnop_close(struct vnop_close_args *);
135#if FIFO
136int nfsfifo_vnop_read(struct vnop_read_args *);
137int nfsfifo_vnop_write(struct vnop_write_args *);
138int nfsfifo_vnop_close(struct vnop_close_args *);
139#endif
140int nfs_vnop_ioctl(struct vnop_ioctl_args *);
141int nfs_vnop_select(struct vnop_select_args *);
142int nfs_vnop_setattr(struct vnop_setattr_args *);
143int nfs_vnop_fsync(struct vnop_fsync_args *);
144int nfs_vnop_rename(struct vnop_rename_args *);
145int nfs_vnop_readdir(struct vnop_readdir_args *);
146int nfs_vnop_readlink(struct vnop_readlink_args *);
147int nfs_vnop_pathconf(struct vnop_pathconf_args *);
148int nfs_vnop_pagein(struct vnop_pagein_args *);
149int nfs_vnop_pageout(struct vnop_pageout_args *);
150int nfs_vnop_blktooff(struct vnop_blktooff_args *);
151int nfs_vnop_offtoblk(struct vnop_offtoblk_args *);
152int nfs_vnop_blockmap(struct vnop_blockmap_args *);
153int nfs_vnop_monitor(struct vnop_monitor_args *);
154
155int nfs3_vnop_create(struct vnop_create_args *);
156int nfs3_vnop_mknod(struct vnop_mknod_args *);
157int nfs3_vnop_getattr(struct vnop_getattr_args *);
158int nfs3_vnop_link(struct vnop_link_args *);
159int nfs3_vnop_mkdir(struct vnop_mkdir_args *);
160int nfs3_vnop_rmdir(struct vnop_rmdir_args *);
161int nfs3_vnop_symlink(struct vnop_symlink_args *);
162
163
164vnop_t **nfsv2_vnodeop_p;
165static const struct vnodeopv_entry_desc nfsv2_vnodeop_entries[] = {
166 { .opve_op = &vnop_default_desc, .opve_impl = (vnop_t *)vn_default_error },
167 { .opve_op = &vnop_lookup_desc, .opve_impl = (vnop_t *)nfs_vnop_lookup }, /* lookup */
168 { .opve_op = &vnop_create_desc, .opve_impl = (vnop_t *)nfs3_vnop_create }, /* create */
169 { .opve_op = &vnop_mknod_desc, .opve_impl = (vnop_t *)nfs3_vnop_mknod }, /* mknod */
170 { .opve_op = &vnop_open_desc, .opve_impl = (vnop_t *)nfs_vnop_open }, /* open */
171 { .opve_op = &vnop_close_desc, .opve_impl = (vnop_t *)nfs_vnop_close }, /* close */
172 { .opve_op = &vnop_access_desc, .opve_impl = (vnop_t *)nfs_vnop_access }, /* access */
173 { .opve_op = &vnop_getattr_desc, .opve_impl = (vnop_t *)nfs3_vnop_getattr }, /* getattr */
174 { .opve_op = &vnop_setattr_desc, .opve_impl = (vnop_t *)nfs_vnop_setattr }, /* setattr */
175 { .opve_op = &vnop_read_desc, .opve_impl = (vnop_t *)nfs_vnop_read }, /* read */
176 { .opve_op = &vnop_write_desc, .opve_impl = (vnop_t *)nfs_vnop_write }, /* write */
177 { .opve_op = &vnop_ioctl_desc, .opve_impl = (vnop_t *)nfs_vnop_ioctl }, /* ioctl */
178 { .opve_op = &vnop_select_desc, .opve_impl = (vnop_t *)nfs_vnop_select }, /* select */
179 { .opve_op = &vnop_revoke_desc, .opve_impl = (vnop_t *)nfs_vnop_revoke }, /* revoke */
180 { .opve_op = &vnop_mmap_desc, .opve_impl = (vnop_t *)nfs_vnop_mmap }, /* mmap */
181 { .opve_op = &vnop_mnomap_desc, .opve_impl = (vnop_t *)nfs_vnop_mnomap }, /* mnomap */
182 { .opve_op = &vnop_fsync_desc, .opve_impl = (vnop_t *)nfs_vnop_fsync }, /* fsync */
183 { .opve_op = &vnop_remove_desc, .opve_impl = (vnop_t *)nfs_vnop_remove }, /* remove */
184 { .opve_op = &vnop_link_desc, .opve_impl = (vnop_t *)nfs3_vnop_link }, /* link */
185 { .opve_op = &vnop_rename_desc, .opve_impl = (vnop_t *)nfs_vnop_rename }, /* rename */
186 { .opve_op = &vnop_mkdir_desc, .opve_impl = (vnop_t *)nfs3_vnop_mkdir }, /* mkdir */
187 { .opve_op = &vnop_rmdir_desc, .opve_impl = (vnop_t *)nfs3_vnop_rmdir }, /* rmdir */
188 { .opve_op = &vnop_symlink_desc, .opve_impl = (vnop_t *)nfs3_vnop_symlink }, /* symlink */
189 { .opve_op = &vnop_readdir_desc, .opve_impl = (vnop_t *)nfs_vnop_readdir }, /* readdir */
190 { .opve_op = &vnop_readlink_desc, .opve_impl = (vnop_t *)nfs_vnop_readlink }, /* readlink */
191 { .opve_op = &vnop_inactive_desc, .opve_impl = (vnop_t *)nfs_vnop_inactive }, /* inactive */
192 { .opve_op = &vnop_reclaim_desc, .opve_impl = (vnop_t *)nfs_vnop_reclaim }, /* reclaim */
193 { .opve_op = &vnop_strategy_desc, .opve_impl = (vnop_t *)err_strategy }, /* strategy */
194 { .opve_op = &vnop_pathconf_desc, .opve_impl = (vnop_t *)nfs_vnop_pathconf }, /* pathconf */
195 { .opve_op = &vnop_advlock_desc, .opve_impl = (vnop_t *)nfs_vnop_advlock }, /* advlock */
196 { .opve_op = &vnop_bwrite_desc, .opve_impl = (vnop_t *)err_bwrite }, /* bwrite */
197 { .opve_op = &vnop_pagein_desc, .opve_impl = (vnop_t *)nfs_vnop_pagein }, /* Pagein */
198 { .opve_op = &vnop_pageout_desc, .opve_impl = (vnop_t *)nfs_vnop_pageout }, /* Pageout */
199 { .opve_op = &vnop_copyfile_desc, .opve_impl = (vnop_t *)err_copyfile }, /* Copyfile */
200 { .opve_op = &vnop_blktooff_desc, .opve_impl = (vnop_t *)nfs_vnop_blktooff }, /* blktooff */
201 { .opve_op = &vnop_offtoblk_desc, .opve_impl = (vnop_t *)nfs_vnop_offtoblk }, /* offtoblk */
202 { .opve_op = &vnop_blockmap_desc, .opve_impl = (vnop_t *)nfs_vnop_blockmap }, /* blockmap */
203 { .opve_op = &vnop_monitor_desc, .opve_impl = (vnop_t *)nfs_vnop_monitor }, /* monitor */
204 { .opve_op = NULL, .opve_impl = NULL }
205};
206const struct vnodeopv_desc nfsv2_vnodeop_opv_desc =
207{ &nfsv2_vnodeop_p, nfsv2_vnodeop_entries };
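/*
 * Each vnodeopv_desc above pairs an entry table with the pointer (here
 * nfsv2_vnodeop_p) that the VFS layer fills in with the generated dispatch
 * vector when it processes these descriptors at initialization; the actual
 * wiring lives in the VFS code, not in this file.  The same pattern repeats
 * below for the NFSv4, special-device, and FIFO tables.
 */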
208
209
210#if CONFIG_NFS4
211vnop_t **nfsv4_vnodeop_p;
212static const struct vnodeopv_entry_desc nfsv4_vnodeop_entries[] = {
213 { &vnop_default_desc, (vnop_t *)vn_default_error },
214 { &vnop_lookup_desc, (vnop_t *)nfs_vnop_lookup }, /* lookup */
215 { &vnop_create_desc, (vnop_t *)nfs4_vnop_create }, /* create */
216 { &vnop_mknod_desc, (vnop_t *)nfs4_vnop_mknod }, /* mknod */
217 { &vnop_open_desc, (vnop_t *)nfs_vnop_open }, /* open */
218 { &vnop_close_desc, (vnop_t *)nfs_vnop_close }, /* close */
219 { &vnop_access_desc, (vnop_t *)nfs_vnop_access }, /* access */
220 { &vnop_getattr_desc, (vnop_t *)nfs4_vnop_getattr }, /* getattr */
221 { &vnop_setattr_desc, (vnop_t *)nfs_vnop_setattr }, /* setattr */
222 { &vnop_read_desc, (vnop_t *)nfs_vnop_read }, /* read */
223 { &vnop_write_desc, (vnop_t *)nfs_vnop_write }, /* write */
224 { &vnop_ioctl_desc, (vnop_t *)nfs_vnop_ioctl }, /* ioctl */
225 { &vnop_select_desc, (vnop_t *)nfs_vnop_select }, /* select */
226 { &vnop_revoke_desc, (vnop_t *)nfs_vnop_revoke }, /* revoke */
227 { &vnop_mmap_desc, (vnop_t *)nfs_vnop_mmap }, /* mmap */
228 { &vnop_mnomap_desc, (vnop_t *)nfs_vnop_mnomap }, /* mnomap */
229 { &vnop_fsync_desc, (vnop_t *)nfs_vnop_fsync }, /* fsync */
230 { &vnop_remove_desc, (vnop_t *)nfs_vnop_remove }, /* remove */
231 { &vnop_link_desc, (vnop_t *)nfs4_vnop_link }, /* link */
232 { &vnop_rename_desc, (vnop_t *)nfs_vnop_rename }, /* rename */
233 { &vnop_mkdir_desc, (vnop_t *)nfs4_vnop_mkdir }, /* mkdir */
234 { &vnop_rmdir_desc, (vnop_t *)nfs4_vnop_rmdir }, /* rmdir */
235 { &vnop_symlink_desc, (vnop_t *)nfs4_vnop_symlink }, /* symlink */
236 { &vnop_readdir_desc, (vnop_t *)nfs_vnop_readdir }, /* readdir */
237 { &vnop_readlink_desc, (vnop_t *)nfs_vnop_readlink }, /* readlink */
238 { &vnop_inactive_desc, (vnop_t *)nfs_vnop_inactive }, /* inactive */
239 { &vnop_reclaim_desc, (vnop_t *)nfs_vnop_reclaim }, /* reclaim */
240 { &vnop_strategy_desc, (vnop_t *)err_strategy }, /* strategy */
241 { &vnop_pathconf_desc, (vnop_t *)nfs_vnop_pathconf }, /* pathconf */
242 { &vnop_advlock_desc, (vnop_t *)nfs_vnop_advlock }, /* advlock */
243 { &vnop_bwrite_desc, (vnop_t *)err_bwrite }, /* bwrite */
244 { &vnop_pagein_desc, (vnop_t *)nfs_vnop_pagein }, /* Pagein */
245 { &vnop_pageout_desc, (vnop_t *)nfs_vnop_pageout }, /* Pageout */
246 { &vnop_copyfile_desc, (vnop_t *)err_copyfile }, /* Copyfile */
247 { &vnop_blktooff_desc, (vnop_t *)nfs_vnop_blktooff }, /* blktooff */
248 { &vnop_offtoblk_desc, (vnop_t *)nfs_vnop_offtoblk }, /* offtoblk */
249 { &vnop_blockmap_desc, (vnop_t *)nfs_vnop_blockmap }, /* blockmap */
250 { &vnop_getxattr_desc, (vnop_t *)nfs4_vnop_getxattr }, /* getxattr */
251 { &vnop_setxattr_desc, (vnop_t *)nfs4_vnop_setxattr }, /* setxattr */
252 { &vnop_removexattr_desc, (vnop_t *)nfs4_vnop_removexattr },/* removexattr */
253 { &vnop_listxattr_desc, (vnop_t *)nfs4_vnop_listxattr },/* listxattr */
254#if NAMEDSTREAMS
255 { &vnop_getnamedstream_desc, (vnop_t *)nfs4_vnop_getnamedstream }, /* getnamedstream */
256 { &vnop_makenamedstream_desc, (vnop_t *)nfs4_vnop_makenamedstream }, /* makenamedstream */
257 { &vnop_removenamedstream_desc, (vnop_t *)nfs4_vnop_removenamedstream },/* removenamedstream */
258#endif
259 { &vnop_monitor_desc, (vnop_t *)nfs_vnop_monitor }, /* monitor */
260 { NULL, NULL }
261};
262const struct vnodeopv_desc nfsv4_vnodeop_opv_desc =
263{ &nfsv4_vnodeop_p, nfsv4_vnodeop_entries };
264#endif
265
266/*
267 * Special device vnode ops
268 */
269vnop_t **spec_nfsv2nodeop_p;
270static const struct vnodeopv_entry_desc spec_nfsv2nodeop_entries[] = {
271 { &vnop_default_desc, (vnop_t *)vn_default_error },
272 { &vnop_lookup_desc, (vnop_t *)spec_lookup }, /* lookup */
273 { &vnop_create_desc, (vnop_t *)spec_create }, /* create */
274 { &vnop_mknod_desc, (vnop_t *)spec_mknod }, /* mknod */
275 { &vnop_open_desc, (vnop_t *)spec_open }, /* open */
276 { &vnop_close_desc, (vnop_t *)nfsspec_vnop_close }, /* close */
277 { &vnop_getattr_desc, (vnop_t *)nfs3_vnop_getattr }, /* getattr */
278 { &vnop_setattr_desc, (vnop_t *)nfs_vnop_setattr }, /* setattr */
279 { &vnop_read_desc, (vnop_t *)nfsspec_vnop_read }, /* read */
280 { &vnop_write_desc, (vnop_t *)nfsspec_vnop_write }, /* write */
281 { &vnop_ioctl_desc, (vnop_t *)spec_ioctl }, /* ioctl */
282 { &vnop_select_desc, (vnop_t *)spec_select }, /* select */
283 { &vnop_revoke_desc, (vnop_t *)spec_revoke }, /* revoke */
284 { &vnop_mmap_desc, (vnop_t *)spec_mmap }, /* mmap */
285 { &vnop_fsync_desc, (vnop_t *)nfs_vnop_fsync }, /* fsync */
286 { &vnop_remove_desc, (vnop_t *)spec_remove }, /* remove */
287 { &vnop_link_desc, (vnop_t *)spec_link }, /* link */
288 { &vnop_rename_desc, (vnop_t *)spec_rename }, /* rename */
289 { &vnop_mkdir_desc, (vnop_t *)spec_mkdir }, /* mkdir */
290 { &vnop_rmdir_desc, (vnop_t *)spec_rmdir }, /* rmdir */
291 { &vnop_symlink_desc, (vnop_t *)spec_symlink }, /* symlink */
292 { &vnop_readdir_desc, (vnop_t *)spec_readdir }, /* readdir */
293 { &vnop_readlink_desc, (vnop_t *)spec_readlink }, /* readlink */
294 { &vnop_inactive_desc, (vnop_t *)nfs_vnop_inactive }, /* inactive */
295 { &vnop_reclaim_desc, (vnop_t *)nfs_vnop_reclaim }, /* reclaim */
296 { &vnop_strategy_desc, (vnop_t *)spec_strategy }, /* strategy */
297 { &vnop_pathconf_desc, (vnop_t *)spec_pathconf }, /* pathconf */
298 { &vnop_advlock_desc, (vnop_t *)spec_advlock }, /* advlock */
299 { &vnop_bwrite_desc, (vnop_t *)vn_bwrite }, /* bwrite */
300 { &vnop_pagein_desc, (vnop_t *)nfs_vnop_pagein }, /* Pagein */
301 { &vnop_pageout_desc, (vnop_t *)nfs_vnop_pageout }, /* Pageout */
302 { &vnop_blktooff_desc, (vnop_t *)nfs_vnop_blktooff }, /* blktooff */
303 { &vnop_offtoblk_desc, (vnop_t *)nfs_vnop_offtoblk }, /* offtoblk */
304 { &vnop_blockmap_desc, (vnop_t *)nfs_vnop_blockmap }, /* blockmap */
305 { &vnop_monitor_desc, (vnop_t *)nfs_vnop_monitor }, /* monitor */
306 { NULL, NULL }
307};
308const struct vnodeopv_desc spec_nfsv2nodeop_opv_desc =
309{ &spec_nfsv2nodeop_p, spec_nfsv2nodeop_entries };
310#if CONFIG_NFS4
311vnop_t **spec_nfsv4nodeop_p;
312static const struct vnodeopv_entry_desc spec_nfsv4nodeop_entries[] = {
313 { &vnop_default_desc, (vnop_t *)vn_default_error },
314 { &vnop_lookup_desc, (vnop_t *)spec_lookup }, /* lookup */
315 { &vnop_create_desc, (vnop_t *)spec_create }, /* create */
316 { &vnop_mknod_desc, (vnop_t *)spec_mknod }, /* mknod */
317 { &vnop_open_desc, (vnop_t *)spec_open }, /* open */
318 { &vnop_close_desc, (vnop_t *)nfsspec_vnop_close }, /* close */
319 { &vnop_getattr_desc, (vnop_t *)nfs4_vnop_getattr }, /* getattr */
320 { &vnop_setattr_desc, (vnop_t *)nfs_vnop_setattr }, /* setattr */
321 { &vnop_read_desc, (vnop_t *)nfsspec_vnop_read }, /* read */
322 { &vnop_write_desc, (vnop_t *)nfsspec_vnop_write }, /* write */
323 { &vnop_ioctl_desc, (vnop_t *)spec_ioctl }, /* ioctl */
324 { &vnop_select_desc, (vnop_t *)spec_select }, /* select */
325 { &vnop_revoke_desc, (vnop_t *)spec_revoke }, /* revoke */
326 { &vnop_mmap_desc, (vnop_t *)spec_mmap }, /* mmap */
327 { &vnop_fsync_desc, (vnop_t *)nfs_vnop_fsync }, /* fsync */
328 { &vnop_remove_desc, (vnop_t *)spec_remove }, /* remove */
329 { &vnop_link_desc, (vnop_t *)spec_link }, /* link */
330 { &vnop_rename_desc, (vnop_t *)spec_rename }, /* rename */
331 { &vnop_mkdir_desc, (vnop_t *)spec_mkdir }, /* mkdir */
332 { &vnop_rmdir_desc, (vnop_t *)spec_rmdir }, /* rmdir */
333 { &vnop_symlink_desc, (vnop_t *)spec_symlink }, /* symlink */
334 { &vnop_readdir_desc, (vnop_t *)spec_readdir }, /* readdir */
335 { &vnop_readlink_desc, (vnop_t *)spec_readlink }, /* readlink */
336 { &vnop_inactive_desc, (vnop_t *)nfs_vnop_inactive }, /* inactive */
337 { &vnop_reclaim_desc, (vnop_t *)nfs_vnop_reclaim }, /* reclaim */
338 { &vnop_strategy_desc, (vnop_t *)spec_strategy }, /* strategy */
339 { &vnop_pathconf_desc, (vnop_t *)spec_pathconf }, /* pathconf */
340 { &vnop_advlock_desc, (vnop_t *)spec_advlock }, /* advlock */
341 { &vnop_bwrite_desc, (vnop_t *)vn_bwrite }, /* bwrite */
342 { &vnop_pagein_desc, (vnop_t *)nfs_vnop_pagein }, /* Pagein */
343 { &vnop_pageout_desc, (vnop_t *)nfs_vnop_pageout }, /* Pageout */
344 { &vnop_blktooff_desc, (vnop_t *)nfs_vnop_blktooff }, /* blktooff */
345 { &vnop_offtoblk_desc, (vnop_t *)nfs_vnop_offtoblk }, /* offtoblk */
346 { &vnop_blockmap_desc, (vnop_t *)nfs_vnop_blockmap }, /* blockmap */
347 { &vnop_getxattr_desc, (vnop_t *)nfs4_vnop_getxattr }, /* getxattr */
348 { &vnop_setxattr_desc, (vnop_t *)nfs4_vnop_setxattr }, /* setxattr */
349 { &vnop_removexattr_desc, (vnop_t *)nfs4_vnop_removexattr },/* removexattr */
350 { &vnop_listxattr_desc, (vnop_t *)nfs4_vnop_listxattr },/* listxattr */
351#if NAMEDSTREAMS
352 { &vnop_getnamedstream_desc, (vnop_t *)nfs4_vnop_getnamedstream }, /* getnamedstream */
353 { &vnop_makenamedstream_desc, (vnop_t *)nfs4_vnop_makenamedstream }, /* makenamedstream */
354 { &vnop_removenamedstream_desc, (vnop_t *)nfs4_vnop_removenamedstream },/* removenamedstream */
355#endif
356 { &vnop_monitor_desc, (vnop_t *)nfs_vnop_monitor }, /* monitor */
357 { NULL, NULL }
358};
359const struct vnodeopv_desc spec_nfsv4nodeop_opv_desc =
360{ &spec_nfsv4nodeop_p, spec_nfsv4nodeop_entries };
361#endif /* CONFIG_NFS4 */
362
363#if FIFO
364vnop_t **fifo_nfsv2nodeop_p;
365static const struct vnodeopv_entry_desc fifo_nfsv2nodeop_entries[] = {
366 { &vnop_default_desc, (vnop_t *)vn_default_error },
367 { &vnop_lookup_desc, (vnop_t *)fifo_lookup }, /* lookup */
368 { &vnop_create_desc, (vnop_t *)fifo_create }, /* create */
369 { &vnop_mknod_desc, (vnop_t *)fifo_mknod }, /* mknod */
370 { &vnop_open_desc, (vnop_t *)fifo_open }, /* open */
371 { &vnop_close_desc, (vnop_t *)nfsfifo_vnop_close }, /* close */
372 { &vnop_getattr_desc, (vnop_t *)nfs3_vnop_getattr }, /* getattr */
373 { &vnop_setattr_desc, (vnop_t *)nfs_vnop_setattr }, /* setattr */
374 { &vnop_read_desc, (vnop_t *)nfsfifo_vnop_read }, /* read */
375 { &vnop_write_desc, (vnop_t *)nfsfifo_vnop_write }, /* write */
376 { &vnop_ioctl_desc, (vnop_t *)fifo_ioctl }, /* ioctl */
377 { &vnop_select_desc, (vnop_t *)fifo_select }, /* select */
378 { &vnop_revoke_desc, (vnop_t *)fifo_revoke }, /* revoke */
379 { &vnop_mmap_desc, (vnop_t *)fifo_mmap }, /* mmap */
380 { &vnop_fsync_desc, (vnop_t *)nfs_vnop_fsync }, /* fsync */
381 { &vnop_remove_desc, (vnop_t *)fifo_remove }, /* remove */
382 { &vnop_link_desc, (vnop_t *)fifo_link }, /* link */
383 { &vnop_rename_desc, (vnop_t *)fifo_rename }, /* rename */
384 { &vnop_mkdir_desc, (vnop_t *)fifo_mkdir }, /* mkdir */
385 { &vnop_rmdir_desc, (vnop_t *)fifo_rmdir }, /* rmdir */
386 { &vnop_symlink_desc, (vnop_t *)fifo_symlink }, /* symlink */
387 { &vnop_readdir_desc, (vnop_t *)fifo_readdir }, /* readdir */
388 { &vnop_readlink_desc, (vnop_t *)fifo_readlink }, /* readlink */
389 { &vnop_inactive_desc, (vnop_t *)nfs_vnop_inactive }, /* inactive */
390 { &vnop_reclaim_desc, (vnop_t *)nfs_vnop_reclaim }, /* reclaim */
391 { &vnop_strategy_desc, (vnop_t *)fifo_strategy }, /* strategy */
392 { &vnop_pathconf_desc, (vnop_t *)fifo_pathconf }, /* pathconf */
393 { &vnop_advlock_desc, (vnop_t *)fifo_advlock }, /* advlock */
394 { &vnop_bwrite_desc, (vnop_t *)vn_bwrite }, /* bwrite */
395 { &vnop_pagein_desc, (vnop_t *)nfs_vnop_pagein }, /* Pagein */
396 { &vnop_pageout_desc, (vnop_t *)nfs_vnop_pageout }, /* Pageout */
397 { &vnop_blktooff_desc, (vnop_t *)nfs_vnop_blktooff }, /* blktooff */
398 { &vnop_offtoblk_desc, (vnop_t *)nfs_vnop_offtoblk }, /* offtoblk */
399 { &vnop_blockmap_desc, (vnop_t *)nfs_vnop_blockmap }, /* blockmap */
400 { &vnop_monitor_desc, (vnop_t *)nfs_vnop_monitor }, /* monitor */
401 { NULL, NULL }
402};
403const struct vnodeopv_desc fifo_nfsv2nodeop_opv_desc =
404{ &fifo_nfsv2nodeop_p, fifo_nfsv2nodeop_entries };
405#endif
406
407#if CONFIG_NFS4
408#if FIFO
409vnop_t **fifo_nfsv4nodeop_p;
410static const struct vnodeopv_entry_desc fifo_nfsv4nodeop_entries[] = {
411 { &vnop_default_desc, (vnop_t *)vn_default_error },
412 { &vnop_lookup_desc, (vnop_t *)fifo_lookup }, /* lookup */
413 { &vnop_create_desc, (vnop_t *)fifo_create }, /* create */
414 { &vnop_mknod_desc, (vnop_t *)fifo_mknod }, /* mknod */
415 { &vnop_open_desc, (vnop_t *)fifo_open }, /* open */
416 { &vnop_close_desc, (vnop_t *)nfsfifo_vnop_close }, /* close */
417 { &vnop_getattr_desc, (vnop_t *)nfs4_vnop_getattr }, /* getattr */
418 { &vnop_setattr_desc, (vnop_t *)nfs_vnop_setattr }, /* setattr */
419 { &vnop_read_desc, (vnop_t *)nfsfifo_vnop_read }, /* read */
420 { &vnop_write_desc, (vnop_t *)nfsfifo_vnop_write }, /* write */
421 { &vnop_ioctl_desc, (vnop_t *)fifo_ioctl }, /* ioctl */
422 { &vnop_select_desc, (vnop_t *)fifo_select }, /* select */
423 { &vnop_revoke_desc, (vnop_t *)fifo_revoke }, /* revoke */
424 { &vnop_mmap_desc, (vnop_t *)fifo_mmap }, /* mmap */
425 { &vnop_fsync_desc, (vnop_t *)nfs_vnop_fsync }, /* fsync */
426 { &vnop_remove_desc, (vnop_t *)fifo_remove }, /* remove */
427 { &vnop_link_desc, (vnop_t *)fifo_link }, /* link */
428 { &vnop_rename_desc, (vnop_t *)fifo_rename }, /* rename */
429 { &vnop_mkdir_desc, (vnop_t *)fifo_mkdir }, /* mkdir */
430 { &vnop_rmdir_desc, (vnop_t *)fifo_rmdir }, /* rmdir */
431 { &vnop_symlink_desc, (vnop_t *)fifo_symlink }, /* symlink */
432 { &vnop_readdir_desc, (vnop_t *)fifo_readdir }, /* readdir */
433 { &vnop_readlink_desc, (vnop_t *)fifo_readlink }, /* readlink */
434 { &vnop_inactive_desc, (vnop_t *)nfs_vnop_inactive }, /* inactive */
435 { &vnop_reclaim_desc, (vnop_t *)nfs_vnop_reclaim }, /* reclaim */
436 { &vnop_strategy_desc, (vnop_t *)fifo_strategy }, /* strategy */
437 { &vnop_pathconf_desc, (vnop_t *)fifo_pathconf }, /* pathconf */
438 { &vnop_advlock_desc, (vnop_t *)fifo_advlock }, /* advlock */
439 { &vnop_bwrite_desc, (vnop_t *)vn_bwrite }, /* bwrite */
440 { &vnop_pagein_desc, (vnop_t *)nfs_vnop_pagein }, /* Pagein */
441 { &vnop_pageout_desc, (vnop_t *)nfs_vnop_pageout }, /* Pageout */
442 { &vnop_blktooff_desc, (vnop_t *)nfs_vnop_blktooff }, /* blktooff */
443 { &vnop_offtoblk_desc, (vnop_t *)nfs_vnop_offtoblk }, /* offtoblk */
444 { &vnop_blockmap_desc, (vnop_t *)nfs_vnop_blockmap }, /* blockmap */
445 { &vnop_getxattr_desc, (vnop_t *)nfs4_vnop_getxattr }, /* getxattr */
446 { &vnop_setxattr_desc, (vnop_t *)nfs4_vnop_setxattr }, /* setxattr */
447 { &vnop_removexattr_desc, (vnop_t *)nfs4_vnop_removexattr },/* removexattr */
448 { &vnop_listxattr_desc, (vnop_t *)nfs4_vnop_listxattr },/* listxattr */
449#if NAMEDSTREAMS
450 { &vnop_getnamedstream_desc, (vnop_t *)nfs4_vnop_getnamedstream }, /* getnamedstream */
451 { &vnop_makenamedstream_desc, (vnop_t *)nfs4_vnop_makenamedstream }, /* makenamedstream */
452 { &vnop_removenamedstream_desc, (vnop_t *)nfs4_vnop_removenamedstream },/* removenamedstream */
453#endif
454 { &vnop_monitor_desc, (vnop_t *)nfs_vnop_monitor }, /* monitor */
455 { NULL, NULL }
456};
457const struct vnodeopv_desc fifo_nfsv4nodeop_opv_desc =
458{ &fifo_nfsv4nodeop_p, fifo_nfsv4nodeop_entries };
459#endif /* FIFO */
460#endif /* CONFIG_NFS4 */
461
462int nfs_sillyrename(nfsnode_t, nfsnode_t, struct componentname *, vfs_context_t);
463int nfs_getattr_internal(nfsnode_t, struct nfs_vattr *, vfs_context_t, int);
464int nfs_refresh_fh(nfsnode_t, vfs_context_t);
465
466
467/*
468 * Update nfsnode attributes to avoid extra getattr calls for each direntry.
 469 * This function should be called only if the RDIRPLUS flag is enabled.
470 */
471void
472nfs_rdirplus_update_node_attrs(nfsnode_t dnp, struct direntry *dp, fhandle_t *fhp, struct nfs_vattr *nvattrp, uint64_t *savedxidp)
473{
474 nfsnode_t np;
475 struct componentname cn;
476 int isdot = (dp->d_namlen == 1) && (dp->d_name[0] == '.');
477 int isdotdot = (dp->d_namlen == 2) && (dp->d_name[0] == '.') && (dp->d_name[1] == '.');
478
479 if (isdot || isdotdot) {
480 return;
481 }
482
483 np = NULL;
484 bzero(&cn, sizeof(cn));
485 cn.cn_nameptr = dp->d_name;
486 cn.cn_namelen = dp->d_namlen;
487 cn.cn_nameiop = LOOKUP;
488
489 nfs_nget(NFSTOMP(dnp), dnp, &cn, fhp->fh_data, fhp->fh_len, nvattrp, savedxidp, RPCAUTH_UNKNOWN, NG_NOCREATE, &np);
490 if (np) {
491 nfs_node_unlock(np);
492 vnode_put(NFSTOV(np));
493 }
494}
495
496/*
497 * Find the slot in the access cache for this UID.
498 * If adding and no existing slot is found, reuse slots in FIFO order.
499 * The index of the next slot to use is kept in the last entry of the n_access array.
500 */
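/*
 * Illustrative layout (assuming, for the sketch, NFS_ACCESS_CACHE_SIZE == 3):
 *
 *   n_accessuid[0..2]   UID that owns each cache slot
 *   n_access[0..2]      cached NFS_ACCESS_* bits for that UID
 *   n_access[3]         index of the next slot to recycle (FIFO)
 *
 * A lookup for an uncached UID with 'add' set claims the slot named by
 * n_access[NFS_ACCESS_CACHE_SIZE] and advances that index modulo the cache
 * size; without 'add' the lookup simply returns -1.
 */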
501int
502nfs_node_access_slot(nfsnode_t np, uid_t uid, int add)
503{
504 int slot;
505
506 for (slot = 0; slot < NFS_ACCESS_CACHE_SIZE; slot++) {
507 if (np->n_accessuid[slot] == uid) {
508 break;
509 }
510 }
511 if (slot == NFS_ACCESS_CACHE_SIZE) {
512 if (!add) {
513 return -1;
514 }
515 slot = np->n_access[NFS_ACCESS_CACHE_SIZE];
516 np->n_access[NFS_ACCESS_CACHE_SIZE] = (slot + 1) % NFS_ACCESS_CACHE_SIZE;
517 }
518 return slot;
519}
520
521int
522nfs3_access_rpc(nfsnode_t np, u_int32_t *access, int rpcflags, vfs_context_t ctx)
523{
524 int error = 0, lockerror = ENOENT, status, slot;
525 uint32_t access_result = 0;
526 u_int64_t xid;
527 struct nfsm_chain nmreq, nmrep;
528 struct nfsmount *nmp;
529 struct timeval now;
530 uid_t uid;
531
532 nfsm_chain_null(&nmreq);
533 nfsm_chain_null(&nmrep);
534
535 nfsm_chain_build_alloc_init(error, &nmreq, NFSX_FH(NFS_VER3) + NFSX_UNSIGNED);
536 nfsm_chain_add_fh(error, &nmreq, NFS_VER3, np->n_fhp, np->n_fhsize);
537 nfsm_chain_add_32(error, &nmreq, *access);
538 nfsm_chain_build_done(error, &nmreq);
539 nfsmout_if(error);
540 error = nfs_request2(np, NULL, &nmreq, NFSPROC_ACCESS,
541 vfs_context_thread(ctx), vfs_context_ucred(ctx),
542 NULL, rpcflags, &nmrep, &xid, &status);
543 if ((lockerror = nfs_node_lock(np))) {
544 error = lockerror;
545 }
546 nfsm_chain_postop_attr_update(error, &nmrep, np, &xid);
547 if (!error) {
548 error = status;
549 }
550 nfsm_chain_get_32(error, &nmrep, access_result);
551 nfsmout_if(error);
552
553 /* XXXab do we really need mount here, also why are we doing access cache management here? */
554 nmp = NFSTONMP(np);
555 if (nfs_mount_gone(nmp)) {
556 error = ENXIO;
557 }
558 nfsmout_if(error);
559
560#if CONFIG_NFS_GSS
561 if (auth_is_kerberized(np->n_auth) || auth_is_kerberized(nmp->nm_auth)) {
562 uid = nfs_cred_getasid2uid(vfs_context_ucred(ctx));
563 } else {
564 uid = kauth_cred_getuid(vfs_context_ucred(ctx));
565 }
566#else
567 uid = kauth_cred_getuid(vfs_context_ucred(ctx));
568#endif /* CONFIG_NFS_GSS */
569 slot = nfs_node_access_slot(np, uid, 1);
570 np->n_accessuid[slot] = uid;
571 microuptime(&now);
572 np->n_accessstamp[slot] = now.tv_sec;
573 np->n_access[slot] = access_result;
574
575 /*
576 * If we asked for DELETE but didn't get it, the server
577 * may simply not support returning that bit (possible
578 * on UNIX systems). So, we'll assume that it is OK,
579 * and just let any subsequent delete action fail if it
580 * really isn't deletable.
581 */
582 if ((*access & NFS_ACCESS_DELETE) &&
583 !(np->n_access[slot] & NFS_ACCESS_DELETE)) {
584 np->n_access[slot] |= NFS_ACCESS_DELETE;
585 }
586 /* ".zfs" subdirectories may erroneously give a denied answer for add/remove */
587 if (nfs_access_dotzfs && (np->n_flag & NISDOTZFSCHILD)) {
588 np->n_access[slot] |= (NFS_ACCESS_MODIFY | NFS_ACCESS_EXTEND | NFS_ACCESS_DELETE);
589 }
590 /* pass back the access returned with this request */
591 *access = np->n_access[slot];
592nfsmout:
593 if (!lockerror) {
594 nfs_node_unlock(np);
595 }
596 nfsm_chain_cleanup(&nmreq);
597 nfsm_chain_cleanup(&nmrep);
598 return error;
599}
600
601
602/*
603 * NFS access vnode op.
604 * For NFS version 2, just return ok. File accesses may fail later.
605 * For NFS version 3+, use the access RPC to check accessibility. If file
606 * permissions are changed on the server, accesses might still fail later.
607 */
608int
609nfs_vnop_access(
610 struct vnop_access_args /* {
611 * struct vnodeop_desc *a_desc;
612 * vnode_t a_vp;
613 * int a_action;
614 * vfs_context_t a_context;
615 * } */*ap)
616{
617 vfs_context_t ctx = ap->a_context;
618 vnode_t vp = ap->a_vp;
619 int error = 0, slot, dorpc, rpcflags = 0;
620 u_int32_t access, waccess;
621 nfsnode_t np = VTONFS(vp);
622 struct nfsmount *nmp;
623 int nfsvers;
624 struct timeval now;
625 uid_t uid;
626
627 nmp = VTONMP(vp);
628 if (nfs_mount_gone(nmp)) {
629 return ENXIO;
630 }
631 nfsvers = nmp->nm_vers;
632
633
634 if (nfsvers == NFS_VER2 || NMFLAG(nmp, NOOPAQUE_AUTH)) {
635 if ((ap->a_action & KAUTH_VNODE_WRITE_RIGHTS) &&
636 vfs_isrdonly(vnode_mount(vp))) {
637 return EROFS;
638 }
639 return 0;
640 }
641
642 /*
643 * For NFS v3, do an access rpc, otherwise you are stuck emulating
644 * ufs_access() locally using the vattr. This may not be correct,
645 * since the server may apply other access criteria such as
646 * client uid-->server uid mapping that we do not know about, but
647 * this is better than just returning anything that is lying about
648 * in the cache.
649 */
650
651 /*
652 * Convert KAUTH primitives to NFS access rights.
653 */
654 access = 0;
655 if (vnode_isdir(vp)) {
656 /* directory */
657 if (ap->a_action &
658 (KAUTH_VNODE_LIST_DIRECTORY |
659 KAUTH_VNODE_READ_EXTATTRIBUTES)) {
660 access |= NFS_ACCESS_READ;
661 }
662 if (ap->a_action & KAUTH_VNODE_SEARCH) {
663 access |= NFS_ACCESS_LOOKUP;
664 }
665 if (ap->a_action &
666 (KAUTH_VNODE_ADD_FILE |
667 KAUTH_VNODE_ADD_SUBDIRECTORY)) {
668 access |= NFS_ACCESS_MODIFY | NFS_ACCESS_EXTEND;
669 }
670 if (ap->a_action & KAUTH_VNODE_DELETE_CHILD) {
671 access |= NFS_ACCESS_MODIFY;
672 }
673 } else {
674 /* file */
675 if (ap->a_action &
676 (KAUTH_VNODE_READ_DATA |
677 KAUTH_VNODE_READ_EXTATTRIBUTES)) {
678 access |= NFS_ACCESS_READ;
679 }
680 if (ap->a_action & KAUTH_VNODE_WRITE_DATA) {
681 access |= NFS_ACCESS_MODIFY | NFS_ACCESS_EXTEND;
682 }
683 if (ap->a_action & KAUTH_VNODE_APPEND_DATA) {
684 access |= NFS_ACCESS_EXTEND;
685 }
686 if (ap->a_action & KAUTH_VNODE_EXECUTE) {
687 access |= NFS_ACCESS_EXECUTE;
688 }
689 }
690 /* common */
691 if (ap->a_action & KAUTH_VNODE_DELETE) {
692 access |= NFS_ACCESS_DELETE;
693 }
694 if (ap->a_action &
695 (KAUTH_VNODE_WRITE_ATTRIBUTES |
696 KAUTH_VNODE_WRITE_EXTATTRIBUTES |
697 KAUTH_VNODE_WRITE_SECURITY)) {
698 access |= NFS_ACCESS_MODIFY;
699 }
700 /* XXX this is pretty dubious */
701 if (ap->a_action & KAUTH_VNODE_CHANGE_OWNER) {
702 access |= NFS_ACCESS_MODIFY;
703 }
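	/*
	 * Note: KAUTH rights with no NFS_ACCESS_* equivalent (for example
	 * KAUTH_VNODE_READ_ATTRIBUTES) leave "access" at zero.  In that case
	 * the access == 0 branch below skips the ACCESS RPC entirely and the
	 * request is granted, since there is nothing NFS-visible to check.
	 */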
704
705 /* if caching, always ask for every right */
706 if (nfs_access_cache_timeout > 0) {
707 waccess = NFS_ACCESS_READ | NFS_ACCESS_MODIFY |
708 NFS_ACCESS_EXTEND | NFS_ACCESS_EXECUTE |
709 NFS_ACCESS_DELETE | NFS_ACCESS_LOOKUP;
710 } else {
711 waccess = access;
712 }
713
714 if ((error = nfs_node_lock(np))) {
715 return error;
716 }
717
718 /*
719 * Does our cached result allow us to give a definite yes to
720 * this request?
721 */
722#if CONFIG_NFS_GSS
723 if (auth_is_kerberized(np->n_auth) || auth_is_kerberized(nmp->nm_auth)) {
724 uid = nfs_cred_getasid2uid(vfs_context_ucred(ctx));
725 } else {
726 uid = kauth_cred_getuid(vfs_context_ucred(ctx));
727 }
728#else
729 uid = kauth_cred_getuid(vfs_context_ucred(ctx));
730#endif /* CONFIG_NFS_GSS */
731 slot = nfs_node_access_slot(np, uid, 0);
732 dorpc = 1;
733 if (access == 0) {
734 /* not asking for any rights understood by NFS, so don't bother doing an RPC */
735 /* OSAddAtomic(1, &nfsstats.accesscache_hits); */
736 dorpc = 0;
737 waccess = 0;
738 } else if (NACCESSVALID(np, slot)) {
739 microuptime(&now);
740 if (((now.tv_sec < (np->n_accessstamp[slot] + nfs_access_cache_timeout)) &&
741 ((np->n_access[slot] & access) == access)) || nfs_use_cache(nmp)) {
742 /* OSAddAtomic(1, &nfsstats.accesscache_hits); */
743 dorpc = 0;
744 waccess = np->n_access[slot];
745 }
746 }
747 nfs_node_unlock(np);
748 if (dorpc) {
749 /* Either a no, or a don't know. Go to the wire. */
750 /* OSAddAtomic(1, &nfsstats.accesscache_misses); */
751
752 /*
753 * Allow an access call to timeout if we have it cached
754 * so we won't hang if the server isn't responding.
755 */
756 if (NACCESSVALID(np, slot)) {
757 rpcflags |= R_SOFT;
758 }
759
760 error = nmp->nm_funcs->nf_access_rpc(np, &waccess, rpcflags, ctx);
761
762 /*
763 * If the server didn't respond return the cached access.
764 */
765 if ((error == ETIMEDOUT) && (rpcflags & R_SOFT)) {
766 error = 0;
767 waccess = np->n_access[slot];
768 }
769 }
770 if (!error && ((waccess & access) != access)) {
771 error = EACCES;
772 }
773
774 return error;
775}
776
777
778/*
779 * NFS open vnode op
780 *
781 * Perform various update/invalidation checks and then add the
782 * open to the node. Regular files will have an open file structure
783 * on the node and, for NFSv4, perform an OPEN request on the server.
784 */
785int
786nfs_vnop_open(
787 struct vnop_open_args /* {
788 * struct vnodeop_desc *a_desc;
789 * vnode_t a_vp;
790 * int a_mode;
791 * vfs_context_t a_context;
792 * } */*ap)
793{
794 vfs_context_t ctx = ap->a_context;
795 vnode_t vp = ap->a_vp;
796 nfsnode_t np = VTONFS(vp);
797 struct nfsmount *nmp;
798 int error, accessMode, denyMode, opened = 0;
799 struct nfs_open_owner *noop = NULL;
800 struct nfs_open_file *nofp = NULL;
801 enum vtype vtype;
802
803 if (!(ap->a_mode & (FREAD | FWRITE))) {
804 return EINVAL;
805 }
806
807 nmp = VTONMP(vp);
808 if (nfs_mount_gone(nmp)) {
809 return ENXIO;
810 }
811 if (np->n_flag & NREVOKE) {
812 return EIO;
813 }
814
815 vtype = vnode_vtype(vp);
816 if ((vtype != VREG) && (vtype != VDIR) && (vtype != VLNK)) {
817 return EACCES;
818 }
819
820 /* First, check if we need to update/invalidate */
821 if (ISSET(np->n_flag, NUPDATESIZE)) {
822 nfs_data_update_size(np, 0);
823 }
824 if ((error = nfs_node_lock(np))) {
825 return error;
826 }
827 if (np->n_flag & NNEEDINVALIDATE) {
828 np->n_flag &= ~NNEEDINVALIDATE;
829 if (vtype == VDIR) {
830 nfs_invaldir(np);
831 }
832 nfs_node_unlock(np);
833 nfs_vinvalbuf(vp, V_SAVE | V_IGNORE_WRITEERR, ctx, 1);
834 if ((error = nfs_node_lock(np))) {
835 return error;
836 }
837 }
838 if (vtype == VREG) {
839 np->n_lastrahead = -1;
840 }
841 if (np->n_flag & NMODIFIED) {
842 if (vtype == VDIR) {
843 nfs_invaldir(np);
844 }
845 nfs_node_unlock(np);
846 if ((error = nfs_vinvalbuf(vp, V_SAVE | V_IGNORE_WRITEERR, ctx, 1))) {
847 return error;
848 }
849 } else {
850 nfs_node_unlock(np);
851 }
852
853 /* nfs_getattr() will check changed and purge caches */
854 if ((error = nfs_getattr(np, NULL, ctx, NGA_UNCACHED))) {
855 return error;
856 }
857
858 if (vtype != VREG) {
859 /* Just mark that it was opened */
860 lck_mtx_lock(&np->n_openlock);
861 np->n_openrefcnt++;
862 lck_mtx_unlock(&np->n_openlock);
863 return 0;
864 }
865
866 /* mode contains some combination of: FREAD, FWRITE, O_SHLOCK, O_EXLOCK */
867 accessMode = 0;
868 if (ap->a_mode & FREAD) {
869 accessMode |= NFS_OPEN_SHARE_ACCESS_READ;
870 }
871 if (ap->a_mode & FWRITE) {
872 accessMode |= NFS_OPEN_SHARE_ACCESS_WRITE;
873 }
874 if (ap->a_mode & O_EXLOCK) {
875 denyMode = NFS_OPEN_SHARE_DENY_BOTH;
876 } else if (ap->a_mode & O_SHLOCK) {
877 denyMode = NFS_OPEN_SHARE_DENY_WRITE;
878 } else {
879 denyMode = NFS_OPEN_SHARE_DENY_NONE;
880 }
881 // XXX don't do deny modes just yet (and never do it for !v4)
882 denyMode = NFS_OPEN_SHARE_DENY_NONE;
883
884 noop = nfs_open_owner_find(nmp, vfs_context_ucred(ctx), 1);
885 if (!noop) {
886 return ENOMEM;
887 }
888
889restart:
890 error = nfs_mount_state_in_use_start(nmp, vfs_context_thread(ctx));
891 if (error) {
892 nfs_open_owner_rele(noop);
893 return error;
894 }
895 if (np->n_flag & NREVOKE) {
896 error = EIO;
897 nfs_mount_state_in_use_end(nmp, 0);
898 nfs_open_owner_rele(noop);
899 return error;
900 }
901
902 error = nfs_open_file_find(np, noop, &nofp, accessMode, denyMode, 1);
903 if (!error && (nofp->nof_flags & NFS_OPEN_FILE_LOST)) {
904 NP(np, "nfs_vnop_open: LOST %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
905 error = EIO;
906 }
907#if CONFIG_NFS4
908 if (!error && (nofp->nof_flags & NFS_OPEN_FILE_REOPEN)) {
909 nfs_mount_state_in_use_end(nmp, 0);
910 error = nfs4_reopen(nofp, vfs_context_thread(ctx));
911 nofp = NULL;
912 if (!error) {
913 goto restart;
914 }
915 }
916#endif
917 if (!error) {
918 error = nfs_open_file_set_busy(nofp, vfs_context_thread(ctx));
919 }
920 if (error) {
921 nofp = NULL;
922 goto out;
923 }
924
925 if (nmp->nm_vers < NFS_VER4) {
926 /*
927 * NFS v2/v3 opens are always allowed - so just add it.
928 */
929 nfs_open_file_add_open(nofp, accessMode, denyMode, 0);
930 goto out;
931 }
932
933 /*
934 * If we just created the file and the modes match, then we simply use
935 * the open performed in the create. Otherwise, send the request.
936 */
937 if ((nofp->nof_flags & NFS_OPEN_FILE_CREATE) &&
938 (nofp->nof_creator == current_thread()) &&
939 (accessMode == NFS_OPEN_SHARE_ACCESS_BOTH) &&
940 (denyMode == NFS_OPEN_SHARE_DENY_NONE)) {
941 nofp->nof_flags &= ~NFS_OPEN_FILE_CREATE;
942 nofp->nof_creator = NULL;
943 } else {
944#if CONFIG_NFS4
945 if (!opened) {
946 error = nfs4_open(np, nofp, accessMode, denyMode, ctx);
947 }
948#endif
949 if ((error == EACCES) && (nofp->nof_flags & NFS_OPEN_FILE_CREATE) &&
950 (nofp->nof_creator == current_thread())) {
951 /*
952 * Ugh. This can happen if we just created the file with read-only
953 * perms and we're trying to open it for real with different modes
954 * (e.g. write-only or with a deny mode) and the server decides to
955 * not allow the second open because of the read-only perms.
956 * The best we can do is to just use the create's open.
957 * We may have access we don't need or we may not have a requested
958 * deny mode. We may log complaints later, but we'll try to avoid it.
959 */
960 if (denyMode != NFS_OPEN_SHARE_DENY_NONE) {
961 NP(np, "nfs_vnop_open: deny mode foregone on create, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
962 }
963 nofp->nof_creator = NULL;
964 error = 0;
965 }
966 if (error) {
967 goto out;
968 }
969 opened = 1;
970 /*
971 * If we had just created the file, we already had it open.
972 * If the actual open mode is less than what we grabbed at
973 * create time, then we'll downgrade the open here.
974 */
975 if ((nofp->nof_flags & NFS_OPEN_FILE_CREATE) &&
976 (nofp->nof_creator == current_thread())) {
977 error = nfs_close(np, nofp, NFS_OPEN_SHARE_ACCESS_BOTH, NFS_OPEN_SHARE_DENY_NONE, ctx);
978 if (error) {
979 NP(np, "nfs_vnop_open: create close error %d, %d", error, kauth_cred_getuid(nofp->nof_owner->noo_cred));
980 }
981 if (!nfs_mount_state_error_should_restart(error)) {
982 error = 0;
983 nofp->nof_flags &= ~NFS_OPEN_FILE_CREATE;
984 }
985 }
986 }
987
988out:
989 if (nofp) {
990 nfs_open_file_clear_busy(nofp);
991 }
992 if (nfs_mount_state_in_use_end(nmp, error)) {
993 nofp = NULL;
994 goto restart;
995 }
996 if (error) {
997 NP(np, "nfs_vnop_open: error %d, %d", error, kauth_cred_getuid(noop->noo_cred));
998 }
999 if (noop) {
1000 nfs_open_owner_rele(noop);
1001 }
1002 if (!error && vtype == VREG && (ap->a_mode & FWRITE)) {
1003 lck_mtx_lock(&nmp->nm_lock);
1004 nmp->nm_state &= ~NFSSTA_SQUISHY;
1005 nmp->nm_curdeadtimeout = nmp->nm_deadtimeout;
1006 if (nmp->nm_curdeadtimeout <= 0) {
1007 nmp->nm_deadto_start = 0;
1008 }
1009 nmp->nm_writers++;
1010 lck_mtx_unlock(&nmp->nm_lock);
1011 }
1012
1013 return error;
1014}
1015
1016static uint32_t
1017nfs_no_of_open_file_writers(nfsnode_t np)
1018{
1019 uint32_t writers = 0;
1020 struct nfs_open_file *nofp;
1021
1022 TAILQ_FOREACH(nofp, &np->n_opens, nof_link) {
1023 writers += nofp->nof_w + nofp->nof_rw + nofp->nof_w_dw + nofp->nof_rw_dw +
1024 nofp->nof_w_drw + nofp->nof_rw_drw + nofp->nof_d_w_dw +
1025 nofp->nof_d_rw_dw + nofp->nof_d_w_drw + nofp->nof_d_rw_drw +
1026 nofp->nof_d_w + nofp->nof_d_rw;
1027 }
1028
1029 return writers;
1030}
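/*
 * nfs_no_of_open_file_writers() totals every open that includes write access:
 * write-only and read/write opens, each deny-mode variant (_dw, _drw), and
 * their delegated (nof_d_*) counterparts.  nfs_vnop_close() below uses this
 * total to reconcile the per-mount nm_writers count when it tears down all
 * open state for a node.
 */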
1031
1032/*
1033 * NFS close vnode op
1034 *
1035 * What an NFS client should do upon close after writing is a debatable issue.
1036 * Most NFS clients push delayed writes to the server upon close, basically for
1037 * two reasons:
1038 * 1 - So that any write errors may be reported back to the client process
1039 * doing the close system call. By far the two most likely errors are
1040 * NFSERR_NOSPC and NFSERR_DQUOT to indicate space allocation failure.
1041 * 2 - To put a worst case upper bound on cache inconsistency between
1042 * multiple clients for the file.
1043 * There is also a consistency problem for Version 2 of the protocol w.r.t.
1044 * not being able to tell if other clients are writing a file concurrently,
1045 * since there is no way of knowing if the changed modify time in the reply
1046 * is only due to the write for this client.
1047 * (NFS Version 3 provides weak cache consistency data in the reply that
1048 * should be sufficient to detect and handle this case.)
1049 *
1050 * The current code does the following:
1051 * for NFS Version 2 - play it safe and flush/invalidate all dirty buffers
1052 * for NFS Version 3 - flush dirty buffers to the server but don't invalidate them.
1053 * for NFS Version 4 - basically the same as NFSv3
1054 */
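/*
 * In the code below, that policy is the NMODIFIED/FWRITE check:
 * nfs_flush(np, MNT_WAIT, ...) pushes dirty buffers to the server for
 * v3/v4, while nfs_vinvalbuf(vp, V_SAVE, ...) both flushes and
 * invalidates them for v2.
 */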
1055int
1056nfs_vnop_close(
1057 struct vnop_close_args /* {
1058 * struct vnodeop_desc *a_desc;
1059 * vnode_t a_vp;
1060 * int a_fflag;
1061 * vfs_context_t a_context;
1062 * } */*ap)
1063{
1064 vfs_context_t ctx = ap->a_context;
1065 vnode_t vp = ap->a_vp;
1066 nfsnode_t np = VTONFS(vp);
1067 struct nfsmount *nmp;
1068 int error = 0, error1, nfsvers;
1069 int fflag = ap->a_fflag;
1070 enum vtype vtype;
1071 int accessMode, denyMode;
1072 struct nfs_open_owner *noop = NULL;
1073 struct nfs_open_file *nofp = NULL;
1074
1075 nmp = VTONMP(vp);
1076 if (!nmp) {
1077 return ENXIO;
1078 }
1079 nfsvers = nmp->nm_vers;
1080 vtype = vnode_vtype(vp);
1081
1082 /* First, check if we need to update/flush/invalidate */
1083 if (ISSET(np->n_flag, NUPDATESIZE)) {
1084 nfs_data_update_size(np, 0);
1085 }
1086 nfs_node_lock_force(np);
1087 if (np->n_flag & NNEEDINVALIDATE) {
1088 np->n_flag &= ~NNEEDINVALIDATE;
1089 nfs_node_unlock(np);
1090 nfs_vinvalbuf(vp, V_SAVE | V_IGNORE_WRITEERR, ctx, 1);
1091 nfs_node_lock_force(np);
1092 }
1093 if ((vtype == VREG) && (np->n_flag & NMODIFIED) && (fflag & FWRITE)) {
1094 /* we're closing an open for write and the file is modified, so flush it */
1095 nfs_node_unlock(np);
1096 if (nfsvers != NFS_VER2) {
1097 error = nfs_flush(np, MNT_WAIT, vfs_context_thread(ctx), 0);
1098 } else {
1099 error = nfs_vinvalbuf(vp, V_SAVE, ctx, 1);
1100 }
1101 nfs_node_lock_force(np);
1102 NATTRINVALIDATE(np);
1103 }
1104 if (np->n_flag & NWRITEERR) {
1105 np->n_flag &= ~NWRITEERR;
1106 error = np->n_error;
1107 }
1108 nfs_node_unlock(np);
1109
1110 if (vtype != VREG) {
1111 /* Just mark that it was closed */
1112 lck_mtx_lock(&np->n_openlock);
1113 if (np->n_openrefcnt == 0) {
1114 if (fflag & (FREAD | FWRITE)) {
1115 NP(np, "nfs_vnop_close: open reference underrun");
1116 error = EINVAL;
1117 }
1118 } else if (fflag & (FREAD | FWRITE)) {
1119 np->n_openrefcnt--;
1120 } else {
1121 /* No FREAD/FWRITE set - probably the final close */
1122 np->n_openrefcnt = 0;
1123 }
1124 lck_mtx_unlock(&np->n_openlock);
1125 return error;
1126 }
1127 error1 = error;
1128
1129 /* fflag should contain some combination of: FREAD, FWRITE, FHASLOCK */
1130 accessMode = 0;
1131 if (fflag & FREAD) {
1132 accessMode |= NFS_OPEN_SHARE_ACCESS_READ;
1133 }
1134 if (fflag & FWRITE) {
1135 accessMode |= NFS_OPEN_SHARE_ACCESS_WRITE;
1136 }
1137// XXX It would be nice if we still had the O_EXLOCK/O_SHLOCK flags that were on the open
1138// if (fflag & O_EXLOCK)
1139// denyMode = NFS_OPEN_SHARE_DENY_BOTH;
1140// else if (fflag & O_SHLOCK)
1141// denyMode = NFS_OPEN_SHARE_DENY_WRITE;
1142// else
1143// denyMode = NFS_OPEN_SHARE_DENY_NONE;
1144#if 0 // Not yet
1145 if (fflag & FHASLOCK) {
1146 /* XXX assume FHASLOCK is for the deny mode and not flock */
1147 /* FHASLOCK flock will be unlocked in the close path, but the flag is not cleared. */
1148 if (nofp->nof_deny & NFS_OPEN_SHARE_DENY_READ) {
1149 denyMode = NFS_OPEN_SHARE_DENY_BOTH;
1150 } else if (nofp->nof_deny & NFS_OPEN_SHARE_DENY_WRITE) {
1151 denyMode = NFS_OPEN_SHARE_DENY_WRITE;
1152 } else {
1153 denyMode = NFS_OPEN_SHARE_DENY_NONE;
1154 }
1155 } else {
1156 denyMode = NFS_OPEN_SHARE_DENY_NONE;
1157 }
1158#else
1159 // XXX don't do deny modes just yet (and never do it for !v4)
1160 denyMode = NFS_OPEN_SHARE_DENY_NONE;
1161#endif
1162
1163 if (!accessMode) {
1164 /*
1165 * No mode given to close?
1166 * Guess this is the final close.
1167 * We should unlock all locks and close all opens.
1168 */
1169 uint32_t writers;
1170 mount_t mp = vnode_mount(vp);
1171 int force = (!mp || vfs_isforce(mp));
1172
1173 writers = nfs_no_of_open_file_writers(np);
1174 nfs_release_open_state_for_node(np, force);
1175 if (writers) {
1176 lck_mtx_lock(&nmp->nm_lock);
1177 if (writers > nmp->nm_writers) {
1178 NP(np, "nfs_vnop_close: number of write opens for mount underrun. Node has %d"
1179 " opens for write. Mount has total of %d opens for write\n",
1180 writers, nmp->nm_writers);
1181 nmp->nm_writers = 0;
1182 } else {
1183 nmp->nm_writers -= writers;
1184 }
1185 lck_mtx_unlock(&nmp->nm_lock);
1186 }
1187
1188 return error;
1189 } else if (fflag & FWRITE) {
1190 lck_mtx_lock(&nmp->nm_lock);
1191 if (nmp->nm_writers == 0) {
1192 NP(np, "nfs_vnop_close: removing open writer from mount, but mount has no files open for writing");
1193 } else {
1194 nmp->nm_writers--;
1195 }
1196 lck_mtx_unlock(&nmp->nm_lock);
1197 }
1198
1199
1200 noop = nfs_open_owner_find(nmp, vfs_context_ucred(ctx), 0);
1201 if (!noop) {
1202 // printf("nfs_vnop_close: can't get open owner!\n");
1203 return EIO;
1204 }
1205
1206restart:
1207 error = nfs_mount_state_in_use_start(nmp, NULL);
1208 if (error) {
1209 nfs_open_owner_rele(noop);
1210 return error;
1211 }
1212
1213 error = nfs_open_file_find(np, noop, &nofp, 0, 0, 0);
1214#if CONFIG_NFS4
1215 if (!error && (nofp->nof_flags & NFS_OPEN_FILE_REOPEN)) {
1216 nfs_mount_state_in_use_end(nmp, 0);
1217 error = nfs4_reopen(nofp, NULL);
1218 nofp = NULL;
1219 if (!error) {
1220 goto restart;
1221 }
1222 }
1223#endif
1224 if (error) {
1225 NP(np, "nfs_vnop_close: no open file for owner, error %d, %d", error, kauth_cred_getuid(noop->noo_cred));
1226 error = EBADF;
1227 goto out;
1228 }
1229 error = nfs_open_file_set_busy(nofp, NULL);
1230 if (error) {
1231 nofp = NULL;
1232 goto out;
1233 }
1234
1235 error = nfs_close(np, nofp, accessMode, denyMode, ctx);
1236 if (error) {
1237 NP(np, "nfs_vnop_close: close error %d, %d", error, kauth_cred_getuid(noop->noo_cred));
1238 }
1239
1240out:
1241 if (nofp) {
1242 nfs_open_file_clear_busy(nofp);
1243 }
1244 if (nfs_mount_state_in_use_end(nmp, error)) {
1245 nofp = NULL;
1246 goto restart;
1247 }
1248 if (!error) {
1249 error = error1;
1250 }
1251 if (error) {
1252 NP(np, "nfs_vnop_close: error %d, %d", error, kauth_cred_getuid(noop->noo_cred));
1253 }
1254 if (noop) {
1255 nfs_open_owner_rele(noop);
1256 }
1257 return error;
1258}
1259
1260/*
1261 * nfs_close(): common function that does all the heavy lifting of file closure
1262 *
1263 * Takes an open file structure and a set of access/deny modes and figures out how
1264 * to update the open file structure (and the state on the server) appropriately.
1265 */
1266int
1267nfs_close(
1268 nfsnode_t np,
1269 struct nfs_open_file *nofp,
1270 uint32_t accessMode,
1271 uint32_t denyMode,
1272 __unused vfs_context_t ctx)
1273{
1274#if CONFIG_NFS4
1275 struct nfs_lock_owner *nlop;
1276#endif
1277 int error = 0, changed = 0, delegated = 0, closed = 0, downgrade = 0;
1278 uint32_t newAccessMode, newDenyMode;
1279
1280 /* warn if modes don't match current state */
1281 if (((accessMode & nofp->nof_access) != accessMode) || ((denyMode & nofp->nof_deny) != denyMode)) {
1282 NP(np, "nfs_close: mode mismatch %d %d, current %d %d, %d",
1283 accessMode, denyMode, nofp->nof_access, nofp->nof_deny,
1284 kauth_cred_getuid(nofp->nof_owner->noo_cred));
1285 }
1286
1287 /*
1288 * If we're closing a write-only open, we may not have a write-only count
1289 * if we also grabbed read access. So, check the read-write count.
1290 */
1291 if (denyMode == NFS_OPEN_SHARE_DENY_NONE) {
1292 if ((accessMode == NFS_OPEN_SHARE_ACCESS_WRITE) &&
1293 (nofp->nof_w == 0) && (nofp->nof_d_w == 0) &&
1294 (nofp->nof_rw || nofp->nof_d_rw)) {
1295 accessMode = NFS_OPEN_SHARE_ACCESS_BOTH;
1296 }
1297 } else if (denyMode == NFS_OPEN_SHARE_DENY_WRITE) {
1298 if ((accessMode == NFS_OPEN_SHARE_ACCESS_WRITE) &&
1299 (nofp->nof_w_dw == 0) && (nofp->nof_d_w_dw == 0) &&
1300 (nofp->nof_rw_dw || nofp->nof_d_rw_dw)) {
1301 accessMode = NFS_OPEN_SHARE_ACCESS_BOTH;
1302 }
1303 } else { /* NFS_OPEN_SHARE_DENY_BOTH */
1304 if ((accessMode == NFS_OPEN_SHARE_ACCESS_WRITE) &&
1305 (nofp->nof_w_drw == 0) && (nofp->nof_d_w_drw == 0) &&
1306 (nofp->nof_rw_drw || nofp->nof_d_rw_drw)) {
1307 accessMode = NFS_OPEN_SHARE_ACCESS_BOTH;
1308 }
1309 }
1310
1311 nfs_open_file_remove_open_find(nofp, accessMode, denyMode, &newAccessMode, &newDenyMode, &delegated);
1312 if ((newAccessMode != nofp->nof_access) || (newDenyMode != nofp->nof_deny)) {
1313 changed = 1;
1314 } else {
1315 changed = 0;
1316 }
1317
1318 if (NFSTONMP(np)->nm_vers < NFS_VER4) {
1319 /* NFS v2/v3 closes simply need to remove the open. */
1320 goto v3close;
1321 }
1322#if CONFIG_NFS4
1323 if ((newAccessMode == 0) || (nofp->nof_opencnt == 1)) {
1324 /*
1325 * No more access after this close, so clean up and close it.
1326 * Don't send a close RPC if we're closing a delegated open.
1327 */
1328 nfs_wait_bufs(np);
1329 closed = 1;
1330 if (!delegated && !(nofp->nof_flags & NFS_OPEN_FILE_LOST)) {
1331 error = nfs4_close_rpc(np, nofp, vfs_context_thread(ctx), vfs_context_ucred(ctx), 0);
1332 }
1333 if (error == NFSERR_LOCKS_HELD) {
1334 /*
 1335 * Hmm... the server says we have locks we need to release first.
1336 * Find the lock owner and try to unlock everything.
1337 */
1338 nlop = nfs_lock_owner_find(np, vfs_context_proc(ctx), 0);
1339 if (nlop) {
1340 nfs4_unlock_rpc(np, nlop, F_WRLCK, 0, UINT64_MAX,
1341 0, vfs_context_thread(ctx), vfs_context_ucred(ctx));
1342 nfs_lock_owner_rele(nlop);
1343 }
1344 error = nfs4_close_rpc(np, nofp, vfs_context_thread(ctx), vfs_context_ucred(ctx), 0);
1345 }
1346 } else if (changed) {
1347 /*
1348 * File is still open but with less access, so downgrade the open.
1349 * Don't send a downgrade RPC if we're closing a delegated open.
1350 */
1351 if (!delegated && !(nofp->nof_flags & NFS_OPEN_FILE_LOST)) {
1352 downgrade = 1;
1353 /*
1354 * If we have delegated opens, we should probably claim them before sending
1355 * the downgrade because the server may not know the open we are downgrading to.
1356 */
1357 if (nofp->nof_d_rw_drw || nofp->nof_d_w_drw || nofp->nof_d_r_drw ||
1358 nofp->nof_d_rw_dw || nofp->nof_d_w_dw || nofp->nof_d_r_dw ||
1359 nofp->nof_d_rw || nofp->nof_d_w || nofp->nof_d_r) {
1360 nfs4_claim_delegated_state_for_open_file(nofp, 0);
1361 }
1362 /* need to remove the open before sending the downgrade */
1363 nfs_open_file_remove_open(nofp, accessMode, denyMode);
1364 error = nfs4_open_downgrade_rpc(np, nofp, ctx);
1365 if (error) { /* Hmm.. that didn't work. Add the open back in. */
1366 nfs_open_file_add_open(nofp, accessMode, denyMode, delegated);
1367 }
1368 }
1369 }
1370#endif
1371v3close:
1372 if (error) {
1373 NP(np, "nfs_close: error %d, %d", error, kauth_cred_getuid(nofp->nof_owner->noo_cred));
1374 return error;
1375 }
1376
1377 if (!downgrade) {
1378 nfs_open_file_remove_open(nofp, accessMode, denyMode);
1379 }
1380
1381 if (closed) {
1382 lck_mtx_lock(&nofp->nof_lock);
1383 if (nofp->nof_r || nofp->nof_d_r || nofp->nof_w || nofp->nof_d_w || nofp->nof_d_rw ||
1384 (nofp->nof_rw && !((nofp->nof_flags & NFS_OPEN_FILE_CREATE) && !nofp->nof_creator && (nofp->nof_rw == 1))) ||
1385 nofp->nof_r_dw || nofp->nof_d_r_dw || nofp->nof_w_dw || nofp->nof_d_w_dw ||
1386 nofp->nof_rw_dw || nofp->nof_d_rw_dw || nofp->nof_r_drw || nofp->nof_d_r_drw ||
1387 nofp->nof_w_drw || nofp->nof_d_w_drw || nofp->nof_rw_drw || nofp->nof_d_rw_drw) {
1388 NP(np, "nfs_close: unexpected count: %u.%u %u.%u %u.%u dw %u.%u %u.%u %u.%u drw %u.%u %u.%u %u.%u flags 0x%x, %d",
1389 nofp->nof_r, nofp->nof_d_r, nofp->nof_w, nofp->nof_d_w,
1390 nofp->nof_rw, nofp->nof_d_rw, nofp->nof_r_dw, nofp->nof_d_r_dw,
1391 nofp->nof_w_dw, nofp->nof_d_w_dw, nofp->nof_rw_dw, nofp->nof_d_rw_dw,
1392 nofp->nof_r_drw, nofp->nof_d_r_drw, nofp->nof_w_drw, nofp->nof_d_w_drw,
1393 nofp->nof_rw_drw, nofp->nof_d_rw_drw, nofp->nof_flags,
1394 kauth_cred_getuid(nofp->nof_owner->noo_cred));
1395 }
1396 /* clear out all open info, just to be safe */
1397 nofp->nof_access = nofp->nof_deny = 0;
1398 nofp->nof_mmap_access = nofp->nof_mmap_deny = 0;
1399 nofp->nof_r = nofp->nof_d_r = 0;
1400 nofp->nof_w = nofp->nof_d_w = 0;
1401 nofp->nof_rw = nofp->nof_d_rw = 0;
1402 nofp->nof_r_dw = nofp->nof_d_r_dw = 0;
1403 nofp->nof_w_dw = nofp->nof_d_w_dw = 0;
1404 nofp->nof_rw_dw = nofp->nof_d_rw_dw = 0;
1405 nofp->nof_r_drw = nofp->nof_d_r_drw = 0;
1406 nofp->nof_w_drw = nofp->nof_d_w_drw = 0;
1407 nofp->nof_rw_drw = nofp->nof_d_rw_drw = 0;
1408 nofp->nof_flags &= ~NFS_OPEN_FILE_CREATE;
1409 lck_mtx_unlock(&nofp->nof_lock);
1410 /* XXX we may potentially want to clean up idle/unused open file structures */
1411 }
1412 if (nofp->nof_flags & NFS_OPEN_FILE_LOST) {
1413 error = EIO;
1414 NP(np, "nfs_close: LOST%s, %d", !nofp->nof_opencnt ? " (last)" : "",
1415 kauth_cred_getuid(nofp->nof_owner->noo_cred));
1416 }
1417
1418 return error;
1419}
1420
1421
1422int
1423nfs3_getattr_rpc(
1424 nfsnode_t np,
1425 mount_t mp,
1426 u_char *fhp,
1427 size_t fhsize,
1428 int flags,
1429 vfs_context_t ctx,
1430 struct nfs_vattr *nvap,
1431 u_int64_t *xidp)
1432{
1433 struct nfsmount *nmp = mp ? VFSTONFS(mp) : NFSTONMP(np);
1434 int error = 0, status, nfsvers, rpcflags = 0;
1435 struct nfsm_chain nmreq, nmrep;
1436
1437 if (nfs_mount_gone(nmp)) {
1438 return ENXIO;
1439 }
1440 nfsvers = nmp->nm_vers;
1441
1442 if (flags & NGA_MONITOR) { /* vnode monitor requests should be soft */
1443 rpcflags = R_RECOVER;
1444 }
1445
1446 if (flags & NGA_SOFT) { /* Return ETIMEDOUT if server not responding */
1447 rpcflags |= R_SOFT;
1448 }
1449
1450 nfsm_chain_null(&nmreq);
1451 nfsm_chain_null(&nmrep);
1452
1453 nfsm_chain_build_alloc_init(error, &nmreq, NFSX_FH(nfsvers));
1454 if (nfsvers != NFS_VER2) {
1455 nfsm_chain_add_32(error, &nmreq, fhsize);
1456 }
1457 nfsm_chain_add_opaque(error, &nmreq, fhp, fhsize);
1458 nfsm_chain_build_done(error, &nmreq);
1459 nfsmout_if(error);
1460 error = nfs_request2(np, mp, &nmreq, NFSPROC_GETATTR,
1461 vfs_context_thread(ctx), vfs_context_ucred(ctx),
1462 NULL, rpcflags, &nmrep, xidp, &status);
1463 if (!error) {
1464 error = status;
1465 }
1466 nfsmout_if(error);
1467 error = nfs_parsefattr(nmp, &nmrep, nfsvers, nvap);
1468nfsmout:
1469 nfsm_chain_cleanup(&nmreq);
1470 nfsm_chain_cleanup(&nmrep);
1471 return error;
1472}
1473
1474/*
1475 * nfs_refresh_fh will attempt to update the file handle for the node.
1476 *
1477 * It only does this for symbolic links and regular files that are not currently opened.
1478 *
1479 * On success, returns 0 and the node's file handle is updated; returns ESTALE on failure.
1480 */
1481int
1482nfs_refresh_fh(nfsnode_t np, vfs_context_t ctx)
1483{
1484 vnode_t dvp, vp = NFSTOV(np);
1485 nfsnode_t dnp;
1486 const char *v_name = vnode_getname(vp);
1487 char *name;
1488 int namelen, fhsize, refreshed;
1489 int error, wanted = 0;
1490 uint8_t *fhp;
1491 struct timespec ts = {.tv_sec = 2, .tv_nsec = 0};
1492
1493 NFS_VNOP_DBG("vnode is %d\n", vnode_vtype(vp));
1494
1495 dvp = vnode_parent(vp);
1496 if ((vnode_vtype(vp) != VREG && vnode_vtype(vp) != VLNK) ||
1497 v_name == NULL || *v_name == '\0' || dvp == NULL) {
1498 if (v_name != NULL) {
1499 vnode_putname(v_name);
1500 }
1501 return ESTALE;
1502 }
1503 dnp = VTONFS(dvp);
1504
1505 namelen = strlen(v_name);
1506 MALLOC(name, char *, namelen + 1, M_TEMP, M_WAITOK);
1507 if (name == NULL) {
1508 vnode_putname(v_name);
1509 return ESTALE;
1510 }
1511 bcopy(v_name, name, namelen + 1);
1512 NFS_VNOP_DBG("Trying to refresh %s : %s\n", v_name, name);
1513 vnode_putname(v_name);
1514
1515 /* Allocate the maximum size file handle */
1516 MALLOC(fhp, uint8_t *, NFS4_FHSIZE, M_TEMP, M_WAITOK);
1517 if (fhp == NULL) {
1518 FREE(name, M_TEMP);
1519 return ESTALE;
1520 }
1521
1522 if ((error = nfs_node_lock(np))) {
1523 FREE(name, M_TEMP);
1524 FREE(fhp, M_TEMP);
1525 return ESTALE;
1526 }
1527
1528 fhsize = np->n_fhsize;
1529 bcopy(np->n_fhp, fhp, fhsize);
1530 while (ISSET(np->n_flag, NREFRESH)) {
1531 SET(np->n_flag, NREFRESHWANT);
1532 NFS_VNOP_DBG("Waiting for refresh of %s\n", name);
1533 msleep(np, &np->n_lock, PZERO - 1, "nfsrefreshwant", &ts);
1534 if ((error = nfs_sigintr(NFSTONMP(np), NULL, vfs_context_thread(ctx), 0))) {
1535 break;
1536 }
1537 }
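	/*
	 * If another thread refreshed the handle while we were waiting, the
	 * node's current FH will no longer match the snapshot we took above,
	 * and there's nothing left for us to do.
	 */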
1538 refreshed = error ? 0 : !NFS_CMPFH(np, fhp, fhsize);
1539 SET(np->n_flag, NREFRESH);
1540 nfs_node_unlock(np);
1541
1542 NFS_VNOP_DBG("error = %d, refreshed = %d\n", error, refreshed);
1543 if (error || refreshed) {
1544 goto nfsmout;
1545 }
1546
1547 /* Check that there are no open references for this file */
1548 lck_mtx_lock(&np->n_openlock);
1549 if (np->n_openrefcnt || !TAILQ_EMPTY(&np->n_opens) || !TAILQ_EMPTY(&np->n_lock_owners)) {
1550 int cnt = 0;
1551 struct nfs_open_file *ofp;
1552
1553 TAILQ_FOREACH(ofp, &np->n_opens, nof_link) {
1554 cnt += ofp->nof_opencnt;
1555 }
1556 if (cnt) {
1557 lck_mtx_unlock(&np->n_openlock);
1558 NFS_VNOP_DBG("Can not refresh file handle for %s with open state\n", name);
1559 NFS_VNOP_DBG("\topenrefcnt = %d, opens = %d lock_owners = %d\n",
1560 np->n_openrefcnt, cnt, !TAILQ_EMPTY(&np->n_lock_owners));
1561 error = ESTALE;
1562 goto nfsmout;
1563 }
1564 }
1565 lck_mtx_unlock(&np->n_openlock);
1566 /*
1567 * Since the FH is currently stale we should not be able to
1568 * establish any open state until the FH is refreshed.
1569 */
1570
1571 error = nfs_node_lock(np);
1572 nfsmout_if(error);
1573 /*
1574 * Symlinks should never need invalidations and are holding
1575 * the one and only nfsbuf in an uncached acquired state
1576 * trying to do a readlink. So we will hang if we invalidate
1577 * in that case. Only in the VREG case do we need to
1578 * invalidate.
1579 */
1580 if (vnode_vtype(vp) == VREG) {
1581 np->n_flag &= ~NNEEDINVALIDATE;
1582 nfs_node_unlock(np);
1583 error = nfs_vinvalbuf(vp, V_IGNORE_WRITEERR, ctx, 1);
1584 if (error) {
1585 NFS_VNOP_DBG("nfs_vinvalbuf returned %d\n", error);
1586 }
1587 nfsmout_if(error);
1588 } else {
1589 nfs_node_unlock(np);
1590 }
1591
1592 NFS_VNOP_DBG("Looking up %s\n", name);
1593 error = nfs_lookitup(dnp, name, namelen, ctx, &np);
1594 if (error) {
1595 NFS_VNOP_DBG("nfs_lookitup returned %d\n", error);
1596 }
1597
1598nfsmout:
1599 nfs_node_lock_force(np);
1600 wanted = ISSET(np->n_flag, NREFRESHWANT);
1601 CLR(np->n_flag, NREFRESH | NREFRESHWANT);
1602 nfs_node_unlock(np);
1603 if (wanted) {
1604 wakeup(np);
1605 }
1606
1607 if (error == 0) {
1608 NFS_VNOP_DBG("%s refreshed file handle\n", name);
1609 }
1610
1611 FREE(name, M_TEMP);
1612 FREE(fhp, M_TEMP);
1613
1614 return error ? ESTALE : 0;
1615}
1616
1617int
1618nfs_getattr(nfsnode_t np, struct nfs_vattr *nvap, vfs_context_t ctx, int flags)
1619{
1620 int error;
1621
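	/*
	 * If the getattr fails with ESTALE, try to refresh the file handle by
	 * looking the name up again in the parent directory, then retry.  The
	 * loop only continues while a refresh attempt succeeds.
	 */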
1622retry:
1623 error = nfs_getattr_internal(np, nvap, ctx, flags);
1624 if (error == ESTALE) {
1625 error = nfs_refresh_fh(np, ctx);
1626 if (!error) {
1627 goto retry;
1628 }
1629 }
1630 return error;
1631}
1632
1633int
1634nfs_getattr_internal(nfsnode_t np, struct nfs_vattr *nvap, vfs_context_t ctx, int flags)
1635{
1636 struct nfsmount *nmp;
1637 int error = 0, nfsvers, inprogset = 0, wanted = 0, avoidfloods;
1638 struct nfs_vattr nvattr;
1639 struct timespec ts = { .tv_sec = 2, .tv_nsec = 0 };
1640 u_int64_t xid;
1641
1642 FSDBG_TOP(513, np->n_size, np, np->n_vattr.nva_size, np->n_flag);
1643
1644 nmp = NFSTONMP(np);
1645
1646 if (nfs_mount_gone(nmp)) {
1647 return ENXIO;
1648 }
1649 nfsvers = nmp->nm_vers;
1650
1651 if (!nvap) {
1652 nvap = &nvattr;
1653 }
1654 NVATTR_INIT(nvap);
1655
1656 /* Update local times for special files. */
1657 if (np->n_flag & (NACC | NUPD)) {
1658 nfs_node_lock_force(np);
1659 np->n_flag |= NCHG;
1660 nfs_node_unlock(np);
1661 }
1662 /* Update size, if necessary */
1663 if (ISSET(np->n_flag, NUPDATESIZE)) {
1664 nfs_data_update_size(np, 0);
1665 }
1666
1667 error = nfs_node_lock(np);
1668 nfsmout_if(error);
1669 if (!(flags & (NGA_UNCACHED | NGA_MONITOR)) || ((nfsvers >= NFS_VER4) && (np->n_openflags & N_DELEG_MASK))) {
1670 /*
1671 * Use the cache or wait for any getattr in progress if:
1672 * - it's a cached request, or
1673 * - we have a delegation, or
1674 * - the server isn't responding
1675 */
1676 while (1) {
1677 error = nfs_getattrcache(np, nvap, flags);
1678 if (!error || (error != ENOENT)) {
1679 nfs_node_unlock(np);
1680 goto nfsmout;
1681 }
1682 error = 0;
1683 if (!ISSET(np->n_flag, NGETATTRINPROG)) {
1684 break;
1685 }
1686 if (flags & NGA_MONITOR) {
1687 /* no need to wait if a request is pending */
1688 error = EINPROGRESS;
1689 nfs_node_unlock(np);
1690 goto nfsmout;
1691 }
1692 SET(np->n_flag, NGETATTRWANT);
1693 msleep(np, &np->n_lock, PZERO - 1, "nfsgetattrwant", &ts);
1694 if ((error = nfs_sigintr(NFSTONMP(np), NULL, vfs_context_thread(ctx), 0))) {
1695 nfs_node_unlock(np);
1696 goto nfsmout;
1697 }
1698 }
1699 SET(np->n_flag, NGETATTRINPROG);
1700 inprogset = 1;
1701 } else if (!ISSET(np->n_flag, NGETATTRINPROG)) {
1702 SET(np->n_flag, NGETATTRINPROG);
1703 inprogset = 1;
1704 } else if (flags & NGA_MONITOR) {
1705 /* no need to make a request if one is pending */
1706 error = EINPROGRESS;
1707 }
1708 nfs_node_unlock(np);
1709
1710 nmp = NFSTONMP(np);
1711 if (nfs_mount_gone(nmp)) {
1712 error = ENXIO;
1713 }
1714 if (error) {
1715 goto nfsmout;
1716 }
1717
1718 /*
1719 * If the cached attributes are valid and this is one of the
1720 * softer styles of mount, arrange to fall back to the cached
1721 * attributes should the server not respond.
1722 */
1723 if (NATTRVALID(np) && nfs_use_cache(nmp)) {
1724 flags |= NGA_SOFT;
1725 }
1726
1727 /*
1728 * We might want to try to get both the attributes and access info by
1729 * making an ACCESS call and seeing if it returns updated attributes.
1730 * But don't bother if we aren't caching access info or if the
1731 * attributes returned wouldn't be cached.
1732 */
1733 if (!(flags & NGA_ACL) && (nfsvers != NFS_VER2) && nfs_access_for_getattr && (nfs_access_cache_timeout > 0)) {
1734 if (nfs_attrcachetimeout(np) > 0) {
1735 /* OSAddAtomic(1, &nfsstats.accesscache_misses); */
1736 u_int32_t access = NFS_ACCESS_ALL;
1737 int rpcflags = 0;
1738
1739 /* Return cached attrs if server doesn't respond */
1740 if (flags & NGA_SOFT) {
1741 rpcflags |= R_SOFT;
1742 }
1743
1744 error = nmp->nm_funcs->nf_access_rpc(np, &access, rpcflags, ctx);
1745
1746 if (error == ETIMEDOUT) {
1747 goto returncached;
1748 }
1749
1750 if (error) {
1751 goto nfsmout;
1752 }
1753 nfs_node_lock_force(np);
1754 error = nfs_getattrcache(np, nvap, flags);
1755 nfs_node_unlock(np);
1756 if (!error || (error != ENOENT)) {
1757 goto nfsmout;
1758 }
1759 /* Well, that didn't work... just do a getattr... */
1760 error = 0;
1761 }
1762 }
1763
1764 avoidfloods = 0;
1765
1766tryagain:
1767 error = nmp->nm_funcs->nf_getattr_rpc(np, NULL, np->n_fhp, np->n_fhsize, flags, ctx, nvap, &xid);
1768 if (!error) {
1769 nfs_node_lock_force(np);
1770 error = nfs_loadattrcache(np, nvap, &xid, 0);
1771 nfs_node_unlock(np);
1772 }
1773
1774 /*
1775 * If the server didn't respond, return cached attributes.
1776 */
1777returncached:
1778 if ((flags & NGA_SOFT) && (error == ETIMEDOUT)) {
1779 nfs_node_lock_force(np);
1780 error = nfs_getattrcache(np, nvap, flags);
1781 if (!error || (error != ENOENT)) {
1782 nfs_node_unlock(np);
1783 goto nfsmout;
1784 }
1785 nfs_node_unlock(np);
1786 }
1787 nfsmout_if(error);
1788
1789 if (!xid) { /* out-of-order rpc - attributes were dropped */
1790 FSDBG(513, -1, np, np->n_xid >> 32, np->n_xid);
1791 if (avoidfloods++ < 20) {
1792 goto tryagain;
1793 }
1794 /* avoidfloods > 1 is bizarre; at 20 we pull the plug */
1795 /* just return the last attributes we got */
1796 }
1797nfsmout:
1798 nfs_node_lock_force(np);
1799 if (inprogset) {
1800 wanted = ISSET(np->n_flag, NGETATTRWANT);
1801 CLR(np->n_flag, (NGETATTRINPROG | NGETATTRWANT));
1802 }
1803 if (!error) {
1804 /* check if the node changed on us */
1805 vnode_t vp = NFSTOV(np);
1806 enum vtype vtype = vnode_vtype(vp);
1807 if ((vtype == VDIR) && NFS_CHANGED_NC(nfsvers, np, nvap)) {
1808 FSDBG(513, -1, np, 0, np);
1809 np->n_flag &= ~NNEGNCENTRIES;
1810 cache_purge(vp);
1811 np->n_ncgen++;
1812 NFS_CHANGED_UPDATE_NC(nfsvers, np, nvap);
1813 NFS_VNOP_DBG("Purge directory 0x%llx\n",
1814 (uint64_t)VM_KERNEL_ADDRPERM(vp));
1815 }
1816 if (NFS_CHANGED(nfsvers, np, nvap)) {
1817 FSDBG(513, -1, np, -1, np);
1818 if (vtype == VDIR) {
1819 NFS_VNOP_DBG("Invalidate directory 0x%llx\n",
1820 (uint64_t)VM_KERNEL_ADDRPERM(vp));
1821 nfs_invaldir(np);
1822 }
1823 nfs_node_unlock(np);
1824 if (wanted) {
1825 wakeup(np);
1826 }
1827 error = nfs_vinvalbuf(vp, V_SAVE, ctx, 1);
1828 FSDBG(513, -1, np, -2, error);
1829 if (!error) {
1830 nfs_node_lock_force(np);
1831 NFS_CHANGED_UPDATE(nfsvers, np, nvap);
1832 nfs_node_unlock(np);
1833 }
1834 } else {
1835 nfs_node_unlock(np);
1836 if (wanted) {
1837 wakeup(np);
1838 }
1839 }
1840 } else {
1841 nfs_node_unlock(np);
1842 if (wanted) {
1843 wakeup(np);
1844 }
1845 }
1846
1847 if (nvap == &nvattr) {
1848 NVATTR_CLEANUP(nvap);
1849 } else if (!(flags & NGA_ACL)) {
1850 /* make sure we don't return an ACL if it wasn't asked for */
1851 NFS_BITMAP_CLR(nvap->nva_bitmap, NFS_FATTR_ACL);
1852 if (nvap->nva_acl) {
1853 kauth_acl_free(nvap->nva_acl);
1854 nvap->nva_acl = NULL;
1855 }
1856 }
1857 FSDBG_BOT(513, np->n_size, error, np->n_vattr.nva_size, np->n_flag);
1858 return error;
1859}
1860
1861
1862/*
1863 * NFS getattr call from vfs.
1864 */
1865
1866/*
1867 * The attributes we support over the wire.
1868 * We also get fsid but the vfs layer gets it out of the mount
1869 * structure after calling us, so there's no need to return it.
1870 * Also, Finder expects a getattrlist call that asks only for the FSID
1871 * not to hang on a non-responsive server.
1872 */
1873#define NFS3_SUPPORTED_VATTRS \
1874 (VNODE_ATTR_va_rdev | \
1875 VNODE_ATTR_va_nlink | \
1876 VNODE_ATTR_va_data_size | \
1877 VNODE_ATTR_va_data_alloc | \
1878 VNODE_ATTR_va_uid | \
1879 VNODE_ATTR_va_gid | \
1880 VNODE_ATTR_va_mode | \
1881 VNODE_ATTR_va_modify_time | \
1882 VNODE_ATTR_va_change_time | \
1883 VNODE_ATTR_va_access_time | \
1884 VNODE_ATTR_va_fileid | \
1885 VNODE_ATTR_va_type)
1886
1887
1888int
1889nfs3_vnop_getattr(
1890 struct vnop_getattr_args /* {
1891 * struct vnodeop_desc *a_desc;
1892 * vnode_t a_vp;
1893 * struct vnode_attr *a_vap;
1894 * vfs_context_t a_context;
1895 * } */*ap)
1896{
1897 int error;
1898 nfsnode_t np;
1899 uint64_t supported_attrs;
1900 struct nfs_vattr nva;
1901 struct vnode_attr *vap = ap->a_vap;
1902 struct nfsmount *nmp;
1903 dev_t rdev;
1904
1905 nmp = VTONMP(ap->a_vp);
1906
1907 /*
1908 * Let's not go over the wire if we don't support any of the requested attributes.
1909 * Just fall through at the VFS layer and let it cons up what it needs.
1910 */
1911 /* Return the io size no matter what, since we don't go over the wire for this */
1912 VATTR_RETURN(vap, va_iosize, nfs_iosize);
1913
1914 supported_attrs = NFS3_SUPPORTED_VATTRS;
1915
1916 if ((vap->va_active & supported_attrs) == 0) {
1917 return 0;
1918 }
1919
1920 if (VATTR_IS_ACTIVE(ap->a_vap, va_name)) {
1921 NFS_VNOP_DBG("Getting attrs for 0x%llx, vname is %s\n",
1922 (uint64_t)VM_KERNEL_ADDRPERM(ap->a_vp),
1923 ap->a_vp->v_name ? ap->a_vp->v_name : "empty");
1924 }
1925
1926 /*
1927 * We should not go over the wire if only the fileid was requested and it has already been populated.
1928 */
1929 if ((vap->va_active & supported_attrs) == VNODE_ATTR_va_fileid) {
1930 np = VTONFS(ap->a_vp);
1931 if (np->n_attrstamp) {
1932 VATTR_RETURN(vap, va_fileid, np->n_vattr.nva_fileid);
1933 return 0;
1934 }
1935 }
1936
1937 error = nfs_getattr(VTONFS(ap->a_vp), &nva, ap->a_context, NGA_CACHED);
1938 if (error) {
1939 return error;
1940 }
1941
1942 /* copy nva to *a_vap */
1943 VATTR_RETURN(vap, va_type, nva.nva_type);
1944 VATTR_RETURN(vap, va_mode, nva.nva_mode);
1945 rdev = makedev(nva.nva_rawdev.specdata1, nva.nva_rawdev.specdata2);
1946 VATTR_RETURN(vap, va_rdev, rdev);
1947 VATTR_RETURN(vap, va_uid, nva.nva_uid);
1948 VATTR_RETURN(vap, va_gid, nva.nva_gid);
1949 VATTR_RETURN(vap, va_nlink, nva.nva_nlink);
1950 VATTR_RETURN(vap, va_fileid, nva.nva_fileid);
1951 VATTR_RETURN(vap, va_data_size, nva.nva_size);
1952 VATTR_RETURN(vap, va_data_alloc, nva.nva_bytes);
1953 vap->va_access_time.tv_sec = nva.nva_timesec[NFSTIME_ACCESS];
1954 vap->va_access_time.tv_nsec = nva.nva_timensec[NFSTIME_ACCESS];
1955 VATTR_SET_SUPPORTED(vap, va_access_time);
1956 vap->va_modify_time.tv_sec = nva.nva_timesec[NFSTIME_MODIFY];
1957 vap->va_modify_time.tv_nsec = nva.nva_timensec[NFSTIME_MODIFY];
1958 VATTR_SET_SUPPORTED(vap, va_modify_time);
1959 vap->va_change_time.tv_sec = nva.nva_timesec[NFSTIME_CHANGE];
1960 vap->va_change_time.tv_nsec = nva.nva_timensec[NFSTIME_CHANGE];
1961 VATTR_SET_SUPPORTED(vap, va_change_time);
1962
1963
1964 // VATTR_RETURN(vap, va_encoding, 0xffff /* kTextEncodingUnknown */);
1965 return error;
1966}
1967
1968/*
1969 * NFS setattr call.
1970 */
1971int
1972nfs_vnop_setattr(
1973 struct vnop_setattr_args /* {
1974 * struct vnodeop_desc *a_desc;
1975 * vnode_t a_vp;
1976 * struct vnode_attr *a_vap;
1977 * vfs_context_t a_context;
1978 * } */*ap)
1979{
1980 vfs_context_t ctx = ap->a_context;
1981 vnode_t vp = ap->a_vp;
1982 nfsnode_t np = VTONFS(vp);
1983 struct nfsmount *nmp;
1984 struct vnode_attr *vap = ap->a_vap;
1985 int error = 0;
1986 int biosize, nfsvers, namedattrs;
1987 u_quad_t origsize, vapsize;
1988 struct nfs_dulookup dul;
1989 nfsnode_t dnp = NULL;
1990 int dul_in_progress = 0;
1991 vnode_t dvp = NULL;
1992 const char *vname = NULL;
1993#if CONFIG_NFS4
1994 struct nfs_open_owner *noop = NULL;
1995 struct nfs_open_file *nofp = NULL;
1996#endif
1997 nmp = VTONMP(vp);
1998 if (nfs_mount_gone(nmp)) {
1999 return ENXIO;
2000 }
2001 nfsvers = nmp->nm_vers;
2002 namedattrs = (nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_NAMED_ATTR);
2003 biosize = nmp->nm_biosize;
2004
2005 /* Disallow write attempts if the filesystem is mounted read-only. */
2006 if (vnode_vfsisrdonly(vp)) {
2007 return EROFS;
2008 }
2009
2010 origsize = np->n_size;
2011 if (VATTR_IS_ACTIVE(vap, va_data_size)) {
2012 switch (vnode_vtype(vp)) {
2013 case VDIR:
2014 return EISDIR;
2015 case VCHR:
2016 case VBLK:
2017 case VSOCK:
2018 case VFIFO:
2019 if (!VATTR_IS_ACTIVE(vap, va_modify_time) &&
2020 !VATTR_IS_ACTIVE(vap, va_access_time) &&
2021 !VATTR_IS_ACTIVE(vap, va_mode) &&
2022 !VATTR_IS_ACTIVE(vap, va_uid) &&
2023 !VATTR_IS_ACTIVE(vap, va_gid)) {
2024 return 0;
2025 }
2026 VATTR_CLEAR_ACTIVE(vap, va_data_size);
2027 break;
2028 default:
2029 /*
2030 * Disallow write attempts if the filesystem is
2031 * mounted read-only.
2032 */
2033 if (vnode_vfsisrdonly(vp)) {
2034 return EROFS;
2035 }
2036 FSDBG_TOP(512, np->n_size, vap->va_data_size,
2037 np->n_vattr.nva_size, np->n_flag);
2038 /* clear NNEEDINVALIDATE, if set */
2039 if ((error = nfs_node_lock(np))) {
2040 return error;
2041 }
2042 if (np->n_flag & NNEEDINVALIDATE) {
2043 np->n_flag &= ~NNEEDINVALIDATE;
2044 }
2045 nfs_node_unlock(np);
2046 /* flush everything */
2047 error = nfs_vinvalbuf(vp, (vap->va_data_size ? V_SAVE : 0), ctx, 1);
2048 if (error) {
2049 NP(np, "nfs_setattr: nfs_vinvalbuf %d", error);
2050 FSDBG_BOT(512, np->n_size, vap->va_data_size, np->n_vattr.nva_size, -1);
2051 return error;
2052 }
2053#if CONFIG_NFS4
2054 if (nfsvers >= NFS_VER4) {
2055 /* setting file size requires having the file open for write access */
2056 if (np->n_flag & NREVOKE) {
2057 return EIO;
2058 }
2059 noop = nfs_open_owner_find(nmp, vfs_context_ucred(ctx), 1);
2060 if (!noop) {
2061 return ENOMEM;
2062 }
2063restart:
2064 error = nfs_mount_state_in_use_start(nmp, vfs_context_thread(ctx));
2065 if (error) {
2066 return error;
2067 }
2068 if (np->n_flag & NREVOKE) {
2069 nfs_mount_state_in_use_end(nmp, 0);
2070 return EIO;
2071 }
2072 error = nfs_open_file_find(np, noop, &nofp, 0, 0, 1);
2073 if (!error && (nofp->nof_flags & NFS_OPEN_FILE_LOST)) {
2074 error = EIO;
2075 }
2076 if (!error && (nofp->nof_flags & NFS_OPEN_FILE_REOPEN)) {
2077 nfs_mount_state_in_use_end(nmp, 0);
2078 error = nfs4_reopen(nofp, vfs_context_thread(ctx));
2079 nofp = NULL;
2080 if (!error) {
2081 goto restart;
2082 }
2083 }
2084 if (!error) {
2085 error = nfs_open_file_set_busy(nofp, vfs_context_thread(ctx));
2086 }
2087 if (error) {
2088 nfs_open_owner_rele(noop);
2089 return error;
2090 }
2091 if (!(nofp->nof_access & NFS_OPEN_SHARE_ACCESS_WRITE)) {
2092 /* we don't have the file open for write access, so open it */
2093 error = nfs4_open(np, nofp, NFS_OPEN_SHARE_ACCESS_WRITE, NFS_OPEN_SHARE_DENY_NONE, ctx);
2094 if (!error) {
2095 nofp->nof_flags |= NFS_OPEN_FILE_SETATTR;
2096 }
2097 if (nfs_mount_state_error_should_restart(error)) {
2098 nfs_open_file_clear_busy(nofp);
2099 nofp = NULL;
2100 if (nfs_mount_state_in_use_end(nmp, error)) {
2101 goto restart;
2102 }
2103 }
2104 }
2105 }
2106#endif
2107 nfs_data_lock(np, NFS_DATA_LOCK_EXCLUSIVE);
2108 if (np->n_size > vap->va_data_size) { /* shrinking? */
2109 daddr64_t obn, bn;
2110 int neweofoff, mustwrite;
2111 struct nfsbuf *bp;
2112
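			/*
			 * Walk cached buffers from the old EOF block down to the block
			 * containing the new EOF.  Buffers entirely past the new EOF are
			 * simply invalidated; for the buffer containing the new EOF, the
			 * dirty range is clipped and any dirty data remaining below the
			 * new EOF is written out before the buffer is discarded.
			 */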
2113 obn = (np->n_size - 1) / biosize;
2114 bn = vap->va_data_size / biosize;
2115 for (; obn >= bn; obn--) {
2116 if (!nfs_buf_is_incore(np, obn)) {
2117 continue;
2118 }
2119 error = nfs_buf_get(np, obn, biosize, NULL, NBLK_READ, &bp);
2120 if (error) {
2121 continue;
2122 }
2123 if (obn != bn) {
2124 FSDBG(512, bp, bp->nb_flags, 0, obn);
2125 SET(bp->nb_flags, NB_INVAL);
2126 nfs_buf_release(bp, 1);
2127 continue;
2128 }
2129 mustwrite = 0;
2130 neweofoff = vap->va_data_size - NBOFF(bp);
2131 /* check for any dirty data before the new EOF */
2132 if ((bp->nb_dirtyend > 0) && (bp->nb_dirtyoff < neweofoff)) {
2133 /* clip dirty range to EOF */
2134 if (bp->nb_dirtyend > neweofoff) {
2135 bp->nb_dirtyend = neweofoff;
2136 if (bp->nb_dirtyoff >= bp->nb_dirtyend) {
2137 bp->nb_dirtyoff = bp->nb_dirtyend = 0;
2138 }
2139 }
2140 if ((bp->nb_dirtyend > 0) && (bp->nb_dirtyoff < neweofoff)) {
2141 mustwrite++;
2142 }
2143 }
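				/* drop dirty-page bits for pages lying entirely beyond the new EOF */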
2144 bp->nb_dirty &= (1 << round_page_32(neweofoff) / PAGE_SIZE) - 1;
2145 if (bp->nb_dirty) {
2146 mustwrite++;
2147 }
2148 if (!mustwrite) {
2149 FSDBG(512, bp, bp->nb_flags, 0, obn);
2150 SET(bp->nb_flags, NB_INVAL);
2151 nfs_buf_release(bp, 1);
2152 continue;
2153 }
2154 /* gotta write out dirty data before invalidating */
2155 /* (NB_STABLE indicates that data writes should be FILESYNC) */
2156 /* (NB_NOCACHE indicates buffer should be discarded) */
2157 CLR(bp->nb_flags, (NB_DONE | NB_ERROR | NB_INVAL | NB_ASYNC | NB_READ));
2158 SET(bp->nb_flags, NB_STABLE | NB_NOCACHE);
2159 if (!IS_VALID_CRED(bp->nb_wcred)) {
2160 kauth_cred_t cred = vfs_context_ucred(ctx);
2161 kauth_cred_ref(cred);
2162 bp->nb_wcred = cred;
2163 }
2164 error = nfs_buf_write(bp);
2165 // Note: bp has been released
2166 if (error) {
2167 FSDBG(512, bp, 0xd00dee, 0xbad, error);
2168 nfs_node_lock_force(np);
2169 np->n_error = error;
2170 np->n_flag |= NWRITEERR;
2171 /*
2172 * There was a write error and we need to
2173 * invalidate attrs and flush buffers in
2174 * order to sync up with the server.
2175 * (if this write was extending the file,
2176 * we may no longer know the correct size)
2177 */
2178 NATTRINVALIDATE(np);
2179 nfs_node_unlock(np);
2180 nfs_data_unlock(np);
2181 nfs_vinvalbuf(vp, V_SAVE | V_IGNORE_WRITEERR, ctx, 1);
2182 nfs_data_lock(np, NFS_DATA_LOCK_EXCLUSIVE);
2183 error = 0;
2184 }
2185 }
2186 }
2187 if (vap->va_data_size != np->n_size) {
2188 ubc_setsize(vp, (off_t)vap->va_data_size); /* XXX error? */
2189 }
2190 origsize = np->n_size;
2191 np->n_size = np->n_vattr.nva_size = vap->va_data_size;
2192 nfs_node_lock_force(np);
2193 CLR(np->n_flag, NUPDATESIZE);
2194 nfs_node_unlock(np);
2195 FSDBG(512, np, np->n_size, np->n_vattr.nva_size, 0xf00d0001);
2196 }
2197 } else if (VATTR_IS_ACTIVE(vap, va_modify_time) ||
2198 VATTR_IS_ACTIVE(vap, va_access_time) ||
2199 (vap->va_vaflags & VA_UTIMES_NULL)) {
2200 if ((error = nfs_node_lock(np))) {
2201 return error;
2202 }
2203 if ((np->n_flag & NMODIFIED) && (vnode_vtype(vp) == VREG)) {
2204 nfs_node_unlock(np);
2205 error = nfs_vinvalbuf(vp, V_SAVE, ctx, 1);
2206 if (error == EINTR) {
2207 return error;
2208 }
2209 } else {
2210 nfs_node_unlock(np);
2211 }
2212 }
2213 if ((VATTR_IS_ACTIVE(vap, va_mode) || VATTR_IS_ACTIVE(vap, va_uid) || VATTR_IS_ACTIVE(vap, va_gid) ||
2214 VATTR_IS_ACTIVE(vap, va_acl) || VATTR_IS_ACTIVE(vap, va_uuuid) || VATTR_IS_ACTIVE(vap, va_guuid)) &&
2215 !(error = nfs_node_lock(np))) {
2216 NACCESSINVALIDATE(np);
2217 nfs_node_unlock(np);
2218 if (!namedattrs) {
2219 dvp = vnode_getparent(vp);
2220 vname = vnode_getname(vp);
2221 dnp = (dvp && vname) ? VTONFS(dvp) : NULL;
2222 if (dnp) {
2223 if (nfs_node_set_busy(dnp, vfs_context_thread(ctx))) {
2224 vnode_put(dvp);
2225 vnode_putname(vname);
2226 } else {
2227 nfs_dulookup_init(&dul, dnp, vname, strlen(vname), ctx);
2228 nfs_dulookup_start(&dul, dnp, ctx);
2229 dul_in_progress = 1;
2230 }
2231 } else {
2232 if (dvp) {
2233 vnode_put(dvp);
2234 }
2235 if (vname) {
2236 vnode_putname(vname);
2237 }
2238 }
2239 }
2240 }
2241
2242 if (!error) {
2243 error = nmp->nm_funcs->nf_setattr_rpc(np, vap, ctx);
2244 }
2245
2246 if (dul_in_progress) {
2247 nfs_dulookup_finish(&dul, dnp, ctx);
2248 nfs_node_clear_busy(dnp);
2249 vnode_put(dvp);
2250 vnode_putname(vname);
2251 }
2252
2253 FSDBG_BOT(512, np->n_size, vap->va_data_size, np->n_vattr.nva_size, error);
2254 if (VATTR_IS_ACTIVE(vap, va_data_size)) {
2255 if (error && (origsize != np->n_size) &&
2256 ((nfsvers < NFS_VER4) || !nfs_mount_state_error_should_restart(error))) {
2257 /* make every effort to resync file size w/ server... */
2258 /* (don't bother if we'll be restarting the operation) */
2259 int err; /* preserve "error" for return */
2260 np->n_size = np->n_vattr.nva_size = origsize;
2261 nfs_node_lock_force(np);
2262 CLR(np->n_flag, NUPDATESIZE);
2263 nfs_node_unlock(np);
2264 FSDBG(512, np, np->n_size, np->n_vattr.nva_size, 0xf00d0002);
2265 ubc_setsize(vp, (off_t)np->n_size); /* XXX check error */
2266 vapsize = vap->va_data_size;
2267 vap->va_data_size = origsize;
2268 err = nmp->nm_funcs->nf_setattr_rpc(np, vap, ctx);
2269 if (err) {
2270 NP(np, "nfs_vnop_setattr: nfs%d_setattr_rpc %d %d", nfsvers, error, err);
2271 }
2272 vap->va_data_size = vapsize;
2273 }
2274 nfs_node_lock_force(np);
2275 /*
2276 * The size was just set. If the size is already marked for update, don't
2277 * trust the newsize (it may have been set while the setattr was in progress).
2278 * Clear the update flag and make sure we fetch new attributes so we are sure
2279 * we have the latest size.
2280 */
2281 if (ISSET(np->n_flag, NUPDATESIZE)) {
2282 CLR(np->n_flag, NUPDATESIZE);
2283 NATTRINVALIDATE(np);
2284 nfs_node_unlock(np);
2285 nfs_getattr(np, NULL, ctx, NGA_UNCACHED);
2286 } else {
2287 nfs_node_unlock(np);
2288 }
2289 nfs_data_unlock(np);
2290#if CONFIG_NFS4
2291 if (nfsvers >= NFS_VER4) {
2292 if (nofp) {
2293 /* don't close our setattr open if we'll be restarting... */
2294 if (!nfs_mount_state_error_should_restart(error) &&
2295 (nofp->nof_flags & NFS_OPEN_FILE_SETATTR)) {
2296 int err = nfs_close(np, nofp, NFS_OPEN_SHARE_ACCESS_WRITE, NFS_OPEN_SHARE_DENY_NONE, ctx);
2297 if (err) {
2298 NP(np, "nfs_vnop_setattr: close error: %d", err);
2299 }
2300 nofp->nof_flags &= ~NFS_OPEN_FILE_SETATTR;
2301 }
2302 nfs_open_file_clear_busy(nofp);
2303 nofp = NULL;
2304 }
2305 if (nfs_mount_state_in_use_end(nmp, error)) {
2306 goto restart;
2307 }
2308 nfs_open_owner_rele(noop);
2309 }
2310#endif
2311 }
2312 return error;
2313}
2314
2315/*
2316 * Do an NFS setattr RPC.
2317 */
2318int
2319nfs3_setattr_rpc(
2320 nfsnode_t np,
2321 struct vnode_attr *vap,
2322 vfs_context_t ctx)
2323{
2324 struct nfsmount *nmp = NFSTONMP(np);
2325 int error = 0, lockerror = ENOENT, status, wccpostattr = 0, nfsvers;
2326 u_int64_t xid, nextxid;
2327 struct nfsm_chain nmreq, nmrep;
2328
2329 if (nfs_mount_gone(nmp)) {
2330 return ENXIO;
2331 }
2332 nfsvers = nmp->nm_vers;
2333
2334 VATTR_SET_SUPPORTED(vap, va_mode);
2335 VATTR_SET_SUPPORTED(vap, va_uid);
2336 VATTR_SET_SUPPORTED(vap, va_gid);
2337 VATTR_SET_SUPPORTED(vap, va_data_size);
2338 VATTR_SET_SUPPORTED(vap, va_access_time);
2339 VATTR_SET_SUPPORTED(vap, va_modify_time);
2340
2341
2342 if (VATTR_IS_ACTIVE(vap, va_flags)
2343 ) {
2344 if (vap->va_flags) { /* we don't support setting flags */
2345 if (vap->va_active & ~VNODE_ATTR_va_flags) {
2346 return EINVAL; /* return EINVAL if other attributes also set */
2347 } else {
2348 return ENOTSUP; /* return ENOTSUP for chflags(2) */
2349 }
2350 }
2351 /* no flags set, so we'll just ignore it */
2352 if (!(vap->va_active & ~VNODE_ATTR_va_flags)) {
2353 return 0; /* no (other) attributes to set, so nothing to do */
2354 }
2355 }
2356
2357 nfsm_chain_null(&nmreq);
2358 nfsm_chain_null(&nmrep);
2359
2360 nfsm_chain_build_alloc_init(error, &nmreq,
2361 NFSX_FH(nfsvers) + NFSX_SATTR(nfsvers));
2362 nfsm_chain_add_fh(error, &nmreq, nfsvers, np->n_fhp, np->n_fhsize);
2363 if (nfsvers == NFS_VER3) {
2364 if (VATTR_IS_ACTIVE(vap, va_mode)) {
2365 nfsm_chain_add_32(error, &nmreq, TRUE);
2366 nfsm_chain_add_32(error, &nmreq, vap->va_mode);
2367 } else {
2368 nfsm_chain_add_32(error, &nmreq, FALSE);
2369 }
2370 if (VATTR_IS_ACTIVE(vap, va_uid)) {
2371 nfsm_chain_add_32(error, &nmreq, TRUE);
2372 nfsm_chain_add_32(error, &nmreq, vap->va_uid);
2373 } else {
2374 nfsm_chain_add_32(error, &nmreq, FALSE);
2375 }
2376 if (VATTR_IS_ACTIVE(vap, va_gid)) {
2377 nfsm_chain_add_32(error, &nmreq, TRUE);
2378 nfsm_chain_add_32(error, &nmreq, vap->va_gid);
2379 } else {
2380 nfsm_chain_add_32(error, &nmreq, FALSE);
2381 }
2382 if (VATTR_IS_ACTIVE(vap, va_data_size)) {
2383 nfsm_chain_add_32(error, &nmreq, TRUE);
2384 nfsm_chain_add_64(error, &nmreq, vap->va_data_size);
2385 } else {
2386 nfsm_chain_add_32(error, &nmreq, FALSE);
2387 }
2388 if (vap->va_vaflags & VA_UTIMES_NULL) {
2389 nfsm_chain_add_32(error, &nmreq, NFS_TIME_SET_TO_SERVER);
2390 nfsm_chain_add_32(error, &nmreq, NFS_TIME_SET_TO_SERVER);
2391 } else {
2392 if (VATTR_IS_ACTIVE(vap, va_access_time)) {
2393 nfsm_chain_add_32(error, &nmreq, NFS_TIME_SET_TO_CLIENT);
2394 nfsm_chain_add_32(error, &nmreq, vap->va_access_time.tv_sec);
2395 nfsm_chain_add_32(error, &nmreq, vap->va_access_time.tv_nsec);
2396 } else {
2397 nfsm_chain_add_32(error, &nmreq, NFS_TIME_DONT_CHANGE);
2398 }
2399 if (VATTR_IS_ACTIVE(vap, va_modify_time)) {
2400 nfsm_chain_add_32(error, &nmreq, NFS_TIME_SET_TO_CLIENT);
2401 nfsm_chain_add_32(error, &nmreq, vap->va_modify_time.tv_sec);
2402 nfsm_chain_add_32(error, &nmreq, vap->va_modify_time.tv_nsec);
2403 } else {
2404 nfsm_chain_add_32(error, &nmreq, NFS_TIME_DONT_CHANGE);
2405 }
2406 }
2407 nfsm_chain_add_32(error, &nmreq, FALSE);
2408 } else {
2409 nfsm_chain_add_32(error, &nmreq, VATTR_IS_ACTIVE(vap, va_mode) ?
2410 vtonfsv2_mode(vnode_vtype(NFSTOV(np)), vap->va_mode) : -1);
2411 nfsm_chain_add_32(error, &nmreq, VATTR_IS_ACTIVE(vap, va_uid) ?
2412 vap->va_uid : (uint32_t)-1);
2413 nfsm_chain_add_32(error, &nmreq, VATTR_IS_ACTIVE(vap, va_gid) ?
2414 vap->va_gid : (uint32_t)-1);
2415 nfsm_chain_add_32(error, &nmreq, VATTR_IS_ACTIVE(vap, va_data_size) ?
2416 vap->va_data_size : (uint32_t)-1);
2417 if (VATTR_IS_ACTIVE(vap, va_access_time)) {
2418 nfsm_chain_add_32(error, &nmreq, vap->va_access_time.tv_sec);
2419 nfsm_chain_add_32(error, &nmreq, (vap->va_access_time.tv_nsec != -1) ?
2420 ((uint32_t)vap->va_access_time.tv_nsec / 1000) : 0xffffffff);
2421 } else {
2422 nfsm_chain_add_32(error, &nmreq, -1);
2423 nfsm_chain_add_32(error, &nmreq, -1);
2424 }
2425 if (VATTR_IS_ACTIVE(vap, va_modify_time)) {
2426 nfsm_chain_add_32(error, &nmreq, vap->va_modify_time.tv_sec);
2427 nfsm_chain_add_32(error, &nmreq, (vap->va_modify_time.tv_nsec != -1) ?
2428 ((uint32_t)vap->va_modify_time.tv_nsec / 1000) : 0xffffffff);
2429 } else {
2430 nfsm_chain_add_32(error, &nmreq, -1);
2431 nfsm_chain_add_32(error, &nmreq, -1);
2432 }
2433 }
2434 nfsm_chain_build_done(error, &nmreq);
2435 nfsmout_if(error);
2436 error = nfs_request(np, NULL, &nmreq, NFSPROC_SETATTR, ctx, NULL, &nmrep, &xid, &status);
2437 if ((lockerror = nfs_node_lock(np))) {
2438 error = lockerror;
2439 }
2440 if (nfsvers == NFS_VER3) {
2441 struct timespec premtime = { .tv_sec = 0, .tv_nsec = 0 };
2442 nfsm_chain_get_wcc_data(error, &nmrep, np, &premtime, &wccpostattr, &xid);
2443 nfsmout_if(error);
2444 /* if file hadn't changed, update cached mtime */
2445 if (nfstimespeccmp(&np->n_mtime, &premtime, ==)) {
2446 NFS_CHANGED_UPDATE(nfsvers, np, &np->n_vattr);
2447 }
2448 /* if directory hadn't changed, update namecache mtime */
2449 if ((vnode_vtype(NFSTOV(np)) == VDIR) &&
2450 nfstimespeccmp(&np->n_ncmtime, &premtime, ==)) {
2451 NFS_CHANGED_UPDATE_NC(nfsvers, np, &np->n_vattr);
2452 }
2453 if (!wccpostattr) {
2454 NATTRINVALIDATE(np);
2455 }
2456 error = status;
2457 } else {
2458 if (!error) {
2459 error = status;
2460 }
2461 nfsm_chain_loadattr(error, &nmrep, np, nfsvers, &xid);
2462 }
2463 /*
2464 * We just changed the attributes and we want to make sure that we
2465 * see the latest attributes. Get the next XID. If it's not the
2466 * next XID after the SETATTR XID, then it's possible that another
2467 * RPC was in flight at the same time and it might put stale attributes
2468 * in the cache. In that case, we invalidate the attributes and set
2469 * the attribute cache XID to guarantee that newer attributes will
2470 * get loaded next.
2471 */
2472 nextxid = 0;
2473 nfs_get_xid(&nextxid);
2474 if (nextxid != (xid + 1)) {
2475 np->n_xid = nextxid;
2476 NATTRINVALIDATE(np);
2477 }
2478nfsmout:
2479 if (!lockerror) {
2480 nfs_node_unlock(np);
2481 }
2482 nfsm_chain_cleanup(&nmreq);
2483 nfsm_chain_cleanup(&nmrep);
2484 return error;
2485}
2486
2487/*
2488 * NFS lookup call, one step at a time...
2489 * First look in cache
2490 * If not found, unlock the directory nfsnode and do the RPC
2491 */
2492int
2493nfs_vnop_lookup(
2494 struct vnop_lookup_args /* {
2495 * struct vnodeop_desc *a_desc;
2496 * vnode_t a_dvp;
2497 * vnode_t *a_vpp;
2498 * struct componentname *a_cnp;
2499 * vfs_context_t a_context;
2500 * } */*ap)
2501{
2502 vfs_context_t ctx = ap->a_context;
2503 struct componentname *cnp = ap->a_cnp;
2504 vnode_t dvp = ap->a_dvp;
2505 vnode_t *vpp = ap->a_vpp;
2506 int flags = cnp->cn_flags;
2507 vnode_t newvp;
2508 nfsnode_t dnp, np;
2509 struct nfsmount *nmp;
2510 mount_t mp;
2511 int nfsvers, error, busyerror = ENOENT, isdot, isdotdot, negnamecache;
2512 u_int64_t xid;
2513 struct nfs_vattr nvattr;
2514 int ngflags;
2515 struct vnop_access_args naa;
2516 fhandle_t fh;
2517 struct nfsreq rq, *req = &rq;
2518
2519 *vpp = NULLVP;
2520
2521 dnp = VTONFS(dvp);
2522 NVATTR_INIT(&nvattr);
2523
2524 mp = vnode_mount(dvp);
2525 nmp = VFSTONFS(mp);
2526 if (nfs_mount_gone(nmp)) {
2527 error = ENXIO;
2528 goto error_return;
2529 }
2530 nfsvers = nmp->nm_vers;
2531 negnamecache = !NMFLAG(nmp, NONEGNAMECACHE);
2532
2533 if ((error = busyerror = nfs_node_set_busy(dnp, vfs_context_thread(ctx)))) {
2534 goto error_return;
2535 }
2536 /* nfs_getattr() will check changed and purge caches */
2537 if ((error = nfs_getattr(dnp, NULL, ctx, NGA_CACHED))) {
2538 goto error_return;
2539 }
2540
2541 error = cache_lookup(dvp, vpp, cnp);
2542 switch (error) {
2543 case ENOENT:
2544 /* negative cache entry */
2545 goto error_return;
2546 case 0:
2547 /* cache miss */
2548 if ((nfsvers > NFS_VER2) && NMFLAG(nmp, RDIRPLUS)) {
2549 /* if rdirplus, try dir buf cache lookup */
2550 error = nfs_dir_buf_cache_lookup(dnp, &np, cnp, ctx, 0);
2551 if (!error && np) {
2552 /* dir buf cache hit */
2553 *vpp = NFSTOV(np);
2554 error = -1;
2555 }
2556 }
2557 if (error != -1) { /* cache miss */
2558 break;
2559 }
2560 /* FALLTHROUGH */
2561 case -1:
2562 /* cache hit, not really an error */
2563 OSAddAtomic64(1, &nfsstats.lookupcache_hits);
2564
2565 nfs_node_clear_busy(dnp);
2566 busyerror = ENOENT;
2567
2568 /* check for directory access */
2569 naa.a_desc = &vnop_access_desc;
2570 naa.a_vp = dvp;
2571 naa.a_action = KAUTH_VNODE_SEARCH;
2572 naa.a_context = ctx;
2573
2574 /* compute actual success/failure based on accessibility */
2575 error = nfs_vnop_access(&naa);
2576 /* FALLTHROUGH */
2577 default:
2578 /* unexpected error from cache_lookup */
2579 goto error_return;
2580 }
2581
2582 /* skip lookup, if we know who we are: "." or ".." */
2583 isdot = isdotdot = 0;
2584 if (cnp->cn_nameptr[0] == '.') {
2585 if (cnp->cn_namelen == 1) {
2586 isdot = 1;
2587 }
2588 if ((cnp->cn_namelen == 2) && (cnp->cn_nameptr[1] == '.')) {
2589 isdotdot = 1;
2590 }
2591 }
2592 if (isdotdot || isdot) {
2593 fh.fh_len = 0;
2594 goto found;
2595 }
2596#if CONFIG_NFS4
2597 if ((nfsvers >= NFS_VER4) && (dnp->n_vattr.nva_flags & NFS_FFLAG_TRIGGER)) {
2598 /* we should never be looking things up in a trigger directory, return nothing */
2599 error = ENOENT;
2600 goto error_return;
2601 }
2602#endif
2603
2604 /* do we know this name is too long? */
2605 nmp = VTONMP(dvp);
2606 if (nfs_mount_gone(nmp)) {
2607 error = ENXIO;
2608 goto error_return;
2609 }
2610 if (NFS_BITMAP_ISSET(nmp->nm_fsattr.nfsa_bitmap, NFS_FATTR_MAXNAME) &&
2611 (cnp->cn_namelen > (int)nmp->nm_fsattr.nfsa_maxname)) {
2612 error = ENAMETOOLONG;
2613 goto error_return;
2614 }
2615
2616 error = 0;
2617 newvp = NULLVP;
2618
2619 OSAddAtomic64(1, &nfsstats.lookupcache_misses);
2620
2621 error = nmp->nm_funcs->nf_lookup_rpc_async(dnp, cnp->cn_nameptr, cnp->cn_namelen, ctx, &req);
2622 nfsmout_if(error);
2623 error = nmp->nm_funcs->nf_lookup_rpc_async_finish(dnp, cnp->cn_nameptr, cnp->cn_namelen, ctx, req, &xid, &fh, &nvattr);
2624 nfsmout_if(error);
2625
2626 /* is the file handle the same as this directory's file handle? */
2627 isdot = NFS_CMPFH(dnp, fh.fh_data, fh.fh_len);
2628
2629found:
2630 if (flags & ISLASTCN) {
2631 switch (cnp->cn_nameiop) {
2632 case DELETE:
2633 cnp->cn_flags &= ~MAKEENTRY;
2634 break;
2635 case RENAME:
2636 cnp->cn_flags &= ~MAKEENTRY;
2637 if (isdot) {
2638 error = EISDIR;
2639 goto error_return;
2640 }
2641 break;
2642 }
2643 }
2644
2645 if (isdotdot) {
2646 newvp = vnode_getparent(dvp);
2647 if (!newvp) {
2648 error = ENOENT;
2649 goto error_return;
2650 }
2651 } else if (isdot) {
2652 error = vnode_get(dvp);
2653 if (error) {
2654 goto error_return;
2655 }
2656 newvp = dvp;
2657 nfs_node_lock_force(dnp);
2658 if (fh.fh_len && (dnp->n_xid <= xid)) {
2659 nfs_loadattrcache(dnp, &nvattr, &xid, 0);
2660 }
2661 nfs_node_unlock(dnp);
2662 } else {
2663 ngflags = (cnp->cn_flags & MAKEENTRY) ? NG_MAKEENTRY : 0;
2664 error = nfs_nget(mp, dnp, cnp, fh.fh_data, fh.fh_len, &nvattr, &xid, rq.r_auth, ngflags, &np);
2665 if (error) {
2666 goto error_return;
2667 }
2668 newvp = NFSTOV(np);
2669 nfs_node_unlock(np);
2670 }
2671 *vpp = newvp;
2672
2673nfsmout:
2674 if (error) {
2675 if (((cnp->cn_nameiop == CREATE) || (cnp->cn_nameiop == RENAME)) &&
2676 (flags & ISLASTCN) && (error == ENOENT)) {
2677 if (vnode_mount(dvp) && vnode_vfsisrdonly(dvp)) {
2678 error = EROFS;
2679 } else {
2680 error = EJUSTRETURN;
2681 }
2682 }
2683 }
2684 if ((error == ENOENT) && (cnp->cn_flags & MAKEENTRY) &&
2685 (cnp->cn_nameiop != CREATE) && negnamecache) {
2686 /* add a negative entry in the name cache */
2687 nfs_node_lock_force(dnp);
2688 cache_enter(dvp, NULL, cnp);
2689 dnp->n_flag |= NNEGNCENTRIES;
2690 nfs_node_unlock(dnp);
2691 }
2692error_return:
2693 NVATTR_CLEANUP(&nvattr);
2694 if (!busyerror) {
2695 nfs_node_clear_busy(dnp);
2696 }
2697 if (error && *vpp) {
2698 vnode_put(*vpp);
2699 *vpp = NULLVP;
2700 }
2701 return error;
2702}
2703
2704int nfs_readlink_nocache = DEFAULT_READLINK_NOCACHE;
2705
2706/*
2707 * NFS readlink call
2708 */
2709int
2710nfs_vnop_readlink(
2711 struct vnop_readlink_args /* {
2712 * struct vnodeop_desc *a_desc;
2713 * vnode_t a_vp;
2714 * struct uio *a_uio;
2715 * vfs_context_t a_context;
2716 * } */*ap)
2717{
2718 vfs_context_t ctx = ap->a_context;
2719 nfsnode_t np = VTONFS(ap->a_vp);
2720 struct nfsmount *nmp;
2721 int error = 0, nfsvers;
2722 uint32_t buflen;
2723 uio_t uio = ap->a_uio;
2724 struct nfsbuf *bp = NULL;
2725 struct timespec ts;
2726 int timeo;
2727
2728 if (vnode_vtype(ap->a_vp) != VLNK) {
2729 return EPERM;
2730 }
2731
2732 if (uio_resid(uio) == 0) {
2733 return 0;
2734 }
2735 if (uio_offset(uio) < 0) {
2736 return EINVAL;
2737 }
2738
2739 nmp = VTONMP(ap->a_vp);
2740 if (nfs_mount_gone(nmp)) {
2741 return ENXIO;
2742 }
2743 nfsvers = nmp->nm_vers;
2744
2745
2746 /* nfs_getattr() will check changed and purge caches */
2747 if ((error = nfs_getattr(np, NULL, ctx, nfs_readlink_nocache ? NGA_UNCACHED : NGA_CACHED))) {
2748 FSDBG(531, np, 0xd1e0001, 0, error);
2749 return error;
2750 }
2751
2752 if (nfs_readlink_nocache) {
2753 timeo = nfs_attrcachetimeout(np);
2754 nanouptime(&ts);
2755 }
2756
2757retry:
2758 OSAddAtomic64(1, &nfsstats.biocache_readlinks);
2759 error = nfs_buf_get(np, 0, NFS_MAXPATHLEN, vfs_context_thread(ctx), NBLK_META, &bp);
2760 if (error) {
2761 FSDBG(531, np, 0xd1e0002, 0, error);
2762 return error;
2763 }
2764
2765 if (nfs_readlink_nocache) {
2766 NFS_VNOP_DBG("timeo = %d ts.tv_sec = %ld need refresh = %d cached = %d\n", timeo, ts.tv_sec,
2767 (np->n_rltim.tv_sec + timeo) < ts.tv_sec || nfs_readlink_nocache > 1,
2768 ISSET(bp->nb_flags, NB_CACHE) == NB_CACHE);
2769 /* n_rltim is synchronized by the associated nfs buf */
2770 if (ISSET(bp->nb_flags, NB_CACHE) && ((nfs_readlink_nocache > 1) || ((np->n_rltim.tv_sec + timeo) < ts.tv_sec))) {
2771 SET(bp->nb_flags, NB_INVAL);
2772 nfs_buf_release(bp, 0);
2773 goto retry;
2774 }
2775 }
2776 if (!ISSET(bp->nb_flags, NB_CACHE)) {
2777readagain:
2778 OSAddAtomic64(1, &nfsstats.readlink_bios);
2779 buflen = bp->nb_bufsize;
2780 error = nmp->nm_funcs->nf_readlink_rpc(np, bp->nb_data, &buflen, ctx);
2781 if (error) {
2782 if (error == ESTALE) {
2783 NFS_VNOP_DBG("Stale FH from readlink rpc\n");
2784 error = nfs_refresh_fh(np, ctx);
2785 if (error == 0) {
2786 goto readagain;
2787 }
2788 }
2789 SET(bp->nb_flags, NB_ERROR);
2790 bp->nb_error = error;
2791 NFS_VNOP_DBG("readlink failed %d\n", error);
2792 } else {
2793 bp->nb_validoff = 0;
2794 bp->nb_validend = buflen;
2795 np->n_rltim = ts;
2796 NFS_VNOP_DBG("readlink of %.*s\n", bp->nb_validend, (char *)bp->nb_data);
2797 }
2798 } else {
2799 NFS_VNOP_DBG("got cached link of %.*s\n", bp->nb_validend, (char *)bp->nb_data);
2800 }
2801
2802 if (!error && (bp->nb_validend > 0)) {
2803 error = uiomove(bp->nb_data, bp->nb_validend, uio);
2804 }
2805 FSDBG(531, np, bp->nb_validend, 0, error);
2806 nfs_buf_release(bp, 1);
2807 return error;
2808}
2809
2810/*
2811 * Do a readlink RPC.
2812 */
2813int
2814nfs3_readlink_rpc(nfsnode_t np, char *buf, uint32_t *buflenp, vfs_context_t ctx)
2815{
2816 struct nfsmount *nmp;
2817 int error = 0, lockerror = ENOENT, nfsvers, status;
2818 uint32_t len;
2819 u_int64_t xid;
2820 struct nfsm_chain nmreq, nmrep;
2821
2822 nmp = NFSTONMP(np);
2823 if (nfs_mount_gone(nmp)) {
2824 return ENXIO;
2825 }
2826 nfsvers = nmp->nm_vers;
2827 nfsm_chain_null(&nmreq);
2828 nfsm_chain_null(&nmrep);
2829
2830 nfsm_chain_build_alloc_init(error, &nmreq, NFSX_FH(nfsvers));
2831 nfsm_chain_add_fh(error, &nmreq, nfsvers, np->n_fhp, np->n_fhsize);
2832 nfsm_chain_build_done(error, &nmreq);
2833 nfsmout_if(error);
2834 error = nfs_request(np, NULL, &nmreq, NFSPROC_READLINK, ctx, NULL, &nmrep, &xid, &status);
2835 if ((lockerror = nfs_node_lock(np))) {
2836 error = lockerror;
2837 }
2838 if (nfsvers == NFS_VER3) {
2839 nfsm_chain_postop_attr_update(error, &nmrep, np, &xid);
2840 }
2841 if (!error) {
2842 error = status;
2843 }
2844 nfsm_chain_get_32(error, &nmrep, len);
2845 nfsmout_if(error);
2846 if ((nfsvers == NFS_VER2) && (len > *buflenp)) {
2847 error = EBADRPC;
2848 goto nfsmout;
2849 }
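	/*
	 * Clamp the returned link length to the caller's buffer: prefer the
	 * cached node size when it fits, otherwise use the buffer size minus one.
	 */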
2850 if (len >= *buflenp) {
2851 if (np->n_size && (np->n_size < *buflenp)) {
2852 len = np->n_size;
2853 } else {
2854 len = *buflenp - 1;
2855 }
2856 }
2857 nfsm_chain_get_opaque(error, &nmrep, len, buf);
2858 if (!error) {
2859 *buflenp = len;
2860 }
2861nfsmout:
2862 if (!lockerror) {
2863 nfs_node_unlock(np);
2864 }
2865 nfsm_chain_cleanup(&nmreq);
2866 nfsm_chain_cleanup(&nmrep);
2867 return error;
2868}
2869
2870/*
2871 * NFS read RPC call
2872 * Ditto above
2873 */
2874int
2875nfs_read_rpc(nfsnode_t np, uio_t uio, vfs_context_t ctx)
2876{
2877 struct nfsmount *nmp;
2878 int error = 0, nfsvers, eof = 0;
2879 size_t nmrsize, len, retlen;
2880 user_ssize_t tsiz;
2881 off_t txoffset;
2882 struct nfsreq rq, *req = &rq;
2883#if CONFIG_NFS4
2884 uint32_t stategenid = 0, restart = 0;
2885#endif
2886 FSDBG_TOP(536, np, uio_offset(uio), uio_resid(uio), 0);
2887 nmp = NFSTONMP(np);
2888 if (nfs_mount_gone(nmp)) {
2889 return ENXIO;
2890 }
2891 nfsvers = nmp->nm_vers;
2892 nmrsize = nmp->nm_rsize;
2893
2894 txoffset = uio_offset(uio);
2895 tsiz = uio_resid(uio);
2896 if ((nfsvers == NFS_VER2) && ((uint64_t)(txoffset + tsiz) > 0xffffffffULL)) {
2897 FSDBG_BOT(536, np, uio_offset(uio), uio_resid(uio), EFBIG);
2898 return EFBIG;
2899 }
2900
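	/*
	 * Issue the read as a series of RPCs, each no larger than the mount's
	 * rsize.  The loop ends when the request is satisfied, the server
	 * reports EOF (v3) or returns a short read (v2), or an error occurs.
	 */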
2901 while (tsiz > 0) {
2902 len = retlen = (tsiz > (user_ssize_t)nmrsize) ? nmrsize : (size_t)tsiz;
2903 FSDBG(536, np, txoffset, len, 0);
2904 if (np->n_flag & NREVOKE) {
2905 error = EIO;
2906 break;
2907 }
2908#if CONFIG_NFS4
2909 if (nmp->nm_vers >= NFS_VER4) {
2910 stategenid = nmp->nm_stategenid;
2911 }
2912#endif
2913 error = nmp->nm_funcs->nf_read_rpc_async(np, txoffset, len,
2914 vfs_context_thread(ctx), vfs_context_ucred(ctx), NULL, &req);
2915 if (!error) {
2916 error = nmp->nm_funcs->nf_read_rpc_async_finish(np, req, uio, &retlen, &eof);
2917 }
2918#if CONFIG_NFS4
2919 if ((nmp->nm_vers >= NFS_VER4) && nfs_mount_state_error_should_restart(error) &&
2920 (++restart <= nfs_mount_state_max_restarts(nmp))) { /* guard against no progress */
2921 lck_mtx_lock(&nmp->nm_lock);
2922 if ((error != NFSERR_GRACE) && (stategenid == nmp->nm_stategenid)) {
2923 NP(np, "nfs_read_rpc: error %d, initiating recovery", error);
2924 nfs_need_recover(nmp, error);
2925 }
2926 lck_mtx_unlock(&nmp->nm_lock);
2927 if (np->n_flag & NREVOKE) {
2928 error = EIO;
2929 } else {
2930 if (error == NFSERR_GRACE) {
2931 tsleep(&nmp->nm_state, (PZERO - 1), "nfsgrace", 2 * hz);
2932 }
2933 if (!(error = nfs_mount_state_wait_for_recovery(nmp))) {
2934 continue;
2935 }
2936 }
2937 }
2938#endif
2939 if (error) {
2940 break;
2941 }
2942 txoffset += retlen;
2943 tsiz -= retlen;
2944 if (nfsvers != NFS_VER2) {
2945 if (eof || (retlen == 0)) {
2946 tsiz = 0;
2947 }
2948 } else if (retlen < len) {
2949 tsiz = 0;
2950 }
2951 }
2952
2953 FSDBG_BOT(536, np, eof, uio_resid(uio), error);
2954 return error;
2955}
2956
2957int
2958nfs3_read_rpc_async(
2959 nfsnode_t np,
2960 off_t offset,
2961 size_t len,
2962 thread_t thd,
2963 kauth_cred_t cred,
2964 struct nfsreq_cbinfo *cb,
2965 struct nfsreq **reqp)
2966{
2967 struct nfsmount *nmp;
2968 int error = 0, nfsvers;
2969 struct nfsm_chain nmreq;
2970
2971 nmp = NFSTONMP(np);
2972 if (nfs_mount_gone(nmp)) {
2973 return ENXIO;
2974 }
2975 nfsvers = nmp->nm_vers;
2976
2977 nfsm_chain_null(&nmreq);
2978 nfsm_chain_build_alloc_init(error, &nmreq, NFSX_FH(nfsvers) + 3 * NFSX_UNSIGNED);
2979 nfsm_chain_add_fh(error, &nmreq, nfsvers, np->n_fhp, np->n_fhsize);
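	/* v3 READ args: 64-bit offset and 32-bit count; v2 uses a 32-bit offset,
	 * count, and a trailing totalcount word that the protocol leaves unused */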
2980 if (nfsvers == NFS_VER3) {
2981 nfsm_chain_add_64(error, &nmreq, offset);
2982 nfsm_chain_add_32(error, &nmreq, len);
2983 } else {
2984 nfsm_chain_add_32(error, &nmreq, offset);
2985 nfsm_chain_add_32(error, &nmreq, len);
2986 nfsm_chain_add_32(error, &nmreq, 0);
2987 }
2988 nfsm_chain_build_done(error, &nmreq);
2989 nfsmout_if(error);
2990 error = nfs_request_async(np, NULL, &nmreq, NFSPROC_READ, thd, cred, NULL, 0, cb, reqp);
2991nfsmout:
2992 nfsm_chain_cleanup(&nmreq);
2993 return error;
2994}
2995
2996int
2997nfs3_read_rpc_async_finish(
2998 nfsnode_t np,
2999 struct nfsreq *req,
3000 uio_t uio,
3001 size_t *lenp,
3002 int *eofp)
3003{
3004 int error = 0, lockerror, nfsvers, status, eof = 0;
3005 size_t retlen = 0;
3006 uint64_t xid;
3007 struct nfsmount *nmp;
3008 struct nfsm_chain nmrep;
3009
3010 nmp = NFSTONMP(np);
3011 if (nfs_mount_gone(nmp)) {
3012 nfs_request_async_cancel(req);
3013 return ENXIO;
3014 }
3015 nfsvers = nmp->nm_vers;
3016
3017 nfsm_chain_null(&nmrep);
3018
3019 error = nfs_request_async_finish(req, &nmrep, &xid, &status);
3020 if (error == EINPROGRESS) { /* async request restarted */
3021 return error;
3022 }
3023
3024 if ((lockerror = nfs_node_lock(np))) {
3025 error = lockerror;
3026 }
3027 if (nfsvers == NFS_VER3) {
3028 nfsm_chain_postop_attr_update(error, &nmrep, np, &xid);
3029 }
3030 if (!error) {
3031 error = status;
3032 }
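	/*
	 * For v3, skip the count word and pick up the EOF flag; the byte count
	 * actually transferred is taken below from the opaque data item.  For
	 * v2 the reply carries attributes here instead.
	 */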
3033 if (nfsvers == NFS_VER3) {
3034 nfsm_chain_adv(error, &nmrep, NFSX_UNSIGNED);
3035 nfsm_chain_get_32(error, &nmrep, eof);
3036 } else {
3037 nfsm_chain_loadattr(error, &nmrep, np, nfsvers, &xid);
3038 }
3039 if (!lockerror) {
3040 nfs_node_unlock(np);
3041 }
3042 nfsm_chain_get_32(error, &nmrep, retlen);
3043 if ((nfsvers == NFS_VER2) && (retlen > *lenp)) {
3044 error = EBADRPC;
3045 }
3046 nfsmout_if(error);
3047 error = nfsm_chain_get_uio(&nmrep, MIN(retlen, *lenp), uio);
3048 if (eofp) {
3049 if (nfsvers == NFS_VER3) {
3050 if (!eof && !retlen) {
3051 eof = 1;
3052 }
3053 } else if (retlen < *lenp) {
3054 eof = 1;
3055 }
3056 *eofp = eof;
3057 }
3058 *lenp = MIN(retlen, *lenp);
3059nfsmout:
3060 nfsm_chain_cleanup(&nmrep);
3061 return error;
3062}
3063
3064/*
3065 * NFS write call
3066 */
3067int
3068nfs_vnop_write(
3069 struct vnop_write_args /* {
3070 * struct vnodeop_desc *a_desc;
3071 * vnode_t a_vp;
3072 * struct uio *a_uio;
3073 * int a_ioflag;
3074 * vfs_context_t a_context;
3075 * } */*ap)
3076{
3077 vfs_context_t ctx = ap->a_context;
3078 uio_t uio = ap->a_uio;
3079 vnode_t vp = ap->a_vp;
3080 nfsnode_t np = VTONFS(vp);
3081 int ioflag = ap->a_ioflag;
3082 struct nfsbuf *bp;
3083 struct nfsmount *nmp = VTONMP(vp);
3084 daddr64_t lbn;
3085 int biosize;
3086 int n, on, error = 0;
3087 off_t boff, start, end;
3088 uio_t auio;
3089 char auio_buf[UIO_SIZEOF(1)];
3090 thread_t thd;
3091 kauth_cred_t cred;
3092
3093 FSDBG_TOP(515, np, uio_offset(uio), uio_resid(uio), ioflag);
3094
3095 if (vnode_vtype(vp) != VREG) {
3096 FSDBG_BOT(515, np, uio_offset(uio), uio_resid(uio), EIO);
3097 return EIO;
3098 }
3099
3100 thd = vfs_context_thread(ctx);
3101 cred = vfs_context_ucred(ctx);
3102
3103 nfs_data_lock(np, NFS_DATA_LOCK_SHARED);
3104
3105 if ((error = nfs_node_lock(np))) {
3106 nfs_data_unlock(np);
3107 FSDBG_BOT(515, np, uio_offset(uio), uio_resid(uio), error);
3108 return error;
3109 }
3110 np->n_wrbusy++;
3111
3112 if (np->n_flag & NWRITEERR) {
3113 error = np->n_error;
3114 np->n_flag &= ~NWRITEERR;
3115 }
3116 if (np->n_flag & NNEEDINVALIDATE) {
3117 np->n_flag &= ~NNEEDINVALIDATE;
3118 nfs_node_unlock(np);
3119 nfs_data_unlock(np);
3120 nfs_vinvalbuf(vp, V_SAVE | V_IGNORE_WRITEERR, ctx, 1);
3121 nfs_data_lock(np, NFS_DATA_LOCK_SHARED);
3122 } else {
3123 nfs_node_unlock(np);
3124 }
3125 if (error) {
3126 goto out;
3127 }
3128
3129 biosize = nmp->nm_biosize;
3130
3131 if (ioflag & (IO_APPEND | IO_SYNC)) {
3132 nfs_node_lock_force(np);
3133 if (np->n_flag & NMODIFIED) {
3134 NATTRINVALIDATE(np);
3135 nfs_node_unlock(np);
3136 nfs_data_unlock(np);
3137 error = nfs_vinvalbuf(vp, V_SAVE, ctx, 1);
3138 nfs_data_lock(np, NFS_DATA_LOCK_SHARED);
3139 if (error) {
3140 FSDBG(515, np, uio_offset(uio), 0x10bad01, error);
3141 goto out;
3142 }
3143 } else {
3144 nfs_node_unlock(np);
3145 }
3146 if (ioflag & IO_APPEND) {
3147 nfs_data_unlock(np);
3148 /* nfs_getattr() will check changed and purge caches */
3149 error = nfs_getattr(np, NULL, ctx, NGA_UNCACHED);
3150 /* we'll be extending the file, so take the data lock exclusive */
3151 nfs_data_lock(np, NFS_DATA_LOCK_EXCLUSIVE);
3152 if (error) {
3153 FSDBG(515, np, uio_offset(uio), 0x10bad02, error);
3154 goto out;
3155 }
3156 uio_setoffset(uio, np->n_size);
3157 }
3158 }
3159 if (uio_offset(uio) < 0) {
3160 error = EINVAL;
3161 FSDBG_BOT(515, np, uio_offset(uio), 0xbad0ff, error);
3162 goto out;
3163 }
3164 if (uio_resid(uio) == 0) {
3165 goto out;
3166 }
3167
3168 if (((uio_offset(uio) + uio_resid(uio)) > (off_t)np->n_size) && !(ioflag & IO_APPEND)) {
3169 /*
3170 * It looks like we'll be extending the file, so take the data lock exclusive.
3171 */
3172 nfs_data_unlock(np);
3173 nfs_data_lock(np, NFS_DATA_LOCK_EXCLUSIVE);
3174
3175 /*
3176 * Also, if the write begins after the previous EOF buffer, make sure to zero
3177 * and validate the new bytes in that buffer.
3178 */
3179 struct nfsbuf *eofbp = NULL;
3180 daddr64_t eofbn = np->n_size / biosize;
3181 int eofoff = np->n_size % biosize;
3182 lbn = uio_offset(uio) / biosize;
3183
3184 if (eofoff && (eofbn < lbn)) {
3185 if ((error = nfs_buf_get(np, eofbn, biosize, thd, NBLK_WRITE | NBLK_ONLYVALID, &eofbp))) {
3186 goto out;
3187 }
3188 np->n_size += (biosize - eofoff);
3189 nfs_node_lock_force(np);
3190 CLR(np->n_flag, NUPDATESIZE);
3191 np->n_flag |= NMODIFIED;
3192 nfs_node_unlock(np);
3193 FSDBG(516, np, np->n_size, np->n_vattr.nva_size, 0xf00d0001);
3194 ubc_setsize(vp, (off_t)np->n_size); /* XXX errors */
3195 if (eofbp) {
3196 /*
3197 * For the old last page, don't zero bytes if there
3198 * are invalid bytes in that page (i.e. the page isn't
3199 * currently valid).
3200 * For pages after the old last page, zero them and
3201 * mark them as valid.
3202 */
3203 char *d;
3204 int i;
3205 if (ioflag & IO_NOCACHE) {
3206 SET(eofbp->nb_flags, NB_NOCACHE);
3207 }
3208 NFS_BUF_MAP(eofbp);
3209 FSDBG(516, eofbp, eofoff, biosize - eofoff, 0xe0fff01e);
3210 d = eofbp->nb_data;
3211 i = eofoff / PAGE_SIZE;
3212 while (eofoff < biosize) {
3213 int poff = eofoff & PAGE_MASK;
3214 if (!poff || NBPGVALID(eofbp, i)) {
3215 bzero(d + eofoff, PAGE_SIZE - poff);
3216 NBPGVALID_SET(eofbp, i);
3217 }
3218 eofoff += PAGE_SIZE - poff;
3219 i++;
3220 }
3221 nfs_buf_release(eofbp, 1);
3222 }
3223 }
3224 }
3225
3226 do {
3227 OSAddAtomic64(1, &nfsstats.biocache_writes);
3228 lbn = uio_offset(uio) / biosize;
3229 on = uio_offset(uio) % biosize;
3230 n = biosize - on;
3231 if (uio_resid(uio) < n) {
3232 n = uio_resid(uio);
3233 }
3234again:
3235 /*
3236 * Get a cache block for writing. The range to be written is
3237 * (off..off+n) within the block. We ensure that the block
3238 * either has no dirty region or that the given range is
3239 * contiguous with the existing dirty region.
3240 */
3241 error = nfs_buf_get(np, lbn, biosize, thd, NBLK_WRITE, &bp);
3242 if (error) {
3243 goto out;
3244 }
3245 /* map the block because we know we're going to write to it */
3246 NFS_BUF_MAP(bp);
3247
3248 if (ioflag & IO_NOCACHE) {
3249 SET(bp->nb_flags, NB_NOCACHE);
3250 }
3251
3252 if (!IS_VALID_CRED(bp->nb_wcred)) {
3253 kauth_cred_ref(cred);
3254 bp->nb_wcred = cred;
3255 }
3256
3257 /*
3258 * If there's already a dirty range AND dirty pages in this block we
3259 * need to send a commit AND write the dirty pages before continuing.
3260 *
3261 * If there's already a dirty range OR dirty pages in this block
3262 * and the new write range is not contiguous with the existing range,
3263 * then force the buffer to be written out now.
3264 * (We used to just extend the dirty range to cover the valid,
3265 * but unwritten, data in between also. But writing ranges
3266 * of data that weren't actually written by an application
3267 * risks overwriting some other client's data with stale data
3268 * that's just masquerading as new written data.)
3269 */
3270 if (bp->nb_dirtyend > 0) {
3271 if (on > bp->nb_dirtyend || (on + n) < bp->nb_dirtyoff || bp->nb_dirty) {
3272 FSDBG(515, np, uio_offset(uio), bp, 0xd15c001);
3273 /* write/commit buffer "synchronously" */
3274 /* (NB_STABLE indicates that data writes should be FILESYNC) */
3275 CLR(bp->nb_flags, (NB_DONE | NB_ERROR | NB_INVAL));
3276 SET(bp->nb_flags, (NB_ASYNC | NB_STABLE));
3277 error = nfs_buf_write(bp);
3278 if (error) {
3279 goto out;
3280 }
3281 goto again;
3282 }
3283 } else if (bp->nb_dirty) {
3284 int firstpg, lastpg;
3285 u_int32_t pagemask;
3286 /* calculate write range pagemask */
3287 firstpg = on / PAGE_SIZE;
3288 lastpg = (on + n - 1) / PAGE_SIZE;
3289 pagemask = ((1 << (lastpg + 1)) - 1) & ~((1 << firstpg) - 1);
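			/* pagemask now has one bit set per page touched by this write,
			 * e.g. firstpg 1, lastpg 3 -> 0b1110 */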
3290 /* check if there are dirty pages outside the write range */
3291 if (bp->nb_dirty & ~pagemask) {
3292 FSDBG(515, np, uio_offset(uio), bp, 0xd15c002);
3293 /* write/commit buffer "synchronously" */
3294 /* (NB_STABLE indicates that data writes should be FILESYNC) */
3295 CLR(bp->nb_flags, (NB_DONE | NB_ERROR | NB_INVAL));
3296 SET(bp->nb_flags, (NB_ASYNC | NB_STABLE));
3297 error = nfs_buf_write(bp);
3298 if (error) {
3299 goto out;
3300 }
3301 goto again;
3302 }
3303 /* if the first or last pages are already dirty */
3304 /* make sure that the dirty range encompasses those pages */
3305 if (NBPGDIRTY(bp, firstpg) || NBPGDIRTY(bp, lastpg)) {
3306 FSDBG(515, np, uio_offset(uio), bp, 0xd15c003);
3307 bp->nb_dirtyoff = min(on, firstpg * PAGE_SIZE);
3308 if (NBPGDIRTY(bp, lastpg)) {
3309 bp->nb_dirtyend = (lastpg + 1) * PAGE_SIZE;
3310 /* clip to EOF */
3311 if (NBOFF(bp) + bp->nb_dirtyend > (off_t)np->n_size) {
3312 bp->nb_dirtyend = np->n_size - NBOFF(bp);
3313 if (bp->nb_dirtyoff >= bp->nb_dirtyend) {
3314 bp->nb_dirtyoff = bp->nb_dirtyend = 0;
3315 }
3316 }
3317 } else {
3318 bp->nb_dirtyend = on + n;
3319 }
3320 }
3321 }
3322
3323 /*
3324 * Are we extending the size of the file with this write?
3325 * If so, update file size now that we have the block.
3326 * If there was a partial buf at the old eof, validate
3327 * and zero the new bytes.
3328 */
3329 if ((uio_offset(uio) + n) > (off_t)np->n_size) {
3330 daddr64_t eofbn = np->n_size / biosize;
3331 int neweofoff = (uio_offset(uio) + n) % biosize;
3332
3333 FSDBG(515, 0xb1ffa000, uio_offset(uio) + n, eofoff, neweofoff);
3334
3335 /* if we're extending within the same last block */
3336 /* and the block is flagged as being cached... */
3337 if ((lbn == eofbn) && ISSET(bp->nb_flags, NB_CACHE)) {
3338 /* ...check that all pages in buffer are valid */
3339 int endpg = ((neweofoff ? neweofoff : biosize) - 1) / PAGE_SIZE;
3340 u_int32_t pagemask;
3341 /* pagemask only has to extend to last page being written to */
3342 pagemask = (1 << (endpg + 1)) - 1;
3343 FSDBG(515, 0xb1ffa001, bp->nb_valid, pagemask, 0);
3344 if ((bp->nb_valid & pagemask) != pagemask) {
3345 /* zerofill any hole */
3346 if (on > bp->nb_validend) {
3347 int i;
3348 for (i = bp->nb_validend / PAGE_SIZE; i <= (on - 1) / PAGE_SIZE; i++) {
3349 NBPGVALID_SET(bp, i);
3350 }
3351 NFS_BUF_MAP(bp);
3352 FSDBG(516, bp, bp->nb_validend, on - bp->nb_validend, 0xf01e);
3353 bzero((char *)bp->nb_data + bp->nb_validend,
3354 on - bp->nb_validend);
3355 }
3356 /* zerofill any trailing data in the last page */
3357 if (neweofoff) {
3358 NFS_BUF_MAP(bp);
3359 FSDBG(516, bp, neweofoff, PAGE_SIZE - (neweofoff & PAGE_MASK), 0xe0f);
3360 bzero((char *)bp->nb_data + neweofoff,
3361 PAGE_SIZE - (neweofoff & PAGE_MASK));
3362 }
3363 }
3364 }
3365 np->n_size = uio_offset(uio) + n;
3366 nfs_node_lock_force(np);
3367 CLR(np->n_flag, NUPDATESIZE);
3368 np->n_flag |= NMODIFIED;
3369 nfs_node_unlock(np);
3370 FSDBG(516, np, np->n_size, np->n_vattr.nva_size, 0xf00d0001);
3371 ubc_setsize(vp, (off_t)np->n_size); /* XXX errors */
3372 }
3373 /*
3374 * If dirtyend exceeds file size, chop it down. This should
3375 * not occur unless there is a race.
3376 */
3377 if (NBOFF(bp) + bp->nb_dirtyend > (off_t)np->n_size) {
3378 bp->nb_dirtyend = np->n_size - NBOFF(bp);
3379 if (bp->nb_dirtyoff >= bp->nb_dirtyend) {
3380 bp->nb_dirtyoff = bp->nb_dirtyend = 0;
3381 }
3382 }
3383 /*
3384 * UBC doesn't handle partial pages, so we need to make sure
3385 * that any pages left in the page cache are completely valid.
3386 *
3387 * Writes that are smaller than a block are delayed if they
3388 * don't extend to the end of the block.
3389 *
3390 * If the block isn't (completely) cached, we may need to read
3391 * in some parts of pages that aren't covered by the write.
3392 * If the write offset (on) isn't page aligned, we'll need to
3393 * read the start of the first page being written to. Likewise,
3394 * if the offset of the end of the write (on+n) isn't page aligned,
3395 * we'll need to read the end of the last page being written to.
3396 *
3397 * Notes:
3398 * We don't want to read anything we're just going to write over.
3399 * We don't want to read anything we're just going to drop when the
3400 * I/O is complete (i.e. don't do reads for NOCACHE requests).
3401 * We don't want to issue multiple I/Os if we don't have to
3402 * (because they're synchronous rpcs).
3403 * We don't want to read anything we already have modified in the
3404 * page cache.
3405 */
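 /*
 * Illustrative example (values assumed): a write covering bytes 100-1099
 * of an otherwise-uncached block is not page aligned at either end, so the
 * code below reads the surrounding page data from the server (in a single
 * RPC here, since no dirty pages intervene) before uiomove() copies in the
 * new bytes.
 */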
3406 if (!ISSET(bp->nb_flags, NB_CACHE) && (n < biosize)) {
3407 int firstpg, lastpg, dirtypg;
3408 int firstpgoff, lastpgoff;
3409 start = end = -1;
3410 firstpg = on / PAGE_SIZE;
3411 firstpgoff = on & PAGE_MASK;
3412 lastpg = (on + n - 1) / PAGE_SIZE;
3413 lastpgoff = (on + n) & PAGE_MASK;
3414 if (firstpgoff && !NBPGVALID(bp, firstpg)) {
3415 /* need to read start of first page */
3416 start = firstpg * PAGE_SIZE;
3417 end = start + firstpgoff;
3418 }
3419 if (lastpgoff && !NBPGVALID(bp, lastpg)) {
3420 /* need to read end of last page */
3421 if (start < 0) {
3422 start = (lastpg * PAGE_SIZE) + lastpgoff;
3423 }
3424 end = (lastpg + 1) * PAGE_SIZE;
3425 }
3426 if (ISSET(bp->nb_flags, NB_NOCACHE)) {
3427 /*
3428 * For nocache writes, if there is any partial page at the
3429 * start or end of the write range, then we do the write
3430 * synchronously to make sure that we can drop the data
3431 * from the cache as soon as the WRITE finishes. Normally,
3432 * we would do an unstable write and not drop the data until
3433 * it was committed. But doing that here would risk allowing
3434 * invalid data to be read from the cache between the WRITE
3435 * and the COMMIT.
3436 * (NB_STABLE indicates that data writes should be FILESYNC)
3437 */
3438 if (end > start) {
3439 SET(bp->nb_flags, NB_STABLE);
3440 }
3441 goto skipread;
3442 }
3443 if (end > start) {
3444 /* need to read the data in range: start...end-1 */
3445
3446 /* first, check for dirty pages in between */
3447 /* if there are, we'll have to do two reads because */
3448 /* we don't want to overwrite the dirty pages. */
3449 for (dirtypg = start / PAGE_SIZE; dirtypg <= (end - 1) / PAGE_SIZE; dirtypg++) {
3450 if (NBPGDIRTY(bp, dirtypg)) {
3451 break;
3452 }
3453 }
3454
3455 /* if start is at beginning of page, try */
3456 /* to get any preceding pages as well. */
3457 if (!(start & PAGE_MASK)) {
3458 /* stop at next dirty/valid page or start of block */
3459 for (; start > 0; start -= PAGE_SIZE) {
3460 if (NBPGVALID(bp, ((start - 1) / PAGE_SIZE))) {
3461 break;
3462 }
3463 }
3464 }
3465
3466 NFS_BUF_MAP(bp);
3467 /* setup uio for read(s) */
3468 boff = NBOFF(bp);
3469 auio = uio_createwithbuffer(1, 0, UIO_SYSSPACE, UIO_READ,
3470 &auio_buf, sizeof(auio_buf));
3471
3472 if (dirtypg <= (end - 1) / PAGE_SIZE) {
3473 /* there's a dirty page in the way, so just do two reads */
3474 /* we'll read the preceding data here */
3475 uio_reset(auio, boff + start, UIO_SYSSPACE, UIO_READ);
3476 uio_addiov(auio, CAST_USER_ADDR_T(bp->nb_data + start), on - start);
3477 error = nfs_read_rpc(np, auio, ctx);
3478 if (error) {
3479 /* couldn't read the data, so treat buffer as synchronous NOCACHE */
3480 SET(bp->nb_flags, (NB_NOCACHE | NB_STABLE));
3481 goto skipread;
3482 }
3483 if (uio_resid(auio) > 0) {
3484 FSDBG(516, bp, (caddr_t)uio_curriovbase(auio) - bp->nb_data, uio_resid(auio), 0xd00dee01);
3485 bzero(CAST_DOWN(caddr_t, uio_curriovbase(auio)), uio_resid(auio));
3486 }
3487 if (!error) {
3488 /* update validoff/validend if necessary */
3489 if ((bp->nb_validoff < 0) || (bp->nb_validoff > start)) {
3490 bp->nb_validoff = start;
3491 }
3492 if ((bp->nb_validend < 0) || (bp->nb_validend < on)) {
3493 bp->nb_validend = on;
3494 }
3495 if ((off_t)np->n_size > boff + bp->nb_validend) {
3496 bp->nb_validend = min(np->n_size - (boff + start), biosize);
3497 }
3498 /* validate any pages before the write offset */
3499 for (; start < on / PAGE_SIZE; start += PAGE_SIZE) {
3500 NBPGVALID_SET(bp, start / PAGE_SIZE);
3501 }
3502 }
3503 /* adjust start to read any trailing data */
3504 start = on + n;
3505 }
3506
3507 /* if end is at end of page, try to */
3508 /* get any following pages as well. */
3509 if (!(end & PAGE_MASK)) {
3510 /* stop at next valid page or end of block */
3511 for (; end < biosize; end += PAGE_SIZE) {
3512 if (NBPGVALID(bp, end / PAGE_SIZE)) {
3513 break;
3514 }
3515 }
3516 }
3517
3518 if (((boff + start) >= (off_t)np->n_size) ||
3519 ((start >= on) && ((boff + on + n) >= (off_t)np->n_size))) {
3520 /*
3521 * Either this entire read is beyond the current EOF
3522 * or the range that we won't be modifying (on+n...end)
3523 * is all beyond the current EOF.
3524 * No need to make a trip across the network to
3525 * read nothing. So, just zero the buffer instead.
3526 */
3527 FSDBG(516, bp, start, end - start, 0xd00dee00);
3528 bzero(bp->nb_data + start, end - start);
3529 error = 0;
3530 } else {
3531 /* now we'll read the (rest of the) data */
3532 uio_reset(auio, boff + start, UIO_SYSSPACE, UIO_READ);
3533 uio_addiov(auio, CAST_USER_ADDR_T(bp->nb_data + start), end - start);
3534 error = nfs_read_rpc(np, auio, ctx);
3535 if (error) {
3536 /* couldn't read the data, so treat buffer as synchronous NOCACHE */
3537 SET(bp->nb_flags, (NB_NOCACHE | NB_STABLE));
3538 goto skipread;
3539 }
3540 if (uio_resid(auio) > 0) {
3541 FSDBG(516, bp, (caddr_t)uio_curriovbase(auio) - bp->nb_data, uio_resid(auio), 0xd00dee02);
3542 bzero(CAST_DOWN(caddr_t, uio_curriovbase(auio)), uio_resid(auio));
3543 }
3544 }
3545 if (!error) {
3546 /* update validoff/validend if necessary */
3547 if ((bp->nb_validoff < 0) || (bp->nb_validoff > start)) {
3548 bp->nb_validoff = start;
3549 }
3550 if ((bp->nb_validend < 0) || (bp->nb_validend < end)) {
3551 bp->nb_validend = end;
3552 }
3553 if ((off_t)np->n_size > boff + bp->nb_validend) {
3554 bp->nb_validend = min(np->n_size - (boff + start), biosize);
3555 }
3556 /* validate any pages before the write offset's page */
3557 for (; start < (off_t)trunc_page_32(on); start += PAGE_SIZE) {
3558 NBPGVALID_SET(bp, start / PAGE_SIZE);
3559 }
3560 /* validate any pages after the range of pages being written to */
3561 for (; (end - 1) > (off_t)round_page_32(on + n - 1); end -= PAGE_SIZE) {
3562 NBPGVALID_SET(bp, (end - 1) / PAGE_SIZE);
3563 }
3564 }
3565 /* Note: pages being written to will be validated when written */
3566 }
3567 }
3568skipread:
3569
3570 if (ISSET(bp->nb_flags, NB_ERROR)) {
3571 error = bp->nb_error;
3572 nfs_buf_release(bp, 1);
3573 goto out;
3574 }
3575
3576 nfs_node_lock_force(np);
3577 np->n_flag |= NMODIFIED;
3578 nfs_node_unlock(np);
3579
3580 NFS_BUF_MAP(bp);
3581 error = uiomove((char *)bp->nb_data + on, n, uio);
3582 if (error) {
3583 SET(bp->nb_flags, NB_ERROR);
3584 nfs_buf_release(bp, 1);
3585 goto out;
3586 }
3587
3588 /* validate any pages written to */
3589 start = on & ~PAGE_MASK;
3590 for (; start < on + n; start += PAGE_SIZE) {
3591 NBPGVALID_SET(bp, start / PAGE_SIZE);
3592 /*
3593 * This may seem a little weird, but we don't actually set the
3594 * dirty bits for writes. This is because we keep the dirty range
3595 * in the nb_dirtyoff/nb_dirtyend fields. Also, particularly for
3596 * delayed writes, when we give the pages back to the VM we don't
3597 * want to keep them marked dirty, because when we later write the
3598 * buffer we won't be able to tell which pages were written dirty
3599 * and which pages were mmapped and dirtied.
3600 */
3601 }
3602 if (bp->nb_dirtyend > 0) {
3603 bp->nb_dirtyoff = min(on, bp->nb_dirtyoff);
3604 bp->nb_dirtyend = max((on + n), bp->nb_dirtyend);
3605 } else {
3606 bp->nb_dirtyoff = on;
3607 bp->nb_dirtyend = on + n;
3608 }
3609 if (bp->nb_validend <= 0 || bp->nb_validend < bp->nb_dirtyoff ||
3610 bp->nb_validoff > bp->nb_dirtyend) {
3611 bp->nb_validoff = bp->nb_dirtyoff;
3612 bp->nb_validend = bp->nb_dirtyend;
3613 } else {
3614 bp->nb_validoff = min(bp->nb_validoff, bp->nb_dirtyoff);
3615 bp->nb_validend = max(bp->nb_validend, bp->nb_dirtyend);
3616 }
3617 if (!ISSET(bp->nb_flags, NB_CACHE)) {
3618 nfs_buf_normalize_valid_range(np, bp);
3619 }
3620
3621 /*
3622 * Since this block is being modified, it must be written
3623 * again and not just committed.
3624 */
3625 if (ISSET(bp->nb_flags, NB_NEEDCOMMIT)) {
3626 nfs_node_lock_force(np);
3627 if (ISSET(bp->nb_flags, NB_NEEDCOMMIT)) {
3628 np->n_needcommitcnt--;
3629 CHECK_NEEDCOMMITCNT(np);
3630 }
3631 CLR(bp->nb_flags, NB_NEEDCOMMIT);
3632 nfs_node_unlock(np);
3633 }
3634
3635 if (ioflag & IO_SYNC) {
3636 error = nfs_buf_write(bp);
3637 if (error) {
3638 goto out;
3639 }
3640 } else if (((n + on) == biosize) || (ioflag & IO_APPEND) ||
3641 (ioflag & IO_NOCACHE) || ISSET(bp->nb_flags, NB_NOCACHE)) {
3642 SET(bp->nb_flags, NB_ASYNC);
3643 error = nfs_buf_write(bp);
3644 if (error) {
3645 goto out;
3646 }
3647 } else {
3648 /* If the block wasn't already delayed: charge for the write */
3649 if (!ISSET(bp->nb_flags, NB_DELWRI)) {
3650 proc_t p = vfs_context_proc(ctx);
3651 if (p && p->p_stats) {
3652 OSIncrementAtomicLong(&p->p_stats->p_ru.ru_oublock);
3653 }
3654 }
3655 nfs_buf_write_delayed(bp);
3656 }
3657
3658
3659 if (np->n_needcommitcnt >= NFS_A_LOT_OF_NEEDCOMMITS) {
3660 nfs_flushcommits(np, 1);
3661 }
3662 } while (uio_resid(uio) > 0 && n > 0);
3663
3664out:
3665 nfs_node_lock_force(np);
3666 np->n_wrbusy--;
3667 if ((ioflag & IO_SYNC) && !np->n_wrbusy && !np->n_numoutput) {
3668 np->n_flag &= ~NMODIFIED;
3669 }
3670 nfs_node_unlock(np);
3671 nfs_data_unlock(np);
3672 FSDBG_BOT(515, np, uio_offset(uio), uio_resid(uio), error);
3673 return error;
3674}
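
/*
 * Editor's sketch (not part of the original xnu source; all names are
 * hypothetical): the dirty-range coalescing performed near the end of the
 * buffered-write loop above, reduced to plain integer arithmetic.  Guarded
 * out because it is illustrative only.
 */
#if 0
struct ex_dirty_range {
	int off;        /* start of the dirty byte range within the buffer */
	int end;        /* end of the dirty byte range (exclusive) */
};

static void
ex_mark_dirty(struct ex_dirty_range *dr, int on, int n)
{
	if (dr->end > 0) {
		/* merge the new write [on, on+n) with the existing range */
		if (on < dr->off) {
			dr->off = on;
		}
		if (on + n > dr->end) {
			dr->end = on + n;
		}
	} else {
		/* no dirty data yet: the write defines the range */
		dr->off = on;
		dr->end = on + n;
	}
}
#endif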
3675
3676
3677/*
3678 * NFS write call
3679 */
3680int
3681nfs_write_rpc(
3682 nfsnode_t np,
3683 uio_t uio,
3684 vfs_context_t ctx,
3685 int *iomodep,
3686 uint64_t *wverfp)
3687{
3688 return nfs_write_rpc2(np, uio, vfs_context_thread(ctx), vfs_context_ucred(ctx), iomodep, wverfp);
3689}
3690
3691int
3692nfs_write_rpc2(
3693 nfsnode_t np,
3694 uio_t uio,
3695 thread_t thd,
3696 kauth_cred_t cred,
3697 int *iomodep,
3698 uint64_t *wverfp)
3699{
3700 struct nfsmount *nmp;
3701 int error = 0, nfsvers;
3702 int wverfset, commit, committed;
3703 uint64_t wverf = 0, wverf2;
3704 size_t nmwsize, totalsize, tsiz, len, rlen;
3705 struct nfsreq rq, *req = &rq;
3706#if CONFIG_NFS4
3707 uint32_t stategenid = 0, restart = 0;
3708#endif
3709 uint32_t vrestart = 0;
3710 uio_t uio_save = NULL;
3711
3712#if DIAGNOSTIC
3713 /* XXX limitation based on need to back up uio on short write */
3714 if (uio_iovcnt(uio) != 1) {
3715 panic("nfs3_write_rpc: iovcnt > 1");
3716 }
3717#endif
3718 FSDBG_TOP(537, np, uio_offset(uio), uio_resid(uio), *iomodep);
3719 nmp = NFSTONMP(np);
3720 if (nfs_mount_gone(nmp)) {
3721 return ENXIO;
3722 }
3723 nfsvers = nmp->nm_vers;
3724 nmwsize = nmp->nm_wsize;
3725
3726 wverfset = 0;
3727 committed = NFS_WRITE_FILESYNC;
3728
3729 totalsize = tsiz = uio_resid(uio);
3730 if ((nfsvers == NFS_VER2) && ((uint64_t)(uio_offset(uio) + tsiz) > 0xffffffffULL)) {
3731 FSDBG_BOT(537, np, uio_offset(uio), uio_resid(uio), EFBIG);
3732 return EFBIG;
3733 }
3734
3735 uio_save = uio_duplicate(uio);
3736 if (uio_save == NULL) {
3737 return EIO;
3738 }
3739
3740 while (tsiz > 0) {
3741 len = (tsiz > nmwsize) ? nmwsize : tsiz;
3742 FSDBG(537, np, uio_offset(uio), len, 0);
3743 if (np->n_flag & NREVOKE) {
3744 error = EIO;
3745 break;
3746 }
3747#if CONFIG_NFS4
3748 if (nmp->nm_vers >= NFS_VER4) {
3749 stategenid = nmp->nm_stategenid;
3750 }
3751#endif
3752 error = nmp->nm_funcs->nf_write_rpc_async(np, uio, len, thd, cred, *iomodep, NULL, &req);
3753 if (!error) {
3754 error = nmp->nm_funcs->nf_write_rpc_async_finish(np, req, &commit, &rlen, &wverf2);
3755 }
3756 nmp = NFSTONMP(np);
3757 if (nfs_mount_gone(nmp)) {
3758 error = ENXIO;
3759 }
3760#if CONFIG_NFS4
3761 if ((nmp->nm_vers >= NFS_VER4) && nfs_mount_state_error_should_restart(error) &&
3762 (++restart <= nfs_mount_state_max_restarts(nmp))) { /* guard against no progress */
3763 lck_mtx_lock(&nmp->nm_lock);
3764 if ((error != NFSERR_GRACE) && (stategenid == nmp->nm_stategenid)) {
3765 NP(np, "nfs_write_rpc: error %d, initiating recovery", error);
3766 nfs_need_recover(nmp, error);
3767 }
3768 lck_mtx_unlock(&nmp->nm_lock);
3769 if (np->n_flag & NREVOKE) {
3770 error = EIO;
3771 } else {
3772 if (error == NFSERR_GRACE) {
3773 tsleep(&nmp->nm_state, (PZERO - 1), "nfsgrace", 2 * hz);
3774 }
3775 if (!(error = nfs_mount_state_wait_for_recovery(nmp))) {
3776 continue;
3777 }
3778 }
3779 }
3780#endif
3781 if (error) {
3782 break;
3783 }
3784 if (nfsvers == NFS_VER2) {
3785 tsiz -= len;
3786 continue;
3787 }
3788
3789 /* check for a short write */
3790 if (rlen < len) {
3791 /* Reset the uio to reflect the actual transfer */
3792 *uio = *uio_save;
3793 uio_update(uio, totalsize - (tsiz - rlen));
3794 len = rlen;
3795 }
3796
3797 /* return lowest commit level returned */
3798 if (commit < committed) {
3799 committed = commit;
3800 }
3801
3802 tsiz -= len;
3803
3804 /* check write verifier */
3805 if (!wverfset) {
3806 wverf = wverf2;
3807 wverfset = 1;
3808 } else if (wverf != wverf2) {
3809 /* verifier changed, so we need to restart all the writes */
3810 if (++vrestart > 100) {
3811 /* give up after too many restarts */
3812 error = EIO;
3813 break;
3814 }
3815 *uio = *uio_save; // Reset the uio back to the start
3816 committed = NFS_WRITE_FILESYNC;
3817 wverfset = 0;
3818 tsiz = totalsize;
3819 }
3820 }
3821 if (uio_save) {
3822 uio_free(uio_save);
3823 }
3824 if (wverfset && wverfp) {
3825 *wverfp = wverf;
3826 }
3827 *iomodep = committed;
3828 if (error) {
3829 uio_setresid(uio, tsiz);
3830 }
3831 FSDBG_BOT(537, np, committed, uio_resid(uio), error);
3832 return error;
3833}
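
/*
 * Editor's sketch (not part of the original xnu source; names and the
 * filesync_level parameter are hypothetical): the write-verifier and
 * commit-level bookkeeping that nfs_write_rpc2() performs above, reduced
 * to a small pure function.  Guarded out because it is illustrative only.
 */
#if 0
struct ex_wstate {
	u_int64_t verf;         /* verifier recorded from the first WRITE reply */
	int verfset;            /* nonzero once a verifier has been recorded */
	int committed;          /* lowest commit level seen so far */
};

/* Returns nonzero if all uncommitted writes must be re-sent. */
static int
ex_note_write_reply(struct ex_wstate *ws, u_int64_t wverf, int commit, int filesync_level)
{
	if (commit < ws->committed) {
		ws->committed = commit;         /* weakest guarantee wins */
	}
	if (!ws->verfset) {
		ws->verf = wverf;
		ws->verfset = 1;
		return 0;
	}
	if (ws->verf != wverf) {
		/*
		 * The verifier changed, so the server probably rebooted and
		 * may have dropped unstable data; restart from the saved uio.
		 */
		ws->verfset = 0;
		ws->committed = filesync_level;
		return 1;
	}
	return 0;
}
#endif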
3834
3835int
3836nfs3_write_rpc_async(
3837 nfsnode_t np,
3838 uio_t uio,
3839 size_t len,
3840 thread_t thd,
3841 kauth_cred_t cred,
3842 int iomode,
3843 struct nfsreq_cbinfo *cb,
3844 struct nfsreq **reqp)
3845{
3846 struct nfsmount *nmp;
3847 mount_t mp;
3848 int error = 0, nfsvers;
3849 struct nfsm_chain nmreq;
3850
3851 nmp = NFSTONMP(np);
3852 if (nfs_mount_gone(nmp)) {
3853 return ENXIO;
3854 }
3855 nfsvers = nmp->nm_vers;
3856
3857 /* for async mounts, don't bother sending sync write requests */
3858 if ((iomode != NFS_WRITE_UNSTABLE) && nfs_allow_async &&
3859 ((mp = NFSTOMP(np))) && (vfs_flags(mp) & MNT_ASYNC)) {
3860 iomode = NFS_WRITE_UNSTABLE;
3861 }
3862
3863 nfsm_chain_null(&nmreq);
3864 nfsm_chain_build_alloc_init(error, &nmreq,
3865 NFSX_FH(nfsvers) + 5 * NFSX_UNSIGNED + nfsm_rndup(len));
3866 nfsm_chain_add_fh(error, &nmreq, nfsvers, np->n_fhp, np->n_fhsize);
3867 if (nfsvers == NFS_VER3) {
3868 nfsm_chain_add_64(error, &nmreq, uio_offset(uio));
3869 nfsm_chain_add_32(error, &nmreq, len);
3870 nfsm_chain_add_32(error, &nmreq, iomode);
3871 } else {
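 /*
 * NFSv2 WRITE arguments are: beginoffset, offset, totalcount, data.  Per
 * RFC 1094 the beginoffset and totalcount fields are ignored, which is
 * why zeros are sent for the first and third words here.
 */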
3872 nfsm_chain_add_32(error, &nmreq, 0);
3873 nfsm_chain_add_32(error, &nmreq, uio_offset(uio));
3874 nfsm_chain_add_32(error, &nmreq, 0);
3875 }
3876 nfsm_chain_add_32(error, &nmreq, len);
3877 nfsmout_if(error);
3878 error = nfsm_chain_add_uio(&nmreq, uio, len);
3879 nfsm_chain_build_done(error, &nmreq);
3880 nfsmout_if(error);
3881 error = nfs_request_async(np, NULL, &nmreq, NFSPROC_WRITE, thd, cred, NULL, 0, cb, reqp);
3882nfsmout:
3883 nfsm_chain_cleanup(&nmreq);
3884 return error;
3885}
3886
3887int
3888nfs3_write_rpc_async_finish(
3889 nfsnode_t np,
3890 struct nfsreq *req,
3891 int *iomodep,
3892 size_t *rlenp,
3893 uint64_t *wverfp)
3894{
3895 struct nfsmount *nmp;
3896 int error = 0, lockerror = ENOENT, nfsvers, status;
3897 int updatemtime = 0, wccpostattr = 0, rlen, committed = NFS_WRITE_FILESYNC;
3898 u_int64_t xid, wverf;
3899 mount_t mp;
3900 struct nfsm_chain nmrep;
3901
3902 nmp = NFSTONMP(np);
3903 if (nfs_mount_gone(nmp)) {
3904 nfs_request_async_cancel(req);
3905 return ENXIO;
3906 }
3907 nfsvers = nmp->nm_vers;
3908
3909 nfsm_chain_null(&nmrep);
3910
3911 error = nfs_request_async_finish(req, &nmrep, &xid, &status);
3912 if (error == EINPROGRESS) { /* async request restarted */
3913 return error;
3914 }
3915 nmp = NFSTONMP(np);
3916 if (nfs_mount_gone(nmp)) {
3917 error = ENXIO;
3918 }
3919 if (!error && (lockerror = nfs_node_lock(np))) {
3920 error = lockerror;
3921 }
3922 if (nfsvers == NFS_VER3) {
3923 struct timespec premtime = { .tv_sec = 0, .tv_nsec = 0 };
3924 nfsm_chain_get_wcc_data(error, &nmrep, np, &premtime, &wccpostattr, &xid);
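 /* If the server's pre-operation mtime still matches our cached mtime,
 * this write was the only change, so the cached change info can simply
 * be brought up to date below instead of invalidating caches. */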
3925 if (nfstimespeccmp(&np->n_mtime, &premtime, ==)) {
3926 updatemtime = 1;
3927 }
3928 if (!error) {
3929 error = status;
3930 }
3931 nfsm_chain_get_32(error, &nmrep, rlen);
3932 nfsmout_if(error);
3933 *rlenp = rlen;
3934 if (rlen <= 0) {
3935 error = NFSERR_IO;
3936 }
3937 nfsm_chain_get_32(error, &nmrep, committed);
3938 nfsm_chain_get_64(error, &nmrep, wverf);
3939 nfsmout_if(error);
3940 if (wverfp) {
3941 *wverfp = wverf;
3942 }
3943 lck_mtx_lock(&nmp->nm_lock);
3944 if (!(nmp->nm_state & NFSSTA_HASWRITEVERF)) {
3945 nmp->nm_verf = wverf;
3946 nmp->nm_state |= NFSSTA_HASWRITEVERF;
3947 } else if (nmp->nm_verf != wverf) {
3948 nmp->nm_verf = wverf;
3949 }
3950 lck_mtx_unlock(&nmp->nm_lock);
3951 } else {
3952 if (!error) {
3953 error = status;
3954 }
3955 nfsm_chain_loadattr(error, &nmrep, np, nfsvers, &xid);
3956 nfsmout_if(error);
3957 }
3958 if (updatemtime) {
3959 NFS_CHANGED_UPDATE(nfsvers, np, &np->n_vattr);
3960 }
3961nfsmout:
3962 if (!lockerror) {
3963 nfs_node_unlock(np);
3964 }
3965 nfsm_chain_cleanup(&nmrep);
3966 if ((committed != NFS_WRITE_FILESYNC) && nfs_allow_async &&
3967 ((mp = NFSTOMP(np))) && (vfs_flags(mp) & MNT_ASYNC)) {
3968 committed = NFS_WRITE_FILESYNC;
3969 }
3970 *iomodep = committed;
3971 return error;
3972}
3973
3974/*
3975 * NFS mknod vnode op
3976 *
3977 * For NFS v2 this is a kludge. Use a create RPC but with the IFMT bits of the
3978 * mode set to specify the file type and the size field for rdev.
3979 */
3980int
3981nfs3_vnop_mknod(
3982 struct vnop_mknod_args /* {
3983 * struct vnodeop_desc *a_desc;
3984 * vnode_t a_dvp;
3985 * vnode_t *a_vpp;
3986 * struct componentname *a_cnp;
3987 * struct vnode_attr *a_vap;
3988 * vfs_context_t a_context;
3989 * } */*ap)
3990{
3991 vnode_t dvp = ap->a_dvp;
3992 vnode_t *vpp = ap->a_vpp;
3993 struct componentname *cnp = ap->a_cnp;
3994 struct vnode_attr *vap = ap->a_vap;
3995 vfs_context_t ctx = ap->a_context;
3996 vnode_t newvp = NULL;
3997 nfsnode_t np = NULL;
3998 struct nfsmount *nmp;
3999 nfsnode_t dnp = VTONFS(dvp);
4000 struct nfs_vattr nvattr;
4001 fhandle_t fh;
4002 int error = 0, lockerror = ENOENT, busyerror = ENOENT, status, wccpostattr = 0;
4003 struct timespec premtime = { .tv_sec = 0, .tv_nsec = 0 };
4004 u_int32_t rdev;
4005 u_int64_t xid = 0, dxid;
4006 int nfsvers, gotuid, gotgid;
4007 struct nfsm_chain nmreq, nmrep;
4008 struct nfsreq rq, *req = &rq;
4009
4010 nmp = VTONMP(dvp);
4011 if (nfs_mount_gone(nmp)) {
4012 return ENXIO;
4013 }
4014 nfsvers = nmp->nm_vers;
4015
4016 if (!VATTR_IS_ACTIVE(vap, va_type)) {
4017 return EINVAL;
4018 }
4019 if (vap->va_type == VCHR || vap->va_type == VBLK) {
4020 if (!VATTR_IS_ACTIVE(vap, va_rdev)) {
4021 return EINVAL;
4022 }
4023 rdev = vap->va_rdev;
4024 } else if (vap->va_type == VFIFO || vap->va_type == VSOCK) {
4025 rdev = 0xffffffff;
4026 } else {
4027 return ENOTSUP;
4028 }
4029 if ((nfsvers == NFS_VER2) && (cnp->cn_namelen > NFS_MAXNAMLEN)) {
4030 return ENAMETOOLONG;
4031 }
4032
4033 nfs_avoid_needless_id_setting_on_create(dnp, vap, ctx);
4034
4035 VATTR_SET_SUPPORTED(vap, va_mode);
4036 VATTR_SET_SUPPORTED(vap, va_uid);
4037 VATTR_SET_SUPPORTED(vap, va_gid);
4038 VATTR_SET_SUPPORTED(vap, va_data_size);
4039 VATTR_SET_SUPPORTED(vap, va_access_time);
4040 VATTR_SET_SUPPORTED(vap, va_modify_time);
4041 gotuid = VATTR_IS_ACTIVE(vap, va_uid);
4042 gotgid = VATTR_IS_ACTIVE(vap, va_gid);
4043
4044 nfsm_chain_null(&nmreq);
4045 nfsm_chain_null(&nmrep);
4046
4047 nfsm_chain_build_alloc_init(error, &nmreq,
4048 NFSX_FH(nfsvers) + 4 * NFSX_UNSIGNED +
4049 nfsm_rndup(cnp->cn_namelen) + NFSX_SATTR(nfsvers));
4050 nfsm_chain_add_fh(error, &nmreq, nfsvers, dnp->n_fhp, dnp->n_fhsize);
4051 nfsm_chain_add_name(error, &nmreq, cnp->cn_nameptr, cnp->cn_namelen, nmp);
4052 if (nfsvers == NFS_VER3) {
4053 nfsm_chain_add_32(error, &nmreq, vtonfs_type(vap->va_type, nfsvers));
4054 nfsm_chain_add_v3sattr(nmp, error, &nmreq, vap);
4055 if (vap->va_type == VCHR || vap->va_type == VBLK) {
4056 nfsm_chain_add_32(error, &nmreq, major(vap->va_rdev));
4057 nfsm_chain_add_32(error, &nmreq, minor(vap->va_rdev));
4058 }
4059 } else {
4060 nfsm_chain_add_v2sattr(error, &nmreq, vap, rdev);
4061 }
4062 nfsm_chain_build_done(error, &nmreq);
4063 if (!error) {
4064 error = busyerror = nfs_node_set_busy(dnp, vfs_context_thread(ctx));
4065 }
4066 nfsmout_if(error);
4067
4068 error = nfs_request_async(dnp, NULL, &nmreq, NFSPROC_MKNOD,
4069 vfs_context_thread(ctx), vfs_context_ucred(ctx), NULL, 0, NULL, &req);
4070 if (!error) {
4071 error = nfs_request_async_finish(req, &nmrep, &xid, &status);
4072 }
4073
4074 if ((lockerror = nfs_node_lock(dnp))) {
4075 error = lockerror;
4076 }
4077 /* XXX no EEXIST kludge here? */
4078 dxid = xid;
4079 if (!error && !status) {
4080 if (dnp->n_flag & NNEGNCENTRIES) {
4081 dnp->n_flag &= ~NNEGNCENTRIES;
4082 cache_purge_negatives(dvp);
4083 }
4084 error = nfsm_chain_get_fh_attr(nmp, &nmrep, dnp, ctx, nfsvers, &xid, &fh, &nvattr);
4085 }
4086 if (nfsvers == NFS_VER3) {
4087 nfsm_chain_get_wcc_data(error, &nmrep, dnp, &premtime, &wccpostattr, &dxid);
4088 }
4089 if (!error) {
4090 error = status;
4091 }
4092nfsmout:
4093 nfsm_chain_cleanup(&nmreq);
4094 nfsm_chain_cleanup(&nmrep);
4095
4096 if (!lockerror) {
4097 dnp->n_flag |= NMODIFIED;
4098 /* if directory hadn't changed, update namecache mtime */
4099 if (nfstimespeccmp(&dnp->n_ncmtime, &premtime, ==)) {
4100 NFS_CHANGED_UPDATE_NC(nfsvers, dnp, &dnp->n_vattr);
4101 }
4102 nfs_node_unlock(dnp);
4103 /* nfs_getattr() will check changed and purge caches */
4104 nfs_getattr(dnp, NULL, ctx, wccpostattr ? NGA_CACHED : NGA_UNCACHED);
4105 }
4106
4107 if (!error && fh.fh_len) {
4108 error = nfs_nget(NFSTOMP(dnp), dnp, cnp, fh.fh_data, fh.fh_len, &nvattr, &xid, rq.r_auth, NG_MAKEENTRY, &np);
4109 }
4110 if (!error && !np) {
4111 error = nfs_lookitup(dnp, cnp->cn_nameptr, cnp->cn_namelen, ctx, &np);
4112 }
4113 if (!error && np) {
4114 newvp = NFSTOV(np);
4115 }
4116 if (!busyerror) {
4117 nfs_node_clear_busy(dnp);
4118 }
4119
4120 if (!error && (gotuid || gotgid) &&
4121 (!newvp || nfs_getattrcache(np, &nvattr, 0) ||
4122 (gotuid && (nvattr.nva_uid != vap->va_uid)) ||
4123 (gotgid && (nvattr.nva_gid != vap->va_gid)))) {
4124 /* clear ID bits if server didn't use them (or we can't tell) */
4125 VATTR_CLEAR_SUPPORTED(vap, va_uid);
4126 VATTR_CLEAR_SUPPORTED(vap, va_gid);
4127 }
4128 if (error) {
4129 if (newvp) {
4130 nfs_node_unlock(np);
4131 vnode_put(newvp);
4132 }
4133 } else {
4134 *vpp = newvp;
4135 nfs_node_unlock(np);
4136 }
4137 return error;
4138}
4139
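/* counter mixed into the NFSv3 exclusive-create verifier built below */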
4140static uint32_t create_verf;
4141/*
4142 * NFS file create call
4143 */
4144int
4145nfs3_vnop_create(
4146 struct vnop_create_args /* {
4147 * struct vnodeop_desc *a_desc;
4148 * vnode_t a_dvp;
4149 * vnode_t *a_vpp;
4150 * struct componentname *a_cnp;
4151 * struct vnode_attr *a_vap;
4152 * vfs_context_t a_context;
4153 * } */*ap)
4154{
4155 vfs_context_t ctx = ap->a_context;
4156 vnode_t dvp = ap->a_dvp;
4157 struct vnode_attr *vap = ap->a_vap;
4158 struct componentname *cnp = ap->a_cnp;
4159 struct nfs_vattr nvattr;
4160 fhandle_t fh;
4161 nfsnode_t np = NULL;
4162 struct nfsmount *nmp;
4163 nfsnode_t dnp = VTONFS(dvp);
4164 vnode_t newvp = NULL;
4165 int error = 0, lockerror = ENOENT, busyerror = ENOENT, status, wccpostattr = 0, fmode = 0;
4166 struct timespec premtime = { .tv_sec = 0, .tv_nsec = 0 };
4167 int nfsvers, gotuid, gotgid;
4168 u_int64_t xid, dxid;
4169 uint32_t val;
4170 struct nfsm_chain nmreq, nmrep;
4171 struct nfsreq rq, *req = &rq;
4172 struct nfs_dulookup dul;
4173 int dul_in_progress = 0;
4174 int namedattrs;
4175
4176 nmp = VTONMP(dvp);
4177 if (nfs_mount_gone(nmp)) {
4178 return ENXIO;
4179 }
4180 nfsvers = nmp->nm_vers;
4181 namedattrs = (nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_NAMED_ATTR);
4182
4183 if ((nfsvers == NFS_VER2) && (cnp->cn_namelen > NFS_MAXNAMLEN)) {
4184 return ENAMETOOLONG;
4185 }
4186
4187 nfs_avoid_needless_id_setting_on_create(dnp, vap, ctx);
4188
4189 VATTR_SET_SUPPORTED(vap, va_mode);
4190 VATTR_SET_SUPPORTED(vap, va_uid);
4191 VATTR_SET_SUPPORTED(vap, va_gid);
4192 VATTR_SET_SUPPORTED(vap, va_data_size);
4193 VATTR_SET_SUPPORTED(vap, va_access_time);
4194 VATTR_SET_SUPPORTED(vap, va_modify_time);
4195 gotuid = VATTR_IS_ACTIVE(vap, va_uid);
4196 gotgid = VATTR_IS_ACTIVE(vap, va_gid);
4197
4198 if (vap->va_vaflags & VA_EXCLUSIVE) {
4200 fmode |= O_EXCL;
4201 if (!VATTR_IS_ACTIVE(vap, va_access_time) || !VATTR_IS_ACTIVE(vap, va_modify_time)) {
4202 vap->va_vaflags |= VA_UTIMES_NULL;
4203 }
4204 }
4205
4206again:
4207 error = busyerror = nfs_node_set_busy(dnp, vfs_context_thread(ctx));
4208 if (!namedattrs) {
4209 nfs_dulookup_init(&dul, dnp, cnp->cn_nameptr, cnp->cn_namelen, ctx);
4210 }
4211
4212 nfsm_chain_null(&nmreq);
4213 nfsm_chain_null(&nmrep);
4214
4215 nfsm_chain_build_alloc_init(error, &nmreq,
4216 NFSX_FH(nfsvers) + 2 * NFSX_UNSIGNED +
4217 nfsm_rndup(cnp->cn_namelen) + NFSX_SATTR(nfsvers));
4218 nfsm_chain_add_fh(error, &nmreq, nfsvers, dnp->n_fhp, dnp->n_fhsize);
4219 nfsm_chain_add_name(error, &nmreq, cnp->cn_nameptr, cnp->cn_namelen, nmp);
4220 if (nfsvers == NFS_VER3) {
4221 if (fmode & O_EXCL) {
4222 nfsm_chain_add_32(error, &nmreq, NFS_CREATE_EXCLUSIVE);
4223 lck_rw_lock_shared(in_ifaddr_rwlock);
4224 if (!TAILQ_EMPTY(&in_ifaddrhead)) {
4225 val = IA_SIN(in_ifaddrhead.tqh_first)->sin_addr.s_addr;
4226 } else {
4227 val = create_verf;
4228 }
4229 lck_rw_done(in_ifaddr_rwlock);
4230 nfsm_chain_add_32(error, &nmreq, val);
4231 ++create_verf;
4232 nfsm_chain_add_32(error, &nmreq, create_verf);
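 /* The exclusive-create verifier sent above is 8 bytes: the host's
 * primary IPv4 address (or just the counter when no address is
 * configured) followed by an incrementing counter, so the server can
 * recognize a retransmitted CREATE request. */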
4233 } else {
4234 nfsm_chain_add_32(error, &nmreq, NFS_CREATE_UNCHECKED);
4235 nfsm_chain_add_v3sattr(nmp, error, &nmreq, vap);
4236 }
4237 } else {
4238 nfsm_chain_add_v2sattr(error, &nmreq, vap, 0);
4239 }
4240 nfsm_chain_build_done(error, &nmreq);
4241 nfsmout_if(error);
4242
4243 error = nfs_request_async(dnp, NULL, &nmreq, NFSPROC_CREATE,
4244 vfs_context_thread(ctx), vfs_context_ucred(ctx), NULL, 0, NULL, &req);
4245 if (!error) {
4246 if (!namedattrs) {
4247 nfs_dulookup_start(&dul, dnp, ctx);
4248 dul_in_progress = 1;
4249 }
4250 error = nfs_request_async_finish(req, &nmrep, &xid, &status);
4251 }
4252
4253 if ((lockerror = nfs_node_lock(dnp))) {
4254 error = lockerror;
4255 }
4256 dxid = xid;
4257 if (!error && !status) {
4258 if (dnp->n_flag & NNEGNCENTRIES) {
4259 dnp->n_flag &= ~NNEGNCENTRIES;
4260 cache_purge_negatives(dvp);
4261 }
4262 error = nfsm_chain_get_fh_attr(nmp, &nmrep, dnp, ctx, nfsvers, &xid, &fh, &nvattr);
4263 }
4264 if (nfsvers == NFS_VER3) {
4265 nfsm_chain_get_wcc_data(error, &nmrep, dnp, &premtime, &wccpostattr, &dxid);
4266 }
4267 if (!error) {
4268 error = status;
4269 }
4270nfsmout:
4271 nfsm_chain_cleanup(&nmreq);
4272 nfsm_chain_cleanup(&nmrep);
4273
4274 if (!lockerror) {
4275 dnp->n_flag |= NMODIFIED;
4276 /* if directory hadn't changed, update namecache mtime */
4277 if (nfstimespeccmp(&dnp->n_ncmtime, &premtime, ==)) {
4278 NFS_CHANGED_UPDATE_NC(nfsvers, dnp, &dnp->n_vattr);
4279 }
4280 nfs_node_unlock(dnp);
4281 /* nfs_getattr() will check changed and purge caches */
4282 nfs_getattr(dnp, NULL, ctx, wccpostattr ? NGA_CACHED : NGA_UNCACHED);
4283 }
4284
4285 if (!error && fh.fh_len) {
4286 error = nfs_nget(NFSTOMP(dnp), dnp, cnp, fh.fh_data, fh.fh_len, &nvattr, &xid, rq.r_auth, NG_MAKEENTRY, &np);
4287 }
4288 if (!error && !np) {
4289 error = nfs_lookitup(dnp, cnp->cn_nameptr, cnp->cn_namelen, ctx, &np);
4290 }
4291 if (!error && np) {
4292 newvp = NFSTOV(np);
4293 }
4294
4295 if (dul_in_progress) {
4296 nfs_dulookup_finish(&dul, dnp, ctx);
4297 }
4298 if (!busyerror) {
4299 nfs_node_clear_busy(dnp);
4300 }
4301
4302 if (error) {
4303 if ((nfsvers == NFS_VER3) && (fmode & O_EXCL) && (error == NFSERR_NOTSUPP)) {
4304 fmode &= ~O_EXCL;
4305 goto again;
4306 }
4307 if (newvp) {
4308 nfs_node_unlock(np);
4309 vnode_put(newvp);
4310 }
4311 } else if ((nfsvers == NFS_VER3) && (fmode & O_EXCL)) {
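 /* An NFSv3 exclusive create only stores the verifier on the server, so
 * the requested attributes must be applied with a follow-up SETATTR. */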
4312 nfs_node_unlock(np);
4313 error = nfs3_setattr_rpc(np, vap, ctx);
4314 if (error && (gotuid || gotgid)) {
4315 /* it's possible the server didn't like our attempt to set IDs. */
4316 /* so, let's try it again without those */
4317 VATTR_CLEAR_ACTIVE(vap, va_uid);
4318 VATTR_CLEAR_ACTIVE(vap, va_gid);
4319 error = nfs3_setattr_rpc(np, vap, ctx);
4320 }
4321 if (error) {
4322 vnode_put(newvp);
4323 } else {
4324 nfs_node_lock_force(np);
4325 }
4326 }
4327 if (!error) {
4328 *ap->a_vpp = newvp;
4329 }
4330 if (!error && (gotuid || gotgid) &&
4331 (!newvp || nfs_getattrcache(np, &nvattr, 0) ||
4332 (gotuid && (nvattr.nva_uid != vap->va_uid)) ||
4333 (gotgid && (nvattr.nva_gid != vap->va_gid)))) {
4334 /* clear ID bits if server didn't use them (or we can't tell) */
4335 VATTR_CLEAR_SUPPORTED(vap, va_uid);
4336 VATTR_CLEAR_SUPPORTED(vap, va_gid);
4337 }
4338 if (!error) {
4339 nfs_node_unlock(np);
4340 }
4341 return error;
4342}
4343
4344/*
4345 * NFS file remove call
4346 * To try and make NFS semantics closer to UFS semantics, a file that has
4347 * other processes using the vnode is renamed instead of removed and then
4348 * removed later on the last close.
4349 * - If vnode_isinuse()
4350 * If a rename is not already in the works
4351 * call nfs_sillyrename() to set it up
4352 * else
4353 * do the remove RPC
4354 */
4355int
4356nfs_vnop_remove(
4357 struct vnop_remove_args /* {
4358 * struct vnodeop_desc *a_desc;
4359 * vnode_t a_dvp;
4360 * vnode_t a_vp;
4361 * struct componentname *a_cnp;
4362 * int a_flags;
4363 * vfs_context_t a_context;
4364 * } */*ap)
4365{
4366 vfs_context_t ctx = ap->a_context;
4367 vnode_t vp = ap->a_vp;
4368 vnode_t dvp = ap->a_dvp;
4369 struct componentname *cnp = ap->a_cnp;
4370 nfsnode_t dnp = VTONFS(dvp);
4371 nfsnode_t np = VTONFS(vp);
4372 int error = 0, nfsvers, namedattrs, inuse, gotattr = 0, flushed = 0, setsize = 0;
4373 struct nfs_vattr nvattr;
4374 struct nfsmount *nmp;
4375 struct nfs_dulookup dul;
4376
4377 /* XXX prevent removing a sillyrenamed file? */
4378
4379 nmp = NFSTONMP(dnp);
4380 if (nfs_mount_gone(nmp)) {
4381 return ENXIO;
4382 }
4383 nfsvers = nmp->nm_vers;
4384 namedattrs = (nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_NAMED_ATTR);
4385
4386again_relock:
4387 error = nfs_node_set_busy2(dnp, np, vfs_context_thread(ctx));
4388 if (error) {
4389 return error;
4390 }
4391
4392 /* lock the node while we remove the file */
4393 lck_mtx_lock(nfs_node_hash_mutex);
4394 while (np->n_hflag & NHLOCKED) {
4395 np->n_hflag |= NHLOCKWANT;
4396 msleep(np, nfs_node_hash_mutex, PINOD, "nfs_remove", NULL);
4397 }
4398 np->n_hflag |= NHLOCKED;
4399 lck_mtx_unlock(nfs_node_hash_mutex);
4400
4401 if (!namedattrs) {
4402 nfs_dulookup_init(&dul, dnp, cnp->cn_nameptr, cnp->cn_namelen, ctx);
4403 }
4404again:
4405 inuse = vnode_isinuse(vp, 0);
4406 if ((ap->a_flags & VNODE_REMOVE_NODELETEBUSY) && inuse) {
4407 /* Caller requested Carbon delete semantics, but file is busy */
4408 error = EBUSY;
4409 goto out;
4410 }
4411 if (inuse && !gotattr) {
4412 if (nfs_getattr(np, &nvattr, ctx, NGA_CACHED)) {
4413 nvattr.nva_nlink = 1;
4414 }
4415 gotattr = 1;
4416 goto again;
4417 }
4418 if (!inuse || (np->n_sillyrename && (nvattr.nva_nlink > 1))) {
4419 if (!inuse && !flushed) { /* flush all the buffers first */
4420 /* unlock the node */
4421 lck_mtx_lock(nfs_node_hash_mutex);
4422 np->n_hflag &= ~NHLOCKED;
4423 if (np->n_hflag & NHLOCKWANT) {
4424 np->n_hflag &= ~NHLOCKWANT;
4425 wakeup(np);
4426 }
4427 lck_mtx_unlock(nfs_node_hash_mutex);
4428 nfs_node_clear_busy2(dnp, np);
4429 error = nfs_vinvalbuf(vp, V_SAVE, ctx, 1);
4430 FSDBG(260, np, np->n_size, np->n_vattr.nva_size, 0xf00d0011);
4431 flushed = 1;
4432 if (error == EINTR) {
4433 nfs_node_lock_force(np);
4434 NATTRINVALIDATE(np);
4435 nfs_node_unlock(np);
4436 return error;
4437 }
4438 if (!namedattrs) {
4439 nfs_dulookup_finish(&dul, dnp, ctx);
4440 }
4441 goto again_relock;
4442 }
4443#if CONFIG_NFS4
4444 if ((nmp->nm_vers >= NFS_VER4) && (np->n_openflags & N_DELEG_MASK)) {
4445 nfs4_delegation_return(np, 0, vfs_context_thread(ctx), vfs_context_ucred(ctx));
4446 }
4447#endif
4448 /*
4449 * Purge the name cache so that the chance of a lookup for
4450 * the name succeeding while the remove is in progress is
4451 * minimized.
4452 */
4453 nfs_name_cache_purge(dnp, np, cnp, ctx);
4454
4455 if (!namedattrs) {
4456 nfs_dulookup_start(&dul, dnp, ctx);
4457 }
4458
4459 /* Do the rpc */
4460 error = nmp->nm_funcs->nf_remove_rpc(dnp, cnp->cn_nameptr, cnp->cn_namelen,
4461 vfs_context_thread(ctx), vfs_context_ucred(ctx));
4462
4463 /*
4464 * Kludge City: If the first reply to the remove rpc is lost,
4465 * the reply to the retransmitted request will be ENOENT
4466 * since the file was in fact removed.
4467 * Therefore, we cheat and return success.
4468 */
4469 if (error == ENOENT) {
4470 error = 0;
4471 }
4472
4473 if (!error && !inuse && !np->n_sillyrename) {
4474 /*
4475 * removal succeeded, it's not in use, and not silly renamed so
4476 * remove nfsnode from hash now so we can't accidentally find it
4477 * again if another object gets created with the same filehandle
4478 * before this vnode gets reclaimed
4479 */
4480 lck_mtx_lock(nfs_node_hash_mutex);
4481 if (np->n_hflag & NHHASHED) {
4482 LIST_REMOVE(np, n_hash);
4483 np->n_hflag &= ~NHHASHED;
4484 FSDBG(266, 0, np, np->n_flag, 0xb1eb1e);
4485 }
4486 lck_mtx_unlock(nfs_node_hash_mutex);
4487 /* clear flags now: won't get nfs_vnop_inactive for recycled vnode */
4488 /* clear all flags other than these */
4489 nfs_node_lock_force(np);
4490 np->n_flag &= (NMODIFIED);
4491 NATTRINVALIDATE(np);
4492 nfs_node_unlock(np);
4493 vnode_recycle(vp);
4494 setsize = 1;
4495 } else {
4496 nfs_node_lock_force(np);
4497 NATTRINVALIDATE(np);
4498 nfs_node_unlock(np);
4499 }
4500 } else if (!np->n_sillyrename) {
4501 if (!namedattrs) {
4502 nfs_dulookup_start(&dul, dnp, ctx);
4503 }
4504 error = nfs_sillyrename(dnp, np, cnp, ctx);
4505 nfs_node_lock_force(np);
4506 NATTRINVALIDATE(np);
4507 nfs_node_unlock(np);
4508 } else {
4509 nfs_node_lock_force(np);
4510 NATTRINVALIDATE(np);
4511 nfs_node_unlock(np);
4512 if (!namedattrs) {
4513 nfs_dulookup_start(&dul, dnp, ctx);
4514 }
4515 }
4516
4517 /* nfs_getattr() will check changed and purge caches */
4518 nfs_getattr(dnp, NULL, ctx, NGA_CACHED);
4519 if (!namedattrs) {
4520 nfs_dulookup_finish(&dul, dnp, ctx);
4521 }
4522out:
4523 /* unlock the node */
4524 lck_mtx_lock(nfs_node_hash_mutex);
4525 np->n_hflag &= ~NHLOCKED;
4526 if (np->n_hflag & NHLOCKWANT) {
4527 np->n_hflag &= ~NHLOCKWANT;
4528 wakeup(np);
4529 }
4530 lck_mtx_unlock(nfs_node_hash_mutex);
4531 nfs_node_clear_busy2(dnp, np);
4532 if (setsize) {
4533 ubc_setsize(vp, 0);
4534 }
4535 return error;
4536}
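
/*
 * Editor's sketch (not part of the original xnu source; names are
 * hypothetical): the remove-vs-sillyrename decision made by
 * nfs_vnop_remove() above, reduced to a pure function.  Guarded out
 * because it is illustrative only.
 */
#if 0
enum ex_remove_action {
	EX_DO_REMOVE,           /* safe to send the REMOVE rpc now */
	EX_DO_SILLYRENAME,      /* rename to a .nfsXXXX name; remove on last close */
	EX_ALREADY_SILLYRENAMED /* nothing more to do until the last close */
};

static enum ex_remove_action
ex_remove_action(int inuse, int sillyrenamed, unsigned int nlink)
{
	if (!inuse || (sillyrenamed && nlink > 1)) {
		return EX_DO_REMOVE;
	}
	if (!sillyrenamed) {
		return EX_DO_SILLYRENAME;
	}
	return EX_ALREADY_SILLYRENAMED;
}
#endif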
4537
4538/*
4539 * NFS silly-renamed file removal function called from nfs_vnop_inactive
4540 */
4541int
4542nfs_removeit(struct nfs_sillyrename *nsp)
4543{
4544 struct nfsmount *nmp = NFSTONMP(nsp->nsr_dnp);
4545 if (nfs_mount_gone(nmp)) {
4546 return ENXIO;
4547 }
4548 return nmp->nm_funcs->nf_remove_rpc(nsp->nsr_dnp, nsp->nsr_name, nsp->nsr_namlen, NULL, nsp->nsr_cred);
4549}
4550
4551/*
4552 * NFS remove rpc, called from nfs_remove() and nfs_removeit().
4553 */
4554int
4555nfs3_remove_rpc(
4556 nfsnode_t dnp,
4557 char *name,
4558 int namelen,
4559 thread_t thd,
4560 kauth_cred_t cred)
4561{
4562 int error = 0, lockerror = ENOENT, status, wccpostattr = 0;
4563 struct timespec premtime = { .tv_sec = 0, .tv_nsec = 0 };
4564 struct nfsmount *nmp;
4565 int nfsvers;
4566 u_int64_t xid;
4567 struct nfsm_chain nmreq, nmrep;
4568
4569 nmp = NFSTONMP(dnp);
4570 if (nfs_mount_gone(nmp)) {
4571 return ENXIO;
4572 }
4573 nfsvers = nmp->nm_vers;
4574 if ((nfsvers == NFS_VER2) && (namelen > NFS_MAXNAMLEN)) {
4575 return ENAMETOOLONG;
4576 }
4577
4578 nfsm_chain_null(&nmreq);
4579 nfsm_chain_null(&nmrep);
4580
4581 nfsm_chain_build_alloc_init(error, &nmreq,
4582 NFSX_FH(nfsvers) + NFSX_UNSIGNED + nfsm_rndup(namelen));
4583 nfsm_chain_add_fh(error, &nmreq, nfsvers, dnp->n_fhp, dnp->n_fhsize);
4584 nfsm_chain_add_name(error, &nmreq, name, namelen, nmp);
4585 nfsm_chain_build_done(error, &nmreq);
4586 nfsmout_if(error);
4587
4588 error = nfs_request2(dnp, NULL, &nmreq, NFSPROC_REMOVE, thd, cred, NULL, 0, &nmrep, &xid, &status);
4589
4590 if ((lockerror = nfs_node_lock(dnp))) {
4591 error = lockerror;
4592 }
4593 if (nfsvers == NFS_VER3) {
4594 nfsm_chain_get_wcc_data(error, &nmrep, dnp, &premtime, &wccpostattr, &xid);
4595 }
4596 nfsmout_if(error);
4597 dnp->n_flag |= NMODIFIED;
4598 /* if directory hadn't changed, update namecache mtime */
4599 if (nfstimespeccmp(&dnp->n_ncmtime, &premtime, ==)) {
4600 NFS_CHANGED_UPDATE_NC(nfsvers, dnp, &dnp->n_vattr);
4601 }
4602 if (!wccpostattr) {
4603 NATTRINVALIDATE(dnp);
4604 }
4605 if (!error) {
4606 error = status;
4607 }
4608nfsmout:
4609 if (!lockerror) {
4610 nfs_node_unlock(dnp);
4611 }
4612 nfsm_chain_cleanup(&nmreq);
4613 nfsm_chain_cleanup(&nmrep);
4614 return error;
4615}
4616
4617/*
4618 * NFS file rename call
4619 */
4620int
4621nfs_vnop_rename(
4622 struct vnop_rename_args /* {
4623 * struct vnodeop_desc *a_desc;
4624 * vnode_t a_fdvp;
4625 * vnode_t a_fvp;
4626 * struct componentname *a_fcnp;
4627 * vnode_t a_tdvp;
4628 * vnode_t a_tvp;
4629 * struct componentname *a_tcnp;
4630 * vfs_context_t a_context;
4631 * } */*ap)
4632{
4633 vfs_context_t ctx = ap->a_context;
4634 vnode_t fdvp = ap->a_fdvp;
4635 vnode_t fvp = ap->a_fvp;
4636 vnode_t tdvp = ap->a_tdvp;
4637 vnode_t tvp = ap->a_tvp;
4638 nfsnode_t fdnp, fnp, tdnp, tnp;
4639 struct componentname *tcnp = ap->a_tcnp;
4640 struct componentname *fcnp = ap->a_fcnp;
4641 int error, nfsvers, inuse = 0, tvprecycle = 0, locked = 0;
4642 mount_t fmp, tdmp, tmp;
4643 struct nfs_vattr nvattr;
4644 struct nfsmount *nmp;
4645
4646 fdnp = VTONFS(fdvp);
4647 fnp = VTONFS(fvp);
4648 tdnp = VTONFS(tdvp);
4649 tnp = tvp ? VTONFS(tvp) : NULL;
4650
4651 nmp = NFSTONMP(fdnp);
4652 if (nfs_mount_gone(nmp)) {
4653 return ENXIO;
4654 }
4655 nfsvers = nmp->nm_vers;
4656
4657 error = nfs_node_set_busy4(fdnp, fnp, tdnp, tnp, vfs_context_thread(ctx));
4658 if (error) {
4659 return error;
4660 }
4661
4662 if (tvp && (tvp != fvp)) {
4663 /* lock the node while we rename over the existing file */
4664 lck_mtx_lock(nfs_node_hash_mutex);
4665 while (tnp->n_hflag & NHLOCKED) {
4666 tnp->n_hflag |= NHLOCKWANT;
4667 msleep(tnp, nfs_node_hash_mutex, PINOD, "nfs_rename", NULL);
4668 }
4669 tnp->n_hflag |= NHLOCKED;
4670 lck_mtx_unlock(nfs_node_hash_mutex);
4671 locked = 1;
4672 }
4673
4674 /* Check for cross-device rename */
4675 fmp = vnode_mount(fvp);
4676 tmp = tvp ? vnode_mount(tvp) : NULL;
4677 tdmp = vnode_mount(tdvp);
4678 if ((fmp != tdmp) || (tvp && (fmp != tmp))) {
4679 error = EXDEV;
4680 goto out;
4681 }
4682
4683 /* XXX prevent renaming from/over a sillyrenamed file? */
4684
4685 /*
4686 * If the tvp exists and is in use, sillyrename it before doing the
4687 * rename of the new file over it.
4688 * XXX Can't sillyrename a directory.
4689 * Don't sillyrename if source and target are same vnode (hard
4690 * links or case-variants)
4691 */
4692 if (tvp && (tvp != fvp)) {
4693 inuse = vnode_isinuse(tvp, 0);
4694 }
4695 if (inuse && !tnp->n_sillyrename && (vnode_vtype(tvp) != VDIR)) {
4696 error = nfs_sillyrename(tdnp, tnp, tcnp, ctx);
4697 if (error) {
4698 /* sillyrename failed. Instead of pressing on, return error */
4699 goto out; /* should not be ENOENT. */
4700 } else {
4701 /* sillyrename succeeded. */
4702 tvp = NULL;
4703 }
4704 }
4705#if CONFIG_NFS4
4706 else if (tvp && (nmp->nm_vers >= NFS_VER4) && (tnp->n_openflags & N_DELEG_MASK)) {
4707 nfs4_delegation_return(tnp, 0, vfs_context_thread(ctx), vfs_context_ucred(ctx));
4708 }
4709#endif
4710 error = nmp->nm_funcs->nf_rename_rpc(fdnp, fcnp->cn_nameptr, fcnp->cn_namelen,
4711 tdnp, tcnp->cn_nameptr, tcnp->cn_namelen, ctx);
4712
4713 /*
4714 * Kludge: Map ENOENT => 0 assuming that it is a reply to a retry.
4715 */
4716 if (error == ENOENT) {
4717 error = 0;
4718 }
4719
4720 if (tvp && (tvp != fvp) && !tnp->n_sillyrename) {
4721 nfs_node_lock_force(tnp);
4722 tvprecycle = (!error && !vnode_isinuse(tvp, 0) &&
4723 (nfs_getattrcache(tnp, &nvattr, 0) || (nvattr.nva_nlink == 1)));
4724 nfs_node_unlock(tnp);
4725 lck_mtx_lock(nfs_node_hash_mutex);
4726 if (tvprecycle && (tnp->n_hflag & NHHASHED)) {
4727 /*
4728 * remove nfsnode from hash now so we can't accidentally find it
4729 * again if another object gets created with the same filehandle
4730 * before this vnode gets reclaimed
4731 */
4732 LIST_REMOVE(tnp, n_hash);
4733 tnp->n_hflag &= ~NHHASHED;
4734 FSDBG(266, 0, tnp, tnp->n_flag, 0xb1eb1e);
4735 }
4736 lck_mtx_unlock(nfs_node_hash_mutex);
4737 }
4738
4739 /* purge the old name cache entries and enter the new one */
4740 nfs_name_cache_purge(fdnp, fnp, fcnp, ctx);
4741 if (tvp) {
4742 nfs_name_cache_purge(tdnp, tnp, tcnp, ctx);
4743 if (tvprecycle) {
4744 /* clear flags now: won't get nfs_vnop_inactive for recycled vnode */
4745 /* clear all flags other than these */
4746 nfs_node_lock_force(tnp);
4747 tnp->n_flag &= (NMODIFIED);
4748 nfs_node_unlock(tnp);
4749 vnode_recycle(tvp);
4750 }
4751 }
4752 if (!error) {
4753 nfs_node_lock_force(tdnp);
4754 if (tdnp->n_flag & NNEGNCENTRIES) {
4755 tdnp->n_flag &= ~NNEGNCENTRIES;
4756 cache_purge_negatives(tdvp);
4757 }
4758 nfs_node_unlock(tdnp);
4759 nfs_node_lock_force(fnp);
4760 cache_enter(tdvp, fvp, tcnp);
4761 if (tdvp != fdvp) { /* update parent pointer */
4762 if (fnp->n_parent && !vnode_get(fnp->n_parent)) {
4763 /* remove ref from old parent */
4764 vnode_rele(fnp->n_parent);
4765 vnode_put(fnp->n_parent);
4766 }
4767 fnp->n_parent = tdvp;
4768 if (tdvp && !vnode_get(tdvp)) {
4769 /* add ref to new parent */
4770 vnode_ref(tdvp);
4771 vnode_put(tdvp);
4772 } else {
4773 fnp->n_parent = NULL;
4774 }
4775 }
4776 nfs_node_unlock(fnp);
4777 }
4778out:
4779 /* nfs_getattr() will check changed and purge caches */
4780 nfs_getattr(fdnp, NULL, ctx, NGA_CACHED);
4781 nfs_getattr(tdnp, NULL, ctx, NGA_CACHED);
4782 if (locked) {
4783 /* unlock node */
4784 lck_mtx_lock(nfs_node_hash_mutex);
4785 tnp->n_hflag &= ~NHLOCKED;
4786 if (tnp->n_hflag & NHLOCKWANT) {
4787 tnp->n_hflag &= ~NHLOCKWANT;
4788 wakeup(tnp);
4789 }
4790 lck_mtx_unlock(nfs_node_hash_mutex);
4791 }
4792 nfs_node_clear_busy4(fdnp, fnp, tdnp, tnp);
4793 return error;
4794}
4795
4796/*
4797 * Do an NFS rename rpc. Called from nfs_vnop_rename() and nfs_sillyrename().
4798 */
4799int
4800nfs3_rename_rpc(
4801 nfsnode_t fdnp,
4802 char *fnameptr,
4803 int fnamelen,
4804 nfsnode_t tdnp,
4805 char *tnameptr,
4806 int tnamelen,
4807 vfs_context_t ctx)
4808{
4809 int error = 0, lockerror = ENOENT, status, fwccpostattr = 0, twccpostattr = 0;
4810 struct timespec fpremtime = { .tv_sec = 0, .tv_nsec = 0 }, tpremtime = { .tv_sec = 0, .tv_nsec = 0 };
4811 struct nfsmount *nmp;
4812 int nfsvers;
4813 u_int64_t xid, txid;
4814 struct nfsm_chain nmreq, nmrep;
4815
4816 nmp = NFSTONMP(fdnp);
4817 if (nfs_mount_gone(nmp)) {
4818 return ENXIO;
4819 }
4820 nfsvers = nmp->nm_vers;
4821 if ((nfsvers == NFS_VER2) &&
4822 ((fnamelen > NFS_MAXNAMLEN) || (tnamelen > NFS_MAXNAMLEN))) {
4823 return ENAMETOOLONG;
4824 }
4825
4826 nfsm_chain_null(&nmreq);
4827 nfsm_chain_null(&nmrep);
4828
4829 nfsm_chain_build_alloc_init(error, &nmreq,
4830 (NFSX_FH(nfsvers) + NFSX_UNSIGNED) * 2 +
4831 nfsm_rndup(fnamelen) + nfsm_rndup(tnamelen));
4832 nfsm_chain_add_fh(error, &nmreq, nfsvers, fdnp->n_fhp, fdnp->n_fhsize);
4833 nfsm_chain_add_name(error, &nmreq, fnameptr, fnamelen, nmp);
4834 nfsm_chain_add_fh(error, &nmreq, nfsvers, tdnp->n_fhp, tdnp->n_fhsize);
4835 nfsm_chain_add_name(error, &nmreq, tnameptr, tnamelen, nmp);
4836 nfsm_chain_build_done(error, &nmreq);
4837 nfsmout_if(error);
4838
4839 error = nfs_request(fdnp, NULL, &nmreq, NFSPROC_RENAME, ctx, NULL, &nmrep, &xid, &status);
4840
4841 if ((lockerror = nfs_node_lock2(fdnp, tdnp))) {
4842 error = lockerror;
4843 }
4844 if (nfsvers == NFS_VER3) {
4845 txid = xid;
4846 nfsm_chain_get_wcc_data(error, &nmrep, fdnp, &fpremtime, &fwccpostattr, &xid);
4847 nfsm_chain_get_wcc_data(error, &nmrep, tdnp, &tpremtime, &twccpostattr, &txid);
4848 }
4849 if (!error) {
4850 error = status;
4851 }
4852nfsmout:
4853 nfsm_chain_cleanup(&nmreq);
4854 nfsm_chain_cleanup(&nmrep);
4855 if (!lockerror) {
4856 fdnp->n_flag |= NMODIFIED;
4857 /* if directory hadn't changed, update namecache mtime */
4858 if (nfstimespeccmp(&fdnp->n_ncmtime, &fpremtime, ==)) {
4859 NFS_CHANGED_UPDATE_NC(nfsvers, fdnp, &fdnp->n_vattr);
4860 }
4861 if (!fwccpostattr) {
4862 NATTRINVALIDATE(fdnp);
4863 }
4864 tdnp->n_flag |= NMODIFIED;
4865 /* if directory hadn't changed, update namecache mtime */
4866 if (nfstimespeccmp(&tdnp->n_ncmtime, &tpremtime, ==)) {
4867 NFS_CHANGED_UPDATE_NC(nfsvers, tdnp, &tdnp->n_vattr);
4868 }
4869 if (!twccpostattr) {
4870 NATTRINVALIDATE(tdnp);
4871 }
4872 nfs_node_unlock2(fdnp, tdnp);
4873 }
4874 return error;
4875}
4876
4877/*
4878 * NFS hard link create call
4879 */
4880int
4881nfs3_vnop_link(
4882 struct vnop_link_args /* {
4883 * struct vnodeop_desc *a_desc;
4884 * vnode_t a_vp;
4885 * vnode_t a_tdvp;
4886 * struct componentname *a_cnp;
4887 * vfs_context_t a_context;
4888 * } */*ap)
4889{
4890 vfs_context_t ctx = ap->a_context;
4891 vnode_t vp = ap->a_vp;
4892 vnode_t tdvp = ap->a_tdvp;
4893 struct componentname *cnp = ap->a_cnp;
4894 int error = 0, lockerror = ENOENT, status, wccpostattr = 0, attrflag = 0;
4895 struct timespec premtime = { .tv_sec = 0, .tv_nsec = 0 };
4896 struct nfsmount *nmp;
4897 nfsnode_t np = VTONFS(vp);
4898 nfsnode_t tdnp = VTONFS(tdvp);
4899 int nfsvers;
4900 u_int64_t xid, txid;
4901 struct nfsm_chain nmreq, nmrep;
4902
4903 if (vnode_mount(vp) != vnode_mount(tdvp)) {
4904 return EXDEV;
4905 }
4906
4907 nmp = VTONMP(vp);
4908 if (nfs_mount_gone(nmp)) {
4909 return ENXIO;
4910 }
4911 nfsvers = nmp->nm_vers;
4912 if ((nfsvers == NFS_VER2) && (cnp->cn_namelen > NFS_MAXNAMLEN)) {
4913 return ENAMETOOLONG;
4914 }
4915
4916 /*
4917 * Push all writes to the server, so that the attribute cache
4918 * doesn't get "out of sync" with the server.
4919 * XXX There should be a better way!
4920 */
4921 nfs_flush(np, MNT_WAIT, vfs_context_thread(ctx), V_IGNORE_WRITEERR);
4922
4923 error = nfs_node_set_busy2(tdnp, np, vfs_context_thread(ctx));
4924 if (error) {
4925 return error;
4926 }
4927
4928 nfsm_chain_null(&nmreq);
4929 nfsm_chain_null(&nmrep);
4930
4931 nfsm_chain_build_alloc_init(error, &nmreq,
4932 NFSX_FH(nfsvers) * 2 + NFSX_UNSIGNED + nfsm_rndup(cnp->cn_namelen));
4933 nfsm_chain_add_fh(error, &nmreq, nfsvers, np->n_fhp, np->n_fhsize);
4934 nfsm_chain_add_fh(error, &nmreq, nfsvers, tdnp->n_fhp, tdnp->n_fhsize);
4935 nfsm_chain_add_name(error, &nmreq, cnp->cn_nameptr, cnp->cn_namelen, nmp);
4936 nfsm_chain_build_done(error, &nmreq);
4937 nfsmout_if(error);
4938 error = nfs_request(np, NULL, &nmreq, NFSPROC_LINK, ctx, NULL, &nmrep, &xid, &status);
4939
4940 if ((lockerror = nfs_node_lock2(tdnp, np))) {
4941 error = lockerror;
4942 goto nfsmout;
4943 }
4944 if (nfsvers == NFS_VER3) {
4945 txid = xid;
4946 nfsm_chain_postop_attr_update_flag(error, &nmrep, np, attrflag, &xid);
4947 nfsm_chain_get_wcc_data(error, &nmrep, tdnp, &premtime, &wccpostattr, &txid);
4948 }
4949 if (!error) {
4950 error = status;
4951 }
4952nfsmout:
4953 nfsm_chain_cleanup(&nmreq);
4954 nfsm_chain_cleanup(&nmrep);
4955 if (!lockerror) {
4956 if (!attrflag) {
4957 NATTRINVALIDATE(np);
4958 }
4959 tdnp->n_flag |= NMODIFIED;
4960 /* if directory hadn't changed, update namecache mtime */
4961 if (nfstimespeccmp(&tdnp->n_ncmtime, &premtime, ==)) {
4962 NFS_CHANGED_UPDATE_NC(nfsvers, tdnp, &tdnp->n_vattr);
4963 }
4964 if (!wccpostattr) {
4965 NATTRINVALIDATE(tdnp);
4966 }
4967 if (!error && (tdnp->n_flag & NNEGNCENTRIES)) {
4968 tdnp->n_flag &= ~NNEGNCENTRIES;
4969 cache_purge_negatives(tdvp);
4970 }
4971 nfs_node_unlock2(tdnp, np);
4972 }
4973 nfs_node_clear_busy2(tdnp, np);
4974 /*
4975 * Kludge: Map EEXIST => 0 assuming that it is a reply to a retry.
4976 */
4977 if (error == EEXIST) {
4978 error = 0;
4979 }
4980 return error;
4981}
4982
4983/*
4984 * NFS symbolic link create call
4985 */
4986int
4987nfs3_vnop_symlink(
4988 struct vnop_symlink_args /* {
4989 * struct vnodeop_desc *a_desc;
4990 * vnode_t a_dvp;
4991 * vnode_t *a_vpp;
4992 * struct componentname *a_cnp;
4993 * struct vnode_attr *a_vap;
4994 * char *a_target;
4995 * vfs_context_t a_context;
4996 * } */*ap)
4997{
4998 vfs_context_t ctx = ap->a_context;
4999 vnode_t dvp = ap->a_dvp;
5000 struct vnode_attr *vap = ap->a_vap;
5001 struct componentname *cnp = ap->a_cnp;
5002 struct nfs_vattr nvattr;
5003 fhandle_t fh;
5004 int slen, error = 0, lockerror = ENOENT, busyerror = ENOENT, status, wccpostattr = 0;
5005 struct timespec premtime = { .tv_sec = 0, .tv_nsec = 0 };
5006 vnode_t newvp = NULL;
5007 int nfsvers, gotuid, gotgid;
5008 u_int64_t xid = 0, dxid;
5009 nfsnode_t np = NULL;
5010 nfsnode_t dnp = VTONFS(dvp);
5011 struct nfsmount *nmp;
5012 struct nfsm_chain nmreq, nmrep;
5013 struct nfsreq rq, *req = &rq;
5014 struct nfs_dulookup dul;
5015 int namedattrs;
5016 int dul_in_progress = 0;
5017
5018 nmp = VTONMP(dvp);
5019 if (nfs_mount_gone(nmp)) {
5020 return ENXIO;
5021 }
5022 nfsvers = nmp->nm_vers;
5023 namedattrs = (nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_NAMED_ATTR);
5024
5025 slen = strlen(ap->a_target);
5026 if ((nfsvers == NFS_VER2) &&
5027 ((cnp->cn_namelen > NFS_MAXNAMLEN) || (slen > NFS_MAXPATHLEN))) {
5028 return ENAMETOOLONG;
5029 }
5030
5031 nfs_avoid_needless_id_setting_on_create(dnp, vap, ctx);
5032
5033 VATTR_SET_SUPPORTED(vap, va_mode);
5034 VATTR_SET_SUPPORTED(vap, va_uid);
5035 VATTR_SET_SUPPORTED(vap, va_gid);
5036 VATTR_SET_SUPPORTED(vap, va_data_size);
5037 VATTR_SET_SUPPORTED(vap, va_access_time);
5038 VATTR_SET_SUPPORTED(vap, va_modify_time);
5039 gotuid = VATTR_IS_ACTIVE(vap, va_uid);
5040 gotgid = VATTR_IS_ACTIVE(vap, va_gid);
5041
5042 error = busyerror = nfs_node_set_busy(dnp, vfs_context_thread(ctx));
5043 if (!namedattrs) {
5044 nfs_dulookup_init(&dul, dnp, cnp->cn_nameptr, cnp->cn_namelen, ctx);
5045 }
5046
5047 nfsm_chain_null(&nmreq);
5048 nfsm_chain_null(&nmrep);
5049
5050 nfsm_chain_build_alloc_init(error, &nmreq,
5051 NFSX_FH(nfsvers) + 2 * NFSX_UNSIGNED +
5052 nfsm_rndup(cnp->cn_namelen) + nfsm_rndup(slen) + NFSX_SATTR(nfsvers));
5053 nfsm_chain_add_fh(error, &nmreq, nfsvers, dnp->n_fhp, dnp->n_fhsize);
5054 nfsm_chain_add_name(error, &nmreq, cnp->cn_nameptr, cnp->cn_namelen, nmp);
5055 if (nfsvers == NFS_VER3) {
5056 nfsm_chain_add_v3sattr(nmp, error, &nmreq, vap);
5057 }
5058 nfsm_chain_add_name(error, &nmreq, ap->a_target, slen, nmp);
5059 if (nfsvers == NFS_VER2) {
5060 nfsm_chain_add_v2sattr(error, &nmreq, vap, -1);
5061 }
5062 nfsm_chain_build_done(error, &nmreq);
5063 nfsmout_if(error);
5064
5065 error = nfs_request_async(dnp, NULL, &nmreq, NFSPROC_SYMLINK,
5066 vfs_context_thread(ctx), vfs_context_ucred(ctx), NULL, 0, NULL, &req);
5067 if (!error) {
5068 if (!namedattrs) {
5069 nfs_dulookup_start(&dul, dnp, ctx);
5070 dul_in_progress = 1;
5071 }
5072 error = nfs_request_async_finish(req, &nmrep, &xid, &status);
5073 }
5074
5075 if ((lockerror = nfs_node_lock(dnp))) {
5076 error = lockerror;
5077 }
5078 dxid = xid;
5079 if (!error && !status) {
5080 if (dnp->n_flag & NNEGNCENTRIES) {
5081 dnp->n_flag &= ~NNEGNCENTRIES;
5082 cache_purge_negatives(dvp);
5083 }
5084 if (nfsvers == NFS_VER3) {
5085 error = nfsm_chain_get_fh_attr(nmp, &nmrep, dnp, ctx, nfsvers, &xid, &fh, &nvattr);
5086 } else {
5087 fh.fh_len = 0;
5088 }
5089 }
5090 if (nfsvers == NFS_VER3) {
5091 nfsm_chain_get_wcc_data(error, &nmrep, dnp, &premtime, &wccpostattr, &dxid);
5092 }
5093 if (!error) {
5094 error = status;
5095 }
5096nfsmout:
5097 nfsm_chain_cleanup(&nmreq);
5098 nfsm_chain_cleanup(&nmrep);
5099
5100 if (!lockerror) {
5101 dnp->n_flag |= NMODIFIED;
5102 /* if directory hadn't changed, update namecache mtime */
5103 if (nfstimespeccmp(&dnp->n_ncmtime, &premtime, ==)) {
5104 NFS_CHANGED_UPDATE_NC(nfsvers, dnp, &dnp->n_vattr);
5105 }
5106 nfs_node_unlock(dnp);
5107 /* nfs_getattr() will check changed and purge caches */
5108 nfs_getattr(dnp, NULL, ctx, wccpostattr ? NGA_CACHED : NGA_UNCACHED);
5109 }
5110
5111 if (!error && fh.fh_len) {
5112 error = nfs_nget(NFSTOMP(dnp), dnp, cnp, fh.fh_data, fh.fh_len, &nvattr, &xid, rq.r_auth, NG_MAKEENTRY, &np);
5113 }
5114 if (!error && np) {
5115 newvp = NFSTOV(np);
5116 }
5117
5118 if (dul_in_progress) {
5119 nfs_dulookup_finish(&dul, dnp, ctx);
5120 }
5121
5122 /*
5123 * Kludge: Map EEXIST => 0 assuming that you have a reply to a retry
5124 * if we can succeed in looking up the symlink.
5125 */
5126 if ((error == EEXIST) || (!error && !newvp)) {
5127 if (newvp) {
5128 nfs_node_unlock(np);
5129 vnode_put(newvp);
5130 newvp = NULL;
5131 }
5132 error = nfs_lookitup(dnp, cnp->cn_nameptr, cnp->cn_namelen, ctx, &np);
5133 if (!error) {
5134 newvp = NFSTOV(np);
5135 if (vnode_vtype(newvp) != VLNK) {
5136 error = EEXIST;
5137 }
5138 }
5139 }
5140 if (!busyerror) {
5141 nfs_node_clear_busy(dnp);
5142 }
5143 if (!error && (gotuid || gotgid) &&
5144 (!newvp || nfs_getattrcache(np, &nvattr, 0) ||
5145 (gotuid && (nvattr.nva_uid != vap->va_uid)) ||
5146 (gotgid && (nvattr.nva_gid != vap->va_gid)))) {
5147 /* clear ID bits if server didn't use them (or we can't tell) */
5148 VATTR_CLEAR_SUPPORTED(vap, va_uid);
5149 VATTR_CLEAR_SUPPORTED(vap, va_gid);
5150 }
5151 if (error) {
5152 if (newvp) {
5153 nfs_node_unlock(np);
5154 vnode_put(newvp);
5155 }
5156 } else {
5157 nfs_node_unlock(np);
5158 *ap->a_vpp = newvp;
5159 }
5160 return error;
5161}
5162
5163/*
5164 * NFS make dir call
5165 */
5166int
5167nfs3_vnop_mkdir(
5168 struct vnop_mkdir_args /* {
5169 * struct vnodeop_desc *a_desc;
5170 * vnode_t a_dvp;
5171 * vnode_t *a_vpp;
5172 * struct componentname *a_cnp;
5173 * struct vnode_attr *a_vap;
5174 * vfs_context_t a_context;
5175 * } */*ap)
5176{
5177 vfs_context_t ctx = ap->a_context;
5178 vnode_t dvp = ap->a_dvp;
5179 struct vnode_attr *vap = ap->a_vap;
5180 struct componentname *cnp = ap->a_cnp;
5181 struct nfs_vattr nvattr;
5182 nfsnode_t np = NULL;
5183 struct nfsmount *nmp;
5184 nfsnode_t dnp = VTONFS(dvp);
5185 vnode_t newvp = NULL;
5186 int error = 0, lockerror = ENOENT, busyerror = ENOENT, status, wccpostattr = 0;
5187 struct timespec premtime = { .tv_sec = 0, .tv_nsec = 0 };
5188 int nfsvers, gotuid, gotgid;
5189 u_int64_t xid = 0, dxid;
5190 fhandle_t fh;
5191 struct nfsm_chain nmreq, nmrep;
5192 struct nfsreq rq, *req = &rq;
5193 struct nfs_dulookup dul;
5194 int namedattrs;
5195 int dul_in_progress = 0;
5196
5197 nmp = VTONMP(dvp);
5198 if (nfs_mount_gone(nmp)) {
5199 return ENXIO;
5200 }
5201 nfsvers = nmp->nm_vers;
5202 namedattrs = (nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_NAMED_ATTR);
5203
5204 if ((nfsvers == NFS_VER2) && (cnp->cn_namelen > NFS_MAXNAMLEN)) {
5205 return ENAMETOOLONG;
5206 }
5207
5208 nfs_avoid_needless_id_setting_on_create(dnp, vap, ctx);
5209
5210 VATTR_SET_SUPPORTED(vap, va_mode);
5211 VATTR_SET_SUPPORTED(vap, va_uid);
5212 VATTR_SET_SUPPORTED(vap, va_gid);
5213 VATTR_SET_SUPPORTED(vap, va_data_size);
5214 VATTR_SET_SUPPORTED(vap, va_access_time);
5215 VATTR_SET_SUPPORTED(vap, va_modify_time);
5216 gotuid = VATTR_IS_ACTIVE(vap, va_uid);
5217 gotgid = VATTR_IS_ACTIVE(vap, va_gid);
5218
5219 error = busyerror = nfs_node_set_busy(dnp, vfs_context_thread(ctx));
5220 if (!namedattrs) {
5221 nfs_dulookup_init(&dul, dnp, cnp->cn_nameptr, cnp->cn_namelen, ctx);
5222 }
5223
5224 nfsm_chain_null(&nmreq);
5225 nfsm_chain_null(&nmrep);
5226
5227 nfsm_chain_build_alloc_init(error, &nmreq,
5228 NFSX_FH(nfsvers) + NFSX_UNSIGNED +
5229 nfsm_rndup(cnp->cn_namelen) + NFSX_SATTR(nfsvers));
5230 nfsm_chain_add_fh(error, &nmreq, nfsvers, dnp->n_fhp, dnp->n_fhsize);
5231 nfsm_chain_add_name(error, &nmreq, cnp->cn_nameptr, cnp->cn_namelen, nmp);
5232 if (nfsvers == NFS_VER3) {
5233 nfsm_chain_add_v3sattr(nmp, error, &nmreq, vap);
5234 } else {
5235 nfsm_chain_add_v2sattr(error, &nmreq, vap, -1);
5236 }
5237 nfsm_chain_build_done(error, &nmreq);
5238 nfsmout_if(error);
5239
5240 error = nfs_request_async(dnp, NULL, &nmreq, NFSPROC_MKDIR,
5241 vfs_context_thread(ctx), vfs_context_ucred(ctx), NULL, 0, NULL, &req);
5242 if (!error) {
5243 if (!namedattrs) {
5244 nfs_dulookup_start(&dul, dnp, ctx);
5245 dul_in_progress = 1;
5246 }
5247 error = nfs_request_async_finish(req, &nmrep, &xid, &status);
5248 }
5249
5250 if ((lockerror = nfs_node_lock(dnp))) {
5251 error = lockerror;
5252 }
5253 dxid = xid;
5254 if (!error && !status) {
5255 if (dnp->n_flag & NNEGNCENTRIES) {
5256 dnp->n_flag &= ~NNEGNCENTRIES;
5257 cache_purge_negatives(dvp);
5258 }
5259 error = nfsm_chain_get_fh_attr(nmp, &nmrep, dnp, ctx, nfsvers, &xid, &fh, &nvattr);
5260 }
5261 if (nfsvers == NFS_VER3) {
5262 nfsm_chain_get_wcc_data(error, &nmrep, dnp, &premtime, &wccpostattr, &dxid);
5263 }
5264 if (!error) {
5265 error = status;
5266 }
5267nfsmout:
5268 nfsm_chain_cleanup(&nmreq);
5269 nfsm_chain_cleanup(&nmrep);
5270
5271 if (!lockerror) {
5272 dnp->n_flag |= NMODIFIED;
5273 /* if directory hadn't changed, update namecache mtime */
5274 if (nfstimespeccmp(&dnp->n_ncmtime, &premtime, ==)) {
5275 NFS_CHANGED_UPDATE_NC(nfsvers, dnp, &dnp->n_vattr);
5276 }
5277 nfs_node_unlock(dnp);
5278 /* nfs_getattr() will check changed and purge caches */
5279 nfs_getattr(dnp, NULL, ctx, wccpostattr ? NGA_CACHED : NGA_UNCACHED);
5280 }
5281
5282 if (!error && fh.fh_len) {
5283 error = nfs_nget(NFSTOMP(dnp), dnp, cnp, fh.fh_data, fh.fh_len, &nvattr, &xid, rq.r_auth, NG_MAKEENTRY, &np);
5284 }
5285 if (!error && np) {
5286 newvp = NFSTOV(np);
5287 }
5288
5289 if (dul_in_progress) {
5290 nfs_dulookup_finish(&dul, dnp, ctx);
5291 }
5292
5293 /*
5294 * Kludge: Map EEXIST => 0, assuming it is the reply to a retried request,
5295 * as long as we can successfully look up the directory.
5296 */
5297 if ((error == EEXIST) || (!error && !newvp)) {
5298 if (newvp) {
5299 nfs_node_unlock(np);
5300 vnode_put(newvp);
5301 newvp = NULL;
5302 }
5303 error = nfs_lookitup(dnp, cnp->cn_nameptr, cnp->cn_namelen, ctx, &np);
5304 if (!error) {
5305 newvp = NFSTOV(np);
5306 if (vnode_vtype(newvp) != VDIR) {
5307 error = EEXIST;
5308 }
5309 }
5310 }
5311 if (!busyerror) {
5312 nfs_node_clear_busy(dnp);
5313 }
5314 if (!error && (gotuid || gotgid) &&
5315 (!newvp || nfs_getattrcache(np, &nvattr, 0) ||
5316 (gotuid && (nvattr.nva_uid != vap->va_uid)) ||
5317 (gotgid && (nvattr.nva_gid != vap->va_gid)))) {
5318 /* clear ID bits if server didn't use them (or we can't tell) */
5319 VATTR_CLEAR_SUPPORTED(vap, va_uid);
5320 VATTR_CLEAR_SUPPORTED(vap, va_gid);
5321 }
5322 if (error) {
5323 if (newvp) {
5324 nfs_node_unlock(np);
5325 vnode_put(newvp);
5326 }
5327 } else {
5328 nfs_node_unlock(np);
5329 *ap->a_vpp = newvp;
5330 }
5331 return error;
5332}
5333
5334/*
5335 * NFS remove directory call
5336 */
5337int
5338nfs3_vnop_rmdir(
5339 struct vnop_rmdir_args /* {
5340 * struct vnodeop_desc *a_desc;
5341 * vnode_t a_dvp;
5342 * vnode_t a_vp;
5343 * struct componentname *a_cnp;
5344 * vfs_context_t a_context;
5345 * } */*ap)
5346{
5347 vfs_context_t ctx = ap->a_context;
5348 vnode_t vp = ap->a_vp;
5349 vnode_t dvp = ap->a_dvp;
5350 struct componentname *cnp = ap->a_cnp;
5351 int error = 0, lockerror = ENOENT, status, wccpostattr = 0;
5352 struct timespec premtime = { .tv_sec = 0, .tv_nsec = 0 };
5353 struct nfsmount *nmp;
5354 nfsnode_t np = VTONFS(vp);
5355 nfsnode_t dnp = VTONFS(dvp);
5356 int nfsvers;
5357 u_int64_t xid;
5358 struct nfsm_chain nmreq, nmrep;
5359 struct nfsreq rq, *req = &rq;
5360 struct nfs_dulookup dul;
5361 int namedattrs;
5362 int dul_in_progress = 0;
5363
5364 nmp = VTONMP(vp);
5365 if (nfs_mount_gone(nmp)) {
5366 return ENXIO;
5367 }
5368 nfsvers = nmp->nm_vers;
5369 namedattrs = (nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_NAMED_ATTR);
5370
5371 if ((nfsvers == NFS_VER2) && (cnp->cn_namelen > NFS_MAXNAMLEN)) {
5372 return ENAMETOOLONG;
5373 }
5374
5375 if ((error = nfs_node_set_busy2(dnp, np, vfs_context_thread(ctx)))) {
5376 return error;
5377 }
5378
5379 if (!namedattrs) {
5380 nfs_dulookup_init(&dul, dnp, cnp->cn_nameptr, cnp->cn_namelen, ctx);
5381 }
5382
5383 nfsm_chain_null(&nmreq);
5384 nfsm_chain_null(&nmrep);
5385
5386 nfsm_chain_build_alloc_init(error, &nmreq,
5387 NFSX_FH(nfsvers) + NFSX_UNSIGNED + nfsm_rndup(cnp->cn_namelen));
5388 nfsm_chain_add_fh(error, &nmreq, nfsvers, dnp->n_fhp, dnp->n_fhsize);
5389 nfsm_chain_add_name(error, &nmreq, cnp->cn_nameptr, cnp->cn_namelen, nmp);
5390 nfsm_chain_build_done(error, &nmreq);
5391 nfsmout_if(error);
5392
5393 error = nfs_request_async(dnp, NULL, &nmreq, NFSPROC_RMDIR,
5394 vfs_context_thread(ctx), vfs_context_ucred(ctx), NULL, 0, NULL, &req);
5395 if (!error) {
5396 if (!namedattrs) {
5397 nfs_dulookup_start(&dul, dnp, ctx);
5398 dul_in_progress = 1;
5399 }
5400 error = nfs_request_async_finish(req, &nmrep, &xid, &status);
5401 }
5402
5403 if ((lockerror = nfs_node_lock(dnp))) {
5404 error = lockerror;
5405 }
5406 if (nfsvers == NFS_VER3) {
5407 nfsm_chain_get_wcc_data(error, &nmrep, dnp, &premtime, &wccpostattr, &xid);
5408 }
5409 if (!error) {
5410 error = status;
5411 }
5412nfsmout:
5413 nfsm_chain_cleanup(&nmreq);
5414 nfsm_chain_cleanup(&nmrep);
5415
5416 if (!lockerror) {
5417 dnp->n_flag |= NMODIFIED;
5418 /* if directory hadn't changed, update namecache mtime */
5419 if (nfstimespeccmp(&dnp->n_ncmtime, &premtime, ==)) {
5420 NFS_CHANGED_UPDATE_NC(nfsvers, dnp, &dnp->n_vattr);
5421 }
5422 nfs_node_unlock(dnp);
5423 nfs_name_cache_purge(dnp, np, cnp, ctx);
5424 /* nfs_getattr() will check changed and purge caches */
5425 nfs_getattr(dnp, NULL, ctx, wccpostattr ? NGA_CACHED : NGA_UNCACHED);
5426 }
5427 if (dul_in_progress) {
5428 nfs_dulookup_finish(&dul, dnp, ctx);
5429 }
5430 nfs_node_clear_busy2(dnp, np);
5431
5432 /*
5433 * Kludge: Map ENOENT => 0, assuming it is the reply to a retried request.
5434 */
5435 if (error == ENOENT) {
5436 error = 0;
5437 }
5438 if (!error) {
5439 /*
5440 * remove nfsnode from hash now so we can't accidentally find it
5441 * again if another object gets created with the same filehandle
5442 * before this vnode gets reclaimed
5443 */
5444 lck_mtx_lock(nfs_node_hash_mutex);
5445 if (np->n_hflag & NHHASHED) {
5446 LIST_REMOVE(np, n_hash);
5447 np->n_hflag &= ~NHHASHED;
5448 FSDBG(266, 0, np, np->n_flag, 0xb1eb1e);
5449 }
5450 lck_mtx_unlock(nfs_node_hash_mutex);
5451 }
5452 return error;
5453}
5454
5455/*
5456 * NFS readdir call
5457 *
5458 * The incoming "offset" is a directory cookie indicating where in the
5459 * directory to resume reading entries. A zero cookie means start at
5460 * the beginning of the directory. Any other cookie will be a cookie
5461 * returned from the server.
5462 *
5463 * Using that cookie, determine which buffer (and where in that buffer)
5464 * to start returning entries from. Buffer logical block numbers are
5465 * the cookies they start at. If a buffer is found that is not full,
5466 * call into the bio/RPC code to fill it. The RPC code will probably
5467 * fill several buffers (dropping the first, requiring a re-get).
5468 *
5469 * When done copying entries to the buffer, set the offset to the current
5470 * entry's cookie and enter that cookie in the cookie cache.
5471 *
5472 * Note: because the getdirentries(2) API returns a long-typed offset,
5473 * the incoming offset is a potentially truncated cookie (ptc).
5474 * The cookie matching code is aware of this and will fall back to
5475 * matching only 32 bits of the cookie.
5476 */
5477int
5478nfs_vnop_readdir(
5479 struct vnop_readdir_args /* {
5480 * struct vnodeop_desc *a_desc;
5481 * vnode_t a_vp;
5482 * struct uio *a_uio;
5483 * int a_flags;
5484 * int *a_eofflag;
5485 * int *a_numdirent;
5486 * vfs_context_t a_context;
5487 * } */*ap)
5488{
5489 vfs_context_t ctx = ap->a_context;
5490 vnode_t dvp = ap->a_vp;
5491 nfsnode_t dnp = VTONFS(dvp);
5492 struct nfsmount *nmp;
5493 uio_t uio = ap->a_uio;
5494 int error, nfsvers, extended, numdirent, bigcookies, ptc, done, attrcachetimeout;
5495 uint16_t i, iptc, rlen, nlen;
5496 uint64_t cookie, nextcookie, lbn = 0;
5497 struct nfsbuf *bp = NULL;
5498 struct nfs_dir_buf_header *ndbhp;
5499 struct direntry *dp, *dpptc;
5500 struct dirent dent;
5501 char *cp = NULL;
5502 struct timeval now;
5503 thread_t thd;
5504
5505 nmp = VTONMP(dvp);
5506 if (nfs_mount_gone(nmp)) {
5507 return ENXIO;
5508 }
5509 nfsvers = nmp->nm_vers;
5510 bigcookies = (nmp->nm_state & NFSSTA_BIGCOOKIES);
5511 extended = (ap->a_flags & VNODE_READDIR_EXTENDED);
5512
5513 if (vnode_vtype(dvp) != VDIR) {
5514 return EPERM;
5515 }
5516
5517 if (ap->a_eofflag) {
5518 *ap->a_eofflag = 0;
5519 }
5520
5521 if (uio_resid(uio) == 0) {
5522 return 0;
5523 }
5524#if CONFIG_NFS4
5525 if ((nfsvers >= NFS_VER4) && (dnp->n_vattr.nva_flags & NFS_FFLAG_TRIGGER)) {
5526 /* trigger directories should never be read, return nothing */
5527 return 0;
5528 }
5529#endif
5530 thd = vfs_context_thread(ctx);
5531 numdirent = done = 0;
5532 nextcookie = uio_offset(uio);
5533 ptc = bigcookies && NFS_DIR_COOKIE_POTENTIALLY_TRUNCATED(nextcookie);
5534
5535 if ((error = nfs_node_lock(dnp))) {
5536 goto out;
5537 }
5538
5539 if (dnp->n_flag & NNEEDINVALIDATE) {
5540 dnp->n_flag &= ~NNEEDINVALIDATE;
5541 nfs_invaldir(dnp);
5542 nfs_node_unlock(dnp);
5543 error = nfs_vinvalbuf(dvp, 0, ctx, 1);
5544 if (!error) {
5545 error = nfs_node_lock(dnp);
5546 }
5547 if (error) {
5548 goto out;
5549 }
5550 }
5551
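	/*
	 * If the directory was fully filled by readdirplus (both the
	 * start-of-fill and end-of-fill stamps are set) and the attributes
	 * cached at the start of that fill have aged past the attribute
	 * cache timeout, dump the directory buffers so the entries'
	 * cached attributes get refreshed on the next fill.
	 */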
5552 if (dnp->n_rdirplusstamp_eof && dnp->n_rdirplusstamp_sof) {
5553 attrcachetimeout = nfs_attrcachetimeout(dnp);
5554 microuptime(&now);
5555 if (attrcachetimeout && (now.tv_sec - dnp->n_rdirplusstamp_sof > attrcachetimeout - 1)) {
5556 dnp->n_rdirplusstamp_eof = dnp->n_rdirplusstamp_sof = 0;
5557 nfs_invaldir(dnp);
5558 nfs_node_unlock(dnp);
5559 error = nfs_vinvalbuf(dvp, 0, ctx, 1);
5560 if (!error) {
5561 error = nfs_node_lock(dnp);
5562 }
5563 if (error) {
5564 goto out;
5565 }
5566 }
5567 }
5568
5569 /*
5570 * check for need to invalidate when (re)starting at beginning
5571 */
5572 if (!nextcookie) {
5573 if (dnp->n_flag & NMODIFIED) {
5574 nfs_invaldir(dnp);
5575 nfs_node_unlock(dnp);
5576 if ((error = nfs_vinvalbuf(dvp, 0, ctx, 1))) {
5577 goto out;
5578 }
5579 } else {
5580 nfs_node_unlock(dnp);
5581 }
5582 /* nfs_getattr() will check changed and purge caches */
5583 if ((error = nfs_getattr(dnp, NULL, ctx, NGA_UNCACHED))) {
5584 goto out;
5585 }
5586 } else {
5587 nfs_node_unlock(dnp);
5588 }
5589
5590 error = nfs_dir_cookie_to_lbn(dnp, nextcookie, &ptc, &lbn);
5591 if (error) {
5592 if (error < 0) { /* just hit EOF cookie */
5593 done = 1;
5594 error = 0;
5595 }
5596 if (ap->a_eofflag) {
5597 *ap->a_eofflag = 1;
5598 }
5599 }
5600
5601 while (!error && !done) {
5602 OSAddAtomic64(1, &nfsstats.biocache_readdirs);
5603 cookie = nextcookie;
5604getbuffer:
5605 error = nfs_buf_get(dnp, lbn, NFS_DIRBLKSIZ, thd, NBLK_READ, &bp);
5606 if (error) {
5607 goto out;
5608 }
5609 ndbhp = (struct nfs_dir_buf_header*)bp->nb_data;
5610 if (!ISSET(bp->nb_flags, NB_CACHE) || !ISSET(ndbhp->ndbh_flags, NDB_FULL)) {
5611 if (!ISSET(bp->nb_flags, NB_CACHE)) { /* initialize the buffer */
5612 ndbhp->ndbh_flags = 0;
5613 ndbhp->ndbh_count = 0;
5614 ndbhp->ndbh_entry_end = sizeof(*ndbhp);
5615 ndbhp->ndbh_ncgen = dnp->n_ncgen;
5616 }
5617 error = nfs_buf_readdir(bp, ctx);
5618 if (error == NFSERR_DIRBUFDROPPED) {
5619 goto getbuffer;
5620 }
5621 if (error) {
5622 nfs_buf_release(bp, 1);
5623 }
5624 if (error && (error != ENXIO) && (error != ETIMEDOUT) && (error != EINTR) && (error != ERESTART)) {
5625 if (!nfs_node_lock(dnp)) {
5626 nfs_invaldir(dnp);
5627 nfs_node_unlock(dnp);
5628 }
5629 nfs_vinvalbuf(dvp, 0, ctx, 1);
5630 if (error == NFSERR_BAD_COOKIE) {
5631 error = ENOENT;
5632 }
5633 }
5634 if (error) {
5635 goto out;
5636 }
5637 }
5638
5639 /* find next entry to return */
5640 dp = NFS_DIR_BUF_FIRST_DIRENTRY(bp);
5641 i = 0;
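		/*
		 * If the requested cookie isn't this buffer's starting cookie,
		 * walk the entries looking for the one whose d_seekoff matches
		 * (remembering a 32-bit match as a fallback for a potentially
		 * truncated cookie), then step past it: an entry's d_seekoff is
		 * the cookie at which to resume reading after that entry.
		 */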
5642 if ((lbn != cookie) && !(ptc && NFS_DIR_COOKIE_SAME32(lbn, cookie))) {
5643 dpptc = NULL;
5644 iptc = 0;
5645 for (; (i < ndbhp->ndbh_count) && (cookie != dp->d_seekoff); i++) {
5646 if (ptc && !dpptc && NFS_DIR_COOKIE_SAME32(cookie, dp->d_seekoff)) {
5647 iptc = i;
5648 dpptc = dp;
5649 }
5650 nextcookie = dp->d_seekoff;
5651 dp = NFS_DIRENTRY_NEXT(dp);
5652 }
5653 if ((i == ndbhp->ndbh_count) && dpptc) {
5654 i = iptc;
5655 dp = dpptc;
5656 }
5657 if (i < ndbhp->ndbh_count) {
5658 nextcookie = dp->d_seekoff;
5659 dp = NFS_DIRENTRY_NEXT(dp);
5660 i++;
5661 }
5662 }
5663 ptc = 0; /* only have to deal with ptc on first cookie */
5664
5665 /* return as many entries as we can */
5666 for (; i < ndbhp->ndbh_count; i++) {
5667 if (extended) {
5668 rlen = dp->d_reclen;
5669 cp = (char*)dp;
5670 } else {
5671 if (!cp) {
5672 cp = (char*)&dent;
5673 bzero(cp, sizeof(dent));
5674 }
5675 if (dp->d_namlen > (sizeof(dent.d_name) - 1)) {
5676 nlen = sizeof(dent.d_name) - 1;
5677 } else {
5678 nlen = dp->d_namlen;
5679 }
5680 rlen = NFS_DIRENT_LEN(nlen);
5681 dent.d_reclen = rlen;
5682 dent.d_ino = dp->d_ino;
5683 dent.d_type = dp->d_type;
5684 dent.d_namlen = nlen;
5685 strlcpy(dent.d_name, dp->d_name, nlen + 1);
5686 }
5687 /* check that the record fits */
5688 if (rlen > uio_resid(uio)) {
5689 done = 1;
5690 break;
5691 }
5692 if ((error = uiomove(cp, rlen, uio))) {
5693 break;
5694 }
5695 numdirent++;
5696 nextcookie = dp->d_seekoff;
5697 dp = NFS_DIRENTRY_NEXT(dp);
5698 }
5699
5700 if (i == ndbhp->ndbh_count) {
5701 /* hit end of buffer, move to next buffer */
5702 lbn = nextcookie;
5703 /* if we also hit EOF, we're done */
5704 if (ISSET(ndbhp->ndbh_flags, NDB_EOF)) {
5705 done = 1;
5706 if (ap->a_eofflag) {
5707 *ap->a_eofflag = 1;
5708 }
5709 }
5710 }
5711 if (!error) {
5712 uio_setoffset(uio, nextcookie);
5713 }
5714 if (!error && !done && (nextcookie == cookie)) {
5715 printf("nfs readdir cookie didn't change 0x%llx, %d/%d\n", cookie, i, ndbhp->ndbh_count);
5716 error = EIO;
5717 }
5718 nfs_buf_release(bp, 1);
5719 }
5720
5721 if (!error) {
5722 nfs_dir_cookie_cache(dnp, nextcookie, lbn);
5723 }
5724
5725 if (ap->a_numdirent) {
5726 *ap->a_numdirent = numdirent;
5727 }
5728out:
5729 return error;
5730}
5731
5732
5733/*
5734 * Invalidate cached directory information, except for the actual directory
5735 * blocks (which are invalidated separately).
5736 */
5737void
5738nfs_invaldir(nfsnode_t dnp)
5739{
5740 if (vnode_vtype(NFSTOV(dnp)) != VDIR) {
5741 return;
5742 }
5743 dnp->n_eofcookie = 0;
5744 dnp->n_cookieverf = 0;
5745 if (!dnp->n_cookiecache) {
5746 return;
5747 }
5748 dnp->n_cookiecache->free = 0;
5749 dnp->n_cookiecache->mru = -1;
5750 memset(dnp->n_cookiecache->next, -1, NFSNUMCOOKIES);
5751}
5752
5753/*
5754 * calculate how much space is available for additional directory entries.
5755 */
5756uint32_t
5757nfs_dir_buf_freespace(struct nfsbuf *bp, int rdirplus)
5758{
5759 struct nfs_dir_buf_header *ndbhp = (struct nfs_dir_buf_header*)bp->nb_data;
5760 uint32_t space;
5761
5762 if (!ndbhp) {
5763 return 0;
5764 }
5765 space = bp->nb_bufsize - ndbhp->ndbh_entry_end;
5766 if (rdirplus) {
5767 space -= ndbhp->ndbh_count * sizeof(struct nfs_vattr);
5768 }
5769 return space;
5770}
5771
5772/*
5773 * add/update a cookie->lbn entry in the directory cookie cache
5774 */
5775void
5776nfs_dir_cookie_cache(nfsnode_t dnp, uint64_t cookie, uint64_t lbn)
5777{
5778 struct nfsdmap *ndcc;
5779 int8_t i, prev;
5780
5781 if (!cookie) {
5782 return;
5783 }
5784
5785 if (nfs_node_lock(dnp)) {
5786 return;
5787 }
5788
5789 if (cookie == dnp->n_eofcookie) { /* EOF cookie */
5790 nfs_node_unlock(dnp);
5791 return;
5792 }
5793
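	/*
	 * The cookie cache is a small, fixed-size MRU list of cookie->lbn
	 * mappings: "mru" is the most-recently-used slot and next[] chains
	 * the slots from most to least recently used, terminated by -1.
	 */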
5794 ndcc = dnp->n_cookiecache;
5795 if (!ndcc) {
5796 /* allocate the cookie cache structure */
5797 MALLOC_ZONE(dnp->n_cookiecache, struct nfsdmap *,
5798 sizeof(struct nfsdmap), M_NFSDIROFF, M_WAITOK);
5799 if (!dnp->n_cookiecache) {
5800 nfs_node_unlock(dnp);
5801 return;
5802 }
5803 ndcc = dnp->n_cookiecache;
5804 ndcc->free = 0;
5805 ndcc->mru = -1;
5806 memset(ndcc->next, -1, NFSNUMCOOKIES);
5807 }
5808
5809 /*
5810 * Search the list for this cookie.
5811 * Keep track of previous and last entries.
5812 */
5813 prev = -1;
5814 i = ndcc->mru;
5815 while ((i != -1) && (cookie != ndcc->cookies[i].key)) {
5816 if (ndcc->next[i] == -1) { /* stop on last entry so we can reuse */
5817 break;
5818 }
5819 prev = i;
5820 i = ndcc->next[i];
5821 }
5822 if ((i != -1) && (cookie == ndcc->cookies[i].key)) {
5823 /* found it, remove from list */
5824 if (prev != -1) {
5825 ndcc->next[prev] = ndcc->next[i];
5826 } else {
5827 ndcc->mru = ndcc->next[i];
5828 }
5829 } else {
5830 /* not found, use next free entry or reuse last entry */
5831 if (ndcc->free != NFSNUMCOOKIES) {
5832 i = ndcc->free++;
5833 } else {
5834 ndcc->next[prev] = -1;
5835 }
5836 ndcc->cookies[i].key = cookie;
5837 ndcc->cookies[i].lbn = lbn;
5838 }
5839 /* insert cookie at head of MRU list */
5840 ndcc->next[i] = ndcc->mru;
5841 ndcc->mru = i;
5842 nfs_node_unlock(dnp);
5843}
5844
5845/*
5846 * Try to map the given directory cookie to a directory buffer (return lbn).
5847 * If we have a possibly truncated cookie (ptc), check for 32-bit matches too.
5848 */
5849int
5850nfs_dir_cookie_to_lbn(nfsnode_t dnp, uint64_t cookie, int *ptc, uint64_t *lbnp)
5851{
5852 struct nfsdmap *ndcc = dnp->n_cookiecache;
5853 int8_t eofptc, found;
5854 int i, iptc;
5855 struct nfsmount *nmp;
5856 struct nfsbuf *bp, *lastbp;
5857 struct nfsbuflists blist;
5858 struct direntry *dp, *dpptc;
5859 struct nfs_dir_buf_header *ndbhp;
5860
5861 if (!cookie) { /* initial cookie */
5862 *lbnp = 0;
5863 *ptc = 0;
5864 return 0;
5865 }
5866
5867 if (nfs_node_lock(dnp)) {
5868 return ENOENT;
5869 }
5870
5871 if (cookie == dnp->n_eofcookie) { /* EOF cookie */
5872 nfs_node_unlock(dnp);
5873 OSAddAtomic64(1, &nfsstats.direofcache_hits);
5874 *ptc = 0;
5875 return -1;
5876 }
5877 /* note if cookie is a 32-bit match with the EOF cookie */
5878 eofptc = *ptc ? NFS_DIR_COOKIE_SAME32(cookie, dnp->n_eofcookie) : 0;
5879 iptc = -1;
5880
5881 /* search the list for the cookie */
5882 for (i = ndcc ? ndcc->mru : -1; i >= 0; i = ndcc->next[i]) {
5883 if (ndcc->cookies[i].key == cookie) {
5884 /* found a match for this cookie */
5885 *lbnp = ndcc->cookies[i].lbn;
5886 nfs_node_unlock(dnp);
5887 OSAddAtomic64(1, &nfsstats.direofcache_hits);
5888 *ptc = 0;
5889 return 0;
5890 }
5891 /* check for 32-bit match */
5892 if (*ptc && (iptc == -1) && NFS_DIR_COOKIE_SAME32(ndcc->cookies[i].key, cookie)) {
5893 iptc = i;
5894 }
5895 }
5896 /* exact match not found */
5897 if (eofptc) {
5898 /* but 32-bit match hit the EOF cookie */
5899 nfs_node_unlock(dnp);
5900 OSAddAtomic64(1, &nfsstats.direofcache_hits);
5901 return -1;
5902 }
5903 if (iptc >= 0) {
5904 /* but 32-bit match got a hit */
5905 *lbnp = ndcc->cookies[iptc].lbn;
5906 nfs_node_unlock(dnp);
5907 OSAddAtomic64(1, &nfsstats.direofcache_hits);
5908 return 0;
5909 }
5910 nfs_node_unlock(dnp);
5911
5912 /*
5913 * No match found in the cookie cache... hmm...
5914 * Let's search the directory's buffers for the cookie.
5915 */
5916 nmp = NFSTONMP(dnp);
5917 if (nfs_mount_gone(nmp)) {
5918 return ENXIO;
5919 }
5920 dpptc = NULL;
5921 found = 0;
5922
5923 lck_mtx_lock(nfs_buf_mutex);
5924 /*
5925 * Scan the list of buffers, keeping them in order.
5926 * Note that itercomplete inserts each of the remaining buffers
5927 * into the head of the list (thus reversing their order). So, we
5928 * make sure to iterate through all buffers, inserting them after
5929 * each other, to keep them in order.
5930 * Also note: the LIST_INSERT_AFTER(lastbp) is only safe because
5931 * we don't drop nfs_buf_mutex.
5932 */
5933 if (!nfs_buf_iterprepare(dnp, &blist, NBI_CLEAN)) {
5934 lastbp = NULL;
5935 while ((bp = LIST_FIRST(&blist))) {
5936 LIST_REMOVE(bp, nb_vnbufs);
5937 if (!lastbp) {
5938 LIST_INSERT_HEAD(&dnp->n_cleanblkhd, bp, nb_vnbufs);
5939 } else {
5940 LIST_INSERT_AFTER(lastbp, bp, nb_vnbufs);
5941 }
5942 lastbp = bp;
5943 if (found) {
5944 continue;
5945 }
5946 nfs_buf_refget(bp);
5947 if (nfs_buf_acquire(bp, NBAC_NOWAIT, 0, 0)) {
5948 /* just skip this buffer */
5949 nfs_buf_refrele(bp);
5950 continue;
5951 }
5952 nfs_buf_refrele(bp);
5953
5954 /* scan the buffer for the cookie */
5955 ndbhp = (struct nfs_dir_buf_header*)bp->nb_data;
5956 dp = NFS_DIR_BUF_FIRST_DIRENTRY(bp);
5957 dpptc = NULL;
5958 for (i = 0; (i < ndbhp->ndbh_count) && (cookie != dp->d_seekoff); i++) {
5959 if (*ptc && !dpptc && NFS_DIR_COOKIE_SAME32(cookie, dp->d_seekoff)) {
5960 dpptc = dp;
5961 iptc = i;
5962 }
5963 dp = NFS_DIRENTRY_NEXT(dp);
5964 }
5965 if ((i == ndbhp->ndbh_count) && dpptc) {
5966 /* found only a PTC match */
5967 dp = dpptc;
5968 i = iptc;
5969 } else if (i < ndbhp->ndbh_count) {
5970 *ptc = 0;
5971 }
5972 if (i < (ndbhp->ndbh_count - 1)) {
5973 /* next entry is *in* this buffer: return this block */
5974 *lbnp = bp->nb_lblkno;
5975 found = 1;
5976 } else if (i == (ndbhp->ndbh_count - 1)) {
5977 /* next entry refers to *next* buffer: return next block */
5978 *lbnp = dp->d_seekoff;
5979 found = 1;
5980 }
5981 nfs_buf_drop(bp);
5982 }
5983 nfs_buf_itercomplete(dnp, &blist, NBI_CLEAN);
5984 }
5985 lck_mtx_unlock(nfs_buf_mutex);
5986 if (found) {
5987 OSAddAtomic64(1, &nfsstats.direofcache_hits);
5988 return 0;
5989 }
5990
5991 /* still not found... oh well, just start a new block */
5992 *lbnp = cookie;
5993 OSAddAtomic64(1, &nfsstats.direofcache_misses);
5994 return 0;
5995}
5996
5997/*
5998 * scan a directory buffer for the given name
5999 * Returns: ESRCH if not found, ENOENT if found invalid, 0 if found
6000 * Note: should only be called with RDIRPLUS directory buffers
6001 */
6002
6003#define NDBS_PURGE 1
6004#define NDBS_UPDATE 2
6005
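/*
 * Layout note: in an RDIRPLUS directory buffer, each direntry's name is
 * followed (after its terminating NUL) by a one-byte file handle length,
 * the file handle bytes, and a time_t attribute timestamp; the matching
 * nfs_vattr for each entry is kept separately in the buffer and accessed
 * via NFS_DIR_BUF_NVATTR().
 */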
6006int
6007nfs_dir_buf_search(
6008 struct nfsbuf *bp,
6009 struct componentname *cnp,
6010 fhandle_t *fhp,
6011 struct nfs_vattr *nvap,
6012 uint64_t *xidp,
6013 time_t *attrstampp,
6014 daddr64_t *nextlbnp,
6015 int flags)
6016{
6017 struct direntry *dp;
6018 struct nfs_dir_buf_header *ndbhp;
6019 struct nfs_vattr *nvattrp;
6020 daddr64_t nextlbn = 0;
6021 int i, error = ESRCH;
6022 uint32_t fhlen;
6023
6024 /* scan the buffer for the name */
6025 ndbhp = (struct nfs_dir_buf_header*)bp->nb_data;
6026 dp = NFS_DIR_BUF_FIRST_DIRENTRY(bp);
6027 for (i = 0; i < ndbhp->ndbh_count; i++) {
6028 nextlbn = dp->d_seekoff;
6029 if ((cnp->cn_namelen == dp->d_namlen) && !strcmp(cnp->cn_nameptr, dp->d_name)) {
6030 fhlen = dp->d_name[dp->d_namlen + 1];
6031 nvattrp = NFS_DIR_BUF_NVATTR(bp, i);
6032 if ((ndbhp->ndbh_ncgen != bp->nb_np->n_ncgen) || (fhlen == 0) ||
6033 (nvattrp->nva_type == VNON) || (nvattrp->nva_fileid == 0)) {
6034 /* entry is not valid */
6035 error = ENOENT;
6036 break;
6037 }
6038 if (flags == NDBS_PURGE) {
6039 dp->d_fileno = 0;
6040 bzero(nvattrp, sizeof(*nvattrp));
6041 error = ENOENT;
6042 break;
6043 }
6044 if (flags == NDBS_UPDATE) {
6045 /* update direntry's attrs if fh matches */
6046 if ((fhp->fh_len == fhlen) && !bcmp(&dp->d_name[dp->d_namlen + 2], fhp->fh_data, fhlen)) {
6047 bcopy(nvap, nvattrp, sizeof(*nvap));
6048 dp->d_fileno = nvattrp->nva_fileid;
6049 nvattrp->nva_fileid = *xidp;
6050 *(time_t*)(&dp->d_name[dp->d_namlen + 2 + fhp->fh_len]) = *attrstampp;
6051 }
6052 error = 0;
6053 break;
6054 }
6055 /* copy out fh, attrs, attrstamp, and xid */
6056 fhp->fh_len = fhlen;
6057 bcopy(&dp->d_name[dp->d_namlen + 2], fhp->fh_data, MAX(fhp->fh_len, (int)sizeof(fhp->fh_data)));
6058 *attrstampp = *(time_t*)(&dp->d_name[dp->d_namlen + 2 + fhp->fh_len]);
6059 bcopy(nvattrp, nvap, sizeof(*nvap));
6060 *xidp = nvap->nva_fileid;
6061 nvap->nva_fileid = dp->d_fileno;
6062 error = 0;
6063 break;
6064 }
6065 dp = NFS_DIRENTRY_NEXT(dp);
6066 }
6067 if (nextlbnp) {
6068 *nextlbnp = nextlbn;
6069 }
6070 return error;
6071}
6072
6073/*
6074 * Look up a name in a directory's buffers.
6075 * Note: should only be called with RDIRPLUS directory buffers
6076 */
6077int
6078nfs_dir_buf_cache_lookup(nfsnode_t dnp, nfsnode_t *npp, struct componentname *cnp, vfs_context_t ctx, int purge)
6079{
6080 nfsnode_t newnp;
6081 struct nfsmount *nmp;
6082 int error = 0, i, found = 0, count = 0;
6083 u_int64_t xid;
6084 struct nfs_vattr nvattr;
6085 fhandle_t fh;
6086 time_t attrstamp = 0;
6087 thread_t thd = vfs_context_thread(ctx);
6088 struct nfsbuf *bp, *lastbp, *foundbp;
6089 struct nfsbuflists blist;
6090 daddr64_t lbn, nextlbn;
6091 int dotunder = (cnp->cn_namelen > 2) && (cnp->cn_nameptr[0] == '.') && (cnp->cn_nameptr[1] == '_');
6092 int isdot = (cnp->cn_namelen == 1) && (cnp->cn_nameptr[0] == '.');
6093 int isdotdot = (cnp->cn_namelen == 2) && (cnp->cn_nameptr[0] == '.') && (cnp->cn_nameptr[1] == '.');
6094
6095 nmp = NFSTONMP(dnp);
6096 if (nfs_mount_gone(nmp)) {
6097 return ENXIO;
6098 }
6099 if (!purge) {
6100 *npp = NULL;
6101 }
6102
6103 if (isdot || isdotdot) {
6104 return 0;
6105 }
6106
6107 /* first check most recent buffer (and next one too) */
6108 lbn = dnp->n_lastdbl;
6109 for (i = 0; i < 2; i++) {
6110 if ((error = nfs_buf_get(dnp, lbn, NFS_DIRBLKSIZ, thd, NBLK_READ | NBLK_ONLYVALID, &bp))) {
6111 return error;
6112 }
6113 if (!bp) {
6114 break;
6115 }
6116 count++;
6117 error = nfs_dir_buf_search(bp, cnp, &fh, &nvattr, &xid, &attrstamp, &nextlbn, purge ? NDBS_PURGE : 0);
6118 nfs_buf_release(bp, 0);
6119 if (error == ESRCH) {
6120 error = 0;
6121 } else {
6122 found = 1;
6123 break;
6124 }
6125 lbn = nextlbn;
6126 }
6127
6128 lck_mtx_lock(nfs_buf_mutex);
6129 if (found) {
6130 dnp->n_lastdbl = lbn;
6131 goto done;
6132 }
6133
6134 /*
6135 * Scan the list of buffers, keeping them in order.
6136 * Note that itercomplete inserts each of the remaining buffers
6137 * into the head of the list (thus reversing their order). So, we
6138 * make sure to iterate through all buffers, inserting them after
6139 * each other, to keep them in order.
6140 * Also note: the LIST_INSERT_AFTER(lastbp) is only safe because
6141 * we don't drop nfs_buf_mutex.
6142 */
6143 if (!nfs_buf_iterprepare(dnp, &blist, NBI_CLEAN)) {
6144 lastbp = foundbp = NULL;
6145 while ((bp = LIST_FIRST(&blist))) {
6146 LIST_REMOVE(bp, nb_vnbufs);
6147 if (!lastbp) {
6148 LIST_INSERT_HEAD(&dnp->n_cleanblkhd, bp, nb_vnbufs);
6149 } else {
6150 LIST_INSERT_AFTER(lastbp, bp, nb_vnbufs);
6151 }
6152 lastbp = bp;
6153 if (error || found) {
6154 continue;
6155 }
6156 if (!purge && dotunder && (count > 100)) { /* don't waste too much time looking for ._ files */
6157 continue;
6158 }
6159 nfs_buf_refget(bp);
6160 lbn = bp->nb_lblkno;
6161 if (nfs_buf_acquire(bp, NBAC_NOWAIT, 0, 0)) {
6162 /* just skip this buffer */
6163 nfs_buf_refrele(bp);
6164 continue;
6165 }
6166 nfs_buf_refrele(bp);
6167 count++;
6168 error = nfs_dir_buf_search(bp, cnp, &fh, &nvattr, &xid, &attrstamp, NULL, purge ? NDBS_PURGE : 0);
6169 if (error == ESRCH) {
6170 error = 0;
6171 } else {
6172 found = 1;
6173 foundbp = bp;
6174 }
6175 nfs_buf_drop(bp);
6176 }
6177 if (found) {
6178 LIST_REMOVE(foundbp, nb_vnbufs);
6179 LIST_INSERT_HEAD(&dnp->n_cleanblkhd, foundbp, nb_vnbufs);
6180 dnp->n_lastdbl = foundbp->nb_lblkno;
6181 }
6182 nfs_buf_itercomplete(dnp, &blist, NBI_CLEAN);
6183 }
6184done:
6185 lck_mtx_unlock(nfs_buf_mutex);
6186
6187 if (!error && found && !purge) {
6188 error = nfs_nget(NFSTOMP(dnp), dnp, cnp, fh.fh_data, fh.fh_len,
6189 &nvattr, &xid, dnp->n_auth, NG_MAKEENTRY, &newnp);
6190 if (error) {
6191 return error;
6192 }
6193 newnp->n_attrstamp = attrstamp;
6194 *npp = newnp;
6195 nfs_node_unlock(newnp);
6196 /* check if the dir buffer's attrs are out of date */
6197 if (!nfs_getattr(newnp, &nvattr, ctx, NGA_CACHED) &&
6198 (newnp->n_attrstamp != attrstamp)) {
6199 /* they are, so update them */
6200 error = nfs_buf_get(dnp, lbn, NFS_DIRBLKSIZ, thd, NBLK_READ | NBLK_ONLYVALID, &bp);
6201 if (!error && bp) {
6202 attrstamp = newnp->n_attrstamp;
6203 xid = newnp->n_xid;
6204 nfs_dir_buf_search(bp, cnp, &fh, &nvattr, &xid, &attrstamp, NULL, NDBS_UPDATE);
6205 nfs_buf_release(bp, 0);
6206 }
6207 error = 0;
6208 }
6209 }
6210
6211 return error;
6212}
6213
6214/*
6215 * Purge name cache entries for the given node.
6216 * For RDIRPLUS, also invalidate the entry in the directory's buffers.
6217 */
6218void
6219nfs_name_cache_purge(nfsnode_t dnp, nfsnode_t np, struct componentname *cnp, vfs_context_t ctx)
6220{
6221 struct nfsmount *nmp = NFSTONMP(dnp);
6222
6223 cache_purge(NFSTOV(np));
6224 if (nmp && (nmp->nm_vers > NFS_VER2) && NMFLAG(nmp, RDIRPLUS)) {
6225 nfs_dir_buf_cache_lookup(dnp, NULL, cnp, ctx, 1);
6226 }
6227}
6228
6229/*
6230 * NFS V3 readdir (plus) RPC.
6231 */
6232int
6233nfs3_readdir_rpc(nfsnode_t dnp, struct nfsbuf *bp, vfs_context_t ctx)
6234{
6235 struct nfsmount *nmp;
6236 int error = 0, lockerror, nfsvers, rdirplus, bigcookies;
6237 int i, status, attrflag, fhflag, more_entries = 1, eof, bp_dropped = 0;
6238 uint32_t nmreaddirsize, nmrsize;
6239 uint32_t namlen, skiplen, fhlen, xlen, attrlen, reclen, space_free, space_needed;
6240 uint64_t cookie, lastcookie, xid, savedxid, fileno;
6241 struct nfsm_chain nmreq, nmrep, nmrepsave;
6242 fhandle_t fh;
6243 struct nfs_vattr *nvattrp;
6244 struct nfs_dir_buf_header *ndbhp;
6245 struct direntry *dp;
6246 char *padstart, padlen;
6247 struct timeval now;
6248
6249 nmp = NFSTONMP(dnp);
6250 if (nfs_mount_gone(nmp)) {
6251 return ENXIO;
6252 }
6253 nfsvers = nmp->nm_vers;
6254 nmreaddirsize = nmp->nm_readdirsize;
6255 nmrsize = nmp->nm_rsize;
6256 bigcookies = nmp->nm_state & NFSSTA_BIGCOOKIES;
6257noplus:
6258 rdirplus = ((nfsvers > NFS_VER2) && NMFLAG(nmp, RDIRPLUS)) ? 1 : 0;
6259
6260 if ((lockerror = nfs_node_lock(dnp))) {
6261 return lockerror;
6262 }
6263
6264 /* determine cookie to use, and move dp to the right offset */
6265 ndbhp = (struct nfs_dir_buf_header*)bp->nb_data;
6266 dp = NFS_DIR_BUF_FIRST_DIRENTRY(bp);
6267 if (ndbhp->ndbh_count) {
6268 for (i = 0; i < ndbhp->ndbh_count - 1; i++) {
6269 dp = NFS_DIRENTRY_NEXT(dp);
6270 }
6271 cookie = dp->d_seekoff;
6272 dp = NFS_DIRENTRY_NEXT(dp);
6273 } else {
6274 cookie = bp->nb_lblkno;
6275 /* increment with every buffer read */
6276 OSAddAtomic64(1, &nfsstats.readdir_bios);
6277 }
6278 lastcookie = cookie;
6279
6280 /*
6281 * Loop around doing readdir(plus) RPCs of size nm_readdirsize until
6282 * the buffer is full (or we hit EOF). Then put the remainder of the
6283 * results in the next buffer(s).
6284 */
6285 nfsm_chain_null(&nmreq);
6286 nfsm_chain_null(&nmrep);
6287 while (nfs_dir_buf_freespace(bp, rdirplus) && !(ndbhp->ndbh_flags & NDB_FULL)) {
6288 nfsm_chain_build_alloc_init(error, &nmreq,
6289 NFSX_FH(nfsvers) + NFSX_READDIR(nfsvers) + NFSX_UNSIGNED);
6290 nfsm_chain_add_fh(error, &nmreq, nfsvers, dnp->n_fhp, dnp->n_fhsize);
6291 if (nfsvers == NFS_VER3) {
6292 /* opaque values don't need swapping, but as long */
6293 /* as we are consistent about it, it should be ok */
6294 nfsm_chain_add_64(error, &nmreq, cookie);
6295 nfsm_chain_add_64(error, &nmreq, dnp->n_cookieverf);
6296 } else {
6297 nfsm_chain_add_32(error, &nmreq, cookie);
6298 }
6299 nfsm_chain_add_32(error, &nmreq, nmreaddirsize);
6300 if (rdirplus) {
6301 nfsm_chain_add_32(error, &nmreq, nmrsize);
6302 }
6303 nfsm_chain_build_done(error, &nmreq);
6304 nfs_node_unlock(dnp);
6305 lockerror = ENOENT;
6306 nfsmout_if(error);
6307
6308 error = nfs_request(dnp, NULL, &nmreq,
6309 rdirplus ? NFSPROC_READDIRPLUS : NFSPROC_READDIR,
6310 ctx, NULL, &nmrep, &xid, &status);
6311
6312 if ((lockerror = nfs_node_lock(dnp))) {
6313 error = lockerror;
6314 }
6315
6316 savedxid = xid;
6317 if (nfsvers == NFS_VER3) {
6318 nfsm_chain_postop_attr_update(error, &nmrep, dnp, &xid);
6319 }
6320 if (!error) {
6321 error = status;
6322 }
6323 if (nfsvers == NFS_VER3) {
6324 nfsm_chain_get_64(error, &nmrep, dnp->n_cookieverf);
6325 }
6326 nfsm_chain_get_32(error, &nmrep, more_entries);
6327
6328 if (!lockerror) {
6329 nfs_node_unlock(dnp);
6330 lockerror = ENOENT;
6331 }
6332 if (error == NFSERR_NOTSUPP) {
6333 /* oops... it doesn't look like readdirplus is supported */
6334 lck_mtx_lock(&nmp->nm_lock);
6335 NFS_BITMAP_CLR(nmp->nm_flags, NFS_MFLAG_RDIRPLUS);
6336 lck_mtx_unlock(&nmp->nm_lock);
6337 goto noplus;
6338 }
6339 nfsmout_if(error);
6340
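		/*
		 * For readdirplus, note when a fresh pass over the directory
		 * starts (lastcookie == 0) so that nfs_vnop_readdir() can later
		 * judge the age of the attributes cached during this fill.
		 */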
6341 if (rdirplus) {
6342 microuptime(&now);
6343 if (lastcookie == 0) {
6344 dnp->n_rdirplusstamp_sof = now.tv_sec;
6345 dnp->n_rdirplusstamp_eof = 0;
6346 }
6347 }
6348
6349 /* loop through the entries packing them into the buffer */
6350 while (more_entries) {
6351 if (nfsvers == NFS_VER3) {
6352 nfsm_chain_get_64(error, &nmrep, fileno);
6353 } else {
6354 nfsm_chain_get_32(error, &nmrep, fileno);
6355 }
6356 nfsm_chain_get_32(error, &nmrep, namlen);
6357 nfsmout_if(error);
6358 /* sanity check the name length, then just truncate names that don't fit in direntry.d_name */
6359 if (namlen <= 0) {
6360 error = EBADRPC;
6361 goto nfsmout;
6362 }
6363 if (namlen > (sizeof(dp->d_name) - 1)) {
6364 skiplen = namlen - sizeof(dp->d_name) + 1;
6365 namlen = sizeof(dp->d_name) - 1;
6366 } else {
6367 skiplen = 0;
6368 }
6369 /* guess that fh size will be same as parent */
6370 fhlen = rdirplus ? (1 + dnp->n_fhsize) : 0;
6371 xlen = rdirplus ? (fhlen + sizeof(time_t)) : 0;
6372 attrlen = rdirplus ? sizeof(struct nfs_vattr) : 0;
6373 reclen = NFS_DIRENTRY_LEN(namlen + xlen);
6374 space_needed = reclen + attrlen;
6375 space_free = nfs_dir_buf_freespace(bp, rdirplus);
6376 if (space_needed > space_free) {
6377 /*
6378 * We still have entries to pack, but we've
6379 * run out of room in the current buffer.
6380 * So we need to move to the next buffer.
6381 * The block# for the next buffer is the
6382 * last cookie in the current buffer.
6383 */
6384nextbuffer:
6385 ndbhp->ndbh_flags |= NDB_FULL;
6386 nfs_buf_release(bp, 0);
6387 bp_dropped = 1;
6388 bp = NULL;
6389 error = nfs_buf_get(dnp, lastcookie, NFS_DIRBLKSIZ, vfs_context_thread(ctx), NBLK_READ, &bp);
6390 nfsmout_if(error);
6391 /* initialize buffer */
6392 ndbhp = (struct nfs_dir_buf_header*)bp->nb_data;
6393 ndbhp->ndbh_flags = 0;
6394 ndbhp->ndbh_count = 0;
6395 ndbhp->ndbh_entry_end = sizeof(*ndbhp);
6396 ndbhp->ndbh_ncgen = dnp->n_ncgen;
6397 space_free = nfs_dir_buf_freespace(bp, rdirplus);
6398 dp = NFS_DIR_BUF_FIRST_DIRENTRY(bp);
6399 /* increment with every buffer read */
6400 OSAddAtomic64(1, &nfsstats.readdir_bios);
6401 }
6402 nmrepsave = nmrep;
6403 dp->d_fileno = fileno;
6404 dp->d_namlen = namlen;
6405 dp->d_reclen = reclen;
6406 dp->d_type = DT_UNKNOWN;
6407 nfsm_chain_get_opaque(error, &nmrep, namlen, dp->d_name);
6408 nfsmout_if(error);
6409 dp->d_name[namlen] = '\0';
6410 if (skiplen) {
6411 nfsm_chain_adv(error, &nmrep,
6412 nfsm_rndup(namlen + skiplen) - nfsm_rndup(namlen));
6413 }
6414 if (nfsvers == NFS_VER3) {
6415 nfsm_chain_get_64(error, &nmrep, cookie);
6416 } else {
6417 nfsm_chain_get_32(error, &nmrep, cookie);
6418 }
6419 nfsmout_if(error);
6420 dp->d_seekoff = cookie;
6421 if (!bigcookies && (cookie >> 32) && (nmp == NFSTONMP(dnp))) {
6422 /* we've got a big cookie, make sure flag is set */
6423 lck_mtx_lock(&nmp->nm_lock);
6424 nmp->nm_state |= NFSSTA_BIGCOOKIES;
6425 lck_mtx_unlock(&nmp->nm_lock);
6426 bigcookies = 1;
6427 }
6428 if (rdirplus) {
6429 nvattrp = NFS_DIR_BUF_NVATTR(bp, ndbhp->ndbh_count);
6430 /* check for attributes */
6431 nfsm_chain_get_32(error, &nmrep, attrflag);
6432 nfsmout_if(error);
6433 if (attrflag) {
6434 /* grab attributes */
6435 error = nfs_parsefattr(nmp, &nmrep, NFS_VER3, nvattrp);
6436 nfsmout_if(error);
6437 dp->d_type = IFTODT(VTTOIF(nvattrp->nva_type));
6438 /* fileid is already in d_fileno, so stash xid in attrs */
6439 nvattrp->nva_fileid = savedxid;
6440 } else {
6441 /* mark the attributes invalid */
6442 bzero(nvattrp, sizeof(struct nfs_vattr));
6443 }
6444 /* check for file handle */
6445 nfsm_chain_get_32(error, &nmrep, fhflag);
6446 nfsmout_if(error);
6447 if (fhflag) {
6448 nfsm_chain_get_fh(error, &nmrep, NFS_VER3, &fh);
6449 nfsmout_if(error);
6450 fhlen = fh.fh_len + 1;
6451 xlen = fhlen + sizeof(time_t);
6452 reclen = NFS_DIRENTRY_LEN(namlen + xlen);
6453 space_needed = reclen + attrlen;
6454 if (space_needed > space_free) {
6455 /* didn't actually have the room... move on to next buffer */
6456 nmrep = nmrepsave;
6457 goto nextbuffer;
6458 }
6459 /* pack the file handle into the record */
6460 dp->d_name[dp->d_namlen + 1] = fh.fh_len;
6461 bcopy(fh.fh_data, &dp->d_name[dp->d_namlen + 2], fh.fh_len);
6462 } else {
6463 /* mark the file handle invalid */
6464 fh.fh_len = 0;
6465 fhlen = fh.fh_len + 1;
6466 xlen = fhlen + sizeof(time_t);
6467 reclen = NFS_DIRENTRY_LEN(namlen + xlen);
6468 bzero(&dp->d_name[dp->d_namlen + 1], fhlen);
6469 }
6470 *(time_t*)(&dp->d_name[dp->d_namlen + 1 + fhlen]) = now.tv_sec;
6471 dp->d_reclen = reclen;
6472 nfs_rdirplus_update_node_attrs(dnp, dp, &fh, nvattrp, &savedxid);
6473 }
6474 padstart = dp->d_name + dp->d_namlen + 1 + xlen;
6475 ndbhp->ndbh_count++;
6476 lastcookie = cookie;
6477 /* advance to next direntry in buffer */
6478 dp = NFS_DIRENTRY_NEXT(dp);
6479 ndbhp->ndbh_entry_end = (char*)dp - bp->nb_data;
6480 /* zero out the pad bytes */
6481 padlen = (char*)dp - padstart;
6482 if (padlen > 0) {
6483 bzero(padstart, padlen);
6484 }
6485 /* check for more entries */
6486 nfsm_chain_get_32(error, &nmrep, more_entries);
6487 nfsmout_if(error);
6488 }
6489 /* Finally, get the eof boolean */
6490 nfsm_chain_get_32(error, &nmrep, eof);
6491 nfsmout_if(error);
6492 if (eof) {
6493 ndbhp->ndbh_flags |= (NDB_FULL | NDB_EOF);
6494 nfs_node_lock_force(dnp);
6495 dnp->n_eofcookie = lastcookie;
6496 if (rdirplus) {
6497 dnp->n_rdirplusstamp_eof = now.tv_sec;
6498 }
6499 nfs_node_unlock(dnp);
6500 } else {
6501 more_entries = 1;
6502 }
6503 if (bp_dropped) {
6504 nfs_buf_release(bp, 0);
6505 bp = NULL;
6506 break;
6507 }
6508 if ((lockerror = nfs_node_lock(dnp))) {
6509 error = lockerror;
6510 }
6511 nfsmout_if(error);
6512 nfsm_chain_cleanup(&nmrep);
6513 nfsm_chain_null(&nmreq);
6514 }
6515nfsmout:
6516 if (bp_dropped && bp) {
6517 nfs_buf_release(bp, 0);
6518 }
6519 if (!lockerror) {
6520 nfs_node_unlock(dnp);
6521 }
6522 nfsm_chain_cleanup(&nmreq);
6523 nfsm_chain_cleanup(&nmrep);
6524 return bp_dropped ? NFSERR_DIRBUFDROPPED : error;
6525}
6526
6527/*
6528 * Silly rename. To make the stateless NFS filesystem look a little more
6529 * like "ufs", a remove of an active vnode is translated into a rename to a
6530 * funny-looking filename that is removed by nfs_vnop_inactive on the
6531 * nfsnode. There is the potential for another process on a different client
6532 * to create the same funny name between when the lookitup() fails and the
6533 * rename() completes, but...
6534 */
6535
6536/* format of "random" silly names - includes a number and pid */
6537/* (note: shouldn't exceed size of nfs_sillyrename.nsr_name) */
6538#define NFS_SILLYNAME_FORMAT ".nfs.%08x.%04x"
6539/* starting from zero isn't silly enough */
6540static uint32_t nfs_sillyrename_number = 0x20051025;
6541
6542int
6543nfs_sillyrename(
6544 nfsnode_t dnp,
6545 nfsnode_t np,
6546 struct componentname *cnp,
6547 vfs_context_t ctx)
6548{
6549 struct nfs_sillyrename *nsp;
6550 int error;
6551 short pid;
6552 kauth_cred_t cred;
6553 uint32_t num;
6554 struct nfsmount *nmp;
6555
6556 nmp = NFSTONMP(dnp);
6557 if (nfs_mount_gone(nmp)) {
6558 return ENXIO;
6559 }
6560
6561 nfs_name_cache_purge(dnp, np, cnp, ctx);
6562
6563 MALLOC_ZONE(nsp, struct nfs_sillyrename *,
6564 sizeof(struct nfs_sillyrename), M_NFSREQ, M_WAITOK);
6565 if (!nsp) {
6566 return ENOMEM;
6567 }
6568 cred = vfs_context_ucred(ctx);
6569 kauth_cred_ref(cred);
6570 nsp->nsr_cred = cred;
6571 nsp->nsr_dnp = dnp;
6572 error = vnode_ref(NFSTOV(dnp));
6573 if (error) {
6574 goto bad_norele;
6575 }
6576
6577 /* Fudge together a funny name */
6578 pid = vfs_context_pid(ctx);
6579 num = OSAddAtomic(1, &nfs_sillyrename_number);
6580 nsp->nsr_namlen = snprintf(nsp->nsr_name, sizeof(nsp->nsr_name),
6581 NFS_SILLYNAME_FORMAT, num, (pid & 0xffff));
6582 if (nsp->nsr_namlen >= (int)sizeof(nsp->nsr_name)) {
6583 nsp->nsr_namlen = sizeof(nsp->nsr_name) - 1;
6584 }
6585
6586 /* Try lookitups until we get one that isn't there */
6587 while (nfs_lookitup(dnp, nsp->nsr_name, nsp->nsr_namlen, ctx, NULL) == 0) {
6588 num = OSAddAtomic(1, &nfs_sillyrename_number);
6589 nsp->nsr_namlen = snprintf(nsp->nsr_name, sizeof(nsp->nsr_name),
6590 NFS_SILLYNAME_FORMAT, num, (pid & 0xffff));
6591 if (nsp->nsr_namlen >= (int)sizeof(nsp->nsr_name)) {
6592 nsp->nsr_namlen = sizeof(nsp->nsr_name) - 1;
6593 }
6594 }
6595
6596 /* now, do the rename */
6597 error = nmp->nm_funcs->nf_rename_rpc(dnp, cnp->cn_nameptr, cnp->cn_namelen,
6598 dnp, nsp->nsr_name, nsp->nsr_namlen, ctx);
6599
6600 /* Kludge: Map ENOENT => 0 assuming that it is a reply to a retry. */
6601 if (error == ENOENT) {
6602 error = 0;
6603 }
6604 if (!error) {
6605 nfs_node_lock_force(dnp);
6606 if (dnp->n_flag & NNEGNCENTRIES) {
6607 dnp->n_flag &= ~NNEGNCENTRIES;
6608 cache_purge_negatives(NFSTOV(dnp));
6609 }
6610 nfs_node_unlock(dnp);
6611 }
6612 FSDBG(267, dnp, np, num, error);
6613 if (error) {
6614 goto bad;
6615 }
6616 error = nfs_lookitup(dnp, nsp->nsr_name, nsp->nsr_namlen, ctx, &np);
6617 nfs_node_lock_force(np);
6618 np->n_sillyrename = nsp;
6619 nfs_node_unlock(np);
6620 return 0;
6621bad:
6622 vnode_rele(NFSTOV(dnp));
6623bad_norele:
6624 nsp->nsr_cred = NOCRED;
6625 kauth_cred_unref(&cred);
6626 FREE_ZONE(nsp, sizeof(*nsp), M_NFSREQ);
6627 return error;
6628}
6629
6630int
6631nfs3_lookup_rpc_async(
6632 nfsnode_t dnp,
6633 char *name,
6634 int namelen,
6635 vfs_context_t ctx,
6636 struct nfsreq **reqp)
6637{
6638 struct nfsmount *nmp;
6639 struct nfsm_chain nmreq;
6640 int error = 0, nfsvers;
6641
6642 nmp = NFSTONMP(dnp);
6643 if (nfs_mount_gone(nmp)) {
6644 return ENXIO;
6645 }
6646 nfsvers = nmp->nm_vers;
6647
6648 nfsm_chain_null(&nmreq);
6649
6650 nfsm_chain_build_alloc_init(error, &nmreq,
6651 NFSX_FH(nfsvers) + NFSX_UNSIGNED + nfsm_rndup(namelen));
6652 nfsm_chain_add_fh(error, &nmreq, nfsvers, dnp->n_fhp, dnp->n_fhsize);
6653 nfsm_chain_add_name(error, &nmreq, name, namelen, nmp);
6654 nfsm_chain_build_done(error, &nmreq);
6655 nfsmout_if(error);
6656 error = nfs_request_async(dnp, NULL, &nmreq, NFSPROC_LOOKUP,
6657 vfs_context_thread(ctx), vfs_context_ucred(ctx), NULL, 0, NULL, reqp);
6658nfsmout:
6659 nfsm_chain_cleanup(&nmreq);
6660 return error;
6661}
6662
6663int
6664nfs3_lookup_rpc_async_finish(
6665 nfsnode_t dnp,
6666 __unused char *name,
6667 __unused int namelen,
6668 vfs_context_t ctx,
6669 struct nfsreq *req,
6670 u_int64_t *xidp,
6671 fhandle_t *fhp,
6672 struct nfs_vattr *nvap)
6673{
6674 int error = 0, lockerror = ENOENT, status, nfsvers, attrflag;
6675 u_int64_t xid;
6676 struct nfsmount *nmp;
6677 struct nfsm_chain nmrep;
6678
6679 nmp = NFSTONMP(dnp);
6680 if (nmp == NULL) {
6681 return ENXIO;
6682 }
6683 nfsvers = nmp->nm_vers;
6684
6685 nfsm_chain_null(&nmrep);
6686
6687 error = nfs_request_async_finish(req, &nmrep, xidp, &status);
6688
6689 if ((lockerror = nfs_node_lock(dnp))) {
6690 error = lockerror;
6691 }
6692 xid = *xidp;
6693 if (error || status) {
6694 if (nfsvers == NFS_VER3) {
6695 nfsm_chain_postop_attr_update(error, &nmrep, dnp, &xid);
6696 }
6697 if (!error) {
6698 error = status;
6699 }
6700 goto nfsmout;
6701 }
6702
6703 nfsmout_if(error || !fhp || !nvap);
6704
6705 /* get the file handle */
6706 nfsm_chain_get_fh(error, &nmrep, nfsvers, fhp);
6707
6708 /* get the attributes */
6709 if (nfsvers == NFS_VER3) {
6710 nfsm_chain_postop_attr_get(nmp, error, &nmrep, attrflag, nvap);
6711 nfsm_chain_postop_attr_update(error, &nmrep, dnp, &xid);
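		/* reply carried no post-op attributes for the target, so fetch them with a separate GETATTR */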
6712 if (!error && !attrflag) {
6713 error = nfs3_getattr_rpc(NULL, NFSTOMP(dnp), fhp->fh_data, fhp->fh_len, 0, ctx, nvap, xidp);
6714 }
6715 } else {
6716 error = nfs_parsefattr(nmp, &nmrep, nfsvers, nvap);
6717 }
6718nfsmout:
6719 if (!lockerror) {
6720 nfs_node_unlock(dnp);
6721 }
6722 nfsm_chain_cleanup(&nmrep);
6723 return error;
6724}
6725
6726/*
6727 * Look up a file name and optionally either update the file handle or
6728 * allocate an nfsnode, depending on the value of npp.
6729 * npp == NULL --> just do the lookup
6730 * *npp == NULL --> allocate a new nfsnode and make sure attributes are
6731 * handled too
6732 * *npp != NULL --> update the file handle in the vnode
6733 */
6734int
6735nfs_lookitup(
6736 nfsnode_t dnp,
6737 char *name,
6738 int namelen,
6739 vfs_context_t ctx,
6740 nfsnode_t *npp)
6741{
6742 int error = 0;
6743 nfsnode_t np, newnp = NULL;
6744 u_int64_t xid;
6745 fhandle_t fh;
6746 struct nfsmount *nmp;
6747 struct nfs_vattr nvattr;
6748 struct nfsreq rq, *req = &rq;
6749
6750 nmp = NFSTONMP(dnp);
6751 if (nfs_mount_gone(nmp)) {
6752 return ENXIO;
6753 }
6754
6755 if (NFS_BITMAP_ISSET(nmp->nm_fsattr.nfsa_bitmap, NFS_FATTR_MAXNAME) &&
6756 (namelen > (int)nmp->nm_fsattr.nfsa_maxname)) {
6757 return ENAMETOOLONG;
6758 }
6759
6760 NVATTR_INIT(&nvattr);
6761
6762 /* check for lookup of "." */
6763 if ((name[0] == '.') && (namelen == 1)) {
6764 /* skip lookup, we know who we are */
6765 fh.fh_len = 0;
6766 newnp = dnp;
6767 goto nfsmout;
6768 }
6769
6770 error = nmp->nm_funcs->nf_lookup_rpc_async(dnp, name, namelen, ctx, &req);
6771 nfsmout_if(error);
6772 error = nmp->nm_funcs->nf_lookup_rpc_async_finish(dnp, name, namelen, ctx, req, &xid, &fh, &nvattr);
6773 nfsmout_if(!npp || error);
6774
6775 if (*npp) {
6776 np = *npp;
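		/*
		 * Update the existing node's file handle: if the handle size
		 * changed, switch between the node's inline fh buffer and a
		 * separately allocated big-fh buffer before copying it in.
		 */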
6777 if (fh.fh_len != np->n_fhsize) {
6778 u_char *oldbuf = (np->n_fhsize > NFS_SMALLFH) ? np->n_fhp : NULL;
6779 if (fh.fh_len > NFS_SMALLFH) {
6780 MALLOC_ZONE(np->n_fhp, u_char *, fh.fh_len, M_NFSBIGFH, M_WAITOK);
6781 if (!np->n_fhp) {
6782 np->n_fhp = oldbuf;
6783 error = ENOMEM;
6784 goto nfsmout;
6785 }
6786 } else {
6787 np->n_fhp = &np->n_fh[0];
6788 }
6789 if (oldbuf) {
6790 FREE_ZONE(oldbuf, np->n_fhsize, M_NFSBIGFH);
6791 }
6792 }
6793 bcopy(fh.fh_data, np->n_fhp, fh.fh_len);
6794 np->n_fhsize = fh.fh_len;
6795 nfs_node_lock_force(np);
6796 error = nfs_loadattrcache(np, &nvattr, &xid, 0);
6797 nfs_node_unlock(np);
6798 nfsmout_if(error);
6799 newnp = np;
6800 } else if (NFS_CMPFH(dnp, fh.fh_data, fh.fh_len)) {
6801 nfs_node_lock_force(dnp);
6802 if (dnp->n_xid <= xid) {
6803 error = nfs_loadattrcache(dnp, &nvattr, &xid, 0);
6804 }
6805 nfs_node_unlock(dnp);
6806 nfsmout_if(error);
6807 newnp = dnp;
6808 } else {
6809 struct componentname cn, *cnp = &cn;
6810 bzero(cnp, sizeof(*cnp));
6811 cnp->cn_nameptr = name;
6812 cnp->cn_namelen = namelen;
6813 error = nfs_nget(NFSTOMP(dnp), dnp, cnp, fh.fh_data, fh.fh_len,
6814 &nvattr, &xid, rq.r_auth, NG_MAKEENTRY, &np);
6815 nfsmout_if(error);
6816 newnp = np;
6817 }
6818
6819nfsmout:
6820 if (npp && !*npp && !error) {
6821 *npp = newnp;
6822 }
6823 NVATTR_CLEANUP(&nvattr);
6824 return error;
6825}
6826
6827/*
6828 * set up and initialize a "._" file lookup structure used for
6829 * performing async lookups.
6830 */
6831void
6832nfs_dulookup_init(struct nfs_dulookup *dulp, nfsnode_t dnp, const char *name, int namelen, vfs_context_t ctx)
6833{
6834 int error, du_namelen;
6835 vnode_t du_vp;
6836 struct nfsmount *nmp = NFSTONMP(dnp);
6837
6838 /* check for ._ file in name cache */
6839 dulp->du_flags = 0;
6840 bzero(&dulp->du_cn, sizeof(dulp->du_cn));
6841 du_namelen = namelen + 2;
6842 if (!nmp || NMFLAG(nmp, NONEGNAMECACHE)) {
6843 return;
6844 }
6845 if ((namelen >= 2) && (name[0] == '.') && (name[1] == '_')) {
6846 return;
6847 }
6848 if (du_namelen >= (int)sizeof(dulp->du_smallname)) {
6849 MALLOC(dulp->du_cn.cn_nameptr, char *, du_namelen + 1, M_TEMP, M_WAITOK);
6850 } else {
6851 dulp->du_cn.cn_nameptr = dulp->du_smallname;
6852 }
6853 if (!dulp->du_cn.cn_nameptr) {
6854 return;
6855 }
6856 dulp->du_cn.cn_namelen = du_namelen;
6857 snprintf(dulp->du_cn.cn_nameptr, du_namelen + 1, "._%s", name);
6858 dulp->du_cn.cn_nameptr[du_namelen] = '\0';
6859 dulp->du_cn.cn_nameiop = LOOKUP;
6860 dulp->du_cn.cn_flags = MAKEENTRY;
6861
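	/*
	 * cache_lookup() returns -1 on a positive hit (du_vp holds the vnode,
	 * so no async lookup is needed) and 0 when no entry was found; in that
	 * case the async "._" lookup is worth doing, unless the readdirplus
	 * buffer cache turns the entry up below.
	 */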
6862 error = cache_lookup(NFSTOV(dnp), &du_vp, &dulp->du_cn);
6863 if (error == -1) {
6864 vnode_put(du_vp);
6865 } else if (!error) {
6866 nmp = NFSTONMP(dnp);
6867 if (nmp && (nmp->nm_vers > NFS_VER2) && NMFLAG(nmp, RDIRPLUS)) {
6868 /* if rdirplus, try dir buf cache lookup */
6869 nfsnode_t du_np = NULL;
6870 if (!nfs_dir_buf_cache_lookup(dnp, &du_np, &dulp->du_cn, ctx, 0) && du_np) {
6871 /* dir buf cache hit */
6872 du_vp = NFSTOV(du_np);
6873 vnode_put(du_vp);
6874 error = -1;
6875 }
6876 }
6877 if (!error) {
6878 dulp->du_flags |= NFS_DULOOKUP_DOIT;
6879 }
6880 }
6881}
6882
6883/*
6884 * start an async "._" file lookup request
6885 */
6886void
6887nfs_dulookup_start(struct nfs_dulookup *dulp, nfsnode_t dnp, vfs_context_t ctx)
6888{
6889 struct nfsmount *nmp = NFSTONMP(dnp);
6890 struct nfsreq *req = &dulp->du_req;
6891
6892 if (!nmp || !(dulp->du_flags & NFS_DULOOKUP_DOIT) || (dulp->du_flags & NFS_DULOOKUP_INPROG)) {
6893 return;
6894 }
6895 if (!nmp->nm_funcs->nf_lookup_rpc_async(dnp, dulp->du_cn.cn_nameptr,
6896 dulp->du_cn.cn_namelen, ctx, &req)) {
6897 dulp->du_flags |= NFS_DULOOKUP_INPROG;
6898 }
6899}
6900
6901/*
6902 * finish an async "._" file lookup request and clean up the structure
6903 */
6904void
6905nfs_dulookup_finish(struct nfs_dulookup *dulp, nfsnode_t dnp, vfs_context_t ctx)
6906{
6907 struct nfsmount *nmp = NFSTONMP(dnp);
6908 int error;
6909 nfsnode_t du_np;
6910 u_int64_t xid;
6911 fhandle_t fh;
6912 struct nfs_vattr nvattr;
6913
6914 if (!nmp || !(dulp->du_flags & NFS_DULOOKUP_INPROG)) {
6915 goto out;
6916 }
6917
6918 NVATTR_INIT(&nvattr);
6919 error = nmp->nm_funcs->nf_lookup_rpc_async_finish(dnp, dulp->du_cn.cn_nameptr,
6920 dulp->du_cn.cn_namelen, ctx, &dulp->du_req, &xid, &fh, &nvattr);
6921 dulp->du_flags &= ~NFS_DULOOKUP_INPROG;
6922 if (error == ENOENT) {
6923 /* add a negative entry in the name cache */
6924 nfs_node_lock_force(dnp);
6925 cache_enter(NFSTOV(dnp), NULL, &dulp->du_cn);
6926 dnp->n_flag |= NNEGNCENTRIES;
6927 nfs_node_unlock(dnp);
6928 } else if (!error) {
6929 error = nfs_nget(NFSTOMP(dnp), dnp, &dulp->du_cn, fh.fh_data, fh.fh_len,
6930 &nvattr, &xid, dulp->du_req.r_auth, NG_MAKEENTRY, &du_np);
6931 if (!error) {
6932 nfs_node_unlock(du_np);
6933 vnode_put(NFSTOV(du_np));
6934 }
6935 }
6936 NVATTR_CLEANUP(&nvattr);
6937out:
6938 if (dulp->du_flags & NFS_DULOOKUP_INPROG) {
6939 nfs_request_async_cancel(&dulp->du_req);
6940 }
6941 if (dulp->du_cn.cn_nameptr && (dulp->du_cn.cn_nameptr != dulp->du_smallname)) {
6942 FREE(dulp->du_cn.cn_nameptr, M_TEMP);
6943 }
6944}
6945
6946
6947/*
6948 * NFS Version 3 commit RPC
6949 */
6950int
6951nfs3_commit_rpc(
6952 nfsnode_t np,
6953 uint64_t offset,
6954 uint64_t count,
6955 kauth_cred_t cred,
6956 uint64_t wverf)
6957{
6958 struct nfsmount *nmp;
6959 int error = 0, lockerror, status, wccpostattr = 0, nfsvers;
6960 struct timespec premtime = { .tv_sec = 0, .tv_nsec = 0 };
6961 u_int64_t xid, newwverf;
6962 uint32_t count32;
6963 struct nfsm_chain nmreq, nmrep;
6964
6965 nmp = NFSTONMP(np);
6966 FSDBG(521, np, offset, count, nmp ? nmp->nm_state : 0);
6967 if (nfs_mount_gone(nmp)) {
6968 return ENXIO;
6969 }
6970 if (!(nmp->nm_state & NFSSTA_HASWRITEVERF)) {
6971 return 0;
6972 }
6973 nfsvers = nmp->nm_vers;
6974
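	/*
	 * The V3 COMMIT count is 32 bits on the wire; if the byte range
	 * doesn't fit, send a count of 0, which asks the server to commit
	 * everything from the offset through the end of the file.
	 */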
6975 if (count > UINT32_MAX) {
6976 count32 = 0;
6977 } else {
6978 count32 = count;
6979 }
6980
6981 nfsm_chain_null(&nmreq);
6982 nfsm_chain_null(&nmrep);
6983
6984 nfsm_chain_build_alloc_init(error, &nmreq, NFSX_FH(NFS_VER3));
6985 nfsm_chain_add_fh(error, &nmreq, nfsvers, np->n_fhp, np->n_fhsize);
6986 nfsm_chain_add_64(error, &nmreq, offset);
6987 nfsm_chain_add_32(error, &nmreq, count32);
6988 nfsm_chain_build_done(error, &nmreq);
6989 nfsmout_if(error);
6990 error = nfs_request2(np, NULL, &nmreq, NFSPROC_COMMIT,
6991 current_thread(), cred, NULL, 0, &nmrep, &xid, &status);
6992 if ((lockerror = nfs_node_lock(np))) {
6993 error = lockerror;
6994 }
6995 /* can we do anything useful with the wcc info? */
6996 nfsm_chain_get_wcc_data(error, &nmrep, np, &premtime, &wccpostattr, &xid);
6997 if (!lockerror) {
6998 nfs_node_unlock(np);
6999 }
7000 if (!error) {
7001 error = status;
7002 }
7003 nfsm_chain_get_64(error, &nmrep, newwverf);
7004 nfsmout_if(error);
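	/*
	 * Update the mount's write verifier with the one the server returned;
	 * if it no longer matches the verifier the caller's writes were sent
	 * under, return NFSERR_STALEWRITEVERF so those writes get resent.
	 */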
7005 lck_mtx_lock(&nmp->nm_lock);
7006 if (nmp->nm_verf != newwverf) {
7007 nmp->nm_verf = newwverf;
7008 }
7009 if (wverf != newwverf) {
7010 error = NFSERR_STALEWRITEVERF;
7011 }
7012 lck_mtx_unlock(&nmp->nm_lock);
7013nfsmout:
7014 nfsm_chain_cleanup(&nmreq);
7015 nfsm_chain_cleanup(&nmrep);
7016 return error;
7017}
7018
7019
7020int
7021nfs_vnop_blockmap(
7022 __unused struct vnop_blockmap_args /* {
7023 * struct vnodeop_desc *a_desc;
7024 * vnode_t a_vp;
7025 * off_t a_foffset;
7026 * size_t a_size;
7027 * daddr64_t *a_bpn;
7028 * size_t *a_run;
7029 * void *a_poff;
7030 * int a_flags;
7031 * } */*ap)
7032{
7033 return ENOTSUP;
7034}
7035
7036
7037/*
7038 * fsync vnode op. Just call nfs_flush().
7039 */
7040/* ARGSUSED */
7041int
7042nfs_vnop_fsync(
7043 struct vnop_fsync_args /* {
7044 * struct vnodeop_desc *a_desc;
7045 * vnode_t a_vp;
7046 * int a_waitfor;
7047 * vfs_context_t a_context;
7048 * } */*ap)
7049{
7050 return nfs_flush(VTONFS(ap->a_vp), ap->a_waitfor, vfs_context_thread(ap->a_context), 0);
7051}
7052
7053
7054/*
7055 * Do an NFS pathconf RPC.
7056 */
7057int
7058nfs3_pathconf_rpc(
7059 nfsnode_t np,
7060 struct nfs_fsattr *nfsap,
7061 vfs_context_t ctx)
7062{
7063 u_int64_t xid;
7064 int error = 0, lockerror, status, nfsvers;
7065 struct nfsm_chain nmreq, nmrep;
7066 struct nfsmount *nmp = NFSTONMP(np);
7067 uint32_t val = 0;
7068
7069 if (nfs_mount_gone(nmp)) {
7070 return ENXIO;
7071 }
7072 nfsvers = nmp->nm_vers;
7073
7074 nfsm_chain_null(&nmreq);
7075 nfsm_chain_null(&nmrep);
7076
7077 /* fetch pathconf info from server */
7078 nfsm_chain_build_alloc_init(error, &nmreq, NFSX_FH(NFS_VER3));
7079 nfsm_chain_add_fh(error, &nmreq, nfsvers, np->n_fhp, np->n_fhsize);
7080 nfsm_chain_build_done(error, &nmreq);
7081 nfsmout_if(error);
7082 error = nfs_request(np, NULL, &nmreq, NFSPROC_PATHCONF, ctx, NULL, &nmrep, &xid, &status);
7083 if ((lockerror = nfs_node_lock(np))) {
7084 error = lockerror;
7085 }
7086 nfsm_chain_postop_attr_update(error, &nmrep, np, &xid);
7087 if (!lockerror) {
7088 nfs_node_unlock(np);
7089 }
7090 if (!error) {
7091 error = status;
7092 }
7093 nfsm_chain_get_32(error, &nmrep, nfsap->nfsa_maxlink);
7094 nfsm_chain_get_32(error, &nmrep, nfsap->nfsa_maxname);
7095 nfsap->nfsa_flags &= ~(NFS_FSFLAG_NO_TRUNC | NFS_FSFLAG_CHOWN_RESTRICTED | NFS_FSFLAG_CASE_INSENSITIVE | NFS_FSFLAG_CASE_PRESERVING);
7096 nfsm_chain_get_32(error, &nmrep, val);
7097 if (val) {
7098 nfsap->nfsa_flags |= NFS_FSFLAG_NO_TRUNC;
7099 }
7100 nfsm_chain_get_32(error, &nmrep, val);
7101 if (val) {
7102 nfsap->nfsa_flags |= NFS_FSFLAG_CHOWN_RESTRICTED;
7103 }
7104 nfsm_chain_get_32(error, &nmrep, val);
7105 if (val) {
7106 nfsap->nfsa_flags |= NFS_FSFLAG_CASE_INSENSITIVE;
7107 }
7108 nfsm_chain_get_32(error, &nmrep, val);
7109 if (val) {
7110 nfsap->nfsa_flags |= NFS_FSFLAG_CASE_PRESERVING;
7111 }
7112 NFS_BITMAP_SET(nfsap->nfsa_bitmap, NFS_FATTR_MAXLINK);
7113 NFS_BITMAP_SET(nfsap->nfsa_bitmap, NFS_FATTR_MAXNAME);
7114 NFS_BITMAP_SET(nfsap->nfsa_bitmap, NFS_FATTR_NO_TRUNC);
7115 NFS_BITMAP_SET(nfsap->nfsa_bitmap, NFS_FATTR_CHOWN_RESTRICTED);
7116 NFS_BITMAP_SET(nfsap->nfsa_bitmap, NFS_FATTR_CASE_INSENSITIVE);
7117 NFS_BITMAP_SET(nfsap->nfsa_bitmap, NFS_FATTR_CASE_PRESERVING);
7118nfsmout:
7119 nfsm_chain_cleanup(&nmreq);
7120 nfsm_chain_cleanup(&nmrep);
7121 return error;
7122}
7123
7124/* save pathconf info for NFSv3 mount */
7125void
7126nfs3_pathconf_cache(struct nfsmount *nmp, struct nfs_fsattr *nfsap)
7127{
7128 nmp->nm_fsattr.nfsa_maxlink = nfsap->nfsa_maxlink;
7129 nmp->nm_fsattr.nfsa_maxname = nfsap->nfsa_maxname;
7130 nmp->nm_fsattr.nfsa_flags &= ~(NFS_FSFLAG_NO_TRUNC | NFS_FSFLAG_CHOWN_RESTRICTED | NFS_FSFLAG_CASE_INSENSITIVE | NFS_FSFLAG_CASE_PRESERVING);
7131 nmp->nm_fsattr.nfsa_flags |= nfsap->nfsa_flags & NFS_FSFLAG_NO_TRUNC;
7132 nmp->nm_fsattr.nfsa_flags |= nfsap->nfsa_flags & NFS_FSFLAG_CHOWN_RESTRICTED;
7133 nmp->nm_fsattr.nfsa_flags |= nfsap->nfsa_flags & NFS_FSFLAG_CASE_INSENSITIVE;
7134 nmp->nm_fsattr.nfsa_flags |= nfsap->nfsa_flags & NFS_FSFLAG_CASE_PRESERVING;
7135 NFS_BITMAP_SET(nmp->nm_fsattr.nfsa_bitmap, NFS_FATTR_MAXLINK);
7136 NFS_BITMAP_SET(nmp->nm_fsattr.nfsa_bitmap, NFS_FATTR_MAXNAME);
7137 NFS_BITMAP_SET(nmp->nm_fsattr.nfsa_bitmap, NFS_FATTR_NO_TRUNC);
7138 NFS_BITMAP_SET(nmp->nm_fsattr.nfsa_bitmap, NFS_FATTR_CHOWN_RESTRICTED);
7139 NFS_BITMAP_SET(nmp->nm_fsattr.nfsa_bitmap, NFS_FATTR_CASE_INSENSITIVE);
7140 NFS_BITMAP_SET(nmp->nm_fsattr.nfsa_bitmap, NFS_FATTR_CASE_PRESERVING);
7141 nmp->nm_state |= NFSSTA_GOTPATHCONF;
7142}
7143
7144/*
7145 * Return POSIX pathconf information applicable to nfs.
7146 *
7147 * The NFS V2 protocol doesn't support this, so just return EINVAL
7148 * for V2.
7149 */
7150/* ARGSUSED */
7151int
7152nfs_vnop_pathconf(
7153 struct vnop_pathconf_args /* {
7154 * struct vnodeop_desc *a_desc;
7155 * vnode_t a_vp;
7156 * int a_name;
7157 * int32_t *a_retval;
7158 * vfs_context_t a_context;
7159 * } */*ap)
7160{
7161 vnode_t vp = ap->a_vp;
7162 nfsnode_t np = VTONFS(vp);
7163 struct nfsmount *nmp;
7164 struct nfs_fsattr nfsa, *nfsap;
7165 int error = 0;
7166 uint64_t maxFileSize;
7167 uint nbits;
7168
7169 nmp = VTONMP(vp);
7170 if (nfs_mount_gone(nmp)) {
7171 return ENXIO;
7172 }
7173
7174 switch (ap->a_name) {
7175 case _PC_LINK_MAX:
7176 case _PC_NAME_MAX:
7177 case _PC_CHOWN_RESTRICTED:
7178 case _PC_NO_TRUNC:
7179 case _PC_CASE_SENSITIVE:
7180 case _PC_CASE_PRESERVING:
7181 break;
7182 case _PC_FILESIZEBITS:
7183 if (nmp->nm_vers == NFS_VER2) {
7184 *ap->a_retval = 32;
7185 return 0;
7186 }
7187 break;
7188 case _PC_XATTR_SIZE_BITS:
7189 /* Do we support xattrs natively? */
7190 if (nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_NAMED_ATTR) {
7191 break; /* Yes */
7192 }
7193 /* No... so just return an error */
7194 /* FALLTHROUGH */
7195 default:
7196 /* don't bother contacting the server if we know the answer */
7197 return EINVAL;
7198 }
7199
7200 if (nmp->nm_vers == NFS_VER2) {
7201 return EINVAL;
7202 }
7203
7204 lck_mtx_lock(&nmp->nm_lock);
7205 if (nmp->nm_vers == NFS_VER3) {
7206 if (!(nmp->nm_state & NFSSTA_GOTPATHCONF)) {
7207 /* no pathconf info cached */
7208 lck_mtx_unlock(&nmp->nm_lock);
7209 NFS_CLEAR_ATTRIBUTES(nfsa.nfsa_bitmap);
7210 error = nfs3_pathconf_rpc(np, &nfsa, ap->a_context);
7211 if (error) {
7212 return error;
7213 }
7214 nmp = VTONMP(vp);
7215 if (nfs_mount_gone(nmp)) {
7216 return ENXIO;
7217 }
7218 lck_mtx_lock(&nmp->nm_lock);
7219 if (nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_HOMOGENEOUS) {
7220 /* all files have the same pathconf info, */
7221 /* so cache a copy of the results */
7222 nfs3_pathconf_cache(nmp, &nfsa);
7223 }
7224 nfsap = &nfsa;
7225 } else {
7226 nfsap = &nmp->nm_fsattr;
7227 }
7228 }
7229#if CONFIG_NFS4
7230 else if (!(nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_HOMOGENEOUS)) {
7231 /* no pathconf info cached */
7232 lck_mtx_unlock(&nmp->nm_lock);
7233 NFS_CLEAR_ATTRIBUTES(nfsa.nfsa_bitmap);
7234 error = nfs4_pathconf_rpc(np, &nfsa, ap->a_context);
7235 if (error) {
7236 return error;
7237 }
7238 nmp = VTONMP(vp);
7239 if (nfs_mount_gone(nmp)) {
7240 return ENXIO;
7241 }
7242 lck_mtx_lock(&nmp->nm_lock);
7243 nfsap = &nfsa;
7244 }
7245#endif
7246 else {
7247 nfsap = &nmp->nm_fsattr;
7248 }
7249 switch (ap->a_name) {
7250 case _PC_LINK_MAX:
7251 if (NFS_BITMAP_ISSET(nfsap->nfsa_bitmap, NFS_FATTR_MAXLINK)) {
7252 *ap->a_retval = nfsap->nfsa_maxlink;
7253#if CONFIG_NFS4
7254 } else if ((nmp->nm_vers == NFS_VER4) && NFS_BITMAP_ISSET(np->n_vattr.nva_bitmap, NFS_FATTR_MAXLINK)) {
7255 *ap->a_retval = np->n_vattr.nva_maxlink;
7256#endif
7257 } else {
7258 error = EINVAL;
7259 }
7260 break;
7261 case _PC_NAME_MAX:
7262 if (NFS_BITMAP_ISSET(nfsap->nfsa_bitmap, NFS_FATTR_MAXNAME)) {
7263 *ap->a_retval = nfsap->nfsa_maxname;
7264 } else {
7265 error = EINVAL;
7266 }
7267 break;
7268 case _PC_CHOWN_RESTRICTED:
7269 if (NFS_BITMAP_ISSET(nfsap->nfsa_bitmap, NFS_FATTR_CHOWN_RESTRICTED)) {
7270 *ap->a_retval = (nfsap->nfsa_flags & NFS_FSFLAG_CHOWN_RESTRICTED) ? 200112 /* _POSIX_CHOWN_RESTRICTED */ : 0;
7271 } else {
7272 error = EINVAL;
7273 }
7274 break;
7275 case _PC_NO_TRUNC:
7276 if (NFS_BITMAP_ISSET(nfsap->nfsa_bitmap, NFS_FATTR_NO_TRUNC)) {
7277 *ap->a_retval = (nfsap->nfsa_flags & NFS_FSFLAG_NO_TRUNC) ? 200112 /* _POSIX_NO_TRUNC */ : 0;
7278 } else {
7279 error = EINVAL;
7280 }
7281 break;
7282 case _PC_CASE_SENSITIVE:
7283 if (NFS_BITMAP_ISSET(nfsap->nfsa_bitmap, NFS_FATTR_CASE_INSENSITIVE)) {
7284 *ap->a_retval = (nfsap->nfsa_flags & NFS_FSFLAG_CASE_INSENSITIVE) ? 0 : 1;
7285 } else {
7286 error = EINVAL;
7287 }
7288 break;
7289 case _PC_CASE_PRESERVING:
7290 if (NFS_BITMAP_ISSET(nfsap->nfsa_bitmap, NFS_FATTR_CASE_PRESERVING)) {
7291 *ap->a_retval = (nfsap->nfsa_flags & NFS_FSFLAG_CASE_PRESERVING) ? 1 : 0;
7292 } else {
7293 error = EINVAL;
7294 }
7295 break;
7296 case _PC_XATTR_SIZE_BITS: /* same as file size bits if named attrs supported */
7297 case _PC_FILESIZEBITS:
7298 if (!NFS_BITMAP_ISSET(nfsap->nfsa_bitmap, NFS_FATTR_MAXFILESIZE)) {
7299 *ap->a_retval = 64;
7300 error = 0;
7301 break;
7302 }
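		/*
		 * Report the number of bits needed to represent the server's
		 * maximum file size, i.e. the position of its highest set bit,
		 * found by narrowing the search 32/16/8/4/2 bits at a time.
		 */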
7303 maxFileSize = nfsap->nfsa_maxfilesize;
7304 nbits = 1;
7305 if (maxFileSize & 0xffffffff00000000ULL) {
7306 nbits += 32;
7307 maxFileSize >>= 32;
7308 }
7309 if (maxFileSize & 0xffff0000) {
7310 nbits += 16;
7311 maxFileSize >>= 16;
7312 }
7313 if (maxFileSize & 0xff00) {
7314 nbits += 8;
7315 maxFileSize >>= 8;
7316 }
7317 if (maxFileSize & 0xf0) {
7318 nbits += 4;
7319 maxFileSize >>= 4;
7320 }
7321 if (maxFileSize & 0xc) {
7322 nbits += 2;
7323 maxFileSize >>= 2;
7324 }
7325 if (maxFileSize & 0x2) {
7326 nbits += 1;
7327 }
7328 *ap->a_retval = nbits;
7329 break;
7330 default:
7331 error = EINVAL;
7332 }
7333
7334 lck_mtx_unlock(&nmp->nm_lock);
7335
7336 return error;
7337}
7338
7339/*
7340 * Read wrapper for special devices.
7341 */
7342int
7343nfsspec_vnop_read(
7344 struct vnop_read_args /* {
7345 * struct vnodeop_desc *a_desc;
7346 * vnode_t a_vp;
7347 * struct uio *a_uio;
7348 * int a_ioflag;
7349 * vfs_context_t a_context;
7350 * } */*ap)
7351{
7352 nfsnode_t np = VTONFS(ap->a_vp);
7353 struct timespec now;
7354 int error;
7355
7356 /*
7357 * Set access flag.
7358 */
7359 if ((error = nfs_node_lock(np))) {
7360 return error;
7361 }
7362 np->n_flag |= NACC;
7363 nanotime(&now);
7364 np->n_atim.tv_sec = now.tv_sec;
7365 np->n_atim.tv_nsec = now.tv_nsec;
7366 nfs_node_unlock(np);
7367 return VOCALL(spec_vnodeop_p, VOFFSET(vnop_read), ap);
7368}
7369
7370/*
7371 * Write wrapper for special devices.
7372 */
7373int
7374nfsspec_vnop_write(
7375 struct vnop_write_args /* {
7376 * struct vnodeop_desc *a_desc;
7377 * vnode_t a_vp;
7378 * struct uio *a_uio;
7379 * int a_ioflag;
7380 * vfs_context_t a_context;
7381 * } */*ap)
7382{
7383 nfsnode_t np = VTONFS(ap->a_vp);
7384 struct timespec now;
7385 int error;
7386
7387 /*
7388 * Set update flag.
7389 */
7390 if ((error = nfs_node_lock(np))) {
7391 return error;
7392 }
7393 np->n_flag |= NUPD;
7394 nanotime(&now);
7395 np->n_mtim.tv_sec = now.tv_sec;
7396 np->n_mtim.tv_nsec = now.tv_nsec;
7397 nfs_node_unlock(np);
7398 return VOCALL(spec_vnodeop_p, VOFFSET(vnop_write), ap);
7399}
7400
7401/*
7402 * Close wrapper for special devices.
7403 *
7404 * Update the times on the nfsnode then do device close.
7405 */
7406int
7407nfsspec_vnop_close(
7408 struct vnop_close_args /* {
7409 * struct vnodeop_desc *a_desc;
7410 * vnode_t a_vp;
7411 * int a_fflag;
7412 * vfs_context_t a_context;
7413 * } */*ap)
7414{
7415 vnode_t vp = ap->a_vp;
7416 nfsnode_t np = VTONFS(vp);
7417 struct vnode_attr vattr;
7418 mount_t mp;
7419 int error;
7420
7421 if ((error = nfs_node_lock(np))) {
7422 return error;
7423 }
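	/*
	 * If reads or writes through the device recorded access/update times
	 * (NACC/NUPD), push them to the server with a setattr, but only on
	 * the last close of a writable mount.
	 */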
7424 if (np->n_flag & (NACC | NUPD)) {
7425 np->n_flag |= NCHG;
7426 if (!vnode_isinuse(vp, 0) && (mp = vnode_mount(vp)) && !vfs_isrdonly(mp)) {
7427 VATTR_INIT(&vattr);
7428 if (np->n_flag & NACC) {
7429 vattr.va_access_time = np->n_atim;
7430 VATTR_SET_ACTIVE(&vattr, va_access_time);
7431 }
7432 if (np->n_flag & NUPD) {
7433 vattr.va_modify_time = np->n_mtim;
7434 VATTR_SET_ACTIVE(&vattr, va_modify_time);
7435 }
7436 nfs_node_unlock(np);
7437 vnode_setattr(vp, &vattr, ap->a_context);
7438 } else {
7439 nfs_node_unlock(np);
7440 }
7441 } else {
7442 nfs_node_unlock(np);
7443 }
7444 return VOCALL(spec_vnodeop_p, VOFFSET(vnop_close), ap);
7445}
7446
7447#if FIFO
7448extern vnop_t **fifo_vnodeop_p;
7449
7450/*
7451 * Read wrapper for fifos.
7452 */
7453int
7454nfsfifo_vnop_read(
7455 struct vnop_read_args /* {
7456 * struct vnodeop_desc *a_desc;
7457 * vnode_t a_vp;
7458 * struct uio *a_uio;
7459 * int a_ioflag;
7460 * vfs_context_t a_context;
7461 * } */*ap)
7462{
7463 nfsnode_t np = VTONFS(ap->a_vp);
7464 struct timespec now;
7465 int error;
7466
7467 /*
7468 * Set access flag.
7469 */
7470 if ((error = nfs_node_lock(np))) {
7471 return error;
7472 }
7473 np->n_flag |= NACC;
7474 nanotime(&now);
7475 np->n_atim.tv_sec = now.tv_sec;
7476 np->n_atim.tv_nsec = now.tv_nsec;
7477 nfs_node_unlock(np);
7478 return VOCALL(fifo_vnodeop_p, VOFFSET(vnop_read), ap);
7479}
7480
7481/*
7482 * Write wrapper for fifos.
7483 */
7484int
7485nfsfifo_vnop_write(
7486 struct vnop_write_args /* {
7487 * struct vnodeop_desc *a_desc;
7488 * vnode_t a_vp;
7489 * struct uio *a_uio;
7490 * int a_ioflag;
7491 * vfs_context_t a_context;
7492 * } */*ap)
7493{
7494 nfsnode_t np = VTONFS(ap->a_vp);
7495 struct timespec now;
7496 int error;
7497
7498 /*
7499 * Set update flag.
7500 */
7501 if ((error = nfs_node_lock(np))) {
7502 return error;
7503 }
7504 np->n_flag |= NUPD;
7505 nanotime(&now);
7506 np->n_mtim.tv_sec = now.tv_sec;
7507 np->n_mtim.tv_nsec = now.tv_nsec;
7508 nfs_node_unlock(np);
7509 return VOCALL(fifo_vnodeop_p, VOFFSET(vnop_write), ap);
7510}
7511
7512/*
7513 * Close wrapper for fifos.
7514 *
7515 * Update the times on the nfsnode then do fifo close.
7516 */
7517int
7518nfsfifo_vnop_close(
7519 struct vnop_close_args /* {
7520 * struct vnodeop_desc *a_desc;
7521 * vnode_t a_vp;
7522 * int a_fflag;
7523 * vfs_context_t a_context;
7524 * } */*ap)
7525{
7526 vnode_t vp = ap->a_vp;
7527 nfsnode_t np = VTONFS(vp);
7528 struct vnode_attr vattr;
7529 struct timespec now;
7530 mount_t mp;
7531 int error;
7532
7533 if ((error = nfs_node_lock(np))) {
7534 return error;
7535 }
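	/*
	 * Fold any pending access/update times into the node now and, on the
	 * last close of a writable mount, push them to the server with a setattr.
	 */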
7536 if (np->n_flag & (NACC | NUPD)) {
7537 nanotime(&now);
7538 if (np->n_flag & NACC) {
7539 np->n_atim.tv_sec = now.tv_sec;
7540 np->n_atim.tv_nsec = now.tv_nsec;
7541 }
7542 if (np->n_flag & NUPD) {
7543 np->n_mtim.tv_sec = now.tv_sec;
7544 np->n_mtim.tv_nsec = now.tv_nsec;
7545 }
7546 np->n_flag |= NCHG;
7547 if (!vnode_isinuse(vp, 1) && (mp = vnode_mount(vp)) && !vfs_isrdonly(mp)) {
7548 VATTR_INIT(&vattr);
7549 if (np->n_flag & NACC) {
7550 vattr.va_access_time = np->n_atim;
7551 VATTR_SET_ACTIVE(&vattr, va_access_time);
7552 }
7553 if (np->n_flag & NUPD) {
7554 vattr.va_modify_time = np->n_mtim;
7555 VATTR_SET_ACTIVE(&vattr, va_modify_time);
7556 }
7557 nfs_node_unlock(np);
7558 vnode_setattr(vp, &vattr, ap->a_context);
7559 } else {
7560 nfs_node_unlock(np);
7561 }
7562 } else {
7563 nfs_node_unlock(np);
7564 }
7565 return VOCALL(fifo_vnodeop_p, VOFFSET(vnop_close), ap);
7566}
7567#endif /* FIFO */
7568
7569/*ARGSUSED*/
7570int
7571nfs_vnop_ioctl(
7572 struct vnop_ioctl_args /* {
7573 * struct vnodeop_desc *a_desc;
7574 * vnode_t a_vp;
7575 * u_int32_t a_command;
7576 * caddr_t a_data;
7577 * int a_fflag;
7578 * vfs_context_t a_context;
7579 * } */*ap)
7580{
7581 vfs_context_t ctx = ap->a_context;
7582 vnode_t vp = ap->a_vp;
7583 struct nfsmount *mp = VTONMP(vp);
7584 int error = ENOTTY;
7585#if CONFIG_NFS_GSS
7586 struct user_nfs_gss_principal gprinc = {};
7587 uint32_t len;
7588#endif
7589
7590 if (mp == NULL) {
7591 return ENXIO;
7592 }
7593 switch (ap->a_command) {
7594 case F_FULLFSYNC:
7595 if (vnode_vfsisrdonly(vp)) {
7596 return EROFS;
7597 }
7598 error = nfs_flush(VTONFS(vp), MNT_WAIT, vfs_context_thread(ctx), 0);
7599 break;
7600#if CONFIG_NFS_GSS
7601 case NFS_IOC_DESTROY_CRED:
7602 if (!auth_is_kerberized(mp->nm_auth)) {
7603 return ENOTSUP;
7604 }
7605 error = nfs_gss_clnt_ctx_remove(mp, vfs_context_ucred(ctx));
7606 break;
7607 case NFS_IOC_SET_CRED:
7608 case NFS_IOC_SET_CRED64:
7609 if (!auth_is_kerberized(mp->nm_auth)) {
7610 return ENOTSUP;
7611 }
7612 if ((ap->a_command == NFS_IOC_SET_CRED && vfs_context_is64bit(ctx)) ||
7613 (ap->a_command == NFS_IOC_SET_CRED64 && !vfs_context_is64bit(ctx))) {
7614 return EINVAL;
7615 }
7616 if (vfs_context_is64bit(ctx)) {
7617 gprinc = *(struct user_nfs_gss_principal *)ap->a_data;
7618 } else {
7619 struct nfs_gss_principal *tp;
7620 tp = (struct nfs_gss_principal *)ap->a_data;
7621 gprinc.princlen = tp->princlen;
7622 gprinc.nametype = tp->nametype;
7623 gprinc.principal = CAST_USER_ADDR_T(tp->principal);
7624 }
7625 NFS_DBG(NFS_FAC_GSS, 7, "Enter NFS_FSCTL_SET_CRED (64-bit=%d): principal length %d name type %d usr pointer 0x%llx\n", vfs_context_is64bit(ctx), gprinc.princlen, gprinc.nametype, (unsigned long long)gprinc.principal);
7626 if (gprinc.princlen > MAXPATHLEN) {
7627 return EINVAL;
7628 }
7629 uint8_t *p;
7630 MALLOC(p, uint8_t *, gprinc.princlen + 1, M_TEMP, M_WAITOK | M_ZERO);
7631 if (p == NULL) {
7632 return ENOMEM;
7633 }
7634 error = copyin(gprinc.principal, p, gprinc.princlen);
7635 if (error) {
7636 NFS_DBG(NFS_FAC_GSS, 7, "NFS_FSCTL_SET_CRED could not copy in principal data of len %d: %d\n",
7637 gprinc.princlen, error);
7638 FREE(p, M_TEMP);
7639 return error;
7640 }
7641 NFS_DBG(NFS_FAC_GSS, 7, "Setting credential to principal %s\n", p);
7642 error = nfs_gss_clnt_ctx_set_principal(mp, ctx, p, gprinc.princlen, gprinc.nametype);
7643 NFS_DBG(NFS_FAC_GSS, 7, "Setting credential to principal %s returned %d\n", p, error);
7644 FREE(p, M_TEMP);
7645 break;
7646 case NFS_IOC_GET_CRED:
7647 case NFS_IOC_GET_CRED64:
7648 if (!auth_is_kerberized(mp->nm_auth)) {
7649 return ENOTSUP;
7650 }
7651 if ((ap->a_command == NFS_IOC_GET_CRED && vfs_context_is64bit(ctx)) ||
7652 (ap->a_command == NFS_IOC_GET_CRED64 && !vfs_context_is64bit(ctx))) {
7653 return EINVAL;
7654 }
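		/*
		 * Fetch the principal from the GSS context and copy out as much
		 * of it as fits in the caller's buffer; princlen is updated to
		 * the principal's full length.
		 */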
7655 error = nfs_gss_clnt_ctx_get_principal(mp, ctx, &gprinc);
7656 if (error) {
7657 break;
7658 }
7659 if (vfs_context_is64bit(ctx)) {
7660 struct user_nfs_gss_principal *upp = (struct user_nfs_gss_principal *)ap->a_data;
7661 len = upp->princlen;
7662 if (gprinc.princlen < len) {
7663 len = gprinc.princlen;
7664 }
7665 upp->princlen = gprinc.princlen;
7666 upp->nametype = gprinc.nametype;
7667 upp->flags = gprinc.flags;
7668 if (gprinc.principal) {
7669 error = copyout((void *)gprinc.principal, upp->principal, len);
7670 } else {
7671 upp->principal = USER_ADDR_NULL;
7672 }
7673 } else {
7674 struct nfs_gss_principal *u32pp = (struct nfs_gss_principal *)ap->a_data;
7675 len = u32pp->princlen;
7676 if (gprinc.princlen < len) {
7677 len = gprinc.princlen;
7678 }
7679 u32pp->princlen = gprinc.princlen;
7680 u32pp->nametype = gprinc.nametype;
7681 u32pp->flags = gprinc.flags;
7682 if (gprinc.principal) {
7683 error = copyout((void *)gprinc.principal, u32pp->principal, len);
7684 } else {
7685 u32pp->principal = (user32_addr_t)0;
7686 }
7687 }
7688 if (error) {
7689 NFS_DBG(NFS_FAC_GSS, 7, "NFS_FSCTL_GET_CRED could not copy out principal data of len %d: %d\n",
7690 gprinc.princlen, error);
7691 }
7692 if (gprinc.principal) {
7693 FREE(gprinc.principal, M_TEMP);
7694 }
7695#endif /* CONFIG_NFS_GSS */
7696 }
7697
7698 return error;
7699}
7700
7701/*ARGSUSED*/
7702int
7703nfs_vnop_select(
7704 __unused struct vnop_select_args /* {
7705 * struct vnodeop_desc *a_desc;
7706 * vnode_t a_vp;
7707 * int a_which;
7708 * int a_fflags;
7709 * void *a_wql;
7710 * vfs_context_t a_context;
7711 * } */*ap)
7712{
7713 /*
7714 * We were once bogusly using seltrue(), which returns 1. Is this right?
7715 */
7716 return 1;
7717}
7718
7719/*
7720 * vnode OP for pagein using UPL
7721 *
7722 * No buffer I/O, just RPCs straight into the mapped pages.
7723 */
7724int
7725nfs_vnop_pagein(
7726 struct vnop_pagein_args /* {
7727 * struct vnodeop_desc *a_desc;
7728 * vnode_t a_vp;
7729 * upl_t a_pl;
7730 * vm_offset_t a_pl_offset;
7731 * off_t a_f_offset;
7732 * size_t a_size;
7733 * int a_flags;
7734 * vfs_context_t a_context;
7735 * } */*ap)
7736{
7737 vnode_t vp = ap->a_vp;
7738 upl_t pl = ap->a_pl;
7739 size_t size = ap->a_size;
7740 off_t f_offset = ap->a_f_offset;
7741 vm_offset_t pl_offset = ap->a_pl_offset;
7742 int flags = ap->a_flags;
7743 thread_t thd;
7744 kauth_cred_t cred;
7745 nfsnode_t np = VTONFS(vp);
7746 size_t nmrsize, iosize, txsize, rxsize, retsize;
7747 off_t txoffset;
7748 struct nfsmount *nmp;
7749 int error = 0;
7750 vm_offset_t ioaddr, rxaddr;
7751 uio_t uio;
7752 char uio_buf[UIO_SIZEOF(1)];
7753 int nofreeupl = flags & UPL_NOCOMMIT;
7754 upl_page_info_t *plinfo;
7755#define MAXPAGINGREQS 16 /* max outstanding RPCs for pagein/pageout */
7756 struct nfsreq *req[MAXPAGINGREQS];
7757 int nextsend, nextwait;
7758#if CONFIG_NFS4
7759 uint32_t stategenid = 0;
7760#endif
7761 uint32_t restart = 0;
7762 kern_return_t kret;
7763
7764 FSDBG(322, np, f_offset, size, flags);
7765 if (pl == (upl_t)NULL) {
7766 panic("nfs_pagein: no upl");
7767 }
7768
7769 if (size <= 0) {
7770 printf("nfs_pagein: invalid size %ld", size);
7771 if (!nofreeupl) {
7772 (void) ubc_upl_abort_range(pl, pl_offset, size, 0);
7773 }
7774 return EINVAL;
7775 }
7776 if (f_offset < 0 || f_offset >= (off_t)np->n_size || (f_offset & PAGE_MASK_64)) {
7777 if (!nofreeupl) {
7778 ubc_upl_abort_range(pl, pl_offset, size,
7779 UPL_ABORT_ERROR | UPL_ABORT_FREE_ON_EMPTY);
7780 }
7781 return EINVAL;
7782 }
7783
7784 thd = vfs_context_thread(ap->a_context);
7785 cred = ubc_getcred(vp);
7786 if (!IS_VALID_CRED(cred)) {
7787 cred = vfs_context_ucred(ap->a_context);
7788 }
7789
7790 uio = uio_createwithbuffer(1, f_offset, UIO_SYSSPACE, UIO_READ,
7791 &uio_buf, sizeof(uio_buf));
7792
7793 nmp = VTONMP(vp);
7794 if (nfs_mount_gone(nmp)) {
7795 if (!nofreeupl) {
7796 ubc_upl_abort_range(pl, pl_offset, size,
7797 UPL_ABORT_ERROR | UPL_ABORT_FREE_ON_EMPTY);
7798 }
7799 return ENXIO;
7800 }
7801 nmrsize = nmp->nm_rsize;
7802
7803 plinfo = ubc_upl_pageinfo(pl);
7804 kret = ubc_upl_map(pl, &ioaddr);
7805 if (kret != KERN_SUCCESS) {
7806 panic("nfs_vnop_pagein: ubc_upl_map() failed with (%d)", kret);
7807 }
7808 ioaddr += pl_offset;
7809
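	/*
	 * Read the data with up to MAXPAGINGREQS read RPCs in flight at once,
	 * issuing new requests as replies are processed and copying reply data
	 * straight into the mapped UPL pages.
	 */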
7810tryagain:
7811#if CONFIG_NFS4
7812 if (nmp->nm_vers >= NFS_VER4) {
7813 stategenid = nmp->nm_stategenid;
7814 }
7815#endif
7816 txsize = rxsize = size;
7817 txoffset = f_offset;
7818 rxaddr = ioaddr;
7819
7820 bzero(req, sizeof(req));
7821 nextsend = nextwait = 0;
7822 do {
7823 if (np->n_flag & NREVOKE) {
7824 error = EIO;
7825 break;
7826 }
7827 /* send requests while we need to and have available slots */
7828 while ((txsize > 0) && (req[nextsend] == NULL)) {
7829 iosize = MIN(nmrsize, txsize);
7830 if ((error = nmp->nm_funcs->nf_read_rpc_async(np, txoffset, iosize, thd, cred, NULL, &req[nextsend]))) {
7831 req[nextsend] = NULL;
7832 break;
7833 }
7834 txoffset += iosize;
7835 txsize -= iosize;
7836 nextsend = (nextsend + 1) % MAXPAGINGREQS;
7837 }
7838 /* wait while we need to and break out if more requests to send */
7839 while ((rxsize > 0) && req[nextwait]) {
7840 iosize = retsize = MIN(nmrsize, rxsize);
7841 uio_reset(uio, uio_offset(uio), UIO_SYSSPACE, UIO_READ);
7842 uio_addiov(uio, CAST_USER_ADDR_T(rxaddr), iosize);
7843 FSDBG(322, uio_offset(uio), uio_resid(uio), rxaddr, rxsize);
7844#if UPL_DEBUG
7845 upl_ubc_alias_set(pl, (uintptr_t) current_thread(), (uintptr_t) 2);
7846#endif /* UPL_DEBUG */
7847 OSAddAtomic64(1, &nfsstats.pageins);
7848 error = nmp->nm_funcs->nf_read_rpc_async_finish(np, req[nextwait], uio, &retsize, NULL);
7849 req[nextwait] = NULL;
7850 nextwait = (nextwait + 1) % MAXPAGINGREQS;
7851#if CONFIG_NFS4
7852 if ((nmp->nm_vers >= NFS_VER4) && nfs_mount_state_error_should_restart(error)) {
7853 lck_mtx_lock(&nmp->nm_lock);
7854 if ((error != NFSERR_GRACE) && (stategenid == nmp->nm_stategenid)) {
7855 NP(np, "nfs_vnop_pagein: error %d, initiating recovery", error);
7856 nfs_need_recover(nmp, error);
7857 }
7858 lck_mtx_unlock(&nmp->nm_lock);
7859 restart++;
7860 goto cancel;
7861 }
7862#endif
7863 if (error) {
7864 FSDBG(322, uio_offset(uio), uio_resid(uio), error, -1);
7865 break;
7866 }
7867 if (retsize < iosize) {
7868 /* Just zero fill the rest of the valid area. */
7869 int zcnt = iosize - retsize;
7870 bzero((char *)rxaddr + retsize, zcnt);
7871 FSDBG(324, uio_offset(uio), retsize, zcnt, rxaddr);
7872 uio_update(uio, zcnt);
7873 }
7874 rxaddr += iosize;
7875 rxsize -= iosize;
7876 if (txsize) {
7877 break;
7878 }
7879 }
7880 } while (!error && (txsize || rxsize));
7881
7882 restart = 0;
7883
7884 if (error) {
7885#if CONFIG_NFS4
7886cancel:
7887#endif
7888 /* cancel any outstanding requests */
7889 while (req[nextwait]) {
7890 nfs_request_async_cancel(req[nextwait]);
7891 req[nextwait] = NULL;
7892 nextwait = (nextwait + 1) % MAXPAGINGREQS;
7893 }
7894 if (np->n_flag & NREVOKE) {
7895 error = EIO;
7896 } else if (restart) {
7897 if (restart <= nfs_mount_state_max_restarts(nmp)) { /* guard against no progress */
7898 if (error == NFSERR_GRACE) {
7899 tsleep(&nmp->nm_state, (PZERO - 1), "nfsgrace", 2 * hz);
7900 }
7901 if (!(error = nfs_mount_state_wait_for_recovery(nmp))) {
7902 goto tryagain;
7903 }
7904 } else {
7905 NP(np, "nfs_pagein: too many restarts, aborting");
7906 }
7907 }
7908 }
7909
7910 ubc_upl_unmap(pl);
7911
7912 if (!nofreeupl) {
7913 if (error) {
7914 ubc_upl_abort_range(pl, pl_offset, size,
7915 UPL_ABORT_ERROR |
7916 UPL_ABORT_FREE_ON_EMPTY);
7917 } else {
7918 ubc_upl_commit_range(pl, pl_offset, size,
7919 UPL_COMMIT_CLEAR_DIRTY |
7920 UPL_COMMIT_FREE_ON_EMPTY);
7921 }
7922 }
7923 return error;
7924}
7925
7926
7927/*
7928 * the following are needed only by nfs_pageout to know how to handle errors
7929 * see nfs_pageout comments on explanation of actions.
7930 * the errors here are copied from errno.h and errors returned by servers
7931 * are expected to match the same numbers here. If not, our actions may be
7932 * erroneous.
7933 */
7934char nfs_pageouterrorhandler(int);
7935enum actiontype {NOACTION, DUMP, DUMPANDLOG, RETRY, SEVER};
7936#define NFS_ELAST 88
7937static u_char errorcount[NFS_ELAST + 1]; /* better be zeros when initialized */
7938static const char errortooutcome[NFS_ELAST + 1] = {
7939 NOACTION,
7940 DUMP, /* EPERM 1 Operation not permitted */
7941 DUMP, /* ENOENT 2 No such file or directory */
7942 DUMPANDLOG, /* ESRCH 3 No such process */
7943 RETRY, /* EINTR 4 Interrupted system call */
7944 DUMP, /* EIO 5 Input/output error */
7945 DUMP, /* ENXIO 6 Device not configured */
7946 DUMPANDLOG, /* E2BIG 7 Argument list too long */
7947 DUMPANDLOG, /* ENOEXEC 8 Exec format error */
7948 DUMPANDLOG, /* EBADF 9 Bad file descriptor */
7949 DUMPANDLOG, /* ECHILD 10 No child processes */
7950 DUMPANDLOG, /* EDEADLK 11 Resource deadlock avoided - was EAGAIN */
7951 RETRY, /* ENOMEM 12 Cannot allocate memory */
7952 DUMP, /* EACCES 13 Permission denied */
7953 DUMPANDLOG, /* EFAULT 14 Bad address */
7954 DUMPANDLOG, /* ENOTBLK 15 POSIX - Block device required */
7955 RETRY, /* EBUSY 16 Device busy */
7956 DUMP, /* EEXIST 17 File exists */
7957 DUMP, /* EXDEV 18 Cross-device link */
7958 DUMP, /* ENODEV 19 Operation not supported by device */
7959 DUMP, /* ENOTDIR 20 Not a directory */
7960 DUMP, /* EISDIR 21 Is a directory */
7961 DUMP, /* EINVAL 22 Invalid argument */
7962 DUMPANDLOG, /* ENFILE 23 Too many open files in system */
7963 DUMPANDLOG, /* EMFILE 24 Too many open files */
7964 DUMPANDLOG, /* ENOTTY 25 Inappropriate ioctl for device */
7965 DUMPANDLOG, /* ETXTBSY 26 Text file busy - POSIX */
7966 DUMP, /* EFBIG 27 File too large */
7967 DUMP, /* ENOSPC 28 No space left on device */
7968 DUMPANDLOG, /* ESPIPE 29 Illegal seek */
7969 DUMP, /* EROFS 30 Read-only file system */
7970 DUMP, /* EMLINK 31 Too many links */
7971 RETRY, /* EPIPE 32 Broken pipe */
7972 /* math software */
7973 DUMPANDLOG, /* EDOM 33 Numerical argument out of domain */
7974 DUMPANDLOG, /* ERANGE 34 Result too large */
7975 RETRY, /* EAGAIN/EWOULDBLOCK 35 Resource temporarily unavailable */
7976 DUMPANDLOG, /* EINPROGRESS 36 Operation now in progress */
7977 DUMPANDLOG, /* EALREADY 37 Operation already in progress */
7978 /* ipc/network software -- argument errors */
7979 DUMPANDLOG, /* ENOTSOCK 38 Socket operation on non-socket */
7980 DUMPANDLOG, /* EDESTADDRREQ 39 Destination address required */
7981 DUMPANDLOG, /* EMSGSIZE 40 Message too long */
7982 DUMPANDLOG, /* EPROTOTYPE 41 Protocol wrong type for socket */
7983 DUMPANDLOG, /* ENOPROTOOPT 42 Protocol not available */
7984 DUMPANDLOG, /* EPROTONOSUPPORT 43 Protocol not supported */
7985 DUMPANDLOG, /* ESOCKTNOSUPPORT 44 Socket type not supported */
7986 DUMPANDLOG, /* ENOTSUP 45 Operation not supported */
7987 DUMPANDLOG, /* EPFNOSUPPORT 46 Protocol family not supported */
7988 DUMPANDLOG, /* EAFNOSUPPORT 47 Address family not supported by protocol family */
7989 DUMPANDLOG, /* EADDRINUSE 48 Address already in use */
7990 DUMPANDLOG, /* EADDRNOTAVAIL 49 Can't assign requested address */
7991 /* ipc/network software -- operational errors */
7992 RETRY, /* ENETDOWN 50 Network is down */
7993 RETRY, /* ENETUNREACH 51 Network is unreachable */
7994 RETRY, /* ENETRESET 52 Network dropped connection on reset */
7995 RETRY, /* ECONNABORTED 53 Software caused connection abort */
7996 RETRY, /* ECONNRESET 54 Connection reset by peer */
7997 RETRY, /* ENOBUFS 55 No buffer space available */
7998 RETRY, /* EISCONN 56 Socket is already connected */
7999 RETRY, /* ENOTCONN 57 Socket is not connected */
8000 RETRY, /* ESHUTDOWN 58 Can't send after socket shutdown */
8001 RETRY, /* ETOOMANYREFS 59 Too many references: can't splice */
8002 RETRY, /* ETIMEDOUT 60 Operation timed out */
8003 RETRY, /* ECONNREFUSED 61 Connection refused */
8004
8005 DUMPANDLOG, /* ELOOP 62 Too many levels of symbolic links */
8006 DUMP, /* ENAMETOOLONG 63 File name too long */
8007 RETRY, /* EHOSTDOWN 64 Host is down */
8008 RETRY, /* EHOSTUNREACH 65 No route to host */
8009 DUMP, /* ENOTEMPTY 66 Directory not empty */
8010 /* quotas & mush */
8011 DUMPANDLOG, /* EPROCLIM 67 Too many processes */
8012 DUMPANDLOG, /* EUSERS 68 Too many users */
8013 DUMPANDLOG, /* EDQUOT 69 Disc quota exceeded */
8014 /* Network File System */
8015 DUMP, /* ESTALE 70 Stale NFS file handle */
8016 DUMP, /* EREMOTE 71 Too many levels of remote in path */
8017 DUMPANDLOG, /* EBADRPC 72 RPC struct is bad */
8018 DUMPANDLOG, /* ERPCMISMATCH 73 RPC version wrong */
8019 DUMPANDLOG, /* EPROGUNAVAIL 74 RPC prog. not avail */
8020 DUMPANDLOG, /* EPROGMISMATCH 75 Program version wrong */
8021 DUMPANDLOG, /* EPROCUNAVAIL 76 Bad procedure for program */
8022
8023 DUMPANDLOG, /* ENOLCK 77 No locks available */
8024 DUMPANDLOG, /* ENOSYS 78 Function not implemented */
8025 DUMPANDLOG, /* EFTYPE 79 Inappropriate file type or format */
8026 DUMPANDLOG, /* EAUTH 80 Authentication error */
8027 DUMPANDLOG, /* ENEEDAUTH 81 Need authenticator */
8028 /* Intelligent device errors */
8029 DUMPANDLOG, /* EPWROFF 82 Device power is off */
8030 DUMPANDLOG, /* EDEVERR 83 Device error, e.g. paper out */
8031 DUMPANDLOG, /* EOVERFLOW 84 Value too large to be stored in data type */
8032 /* Program loading errors */
8033 DUMPANDLOG, /* EBADEXEC 85 Bad executable */
8034 DUMPANDLOG, /* EBADARCH 86 Bad CPU type in executable */
8035 DUMPANDLOG, /* ESHLIBVERS 87 Shared library version mismatch */
8036 DUMPANDLOG, /* EBADMACHO 88 Malformed Macho file */
8037};
8038
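/* Map a pageout error to the action the pager should take; errors beyond the table are simply dumped. */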
8039char
8040nfs_pageouterrorhandler(int error)
8041{
8042 if (error > NFS_ELAST) {
8043 return DUMP;
8044 } else {
8045 return errortooutcome[error];
8046 }
8047}
8048
8049
8050/*
8051 * vnode OP for pageout using UPL
8052 *
8053 * No buffer I/O, just RPCs straight from the mapped pages.
8054 * File size changes are not permitted in pageout.
8055 */
8056int
8057nfs_vnop_pageout(
8058 struct vnop_pageout_args /* {
8059 * struct vnodeop_desc *a_desc;
8060 * vnode_t a_vp;
8061 * upl_t a_pl;
8062 * vm_offset_t a_pl_offset;
8063 * off_t a_f_offset;
8064 * size_t a_size;
8065 * int a_flags;
8066 * vfs_context_t a_context;
8067 * } */*ap)
8068{
8069 vnode_t vp = ap->a_vp;
8070 upl_t pl = ap->a_pl;
8071 size_t size = ap->a_size;
8072 off_t f_offset = ap->a_f_offset;
8073 vm_offset_t pl_offset = ap->a_pl_offset;
8074 int flags = ap->a_flags;
8075 nfsnode_t np = VTONFS(vp);
8076 thread_t thd;
8077 kauth_cred_t cred;
8078 struct nfsbuf *bp;
8079 struct nfsmount *nmp = VTONMP(vp);
8080 daddr64_t lbn;
8081 int error = 0, iomode;
8082 off_t off, txoffset, rxoffset;
8083 vm_offset_t ioaddr, txaddr, rxaddr;
8084 uio_t auio;
8085 char uio_buf[UIO_SIZEOF(1)];
8086 int nofreeupl = flags & UPL_NOCOMMIT;
8087 size_t nmwsize, biosize, iosize, pgsize, txsize, rxsize, xsize, remsize;
8088 struct nfsreq *req[MAXPAGINGREQS];
8089 int nextsend, nextwait, wverfset, commit;
8090 uint64_t wverf, wverf2;
8091#if CONFIG_NFS4
8092 uint32_t stategenid = 0;
8093#endif
8094 uint32_t vrestart = 0, restart = 0, vrestarts = 0, restarts = 0;
8095 kern_return_t kret;
8096
8097 FSDBG(323, f_offset, size, pl, pl_offset);
8098
8099 if (pl == (upl_t)NULL) {
8100 panic("nfs_pageout: no upl");
8101 }
8102
8103 if (size <= 0) {
8104 printf("nfs_pageout: invalid size %ld", size);
8105 if (!nofreeupl) {
8106 ubc_upl_abort_range(pl, pl_offset, size, 0);
8107 }
8108 return EINVAL;
8109 }
8110
8111 if (!nmp) {
8112 if (!nofreeupl) {
8113 ubc_upl_abort(pl, UPL_ABORT_DUMP_PAGES | UPL_ABORT_FREE_ON_EMPTY);
8114 }
8115 return ENXIO;
8116 }
8117 biosize = nmp->nm_biosize;
8118 nmwsize = nmp->nm_wsize;
8119
8120 nfs_data_lock_noupdate(np, NFS_DATA_LOCK_SHARED);
8121
8122 /*
8123 * Check to see whether the buffer is incore.
8124 * If incore and not busy, invalidate it from the cache.
8125 */
8126 for (iosize = 0; iosize < size; iosize += xsize) {
8127 off = f_offset + iosize;
8128 /* need to make sure we do things on block boundaries */
8129 xsize = biosize - (off % biosize);
8130 if (off + xsize > f_offset + size) {
8131 xsize = f_offset + size - off;
8132 }
8133 lbn = (daddr64_t)(off / biosize);
8134 lck_mtx_lock(nfs_buf_mutex);
8135 if ((bp = nfs_buf_incore(np, lbn))) {
8136 FSDBG(323, off, bp, bp->nb_lflags, bp->nb_flags);
8137 if (nfs_buf_acquire(bp, NBAC_NOWAIT, 0, 0)) {
8138 lck_mtx_unlock(nfs_buf_mutex);
8139 nfs_data_unlock_noupdate(np);
8140 /* no panic. just tell vm we are busy */
8141 if (!nofreeupl) {
8142 ubc_upl_abort_range(pl, pl_offset, size, 0);
8143 }
8144 return EBUSY;
8145 }
8146 if (bp->nb_dirtyend > 0) {
8147 /*
8148 * if there's a dirty range in the buffer, check
8149 * to see if it extends beyond the pageout region
8150 *
8151 * if the dirty region lies completely within the
8152 * pageout region, we just invalidate the buffer
8153 * because it's all being written out now anyway.
8154 *
8155 * if any of the dirty region lies outside the
8156 * pageout region, we'll try to clip the dirty
8157 * region to eliminate the portion that's being
8158 * paged out. If that's not possible, because
8159 * the dirty region extends before and after the
8160 * pageout region, then we'll just return EBUSY.
8161 */
8162 off_t boff, start, end;
8163 boff = NBOFF(bp);
8164 start = off;
8165 end = off + xsize;
8166 /* clip end to EOF */
8167 if (end > (off_t)np->n_size) {
8168 end = np->n_size;
8169 }
8170 start -= boff;
8171 end -= boff;
8172 if ((bp->nb_dirtyoff < start) &&
8173 (bp->nb_dirtyend > end)) {
8174 /*
8175 * not gonna be able to clip the dirty region
8176 *
8177 * But before returning the bad news, move the
8178 * buffer to the start of the delwri list and
8179 * give the list a push to try to flush the
8180 * buffer out.
8181 */
8182 FSDBG(323, np, bp, 0xd00deebc, EBUSY);
8183 nfs_buf_remfree(bp);
8184 TAILQ_INSERT_HEAD(&nfsbufdelwri, bp, nb_free);
8185 nfsbufdelwricnt++;
8186 nfs_buf_drop(bp);
8187 nfs_buf_delwri_push(1);
8188 lck_mtx_unlock(nfs_buf_mutex);
8189 nfs_data_unlock_noupdate(np);
8190 if (!nofreeupl) {
8191 ubc_upl_abort_range(pl, pl_offset, size, 0);
8192 }
8193 return EBUSY;
8194 }
8195 if ((bp->nb_dirtyoff < start) ||
8196 (bp->nb_dirtyend > end)) {
8197 /* clip dirty region, if necessary */
8198 if (bp->nb_dirtyoff < start) {
8199 bp->nb_dirtyend = min(bp->nb_dirtyend, start);
8200 }
8201 if (bp->nb_dirtyend > end) {
8202 bp->nb_dirtyoff = max(bp->nb_dirtyoff, end);
8203 }
8204 FSDBG(323, bp, bp->nb_dirtyoff, bp->nb_dirtyend, 0xd00dee00);
8205 /* we're leaving this block dirty */
8206 nfs_buf_drop(bp);
8207 lck_mtx_unlock(nfs_buf_mutex);
8208 continue;
8209 }
8210 }
8211 nfs_buf_remfree(bp);
8212 lck_mtx_unlock(nfs_buf_mutex);
8213 SET(bp->nb_flags, NB_INVAL);
8214 nfs_node_lock_force(np);
8215 if (ISSET(bp->nb_flags, NB_NEEDCOMMIT)) {
8216 CLR(bp->nb_flags, NB_NEEDCOMMIT);
8217 np->n_needcommitcnt--;
8218 CHECK_NEEDCOMMITCNT(np);
8219 }
8220 nfs_node_unlock(np);
8221 nfs_buf_release(bp, 1);
8222 } else {
8223 lck_mtx_unlock(nfs_buf_mutex);
8224 }
8225 }
8226
8227 thd = vfs_context_thread(ap->a_context);
8228 cred = ubc_getcred(vp);
8229 if (!IS_VALID_CRED(cred)) {
8230 cred = vfs_context_ucred(ap->a_context);
8231 }
8232
8233 nfs_node_lock_force(np);
8234 if (np->n_flag & NWRITEERR) {
8235 error = np->n_error;
8236 nfs_node_unlock(np);
8237 nfs_data_unlock_noupdate(np);
8238 if (!nofreeupl) {
8239 ubc_upl_abort_range(pl, pl_offset, size,
8240 UPL_ABORT_FREE_ON_EMPTY);
8241 }
8242 return error;
8243 }
8244 nfs_node_unlock(np);
8245
8246 if (f_offset < 0 || f_offset >= (off_t)np->n_size ||
8247 f_offset & PAGE_MASK_64 || size & PAGE_MASK_64) {
8248 nfs_data_unlock_noupdate(np);
8249 if (!nofreeupl) {
8250 ubc_upl_abort_range(pl, pl_offset, size,
8251 UPL_ABORT_FREE_ON_EMPTY);
8252 }
8253 return EINVAL;
8254 }
8255
8256 kret = ubc_upl_map(pl, &ioaddr);
8257 if (kret != KERN_SUCCESS) {
8258 panic("nfs_vnop_pageout: ubc_upl_map() failed with (%d)", kret);
8259 }
8260 ioaddr += pl_offset;
8261
8262 if ((u_quad_t)f_offset + size > np->n_size) {
8263 xsize = np->n_size - f_offset;
8264 } else {
8265 xsize = size;
8266 }
8267
8268 pgsize = round_page_64(xsize);
8269 if ((size > pgsize) && !nofreeupl) {
8270 ubc_upl_abort_range(pl, pl_offset + pgsize, size - pgsize,
8271 UPL_ABORT_FREE_ON_EMPTY);
8272 }
8273
8274 /*
8275 * check for partial page and clear the
8276 * contents past end of the file before
8277 * releasing it in the VM page cache
8278 */
8279 if ((u_quad_t)f_offset < np->n_size && (u_quad_t)f_offset + size > np->n_size) {
8280 size_t io = np->n_size - f_offset;
8281 bzero((caddr_t)(ioaddr + io), size - io);
8282 FSDBG(321, np->n_size, f_offset, f_offset + io, size - io);
8283 }
8284 nfs_data_unlock_noupdate(np);
8285
8286 auio = uio_createwithbuffer(1, 0, UIO_SYSSPACE, UIO_WRITE,
8287 &uio_buf, sizeof(uio_buf));
8288
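	/*
	 * Write the data with up to MAXPAGINGREQS UNSTABLE write RPCs in flight
	 * at once.  If not all replies report FILESYNC, follow up with a COMMIT;
	 * a changed write verifier forces the whole pageout to be restarted.
	 */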
8289tryagain:
8290#if CONFIG_NFS4
8291 if (nmp->nm_vers >= NFS_VER4) {
8292 stategenid = nmp->nm_stategenid;
8293 }
8294#endif
8295 wverf = wverf2 = wverfset = 0;
8296 txsize = rxsize = xsize;
8297 txoffset = rxoffset = f_offset;
8298 txaddr = rxaddr = ioaddr;
8299 commit = NFS_WRITE_FILESYNC;
8300
8301 bzero(req, sizeof(req));
8302 nextsend = nextwait = 0;
8303 do {
8304 if (np->n_flag & NREVOKE) {
8305 error = EIO;
8306 break;
8307 }
8308 /* send requests while we need to and have available slots */
8309 while ((txsize > 0) && (req[nextsend] == NULL)) {
8310 iosize = MIN(nmwsize, txsize);
8311 uio_reset(auio, txoffset, UIO_SYSSPACE, UIO_WRITE);
8312 uio_addiov(auio, CAST_USER_ADDR_T(txaddr), iosize);
8313 FSDBG(323, uio_offset(auio), iosize, txaddr, txsize);
8314 OSAddAtomic64(1, &nfsstats.pageouts);
8315 nfs_node_lock_force(np);
8316 np->n_numoutput++;
8317 nfs_node_unlock(np);
8318 vnode_startwrite(vp);
8319 iomode = NFS_WRITE_UNSTABLE;
8320 if ((error = nmp->nm_funcs->nf_write_rpc_async(np, auio, iosize, thd, cred, iomode, NULL, &req[nextsend]))) {
8321 req[nextsend] = NULL;
8322 vnode_writedone(vp);
8323 nfs_node_lock_force(np);
8324 np->n_numoutput--;
8325 nfs_node_unlock(np);
8326 break;
8327 }
8328 txaddr += iosize;
8329 txoffset += iosize;
8330 txsize -= iosize;
8331 nextsend = (nextsend + 1) % MAXPAGINGREQS;
8332 }
8333 /* wait while we need to and break out if more requests to send */
8334 while ((rxsize > 0) && req[nextwait]) {
8335 iosize = remsize = MIN(nmwsize, rxsize);
8336 error = nmp->nm_funcs->nf_write_rpc_async_finish(np, req[nextwait], &iomode, &iosize, &wverf2);
8337 req[nextwait] = NULL;
8338 nextwait = (nextwait + 1) % MAXPAGINGREQS;
8339 vnode_writedone(vp);
8340 nfs_node_lock_force(np);
8341 np->n_numoutput--;
8342 nfs_node_unlock(np);
8343#if CONFIG_NFS4
8344 if ((nmp->nm_vers >= NFS_VER4) && nfs_mount_state_error_should_restart(error)) {
8345 lck_mtx_lock(&nmp->nm_lock);
8346 if ((error != NFSERR_GRACE) && (stategenid == nmp->nm_stategenid)) {
8347 NP(np, "nfs_vnop_pageout: error %d, initiating recovery", error);
8348 nfs_need_recover(nmp, error);
8349 }
8350 lck_mtx_unlock(&nmp->nm_lock);
8351 restart = 1;
8352 goto cancel;
8353 }
8354#endif
8355 if (error) {
8356 FSDBG(323, rxoffset, rxsize, error, -1);
8357 break;
8358 }
8359 if (!wverfset) {
8360 wverf = wverf2;
8361 wverfset = 1;
8362 } else if (wverf != wverf2) {
8363 /* verifier changed, so we need to restart all the writes */
8364 vrestart = 1;
8365 goto cancel;
8366 }
8367 /* Retain the lowest commitment level returned. */
8368 if (iomode < commit) {
8369 commit = iomode;
8370 }
8371 rxaddr += iosize;
8372 rxoffset += iosize;
8373 rxsize -= iosize;
8374 remsize -= iosize;
8375 if (remsize > 0) {
8376 /* need to try sending the remainder */
8377 iosize = remsize;
8378 uio_reset(auio, rxoffset, UIO_SYSSPACE, UIO_WRITE);
8379 uio_addiov(auio, CAST_USER_ADDR_T(rxaddr), remsize);
8380 iomode = NFS_WRITE_UNSTABLE;
8381 error = nfs_write_rpc2(np, auio, thd, cred, &iomode, &wverf2);
8382#if CONFIG_NFS4
8383 if ((nmp->nm_vers >= NFS_VER4) && nfs_mount_state_error_should_restart(error)) {
8384 NP(np, "nfs_vnop_pageout: restart: error %d", error);
8385 lck_mtx_lock(&nmp->nm_lock);
8386 if ((error != NFSERR_GRACE) && (stategenid == nmp->nm_stategenid)) {
8387 NP(np, "nfs_vnop_pageout: error %d, initiating recovery", error);
8388 nfs_need_recover(nmp, error);
8389 }
8390 lck_mtx_unlock(&nmp->nm_lock);
8391 restart = 1;
8392 goto cancel;
8393 }
8394#endif
8395 if (error) {
8396 FSDBG(323, rxoffset, rxsize, error, -1);
8397 break;
8398 }
8399 if (wverf != wverf2) {
8400 /* verifier changed, so we need to restart all the writes */
8401 vrestart = 1;
8402 goto cancel;
8403 }
8404 if (iomode < commit) {
8405 commit = iomode;
8406 }
8407 rxaddr += iosize;
8408 rxoffset += iosize;
8409 rxsize -= iosize;
8410 }
8411 if (txsize) {
8412 break;
8413 }
8414 }
8415 } while (!error && (txsize || rxsize));
8416
8417 vrestart = 0;
8418
8419 if (!error && (commit != NFS_WRITE_FILESYNC)) {
8420 error = nmp->nm_funcs->nf_commit_rpc(np, f_offset, xsize, cred, wverf);
8421 if (error == NFSERR_STALEWRITEVERF) {
8422 vrestart = 1;
8423 error = EIO;
8424 }
8425 }
8426
8427 if (error) {
8428cancel:
8429 /* cancel any outstanding requests */
8430 while (req[nextwait]) {
8431 nfs_request_async_cancel(req[nextwait]);
8432 req[nextwait] = NULL;
8433 nextwait = (nextwait + 1) % MAXPAGINGREQS;
8434 vnode_writedone(vp);
8435 nfs_node_lock_force(np);
8436 np->n_numoutput--;
8437 nfs_node_unlock(np);
8438 }
8439 if (np->n_flag & NREVOKE) {
8440 error = EIO;
8441 } else {
8442 if (vrestart) {
8443 if (++vrestarts <= 100) { /* guard against no progress */
8444 goto tryagain;
8445 }
8446 NP(np, "nfs_pageout: too many restarts, aborting");
8447 FSDBG(323, f_offset, xsize, ERESTART, -1);
8448 }
8449 if (restart) {
8450 if (restarts <= nfs_mount_state_max_restarts(nmp)) { /* guard against no progress */
8451 if (error == NFSERR_GRACE) {
8452 tsleep(&nmp->nm_state, (PZERO - 1), "nfsgrace", 2 * hz);
8453 }
8454 if (!(error = nfs_mount_state_wait_for_recovery(nmp))) {
8455 goto tryagain;
8456 }
8457 } else {
8458 NP(np, "nfs_pageout: too many restarts, aborting");
8459 FSDBG(323, f_offset, xsize, ERESTART, -1);
8460 }
8461 }
8462 }
8463 }
8464
8465 ubc_upl_unmap(pl);
8466
8467 /*
8468 * We've had several different solutions on what to do when the pageout
8469 * gets an error. If we don't handle it, and return an error to the
8470 * caller, vm, it will retry. This can end in endless looping
8471 * between vm and here doing retries of the same page. Doing a dump
8472 * back to vm will get it out of vm's knowledge and we lose whatever
8473 * data existed. This is risky, but in some cases necessary. For
8474 * example, the initial fix here was to do that for ESTALE. In that case
8475 * the server is telling us that the file is no longer the same. We
8476 * would not want to keep paging out to that. We also saw some 151
8477 * errors from an Auspex server, and NFSv3 can return errors higher than
8478 * ELAST. Those along with NFS known server errors we will "dump" from
8479 * vm. Errors we don't expect to occur, we dump and log for further
8480 * analysis. Errors that could be transient, networking ones,
8481 * we let vm "retry". Lastly, errors that we retry, but may have potential
8482 * to storm the network, we "retrywithsleep". "sever" will be used
8483 * in the future to dump all pages of object for cases like ESTALE.
8484 * All this is the basis for the states returned and first guesses on
8485 * error handling. Tweaking expected as more statistics are gathered.
8486 * Note, in the long run we may need another more robust solution to
8487 * have some kind of persistent store when the vm cannot dump nor keep
8488 * retrying as a solution, but this would be a file architectural change
8489 */
8490 if (!nofreeupl) { /* otherwise stacked file system has to handle this */
8491 if (error) {
8492 int abortflags = 0;
8493 char action = nfs_pageouterrorhandler(error);
8494
8495 switch (action) {
8496 case DUMP:
8497 abortflags = UPL_ABORT_DUMP_PAGES | UPL_ABORT_FREE_ON_EMPTY;
8498 break;
8499 case DUMPANDLOG:
8500 abortflags = UPL_ABORT_DUMP_PAGES | UPL_ABORT_FREE_ON_EMPTY;
8501 if (error <= NFS_ELAST) {
8502 if ((errorcount[error] % 100) == 0) {
8503 NP(np, "nfs_pageout: unexpected error %d. dumping vm page", error);
8504 }
8505 errorcount[error]++;
8506 }
8507 break;
8508 case RETRY:
8509 abortflags = UPL_ABORT_FREE_ON_EMPTY;
8510 break;
8511 case SEVER: /* not implemented */
8512 default:
8513 NP(np, "nfs_pageout: action %d not expected", action);
8514 break;
8515 }
8516
8517 ubc_upl_abort_range(pl, pl_offset, pgsize, abortflags);
8518 /* return error in all cases above */
8519 } else {
8520 ubc_upl_commit_range(pl, pl_offset, pgsize,
8521 UPL_COMMIT_CLEAR_DIRTY |
8522 UPL_COMMIT_FREE_ON_EMPTY);
8523 }
8524 }
8525 return error;
8526}
8527
8528/* Blktooff derives file offset given a logical block number */
8529int
8530nfs_vnop_blktooff(
8531 struct vnop_blktooff_args /* {
8532 * struct vnodeop_desc *a_desc;
8533 * vnode_t a_vp;
8534 * daddr64_t a_lblkno;
8535 * off_t *a_offset;
8536 * } */*ap)
8537{
8538 int biosize;
8539 vnode_t vp = ap->a_vp;
8540 struct nfsmount *nmp = VTONMP(vp);
8541
8542 if (nfs_mount_gone(nmp)) {
8543 return ENXIO;
8544 }
8545 biosize = nmp->nm_biosize;
8546
8547 *ap->a_offset = (off_t)(ap->a_lblkno * biosize);
8548
8549 return 0;
8550}
8551
8552int
8553nfs_vnop_offtoblk(
8554 struct vnop_offtoblk_args /* {
8555 * struct vnodeop_desc *a_desc;
8556 * vnode_t a_vp;
8557 * off_t a_offset;
8558 * daddr64_t *a_lblkno;
8559 * } */*ap)
8560{
8561 int biosize;
8562 vnode_t vp = ap->a_vp;
8563 struct nfsmount *nmp = VTONMP(vp);
8564
8565 if (nfs_mount_gone(nmp)) {
8566 return ENXIO;
8567 }
8568 biosize = nmp->nm_biosize;
8569
8570 *ap->a_lblkno = (daddr64_t)(ap->a_offset / biosize);
8571
8572 return 0;
8573}
8574
8575/*
8576 * vnode change monitoring
8577 */
8578int
8579nfs_vnop_monitor(
8580 struct vnop_monitor_args /* {
8581 * struct vnodeop_desc *a_desc;
8582 * vnode_t a_vp;
8583 * uint32_t a_events;
8584 * uint32_t a_flags;
8585 * void *a_handle;
8586 * vfs_context_t a_context;
8587 * } */*ap)
8588{
8589 nfsnode_t np = VTONFS(ap->a_vp);
8590 struct nfsmount *nmp = VTONMP(ap->a_vp);
8591 int error = 0;
8592
8593 if (nfs_mount_gone(nmp)) {
8594 return ENXIO;
8595 }
8596
8597 /* make sure that the vnode's monitoring status is up to date */
8598 lck_mtx_lock(&nmp->nm_lock);
8599 if (vnode_ismonitored(ap->a_vp)) {
8600 /* This vnode is currently being monitored, make sure we're tracking it. */
8601 if (np->n_monlink.le_next == NFSNOLIST) {
8602 LIST_INSERT_HEAD(&nmp->nm_monlist, np, n_monlink);
8603 nfs_mount_sock_thread_wake(nmp);
8604 }
8605 } else {
8606 /* This vnode is no longer being monitored, make sure we're not tracking it. */
8607 /* Wait for any in-progress getattr to complete first. */
8608 while (np->n_mflag & NMMONSCANINPROG) {
8609 struct timespec ts = { .tv_sec = 1, .tv_nsec = 0 };
8610 np->n_mflag |= NMMONSCANWANT;
8611 msleep(&np->n_mflag, &nmp->nm_lock, PZERO - 1, "nfswaitmonscan", &ts);
8612 }
8613 if (np->n_monlink.le_next != NFSNOLIST) {
8614 LIST_REMOVE(np, n_monlink);
8615 np->n_monlink.le_next = NFSNOLIST;
8616 }
8617 }
8618 lck_mtx_unlock(&nmp->nm_lock);
8619
8620 return error;
8621}
8622
8623/*
8624 * Send a vnode notification for the given events.
8625 */
8626void
8627nfs_vnode_notify(nfsnode_t np, uint32_t events)
8628{
8629 struct nfsmount *nmp = NFSTONMP(np);
8630 struct nfs_vattr nvattr;
8631 struct vnode_attr vattr, *vap = NULL;
8632 struct timeval now;
8633
8634 microuptime(&now);
8635 if ((np->n_evtstamp == now.tv_sec) || !nmp) {
8636 /* delay sending this notify */
8637 np->n_events |= events;
8638 return;
8639 }
8640 events |= np->n_events;
8641 np->n_events = 0;
8642 np->n_evtstamp = now.tv_sec;
8643
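	/* include cached attributes with the notification, if we have them */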
8644 vfs_get_notify_attributes(&vattr);
8645 if (!nfs_getattrcache(np, &nvattr, 0)) {
8646 vap = &vattr;
8647 VATTR_INIT(vap);
8648
8649 VATTR_RETURN(vap, va_fsid, vfs_statfs(nmp->nm_mountp)->f_fsid.val[0]);
8650 VATTR_RETURN(vap, va_fileid, nvattr.nva_fileid);
8651 VATTR_RETURN(vap, va_mode, nvattr.nva_mode);
8652 VATTR_RETURN(vap, va_uid, nvattr.nva_uid);
8653 VATTR_RETURN(vap, va_gid, nvattr.nva_gid);
8654 VATTR_RETURN(vap, va_nlink, nvattr.nva_nlink);
8655 }
8656 vnode_notify(NFSTOV(np), events, vap);
8657}
8658
8659#endif /* CONFIG_NFS_CLIENT */