apple/xnu xnu-3248.60.10: bsd/nfs/nfs_vfsops.c
1/*
2 * Copyright (c) 2000-2014 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
29/*
30 * Copyright (c) 1989, 1993, 1995
31 * The Regents of the University of California. All rights reserved.
32 *
33 * This code is derived from software contributed to Berkeley by
34 * Rick Macklem at The University of Guelph.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 * 1. Redistributions of source code must retain the above copyright
40 * notice, this list of conditions and the following disclaimer.
41 * 2. Redistributions in binary form must reproduce the above copyright
42 * notice, this list of conditions and the following disclaimer in the
43 * documentation and/or other materials provided with the distribution.
44 * 3. All advertising materials mentioning features or use of this software
45 * must display the following acknowledgement:
46 * This product includes software developed by the University of
47 * California, Berkeley and its contributors.
48 * 4. Neither the name of the University nor the names of its contributors
49 * may be used to endorse or promote products derived from this software
50 * without specific prior written permission.
51 *
52 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
53 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
54 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
55 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
56 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
57 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
58 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
59 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
60 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
61 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
62 * SUCH DAMAGE.
63 *
64 * @(#)nfs_vfsops.c 8.12 (Berkeley) 5/20/95
65 * FreeBSD-Id: nfs_vfsops.c,v 1.52 1997/11/12 05:42:21 julian Exp $
66 */
67/*
68 * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
69 * support for mandatory and extensible security protections. This notice
70 * is included in support of clause 2.2 (b) of the Apple Public License,
71 * Version 2.0.
72 */
73
74#include <sys/param.h>
75#include <sys/systm.h>
76#include <sys/conf.h>
77#include <sys/ioctl.h>
78#include <sys/signal.h>
79#include <sys/proc_internal.h> /* for fs rooting to update rootdir in fdp */
80#include <sys/kauth.h>
81#include <sys/vnode_internal.h>
82#include <sys/malloc.h>
83#include <sys/kernel.h>
84#include <sys/sysctl.h>
85#include <sys/mount_internal.h>
86#include <sys/kpi_mbuf.h>
87#include <sys/socket.h>
88#include <sys/socketvar.h>
89#include <sys/fcntl.h>
90#include <sys/quota.h>
91#include <sys/priv.h>
92#include <libkern/OSAtomic.h>
93
94#include <sys/vm.h>
95#include <sys/vmparam.h>
96
97#if !defined(NO_MOUNT_PRIVATE)
98#include <sys/filedesc.h>
99#endif /* NO_MOUNT_PRIVATE */
100
101#include <net/if.h>
102#include <net/route.h>
103#include <netinet/in.h>
104
105#include <nfs/rpcv2.h>
106#include <nfs/krpc.h>
107#include <nfs/nfsproto.h>
108#include <nfs/nfs.h>
109#include <nfs/nfsnode.h>
110#include <nfs/nfs_gss.h>
111#include <nfs/nfsmount.h>
112#include <nfs/xdr_subs.h>
113#include <nfs/nfsm_subs.h>
114#include <nfs/nfsdiskless.h>
115#include <nfs/nfs_lock.h>
116#if CONFIG_MACF
117#include <security/mac_framework.h>
118#endif
119
120#include <pexpert/pexpert.h>
121
122#define NFS_VFS_DBG(...) NFS_DBG(NFS_FAC_VFS, 7, ## __VA_ARGS__)
123
124/*
125 * NFS client globals
126 */
127
128int nfs_ticks;
129static lck_grp_t *nfs_global_grp, *nfs_mount_grp;
130lck_mtx_t *nfs_global_mutex;
131uint32_t nfs_fs_attr_bitmap[NFS_ATTR_BITMAP_LEN];
132uint32_t nfs_object_attr_bitmap[NFS_ATTR_BITMAP_LEN];
133uint32_t nfs_getattr_bitmap[NFS_ATTR_BITMAP_LEN];
134struct nfsclientidlist nfsclientids;
135
136/* NFS requests */
137struct nfs_reqqhead nfs_reqq;
138lck_grp_t *nfs_request_grp;
139lck_mtx_t *nfs_request_mutex;
140thread_call_t nfs_request_timer_call;
141int nfs_request_timer_on;
142u_int32_t nfs_xid = 0;
143u_int32_t nfs_xidwrap = 0; /* to build a (non-wrapping) 64 bit xid */
144
145thread_call_t nfs_buf_timer_call;
146
147/* NFSv4 */
148lck_grp_t *nfs_open_grp;
149uint32_t nfs_open_owner_seqnum = 0;
150uint32_t nfs_lock_owner_seqnum = 0;
151thread_call_t nfs4_callback_timer_call;
152int nfs4_callback_timer_on = 0;
153
154/* nfsiod */
155lck_grp_t *nfsiod_lck_grp;
156lck_mtx_t *nfsiod_mutex;
157struct nfsiodlist nfsiodfree, nfsiodwork;
158struct nfsiodmountlist nfsiodmounts;
159int nfsiod_thread_count = 0;
160int nfsiod_thread_max = NFS_DEFASYNCTHREAD;
161int nfs_max_async_writes = NFS_DEFMAXASYNCWRITES;
162
163int nfs_iosize = NFS_IOSIZE;
164int nfs_access_cache_timeout = NFS_MAXATTRTIMO;
165int nfs_access_delete = 1; /* too many servers get this wrong - workaround on by default */
166int nfs_access_dotzfs = 1;
167int nfs_access_for_getattr = 0;
168int nfs_allow_async = 0;
169int nfs_statfs_rate_limit = NFS_DEFSTATFSRATELIMIT;
170int nfs_lockd_mounts = 0;
171int nfs_lockd_request_sent = 0;
172int nfs_idmap_ctrl = NFS_IDMAP_CTRL_USE_IDMAP_SERVICE;
173int nfs_callback_port = 0;
174
175int nfs_tprintf_initial_delay = NFS_TPRINTF_INITIAL_DELAY;
176int nfs_tprintf_delay = NFS_TPRINTF_DELAY;
177
178
179int mountnfs(char *, mount_t, vfs_context_t, vnode_t *);
180static int nfs_mount_diskless(struct nfs_dlmount *, const char *, int, vnode_t *, mount_t *, vfs_context_t);
181#if !defined(NO_MOUNT_PRIVATE)
182static int nfs_mount_diskless_private(struct nfs_dlmount *, const char *, int, vnode_t *, mount_t *, vfs_context_t);
183#endif /* NO_MOUNT_PRIVATE */
184int nfs_mount_connect(struct nfsmount *);
185void nfs_mount_drain_and_cleanup(struct nfsmount *);
186void nfs_mount_cleanup(struct nfsmount *);
187int nfs_mountinfo_assemble(struct nfsmount *, struct xdrbuf *);
188int nfs4_mount_update_path_with_symlink(struct nfsmount *, struct nfs_fs_path *, uint32_t, fhandle_t *, int *, fhandle_t *, vfs_context_t);
189
190/*
191 * NFS VFS operations.
192 */
193int nfs_vfs_mount(mount_t, vnode_t, user_addr_t, vfs_context_t);
194int nfs_vfs_start(mount_t, int, vfs_context_t);
195int nfs_vfs_unmount(mount_t, int, vfs_context_t);
196int nfs_vfs_root(mount_t, vnode_t *, vfs_context_t);
197int nfs_vfs_quotactl(mount_t, int, uid_t, caddr_t, vfs_context_t);
198int nfs_vfs_getattr(mount_t, struct vfs_attr *, vfs_context_t);
199int nfs_vfs_sync(mount_t, int, vfs_context_t);
200int nfs_vfs_vget(mount_t, ino64_t, vnode_t *, vfs_context_t);
201int nfs_vfs_vptofh(vnode_t, int *, unsigned char *, vfs_context_t);
202int nfs_vfs_fhtovp(mount_t, int, unsigned char *, vnode_t *, vfs_context_t);
203int nfs_vfs_init(struct vfsconf *);
204int nfs_vfs_sysctl(int *, u_int, user_addr_t, size_t *, user_addr_t, size_t, vfs_context_t);
205
206struct vfsops nfs_vfsops = {
207 nfs_vfs_mount,
208 nfs_vfs_start,
209 nfs_vfs_unmount,
210 nfs_vfs_root,
211 nfs_vfs_quotactl,
212 nfs_vfs_getattr,
213 nfs_vfs_sync,
214 nfs_vfs_vget,
215 nfs_vfs_fhtovp,
216 nfs_vfs_vptofh,
217 nfs_vfs_init,
218 nfs_vfs_sysctl,
219 NULL, /* setattr */
220 { NULL, /* reserved */
221 NULL, /* reserved */
222 NULL, /* reserved */
223 NULL, /* reserved */
224 NULL, /* reserved */
225 NULL, /* reserved */
226 NULL } /* reserved */
227};
228
229
230/*
231 * version-specific NFS functions
232 */
233int nfs3_mount(struct nfsmount *, vfs_context_t, nfsnode_t *);
234int nfs4_mount(struct nfsmount *, vfs_context_t, nfsnode_t *);
235int nfs3_fsinfo(struct nfsmount *, nfsnode_t, vfs_context_t);
236int nfs3_update_statfs(struct nfsmount *, vfs_context_t);
237int nfs4_update_statfs(struct nfsmount *, vfs_context_t);
238#if !QUOTA
239#define nfs3_getquota NULL
240#define nfs4_getquota NULL
241#else
242int nfs3_getquota(struct nfsmount *, vfs_context_t, uid_t, int, struct dqblk *);
243int nfs4_getquota(struct nfsmount *, vfs_context_t, uid_t, int, struct dqblk *);
244#endif
245
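/*
 * Per-version RPC operation tables. A mount's nm_funcs points at the table
 * matching its NFS version, so common code can call, e.g.,
 * nmp->nm_funcs->nf_update_statfs() without caring which version is in use
 * (see nfs_vfs_getattr() below).
 */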
246struct nfs_funcs nfs3_funcs = {
247 nfs3_mount,
248 nfs3_update_statfs,
249 nfs3_getquota,
250 nfs3_access_rpc,
251 nfs3_getattr_rpc,
252 nfs3_setattr_rpc,
253 nfs3_read_rpc_async,
254 nfs3_read_rpc_async_finish,
255 nfs3_readlink_rpc,
256 nfs3_write_rpc_async,
257 nfs3_write_rpc_async_finish,
258 nfs3_commit_rpc,
259 nfs3_lookup_rpc_async,
260 nfs3_lookup_rpc_async_finish,
261 nfs3_remove_rpc,
262 nfs3_rename_rpc,
263 nfs3_setlock_rpc,
264 nfs3_unlock_rpc,
265 nfs3_getlock_rpc
266 };
267struct nfs_funcs nfs4_funcs = {
268 nfs4_mount,
269 nfs4_update_statfs,
270 nfs4_getquota,
271 nfs4_access_rpc,
272 nfs4_getattr_rpc,
273 nfs4_setattr_rpc,
274 nfs4_read_rpc_async,
275 nfs4_read_rpc_async_finish,
276 nfs4_readlink_rpc,
277 nfs4_write_rpc_async,
278 nfs4_write_rpc_async_finish,
279 nfs4_commit_rpc,
280 nfs4_lookup_rpc_async,
281 nfs4_lookup_rpc_async_finish,
282 nfs4_remove_rpc,
283 nfs4_rename_rpc,
284 nfs4_setlock_rpc,
285 nfs4_unlock_rpc,
286 nfs4_getlock_rpc
287 };
288
289/*
290 * Called once to initialize data structures...
291 */
292int
293nfs_vfs_init(__unused struct vfsconf *vfsp)
294{
295 int i;
296
297 /*
298	 * Check that major data structures haven't bloated.
299 */
300 if (sizeof (struct nfsnode) > NFS_NODEALLOC) {
301		printf("struct nfsnode bloated (> %d bytes)\n", NFS_NODEALLOC);
302 printf("Try reducing NFS_SMALLFH\n");
303 }
304 if (sizeof (struct nfsmount) > NFS_MNTALLOC)
305		printf("struct nfsmount bloated (> %d bytes)\n", NFS_MNTALLOC);
306
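	/*
	 * NFS_TICKINTVL is in milliseconds; convert it to scheduler ticks
	 * (hz per second), rounding to the nearest tick and using at least one.
	 */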
307 nfs_ticks = (hz * NFS_TICKINTVL + 500) / 1000;
308 if (nfs_ticks < 1)
309 nfs_ticks = 1;
310
311 /* init async I/O thread pool state */
312 TAILQ_INIT(&nfsiodfree);
313 TAILQ_INIT(&nfsiodwork);
314 TAILQ_INIT(&nfsiodmounts);
315 nfsiod_lck_grp = lck_grp_alloc_init("nfsiod", LCK_GRP_ATTR_NULL);
316 nfsiod_mutex = lck_mtx_alloc_init(nfsiod_lck_grp, LCK_ATTR_NULL);
317
318 /* init lock groups, etc. */
319 nfs_mount_grp = lck_grp_alloc_init("nfs_mount", LCK_GRP_ATTR_NULL);
320 nfs_open_grp = lck_grp_alloc_init("nfs_open", LCK_GRP_ATTR_NULL);
321 nfs_global_grp = lck_grp_alloc_init("nfs_global", LCK_GRP_ATTR_NULL);
322
323 nfs_global_mutex = lck_mtx_alloc_init(nfs_global_grp, LCK_ATTR_NULL);
324
325 /* init request list mutex */
326 nfs_request_grp = lck_grp_alloc_init("nfs_request", LCK_GRP_ATTR_NULL);
327 nfs_request_mutex = lck_mtx_alloc_init(nfs_request_grp, LCK_ATTR_NULL);
328
329 /* initialize NFS request list */
330 TAILQ_INIT(&nfs_reqq);
331
332 nfs_nbinit(); /* Init the nfsbuf table */
333 nfs_nhinit(); /* Init the nfsnode table */
334 nfs_lockinit(); /* Init the nfs lock state */
335 nfs_gss_init(); /* Init RPCSEC_GSS security */
336
337 /* NFSv4 stuff */
338 NFS4_PER_FS_ATTRIBUTES(nfs_fs_attr_bitmap);
339 NFS4_PER_OBJECT_ATTRIBUTES(nfs_object_attr_bitmap);
340 NFS4_DEFAULT_ATTRIBUTES(nfs_getattr_bitmap);
341 for (i=0; i < NFS_ATTR_BITMAP_LEN; i++)
342 nfs_getattr_bitmap[i] &= nfs_object_attr_bitmap[i];
343 TAILQ_INIT(&nfsclientids);
344
345 /* initialize NFS timer callouts */
346 nfs_request_timer_call = thread_call_allocate(nfs_request_timer, NULL);
347 nfs_buf_timer_call = thread_call_allocate(nfs_buf_timer, NULL);
348 nfs4_callback_timer_call = thread_call_allocate(nfs4_callback_timer, NULL);
349
350 return (0);
351}
352
353/*
354 * nfs statfs call
355 */
356int
357nfs3_update_statfs(struct nfsmount *nmp, vfs_context_t ctx)
358{
359 nfsnode_t np;
360 int error = 0, lockerror, status, nfsvers;
361 u_int64_t xid;
362 struct nfsm_chain nmreq, nmrep;
363 uint32_t val = 0;
364
365 nfsvers = nmp->nm_vers;
366 np = nmp->nm_dnp;
367 if (!np)
368 return (ENXIO);
369 if ((error = vnode_get(NFSTOV(np))))
370 return (error);
371
372 nfsm_chain_null(&nmreq);
373 nfsm_chain_null(&nmrep);
374
375 nfsm_chain_build_alloc_init(error, &nmreq, NFSX_FH(nfsvers));
376 nfsm_chain_add_fh(error, &nmreq, nfsvers, np->n_fhp, np->n_fhsize);
377 nfsm_chain_build_done(error, &nmreq);
378 nfsmout_if(error);
379 error = nfs_request2(np, NULL, &nmreq, NFSPROC_FSSTAT, vfs_context_thread(ctx),
380 vfs_context_ucred(ctx), NULL, R_SOFT, &nmrep, &xid, &status);
381 if (error == ETIMEDOUT)
382 goto nfsmout;
383 if ((lockerror = nfs_node_lock(np)))
384 error = lockerror;
385 if (nfsvers == NFS_VER3)
386 nfsm_chain_postop_attr_update(error, &nmrep, np, &xid);
387 if (!lockerror)
388 nfs_node_unlock(np);
389 if (!error)
390 error = status;
391 nfsm_assert(error, NFSTONMP(np), ENXIO);
392 nfsmout_if(error);
393 lck_mtx_lock(&nmp->nm_lock);
394 NFS_BITMAP_SET(nmp->nm_fsattr.nfsa_bitmap, NFS_FATTR_SPACE_TOTAL);
395 NFS_BITMAP_SET(nmp->nm_fsattr.nfsa_bitmap, NFS_FATTR_SPACE_FREE);
396 NFS_BITMAP_SET(nmp->nm_fsattr.nfsa_bitmap, NFS_FATTR_SPACE_AVAIL);
397 if (nfsvers == NFS_VER3) {
398 NFS_BITMAP_SET(nmp->nm_fsattr.nfsa_bitmap, NFS_FATTR_FILES_AVAIL);
399 NFS_BITMAP_SET(nmp->nm_fsattr.nfsa_bitmap, NFS_FATTR_FILES_TOTAL);
400 NFS_BITMAP_SET(nmp->nm_fsattr.nfsa_bitmap, NFS_FATTR_FILES_FREE);
401 nmp->nm_fsattr.nfsa_bsize = NFS_FABLKSIZE;
402 nfsm_chain_get_64(error, &nmrep, nmp->nm_fsattr.nfsa_space_total);
403 nfsm_chain_get_64(error, &nmrep, nmp->nm_fsattr.nfsa_space_free);
404 nfsm_chain_get_64(error, &nmrep, nmp->nm_fsattr.nfsa_space_avail);
405 nfsm_chain_get_64(error, &nmrep, nmp->nm_fsattr.nfsa_files_total);
406 nfsm_chain_get_64(error, &nmrep, nmp->nm_fsattr.nfsa_files_free);
407 nfsm_chain_get_64(error, &nmrep, nmp->nm_fsattr.nfsa_files_avail);
408 // skip invarsec
409 } else {
410 nfsm_chain_adv(error, &nmrep, NFSX_UNSIGNED); // skip tsize?
411 nfsm_chain_get_32(error, &nmrep, nmp->nm_fsattr.nfsa_bsize);
412 nfsm_chain_get_32(error, &nmrep, val);
413 nfsmout_if(error);
414 if (nmp->nm_fsattr.nfsa_bsize <= 0)
415 nmp->nm_fsattr.nfsa_bsize = NFS_FABLKSIZE;
416 nmp->nm_fsattr.nfsa_space_total = (uint64_t)val * nmp->nm_fsattr.nfsa_bsize;
417 nfsm_chain_get_32(error, &nmrep, val);
418 nfsmout_if(error);
419 nmp->nm_fsattr.nfsa_space_free = (uint64_t)val * nmp->nm_fsattr.nfsa_bsize;
420 nfsm_chain_get_32(error, &nmrep, val);
421 nfsmout_if(error);
422 nmp->nm_fsattr.nfsa_space_avail = (uint64_t)val * nmp->nm_fsattr.nfsa_bsize;
423 }
424 lck_mtx_unlock(&nmp->nm_lock);
425nfsmout:
426 nfsm_chain_cleanup(&nmreq);
427 nfsm_chain_cleanup(&nmrep);
428 vnode_put(NFSTOV(np));
429 return (error);
430}
431
432int
433nfs4_update_statfs(struct nfsmount *nmp, vfs_context_t ctx)
434{
435 nfsnode_t np;
436 int error = 0, lockerror, status, nfsvers, numops;
437 u_int64_t xid;
438 struct nfsm_chain nmreq, nmrep;
439 uint32_t bitmap[NFS_ATTR_BITMAP_LEN];
440 struct nfs_vattr nvattr;
441 struct nfsreq_secinfo_args si;
442
443 nfsvers = nmp->nm_vers;
444 np = nmp->nm_dnp;
445 if (!np)
446 return (ENXIO);
447 if ((error = vnode_get(NFSTOV(np))))
448 return (error);
449
450 NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
451 NVATTR_INIT(&nvattr);
452 nfsm_chain_null(&nmreq);
453 nfsm_chain_null(&nmrep);
454
455 // PUTFH + GETATTR
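	// (set the current filehandle to the mount's root directory, then
	//  fetch the statfs-related attributes in a single compound request)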
456 numops = 2;
457 nfsm_chain_build_alloc_init(error, &nmreq, 15 * NFSX_UNSIGNED);
458 nfsm_chain_add_compound_header(error, &nmreq, "statfs", nmp->nm_minor_vers, numops);
459 numops--;
460 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
461 nfsm_chain_add_fh(error, &nmreq, nfsvers, np->n_fhp, np->n_fhsize);
462 numops--;
463 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
464 NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap, bitmap);
465 NFS4_STATFS_ATTRIBUTES(bitmap);
466 nfsm_chain_add_bitmap_supported(error, &nmreq, bitmap, nmp, np);
467 nfsm_chain_build_done(error, &nmreq);
468 nfsm_assert(error, (numops == 0), EPROTO);
469 nfsmout_if(error);
470 error = nfs_request2(np, NULL, &nmreq, NFSPROC4_COMPOUND,
471 vfs_context_thread(ctx), vfs_context_ucred(ctx),
472 NULL, R_SOFT, &nmrep, &xid, &status);
473 nfsm_chain_skip_tag(error, &nmrep);
474 nfsm_chain_get_32(error, &nmrep, numops);
475 nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
476 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
477 nfsm_assert(error, NFSTONMP(np), ENXIO);
478 nfsmout_if(error);
479 lck_mtx_lock(&nmp->nm_lock);
480 error = nfs4_parsefattr(&nmrep, &nmp->nm_fsattr, &nvattr, NULL, NULL, NULL);
481 lck_mtx_unlock(&nmp->nm_lock);
482 nfsmout_if(error);
483 if ((lockerror = nfs_node_lock(np)))
484 error = lockerror;
485 if (!error)
486 nfs_loadattrcache(np, &nvattr, &xid, 0);
487 if (!lockerror)
488 nfs_node_unlock(np);
489 nfsm_assert(error, NFSTONMP(np), ENXIO);
490 nfsmout_if(error);
491 nmp->nm_fsattr.nfsa_bsize = NFS_FABLKSIZE;
492nfsmout:
493 NVATTR_CLEANUP(&nvattr);
494 nfsm_chain_cleanup(&nmreq);
495 nfsm_chain_cleanup(&nmrep);
496 vnode_put(NFSTOV(np));
497 return (error);
498}
499
500
501/*
502 * The NFS VFS_GETATTR function: "statfs"-type information is retrieved
503 * using the nf_update_statfs() function, and other attributes are cobbled
504 * together from whatever sources we can (getattr, fsinfo, pathconf).
505 */
506int
507nfs_vfs_getattr(mount_t mp, struct vfs_attr *fsap, vfs_context_t ctx)
508{
509 struct nfsmount *nmp;
510 uint32_t bsize;
511 int error = 0, nfsvers;
512
513 nmp = VFSTONFS(mp);
514 if (nfs_mount_gone(nmp))
515 return (ENXIO);
516 nfsvers = nmp->nm_vers;
517
518 if (VFSATTR_IS_ACTIVE(fsap, f_bsize) ||
519 VFSATTR_IS_ACTIVE(fsap, f_iosize) ||
520 VFSATTR_IS_ACTIVE(fsap, f_blocks) ||
521 VFSATTR_IS_ACTIVE(fsap, f_bfree) ||
522 VFSATTR_IS_ACTIVE(fsap, f_bavail) ||
523 VFSATTR_IS_ACTIVE(fsap, f_bused) ||
524 VFSATTR_IS_ACTIVE(fsap, f_files) ||
525 VFSATTR_IS_ACTIVE(fsap, f_ffree)) {
526 int statfsrate = nfs_statfs_rate_limit;
527 int refresh = 1;
528
529 /*
530 * Are we rate-limiting statfs RPCs?
531		 * (Treat values less than 1, or 1,000,000 and greater, as no limit.)
532 */
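		/*
		 * Example: with nfs_statfs_rate_limit = 10, each second is split
		 * into ten 100ms buckets; the stamp computed below changes at most
		 * once per bucket, so at most ten statfs RPCs go out per second.
		 */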
533 if ((statfsrate > 0) && (statfsrate < 1000000)) {
534 struct timeval now;
535 uint32_t stamp;
536
537 microuptime(&now);
538 lck_mtx_lock(&nmp->nm_lock);
539 stamp = (now.tv_sec * statfsrate) + (now.tv_usec / (1000000/statfsrate));
540 if (stamp != nmp->nm_fsattrstamp) {
541 refresh = 1;
542 nmp->nm_fsattrstamp = stamp;
543 } else {
544 refresh = 0;
545 }
546 lck_mtx_unlock(&nmp->nm_lock);
547 }
548
549 if (refresh && !nfs_use_cache(nmp))
550 error = nmp->nm_funcs->nf_update_statfs(nmp, ctx);
551 if ((error == ESTALE) || (error == ETIMEDOUT))
552 error = 0;
553 if (error)
554 return (error);
555
556 lck_mtx_lock(&nmp->nm_lock);
557 VFSATTR_RETURN(fsap, f_iosize, nfs_iosize);
558 VFSATTR_RETURN(fsap, f_bsize, nmp->nm_fsattr.nfsa_bsize);
559 bsize = nmp->nm_fsattr.nfsa_bsize;
560 if (NFS_BITMAP_ISSET(nmp->nm_fsattr.nfsa_bitmap, NFS_FATTR_SPACE_TOTAL))
561 VFSATTR_RETURN(fsap, f_blocks, nmp->nm_fsattr.nfsa_space_total / bsize);
562 if (NFS_BITMAP_ISSET(nmp->nm_fsattr.nfsa_bitmap, NFS_FATTR_SPACE_FREE))
563 VFSATTR_RETURN(fsap, f_bfree, nmp->nm_fsattr.nfsa_space_free / bsize);
564 if (NFS_BITMAP_ISSET(nmp->nm_fsattr.nfsa_bitmap, NFS_FATTR_SPACE_AVAIL))
565 VFSATTR_RETURN(fsap, f_bavail, nmp->nm_fsattr.nfsa_space_avail / bsize);
566 if (NFS_BITMAP_ISSET(nmp->nm_fsattr.nfsa_bitmap, NFS_FATTR_SPACE_TOTAL) &&
567 NFS_BITMAP_ISSET(nmp->nm_fsattr.nfsa_bitmap, NFS_FATTR_SPACE_FREE))
568 VFSATTR_RETURN(fsap, f_bused,
569 (nmp->nm_fsattr.nfsa_space_total / bsize) -
570 (nmp->nm_fsattr.nfsa_space_free / bsize));
571 if (NFS_BITMAP_ISSET(nmp->nm_fsattr.nfsa_bitmap, NFS_FATTR_FILES_TOTAL))
572 VFSATTR_RETURN(fsap, f_files, nmp->nm_fsattr.nfsa_files_total);
573 if (NFS_BITMAP_ISSET(nmp->nm_fsattr.nfsa_bitmap, NFS_FATTR_FILES_FREE))
574 VFSATTR_RETURN(fsap, f_ffree, nmp->nm_fsattr.nfsa_files_free);
575 lck_mtx_unlock(&nmp->nm_lock);
576 }
577
578 if (VFSATTR_IS_ACTIVE(fsap, f_capabilities)) {
579 u_int32_t caps, valid;
580 nfsnode_t np = nmp->nm_dnp;
581
582 nfsm_assert(error, VFSTONFS(mp) && np, ENXIO);
583 if (error)
584 return (error);
585 lck_mtx_lock(&nmp->nm_lock);
586
587 /*
588 * The capabilities[] array defines what this volume supports.
589 *
590 * The valid[] array defines which bits this code understands
591 * the meaning of (whether the volume has that capability or not).
592 * Any zero bits here means "I don't know what you're asking about"
593 * and the caller cannot tell whether that capability is
594 * present or not.
595 */
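		/*
		 * For example (hypothetical caller logic): a capability is known to
		 * be present only when its bit is set in both valid[] and
		 * capabilities[]; if the valid[] bit is clear, the answer is unknown:
		 *
		 *	if (valid & VOL_CAP_FMT_SYMBOLICLINKS)
		 *		supported = (caps & VOL_CAP_FMT_SYMBOLICLINKS) != 0;
		 *	else
		 *		; // can't tell
		 */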
596 caps = valid = 0;
597 if (NFS_BITMAP_ISSET(nmp->nm_fsattr.nfsa_bitmap, NFS_FATTR_SYMLINK_SUPPORT)) {
598 valid |= VOL_CAP_FMT_SYMBOLICLINKS;
599 if (nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_SYMLINK)
600 caps |= VOL_CAP_FMT_SYMBOLICLINKS;
601 }
602 if (NFS_BITMAP_ISSET(nmp->nm_fsattr.nfsa_bitmap, NFS_FATTR_LINK_SUPPORT)) {
603 valid |= VOL_CAP_FMT_HARDLINKS;
604 if (nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_LINK)
605 caps |= VOL_CAP_FMT_HARDLINKS;
606 }
607 if (NFS_BITMAP_ISSET(nmp->nm_fsattr.nfsa_bitmap, NFS_FATTR_CASE_INSENSITIVE)) {
608 valid |= VOL_CAP_FMT_CASE_SENSITIVE;
609 if (!(nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_CASE_INSENSITIVE))
610 caps |= VOL_CAP_FMT_CASE_SENSITIVE;
611 }
612 if (NFS_BITMAP_ISSET(nmp->nm_fsattr.nfsa_bitmap, NFS_FATTR_CASE_PRESERVING)) {
613 valid |= VOL_CAP_FMT_CASE_PRESERVING;
614 if (nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_CASE_PRESERVING)
615 caps |= VOL_CAP_FMT_CASE_PRESERVING;
616 }
617 /* Note: VOL_CAP_FMT_2TB_FILESIZE is actually used to test for "large file support" */
618 if (NFS_BITMAP_ISSET(nmp->nm_fsattr.nfsa_bitmap, NFS_FATTR_MAXFILESIZE)) {
619 /* Is server's max file size at least 4GB? */
620 if (nmp->nm_fsattr.nfsa_maxfilesize >= 0x100000000ULL)
621 caps |= VOL_CAP_FMT_2TB_FILESIZE;
622 } else if (nfsvers >= NFS_VER3) {
623 /*
624			 * NFSv3 and up support 64-bit file sizes,
625			 * so we'll just assume maxfilesize >= 4GB.
626 */
627 caps |= VOL_CAP_FMT_2TB_FILESIZE;
628 }
629 if (nfsvers >= NFS_VER4) {
630 caps |= VOL_CAP_FMT_HIDDEN_FILES;
631 valid |= VOL_CAP_FMT_HIDDEN_FILES;
632 // VOL_CAP_FMT_OPENDENYMODES
633// caps |= VOL_CAP_FMT_OPENDENYMODES;
634// valid |= VOL_CAP_FMT_OPENDENYMODES;
635 }
636 fsap->f_capabilities.capabilities[VOL_CAPABILITIES_FORMAT] =
637 // VOL_CAP_FMT_PERSISTENTOBJECTIDS |
638 // VOL_CAP_FMT_SYMBOLICLINKS |
639 // VOL_CAP_FMT_HARDLINKS |
640 // VOL_CAP_FMT_JOURNAL |
641 // VOL_CAP_FMT_JOURNAL_ACTIVE |
642 // VOL_CAP_FMT_NO_ROOT_TIMES |
643 // VOL_CAP_FMT_SPARSE_FILES |
644 // VOL_CAP_FMT_ZERO_RUNS |
645 // VOL_CAP_FMT_CASE_SENSITIVE |
646 // VOL_CAP_FMT_CASE_PRESERVING |
647 // VOL_CAP_FMT_FAST_STATFS |
648 // VOL_CAP_FMT_2TB_FILESIZE |
649 // VOL_CAP_FMT_OPENDENYMODES |
650 // VOL_CAP_FMT_HIDDEN_FILES |
651 caps;
652 fsap->f_capabilities.valid[VOL_CAPABILITIES_FORMAT] =
653 VOL_CAP_FMT_PERSISTENTOBJECTIDS |
654 // VOL_CAP_FMT_SYMBOLICLINKS |
655 // VOL_CAP_FMT_HARDLINKS |
656 // VOL_CAP_FMT_JOURNAL |
657 // VOL_CAP_FMT_JOURNAL_ACTIVE |
658 // VOL_CAP_FMT_NO_ROOT_TIMES |
659 // VOL_CAP_FMT_SPARSE_FILES |
660 // VOL_CAP_FMT_ZERO_RUNS |
661 // VOL_CAP_FMT_CASE_SENSITIVE |
662 // VOL_CAP_FMT_CASE_PRESERVING |
663 VOL_CAP_FMT_FAST_STATFS |
664 VOL_CAP_FMT_2TB_FILESIZE |
665 // VOL_CAP_FMT_OPENDENYMODES |
666 // VOL_CAP_FMT_HIDDEN_FILES |
667 valid;
668
669 /*
670 * We don't support most of the interfaces.
671 *
672 * We MAY support locking, but we don't have any easy way of probing.
673 * We can tell if there's no lockd running or if locks have been
674 * disabled for a mount, so we can definitely answer NO in that case.
675 * Any attempt to send a request to lockd to test for locking support
676 * may cause the lazily-launched locking daemons to be started
677 * unnecessarily. So we avoid that. However, we do record if we ever
678 * successfully perform a lock operation on a mount point, so if it
679 * looks like lock ops have worked, we do report that we support them.
680 */
681 caps = valid = 0;
682 if (nfsvers >= NFS_VER4) {
683 caps = VOL_CAP_INT_ADVLOCK | VOL_CAP_INT_FLOCK;
684 valid = VOL_CAP_INT_ADVLOCK | VOL_CAP_INT_FLOCK;
685 if (nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_ACL)
686 caps |= VOL_CAP_INT_EXTENDED_SECURITY;
687 valid |= VOL_CAP_INT_EXTENDED_SECURITY;
688 if (nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_NAMED_ATTR)
689 caps |= VOL_CAP_INT_EXTENDED_ATTR;
690 valid |= VOL_CAP_INT_EXTENDED_ATTR;
691#if NAMEDSTREAMS
692 if (nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_NAMED_ATTR)
693 caps |= VOL_CAP_INT_NAMEDSTREAMS;
694 valid |= VOL_CAP_INT_NAMEDSTREAMS;
695#endif
696 } else if (nmp->nm_lockmode == NFS_LOCK_MODE_DISABLED) {
697 /* locks disabled on this mount, so they definitely won't work */
698 valid = VOL_CAP_INT_ADVLOCK | VOL_CAP_INT_FLOCK;
699 } else if (nmp->nm_state & NFSSTA_LOCKSWORK) {
700 caps = VOL_CAP_INT_ADVLOCK | VOL_CAP_INT_FLOCK;
701 valid = VOL_CAP_INT_ADVLOCK | VOL_CAP_INT_FLOCK;
702 }
703 fsap->f_capabilities.capabilities[VOL_CAPABILITIES_INTERFACES] =
704 // VOL_CAP_INT_SEARCHFS |
705 // VOL_CAP_INT_ATTRLIST |
706 // VOL_CAP_INT_NFSEXPORT |
707 // VOL_CAP_INT_READDIRATTR |
708 // VOL_CAP_INT_EXCHANGEDATA |
709 // VOL_CAP_INT_COPYFILE |
710 // VOL_CAP_INT_ALLOCATE |
711 // VOL_CAP_INT_VOL_RENAME |
712 // VOL_CAP_INT_ADVLOCK |
713 // VOL_CAP_INT_FLOCK |
714 // VOL_CAP_INT_EXTENDED_SECURITY |
715 // VOL_CAP_INT_USERACCESS |
716 // VOL_CAP_INT_MANLOCK |
717 // VOL_CAP_INT_NAMEDSTREAMS |
718 // VOL_CAP_INT_EXTENDED_ATTR |
719 VOL_CAP_INT_REMOTE_EVENT |
720 caps;
721 fsap->f_capabilities.valid[VOL_CAPABILITIES_INTERFACES] =
722 VOL_CAP_INT_SEARCHFS |
723 VOL_CAP_INT_ATTRLIST |
724 VOL_CAP_INT_NFSEXPORT |
725 VOL_CAP_INT_READDIRATTR |
726 VOL_CAP_INT_EXCHANGEDATA |
727 VOL_CAP_INT_COPYFILE |
728 VOL_CAP_INT_ALLOCATE |
729 VOL_CAP_INT_VOL_RENAME |
730 // VOL_CAP_INT_ADVLOCK |
731 // VOL_CAP_INT_FLOCK |
732 // VOL_CAP_INT_EXTENDED_SECURITY |
733 // VOL_CAP_INT_USERACCESS |
734 // VOL_CAP_INT_MANLOCK |
735 // VOL_CAP_INT_NAMEDSTREAMS |
736 // VOL_CAP_INT_EXTENDED_ATTR |
737 VOL_CAP_INT_REMOTE_EVENT |
738 valid;
739
740 fsap->f_capabilities.capabilities[VOL_CAPABILITIES_RESERVED1] = 0;
741 fsap->f_capabilities.valid[VOL_CAPABILITIES_RESERVED1] = 0;
742
743 fsap->f_capabilities.capabilities[VOL_CAPABILITIES_RESERVED2] = 0;
744 fsap->f_capabilities.valid[VOL_CAPABILITIES_RESERVED2] = 0;
745
746 VFSATTR_SET_SUPPORTED(fsap, f_capabilities);
747 lck_mtx_unlock(&nmp->nm_lock);
748 }
749
750 if (VFSATTR_IS_ACTIVE(fsap, f_attributes)) {
751 fsap->f_attributes.validattr.commonattr = 0;
752 fsap->f_attributes.validattr.volattr =
753 ATTR_VOL_CAPABILITIES | ATTR_VOL_ATTRIBUTES;
754 fsap->f_attributes.validattr.dirattr = 0;
755 fsap->f_attributes.validattr.fileattr = 0;
756 fsap->f_attributes.validattr.forkattr = 0;
757
758 fsap->f_attributes.nativeattr.commonattr = 0;
759 fsap->f_attributes.nativeattr.volattr =
760 ATTR_VOL_CAPABILITIES | ATTR_VOL_ATTRIBUTES;
761 fsap->f_attributes.nativeattr.dirattr = 0;
762 fsap->f_attributes.nativeattr.fileattr = 0;
763 fsap->f_attributes.nativeattr.forkattr = 0;
764
765 VFSATTR_SET_SUPPORTED(fsap, f_attributes);
766 }
767
768 return (error);
769}
770
771/*
772 * nfs version 3 fsinfo rpc call
773 */
774int
775nfs3_fsinfo(struct nfsmount *nmp, nfsnode_t np, vfs_context_t ctx)
776{
777 int error = 0, lockerror, status, nmlocked = 0;
778 u_int64_t xid;
779 uint32_t val, prefsize, maxsize;
780 struct nfsm_chain nmreq, nmrep;
781
782 nfsm_chain_null(&nmreq);
783 nfsm_chain_null(&nmrep);
784
785 nfsm_chain_build_alloc_init(error, &nmreq, NFSX_FH(nmp->nm_vers));
786 nfsm_chain_add_fh(error, &nmreq, nmp->nm_vers, np->n_fhp, np->n_fhsize);
787 nfsm_chain_build_done(error, &nmreq);
788 nfsmout_if(error);
789 error = nfs_request(np, NULL, &nmreq, NFSPROC_FSINFO, ctx, NULL, &nmrep, &xid, &status);
790 if ((lockerror = nfs_node_lock(np)))
791 error = lockerror;
792 nfsm_chain_postop_attr_update(error, &nmrep, np, &xid);
793 if (!lockerror)
794 nfs_node_unlock(np);
795 if (!error)
796 error = status;
797 nfsmout_if(error);
798
799 lck_mtx_lock(&nmp->nm_lock);
800 nmlocked = 1;
801
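	/*
	 * Clamp our read, write, and readdir transfer sizes to the server's
	 * reported preferred and maximum sizes, keeping the read and write
	 * sizes aligned to NFS_FABLKSIZE boundaries.
	 */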
802 nfsm_chain_get_32(error, &nmrep, maxsize);
803 nfsm_chain_get_32(error, &nmrep, prefsize);
804 nfsmout_if(error);
805 nmp->nm_fsattr.nfsa_maxread = maxsize;
806 if (prefsize < nmp->nm_rsize)
807 nmp->nm_rsize = (prefsize + NFS_FABLKSIZE - 1) &
808 ~(NFS_FABLKSIZE - 1);
809 if ((maxsize > 0) && (maxsize < nmp->nm_rsize)) {
810 nmp->nm_rsize = maxsize & ~(NFS_FABLKSIZE - 1);
811 if (nmp->nm_rsize == 0)
812 nmp->nm_rsize = maxsize;
813 }
814 nfsm_chain_adv(error, &nmrep, NFSX_UNSIGNED); // skip rtmult
815
816 nfsm_chain_get_32(error, &nmrep, maxsize);
817 nfsm_chain_get_32(error, &nmrep, prefsize);
818 nfsmout_if(error);
819 nmp->nm_fsattr.nfsa_maxwrite = maxsize;
820 if (prefsize < nmp->nm_wsize)
821 nmp->nm_wsize = (prefsize + NFS_FABLKSIZE - 1) &
822 ~(NFS_FABLKSIZE - 1);
823 if ((maxsize > 0) && (maxsize < nmp->nm_wsize)) {
824 nmp->nm_wsize = maxsize & ~(NFS_FABLKSIZE - 1);
825 if (nmp->nm_wsize == 0)
826 nmp->nm_wsize = maxsize;
827 }
828 nfsm_chain_adv(error, &nmrep, NFSX_UNSIGNED); // skip wtmult
829
830 nfsm_chain_get_32(error, &nmrep, prefsize);
831 nfsmout_if(error);
832 if ((prefsize > 0) && (prefsize < nmp->nm_readdirsize))
833 nmp->nm_readdirsize = prefsize;
834 if ((nmp->nm_fsattr.nfsa_maxread > 0) &&
835 (nmp->nm_fsattr.nfsa_maxread < nmp->nm_readdirsize))
836 nmp->nm_readdirsize = nmp->nm_fsattr.nfsa_maxread;
837
838 nfsm_chain_get_64(error, &nmrep, nmp->nm_fsattr.nfsa_maxfilesize);
839
840 nfsm_chain_adv(error, &nmrep, 2 * NFSX_UNSIGNED); // skip time_delta
841
842 /* convert FS properties to our own flags */
843 nfsm_chain_get_32(error, &nmrep, val);
844 nfsmout_if(error);
845 if (val & NFSV3FSINFO_LINK)
846 nmp->nm_fsattr.nfsa_flags |= NFS_FSFLAG_LINK;
847 if (val & NFSV3FSINFO_SYMLINK)
848 nmp->nm_fsattr.nfsa_flags |= NFS_FSFLAG_SYMLINK;
849 if (val & NFSV3FSINFO_HOMOGENEOUS)
850 nmp->nm_fsattr.nfsa_flags |= NFS_FSFLAG_HOMOGENEOUS;
851 if (val & NFSV3FSINFO_CANSETTIME)
852 nmp->nm_fsattr.nfsa_flags |= NFS_FSFLAG_SET_TIME;
853 nmp->nm_state |= NFSSTA_GOTFSINFO;
854 NFS_BITMAP_SET(nmp->nm_fsattr.nfsa_bitmap, NFS_FATTR_MAXREAD);
855 NFS_BITMAP_SET(nmp->nm_fsattr.nfsa_bitmap, NFS_FATTR_MAXWRITE);
856 NFS_BITMAP_SET(nmp->nm_fsattr.nfsa_bitmap, NFS_FATTR_MAXFILESIZE);
857 NFS_BITMAP_SET(nmp->nm_fsattr.nfsa_bitmap, NFS_FATTR_LINK_SUPPORT);
858 NFS_BITMAP_SET(nmp->nm_fsattr.nfsa_bitmap, NFS_FATTR_SYMLINK_SUPPORT);
859 NFS_BITMAP_SET(nmp->nm_fsattr.nfsa_bitmap, NFS_FATTR_HOMOGENEOUS);
860 NFS_BITMAP_SET(nmp->nm_fsattr.nfsa_bitmap, NFS_FATTR_CANSETTIME);
861nfsmout:
862 if (nmlocked)
863 lck_mtx_unlock(&nmp->nm_lock);
864 nfsm_chain_cleanup(&nmreq);
865 nfsm_chain_cleanup(&nmrep);
866 return (error);
867}
868
869/*
870 * Mount a remote root fs via NFS. This depends on the info in the
871 * nfs_diskless structure that has been filled in properly by some primary
872 * bootstrap.
873 * It goes something like this:
874 * - do enough of "ifconfig" by calling ifioctl() so that the system
875 * can talk to the server
876 * - If nfs_diskless.mygateway is filled in, use that address as
877 * a default gateway.
878 * - hand craft the swap nfs vnode hanging off a fake mount point
879 * if swdevt[0].sw_dev == NODEV
880 * - build the rootfs mount point and call mountnfs() to do the rest.
881 */
882int
883nfs_mountroot(void)
884{
885 struct nfs_diskless nd;
886 mount_t mp = NULL;
887 vnode_t vp = NULL;
888 vfs_context_t ctx;
889 int error;
890#if !defined(NO_MOUNT_PRIVATE)
891 mount_t mppriv = NULL;
892 vnode_t vppriv = NULL;
893#endif /* NO_MOUNT_PRIVATE */
894 int v3, sotype;
895
896 /*
897 * Call nfs_boot_init() to fill in the nfs_diskless struct.
898 * Note: networking must already have been configured before
899 * we're called.
900 */
901 bzero((caddr_t) &nd, sizeof(nd));
902 error = nfs_boot_init(&nd);
903 if (error)
904 panic("nfs_boot_init: unable to initialize NFS root system information, "
905 "error %d, check configuration: %s\n", error, PE_boot_args());
906
907 /*
908	 * Try NFSv3 first, then fall back to NFSv2.
909 * Likewise, try TCP first, then fall back to UDP.
910 */
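	/* Fallback order: (v3,TCP) -> (v3,UDP) -> (v2,TCP) -> (v2,UDP). */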
911 v3 = 1;
912 sotype = SOCK_STREAM;
913
914tryagain:
915 error = nfs_boot_getfh(&nd, v3, sotype);
916 if (error) {
917 if (error == EHOSTDOWN || error == EHOSTUNREACH) {
918 if (nd.nd_root.ndm_mntfrom)
919 FREE_ZONE(nd.nd_root.ndm_mntfrom,
920 MAXPATHLEN, M_NAMEI);
921 if (nd.nd_root.ndm_path)
922 FREE_ZONE(nd.nd_root.ndm_path,
923 MAXPATHLEN, M_NAMEI);
924 if (nd.nd_private.ndm_mntfrom)
925 FREE_ZONE(nd.nd_private.ndm_mntfrom,
926 MAXPATHLEN, M_NAMEI);
927 if (nd.nd_private.ndm_path)
928 FREE_ZONE(nd.nd_private.ndm_path,
929 MAXPATHLEN, M_NAMEI);
930 return (error);
931 }
932 if (v3) {
933 if (sotype == SOCK_STREAM) {
934 printf("NFS mount (v3,TCP) failed with error %d, trying UDP...\n", error);
935 sotype = SOCK_DGRAM;
936 goto tryagain;
937 }
938 printf("NFS mount (v3,UDP) failed with error %d, trying v2...\n", error);
939 v3 = 0;
940 sotype = SOCK_STREAM;
941 goto tryagain;
942 } else if (sotype == SOCK_STREAM) {
943 printf("NFS mount (v2,TCP) failed with error %d, trying UDP...\n", error);
944 sotype = SOCK_DGRAM;
945 goto tryagain;
946 } else {
947 printf("NFS mount (v2,UDP) failed with error %d, giving up...\n", error);
948 }
949 switch(error) {
950 case EPROGUNAVAIL:
951 panic("NFS mount failed: NFS server mountd not responding, check server configuration: %s", PE_boot_args());
952 case EACCES:
953 case EPERM:
954 panic("NFS mount failed: NFS server refused mount, check server configuration: %s", PE_boot_args());
955 default:
956 panic("NFS mount failed with error %d, check configuration: %s", error, PE_boot_args());
957 }
958 }
959
960 ctx = vfs_context_kernel();
961
962 /*
963 * Create the root mount point.
964 */
965#if !defined(NO_MOUNT_PRIVATE)
966 {
967 //PWC hack until we have a real "mount" tool to remount root rw
968 int rw_root=0;
969 int flags = MNT_ROOTFS|MNT_RDONLY;
970 PE_parse_boot_argn("-rwroot_hack", &rw_root, sizeof (rw_root));
971 if(rw_root)
972 {
973 flags = MNT_ROOTFS;
974 kprintf("-rwroot_hack in effect: mounting root fs read/write\n");
975 }
976
977 if ((error = nfs_mount_diskless(&nd.nd_root, "/", flags, &vp, &mp, ctx)))
978#else
979 if ((error = nfs_mount_diskless(&nd.nd_root, "/", MNT_ROOTFS, &vp, &mp, ctx)))
980#endif /* NO_MOUNT_PRIVATE */
981 {
982 if (v3) {
983 if (sotype == SOCK_STREAM) {
984 printf("NFS root mount (v3,TCP) failed with %d, trying UDP...\n", error);
985 sotype = SOCK_DGRAM;
986 goto tryagain;
987 }
988 printf("NFS root mount (v3,UDP) failed with %d, trying v2...\n", error);
989 v3 = 0;
990 sotype = SOCK_STREAM;
991 goto tryagain;
992 } else if (sotype == SOCK_STREAM) {
993 printf("NFS root mount (v2,TCP) failed with %d, trying UDP...\n", error);
994 sotype = SOCK_DGRAM;
995 goto tryagain;
996 } else {
997 printf("NFS root mount (v2,UDP) failed with error %d, giving up...\n", error);
998 }
999 panic("NFS root mount failed with error %d, check configuration: %s\n", error, PE_boot_args());
1000 }
1001 }
1002 printf("root on %s\n", nd.nd_root.ndm_mntfrom);
1003
1004 vfs_unbusy(mp);
1005 mount_list_add(mp);
1006 rootvp = vp;
1007
1008#if !defined(NO_MOUNT_PRIVATE)
1009 if (nd.nd_private.ndm_saddr.sin_addr.s_addr) {
1010 error = nfs_mount_diskless_private(&nd.nd_private, "/private",
1011 0, &vppriv, &mppriv, ctx);
1012 if (error)
1013 panic("NFS /private mount failed with error %d, check configuration: %s\n", error, PE_boot_args());
1014 printf("private on %s\n", nd.nd_private.ndm_mntfrom);
1015
1016 vfs_unbusy(mppriv);
1017 mount_list_add(mppriv);
1018 }
1019
1020#endif /* NO_MOUNT_PRIVATE */
1021
1022 if (nd.nd_root.ndm_mntfrom)
1023 FREE_ZONE(nd.nd_root.ndm_mntfrom, MAXPATHLEN, M_NAMEI);
1024 if (nd.nd_root.ndm_path)
1025 FREE_ZONE(nd.nd_root.ndm_path, MAXPATHLEN, M_NAMEI);
1026 if (nd.nd_private.ndm_mntfrom)
1027 FREE_ZONE(nd.nd_private.ndm_mntfrom, MAXPATHLEN, M_NAMEI);
1028 if (nd.nd_private.ndm_path)
1029 FREE_ZONE(nd.nd_private.ndm_path, MAXPATHLEN, M_NAMEI);
1030
1031 /* Get root attributes (for the time). */
1032 error = nfs_getattr(VTONFS(vp), NULL, ctx, NGA_UNCACHED);
1033 if (error)
1034 panic("NFS mount: failed to get attributes for root directory, error %d, check server", error);
1035 return (0);
1036}
1037
1038/*
1039 * Internal version of mount system call for diskless setup.
1040 */
1041static int
1042nfs_mount_diskless(
1043 struct nfs_dlmount *ndmntp,
1044 const char *mntname,
1045 int mntflag,
1046 vnode_t *vpp,
1047 mount_t *mpp,
1048 vfs_context_t ctx)
1049{
1050 mount_t mp;
1051 int error, numcomps;
1052 char *xdrbuf, *p, *cp, *frompath, *endserverp;
1053 char uaddr[MAX_IPv4_STR_LEN];
1054 struct xdrbuf xb;
1055 uint32_t mattrs[NFS_MATTR_BITMAP_LEN];
1056 uint32_t mflags_mask[NFS_MFLAG_BITMAP_LEN];
1057 uint32_t mflags[NFS_MFLAG_BITMAP_LEN];
1058 uint32_t argslength_offset, attrslength_offset, end_offset;
1059
1060 if ((error = vfs_rootmountalloc("nfs", ndmntp->ndm_mntfrom, &mp))) {
1061 printf("nfs_mount_diskless: NFS not configured\n");
1062 return (error);
1063 }
1064
1065 mp->mnt_flag |= mntflag;
1066 if (!(mntflag & MNT_RDONLY))
1067 mp->mnt_flag &= ~MNT_RDONLY;
1068
1069 /* find the server-side path being mounted */
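	/* e.g. "[fe80::1]:/exports/root" yields server name "[fe80::1]" and path components "exports", "root" */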
1070 frompath = ndmntp->ndm_mntfrom;
1071 if (*frompath == '[') { /* skip IPv6 literal address */
1072 while (*frompath && (*frompath != ']'))
1073 frompath++;
1074 if (*frompath == ']')
1075 frompath++;
1076 }
1077 while (*frompath && (*frompath != ':'))
1078 frompath++;
1079 endserverp = frompath;
1080 while (*frompath && (*frompath == ':'))
1081 frompath++;
1082 /* count fs location path components */
1083 p = frompath;
1084 while (*p && (*p == '/'))
1085 p++;
1086 numcomps = 0;
1087 while (*p) {
1088 numcomps++;
1089 while (*p && (*p != '/'))
1090 p++;
1091 while (*p && (*p == '/'))
1092 p++;
1093 }
1094
1095 /* convert address to universal address string */
1096 if (inet_ntop(AF_INET, &ndmntp->ndm_saddr.sin_addr, uaddr, sizeof(uaddr)) != uaddr) {
1097 printf("nfs_mount_diskless: bad address\n");
1098 return (EINVAL);
1099 }
1100
1101 /* prepare mount attributes */
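	/* (each bit set here has a matching value added to the XDR buffer below, in the same order) */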
1102 NFS_BITMAP_ZERO(mattrs, NFS_MATTR_BITMAP_LEN);
1103 NFS_BITMAP_SET(mattrs, NFS_MATTR_NFS_VERSION);
1104 NFS_BITMAP_SET(mattrs, NFS_MATTR_SOCKET_TYPE);
1105 NFS_BITMAP_SET(mattrs, NFS_MATTR_NFS_PORT);
1106 NFS_BITMAP_SET(mattrs, NFS_MATTR_FH);
1107 NFS_BITMAP_SET(mattrs, NFS_MATTR_FS_LOCATIONS);
1108 NFS_BITMAP_SET(mattrs, NFS_MATTR_MNTFLAGS);
1109
1110 /* prepare mount flags */
1111 NFS_BITMAP_ZERO(mflags_mask, NFS_MFLAG_BITMAP_LEN);
1112 NFS_BITMAP_ZERO(mflags, NFS_MFLAG_BITMAP_LEN);
1113 NFS_BITMAP_SET(mflags_mask, NFS_MFLAG_RESVPORT);
1114 NFS_BITMAP_SET(mflags, NFS_MFLAG_RESVPORT);
1115
1116 /* build xdr buffer */
1117 xb_init_buffer(&xb, NULL, 0);
1118 xb_add_32(error, &xb, NFS_ARGSVERSION_XDR);
1119 argslength_offset = xb_offset(&xb);
1120 xb_add_32(error, &xb, 0); // args length
1121 xb_add_32(error, &xb, NFS_XDRARGS_VERSION_0);
1122 xb_add_bitmap(error, &xb, mattrs, NFS_MATTR_BITMAP_LEN);
1123 attrslength_offset = xb_offset(&xb);
1124 xb_add_32(error, &xb, 0); // attrs length
1125 xb_add_32(error, &xb, ndmntp->ndm_nfsv3 ? 3 : 2); // NFS version
1126 xb_add_string(error, &xb, ((ndmntp->ndm_sotype == SOCK_DGRAM) ? "udp" : "tcp"), 3);
1127 xb_add_32(error, &xb, ntohs(ndmntp->ndm_saddr.sin_port)); // NFS port
1128 xb_add_fh(error, &xb, &ndmntp->ndm_fh[0], ndmntp->ndm_fhlen);
1129 /* fs location */
1130 xb_add_32(error, &xb, 1); /* fs location count */
1131 xb_add_32(error, &xb, 1); /* server count */
1132 xb_add_string(error, &xb, ndmntp->ndm_mntfrom, (endserverp - ndmntp->ndm_mntfrom)); /* server name */
1133 xb_add_32(error, &xb, 1); /* address count */
1134 xb_add_string(error, &xb, uaddr, strlen(uaddr)); /* address */
1135 xb_add_32(error, &xb, 0); /* empty server info */
1136 xb_add_32(error, &xb, numcomps); /* pathname component count */
1137 p = frompath;
1138 while (*p && (*p == '/'))
1139 p++;
1140 while (*p) {
1141 cp = p;
1142 while (*p && (*p != '/'))
1143 p++;
1144 xb_add_string(error, &xb, cp, (p - cp)); /* component */
1145 if (error)
1146 break;
1147 while (*p && (*p == '/'))
1148 p++;
1149 }
1150 xb_add_32(error, &xb, 0); /* empty fsl info */
1151 xb_add_32(error, &xb, mntflag); /* MNT flags */
1152 xb_build_done(error, &xb);
1153
1154 /* update opaque counts */
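	/*
	 * The args length and attrs length fields were emitted above as zero
	 * placeholders; now that the final size is known, seek back and patch
	 * in the real values.
	 */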
1155 end_offset = xb_offset(&xb);
1156 if (!error) {
1157 error = xb_seek(&xb, argslength_offset);
1158 xb_add_32(error, &xb, end_offset - argslength_offset + XDRWORD/*version*/);
1159 }
1160 if (!error) {
1161 error = xb_seek(&xb, attrslength_offset);
1162 xb_add_32(error, &xb, end_offset - attrslength_offset - XDRWORD/*don't include length field*/);
1163 }
1164 if (error) {
1165 printf("nfs_mount_diskless: error %d assembling mount args\n", error);
1166 xb_cleanup(&xb);
1167 return (error);
1168 }
1169 /* grab the assembled buffer */
1170 xdrbuf = xb_buffer_base(&xb);
1171 xb.xb_flags &= ~XB_CLEANUP;
1172
1173 /* do the mount */
1174 if ((error = mountnfs(xdrbuf, mp, ctx, vpp))) {
1175 printf("nfs_mountroot: mount %s failed: %d\n", mntname, error);
1176 // XXX vfs_rootmountfailed(mp);
1177 mount_list_lock();
1178 mp->mnt_vtable->vfc_refcount--;
1179 mount_list_unlock();
1180 vfs_unbusy(mp);
1181 mount_lock_destroy(mp);
1182#if CONFIG_MACF
1183 mac_mount_label_destroy(mp);
1184#endif
1185 FREE_ZONE(mp, sizeof(struct mount), M_MOUNT);
1186 } else {
1187 *mpp = mp;
1188 }
1189 xb_cleanup(&xb);
1190 return (error);
1191}
1192
1193#if !defined(NO_MOUNT_PRIVATE)
1194/*
1195 * Internal version of mount system call to mount "/private"
1196 * separately in diskless setup
1197 */
1198static int
1199nfs_mount_diskless_private(
1200 struct nfs_dlmount *ndmntp,
1201 const char *mntname,
1202 int mntflag,
1203 vnode_t *vpp,
1204 mount_t *mpp,
1205 vfs_context_t ctx)
1206{
1207 mount_t mp;
1208 int error, numcomps;
1209 proc_t procp;
1210 struct vfstable *vfsp;
1211 struct nameidata nd;
1212 vnode_t vp;
1213 char *xdrbuf = NULL, *p, *cp, *frompath, *endserverp;
1214 char uaddr[MAX_IPv4_STR_LEN];
1215 struct xdrbuf xb;
1216 uint32_t mattrs[NFS_MATTR_BITMAP_LEN];
1217 uint32_t mflags_mask[NFS_MFLAG_BITMAP_LEN], mflags[NFS_MFLAG_BITMAP_LEN];
1218 uint32_t argslength_offset, attrslength_offset, end_offset;
1219
1220 procp = current_proc(); /* XXX */
1221 xb_init(&xb, 0);
1222
1223 {
1224 /*
1225	 * mimic main()! Temporarily set up rootvnode and other stuff so
1226 * that namei works. Need to undo this because main() does it, too
1227 */
1228 struct filedesc *fdp; /* pointer to file descriptor state */
1229 fdp = procp->p_fd;
1230 mountlist.tqh_first->mnt_flag |= MNT_ROOTFS;
1231
1232 /* Get the vnode for '/'. Set fdp->fd_cdir to reference it. */
1233 if (VFS_ROOT(mountlist.tqh_first, &rootvnode, NULL))
1234 panic("cannot find root vnode");
1235 error = vnode_ref(rootvnode);
1236 if (error) {
1237 printf("nfs_mountroot: vnode_ref() failed on root vnode!\n");
1238 goto out;
1239 }
1240 fdp->fd_cdir = rootvnode;
1241 fdp->fd_rdir = NULL;
1242 }
1243
1244 /*
1245 * Get vnode to be covered
1246 */
1247 NDINIT(&nd, LOOKUP, OP_LOOKUP, FOLLOW | LOCKLEAF, UIO_SYSSPACE,
1248 CAST_USER_ADDR_T(mntname), ctx);
1249 if ((error = namei(&nd))) {
1250 printf("nfs_mountroot: private namei failed!\n");
1251 goto out;
1252 }
1253 {
1254 /* undo vnode_ref() in mimic main()! */
1255 vnode_rele(rootvnode);
1256 }
1257 nameidone(&nd);
1258 vp = nd.ni_vp;
1259
1260 if ((error = VNOP_FSYNC(vp, MNT_WAIT, ctx)) ||
1261 (error = buf_invalidateblks(vp, BUF_WRITE_DATA, 0, 0))) {
1262 vnode_put(vp);
1263 goto out;
1264 }
1265 if (vnode_vtype(vp) != VDIR) {
1266 vnode_put(vp);
1267 error = ENOTDIR;
1268 goto out;
1269 }
1270 for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next)
1271 if (!strncmp(vfsp->vfc_name, "nfs", sizeof(vfsp->vfc_name)))
1272 break;
1273 if (vfsp == NULL) {
1274 printf("nfs_mountroot: private NFS not configured\n");
1275 vnode_put(vp);
1276 error = ENODEV;
1277 goto out;
1278 }
1279 if (vnode_mountedhere(vp) != NULL) {
1280 vnode_put(vp);
1281 error = EBUSY;
1282 goto out;
1283 }
1284
1285 /*
1286 * Allocate and initialize the filesystem.
1287 */
1288 mp = _MALLOC_ZONE((u_int32_t)sizeof(struct mount), M_MOUNT, M_WAITOK);
1289 if (!mp) {
1290 printf("nfs_mountroot: unable to allocate mount structure\n");
1291 vnode_put(vp);
1292 error = ENOMEM;
1293 goto out;
1294 }
1295 bzero((char *)mp, sizeof(struct mount));
1296
1297 /* Initialize the default IO constraints */
1298 mp->mnt_maxreadcnt = mp->mnt_maxwritecnt = MAXPHYS;
1299 mp->mnt_segreadcnt = mp->mnt_segwritecnt = 32;
1300 mp->mnt_ioflags = 0;
1301 mp->mnt_realrootvp = NULLVP;
1302 mp->mnt_authcache_ttl = CACHED_LOOKUP_RIGHT_TTL;
1303
1304 mount_lock_init(mp);
1305 TAILQ_INIT(&mp->mnt_vnodelist);
1306 TAILQ_INIT(&mp->mnt_workerqueue);
1307 TAILQ_INIT(&mp->mnt_newvnodes);
1308 (void)vfs_busy(mp, LK_NOWAIT);
1309 TAILQ_INIT(&mp->mnt_vnodelist);
1310 mount_list_lock();
1311 vfsp->vfc_refcount++;
1312 mount_list_unlock();
1313 mp->mnt_vtable = vfsp;
1314 mp->mnt_op = vfsp->vfc_vfsops;
1315 // mp->mnt_stat.f_type = vfsp->vfc_typenum;
1316 mp->mnt_flag = mntflag;
1317 mp->mnt_flag |= vfsp->vfc_flags & MNT_VISFLAGMASK;
1318 strncpy(mp->mnt_vfsstat.f_fstypename, vfsp->vfc_name, MFSNAMELEN-1);
1319 vp->v_mountedhere = mp;
1320 mp->mnt_vnodecovered = vp;
1321 vp = NULLVP;
1322 mp->mnt_vfsstat.f_owner = kauth_cred_getuid(kauth_cred_get());
1323 (void) copystr(mntname, mp->mnt_vfsstat.f_mntonname, MAXPATHLEN - 1, 0);
1324 (void) copystr(ndmntp->ndm_mntfrom, mp->mnt_vfsstat.f_mntfromname, MAXPATHLEN - 1, 0);
1325#if CONFIG_MACF
1326 mac_mount_label_init(mp);
1327 mac_mount_label_associate(ctx, mp);
1328#endif
1329
1330 /* find the server-side path being mounted */
1331 frompath = ndmntp->ndm_mntfrom;
1332 if (*frompath == '[') { /* skip IPv6 literal address */
1333 while (*frompath && (*frompath != ']'))
1334 frompath++;
1335 if (*frompath == ']')
1336 frompath++;
1337 }
1338 while (*frompath && (*frompath != ':'))
1339 frompath++;
1340 endserverp = frompath;
1341 while (*frompath && (*frompath == ':'))
1342 frompath++;
1343 /* count fs location path components */
1344 p = frompath;
1345 while (*p && (*p == '/'))
1346 p++;
1347 numcomps = 0;
1348 while (*p) {
1349 numcomps++;
1350 while (*p && (*p != '/'))
1351 p++;
1352 while (*p && (*p == '/'))
1353 p++;
1354 }
1355
1356 /* convert address to universal address string */
1357 if (inet_ntop(AF_INET, &ndmntp->ndm_saddr.sin_addr, uaddr, sizeof(uaddr)) != uaddr) {
1358 printf("nfs_mountroot: bad address\n");
1359 error = EINVAL;
1360 goto out;
1361 }
1362
1363 /* prepare mount attributes */
1364 NFS_BITMAP_ZERO(mattrs, NFS_MATTR_BITMAP_LEN);
1365 NFS_BITMAP_SET(mattrs, NFS_MATTR_NFS_VERSION);
1366 NFS_BITMAP_SET(mattrs, NFS_MATTR_SOCKET_TYPE);
1367 NFS_BITMAP_SET(mattrs, NFS_MATTR_NFS_PORT);
1368 NFS_BITMAP_SET(mattrs, NFS_MATTR_FH);
1369 NFS_BITMAP_SET(mattrs, NFS_MATTR_FS_LOCATIONS);
1370 NFS_BITMAP_SET(mattrs, NFS_MATTR_MNTFLAGS);
1371
1372 /* prepare mount flags */
1373 NFS_BITMAP_ZERO(mflags_mask, NFS_MFLAG_BITMAP_LEN);
1374 NFS_BITMAP_ZERO(mflags, NFS_MFLAG_BITMAP_LEN);
1375 NFS_BITMAP_SET(mflags_mask, NFS_MFLAG_RESVPORT);
1376 NFS_BITMAP_SET(mflags, NFS_MFLAG_RESVPORT);
1377
1378 /* build xdr buffer */
1379 xb_init_buffer(&xb, NULL, 0);
1380 xb_add_32(error, &xb, NFS_ARGSVERSION_XDR);
1381 argslength_offset = xb_offset(&xb);
1382 xb_add_32(error, &xb, 0); // args length
1383 xb_add_32(error, &xb, NFS_XDRARGS_VERSION_0);
1384 xb_add_bitmap(error, &xb, mattrs, NFS_MATTR_BITMAP_LEN);
1385 attrslength_offset = xb_offset(&xb);
1386 xb_add_32(error, &xb, 0); // attrs length
1387 xb_add_32(error, &xb, ndmntp->ndm_nfsv3 ? 3 : 2); // NFS version
1388 xb_add_string(error, &xb, ((ndmntp->ndm_sotype == SOCK_DGRAM) ? "udp" : "tcp"), 3);
1389 xb_add_32(error, &xb, ntohs(ndmntp->ndm_saddr.sin_port)); // NFS port
1390 xb_add_fh(error, &xb, &ndmntp->ndm_fh[0], ndmntp->ndm_fhlen);
1391 /* fs location */
1392 xb_add_32(error, &xb, 1); /* fs location count */
1393 xb_add_32(error, &xb, 1); /* server count */
1394 xb_add_string(error, &xb, ndmntp->ndm_mntfrom, (endserverp - ndmntp->ndm_mntfrom)); /* server name */
1395 xb_add_32(error, &xb, 1); /* address count */
1396 xb_add_string(error, &xb, uaddr, strlen(uaddr)); /* address */
1397 xb_add_32(error, &xb, 0); /* empty server info */
1398 xb_add_32(error, &xb, numcomps); /* pathname component count */
1399 p = frompath;
1400 while (*p && (*p == '/'))
1401 p++;
1402 while (*p) {
1403 cp = p;
1404 while (*p && (*p != '/'))
1405 p++;
1406 xb_add_string(error, &xb, cp, (p - cp)); /* component */
1407 if (error)
1408 break;
1409 while (*p && (*p == '/'))
1410 p++;
1411 }
1412 xb_add_32(error, &xb, 0); /* empty fsl info */
1413 xb_add_32(error, &xb, mntflag); /* MNT flags */
1414 xb_build_done(error, &xb);
1415
1416 /* update opaque counts */
1417 end_offset = xb_offset(&xb);
1418 if (!error) {
1419 error = xb_seek(&xb, argslength_offset);
1420 xb_add_32(error, &xb, end_offset - argslength_offset + XDRWORD/*version*/);
1421 }
1422 if (!error) {
1423 error = xb_seek(&xb, attrslength_offset);
1424 xb_add_32(error, &xb, end_offset - attrslength_offset - XDRWORD/*don't include length field*/);
1425 }
1426 if (error) {
1427 printf("nfs_mountroot: error %d assembling mount args\n", error);
1428 goto out;
1429 }
1430 /* grab the assembled buffer */
1431 xdrbuf = xb_buffer_base(&xb);
1432 xb.xb_flags &= ~XB_CLEANUP;
1433
1434 /* do the mount */
1435 if ((error = mountnfs(xdrbuf, mp, ctx, &vp))) {
1436 printf("nfs_mountroot: mount %s failed: %d\n", mntname, error);
1437 vnode_put(mp->mnt_vnodecovered);
1438 mount_list_lock();
1439 vfsp->vfc_refcount--;
1440 mount_list_unlock();
1441 vfs_unbusy(mp);
1442 mount_lock_destroy(mp);
1443#if CONFIG_MACF
1444 mac_mount_label_destroy(mp);
1445#endif
1446 FREE_ZONE(mp, sizeof (struct mount), M_MOUNT);
1447 goto out;
1448 }
1449
1450 *mpp = mp;
1451 *vpp = vp;
1452out:
1453 xb_cleanup(&xb);
1454 return (error);
1455}
1456#endif /* NO_MOUNT_PRIVATE */
1457
1458/*
1459 * Convert old style NFS mount args to XDR.
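 * The old binary nfs_args structure (whichever historical version the
 * caller passed) is copied in, widened from the 32-bit layout when the
 * caller is a 32-bit process, and then re-encoded as the XDR mount
 * argument buffer that mountnfs() consumes.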
1460 */
1461static int
1462nfs_convert_old_nfs_args(mount_t mp, user_addr_t data, vfs_context_t ctx, int argsversion, int inkernel, char **xdrbufp)
1463{
1464 int error = 0, args64bit, argsize, numcomps;
1465 struct user_nfs_args args;
1466 struct nfs_args tempargs;
1467 caddr_t argsp;
1468 size_t len;
1469 u_char nfh[NFS4_FHSIZE];
1470 char *mntfrom, *endserverp, *frompath, *p, *cp;
1471 struct sockaddr_storage ss;
1472 void *sinaddr;
1473 char uaddr[MAX_IPv6_STR_LEN];
1474 uint32_t mattrs[NFS_MATTR_BITMAP_LEN];
1475 uint32_t mflags_mask[NFS_MFLAG_BITMAP_LEN], mflags[NFS_MFLAG_BITMAP_LEN];
1476 uint32_t nfsvers, nfslockmode = 0, argslength_offset, attrslength_offset, end_offset;
1477 struct xdrbuf xb;
1478
1479 *xdrbufp = NULL;
1480
1481 /* allocate a temporary buffer for mntfrom */
1482 MALLOC_ZONE(mntfrom, char*, MAXPATHLEN, M_NAMEI, M_WAITOK);
1483 if (!mntfrom)
1484 return (ENOMEM);
1485
1486 args64bit = (inkernel || vfs_context_is64bit(ctx));
1487 argsp = args64bit ? (void*)&args : (void*)&tempargs;
1488
1489 argsize = args64bit ? sizeof(args) : sizeof(tempargs);
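	/*
	 * The cases below intentionally fall through: starting from the full
	 * (version 6) structure size, subtract the bytes each later version
	 * appended until only the caller's version of the structure remains.
	 */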
1490 switch (argsversion) {
1491 case 3:
1492 argsize -= NFS_ARGSVERSION4_INCSIZE;
1493 case 4:
1494 argsize -= NFS_ARGSVERSION5_INCSIZE;
1495 case 5:
1496 argsize -= NFS_ARGSVERSION6_INCSIZE;
1497 case 6:
1498 break;
1499 default:
1500 error = EPROGMISMATCH;
1501 goto nfsmout;
1502 }
1503
1504 /* read in the structure */
1505 if (inkernel)
1506 bcopy(CAST_DOWN(void *, data), argsp, argsize);
1507 else
1508 error = copyin(data, argsp, argsize);
1509 nfsmout_if(error);
1510
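	/*
	 * A 32-bit caller handed us the 32-bit nfs_args layout (tempargs);
	 * widen each field into the 64-bit args structure so the rest of this
	 * routine only has to deal with one layout.
	 */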
1511 if (!args64bit) {
1512 args.addrlen = tempargs.addrlen;
1513 args.sotype = tempargs.sotype;
1514 args.proto = tempargs.proto;
1515 args.fhsize = tempargs.fhsize;
1516 args.flags = tempargs.flags;
1517 args.wsize = tempargs.wsize;
1518 args.rsize = tempargs.rsize;
1519 args.readdirsize = tempargs.readdirsize;
1520 args.timeo = tempargs.timeo;
1521 args.retrans = tempargs.retrans;
1522 args.maxgrouplist = tempargs.maxgrouplist;
1523 args.readahead = tempargs.readahead;
1524 args.leaseterm = tempargs.leaseterm;
1525 args.deadthresh = tempargs.deadthresh;
1526 args.addr = CAST_USER_ADDR_T(tempargs.addr);
1527 args.fh = CAST_USER_ADDR_T(tempargs.fh);
1528 args.hostname = CAST_USER_ADDR_T(tempargs.hostname);
1529 if (args.version >= 4) {
1530 args.acregmin = tempargs.acregmin;
1531 args.acregmax = tempargs.acregmax;
1532 args.acdirmin = tempargs.acdirmin;
1533 args.acdirmax = tempargs.acdirmax;
1534 }
1535 if (args.version >= 5)
1536 args.auth = tempargs.auth;
1537 if (args.version >= 6)
1538 args.deadtimeout = tempargs.deadtimeout;
1539 }
1540
1541 if ((args.fhsize < 0) || (args.fhsize > NFS4_FHSIZE)) {
1542 error = EINVAL;
1543 goto nfsmout;
1544 }
1545 if (args.fhsize > 0) {
1546 if (inkernel)
1547 bcopy(CAST_DOWN(void *, args.fh), (caddr_t)nfh, args.fhsize);
1548 else
1549 error = copyin(args.fh, (caddr_t)nfh, args.fhsize);
1550 nfsmout_if(error);
1551 }
1552
1553 if (inkernel)
1554 error = copystr(CAST_DOWN(void *, args.hostname), mntfrom, MAXPATHLEN-1, &len);
1555 else
1556 error = copyinstr(args.hostname, mntfrom, MAXPATHLEN-1, &len);
1557 nfsmout_if(error);
1558 bzero(&mntfrom[len], MAXPATHLEN - len);
1559
1560 /* find the server-side path being mounted */
1561 frompath = mntfrom;
1562 if (*frompath == '[') { /* skip IPv6 literal address */
1563 while (*frompath && (*frompath != ']'))
1564 frompath++;
1565 if (*frompath == ']')
1566 frompath++;
1567 }
1568 while (*frompath && (*frompath != ':'))
1569 frompath++;
1570 endserverp = frompath;
1571 while (*frompath && (*frompath == ':'))
1572 frompath++;
1573 /* count fs location path components */
1574 p = frompath;
1575 while (*p && (*p == '/'))
1576 p++;
1577 numcomps = 0;
1578 while (*p) {
1579 numcomps++;
1580 while (*p && (*p != '/'))
1581 p++;
1582 while (*p && (*p == '/'))
1583 p++;
1584 }
1585
1586 /* copy socket address */
1587 if (inkernel)
1588 bcopy(CAST_DOWN(void *, args.addr), &ss, args.addrlen);
1589 else {
1590 if ((size_t)args.addrlen > sizeof (struct sockaddr_storage))
1591 error = EINVAL;
1592 else
1593 error = copyin(args.addr, &ss, args.addrlen);
1594 }
1595 nfsmout_if(error);
1596 ss.ss_len = args.addrlen;
1597
1598 /* convert address to universal address string */
1599 if (ss.ss_family == AF_INET)
1600 sinaddr = &((struct sockaddr_in*)&ss)->sin_addr;
1601 else if (ss.ss_family == AF_INET6)
1602 sinaddr = &((struct sockaddr_in6*)&ss)->sin6_addr;
1603 else
1604 sinaddr = NULL;
1605 if (!sinaddr || (inet_ntop(ss.ss_family, sinaddr, uaddr, sizeof(uaddr)) != uaddr)) {
1606 error = EINVAL;
1607 goto nfsmout;
1608 }
1609
1610 /* prepare mount flags */
1611 NFS_BITMAP_ZERO(mflags_mask, NFS_MFLAG_BITMAP_LEN);
1612 NFS_BITMAP_ZERO(mflags, NFS_MFLAG_BITMAP_LEN);
1613 NFS_BITMAP_SET(mflags_mask, NFS_MFLAG_SOFT);
1614 NFS_BITMAP_SET(mflags_mask, NFS_MFLAG_INTR);
1615 NFS_BITMAP_SET(mflags_mask, NFS_MFLAG_RESVPORT);
1616 NFS_BITMAP_SET(mflags_mask, NFS_MFLAG_NOCONNECT);
1617 NFS_BITMAP_SET(mflags_mask, NFS_MFLAG_DUMBTIMER);
1618 NFS_BITMAP_SET(mflags_mask, NFS_MFLAG_CALLUMNT);
1619 NFS_BITMAP_SET(mflags_mask, NFS_MFLAG_RDIRPLUS);
1620 NFS_BITMAP_SET(mflags_mask, NFS_MFLAG_NONEGNAMECACHE);
1621 NFS_BITMAP_SET(mflags_mask, NFS_MFLAG_MUTEJUKEBOX);
1622 NFS_BITMAP_SET(mflags_mask, NFS_MFLAG_NOQUOTA);
1623 if (args.flags & NFSMNT_SOFT)
1624 NFS_BITMAP_SET(mflags, NFS_MFLAG_SOFT);
1625 if (args.flags & NFSMNT_INT)
1626 NFS_BITMAP_SET(mflags, NFS_MFLAG_INTR);
1627 if (args.flags & NFSMNT_RESVPORT)
1628 NFS_BITMAP_SET(mflags, NFS_MFLAG_RESVPORT);
1629 if (args.flags & NFSMNT_NOCONN)
1630 NFS_BITMAP_SET(mflags, NFS_MFLAG_NOCONNECT);
1631 if (args.flags & NFSMNT_DUMBTIMR)
1632 NFS_BITMAP_SET(mflags, NFS_MFLAG_DUMBTIMER);
1633 if (args.flags & NFSMNT_CALLUMNT)
1634 NFS_BITMAP_SET(mflags, NFS_MFLAG_CALLUMNT);
1635 if (args.flags & NFSMNT_RDIRPLUS)
1636 NFS_BITMAP_SET(mflags, NFS_MFLAG_RDIRPLUS);
1637 if (args.flags & NFSMNT_NONEGNAMECACHE)
1638 NFS_BITMAP_SET(mflags, NFS_MFLAG_NONEGNAMECACHE);
1639 if (args.flags & NFSMNT_MUTEJUKEBOX)
1640 NFS_BITMAP_SET(mflags, NFS_MFLAG_MUTEJUKEBOX);
1641 if (args.flags & NFSMNT_NOQUOTA)
1642 NFS_BITMAP_SET(mflags, NFS_MFLAG_NOQUOTA);
1643
1644 /* prepare mount attributes */
1645 NFS_BITMAP_ZERO(mattrs, NFS_MATTR_BITMAP_LEN);
1646 NFS_BITMAP_SET(mattrs, NFS_MATTR_FLAGS);
1647 NFS_BITMAP_SET(mattrs, NFS_MATTR_NFS_VERSION);
1648 NFS_BITMAP_SET(mattrs, NFS_MATTR_SOCKET_TYPE);
1649 NFS_BITMAP_SET(mattrs, NFS_MATTR_NFS_PORT);
1650 NFS_BITMAP_SET(mattrs, NFS_MATTR_FH);
1651 NFS_BITMAP_SET(mattrs, NFS_MATTR_FS_LOCATIONS);
1652 NFS_BITMAP_SET(mattrs, NFS_MATTR_MNTFLAGS);
1653 NFS_BITMAP_SET(mattrs, NFS_MATTR_MNTFROM);
1654 if (args.flags & NFSMNT_NFSV4)
1655 nfsvers = 4;
1656 else if (args.flags & NFSMNT_NFSV3)
1657 nfsvers = 3;
1658 else
1659 nfsvers = 2;
1660 if ((args.flags & NFSMNT_RSIZE) && (args.rsize > 0))
1661 NFS_BITMAP_SET(mattrs, NFS_MATTR_READ_SIZE);
1662 if ((args.flags & NFSMNT_WSIZE) && (args.wsize > 0))
1663 NFS_BITMAP_SET(mattrs, NFS_MATTR_WRITE_SIZE);
1664 if ((args.flags & NFSMNT_TIMEO) && (args.timeo > 0))
1665 NFS_BITMAP_SET(mattrs, NFS_MATTR_REQUEST_TIMEOUT);
1666 if ((args.flags & NFSMNT_RETRANS) && (args.retrans > 0))
1667 NFS_BITMAP_SET(mattrs, NFS_MATTR_SOFT_RETRY_COUNT);
1668 if ((args.flags & NFSMNT_MAXGRPS) && (args.maxgrouplist > 0))
1669 NFS_BITMAP_SET(mattrs, NFS_MATTR_MAX_GROUP_LIST);
1670 if ((args.flags & NFSMNT_READAHEAD) && (args.readahead > 0))
1671 NFS_BITMAP_SET(mattrs, NFS_MATTR_READAHEAD);
1672 if ((args.flags & NFSMNT_READDIRSIZE) && (args.readdirsize > 0))
1673 NFS_BITMAP_SET(mattrs, NFS_MATTR_READDIR_SIZE);
1674 if ((args.flags & NFSMNT_NOLOCKS) ||
1675 (args.flags & NFSMNT_LOCALLOCKS)) {
1676 NFS_BITMAP_SET(mattrs, NFS_MATTR_LOCK_MODE);
1677 if (args.flags & NFSMNT_NOLOCKS)
1678 nfslockmode = NFS_LOCK_MODE_DISABLED;
1679 else if (args.flags & NFSMNT_LOCALLOCKS)
1680 nfslockmode = NFS_LOCK_MODE_LOCAL;
1681 else
1682 nfslockmode = NFS_LOCK_MODE_ENABLED;
1683 }
1684 if (args.version >= 4) {
1685 if ((args.flags & NFSMNT_ACREGMIN) && (args.acregmin > 0))
1686 NFS_BITMAP_SET(mattrs, NFS_MATTR_ATTRCACHE_REG_MIN);
1687 if ((args.flags & NFSMNT_ACREGMAX) && (args.acregmax > 0))
1688 NFS_BITMAP_SET(mattrs, NFS_MATTR_ATTRCACHE_REG_MAX);
1689 if ((args.flags & NFSMNT_ACDIRMIN) && (args.acdirmin > 0))
1690 NFS_BITMAP_SET(mattrs, NFS_MATTR_ATTRCACHE_DIR_MIN);
1691 if ((args.flags & NFSMNT_ACDIRMAX) && (args.acdirmax > 0))
1692 NFS_BITMAP_SET(mattrs, NFS_MATTR_ATTRCACHE_DIR_MAX);
1693 }
1694 if (args.version >= 5) {
1695 if ((args.flags & NFSMNT_SECFLAVOR) || (args.flags & NFSMNT_SECSYSOK))
1696 NFS_BITMAP_SET(mattrs, NFS_MATTR_SECURITY);
1697 }
1698 if (args.version >= 6) {
1699 if ((args.flags & NFSMNT_DEADTIMEOUT) && (args.deadtimeout > 0))
1700 NFS_BITMAP_SET(mattrs, NFS_MATTR_DEAD_TIMEOUT);
1701 }
1702
1703 /* build xdr buffer */
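/* the args length and attrs length words are written as zero placeholders here and backfilled with the real sizes in "update opaque counts" below */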
1704 xb_init_buffer(&xb, NULL, 0);
1705 xb_add_32(error, &xb, args.version);
1706 argslength_offset = xb_offset(&xb);
1707 xb_add_32(error, &xb, 0); // args length
1708 xb_add_32(error, &xb, NFS_XDRARGS_VERSION_0);
1709 xb_add_bitmap(error, &xb, mattrs, NFS_MATTR_BITMAP_LEN);
1710 attrslength_offset = xb_offset(&xb);
1711 xb_add_32(error, &xb, 0); // attrs length
1712 xb_add_bitmap(error, &xb, mflags_mask, NFS_MFLAG_BITMAP_LEN); /* mask */
1713 xb_add_bitmap(error, &xb, mflags, NFS_MFLAG_BITMAP_LEN); /* value */
1714 xb_add_32(error, &xb, nfsvers);
1715 if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_READ_SIZE))
1716 xb_add_32(error, &xb, args.rsize);
1717 if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_WRITE_SIZE))
1718 xb_add_32(error, &xb, args.wsize);
1719 if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_READDIR_SIZE))
1720 xb_add_32(error, &xb, args.readdirsize);
1721 if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_READAHEAD))
1722 xb_add_32(error, &xb, args.readahead);
1723 if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_ATTRCACHE_REG_MIN)) {
1724 xb_add_32(error, &xb, args.acregmin);
1725 xb_add_32(error, &xb, 0);
1726 }
1727 if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_ATTRCACHE_REG_MAX)) {
1728 xb_add_32(error, &xb, args.acregmax);
1729 xb_add_32(error, &xb, 0);
1730 }
1731 if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_ATTRCACHE_DIR_MIN)) {
1732 xb_add_32(error, &xb, args.acdirmin);
1733 xb_add_32(error, &xb, 0);
1734 }
1735 if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_ATTRCACHE_DIR_MAX)) {
1736 xb_add_32(error, &xb, args.acdirmax);
1737 xb_add_32(error, &xb, 0);
1738 }
1739 if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_LOCK_MODE))
1740 xb_add_32(error, &xb, nfslockmode);
1741 if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_SECURITY)) {
1742 uint32_t flavors[2], i=0;
1743 if (args.flags & NFSMNT_SECFLAVOR)
1744 flavors[i++] = args.auth;
1745 if ((args.flags & NFSMNT_SECSYSOK) && ((i == 0) || (flavors[0] != RPCAUTH_SYS)))
1746 flavors[i++] = RPCAUTH_SYS;
1747 xb_add_word_array(error, &xb, flavors, i);
1748 }
1749 if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_MAX_GROUP_LIST))
1750 xb_add_32(error, &xb, args.maxgrouplist);
1751 if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_SOCKET_TYPE))
1752 xb_add_string(error, &xb, ((args.sotype == SOCK_DGRAM) ? "udp" : "tcp"), 3);
1753 if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_NFS_PORT))
1754 xb_add_32(error, &xb, ((ss.ss_family == AF_INET) ?
1755 ntohs(((struct sockaddr_in*)&ss)->sin_port) :
1756 ntohs(((struct sockaddr_in6*)&ss)->sin6_port)));
1757 /* NFS_MATTR_MOUNT_PORT (not available in old args) */
1758 if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_REQUEST_TIMEOUT)) {
1759 /* convert from .1s increments to time */
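/* e.g. args.timeo = 25 (2.5s in 0.1s units) is encoded as 2 seconds plus 500000000 (nanoseconds) */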
1760 xb_add_32(error, &xb, args.timeo/10);
1761 xb_add_32(error, &xb, (args.timeo%10)*100000000);
1762 }
1763 if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_SOFT_RETRY_COUNT))
1764 xb_add_32(error, &xb, args.retrans);
1765 if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_DEAD_TIMEOUT)) {
1766 xb_add_32(error, &xb, args.deadtimeout);
1767 xb_add_32(error, &xb, 0);
1768 }
1769 if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_FH))
1770 xb_add_fh(error, &xb, &nfh[0], args.fhsize);
1771 if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_FS_LOCATIONS)) {
1772 xb_add_32(error, &xb, 1); /* fs location count */
1773 xb_add_32(error, &xb, 1); /* server count */
1774 xb_add_string(error, &xb, mntfrom, (endserverp - mntfrom)); /* server name */
1775 xb_add_32(error, &xb, 1); /* address count */
1776 xb_add_string(error, &xb, uaddr, strlen(uaddr)); /* address */
1777 xb_add_32(error, &xb, 0); /* empty server info */
1778 xb_add_32(error, &xb, numcomps); /* pathname component count */
1779 nfsmout_if(error);
1780 p = frompath;
1781 while (*p && (*p == '/'))
1782 p++;
1783 while (*p) {
1784 cp = p;
1785 while (*p && (*p != '/'))
1786 p++;
1787 xb_add_string(error, &xb, cp, (p - cp)); /* component */
1788 nfsmout_if(error);
1789 while (*p && (*p == '/'))
1790 p++;
1791 }
1792 xb_add_32(error, &xb, 0); /* empty fsl info */
1793 }
1794 if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_MNTFLAGS))
1795 xb_add_32(error, &xb, (vfs_flags(mp) & MNT_VISFLAGMASK)); /* VFS MNT_* flags */
1796 if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_MNTFROM))
1797 xb_add_string(error, &xb, mntfrom, strlen(mntfrom)); /* fixed f_mntfromname */
1798 xb_build_done(error, &xb);
1799
1800 /* update opaque counts */
1801 end_offset = xb_offset(&xb);
1802 error = xb_seek(&xb, argslength_offset);
1803 xb_add_32(error, &xb, end_offset - argslength_offset + XDRWORD/*version*/);
1804 nfsmout_if(error);
1805 error = xb_seek(&xb, attrslength_offset);
1806 xb_add_32(error, &xb, end_offset - attrslength_offset - XDRWORD/*don't include length field*/);
1807
1808 if (!error) {
1809 /* grab the assembled buffer */
1810 *xdrbufp = xb_buffer_base(&xb);
1811 xb.xb_flags &= ~XB_CLEANUP;
1812 }
1813nfsmout:
1814 xb_cleanup(&xb);
1815 FREE_ZONE(mntfrom, MAXPATHLEN, M_NAMEI);
1816 return (error);
1817}
1818
1819/*
1820 * VFS Operations.
1821 *
1822 * mount system call
1823 */
1824int
1825nfs_vfs_mount(mount_t mp, vnode_t vp, user_addr_t data, vfs_context_t ctx)
1826{
1827 int error = 0, inkernel = vfs_iskernelmount(mp);
1828 uint32_t argsversion, argslength;
1829 char *xdrbuf = NULL;
1830
1831 /* read in version */
1832 if (inkernel)
1833 bcopy(CAST_DOWN(void *, data), &argsversion, sizeof(argsversion));
1834 else if ((error = copyin(data, &argsversion, sizeof(argsversion))))
1835 return (error);
1836
1837 /* If we have XDR args, then all values in the buffer are in network order */
1838 if (argsversion == htonl(NFS_ARGSVERSION_XDR))
1839 argsversion = NFS_ARGSVERSION_XDR;
1840
1841 switch (argsversion) {
1842 case 3:
1843 case 4:
1844 case 5:
1845 case 6:
1846 /* convert old-style args to xdr */
1847 error = nfs_convert_old_nfs_args(mp, data, ctx, argsversion, inkernel, &xdrbuf);
1848 break;
1849 case NFS_ARGSVERSION_XDR:
1850 /* copy in xdr buffer */
1851 if (inkernel)
1852 bcopy(CAST_DOWN(void *, (data + XDRWORD)), &argslength, XDRWORD);
1853 else
1854 error = copyin((data + XDRWORD), &argslength, XDRWORD);
1855 if (error)
1856 break;
1857 argslength = ntohl(argslength);
1858 /* put a reasonable limit on the size of the XDR args */
1859 if (argslength > 16*1024) {
1860 error = E2BIG;
1861 break;
1862 }
1863 /* allocate xdr buffer */
1864 xdrbuf = xb_malloc(xdr_rndup(argslength));
1865 if (!xdrbuf) {
1866 error = ENOMEM;
1867 break;
1868 }
1869 if (inkernel)
1870 bcopy(CAST_DOWN(void *, data), xdrbuf, argslength);
1871 else
1872 error = copyin(data, xdrbuf, argslength);
1873 break;
1874 default:
1875 error = EPROGMISMATCH;
1876 }
1877
1878 if (error) {
1879 if (xdrbuf)
1880 xb_free(xdrbuf);
1881 return (error);
1882 }
1883 error = mountnfs(xdrbuf, mp, ctx, &vp);
1884 return (error);
1885}
1886
1887/*
1888 * Common code for mount and mountroot
1889 */
1890
1891/* Set up an NFSv2/v3 mount */
1892int
1893nfs3_mount(
1894 struct nfsmount *nmp,
1895 vfs_context_t ctx,
1896 nfsnode_t *npp)
1897{
1898 int error = 0;
1899 struct nfs_vattr nvattr;
1900 u_int64_t xid;
1901
1902 *npp = NULL;
1903
1904 if (!nmp->nm_fh)
1905 return (EINVAL);
1906
1907 /*
1908 * Get file attributes for the mountpoint. These are needed
1909 * in order to properly create the root vnode.
1910 */
1911 error = nfs3_getattr_rpc(NULL, nmp->nm_mountp, nmp->nm_fh->fh_data, nmp->nm_fh->fh_len, 0,
1912 ctx, &nvattr, &xid);
1913 if (error)
1914 goto out;
1915
1916 error = nfs_nget(nmp->nm_mountp, NULL, NULL, nmp->nm_fh->fh_data, nmp->nm_fh->fh_len,
1917 &nvattr, &xid, RPCAUTH_UNKNOWN, NG_MARKROOT, npp);
1918 if (*npp)
1919 nfs_node_unlock(*npp);
1920 if (error)
1921 goto out;
1922
1923 /*
1924 * Try to make sure we have all the general info from the server.
1925 */
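/* NFSv2 has no FSINFO, so assume the protocol's maximum name length; NFSv3 asks the server via FSINFO (and may cache pathconf results below) */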
1926 if (nmp->nm_vers == NFS_VER2) {
1927 NFS_BITMAP_SET(nmp->nm_fsattr.nfsa_bitmap, NFS_FATTR_MAXNAME);
1928 nmp->nm_fsattr.nfsa_maxname = NFS_MAXNAMLEN;
1929 } else if (nmp->nm_vers == NFS_VER3) {
1930 /* get the NFSv3 FSINFO */
1931 error = nfs3_fsinfo(nmp, *npp, ctx);
1932 if (error)
1933 goto out;
1934 /* If the server indicates all pathconf info is the same, */
1935 /* grab a copy of that info now */
1936 if (NFS_BITMAP_ISSET(nmp->nm_fsattr.nfsa_bitmap, NFS_FATTR_HOMOGENEOUS) &&
1937 (nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_HOMOGENEOUS)) {
1938 struct nfs_fsattr nfsa;
1939 if (!nfs3_pathconf_rpc(*npp, &nfsa, ctx)) {
1940 /* cache a copy of the results */
1941 lck_mtx_lock(&nmp->nm_lock);
1942 nfs3_pathconf_cache(nmp, &nfsa);
1943 lck_mtx_unlock(&nmp->nm_lock);
1944 }
1945 }
1946 }
1947out:
1948 if (*npp && error) {
1949 vnode_put(NFSTOV(*npp));
1950 vnode_recycle(NFSTOV(*npp));
1951 *npp = NULL;
1952 }
1953 return (error);
1954}
1955
1956/*
1957 * Update an NFSv4 mount path with the contents of the symlink.
1958 *
1959 * Read the link for the given file handle.
1960 * Insert the link's components into the path.
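 *
 * For example, if the path is a/b/c/d and component "b" (curcomp 1) is a
 * symlink to "x/y", the already-consumed components are dropped and the path
 * becomes x/y/c/d, resolved relative to the current directory file handle
 * (or relative to the server root if the link is absolute).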
1961 */
1962int
1963nfs4_mount_update_path_with_symlink(struct nfsmount *nmp, struct nfs_fs_path *nfsp, uint32_t curcomp, fhandle_t *dirfhp, int *depthp, fhandle_t *fhp, vfs_context_t ctx)
1964{
1965 int error = 0, status, numops;
1966 uint32_t len = 0, comp, newcomp, linkcompcount;
1967 u_int64_t xid;
1968 struct nfsm_chain nmreq, nmrep;
1969 struct nfsreq rq, *req = &rq;
1970 struct nfsreq_secinfo_args si;
1971 char *link = NULL, *p, *q, ch;
1972 struct nfs_fs_path nfsp2;
1973
1974 bzero(&nfsp2, sizeof(nfsp2));
1975 if (dirfhp->fh_len)
1976 NFSREQ_SECINFO_SET(&si, NULL, dirfhp->fh_data, dirfhp->fh_len, nfsp->np_components[curcomp], 0);
1977 else
1978 NFSREQ_SECINFO_SET(&si, NULL, NULL, 0, nfsp->np_components[curcomp], 0);
1979 nfsm_chain_null(&nmreq);
1980 nfsm_chain_null(&nmrep);
1981
1982 MALLOC_ZONE(link, char *, MAXPATHLEN, M_NAMEI, M_WAITOK);
1983 if (!link)
1984 error = ENOMEM;
1985
1986 // PUTFH, READLINK
1987 numops = 2;
1988 nfsm_chain_build_alloc_init(error, &nmreq, 12 * NFSX_UNSIGNED);
1989 nfsm_chain_add_compound_header(error, &nmreq, "readlink", nmp->nm_minor_vers, numops);
1990 numops--;
1991 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
1992 nfsm_chain_add_fh(error, &nmreq, NFS_VER4, fhp->fh_data, fhp->fh_len);
1993 numops--;
1994 nfsm_chain_add_32(error, &nmreq, NFS_OP_READLINK);
1995 nfsm_chain_build_done(error, &nmreq);
1996 nfsm_assert(error, (numops == 0), EPROTO);
1997 nfsmout_if(error);
1998
1999 error = nfs_request_async(NULL, nmp->nm_mountp, &nmreq, NFSPROC4_COMPOUND,
2000 vfs_context_thread(ctx), vfs_context_ucred(ctx), &si, 0, NULL, &req);
2001 if (!error)
2002 error = nfs_request_async_finish(req, &nmrep, &xid, &status);
2003
2004 nfsm_chain_skip_tag(error, &nmrep);
2005 nfsm_chain_get_32(error, &nmrep, numops);
2006 nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
2007 nfsm_chain_op_check(error, &nmrep, NFS_OP_READLINK);
2008 nfsm_chain_get_32(error, &nmrep, len);
2009 nfsmout_if(error);
2010 if (len == 0)
2011 error = ENOENT;
2012 else if (len >= MAXPATHLEN)
2013 len = MAXPATHLEN - 1;
2014 nfsm_chain_get_opaque(error, &nmrep, len, link);
2015 nfsmout_if(error);
2016 /* make sure link string is terminated properly */
2017 link[len] = '\0';
2018
2019 /* count the number of components in link */
2020 p = link;
2021 while (*p && (*p == '/'))
2022 p++;
2023 linkcompcount = 0;
2024 while (*p) {
2025 linkcompcount++;
2026 while (*p && (*p != '/'))
2027 p++;
2028 while (*p && (*p == '/'))
2029 p++;
2030 }
2031
2032 /* free up used components */
2033 for (comp=0; comp <= curcomp; comp++) {
2034 if (nfsp->np_components[comp]) {
2035 FREE(nfsp->np_components[comp], M_TEMP);
2036 nfsp->np_components[comp] = NULL;
2037 }
2038 }
2039
2040 /* set up new path */
2041 nfsp2.np_compcount = nfsp->np_compcount - curcomp - 1 + linkcompcount;
2042 MALLOC(nfsp2.np_components, char **, nfsp2.np_compcount*sizeof(char*), M_TEMP, M_WAITOK|M_ZERO);
2043 if (!nfsp2.np_components) {
2044 error = ENOMEM;
2045 goto nfsmout;
2046 }
2047
2048 /* add link components */
2049 p = link;
2050 while (*p && (*p == '/'))
2051 p++;
2052 for (newcomp=0; newcomp < linkcompcount; newcomp++) {
2053 /* find end of component */
2054 q = p;
2055 while (*q && (*q != '/'))
2056 q++;
2057 MALLOC(nfsp2.np_components[newcomp], char *, q-p+1, M_TEMP, M_WAITOK|M_ZERO);
2058 if (!nfsp2.np_components[newcomp]) {
2059 error = ENOMEM;
2060 break;
2061 }
2062 ch = *q;
2063 *q = '\0';
2064 strlcpy(nfsp2.np_components[newcomp], p, q-p+1);
2065 *q = ch;
2066 p = q;
2067 while (*p && (*p == '/'))
2068 p++;
2069 }
2070 nfsmout_if(error);
2071
2072 /* add remaining components */
2073 for (comp = curcomp + 1; comp < nfsp->np_compcount; comp++, newcomp++) {
2074 nfsp2.np_components[newcomp] = nfsp->np_components[comp];
2075 nfsp->np_components[comp] = NULL;
2076 }
2077
2078 /* move new path into place */
2079 FREE(nfsp->np_components, M_TEMP);
2080 nfsp->np_components = nfsp2.np_components;
2081 nfsp->np_compcount = nfsp2.np_compcount;
2082 nfsp2.np_components = NULL;
2083
2084 /* for an absolute link, let the caller know that the next dirfh is root */
2085 if (link[0] == '/') {
2086 dirfhp->fh_len = 0;
2087 *depthp = 0;
2088 }
2089nfsmout:
2090 if (link)
2091 FREE_ZONE(link, MAXPATHLEN, M_NAMEI);
2092 if (nfsp2.np_components) {
2093 for (comp=0; comp < nfsp2.np_compcount; comp++)
2094 if (nfsp2.np_components[comp])
2095 FREE(nfsp2.np_components[comp], M_TEMP);
2096 FREE(nfsp2.np_components, M_TEMP);
2097 }
2098 nfsm_chain_cleanup(&nmreq);
2099 nfsm_chain_cleanup(&nmrep);
2100 return (error);
2101}
2102
2103/* Set up an NFSv4 mount */
2104int
2105nfs4_mount(
2106 struct nfsmount *nmp,
2107 vfs_context_t ctx,
2108 nfsnode_t *npp)
2109{
2110 struct nfsm_chain nmreq, nmrep;
2111 int error = 0, numops, status, interval, isdotdot, loopcnt = 0, depth = 0;
2112 struct nfs_fs_path fspath, *nfsp, fspath2;
2113 uint32_t bitmap[NFS_ATTR_BITMAP_LEN], comp, comp2;
2114 fhandle_t fh, dirfh;
2115 struct nfs_vattr nvattr;
2116 u_int64_t xid;
2117 struct nfsreq rq, *req = &rq;
2118 struct nfsreq_secinfo_args si;
2119 struct nfs_sec sec;
2120 struct nfs_fs_locations nfsls;
2121
2122 *npp = NULL;
2123 fh.fh_len = dirfh.fh_len = 0;
2124 TAILQ_INIT(&nmp->nm_open_owners);
2125 TAILQ_INIT(&nmp->nm_delegations);
2126 TAILQ_INIT(&nmp->nm_dreturnq);
2127 nmp->nm_stategenid = 1;
2128 NVATTR_INIT(&nvattr);
2129 bzero(&nfsls, sizeof(nfsls));
2130 nfsm_chain_null(&nmreq);
2131 nfsm_chain_null(&nmrep);
2132
2133 /*
2134 * If no security flavors were specified we'll want to default to the server's
2135 * preferred flavor. For NFSv4.0 we need a file handle and name to get that via
2136 * SECINFO, so we'll do that on the last component of the server path we are
2137 * mounting. If we are mounting the server's root, we'll need to defer the
2138 * SECINFO call to the first successful LOOKUP request.
2139 */
2140 if (!nmp->nm_sec.count)
2141 nmp->nm_state |= NFSSTA_NEEDSECINFO;
2142
2143 /* make a copy of the current location's path */
2144 nfsp = &nmp->nm_locations.nl_locations[nmp->nm_locations.nl_current.nli_loc]->nl_path;
2145 bzero(&fspath, sizeof(fspath));
2146 fspath.np_compcount = nfsp->np_compcount;
2147 if (fspath.np_compcount > 0) {
2148 MALLOC(fspath.np_components, char **, fspath.np_compcount*sizeof(char*), M_TEMP, M_WAITOK|M_ZERO);
2149 if (!fspath.np_components) {
2150 error = ENOMEM;
2151 goto nfsmout;
2152 }
2153 for (comp=0; comp < nfsp->np_compcount; comp++) {
2154 int slen = strlen(nfsp->np_components[comp]);
2155 MALLOC(fspath.np_components[comp], char *, slen+1, M_TEMP, M_WAITOK|M_ZERO);
2156 if (!fspath.np_components[comp]) {
2157 error = ENOMEM;
2158 break;
2159 }
2160 strlcpy(fspath.np_components[comp], nfsp->np_components[comp], slen+1);
2161 }
2162 if (error)
2163 goto nfsmout;
2164 }
2165
2166 /* for mirror mounts, we can just use the file handle passed in */
2167 if (nmp->nm_fh) {
2168 dirfh.fh_len = nmp->nm_fh->fh_len;
2169 bcopy(nmp->nm_fh->fh_data, dirfh.fh_data, dirfh.fh_len);
2170 NFSREQ_SECINFO_SET(&si, NULL, dirfh.fh_data, dirfh.fh_len, NULL, 0);
2171 goto gotfh;
2172 }
2173
2174 /* otherwise, we need to get the fh for the directory we are mounting */
2175
2176 /* if no components, just get root */
2177 if (fspath.np_compcount == 0) {
2178nocomponents:
2179 // PUTROOTFH + GETATTR(FH)
2180 NFSREQ_SECINFO_SET(&si, NULL, NULL, 0, NULL, 0);
2181 numops = 2;
2182 nfsm_chain_build_alloc_init(error, &nmreq, 9 * NFSX_UNSIGNED);
2183 nfsm_chain_add_compound_header(error, &nmreq, "mount", nmp->nm_minor_vers, numops);
2184 numops--;
2185 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTROOTFH);
2186 numops--;
2187 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
2188 NFS_CLEAR_ATTRIBUTES(bitmap);
2189 NFS4_DEFAULT_ATTRIBUTES(bitmap);
2190 NFS_BITMAP_SET(bitmap, NFS_FATTR_FILEHANDLE);
2191 nfsm_chain_add_bitmap(error, &nmreq, bitmap, NFS_ATTR_BITMAP_LEN);
2192 nfsm_chain_build_done(error, &nmreq);
2193 nfsm_assert(error, (numops == 0), EPROTO);
2194 nfsmout_if(error);
2195 error = nfs_request_async(NULL, nmp->nm_mountp, &nmreq, NFSPROC4_COMPOUND,
2196 vfs_context_thread(ctx), vfs_context_ucred(ctx), &si, 0, NULL, &req);
2197 if (!error)
2198 error = nfs_request_async_finish(req, &nmrep, &xid, &status);
2199 nfsm_chain_skip_tag(error, &nmrep);
2200 nfsm_chain_get_32(error, &nmrep, numops);
2201 nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTROOTFH);
2202 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
2203 nfsmout_if(error);
2204 NFS_CLEAR_ATTRIBUTES(nmp->nm_fsattr.nfsa_bitmap);
2205 error = nfs4_parsefattr(&nmrep, &nmp->nm_fsattr, &nvattr, &dirfh, NULL, NULL);
2206 if (!error && !NFS_BITMAP_ISSET(&nvattr.nva_bitmap, NFS_FATTR_FILEHANDLE)) {
2207 printf("nfs: mount didn't return filehandle?\n");
2208 error = EBADRPC;
2209 }
2210 nfsmout_if(error);
2211 nfsm_chain_cleanup(&nmrep);
2212 nfsm_chain_null(&nmreq);
2213 NVATTR_CLEANUP(&nvattr);
2214 goto gotfh;
2215 }
2216
2217 /* look up each path component */
2218 for (comp=0; comp < fspath.np_compcount; ) {
2219 isdotdot = 0;
2220 if (fspath.np_components[comp][0] == '.') {
2221 if (fspath.np_components[comp][1] == '\0') {
2222 /* skip "." */
2223 comp++;
2224 continue;
2225 }
2226 /* treat ".." specially */
2227 if ((fspath.np_components[comp][1] == '.') &&
2228 (fspath.np_components[comp][2] == '\0'))
2229 isdotdot = 1;
2230 if (isdotdot && (dirfh.fh_len == 0)) {
2231 /* ".." in root directory is same as "." */
2232 comp++;
2233 continue;
2234 }
2235 }
2236 // PUT(ROOT)FH + LOOKUP(P) + GETFH + GETATTR
2237 if (dirfh.fh_len == 0)
2238 NFSREQ_SECINFO_SET(&si, NULL, NULL, 0, isdotdot ? NULL : fspath.np_components[comp], 0);
2239 else
2240 NFSREQ_SECINFO_SET(&si, NULL, dirfh.fh_data, dirfh.fh_len, isdotdot ? NULL : fspath.np_components[comp], 0);
2241 numops = 4;
2242 nfsm_chain_build_alloc_init(error, &nmreq, 18 * NFSX_UNSIGNED);
2243 nfsm_chain_add_compound_header(error, &nmreq, "mount", nmp->nm_minor_vers, numops);
2244 numops--;
2245 if (dirfh.fh_len) {
2246 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
2247 nfsm_chain_add_fh(error, &nmreq, NFS_VER4, dirfh.fh_data, dirfh.fh_len);
2248 } else {
2249 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTROOTFH);
2250 }
2251 numops--;
2252 if (isdotdot) {
2253 nfsm_chain_add_32(error, &nmreq, NFS_OP_LOOKUPP);
2254 } else {
2255 nfsm_chain_add_32(error, &nmreq, NFS_OP_LOOKUP);
2256 nfsm_chain_add_name(error, &nmreq,
2257 fspath.np_components[comp], strlen(fspath.np_components[comp]), nmp);
2258 }
2259 numops--;
2260 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETFH);
2261 numops--;
2262 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
2263 NFS_CLEAR_ATTRIBUTES(bitmap);
2264 NFS4_DEFAULT_ATTRIBUTES(bitmap);
2265 /* if no namedattr support or component is ".zfs", clear NFS_FATTR_NAMED_ATTR */
2266 if (NMFLAG(nmp, NONAMEDATTR) || !strcmp(fspath.np_components[comp], ".zfs"))
2267 NFS_BITMAP_CLR(bitmap, NFS_FATTR_NAMED_ATTR);
2268 nfsm_chain_add_bitmap(error, &nmreq, bitmap, NFS_ATTR_BITMAP_LEN);
2269 nfsm_chain_build_done(error, &nmreq);
2270 nfsm_assert(error, (numops == 0), EPROTO);
2271 nfsmout_if(error);
2272 error = nfs_request_async(NULL, nmp->nm_mountp, &nmreq, NFSPROC4_COMPOUND,
2273 vfs_context_thread(ctx), vfs_context_ucred(ctx), &si, 0, NULL, &req);
2274 if (!error)
2275 error = nfs_request_async_finish(req, &nmrep, &xid, &status);
2276 nfsm_chain_skip_tag(error, &nmrep);
2277 nfsm_chain_get_32(error, &nmrep, numops);
2278 nfsm_chain_op_check(error, &nmrep, dirfh.fh_len ? NFS_OP_PUTFH : NFS_OP_PUTROOTFH);
2279 nfsm_chain_op_check(error, &nmrep, isdotdot ? NFS_OP_LOOKUPP : NFS_OP_LOOKUP);
2280 nfsmout_if(error);
2281 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETFH);
2282 nfsm_chain_get_32(error, &nmrep, fh.fh_len);
2283 nfsm_chain_get_opaque(error, &nmrep, fh.fh_len, fh.fh_data);
2284 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
2285 if (!error) {
2286 NFS_CLEAR_ATTRIBUTES(nmp->nm_fsattr.nfsa_bitmap);
2287 error = nfs4_parsefattr(&nmrep, &nmp->nm_fsattr, &nvattr, NULL, NULL, &nfsls);
2288 }
2289 nfsm_chain_cleanup(&nmrep);
2290 nfsm_chain_null(&nmreq);
2291 if (error) {
2292 /* LOOKUP succeeded but GETATTR failed? This could be a referral. */
2293 /* Try the lookup again with a getattr for fs_locations. */
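/* (i.e. the component may live in a file system hosted elsewhere; its fs_locations attribute tells us which servers to try) */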
2294 nfs_fs_locations_cleanup(&nfsls);
2295 error = nfs4_get_fs_locations(nmp, NULL, dirfh.fh_data, dirfh.fh_len, fspath.np_components[comp], ctx, &nfsls);
2296 if (!error && (nfsls.nl_numlocs < 1))
2297 error = ENOENT;
2298 nfsmout_if(error);
2299 if (++loopcnt > MAXSYMLINKS) {
2300 /* too many symlink/referral redirections */
2301 error = ELOOP;
2302 goto nfsmout;
2303 }
2304 /* tear down the current connection */
2305 nfs_disconnect(nmp);
2306 /* replace fs locations */
2307 nfs_fs_locations_cleanup(&nmp->nm_locations);
2308 nmp->nm_locations = nfsls;
2309 bzero(&nfsls, sizeof(nfsls));
2310 /* initiate a connection using the new fs locations */
2311 error = nfs_mount_connect(nmp);
2312 if (!error && !(nmp->nm_locations.nl_current.nli_flags & NLI_VALID))
2313 error = EIO;
2314 nfsmout_if(error);
2315 /* add new server's remote path to beginning of our path and continue */
2316 nfsp = &nmp->nm_locations.nl_locations[nmp->nm_locations.nl_current.nli_loc]->nl_path;
2317 bzero(&fspath2, sizeof(fspath2));
2318 fspath2.np_compcount = (fspath.np_compcount - comp - 1) + nfsp->np_compcount;
2319 if (fspath2.np_compcount > 0) {
2320 MALLOC(fspath2.np_components, char **, fspath2.np_compcount*sizeof(char*), M_TEMP, M_WAITOK|M_ZERO);
2321 if (!fspath2.np_components) {
2322 error = ENOMEM;
2323 goto nfsmout;
2324 }
2325 for (comp2=0; comp2 < nfsp->np_compcount; comp2++) {
2326 int slen = strlen(nfsp->np_components[comp2]);
2327 MALLOC(fspath2.np_components[comp2], char *, slen+1, M_TEMP, M_WAITOK|M_ZERO);
2328 if (!fspath2.np_components[comp2]) {
2329 /* clean up fspath2, then error out */
2330 while (comp2 > 0) {
2331 comp2--;
2332 FREE(fspath2.np_components[comp2], M_TEMP);
2333 }
2334 FREE(fspath2.np_components, M_TEMP);
2335 error = ENOMEM;
2336 goto nfsmout;
2337 }
2338 strlcpy(fspath2.np_components[comp2], nfsp->np_components[comp2], slen+1);
2339 }
2340 if ((fspath.np_compcount - comp - 1) > 0)
2341 bcopy(&fspath.np_components[comp+1], &fspath2.np_components[nfsp->np_compcount], (fspath.np_compcount - comp - 1)*sizeof(char*));
2342 /* free up unused parts of old path (prior components and component array) */
2343 do {
2344 FREE(fspath.np_components[comp], M_TEMP);
2345 } while (comp-- > 0);
2346 FREE(fspath.np_components, M_TEMP);
2347 /* put new path in place */
2348 fspath = fspath2;
2349 }
2350 /* reset dirfh and component index */
2351 dirfh.fh_len = 0;
2352 comp = 0;
2353 NVATTR_CLEANUP(&nvattr);
2354 if (fspath.np_compcount == 0)
2355 goto nocomponents;
2356 continue;
2357 }
2358 nfsmout_if(error);
2359 /* if file handle is for a symlink, then update the path with the symlink contents */
2360 if (NFS_BITMAP_ISSET(&nvattr.nva_bitmap, NFS_FATTR_TYPE) && (nvattr.nva_type == VLNK)) {
2361 if (++loopcnt > MAXSYMLINKS)
2362 error = ELOOP;
2363 else
2364 error = nfs4_mount_update_path_with_symlink(nmp, &fspath, comp, &dirfh, &depth, &fh, ctx);
2365 nfsmout_if(error);
2366 /* directory file handle is either left the same or reset to root (if link was absolute) */
2367 /* path traversal starts at beginning of the path again */
2368 comp = 0;
2369 NVATTR_CLEANUP(&nvattr);
2370 nfs_fs_locations_cleanup(&nfsls);
2371 continue;
2372 }
2373 NVATTR_CLEANUP(&nvattr);
2374 nfs_fs_locations_cleanup(&nfsls);
2375 /* not a symlink... */
2376 if ((nmp->nm_state & NFSSTA_NEEDSECINFO) && (comp == (fspath.np_compcount-1)) && !isdotdot) {
2377 /* need to get SECINFO for the directory being mounted */
2378 if (dirfh.fh_len == 0)
2379 NFSREQ_SECINFO_SET(&si, NULL, NULL, 0, isdotdot ? NULL : fspath.np_components[comp], 0);
2380 else
2381 NFSREQ_SECINFO_SET(&si, NULL, dirfh.fh_data, dirfh.fh_len, isdotdot ? NULL : fspath.np_components[comp], 0);
2382 sec.count = NX_MAX_SEC_FLAVORS;
2383 error = nfs4_secinfo_rpc(nmp, &si, vfs_context_ucred(ctx), sec.flavors, &sec.count);
2384 /* [sigh] some implementations return "illegal" error for unsupported ops */
2385 if (error == NFSERR_OP_ILLEGAL)
2386 error = 0;
2387 nfsmout_if(error);
2388 /* set our default security flavor to the first in the list */
2389 if (sec.count)
2390 nmp->nm_auth = sec.flavors[0];
2391 nmp->nm_state &= ~NFSSTA_NEEDSECINFO;
2392 }
2393 /* advance directory file handle, component index, & update depth */
2394 dirfh = fh;
2395 comp++;
2396 if (!isdotdot) /* going down the hierarchy */
2397 depth++;
2398 else if (--depth <= 0) /* going up the hierarchy */
2399 dirfh.fh_len = 0; /* clear dirfh when we hit root */
2400 }
2401
2402gotfh:
2403 /* get attrs for mount point root */
2404 numops = NMFLAG(nmp, NONAMEDATTR) ? 2 : 3; // PUTFH + GETATTR + OPENATTR
2405 nfsm_chain_build_alloc_init(error, &nmreq, 25 * NFSX_UNSIGNED);
2406 nfsm_chain_add_compound_header(error, &nmreq, "mount", nmp->nm_minor_vers, numops);
2407 numops--;
2408 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
2409 nfsm_chain_add_fh(error, &nmreq, NFS_VER4, dirfh.fh_data, dirfh.fh_len);
2410 numops--;
2411 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
2412 NFS_CLEAR_ATTRIBUTES(bitmap);
2413 NFS4_DEFAULT_ATTRIBUTES(bitmap);
2414 /* if no namedattr support or last component is ".zfs", clear NFS_FATTR_NAMED_ATTR */
2415 if (NMFLAG(nmp, NONAMEDATTR) || ((fspath.np_compcount > 0) && !strcmp(fspath.np_components[fspath.np_compcount-1], ".zfs")))
2416 NFS_BITMAP_CLR(bitmap, NFS_FATTR_NAMED_ATTR);
2417 nfsm_chain_add_bitmap(error, &nmreq, bitmap, NFS_ATTR_BITMAP_LEN);
2418 if (!NMFLAG(nmp, NONAMEDATTR)) {
2419 numops--;
2420 nfsm_chain_add_32(error, &nmreq, NFS_OP_OPENATTR);
2421 nfsm_chain_add_32(error, &nmreq, 0);
2422 }
2423 nfsm_chain_build_done(error, &nmreq);
2424 nfsm_assert(error, (numops == 0), EPROTO);
2425 nfsmout_if(error);
2426 error = nfs_request_async(NULL, nmp->nm_mountp, &nmreq, NFSPROC4_COMPOUND,
2427 vfs_context_thread(ctx), vfs_context_ucred(ctx), &si, 0, NULL, &req);
2428 if (!error)
2429 error = nfs_request_async_finish(req, &nmrep, &xid, &status);
2430 nfsm_chain_skip_tag(error, &nmrep);
2431 nfsm_chain_get_32(error, &nmrep, numops);
2432 nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
2433 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
2434 nfsmout_if(error);
2435 NFS_CLEAR_ATTRIBUTES(nmp->nm_fsattr.nfsa_bitmap);
2436 error = nfs4_parsefattr(&nmrep, &nmp->nm_fsattr, &nvattr, NULL, NULL, NULL);
2437 nfsmout_if(error);
2438 if (!NMFLAG(nmp, NONAMEDATTR)) {
2439 nfsm_chain_op_check(error, &nmrep, NFS_OP_OPENATTR);
2440 if (error == ENOENT)
2441 error = 0;
2442 /* [sigh] some implementations return "illegal" error for unsupported ops */
2443 if (error || !NFS_BITMAP_ISSET(nmp->nm_fsattr.nfsa_supp_attr, NFS_FATTR_NAMED_ATTR)) {
2444 nmp->nm_fsattr.nfsa_flags &= ~NFS_FSFLAG_NAMED_ATTR;
2445 } else {
2446 nmp->nm_fsattr.nfsa_flags |= NFS_FSFLAG_NAMED_ATTR;
2447 }
2448 } else {
2449 nmp->nm_fsattr.nfsa_flags &= ~NFS_FSFLAG_NAMED_ATTR;
2450 }
2451 if (NMFLAG(nmp, NOACL)) /* make sure ACL support is turned off */
2452 nmp->nm_fsattr.nfsa_flags &= ~NFS_FSFLAG_ACL;
2453 if (NMFLAG(nmp, ACLONLY) && !(nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_ACL))
2454 NFS_BITMAP_CLR(nmp->nm_flags, NFS_MFLAG_ACLONLY);
2455 if (NFS_BITMAP_ISSET(nmp->nm_fsattr.nfsa_supp_attr, NFS_FATTR_FH_EXPIRE_TYPE)) {
2456 uint32_t fhtype = ((nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_FHTYPE_MASK) >> NFS_FSFLAG_FHTYPE_SHIFT);
2457 if (fhtype != NFS_FH_PERSISTENT)
2458 printf("nfs: warning: non-persistent file handles for %s!\n", vfs_statfs(nmp->nm_mountp)->f_mntfromname);
2459 }
2460
2461 /* make sure it's a directory */
2462 if (!NFS_BITMAP_ISSET(&nvattr.nva_bitmap, NFS_FATTR_TYPE) || (nvattr.nva_type != VDIR)) {
2463 error = ENOTDIR;
2464 goto nfsmout;
2465 }
2466
2467 /* save the NFS fsid */
2468 nmp->nm_fsid = nvattr.nva_fsid;
2469
2470 /* create the root node */
2471 error = nfs_nget(nmp->nm_mountp, NULL, NULL, dirfh.fh_data, dirfh.fh_len, &nvattr, &xid, rq.r_auth, NG_MARKROOT, npp);
2472 nfsmout_if(error);
2473
2474 if (nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_ACL)
2475 vfs_setextendedsecurity(nmp->nm_mountp);
2476
2477 /* adjust I/O sizes to server limits */
2478 if (NFS_BITMAP_ISSET(nmp->nm_fsattr.nfsa_bitmap, NFS_FATTR_MAXREAD) && (nmp->nm_fsattr.nfsa_maxread > 0)) {
2479 if (nmp->nm_fsattr.nfsa_maxread < (uint64_t)nmp->nm_rsize) {
2480 nmp->nm_rsize = nmp->nm_fsattr.nfsa_maxread & ~(NFS_FABLKSIZE - 1);
2481 if (nmp->nm_rsize == 0)
2482 nmp->nm_rsize = nmp->nm_fsattr.nfsa_maxread;
2483 }
2484 }
2485 if (NFS_BITMAP_ISSET(nmp->nm_fsattr.nfsa_bitmap, NFS_FATTR_MAXWRITE) && (nmp->nm_fsattr.nfsa_maxwrite > 0)) {
2486 if (nmp->nm_fsattr.nfsa_maxwrite < (uint64_t)nmp->nm_wsize) {
2487 nmp->nm_wsize = nmp->nm_fsattr.nfsa_maxwrite & ~(NFS_FABLKSIZE - 1);
2488 if (nmp->nm_wsize == 0)
2489 nmp->nm_wsize = nmp->nm_fsattr.nfsa_maxwrite;
2490 }
2491 }
2492
2493 /* set up lease renew timer */
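/* renew at half the server's lease period, no more often than once a second; interval*1000 presumably converts seconds to milliseconds for nfs_interval_timer_start */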
2494 nmp->nm_renew_timer = thread_call_allocate(nfs4_renew_timer, nmp);
2495 interval = nmp->nm_fsattr.nfsa_lease / 2;
2496 if (interval < 1)
2497 interval = 1;
2498 nfs_interval_timer_start(nmp->nm_renew_timer, interval * 1000);
2499
2500nfsmout:
2501 if (fspath.np_components) {
2502 for (comp=0; comp < fspath.np_compcount; comp++)
2503 if (fspath.np_components[comp])
2504 FREE(fspath.np_components[comp], M_TEMP);
2505 FREE(fspath.np_components, M_TEMP);
2506 }
2507 NVATTR_CLEANUP(&nvattr);
2508 nfs_fs_locations_cleanup(&nfsls);
2509 if (*npp)
2510 nfs_node_unlock(*npp);
2511 nfsm_chain_cleanup(&nmreq);
2512 nfsm_chain_cleanup(&nmrep);
2513 return (error);
2514}
2515
2516/*
2517 * Thread to handle initial NFS mount connection.
2518 */
2519void
2520nfs_mount_connect_thread(void *arg, __unused wait_result_t wr)
2521{
2522 struct nfsmount *nmp = arg;
2523 int error = 0, savederror = 0, slpflag = (NMFLAG(nmp, INTR) ? PCATCH : 0);
2524 int done = 0, timeo, tries, maxtries;
2525
2526 if (NM_OMFLAG(nmp, MNTQUICK)) {
2527 timeo = 8;
2528 maxtries = 1;
2529 } else {
2530 timeo = 30;
2531 maxtries = 2;
2532 }
2533
2534 for (tries = 0; tries < maxtries; tries++) {
2535 error = nfs_connect(nmp, 1, timeo);
2536 switch (error) {
2537 case ETIMEDOUT:
2538 case EAGAIN:
2539 case EPIPE:
2540 case EADDRNOTAVAIL:
2541 case ENETDOWN:
2542 case ENETUNREACH:
2543 case ENETRESET:
2544 case ECONNABORTED:
2545 case ECONNRESET:
2546 case EISCONN:
2547 case ENOTCONN:
2548 case ESHUTDOWN:
2549 case ECONNREFUSED:
2550 case EHOSTDOWN:
2551 case EHOSTUNREACH:
2552 /* just keep retrying on any of these errors */
2553 break;
2554 case 0:
2555 default:
2556 /* looks like we got an answer... */
2557 done = 1;
2558 break;
2559 }
2560
2561 /* save the best error */
2562 if (nfs_connect_error_class(error) >= nfs_connect_error_class(savederror))
2563 savederror = error;
2564 if (done) {
2565 error = savederror;
2566 break;
2567 }
2568
2569 /* pause before next attempt */
2570 if ((error = nfs_sigintr(nmp, NULL, current_thread(), 0)))
2571 break;
2572 error = tsleep(nmp, PSOCK|slpflag, "nfs_mount_connect_retry", 2*hz);
2573 if (error && (error != EWOULDBLOCK))
2574 break;
2575 error = savederror;
2576 }
2577
2578 /* update status of mount connect */
2579 lck_mtx_lock(&nmp->nm_lock);
2580 if (!nmp->nm_mounterror)
2581 nmp->nm_mounterror = error;
2582 nmp->nm_state &= ~NFSSTA_MOUNT_THREAD;
2583 lck_mtx_unlock(&nmp->nm_lock);
2584 wakeup(&nmp->nm_nss);
2585}
2586
2587int
2588nfs_mount_connect(struct nfsmount *nmp)
2589{
2590 int error = 0, slpflag;
2591 thread_t thd;
2592 struct timespec ts = { 2, 0 };
2593
2594 /*
2595 * Set up the socket. Perform initial search for a location/server/address to
2596 * connect to and negotiate any unspecified mount parameters. This work is
2597 * done on a kernel thread to satisfy reserved port usage needs.
2598 */
2599 slpflag = NMFLAG(nmp, INTR) ? PCATCH : 0;
2600 lck_mtx_lock(&nmp->nm_lock);
2601 /* set flag that the thread is running */
2602 nmp->nm_state |= NFSSTA_MOUNT_THREAD;
2603 if (kernel_thread_start(nfs_mount_connect_thread, nmp, &thd) != KERN_SUCCESS) {
2604 nmp->nm_state &= ~NFSSTA_MOUNT_THREAD;
2605 nmp->nm_mounterror = EIO;
2606 printf("nfs mount %s start socket connect thread failed\n", vfs_statfs(nmp->nm_mountp)->f_mntfromname);
2607 } else {
2608 thread_deallocate(thd);
2609 }
2610
2611 /* wait until mount connect thread is finished/gone */
2612 while (nmp->nm_state & NFSSTA_MOUNT_THREAD) {
2613 error = msleep(&nmp->nm_nss, &nmp->nm_lock, slpflag|PSOCK, "nfsconnectthread", &ts);
2614 if ((error && (error != EWOULDBLOCK)) || ((error = nfs_sigintr(nmp, NULL, current_thread(), 1)))) {
2615 /* record error */
2616 if (!nmp->nm_mounterror)
2617 nmp->nm_mounterror = error;
2618 /* signal the thread that we are aborting */
2619 nmp->nm_sockflags |= NMSOCK_UNMOUNT;
2620 if (nmp->nm_nss)
2621 wakeup(nmp->nm_nss);
2622 /* and continue waiting on it to finish */
2623 slpflag = 0;
2624 }
2625 }
2626 lck_mtx_unlock(&nmp->nm_lock);
2627
2628 /* grab mount connect status */
2629 error = nmp->nm_mounterror;
2630
2631 return (error);
2632}
2633
2634/* Table of maximum minor version for a given version */
2635uint32_t maxminorverstab[] = {
2636 0, /* Version 0 (does not exist) */
2637 0, /* Version 1 (does not exist) */
2638 0, /* Version 2 */
2639 0, /* Version 3 */
2640 0, /* Version 4 */
2641};
2642
2643#define NFS_MAX_SUPPORTED_VERSION ((long)(sizeof (maxminorverstab) / sizeof (uint32_t) - 1))
2644#define NFS_MAX_SUPPORTED_MINOR_VERSION(v) ((long)(maxminorverstab[(v)]))
2645
2646#define DEFAULT_NFS_MIN_VERS VER2PVER(2, 0)
2647#define DEFAULT_NFS_MAX_VERS VER2PVER(3, 0)
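/* i.e. by default we negotiate anywhere from NFSv2.0 up to NFSv3.0; NFSv4 is only used when the mount args explicitly select it */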
2648
2649/*
2650 * Common code to mount an NFS file system.
2651 */
2652int
2653mountnfs(
2654 char *xdrbuf,
2655 mount_t mp,
2656 vfs_context_t ctx,
2657 vnode_t *vpp)
2658{
2659 struct nfsmount *nmp;
2660 nfsnode_t np;
2661 int error = 0;
2662 struct vfsstatfs *sbp;
2663 struct xdrbuf xb;
2664 uint32_t i, val, maxio, iosize, len;
2665 uint32_t *mattrs;
2666 uint32_t *mflags_mask;
2667 uint32_t *mflags;
2668 uint32_t argslength, attrslength;
2669 struct nfs_location_index firstloc = { NLI_VALID, 0, 0, 0 };
2670
2671 /* make sure mbuf constants are set up */
2672 if (!nfs_mbuf_mhlen)
2673 nfs_mbuf_init();
2674
2675 if (vfs_flags(mp) & MNT_UPDATE) {
2676 nmp = VFSTONFS(mp);
2677 /* update paths, file handles, etc, here XXX */
2678 xb_free(xdrbuf);
2679 return (0);
2680 } else {
2681 /* allocate an NFS mount structure for this mount */
2682 MALLOC_ZONE(nmp, struct nfsmount *,
2683 sizeof (struct nfsmount), M_NFSMNT, M_WAITOK);
2684 if (!nmp) {
2685 xb_free(xdrbuf);
2686 return (ENOMEM);
2687 }
2688 bzero((caddr_t)nmp, sizeof (struct nfsmount));
2689 lck_mtx_init(&nmp->nm_lock, nfs_mount_grp, LCK_ATTR_NULL);
2690 TAILQ_INIT(&nmp->nm_resendq);
2691 TAILQ_INIT(&nmp->nm_iodq);
2692 TAILQ_INIT(&nmp->nm_gsscl);
2693 LIST_INIT(&nmp->nm_monlist);
2694 vfs_setfsprivate(mp, nmp);
2695 vfs_getnewfsid(mp);
2696 nmp->nm_mountp = mp;
2697 vfs_setauthopaque(mp);
2698
2699 nfs_nhinit_finish();
2700
2701 nmp->nm_args = xdrbuf;
2702
2703 /* set up defaults */
2704 nmp->nm_ref = 0;
2705 nmp->nm_vers = 0;
2706 nmp->nm_min_vers = DEFAULT_NFS_MIN_VERS;
2707 nmp->nm_max_vers = DEFAULT_NFS_MAX_VERS;
2708 nmp->nm_timeo = NFS_TIMEO;
2709 nmp->nm_retry = NFS_RETRANS;
2710 nmp->nm_sotype = 0;
2711 nmp->nm_sofamily = 0;
2712 nmp->nm_nfsport = 0;
2713 nmp->nm_wsize = NFS_WSIZE;
2714 nmp->nm_rsize = NFS_RSIZE;
2715 nmp->nm_readdirsize = NFS_READDIRSIZE;
2716 nmp->nm_numgrps = NFS_MAXGRPS;
2717 nmp->nm_readahead = NFS_DEFRAHEAD;
2718 nmp->nm_tprintf_delay = nfs_tprintf_delay;
2719 if (nmp->nm_tprintf_delay < 0)
2720 nmp->nm_tprintf_delay = 0;
2721 nmp->nm_tprintf_initial_delay = nfs_tprintf_initial_delay;
2722 if (nmp->nm_tprintf_initial_delay < 0)
2723 nmp->nm_tprintf_initial_delay = 0;
2724 nmp->nm_acregmin = NFS_MINATTRTIMO;
2725 nmp->nm_acregmax = NFS_MAXATTRTIMO;
2726 nmp->nm_acdirmin = NFS_MINDIRATTRTIMO;
2727 nmp->nm_acdirmax = NFS_MAXDIRATTRTIMO;
2728 nmp->nm_auth = RPCAUTH_SYS;
2729 nmp->nm_iodlink.tqe_next = NFSNOLIST;
2730 nmp->nm_deadtimeout = 0;
2731 nmp->nm_curdeadtimeout = 0;
2732 NFS_BITMAP_SET(nmp->nm_flags, NFS_MFLAG_NOACL);
2733 nmp->nm_realm = NULL;
2734 nmp->nm_principal = NULL;
2735 nmp->nm_sprinc = NULL;
2736 }
2737
2738 mattrs = nmp->nm_mattrs;
2739 mflags = nmp->nm_mflags;
2740 mflags_mask = nmp->nm_mflags_mask;
2741
2742 /* set up NFS mount with args */
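/* first read just the version and args length from the leading two words, then restart the parse over the full args length */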
2743 xb_init_buffer(&xb, xdrbuf, 2*XDRWORD);
2744 xb_get_32(error, &xb, val); /* version */
2745 xb_get_32(error, &xb, argslength); /* args length */
2746 nfsmerr_if(error);
2747 xb_init_buffer(&xb, xdrbuf, argslength); /* restart parsing with actual buffer length */
2748 xb_get_32(error, &xb, val); /* version */
2749 xb_get_32(error, &xb, argslength); /* args length */
2750 xb_get_32(error, &xb, val); /* XDR args version */
2751 if (val != NFS_XDRARGS_VERSION_0)
2752 error = EINVAL;
2753 len = NFS_MATTR_BITMAP_LEN;
2754 xb_get_bitmap(error, &xb, mattrs, len); /* mount attribute bitmap */
2755 attrslength = 0;
2756 xb_get_32(error, &xb, attrslength); /* attrs length */
2757 if (!error && (attrslength > (argslength - ((4+NFS_MATTR_BITMAP_LEN+1)*XDRWORD))))
2758 error = EINVAL;
2759 nfsmerr_if(error);
2760 if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_FLAGS)) {
2761 len = NFS_MFLAG_BITMAP_LEN;
2762 xb_get_bitmap(error, &xb, mflags_mask, len); /* mount flag mask */
2763 len = NFS_MFLAG_BITMAP_LEN;
2764 xb_get_bitmap(error, &xb, mflags, len); /* mount flag values */
2765 if (!error) {
2766 /* clear all mask bits and OR in all the ones that are set */
2767 nmp->nm_flags[0] &= ~mflags_mask[0];
2768 nmp->nm_flags[0] |= (mflags_mask[0] & mflags[0]);
2769 }
2770 }
2771 if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_NFS_VERSION)) {
2772 /* Can't specify a single version and a range */
2773 if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_NFS_VERSION_RANGE))
2774 error = EINVAL;
2775 xb_get_32(error, &xb, nmp->nm_vers);
2776 if (nmp->nm_vers > NFS_MAX_SUPPORTED_VERSION ||
2777 nmp->nm_vers < NFS_VER2)
2778 error = EINVAL;
2779 if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_NFS_MINOR_VERSION))
2780 xb_get_32(error, &xb, nmp->nm_minor_vers);
2781 else
2782 nmp->nm_minor_vers = maxminorverstab[nmp->nm_vers];
2783 if (nmp->nm_minor_vers > maxminorverstab[nmp->nm_vers])
2784 error = EINVAL;
2785 nmp->nm_max_vers = nmp->nm_min_vers =
2786 VER2PVER(nmp->nm_vers, nmp->nm_minor_vers);
2787 }
2788 if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_NFS_MINOR_VERSION)) {
2789 /* should have also gotten NFS version (and already gotten minor version) */
2790 if (!NFS_BITMAP_ISSET(mattrs, NFS_MATTR_NFS_VERSION))
2791 error = EINVAL;
2792 }
2793 if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_NFS_VERSION_RANGE)) {
2794 xb_get_32(error, &xb, nmp->nm_min_vers);
2795 xb_get_32(error, &xb, nmp->nm_max_vers);
2796 if ((nmp->nm_min_vers > nmp->nm_max_vers) ||
2797 (PVER2MAJOR(nmp->nm_max_vers) > NFS_MAX_SUPPORTED_VERSION) ||
2798 (PVER2MINOR(nmp->nm_min_vers) > maxminorverstab[PVER2MAJOR(nmp->nm_min_vers)]) ||
2799 (PVER2MINOR(nmp->nm_max_vers) > maxminorverstab[PVER2MAJOR(nmp->nm_max_vers)]))
2800 error = EINVAL;
2801 }
2802 if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_READ_SIZE))
2803 xb_get_32(error, &xb, nmp->nm_rsize);
2804 if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_WRITE_SIZE))
2805 xb_get_32(error, &xb, nmp->nm_wsize);
2806 if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_READDIR_SIZE))
2807 xb_get_32(error, &xb, nmp->nm_readdirsize);
2808 if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_READAHEAD))
2809 xb_get_32(error, &xb, nmp->nm_readahead);
2810 if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_ATTRCACHE_REG_MIN)) {
2811 xb_get_32(error, &xb, nmp->nm_acregmin);
2812 xb_skip(error, &xb, XDRWORD);
2813 }
2814 if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_ATTRCACHE_REG_MAX)) {
2815 xb_get_32(error, &xb, nmp->nm_acregmax);
2816 xb_skip(error, &xb, XDRWORD);
2817 }
2818 if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_ATTRCACHE_DIR_MIN)) {
2819 xb_get_32(error, &xb, nmp->nm_acdirmin);
2820 xb_skip(error, &xb, XDRWORD);
2821 }
2822 if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_ATTRCACHE_DIR_MAX)) {
2823 xb_get_32(error, &xb, nmp->nm_acdirmax);
2824 xb_skip(error, &xb, XDRWORD);
2825 }
2826 nfsmerr_if(error);
2827 if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_LOCK_MODE)) {
2828 xb_get_32(error, &xb, val);
2829 switch (val) {
2830 case NFS_LOCK_MODE_DISABLED:
2831 case NFS_LOCK_MODE_LOCAL:
2832 if (nmp->nm_vers >= NFS_VER4) {
2833 /* disabled/local lock mode only allowed on v2/v3 */
2834 error = EINVAL;
2835 break;
2836 }
2837 /* FALLTHROUGH */
2838 case NFS_LOCK_MODE_ENABLED:
2839 nmp->nm_lockmode = val;
2840 break;
2841 default:
2842 error = EINVAL;
2843 }
2844 }
2845 nfsmerr_if(error);
2846 if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_SECURITY)) {
2847 uint32_t seccnt;
2848 xb_get_32(error, &xb, seccnt);
2849 if (!error && ((seccnt < 1) || (seccnt > NX_MAX_SEC_FLAVORS)))
2850 error = EINVAL;
2851 nfsmerr_if(error);
2852 nmp->nm_sec.count = seccnt;
2853 for (i=0; i < seccnt; i++) {
2854 xb_get_32(error, &xb, nmp->nm_sec.flavors[i]);
2855 /* Check for valid security flavor */
2856 switch (nmp->nm_sec.flavors[i]) {
2857 case RPCAUTH_NONE:
2858 case RPCAUTH_SYS:
2859 case RPCAUTH_KRB5:
2860 case RPCAUTH_KRB5I:
2861 case RPCAUTH_KRB5P:
2862 break;
2863 default:
2864 error = EINVAL;
2865 }
2866 }
2867 /* start with the first flavor */
2868 nmp->nm_auth = nmp->nm_sec.flavors[0];
2869 }
2870 if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_MAX_GROUP_LIST))
2871 xb_get_32(error, &xb, nmp->nm_numgrps);
2872 if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_SOCKET_TYPE)) {
2873 char sotype[6];
2874
2875 xb_get_32(error, &xb, val);
2876 if (!error && ((val < 3) || (val > 5)))
2877 error = EINVAL;
2878 nfsmerr_if(error);
2879 error = xb_get_bytes(&xb, sotype, val, 0);
2880 nfsmerr_if(error);
2881 sotype[val] = '\0';
2882 if (!strcmp(sotype, "tcp")) {
2883 nmp->nm_sotype = SOCK_STREAM;
2884 } else if (!strcmp(sotype, "udp")) {
2885 nmp->nm_sotype = SOCK_DGRAM;
2886 } else if (!strcmp(sotype, "tcp4")) {
2887 nmp->nm_sotype = SOCK_STREAM;
2888 nmp->nm_sofamily = AF_INET;
2889 } else if (!strcmp(sotype, "udp4")) {
2890 nmp->nm_sotype = SOCK_DGRAM;
2891 nmp->nm_sofamily = AF_INET;
2892 } else if (!strcmp(sotype, "tcp6")) {
2893 nmp->nm_sotype = SOCK_STREAM;
2894 nmp->nm_sofamily = AF_INET6;
2895 } else if (!strcmp(sotype, "udp6")) {
2896 nmp->nm_sotype = SOCK_DGRAM;
2897 nmp->nm_sofamily = AF_INET6;
2898 } else if (!strcmp(sotype, "inet4")) {
2899 nmp->nm_sofamily = AF_INET;
2900 } else if (!strcmp(sotype, "inet6")) {
2901 nmp->nm_sofamily = AF_INET6;
2902 } else if (!strcmp(sotype, "inet")) {
2903 nmp->nm_sofamily = 0; /* ok */
2904 } else {
2905 error = EINVAL;
2906 }
2907 if (!error && (nmp->nm_vers >= NFS_VER4) && nmp->nm_sotype &&
2908 (nmp->nm_sotype != SOCK_STREAM))
2909 error = EINVAL; /* NFSv4 is only allowed over TCP. */
2910 nfsmerr_if(error);
2911 }
2912 if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_NFS_PORT))
2913 xb_get_32(error, &xb, nmp->nm_nfsport);
2914 if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_MOUNT_PORT))
2915 xb_get_32(error, &xb, nmp->nm_mountport);
2916 if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_REQUEST_TIMEOUT)) {
2917 /* convert from time to 0.1s units */
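/* e.g. 2 seconds + 500000000 (nanoseconds) yields 25 tenths of a second, which is then rounded to clock ticks below */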
2918 xb_get_32(error, &xb, nmp->nm_timeo);
2919 xb_get_32(error, &xb, val);
2920 nfsmerr_if(error);
2921 if (val >= 1000000000)
2922 error = EINVAL;
2923 nfsmerr_if(error);
2924 nmp->nm_timeo *= 10;
2925 nmp->nm_timeo += (val+100000000-1)/100000000;
2926 /* now convert to ticks */
2927 nmp->nm_timeo = (nmp->nm_timeo * NFS_HZ + 5) / 10;
2928 }
2929 if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_SOFT_RETRY_COUNT)) {
2930 xb_get_32(error, &xb, val);
2931 if (!error && (val > 1))
2932 nmp->nm_retry = val;
2933 }
2934 if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_DEAD_TIMEOUT)) {
2935 xb_get_32(error, &xb, nmp->nm_deadtimeout);
2936 xb_skip(error, &xb, XDRWORD);
2937 }
2938 if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_FH)) {
2939 nfsmerr_if(error);
2940 MALLOC(nmp->nm_fh, fhandle_t *, sizeof(fhandle_t), M_TEMP, M_WAITOK|M_ZERO);
2941 if (!nmp->nm_fh)
2942 error = ENOMEM;
2943 xb_get_32(error, &xb, nmp->nm_fh->fh_len);
2944 nfsmerr_if(error);
2945 if (nmp->nm_fh->fh_len < 0 ||
2946 (size_t)nmp->nm_fh->fh_len > sizeof(nmp->nm_fh->fh_data))
2947 error = EINVAL;
2948 else
2949 error = xb_get_bytes(&xb, (char*)&nmp->nm_fh->fh_data[0], nmp->nm_fh->fh_len, 0);
2950 }
2951 nfsmerr_if(error);
2952 if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_FS_LOCATIONS)) {
2953 uint32_t loc, serv, addr, comp;
2954 struct nfs_fs_location *fsl;
2955 struct nfs_fs_server *fss;
2956 struct nfs_fs_path *fsp;
2957
2958 xb_get_32(error, &xb, nmp->nm_locations.nl_numlocs); /* fs location count */
2959 /* sanity check location count */
2960 if (!error && ((nmp->nm_locations.nl_numlocs < 1) || (nmp->nm_locations.nl_numlocs > 256)))
2961 error = EINVAL;
2962 nfsmerr_if(error);
2963 MALLOC(nmp->nm_locations.nl_locations, struct nfs_fs_location **, nmp->nm_locations.nl_numlocs * sizeof(struct nfs_fs_location*), M_TEMP, M_WAITOK|M_ZERO);
2964 if (!nmp->nm_locations.nl_locations)
2965 error = ENOMEM;
2966 for (loc = 0; loc < nmp->nm_locations.nl_numlocs; loc++) {
2967 nfsmerr_if(error);
2968 MALLOC(fsl, struct nfs_fs_location *, sizeof(struct nfs_fs_location), M_TEMP, M_WAITOK|M_ZERO);
2969 if (!fsl)
2970 error = ENOMEM;
2971 nmp->nm_locations.nl_locations[loc] = fsl;
2972 xb_get_32(error, &xb, fsl->nl_servcount); /* server count */
2973 /* sanity check server count */
2974 if (!error && ((fsl->nl_servcount < 1) || (fsl->nl_servcount > 256)))
2975 error = EINVAL;
2976 nfsmerr_if(error);
2977 MALLOC(fsl->nl_servers, struct nfs_fs_server **, fsl->nl_servcount * sizeof(struct nfs_fs_server*), M_TEMP, M_WAITOK|M_ZERO);
2978 if (!fsl->nl_servers)
2979 error = ENOMEM;
2980 for (serv = 0; serv < fsl->nl_servcount; serv++) {
2981 nfsmerr_if(error);
2982 MALLOC(fss, struct nfs_fs_server *, sizeof(struct nfs_fs_server), M_TEMP, M_WAITOK|M_ZERO);
2983 if (!fss)
2984 error = ENOMEM;
2985 fsl->nl_servers[serv] = fss;
2986 xb_get_32(error, &xb, val); /* server name length */
2987 /* sanity check server name length */
2988 if (!error && ((val < 1) || (val > MAXPATHLEN)))
2989 error = EINVAL;
2990 nfsmerr_if(error);
2991 MALLOC(fss->ns_name, char *, val+1, M_TEMP, M_WAITOK|M_ZERO);
2992 if (!fss->ns_name)
2993 error = ENOMEM;
2994 nfsmerr_if(error);
2995 error = xb_get_bytes(&xb, fss->ns_name, val, 0); /* server name */
2996 xb_get_32(error, &xb, fss->ns_addrcount); /* address count */
2997 /* sanity check address count (OK to be zero) */
2998 if (!error && (fss->ns_addrcount > 256))
2999 error = EINVAL;
3000 nfsmerr_if(error);
3001 if (fss->ns_addrcount > 0) {
3002 MALLOC(fss->ns_addresses, char **, fss->ns_addrcount * sizeof(char *), M_TEMP, M_WAITOK|M_ZERO);
3003 if (!fss->ns_addresses)
3004 error = ENOMEM;
3005 for (addr = 0; addr < fss->ns_addrcount; addr++) {
3006 xb_get_32(error, &xb, val); /* address length */
3007 /* sanity check address length */
3008 if (!error && ((val < 1) || (val > 128)))
3009 error = EINVAL;
3010 nfsmerr_if(error);
3011 MALLOC(fss->ns_addresses[addr], char *, val+1, M_TEMP, M_WAITOK|M_ZERO);
3012 if (!fss->ns_addresses[addr])
3013 error = ENOMEM;
3014 nfsmerr_if(error);
3015 error = xb_get_bytes(&xb, fss->ns_addresses[addr], val, 0); /* address */
3016 }
3017 }
3018 xb_get_32(error, &xb, val); /* server info length */
3019 xb_skip(error, &xb, val); /* skip server info */
3020 }
3021 /* get pathname */
3022 fsp = &fsl->nl_path;
3023 xb_get_32(error, &xb, fsp->np_compcount); /* component count */
3024 /* sanity check component count */
3025 if (!error && (fsp->np_compcount > MAXPATHLEN))
3026 error = EINVAL;
3027 nfsmerr_if(error);
3028 if (fsp->np_compcount) {
3029 MALLOC(fsp->np_components, char **, fsp->np_compcount * sizeof(char*), M_TEMP, M_WAITOK|M_ZERO);
3030 if (!fsp->np_components)
3031 error = ENOMEM;
3032 }
3033 for (comp = 0; comp < fsp->np_compcount; comp++) {
3034 xb_get_32(error, &xb, val); /* component length */
3035 /* sanity check component length */
3036 if (!error && (val == 0)) {
3037 /*
3038 * Apparently some people think a path with zero components should
3039 * be encoded with one zero-length component. So, just ignore any
3040 * zero length components.
3041 */
3042 comp--;
3043 fsp->np_compcount--;
3044 if (fsp->np_compcount == 0) {
3045 FREE(fsp->np_components, M_TEMP);
3046 fsp->np_components = NULL;
3047 }
3048 continue;
3049 }
3050 if (!error && ((val < 1) || (val > MAXPATHLEN)))
3051 error = EINVAL;
3052 nfsmerr_if(error);
3053 MALLOC(fsp->np_components[comp], char *, val+1, M_TEMP, M_WAITOK|M_ZERO);
3054 if (!fsp->np_components[comp])
3055 error = ENOMEM;
3056 nfsmerr_if(error);
3057 error = xb_get_bytes(&xb, fsp->np_components[comp], val, 0); /* component */
3058 }
3059 xb_get_32(error, &xb, val); /* fs location info length */
3060 xb_skip(error, &xb, val); /* skip fs location info */
3061 }
3062 }
3063 if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_MNTFLAGS))
3064 xb_skip(error, &xb, XDRWORD);
3065 if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_MNTFROM)) {
3066 xb_get_32(error, &xb, len);
3067 nfsmerr_if(error);
3068 val = len;
3069 if (val >= sizeof(vfs_statfs(mp)->f_mntfromname))
3070 val = sizeof(vfs_statfs(mp)->f_mntfromname) - 1;
3071 error = xb_get_bytes(&xb, vfs_statfs(mp)->f_mntfromname, val, 0);
3072 if ((len - val) > 0)
3073 xb_skip(error, &xb, len - val);
3074 nfsmerr_if(error);
3075 vfs_statfs(mp)->f_mntfromname[val] = '\0';
3076 }
3077 nfsmerr_if(error);
3078
3079 if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_REALM)) {
3080 xb_get_32(error, &xb, len);
3081 if (!error && ((len < 1) || (len > MAXPATHLEN)))
3082 error = EINVAL;
3083 nfsmerr_if(error);
3084 /* allocate an extra byte for a leading '@' if it's not already prepended to the realm */
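/* e.g. a realm passed in as "KRB.REALM" is stored as "@KRB.REALM" */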
3085 MALLOC(nmp->nm_realm, char *, len+2, M_TEMP, M_WAITOK|M_ZERO);
3086 if (!nmp->nm_realm)
3087 error = ENOMEM;
3088 nfsmerr_if(error);
3089 error = xb_get_bytes(&xb, nmp->nm_realm, len, 0);
3090 if (error == 0 && *nmp->nm_realm != '@') {
3091 bcopy(nmp->nm_realm, &nmp->nm_realm[1], len);
3092 nmp->nm_realm[0] = '@';
3093 }
3094 }
3095 nfsmerr_if(error);
3096
3097 if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_PRINCIPAL)) {
3098 xb_get_32(error, &xb, len);
3099 if (!error && ((len < 1) || (len > MAXPATHLEN)))
3100 error = EINVAL;
3101 nfsmerr_if(error);
3102 MALLOC(nmp->nm_principal, char *, len+1, M_TEMP, M_WAITOK|M_ZERO);
3103 if (!nmp->nm_principal)
3104 error = ENOMEM;
3105 nfsmerr_if(error);
3106 error = xb_get_bytes(&xb, nmp->nm_principal, len, 0);
3107 }
3108 nfsmerr_if(error);
3109
3110 if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_SVCPRINCIPAL)) {
3111 xb_get_32(error, &xb, len);
3112 if (!error && ((len < 1) || (len > MAXPATHLEN)))
3113 error = EINVAL;
3114 nfsmerr_if(error);
3115 MALLOC(nmp->nm_sprinc, char *, len+1, M_TEMP, M_WAITOK|M_ZERO);
3116 if (!nmp->nm_sprinc)
3117 error = ENOMEM;
3118 nfsmerr_if(error);
3119 error = xb_get_bytes(&xb, nmp->nm_sprinc, len, 0);
3120 }
3121 nfsmerr_if(error);
3122
3123 /*
3124 * Sanity check/finalize settings.
3125 */
3126
3127 if (nmp->nm_timeo < NFS_MINTIMEO)
3128 nmp->nm_timeo = NFS_MINTIMEO;
3129 else if (nmp->nm_timeo > NFS_MAXTIMEO)
3130 nmp->nm_timeo = NFS_MAXTIMEO;
3131 if (nmp->nm_retry > NFS_MAXREXMIT)
3132 nmp->nm_retry = NFS_MAXREXMIT;
3133
3134 if (nmp->nm_numgrps > NFS_MAXGRPS)
3135 nmp->nm_numgrps = NFS_MAXGRPS;
3136 if (nmp->nm_readahead > NFS_MAXRAHEAD)
3137 nmp->nm_readahead = NFS_MAXRAHEAD;
3138 if (nmp->nm_acregmin > nmp->nm_acregmax)
3139 nmp->nm_acregmin = nmp->nm_acregmax;
3140 if (nmp->nm_acdirmin > nmp->nm_acdirmax)
3141 nmp->nm_acdirmin = nmp->nm_acdirmax;
3142
3143 /* need at least one fs location */
3144 if (nmp->nm_locations.nl_numlocs < 1)
3145 error = EINVAL;
3146 nfsmerr_if(error);
3147
3148 /* init mount's mntfromname to first location */
3149 if (!NM_OMATTR_GIVEN(nmp, MNTFROM))
3150 nfs_location_mntfromname(&nmp->nm_locations, firstloc,
3151 vfs_statfs(mp)->f_mntfromname, sizeof(vfs_statfs(mp)->f_mntfromname), 0);
3152
3153 /* Need to save the mounting credential for v4. */
3154 nmp->nm_mcred = vfs_context_ucred(ctx);
3155 if (IS_VALID_CRED(nmp->nm_mcred))
3156 kauth_cred_ref(nmp->nm_mcred);
3157
3158 /*
3159 * If a reserved port is required, check for that privilege.
3160 * (Note that mirror mounts are exempt because the privilege was
3161 * already checked for the original mount.)
3162 */
3163 if (NMFLAG(nmp, RESVPORT) && !vfs_iskernelmount(mp))
3164 error = priv_check_cred(nmp->nm_mcred, PRIV_NETINET_RESERVEDPORT, 0);
3165 nfsmerr_if(error);
3166
3167 /* do mount's initial socket connection */
3168 error = nfs_mount_connect(nmp);
3169 nfsmerr_if(error);
3170
3171 /* set up the version-specific function tables */
3172 if (nmp->nm_vers < NFS_VER4)
3173 nmp->nm_funcs = &nfs3_funcs;
3174 else
3175 nmp->nm_funcs = &nfs4_funcs;
3176
3177 /* sanity check settings now that version/connection is set */
3178 if (nmp->nm_vers == NFS_VER2) /* ignore RDIRPLUS on NFSv2 */
3179 NFS_BITMAP_CLR(nmp->nm_flags, NFS_MFLAG_RDIRPLUS);
3180 if (nmp->nm_vers >= NFS_VER4) {
3181 if (NFS_BITMAP_ISSET(nmp->nm_flags, NFS_MFLAG_ACLONLY)) /* aclonly trumps noacl */
3182 NFS_BITMAP_CLR(nmp->nm_flags, NFS_MFLAG_NOACL);
3183 NFS_BITMAP_CLR(nmp->nm_flags, NFS_MFLAG_CALLUMNT);
3184 if (nmp->nm_lockmode != NFS_LOCK_MODE_ENABLED)
3185 error = EINVAL; /* disabled/local lock mode only allowed on v2/v3 */
3186 } else {
3187 /* ignore these if not v4 */
3188 NFS_BITMAP_CLR(nmp->nm_flags, NFS_MFLAG_NOCALLBACK);
3189 NFS_BITMAP_CLR(nmp->nm_flags, NFS_MFLAG_NONAMEDATTR);
3190 NFS_BITMAP_CLR(nmp->nm_flags, NFS_MFLAG_NOACL);
3191 NFS_BITMAP_CLR(nmp->nm_flags, NFS_MFLAG_ACLONLY);
3192 }
3193 nfsmerr_if(error);
3194
3195 if (nmp->nm_sotype == SOCK_DGRAM) {
3196 /* I/O size defaults for UDP are different */
3197 if (!NFS_BITMAP_ISSET(mattrs, NFS_MATTR_READ_SIZE))
3198 nmp->nm_rsize = NFS_DGRAM_RSIZE;
3199 if (!NFS_BITMAP_ISSET(mattrs, NFS_MATTR_WRITE_SIZE))
3200 nmp->nm_wsize = NFS_DGRAM_WSIZE;
3201 }
3202
3203 /* round down I/O sizes to multiple of NFS_FABLKSIZE */
3204 nmp->nm_rsize &= ~(NFS_FABLKSIZE - 1);
3205 if (nmp->nm_rsize <= 0)
3206 nmp->nm_rsize = NFS_FABLKSIZE;
3207 nmp->nm_wsize &= ~(NFS_FABLKSIZE - 1);
3208 if (nmp->nm_wsize <= 0)
3209 nmp->nm_wsize = NFS_FABLKSIZE;
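 /*
  * Illustrative note: the masking above rounds each I/O size down to a
  * multiple of NFS_FABLKSIZE (the NFS "fake" block size, typically 512).
  * For example, a requested rsize of 70000 becomes 70000 & ~511 = 69632
  * (136 * 512); a value that rounds to 0 is bumped back up to one block.
  */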
3210
3211 /* and limit I/O sizes to maximum allowed */
3212 maxio = (nmp->nm_vers == NFS_VER2) ? NFS_V2MAXDATA :
3213 (nmp->nm_sotype == SOCK_DGRAM) ? NFS_MAXDGRAMDATA : NFS_MAXDATA;
3214 if (maxio > NFS_MAXBSIZE)
3215 maxio = NFS_MAXBSIZE;
3216 if (nmp->nm_rsize > maxio)
3217 nmp->nm_rsize = maxio;
3218 if (nmp->nm_wsize > maxio)
3219 nmp->nm_wsize = maxio;
3220
3221 if (nmp->nm_readdirsize > maxio)
3222 nmp->nm_readdirsize = maxio;
3223 if (nmp->nm_readdirsize > nmp->nm_rsize)
3224 nmp->nm_readdirsize = nmp->nm_rsize;
3225
3226 /* Set up the sockets and related info */
3227 if (nmp->nm_sotype == SOCK_DGRAM)
3228 TAILQ_INIT(&nmp->nm_cwndq);
3229
3230 /*
3231 * Get the root node/attributes from the NFS server and
3232 * do any basic, version-specific setup.
3233 */
3234 error = nmp->nm_funcs->nf_mount(nmp, ctx, &np);
3235 nfsmerr_if(error);
3236
3237 /*
3238 * A reference count is needed on the node representing the
3239 * remote root. If this object is not persistent, then backward
3240 * traversals of the mount point (i.e. "..") will not work if
3241 * the node gets flushed out of the cache.
3242 */
3243 nmp->nm_dnp = np;
3244 *vpp = NFSTOV(np);
3245 /* get usecount and drop iocount */
3246 error = vnode_ref(*vpp);
3247 vnode_put(*vpp);
3248 if (error) {
3249 vnode_recycle(*vpp);
3250 goto nfsmerr;
3251 }
3252
3253 /*
3254 * Do statfs to ensure static info gets set to reasonable values.
3255 */
3256 if ((error = nmp->nm_funcs->nf_update_statfs(nmp, ctx))) {
3257 int error2 = vnode_getwithref(*vpp);
3258 vnode_rele(*vpp);
3259 if (!error2)
3260 vnode_put(*vpp);
3261 vnode_recycle(*vpp);
3262 goto nfsmerr;
3263 }
3264 sbp = vfs_statfs(mp);
3265 sbp->f_bsize = nmp->nm_fsattr.nfsa_bsize;
3266 sbp->f_blocks = nmp->nm_fsattr.nfsa_space_total / sbp->f_bsize;
3267 sbp->f_bfree = nmp->nm_fsattr.nfsa_space_free / sbp->f_bsize;
3268 sbp->f_bavail = nmp->nm_fsattr.nfsa_space_avail / sbp->f_bsize;
3269 sbp->f_bused = (nmp->nm_fsattr.nfsa_space_total / sbp->f_bsize) -
3270 (nmp->nm_fsattr.nfsa_space_free / sbp->f_bsize);
3271 sbp->f_files = nmp->nm_fsattr.nfsa_files_total;
3272 sbp->f_ffree = nmp->nm_fsattr.nfsa_files_free;
3273 sbp->f_iosize = nfs_iosize;
3274
3275 /*
3276 * Calculate the size used for I/O buffers. Use the larger
3277 * of the two sizes to minimise NFS requests but make sure
3278 * that it is at least one VM page to avoid wasting buffer
3279 * space and to allow easy mmapping of I/O buffers.
3280 * The read/write RPC calls handle the splitting up of
3281 * buffers into multiple requests if the buffer size is
3282 * larger than the I/O size.
3283 */
3284 iosize = max(nmp->nm_rsize, nmp->nm_wsize);
3285 if (iosize < PAGE_SIZE)
3286 iosize = PAGE_SIZE;
3287 nmp->nm_biosize = trunc_page_32(iosize);
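 /*
  * Worked example (illustrative values): with rsize = wsize = 32768 and a
  * 4K page size, iosize = 32768 and nm_biosize = 32768.  With tiny I/O
  * sizes (e.g. rsize = wsize = 1024), iosize is first raised to PAGE_SIZE,
  * so nm_biosize ends up as one page.
  */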
3288
3289 /* For NFSv3 and greater, there is a (relatively) reliable ACCESS call. */
3290 if (nmp->nm_vers > NFS_VER2)
3291 vfs_setauthopaqueaccess(mp);
3292
3293 switch (nmp->nm_lockmode) {
3294 case NFS_LOCK_MODE_DISABLED:
3295 break;
3296 case NFS_LOCK_MODE_LOCAL:
3297 vfs_setlocklocal(nmp->nm_mountp);
3298 break;
3299 case NFS_LOCK_MODE_ENABLED:
3300 default:
3301 if (nmp->nm_vers <= NFS_VER3)
3302 nfs_lockd_mount_register(nmp);
3303 break;
3304 }
3305
3306 /* success! */
3307 lck_mtx_lock(&nmp->nm_lock);
3308 nmp->nm_state |= NFSSTA_MOUNTED;
3309 lck_mtx_unlock(&nmp->nm_lock);
3310 return (0);
3311nfsmerr:
3312 nfs_mount_cleanup(nmp);
3313 return (error);
3314}
3315
3316#if CONFIG_TRIGGERS
3317
3318/*
3319 * We've detected a file system boundary on the server and
3320 * need to mount a new file system so that our file systems
3321 * MIRROR the file systems on the server.
3322 *
3323 * Build the mount arguments for the new mount and call kernel_mount().
3324 */
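/*
 * Rough roadmap of the function below: build the new mntfromname from the
 * parent mount's mntfromname plus the path relative to its mntonname; for
 * referrals, fetch the fs locations from the server; copy the parent mount's
 * XDR mount args while substituting the trigger directory's file handle (or
 * the referral's fs locations) and appending the relative path components;
 * force MNT_AUTOMOUNTED|MNT_DONTBROWSE; back-patch the length fields; then
 * call kernel_mount() and, on success, start the ephemeral mount harvester.
 */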
3325int
3326nfs_mirror_mount_domount(vnode_t dvp, vnode_t vp, vfs_context_t ctx)
3327{
3328 nfsnode_t np = VTONFS(vp);
3329 nfsnode_t dnp = VTONFS(dvp);
3330 struct nfsmount *nmp = NFSTONMP(np);
3331 char fstype[MFSTYPENAMELEN], *mntfromname = NULL, *path = NULL, *relpath, *p, *cp;
3332 int error = 0, pathbuflen = MAXPATHLEN, i, mntflags = 0, referral, skipcopy = 0;
3333 size_t nlen;
3334 struct xdrbuf xb, xbnew;
3335 uint32_t mattrs[NFS_MATTR_BITMAP_LEN];
3336 uint32_t newmattrs[NFS_MATTR_BITMAP_LEN];
3337 uint32_t newmflags[NFS_MFLAG_BITMAP_LEN];
3338 uint32_t newmflags_mask[NFS_MFLAG_BITMAP_LEN];
3339 uint32_t argslength = 0, val, count, mlen, mlen2, rlen, relpathcomps;
3340 uint32_t argslength_offset, attrslength_offset, end_offset;
3341 uint32_t numlocs, loc, numserv, serv, numaddr, addr, numcomp, comp;
3342 char buf[XDRWORD];
3343 struct nfs_fs_locations nfsls;
3344
3345 referral = (np->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL);
3346 if (referral)
3347 bzero(&nfsls, sizeof(nfsls));
3348
3349 xb_init(&xbnew, 0);
3350
3351 if (!nmp || (nmp->nm_state & (NFSSTA_FORCE|NFSSTA_DEAD)))
3352 return (ENXIO);
3353
3354 /* allocate a couple path buffers we need */
3355 MALLOC_ZONE(mntfromname, char *, pathbuflen, M_NAMEI, M_WAITOK);
3356 if (!mntfromname) {
3357 error = ENOMEM;
3358 goto nfsmerr;
3359 }
3360 MALLOC_ZONE(path, char *, pathbuflen, M_NAMEI, M_WAITOK);
3361 if (!path) {
3362 error = ENOMEM;
3363 goto nfsmerr;
3364 }
3365
3366 /* get the path for the directory being mounted on */
3367 error = vn_getpath(vp, path, &pathbuflen);
3368 if (error) {
3369 error = ENOMEM;
3370 goto nfsmerr;
3371 }
3372
3373 /*
3374 * Set up the mntfromname for the new mount based on the
3375 * current mount's mntfromname and the directory's path
3376 * relative to the current mount's mntonname.
3377 * Set up relpath to point at the relative path on the current mount.
3378 * Also, count the number of components in relpath.
3379 * We'll be adding those to each fs location path in the new args.
3380 */
3381 nlen = strlcpy(mntfromname, vfs_statfs(nmp->nm_mountp)->f_mntfromname, MAXPATHLEN);
3382 if ((nlen > 0) && (mntfromname[nlen-1] == '/')) { /* avoid double '/' in new name */
3383 mntfromname[nlen-1] = '\0';
3384 nlen--;
3385 }
3386 relpath = mntfromname + nlen;
3387 nlen = strlcat(mntfromname, path + strlen(vfs_statfs(nmp->nm_mountp)->f_mntonname), MAXPATHLEN);
3388 if (nlen >= MAXPATHLEN) {
3389 error = ENAMETOOLONG;
3390 goto nfsmerr;
3391 }
3392 /* count the number of components in relpath */
3393 p = relpath;
3394 while (*p && (*p == '/'))
3395 p++;
3396 relpathcomps = 0;
3397 while (*p) {
3398 relpathcomps++;
3399 while (*p && (*p != '/'))
3400 p++;
3401 while (*p && (*p == '/'))
3402 p++;
3403 }
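 /*
  * Example (hypothetical names): if the current mount is "server:/export"
  * on "/Volumes/data" and the trigger directory is "/Volumes/data/a/b",
  * then mntfromname becomes "server:/export/a/b", relpath points at
  * "/a/b", and relpathcomps is 2.
  */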
3404
3405 /* grab a copy of the file system type */
3406 vfs_name(vnode_mount(vp), fstype);
3407
3408 /* for referrals, fetch the fs locations */
3409 if (referral) {
3410 const char *vname = vnode_getname(NFSTOV(np));
3411 if (!vname) {
3412 error = ENOENT;
3413 } else {
3414 error = nfs4_get_fs_locations(nmp, dnp, NULL, 0, vname, ctx, &nfsls);
3415 vnode_putname(vname);
3416 if (!error && (nfsls.nl_numlocs < 1))
3417 error = ENOENT;
3418 }
3419 nfsmerr_if(error);
3420 }
3421
3422 /* set up NFS mount args based on current mount args */
3423
3424#define xb_copy_32(E, XBSRC, XBDST, V) \
3425 do { \
3426 if (E) break; \
3427 xb_get_32((E), (XBSRC), (V)); \
3428 if (skipcopy) break; \
3429 xb_add_32((E), (XBDST), (V)); \
3430 } while (0)
3431#define xb_copy_opaque(E, XBSRC, XBDST) \
3432 do { \
3433 uint32_t __count, __val; \
3434 xb_copy_32((E), (XBSRC), (XBDST), __count); \
3435 if (E) break; \
3436 __count = nfsm_rndup(__count); \
3437 __count /= XDRWORD; \
3438 while (__count-- > 0) \
3439 xb_copy_32((E), (XBSRC), (XBDST), __val); \
3440 } while (0)
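/*
 * Roughly, xb_copy_32() reads one 32-bit XDR word from the old args buffer
 * and appends it to the new one (unless skipcopy is set, in which case the
 * word is only consumed), and xb_copy_opaque() does the same for a counted
 * opaque: the length word followed by the data rounded up to whole XDR words.
 * The args buffer being copied is laid out as: version word, total args
 * length, XDR args version, mount attribute bitmap, attributes length, then
 * each attribute present in the bitmap, in bitmap order.
 */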
3441
3442 xb_init_buffer(&xb, nmp->nm_args, 2*XDRWORD);
3443 xb_get_32(error, &xb, val); /* version */
3444 xb_get_32(error, &xb, argslength); /* args length */
3445 xb_init_buffer(&xb, nmp->nm_args, argslength);
3446
3447 xb_init_buffer(&xbnew, NULL, 0);
3448 xb_copy_32(error, &xb, &xbnew, val); /* version */
3449 argslength_offset = xb_offset(&xbnew);
3450 xb_copy_32(error, &xb, &xbnew, val); /* args length */
3451 xb_copy_32(error, &xb, &xbnew, val); /* XDR args version */
3452 count = NFS_MATTR_BITMAP_LEN;
3453 xb_get_bitmap(error, &xb, mattrs, count); /* mount attribute bitmap */
3454 nfsmerr_if(error);
3455 for (i = 0; i < NFS_MATTR_BITMAP_LEN; i++)
3456 newmattrs[i] = mattrs[i];
3457 if (referral)
3458 NFS_BITMAP_SET(newmattrs, NFS_MATTR_FS_LOCATIONS);
3459 else
3460 NFS_BITMAP_SET(newmattrs, NFS_MATTR_FH);
3461 NFS_BITMAP_SET(newmattrs, NFS_MATTR_FLAGS);
3462 NFS_BITMAP_SET(newmattrs, NFS_MATTR_MNTFLAGS);
3463 NFS_BITMAP_CLR(newmattrs, NFS_MATTR_MNTFROM);
3464 xb_add_bitmap(error, &xbnew, newmattrs, NFS_MATTR_BITMAP_LEN);
3465 attrslength_offset = xb_offset(&xbnew);
3466 xb_copy_32(error, &xb, &xbnew, val); /* attrs length */
3467 NFS_BITMAP_ZERO(newmflags_mask, NFS_MFLAG_BITMAP_LEN);
3468 NFS_BITMAP_ZERO(newmflags, NFS_MFLAG_BITMAP_LEN);
3469 if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_FLAGS)) {
3470 count = NFS_MFLAG_BITMAP_LEN;
3471 xb_get_bitmap(error, &xb, newmflags_mask, count); /* mount flag mask bitmap */
3472 count = NFS_MFLAG_BITMAP_LEN;
3473 xb_get_bitmap(error, &xb, newmflags, count); /* mount flag bitmap */
3474 }
3475 NFS_BITMAP_SET(newmflags_mask, NFS_MFLAG_EPHEMERAL);
3476 NFS_BITMAP_SET(newmflags, NFS_MFLAG_EPHEMERAL);
3477 xb_add_bitmap(error, &xbnew, newmflags_mask, NFS_MFLAG_BITMAP_LEN);
3478 xb_add_bitmap(error, &xbnew, newmflags, NFS_MFLAG_BITMAP_LEN);
3479 if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_NFS_VERSION))
3480 xb_copy_32(error, &xb, &xbnew, val);
3481 if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_NFS_MINOR_VERSION))
3482 xb_copy_32(error, &xb, &xbnew, val);
3483 if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_NFS_VERSION_RANGE)) {
3484 xb_copy_32(error, &xb, &xbnew, val);
3485 xb_copy_32(error, &xb, &xbnew, val);
3486 }
3487 if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_READ_SIZE))
3488 xb_copy_32(error, &xb, &xbnew, val);
3489 if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_WRITE_SIZE))
3490 xb_copy_32(error, &xb, &xbnew, val);
3491 if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_READDIR_SIZE))
3492 xb_copy_32(error, &xb, &xbnew, val);
3493 if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_READAHEAD))
3494 xb_copy_32(error, &xb, &xbnew, val);
3495 if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_ATTRCACHE_REG_MIN)) {
3496 xb_copy_32(error, &xb, &xbnew, val);
3497 xb_copy_32(error, &xb, &xbnew, val);
3498 }
3499 if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_ATTRCACHE_REG_MAX)) {
3500 xb_copy_32(error, &xb, &xbnew, val);
3501 xb_copy_32(error, &xb, &xbnew, val);
3502 }
3503 if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_ATTRCACHE_DIR_MIN)) {
3504 xb_copy_32(error, &xb, &xbnew, val);
3505 xb_copy_32(error, &xb, &xbnew, val);
3506 }
3507 if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_ATTRCACHE_DIR_MAX)) {
3508 xb_copy_32(error, &xb, &xbnew, val);
3509 xb_copy_32(error, &xb, &xbnew, val);
3510 }
3511 if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_LOCK_MODE))
3512 xb_copy_32(error, &xb, &xbnew, val);
3513 if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_SECURITY)) {
3514 xb_copy_32(error, &xb, &xbnew, count);
3515 while (!error && (count-- > 0))
3516 xb_copy_32(error, &xb, &xbnew, val);
3517 }
3518 if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_MAX_GROUP_LIST))
3519 xb_copy_32(error, &xb, &xbnew, val);
3520 if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_SOCKET_TYPE))
3521 xb_copy_opaque(error, &xb, &xbnew);
3522 if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_NFS_PORT))
3523 xb_copy_32(error, &xb, &xbnew, val);
3524 if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_MOUNT_PORT))
3525 xb_copy_32(error, &xb, &xbnew, val);
3526 if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_REQUEST_TIMEOUT)) {
3527 xb_copy_32(error, &xb, &xbnew, val);
3528 xb_copy_32(error, &xb, &xbnew, val);
3529 }
3530 if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_SOFT_RETRY_COUNT))
3531 xb_copy_32(error, &xb, &xbnew, val);
3532 if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_DEAD_TIMEOUT)) {
3533 xb_copy_32(error, &xb, &xbnew, val);
3534 xb_copy_32(error, &xb, &xbnew, val);
3535 }
3536 if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_FH)) {
3537 xb_get_32(error, &xb, count);
3538 xb_skip(error, &xb, count);
3539 }
3540 if (!referral) {
3541 /* set the initial file handle to the directory's file handle */
3542 xb_add_fh(error, &xbnew, np->n_fhp, np->n_fhsize);
3543 }
3544 /* copy/extend/skip fs locations */
3545 if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_FS_LOCATIONS)) {
3546 numlocs = numserv = numaddr = numcomp = 0;
3547 if (referral) /* don't copy the fs locations for a referral */
3548 skipcopy = 1;
3549 xb_copy_32(error, &xb, &xbnew, numlocs); /* location count */
3550 for (loc = 0; !error && (loc < numlocs); loc++) {
3551 xb_copy_32(error, &xb, &xbnew, numserv); /* server count */
3552 for (serv = 0; !error && (serv < numserv); serv++) {
3553 xb_copy_opaque(error, &xb, &xbnew); /* server name */
3554 xb_copy_32(error, &xb, &xbnew, numaddr); /* address count */
3555 for (addr = 0; !error && (addr < numaddr); addr++)
3556 xb_copy_opaque(error, &xb, &xbnew); /* address */
3557 xb_copy_opaque(error, &xb, &xbnew); /* server info */
3558 }
3559 /* pathname */
3560 xb_get_32(error, &xb, numcomp); /* component count */
3561 if (!skipcopy)
3562 xb_add_32(error, &xbnew, numcomp+relpathcomps); /* new component count */
3563 for (comp = 0; !error && (comp < numcomp); comp++)
3564 xb_copy_opaque(error, &xb, &xbnew); /* component */
3565 /* add additional components */
3566 for (comp = 0; !skipcopy && !error && (comp < relpathcomps); comp++) {
3567 p = relpath;
3568 while (*p && (*p == '/'))
3569 p++;
3570 while (*p && !error) {
3571 cp = p;
3572 while (*p && (*p != '/'))
3573 p++;
3574 xb_add_string(error, &xbnew, cp, (p - cp)); /* component */
3575 while (*p && (*p == '/'))
3576 p++;
3577 }
3578 }
3579 xb_copy_opaque(error, &xb, &xbnew); /* fs location info */
3580 }
3581 if (referral)
3582 skipcopy = 0;
3583 }
3584 if (referral) {
3585 /* add referral's fs locations */
3586 xb_add_32(error, &xbnew, nfsls.nl_numlocs); /* FS_LOCATIONS */
3587 for (loc = 0; !error && (loc < nfsls.nl_numlocs); loc++) {
3588 xb_add_32(error, &xbnew, nfsls.nl_locations[loc]->nl_servcount);
3589 for (serv = 0; !error && (serv < nfsls.nl_locations[loc]->nl_servcount); serv++) {
3590 xb_add_string(error, &xbnew, nfsls.nl_locations[loc]->nl_servers[serv]->ns_name,
3591 strlen(nfsls.nl_locations[loc]->nl_servers[serv]->ns_name));
3592 xb_add_32(error, &xbnew, nfsls.nl_locations[loc]->nl_servers[serv]->ns_addrcount);
3593 for (addr = 0; !error && (addr < nfsls.nl_locations[loc]->nl_servers[serv]->ns_addrcount); addr++)
3594 xb_add_string(error, &xbnew, nfsls.nl_locations[loc]->nl_servers[serv]->ns_addresses[addr],
3595 strlen(nfsls.nl_locations[loc]->nl_servers[serv]->ns_addresses[addr]));
3596 xb_add_32(error, &xbnew, 0); /* empty server info */
3597 }
3598 xb_add_32(error, &xbnew, nfsls.nl_locations[loc]->nl_path.np_compcount);
3599 for (comp = 0; !error && (comp < nfsls.nl_locations[loc]->nl_path.np_compcount); comp++)
3600 xb_add_string(error, &xbnew, nfsls.nl_locations[loc]->nl_path.np_components[comp],
3601 strlen(nfsls.nl_locations[loc]->nl_path.np_components[comp]));
3602 xb_add_32(error, &xbnew, 0); /* empty fs location info */
3603 }
3604 }
3605 if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_MNTFLAGS))
3606 xb_get_32(error, &xb, mntflags);
3607 /*
3608 * We add the following mount flags to the ones for the mounted-on mount:
3609 * MNT_DONTBROWSE - to keep the mount from showing up as a separate volume
3610 * MNT_AUTOMOUNTED - to keep DiskArb from retriggering the mount after
3611 * an unmount (looking for /.autodiskmounted)
3612 */
3613 mntflags |= (MNT_AUTOMOUNTED | MNT_DONTBROWSE);
3614 xb_add_32(error, &xbnew, mntflags);
3615 if (!referral && NFS_BITMAP_ISSET(mattrs, NFS_MATTR_MNTFROM)) {
3616 /* copy mntfrom string and add relpath */
3617 rlen = strlen(relpath);
3618 xb_get_32(error, &xb, mlen);
3619 nfsmerr_if(error);
3620 mlen2 = mlen + ((relpath[0] != '/') ? 1 : 0) + rlen;
3621 xb_add_32(error, &xbnew, mlen2);
3622 count = mlen/XDRWORD;
3623 /* copy the original string */
3624 while (count-- > 0)
3625 xb_copy_32(error, &xb, &xbnew, val);
3626 if (!error && (mlen % XDRWORD)) {
3627 error = xb_get_bytes(&xb, buf, mlen%XDRWORD, 0);
3628 if (!error)
3629 error = xb_add_bytes(&xbnew, buf, mlen%XDRWORD, 1);
3630 }
3631 /* insert a '/' if the relative path doesn't start with one */
3632 if (!error && (relpath[0] != '/')) {
3633 buf[0] = '/';
3634 error = xb_add_bytes(&xbnew, buf, 1, 1);
3635 }
3636 /* add the additional relative path */
3637 if (!error)
3638 error = xb_add_bytes(&xbnew, relpath, rlen, 1);
3639 /* make sure the resulting string has the right number of pad bytes */
3640 if (!error && (mlen2 != nfsm_rndup(mlen2))) {
3641 bzero(buf, sizeof(buf));
3642 count = nfsm_rndup(mlen2) - mlen2;
3643 error = xb_add_bytes(&xbnew, buf, count, 1);
3644 }
3645 }
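 /*
  * Padding example (illustrative lengths): if the original mntfrom string
  * is 14 bytes and relpath is "/a/b" (4 bytes, already starting with '/'),
  * then mlen2 = 18 and nfsm_rndup(18) = 20, so two zero pad bytes are
  * appended to keep the XDR string aligned to a 4-byte boundary.
  */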
3646 xb_build_done(error, &xbnew);
3647
3648 /* update opaque counts */
3649 end_offset = xb_offset(&xbnew);
3650 if (!error) {
3651 error = xb_seek(&xbnew, argslength_offset);
3652 argslength = end_offset - argslength_offset + XDRWORD/*version*/;
3653 xb_add_32(error, &xbnew, argslength);
3654 }
3655 if (!error) {
3656 error = xb_seek(&xbnew, attrslength_offset);
3657 xb_add_32(error, &xbnew, end_offset - attrslength_offset - XDRWORD/*don't include length field*/);
3658 }
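 /*
  * The back-patched args length counts everything including the leading
  * version word (hence the "+ XDRWORD"), while the attrs length excludes
  * its own length word.  For example (illustrative offsets), with the args
  * length field at offset 4 and end_offset at 200, argslength is written as
  * 200; with the attrs length field at offset 40, attrslength is 156.
  */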
3659 nfsmerr_if(error);
3660
3661 /*
3662 * For kernel_mount() call, use the existing mount flags (instead of the
3663 * original flags) because flags like MNT_NOSUID and MNT_NODEV may have
3664 * been silently enforced.
3665 */
3666 mntflags = vnode_vfsvisflags(vp);
3667 mntflags |= (MNT_AUTOMOUNTED | MNT_DONTBROWSE);
3668
3669 /* do the mount */
3670 error = kernel_mount(fstype, dvp, vp, path, xb_buffer_base(&xbnew), argslength,
3671 mntflags, KERNEL_MOUNT_PERMIT_UNMOUNT | KERNEL_MOUNT_NOAUTH, ctx);
3672
3673nfsmerr:
3674 if (error)
3675 printf("nfs: mirror mount of %s on %s failed (%d)\n",
3676 mntfromname, path, error);
3677 /* clean up */
3678 xb_cleanup(&xbnew);
3679 if (referral)
3680 nfs_fs_locations_cleanup(&nfsls);
3681 if (path)
3682 FREE_ZONE(path, MAXPATHLEN, M_NAMEI);
3683 if (mntfromname)
3684 FREE_ZONE(mntfromname, MAXPATHLEN, M_NAMEI);
3685 if (!error)
3686 nfs_ephemeral_mount_harvester_start();
3687 return (error);
3688}
3689
3690/*
3691 * trigger vnode functions
3692 */
3693
3694resolver_result_t
3695nfs_mirror_mount_trigger_resolve(
3696 vnode_t vp,
3697 const struct componentname *cnp,
3698 enum path_operation pop,
3699 __unused int flags,
3700 __unused void *data,
3701 vfs_context_t ctx)
3702{
3703 nfsnode_t np = VTONFS(vp);
3704 vnode_t pvp = NULLVP;
3705 int error = 0;
3706 resolver_result_t result;
3707
3708 /*
3709 * We have a trigger node that doesn't have anything mounted on it yet.
3710 * We'll do the mount if either:
3711 * (a) this isn't the last component of the path OR
3712 * (b) this is an op that looks like it should trigger the mount.
3713 */
3714 if (cnp->cn_flags & ISLASTCN) {
3715 switch (pop) {
3716 case OP_MOUNT:
3717 case OP_UNMOUNT:
3718 case OP_STATFS:
3719 case OP_LINK:
3720 case OP_UNLINK:
3721 case OP_RENAME:
3722 case OP_MKNOD:
3723 case OP_MKFIFO:
3724 case OP_SYMLINK:
3725 case OP_ACCESS:
3726 case OP_GETATTR:
3727 case OP_MKDIR:
3728 case OP_RMDIR:
3729 case OP_REVOKE:
3730 case OP_GETXATTR:
3731 case OP_LISTXATTR:
3732 /* don't perform the mount for these operations */
3733 result = vfs_resolver_result(np->n_trigseq, RESOLVER_NOCHANGE, 0);
3734#ifdef NFS_TRIGGER_DEBUG
3735 NP(np, "nfs trigger RESOLVE: no change, last %d nameiop %d, seq %d",
3736 (cnp->cn_flags & ISLASTCN) ? 1 : 0, cnp->cn_nameiop, np->n_trigseq);
3737#endif
3738 return (result);
3739 case OP_OPEN:
3740 case OP_CHDIR:
3741 case OP_CHROOT:
3742 case OP_TRUNCATE:
3743 case OP_COPYFILE:
3744 case OP_PATHCONF:
3745 case OP_READLINK:
3746 case OP_SETATTR:
3747 case OP_EXCHANGEDATA:
3748 case OP_SEARCHFS:
3749 case OP_FSCTL:
3750 case OP_SETXATTR:
3751 case OP_REMOVEXATTR:
3752 default:
3753 /* go ahead and do the mount */
3754 break;
3755 }
3756 }
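 /*
  * In practice this means, for example, that a stat(2) or listxattr(2) of
  * the trigger directory itself just returns the trigger node's own
  * attributes (so directory listings don't fire off mounts), while an
  * open(2) or chdir(2) on it, or any lookup that passes through it,
  * proceeds to trigger the mirror mount below.
  */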
3757
3758 if (vnode_mountedhere(vp) != NULL) {
3759 /*
3760 * Um... there's already something mounted.
3761 * Been there. Done that. Let's just say it succeeded.
3762 */
3763 error = 0;
3764 goto skipmount;
3765 }
3766
3767 if ((error = nfs_node_set_busy(np, vfs_context_thread(ctx)))) {
3768 result = vfs_resolver_result(np->n_trigseq, RESOLVER_ERROR, error);
3769#ifdef NFS_TRIGGER_DEBUG
3770 NP(np, "nfs trigger RESOLVE: busy error %d, last %d nameiop %d, seq %d",
3771 error, (cnp->cn_flags & ISLASTCN) ? 1 : 0, cnp->cn_nameiop, np->n_trigseq);
3772#endif
3773 return (result);
3774 }
3775
3776 pvp = vnode_getparent(vp);
3777 if (pvp == NULLVP)
3778 error = EINVAL;
3779 if (!error)
3780 error = nfs_mirror_mount_domount(pvp, vp, ctx);
3781skipmount:
3782 if (!error)
3783 np->n_trigseq++;
3784 result = vfs_resolver_result(np->n_trigseq, error ? RESOLVER_ERROR : RESOLVER_RESOLVED, error);
3785#ifdef NFS_TRIGGER_DEBUG
3786 NP(np, "nfs trigger RESOLVE: %s %d, last %d nameiop %d, seq %d",
3787 error ? "error" : "resolved", error,
3788 (cnp->cn_flags & ISLASTCN) ? 1 : 0, cnp->cn_nameiop, np->n_trigseq);
3789#endif
3790
3791 if (pvp != NULLVP)
3792 vnode_put(pvp);
3793 nfs_node_clear_busy(np);
3794 return (result);
3795}
3796
3797resolver_result_t
3798nfs_mirror_mount_trigger_unresolve(
3799 vnode_t vp,
3800 int flags,
3801 __unused void *data,
3802 vfs_context_t ctx)
3803{
3804 nfsnode_t np = VTONFS(vp);
3805 mount_t mp;
3806 int error;
3807 resolver_result_t result;
3808
3809 if ((error = nfs_node_set_busy(np, vfs_context_thread(ctx)))) {
3810 result = vfs_resolver_result(np->n_trigseq, RESOLVER_ERROR, error);
3811#ifdef NFS_TRIGGER_DEBUG
3812 NP(np, "nfs trigger UNRESOLVE: busy error %d, seq %d", error, np->n_trigseq);
3813#endif
3814 return (result);
3815 }
3816
3817 mp = vnode_mountedhere(vp);
3818 if (!mp)
3819 error = EINVAL;
3820 if (!error)
3821 error = vfs_unmountbyfsid(&(vfs_statfs(mp)->f_fsid), flags, ctx);
3822 if (!error)
3823 np->n_trigseq++;
3824 result = vfs_resolver_result(np->n_trigseq, error ? RESOLVER_ERROR : RESOLVER_UNRESOLVED, error);
3825#ifdef NFS_TRIGGER_DEBUG
3826 NP(np, "nfs trigger UNRESOLVE: %s %d, seq %d",
3827 error ? "error" : "unresolved", error, np->n_trigseq);
3828#endif
3829 nfs_node_clear_busy(np);
3830 return (result);
3831}
3832
3833resolver_result_t
3834nfs_mirror_mount_trigger_rearm(
3835 vnode_t vp,
3836 __unused int flags,
3837 __unused void *data,
3838 vfs_context_t ctx)
3839{
3840 nfsnode_t np = VTONFS(vp);
3841 int error;
3842 resolver_result_t result;
3843
3844 if ((error = nfs_node_set_busy(np, vfs_context_thread(ctx)))) {
3845 result = vfs_resolver_result(np->n_trigseq, RESOLVER_ERROR, error);
3846#ifdef NFS_TRIGGER_DEBUG
3847 NP(np, "nfs trigger REARM: busy error %d, seq %d", error, np->n_trigseq);
3848#endif
3849 return (result);
3850 }
3851
3852 np->n_trigseq++;
3853 result = vfs_resolver_result(np->n_trigseq,
3854 vnode_mountedhere(vp) ? RESOLVER_RESOLVED : RESOLVER_UNRESOLVED, 0);
3855#ifdef NFS_TRIGGER_DEBUG
3856 NP(np, "nfs trigger REARM: %s, seq %d",
3857 vnode_mountedhere(vp) ? "resolved" : "unresolved", np->n_trigseq);
3858#endif
3859 nfs_node_clear_busy(np);
3860 return (result);
3861}
3862
3863/*
3864 * Periodically attempt to unmount ephemeral (mirror) mounts in an attempt to limit
3865 * the number of unused mounts.
3866 */
3867
3868#define NFS_EPHEMERAL_MOUNT_HARVEST_INTERVAL 120 /* how often the harvester runs */
3869struct nfs_ephemeral_mount_harvester_info {
3870 fsid_t fsid; /* FSID that we need to try to unmount */
3871 uint32_t mountcount; /* count of ephemeral mounts seen in scan */
3872 };
3873/* various globals for the harvester */
3874static thread_call_t nfs_ephemeral_mount_harvester_timer = NULL;
3875static int nfs_ephemeral_mount_harvester_on = 0;
3876
3877kern_return_t thread_terminate(thread_t);
3878
3879static int
3880nfs_ephemeral_mount_harvester_callback(mount_t mp, void *arg)
3881{
3882 struct nfs_ephemeral_mount_harvester_info *hinfo = arg;
3883 struct nfsmount *nmp;
3884 struct timeval now;
3885
3886 if (strcmp(mp->mnt_vfsstat.f_fstypename, "nfs"))
3887 return (VFS_RETURNED);
3888 nmp = VFSTONFS(mp);
3889 if (!nmp || !NMFLAG(nmp, EPHEMERAL))
3890 return (VFS_RETURNED);
3891 hinfo->mountcount++;
3892
3893 /* avoid unmounting mounts that have been triggered within the last harvest interval */
3894 microtime(&now);
3895 if ((nmp->nm_mounttime >> 32) > ((uint32_t)now.tv_sec - NFS_EPHEMERAL_MOUNT_HARVEST_INTERVAL))
3896 return (VFS_RETURNED);
3897
3898 if (hinfo->fsid.val[0] || hinfo->fsid.val[1]) {
3899 /* attempt to unmount previously-found ephemeral mount */
3900 vfs_unmountbyfsid(&hinfo->fsid, 0, vfs_context_kernel());
3901 hinfo->fsid.val[0] = hinfo->fsid.val[1] = 0;
3902 }
3903
3904 /*
3905 * We can't call unmount here since we hold a mount iter ref
3906 * on mp, so save its fsid to be unmounted on the next callback iteration.
3907 */
3908 hinfo->fsid.val[0] = mp->mnt_vfsstat.f_fsid.val[0];
3909 hinfo->fsid.val[1] = mp->mnt_vfsstat.f_fsid.val[1];
3910
3911 return (VFS_RETURNED);
3912}
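/*
 * Because the callback defers each unmount by one iteration, a scan over
 * idle ephemeral mounts A, B, C (in visit order) saves A's fsid while
 * visiting A, unmounts A and saves B while visiting B, unmounts B and saves
 * C while visiting C, and the harvester thread unmounts C after the
 * iteration completes (see nfs_ephemeral_mount_harvester below).
 */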
3913
3914/*
3915 * Spawn a thread to do the ephemeral mount harvesting.
3916 */
3917static void
3918nfs_ephemeral_mount_harvester_timer_func(void)
3919{
3920 thread_t thd;
3921
3922 if (kernel_thread_start(nfs_ephemeral_mount_harvester, NULL, &thd) == KERN_SUCCESS)
3923 thread_deallocate(thd);
3924}
3925
3926/*
3927 * Iterate all mounts looking for NFS ephemeral mounts to try to unmount.
3928 */
3929void
3930nfs_ephemeral_mount_harvester(__unused void *arg, __unused wait_result_t wr)
3931{
3932 struct nfs_ephemeral_mount_harvester_info hinfo;
3933 uint64_t deadline;
3934
3935 hinfo.mountcount = 0;
3936 hinfo.fsid.val[0] = hinfo.fsid.val[1] = 0;
3937 vfs_iterate(VFS_ITERATE_TAIL_FIRST, nfs_ephemeral_mount_harvester_callback, &hinfo);
3938 if (hinfo.fsid.val[0] || hinfo.fsid.val[1]) {
3939 /* attempt to unmount last found ephemeral mount */
3940 vfs_unmountbyfsid(&hinfo.fsid, 0, vfs_context_kernel());
3941 }
3942
3943 lck_mtx_lock(nfs_global_mutex);
3944 if (!hinfo.mountcount) {
3945 /* no more ephemeral mounts - don't need timer */
3946 nfs_ephemeral_mount_harvester_on = 0;
3947 } else {
3948 /* re-arm the timer */
3949 clock_interval_to_deadline(NFS_EPHEMERAL_MOUNT_HARVEST_INTERVAL, NSEC_PER_SEC, &deadline);
3950 thread_call_enter_delayed(nfs_ephemeral_mount_harvester_timer, deadline);
3951 nfs_ephemeral_mount_harvester_on = 1;
3952 }
3953 lck_mtx_unlock(nfs_global_mutex);
3954
3955 /* thread done */
3956 thread_terminate(current_thread());
3957}
3958
3959/*
3960 * Make sure the NFS ephemeral mount harvester timer is running.
3961 */
3962void
3963nfs_ephemeral_mount_harvester_start(void)
3964{
3965 uint64_t deadline;
3966
3967 lck_mtx_lock(nfs_global_mutex);
3968 if (nfs_ephemeral_mount_harvester_on) {
3969 lck_mtx_unlock(nfs_global_mutex);
3970 return;
3971 }
3972 if (nfs_ephemeral_mount_harvester_timer == NULL)
3973 nfs_ephemeral_mount_harvester_timer = thread_call_allocate((thread_call_func_t)nfs_ephemeral_mount_harvester_timer_func, NULL);
3974 clock_interval_to_deadline(NFS_EPHEMERAL_MOUNT_HARVEST_INTERVAL, NSEC_PER_SEC, &deadline);
3975 thread_call_enter_delayed(nfs_ephemeral_mount_harvester_timer, deadline);
3976 nfs_ephemeral_mount_harvester_on = 1;
3977 lck_mtx_unlock(nfs_global_mutex);
3978}
3979
3980#endif
3981
3982/*
3983 * Send a MOUNT protocol MOUNT request to the server to get the initial file handle (and security).
3984 */
3985int
3986nfs3_mount_rpc(struct nfsmount *nmp, struct sockaddr *sa, int sotype, int nfsvers, char *path, vfs_context_t ctx, int timeo, fhandle_t *fh, struct nfs_sec *sec)
3987{
3988 int error = 0, slen, mntproto;
3989 thread_t thd = vfs_context_thread(ctx);
3990 kauth_cred_t cred = vfs_context_ucred(ctx);
3991 uint64_t xid = 0;
3992 struct nfsm_chain nmreq, nmrep;
3993 mbuf_t mreq;
3994 uint32_t mntvers, mntport, val;
3995 struct sockaddr_storage ss;
3996 struct sockaddr *saddr = (struct sockaddr*)&ss;
3997
3998 nfsm_chain_null(&nmreq);
3999 nfsm_chain_null(&nmrep);
4000
4001 mntvers = (nfsvers == NFS_VER2) ? RPCMNT_VER1 : RPCMNT_VER3;
4002 mntproto = (NM_OMFLAG(nmp, MNTUDP) || (sotype == SOCK_DGRAM)) ? IPPROTO_UDP : IPPROTO_TCP;
4003 sec->count = 0;
4004
4005 bcopy(sa, saddr, min(sizeof(ss), sa->sa_len));
4006 if (saddr->sa_family == AF_INET) {
4007 if (nmp->nm_mountport)
4008 ((struct sockaddr_in*)saddr)->sin_port = htons(nmp->nm_mountport);
4009 mntport = ntohs(((struct sockaddr_in*)saddr)->sin_port);
4010 } else {
4011 if (nmp->nm_mountport)
4012 ((struct sockaddr_in6*)saddr)->sin6_port = htons(nmp->nm_mountport);
4013 mntport = ntohs(((struct sockaddr_in6*)saddr)->sin6_port);
4014 }
4015
4016 while (!mntport) {
4017 error = nfs_portmap_lookup(nmp, ctx, saddr, NULL, RPCPROG_MNT, mntvers, mntproto, timeo);
4018 nfsmout_if(error);
4019 if (saddr->sa_family == AF_INET)
4020 mntport = ntohs(((struct sockaddr_in*)saddr)->sin_port);
4021 else
4022 mntport = ntohs(((struct sockaddr_in6*)saddr)->sin6_port);
4023 if (!mntport) {
4024 /* if not found and TCP, then retry with UDP */
4025 if (mntproto == IPPROTO_UDP) {
4026 error = EPROGUNAVAIL;
4027 break;
4028 }
4029 mntproto = IPPROTO_UDP;
4030 bcopy(sa, saddr, min(sizeof(ss), sa->sa_len));
4031 }
4032 }
4033 nfsmout_if(error || !mntport);
4034
4035 /* MOUNT protocol MOUNT request */
4036 slen = strlen(path);
4037 nfsm_chain_build_alloc_init(error, &nmreq, NFSX_UNSIGNED + nfsm_rndup(slen));
4038 nfsm_chain_add_name(error, &nmreq, path, slen, nmp);
4039 nfsm_chain_build_done(error, &nmreq);
4040 nfsmout_if(error);
4041 error = nfsm_rpchead2(nmp, (mntproto == IPPROTO_UDP) ? SOCK_DGRAM : SOCK_STREAM,
4042 RPCPROG_MNT, mntvers, RPCMNT_MOUNT,
4043 RPCAUTH_SYS, cred, NULL, nmreq.nmc_mhead, &xid, &mreq);
4044 nfsmout_if(error);
4045 nmreq.nmc_mhead = NULL;
4046 error = nfs_aux_request(nmp, thd, saddr, NULL,
4047 ((mntproto == IPPROTO_UDP) ? SOCK_DGRAM : SOCK_STREAM),
4048 mreq, R_XID32(xid), 1, timeo, &nmrep);
4049 nfsmout_if(error);
4050 nfsm_chain_get_32(error, &nmrep, val);
4051 if (!error && val)
4052 error = val;
4053 nfsm_chain_get_fh(error, &nmrep, nfsvers, fh);
4054 if (!error && (nfsvers > NFS_VER2)) {
4055 sec->count = NX_MAX_SEC_FLAVORS;
4056 error = nfsm_chain_get_secinfo(&nmrep, &sec->flavors[0], &sec->count);
4057 }
4058nfsmout:
4059 nfsm_chain_cleanup(&nmreq);
4060 nfsm_chain_cleanup(&nmrep);
4061 return (error);
4062}
4063
4064
4065/*
4066 * Send a MOUNT protocol UNMOUNT request to tell the server we've unmounted it.
4067 */
4068void
4069nfs3_umount_rpc(struct nfsmount *nmp, vfs_context_t ctx, int timeo)
4070{
4071 int error = 0, slen, mntproto;
4072 thread_t thd = vfs_context_thread(ctx);
4073 kauth_cred_t cred = vfs_context_ucred(ctx);
4074 char *path;
4075 uint64_t xid = 0;
4076 struct nfsm_chain nmreq, nmrep;
4077 mbuf_t mreq;
4078 uint32_t mntvers, mntport;
4079 struct sockaddr_storage ss;
4080 struct sockaddr *saddr = (struct sockaddr*)&ss;
4081
4082 if (!nmp->nm_saddr)
4083 return;
4084
4085 nfsm_chain_null(&nmreq);
4086 nfsm_chain_null(&nmrep);
4087
4088 mntvers = (nmp->nm_vers == NFS_VER2) ? RPCMNT_VER1 : RPCMNT_VER3;
4089 mntproto = (NM_OMFLAG(nmp, MNTUDP) || (nmp->nm_sotype == SOCK_DGRAM)) ? IPPROTO_UDP : IPPROTO_TCP;
4090 mntport = nmp->nm_mountport;
4091
4092 bcopy(nmp->nm_saddr, saddr, min(sizeof(ss), nmp->nm_saddr->sa_len));
4093 if (saddr->sa_family == AF_INET)
4094 ((struct sockaddr_in*)saddr)->sin_port = htons(mntport);
4095 else
4096 ((struct sockaddr_in6*)saddr)->sin6_port = htons(mntport);
4097
4098 while (!mntport) {
4099 error = nfs_portmap_lookup(nmp, ctx, saddr, NULL, RPCPROG_MNT, mntvers, mntproto, timeo);
4100 nfsmout_if(error);
4101 if (saddr->sa_family == AF_INET)
4102 mntport = ntohs(((struct sockaddr_in*)saddr)->sin_port);
4103 else
4104 mntport = ntohs(((struct sockaddr_in6*)saddr)->sin6_port);
4105 /* if not found and mntvers > VER1, then retry with VER1 */
4106 if (!mntport) {
4107 if (mntvers > RPCMNT_VER1) {
4108 mntvers = RPCMNT_VER1;
4109 } else if (mntproto == IPPROTO_TCP) {
4110 mntproto = IPPROTO_UDP;
4111 mntvers = (nmp->nm_vers == NFS_VER2) ? RPCMNT_VER1 : RPCMNT_VER3;
4112 } else {
4113 break;
4114 }
4115 bcopy(nmp->nm_saddr, saddr, min(sizeof(ss), nmp->nm_saddr->sa_len));
4116 }
4117 }
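 /*
  * Taken together, for a v3 mount over TCP the lookup above is retried in
  * the order (VER3,TCP), (VER1,TCP), (VER3,UDP), (VER1,UDP) before giving up.
  */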
4118 nfsmout_if(!mntport);
4119
4120 /* MOUNT protocol UNMOUNT request */
4121 path = &vfs_statfs(nmp->nm_mountp)->f_mntfromname[0];
4122 while (*path && (*path != '/'))
4123 path++;
4124 slen = strlen(path);
4125 nfsm_chain_build_alloc_init(error, &nmreq, NFSX_UNSIGNED + nfsm_rndup(slen));
4126 nfsm_chain_add_name(error, &nmreq, path, slen, nmp);
4127 nfsm_chain_build_done(error, &nmreq);
4128 nfsmout_if(error);
4129 error = nfsm_rpchead2(nmp, (mntproto == IPPROTO_UDP) ? SOCK_DGRAM : SOCK_STREAM,
4130 RPCPROG_MNT, RPCMNT_VER1, RPCMNT_UMOUNT,
4131 RPCAUTH_SYS, cred, NULL, nmreq.nmc_mhead, &xid, &mreq);
4132 nfsmout_if(error);
4133 nmreq.nmc_mhead = NULL;
4134 error = nfs_aux_request(nmp, thd, saddr, NULL,
4135 ((mntproto == IPPROTO_UDP) ? SOCK_DGRAM : SOCK_STREAM),
4136 mreq, R_XID32(xid), 1, timeo, &nmrep);
4137nfsmout:
4138 nfsm_chain_cleanup(&nmreq);
4139 nfsm_chain_cleanup(&nmrep);
4140}
4141
4142/*
4143 * unmount system call
4144 */
4145int
4146nfs_vfs_unmount(
4147 mount_t mp,
4148 int mntflags,
4149 __unused vfs_context_t ctx)
4150{
4151 struct nfsmount *nmp;
4152 vnode_t vp;
4153 int error, flags = 0;
4154 struct timespec ts = { 1, 0 };
4155
4156 nmp = VFSTONFS(mp);
4157 lck_mtx_lock(&nmp->nm_lock);
4158 /*
4159 * Set the flag indicating that an unmount attempt is in progress.
4160 */
4161 nmp->nm_state |= NFSSTA_UNMOUNTING;
4162 /*
4163 * During a force unmount we want to...
4164 * Mark that we are doing a force unmount.
4165 * Make the mountpoint soft.
4166 */
4167 if (mntflags & MNT_FORCE) {
4168 flags |= FORCECLOSE;
4169 nmp->nm_state |= NFSSTA_FORCE;
4170 NFS_BITMAP_SET(nmp->nm_flags, NFS_MFLAG_SOFT);
4171 }
4172 /*
4173 * Wait for any in-progress monitored node scan to complete.
4174 */
4175 while (nmp->nm_state & NFSSTA_MONITOR_SCAN)
4176 msleep(&nmp->nm_state, &nmp->nm_lock, PZERO-1, "nfswaitmonscan", &ts);
4177 /*
4178 * Goes something like this..
4179 * - Call vflush() to clear out vnodes for this file system,
4180 * except for the swap files. Deal with them in 2nd pass.
4181 * - Decrement reference on the vnode representing remote root.
4182 * - Clean up the NFS mount structure.
4183 */
4184 vp = NFSTOV(nmp->nm_dnp);
4185 lck_mtx_unlock(&nmp->nm_lock);
4186
4187 /*
4188 * vflush will check for busy vnodes on mountpoint.
4189 * Will do the right thing for MNT_FORCE. That is, we should
4190 * not get EBUSY back.
4191 */
4192 error = vflush(mp, vp, SKIPSWAP | flags);
4193 if (mntflags & MNT_FORCE) {
4194 error = vflush(mp, NULLVP, flags); /* locks vp in the process */
4195 } else {
4196 if (vnode_isinuse(vp, 1))
4197 error = EBUSY;
4198 else
4199 error = vflush(mp, vp, flags);
4200 }
4201 if (error) {
4202 lck_mtx_lock(&nmp->nm_lock);
4203 nmp->nm_state &= ~NFSSTA_UNMOUNTING;
4204 lck_mtx_unlock(&nmp->nm_lock);
4205 return (error);
4206 }
4207
4208 lck_mtx_lock(&nmp->nm_lock);
4209 nmp->nm_dnp = NULL;
4210 lck_mtx_unlock(&nmp->nm_lock);
4211
4212 /*
4213 * Release the root vnode reference held by mountnfs()
4214 */
4215 error = vnode_get(vp);
4216 vnode_rele(vp);
4217 if (!error)
4218 vnode_put(vp);
4219
4220 vflush(mp, NULLVP, FORCECLOSE);
4221
4222 /* Wait for all other references to be released and free the mount */
4223 nfs_mount_drain_and_cleanup(nmp);
4224
4225 return (0);
4226}
4227
4228/*
4229 * cleanup/destroy NFS fs locations structure
4230 */
4231void
4232nfs_fs_locations_cleanup(struct nfs_fs_locations *nfslsp)
4233{
4234 struct nfs_fs_location *fsl;
4235 struct nfs_fs_server *fss;
4236 struct nfs_fs_path *fsp;
4237 uint32_t loc, serv, addr, comp;
4238
4239 /* free up fs locations */
4240 if (!nfslsp->nl_numlocs || !nfslsp->nl_locations)
4241 return;
4242
4243 for (loc = 0; loc < nfslsp->nl_numlocs; loc++) {
4244 fsl = nfslsp->nl_locations[loc];
4245 if (!fsl)
4246 continue;
4247 if ((fsl->nl_servcount > 0) && fsl->nl_servers) {
4248 for (serv = 0; serv < fsl->nl_servcount; serv++) {
4249 fss = fsl->nl_servers[serv];
4250 if (!fss)
4251 continue;
4252 if ((fss->ns_addrcount > 0) && fss->ns_addresses) {
4253 for (addr = 0; addr < fss->ns_addrcount; addr++)
4254 FREE(fss->ns_addresses[addr], M_TEMP);
4255 FREE(fss->ns_addresses, M_TEMP);
4256 }
4257 FREE(fss->ns_name, M_TEMP);
4258 FREE(fss, M_TEMP);
4259 }
4260 FREE(fsl->nl_servers, M_TEMP);
4261 }
4262 fsp = &fsl->nl_path;
4263 if (fsp->np_compcount && fsp->np_components) {
4264 for (comp = 0; comp < fsp->np_compcount; comp++)
4265 if (fsp->np_components[comp])
4266 FREE(fsp->np_components[comp], M_TEMP);
4267 FREE(fsp->np_components, M_TEMP);
4268 }
4269 FREE(fsl, M_TEMP);
4270 }
4271 FREE(nfslsp->nl_locations, M_TEMP);
4272 nfslsp->nl_numlocs = 0;
4273 nfslsp->nl_locations = NULL;
4274}
4275
4276void
4277nfs_mount_rele(struct nfsmount *nmp)
4278{
4279 int wup = 0;
4280
4281 lck_mtx_lock(&nmp->nm_lock);
4282 if (nmp->nm_ref < 1)
4283 panic("nfs zombie mount underflow\n");
4284 nmp->nm_ref--;
4285 if (nmp->nm_ref == 0)
4286 wup = nmp->nm_state & NFSSTA_MOUNT_DRAIN;
4287 lck_mtx_unlock(&nmp->nm_lock);
4288 if (wup)
4289 wakeup(&nmp->nm_ref);
4290}
4291
4292void
4293nfs_mount_drain_and_cleanup(struct nfsmount *nmp)
4294{
4295 lck_mtx_lock(&nmp->nm_lock);
4296 nmp->nm_state |= NFSSTA_MOUNT_DRAIN;
4297 while (nmp->nm_ref > 0) {
4298 msleep(&nmp->nm_ref, &nmp->nm_lock, PZERO-1, "nfs_mount_drain", NULL);
4299 }
4300 assert(nmp->nm_ref == 0);
4301 lck_mtx_unlock(&nmp->nm_lock);
4302 nfs_mount_cleanup(nmp);
4303}
4304
4305/*
4306 * nfs_mount_zombie
4307 */
4308void
4309nfs_mount_zombie(struct nfsmount *nmp, int nm_state_flags)
4310{
4311 struct nfsreq *req, *treq;
4312 struct nfs_reqqhead iodq, resendq;
4313 struct timespec ts = { 1, 0 };
4314 struct nfs_open_owner *noop, *nextnoop;
4315 nfsnode_t np;
4316 int docallback;
4317
4318 lck_mtx_lock(&nmp->nm_lock);
4319 nmp->nm_state |= nm_state_flags;
4320 nmp->nm_ref++;
4321 lck_mtx_unlock(&nmp->nm_lock);
4322
4323 /* stop callbacks */
4324 if ((nmp->nm_vers >= NFS_VER4) && !NMFLAG(nmp, NOCALLBACK) && nmp->nm_cbid)
4325 nfs4_mount_callback_shutdown(nmp);
4326
4327 /* Destroy any RPCSEC_GSS contexts */
4328 nfs_gss_clnt_ctx_unmount(nmp);
4329
4330 /* mark the socket for termination */
4331 lck_mtx_lock(&nmp->nm_lock);
4332 nmp->nm_sockflags |= NMSOCK_UNMOUNT;
4333
4334 /* Have the socket thread send the unmount RPC, if requested/appropriate. */
4335 if ((nmp->nm_vers < NFS_VER4) && (nmp->nm_state & NFSSTA_MOUNTED) &&
4336 !(nmp->nm_state & (NFSSTA_FORCE|NFSSTA_DEAD)) && NMFLAG(nmp, CALLUMNT))
4337 nfs_mount_sock_thread_wake(nmp);
4338
4339 /* wait for the socket thread to terminate */
4340 while (nmp->nm_sockthd && current_thread() != nmp->nm_sockthd) {
4341 wakeup(&nmp->nm_sockthd);
4342 msleep(&nmp->nm_sockthd, &nmp->nm_lock, PZERO-1, "nfswaitsockthd", &ts);
4343 }
4344 lck_mtx_unlock(&nmp->nm_lock);
4345
4346 /* tear down the socket */
4347 nfs_disconnect(nmp);
4348
4349 lck_mtx_lock(&nmp->nm_lock);
4350
4351 if ((nmp->nm_vers >= NFS_VER4) && !NMFLAG(nmp, NOCALLBACK) && nmp->nm_cbid) {
4352 /* clear out any pending delegation return requests */
4353 while ((np = TAILQ_FIRST(&nmp->nm_dreturnq))) {
4354 TAILQ_REMOVE(&nmp->nm_dreturnq, np, n_dreturn);
4355 np->n_dreturn.tqe_next = NFSNOLIST;
4356 }
4357 }
4358
4359 /* cancel any renew timer */
4360 if ((nmp->nm_vers >= NFS_VER4) && nmp->nm_renew_timer) {
4361 thread_call_cancel(nmp->nm_renew_timer);
4362 thread_call_free(nmp->nm_renew_timer);
4363 }
4364
4365 lck_mtx_unlock(&nmp->nm_lock);
4366
4367 if (nmp->nm_state & NFSSTA_MOUNTED)
4368 switch (nmp->nm_lockmode) {
4369 case NFS_LOCK_MODE_DISABLED:
4370 case NFS_LOCK_MODE_LOCAL:
4371 break;
4372 case NFS_LOCK_MODE_ENABLED:
4373 default:
4374 if (nmp->nm_vers <= NFS_VER3) {
4375 nfs_lockd_mount_unregister(nmp);
4376 nmp->nm_lockmode = NFS_LOCK_MODE_DISABLED;
4377 }
4378 break;
4379 }
4380
4381 if ((nmp->nm_vers >= NFS_VER4) && nmp->nm_longid) {
4382 /* remove/deallocate the client ID data */
4383 lck_mtx_lock(nfs_global_mutex);
4384 TAILQ_REMOVE(&nfsclientids, nmp->nm_longid, nci_link);
4385 if (nmp->nm_longid->nci_id)
4386 FREE(nmp->nm_longid->nci_id, M_TEMP);
4387 FREE(nmp->nm_longid, M_TEMP);
4388 lck_mtx_unlock(nfs_global_mutex);
4389 }
4390
4391 /*
4392 * Be sure all requests for this mount are completed
4393 * and removed from the resend queue.
4394 */
4395 TAILQ_INIT(&resendq);
4396 lck_mtx_lock(nfs_request_mutex);
4397 TAILQ_FOREACH(req, &nfs_reqq, r_chain) {
4398 if (req->r_nmp == nmp) {
4399 lck_mtx_lock(&req->r_mtx);
4400 if (!req->r_error && req->r_nmrep.nmc_mhead == NULL)
4401 req->r_error = EIO;
4402 if (req->r_flags & R_RESENDQ) {
4403 lck_mtx_lock(&nmp->nm_lock);
4404 req->r_flags &= ~R_RESENDQ;
4405 if (req->r_rchain.tqe_next != NFSREQNOLIST) {
4406 TAILQ_REMOVE(&nmp->nm_resendq, req, r_rchain);
4407 /*
4408 * Queue up the request so that we can unreference it
4409 * without holding nfs_request_mutex.
4410 */
4411 TAILQ_INSERT_TAIL(&resendq, req, r_rchain);
4412 }
4413 lck_mtx_unlock(&nmp->nm_lock);
4414 }
4415 wakeup(req);
4416 lck_mtx_unlock(&req->r_mtx);
4417 }
4418 }
4419 lck_mtx_unlock(nfs_request_mutex);
4420
4421 /* Since we've dropped the request mutex we can now safely unreference the queued requests */
4422 TAILQ_FOREACH_SAFE(req, &resendq, r_rchain, treq) {
4423 TAILQ_REMOVE(&resendq, req, r_rchain);
4424 nfs_request_rele(req);
4425 }
4426
4427 /*
4428 * Now handle any outstanding async requests. We need to walk the
4429 * request queue again this time with the nfsiod_mutex held. No
4430 * other iods can grab our requests until we've put them on our own
4431 * local iod queue for processing.
4432 */
4433 TAILQ_INIT(&iodq);
4434 lck_mtx_lock(nfs_request_mutex);
4435 lck_mtx_lock(nfsiod_mutex);
4436 TAILQ_FOREACH(req, &nfs_reqq, r_chain) {
4437 if (req->r_nmp == nmp) {
4438 lck_mtx_lock(&req->r_mtx);
4439 if (req->r_callback.rcb_func
4440 && !(req->r_flags & R_WAITSENT) && !(req->r_flags & R_IOD)) {
4441 /*
4442 * Since R_IOD is not set, we need to handle this request. If
4443 * it's not on a list, add it to our iod queue. Otherwise
4444 * it must already be on nm_iodq, which is added to our
4445 * local queue below.
4446 * %%% We should really keep a back pointer to the iod queue
4447 * that the request is on.
4448 */
4449 req->r_flags |= R_IOD;
4450 if (req->r_achain.tqe_next == NFSREQNOLIST) {
4451 TAILQ_INSERT_TAIL(&iodq, req, r_achain);
4452 }
4453 }
4454 lck_mtx_unlock(&req->r_mtx);
4455 }
4456 }
4457
4458 /* finish any async I/O RPCs queued up */
4459 if (nmp->nm_iodlink.tqe_next != NFSNOLIST)
4460 TAILQ_REMOVE(&nfsiodmounts, nmp, nm_iodlink);
4461 TAILQ_CONCAT(&iodq, &nmp->nm_iodq, r_achain);
4462 lck_mtx_unlock(nfsiod_mutex);
4463 lck_mtx_unlock(nfs_request_mutex);
4464
4465 TAILQ_FOREACH_SAFE(req, &iodq, r_achain, treq) {
4466 TAILQ_REMOVE(&iodq, req, r_achain);
4467 req->r_achain.tqe_next = NFSREQNOLIST;
4468 lck_mtx_lock(&req->r_mtx);
4469 docallback = !(req->r_flags & R_WAITSENT);
4470 lck_mtx_unlock(&req->r_mtx);
4471 if (docallback)
4472 req->r_callback.rcb_func(req);
4473 }
4474
4475 /* clean up common state */
4476 lck_mtx_lock(&nmp->nm_lock);
4477 while ((np = LIST_FIRST(&nmp->nm_monlist))) {
4478 LIST_REMOVE(np, n_monlink);
4479 np->n_monlink.le_next = NFSNOLIST;
4480 }
4481 TAILQ_FOREACH_SAFE(noop, &nmp->nm_open_owners, noo_link, nextnoop) {
4482 TAILQ_REMOVE(&nmp->nm_open_owners, noop, noo_link);
4483 noop->noo_flags &= ~NFS_OPEN_OWNER_LINK;
4484 if (noop->noo_refcnt)
4485 continue;
4486 nfs_open_owner_destroy(noop);
4487 }
4488 lck_mtx_unlock(&nmp->nm_lock);
4489
4490 /* clean up NFSv4 state */
4491 if (nmp->nm_vers >= NFS_VER4) {
4492 lck_mtx_lock(&nmp->nm_lock);
4493 while ((np = TAILQ_FIRST(&nmp->nm_delegations))) {
4494 TAILQ_REMOVE(&nmp->nm_delegations, np, n_dlink);
4495 np->n_dlink.tqe_next = NFSNOLIST;
4496 }
4497 lck_mtx_unlock(&nmp->nm_lock);
4498 }
4499
4500 nfs_mount_rele(nmp);
4501}
4502
4503/*
4504 * cleanup/destroy an nfsmount
4505 */
4506void
4507nfs_mount_cleanup(struct nfsmount *nmp)
4508{
4509 if (!nmp)
4510 return;
4511
4512 nfs_mount_zombie(nmp, 0);
4513
4514 NFS_VFS_DBG("Unmounting %s from %s\n",
4515 vfs_statfs(nmp->nm_mountp)->f_mntfromname,
4516 vfs_statfs(nmp->nm_mountp)->f_mntonname);
4517 NFS_VFS_DBG("nfs state = %x\n", nmp->nm_state);
4518 NFS_VFS_DBG("nfs socket flags = %x\n", nmp->nm_sockflags);
4519 NFS_VFS_DBG("nfs mount ref count is %d\n", nmp->nm_ref);
4520 NFS_VFS_DBG("mount ref count is %d\n", nmp->nm_mountp->mnt_count);
4521
4522 if (nmp->nm_mountp)
4523 vfs_setfsprivate(nmp->nm_mountp, NULL);
4524
4525 lck_mtx_lock(&nmp->nm_lock);
4526 if (nmp->nm_ref)
4527 panic("Someone has grabbed a ref %d\n", nmp->nm_ref);
4528
4529 if (nmp->nm_saddr)
4530 FREE(nmp->nm_saddr, M_SONAME);
4531 if ((nmp->nm_vers < NFS_VER4) && nmp->nm_rqsaddr)
4532 FREE(nmp->nm_rqsaddr, M_SONAME);
4533
4534 if (IS_VALID_CRED(nmp->nm_mcred))
4535 kauth_cred_unref(&nmp->nm_mcred);
4536
4537 nfs_fs_locations_cleanup(&nmp->nm_locations);
4538
4539 if (nmp->nm_realm)
4540 FREE(nmp->nm_realm, M_TEMP);
4541 if (nmp->nm_principal)
4542 FREE(nmp->nm_principal, M_TEMP);
4543 if (nmp->nm_sprinc)
4544 FREE(nmp->nm_sprinc, M_TEMP);
4545
4546 if (nmp->nm_args)
4547 xb_free(nmp->nm_args);
4548
4549 lck_mtx_unlock(&nmp->nm_lock);
4550
4551 lck_mtx_destroy(&nmp->nm_lock, nfs_mount_grp);
4552 if (nmp->nm_fh)
4553 FREE(nmp->nm_fh, M_TEMP);
4554 FREE_ZONE((caddr_t)nmp, sizeof (struct nfsmount), M_NFSMNT);
4555}
4556
4557/*
4558 * Return root of a filesystem
4559 */
4560int
4561nfs_vfs_root(mount_t mp, vnode_t *vpp, __unused vfs_context_t ctx)
4562{
4563 vnode_t vp;
4564 struct nfsmount *nmp;
4565 int error;
4566 u_int32_t vpid;
4567
4568 nmp = VFSTONFS(mp);
4569 if (!nmp || !nmp->nm_dnp)
4570 return (ENXIO);
4571 vp = NFSTOV(nmp->nm_dnp);
4572 vpid = vnode_vid(vp);
4573 while ((error = vnode_getwithvid(vp, vpid))) {
4574 /* vnode_get() may return ENOENT if the dir changes. */
4575 /* If that happens, just try it again, else return the error. */
4576 if ((error != ENOENT) || (vnode_vid(vp) == vpid))
4577 return (error);
4578 vpid = vnode_vid(vp);
4579 }
4580 *vpp = vp;
4581 return (0);
4582}
4583
4584/*
4585 * Do operations associated with quotas
4586 */
4587#if !QUOTA
4588int
4589nfs_vfs_quotactl(
4590 __unused mount_t mp,
4591 __unused int cmds,
4592 __unused uid_t uid,
4593 __unused caddr_t datap,
4594 __unused vfs_context_t context)
4595{
4596 return (ENOTSUP);
4597}
4598#else
4599
4600int
4601nfs3_getquota(struct nfsmount *nmp, vfs_context_t ctx, uid_t id, int type, struct dqblk *dqb)
4602{
4603 int error = 0, slen, timeo;
4604 int rqport = 0, rqproto, rqvers = (type == GRPQUOTA) ? RPCRQUOTA_EXT_VER : RPCRQUOTA_VER;
4605 thread_t thd = vfs_context_thread(ctx);
4606 kauth_cred_t cred = vfs_context_ucred(ctx);
4607 char *path;
4608 uint64_t xid = 0;
4609 struct nfsm_chain nmreq, nmrep;
4610 mbuf_t mreq;
4611 uint32_t val = 0, bsize = 0;
4612 struct sockaddr *rqsaddr;
4613 struct timeval now;
4614
4615 if (!nmp->nm_saddr)
4616 return (ENXIO);
4617
4618 if (NMFLAG(nmp, NOQUOTA))
4619 return (ENOTSUP);
4620
4621 if (!nmp->nm_rqsaddr)
4622 MALLOC(nmp->nm_rqsaddr, struct sockaddr *, sizeof(struct sockaddr_storage), M_SONAME, M_WAITOK|M_ZERO);
4623 if (!nmp->nm_rqsaddr)
4624 return (ENOMEM);
4625 rqsaddr = nmp->nm_rqsaddr;
4626 if (rqsaddr->sa_family == AF_INET6)
4627 rqport = ntohs(((struct sockaddr_in6*)rqsaddr)->sin6_port);
4628 else if (rqsaddr->sa_family == AF_INET)
4629 rqport = ntohs(((struct sockaddr_in*)rqsaddr)->sin_port);
4630
4631 timeo = NMFLAG(nmp, SOFT) ? 10 : 60;
4632 rqproto = IPPROTO_UDP; /* XXX should prefer TCP if mount is TCP */
4633
4634 /* check if we have a recently cached rquota port */
4635 microuptime(&now);
4636 if (!rqport || ((nmp->nm_rqsaddrstamp + 60) >= (uint32_t)now.tv_sec)) {
4637 /* send portmap request to get rquota port */
4638 bcopy(nmp->nm_saddr, rqsaddr, min(sizeof(struct sockaddr_storage), nmp->nm_saddr->sa_len));
4639 error = nfs_portmap_lookup(nmp, ctx, rqsaddr, NULL, RPCPROG_RQUOTA, rqvers, rqproto, timeo);
4640 if (error)
4641 return (error);
4642 if (rqsaddr->sa_family == AF_INET6)
4643 rqport = ntohs(((struct sockaddr_in6*)rqsaddr)->sin6_port);
4644 else if (rqsaddr->sa_family == AF_INET)
4645 rqport = ntohs(((struct sockaddr_in*)rqsaddr)->sin_port);
4646 else
4647 return (EIO);
4648 if (!rqport)
4649 return (ENOTSUP);
4650 microuptime(&now);
4651 nmp->nm_rqsaddrstamp = now.tv_sec;
4652 }
4653
4654 /* rquota request */
4655 nfsm_chain_null(&nmreq);
4656 nfsm_chain_null(&nmrep);
4657 path = &vfs_statfs(nmp->nm_mountp)->f_mntfromname[0];
4658 while (*path && (*path != '/'))
4659 path++;
4660 slen = strlen(path);
4661 nfsm_chain_build_alloc_init(error, &nmreq, 3 * NFSX_UNSIGNED + nfsm_rndup(slen));
4662 nfsm_chain_add_name(error, &nmreq, path, slen, nmp);
4663 if (type == GRPQUOTA)
4664 nfsm_chain_add_32(error, &nmreq, type);
4665 nfsm_chain_add_32(error, &nmreq, id);
4666 nfsm_chain_build_done(error, &nmreq);
4667 nfsmout_if(error);
4668 error = nfsm_rpchead2(nmp, (rqproto == IPPROTO_UDP) ? SOCK_DGRAM : SOCK_STREAM,
4669 RPCPROG_RQUOTA, rqvers, RPCRQUOTA_GET,
4670 RPCAUTH_SYS, cred, NULL, nmreq.nmc_mhead, &xid, &mreq);
4671 nfsmout_if(error);
4672 nmreq.nmc_mhead = NULL;
4673 error = nfs_aux_request(nmp, thd, rqsaddr, NULL,
4674 (rqproto == IPPROTO_UDP) ? SOCK_DGRAM : SOCK_STREAM,
4675 mreq, R_XID32(xid), 0, timeo, &nmrep);
4676 nfsmout_if(error);
4677
4678 /* parse rquota response */
4679 nfsm_chain_get_32(error, &nmrep, val);
4680 if (!error && (val != RQUOTA_STAT_OK)) {
4681 if (val == RQUOTA_STAT_NOQUOTA)
4682 error = ENOENT;
4683 else if (val == RQUOTA_STAT_EPERM)
4684 error = EPERM;
4685 else
4686 error = EIO;
4687 }
4688 nfsm_chain_get_32(error, &nmrep, bsize);
4689 nfsm_chain_adv(error, &nmrep, NFSX_UNSIGNED);
4690 nfsm_chain_get_32(error, &nmrep, val);
4691 nfsmout_if(error);
4692 dqb->dqb_bhardlimit = (uint64_t)val * bsize;
4693 nfsm_chain_get_32(error, &nmrep, val);
4694 nfsmout_if(error);
4695 dqb->dqb_bsoftlimit = (uint64_t)val * bsize;
4696 nfsm_chain_get_32(error, &nmrep, val);
4697 nfsmout_if(error);
4698 dqb->dqb_curbytes = (uint64_t)val * bsize;
4699 nfsm_chain_get_32(error, &nmrep, dqb->dqb_ihardlimit);
4700 nfsm_chain_get_32(error, &nmrep, dqb->dqb_isoftlimit);
4701 nfsm_chain_get_32(error, &nmrep, dqb->dqb_curinodes);
4702 nfsm_chain_get_32(error, &nmrep, dqb->dqb_btime);
4703 nfsm_chain_get_32(error, &nmrep, dqb->dqb_itime);
4704 nfsmout_if(error);
4705 dqb->dqb_id = id;
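 /*
  * Note that the rquota protocol reports byte limits as 32-bit block
  * counts scaled by the returned block size, e.g. a block size of 1024
  * and a hard limit of 2048 blocks yields dqb_bhardlimit = 2 MiB.
  */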
4706nfsmout:
4707 nfsm_chain_cleanup(&nmreq);
4708 nfsm_chain_cleanup(&nmrep);
4709 return (error);
4710}
4711
4712int
4713nfs4_getquota(struct nfsmount *nmp, vfs_context_t ctx, uid_t id, int type, struct dqblk *dqb)
4714{
4715 nfsnode_t np;
4716 int error = 0, status, nfsvers, numops;
4717 u_int64_t xid;
4718 struct nfsm_chain nmreq, nmrep;
4719 uint32_t bitmap[NFS_ATTR_BITMAP_LEN];
4720 thread_t thd = vfs_context_thread(ctx);
4721 kauth_cred_t cred = vfs_context_ucred(ctx);
4722 struct nfsreq_secinfo_args si;
4723
4724 if (type != USRQUOTA) /* NFSv4 only supports user quotas */
4725 return (ENOTSUP);
4726
4727 /* first check that the server supports any of the quota attributes */
4728 if (!NFS_BITMAP_ISSET(nmp->nm_fsattr.nfsa_supp_attr, NFS_FATTR_QUOTA_AVAIL_HARD) &&
4729 !NFS_BITMAP_ISSET(nmp->nm_fsattr.nfsa_supp_attr, NFS_FATTR_QUOTA_AVAIL_SOFT) &&
4730 !NFS_BITMAP_ISSET(nmp->nm_fsattr.nfsa_supp_attr, NFS_FATTR_QUOTA_USED))
4731 return (ENOTSUP);
4732
4733 /*
4734 * The credential passed to the server needs to have
4735 * an effective uid that matches the given uid.
4736 */
4737 if (id != kauth_cred_getuid(cred)) {
4738 struct posix_cred temp_pcred;
4739 posix_cred_t pcred = posix_cred_get(cred);
4740 bzero(&temp_pcred, sizeof(temp_pcred));
4741 temp_pcred.cr_uid = id;
4742 temp_pcred.cr_ngroups = pcred->cr_ngroups;
4743 bcopy(pcred->cr_groups, temp_pcred.cr_groups, sizeof(temp_pcred.cr_groups));
4744 cred = posix_cred_create(&temp_pcred);
4745 if (!IS_VALID_CRED(cred))
4746 return (ENOMEM);
4747 } else {
4748 kauth_cred_ref(cred);
4749 }
4750
4751 nfsvers = nmp->nm_vers;
4752 np = nmp->nm_dnp;
4753 if (!np)
4754 error = ENXIO;
4755 if (error || ((error = vnode_get(NFSTOV(np))))) {
4756 kauth_cred_unref(&cred);
4757 return(error);
4758 }
4759
4760 NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
4761 nfsm_chain_null(&nmreq);
4762 nfsm_chain_null(&nmrep);
4763
4764 // PUTFH + GETATTR
4765 numops = 2;
4766 nfsm_chain_build_alloc_init(error, &nmreq, 15 * NFSX_UNSIGNED);
4767 nfsm_chain_add_compound_header(error, &nmreq, "quota", nmp->nm_minor_vers, numops);
4768 numops--;
4769 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
4770 nfsm_chain_add_fh(error, &nmreq, nfsvers, np->n_fhp, np->n_fhsize);
4771 numops--;
4772 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
4773 NFS_CLEAR_ATTRIBUTES(bitmap);
4774 NFS_BITMAP_SET(bitmap, NFS_FATTR_QUOTA_AVAIL_HARD);
4775 NFS_BITMAP_SET(bitmap, NFS_FATTR_QUOTA_AVAIL_SOFT);
4776 NFS_BITMAP_SET(bitmap, NFS_FATTR_QUOTA_USED);
4777 nfsm_chain_add_bitmap_supported(error, &nmreq, bitmap, nmp, NULL);
4778 nfsm_chain_build_done(error, &nmreq);
4779 nfsm_assert(error, (numops == 0), EPROTO);
4780 nfsmout_if(error);
4781 error = nfs_request2(np, NULL, &nmreq, NFSPROC4_COMPOUND, thd, cred, &si, 0, &nmrep, &xid, &status);
4782 nfsm_chain_skip_tag(error, &nmrep);
4783 nfsm_chain_get_32(error, &nmrep, numops);
4784 nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
4785 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
4786 nfsm_assert(error, NFSTONMP(np), ENXIO);
4787 nfsmout_if(error);
4788 error = nfs4_parsefattr(&nmrep, NULL, NULL, NULL, dqb, NULL);
4789 nfsmout_if(error);
4790 nfsm_assert(error, NFSTONMP(np), ENXIO);
4791nfsmout:
4792 nfsm_chain_cleanup(&nmreq);
4793 nfsm_chain_cleanup(&nmrep);
4794 vnode_put(NFSTOV(np));
4795 kauth_cred_unref(&cred);
4796 return (error);
4797}
4798
4799int
4800nfs_vfs_quotactl(mount_t mp, int cmds, uid_t uid, caddr_t datap, vfs_context_t ctx)
4801{
4802 struct nfsmount *nmp;
4803 int cmd, type, error, nfsvers;
4804 uid_t euid = kauth_cred_getuid(vfs_context_ucred(ctx));
4805 struct dqblk *dqb = (struct dqblk*)datap;
4806
4807 nmp = VFSTONFS(mp);
4808 if (nfs_mount_gone(nmp))
4809 return (ENXIO);
4810 nfsvers = nmp->nm_vers;
4811
4812 if (uid == ~0U)
4813 uid = euid;
4814
4815 /* we can only support Q_GETQUOTA */
4816 cmd = cmds >> SUBCMDSHIFT;
4817 switch (cmd) {
4818 case Q_GETQUOTA:
4819 break;
4820 case Q_QUOTAON:
4821 case Q_QUOTAOFF:
4822 case Q_SETQUOTA:
4823 case Q_SETUSE:
4824 case Q_SYNC:
4825 case Q_QUOTASTAT:
4826 return (ENOTSUP);
4827 default:
4828 return (EINVAL);
4829 }
4830
4831 type = cmds & SUBCMDMASK;
4832 if ((u_int)type >= MAXQUOTAS)
4833 return (EINVAL);
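 /*
  * For reference: callers encode the request as QCMD(cmd, type), i.e.
  * (cmd << SUBCMDSHIFT) | type, so a user-quota query arrives as
  * QCMD(Q_GETQUOTA, USRQUOTA) and is split back apart above.
  */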
4834 if ((uid != euid) && ((error = vfs_context_suser(ctx))))
4835 return (error);
4836
4837 if (vfs_busy(mp, LK_NOWAIT))
4838 return (0);
4839 bzero(dqb, sizeof(*dqb));
4840 error = nmp->nm_funcs->nf_getquota(nmp, ctx, uid, type, dqb);
4841 vfs_unbusy(mp);
4842 return (error);
4843}
4844#endif
4845
4846/*
4847 * Flush out the buffer cache
4848 */
4849int nfs_sync_callout(vnode_t, void *);
4850
4851struct nfs_sync_cargs {
4852 vfs_context_t ctx;
4853 int waitfor;
4854 int error;
4855};
4856
4857int
4858nfs_sync_callout(vnode_t vp, void *arg)
4859{
4860 struct nfs_sync_cargs *cargs = (struct nfs_sync_cargs*)arg;
4861 nfsnode_t np = VTONFS(vp);
4862 int error;
4863
4864 if (np->n_flag & NREVOKE) {
4865 vn_revoke(vp, REVOKEALL, cargs->ctx);
4866 return (VNODE_RETURNED);
4867 }
4868
4869 if (LIST_EMPTY(&np->n_dirtyblkhd))
4870 return (VNODE_RETURNED);
4871 if (np->n_wrbusy > 0)
4872 return (VNODE_RETURNED);
4873 if (np->n_bflag & (NBFLUSHINPROG|NBINVALINPROG))
4874 return (VNODE_RETURNED);
4875
4876 error = nfs_flush(np, cargs->waitfor, vfs_context_thread(cargs->ctx), 0);
4877 if (error)
4878 cargs->error = error;
4879
4880 return (VNODE_RETURNED);
4881}
4882
4883int
4884nfs_vfs_sync(mount_t mp, int waitfor, vfs_context_t ctx)
4885{
4886 struct nfs_sync_cargs cargs;
4887
4888 cargs.waitfor = waitfor;
4889 cargs.ctx = ctx;
4890 cargs.error = 0;
4891
4892 vnode_iterate(mp, 0, nfs_sync_callout, &cargs);
4893
4894 return (cargs.error);
4895}
4896
4897/*
4898 * NFS flat namespace lookup.
4899 * Currently unsupported.
4900 */
4901/*ARGSUSED*/
4902int
4903nfs_vfs_vget(
4904 __unused mount_t mp,
4905 __unused ino64_t ino,
4906 __unused vnode_t *vpp,
4907 __unused vfs_context_t ctx)
4908{
4909
4910 return (ENOTSUP);
4911}
4912
4913/*
4914 * At this point, this should never happen
4915 */
4916/*ARGSUSED*/
4917int
4918nfs_vfs_fhtovp(
4919 __unused mount_t mp,
4920 __unused int fhlen,
4921 __unused unsigned char *fhp,
4922 __unused vnode_t *vpp,
4923 __unused vfs_context_t ctx)
4924{
4925
4926 return (ENOTSUP);
4927}
4928
4929/*
4930 * Vnode pointer to file handle; should never happen either
4931 */
4932/*ARGSUSED*/
4933int
4934nfs_vfs_vptofh(
4935 __unused vnode_t vp,
4936 __unused int *fhlenp,
4937 __unused unsigned char *fhp,
4938 __unused vfs_context_t ctx)
4939{
4940
4941 return (ENOTSUP);
4942}
4943
4944/*
4945 * Vfs start routine, a no-op.
4946 */
4947/*ARGSUSED*/
4948int
4949nfs_vfs_start(
4950 __unused mount_t mp,
4951 __unused int flags,
4952 __unused vfs_context_t ctx)
4953{
4954
4955 return (0);
4956}
4957
4958/*
4959 * Build the mount info buffer for NFS_MOUNTINFO.
4960 */
4961int
4962nfs_mountinfo_assemble(struct nfsmount *nmp, struct xdrbuf *xb)
4963{
4964 struct xdrbuf xbinfo, xborig;
4965 char sotype[6];
4966 uint32_t origargsvers, origargslength;
4967 uint32_t infolength_offset, curargsopaquelength_offset, curargslength_offset, attrslength_offset, curargs_end_offset, end_offset;
4968 uint32_t miattrs[NFS_MIATTR_BITMAP_LEN];
4969 uint32_t miflags_mask[NFS_MIFLAG_BITMAP_LEN];
4970 uint32_t miflags[NFS_MIFLAG_BITMAP_LEN];
4971 uint32_t mattrs[NFS_MATTR_BITMAP_LEN];
4972 uint32_t mflags_mask[NFS_MFLAG_BITMAP_LEN];
4973 uint32_t mflags[NFS_MFLAG_BITMAP_LEN];
4974 uint32_t loc, serv, addr, comp;
4975 int i, timeo, error = 0;
4976
4977 /* set up mount info attr and flag bitmaps */
4978 NFS_BITMAP_ZERO(miattrs, NFS_MIATTR_BITMAP_LEN);
4979 NFS_BITMAP_SET(miattrs, NFS_MIATTR_FLAGS);
4980 NFS_BITMAP_SET(miattrs, NFS_MIATTR_ORIG_ARGS);
4981 NFS_BITMAP_SET(miattrs, NFS_MIATTR_CUR_ARGS);
4982 NFS_BITMAP_SET(miattrs, NFS_MIATTR_CUR_LOC_INDEX);
4983 NFS_BITMAP_ZERO(miflags_mask, NFS_MIFLAG_BITMAP_LEN);
4984 NFS_BITMAP_ZERO(miflags, NFS_MIFLAG_BITMAP_LEN);
4985 NFS_BITMAP_SET(miflags_mask, NFS_MIFLAG_DEAD);
4986 NFS_BITMAP_SET(miflags_mask, NFS_MIFLAG_NOTRESP);
4987 NFS_BITMAP_SET(miflags_mask, NFS_MIFLAG_RECOVERY);
4988 if (nmp->nm_state & NFSSTA_DEAD)
4989 NFS_BITMAP_SET(miflags, NFS_MIFLAG_DEAD);
4990 if ((nmp->nm_state & (NFSSTA_TIMEO|NFSSTA_JUKEBOXTIMEO)) ||
4991 ((nmp->nm_state & NFSSTA_LOCKTIMEO) && (nmp->nm_lockmode == NFS_LOCK_MODE_ENABLED)))
4992 NFS_BITMAP_SET(miflags, NFS_MIFLAG_NOTRESP);
4993 if (nmp->nm_state & NFSSTA_RECOVER)
4994 NFS_BITMAP_SET(miflags, NFS_MIFLAG_RECOVERY);
4995
4996 /* get original mount args length */
4997 xb_init_buffer(&xborig, nmp->nm_args, 2*XDRWORD);
4998 xb_get_32(error, &xborig, origargsvers); /* version */
4999 xb_get_32(error, &xborig, origargslength); /* args length */
5000 nfsmerr_if(error);
5001
5002 /* set up current mount attributes bitmap */
5003 NFS_BITMAP_ZERO(mattrs, NFS_MATTR_BITMAP_LEN);
5004 NFS_BITMAP_SET(mattrs, NFS_MATTR_FLAGS);
5005 NFS_BITMAP_SET(mattrs, NFS_MATTR_NFS_VERSION);
5006 if (nmp->nm_vers >= NFS_VER4)
5007 NFS_BITMAP_SET(mattrs, NFS_MATTR_NFS_MINOR_VERSION);
5008 NFS_BITMAP_SET(mattrs, NFS_MATTR_READ_SIZE);
5009 NFS_BITMAP_SET(mattrs, NFS_MATTR_WRITE_SIZE);
5010 NFS_BITMAP_SET(mattrs, NFS_MATTR_READDIR_SIZE);
5011 NFS_BITMAP_SET(mattrs, NFS_MATTR_READAHEAD);
5012 NFS_BITMAP_SET(mattrs, NFS_MATTR_ATTRCACHE_REG_MIN);
5013 NFS_BITMAP_SET(mattrs, NFS_MATTR_ATTRCACHE_REG_MAX);
5014 NFS_BITMAP_SET(mattrs, NFS_MATTR_ATTRCACHE_DIR_MIN);
5015 NFS_BITMAP_SET(mattrs, NFS_MATTR_ATTRCACHE_DIR_MAX);
5016 NFS_BITMAP_SET(mattrs, NFS_MATTR_LOCK_MODE);
5017 NFS_BITMAP_SET(mattrs, NFS_MATTR_SECURITY);
5018 NFS_BITMAP_SET(mattrs, NFS_MATTR_MAX_GROUP_LIST);
5019 NFS_BITMAP_SET(mattrs, NFS_MATTR_SOCKET_TYPE);
5020 NFS_BITMAP_SET(mattrs, NFS_MATTR_NFS_PORT);
5021 if ((nmp->nm_vers < NFS_VER4) && nmp->nm_mountport)
5022 NFS_BITMAP_SET(mattrs, NFS_MATTR_MOUNT_PORT);
5023 NFS_BITMAP_SET(mattrs, NFS_MATTR_REQUEST_TIMEOUT);
5024 if (NMFLAG(nmp, SOFT))
5025 NFS_BITMAP_SET(mattrs, NFS_MATTR_SOFT_RETRY_COUNT);
5026 if (nmp->nm_deadtimeout)
5027 NFS_BITMAP_SET(mattrs, NFS_MATTR_DEAD_TIMEOUT);
5028 if (nmp->nm_fh)
5029 NFS_BITMAP_SET(mattrs, NFS_MATTR_FH);
5030 NFS_BITMAP_SET(mattrs, NFS_MATTR_FS_LOCATIONS);
5031 NFS_BITMAP_SET(mattrs, NFS_MATTR_MNTFLAGS);
5032 if (origargsvers < NFS_ARGSVERSION_XDR)
5033 NFS_BITMAP_SET(mattrs, NFS_MATTR_MNTFROM);
5034 if (nmp->nm_realm)
5035 NFS_BITMAP_SET(mattrs, NFS_MATTR_REALM);
5036 if (nmp->nm_principal)
5037 NFS_BITMAP_SET(mattrs, NFS_MATTR_PRINCIPAL);
5038 if (nmp->nm_sprinc)
5039 NFS_BITMAP_SET(mattrs, NFS_MATTR_SVCPRINCIPAL);
5040
5041 /* set up current mount flags bitmap */
5042 /* first set the flags that we will be setting - either on OR off */
5043 NFS_BITMAP_ZERO(mflags_mask, NFS_MFLAG_BITMAP_LEN);
5044 NFS_BITMAP_SET(mflags_mask, NFS_MFLAG_SOFT);
5045 NFS_BITMAP_SET(mflags_mask, NFS_MFLAG_INTR);
5046 NFS_BITMAP_SET(mflags_mask, NFS_MFLAG_RESVPORT);
5047 if (nmp->nm_sotype == SOCK_DGRAM)
5048 NFS_BITMAP_SET(mflags_mask, NFS_MFLAG_NOCONNECT);
5049 NFS_BITMAP_SET(mflags_mask, NFS_MFLAG_DUMBTIMER);
5050 if (nmp->nm_vers < NFS_VER4)
5051 NFS_BITMAP_SET(mflags_mask, NFS_MFLAG_CALLUMNT);
5052 if (nmp->nm_vers >= NFS_VER3)
5053 NFS_BITMAP_SET(mflags_mask, NFS_MFLAG_RDIRPLUS);
5054 NFS_BITMAP_SET(mflags_mask, NFS_MFLAG_NONEGNAMECACHE);
5055 NFS_BITMAP_SET(mflags_mask, NFS_MFLAG_MUTEJUKEBOX);
5056 if (nmp->nm_vers >= NFS_VER4) {
5057 NFS_BITMAP_SET(mflags_mask, NFS_MFLAG_EPHEMERAL);
5058 NFS_BITMAP_SET(mflags_mask, NFS_MFLAG_NOCALLBACK);
5059 NFS_BITMAP_SET(mflags_mask, NFS_MFLAG_NONAMEDATTR);
5060 NFS_BITMAP_SET(mflags_mask, NFS_MFLAG_NOACL);
5061 NFS_BITMAP_SET(mflags_mask, NFS_MFLAG_ACLONLY);
5062 }
5063 NFS_BITMAP_SET(mflags_mask, NFS_MFLAG_NFC);
5064 NFS_BITMAP_SET(mflags_mask, NFS_MFLAG_NOQUOTA);
5065 if (nmp->nm_vers < NFS_VER4)
5066 NFS_BITMAP_SET(mflags_mask, NFS_MFLAG_MNTUDP);
5067 NFS_BITMAP_SET(mflags_mask, NFS_MFLAG_MNTQUICK);
5068 /* now set the flags that should be set */
5069 NFS_BITMAP_ZERO(mflags, NFS_MFLAG_BITMAP_LEN);
5070 if (NMFLAG(nmp, SOFT))
5071 NFS_BITMAP_SET(mflags, NFS_MFLAG_SOFT);
5072 if (NMFLAG(nmp, INTR))
5073 NFS_BITMAP_SET(mflags, NFS_MFLAG_INTR);
5074 if (NMFLAG(nmp, RESVPORT))
5075 NFS_BITMAP_SET(mflags, NFS_MFLAG_RESVPORT);
5076 if ((nmp->nm_sotype == SOCK_DGRAM) && NMFLAG(nmp, NOCONNECT))
5077 NFS_BITMAP_SET(mflags, NFS_MFLAG_NOCONNECT);
5078 if (NMFLAG(nmp, DUMBTIMER))
5079 NFS_BITMAP_SET(mflags, NFS_MFLAG_DUMBTIMER);
5080 if ((nmp->nm_vers < NFS_VER4) && NMFLAG(nmp, CALLUMNT))
5081 NFS_BITMAP_SET(mflags, NFS_MFLAG_CALLUMNT);
5082 if ((nmp->nm_vers >= NFS_VER3) && NMFLAG(nmp, RDIRPLUS))
5083 NFS_BITMAP_SET(mflags, NFS_MFLAG_RDIRPLUS);
5084 if (NMFLAG(nmp, NONEGNAMECACHE))
5085 NFS_BITMAP_SET(mflags, NFS_MFLAG_NONEGNAMECACHE);
5086 if (NMFLAG(nmp, MUTEJUKEBOX))
5087 NFS_BITMAP_SET(mflags, NFS_MFLAG_MUTEJUKEBOX);
5088 if (nmp->nm_vers >= NFS_VER4) {
5089 if (NMFLAG(nmp, EPHEMERAL))
5090 NFS_BITMAP_SET(mflags, NFS_MFLAG_EPHEMERAL);
5091 if (NMFLAG(nmp, NOCALLBACK))
5092 NFS_BITMAP_SET(mflags, NFS_MFLAG_NOCALLBACK);
5093 if (NMFLAG(nmp, NONAMEDATTR))
5094 NFS_BITMAP_SET(mflags, NFS_MFLAG_NONAMEDATTR);
5095 if (NMFLAG(nmp, NOACL))
5096 NFS_BITMAP_SET(mflags, NFS_MFLAG_NOACL);
5097 if (NMFLAG(nmp, ACLONLY))
5098 NFS_BITMAP_SET(mflags, NFS_MFLAG_ACLONLY);
5099 }
5100 if (NMFLAG(nmp, NFC))
5101 NFS_BITMAP_SET(mflags, NFS_MFLAG_NFC);
5102 if (NMFLAG(nmp, NOQUOTA) || ((nmp->nm_vers >= NFS_VER4) &&
5103 !NFS_BITMAP_ISSET(nmp->nm_fsattr.nfsa_supp_attr, NFS_FATTR_QUOTA_AVAIL_HARD) &&
5104 !NFS_BITMAP_ISSET(nmp->nm_fsattr.nfsa_supp_attr, NFS_FATTR_QUOTA_AVAIL_SOFT) &&
5105 !NFS_BITMAP_ISSET(nmp->nm_fsattr.nfsa_supp_attr, NFS_FATTR_QUOTA_USED)))
5106 NFS_BITMAP_SET(mflags, NFS_MFLAG_NOQUOTA);
5107 if ((nmp->nm_vers < NFS_VER4) && NMFLAG(nmp, MNTUDP))
5108 NFS_BITMAP_SET(mflags, NFS_MFLAG_MNTUDP);
5109 if (NMFLAG(nmp, MNTQUICK))
5110 NFS_BITMAP_SET(mflags, NFS_MFLAG_MNTQUICK);
5111
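	/*
	 * The info buffer is built in a single pass: zero placeholders are
	 * written for the variable-length counts, their offsets are recorded
	 * (info length, current args lengths, attributes length), and the
	 * real values are seeked back to and filled in once the total size
	 * is known.
	 */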
5112 /* assemble info buffer: */
5113 xb_init_buffer(&xbinfo, NULL, 0);
5114 xb_add_32(error, &xbinfo, NFS_MOUNT_INFO_VERSION);
5115 infolength_offset = xb_offset(&xbinfo);
5116 xb_add_32(error, &xbinfo, 0);
5117 xb_add_bitmap(error, &xbinfo, miattrs, NFS_MIATTR_BITMAP_LEN);
5118 xb_add_bitmap(error, &xbinfo, miflags, NFS_MIFLAG_BITMAP_LEN);
5119 xb_add_32(error, &xbinfo, origargslength);
5120 if (!error)
5121 error = xb_add_bytes(&xbinfo, nmp->nm_args, origargslength, 0);
5122
5123 /* the opaque byte count for the current mount args values: */
5124 curargsopaquelength_offset = xb_offset(&xbinfo);
5125 xb_add_32(error, &xbinfo, 0);
5126
5127 /* Encode current mount args values */
5128 xb_add_32(error, &xbinfo, NFS_ARGSVERSION_XDR);
5129 curargslength_offset = xb_offset(&xbinfo);
5130 xb_add_32(error, &xbinfo, 0);
5131 xb_add_32(error, &xbinfo, NFS_XDRARGS_VERSION_0);
5132 xb_add_bitmap(error, &xbinfo, mattrs, NFS_MATTR_BITMAP_LEN);
5133 attrslength_offset = xb_offset(&xbinfo);
5134 xb_add_32(error, &xbinfo, 0);
5135 xb_add_bitmap(error, &xbinfo, mflags_mask, NFS_MFLAG_BITMAP_LEN);
5136 xb_add_bitmap(error, &xbinfo, mflags, NFS_MFLAG_BITMAP_LEN);
5137 xb_add_32(error, &xbinfo, nmp->nm_vers); /* NFS_VERSION */
5138 if (nmp->nm_vers >= NFS_VER4)
5139 xb_add_32(error, &xbinfo, nmp->nm_minor_vers); /* NFS_MINOR_VERSION */
5140 xb_add_32(error, &xbinfo, nmp->nm_rsize); /* READ_SIZE */
5141 xb_add_32(error, &xbinfo, nmp->nm_wsize); /* WRITE_SIZE */
5142 xb_add_32(error, &xbinfo, nmp->nm_readdirsize); /* READDIR_SIZE */
5143 xb_add_32(error, &xbinfo, nmp->nm_readahead); /* READAHEAD */
5144	xb_add_32(error, &xbinfo, nmp->nm_acregmin);		/* ATTRCACHE_REG_MIN (seconds) */
5145	xb_add_32(error, &xbinfo, 0);				/* ATTRCACHE_REG_MIN (nanoseconds) */
5146	xb_add_32(error, &xbinfo, nmp->nm_acregmax);		/* ATTRCACHE_REG_MAX (seconds) */
5147	xb_add_32(error, &xbinfo, 0);				/* ATTRCACHE_REG_MAX (nanoseconds) */
5148	xb_add_32(error, &xbinfo, nmp->nm_acdirmin);		/* ATTRCACHE_DIR_MIN (seconds) */
5149	xb_add_32(error, &xbinfo, 0);				/* ATTRCACHE_DIR_MIN (nanoseconds) */
5150	xb_add_32(error, &xbinfo, nmp->nm_acdirmax);		/* ATTRCACHE_DIR_MAX (seconds) */
5151	xb_add_32(error, &xbinfo, 0);				/* ATTRCACHE_DIR_MAX (nanoseconds) */
5152 xb_add_32(error, &xbinfo, nmp->nm_lockmode); /* LOCK_MODE */
5153 if (nmp->nm_sec.count) {
5154 xb_add_32(error, &xbinfo, nmp->nm_sec.count); /* SECURITY */
5155 nfsmerr_if(error);
5156 for (i=0; i < nmp->nm_sec.count; i++)
5157 xb_add_32(error, &xbinfo, nmp->nm_sec.flavors[i]);
5158 } else if (nmp->nm_servsec.count) {
5159 xb_add_32(error, &xbinfo, nmp->nm_servsec.count); /* SECURITY */
5160 nfsmerr_if(error);
5161 for (i=0; i < nmp->nm_servsec.count; i++)
5162 xb_add_32(error, &xbinfo, nmp->nm_servsec.flavors[i]);
5163 } else {
5164 xb_add_32(error, &xbinfo, 1); /* SECURITY */
5165 xb_add_32(error, &xbinfo, nmp->nm_auth);
5166 }
5167 xb_add_32(error, &xbinfo, nmp->nm_numgrps); /* MAX_GROUP_LIST */
5168 nfsmerr_if(error);
5169 snprintf(sotype, sizeof(sotype), "%s%s", (nmp->nm_sotype == SOCK_DGRAM) ? "udp" : "tcp",
5170 nmp->nm_sofamily ? (nmp->nm_sofamily == AF_INET) ? "4" : "6" : "");
5171 xb_add_string(error, &xbinfo, sotype, strlen(sotype)); /* SOCKET_TYPE */
5172 xb_add_32(error, &xbinfo, ntohs(((struct sockaddr_in*)nmp->nm_saddr)->sin_port)); /* NFS_PORT */
5173 if ((nmp->nm_vers < NFS_VER4) && nmp->nm_mountport)
5174 xb_add_32(error, &xbinfo, nmp->nm_mountport); /* MOUNT_PORT */
5175	timeo = (nmp->nm_timeo * 10) / NFS_HZ;			/* convert NFS_HZ ticks to tenths of a second */
5176	xb_add_32(error, &xbinfo, timeo/10);			/* REQUEST_TIMEOUT (seconds) */
5177	xb_add_32(error, &xbinfo, (timeo%10)*100000000);	/* REQUEST_TIMEOUT (nanoseconds) */
5178 if (NMFLAG(nmp, SOFT))
5179 xb_add_32(error, &xbinfo, nmp->nm_retry); /* SOFT_RETRY_COUNT */
5180 if (nmp->nm_deadtimeout) {
5181		xb_add_32(error, &xbinfo, nmp->nm_deadtimeout);	/* DEAD_TIMEOUT (seconds) */
5182		xb_add_32(error, &xbinfo, 0);			/* DEAD_TIMEOUT (nanoseconds) */
5183 }
5184 if (nmp->nm_fh)
5185 xb_add_fh(error, &xbinfo, &nmp->nm_fh->fh_data[0], nmp->nm_fh->fh_len); /* FH */
5186 xb_add_32(error, &xbinfo, nmp->nm_locations.nl_numlocs); /* FS_LOCATIONS */
5187 for (loc = 0; !error && (loc < nmp->nm_locations.nl_numlocs); loc++) {
5188 xb_add_32(error, &xbinfo, nmp->nm_locations.nl_locations[loc]->nl_servcount);
5189 for (serv = 0; !error && (serv < nmp->nm_locations.nl_locations[loc]->nl_servcount); serv++) {
5190 xb_add_string(error, &xbinfo, nmp->nm_locations.nl_locations[loc]->nl_servers[serv]->ns_name,
5191 strlen(nmp->nm_locations.nl_locations[loc]->nl_servers[serv]->ns_name));
5192 xb_add_32(error, &xbinfo, nmp->nm_locations.nl_locations[loc]->nl_servers[serv]->ns_addrcount);
5193 for (addr = 0; !error && (addr < nmp->nm_locations.nl_locations[loc]->nl_servers[serv]->ns_addrcount); addr++)
5194 xb_add_string(error, &xbinfo, nmp->nm_locations.nl_locations[loc]->nl_servers[serv]->ns_addresses[addr],
5195 strlen(nmp->nm_locations.nl_locations[loc]->nl_servers[serv]->ns_addresses[addr]));
5196 xb_add_32(error, &xbinfo, 0); /* empty server info */
5197 }
5198 xb_add_32(error, &xbinfo, nmp->nm_locations.nl_locations[loc]->nl_path.np_compcount);
5199 for (comp = 0; !error && (comp < nmp->nm_locations.nl_locations[loc]->nl_path.np_compcount); comp++)
5200 xb_add_string(error, &xbinfo, nmp->nm_locations.nl_locations[loc]->nl_path.np_components[comp],
5201 strlen(nmp->nm_locations.nl_locations[loc]->nl_path.np_components[comp]));
5202 xb_add_32(error, &xbinfo, 0); /* empty fs location info */
5203 }
5204 xb_add_32(error, &xbinfo, vfs_flags(nmp->nm_mountp)); /* MNTFLAGS */
5205 if (origargsvers < NFS_ARGSVERSION_XDR)
5206 xb_add_string(error, &xbinfo, vfs_statfs(nmp->nm_mountp)->f_mntfromname,
5207 strlen(vfs_statfs(nmp->nm_mountp)->f_mntfromname)); /* MNTFROM */
5208 if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_REALM))
5209 xb_add_string(error, &xbinfo, nmp->nm_realm, strlen(nmp->nm_realm));
5210 if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_PRINCIPAL))
5211 xb_add_string(error, &xbinfo, nmp->nm_principal, strlen(nmp->nm_principal));
5212 if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_SVCPRINCIPAL))
5213 xb_add_string(error, &xbinfo, nmp->nm_sprinc, strlen(nmp->nm_sprinc));
5214
5215 curargs_end_offset = xb_offset(&xbinfo);
5216
5217 /* NFS_MIATTR_CUR_LOC_INDEX */
5218 xb_add_32(error, &xbinfo, nmp->nm_locations.nl_current.nli_flags);
5219 xb_add_32(error, &xbinfo, nmp->nm_locations.nl_current.nli_loc);
5220 xb_add_32(error, &xbinfo, nmp->nm_locations.nl_current.nli_serv);
5221 xb_add_32(error, &xbinfo, nmp->nm_locations.nl_current.nli_addr);
5222
5223 xb_build_done(error, &xbinfo);
5224
5225 /* update opaque counts */
5226 end_offset = xb_offset(&xbinfo);
5227 if (!error) {
5228 error = xb_seek(&xbinfo, attrslength_offset);
5229 xb_add_32(error, &xbinfo, curargs_end_offset - attrslength_offset - XDRWORD/*don't include length field*/);
5230 }
5231 if (!error) {
5232 error = xb_seek(&xbinfo, curargslength_offset);
5233 xb_add_32(error, &xbinfo, curargs_end_offset - curargslength_offset + XDRWORD/*version*/);
5234 }
5235 if (!error) {
5236 error = xb_seek(&xbinfo, curargsopaquelength_offset);
5237 xb_add_32(error, &xbinfo, curargs_end_offset - curargslength_offset + XDRWORD/*version*/);
5238 }
5239 if (!error) {
5240 error = xb_seek(&xbinfo, infolength_offset);
5241 xb_add_32(error, &xbinfo, end_offset - infolength_offset + XDRWORD/*version*/);
5242 }
5243 nfsmerr_if(error);
5244
5245 /* copy result xdrbuf to caller */
5246 *xb = xbinfo;
5247
5248 /* and mark the local copy as not needing cleanup */
5249 xbinfo.xb_flags &= ~XB_CLEANUP;
5250nfsmerr:
5251 xb_cleanup(&xbinfo);
5252 return (error);
5253}
5254
5255/*
5256 * Do that sysctl thang...
5257 */
5258int
5259nfs_vfs_sysctl(int *name, u_int namelen, user_addr_t oldp, size_t *oldlenp,
5260 user_addr_t newp, size_t newlen, vfs_context_t ctx)
5261{
5262 int error = 0, val;
5263 int softnobrowse;
5264 struct sysctl_req *req = NULL;
5265 union union_vfsidctl vc;
5266 mount_t mp;
5267 struct nfsmount *nmp = NULL;
5268 struct vfsquery vq;
5269 struct nfsreq *rq;
5270 boolean_t is_64_bit;
5271 fsid_t fsid;
5272 struct xdrbuf xb;
5273 struct netfs_status *nsp = NULL;
5274 int timeoutmask;
5275 uint pos, totlen, count, numThreads;
5276#if NFSSERVER
5277 struct nfs_exportfs *nxfs;
5278 struct nfs_export *nx;
5279 struct nfs_active_user_list *ulist;
5280 struct nfs_export_stat_desc stat_desc;
5281 struct nfs_export_stat_rec statrec;
5282 struct nfs_user_stat_node *unode, *unode_next;
5283 struct nfs_user_stat_desc ustat_desc;
5284 struct nfs_user_stat_user_rec ustat_rec;
5285 struct nfs_user_stat_path_rec upath_rec;
5286 uint bytes_avail, bytes_total, recs_copied;
5287 uint numExports, numRecs;
5288#endif /* NFSSERVER */
5289
5290 /*
5291 * All names at this level are terminal.
5292 */
5293 if (namelen > 1)
5294 return (ENOTDIR); /* overloaded */
5295
5296 is_64_bit = vfs_context_is64bit(ctx);
5297
5298 /* common code for "new style" VFS_CTL sysctl, get the mount. */
5299 switch (name[0]) {
5300 case VFS_CTL_TIMEO:
5301 case VFS_CTL_NOLOCKS:
5302 case VFS_CTL_NSTATUS:
5303 case VFS_CTL_QUERY:
5304 req = CAST_DOWN(struct sysctl_req *, oldp);
5305 if (req == NULL) {
5306			return (EFAULT);
5307 }
5308 error = SYSCTL_IN(req, &vc, is_64_bit? sizeof(vc.vc64):sizeof(vc.vc32));
5309 if (error)
5310 return (error);
5311 mp = vfs_getvfs(&vc.vc32.vc_fsid); /* works for 32 and 64 */
5312 if (mp == NULL)
5313 return (ENOENT);
5314 nmp = VFSTONFS(mp);
5315 if (!nmp)
5316 return (ENOENT);
5317 bzero(&vq, sizeof(vq));
5318 req->newidx = 0;
5319 if (is_64_bit) {
5320 req->newptr = vc.vc64.vc_ptr;
5321 req->newlen = (size_t)vc.vc64.vc_len;
5322 } else {
5323 req->newptr = CAST_USER_ADDR_T(vc.vc32.vc_ptr);
5324 req->newlen = vc.vc32.vc_len;
5325 }
5326 break;
5327 }
5328
5329 switch(name[0]) {
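	/*
	 * NFS_NFSSTATS: copy out the global NFS statistics and, if new
	 * data is supplied, copy it back in (allowing the counters to be
	 * reset).
	 */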
5330 case NFS_NFSSTATS:
5331 if (!oldp) {
5332 *oldlenp = sizeof nfsstats;
5333 return (0);
5334 }
5335
5336 if (*oldlenp < sizeof nfsstats) {
5337 *oldlenp = sizeof nfsstats;
5338 return (ENOMEM);
5339 }
5340
5341 error = copyout(&nfsstats, oldp, sizeof nfsstats);
5342 if (error)
5343 return (error);
5344
5345 if (newp && newlen != sizeof nfsstats)
5346 return (EINVAL);
5347
5348 if (newp)
5349 return copyin(newp, &nfsstats, sizeof nfsstats);
5350 return (0);
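	/*
	 * NFS_MOUNTINFO: look up the NFS mount for the given fsid and copy
	 * out the mount info buffer assembled by nfs_mountinfo_assemble().
	 */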
5351 case NFS_MOUNTINFO:
5352 /* read in the fsid */
5353 if (*oldlenp < sizeof(fsid))
5354 return (EINVAL);
5355 if ((error = copyin(oldp, &fsid, sizeof(fsid))))
5356 return (error);
5357 /* swizzle it back to host order */
5358 fsid.val[0] = ntohl(fsid.val[0]);
5359 fsid.val[1] = ntohl(fsid.val[1]);
5360 /* find mount and make sure it's NFS */
5361 if (((mp = vfs_getvfs(&fsid))) == NULL)
5362 return (ENOENT);
5363 if (strcmp(mp->mnt_vfsstat.f_fstypename, "nfs"))
5364 return (EINVAL);
5365 if (((nmp = VFSTONFS(mp))) == NULL)
5366 return (ENOENT);
5367 xb_init(&xb, 0);
5368 if ((error = nfs_mountinfo_assemble(nmp, &xb)))
5369 return (error);
5370 if (*oldlenp < xb.xb_u.xb_buffer.xbb_len)
5371 error = ENOMEM;
5372 else
5373 error = copyout(xb_buffer_base(&xb), oldp, xb.xb_u.xb_buffer.xbb_len);
5374 *oldlenp = xb.xb_u.xb_buffer.xbb_len;
5375 xb_cleanup(&xb);
5376 break;
5377#if NFSSERVER
5378 case NFS_EXPORTSTATS:
5379 /* setup export stat descriptor */
5380 stat_desc.rec_vers = NFS_EXPORT_STAT_REC_VERSION;
5381
5382 if (!nfsrv_is_initialized()) {
5383 stat_desc.rec_count = 0;
5384 if (oldp && (*oldlenp >= sizeof(struct nfs_export_stat_desc)))
5385 error = copyout(&stat_desc, oldp, sizeof(struct nfs_export_stat_desc));
5386 *oldlenp = sizeof(struct nfs_export_stat_desc);
5387 return (error);
5388 }
5389
5390 /* Count the number of exported directories */
5391 lck_rw_lock_shared(&nfsrv_export_rwlock);
5392 numExports = 0;
5393 LIST_FOREACH(nxfs, &nfsrv_exports, nxfs_next)
5394 LIST_FOREACH(nx, &nxfs->nxfs_exports, nx_next)
5395 numExports += 1;
5396
5397 /* update stat descriptor's export record count */
5398 stat_desc.rec_count = numExports;
5399
5400 /* calculate total size of required buffer */
5401 totlen = sizeof(struct nfs_export_stat_desc) + (numExports * sizeof(struct nfs_export_stat_rec));
5402
5403 /* Check caller's buffer */
5404 if (oldp == 0) {
5405 lck_rw_done(&nfsrv_export_rwlock);
5406 /* indicate required buffer len */
5407 *oldlenp = totlen;
5408 return (0);
5409 }
5410
5411 /* We require the caller's buffer to be at least large enough to hold the descriptor */
5412 if (*oldlenp < sizeof(struct nfs_export_stat_desc)) {
5413 lck_rw_done(&nfsrv_export_rwlock);
5414 /* indicate required buffer len */
5415 *oldlenp = totlen;
5416 return (ENOMEM);
5417 }
5418
5419 /* indicate required buffer len */
5420 *oldlenp = totlen;
5421
5422 /* check if export table is empty */
5423 if (!numExports) {
5424 lck_rw_done(&nfsrv_export_rwlock);
5425 error = copyout(&stat_desc, oldp, sizeof(struct nfs_export_stat_desc));
5426 return (error);
5427 }
5428
5429 /* calculate how many actual export stat records fit into caller's buffer */
5430 numRecs = (*oldlenp - sizeof(struct nfs_export_stat_desc)) / sizeof(struct nfs_export_stat_rec);
5431
5432 if (!numRecs) {
5433			/* caller's buffer can only accommodate the descriptor */
5434 lck_rw_done(&nfsrv_export_rwlock);
5435 stat_desc.rec_count = 0;
5436 error = copyout(&stat_desc, oldp, sizeof(struct nfs_export_stat_desc));
5437 return (error);
5438 }
5439
5440 /* adjust to actual number of records to copyout to caller's buffer */
5441 if (numRecs > numExports)
5442 numRecs = numExports;
5443
5444 /* set actual number of records we are returning */
5445 stat_desc.rec_count = numRecs;
5446
5447 /* first copy out the stat descriptor */
5448 pos = 0;
5449 error = copyout(&stat_desc, oldp + pos, sizeof(struct nfs_export_stat_desc));
5450 if (error) {
5451 lck_rw_done(&nfsrv_export_rwlock);
5452 return (error);
5453 }
5454 pos += sizeof(struct nfs_export_stat_desc);
5455
5456 /* Loop through exported directories */
5457 count = 0;
5458 LIST_FOREACH(nxfs, &nfsrv_exports, nxfs_next) {
5459 LIST_FOREACH(nx, &nxfs->nxfs_exports, nx_next) {
5460
5461 if (count >= numRecs)
5462 break;
5463
5464 /* build exported filesystem path */
5465 snprintf(statrec.path, sizeof(statrec.path), "%s%s%s",
5466 nxfs->nxfs_path, ((nxfs->nxfs_path[1] && nx->nx_path[0]) ? "/" : ""),
5467 nx->nx_path);
5468
5469 /* build the 64-bit export stat counters */
5470 statrec.ops = ((uint64_t)nx->nx_stats.ops.hi << 32) |
5471 nx->nx_stats.ops.lo;
5472 statrec.bytes_read = ((uint64_t)nx->nx_stats.bytes_read.hi << 32) |
5473 nx->nx_stats.bytes_read.lo;
5474 statrec.bytes_written = ((uint64_t)nx->nx_stats.bytes_written.hi << 32) |
5475 nx->nx_stats.bytes_written.lo;
5476 error = copyout(&statrec, oldp + pos, sizeof(statrec));
5477 if (error) {
5478 lck_rw_done(&nfsrv_export_rwlock);
5479 return (error);
5480 }
5481 /* advance buffer position */
5482 pos += sizeof(statrec);
5483 }
5484 }
5485 lck_rw_done(&nfsrv_export_rwlock);
5486 break;
5487 case NFS_USERSTATS:
5488 /* init structures used for copying out of kernel */
5489 ustat_desc.rec_vers = NFS_USER_STAT_REC_VERSION;
5490 ustat_rec.rec_type = NFS_USER_STAT_USER_REC;
5491 upath_rec.rec_type = NFS_USER_STAT_PATH_REC;
5492
5493 /* initialize counters */
5494 bytes_total = sizeof(struct nfs_user_stat_desc);
5495 bytes_avail = *oldlenp;
5496 recs_copied = 0;
5497
5498 if (!nfsrv_is_initialized()) /* NFS server not initialized, so no stats */
5499 goto ustat_skip;
5500
5501 /* reclaim old expired user nodes */
5502 nfsrv_active_user_list_reclaim();
5503
5504 /* reserve space for the buffer descriptor */
5505 if (bytes_avail >= sizeof(struct nfs_user_stat_desc))
5506 bytes_avail -= sizeof(struct nfs_user_stat_desc);
5507 else
5508 bytes_avail = 0;
5509
5510 /* put buffer position past the buffer descriptor */
5511 pos = sizeof(struct nfs_user_stat_desc);
5512
5513 /* Loop through exported directories */
5514 lck_rw_lock_shared(&nfsrv_export_rwlock);
5515 LIST_FOREACH(nxfs, &nfsrv_exports, nxfs_next) {
5516 LIST_FOREACH(nx, &nxfs->nxfs_exports, nx_next) {
5517 /* copy out path */
5518 if (bytes_avail >= sizeof(struct nfs_user_stat_path_rec)) {
5519 snprintf(upath_rec.path, sizeof(upath_rec.path), "%s%s%s",
5520 nxfs->nxfs_path, ((nxfs->nxfs_path[1] && nx->nx_path[0]) ? "/" : ""),
5521 nx->nx_path);
5522
5523 error = copyout(&upath_rec, oldp + pos, sizeof(struct nfs_user_stat_path_rec));
5524 if (error) {
5525 /* punt */
5526 goto ustat_done;
5527 }
5528
5529 pos += sizeof(struct nfs_user_stat_path_rec);
5530 bytes_avail -= sizeof(struct nfs_user_stat_path_rec);
5531 recs_copied++;
5532 }
5533 else {
5534 /* Caller's buffer is exhausted */
5535 bytes_avail = 0;
5536 }
5537
5538 bytes_total += sizeof(struct nfs_user_stat_path_rec);
5539
5540 /* Scan through all user nodes of this export */
5541 ulist = &nx->nx_user_list;
5542 lck_mtx_lock(&ulist->user_mutex);
5543 for (unode = TAILQ_FIRST(&ulist->user_lru); unode; unode = unode_next) {
5544 unode_next = TAILQ_NEXT(unode, lru_link);
5545
5546 /* copy out node if there is space */
5547 if (bytes_avail >= sizeof(struct nfs_user_stat_user_rec)) {
5548 /* prepare a user stat rec for copying out */
5549 ustat_rec.uid = unode->uid;
5550 bcopy(&unode->sock, &ustat_rec.sock, unode->sock.ss_len);
5551 ustat_rec.ops = unode->ops;
5552 ustat_rec.bytes_read = unode->bytes_read;
5553 ustat_rec.bytes_written = unode->bytes_written;
5554 ustat_rec.tm_start = unode->tm_start;
5555 ustat_rec.tm_last = unode->tm_last;
5556
5557 error = copyout(&ustat_rec, oldp + pos, sizeof(struct nfs_user_stat_user_rec));
5558
5559 if (error) {
5560 /* punt */
5561 lck_mtx_unlock(&ulist->user_mutex);
5562 goto ustat_done;
5563 }
5564
5565 pos += sizeof(struct nfs_user_stat_user_rec);
5566 bytes_avail -= sizeof(struct nfs_user_stat_user_rec);
5567 recs_copied++;
5568 }
5569 else {
5570 /* Caller's buffer is exhausted */
5571 bytes_avail = 0;
5572 }
5573 bytes_total += sizeof(struct nfs_user_stat_user_rec);
5574 }
5575 /* can unlock this export's list now */
5576 lck_mtx_unlock(&ulist->user_mutex);
5577 }
5578 }
5579
5580ustat_done:
5581 /* unlock the export table */
5582 lck_rw_done(&nfsrv_export_rwlock);
5583
5584ustat_skip:
5585 /* indicate number of actual records copied */
5586 ustat_desc.rec_count = recs_copied;
5587
5588 if (!error) {
5589 /* check if there was enough room for the buffer descriptor */
5590 if (*oldlenp >= sizeof(struct nfs_user_stat_desc))
5591 error = copyout(&ustat_desc, oldp, sizeof(struct nfs_user_stat_desc));
5592 else
5593 error = ENOMEM;
5594
5595 /* always indicate required buffer size */
5596 *oldlenp = bytes_total;
5597 }
5598 break;
5599 case NFS_USERCOUNT:
5600 if (!oldp) {
5601 *oldlenp = sizeof(nfsrv_user_stat_node_count);
5602 return (0);
5603 }
5604
5605 if (*oldlenp < sizeof(nfsrv_user_stat_node_count)) {
5606 *oldlenp = sizeof(nfsrv_user_stat_node_count);
5607 return (ENOMEM);
5608 }
5609
5610 if (nfsrv_is_initialized()) {
5611 /* reclaim old expired user nodes */
5612 nfsrv_active_user_list_reclaim();
5613 }
5614
5615 error = copyout(&nfsrv_user_stat_node_count, oldp, sizeof(nfsrv_user_stat_node_count));
5616 break;
5617#endif /* NFSSERVER */
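	/*
	 * VFS_CTL_NOLOCKS: report and/or change whether file locking is
	 * disabled on this mount.  Locking cannot be toggled when local
	 * locks are in use, and cannot be disabled for NFSv4.
	 */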
5618 case VFS_CTL_NOLOCKS:
5619 if (req->oldptr != USER_ADDR_NULL) {
5620 lck_mtx_lock(&nmp->nm_lock);
5621 val = (nmp->nm_lockmode == NFS_LOCK_MODE_DISABLED) ? 1 : 0;
5622 lck_mtx_unlock(&nmp->nm_lock);
5623 error = SYSCTL_OUT(req, &val, sizeof(val));
5624 if (error)
5625 return (error);
5626 }
5627 if (req->newptr != USER_ADDR_NULL) {
5628 error = SYSCTL_IN(req, &val, sizeof(val));
5629 if (error)
5630 return (error);
5631 lck_mtx_lock(&nmp->nm_lock);
5632 if (nmp->nm_lockmode == NFS_LOCK_MODE_LOCAL) {
5633 /* can't toggle locks when using local locks */
5634 error = EINVAL;
5635 } else if ((nmp->nm_vers >= NFS_VER4) && val) {
5636 /* can't disable locks for NFSv4 */
5637 error = EINVAL;
5638 } else if (val) {
5639 if ((nmp->nm_vers <= NFS_VER3) && (nmp->nm_lockmode == NFS_LOCK_MODE_ENABLED))
5640 nfs_lockd_mount_unregister(nmp);
5641 nmp->nm_lockmode = NFS_LOCK_MODE_DISABLED;
5642 nmp->nm_state &= ~NFSSTA_LOCKTIMEO;
5643 } else {
5644 if ((nmp->nm_vers <= NFS_VER3) && (nmp->nm_lockmode == NFS_LOCK_MODE_DISABLED))
5645 nfs_lockd_mount_register(nmp);
5646 nmp->nm_lockmode = NFS_LOCK_MODE_ENABLED;
5647 }
5648 lck_mtx_unlock(&nmp->nm_lock);
5649 }
5650 break;
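	/*
	 * VFS_CTL_QUERY: report whether the mount is unresponsive or dead
	 * via the vq_flags bits.
	 */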
5651 case VFS_CTL_QUERY:
5652 lck_mtx_lock(&nmp->nm_lock);
5653 /* XXX don't allow users to know about/disconnect unresponsive, soft, nobrowse mounts */
5654 softnobrowse = (NMFLAG(nmp, SOFT) && (vfs_flags(nmp->nm_mountp) & MNT_DONTBROWSE));
5655 if (!softnobrowse && (nmp->nm_state & NFSSTA_TIMEO))
5656 vq.vq_flags |= VQ_NOTRESP;
5657 if (!softnobrowse && (nmp->nm_state & NFSSTA_JUKEBOXTIMEO) && !NMFLAG(nmp, MUTEJUKEBOX))
5658 vq.vq_flags |= VQ_NOTRESP;
5659 if (!softnobrowse && (nmp->nm_state & NFSSTA_LOCKTIMEO) &&
5660 (nmp->nm_lockmode == NFS_LOCK_MODE_ENABLED))
5661 vq.vq_flags |= VQ_NOTRESP;
5662 if (nmp->nm_state & NFSSTA_DEAD)
5663 vq.vq_flags |= VQ_DEAD;
5664 lck_mtx_unlock(&nmp->nm_lock);
5665 error = SYSCTL_OUT(req, &vq, sizeof(vq));
5666 break;
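	/*
	 * VFS_CTL_TIMEO: get or set the initial delay before this mount's
	 * "not responding" messages are logged; a negative value clears
	 * the delay.
	 */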
5667 case VFS_CTL_TIMEO:
5668 if (req->oldptr != USER_ADDR_NULL) {
5669 lck_mtx_lock(&nmp->nm_lock);
5670 val = nmp->nm_tprintf_initial_delay;
5671 lck_mtx_unlock(&nmp->nm_lock);
5672 error = SYSCTL_OUT(req, &val, sizeof(val));
5673 if (error)
5674 return (error);
5675 }
5676 if (req->newptr != USER_ADDR_NULL) {
5677 error = SYSCTL_IN(req, &val, sizeof(val));
5678 if (error)
5679 return (error);
5680 lck_mtx_lock(&nmp->nm_lock);
5681 if (val < 0)
5682 nmp->nm_tprintf_initial_delay = 0;
5683 else
5684 nmp->nm_tprintf_initial_delay = val;
5685 lck_mtx_unlock(&nmp->nm_lock);
5686 }
5687 break;
5688 case VFS_CTL_NSTATUS:
5689 /*
5690		 * Return the status of this mount.  This provides much more
5691		 * information than VFS_CTL_QUERY: in addition to the vq_flags
5692		 * it returns the significant mount options, the list of threads
5693		 * blocked on the mount, and how long those threads have been
5694		 * waiting.
5695 */
5696
5697 lck_mtx_lock(nfs_request_mutex);
5698 lck_mtx_lock(&nmp->nm_lock);
5699
5700 /*
5701 * Count the number of requests waiting for a reply.
5702 * Note: there could be multiple requests from the same thread.
5703 */
5704 numThreads = 0;
5705 TAILQ_FOREACH(rq, &nfs_reqq, r_chain) {
5706 if (rq->r_nmp == nmp)
5707 numThreads++;
5708 }
5709
5710 /* Calculate total size of result buffer */
5711 totlen = sizeof(struct netfs_status) + (numThreads * sizeof(uint64_t));
5712
5713 if (req->oldptr == USER_ADDR_NULL) { // Caller is querying buffer size
5714 lck_mtx_unlock(&nmp->nm_lock);
5715 lck_mtx_unlock(nfs_request_mutex);
5716 return SYSCTL_OUT(req, NULL, totlen);
5717 }
5718 if (req->oldlen < totlen) { // Check if caller's buffer is big enough
5719 lck_mtx_unlock(&nmp->nm_lock);
5720 lck_mtx_unlock(nfs_request_mutex);
5721 return (ERANGE);
5722 }
5723
5724 MALLOC(nsp, struct netfs_status *, totlen, M_TEMP, M_WAITOK|M_ZERO);
5725 if (nsp == NULL) {
5726 lck_mtx_unlock(&nmp->nm_lock);
5727 lck_mtx_unlock(nfs_request_mutex);
5728 return (ENOMEM);
5729 }
5730 timeoutmask = NFSSTA_TIMEO | NFSSTA_LOCKTIMEO | NFSSTA_JUKEBOXTIMEO;
5731 if (nmp->nm_state & timeoutmask)
5732 nsp->ns_status |= VQ_NOTRESP;
5733 if (nmp->nm_state & NFSSTA_DEAD)
5734 nsp->ns_status |= VQ_DEAD;
5735
5736 (void) nfs_mountopts(nmp, nsp->ns_mountopts, sizeof(nsp->ns_mountopts));
5737 nsp->ns_threadcount = numThreads;
5738
5739 /*
5740 * Get the thread ids of threads waiting for a reply
5741 * and find the longest wait time.
5742 */
5743 if (numThreads > 0) {
5744 struct timeval now;
5745 time_t sendtime;
5746
5747 microuptime(&now);
5748 count = 0;
5749 sendtime = now.tv_sec;
5750 TAILQ_FOREACH(rq, &nfs_reqq, r_chain) {
5751 if (rq->r_nmp == nmp) {
5752 if (rq->r_start < sendtime)
5753 sendtime = rq->r_start;
5754 // A thread_id of zero is used to represent an async I/O request.
5755 nsp->ns_threadids[count] =
5756 rq->r_thread ? thread_tid(rq->r_thread) : 0;
5757 if (++count >= numThreads)
5758 break;
5759 }
5760 }
5761 nsp->ns_waittime = now.tv_sec - sendtime;
5762 }
5763
5764 lck_mtx_unlock(&nmp->nm_lock);
5765 lck_mtx_unlock(nfs_request_mutex);
5766
5767 error = SYSCTL_OUT(req, nsp, totlen);
5768 FREE(nsp, M_TEMP);
5769 break;
5770 default:
5771 return (ENOTSUP);
5772 }
5773 return (error);
5774}