1 /*
2 * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. Please obtain a copy of the License at
10 * http://www.opensource.apple.com/apsl/ and read it before using this
11 * file.
12 *
13 * The Original Code and all software distributed under the License are
14 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
15 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
16 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
18 * Please see the License for the specific language governing rights and
19 * limitations under the License.
20 *
21 * @APPLE_LICENSE_HEADER_END@
22 */
23 /* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
24 /*
25 * Copyright (c) 1989, 1993
26 * The Regents of the University of California. All rights reserved.
27 *
28 * This code is derived from software contributed to Berkeley by
29 * Rick Macklem at The University of Guelph.
30 *
31 * Redistribution and use in source and binary forms, with or without
32 * modification, are permitted provided that the following conditions
33 * are met:
34 * 1. Redistributions of source code must retain the above copyright
35 * notice, this list of conditions and the following disclaimer.
36 * 2. Redistributions in binary form must reproduce the above copyright
37 * notice, this list of conditions and the following disclaimer in the
38 * documentation and/or other materials provided with the distribution.
39 * 3. All advertising materials mentioning features or use of this software
40 * must display the following acknowledgement:
41 * This product includes software developed by the University of
42 * California, Berkeley and its contributors.
43 * 4. Neither the name of the University nor the names of its contributors
44 * may be used to endorse or promote products derived from this software
45 * without specific prior written permission.
46 *
47 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
48 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
49 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
50 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
51 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
52 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
53 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
54 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
55 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
56 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
57 * SUCH DAMAGE.
58 *
59 * @(#)nfs_syscalls.c 8.5 (Berkeley) 3/30/95
60 * FreeBSD-Id: nfs_syscalls.c,v 1.32 1997/11/07 08:53:25 phk Exp $
61 */
62
63 #include <sys/param.h>
64 #include <sys/systm.h>
65 /* XXX CSM 11/25/97 FreeBSD's generated syscall prototypes */
66 #ifdef notyet
67 #include <sys/sysproto.h>
68 #endif
69 #include <sys/kernel.h>
70 #include <sys/file_internal.h>
71 #include <sys/filedesc.h>
72 #include <sys/stat.h>
73 #include <sys/vnode_internal.h>
74 #include <sys/mount_internal.h>
75 #include <sys/proc_internal.h> /* for fdflags */
76 #include <sys/kauth.h>
77 #include <sys/sysctl.h>
78 #include <sys/ubc.h>
79 #include <sys/uio.h>
80 #include <sys/malloc.h>
81 #include <sys/kpi_mbuf.h>
82 #include <sys/socket.h>
83 #include <sys/socketvar.h>
84 #include <sys/domain.h>
85 #include <sys/protosw.h>
86 #include <sys/fcntl.h>
87 #include <sys/lockf.h>
88 #include <sys/syslog.h>
89 #include <sys/user.h>
90 #include <sys/sysproto.h>
91 #include <sys/kpi_socket.h>
92 #include <libkern/OSAtomic.h>
93
94 #include <bsm/audit_kernel.h>
95
96 #include <netinet/in.h>
97 #include <netinet/tcp.h>
98 #if ISO
99 #include <netiso/iso.h>
100 #endif
101 #include <nfs/xdr_subs.h>
102 #include <nfs/rpcv2.h>
103 #include <nfs/nfsproto.h>
104 #include <nfs/nfs.h>
105 #include <nfs/nfsm_subs.h>
106 #include <nfs/nfsrvcache.h>
107 #include <nfs/nfsmount.h>
108 #include <nfs/nfsnode.h>
109 #include <nfs/nfsrtt.h>
110 #include <nfs/nfs_lock.h>
111
112 extern void unix_syscall_return(int);
113
114 /* Global defs. */
115 extern int (*nfsrv3_procs[NFS_NPROCS])(struct nfsrv_descript *nd,
116 struct nfssvc_sock *slp,
117 proc_t procp,
118 mbuf_t *mreqp);
119 extern int nfs_numasync;
120 extern int nfs_ioddelwri;
121 extern int nfsrtton;
122 extern struct nfsstats nfsstats;
123 extern int nfsrvw_procrastinate;
124 extern int nfsrvw_procrastinate_v3;
125
126 struct nfssvc_sock *nfs_udpsock, *nfs_cltpsock;
127 static int nuidhash_max = NFS_MAXUIDHASH;
128
129 static void nfsrv_zapsock(struct nfssvc_sock *slp);
130 static int nfssvc_iod(proc_t);
131 static int nfskerb_clientd(struct nfsmount *, struct nfsd_cargs *, int, user_addr_t, proc_t);
132
133 static int nfs_asyncdaemon[NFS_MAXASYNCDAEMON];
134
135 #ifndef NFS_NOSERVER
136 int nfsd_waiting = 0;
137 static struct nfsdrt nfsdrt;
138 int nfs_numnfsd = 0;
139 static void nfsd_rt(int sotype, struct nfsrv_descript *nd, int cacherep);
140 static int nfssvc_addsock(socket_t, mbuf_t, proc_t);
141 static int nfssvc_nfsd(struct nfsd_srvargs *,user_addr_t, proc_t);
142 static int nfssvc_export(user_addr_t, proc_t);
143
144 static int nfs_privport = 0;
145 /* XXX CSM 11/25/97 Upgrade sysctl.h someday */
146 #ifdef notyet
147 SYSCTL_INT(_vfs_nfs, NFS_NFSPRIVPORT, nfs_privport, CTLFLAG_RW, &nfs_privport, 0, "");
148 SYSCTL_INT(_vfs_nfs, OID_AUTO, gatherdelay, CTLFLAG_RW, &nfsrvw_procrastinate, 0, "");
149 SYSCTL_INT(_vfs_nfs, OID_AUTO, gatherdelay_v3, CTLFLAG_RW, &nfsrvw_procrastinate_v3, 0, "");
150 #endif
151
152 /*
153 * NFS server system calls
154 * getfh() lives here too, but maybe should move to kern/vfs_syscalls.c
155 */
156
157 /*
158 * Get file handle system call
159 */
160 int
161 getfh(proc_t p, struct getfh_args *uap, __unused int *retval)
162 {
163 vnode_t vp;
164 struct nfs_filehandle nfh;
165 int error;
166 struct nameidata nd;
167 struct vfs_context context;
168 char path[MAXPATHLEN], *ptr;
169 u_int pathlen;
170 struct nfs_exportfs *nxfs;
171 struct nfs_export *nx;
172
173 context.vc_proc = p;
174 context.vc_ucred = kauth_cred_get();
175
176 /*
177 * Must be super user
178 */
179 error = proc_suser(p);
180 if (error)
181 return (error);
182
183 error = copyinstr(uap->fname, path, MAXPATHLEN, (size_t *)&pathlen);
184 if (error)
185 return (error);
186
187 NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF | AUDITVNPATH1,
188 UIO_SYSSPACE, path, &context);
189 error = namei(&nd);
190 if (error)
191 return (error);
192 nameidone(&nd);
193
194 vp = nd.ni_vp;
195
196 // find exportfs that matches f_mntonname
197 lck_rw_lock_shared(&nfs_export_rwlock);
198 ptr = vnode_mount(vp)->mnt_vfsstat.f_mntonname;
199 LIST_FOREACH(nxfs, &nfs_exports, nxfs_next) {
200 if (!strcmp(nxfs->nxfs_path, ptr))
201 break;
202 }
203 if (!nxfs || strncmp(nxfs->nxfs_path, path, strlen(nxfs->nxfs_path))) {
204 error = EINVAL;
205 goto out;
206 }
207 // find export that best matches remainder of path
208 ptr = path + strlen(nxfs->nxfs_path);
209 while (*ptr && (*ptr == '/'))
210 ptr++;
211 LIST_FOREACH(nx, &nxfs->nxfs_exports, nx_next) {
212 int len = strlen(nx->nx_path);
213 if (len == 0) // we've hit the export entry for the root directory
214 break;
215 if (!strncmp(nx->nx_path, ptr, len))
216 break;
217 }
218 if (!nx) {
219 error = EINVAL;
220 goto out;
221 }
222
223 bzero(&nfh, sizeof(nfh));
224 nfh.nfh_xh.nxh_version = NFS_FH_VERSION;
225 nfh.nfh_xh.nxh_fsid = nxfs->nxfs_id;
226 nfh.nfh_xh.nxh_expid = nx->nx_id;
227 nfh.nfh_xh.nxh_flags = 0;
228 nfh.nfh_xh.nxh_reserved = 0;
229 nfh.nfh_len = NFS_MAX_FID_SIZE;
230 error = VFS_VPTOFH(vp, &nfh.nfh_len, &nfh.nfh_fid[0], NULL);
231 if (nfh.nfh_len > (int)NFS_MAX_FID_SIZE)
232 error = EOVERFLOW;
233 nfh.nfh_xh.nxh_fidlen = nfh.nfh_len;
234 nfh.nfh_len += sizeof(nfh.nfh_xh);
235
236 out:
237 lck_rw_done(&nfs_export_rwlock);
238 vnode_put(vp);
239 if (error)
240 return (error);
241 error = copyout((caddr_t)&nfh, uap->fhp, sizeof(nfh));
242 return (error);
243 }
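/*
 * Illustrative note (not part of the original file): the handle built above
 * is a struct nfs_exphandle header (version, file system id, export id,
 * flags, reserved word, FID length) followed by the file system FID bytes,
 * with nfh_len covering both.  A privileged exporter daemon could fetch a
 * handle for an exported path roughly like the sketch below, assuming a
 * hypothetical libc stub of the shape int getfh(const char *, void *):
 *
 *     struct nfs_filehandle nfh;
 *     if (getfh("/exports/home", &nfh) != 0)   // path is only an example
 *         err(1, "getfh");
 */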
244
245 #endif /* NFS_NOSERVER */
246
247 extern struct fileops vnops;
248
249 /*
250  * syscall for rpc.lockd to use to translate an NFS file handle into
251 * an open descriptor.
252 *
253 * warning: do not remove the suser() call or this becomes one giant
254 * security hole.
255 */
256 int
257 fhopen( proc_t p,
258 struct fhopen_args *uap,
259 register_t *retval)
260 {
261 vnode_t vp;
262 struct nfs_filehandle nfh;
263 struct nfs_export *nx;
264 struct nfs_export_options *nxo;
265 struct flock lf;
266 struct fileproc *fp, *nfp;
267 int fmode, error, type;
268 int indx;
269 kauth_cred_t cred = proc_ucred(p);
270 struct vfs_context context;
271 kauth_action_t action;
272
273 context.vc_proc = p;
274 context.vc_ucred = cred;
275
276 /*
277 * Must be super user
278 */
279 error = suser(cred, 0);
280 if (error)
281 return (error);
282
283 fmode = FFLAGS(uap->flags);
284 /* why not allow a non-read/write open for our lockd? */
285 if (((fmode & (FREAD | FWRITE)) == 0) || (fmode & O_CREAT))
286 return (EINVAL);
287
288 error = copyin(uap->u_fhp, &nfh.nfh_len, sizeof(nfh.nfh_len));
289 if (error)
290 return (error);
291 if ((nfh.nfh_len < (int)sizeof(struct nfs_exphandle)) ||
292 (nfh.nfh_len > (int)NFS_MAX_FH_SIZE))
293 return (EINVAL);
294 error = copyin(uap->u_fhp, &nfh, sizeof(nfh.nfh_len) + nfh.nfh_len);
295 if (error)
296 return (error);
297
298 lck_rw_lock_shared(&nfs_export_rwlock);
299 /* now give me my vnode, it gets returned to me with a reference */
300 error = nfsrv_fhtovp(&nfh, NULL, TRUE, &vp, &nx, &nxo);
301 lck_rw_done(&nfs_export_rwlock);
302 if (error)
303 return (error);
304
305 /*
306 * From now on we have to make sure not
307 * to forget about the vnode.
308 * Any error that causes an abort must vnode_put(vp).
309 * Just set error = err and 'goto bad;'.
310 */
311
312 /*
313 * from vn_open
314 */
315 if (vnode_vtype(vp) == VSOCK) {
316 error = EOPNOTSUPP;
317 goto bad;
318 }
319
320 /* disallow write operations on directories */
321 if (vnode_isdir(vp) && (fmode & (FWRITE | O_TRUNC))) {
322 error = EISDIR;
323 goto bad;
324 }
325
326 /* compute action to be authorized */
327 action = 0;
328 if (fmode & FREAD)
329 action |= KAUTH_VNODE_READ_DATA;
330 if (fmode & (FWRITE | O_TRUNC))
331 action |= KAUTH_VNODE_WRITE_DATA;
332 if ((error = vnode_authorize(vp, NULL, action, &context)) != 0)
333 goto bad;
334
335 if ((error = VNOP_OPEN(vp, fmode, &context)))
336 goto bad;
337 if ((error = vnode_ref_ext(vp, fmode)))
338 goto bad;
339
340 /*
341 * end of vn_open code
342 */
343
344 // starting here... error paths should call vn_close/vnode_put
345 if ((error = falloc(p, &nfp, &indx)) != 0) {
346 vn_close(vp, fmode & FMASK, cred, p);
347 goto bad;
348 }
349 fp = nfp;
350
351 fp->f_fglob->fg_flag = fmode & FMASK;
352 fp->f_fglob->fg_type = DTYPE_VNODE;
353 fp->f_fglob->fg_ops = &vnops;
354 fp->f_fglob->fg_data = (caddr_t)vp;
355
356 // XXX do we really need to support this with fhopen()?
357 if (fmode & (O_EXLOCK | O_SHLOCK)) {
358 lf.l_whence = SEEK_SET;
359 lf.l_start = 0;
360 lf.l_len = 0;
361 if (fmode & O_EXLOCK)
362 lf.l_type = F_WRLCK;
363 else
364 lf.l_type = F_RDLCK;
365 type = F_FLOCK;
366 if ((fmode & FNONBLOCK) == 0)
367 type |= F_WAIT;
368 if ((error = VNOP_ADVLOCK(vp, (caddr_t)fp->f_fglob, F_SETLK, &lf, type, &context))) {
369 vn_close(vp, fp->f_fglob->fg_flag, fp->f_fglob->fg_cred, p);
370 fp_free(p, indx, fp);
371 return (error);
372 }
373 fp->f_fglob->fg_flag |= FHASLOCK;
374 }
375
376 vnode_put(vp);
377
378 proc_fdlock(p);
379 *fdflags(p, indx) &= ~UF_RESERVED;
380 fp_drop(p, indx, fp, 1);
381 proc_fdunlock(p);
382
383 *retval = indx;
384 return (0);
385
386 bad:
387 vnode_put(vp);
388 return (error);
389 }
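/*
 * Illustrative note (not part of the original file): rpc.lockd hands fhopen()
 * the same nfs_filehandle layout getfh() produces -- a length word followed
 * by that many handle bytes -- and gets back an ordinary file descriptor.
 * A minimal sketch, assuming a hypothetical wrapper
 * int fhopen(const void *fhp, int flags):
 *
 *     struct nfs_filehandle nfh;   // filled in earlier, e.g. from an NLM request
 *     int fd = fhopen(&nfh, O_RDWR);
 *     if (fd < 0)
 *         err(1, "fhopen");
 */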
390
391 /*
392  * NFS server pseudo system call for the nfsds
393 * Based on the flag value it either:
394 * - adds a socket to the selection list
395 * - remains in the kernel as an nfsd
396 * - remains in the kernel as an nfsiod
397 */
398 int
399 nfssvc(proc_t p, struct nfssvc_args *uap, __unused int *retval)
400 {
401 #ifndef NFS_NOSERVER
402 struct nameidata nd;
403 mbuf_t nam;
404 struct user_nfsd_args user_nfsdarg;
405 struct nfsd_srvargs nfsd_srvargs, *nsd = &nfsd_srvargs;
406 struct nfsd_cargs ncd;
407 struct nfsd *nfsd;
408 struct nfssvc_sock *slp;
409 struct nfsuid *nuidp;
410 struct nfsmount *nmp;
411 struct timeval now;
412 socket_t so;
413 struct vfs_context context;
414 struct ucred temp_cred;
415 #endif /* NFS_NOSERVER */
416 int error;
417
418 AUDIT_ARG(cmd, uap->flag);
419
420 /*
421 * Must be super user
422 */
423 error = proc_suser(p);
424 if(error)
425 return (error);
426 if (uap->flag & NFSSVC_BIOD)
427 error = nfssvc_iod(p);
428 #ifdef NFS_NOSERVER
429 else
430 error = ENXIO;
431 #else /* !NFS_NOSERVER */
432 else if (uap->flag & NFSSVC_MNTD) {
433
434 context.vc_proc = p;
435 context.vc_ucred = kauth_cred_get();
436
437 error = copyin(uap->argp, (caddr_t)&ncd, sizeof (ncd));
438 if (error)
439 return (error);
440
441 NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF | AUDITVNPATH1,
442 (proc_is64bit(p) ? UIO_USERSPACE64 : UIO_USERSPACE32),
443 CAST_USER_ADDR_T(ncd.ncd_dirp), &context);
444 error = namei(&nd);
445 if (error)
446 return (error);
447 nameidone(&nd);
448
449 if (vnode_isvroot(nd.ni_vp) == 0)
450 error = EINVAL;
451 nmp = VFSTONFS(vnode_mount(nd.ni_vp));
452 vnode_put(nd.ni_vp);
453 if (error)
454 return (error);
455
456 if ((nmp->nm_state & NFSSTA_MNTD) &&
457 (uap->flag & NFSSVC_GOTAUTH) == 0)
458 return (0);
459 nmp->nm_state |= NFSSTA_MNTD;
460 error = nfskerb_clientd(nmp, &ncd, uap->flag, uap->argp, p);
461 } else if (uap->flag & NFSSVC_ADDSOCK) {
462 if (IS_64BIT_PROCESS(p)) {
463 error = copyin(uap->argp, (caddr_t)&user_nfsdarg, sizeof(user_nfsdarg));
464 } else {
465 struct nfsd_args tmp_args;
466 error = copyin(uap->argp, (caddr_t)&tmp_args, sizeof(tmp_args));
467 if (error == 0) {
468 user_nfsdarg.sock = tmp_args.sock;
469 user_nfsdarg.name = CAST_USER_ADDR_T(tmp_args.name);
470 user_nfsdarg.namelen = tmp_args.namelen;
471 }
472 }
473 if (error)
474 return (error);
475 /* get the socket */
476 error = file_socket(user_nfsdarg.sock, &so);
477 if (error)
478 return (error);
479 /* Get the client address for connected sockets. */
480 if (user_nfsdarg.name == USER_ADDR_NULL || user_nfsdarg.namelen == 0) {
481 nam = NULL;
482 } else {
483 error = sockargs(&nam, user_nfsdarg.name, user_nfsdarg.namelen, MBUF_TYPE_SONAME);
484 if (error) {
485 /* drop the iocount file_socket() grabbed on the file descriptor */
486 file_drop(user_nfsdarg.sock);
487 return (error);
488 }
489 }
490 /*
491 * nfssvc_addsock() will grab a retain count on the socket
492 * to keep the socket from being closed when nfsd closes its
493 * file descriptor for it.
494 */
495 error = nfssvc_addsock(so, nam, p);
496 /* drop the iocount file_socket() grabbed on the file descriptor */
497 file_drop(user_nfsdarg.sock);
498 } else if (uap->flag & NFSSVC_NFSD) {
499 error = copyin(uap->argp, (caddr_t)nsd, sizeof (*nsd));
500 if (error)
501 return (error);
502
503 if ((uap->flag & NFSSVC_AUTHIN) && ((nfsd = nsd->nsd_nfsd)) &&
504 (nfsd->nfsd_slp->ns_flag & SLP_VALID)) {
505 slp = nfsd->nfsd_slp;
506
507 /*
508 * First check to see if another nfsd has already
509 * added this credential.
510 */
511 for (nuidp = NUIDHASH(slp,nsd->nsd_cr.cr_uid)->lh_first;
512 nuidp != 0; nuidp = nuidp->nu_hash.le_next) {
513 if (kauth_cred_getuid(nuidp->nu_cr) == nsd->nsd_cr.cr_uid &&
514 (!nfsd->nfsd_nd->nd_nam2 ||
515 netaddr_match(NU_NETFAM(nuidp),
516 &nuidp->nu_haddr, nfsd->nfsd_nd->nd_nam2)))
517 break;
518 }
519 if (nuidp) {
520 nfsrv_setcred(nuidp->nu_cr,nfsd->nfsd_nd->nd_cr);
521 nfsd->nfsd_nd->nd_flag |= ND_KERBFULL;
522 } else {
523 /*
524 * Nope, so we will.
525 */
526 if (slp->ns_numuids < nuidhash_max) {
527 slp->ns_numuids++;
528 nuidp = (struct nfsuid *)
529 _MALLOC_ZONE(sizeof (struct nfsuid),
530 M_NFSUID, M_WAITOK);
531 } else
532 nuidp = (struct nfsuid *)0;
533 if ((slp->ns_flag & SLP_VALID) == 0) {
534 if (nuidp) {
535 FREE_ZONE((caddr_t)nuidp,
536 sizeof (struct nfsuid), M_NFSUID);
537 slp->ns_numuids--;
538 }
539 } else {
540 if (nuidp == (struct nfsuid *)0) {
541 nuidp = slp->ns_uidlruhead.tqh_first;
542 if (!nuidp)
543 return (ENOMEM);
544 LIST_REMOVE(nuidp, nu_hash);
545 TAILQ_REMOVE(&slp->ns_uidlruhead, nuidp,
546 nu_lru);
547 if (nuidp->nu_flag & NU_NAM)
548 mbuf_freem(nuidp->nu_nam);
549 kauth_cred_rele(nuidp->nu_cr);
550 }
551 nuidp->nu_flag = 0;
552
553 if (nsd->nsd_cr.cr_ngroups > NGROUPS)
554 nsd->nsd_cr.cr_ngroups = NGROUPS;
555
556 nfsrv_setcred(&nsd->nsd_cr, &temp_cred);
557 nuidp->nu_cr = kauth_cred_create(&temp_cred);
558
559 if (!nuidp->nu_cr) {
560 FREE_ZONE(nuidp, sizeof(struct nfsuid), M_NFSUID);
561 slp->ns_numuids--;
562 return (ENOMEM);
563 }
564 nuidp->nu_timestamp = nsd->nsd_timestamp;
565 microtime(&now);
566 nuidp->nu_expire = now.tv_sec + nsd->nsd_ttl;
567 /*
568 * and save the session key in nu_key.
569 */
570 bcopy(nsd->nsd_key, nuidp->nu_key,
571 sizeof (nsd->nsd_key));
572 if (nfsd->nfsd_nd->nd_nam2) {
573 struct sockaddr_in *saddr;
574
575 saddr = mbuf_data(nfsd->nfsd_nd->nd_nam2);
576 switch (saddr->sin_family) {
577 case AF_INET:
578 nuidp->nu_flag |= NU_INETADDR;
579 nuidp->nu_inetaddr =
580 saddr->sin_addr.s_addr;
581 break;
582 case AF_ISO:
583 default:
584 nuidp->nu_flag |= NU_NAM;
585 error = mbuf_copym(nfsd->nfsd_nd->nd_nam2, 0,
586 MBUF_COPYALL, MBUF_WAITOK,
587 &nuidp->nu_nam);
588 if (error) {
589 kauth_cred_rele(nuidp->nu_cr);
590 FREE_ZONE(nuidp, sizeof(struct nfsuid), M_NFSUID);
591 slp->ns_numuids--;
592 return (error);
593 }
594 break;
595 };
596 }
597 TAILQ_INSERT_TAIL(&slp->ns_uidlruhead, nuidp,
598 nu_lru);
599 LIST_INSERT_HEAD(NUIDHASH(slp, nsd->nsd_uid),
600 nuidp, nu_hash);
601 nfsrv_setcred(nuidp->nu_cr,
602 nfsd->nfsd_nd->nd_cr);
603 nfsd->nfsd_nd->nd_flag |= ND_KERBFULL;
604 }
605 }
606 }
607 if ((uap->flag & NFSSVC_AUTHINFAIL) && (nfsd = nsd->nsd_nfsd))
608 nfsd->nfsd_flag |= NFSD_AUTHFAIL;
609 error = nfssvc_nfsd(nsd, uap->argp, p);
610 } else if (uap->flag & NFSSVC_EXPORT) {
611 error = nfssvc_export(uap->argp, p);
612 } else {
613 error = EINVAL;
614 }
615 #endif /* NFS_NOSERVER */
616 if (error == EINTR || error == ERESTART)
617 error = 0;
618 return (error);
619 }
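/*
 * Illustrative note (not part of the original file): a user-space nfsd
 * typically drives this pseudo system call in two steps -- first it passes
 * each listening socket in with NFSSVC_ADDSOCK, then each server thread
 * parks itself in the kernel with NFSSVC_NFSD and only returns on a signal
 * or to request Kerberos authentication (ENEEDAUTH).  A rough sketch,
 * assuming a hypothetical wrapper int nfssvc(int flag, void *argp):
 *
 *     struct nfsd_args na = { .sock = sock_fd, .name = NULL, .namelen = 0 };
 *     nfssvc(NFSSVC_ADDSOCK, &na);          // register the socket
 *
 *     struct nfsd_srvargs srv = { 0 };
 *     nfssvc(NFSSVC_NFSD, &srv);            // serve requests until killed
 */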
620
621 /*
622 * NFSKERB client helper daemon.
623 * Gets authorization strings for "kerb" mounts.
624 */
625 static int
626 nfskerb_clientd(
627 struct nfsmount *nmp,
628 struct nfsd_cargs *ncd,
629 int flag,
630 user_addr_t argp,
631 proc_t p)
632 {
633 struct nfsuid *nuidp, *nnuidp;
634 int error = 0;
635 struct nfsreq *rp;
636 struct timeval now;
637
638 /*
639 * First initialize some variables
640 */
641 microtime(&now);
642
643 /*
644 * If an authorization string is being passed in, get it.
645 */
646 if ((flag & NFSSVC_GOTAUTH) && (nmp->nm_state & NFSSTA_MOUNTED) &&
647 ((nmp->nm_state & NFSSTA_WAITAUTH) == 0)) {
648 if (nmp->nm_state & NFSSTA_HASAUTH)
649 panic("cld kerb");
650 if ((flag & NFSSVC_AUTHINFAIL) == 0) {
651 if (ncd->ncd_authlen <= nmp->nm_authlen &&
652 ncd->ncd_verflen <= nmp->nm_verflen &&
653 !copyin(CAST_USER_ADDR_T(ncd->ncd_authstr),nmp->nm_authstr,ncd->ncd_authlen)&&
654 !copyin(CAST_USER_ADDR_T(ncd->ncd_verfstr),nmp->nm_verfstr,ncd->ncd_verflen)){
655 nmp->nm_authtype = ncd->ncd_authtype;
656 nmp->nm_authlen = ncd->ncd_authlen;
657 nmp->nm_verflen = ncd->ncd_verflen;
658 #if NFSKERB
659 nmp->nm_key = ncd->ncd_key;
660 #endif
661 } else
662 nmp->nm_state |= NFSSTA_AUTHERR;
663 } else
664 nmp->nm_state |= NFSSTA_AUTHERR;
665 nmp->nm_state |= NFSSTA_HASAUTH;
666 wakeup((caddr_t)&nmp->nm_authlen);
667 } else {
668 nmp->nm_state |= NFSSTA_WAITAUTH;
669 }
670
671 /*
672  * Loop every second updating the queue until there is a termination signal.
673 */
674 while (nmp->nm_state & NFSSTA_MOUNTED) {
675 /* Get an authorization string, if required. */
676 if ((nmp->nm_state & (NFSSTA_WAITAUTH | NFSSTA_HASAUTH)) == 0) {
677 ncd->ncd_authuid = nmp->nm_authuid;
678 if (copyout((caddr_t)ncd, argp, sizeof (struct nfsd_cargs)))
679 nmp->nm_state |= NFSSTA_WAITAUTH;
680 else
681 return (ENEEDAUTH);
682 }
683 /* Wait a bit (no pun) and do it again. */
684 if ((nmp->nm_state & NFSSTA_MOUNTED) &&
685 (nmp->nm_state & (NFSSTA_WAITAUTH | NFSSTA_HASAUTH))) {
686 error = tsleep((caddr_t)&nmp->nm_authstr, PSOCK | PCATCH,
687 "nfskrbtimr", hz / 3);
688 if (error == EINTR || error == ERESTART)
689 dounmount(nmp->nm_mountp, 0, p);
690 }
691 }
692
693 /*
694 * Finally, we can free up the mount structure.
695 */
696 for (nuidp = nmp->nm_uidlruhead.tqh_first; nuidp != 0; nuidp = nnuidp) {
697 nnuidp = nuidp->nu_lru.tqe_next;
698 LIST_REMOVE(nuidp, nu_hash);
699 TAILQ_REMOVE(&nmp->nm_uidlruhead, nuidp, nu_lru);
700 kauth_cred_rele(nuidp->nu_cr);
701 FREE_ZONE((caddr_t)nuidp, sizeof (struct nfsuid), M_NFSUID);
702 }
703 /*
704 * Loop through outstanding request list and remove dangling
705 * references to defunct nfsmount struct
706 */
707 for (rp = nfs_reqq.tqh_first; rp; rp = rp->r_chain.tqe_next)
708 if (rp->r_nmp == nmp)
709 rp->r_nmp = (struct nfsmount *)0;
710 /* Need to wake up any rcvlock waiters so they notice the unmount. */
711 if (nmp->nm_state & NFSSTA_WANTRCV) {
712 nmp->nm_state &= ~NFSSTA_WANTRCV;
713 wakeup(&nmp->nm_state);
714 }
715 FREE_ZONE((caddr_t)nmp, sizeof (struct nfsmount), M_NFSMNT);
716 if (error == EWOULDBLOCK)
717 error = 0;
718 return (error);
719 }
720
721 #ifndef NFS_NOSERVER
722 /*
723 * Adds a socket to the list for servicing by nfsds.
724 */
725 static int
726 nfssvc_addsock(
727 socket_t so,
728 mbuf_t mynam,
729 __unused proc_t p)
730 {
731 int siz;
732 struct nfssvc_sock *slp;
733 struct nfssvc_sock *tslp = NULL;
734 int error, sodomain, sotype, soprotocol, on = 1;
735 struct timeval timeo;
736
737 /* make sure mbuf constants are set up */
738 if (!nfs_mbuf_mlen)
739 nfs_mbuf_init();
740
741 sock_gettype(so, &sodomain, &sotype, &soprotocol);
742
743 /*
744 * Add it to the list, as required.
745 */
746 if (soprotocol == IPPROTO_UDP) {
747 tslp = nfs_udpsock;
748 if (!tslp || (tslp->ns_flag & SLP_VALID)) {
749 mbuf_freem(mynam);
750 return (EPERM);
751 }
752 #if ISO
753 } else if (soprotocol == ISOPROTO_CLTP) {
754 tslp = nfs_cltpsock;
755 if (!tslp || (tslp->ns_flag & SLP_VALID)) {
756 mbuf_freem(mynam);
757 return (EPERM);
758 }
759 #endif /* ISO */
760 }
761 /* reserve buffer space for 2 maximally-sized packets */
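	/* (stream sockets also need room for the RPC record mark word on each request) */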
762 siz = NFS_MAXPACKET;
763 if (sotype == SOCK_STREAM)
764 siz += sizeof (u_long);
765 siz *= 2;
766 if (siz > NFS_MAXSOCKBUF)
767 siz = NFS_MAXSOCKBUF;
768 if ((error = sock_setsockopt(so, SOL_SOCKET, SO_SNDBUF, &siz, sizeof(siz))) ||
769 (error = sock_setsockopt(so, SOL_SOCKET, SO_RCVBUF, &siz, sizeof(siz)))) {
770 mbuf_freem(mynam);
771 return (error);
772 }
773
774 /*
775 * Set protocol specific options { for now TCP only } and
776 * reserve some space. For datagram sockets, this can get called
777 * repeatedly for the same socket, but that isn't harmful.
778 */
779 if (sotype == SOCK_STREAM) {
780 sock_setsockopt(so, SOL_SOCKET, SO_KEEPALIVE, &on, sizeof(on));
781 }
782 if (sodomain == AF_INET && soprotocol == IPPROTO_TCP) {
783 sock_setsockopt(so, IPPROTO_TCP, TCP_NODELAY, &on, sizeof(on));
784 }
785
786 sock_nointerrupt(so, 0);
787
788 timeo.tv_usec = 0;
789 timeo.tv_sec = 0;
790 error = sock_setsockopt(so, SOL_SOCKET, SO_RCVTIMEO, &timeo, sizeof(timeo));
791 error = sock_setsockopt(so, SOL_SOCKET, SO_SNDTIMEO, &timeo, sizeof(timeo));
792
793 if (tslp) {
794 slp = tslp;
795 lck_mtx_lock(nfsd_mutex);
796 } else {
797 MALLOC(slp, struct nfssvc_sock *, sizeof(struct nfssvc_sock),
798 M_NFSSVC, M_WAITOK);
799 if (!slp) {
800 mbuf_freem(mynam);
801 return (ENOMEM);
802 }
803 bzero((caddr_t)slp, sizeof (struct nfssvc_sock));
804 lck_rw_init(&slp->ns_rwlock, nfs_slp_rwlock_group, nfs_slp_lock_attr);
805 lck_mtx_init(&slp->ns_wgmutex, nfs_slp_mutex_group, nfs_slp_lock_attr);
806 TAILQ_INIT(&slp->ns_uidlruhead);
807 lck_mtx_lock(nfsd_mutex);
808 TAILQ_INSERT_TAIL(&nfssvc_sockhead, slp, ns_chain);
809 }
810
811 sock_retain(so); /* grab a retain count on the socket */
812 slp->ns_so = so;
813 slp->ns_sotype = sotype;
814 slp->ns_nam = mynam;
815
816 socket_lock(so, 1);
817 so->so_upcallarg = (caddr_t)slp;
818 so->so_upcall = nfsrv_rcv;
819 so->so_rcv.sb_flags |= SB_UPCALL; /* required for freebsd merge */
820 socket_unlock(so, 1);
821
822 slp->ns_flag = SLP_VALID | SLP_NEEDQ;
823
824 nfsrv_wakenfsd(slp);
825 lck_mtx_unlock(nfsd_mutex);
826
827 return (0);
828 }
829
830 /*
831 * Called by nfssvc() for nfsds. Just loops around servicing rpc requests
832 * until it is killed by a signal.
833 */
834 static int
835 nfssvc_nfsd(nsd, argp, p)
836 struct nfsd_srvargs *nsd;
837 user_addr_t argp;
838 proc_t p;
839 {
840 mbuf_t m, mreq;
841 struct nfssvc_sock *slp;
842 struct nfsd *nfsd = nsd->nsd_nfsd;
843 struct nfsrv_descript *nd = NULL;
844 int error = 0, cacherep, writes_todo;
845 int siz, procrastinate;
846 u_quad_t cur_usec;
847 struct timeval now;
848 boolean_t funnel_state;
849
850 #ifndef nolint
851 cacherep = RC_DOIT;
852 writes_todo = 0;
853 #endif
854 if (nfsd == (struct nfsd *)0) {
855 MALLOC(nfsd, struct nfsd *, sizeof(struct nfsd), M_NFSD, M_WAITOK);
856 if (!nfsd)
857 return (ENOMEM);
858 nsd->nsd_nfsd = nfsd;
859 bzero((caddr_t)nfsd, sizeof (struct nfsd));
860 nfsd->nfsd_procp = p;
861 lck_mtx_lock(nfsd_mutex);
862 TAILQ_INSERT_TAIL(&nfsd_head, nfsd, nfsd_chain);
863 nfs_numnfsd++;
864 lck_mtx_unlock(nfsd_mutex);
865 }
866
867 funnel_state = thread_funnel_set(kernel_flock, FALSE);
868
869 /*
870 * Loop getting rpc requests until SIGKILL.
871 */
872 for (;;) {
873 if ((nfsd->nfsd_flag & NFSD_REQINPROG) == 0) {
874 lck_mtx_lock(nfsd_mutex);
875 while ((nfsd->nfsd_slp == NULL) && !(nfsd_head_flag & NFSD_CHECKSLP)) {
876 nfsd->nfsd_flag |= NFSD_WAITING;
877 nfsd_waiting++;
878 error = msleep(nfsd, nfsd_mutex, PSOCK | PCATCH, "nfsd", 0);
879 nfsd_waiting--;
880 if (error) {
881 lck_mtx_unlock(nfsd_mutex);
882 goto done;
883 }
884 }
885 if ((nfsd->nfsd_slp == NULL) && (nfsd_head_flag & NFSD_CHECKSLP)) {
886 TAILQ_FOREACH(slp, &nfssvc_sockhead, ns_chain) {
887 lck_rw_lock_shared(&slp->ns_rwlock);
888 if ((slp->ns_flag & (SLP_VALID | SLP_DOREC))
889 == (SLP_VALID | SLP_DOREC)) {
890 if (lck_rw_lock_shared_to_exclusive(&slp->ns_rwlock)) {
891 /* upgrade failed and we lost the lock; take exclusive and recheck */
892 lck_rw_lock_exclusive(&slp->ns_rwlock);
893 if ((slp->ns_flag & (SLP_VALID | SLP_DOREC))
894 != (SLP_VALID | SLP_DOREC)) {
895 /* flags no longer set, so skip this socket */
896 lck_rw_done(&slp->ns_rwlock);
897 continue;
898 }
899 }
900 slp->ns_flag &= ~SLP_DOREC;
901 slp->ns_sref++;
902 nfsd->nfsd_slp = slp;
903 lck_rw_done(&slp->ns_rwlock);
904 break;
905 }
906 lck_rw_done(&slp->ns_rwlock);
907 }
908 if (slp == 0)
909 nfsd_head_flag &= ~NFSD_CHECKSLP;
910 }
911 lck_mtx_unlock(nfsd_mutex);
912 if ((slp = nfsd->nfsd_slp) == NULL)
913 continue;
914 lck_rw_lock_exclusive(&slp->ns_rwlock);
915 if (slp->ns_flag & SLP_VALID) {
916 if (slp->ns_flag & SLP_DISCONN) {
917 nfsrv_zapsock(slp);
918 } else if (slp->ns_flag & SLP_NEEDQ) {
919 slp->ns_flag &= ~SLP_NEEDQ;
920 nfsrv_rcv_locked(slp->ns_so, slp, MBUF_WAITOK);
921 }
922 error = nfsrv_dorec(slp, nfsd, &nd);
923 microuptime(&now);
924 cur_usec = (u_quad_t)now.tv_sec * 1000000 +
925 (u_quad_t)now.tv_usec;
926 if (error && slp->ns_wgtime && (slp->ns_wgtime <= cur_usec)) {
927 error = 0;
928 cacherep = RC_DOIT;
929 writes_todo = 1;
930 } else
931 writes_todo = 0;
932 nfsd->nfsd_flag |= NFSD_REQINPROG;
933 }
934 lck_rw_done(&slp->ns_rwlock);
935 } else {
936 error = 0;
937 slp = nfsd->nfsd_slp;
938 }
939 if (error || (slp->ns_flag & SLP_VALID) == 0) {
940 if (nd) {
941 if (nd->nd_nam2)
942 mbuf_freem(nd->nd_nam2);
943 if (nd->nd_cr)
944 kauth_cred_rele(nd->nd_cr);
945 FREE_ZONE((caddr_t)nd,
946 sizeof *nd, M_NFSRVDESC);
947 nd = NULL;
948 }
949 nfsd->nfsd_slp = NULL;
950 nfsd->nfsd_flag &= ~NFSD_REQINPROG;
951 nfsrv_slpderef(slp);
952 continue;
953 }
954 if (nd) {
955 microuptime(&nd->nd_starttime);
956 if (nd->nd_nam2)
957 nd->nd_nam = nd->nd_nam2;
958 else
959 nd->nd_nam = slp->ns_nam;
960
961 /*
962 * Check to see if authorization is needed.
963 */
964 if (nfsd->nfsd_flag & NFSD_NEEDAUTH) {
965 nfsd->nfsd_flag &= ~NFSD_NEEDAUTH;
966 nsd->nsd_haddr = ((struct sockaddr_in *)mbuf_data(nd->nd_nam))->sin_addr.s_addr;
967 nsd->nsd_authlen = nfsd->nfsd_authlen;
968 nsd->nsd_verflen = nfsd->nfsd_verflen;
969 if (!copyout(nfsd->nfsd_authstr,CAST_USER_ADDR_T(nsd->nsd_authstr),
970 nfsd->nfsd_authlen) &&
971 !copyout(nfsd->nfsd_verfstr, CAST_USER_ADDR_T(nsd->nsd_verfstr),
972 nfsd->nfsd_verflen) &&
973 !copyout((caddr_t)nsd, argp, sizeof (*nsd))) {
974 thread_funnel_set(kernel_flock, funnel_state);
975 return (ENEEDAUTH);
976 }
977 cacherep = RC_DROPIT;
978 } else
979 cacherep = nfsrv_getcache(nd, slp, &mreq);
980
981 if (nfsd->nfsd_flag & NFSD_AUTHFAIL) {
982 nfsd->nfsd_flag &= ~NFSD_AUTHFAIL;
983 nd->nd_procnum = NFSPROC_NOOP;
984 nd->nd_repstat = (NFSERR_AUTHERR | AUTH_TOOWEAK);
985 cacherep = RC_DOIT;
986 } else if (nfs_privport) {
987 /* Check if source port is privileged */
988 u_short port;
989 struct sockaddr *nam = mbuf_data(nd->nd_nam);
990 struct sockaddr_in *sin;
991
992 sin = (struct sockaddr_in *)nam;
993 port = ntohs(sin->sin_port);
994 if (port >= IPPORT_RESERVED &&
995 nd->nd_procnum != NFSPROC_NULL) {
996 char strbuf[MAX_IPv4_STR_LEN];
997 nd->nd_procnum = NFSPROC_NOOP;
998 nd->nd_repstat = (NFSERR_AUTHERR | AUTH_TOOWEAK);
999 cacherep = RC_DOIT;
1000 printf("NFS request from unprivileged port (%s:%d)\n",
1001 inet_ntop(AF_INET, &sin->sin_addr, strbuf, sizeof(strbuf)),
1002 port);
1003 }
1004 }
1005
1006 }
1007
1008 /*
1009 		 * Loop to get all the write RPC replies that have been
1010 * gathered together.
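		 * (Write gathering: WRITE requests are held briefly -- for the
		 * nfsrvw_procrastinate interval -- so nfsrv_writegather() can
		 * coalesce adjacent writes; once a socket's ns_wgtime deadline
		 * passes, the queued replies are flushed here.)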
1011 */
1012 do {
1013 switch (cacherep) {
1014 case RC_DOIT:
1015 if (nd && (nd->nd_flag & ND_NFSV3))
1016 procrastinate = nfsrvw_procrastinate_v3;
1017 else
1018 procrastinate = nfsrvw_procrastinate;
1019 lck_rw_lock_shared(&nfs_export_rwlock);
1020 if (writes_todo || ((nd->nd_procnum == NFSPROC_WRITE) && (procrastinate > 0)))
1021 error = nfsrv_writegather(&nd, slp, nfsd->nfsd_procp, &mreq);
1022 else
1023 error = (*(nfsrv3_procs[nd->nd_procnum]))(nd, slp, nfsd->nfsd_procp, &mreq);
1024 lck_rw_done(&nfs_export_rwlock);
1025 if (mreq == NULL)
1026 break;
1027 if (error) {
1028 OSAddAtomic(1, (SInt32*)&nfsstats.srv_errs);
1029 nfsrv_updatecache(nd, FALSE, mreq);
1030 if (nd->nd_nam2) {
1031 mbuf_freem(nd->nd_nam2);
1032 nd->nd_nam2 = NULL;
1033 }
1034 break;
1035 }
1036 OSAddAtomic(1, (SInt32*)&nfsstats.srvrpccnt[nd->nd_procnum]);
1037 nfsrv_updatecache(nd, TRUE, mreq);
1038 nd->nd_mrep = NULL;
1039 case RC_REPLY:
1040 m = mreq;
1041 siz = 0;
1042 while (m) {
1043 siz += mbuf_len(m);
1044 m = mbuf_next(m);
1045 }
1046 if (siz <= 0 || siz > NFS_MAXPACKET) {
1047 printf("mbuf siz=%d\n",siz);
1048 panic("Bad nfs svc reply");
1049 }
1050 m = mreq;
1051 mbuf_pkthdr_setlen(m, siz);
1052 error = mbuf_pkthdr_setrcvif(m, NULL);
1053 if (error)
1054 panic("nfsd setrcvif failed: %d", error);
1055 /*
1056 * For stream protocols, prepend a Sun RPC
1057 * Record Mark.
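			 * The mark is one 32-bit word: the high bit flags the
			 * final fragment and the low 31 bits carry the fragment
			 * length, which is exactly the htonl(0x80000000 | siz)
			 * written below.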
1058 */
1059 if (slp->ns_sotype == SOCK_STREAM) {
1060 error = mbuf_prepend(&m, NFSX_UNSIGNED, MBUF_WAITOK);
1061 if (!error)
1062 *(u_long*)mbuf_data(m) = htonl(0x80000000 | siz);
1063 }
1064 if (!error) {
1065 if (slp->ns_flag & SLP_VALID) {
1066 error = nfs_send(slp->ns_so, nd->nd_nam2, m, NULL);
1067 } else {
1068 error = EPIPE;
1069 mbuf_freem(m);
1070 }
1071 } else {
1072 mbuf_freem(m);
1073 }
1074 mreq = NULL;
1075 if (nfsrtton)
1076 nfsd_rt(slp->ns_sotype, nd, cacherep);
1077 if (nd->nd_nam2) {
1078 mbuf_freem(nd->nd_nam2);
1079 nd->nd_nam2 = NULL;
1080 }
1081 if (nd->nd_mrep) {
1082 mbuf_freem(nd->nd_mrep);
1083 nd->nd_mrep = NULL;
1084 }
1085 if (error == EPIPE) {
1086 lck_rw_lock_exclusive(&slp->ns_rwlock);
1087 nfsrv_zapsock(slp);
1088 lck_rw_done(&slp->ns_rwlock);
1089 }
1090 if (error == EINTR || error == ERESTART) {
1091 if (nd->nd_cr)
1092 kauth_cred_rele(nd->nd_cr);
1093 FREE_ZONE((caddr_t)nd, sizeof *nd, M_NFSRVDESC);
1094 nfsrv_slpderef(slp);
1095 goto done;
1096 }
1097 break;
1098 case RC_DROPIT:
1099 if (nfsrtton)
1100 nfsd_rt(slp->ns_sotype, nd, cacherep);
1101 mbuf_freem(nd->nd_mrep);
1102 mbuf_freem(nd->nd_nam2);
1103 nd->nd_mrep = nd->nd_nam2 = NULL;
1104 break;
1105 };
1106 if (nd) {
1107 if (nd->nd_mrep)
1108 mbuf_freem(nd->nd_mrep);
1109 if (nd->nd_nam2)
1110 mbuf_freem(nd->nd_nam2);
1111 if (nd->nd_cr)
1112 kauth_cred_rele(nd->nd_cr);
1113 FREE_ZONE((caddr_t)nd, sizeof *nd, M_NFSRVDESC);
1114 nd = NULL;
1115 }
1116
1117 /*
1118 * Check to see if there are outstanding writes that
1119 * need to be serviced.
1120 */
1121 microuptime(&now);
1122 cur_usec = (u_quad_t)now.tv_sec * 1000000 +
1123 (u_quad_t)now.tv_usec;
1124 if (slp->ns_wgtime && (slp->ns_wgtime <= cur_usec)) {
1125 cacherep = RC_DOIT;
1126 writes_todo = 1;
1127 } else {
1128 writes_todo = 0;
1129 }
1130 } while (writes_todo);
1131 lck_rw_lock_exclusive(&slp->ns_rwlock);
1132 if (nfsrv_dorec(slp, nfsd, &nd)) {
1133 lck_rw_done(&slp->ns_rwlock);
1134 nfsd->nfsd_flag &= ~NFSD_REQINPROG;
1135 nfsd->nfsd_slp = NULL;
1136 nfsrv_slpderef(slp);
1137 } else {
1138 lck_rw_done(&slp->ns_rwlock);
1139 }
1140 }
1141 done:
1142 thread_funnel_set(kernel_flock, funnel_state);
1143 lck_mtx_lock(nfsd_mutex);
1144 TAILQ_REMOVE(&nfsd_head, nfsd, nfsd_chain);
1145 FREE(nfsd, M_NFSD);
1146 nsd->nsd_nfsd = (struct nfsd *)0;
1147 if (--nfs_numnfsd == 0)
1148 nfsrv_init(TRUE); /* Reinitialize everything */
1149 lck_mtx_unlock(nfsd_mutex);
1150 return (error);
1151 }
1152
1153 static int
1154 nfssvc_export(user_addr_t argp, proc_t p)
1155 {
1156 int error = 0, is_64bit;
1157 struct user_nfs_export_args unxa;
1158 struct vfs_context context;
1159
1160 context.vc_proc = p;
1161 context.vc_ucred = kauth_cred_get();
1162 is_64bit = IS_64BIT_PROCESS(p);
1163
1164 /* copy in pointers to path and export args */
1165 if (is_64bit) {
1166 error = copyin(argp, (caddr_t)&unxa, sizeof(unxa));
1167 } else {
1168 struct nfs_export_args tnxa;
1169 error = copyin(argp, (caddr_t)&tnxa, sizeof(tnxa));
1170 if (error == 0) {
1171 /* munge into LP64 version of nfs_export_args structure */
1172 unxa.nxa_fsid = tnxa.nxa_fsid;
1173 unxa.nxa_expid = tnxa.nxa_expid;
1174 unxa.nxa_fspath = CAST_USER_ADDR_T(tnxa.nxa_fspath);
1175 unxa.nxa_exppath = CAST_USER_ADDR_T(tnxa.nxa_exppath);
1176 unxa.nxa_flags = tnxa.nxa_flags;
1177 unxa.nxa_netcount = tnxa.nxa_netcount;
1178 unxa.nxa_nets = CAST_USER_ADDR_T(tnxa.nxa_nets);
1179 }
1180 }
1181 if (error)
1182 return (error);
1183
1184 error = nfsrv_export(&unxa, &context);
1185
1186 return (error);
1187 }
1188
1189 #endif /* NFS_NOSERVER */
1190
1191 int nfs_defect = 0;
1192 /* XXX CSM 11/25/97 Upgrade sysctl.h someday */
1193 #ifdef notyet
1194 SYSCTL_INT(_vfs_nfs, OID_AUTO, defect, CTLFLAG_RW, &nfs_defect, 0, "");
1195 #endif
1196
1197 int
1198 nfsclnt(proc_t p, struct nfsclnt_args *uap, __unused int *retval)
1199 {
1200 struct lockd_ans la;
1201 int error;
1202
1203 if (uap->flag == NFSCLNT_LOCKDWAIT) {
1204 return (nfslockdwait(p));
1205 }
1206 if (uap->flag == NFSCLNT_LOCKDANS) {
1207 error = copyin(uap->argp, &la, sizeof(la));
1208 return (error != 0 ? error : nfslockdans(p, &la));
1209 }
1210 if (uap->flag == NFSCLNT_LOCKDFD)
1211 return (nfslockdfd(p, CAST_DOWN(int, uap->argp)));
1212 return EINVAL;
1213 }
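/*
 * Illustrative note (not part of the original file): nfsclnt() is the
 * client-side companion to nfssvc() and is used by rpc.lockd.  Delivering
 * the answer to an outstanding lock request might look roughly like this,
 * assuming a hypothetical wrapper int nfsclnt(int flag, void *argp):
 *
 *     struct lockd_ans ans;                 // filled in from the NLM reply
 *     nfsclnt(NFSCLNT_LOCKDANS, &ans);
 */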
1214
1215
1216 static int nfssvc_iod_continue(int);
1217
1218 /*
1219 * Asynchronous I/O daemons for client nfs.
1220 * They do read-ahead and write-behind operations on the block I/O cache.
1221  * They never return unless they fail or are killed.
1222 */
1223 static int
1224 nfssvc_iod(__unused proc_t p)
1225 {
1226 register int i, myiod;
1227 struct uthread *ut;
1228
1229 /*
1230 * Assign my position or return error if too many already running
1231 */
1232 myiod = -1;
1233 for (i = 0; i < NFS_MAXASYNCDAEMON; i++)
1234 if (nfs_asyncdaemon[i] == 0) {
1235 nfs_asyncdaemon[i]++;
1236 myiod = i;
1237 break;
1238 }
1239 if (myiod == -1)
1240 return (EBUSY);
1241 nfs_numasync++;
1242
1243 /* stuff myiod into uthread to get off local stack for continuation */
1244
1245 ut = (struct uthread *)get_bsdthread_info(current_thread());
1246 ut->uu_state.uu_nfs_myiod = myiod; /* squirrel away for continuation */
1247
1248 nfssvc_iod_continue(0);
1249 /* NOTREACHED */
1250 return (0);
1251 }
1252
1253 /*
1254 * Continuation for Asynchronous I/O daemons for client nfs.
1255 */
1256 static int
1257 nfssvc_iod_continue(int error)
1258 {
1259 register struct nfsbuf *bp;
1260 register int i, myiod;
1261 struct nfsmount *nmp;
1262 struct uthread *ut;
1263 proc_t p;
1264
1265 /*
1266 * real myiod is stored in uthread, recover it
1267 */
1268 ut = (struct uthread *)get_bsdthread_info(current_thread());
1269 myiod = ut->uu_state.uu_nfs_myiod;
1270 p = current_proc(); // XXX
1271
1272 /*
1273 	 * Just loop around doing our stuff until SIGKILL
1274 * - actually we don't loop with continuations...
1275 */
1276 lck_mtx_lock(nfs_iod_mutex);
1277 for (;;) {
1278 while (((nmp = nfs_iodmount[myiod]) == NULL
1279 || nmp->nm_bufq.tqh_first == NULL)
1280 && error == 0 && nfs_ioddelwri == 0) {
1281 if (nmp)
1282 nmp->nm_bufqiods--;
1283 nfs_iodwant[myiod] = p; // XXX this doesn't need to be a proc_t
1284 nfs_iodmount[myiod] = NULL;
1285 error = msleep0((caddr_t)&nfs_iodwant[myiod], nfs_iod_mutex,
1286 PWAIT | PCATCH | PDROP, "nfsidl", 0, nfssvc_iod_continue);
1287 lck_mtx_lock(nfs_iod_mutex);
1288 }
1289 if (error) {
1290 nfs_asyncdaemon[myiod] = 0;
1291 if (nmp) nmp->nm_bufqiods--;
1292 nfs_iodwant[myiod] = NULL;
1293 nfs_iodmount[myiod] = NULL;
1294 lck_mtx_unlock(nfs_iod_mutex);
1295 nfs_numasync--;
1296 if (error == EINTR || error == ERESTART)
1297 error = 0;
1298 unix_syscall_return(error);
1299 }
1300 if (nmp != NULL) {
1301 while ((bp = TAILQ_FIRST(&nmp->nm_bufq)) != NULL) {
1302 /* Take one off the front of the list */
1303 TAILQ_REMOVE(&nmp->nm_bufq, bp, nb_free);
1304 bp->nb_free.tqe_next = NFSNOLIST;
1305 nmp->nm_bufqlen--;
1306 if (nmp->nm_bufqwant && nmp->nm_bufqlen < 2 * nfs_numasync) {
1307 nmp->nm_bufqwant = FALSE;
1308 lck_mtx_unlock(nfs_iod_mutex);
1309 wakeup(&nmp->nm_bufq);
1310 } else {
1311 lck_mtx_unlock(nfs_iod_mutex);
1312 }
1313
1314 SET(bp->nb_flags, NB_IOD);
1315 if (ISSET(bp->nb_flags, NB_READ))
1316 nfs_doio(bp, bp->nb_rcred, NULL);
1317 else
1318 nfs_doio(bp, bp->nb_wcred, NULL);
1319
1320 lck_mtx_lock(nfs_iod_mutex);
1321 /*
1322 				 * If there is more than one iod on this mount, then defect
1323 * so that the iods can be shared out fairly between the mounts
1324 */
1325 if (nfs_defect && nmp->nm_bufqiods > 1) {
1326 nfs_iodmount[myiod] = NULL;
1327 nmp->nm_bufqiods--;
1328 break;
1329 }
1330 }
1331 }
1332 lck_mtx_unlock(nfs_iod_mutex);
1333
1334 if (nfs_ioddelwri) {
1335 i = 0;
1336 nfs_ioddelwri = 0;
1337 lck_mtx_lock(nfs_buf_mutex);
1338 while (i < 8 && (bp = TAILQ_FIRST(&nfsbufdelwri)) != NULL) {
1339 struct nfsnode *np = VTONFS(bp->nb_vp);
1340 nfs_buf_remfree(bp);
1341 nfs_buf_refget(bp);
1342 while ((error = nfs_buf_acquire(bp, 0, 0, 0)) == EAGAIN);
1343 nfs_buf_refrele(bp);
1344 if (error)
1345 break;
1346 if (!bp->nb_vp) {
1347 /* buffer is no longer valid */
1348 nfs_buf_drop(bp);
1349 continue;
1350 }
1351 if (ISSET(bp->nb_flags, NB_NEEDCOMMIT)) {
1352 /* put buffer at end of delwri list */
1353 TAILQ_INSERT_TAIL(&nfsbufdelwri, bp, nb_free);
1354 nfsbufdelwricnt++;
1355 nfs_buf_drop(bp);
1356 lck_mtx_unlock(nfs_buf_mutex);
1357 nfs_flushcommits(np->n_vnode, NULL, 1);
1358 } else {
1359 SET(bp->nb_flags, (NB_ASYNC | NB_IOD));
1360 lck_mtx_unlock(nfs_buf_mutex);
1361 nfs_buf_write(bp);
1362 }
1363 i++;
1364 lck_mtx_lock(nfs_buf_mutex);
1365 }
1366 lck_mtx_unlock(nfs_buf_mutex);
1367 }
1368
1369 lck_mtx_lock(nfs_iod_mutex);
1370 }
1371 }
1372
1373 /*
1374 * Shut down a socket associated with an nfssvc_sock structure.
1375 * Should be called with the send lock set, if required.
1376 * The trick here is to increment the sref at the start, so that the nfsds
1377 * will stop using it and clear ns_flag at the end so that it will not be
1378 * reassigned during cleanup.
1379 */
1380 static void
1381 nfsrv_zapsock(struct nfssvc_sock *slp)
1382 {
1383 socket_t so;
1384
1385 if ((slp->ns_flag & SLP_VALID) == 0)
1386 return;
1387 slp->ns_flag &= ~SLP_ALLFLAGS;
1388
1389 so = slp->ns_so;
1390 if (so == NULL)
1391 return;
1392
1393 socket_lock(so, 1);
1394 so->so_upcall = NULL;
1395 so->so_rcv.sb_flags &= ~SB_UPCALL;
1396 socket_unlock(so, 1);
1397 sock_shutdown(so, SHUT_RDWR);
1398 }
1399
1400 /*
1401 * Get an authorization string for the uid by having the mount_nfs sitting
1402  * on this mount point porpoise out of the kernel and do it.
1403 */
1404 int
1405 nfs_getauth(nmp, rep, cred, auth_str, auth_len, verf_str, verf_len, key)
1406 register struct nfsmount *nmp;
1407 struct nfsreq *rep;
1408 kauth_cred_t cred;
1409 char **auth_str;
1410 int *auth_len;
1411 char *verf_str;
1412 int *verf_len;
1413 NFSKERBKEY_T key; /* return session key */
1414 {
1415 int error = 0;
1416
1417 while ((nmp->nm_state & NFSSTA_WAITAUTH) == 0) {
1418 nmp->nm_state |= NFSSTA_WANTAUTH;
1419 (void) tsleep((caddr_t)&nmp->nm_authtype, PSOCK,
1420 "nfsauth1", 2 * hz);
1421 error = nfs_sigintr(nmp, rep, rep->r_procp);
1422 if (error) {
1423 nmp->nm_state &= ~NFSSTA_WANTAUTH;
1424 return (error);
1425 }
1426 }
1427 nmp->nm_state &= ~NFSSTA_WANTAUTH;
1428 MALLOC(*auth_str, char *, RPCAUTH_MAXSIZ, M_TEMP, M_WAITOK);
1429 if (!*auth_str)
1430 return (ENOMEM);
1431 nmp->nm_authstr = *auth_str;
1432 nmp->nm_authlen = RPCAUTH_MAXSIZ;
1433 nmp->nm_verfstr = verf_str;
1434 nmp->nm_verflen = *verf_len;
1435 nmp->nm_authuid = kauth_cred_getuid(cred);
1436 nmp->nm_state &= ~NFSSTA_WAITAUTH;
1437 wakeup((caddr_t)&nmp->nm_authstr);
1438
1439 /*
1440 * And wait for mount_nfs to do its stuff.
1441 */
1442 while ((nmp->nm_state & NFSSTA_HASAUTH) == 0 && error == 0) {
1443 (void) tsleep((caddr_t)&nmp->nm_authlen, PSOCK,
1444 "nfsauth2", 2 * hz);
1445 error = nfs_sigintr(nmp, rep, rep->r_procp);
1446 }
1447 if (nmp->nm_state & NFSSTA_AUTHERR) {
1448 nmp->nm_state &= ~NFSSTA_AUTHERR;
1449 error = EAUTH;
1450 }
1451 if (error)
1452 FREE(*auth_str, M_TEMP);
1453 else {
1454 *auth_len = nmp->nm_authlen;
1455 *verf_len = nmp->nm_verflen;
1456 bcopy((caddr_t)nmp->nm_key, (caddr_t)key, sizeof (key));
1457 }
1458 nmp->nm_state &= ~NFSSTA_HASAUTH;
1459 nmp->nm_state |= NFSSTA_WAITAUTH;
1460 if (nmp->nm_state & NFSSTA_WANTAUTH) {
1461 nmp->nm_state &= ~NFSSTA_WANTAUTH;
1462 wakeup((caddr_t)&nmp->nm_authtype);
1463 }
1464 return (error);
1465 }
1466
1467 /*
1468 * Get a nickname authenticator and verifier.
1469 */
1470 int
1471 nfs_getnickauth(
1472 struct nfsmount *nmp,
1473 kauth_cred_t cred,
1474 char **auth_str,
1475 int *auth_len,
1476 char *verf_str,
1477 __unused int verf_len)
1478 {
1479 register struct nfsuid *nuidp;
1480 register u_long *nickp, *verfp;
1481 struct timeval ktvin, ktvout, now;
1482
1483 #if DIAGNOSTIC
1484 if (verf_len < (4 * NFSX_UNSIGNED))
1485 panic("nfs_getnickauth verf too small");
1486 #endif
1487 for (nuidp = NMUIDHASH(nmp, kauth_cred_getuid(cred))->lh_first;
1488 nuidp != 0; nuidp = nuidp->nu_hash.le_next) {
1489 if (kauth_cred_getuid(nuidp->nu_cr) == kauth_cred_getuid(cred))
1490 break;
1491 }
1492 microtime(&now);
1493 if (!nuidp || nuidp->nu_expire < now.tv_sec)
1494 return (EACCES);
1495
1496 MALLOC(nickp, u_long *, 2 * NFSX_UNSIGNED, M_TEMP, M_WAITOK);
1497 if (!nickp)
1498 return (ENOMEM);
1499
1500 /*
1501 * Move to the end of the lru list (end of lru == most recently used).
1502 */
1503 TAILQ_REMOVE(&nmp->nm_uidlruhead, nuidp, nu_lru);
1504 TAILQ_INSERT_TAIL(&nmp->nm_uidlruhead, nuidp, nu_lru);
1505
1506 *nickp++ = txdr_unsigned(RPCAKN_NICKNAME);
1507 *nickp = txdr_unsigned(nuidp->nu_nickname);
1508 *auth_str = (char *)nickp;
1509 *auth_len = 2 * NFSX_UNSIGNED;
1510
1511 /*
1512 * Now we must encrypt the verifier and package it up.
1513 */
1514 verfp = (u_long *)verf_str;
1515 *verfp++ = txdr_unsigned(RPCAKN_NICKNAME);
1516 microtime(&now);
1517 if (now.tv_sec > nuidp->nu_timestamp.tv_sec ||
1518 (now.tv_sec == nuidp->nu_timestamp.tv_sec &&
1519 now.tv_usec > nuidp->nu_timestamp.tv_usec))
1520 nuidp->nu_timestamp = now;
1521 else
1522 nuidp->nu_timestamp.tv_usec++;
1523 ktvin.tv_sec = txdr_unsigned(nuidp->nu_timestamp.tv_sec);
1524 ktvin.tv_usec = txdr_unsigned(nuidp->nu_timestamp.tv_usec);
1525
1526 /*
1527 * Now encrypt the timestamp verifier in ecb mode using the session
1528 * key.
1529 */
1530 #if NFSKERB
1531 XXX
1532 #endif
1533
1534 *verfp++ = ktvout.tv_sec;
1535 *verfp++ = ktvout.tv_usec;
1536 *verfp = 0;
1537 return (0);
1538 }
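/*
 * Illustrative note (not part of the original file): the authenticator built
 * above is two XDR words (RPCAKN_NICKNAME, nickname); the verifier is
 * RPCAKN_NICKNAME followed by the session-key-encrypted timestamp and a
 * zero word.  The actual encryption step is stubbed out under #if NFSKERB.
 */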
1539
1540 /*
1541 * Save the current nickname in a hash list entry on the mount point.
1542 */
1543 int
1544 nfs_savenickauth(nmp, cred, len, key, mdp, dposp, mrep)
1545 register struct nfsmount *nmp;
1546 kauth_cred_t cred;
1547 int len;
1548 NFSKERBKEY_T key;
1549 mbuf_t *mdp;
1550 char **dposp;
1551 mbuf_t mrep;
1552 {
1553 register struct nfsuid *nuidp;
1554 register u_long *tl;
1555 register long t1;
1556 mbuf_t md = *mdp;
1557 struct timeval ktvin, ktvout, now;
1558 u_long nick;
1559 char *dpos = *dposp, *cp2;
1560 int deltasec, error = 0;
1561
1562 if (len == (3 * NFSX_UNSIGNED)) {
1563 nfsm_dissect(tl, u_long *, 3 * NFSX_UNSIGNED);
1564 ktvin.tv_sec = *tl++;
1565 ktvin.tv_usec = *tl++;
1566 nick = fxdr_unsigned(u_long, *tl);
1567
1568 /*
1569 * Decrypt the timestamp in ecb mode.
1570 */
1571 #if NFSKERB
1572 XXX
1573 #endif
1574 ktvout.tv_sec = fxdr_unsigned(long, ktvout.tv_sec);
1575 ktvout.tv_usec = fxdr_unsigned(long, ktvout.tv_usec);
1576 microtime(&now);
1577 deltasec = now.tv_sec - ktvout.tv_sec;
1578 if (deltasec < 0)
1579 deltasec = -deltasec;
1580 /*
1581 * If ok, add it to the hash list for the mount point.
1582 */
1583 if (deltasec <= NFS_KERBCLOCKSKEW) {
1584 if (nmp->nm_numuids < nuidhash_max) {
1585 nmp->nm_numuids++;
1586 MALLOC_ZONE(nuidp, struct nfsuid *,
1587 sizeof (struct nfsuid),
1588 M_NFSUID, M_WAITOK);
1589 } else {
1590 nuidp = NULL;
1591 }
1592 if (!nuidp) {
1593 nuidp = nmp->nm_uidlruhead.tqh_first;
1594 if (!nuidp) {
1595 error = ENOMEM;
1596 goto nfsmout;
1597 }
1598 LIST_REMOVE(nuidp, nu_hash);
1599 TAILQ_REMOVE(&nmp->nm_uidlruhead, nuidp, nu_lru);
1600 kauth_cred_rele(nuidp->nu_cr);
1601 }
1602 nuidp->nu_flag = 0;
1603 kauth_cred_ref(cred);
1604 nuidp->nu_cr = cred;
1605 nuidp->nu_expire = now.tv_sec + NFS_KERBTTL;
1606 nuidp->nu_timestamp = ktvout;
1607 nuidp->nu_nickname = nick;
1608 bcopy(key, nuidp->nu_key, sizeof (key));
1609 TAILQ_INSERT_TAIL(&nmp->nm_uidlruhead, nuidp, nu_lru);
1610 LIST_INSERT_HEAD(NMUIDHASH(nmp, kauth_cred_getuid(cred)),
1611 nuidp, nu_hash);
1612 }
1613 } else
1614 nfsm_adv(nfsm_rndup(len));
1615 nfsmout:
1616 *mdp = md;
1617 *dposp = dpos;
1618 return (error);
1619 }
1620
1621 #ifndef NFS_NOSERVER
1622
1623 /*
1624 * cleanup and release a server socket structure.
1625 */
1626 static void
1627 nfsrv_slpfree(struct nfssvc_sock *slp)
1628 {
1629 struct nfsuid *nuidp, *nnuidp;
1630 struct nfsrv_descript *nwp, *nnwp;
1631
1632 if (slp->ns_so) {
1633 sock_release(slp->ns_so);
1634 slp->ns_so = NULL;
1635 }
1636 if (slp->ns_nam)
1637 mbuf_free(slp->ns_nam);
1638 if (slp->ns_raw)
1639 mbuf_freem(slp->ns_raw);
1640 if (slp->ns_rec)
1641 mbuf_freem(slp->ns_rec);
1642 slp->ns_nam = slp->ns_raw = slp->ns_rec = NULL;
1643
1644 for (nuidp = slp->ns_uidlruhead.tqh_first; nuidp != 0;
1645 nuidp = nnuidp) {
1646 nnuidp = nuidp->nu_lru.tqe_next;
1647 LIST_REMOVE(nuidp, nu_hash);
1648 TAILQ_REMOVE(&slp->ns_uidlruhead, nuidp, nu_lru);
1649 if (nuidp->nu_flag & NU_NAM)
1650 mbuf_freem(nuidp->nu_nam);
1651 kauth_cred_rele(nuidp->nu_cr);
1652 FREE_ZONE((caddr_t)nuidp,
1653 sizeof (struct nfsuid), M_NFSUID);
1654 }
1655
1656 for (nwp = slp->ns_tq.lh_first; nwp; nwp = nnwp) {
1657 nnwp = nwp->nd_tq.le_next;
1658 LIST_REMOVE(nwp, nd_tq);
1659 if (nwp->nd_cr)
1660 kauth_cred_rele(nwp->nd_cr);
1661 FREE_ZONE((caddr_t)nwp, sizeof *nwp, M_NFSRVDESC);
1662 }
1663 LIST_INIT(&slp->ns_tq);
1664
1665 lck_rw_destroy(&slp->ns_rwlock, nfs_slp_rwlock_group);
1666 lck_mtx_destroy(&slp->ns_wgmutex, nfs_slp_mutex_group);
1667 FREE(slp, M_NFSSVC);
1668 }
1669
1670 /*
1671  * Dereference a server socket structure. If it has no more references and
1672 * is no longer valid, you can throw it away.
1673 */
1674 void
1675 nfsrv_slpderef(struct nfssvc_sock *slp)
1676 {
1677 lck_mtx_lock(nfsd_mutex);
1678 lck_rw_lock_exclusive(&slp->ns_rwlock);
1679 slp->ns_sref--;
1680 if (slp->ns_sref || (slp->ns_flag & SLP_VALID)) {
1681 lck_rw_done(&slp->ns_rwlock);
1682 lck_mtx_unlock(nfsd_mutex);
1683 return;
1684 }
1685
1686 TAILQ_REMOVE(&nfssvc_sockhead, slp, ns_chain);
1687 lck_mtx_unlock(nfsd_mutex);
1688
1689 nfsrv_slpfree(slp);
1690 }
1691
1692
1693 /*
1694 * Initialize the data structures for the server.
1695 * Handshake with any new nfsds starting up to avoid any chance of
1696 * corruption.
1697 */
1698 void
1699 nfsrv_init(terminating)
1700 int terminating;
1701 {
1702 struct nfssvc_sock *slp, *nslp;
1703
1704 if (terminating) {
1705 for (slp = TAILQ_FIRST(&nfssvc_sockhead); slp != 0; slp = nslp) {
1706 nslp = TAILQ_NEXT(slp, ns_chain);
1707 if (slp->ns_flag & SLP_VALID) {
1708 lck_rw_lock_exclusive(&slp->ns_rwlock);
1709 nfsrv_zapsock(slp);
1710 lck_rw_done(&slp->ns_rwlock);
1711 }
1712 TAILQ_REMOVE(&nfssvc_sockhead, slp, ns_chain);
1713 /* grab the lock one final time in case anyone's using it */
1714 lck_rw_lock_exclusive(&slp->ns_rwlock);
1715 nfsrv_slpfree(slp);
1716 }
1717 nfsrv_cleancache(); /* And clear out server cache */
1718 /* XXX Revisit when enabling WebNFS */
1719 #ifdef WEBNFS_ENABLED
1720 } else
1721 nfs_pub.np_valid = 0;
1722 #else
1723 }
1724 #endif
1725
1726 TAILQ_INIT(&nfssvc_sockhead);
1727
1728 TAILQ_INIT(&nfsd_head);
1729 nfsd_head_flag &= ~NFSD_CHECKSLP;
1730
1731 MALLOC(nfs_udpsock, struct nfssvc_sock *, sizeof(struct nfssvc_sock),
1732 M_NFSSVC, M_WAITOK);
1733 if (nfs_udpsock) {
1734 bzero((caddr_t)nfs_udpsock, sizeof (struct nfssvc_sock));
1735 lck_rw_init(&nfs_udpsock->ns_rwlock, nfs_slp_rwlock_group, nfs_slp_lock_attr);
1736 TAILQ_INIT(&nfs_udpsock->ns_uidlruhead);
1737 TAILQ_INSERT_HEAD(&nfssvc_sockhead, nfs_udpsock, ns_chain);
1738 } else {
1739 printf("nfsrv_init() failed to allocate UDP socket\n");
1740 }
1741
1742 #if ISO
1743 MALLOC(nfs_cltpsock, struct nfssvc_sock *, sizeof(struct nfssvc_sock),
1744 M_NFSSVC, M_WAITOK);
1745 if (nfs_cltpsock) {
1746 bzero((caddr_t)nfs_cltpsock, sizeof (struct nfssvc_sock));
1747 lck_rw_init(&nfs_cltpsock->ns_rwlock, nfs_slp_rwlock_group, nfs_slp_lock_attr);
1748 TAILQ_INIT(&nfs_cltpsock->ns_uidlruhead);
1749 TAILQ_INSERT_TAIL(&nfssvc_sockhead, nfs_cltpsock, ns_chain);
1750 } else {
1751 printf("nfsrv_init() failed to allocate CLTP socket\n");
1752 }
1753 #endif
1754 }
1755
1756 /*
1757 * Add entries to the server monitor log.
1758 */
1759 static void
1760 nfsd_rt(sotype, nd, cacherep)
1761 int sotype;
1762 register struct nfsrv_descript *nd;
1763 int cacherep;
1764 {
1765 register struct drt *rt;
1766 struct timeval now;
1767
1768 rt = &nfsdrt.drt[nfsdrt.pos];
1769 if (cacherep == RC_DOIT)
1770 rt->flag = 0;
1771 else if (cacherep == RC_REPLY)
1772 rt->flag = DRT_CACHEREPLY;
1773 else
1774 rt->flag = DRT_CACHEDROP;
1775 if (sotype == SOCK_STREAM)
1776 rt->flag |= DRT_TCP;
1777 else if (nd->nd_flag & ND_NFSV3)
1778 rt->flag |= DRT_NFSV3;
1779 rt->proc = nd->nd_procnum;
1780 if (((struct sockaddr *)mbuf_data(nd->nd_nam))->sa_family == AF_INET)
1781 rt->ipadr = ((struct sockaddr_in *)mbuf_data(nd->nd_nam))->sin_addr.s_addr;
1782 else
1783 rt->ipadr = INADDR_ANY;
1784 microuptime(&now);
1785 rt->resptime = ((now.tv_sec - nd->nd_starttime.tv_sec) * 1000000) +
1786 (now.tv_usec - nd->nd_starttime.tv_usec);
1787 microtime(&rt->tstamp); // XXX unused
1788 nfsdrt.pos = (nfsdrt.pos + 1) % NFSRTTLOGSIZ;
1789 }
1790 #endif /* NFS_NOSERVER */