1 /*
2 * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * The contents of this file constitute Original Code as defined in and
7 * are subject to the Apple Public Source License Version 1.1 (the
8 * "License"). You may not use this file except in compliance with the
9 * License. Please obtain a copy of the License at
10 * http://www.apple.com/publicsource and read it before using this file.
11 *
12 * This Original Code and all software distributed under the License are
13 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
14 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
15 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
17 * License for the specific language governing rights and limitations
18 * under the License.
19 *
20 * @APPLE_LICENSE_HEADER_END@
21 */
22 /* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
23 /*
24 * Copyright (c) 1989, 1993
25 * The Regents of the University of California. All rights reserved.
26 *
27 * This code is derived from software contributed to Berkeley by
28 * Rick Macklem at The University of Guelph.
29 *
30 * Redistribution and use in source and binary forms, with or without
31 * modification, are permitted provided that the following conditions
32 * are met:
33 * 1. Redistributions of source code must retain the above copyright
34 * notice, this list of conditions and the following disclaimer.
35 * 2. Redistributions in binary form must reproduce the above copyright
36 * notice, this list of conditions and the following disclaimer in the
37 * documentation and/or other materials provided with the distribution.
38 * 3. All advertising materials mentioning features or use of this software
39 * must display the following acknowledgement:
40 * This product includes software developed by the University of
41 * California, Berkeley and its contributors.
42 * 4. Neither the name of the University nor the names of its contributors
43 * may be used to endorse or promote products derived from this software
44 * without specific prior written permission.
45 *
46 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
47 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
48 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
49 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
50 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
51 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
52 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
53 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
54 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
55 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
56 * SUCH DAMAGE.
57 *
58 * @(#)nfs_syscalls.c 8.5 (Berkeley) 3/30/95
59 * FreeBSD-Id: nfs_syscalls.c,v 1.32 1997/11/07 08:53:25 phk Exp $
60 */
61
62 #include <sys/param.h>
63 #include <sys/systm.h>
64 /* XXX CSM 11/25/97 FreeBSD's generated syscall prototypes */
65 #ifdef notyet
66 #include <sys/sysproto.h>
67 #endif
68 #include <sys/kernel.h>
69 #include <sys/file_internal.h>
70 #include <sys/filedesc.h>
71 #include <sys/stat.h>
72 #include <sys/vnode_internal.h>
73 #include <sys/mount_internal.h>
74 #include <sys/proc_internal.h> /* for fdflags */
75 #include <sys/kauth.h>
76 #include <sys/sysctl.h>
77 #include <sys/ubc.h>
78 #include <sys/uio.h>
79 #include <sys/malloc.h>
80 #include <sys/kpi_mbuf.h>
81 #include <sys/socket.h>
82 #include <sys/socketvar.h>
83 #include <sys/domain.h>
84 #include <sys/protosw.h>
85 #include <sys/fcntl.h>
86 #include <sys/lockf.h>
87 #include <sys/syslog.h>
88 #include <sys/user.h>
89 #include <sys/sysproto.h>
90 #include <sys/kpi_socket.h>
91 #include <libkern/OSAtomic.h>
92
93 #include <bsm/audit_kernel.h>
94
95 #include <netinet/in.h>
96 #include <netinet/tcp.h>
97 #if ISO
98 #include <netiso/iso.h>
99 #endif
100 #include <nfs/xdr_subs.h>
101 #include <nfs/rpcv2.h>
102 #include <nfs/nfsproto.h>
103 #include <nfs/nfs.h>
104 #include <nfs/nfsm_subs.h>
105 #include <nfs/nfsrvcache.h>
106 #include <nfs/nfsmount.h>
107 #include <nfs/nfsnode.h>
108 #include <nfs/nfsrtt.h>
109 #include <nfs/nfs_lock.h>
110
111 extern void unix_syscall_return(int);
112
113 /* Global defs. */
114 extern int (*nfsrv3_procs[NFS_NPROCS])(struct nfsrv_descript *nd,
115 struct nfssvc_sock *slp,
116 proc_t procp,
117 mbuf_t *mreqp);
118 extern int nfs_numasync;
119 extern int nfs_ioddelwri;
120 extern int nfsrtton;
121 extern struct nfsstats nfsstats;
122 extern int nfsrvw_procrastinate;
123 extern int nfsrvw_procrastinate_v3;
124
125 struct nfssvc_sock *nfs_udpsock, *nfs_cltpsock;
126 static int nuidhash_max = NFS_MAXUIDHASH;
127
128 static void nfsrv_zapsock(struct nfssvc_sock *slp);
129 static int nfssvc_iod(proc_t);
130 static int nfskerb_clientd(struct nfsmount *, struct nfsd_cargs *, int, user_addr_t, proc_t);
131
132 static int nfs_asyncdaemon[NFS_MAXASYNCDAEMON];
133
134 #ifndef NFS_NOSERVER
135 int nfsd_waiting = 0;
136 static struct nfsdrt nfsdrt;
137 int nfs_numnfsd = 0;
138 static void nfsd_rt(int sotype, struct nfsrv_descript *nd, int cacherep);
139 static int nfssvc_addsock(socket_t, mbuf_t, proc_t);
140 static int nfssvc_nfsd(struct nfsd_srvargs *,user_addr_t, proc_t);
141 static int nfssvc_export(user_addr_t, proc_t);
142
143 static int nfs_privport = 0;
144 /* XXX CSM 11/25/97 Upgrade sysctl.h someday */
145 #ifdef notyet
146 SYSCTL_INT(_vfs_nfs, NFS_NFSPRIVPORT, nfs_privport, CTLFLAG_RW, &nfs_privport, 0, "");
147 SYSCTL_INT(_vfs_nfs, OID_AUTO, gatherdelay, CTLFLAG_RW, &nfsrvw_procrastinate, 0, "");
148 SYSCTL_INT(_vfs_nfs, OID_AUTO, gatherdelay_v3, CTLFLAG_RW, &nfsrvw_procrastinate_v3, 0, "");
149 #endif
150
151 /*
152 * NFS server system calls
153 * getfh() lives here too, but maybe should move to kern/vfs_syscalls.c
154 */
155
156 /*
157 * Get file handle system call
158 */
159 int
160 getfh(proc_t p, struct getfh_args *uap, __unused int *retval)
161 {
162 vnode_t vp;
163 struct nfs_filehandle nfh;
164 int error;
165 struct nameidata nd;
166 struct vfs_context context;
167 char path[MAXPATHLEN], *ptr;
168 u_int pathlen;
169 struct nfs_exportfs *nxfs;
170 struct nfs_export *nx;
171
172 context.vc_proc = p;
173 context.vc_ucred = kauth_cred_get();
174
175 /*
176 * Must be super user
177 */
178 error = proc_suser(p);
179 if (error)
180 return (error);
181
182 error = copyinstr(uap->fname, path, MAXPATHLEN, (size_t *)&pathlen);
183 if (error)
184 return (error);
185
186 NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF | AUDITVNPATH1,
187 UIO_SYSSPACE, path, &context);
188 error = namei(&nd);
189 if (error)
190 return (error);
191 nameidone(&nd);
192
193 vp = nd.ni_vp;
194
195 // find exportfs that matches f_mntonname
196 lck_rw_lock_shared(&nfs_export_rwlock);
197 ptr = vnode_mount(vp)->mnt_vfsstat.f_mntonname;
198 LIST_FOREACH(nxfs, &nfs_exports, nxfs_next) {
199 if (!strcmp(nxfs->nxfs_path, ptr))
200 break;
201 }
202 if (!nxfs || strncmp(nxfs->nxfs_path, path, strlen(nxfs->nxfs_path))) {
203 error = EINVAL;
204 goto out;
205 }
206 // find export that best matches remainder of path
207 ptr = path + strlen(nxfs->nxfs_path);
208 while (*ptr && (*ptr == '/'))
209 ptr++;
210 LIST_FOREACH(nx, &nxfs->nxfs_exports, nx_next) {
211 int len = strlen(nx->nx_path);
212 if (len == 0) // we've hit the export entry for the root directory
213 break;
214 if (!strncmp(nx->nx_path, ptr, len))
215 break;
216 }
217 if (!nx) {
218 error = EINVAL;
219 goto out;
220 }
221
222 bzero(&nfh, sizeof(nfh));
223 nfh.nfh_xh.nxh_version = NFS_FH_VERSION;
224 nfh.nfh_xh.nxh_fsid = nxfs->nxfs_id;
225 nfh.nfh_xh.nxh_expid = nx->nx_id;
226 nfh.nfh_xh.nxh_flags = 0;
227 nfh.nfh_xh.nxh_reserved = 0;
228 nfh.nfh_len = NFS_MAX_FID_SIZE;
229 error = VFS_VPTOFH(vp, &nfh.nfh_len, &nfh.nfh_fid[0], NULL);
230 if (nfh.nfh_len > (int)NFS_MAX_FID_SIZE)
231 error = EOVERFLOW;
232 nfh.nfh_xh.nxh_fidlen = nfh.nfh_len;
233 nfh.nfh_len += sizeof(nfh.nfh_xh);
234
235 out:
236 lck_rw_done(&nfs_export_rwlock);
237 vnode_put(vp);
238 if (error)
239 return (error);
240 error = copyout((caddr_t)&nfh, uap->fhp, sizeof(nfh));
241 return (error);
242 }
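/*
 * Illustrative userspace sketch (hypothetical, not part of this file):
 * a privileged process such as rpc.lockd could obtain a handle for an
 * exported path and later hand it back to fhopen() below.  The exact
 * userspace prototype shown is an assumption; error handling is minimal.
 *
 *	struct nfs_filehandle fh;
 *	if (getfh("/exported/dir/file", &fh) == -1)
 *		err(1, "getfh");
 */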
243
244 #endif /* NFS_NOSERVER */
245
246 extern struct fileops vnops;
247
248 /*
249  * syscall for the rpc.lockd to use to translate an NFS file handle into
250 * an open descriptor.
251 *
252 * warning: do not remove the suser() call or this becomes one giant
253 * security hole.
254 */
255 int
256 fhopen( proc_t p,
257 struct fhopen_args *uap,
258 register_t *retval)
259 {
260 vnode_t vp;
261 struct nfs_filehandle nfh;
262 struct nfs_export *nx;
263 struct nfs_export_options *nxo;
264 struct flock lf;
265 struct fileproc *fp, *nfp;
266 int fmode, error, type;
267 int indx;
268 kauth_cred_t cred = proc_ucred(p);
269 struct vfs_context context;
270 kauth_action_t action;
271
272 context.vc_proc = p;
273 context.vc_ucred = cred;
274
275 /*
276 * Must be super user
277 */
278 error = suser(cred, 0);
279 if (error)
280 return (error);
281
282 fmode = FFLAGS(uap->flags);
283 /* why not allow a non-read/write open for our lockd? */
284 if (((fmode & (FREAD | FWRITE)) == 0) || (fmode & O_CREAT))
285 return (EINVAL);
286
287 error = copyin(uap->u_fhp, &nfh.nfh_len, sizeof(nfh.nfh_len));
288 if (error)
289 return (error);
290 if ((nfh.nfh_len < (int)sizeof(struct nfs_exphandle)) ||
291 (nfh.nfh_len > (int)NFS_MAX_FH_SIZE))
292 return (EINVAL);
293 error = copyin(uap->u_fhp, &nfh, sizeof(nfh.nfh_len) + nfh.nfh_len);
294 if (error)
295 return (error);
296
297 lck_rw_lock_shared(&nfs_export_rwlock);
298 /* now give me my vnode, it gets returned to me with a reference */
299 error = nfsrv_fhtovp(&nfh, NULL, TRUE, &vp, &nx, &nxo);
300 lck_rw_done(&nfs_export_rwlock);
301 if (error)
302 return (error);
303
304 /*
305 * From now on we have to make sure not
306 * to forget about the vnode.
307 * Any error that causes an abort must vnode_put(vp).
308 * Just set error = err and 'goto bad;'.
309 */
310
311 /*
312 * from vn_open
313 */
314 if (vnode_vtype(vp) == VSOCK) {
315 error = EOPNOTSUPP;
316 goto bad;
317 }
318
319 /* disallow write operations on directories */
320 if (vnode_isdir(vp) && (fmode & (FWRITE | O_TRUNC))) {
321 error = EISDIR;
322 goto bad;
323 }
324
325 /* compute action to be authorized */
326 action = 0;
327 if (fmode & FREAD)
328 action |= KAUTH_VNODE_READ_DATA;
329 if (fmode & (FWRITE | O_TRUNC))
330 action |= KAUTH_VNODE_WRITE_DATA;
331 if ((error = vnode_authorize(vp, NULL, action, &context)) != 0)
332 goto bad;
333
334 if ((error = VNOP_OPEN(vp, fmode, &context)))
335 goto bad;
336 if ((error = vnode_ref_ext(vp, fmode)))
337 goto bad;
338
339 /*
340 * end of vn_open code
341 */
342
343 // starting here... error paths should call vn_close/vnode_put
344 if ((error = falloc(p, &nfp, &indx)) != 0) {
345 vn_close(vp, fmode & FMASK, cred, p);
346 goto bad;
347 }
348 fp = nfp;
349
350 fp->f_fglob->fg_flag = fmode & FMASK;
351 fp->f_fglob->fg_type = DTYPE_VNODE;
352 fp->f_fglob->fg_ops = &vnops;
353 fp->f_fglob->fg_data = (caddr_t)vp;
354
355 // XXX do we really need to support this with fhopen()?
356 if (fmode & (O_EXLOCK | O_SHLOCK)) {
357 lf.l_whence = SEEK_SET;
358 lf.l_start = 0;
359 lf.l_len = 0;
360 if (fmode & O_EXLOCK)
361 lf.l_type = F_WRLCK;
362 else
363 lf.l_type = F_RDLCK;
364 type = F_FLOCK;
365 if ((fmode & FNONBLOCK) == 0)
366 type |= F_WAIT;
367 if ((error = VNOP_ADVLOCK(vp, (caddr_t)fp->f_fglob, F_SETLK, &lf, type, &context))) {
368 vn_close(vp, fp->f_fglob->fg_flag, fp->f_fglob->fg_cred, p);
369 fp_free(p, indx, fp);
370 return (error);
371 }
372 fp->f_fglob->fg_flag |= FHASLOCK;
373 }
374
375 vnode_put(vp);
376
377 proc_fdlock(p);
378 *fdflags(p, indx) &= ~UF_RESERVED;
379 fp_drop(p, indx, fp, 1);
380 proc_fdunlock(p);
381
382 *retval = indx;
383 return (0);
384
385 bad:
386 vnode_put(vp);
387 return (error);
388 }
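/*
 * Illustrative userspace sketch (hypothetical) continuing the getfh()
 * example above: rpc.lockd passes the same handle back in to obtain an
 * open descriptor.  Per the checks above, the flags must include FREAD
 * and/or FWRITE and must not include O_CREAT, or EINVAL is returned.
 *
 *	int fd = fhopen(&fh, O_RDWR);
 *	if (fd == -1)
 *		err(1, "fhopen");
 */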
389
390 /*
391  * NFS server pseudo system call for the nfsds
392 * Based on the flag value it either:
393 * - adds a socket to the selection list
394 * - remains in the kernel as an nfsd
395 * - remains in the kernel as an nfsiod
396 */
397 int
398 nfssvc(proc_t p, struct nfssvc_args *uap, __unused int *retval)
399 {
400 #ifndef NFS_NOSERVER
401 struct nameidata nd;
402 mbuf_t nam;
403 struct user_nfsd_args user_nfsdarg;
404 struct nfsd_srvargs nfsd_srvargs, *nsd = &nfsd_srvargs;
405 struct nfsd_cargs ncd;
406 struct nfsd *nfsd;
407 struct nfssvc_sock *slp;
408 struct nfsuid *nuidp;
409 struct nfsmount *nmp;
410 struct timeval now;
411 socket_t so;
412 struct vfs_context context;
413 struct ucred temp_cred;
414 #endif /* NFS_NOSERVER */
415 int error;
416
417 AUDIT_ARG(cmd, uap->flag);
418
419 /*
420 * Must be super user
421 */
422 error = proc_suser(p);
423 if(error)
424 return (error);
425 if (uap->flag & NFSSVC_BIOD)
426 error = nfssvc_iod(p);
427 #ifdef NFS_NOSERVER
428 else
429 error = ENXIO;
430 #else /* !NFS_NOSERVER */
431 else if (uap->flag & NFSSVC_MNTD) {
432
433 context.vc_proc = p;
434 context.vc_ucred = kauth_cred_get();
435
436 error = copyin(uap->argp, (caddr_t)&ncd, sizeof (ncd));
437 if (error)
438 return (error);
439
440 NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF | AUDITVNPATH1,
441 (proc_is64bit(p) ? UIO_USERSPACE64 : UIO_USERSPACE32),
442 CAST_USER_ADDR_T(ncd.ncd_dirp), &context);
443 error = namei(&nd);
444 if (error)
445 return (error);
446 nameidone(&nd);
447
448 if (vnode_isvroot(nd.ni_vp) == 0)
449 error = EINVAL;
450 nmp = VFSTONFS(vnode_mount(nd.ni_vp));
451 vnode_put(nd.ni_vp);
452 if (error)
453 return (error);
454
455 if ((nmp->nm_state & NFSSTA_MNTD) &&
456 (uap->flag & NFSSVC_GOTAUTH) == 0)
457 return (0);
458 nmp->nm_state |= NFSSTA_MNTD;
459 error = nfskerb_clientd(nmp, &ncd, uap->flag, uap->argp, p);
460 } else if (uap->flag & NFSSVC_ADDSOCK) {
461 if (IS_64BIT_PROCESS(p)) {
462 error = copyin(uap->argp, (caddr_t)&user_nfsdarg, sizeof(user_nfsdarg));
463 } else {
464 struct nfsd_args tmp_args;
465 error = copyin(uap->argp, (caddr_t)&tmp_args, sizeof(tmp_args));
466 if (error == 0) {
467 user_nfsdarg.sock = tmp_args.sock;
468 user_nfsdarg.name = CAST_USER_ADDR_T(tmp_args.name);
469 user_nfsdarg.namelen = tmp_args.namelen;
470 }
471 }
472 if (error)
473 return (error);
474 /* get the socket */
475 error = file_socket(user_nfsdarg.sock, &so);
476 if (error)
477 return (error);
478 /* Get the client address for connected sockets. */
479 if (user_nfsdarg.name == USER_ADDR_NULL || user_nfsdarg.namelen == 0) {
480 nam = NULL;
481 } else {
482 error = sockargs(&nam, user_nfsdarg.name, user_nfsdarg.namelen, MBUF_TYPE_SONAME);
483 if (error) {
484 /* drop the iocount file_socket() grabbed on the file descriptor */
485 file_drop(user_nfsdarg.sock);
486 return (error);
487 }
488 }
489 /*
490 * nfssvc_addsock() will grab a retain count on the socket
491 * to keep the socket from being closed when nfsd closes its
492 * file descriptor for it.
493 */
494 error = nfssvc_addsock(so, nam, p);
495 /* drop the iocount file_socket() grabbed on the file descriptor */
496 file_drop(user_nfsdarg.sock);
497 } else if (uap->flag & NFSSVC_NFSD) {
498 error = copyin(uap->argp, (caddr_t)nsd, sizeof (*nsd));
499 if (error)
500 return (error);
501
502 if ((uap->flag & NFSSVC_AUTHIN) && ((nfsd = nsd->nsd_nfsd)) &&
503 (nfsd->nfsd_slp->ns_flag & SLP_VALID)) {
504 slp = nfsd->nfsd_slp;
505
506 /*
507 * First check to see if another nfsd has already
508 * added this credential.
509 */
510 for (nuidp = NUIDHASH(slp,nsd->nsd_cr.cr_uid)->lh_first;
511 nuidp != 0; nuidp = nuidp->nu_hash.le_next) {
512 if (kauth_cred_getuid(nuidp->nu_cr) == nsd->nsd_cr.cr_uid &&
513 (!nfsd->nfsd_nd->nd_nam2 ||
514 netaddr_match(NU_NETFAM(nuidp),
515 &nuidp->nu_haddr, nfsd->nfsd_nd->nd_nam2)))
516 break;
517 }
518 if (nuidp) {
519 nfsrv_setcred(nuidp->nu_cr,nfsd->nfsd_nd->nd_cr);
520 nfsd->nfsd_nd->nd_flag |= ND_KERBFULL;
521 } else {
522 /*
523 * Nope, so we will.
524 */
525 if (slp->ns_numuids < nuidhash_max) {
526 slp->ns_numuids++;
527 nuidp = (struct nfsuid *)
528 _MALLOC_ZONE(sizeof (struct nfsuid),
529 M_NFSUID, M_WAITOK);
530 } else
531 nuidp = (struct nfsuid *)0;
532 if ((slp->ns_flag & SLP_VALID) == 0) {
533 if (nuidp) {
534 FREE_ZONE((caddr_t)nuidp,
535 sizeof (struct nfsuid), M_NFSUID);
536 slp->ns_numuids--;
537 }
538 } else {
539 if (nuidp == (struct nfsuid *)0) {
540 nuidp = slp->ns_uidlruhead.tqh_first;
541 if (!nuidp)
542 return (ENOMEM);
543 LIST_REMOVE(nuidp, nu_hash);
544 TAILQ_REMOVE(&slp->ns_uidlruhead, nuidp,
545 nu_lru);
546 if (nuidp->nu_flag & NU_NAM)
547 mbuf_freem(nuidp->nu_nam);
548 kauth_cred_rele(nuidp->nu_cr);
549 }
550 nuidp->nu_flag = 0;
551
552 if (nsd->nsd_cr.cr_ngroups > NGROUPS)
553 nsd->nsd_cr.cr_ngroups = NGROUPS;
554
555 nfsrv_setcred(&nsd->nsd_cr, &temp_cred);
556 nuidp->nu_cr = kauth_cred_create(&temp_cred);
557
558 if (!nuidp->nu_cr) {
559 FREE_ZONE(nuidp, sizeof(struct nfsuid), M_NFSUID);
560 slp->ns_numuids--;
561 return (ENOMEM);
562 }
563 nuidp->nu_timestamp = nsd->nsd_timestamp;
564 microtime(&now);
565 nuidp->nu_expire = now.tv_sec + nsd->nsd_ttl;
566 /*
567 * and save the session key in nu_key.
568 */
569 bcopy(nsd->nsd_key, nuidp->nu_key,
570 sizeof (nsd->nsd_key));
571 if (nfsd->nfsd_nd->nd_nam2) {
572 struct sockaddr_in *saddr;
573
574 saddr = mbuf_data(nfsd->nfsd_nd->nd_nam2);
575 switch (saddr->sin_family) {
576 case AF_INET:
577 nuidp->nu_flag |= NU_INETADDR;
578 nuidp->nu_inetaddr =
579 saddr->sin_addr.s_addr;
580 break;
581 case AF_ISO:
582 default:
583 nuidp->nu_flag |= NU_NAM;
584 error = mbuf_copym(nfsd->nfsd_nd->nd_nam2, 0,
585 MBUF_COPYALL, MBUF_WAITOK,
586 &nuidp->nu_nam);
587 if (error) {
588 kauth_cred_rele(nuidp->nu_cr);
589 FREE_ZONE(nuidp, sizeof(struct nfsuid), M_NFSUID);
590 slp->ns_numuids--;
591 return (error);
592 }
593 break;
594 };
595 }
596 TAILQ_INSERT_TAIL(&slp->ns_uidlruhead, nuidp,
597 nu_lru);
598 LIST_INSERT_HEAD(NUIDHASH(slp, nsd->nsd_uid),
599 nuidp, nu_hash);
600 nfsrv_setcred(nuidp->nu_cr,
601 nfsd->nfsd_nd->nd_cr);
602 nfsd->nfsd_nd->nd_flag |= ND_KERBFULL;
603 }
604 }
605 }
606 if ((uap->flag & NFSSVC_AUTHINFAIL) && (nfsd = nsd->nsd_nfsd))
607 nfsd->nfsd_flag |= NFSD_AUTHFAIL;
608 error = nfssvc_nfsd(nsd, uap->argp, p);
609 } else if (uap->flag & NFSSVC_EXPORT) {
610 error = nfssvc_export(uap->argp, p);
611 } else {
612 error = EINVAL;
613 }
614 #endif /* NFS_NOSERVER */
615 if (error == EINTR || error == ERESTART)
616 error = 0;
617 return (error);
618 }
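/*
 * Illustrative userspace sketches (hypothetical, not part of this file)
 * of the flag dispatch above, shown only to make the flag values concrete:
 *
 *	// nfsd: hand an NFS service socket to the kernel, then become a
 *	// server thread (normally does not return until signalled)
 *	struct nfsd_args na;
 *	na.sock = sockfd;	// descriptor for the service socket
 *	na.name = NULL;		// or the client's sockaddr for connected sockets
 *	na.namelen = 0;
 *	nfssvc(NFSSVC_ADDSOCK, &na);
 *	struct nfsd_srvargs srv;
 *	bzero(&srv, sizeof(srv));
 *	nfssvc(NFSSVC_NFSD, &srv);
 *
 *	// nfsiod: become an asynchronous client I/O daemon (does not return)
 *	nfssvc(NFSSVC_BIOD, NULL);
 */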
619
620 /*
621 * NFSKERB client helper daemon.
622 * Gets authorization strings for "kerb" mounts.
623 */
624 static int
625 nfskerb_clientd(
626 struct nfsmount *nmp,
627 struct nfsd_cargs *ncd,
628 int flag,
629 user_addr_t argp,
630 proc_t p)
631 {
632 struct nfsuid *nuidp, *nnuidp;
633 int error = 0;
634 struct nfsreq *rp;
635 struct timeval now;
636
637 /*
638 * First initialize some variables
639 */
640 microtime(&now);
641
642 /*
643 * If an authorization string is being passed in, get it.
644 */
645 if ((flag & NFSSVC_GOTAUTH) && (nmp->nm_state & NFSSTA_MOUNTED) &&
646 ((nmp->nm_state & NFSSTA_WAITAUTH) == 0)) {
647 if (nmp->nm_state & NFSSTA_HASAUTH)
648 panic("cld kerb");
649 if ((flag & NFSSVC_AUTHINFAIL) == 0) {
650 if (ncd->ncd_authlen <= nmp->nm_authlen &&
651 ncd->ncd_verflen <= nmp->nm_verflen &&
652 !copyin(CAST_USER_ADDR_T(ncd->ncd_authstr),nmp->nm_authstr,ncd->ncd_authlen)&&
653 !copyin(CAST_USER_ADDR_T(ncd->ncd_verfstr),nmp->nm_verfstr,ncd->ncd_verflen)){
654 nmp->nm_authtype = ncd->ncd_authtype;
655 nmp->nm_authlen = ncd->ncd_authlen;
656 nmp->nm_verflen = ncd->ncd_verflen;
657 #if NFSKERB
658 nmp->nm_key = ncd->ncd_key;
659 #endif
660 } else
661 nmp->nm_state |= NFSSTA_AUTHERR;
662 } else
663 nmp->nm_state |= NFSSTA_AUTHERR;
664 nmp->nm_state |= NFSSTA_HASAUTH;
665 wakeup((caddr_t)&nmp->nm_authlen);
666 } else {
667 nmp->nm_state |= NFSSTA_WAITAUTH;
668 }
669
670 /*
671  * Loop every second, updating the queue, until there is a termination signal.
672 */
673 while (nmp->nm_state & NFSSTA_MOUNTED) {
674 /* Get an authorization string, if required. */
675 if ((nmp->nm_state & (NFSSTA_WAITAUTH | NFSSTA_HASAUTH)) == 0) {
676 ncd->ncd_authuid = nmp->nm_authuid;
677 if (copyout((caddr_t)ncd, argp, sizeof (struct nfsd_cargs)))
678 nmp->nm_state |= NFSSTA_WAITAUTH;
679 else
680 return (ENEEDAUTH);
681 }
682 /* Wait a bit (no pun) and do it again. */
683 if ((nmp->nm_state & NFSSTA_MOUNTED) &&
684 (nmp->nm_state & (NFSSTA_WAITAUTH | NFSSTA_HASAUTH))) {
685 error = tsleep((caddr_t)&nmp->nm_authstr, PSOCK | PCATCH,
686 "nfskrbtimr", hz / 3);
687 if (error == EINTR || error == ERESTART)
688 dounmount(nmp->nm_mountp, 0, p);
689 }
690 }
691
692 /*
693 * Finally, we can free up the mount structure.
694 */
695 for (nuidp = nmp->nm_uidlruhead.tqh_first; nuidp != 0; nuidp = nnuidp) {
696 nnuidp = nuidp->nu_lru.tqe_next;
697 LIST_REMOVE(nuidp, nu_hash);
698 TAILQ_REMOVE(&nmp->nm_uidlruhead, nuidp, nu_lru);
699 kauth_cred_rele(nuidp->nu_cr);
700 FREE_ZONE((caddr_t)nuidp, sizeof (struct nfsuid), M_NFSUID);
701 }
702 /*
703 * Loop through outstanding request list and remove dangling
704 * references to defunct nfsmount struct
705 */
706 for (rp = nfs_reqq.tqh_first; rp; rp = rp->r_chain.tqe_next)
707 if (rp->r_nmp == nmp)
708 rp->r_nmp = (struct nfsmount *)0;
709 /* Need to wake up any rcvlock waiters so they notice the unmount. */
710 if (nmp->nm_state & NFSSTA_WANTRCV) {
711 nmp->nm_state &= ~NFSSTA_WANTRCV;
712 wakeup(&nmp->nm_state);
713 }
714 FREE_ZONE((caddr_t)nmp, sizeof (struct nfsmount), M_NFSMNT);
715 if (error == EWOULDBLOCK)
716 error = 0;
717 return (error);
718 }
719
720 #ifndef NFS_NOSERVER
721 /*
722 * Adds a socket to the list for servicing by nfsds.
723 */
724 static int
725 nfssvc_addsock(
726 socket_t so,
727 mbuf_t mynam,
728 __unused proc_t p)
729 {
730 int siz;
731 struct nfssvc_sock *slp;
732 struct nfssvc_sock *tslp = NULL;
733 int error, sodomain, sotype, soprotocol, on = 1;
734 struct timeval timeo;
735
736 /* make sure mbuf constants are set up */
737 if (!nfs_mbuf_mlen)
738 nfs_mbuf_init();
739
740 sock_gettype(so, &sodomain, &sotype, &soprotocol);
741
742 /*
743 * Add it to the list, as required.
744 */
745 if (soprotocol == IPPROTO_UDP) {
746 tslp = nfs_udpsock;
747 if (!tslp || (tslp->ns_flag & SLP_VALID)) {
748 mbuf_freem(mynam);
749 return (EPERM);
750 }
751 #if ISO
752 } else if (soprotocol == ISOPROTO_CLTP) {
753 tslp = nfs_cltpsock;
754 if (!tslp || (tslp->ns_flag & SLP_VALID)) {
755 mbuf_freem(mynam);
756 return (EPERM);
757 }
758 #endif /* ISO */
759 }
760 /* reserve buffer space for 2 maximally-sized packets */
761 siz = NFS_MAXPACKET;
762 if (sotype == SOCK_STREAM)
763 siz += sizeof (u_long);
764 siz *= 2;
765 if (siz > NFS_MAXSOCKBUF)
766 siz = NFS_MAXSOCKBUF;
767 if ((error = sock_setsockopt(so, SOL_SOCKET, SO_SNDBUF, &siz, sizeof(siz))) ||
768 (error = sock_setsockopt(so, SOL_SOCKET, SO_RCVBUF, &siz, sizeof(siz)))) {
769 mbuf_freem(mynam);
770 return (error);
771 }
772
773 /*
774  * Set protocol-specific options (for now, TCP only) and
775 * reserve some space. For datagram sockets, this can get called
776 * repeatedly for the same socket, but that isn't harmful.
777 */
778 if (sotype == SOCK_STREAM) {
779 sock_setsockopt(so, SOL_SOCKET, SO_KEEPALIVE, &on, sizeof(on));
780 }
781 if (sodomain == AF_INET && soprotocol == IPPROTO_TCP) {
782 sock_setsockopt(so, IPPROTO_TCP, TCP_NODELAY, &on, sizeof(on));
783 }
784
785 sock_nointerrupt(so, 0);
786
787 timeo.tv_usec = 0;
788 timeo.tv_sec = 0;
789 error = sock_setsockopt(so, SOL_SOCKET, SO_RCVTIMEO, &timeo, sizeof(timeo));
790 error = sock_setsockopt(so, SOL_SOCKET, SO_SNDTIMEO, &timeo, sizeof(timeo));
791
792 if (tslp) {
793 slp = tslp;
794 lck_mtx_lock(nfsd_mutex);
795 } else {
796 MALLOC(slp, struct nfssvc_sock *, sizeof(struct nfssvc_sock),
797 M_NFSSVC, M_WAITOK);
798 if (!slp) {
799 mbuf_freem(mynam);
800 return (ENOMEM);
801 }
802 bzero((caddr_t)slp, sizeof (struct nfssvc_sock));
803 lck_rw_init(&slp->ns_rwlock, nfs_slp_rwlock_group, nfs_slp_lock_attr);
804 lck_mtx_init(&slp->ns_wgmutex, nfs_slp_mutex_group, nfs_slp_lock_attr);
805 TAILQ_INIT(&slp->ns_uidlruhead);
806 lck_mtx_lock(nfsd_mutex);
807 TAILQ_INSERT_TAIL(&nfssvc_sockhead, slp, ns_chain);
808 }
809
810 sock_retain(so); /* grab a retain count on the socket */
811 slp->ns_so = so;
812 slp->ns_sotype = sotype;
813 slp->ns_nam = mynam;
814
815 socket_lock(so, 1);
816 so->so_upcallarg = (caddr_t)slp;
817 so->so_upcall = nfsrv_rcv;
818 so->so_rcv.sb_flags |= SB_UPCALL; /* required for freebsd merge */
819 socket_unlock(so, 1);
820
821 slp->ns_flag = SLP_VALID | SLP_NEEDQ;
822
823 nfsrv_wakenfsd(slp);
824 lck_mtx_unlock(nfsd_mutex);
825
826 return (0);
827 }
828
829 /*
830 * Called by nfssvc() for nfsds. Just loops around servicing rpc requests
831 * until it is killed by a signal.
832 */
833 static int
834 nfssvc_nfsd(nsd, argp, p)
835 struct nfsd_srvargs *nsd;
836 user_addr_t argp;
837 proc_t p;
838 {
839 mbuf_t m, mreq;
840 struct nfssvc_sock *slp;
841 struct nfsd *nfsd = nsd->nsd_nfsd;
842 struct nfsrv_descript *nd = NULL;
843 int error = 0, cacherep, writes_todo;
844 int siz, procrastinate;
845 u_quad_t cur_usec;
846 struct timeval now;
847 boolean_t funnel_state;
848
849 #ifndef nolint
850 cacherep = RC_DOIT;
851 writes_todo = 0;
852 #endif
853 if (nfsd == (struct nfsd *)0) {
854 MALLOC(nfsd, struct nfsd *, sizeof(struct nfsd), M_NFSD, M_WAITOK);
855 if (!nfsd)
856 return (ENOMEM);
857 nsd->nsd_nfsd = nfsd;
858 bzero((caddr_t)nfsd, sizeof (struct nfsd));
859 nfsd->nfsd_procp = p;
860 lck_mtx_lock(nfsd_mutex);
861 TAILQ_INSERT_TAIL(&nfsd_head, nfsd, nfsd_chain);
862 nfs_numnfsd++;
863 lck_mtx_unlock(nfsd_mutex);
864 }
865
866 funnel_state = thread_funnel_set(kernel_flock, FALSE);
867
868 /*
869 * Loop getting rpc requests until SIGKILL.
870 */
871 for (;;) {
872 if ((nfsd->nfsd_flag & NFSD_REQINPROG) == 0) {
873 lck_mtx_lock(nfsd_mutex);
874 while ((nfsd->nfsd_slp == NULL) && !(nfsd_head_flag & NFSD_CHECKSLP)) {
875 nfsd->nfsd_flag |= NFSD_WAITING;
876 nfsd_waiting++;
877 error = msleep(nfsd, nfsd_mutex, PSOCK | PCATCH, "nfsd", 0);
878 nfsd_waiting--;
879 if (error) {
880 lck_mtx_unlock(nfsd_mutex);
881 goto done;
882 }
883 }
884 if ((nfsd->nfsd_slp == NULL) && (nfsd_head_flag & NFSD_CHECKSLP)) {
885 TAILQ_FOREACH(slp, &nfssvc_sockhead, ns_chain) {
886 lck_rw_lock_shared(&slp->ns_rwlock);
887 if ((slp->ns_flag & (SLP_VALID | SLP_DOREC))
888 == (SLP_VALID | SLP_DOREC)) {
889 if (lck_rw_lock_shared_to_exclusive(&slp->ns_rwlock)) {
890 /* upgrade failed and we lost the lock; take exclusive and recheck */
891 lck_rw_lock_exclusive(&slp->ns_rwlock);
892 if ((slp->ns_flag & (SLP_VALID | SLP_DOREC))
893 != (SLP_VALID | SLP_DOREC)) {
894 /* flags no longer set, so skip this socket */
895 lck_rw_done(&slp->ns_rwlock);
896 continue;
897 }
898 }
899 slp->ns_flag &= ~SLP_DOREC;
900 slp->ns_sref++;
901 nfsd->nfsd_slp = slp;
902 lck_rw_done(&slp->ns_rwlock);
903 break;
904 }
905 lck_rw_done(&slp->ns_rwlock);
906 }
907 if (slp == 0)
908 nfsd_head_flag &= ~NFSD_CHECKSLP;
909 }
910 lck_mtx_unlock(nfsd_mutex);
911 if ((slp = nfsd->nfsd_slp) == NULL)
912 continue;
913 lck_rw_lock_exclusive(&slp->ns_rwlock);
914 if (slp->ns_flag & SLP_VALID) {
915 if (slp->ns_flag & SLP_DISCONN) {
916 nfsrv_zapsock(slp);
917 } else if (slp->ns_flag & SLP_NEEDQ) {
918 slp->ns_flag &= ~SLP_NEEDQ;
919 nfsrv_rcv_locked(slp->ns_so, slp, MBUF_WAITOK);
920 }
921 error = nfsrv_dorec(slp, nfsd, &nd);
922 microuptime(&now);
923 cur_usec = (u_quad_t)now.tv_sec * 1000000 +
924 (u_quad_t)now.tv_usec;
925 if (error && slp->ns_wgtime && (slp->ns_wgtime <= cur_usec)) {
926 error = 0;
927 cacherep = RC_DOIT;
928 writes_todo = 1;
929 } else
930 writes_todo = 0;
931 nfsd->nfsd_flag |= NFSD_REQINPROG;
932 }
933 lck_rw_done(&slp->ns_rwlock);
934 } else {
935 error = 0;
936 slp = nfsd->nfsd_slp;
937 }
938 if (error || (slp->ns_flag & SLP_VALID) == 0) {
939 if (nd) {
940 if (nd->nd_nam2)
941 mbuf_freem(nd->nd_nam2);
942 if (nd->nd_cr)
943 kauth_cred_rele(nd->nd_cr);
944 FREE_ZONE((caddr_t)nd,
945 sizeof *nd, M_NFSRVDESC);
946 nd = NULL;
947 }
948 nfsd->nfsd_slp = NULL;
949 nfsd->nfsd_flag &= ~NFSD_REQINPROG;
950 nfsrv_slpderef(slp);
951 continue;
952 }
953 if (nd) {
954 microuptime(&nd->nd_starttime);
955 if (nd->nd_nam2)
956 nd->nd_nam = nd->nd_nam2;
957 else
958 nd->nd_nam = slp->ns_nam;
959
960 /*
961 * Check to see if authorization is needed.
962 */
963 if (nfsd->nfsd_flag & NFSD_NEEDAUTH) {
964 nfsd->nfsd_flag &= ~NFSD_NEEDAUTH;
965 nsd->nsd_haddr = ((struct sockaddr_in *)mbuf_data(nd->nd_nam))->sin_addr.s_addr;
966 nsd->nsd_authlen = nfsd->nfsd_authlen;
967 nsd->nsd_verflen = nfsd->nfsd_verflen;
968 if (!copyout(nfsd->nfsd_authstr,CAST_USER_ADDR_T(nsd->nsd_authstr),
969 nfsd->nfsd_authlen) &&
970 !copyout(nfsd->nfsd_verfstr, CAST_USER_ADDR_T(nsd->nsd_verfstr),
971 nfsd->nfsd_verflen) &&
972 !copyout((caddr_t)nsd, argp, sizeof (*nsd))) {
973 thread_funnel_set(kernel_flock, funnel_state);
974 return (ENEEDAUTH);
975 }
976 cacherep = RC_DROPIT;
977 } else
978 cacherep = nfsrv_getcache(nd, slp, &mreq);
979
980 if (nfsd->nfsd_flag & NFSD_AUTHFAIL) {
981 nfsd->nfsd_flag &= ~NFSD_AUTHFAIL;
982 nd->nd_procnum = NFSPROC_NOOP;
983 nd->nd_repstat = (NFSERR_AUTHERR | AUTH_TOOWEAK);
984 cacherep = RC_DOIT;
985 } else if (nfs_privport) {
986 /* Check if source port is privileged */
987 u_short port;
988 struct sockaddr *nam = mbuf_data(nd->nd_nam);
989 struct sockaddr_in *sin;
990
991 sin = (struct sockaddr_in *)nam;
992 port = ntohs(sin->sin_port);
993 if (port >= IPPORT_RESERVED &&
994 nd->nd_procnum != NFSPROC_NULL) {
995 char strbuf[MAX_IPv4_STR_LEN];
996 nd->nd_procnum = NFSPROC_NOOP;
997 nd->nd_repstat = (NFSERR_AUTHERR | AUTH_TOOWEAK);
998 cacherep = RC_DOIT;
999 printf("NFS request from unprivileged port (%s:%d)\n",
1000 inet_ntop(AF_INET, &sin->sin_addr, strbuf, sizeof(strbuf)),
1001 port);
1002 }
1003 }
1004
1005 }
1006
1007 /*
1008  * Loop to get all the write rpc replies that have been
1009 * gathered together.
1010 */
1011 do {
1012 switch (cacherep) {
1013 case RC_DOIT:
1014 if (nd && (nd->nd_flag & ND_NFSV3))
1015 procrastinate = nfsrvw_procrastinate_v3;
1016 else
1017 procrastinate = nfsrvw_procrastinate;
1018 lck_rw_lock_shared(&nfs_export_rwlock);
1019 if (writes_todo || ((nd->nd_procnum == NFSPROC_WRITE) && (procrastinate > 0)))
1020 error = nfsrv_writegather(&nd, slp, nfsd->nfsd_procp, &mreq);
1021 else
1022 error = (*(nfsrv3_procs[nd->nd_procnum]))(nd, slp, nfsd->nfsd_procp, &mreq);
1023 lck_rw_done(&nfs_export_rwlock);
1024 if (mreq == NULL)
1025 break;
1026 if (error) {
1027 OSAddAtomic(1, (SInt32*)&nfsstats.srv_errs);
1028 nfsrv_updatecache(nd, FALSE, mreq);
1029 if (nd->nd_nam2) {
1030 mbuf_freem(nd->nd_nam2);
1031 nd->nd_nam2 = NULL;
1032 }
1033 break;
1034 }
1035 OSAddAtomic(1, (SInt32*)&nfsstats.srvrpccnt[nd->nd_procnum]);
1036 nfsrv_updatecache(nd, TRUE, mreq);
1037 nd->nd_mrep = NULL;
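			/* FALLTHROUGH: send the freshly built reply below, just like a cached one */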
1038 case RC_REPLY:
1039 m = mreq;
1040 siz = 0;
1041 while (m) {
1042 siz += mbuf_len(m);
1043 m = mbuf_next(m);
1044 }
1045 if (siz <= 0 || siz > NFS_MAXPACKET) {
1046 printf("mbuf siz=%d\n",siz);
1047 panic("Bad nfs svc reply");
1048 }
1049 m = mreq;
1050 mbuf_pkthdr_setlen(m, siz);
1051 error = mbuf_pkthdr_setrcvif(m, NULL);
1052 if (error)
1053 panic("nfsd setrcvif failed: %d", error);
1054 /*
1055  * For stream protocols, prepend a 4-byte Sun RPC Record Mark
1056  * (high bit = last fragment, low 31 bits = fragment length).
1057 */
1058 if (slp->ns_sotype == SOCK_STREAM) {
1059 error = mbuf_prepend(&m, NFSX_UNSIGNED, MBUF_WAITOK);
1060 if (!error)
1061 *(u_long*)mbuf_data(m) = htonl(0x80000000 | siz);
1062 }
1063 if (!error) {
1064 if (slp->ns_flag & SLP_VALID) {
1065 error = nfs_send(slp->ns_so, nd->nd_nam2, m, NULL);
1066 } else {
1067 error = EPIPE;
1068 mbuf_freem(m);
1069 }
1070 } else {
1071 mbuf_freem(m);
1072 }
1073 mreq = NULL;
1074 if (nfsrtton)
1075 nfsd_rt(slp->ns_sotype, nd, cacherep);
1076 if (nd->nd_nam2) {
1077 mbuf_freem(nd->nd_nam2);
1078 nd->nd_nam2 = NULL;
1079 }
1080 if (nd->nd_mrep) {
1081 mbuf_freem(nd->nd_mrep);
1082 nd->nd_mrep = NULL;
1083 }
1084 if (error == EPIPE) {
1085 lck_rw_lock_exclusive(&slp->ns_rwlock);
1086 nfsrv_zapsock(slp);
1087 lck_rw_done(&slp->ns_rwlock);
1088 }
1089 if (error == EINTR || error == ERESTART) {
1090 if (nd->nd_cr)
1091 kauth_cred_rele(nd->nd_cr);
1092 FREE_ZONE((caddr_t)nd, sizeof *nd, M_NFSRVDESC);
1093 nfsrv_slpderef(slp);
1094 goto done;
1095 }
1096 break;
1097 case RC_DROPIT:
1098 if (nfsrtton)
1099 nfsd_rt(slp->ns_sotype, nd, cacherep);
1100 mbuf_freem(nd->nd_mrep);
1101 mbuf_freem(nd->nd_nam2);
1102 nd->nd_mrep = nd->nd_nam2 = NULL;
1103 break;
1104 };
1105 if (nd) {
1106 if (nd->nd_mrep)
1107 mbuf_freem(nd->nd_mrep);
1108 if (nd->nd_nam2)
1109 mbuf_freem(nd->nd_nam2);
1110 if (nd->nd_cr)
1111 kauth_cred_rele(nd->nd_cr);
1112 FREE_ZONE((caddr_t)nd, sizeof *nd, M_NFSRVDESC);
1113 nd = NULL;
1114 }
1115
1116 /*
1117 * Check to see if there are outstanding writes that
1118 * need to be serviced.
1119 */
1120 microuptime(&now);
1121 cur_usec = (u_quad_t)now.tv_sec * 1000000 +
1122 (u_quad_t)now.tv_usec;
1123 if (slp->ns_wgtime && (slp->ns_wgtime <= cur_usec)) {
1124 cacherep = RC_DOIT;
1125 writes_todo = 1;
1126 } else {
1127 writes_todo = 0;
1128 }
1129 } while (writes_todo);
1130 lck_rw_lock_exclusive(&slp->ns_rwlock);
1131 if (nfsrv_dorec(slp, nfsd, &nd)) {
1132 lck_rw_done(&slp->ns_rwlock);
1133 nfsd->nfsd_flag &= ~NFSD_REQINPROG;
1134 nfsd->nfsd_slp = NULL;
1135 nfsrv_slpderef(slp);
1136 } else {
1137 lck_rw_done(&slp->ns_rwlock);
1138 }
1139 }
1140 done:
1141 thread_funnel_set(kernel_flock, funnel_state);
1142 lck_mtx_lock(nfsd_mutex);
1143 TAILQ_REMOVE(&nfsd_head, nfsd, nfsd_chain);
1144 FREE(nfsd, M_NFSD);
1145 nsd->nsd_nfsd = (struct nfsd *)0;
1146 if (--nfs_numnfsd == 0)
1147 nfsrv_init(TRUE); /* Reinitialize everything */
1148 lck_mtx_unlock(nfsd_mutex);
1149 return (error);
1150 }
1151
1152 static int
1153 nfssvc_export(user_addr_t argp, proc_t p)
1154 {
1155 int error = 0, is_64bit;
1156 struct user_nfs_export_args unxa;
1157 struct vfs_context context;
1158
1159 context.vc_proc = p;
1160 context.vc_ucred = kauth_cred_get();
1161 is_64bit = IS_64BIT_PROCESS(p);
1162
1163 /* copy in pointers to path and export args */
1164 if (is_64bit) {
1165 error = copyin(argp, (caddr_t)&unxa, sizeof(unxa));
1166 } else {
1167 struct nfs_export_args tnxa;
1168 error = copyin(argp, (caddr_t)&tnxa, sizeof(tnxa));
1169 if (error == 0) {
1170 /* munge into LP64 version of nfs_export_args structure */
1171 unxa.nxa_fsid = tnxa.nxa_fsid;
1172 unxa.nxa_expid = tnxa.nxa_expid;
1173 unxa.nxa_fspath = CAST_USER_ADDR_T(tnxa.nxa_fspath);
1174 unxa.nxa_exppath = CAST_USER_ADDR_T(tnxa.nxa_exppath);
1175 unxa.nxa_flags = tnxa.nxa_flags;
1176 unxa.nxa_netcount = tnxa.nxa_netcount;
1177 unxa.nxa_nets = CAST_USER_ADDR_T(tnxa.nxa_nets);
1178 }
1179 }
1180 if (error)
1181 return (error);
1182
1183 error = nfsrv_export(&unxa, &context);
1184
1185 return (error);
1186 }
1187
1188 #endif /* NFS_NOSERVER */
1189
1190 int nfs_defect = 0;
1191 /* XXX CSM 11/25/97 Upgrade sysctl.h someday */
1192 #ifdef notyet
1193 SYSCTL_INT(_vfs_nfs, OID_AUTO, defect, CTLFLAG_RW, &nfs_defect, 0, "");
1194 #endif
1195
1196 int
1197 nfsclnt(proc_t p, struct nfsclnt_args *uap, __unused int *retval)
1198 {
1199 struct lockd_ans la;
1200 int error;
1201
1202 if (uap->flag == NFSCLNT_LOCKDWAIT) {
1203 return (nfslockdwait(p));
1204 }
1205 if (uap->flag == NFSCLNT_LOCKDANS) {
1206 error = copyin(uap->argp, &la, sizeof(la));
1207 return (error != 0 ? error : nfslockdans(p, &la));
1208 }
1209 if (uap->flag == NFSCLNT_LOCKDFD)
1210 return (nfslockdfd(p, CAST_DOWN(int, uap->argp)));
1211 return EINVAL;
1212 }
1213
1214
1215 static int nfssvc_iod_continue(int);
1216
1217 /*
1218 * Asynchronous I/O daemons for client nfs.
1219 * They do read-ahead and write-behind operations on the block I/O cache.
1220  * This routine never returns unless it fails or gets killed.
1221 */
1222 static int
1223 nfssvc_iod(__unused proc_t p)
1224 {
1225 register int i, myiod;
1226 struct uthread *ut;
1227
1228 /*
1229 * Assign my position or return error if too many already running
1230 */
1231 myiod = -1;
1232 for (i = 0; i < NFS_MAXASYNCDAEMON; i++)
1233 if (nfs_asyncdaemon[i] == 0) {
1234 nfs_asyncdaemon[i]++;
1235 myiod = i;
1236 break;
1237 }
1238 if (myiod == -1)
1239 return (EBUSY);
1240 nfs_numasync++;
1241
1242 /* stuff myiod into uthread to get off local stack for continuation */
1243
1244 ut = (struct uthread *)get_bsdthread_info(current_thread());
1245 ut->uu_state.uu_nfs_myiod = myiod; /* squirrel away for continuation */
1246
1247 nfssvc_iod_continue(0);
1248 /* NOTREACHED */
1249 return (0);
1250 }
1251
1252 /*
1253 * Continuation for Asynchronous I/O daemons for client nfs.
1254 */
1255 static int
1256 nfssvc_iod_continue(int error)
1257 {
1258 register struct nfsbuf *bp;
1259 register int i, myiod;
1260 struct nfsmount *nmp;
1261 struct uthread *ut;
1262 proc_t p;
1263
1264 /*
1265 * real myiod is stored in uthread, recover it
1266 */
1267 ut = (struct uthread *)get_bsdthread_info(current_thread());
1268 myiod = ut->uu_state.uu_nfs_myiod;
1269 p = current_proc(); // XXX
1270
1271 /*
1272  * Just loop around doing our stuff until SIGKILL
1273 * - actually we don't loop with continuations...
1274 */
1275 lck_mtx_lock(nfs_iod_mutex);
1276 for (;;) {
1277 while (((nmp = nfs_iodmount[myiod]) == NULL
1278 || nmp->nm_bufq.tqh_first == NULL)
1279 && error == 0 && nfs_ioddelwri == 0) {
1280 if (nmp)
1281 nmp->nm_bufqiods--;
1282 nfs_iodwant[myiod] = p; // XXX this doesn't need to be a proc_t
1283 nfs_iodmount[myiod] = NULL;
1284 error = msleep0((caddr_t)&nfs_iodwant[myiod], nfs_iod_mutex,
1285 PWAIT | PCATCH | PDROP, "nfsidl", 0, nfssvc_iod_continue);
1286 lck_mtx_lock(nfs_iod_mutex);
1287 }
1288 if (error) {
1289 nfs_asyncdaemon[myiod] = 0;
1290 if (nmp) nmp->nm_bufqiods--;
1291 nfs_iodwant[myiod] = NULL;
1292 nfs_iodmount[myiod] = NULL;
1293 lck_mtx_unlock(nfs_iod_mutex);
1294 nfs_numasync--;
1295 if (error == EINTR || error == ERESTART)
1296 error = 0;
1297 unix_syscall_return(error);
1298 }
1299 if (nmp != NULL) {
1300 while ((bp = TAILQ_FIRST(&nmp->nm_bufq)) != NULL) {
1301 /* Take one off the front of the list */
1302 TAILQ_REMOVE(&nmp->nm_bufq, bp, nb_free);
1303 bp->nb_free.tqe_next = NFSNOLIST;
1304 nmp->nm_bufqlen--;
1305 if (nmp->nm_bufqwant && nmp->nm_bufqlen < 2 * nfs_numasync) {
1306 nmp->nm_bufqwant = FALSE;
1307 lck_mtx_unlock(nfs_iod_mutex);
1308 wakeup(&nmp->nm_bufq);
1309 } else {
1310 lck_mtx_unlock(nfs_iod_mutex);
1311 }
1312
1313 SET(bp->nb_flags, NB_IOD);
1314 if (ISSET(bp->nb_flags, NB_READ))
1315 nfs_doio(bp, bp->nb_rcred, NULL);
1316 else
1317 nfs_doio(bp, bp->nb_wcred, NULL);
1318
1319 lck_mtx_lock(nfs_iod_mutex);
1320 /*
1321  * If there is more than one iod on this mount, then defect
1322  * so that the iods can be shared out fairly among the mounts
1323 */
1324 if (nfs_defect && nmp->nm_bufqiods > 1) {
1325 nfs_iodmount[myiod] = NULL;
1326 nmp->nm_bufqiods--;
1327 break;
1328 }
1329 }
1330 }
1331 lck_mtx_unlock(nfs_iod_mutex);
1332
1333 if (nfs_ioddelwri) {
1334 i = 0;
1335 nfs_ioddelwri = 0;
1336 lck_mtx_lock(nfs_buf_mutex);
1337 while (i < 8 && (bp = TAILQ_FIRST(&nfsbufdelwri)) != NULL) {
1338 struct nfsnode *np = VTONFS(bp->nb_vp);
1339 nfs_buf_remfree(bp);
1340 nfs_buf_refget(bp);
1341 while ((error = nfs_buf_acquire(bp, 0, 0, 0)) == EAGAIN);
1342 nfs_buf_refrele(bp);
1343 if (error)
1344 break;
1345 if (!bp->nb_vp) {
1346 /* buffer is no longer valid */
1347 nfs_buf_drop(bp);
1348 continue;
1349 }
1350 if (ISSET(bp->nb_flags, NB_NEEDCOMMIT)) {
1351 /* put buffer at end of delwri list */
1352 TAILQ_INSERT_TAIL(&nfsbufdelwri, bp, nb_free);
1353 nfsbufdelwricnt++;
1354 nfs_buf_drop(bp);
1355 lck_mtx_unlock(nfs_buf_mutex);
1356 nfs_flushcommits(np->n_vnode, NULL, 1);
1357 } else {
1358 SET(bp->nb_flags, (NB_ASYNC | NB_IOD));
1359 lck_mtx_unlock(nfs_buf_mutex);
1360 nfs_buf_write(bp);
1361 }
1362 i++;
1363 lck_mtx_lock(nfs_buf_mutex);
1364 }
1365 lck_mtx_unlock(nfs_buf_mutex);
1366 }
1367
1368 lck_mtx_lock(nfs_iod_mutex);
1369 }
1370 }
1371
1372 /*
1373 * Shut down a socket associated with an nfssvc_sock structure.
1374 * Should be called with the send lock set, if required.
1375  * The trick here is to increment the sref at the start, so that the nfsds
1376  * will stop using it, and to clear ns_flag at the end so that it will not be
1377 * reassigned during cleanup.
1378 */
1379 static void
1380 nfsrv_zapsock(struct nfssvc_sock *slp)
1381 {
1382 socket_t so;
1383
1384 if ((slp->ns_flag & SLP_VALID) == 0)
1385 return;
1386 slp->ns_flag &= ~SLP_ALLFLAGS;
1387
1388 so = slp->ns_so;
1389 if (so == NULL)
1390 return;
1391
1392 socket_lock(so, 1);
1393 so->so_upcall = NULL;
1394 so->so_rcv.sb_flags &= ~SB_UPCALL;
1395 socket_unlock(so, 1);
1396 sock_shutdown(so, SHUT_RDWR);
1397 }
1398
1399 /*
1400  * Get an authorization string for the uid by having the mount_nfs process
1401  * sitting on this mount point pop back out of the kernel and do it.
1402 */
1403 int
1404 nfs_getauth(nmp, rep, cred, auth_str, auth_len, verf_str, verf_len, key)
1405 register struct nfsmount *nmp;
1406 struct nfsreq *rep;
1407 kauth_cred_t cred;
1408 char **auth_str;
1409 int *auth_len;
1410 char *verf_str;
1411 int *verf_len;
1412 NFSKERBKEY_T key; /* return session key */
1413 {
1414 int error = 0;
1415
1416 while ((nmp->nm_state & NFSSTA_WAITAUTH) == 0) {
1417 nmp->nm_state |= NFSSTA_WANTAUTH;
1418 (void) tsleep((caddr_t)&nmp->nm_authtype, PSOCK,
1419 "nfsauth1", 2 * hz);
1420 error = nfs_sigintr(nmp, rep, rep->r_procp);
1421 if (error) {
1422 nmp->nm_state &= ~NFSSTA_WANTAUTH;
1423 return (error);
1424 }
1425 }
1426 nmp->nm_state &= ~NFSSTA_WANTAUTH;
1427 MALLOC(*auth_str, char *, RPCAUTH_MAXSIZ, M_TEMP, M_WAITOK);
1428 if (!*auth_str)
1429 return (ENOMEM);
1430 nmp->nm_authstr = *auth_str;
1431 nmp->nm_authlen = RPCAUTH_MAXSIZ;
1432 nmp->nm_verfstr = verf_str;
1433 nmp->nm_verflen = *verf_len;
1434 nmp->nm_authuid = kauth_cred_getuid(cred);
1435 nmp->nm_state &= ~NFSSTA_WAITAUTH;
1436 wakeup((caddr_t)&nmp->nm_authstr);
1437
1438 /*
1439 * And wait for mount_nfs to do its stuff.
1440 */
1441 while ((nmp->nm_state & NFSSTA_HASAUTH) == 0 && error == 0) {
1442 (void) tsleep((caddr_t)&nmp->nm_authlen, PSOCK,
1443 "nfsauth2", 2 * hz);
1444 error = nfs_sigintr(nmp, rep, rep->r_procp);
1445 }
1446 if (nmp->nm_state & NFSSTA_AUTHERR) {
1447 nmp->nm_state &= ~NFSSTA_AUTHERR;
1448 error = EAUTH;
1449 }
1450 if (error)
1451 FREE(*auth_str, M_TEMP);
1452 else {
1453 *auth_len = nmp->nm_authlen;
1454 *verf_len = nmp->nm_verflen;
1455 bcopy((caddr_t)nmp->nm_key, (caddr_t)key, sizeof (key));
1456 }
1457 nmp->nm_state &= ~NFSSTA_HASAUTH;
1458 nmp->nm_state |= NFSSTA_WAITAUTH;
1459 if (nmp->nm_state & NFSSTA_WANTAUTH) {
1460 nmp->nm_state &= ~NFSSTA_WANTAUTH;
1461 wakeup((caddr_t)&nmp->nm_authtype);
1462 }
1463 return (error);
1464 }
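/*
 * Informal summary of the handshake implemented above (an assumption-free
 * restatement of the code, not a spec): nfs_getauth() clears
 * NFSSTA_WAITAUTH, publishes the request in nm_authstr, nm_authlen,
 * nm_verfstr, nm_verflen and nm_authuid, wakes &nmp->nm_authstr and then
 * sleeps on &nmp->nm_authlen.  The nfskerb_clientd() loop, woken via
 * &nmp->nm_authstr or by its periodic timeout, sees that neither
 * NFSSTA_WAITAUTH nor NFSSTA_HASAUTH is set, copies the request out and
 * returns ENEEDAUTH so that mount_nfs can build the strings and re-enter
 * nfssvc() with NFSSVC_MNTD|NFSSVC_GOTAUTH.  At that point
 * nfskerb_clientd() copies the strings in, sets NFSSTA_HASAUTH (or
 * NFSSTA_AUTHERR) and wakes &nmp->nm_authlen to complete the exchange.
 */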
1465
1466 /*
1467 * Get a nickname authenticator and verifier.
1468 */
1469 int
1470 nfs_getnickauth(
1471 struct nfsmount *nmp,
1472 kauth_cred_t cred,
1473 char **auth_str,
1474 int *auth_len,
1475 char *verf_str,
1476 __unused int verf_len)
1477 {
1478 register struct nfsuid *nuidp;
1479 register u_long *nickp, *verfp;
1480 struct timeval ktvin, ktvout, now;
1481
1482 #if DIAGNOSTIC
1483 if (verf_len < (4 * NFSX_UNSIGNED))
1484 panic("nfs_getnickauth verf too small");
1485 #endif
1486 for (nuidp = NMUIDHASH(nmp, kauth_cred_getuid(cred))->lh_first;
1487 nuidp != 0; nuidp = nuidp->nu_hash.le_next) {
1488 if (kauth_cred_getuid(nuidp->nu_cr) == kauth_cred_getuid(cred))
1489 break;
1490 }
1491 microtime(&now);
1492 if (!nuidp || nuidp->nu_expire < now.tv_sec)
1493 return (EACCES);
1494
1495 MALLOC(nickp, u_long *, 2 * NFSX_UNSIGNED, M_TEMP, M_WAITOK);
1496 if (!nickp)
1497 return (ENOMEM);
1498
1499 /*
1500 * Move to the end of the lru list (end of lru == most recently used).
1501 */
1502 TAILQ_REMOVE(&nmp->nm_uidlruhead, nuidp, nu_lru);
1503 TAILQ_INSERT_TAIL(&nmp->nm_uidlruhead, nuidp, nu_lru);
1504
1505 *nickp++ = txdr_unsigned(RPCAKN_NICKNAME);
1506 *nickp = txdr_unsigned(nuidp->nu_nickname);
1507 *auth_str = (char *)nickp;
1508 *auth_len = 2 * NFSX_UNSIGNED;
1509
1510 /*
1511 * Now we must encrypt the verifier and package it up.
1512 */
1513 verfp = (u_long *)verf_str;
1514 *verfp++ = txdr_unsigned(RPCAKN_NICKNAME);
1515 microtime(&now);
1516 if (now.tv_sec > nuidp->nu_timestamp.tv_sec ||
1517 (now.tv_sec == nuidp->nu_timestamp.tv_sec &&
1518 now.tv_usec > nuidp->nu_timestamp.tv_usec))
1519 nuidp->nu_timestamp = now;
1520 else
1521 nuidp->nu_timestamp.tv_usec++;
1522 ktvin.tv_sec = txdr_unsigned(nuidp->nu_timestamp.tv_sec);
1523 ktvin.tv_usec = txdr_unsigned(nuidp->nu_timestamp.tv_usec);
1524
1525 /*
1526 * Now encrypt the timestamp verifier in ecb mode using the session
1527 * key.
1528 */
1529 #if NFSKERB
1530 XXX
1531 #endif
1532
1533 *verfp++ = ktvout.tv_sec;
1534 *verfp++ = ktvout.tv_usec;
1535 *verfp = 0;
1536 return (0);
1537 }
1538
1539 /*
1540 * Save the current nickname in a hash list entry on the mount point.
1541 */
1542 int
1543 nfs_savenickauth(nmp, cred, len, key, mdp, dposp, mrep)
1544 register struct nfsmount *nmp;
1545 kauth_cred_t cred;
1546 int len;
1547 NFSKERBKEY_T key;
1548 mbuf_t *mdp;
1549 char **dposp;
1550 mbuf_t mrep;
1551 {
1552 register struct nfsuid *nuidp;
1553 register u_long *tl;
1554 register long t1;
1555 mbuf_t md = *mdp;
1556 struct timeval ktvin, ktvout, now;
1557 u_long nick;
1558 char *dpos = *dposp, *cp2;
1559 int deltasec, error = 0;
1560
1561 if (len == (3 * NFSX_UNSIGNED)) {
1562 nfsm_dissect(tl, u_long *, 3 * NFSX_UNSIGNED);
1563 ktvin.tv_sec = *tl++;
1564 ktvin.tv_usec = *tl++;
1565 nick = fxdr_unsigned(u_long, *tl);
1566
1567 /*
1568 * Decrypt the timestamp in ecb mode.
1569 */
1570 #if NFSKERB
1571 XXX
1572 #endif
1573 ktvout.tv_sec = fxdr_unsigned(long, ktvout.tv_sec);
1574 ktvout.tv_usec = fxdr_unsigned(long, ktvout.tv_usec);
1575 microtime(&now);
1576 deltasec = now.tv_sec - ktvout.tv_sec;
1577 if (deltasec < 0)
1578 deltasec = -deltasec;
1579 /*
1580 * If ok, add it to the hash list for the mount point.
1581 */
1582 if (deltasec <= NFS_KERBCLOCKSKEW) {
1583 if (nmp->nm_numuids < nuidhash_max) {
1584 nmp->nm_numuids++;
1585 MALLOC_ZONE(nuidp, struct nfsuid *,
1586 sizeof (struct nfsuid),
1587 M_NFSUID, M_WAITOK);
1588 } else {
1589 nuidp = NULL;
1590 }
1591 if (!nuidp) {
1592 nuidp = nmp->nm_uidlruhead.tqh_first;
1593 if (!nuidp) {
1594 error = ENOMEM;
1595 goto nfsmout;
1596 }
1597 LIST_REMOVE(nuidp, nu_hash);
1598 TAILQ_REMOVE(&nmp->nm_uidlruhead, nuidp, nu_lru);
1599 kauth_cred_rele(nuidp->nu_cr);
1600 }
1601 nuidp->nu_flag = 0;
1602 kauth_cred_ref(cred);
1603 nuidp->nu_cr = cred;
1604 nuidp->nu_expire = now.tv_sec + NFS_KERBTTL;
1605 nuidp->nu_timestamp = ktvout;
1606 nuidp->nu_nickname = nick;
1607 bcopy(key, nuidp->nu_key, sizeof (key));
1608 TAILQ_INSERT_TAIL(&nmp->nm_uidlruhead, nuidp, nu_lru);
1609 LIST_INSERT_HEAD(NMUIDHASH(nmp, kauth_cred_getuid(cred)),
1610 nuidp, nu_hash);
1611 }
1612 } else
1613 nfsm_adv(nfsm_rndup(len));
1614 nfsmout:
1615 *mdp = md;
1616 *dposp = dpos;
1617 return (error);
1618 }
1619
1620 #ifndef NFS_NOSERVER
1621
1622 /*
1623  * Clean up and release a server socket structure.
1624 */
1625 static void
1626 nfsrv_slpfree(struct nfssvc_sock *slp)
1627 {
1628 struct nfsuid *nuidp, *nnuidp;
1629 struct nfsrv_descript *nwp, *nnwp;
1630
1631 if (slp->ns_so) {
1632 sock_release(slp->ns_so);
1633 slp->ns_so = NULL;
1634 }
1635 if (slp->ns_nam)
1636 mbuf_free(slp->ns_nam);
1637 if (slp->ns_raw)
1638 mbuf_freem(slp->ns_raw);
1639 if (slp->ns_rec)
1640 mbuf_freem(slp->ns_rec);
1641 slp->ns_nam = slp->ns_raw = slp->ns_rec = NULL;
1642
1643 for (nuidp = slp->ns_uidlruhead.tqh_first; nuidp != 0;
1644 nuidp = nnuidp) {
1645 nnuidp = nuidp->nu_lru.tqe_next;
1646 LIST_REMOVE(nuidp, nu_hash);
1647 TAILQ_REMOVE(&slp->ns_uidlruhead, nuidp, nu_lru);
1648 if (nuidp->nu_flag & NU_NAM)
1649 mbuf_freem(nuidp->nu_nam);
1650 kauth_cred_rele(nuidp->nu_cr);
1651 FREE_ZONE((caddr_t)nuidp,
1652 sizeof (struct nfsuid), M_NFSUID);
1653 }
1654
1655 for (nwp = slp->ns_tq.lh_first; nwp; nwp = nnwp) {
1656 nnwp = nwp->nd_tq.le_next;
1657 LIST_REMOVE(nwp, nd_tq);
1658 if (nwp->nd_cr)
1659 kauth_cred_rele(nwp->nd_cr);
1660 FREE_ZONE((caddr_t)nwp, sizeof *nwp, M_NFSRVDESC);
1661 }
1662 LIST_INIT(&slp->ns_tq);
1663
1664 lck_rw_destroy(&slp->ns_rwlock, nfs_slp_rwlock_group);
1665 lck_mtx_destroy(&slp->ns_wgmutex, nfs_slp_mutex_group);
1666 FREE(slp, M_NFSSVC);
1667 }
1668
1669 /*
1670  * Dereference a server socket structure. If it has no more references and
1671 * is no longer valid, you can throw it away.
1672 */
1673 void
1674 nfsrv_slpderef(struct nfssvc_sock *slp)
1675 {
1676 lck_mtx_lock(nfsd_mutex);
1677 lck_rw_lock_exclusive(&slp->ns_rwlock);
1678 slp->ns_sref--;
1679 if (slp->ns_sref || (slp->ns_flag & SLP_VALID)) {
1680 lck_rw_done(&slp->ns_rwlock);
1681 lck_mtx_unlock(nfsd_mutex);
1682 return;
1683 }
1684
1685 TAILQ_REMOVE(&nfssvc_sockhead, slp, ns_chain);
1686 lck_mtx_unlock(nfsd_mutex);
1687
1688 nfsrv_slpfree(slp);
1689 }
1690
1691
1692 /*
1693 * Initialize the data structures for the server.
1694 * Handshake with any new nfsds starting up to avoid any chance of
1695 * corruption.
1696 */
1697 void
1698 nfsrv_init(terminating)
1699 int terminating;
1700 {
1701 struct nfssvc_sock *slp, *nslp;
1702
1703 if (terminating) {
1704 for (slp = TAILQ_FIRST(&nfssvc_sockhead); slp != 0; slp = nslp) {
1705 nslp = TAILQ_NEXT(slp, ns_chain);
1706 if (slp->ns_flag & SLP_VALID) {
1707 lck_rw_lock_exclusive(&slp->ns_rwlock);
1708 nfsrv_zapsock(slp);
1709 lck_rw_done(&slp->ns_rwlock);
1710 }
1711 TAILQ_REMOVE(&nfssvc_sockhead, slp, ns_chain);
1712 /* grab the lock one final time in case anyone's using it */
1713 lck_rw_lock_exclusive(&slp->ns_rwlock);
1714 nfsrv_slpfree(slp);
1715 }
1716 nfsrv_cleancache(); /* And clear out server cache */
1717 /* XXX Revisit when enabling WebNFS */
1718 #ifdef WEBNFS_ENABLED
1719 } else
1720 nfs_pub.np_valid = 0;
1721 #else
1722 }
1723 #endif
1724
1725 TAILQ_INIT(&nfssvc_sockhead);
1726
1727 TAILQ_INIT(&nfsd_head);
1728 nfsd_head_flag &= ~NFSD_CHECKSLP;
1729
1730 MALLOC(nfs_udpsock, struct nfssvc_sock *, sizeof(struct nfssvc_sock),
1731 M_NFSSVC, M_WAITOK);
1732 if (nfs_udpsock) {
1733 bzero((caddr_t)nfs_udpsock, sizeof (struct nfssvc_sock));
1734 lck_rw_init(&nfs_udpsock->ns_rwlock, nfs_slp_rwlock_group, nfs_slp_lock_attr);
1735 TAILQ_INIT(&nfs_udpsock->ns_uidlruhead);
1736 TAILQ_INSERT_HEAD(&nfssvc_sockhead, nfs_udpsock, ns_chain);
1737 } else {
1738 printf("nfsrv_init() failed to allocate UDP socket\n");
1739 }
1740
1741 #if ISO
1742 MALLOC(nfs_cltpsock, struct nfssvc_sock *, sizeof(struct nfssvc_sock),
1743 M_NFSSVC, M_WAITOK);
1744 if (nfs_cltpsock) {
1745 bzero((caddr_t)nfs_cltpsock, sizeof (struct nfssvc_sock));
1746 lck_rw_init(&nfs_cltpsock->ns_rwlock, nfs_slp_rwlock_group, nfs_slp_lock_attr);
1747 TAILQ_INIT(&nfs_cltpsock->ns_uidlruhead);
1748 TAILQ_INSERT_TAIL(&nfssvc_sockhead, nfs_cltpsock, ns_chain);
1749 } else {
1750 printf("nfsrv_init() failed to allocate CLTP socket\n");
1751 }
1752 #endif
1753 }
1754
1755 /*
1756 * Add entries to the server monitor log.
1757 */
1758 static void
1759 nfsd_rt(sotype, nd, cacherep)
1760 int sotype;
1761 register struct nfsrv_descript *nd;
1762 int cacherep;
1763 {
1764 register struct drt *rt;
1765 struct timeval now;
1766
1767 rt = &nfsdrt.drt[nfsdrt.pos];
1768 if (cacherep == RC_DOIT)
1769 rt->flag = 0;
1770 else if (cacherep == RC_REPLY)
1771 rt->flag = DRT_CACHEREPLY;
1772 else
1773 rt->flag = DRT_CACHEDROP;
1774 if (sotype == SOCK_STREAM)
1775 rt->flag |= DRT_TCP;
1776 else if (nd->nd_flag & ND_NFSV3)
1777 rt->flag |= DRT_NFSV3;
1778 rt->proc = nd->nd_procnum;
1779 if (((struct sockaddr *)mbuf_data(nd->nd_nam))->sa_family == AF_INET)
1780 rt->ipadr = ((struct sockaddr_in *)mbuf_data(nd->nd_nam))->sin_addr.s_addr;
1781 else
1782 rt->ipadr = INADDR_ANY;
1783 microuptime(&now);
1784 rt->resptime = ((now.tv_sec - nd->nd_starttime.tv_sec) * 1000000) +
1785 (now.tv_usec - nd->nd_starttime.tv_usec);
1786 microtime(&rt->tstamp); // XXX unused
1787 nfsdrt.pos = (nfsdrt.pos + 1) % NFSRTTLOGSIZ;
1788 }
1789 #endif /* NFS_NOSERVER */