[apple/xnu.git] bsd/nfs/nfs_syscalls.c (xnu-792.12.6)
1 /*
2 * Copyright (c) 2006 Apple Computer, Inc. All Rights Reserved.
3 *
4 * @APPLE_LICENSE_OSREFERENCE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the
10 * License may not be used to create, or enable the creation or
11 * redistribution of, unlawful or unlicensed copies of an Apple operating
12 * system, or to circumvent, violate, or enable the circumvention or
13 * violation of, any terms of an Apple operating system software license
14 * agreement.
15 *
16 * Please obtain a copy of the License at
17 * http://www.opensource.apple.com/apsl/ and read it before using this
18 * file.
19 *
20 * The Original Code and all software distributed under the License are
21 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
22 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
23 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
24 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
25 * Please see the License for the specific language governing rights and
26 * limitations under the License.
27 *
28 * @APPLE_LICENSE_OSREFERENCE_HEADER_END@
29 */
30 /* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
31 /*
32 * Copyright (c) 1989, 1993
33 * The Regents of the University of California. All rights reserved.
34 *
35 * This code is derived from software contributed to Berkeley by
36 * Rick Macklem at The University of Guelph.
37 *
38 * Redistribution and use in source and binary forms, with or without
39 * modification, are permitted provided that the following conditions
40 * are met:
41 * 1. Redistributions of source code must retain the above copyright
42 * notice, this list of conditions and the following disclaimer.
43 * 2. Redistributions in binary form must reproduce the above copyright
44 * notice, this list of conditions and the following disclaimer in the
45 * documentation and/or other materials provided with the distribution.
46 * 3. All advertising materials mentioning features or use of this software
47 * must display the following acknowledgement:
48 * This product includes software developed by the University of
49 * California, Berkeley and its contributors.
50 * 4. Neither the name of the University nor the names of its contributors
51 * may be used to endorse or promote products derived from this software
52 * without specific prior written permission.
53 *
54 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
55 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
56 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
57 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
58 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
59 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
60 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
61 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
62 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
63 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
64 * SUCH DAMAGE.
65 *
66 * @(#)nfs_syscalls.c 8.5 (Berkeley) 3/30/95
67 * FreeBSD-Id: nfs_syscalls.c,v 1.32 1997/11/07 08:53:25 phk Exp $
68 */
69
70 #include <sys/param.h>
71 #include <sys/systm.h>
72 /* XXX CSM 11/25/97 FreeBSD's generated syscall prototypes */
73 #ifdef notyet
74 #include <sys/sysproto.h>
75 #endif
76 #include <sys/kernel.h>
77 #include <sys/file_internal.h>
78 #include <sys/filedesc.h>
79 #include <sys/stat.h>
80 #include <sys/vnode_internal.h>
81 #include <sys/mount_internal.h>
82 #include <sys/proc_internal.h> /* for fdflags */
83 #include <sys/kauth.h>
84 #include <sys/sysctl.h>
85 #include <sys/ubc.h>
86 #include <sys/uio.h>
87 #include <sys/malloc.h>
88 #include <sys/kpi_mbuf.h>
89 #include <sys/socket.h>
90 #include <sys/socketvar.h>
91 #include <sys/domain.h>
92 #include <sys/protosw.h>
93 #include <sys/fcntl.h>
94 #include <sys/lockf.h>
95 #include <sys/syslog.h>
96 #include <sys/user.h>
97 #include <sys/sysproto.h>
98 #include <sys/kpi_socket.h>
99 #include <libkern/OSAtomic.h>
100
101 #include <bsm/audit_kernel.h>
102
103 #include <netinet/in.h>
104 #include <netinet/tcp.h>
105 #if ISO
106 #include <netiso/iso.h>
107 #endif
108 #include <nfs/xdr_subs.h>
109 #include <nfs/rpcv2.h>
110 #include <nfs/nfsproto.h>
111 #include <nfs/nfs.h>
112 #include <nfs/nfsm_subs.h>
113 #include <nfs/nfsrvcache.h>
114 #include <nfs/nfsmount.h>
115 #include <nfs/nfsnode.h>
116 #include <nfs/nfsrtt.h>
117 #include <nfs/nfs_lock.h>
118
119 extern void unix_syscall_return(int);
120
121 /* Global defs. */
122 extern int (*nfsrv3_procs[NFS_NPROCS])(struct nfsrv_descript *nd,
123 struct nfssvc_sock *slp,
124 proc_t procp,
125 mbuf_t *mreqp);
126 extern int nfs_numasync;
127 extern int nfs_ioddelwri;
128 extern int nfsrtton;
129 extern struct nfsstats nfsstats;
130 extern int nfsrvw_procrastinate;
131 extern int nfsrvw_procrastinate_v3;
132
133 struct nfssvc_sock *nfs_udpsock, *nfs_cltpsock;
134 static int nuidhash_max = NFS_MAXUIDHASH;
135
136 static void nfsrv_zapsock(struct nfssvc_sock *slp);
137 static int nfssvc_iod(proc_t);
138 static int nfskerb_clientd(struct nfsmount *, struct nfsd_cargs *, int, user_addr_t, proc_t);
139
140 static int nfs_asyncdaemon[NFS_MAXASYNCDAEMON];
141
142 #ifndef NFS_NOSERVER
143 int nfsd_waiting = 0;
144 static struct nfsdrt nfsdrt;
145 int nfs_numnfsd = 0;
146 static void nfsd_rt(int sotype, struct nfsrv_descript *nd, int cacherep);
147 static int nfssvc_addsock(socket_t, mbuf_t, proc_t);
148 static int nfssvc_nfsd(struct nfsd_srvargs *,user_addr_t, proc_t);
149 static int nfssvc_export(user_addr_t, proc_t);
150
151 static int nfs_privport = 0;
152 /* XXX CSM 11/25/97 Upgrade sysctl.h someday */
153 #ifdef notyet
154 SYSCTL_INT(_vfs_nfs, NFS_NFSPRIVPORT, nfs_privport, CTLFLAG_RW, &nfs_privport, 0, "");
155 SYSCTL_INT(_vfs_nfs, OID_AUTO, gatherdelay, CTLFLAG_RW, &nfsrvw_procrastinate, 0, "");
156 SYSCTL_INT(_vfs_nfs, OID_AUTO, gatherdelay_v3, CTLFLAG_RW, &nfsrvw_procrastinate_v3, 0, "");
157 #endif
158
159 /*
160 * NFS server system calls
161 * getfh() lives here too, but maybe should move to kern/vfs_syscalls.c
162 */
163
164 /*
165 * Get file handle system call
166 */
167 int
168 getfh(proc_t p, struct getfh_args *uap, __unused int *retval)
169 {
170 vnode_t vp;
171 struct nfs_filehandle nfh;
172 int error;
173 struct nameidata nd;
174 struct vfs_context context;
175 char path[MAXPATHLEN], *ptr;
176 u_int pathlen;
177 struct nfs_exportfs *nxfs;
178 struct nfs_export *nx;
179
180 context.vc_proc = p;
181 context.vc_ucred = kauth_cred_get();
182
183 /*
184 * Must be super user
185 */
186 error = proc_suser(p);
187 if (error)
188 return (error);
189
190 error = copyinstr(uap->fname, path, MAXPATHLEN, (size_t *)&pathlen);
191 if (error)
192 return (error);
193
194 NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF | AUDITVNPATH1,
195 UIO_SYSSPACE, path, &context);
196 error = namei(&nd);
197 if (error)
198 return (error);
199 nameidone(&nd);
200
201 vp = nd.ni_vp;
202
203 // find exportfs that matches f_mntonname
204 lck_rw_lock_shared(&nfs_export_rwlock);
205 ptr = vnode_mount(vp)->mnt_vfsstat.f_mntonname;
206 LIST_FOREACH(nxfs, &nfs_exports, nxfs_next) {
207 if (!strcmp(nxfs->nxfs_path, ptr))
208 break;
209 }
210 if (!nxfs || strncmp(nxfs->nxfs_path, path, strlen(nxfs->nxfs_path))) {
211 error = EINVAL;
212 goto out;
213 }
214 // find export that best matches remainder of path
215 ptr = path + strlen(nxfs->nxfs_path);
216 while (*ptr && (*ptr == '/'))
217 ptr++;
218 LIST_FOREACH(nx, &nxfs->nxfs_exports, nx_next) {
219 int len = strlen(nx->nx_path);
220 if (len == 0) // we've hit the export entry for the root directory
221 break;
222 if (!strncmp(nx->nx_path, ptr, len))
223 break;
224 }
225 if (!nx) {
226 error = EINVAL;
227 goto out;
228 }
229
230 bzero(&nfh, sizeof(nfh));
231 nfh.nfh_xh.nxh_version = NFS_FH_VERSION;
232 nfh.nfh_xh.nxh_fsid = nxfs->nxfs_id;
233 nfh.nfh_xh.nxh_expid = nx->nx_id;
234 nfh.nfh_xh.nxh_flags = 0;
235 nfh.nfh_xh.nxh_reserved = 0;
236 nfh.nfh_len = NFS_MAX_FID_SIZE;
237 error = VFS_VPTOFH(vp, &nfh.nfh_len, &nfh.nfh_fid[0], NULL);
238 if (nfh.nfh_len > (int)NFS_MAX_FID_SIZE)
239 error = EOVERFLOW;
240 nfh.nfh_xh.nxh_fidlen = nfh.nfh_len;
241 nfh.nfh_len += sizeof(nfh.nfh_xh);
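	/* nfh_len now covers the export header plus the fid bytes filled in by VFS_VPTOFH */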
242
243 out:
244 lck_rw_done(&nfs_export_rwlock);
245 vnode_put(vp);
246 if (error)
247 return (error);
248 error = copyout((caddr_t)&nfh, uap->fhp, sizeof(nfh));
249 return (error);
250 }
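/*
 * A minimal userspace sketch of driving getfh(), assuming a getfh(2)
 * wrapper with the (path, handle) shape implied by getfh_args above;
 * the wrapper name, the userland visibility of struct nfs_filehandle,
 * and the header path are assumptions for illustration only, so the
 * block is disabled and never builds into the kernel.
 */
#if 0
#include <stdio.h>
#include <string.h>
#include <nfs/nfs.h>			/* struct nfs_filehandle (assumed visible) */

int getfh(const char *path, struct nfs_filehandle *nfhp);	/* assumed prototype */

static int
print_handle(const char *path)
{
	struct nfs_filehandle nfh;

	memset(&nfh, 0, sizeof(nfh));
	/* path must lie within an active export or the call fails (EINVAL above) */
	if (getfh(path, &nfh) < 0) {
		perror("getfh");
		return (-1);
	}
	/* nfh_len covers the export header plus the fid returned by the file system */
	printf("fh len %d, fsid %d, expid %d\n", (int)nfh.nfh_len,
	    (int)nfh.nfh_xh.nxh_fsid, (int)nfh.nfh_xh.nxh_expid);
	return (0);
}
#endif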
251
252 #endif /* NFS_NOSERVER */
253
254 extern struct fileops vnops;
255
256 /*
257  * syscall for the rpc.lockd to use to translate an NFS file handle into
258 * an open descriptor.
259 *
260 * warning: do not remove the suser() call or this becomes one giant
261 * security hole.
262 */
263 int
264 fhopen( proc_t p,
265 struct fhopen_args *uap,
266 register_t *retval)
267 {
268 vnode_t vp;
269 struct nfs_filehandle nfh;
270 struct nfs_export *nx;
271 struct nfs_export_options *nxo;
272 struct flock lf;
273 struct fileproc *fp, *nfp;
274 int fmode, error, type;
275 int indx;
276 kauth_cred_t cred = proc_ucred(p);
277 struct vfs_context context;
278 kauth_action_t action;
279
280 context.vc_proc = p;
281 context.vc_ucred = cred;
282
283 /*
284 * Must be super user
285 */
286 error = suser(cred, 0);
287 if (error)
288 return (error);
289
290 fmode = FFLAGS(uap->flags);
291 /* why not allow a non-read/write open for our lockd? */
292 if (((fmode & (FREAD | FWRITE)) == 0) || (fmode & O_CREAT))
293 return (EINVAL);
294
295 error = copyin(uap->u_fhp, &nfh.nfh_len, sizeof(nfh.nfh_len));
296 if (error)
297 return (error);
298 if ((nfh.nfh_len < (int)sizeof(struct nfs_exphandle)) ||
299 (nfh.nfh_len > (int)NFS_MAX_FH_SIZE))
300 return (EINVAL);
301 error = copyin(uap->u_fhp, &nfh, sizeof(nfh.nfh_len) + nfh.nfh_len);
302 if (error)
303 return (error);
304
305 lck_rw_lock_shared(&nfs_export_rwlock);
306 /* now give me my vnode, it gets returned to me with a reference */
307 error = nfsrv_fhtovp(&nfh, NULL, TRUE, &vp, &nx, &nxo);
308 lck_rw_done(&nfs_export_rwlock);
309 if (error)
310 return (error);
311
312 /*
313 * From now on we have to make sure not
314 * to forget about the vnode.
315 * Any error that causes an abort must vnode_put(vp).
316 * Just set error = err and 'goto bad;'.
317 */
318
319 /*
320 * from vn_open
321 */
322 if (vnode_vtype(vp) == VSOCK) {
323 error = EOPNOTSUPP;
324 goto bad;
325 }
326
327 /* disallow write operations on directories */
328 if (vnode_isdir(vp) && (fmode & (FWRITE | O_TRUNC))) {
329 error = EISDIR;
330 goto bad;
331 }
332
333 /* compute action to be authorized */
334 action = 0;
335 if (fmode & FREAD)
336 action |= KAUTH_VNODE_READ_DATA;
337 if (fmode & (FWRITE | O_TRUNC))
338 action |= KAUTH_VNODE_WRITE_DATA;
339 if ((error = vnode_authorize(vp, NULL, action, &context)) != 0)
340 goto bad;
341
342 if ((error = VNOP_OPEN(vp, fmode, &context)))
343 goto bad;
344 if ((error = vnode_ref_ext(vp, fmode)))
345 goto bad;
346
347 /*
348 * end of vn_open code
349 */
350
351 // starting here... error paths should call vn_close/vnode_put
352 if ((error = falloc(p, &nfp, &indx)) != 0) {
353 vn_close(vp, fmode & FMASK, cred, p);
354 goto bad;
355 }
356 fp = nfp;
357
358 fp->f_fglob->fg_flag = fmode & FMASK;
359 fp->f_fglob->fg_type = DTYPE_VNODE;
360 fp->f_fglob->fg_ops = &vnops;
361 fp->f_fglob->fg_data = (caddr_t)vp;
362
363 // XXX do we really need to support this with fhopen()?
364 if (fmode & (O_EXLOCK | O_SHLOCK)) {
365 lf.l_whence = SEEK_SET;
366 lf.l_start = 0;
367 lf.l_len = 0;
368 if (fmode & O_EXLOCK)
369 lf.l_type = F_WRLCK;
370 else
371 lf.l_type = F_RDLCK;
372 type = F_FLOCK;
373 if ((fmode & FNONBLOCK) == 0)
374 type |= F_WAIT;
375 if ((error = VNOP_ADVLOCK(vp, (caddr_t)fp->f_fglob, F_SETLK, &lf, type, &context))) {
376 vn_close(vp, fp->f_fglob->fg_flag, fp->f_fglob->fg_cred, p);
377 fp_free(p, indx, fp);
378 return (error);
379 }
380 fp->f_fglob->fg_flag |= FHASLOCK;
381 }
382
383 vnode_put(vp);
384
385 proc_fdlock(p);
386 *fdflags(p, indx) &= ~UF_RESERVED;
387 fp_drop(p, indx, fp, 1);
388 proc_fdunlock(p);
389
390 *retval = indx;
391 return (0);
392
393 bad:
394 vnode_put(vp);
395 return (error);
396 }
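/*
 * A rough sketch of the rpc.lockd usage described above, assuming a
 * userspace fhopen(2) wrapper that takes the raw NFS file handle and
 * open flags; the wrapper, the fcntl-based locking step, and how the
 * handle bytes arrive from the NLM request are assumptions for
 * illustration, so the block is disabled and never builds into the
 * kernel.
 */
#if 0
#include <sys/types.h>
#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <nfs/nfs.h>			/* struct nfs_filehandle (assumed visible) */

int fhopen(const struct nfs_filehandle *nfhp, int flags);	/* assumed prototype */

static int
lock_by_handle(const struct nfs_filehandle *nfhp, off_t start, off_t len)
{
	struct flock lf;
	int fd;

	/* caller must be root; the open must be for read and/or write, O_CREAT is rejected */
	fd = fhopen(nfhp, O_RDWR);
	if (fd < 0)
		return (-1);

	memset(&lf, 0, sizeof(lf));
	lf.l_whence = SEEK_SET;
	lf.l_start = start;
	lf.l_len = len;
	lf.l_type = F_WRLCK;
	if (fcntl(fd, F_SETLK, &lf) < 0) {
		close(fd);
		return (-1);
	}
	return (fd);
}
#endif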
397
398 /*
399  * NFS server pseudo system call for the nfsds.
400 * Based on the flag value it either:
401 * - adds a socket to the selection list
402 * - remains in the kernel as an nfsd
403 * - remains in the kernel as an nfsiod
404 */
405 int
406 nfssvc(proc_t p, struct nfssvc_args *uap, __unused int *retval)
407 {
408 #ifndef NFS_NOSERVER
409 struct nameidata nd;
410 mbuf_t nam;
411 struct user_nfsd_args user_nfsdarg;
412 struct nfsd_srvargs nfsd_srvargs, *nsd = &nfsd_srvargs;
413 struct nfsd_cargs ncd;
414 struct nfsd *nfsd;
415 struct nfssvc_sock *slp;
416 struct nfsuid *nuidp;
417 struct nfsmount *nmp;
418 struct timeval now;
419 socket_t so;
420 struct vfs_context context;
421 struct ucred temp_cred;
422 #endif /* NFS_NOSERVER */
423 int error;
424
425 AUDIT_ARG(cmd, uap->flag);
426
427 /*
428 * Must be super user
429 */
430 error = proc_suser(p);
431 if(error)
432 return (error);
433 if (uap->flag & NFSSVC_BIOD)
434 error = nfssvc_iod(p);
435 #ifdef NFS_NOSERVER
436 else
437 error = ENXIO;
438 #else /* !NFS_NOSERVER */
439 else if (uap->flag & NFSSVC_MNTD) {
440
441 context.vc_proc = p;
442 context.vc_ucred = kauth_cred_get();
443
444 error = copyin(uap->argp, (caddr_t)&ncd, sizeof (ncd));
445 if (error)
446 return (error);
447
448 NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF | AUDITVNPATH1,
449 (proc_is64bit(p) ? UIO_USERSPACE64 : UIO_USERSPACE32),
450 CAST_USER_ADDR_T(ncd.ncd_dirp), &context);
451 error = namei(&nd);
452 if (error)
453 return (error);
454 nameidone(&nd);
455
456 if (vnode_isvroot(nd.ni_vp) == 0)
457 error = EINVAL;
458 nmp = VFSTONFS(vnode_mount(nd.ni_vp));
459 vnode_put(nd.ni_vp);
460 if (error)
461 return (error);
462
463 if ((nmp->nm_state & NFSSTA_MNTD) &&
464 (uap->flag & NFSSVC_GOTAUTH) == 0)
465 return (0);
466 nmp->nm_state |= NFSSTA_MNTD;
467 error = nfskerb_clientd(nmp, &ncd, uap->flag, uap->argp, p);
468 } else if (uap->flag & NFSSVC_ADDSOCK) {
469 if (IS_64BIT_PROCESS(p)) {
470 error = copyin(uap->argp, (caddr_t)&user_nfsdarg, sizeof(user_nfsdarg));
471 } else {
472 struct nfsd_args tmp_args;
473 error = copyin(uap->argp, (caddr_t)&tmp_args, sizeof(tmp_args));
474 if (error == 0) {
475 user_nfsdarg.sock = tmp_args.sock;
476 user_nfsdarg.name = CAST_USER_ADDR_T(tmp_args.name);
477 user_nfsdarg.namelen = tmp_args.namelen;
478 }
479 }
480 if (error)
481 return (error);
482 /* get the socket */
483 error = file_socket(user_nfsdarg.sock, &so);
484 if (error)
485 return (error);
486 /* Get the client address for connected sockets. */
487 if (user_nfsdarg.name == USER_ADDR_NULL || user_nfsdarg.namelen == 0) {
488 nam = NULL;
489 } else {
490 error = sockargs(&nam, user_nfsdarg.name, user_nfsdarg.namelen, MBUF_TYPE_SONAME);
491 if (error) {
492 /* drop the iocount file_socket() grabbed on the file descriptor */
493 file_drop(user_nfsdarg.sock);
494 return (error);
495 }
496 }
497 /*
498 * nfssvc_addsock() will grab a retain count on the socket
499 * to keep the socket from being closed when nfsd closes its
500 * file descriptor for it.
501 */
502 error = nfssvc_addsock(so, nam, p);
503 /* drop the iocount file_socket() grabbed on the file descriptor */
504 file_drop(user_nfsdarg.sock);
505 } else if (uap->flag & NFSSVC_NFSD) {
506 error = copyin(uap->argp, (caddr_t)nsd, sizeof (*nsd));
507 if (error)
508 return (error);
509
510 if ((uap->flag & NFSSVC_AUTHIN) && ((nfsd = nsd->nsd_nfsd)) &&
511 (nfsd->nfsd_slp->ns_flag & SLP_VALID)) {
512 slp = nfsd->nfsd_slp;
513
514 /*
515 * First check to see if another nfsd has already
516 * added this credential.
517 */
518 for (nuidp = NUIDHASH(slp,nsd->nsd_cr.cr_uid)->lh_first;
519 nuidp != 0; nuidp = nuidp->nu_hash.le_next) {
520 if (kauth_cred_getuid(nuidp->nu_cr) == nsd->nsd_cr.cr_uid &&
521 (!nfsd->nfsd_nd->nd_nam2 ||
522 netaddr_match(NU_NETFAM(nuidp),
523 &nuidp->nu_haddr, nfsd->nfsd_nd->nd_nam2)))
524 break;
525 }
526 if (nuidp) {
527 nfsrv_setcred(nuidp->nu_cr,nfsd->nfsd_nd->nd_cr);
528 nfsd->nfsd_nd->nd_flag |= ND_KERBFULL;
529 } else {
530 /*
531 * Nope, so we will.
532 */
533 if (slp->ns_numuids < nuidhash_max) {
534 slp->ns_numuids++;
535 nuidp = (struct nfsuid *)
536 _MALLOC_ZONE(sizeof (struct nfsuid),
537 M_NFSUID, M_WAITOK);
538 } else
539 nuidp = (struct nfsuid *)0;
540 if ((slp->ns_flag & SLP_VALID) == 0) {
541 if (nuidp) {
542 FREE_ZONE((caddr_t)nuidp,
543 sizeof (struct nfsuid), M_NFSUID);
544 slp->ns_numuids--;
545 }
546 } else {
547 if (nuidp == (struct nfsuid *)0) {
548 nuidp = slp->ns_uidlruhead.tqh_first;
549 if (!nuidp)
550 return (ENOMEM);
551 LIST_REMOVE(nuidp, nu_hash);
552 TAILQ_REMOVE(&slp->ns_uidlruhead, nuidp,
553 nu_lru);
554 if (nuidp->nu_flag & NU_NAM)
555 mbuf_freem(nuidp->nu_nam);
556 kauth_cred_rele(nuidp->nu_cr);
557 }
558 nuidp->nu_flag = 0;
559
560 if (nsd->nsd_cr.cr_ngroups > NGROUPS)
561 nsd->nsd_cr.cr_ngroups = NGROUPS;
562
563 nfsrv_setcred(&nsd->nsd_cr, &temp_cred);
564 nuidp->nu_cr = kauth_cred_create(&temp_cred);
565
566 if (!nuidp->nu_cr) {
567 FREE_ZONE(nuidp, sizeof(struct nfsuid), M_NFSUID);
568 slp->ns_numuids--;
569 return (ENOMEM);
570 }
571 nuidp->nu_timestamp = nsd->nsd_timestamp;
572 microtime(&now);
573 nuidp->nu_expire = now.tv_sec + nsd->nsd_ttl;
574 /*
575 * and save the session key in nu_key.
576 */
577 bcopy(nsd->nsd_key, nuidp->nu_key,
578 sizeof (nsd->nsd_key));
579 if (nfsd->nfsd_nd->nd_nam2) {
580 struct sockaddr_in *saddr;
581
582 saddr = mbuf_data(nfsd->nfsd_nd->nd_nam2);
583 switch (saddr->sin_family) {
584 case AF_INET:
585 nuidp->nu_flag |= NU_INETADDR;
586 nuidp->nu_inetaddr =
587 saddr->sin_addr.s_addr;
588 break;
589 case AF_ISO:
590 default:
591 nuidp->nu_flag |= NU_NAM;
592 error = mbuf_copym(nfsd->nfsd_nd->nd_nam2, 0,
593 MBUF_COPYALL, MBUF_WAITOK,
594 &nuidp->nu_nam);
595 if (error) {
596 kauth_cred_rele(nuidp->nu_cr);
597 FREE_ZONE(nuidp, sizeof(struct nfsuid), M_NFSUID);
598 slp->ns_numuids--;
599 return (error);
600 }
601 break;
602 };
603 }
604 TAILQ_INSERT_TAIL(&slp->ns_uidlruhead, nuidp,
605 nu_lru);
606 LIST_INSERT_HEAD(NUIDHASH(slp, nsd->nsd_uid),
607 nuidp, nu_hash);
608 nfsrv_setcred(nuidp->nu_cr,
609 nfsd->nfsd_nd->nd_cr);
610 nfsd->nfsd_nd->nd_flag |= ND_KERBFULL;
611 }
612 }
613 }
614 if ((uap->flag & NFSSVC_AUTHINFAIL) && (nfsd = nsd->nsd_nfsd))
615 nfsd->nfsd_flag |= NFSD_AUTHFAIL;
616 error = nfssvc_nfsd(nsd, uap->argp, p);
617 } else if (uap->flag & NFSSVC_EXPORT) {
618 error = nfssvc_export(uap->argp, p);
619 } else {
620 error = EINVAL;
621 }
622 #endif /* NFS_NOSERVER */
623 if (error == EINTR || error == ERESTART)
624 error = 0;
625 return (error);
626 }
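/*
 * How the userland nfsd is expected to drive nfssvc(), sketched under
 * the assumption of an nfssvc(2) wrapper taking (flags, argp); the
 * wrapper and the bind-to-port-2049 setup are illustrative assumptions,
 * while the flag values and argument structures come from the code
 * above.  Disabled so it never builds into the kernel.
 */
#if 0
#include <string.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <nfs/nfs.h>			/* NFSSVC_*, struct nfsd_args, struct nfsd_srvargs */

int nfssvc(int flags, void *argp);	/* assumed prototype */

static int
serve_udp(void)
{
	struct sockaddr_in sin;
	struct nfsd_args nfsdargs;
	struct nfsd_srvargs srvargs;
	int sock;

	sock = socket(AF_INET, SOCK_DGRAM, 0);
	if (sock < 0)
		return (-1);
	memset(&sin, 0, sizeof(sin));
	sin.sin_len = sizeof(sin);
	sin.sin_family = AF_INET;
	sin.sin_port = htons(2049);		/* standard NFS port */
	if (bind(sock, (struct sockaddr *)&sin, sizeof(sin)) < 0)
		return (-1);

	/* hand the socket to the kernel; nfssvc_addsock() takes its own retain on it */
	nfsdargs.sock = sock;
	nfsdargs.name = NULL;			/* no client address for an unconnected socket */
	nfsdargs.namelen = 0;
	if (nfssvc(NFSSVC_ADDSOCK, &nfsdargs) < 0)
		return (-1);

	/* become an in-kernel nfsd; this only returns on error or a signal */
	memset(&srvargs, 0, sizeof(srvargs));
	return (nfssvc(NFSSVC_NFSD, &srvargs));
}
#endif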
627
628 /*
629 * NFSKERB client helper daemon.
630 * Gets authorization strings for "kerb" mounts.
631 */
632 static int
633 nfskerb_clientd(
634 struct nfsmount *nmp,
635 struct nfsd_cargs *ncd,
636 int flag,
637 user_addr_t argp,
638 proc_t p)
639 {
640 struct nfsuid *nuidp, *nnuidp;
641 int error = 0;
642 struct nfsreq *rp;
643 struct timeval now;
644
645 /*
646 * First initialize some variables
647 */
648 microtime(&now);
649
650 /*
651 * If an authorization string is being passed in, get it.
652 */
653 if ((flag & NFSSVC_GOTAUTH) && (nmp->nm_state & NFSSTA_MOUNTED) &&
654 ((nmp->nm_state & NFSSTA_WAITAUTH) == 0)) {
655 if (nmp->nm_state & NFSSTA_HASAUTH)
656 panic("cld kerb");
657 if ((flag & NFSSVC_AUTHINFAIL) == 0) {
658 if (ncd->ncd_authlen <= nmp->nm_authlen &&
659 ncd->ncd_verflen <= nmp->nm_verflen &&
660 !copyin(CAST_USER_ADDR_T(ncd->ncd_authstr),nmp->nm_authstr,ncd->ncd_authlen)&&
661 !copyin(CAST_USER_ADDR_T(ncd->ncd_verfstr),nmp->nm_verfstr,ncd->ncd_verflen)){
662 nmp->nm_authtype = ncd->ncd_authtype;
663 nmp->nm_authlen = ncd->ncd_authlen;
664 nmp->nm_verflen = ncd->ncd_verflen;
665 #if NFSKERB
666 nmp->nm_key = ncd->ncd_key;
667 #endif
668 } else
669 nmp->nm_state |= NFSSTA_AUTHERR;
670 } else
671 nmp->nm_state |= NFSSTA_AUTHERR;
672 nmp->nm_state |= NFSSTA_HASAUTH;
673 wakeup((caddr_t)&nmp->nm_authlen);
674 } else {
675 nmp->nm_state |= NFSSTA_WAITAUTH;
676 }
677
678 /*
679  * Loop, periodically updating the queue, until there is a termination signal.
680 */
681 while (nmp->nm_state & NFSSTA_MOUNTED) {
682 /* Get an authorization string, if required. */
683 if ((nmp->nm_state & (NFSSTA_WAITAUTH | NFSSTA_HASAUTH)) == 0) {
684 ncd->ncd_authuid = nmp->nm_authuid;
685 if (copyout((caddr_t)ncd, argp, sizeof (struct nfsd_cargs)))
686 nmp->nm_state |= NFSSTA_WAITAUTH;
687 else
688 return (ENEEDAUTH);
689 }
690 /* Wait a bit (no pun) and do it again. */
691 if ((nmp->nm_state & NFSSTA_MOUNTED) &&
692 (nmp->nm_state & (NFSSTA_WAITAUTH | NFSSTA_HASAUTH))) {
693 error = tsleep((caddr_t)&nmp->nm_authstr, PSOCK | PCATCH,
694 "nfskrbtimr", hz / 3);
695 if (error == EINTR || error == ERESTART)
696 dounmount(nmp->nm_mountp, 0, p);
697 }
698 }
699
700 /*
701 * Finally, we can free up the mount structure.
702 */
703 for (nuidp = nmp->nm_uidlruhead.tqh_first; nuidp != 0; nuidp = nnuidp) {
704 nnuidp = nuidp->nu_lru.tqe_next;
705 LIST_REMOVE(nuidp, nu_hash);
706 TAILQ_REMOVE(&nmp->nm_uidlruhead, nuidp, nu_lru);
707 kauth_cred_rele(nuidp->nu_cr);
708 FREE_ZONE((caddr_t)nuidp, sizeof (struct nfsuid), M_NFSUID);
709 }
710 /*
711 * Loop through outstanding request list and remove dangling
712 * references to defunct nfsmount struct
713 */
714 for (rp = nfs_reqq.tqh_first; rp; rp = rp->r_chain.tqe_next)
715 if (rp->r_nmp == nmp)
716 rp->r_nmp = (struct nfsmount *)0;
717 /* Need to wake up any rcvlock waiters so they notice the unmount. */
718 if (nmp->nm_state & NFSSTA_WANTRCV) {
719 nmp->nm_state &= ~NFSSTA_WANTRCV;
720 wakeup(&nmp->nm_state);
721 }
722 FREE_ZONE((caddr_t)nmp, sizeof (struct nfsmount), M_NFSMNT);
723 if (error == EWOULDBLOCK)
724 error = 0;
725 return (error);
726 }
727
728 #ifndef NFS_NOSERVER
729 /*
730 * Adds a socket to the list for servicing by nfsds.
731 */
732 static int
733 nfssvc_addsock(
734 socket_t so,
735 mbuf_t mynam,
736 __unused proc_t p)
737 {
738 int siz;
739 struct nfssvc_sock *slp;
740 struct nfssvc_sock *tslp = NULL;
741 int error, sodomain, sotype, soprotocol, on = 1;
742 struct timeval timeo;
743
744 /* make sure mbuf constants are set up */
745 if (!nfs_mbuf_mlen)
746 nfs_mbuf_init();
747
748 sock_gettype(so, &sodomain, &sotype, &soprotocol);
749
750 /*
751 * Add it to the list, as required.
752 */
753 if (soprotocol == IPPROTO_UDP) {
754 tslp = nfs_udpsock;
755 if (!tslp || (tslp->ns_flag & SLP_VALID)) {
756 mbuf_freem(mynam);
757 return (EPERM);
758 }
759 #if ISO
760 } else if (soprotocol == ISOPROTO_CLTP) {
761 tslp = nfs_cltpsock;
762 if (!tslp || (tslp->ns_flag & SLP_VALID)) {
763 mbuf_freem(mynam);
764 return (EPERM);
765 }
766 #endif /* ISO */
767 }
768 /* reserve buffer space for 2 maximally-sized packets */
769 siz = NFS_MAXPACKET;
770 if (sotype == SOCK_STREAM)
771 siz += sizeof (u_long);
772 siz *= 2;
773 if (siz > NFS_MAXSOCKBUF)
774 siz = NFS_MAXSOCKBUF;
775 if ((error = sock_setsockopt(so, SOL_SOCKET, SO_SNDBUF, &siz, sizeof(siz))) ||
776 (error = sock_setsockopt(so, SOL_SOCKET, SO_RCVBUF, &siz, sizeof(siz)))) {
777 mbuf_freem(mynam);
778 return (error);
779 }
780
781 /*
782 * Set protocol specific options { for now TCP only } and
783 * reserve some space. For datagram sockets, this can get called
784 * repeatedly for the same socket, but that isn't harmful.
785 */
786 if (sotype == SOCK_STREAM) {
787 sock_setsockopt(so, SOL_SOCKET, SO_KEEPALIVE, &on, sizeof(on));
788 }
789 if (sodomain == AF_INET && soprotocol == IPPROTO_TCP) {
790 sock_setsockopt(so, IPPROTO_TCP, TCP_NODELAY, &on, sizeof(on));
791 }
792
793 sock_nointerrupt(so, 0);
794
795 timeo.tv_usec = 0;
796 timeo.tv_sec = 0;
797 error = sock_setsockopt(so, SOL_SOCKET, SO_RCVTIMEO, &timeo, sizeof(timeo));
798 error = sock_setsockopt(so, SOL_SOCKET, SO_SNDTIMEO, &timeo, sizeof(timeo));
799
800 if (tslp) {
801 slp = tslp;
802 lck_mtx_lock(nfsd_mutex);
803 } else {
804 MALLOC(slp, struct nfssvc_sock *, sizeof(struct nfssvc_sock),
805 M_NFSSVC, M_WAITOK);
806 if (!slp) {
807 mbuf_freem(mynam);
808 return (ENOMEM);
809 }
810 bzero((caddr_t)slp, sizeof (struct nfssvc_sock));
811 lck_rw_init(&slp->ns_rwlock, nfs_slp_rwlock_group, nfs_slp_lock_attr);
812 lck_mtx_init(&slp->ns_wgmutex, nfs_slp_mutex_group, nfs_slp_lock_attr);
813 TAILQ_INIT(&slp->ns_uidlruhead);
814 lck_mtx_lock(nfsd_mutex);
815 TAILQ_INSERT_TAIL(&nfssvc_sockhead, slp, ns_chain);
816 }
817
818 sock_retain(so); /* grab a retain count on the socket */
819 slp->ns_so = so;
820 slp->ns_sotype = sotype;
821 slp->ns_nam = mynam;
822
823 socket_lock(so, 1);
824 so->so_upcallarg = (caddr_t)slp;
825 so->so_upcall = nfsrv_rcv;
826 so->so_rcv.sb_flags |= SB_UPCALL; /* required for freebsd merge */
827 socket_unlock(so, 1);
828
829 slp->ns_flag = SLP_VALID | SLP_NEEDQ;
830
831 nfsrv_wakenfsd(slp);
832 lck_mtx_unlock(nfsd_mutex);
833
834 return (0);
835 }
836
837 /*
838 * Called by nfssvc() for nfsds. Just loops around servicing rpc requests
839 * until it is killed by a signal.
840 */
841 static int
842 nfssvc_nfsd(nsd, argp, p)
843 struct nfsd_srvargs *nsd;
844 user_addr_t argp;
845 proc_t p;
846 {
847 mbuf_t m, mreq;
848 struct nfssvc_sock *slp;
849 struct nfsd *nfsd = nsd->nsd_nfsd;
850 struct nfsrv_descript *nd = NULL;
851 int error = 0, cacherep, writes_todo;
852 int siz, procrastinate;
853 u_quad_t cur_usec;
854 struct timeval now;
855 boolean_t funnel_state;
856
857 #ifndef nolint
858 cacherep = RC_DOIT;
859 writes_todo = 0;
860 #endif
861 if (nfsd == (struct nfsd *)0) {
862 MALLOC(nfsd, struct nfsd *, sizeof(struct nfsd), M_NFSD, M_WAITOK);
863 if (!nfsd)
864 return (ENOMEM);
865 nsd->nsd_nfsd = nfsd;
866 bzero((caddr_t)nfsd, sizeof (struct nfsd));
867 nfsd->nfsd_procp = p;
868 lck_mtx_lock(nfsd_mutex);
869 TAILQ_INSERT_TAIL(&nfsd_head, nfsd, nfsd_chain);
870 nfs_numnfsd++;
871 lck_mtx_unlock(nfsd_mutex);
872 }
873
874 funnel_state = thread_funnel_set(kernel_flock, FALSE);
875
876 /*
877 * Loop getting rpc requests until SIGKILL.
878 */
879 for (;;) {
880 if ((nfsd->nfsd_flag & NFSD_REQINPROG) == 0) {
881 lck_mtx_lock(nfsd_mutex);
882 while ((nfsd->nfsd_slp == NULL) && !(nfsd_head_flag & NFSD_CHECKSLP)) {
883 nfsd->nfsd_flag |= NFSD_WAITING;
884 nfsd_waiting++;
885 error = msleep(nfsd, nfsd_mutex, PSOCK | PCATCH, "nfsd", 0);
886 nfsd_waiting--;
887 if (error) {
888 lck_mtx_unlock(nfsd_mutex);
889 goto done;
890 }
891 }
892 if ((nfsd->nfsd_slp == NULL) && (nfsd_head_flag & NFSD_CHECKSLP)) {
893 TAILQ_FOREACH(slp, &nfssvc_sockhead, ns_chain) {
894 lck_rw_lock_shared(&slp->ns_rwlock);
895 if ((slp->ns_flag & (SLP_VALID | SLP_DOREC))
896 == (SLP_VALID | SLP_DOREC)) {
897 if (lck_rw_lock_shared_to_exclusive(&slp->ns_rwlock)) {
898 /* upgrade failed and we lost the lock; take exclusive and recheck */
899 lck_rw_lock_exclusive(&slp->ns_rwlock);
900 if ((slp->ns_flag & (SLP_VALID | SLP_DOREC))
901 != (SLP_VALID | SLP_DOREC)) {
902 /* flags no longer set, so skip this socket */
903 lck_rw_done(&slp->ns_rwlock);
904 continue;
905 }
906 }
907 slp->ns_flag &= ~SLP_DOREC;
908 slp->ns_sref++;
909 nfsd->nfsd_slp = slp;
910 lck_rw_done(&slp->ns_rwlock);
911 break;
912 }
913 lck_rw_done(&slp->ns_rwlock);
914 }
915 if (slp == 0)
916 nfsd_head_flag &= ~NFSD_CHECKSLP;
917 }
918 lck_mtx_unlock(nfsd_mutex);
919 if ((slp = nfsd->nfsd_slp) == NULL)
920 continue;
921 lck_rw_lock_exclusive(&slp->ns_rwlock);
922 if (slp->ns_flag & SLP_VALID) {
923 if ((slp->ns_flag & (SLP_NEEDQ|SLP_DISCONN)) == SLP_NEEDQ) {
924 slp->ns_flag &= ~SLP_NEEDQ;
925 nfsrv_rcv_locked(slp->ns_so, slp, MBUF_WAITOK);
926 }
927 if (slp->ns_flag & SLP_DISCONN)
928 nfsrv_zapsock(slp);
929 error = nfsrv_dorec(slp, nfsd, &nd);
930 microuptime(&now);
931 cur_usec = (u_quad_t)now.tv_sec * 1000000 +
932 (u_quad_t)now.tv_usec;
933 if (error && slp->ns_wgtime && (slp->ns_wgtime <= cur_usec)) {
934 error = 0;
935 cacherep = RC_DOIT;
936 writes_todo = 1;
937 } else
938 writes_todo = 0;
939 nfsd->nfsd_flag |= NFSD_REQINPROG;
940 }
941 lck_rw_done(&slp->ns_rwlock);
942 } else {
943 error = 0;
944 slp = nfsd->nfsd_slp;
945 }
946 if (error || (slp->ns_flag & SLP_VALID) == 0) {
947 if (nd) {
948 if (nd->nd_nam2)
949 mbuf_freem(nd->nd_nam2);
950 if (nd->nd_cr)
951 kauth_cred_rele(nd->nd_cr);
952 FREE_ZONE((caddr_t)nd,
953 sizeof *nd, M_NFSRVDESC);
954 nd = NULL;
955 }
956 nfsd->nfsd_slp = NULL;
957 nfsd->nfsd_flag &= ~NFSD_REQINPROG;
958 nfsrv_slpderef(slp);
959 continue;
960 }
961 if (nd) {
962 microuptime(&nd->nd_starttime);
963 if (nd->nd_nam2)
964 nd->nd_nam = nd->nd_nam2;
965 else
966 nd->nd_nam = slp->ns_nam;
967
968 /*
969 * Check to see if authorization is needed.
970 */
971 if (nfsd->nfsd_flag & NFSD_NEEDAUTH) {
972 nfsd->nfsd_flag &= ~NFSD_NEEDAUTH;
973 nsd->nsd_haddr = ((struct sockaddr_in *)mbuf_data(nd->nd_nam))->sin_addr.s_addr;
974 nsd->nsd_authlen = nfsd->nfsd_authlen;
975 nsd->nsd_verflen = nfsd->nfsd_verflen;
976 if (!copyout(nfsd->nfsd_authstr,CAST_USER_ADDR_T(nsd->nsd_authstr),
977 nfsd->nfsd_authlen) &&
978 !copyout(nfsd->nfsd_verfstr, CAST_USER_ADDR_T(nsd->nsd_verfstr),
979 nfsd->nfsd_verflen) &&
980 !copyout((caddr_t)nsd, argp, sizeof (*nsd))) {
981 thread_funnel_set(kernel_flock, funnel_state);
982 return (ENEEDAUTH);
983 }
984 cacherep = RC_DROPIT;
985 } else
986 cacherep = nfsrv_getcache(nd, slp, &mreq);
987
988 if (nfsd->nfsd_flag & NFSD_AUTHFAIL) {
989 nfsd->nfsd_flag &= ~NFSD_AUTHFAIL;
990 nd->nd_procnum = NFSPROC_NOOP;
991 nd->nd_repstat = (NFSERR_AUTHERR | AUTH_TOOWEAK);
992 cacherep = RC_DOIT;
993 } else if (nfs_privport) {
994 /* Check if source port is privileged */
995 u_short port;
996 struct sockaddr *nam = mbuf_data(nd->nd_nam);
997 struct sockaddr_in *sin;
998
999 sin = (struct sockaddr_in *)nam;
1000 port = ntohs(sin->sin_port);
1001 if (port >= IPPORT_RESERVED &&
1002 nd->nd_procnum != NFSPROC_NULL) {
1003 char strbuf[MAX_IPv4_STR_LEN];
1004 nd->nd_procnum = NFSPROC_NOOP;
1005 nd->nd_repstat = (NFSERR_AUTHERR | AUTH_TOOWEAK);
1006 cacherep = RC_DOIT;
1007 printf("NFS request from unprivileged port (%s:%d)\n",
1008 inet_ntop(AF_INET, &sin->sin_addr, strbuf, sizeof(strbuf)),
1009 port);
1010 }
1011 }
1012
1013 }
1014
1015 /*
1016  * Loop to get all the write RPC replies that have been
1017 * gathered together.
1018 */
1019 do {
1020 switch (cacherep) {
1021 case RC_DOIT:
1022 if (nd && (nd->nd_flag & ND_NFSV3))
1023 procrastinate = nfsrvw_procrastinate_v3;
1024 else
1025 procrastinate = nfsrvw_procrastinate;
1026 lck_rw_lock_shared(&nfs_export_rwlock);
1027 if (writes_todo || ((nd->nd_procnum == NFSPROC_WRITE) && (procrastinate > 0)))
1028 error = nfsrv_writegather(&nd, slp, nfsd->nfsd_procp, &mreq);
1029 else
1030 error = (*(nfsrv3_procs[nd->nd_procnum]))(nd, slp, nfsd->nfsd_procp, &mreq);
1031 lck_rw_done(&nfs_export_rwlock);
1032 if (mreq == NULL)
1033 break;
1034 if (error) {
1035 OSAddAtomic(1, (SInt32*)&nfsstats.srv_errs);
1036 nfsrv_updatecache(nd, FALSE, mreq);
1037 if (nd->nd_nam2) {
1038 mbuf_freem(nd->nd_nam2);
1039 nd->nd_nam2 = NULL;
1040 }
1041 break;
1042 }
1043 OSAddAtomic(1, (SInt32*)&nfsstats.srvrpccnt[nd->nd_procnum]);
1044 nfsrv_updatecache(nd, TRUE, mreq);
1045 nd->nd_mrep = NULL;
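				/* FALLTHROUGH: the freshly built reply in mreq is sent by the RC_REPLY code below */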
1046 case RC_REPLY:
1047 m = mreq;
1048 siz = 0;
1049 while (m) {
1050 siz += mbuf_len(m);
1051 m = mbuf_next(m);
1052 }
1053 if (siz <= 0 || siz > NFS_MAXPACKET) {
1054 printf("mbuf siz=%d\n",siz);
1055 panic("Bad nfs svc reply");
1056 }
1057 m = mreq;
1058 mbuf_pkthdr_setlen(m, siz);
1059 error = mbuf_pkthdr_setrcvif(m, NULL);
1060 if (error)
1061 panic("nfsd setrcvif failed: %d", error);
1062 /*
1063 * For stream protocols, prepend a Sun RPC
1064 * Record Mark.
1065 */
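				/*
				 * The mark is one 32-bit word: the high bit flags
				 * the final fragment of the record and the low 31
				 * bits give the fragment length, so a reply sent as
				 * a single fragment is 0x80000000 | siz
				 * (RFC 1831 record marking).
				 */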
1066 if (slp->ns_sotype == SOCK_STREAM) {
1067 error = mbuf_prepend(&m, NFSX_UNSIGNED, MBUF_WAITOK);
1068 if (!error)
1069 *(u_long*)mbuf_data(m) = htonl(0x80000000 | siz);
1070 }
1071 if (!error) {
1072 if (slp->ns_flag & SLP_VALID) {
1073 error = nfs_send(slp->ns_so, nd->nd_nam2, m, NULL);
1074 } else {
1075 error = EPIPE;
1076 mbuf_freem(m);
1077 }
1078 } else {
1079 mbuf_freem(m);
1080 }
1081 mreq = NULL;
1082 if (nfsrtton)
1083 nfsd_rt(slp->ns_sotype, nd, cacherep);
1084 if (nd->nd_nam2) {
1085 mbuf_freem(nd->nd_nam2);
1086 nd->nd_nam2 = NULL;
1087 }
1088 if (nd->nd_mrep) {
1089 mbuf_freem(nd->nd_mrep);
1090 nd->nd_mrep = NULL;
1091 }
1092 if (error == EPIPE) {
1093 lck_rw_lock_exclusive(&slp->ns_rwlock);
1094 nfsrv_zapsock(slp);
1095 lck_rw_done(&slp->ns_rwlock);
1096 }
1097 if (error == EINTR || error == ERESTART) {
1098 if (nd->nd_cr)
1099 kauth_cred_rele(nd->nd_cr);
1100 FREE_ZONE((caddr_t)nd, sizeof *nd, M_NFSRVDESC);
1101 nfsrv_slpderef(slp);
1102 goto done;
1103 }
1104 break;
1105 case RC_DROPIT:
1106 if (nfsrtton)
1107 nfsd_rt(slp->ns_sotype, nd, cacherep);
1108 mbuf_freem(nd->nd_mrep);
1109 mbuf_freem(nd->nd_nam2);
1110 nd->nd_mrep = nd->nd_nam2 = NULL;
1111 break;
1112 };
1113 if (nd) {
1114 if (nd->nd_mrep)
1115 mbuf_freem(nd->nd_mrep);
1116 if (nd->nd_nam2)
1117 mbuf_freem(nd->nd_nam2);
1118 if (nd->nd_cr)
1119 kauth_cred_rele(nd->nd_cr);
1120 FREE_ZONE((caddr_t)nd, sizeof *nd, M_NFSRVDESC);
1121 nd = NULL;
1122 }
1123
1124 /*
1125 * Check to see if there are outstanding writes that
1126 * need to be serviced.
1127 */
1128 microuptime(&now);
1129 cur_usec = (u_quad_t)now.tv_sec * 1000000 +
1130 (u_quad_t)now.tv_usec;
1131 if (slp->ns_wgtime && (slp->ns_wgtime <= cur_usec)) {
1132 cacherep = RC_DOIT;
1133 writes_todo = 1;
1134 } else {
1135 writes_todo = 0;
1136 }
1137 } while (writes_todo);
1138 lck_rw_lock_exclusive(&slp->ns_rwlock);
1139 if (nfsrv_dorec(slp, nfsd, &nd)) {
1140 lck_rw_done(&slp->ns_rwlock);
1141 nfsd->nfsd_flag &= ~NFSD_REQINPROG;
1142 nfsd->nfsd_slp = NULL;
1143 nfsrv_slpderef(slp);
1144 } else {
1145 lck_rw_done(&slp->ns_rwlock);
1146 }
1147 }
1148 done:
1149 thread_funnel_set(kernel_flock, funnel_state);
1150 lck_mtx_lock(nfsd_mutex);
1151 TAILQ_REMOVE(&nfsd_head, nfsd, nfsd_chain);
1152 FREE(nfsd, M_NFSD);
1153 nsd->nsd_nfsd = (struct nfsd *)0;
1154 if (--nfs_numnfsd == 0)
1155 nfsrv_init(TRUE); /* Reinitialize everything */
1156 lck_mtx_unlock(nfsd_mutex);
1157 return (error);
1158 }
1159
1160 static int
1161 nfssvc_export(user_addr_t argp, proc_t p)
1162 {
1163 int error = 0, is_64bit;
1164 struct user_nfs_export_args unxa;
1165 struct vfs_context context;
1166
1167 context.vc_proc = p;
1168 context.vc_ucred = kauth_cred_get();
1169 is_64bit = IS_64BIT_PROCESS(p);
1170
1171 /* copy in pointers to path and export args */
1172 if (is_64bit) {
1173 error = copyin(argp, (caddr_t)&unxa, sizeof(unxa));
1174 } else {
1175 struct nfs_export_args tnxa;
1176 error = copyin(argp, (caddr_t)&tnxa, sizeof(tnxa));
1177 if (error == 0) {
1178 /* munge into LP64 version of nfs_export_args structure */
1179 unxa.nxa_fsid = tnxa.nxa_fsid;
1180 unxa.nxa_expid = tnxa.nxa_expid;
1181 unxa.nxa_fspath = CAST_USER_ADDR_T(tnxa.nxa_fspath);
1182 unxa.nxa_exppath = CAST_USER_ADDR_T(tnxa.nxa_exppath);
1183 unxa.nxa_flags = tnxa.nxa_flags;
1184 unxa.nxa_netcount = tnxa.nxa_netcount;
1185 unxa.nxa_nets = CAST_USER_ADDR_T(tnxa.nxa_nets);
1186 }
1187 }
1188 if (error)
1189 return (error);
1190
1191 error = nfsrv_export(&unxa, &context);
1192
1193 return (error);
1194 }
1195
1196 #endif /* NFS_NOSERVER */
1197
1198 int nfs_defect = 0;
1199 /* XXX CSM 11/25/97 Upgrade sysctl.h someday */
1200 #ifdef notyet
1201 SYSCTL_INT(_vfs_nfs, OID_AUTO, defect, CTLFLAG_RW, &nfs_defect, 0, "");
1202 #endif
1203
1204 int
1205 nfsclnt(proc_t p, struct nfsclnt_args *uap, __unused int *retval)
1206 {
1207 struct lockd_ans la;
1208 int error;
1209
1210 if (uap->flag == NFSCLNT_LOCKDWAIT) {
1211 return (nfslockdwait(p));
1212 }
1213 if (uap->flag == NFSCLNT_LOCKDANS) {
1214 error = copyin(uap->argp, &la, sizeof(la));
1215 return (error != 0 ? error : nfslockdans(p, &la));
1216 }
1217 if (uap->flag == NFSCLNT_LOCKDFD)
1218 return (nfslockdfd(p, CAST_DOWN(int, uap->argp)));
1219 return EINVAL;
1220 }
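/*
 * A sketch of how rpc.lockd is expected to drive nfsclnt(), assuming a
 * userspace nfsclnt(2) wrapper taking (flag, argp); the wrapper, the
 * ordering of the calls, and how the daemon fills in struct lockd_ans
 * are assumptions for illustration, while the flag values come from the
 * code above.  Disabled so it never builds into the kernel.
 */
#if 0
#include <string.h>
#include <nfs/nfs.h>
#include <nfs/nfs_lock.h>		/* NFSCLNT_*, struct lockd_ans (assumed visible) */

int nfsclnt(int flag, void *argp);	/* assumed prototype */

static void
lockd_loop(int fd)
{
	struct lockd_ans ans;

	/* the descriptor is passed in place of the pointer (see CAST_DOWN above) */
	nfsclnt(NFSCLNT_LOCKDFD, (void *)(long)fd);

	for (;;) {
		/* wait until the kernel wants attention (semantics assumed) */
		if (nfsclnt(NFSCLNT_LOCKDWAIT, NULL) < 0)
			break;
		/* ... talk to the remote NLM server, then answer ... */
		memset(&ans, 0, sizeof(ans));
		nfsclnt(NFSCLNT_LOCKDANS, &ans);
	}
}
#endif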
1221
1222
1223 static int nfssvc_iod_continue(int);
1224
1225 /*
1226 * Asynchronous I/O daemons for client nfs.
1227 * They do read-ahead and write-behind operations on the block I/O cache.
1228  * An iod never returns unless it fails or gets killed.
1229 */
1230 static int
1231 nfssvc_iod(__unused proc_t p)
1232 {
1233 register int i, myiod;
1234 struct uthread *ut;
1235
1236 /*
1237 * Assign my position or return error if too many already running
1238 */
1239 myiod = -1;
1240 for (i = 0; i < NFS_MAXASYNCDAEMON; i++)
1241 if (nfs_asyncdaemon[i] == 0) {
1242 nfs_asyncdaemon[i]++;
1243 myiod = i;
1244 break;
1245 }
1246 if (myiod == -1)
1247 return (EBUSY);
1248 nfs_numasync++;
1249
1250 /* stuff myiod into uthread to get off local stack for continuation */
1251
1252 ut = (struct uthread *)get_bsdthread_info(current_thread());
1253 ut->uu_state.uu_nfs_myiod = myiod; /* squirrel away for continuation */
1254
1255 nfssvc_iod_continue(0);
1256 /* NOTREACHED */
1257 return (0);
1258 }
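/*
 * The client side enters this path from userland via
 * nfssvc(NFSSVC_BIOD, NULL); a minimal sketch, assuming the same
 * nfssvc(2) wrapper as the server example earlier.  Disabled so it
 * never builds into the kernel.
 */
#if 0
#include <nfs/nfs.h>			/* NFSSVC_BIOD (assumed visible) */

int nfssvc(int flags, void *argp);	/* assumed prototype */

static void
start_nfsiod(void)
{
	/* claims an async-daemon slot and does not return until it fails or is killed */
	(void)nfssvc(NFSSVC_BIOD, NULL);
}
#endif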
1259
1260 /*
1261 * Continuation for Asynchronous I/O daemons for client nfs.
1262 */
1263 static int
1264 nfssvc_iod_continue(int error)
1265 {
1266 register struct nfsbuf *bp;
1267 register int i, myiod;
1268 struct nfsmount *nmp;
1269 struct uthread *ut;
1270 proc_t p;
1271
1272 /*
1273 * real myiod is stored in uthread, recover it
1274 */
1275 ut = (struct uthread *)get_bsdthread_info(current_thread());
1276 myiod = ut->uu_state.uu_nfs_myiod;
1277 p = current_proc(); // XXX
1278
1279 /*
1280  * Just loop around doing our stuff until SIGKILL
1281 * - actually we don't loop with continuations...
1282 */
1283 lck_mtx_lock(nfs_iod_mutex);
1284 for (;;) {
1285 while (((nmp = nfs_iodmount[myiod]) == NULL
1286 || nmp->nm_bufq.tqh_first == NULL)
1287 && error == 0 && nfs_ioddelwri == 0) {
1288 if (nmp)
1289 nmp->nm_bufqiods--;
1290 nfs_iodwant[myiod] = p; // XXX this doesn't need to be a proc_t
1291 nfs_iodmount[myiod] = NULL;
1292 error = msleep0((caddr_t)&nfs_iodwant[myiod], nfs_iod_mutex,
1293 PWAIT | PCATCH | PDROP, "nfsidl", 0, nfssvc_iod_continue);
1294 lck_mtx_lock(nfs_iod_mutex);
1295 }
1296 if (error) {
1297 nfs_asyncdaemon[myiod] = 0;
1298 if (nmp) nmp->nm_bufqiods--;
1299 nfs_iodwant[myiod] = NULL;
1300 nfs_iodmount[myiod] = NULL;
1301 lck_mtx_unlock(nfs_iod_mutex);
1302 nfs_numasync--;
1303 if (error == EINTR || error == ERESTART)
1304 error = 0;
1305 unix_syscall_return(error);
1306 }
1307 if (nmp != NULL) {
1308 while ((bp = TAILQ_FIRST(&nmp->nm_bufq)) != NULL) {
1309 /* Take one off the front of the list */
1310 TAILQ_REMOVE(&nmp->nm_bufq, bp, nb_free);
1311 bp->nb_free.tqe_next = NFSNOLIST;
1312 nmp->nm_bufqlen--;
1313 if (nmp->nm_bufqwant && nmp->nm_bufqlen < 2 * nfs_numasync) {
1314 nmp->nm_bufqwant = FALSE;
1315 lck_mtx_unlock(nfs_iod_mutex);
1316 wakeup(&nmp->nm_bufq);
1317 } else {
1318 lck_mtx_unlock(nfs_iod_mutex);
1319 }
1320
1321 SET(bp->nb_flags, NB_IOD);
1322 if (ISSET(bp->nb_flags, NB_READ))
1323 nfs_doio(bp, bp->nb_rcred, NULL);
1324 else
1325 nfs_doio(bp, bp->nb_wcred, NULL);
1326
1327 lck_mtx_lock(nfs_iod_mutex);
1328 /*
1329  * If there is more than one iod on this mount, then defect
1330 * so that the iods can be shared out fairly between the mounts
1331 */
1332 if (nfs_defect && nmp->nm_bufqiods > 1) {
1333 nfs_iodmount[myiod] = NULL;
1334 nmp->nm_bufqiods--;
1335 break;
1336 }
1337 }
1338 }
1339 lck_mtx_unlock(nfs_iod_mutex);
1340
1341 if (nfs_ioddelwri) {
1342 i = 0;
1343 nfs_ioddelwri = 0;
1344 lck_mtx_lock(nfs_buf_mutex);
1345 while (i < 8 && (bp = TAILQ_FIRST(&nfsbufdelwri)) != NULL) {
1346 struct nfsnode *np = VTONFS(bp->nb_vp);
1347 nfs_buf_remfree(bp);
1348 nfs_buf_refget(bp);
1349 while ((error = nfs_buf_acquire(bp, 0, 0, 0)) == EAGAIN);
1350 nfs_buf_refrele(bp);
1351 if (error)
1352 break;
1353 if (!bp->nb_vp) {
1354 /* buffer is no longer valid */
1355 nfs_buf_drop(bp);
1356 continue;
1357 }
1358 if (ISSET(bp->nb_flags, NB_NEEDCOMMIT)) {
1359 /* put buffer at end of delwri list */
1360 TAILQ_INSERT_TAIL(&nfsbufdelwri, bp, nb_free);
1361 nfsbufdelwricnt++;
1362 nfs_buf_drop(bp);
1363 lck_mtx_unlock(nfs_buf_mutex);
1364 nfs_flushcommits(np->n_vnode, NULL, 1);
1365 } else {
1366 SET(bp->nb_flags, (NB_ASYNC | NB_IOD));
1367 lck_mtx_unlock(nfs_buf_mutex);
1368 nfs_buf_write(bp);
1369 }
1370 i++;
1371 lck_mtx_lock(nfs_buf_mutex);
1372 }
1373 lck_mtx_unlock(nfs_buf_mutex);
1374 }
1375
1376 lck_mtx_lock(nfs_iod_mutex);
1377 }
1378 }
1379
1380 /*
1381 * Shut down a socket associated with an nfssvc_sock structure.
1382 * Should be called with the send lock set, if required.
1383 * The trick here is to increment the sref at the start, so that the nfsds
1384 * will stop using it and clear ns_flag at the end so that it will not be
1385 * reassigned during cleanup.
1386 */
1387 static void
1388 nfsrv_zapsock(struct nfssvc_sock *slp)
1389 {
1390 socket_t so;
1391
1392 if ((slp->ns_flag & SLP_VALID) == 0)
1393 return;
1394 slp->ns_flag &= ~SLP_ALLFLAGS;
1395
1396 so = slp->ns_so;
1397 if (so == NULL)
1398 return;
1399
1400 /*
1401 * Attempt to deter future upcalls, but leave the
1402 * upcall info in place to avoid a race with the
1403 * networking code.
1404 */
1405 socket_lock(so, 1);
1406 so->so_rcv.sb_flags &= ~SB_UPCALL;
1407 socket_unlock(so, 1);
1408
1409 sock_shutdown(so, SHUT_RDWR);
1410 }
1411
1412 /*
1413 * Get an authorization string for the uid by having the mount_nfs sitting
1414  * on this mount point pop up out of the kernel and do it.
1415 */
1416 int
1417 nfs_getauth(nmp, rep, cred, auth_str, auth_len, verf_str, verf_len, key)
1418 register struct nfsmount *nmp;
1419 struct nfsreq *rep;
1420 kauth_cred_t cred;
1421 char **auth_str;
1422 int *auth_len;
1423 char *verf_str;
1424 int *verf_len;
1425 NFSKERBKEY_T key; /* return session key */
1426 {
1427 int error = 0;
1428
1429 while ((nmp->nm_state & NFSSTA_WAITAUTH) == 0) {
1430 nmp->nm_state |= NFSSTA_WANTAUTH;
1431 (void) tsleep((caddr_t)&nmp->nm_authtype, PSOCK,
1432 "nfsauth1", 2 * hz);
1433 error = nfs_sigintr(nmp, rep, rep->r_procp);
1434 if (error) {
1435 nmp->nm_state &= ~NFSSTA_WANTAUTH;
1436 return (error);
1437 }
1438 }
1439 nmp->nm_state &= ~NFSSTA_WANTAUTH;
1440 MALLOC(*auth_str, char *, RPCAUTH_MAXSIZ, M_TEMP, M_WAITOK);
1441 if (!*auth_str)
1442 return (ENOMEM);
1443 nmp->nm_authstr = *auth_str;
1444 nmp->nm_authlen = RPCAUTH_MAXSIZ;
1445 nmp->nm_verfstr = verf_str;
1446 nmp->nm_verflen = *verf_len;
1447 nmp->nm_authuid = kauth_cred_getuid(cred);
1448 nmp->nm_state &= ~NFSSTA_WAITAUTH;
1449 wakeup((caddr_t)&nmp->nm_authstr);
1450
1451 /*
1452 * And wait for mount_nfs to do its stuff.
1453 */
1454 while ((nmp->nm_state & NFSSTA_HASAUTH) == 0 && error == 0) {
1455 (void) tsleep((caddr_t)&nmp->nm_authlen, PSOCK,
1456 "nfsauth2", 2 * hz);
1457 error = nfs_sigintr(nmp, rep, rep->r_procp);
1458 }
1459 if (nmp->nm_state & NFSSTA_AUTHERR) {
1460 nmp->nm_state &= ~NFSSTA_AUTHERR;
1461 error = EAUTH;
1462 }
1463 if (error)
1464 FREE(*auth_str, M_TEMP);
1465 else {
1466 *auth_len = nmp->nm_authlen;
1467 *verf_len = nmp->nm_verflen;
1468 bcopy((caddr_t)nmp->nm_key, (caddr_t)key, sizeof (key));
1469 }
1470 nmp->nm_state &= ~NFSSTA_HASAUTH;
1471 nmp->nm_state |= NFSSTA_WAITAUTH;
1472 if (nmp->nm_state & NFSSTA_WANTAUTH) {
1473 nmp->nm_state &= ~NFSSTA_WANTAUTH;
1474 wakeup((caddr_t)&nmp->nm_authtype);
1475 }
1476 return (error);
1477 }
1478
1479 /*
1480 * Get a nickname authenticator and verifier.
1481 */
1482 int
1483 nfs_getnickauth(
1484 struct nfsmount *nmp,
1485 kauth_cred_t cred,
1486 char **auth_str,
1487 int *auth_len,
1488 char *verf_str,
1489 __unused int verf_len)
1490 {
1491 register struct nfsuid *nuidp;
1492 register u_long *nickp, *verfp;
1493 struct timeval ktvin, ktvout, now;
1494
1495 #if DIAGNOSTIC
1496 if (verf_len < (4 * NFSX_UNSIGNED))
1497 panic("nfs_getnickauth verf too small");
1498 #endif
1499 for (nuidp = NMUIDHASH(nmp, kauth_cred_getuid(cred))->lh_first;
1500 nuidp != 0; nuidp = nuidp->nu_hash.le_next) {
1501 if (kauth_cred_getuid(nuidp->nu_cr) == kauth_cred_getuid(cred))
1502 break;
1503 }
1504 microtime(&now);
1505 if (!nuidp || nuidp->nu_expire < now.tv_sec)
1506 return (EACCES);
1507
1508 MALLOC(nickp, u_long *, 2 * NFSX_UNSIGNED, M_TEMP, M_WAITOK);
1509 if (!nickp)
1510 return (ENOMEM);
1511
1512 /*
1513 * Move to the end of the lru list (end of lru == most recently used).
1514 */
1515 TAILQ_REMOVE(&nmp->nm_uidlruhead, nuidp, nu_lru);
1516 TAILQ_INSERT_TAIL(&nmp->nm_uidlruhead, nuidp, nu_lru);
1517
1518 *nickp++ = txdr_unsigned(RPCAKN_NICKNAME);
1519 *nickp = txdr_unsigned(nuidp->nu_nickname);
1520 *auth_str = (char *)nickp;
1521 *auth_len = 2 * NFSX_UNSIGNED;
1522
1523 /*
1524 * Now we must encrypt the verifier and package it up.
1525 */
1526 verfp = (u_long *)verf_str;
1527 *verfp++ = txdr_unsigned(RPCAKN_NICKNAME);
1528 microtime(&now);
1529 if (now.tv_sec > nuidp->nu_timestamp.tv_sec ||
1530 (now.tv_sec == nuidp->nu_timestamp.tv_sec &&
1531 now.tv_usec > nuidp->nu_timestamp.tv_usec))
1532 nuidp->nu_timestamp = now;
1533 else
1534 nuidp->nu_timestamp.tv_usec++;
1535 ktvin.tv_sec = txdr_unsigned(nuidp->nu_timestamp.tv_sec);
1536 ktvin.tv_usec = txdr_unsigned(nuidp->nu_timestamp.tv_usec);
1537
1538 /*
1539 * Now encrypt the timestamp verifier in ecb mode using the session
1540 * key.
1541 */
1542 #if NFSKERB
1543 XXX
1544 #endif
1545
1546 *verfp++ = ktvout.tv_sec;
1547 *verfp++ = ktvout.tv_usec;
1548 *verfp = 0;
1549 return (0);
1550 }
1551
1552 /*
1553 * Save the current nickname in a hash list entry on the mount point.
1554 */
1555 int
1556 nfs_savenickauth(nmp, cred, len, key, mdp, dposp, mrep)
1557 register struct nfsmount *nmp;
1558 kauth_cred_t cred;
1559 int len;
1560 NFSKERBKEY_T key;
1561 mbuf_t *mdp;
1562 char **dposp;
1563 mbuf_t mrep;
1564 {
1565 register struct nfsuid *nuidp;
1566 register u_long *tl;
1567 register long t1;
1568 mbuf_t md = *mdp;
1569 struct timeval ktvin, ktvout, now;
1570 u_long nick;
1571 char *dpos = *dposp, *cp2;
1572 int deltasec, error = 0;
1573
1574 if (len == (3 * NFSX_UNSIGNED)) {
1575 nfsm_dissect(tl, u_long *, 3 * NFSX_UNSIGNED);
1576 ktvin.tv_sec = *tl++;
1577 ktvin.tv_usec = *tl++;
1578 nick = fxdr_unsigned(u_long, *tl);
1579
1580 /*
1581 * Decrypt the timestamp in ecb mode.
1582 */
1583 #if NFSKERB
1584 XXX
1585 #endif
1586 ktvout.tv_sec = fxdr_unsigned(long, ktvout.tv_sec);
1587 ktvout.tv_usec = fxdr_unsigned(long, ktvout.tv_usec);
1588 microtime(&now);
1589 deltasec = now.tv_sec - ktvout.tv_sec;
1590 if (deltasec < 0)
1591 deltasec = -deltasec;
1592 /*
1593 * If ok, add it to the hash list for the mount point.
1594 */
1595 if (deltasec <= NFS_KERBCLOCKSKEW) {
1596 if (nmp->nm_numuids < nuidhash_max) {
1597 nmp->nm_numuids++;
1598 MALLOC_ZONE(nuidp, struct nfsuid *,
1599 sizeof (struct nfsuid),
1600 M_NFSUID, M_WAITOK);
1601 } else {
1602 nuidp = NULL;
1603 }
1604 if (!nuidp) {
1605 nuidp = nmp->nm_uidlruhead.tqh_first;
1606 if (!nuidp) {
1607 error = ENOMEM;
1608 goto nfsmout;
1609 }
1610 LIST_REMOVE(nuidp, nu_hash);
1611 TAILQ_REMOVE(&nmp->nm_uidlruhead, nuidp, nu_lru);
1612 kauth_cred_rele(nuidp->nu_cr);
1613 }
1614 nuidp->nu_flag = 0;
1615 kauth_cred_ref(cred);
1616 nuidp->nu_cr = cred;
1617 nuidp->nu_expire = now.tv_sec + NFS_KERBTTL;
1618 nuidp->nu_timestamp = ktvout;
1619 nuidp->nu_nickname = nick;
1620 bcopy(key, nuidp->nu_key, sizeof (key));
1621 TAILQ_INSERT_TAIL(&nmp->nm_uidlruhead, nuidp, nu_lru);
1622 LIST_INSERT_HEAD(NMUIDHASH(nmp, kauth_cred_getuid(cred)),
1623 nuidp, nu_hash);
1624 }
1625 } else
1626 nfsm_adv(nfsm_rndup(len));
1627 nfsmout:
1628 *mdp = md;
1629 *dposp = dpos;
1630 return (error);
1631 }
1632
1633 #ifndef NFS_NOSERVER
1634
1635 /*
1636 * cleanup and release a server socket structure.
1637 */
1638 void
1639 nfsrv_slpfree(struct nfssvc_sock *slp)
1640 {
1641 struct nfsuid *nuidp, *nnuidp;
1642 struct nfsrv_descript *nwp, *nnwp;
1643
1644 if (slp->ns_so) {
1645 sock_release(slp->ns_so);
1646 slp->ns_so = NULL;
1647 }
1648 if (slp->ns_nam)
1649 mbuf_free(slp->ns_nam);
1650 if (slp->ns_raw)
1651 mbuf_freem(slp->ns_raw);
1652 if (slp->ns_rec)
1653 mbuf_freem(slp->ns_rec);
1654 slp->ns_nam = slp->ns_raw = slp->ns_rec = NULL;
1655
1656 for (nuidp = slp->ns_uidlruhead.tqh_first; nuidp != 0;
1657 nuidp = nnuidp) {
1658 nnuidp = nuidp->nu_lru.tqe_next;
1659 LIST_REMOVE(nuidp, nu_hash);
1660 TAILQ_REMOVE(&slp->ns_uidlruhead, nuidp, nu_lru);
1661 if (nuidp->nu_flag & NU_NAM)
1662 mbuf_freem(nuidp->nu_nam);
1663 kauth_cred_rele(nuidp->nu_cr);
1664 FREE_ZONE((caddr_t)nuidp,
1665 sizeof (struct nfsuid), M_NFSUID);
1666 }
1667
1668 for (nwp = slp->ns_tq.lh_first; nwp; nwp = nnwp) {
1669 nnwp = nwp->nd_tq.le_next;
1670 LIST_REMOVE(nwp, nd_tq);
1671 if (nwp->nd_cr)
1672 kauth_cred_rele(nwp->nd_cr);
1673 FREE_ZONE((caddr_t)nwp, sizeof *nwp, M_NFSRVDESC);
1674 }
1675 LIST_INIT(&slp->ns_tq);
1676
1677 lck_rw_destroy(&slp->ns_rwlock, nfs_slp_rwlock_group);
1678 lck_mtx_destroy(&slp->ns_wgmutex, nfs_slp_mutex_group);
1679 FREE(slp, M_NFSSVC);
1680 }
1681
1682 /*
1683  * Dereference a server socket structure. If it has no more references and
1684 * is no longer valid, you can throw it away.
1685 */
1686 void
1687 nfsrv_slpderef(struct nfssvc_sock *slp)
1688 {
1689 struct timeval now;
1690
1691 lck_mtx_lock(nfsd_mutex);
1692 lck_rw_lock_exclusive(&slp->ns_rwlock);
1693 slp->ns_sref--;
1694 if (slp->ns_sref || (slp->ns_flag & SLP_VALID)) {
1695 lck_rw_done(&slp->ns_rwlock);
1696 lck_mtx_unlock(nfsd_mutex);
1697 return;
1698 }
1699
1700 /* queue the socket up for deletion */
1701 microuptime(&now);
1702 slp->ns_timestamp = now.tv_sec;
1703 TAILQ_REMOVE(&nfssvc_sockhead, slp, ns_chain);
1704 TAILQ_INSERT_TAIL(&nfssvc_deadsockhead, slp, ns_chain);
1705 lck_rw_done(&slp->ns_rwlock);
1706 if (slp == nfs_udpsock)
1707 nfs_udpsock = NULL;
1708 #if ISO
1709 else if (slp == nfs_cltpsock)
1710 nfs_cltpsock = NULL;
1711 #endif
1712 lck_mtx_unlock(nfsd_mutex);
1713 }
1714
1715
1716 /*
1717 * Initialize the data structures for the server.
1718 * Handshake with any new nfsds starting up to avoid any chance of
1719 * corruption.
1720 */
1721 void
1722 nfsrv_init(terminating)
1723 int terminating;
1724 {
1725 struct nfssvc_sock *slp, *nslp;
1726 struct timeval now;
1727
1728 if (terminating) {
1729 microuptime(&now);
1730 for (slp = TAILQ_FIRST(&nfssvc_sockhead); slp != 0; slp = nslp) {
1731 nslp = TAILQ_NEXT(slp, ns_chain);
1732 if (slp->ns_flag & SLP_VALID) {
1733 lck_rw_lock_exclusive(&slp->ns_rwlock);
1734 nfsrv_zapsock(slp);
1735 lck_rw_done(&slp->ns_rwlock);
1736 }
1737 /* queue the socket up for deletion */
1738 slp->ns_timestamp = now.tv_sec;
1739 TAILQ_REMOVE(&nfssvc_sockhead, slp, ns_chain);
1740 TAILQ_INSERT_TAIL(&nfssvc_deadsockhead, slp, ns_chain);
1741 if (slp == nfs_udpsock)
1742 nfs_udpsock = NULL;
1743 #if ISO
1744 else if (slp == nfs_cltpsock)
1745 nfs_cltpsock = NULL;
1746 #endif
1747 }
1748 nfsrv_cleancache(); /* And clear out server cache */
1749 /* XXX Revisit when enabling WebNFS */
1750 #ifdef WEBNFS_ENABLED
1751 } else
1752 nfs_pub.np_valid = 0;
1753 #else
1754 }
1755 #endif
1756
1757 if (!terminating) {
1758 TAILQ_INIT(&nfssvc_sockhead);
1759 TAILQ_INIT(&nfssvc_deadsockhead);
1760 TAILQ_INIT(&nfsd_head);
1761 nfsd_head_flag &= ~NFSD_CHECKSLP;
1762 }
1763
1764 MALLOC(nfs_udpsock, struct nfssvc_sock *, sizeof(struct nfssvc_sock),
1765 M_NFSSVC, M_WAITOK);
1766 if (nfs_udpsock) {
1767 bzero((caddr_t)nfs_udpsock, sizeof (struct nfssvc_sock));
1768 lck_rw_init(&nfs_udpsock->ns_rwlock, nfs_slp_rwlock_group, nfs_slp_lock_attr);
1769 lck_mtx_init(&nfs_udpsock->ns_wgmutex, nfs_slp_mutex_group, nfs_slp_lock_attr);
1770 TAILQ_INIT(&nfs_udpsock->ns_uidlruhead);
1771 TAILQ_INSERT_HEAD(&nfssvc_sockhead, nfs_udpsock, ns_chain);
1772 } else {
1773 printf("nfsrv_init() failed to allocate UDP socket\n");
1774 }
1775
1776 #if ISO
1777 MALLOC(nfs_cltpsock, struct nfssvc_sock *, sizeof(struct nfssvc_sock),
1778 M_NFSSVC, M_WAITOK);
1779 if (nfs_cltpsock) {
1780 bzero((caddr_t)nfs_cltpsock, sizeof (struct nfssvc_sock));
1781 lck_rw_init(&nfs_cltpsock->ns_rwlock, nfs_slp_rwlock_group, nfs_slp_lock_attr);
1782 lck_mtx_init(&nfs_cltpsock->ns_wgmutex, nfs_slp_mutex_group, nfs_slp_lock_attr);
1783 TAILQ_INIT(&nfs_cltpsock->ns_uidlruhead);
1784 TAILQ_INSERT_TAIL(&nfssvc_sockhead, nfs_cltpsock, ns_chain);
1785 } else {
1786 printf("nfsrv_init() failed to allocate CLTP socket\n");
1787 }
1788 #endif
1789 }
1790
1791 /*
1792 * Add entries to the server monitor log.
1793 */
1794 static void
1795 nfsd_rt(sotype, nd, cacherep)
1796 int sotype;
1797 register struct nfsrv_descript *nd;
1798 int cacherep;
1799 {
1800 register struct drt *rt;
1801 struct timeval now;
1802
1803 rt = &nfsdrt.drt[nfsdrt.pos];
1804 if (cacherep == RC_DOIT)
1805 rt->flag = 0;
1806 else if (cacherep == RC_REPLY)
1807 rt->flag = DRT_CACHEREPLY;
1808 else
1809 rt->flag = DRT_CACHEDROP;
1810 if (sotype == SOCK_STREAM)
1811 rt->flag |= DRT_TCP;
1812 else if (nd->nd_flag & ND_NFSV3)
1813 rt->flag |= DRT_NFSV3;
1814 rt->proc = nd->nd_procnum;
1815 if (((struct sockaddr *)mbuf_data(nd->nd_nam))->sa_family == AF_INET)
1816 rt->ipadr = ((struct sockaddr_in *)mbuf_data(nd->nd_nam))->sin_addr.s_addr;
1817 else
1818 rt->ipadr = INADDR_ANY;
1819 microuptime(&now);
1820 rt->resptime = ((now.tv_sec - nd->nd_starttime.tv_sec) * 1000000) +
1821 (now.tv_usec - nd->nd_starttime.tv_usec);
1822 microtime(&rt->tstamp); // XXX unused
1823 nfsdrt.pos = (nfsdrt.pos + 1) % NFSRTTLOGSIZ;
1824 }
1825 #endif /* NFS_NOSERVER */