1 /*
2 * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * The contents of this file constitute Original Code as defined in and
7 * are subject to the Apple Public Source License Version 1.1 (the
8 * "License"). You may not use this file except in compliance with the
9 * License. Please obtain a copy of the License at
10 * http://www.apple.com/publicsource and read it before using this file.
11 *
12 * This Original Code and all software distributed under the License are
13 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
14 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
15 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
17 * License for the specific language governing rights and limitations
18 * under the License.
19 *
20 * @APPLE_LICENSE_HEADER_END@
21 */
22 /* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
23 /*
24 * Copyright (c) 1989, 1993
25 * The Regents of the University of California. All rights reserved.
26 *
27 * This code is derived from software contributed to Berkeley by
28 * Rick Macklem at The University of Guelph.
29 *
30 * Redistribution and use in source and binary forms, with or without
31 * modification, are permitted provided that the following conditions
32 * are met:
33 * 1. Redistributions of source code must retain the above copyright
34 * notice, this list of conditions and the following disclaimer.
35 * 2. Redistributions in binary form must reproduce the above copyright
36 * notice, this list of conditions and the following disclaimer in the
37 * documentation and/or other materials provided with the distribution.
38 * 3. All advertising materials mentioning features or use of this software
39 * must display the following acknowledgement:
40 * This product includes software developed by the University of
41 * California, Berkeley and its contributors.
42 * 4. Neither the name of the University nor the names of its contributors
43 * may be used to endorse or promote products derived from this software
44 * without specific prior written permission.
45 *
46 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
47 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
48 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
49 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
50 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
51 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
52 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
53 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
54 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
55 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
56 * SUCH DAMAGE.
57 *
58 * @(#)nfs_syscalls.c 8.5 (Berkeley) 3/30/95
59 * FreeBSD-Id: nfs_syscalls.c,v 1.32 1997/11/07 08:53:25 phk Exp $
60 */
61
62 #include <sys/param.h>
63 #include <sys/systm.h>
64 /* XXX CSM 11/25/97 FreeBSD's generated syscall prototypes */
65 #ifdef notyet
66 #include <sys/sysproto.h>
67 #endif
68 #include <sys/kernel.h>
69 #include <sys/file_internal.h>
70 #include <sys/filedesc.h>
71 #include <sys/stat.h>
72 #include <sys/vnode_internal.h>
73 #include <sys/mount_internal.h>
74 #include <sys/proc_internal.h> /* for fdflags */
75 #include <sys/kauth.h>
76 #include <sys/sysctl.h>
77 #include <sys/ubc.h>
78 #include <sys/uio.h>
79 #include <sys/malloc.h>
80 #include <sys/kpi_mbuf.h>
81 #include <sys/socket.h>
82 #include <sys/socketvar.h>
83 #include <sys/domain.h>
84 #include <sys/protosw.h>
85 #include <sys/fcntl.h>
86 #include <sys/lockf.h>
87 #include <sys/syslog.h>
88 #include <sys/user.h>
89 #include <sys/sysproto.h>
90 #include <sys/kpi_socket.h>
91 #include <libkern/OSAtomic.h>
92
93 #include <bsm/audit_kernel.h>
94
95 #include <netinet/in.h>
96 #include <netinet/tcp.h>
97 #if ISO
98 #include <netiso/iso.h>
99 #endif
100 #include <nfs/xdr_subs.h>
101 #include <nfs/rpcv2.h>
102 #include <nfs/nfsproto.h>
103 #include <nfs/nfs.h>
104 #include <nfs/nfsm_subs.h>
105 #include <nfs/nfsrvcache.h>
106 #include <nfs/nfsmount.h>
107 #include <nfs/nfsnode.h>
108 #include <nfs/nfsrtt.h>
109 #include <nfs/nfs_lock.h>
110
111 extern void unix_syscall_return(int);
112
113 /* Global defs. */
114 extern int (*nfsrv3_procs[NFS_NPROCS])(struct nfsrv_descript *nd,
115 struct nfssvc_sock *slp,
116 proc_t procp,
117 mbuf_t *mreqp);
118 extern int nfs_numasync;
119 extern int nfs_ioddelwri;
120 extern int nfsrtton;
121 extern struct nfsstats nfsstats;
122 extern int nfsrvw_procrastinate;
123 extern int nfsrvw_procrastinate_v3;
124
125 struct nfssvc_sock *nfs_udpsock, *nfs_cltpsock;
126 static int nuidhash_max = NFS_MAXUIDHASH;
127
128 static void nfsrv_zapsock(struct nfssvc_sock *slp);
129 static int nfssvc_iod(proc_t);
130 static int nfskerb_clientd(struct nfsmount *, struct nfsd_cargs *, int, user_addr_t, proc_t);
131
132 static int nfs_asyncdaemon[NFS_MAXASYNCDAEMON];
133
134 #ifndef NFS_NOSERVER
135 int nfsd_waiting = 0;
136 static struct nfsdrt nfsdrt;
137 int nfs_numnfsd = 0;
138 static void nfsd_rt(int sotype, struct nfsrv_descript *nd, int cacherep);
139 static int nfssvc_addsock(socket_t, mbuf_t, proc_t);
140 static int nfssvc_nfsd(struct nfsd_srvargs *,user_addr_t, proc_t);
141 static int nfssvc_export(user_addr_t, proc_t);
142
143 static int nfs_privport = 0;
144 /* XXX CSM 11/25/97 Upgrade sysctl.h someday */
145 #ifdef notyet
146 SYSCTL_INT(_vfs_nfs, NFS_NFSPRIVPORT, nfs_privport, CTLFLAG_RW, &nfs_privport, 0, "");
147 SYSCTL_INT(_vfs_nfs, OID_AUTO, gatherdelay, CTLFLAG_RW, &nfsrvw_procrastinate, 0, "");
148 SYSCTL_INT(_vfs_nfs, OID_AUTO, gatherdelay_v3, CTLFLAG_RW, &nfsrvw_procrastinate_v3, 0, "");
149 #endif
150
151 /*
152 * NFS server system calls
153 * getfh() lives here too, but maybe should move to kern/vfs_syscalls.c
154 */
155
156 /*
157 * Get file handle system call
158 */
159 int
160 getfh(proc_t p, struct getfh_args *uap, __unused int *retval)
161 {
162 vnode_t vp;
163 struct nfs_filehandle nfh;
164 int error;
165 struct nameidata nd;
166 struct vfs_context context;
167 char path[MAXPATHLEN], *ptr;
168 u_int pathlen;
169 struct nfs_exportfs *nxfs;
170 struct nfs_export *nx;
171
172 context.vc_proc = p;
173 context.vc_ucred = kauth_cred_get();
174
175 /*
176 * Must be super user
177 */
178 error = proc_suser(p);
179 if (error)
180 return (error);
181
182 error = copyinstr(uap->fname, path, MAXPATHLEN, (size_t *)&pathlen);
183 if (error)
184 return (error);
185
186 NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF | AUDITVNPATH1,
187 UIO_SYSSPACE, path, &context);
188 error = namei(&nd);
189 if (error)
190 return (error);
191 nameidone(&nd);
192
193 vp = nd.ni_vp;
194
195 // find exportfs that matches f_mntonname
196 lck_rw_lock_shared(&nfs_export_rwlock);
197 ptr = vnode_mount(vp)->mnt_vfsstat.f_mntonname;
198 LIST_FOREACH(nxfs, &nfs_exports, nxfs_next) {
199 if (!strcmp(nxfs->nxfs_path, ptr))
200 break;
201 }
202 if (!nxfs || strncmp(nxfs->nxfs_path, path, strlen(nxfs->nxfs_path))) {
203 error = EINVAL;
204 goto out;
205 }
206 // find export that best matches remainder of path
207 ptr = path + strlen(nxfs->nxfs_path);
208 while (*ptr && (*ptr == '/'))
209 ptr++;
210 LIST_FOREACH(nx, &nxfs->nxfs_exports, nx_next) {
211 int len = strlen(nx->nx_path);
212 if (len == 0) // we've hit the export entry for the root directory
213 break;
214 if (!strncmp(nx->nx_path, ptr, len))
215 break;
216 }
217 if (!nx) {
218 error = EINVAL;
219 goto out;
220 }
221
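	/*
	 * Build the exportable file handle: a fixed header (version, file
	 * system id, export id, flags) followed by the file id returned by
	 * the underlying file system via VFS_VPTOFH().  The final handle
	 * length is the header size plus the fid length.
	 */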
222 bzero(&nfh, sizeof(nfh));
223 nfh.nfh_xh.nxh_version = NFS_FH_VERSION;
224 nfh.nfh_xh.nxh_fsid = nxfs->nxfs_id;
225 nfh.nfh_xh.nxh_expid = nx->nx_id;
226 nfh.nfh_xh.nxh_flags = 0;
227 nfh.nfh_xh.nxh_reserved = 0;
228 nfh.nfh_len = NFS_MAX_FID_SIZE;
229 error = VFS_VPTOFH(vp, &nfh.nfh_len, &nfh.nfh_fid[0], NULL);
230 if (nfh.nfh_len > (int)NFS_MAX_FID_SIZE)
231 error = EOVERFLOW;
232 nfh.nfh_xh.nxh_fidlen = nfh.nfh_len;
233 nfh.nfh_len += sizeof(nfh.nfh_xh);
234
235 out:
236 lck_rw_done(&nfs_export_rwlock);
237 vnode_put(vp);
238 if (error)
239 return (error);
240 error = copyout((caddr_t)&nfh, uap->fhp, sizeof(nfh));
241 return (error);
242 }
243
244 #endif /* NFS_NOSERVER */
245
246 extern struct fileops vnops;
247
248 /*
 249 * syscall for the rpc.lockd to use to translate an NFS file handle into
250 * an open descriptor.
251 *
252 * warning: do not remove the suser() call or this becomes one giant
253 * security hole.
254 */
255 int
256 fhopen( proc_t p,
257 struct fhopen_args *uap,
258 register_t *retval)
259 {
260 vnode_t vp;
261 struct nfs_filehandle nfh;
262 struct nfs_export *nx;
263 struct nfs_export_options *nxo;
264 struct flock lf;
265 struct fileproc *fp, *nfp;
266 int fmode, error, type;
267 int indx;
268 kauth_cred_t cred = proc_ucred(p);
269 struct vfs_context context;
270 kauth_action_t action;
271
272 context.vc_proc = p;
273 context.vc_ucred = cred;
274
275 /*
276 * Must be super user
277 */
278 error = suser(cred, 0);
279 if (error)
280 return (error);
281
282 fmode = FFLAGS(uap->flags);
283 /* why not allow a non-read/write open for our lockd? */
284 if (((fmode & (FREAD | FWRITE)) == 0) || (fmode & O_CREAT))
285 return (EINVAL);
286
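	/*
	 * Copy the file handle in from user space in two steps: first just
	 * the length so it can be sanity checked, then the length word plus
	 * that many bytes of handle data.
	 */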
287 error = copyin(uap->u_fhp, &nfh.nfh_len, sizeof(nfh.nfh_len));
288 if (error)
289 return (error);
290 if ((nfh.nfh_len < (int)sizeof(struct nfs_exphandle)) ||
291 (nfh.nfh_len > (int)NFS_MAX_FH_SIZE))
292 return (EINVAL);
293 error = copyin(uap->u_fhp, &nfh, sizeof(nfh.nfh_len) + nfh.nfh_len);
294 if (error)
295 return (error);
296
297 lck_rw_lock_shared(&nfs_export_rwlock);
298 /* now give me my vnode, it gets returned to me with a reference */
299 error = nfsrv_fhtovp(&nfh, NULL, TRUE, &vp, &nx, &nxo);
300 lck_rw_done(&nfs_export_rwlock);
301 if (error)
302 return (error);
303
304 /*
305 * From now on we have to make sure not
306 * to forget about the vnode.
307 * Any error that causes an abort must vnode_put(vp).
308 * Just set error = err and 'goto bad;'.
309 */
310
311 /*
312 * from vn_open
313 */
314 if (vnode_vtype(vp) == VSOCK) {
315 error = EOPNOTSUPP;
316 goto bad;
317 }
318
319 /* disallow write operations on directories */
320 if (vnode_isdir(vp) && (fmode & (FWRITE | O_TRUNC))) {
321 error = EISDIR;
322 goto bad;
323 }
324
325 /* compute action to be authorized */
326 action = 0;
327 if (fmode & FREAD)
328 action |= KAUTH_VNODE_READ_DATA;
329 if (fmode & (FWRITE | O_TRUNC))
330 action |= KAUTH_VNODE_WRITE_DATA;
331 if ((error = vnode_authorize(vp, NULL, action, &context)) != 0)
332 goto bad;
333
334 if ((error = VNOP_OPEN(vp, fmode, &context)))
335 goto bad;
336 if ((error = vnode_ref_ext(vp, fmode)))
337 goto bad;
338
339 /*
340 * end of vn_open code
341 */
342
343 // starting here... error paths should call vn_close/vnode_put
344 if ((error = falloc(p, &nfp, &indx)) != 0) {
345 vn_close(vp, fmode & FMASK, cred, p);
346 goto bad;
347 }
348 fp = nfp;
349
350 fp->f_fglob->fg_flag = fmode & FMASK;
351 fp->f_fglob->fg_type = DTYPE_VNODE;
352 fp->f_fglob->fg_ops = &vnops;
353 fp->f_fglob->fg_data = (caddr_t)vp;
354
355 // XXX do we really need to support this with fhopen()?
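	/*
	 * O_EXLOCK/O_SHLOCK request a whole-file advisory lock (flock-style,
	 * F_FLOCK); the lock request blocks unless FNONBLOCK was given.
	 */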
356 if (fmode & (O_EXLOCK | O_SHLOCK)) {
357 lf.l_whence = SEEK_SET;
358 lf.l_start = 0;
359 lf.l_len = 0;
360 if (fmode & O_EXLOCK)
361 lf.l_type = F_WRLCK;
362 else
363 lf.l_type = F_RDLCK;
364 type = F_FLOCK;
365 if ((fmode & FNONBLOCK) == 0)
366 type |= F_WAIT;
367 if ((error = VNOP_ADVLOCK(vp, (caddr_t)fp->f_fglob, F_SETLK, &lf, type, &context))) {
368 vn_close(vp, fp->f_fglob->fg_flag, fp->f_fglob->fg_cred, p);
369 fp_free(p, indx, fp);
370 return (error);
371 }
372 fp->f_fglob->fg_flag |= FHASLOCK;
373 }
374
375 vnode_put(vp);
376
377 proc_fdlock(p);
378 procfdtbl_releasefd(p, indx, NULL);
379 fp_drop(p, indx, fp, 1);
380 proc_fdunlock(p);
381
382 *retval = indx;
383 return (0);
384
385 bad:
386 vnode_put(vp);
387 return (error);
388 }
389
390 /*
 391 * NFS server pseudo system call for the nfsds.
392 * Based on the flag value it either:
393 * - adds a socket to the selection list
394 * - remains in the kernel as an nfsd
395 * - remains in the kernel as an nfsiod
396 */
397 int
398 nfssvc(proc_t p, struct nfssvc_args *uap, __unused int *retval)
399 {
400 #ifndef NFS_NOSERVER
401 struct nameidata nd;
402 mbuf_t nam;
403 struct user_nfsd_args user_nfsdarg;
404 struct nfsd_srvargs nfsd_srvargs, *nsd = &nfsd_srvargs;
405 struct nfsd_cargs ncd;
406 struct nfsd *nfsd;
407 struct nfssvc_sock *slp;
408 struct nfsuid *nuidp;
409 struct nfsmount *nmp;
410 struct timeval now;
411 socket_t so;
412 struct vfs_context context;
413 struct ucred temp_cred;
414 #endif /* NFS_NOSERVER */
415 int error;
416
417 AUDIT_ARG(cmd, uap->flag);
418
419 /*
420 * Must be super user
421 */
422 error = proc_suser(p);
423 if(error)
424 return (error);
425 if (uap->flag & NFSSVC_BIOD)
426 error = nfssvc_iod(p);
427 #ifdef NFS_NOSERVER
428 else
429 error = ENXIO;
430 #else /* !NFS_NOSERVER */
431 else if (uap->flag & NFSSVC_MNTD) {
432
433 context.vc_proc = p;
434 context.vc_ucred = kauth_cred_get();
435
436 error = copyin(uap->argp, (caddr_t)&ncd, sizeof (ncd));
437 if (error)
438 return (error);
439
440 NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF | AUDITVNPATH1,
441 (proc_is64bit(p) ? UIO_USERSPACE64 : UIO_USERSPACE32),
442 CAST_USER_ADDR_T(ncd.ncd_dirp), &context);
443 error = namei(&nd);
444 if (error)
445 return (error);
446 nameidone(&nd);
447
448 if (vnode_isvroot(nd.ni_vp) == 0)
449 error = EINVAL;
450 nmp = VFSTONFS(vnode_mount(nd.ni_vp));
451 vnode_put(nd.ni_vp);
452 if (error)
453 return (error);
454
455 if ((nmp->nm_state & NFSSTA_MNTD) &&
456 (uap->flag & NFSSVC_GOTAUTH) == 0)
457 return (0);
458 nmp->nm_state |= NFSSTA_MNTD;
459 error = nfskerb_clientd(nmp, &ncd, uap->flag, uap->argp, p);
460 } else if (uap->flag & NFSSVC_ADDSOCK) {
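		/*
		 * Copy in the nfsd_args, munging the 32-bit layout into the
		 * LP64-safe user_nfsd_args form when the caller is not a
		 * 64-bit process.
		 */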
461 if (IS_64BIT_PROCESS(p)) {
462 error = copyin(uap->argp, (caddr_t)&user_nfsdarg, sizeof(user_nfsdarg));
463 } else {
464 struct nfsd_args tmp_args;
465 error = copyin(uap->argp, (caddr_t)&tmp_args, sizeof(tmp_args));
466 if (error == 0) {
467 user_nfsdarg.sock = tmp_args.sock;
468 user_nfsdarg.name = CAST_USER_ADDR_T(tmp_args.name);
469 user_nfsdarg.namelen = tmp_args.namelen;
470 }
471 }
472 if (error)
473 return (error);
474 /* get the socket */
475 error = file_socket(user_nfsdarg.sock, &so);
476 if (error)
477 return (error);
478 /* Get the client address for connected sockets. */
479 if (user_nfsdarg.name == USER_ADDR_NULL || user_nfsdarg.namelen == 0) {
480 nam = NULL;
481 } else {
482 error = sockargs(&nam, user_nfsdarg.name, user_nfsdarg.namelen, MBUF_TYPE_SONAME);
483 if (error) {
484 /* drop the iocount file_socket() grabbed on the file descriptor */
485 file_drop(user_nfsdarg.sock);
486 return (error);
487 }
488 }
489 /*
490 * nfssvc_addsock() will grab a retain count on the socket
491 * to keep the socket from being closed when nfsd closes its
492 * file descriptor for it.
493 */
494 error = nfssvc_addsock(so, nam, p);
495 /* drop the iocount file_socket() grabbed on the file descriptor */
496 file_drop(user_nfsdarg.sock);
497 } else if (uap->flag & NFSSVC_NFSD) {
498 error = copyin(uap->argp, (caddr_t)nsd, sizeof (*nsd));
499 if (error)
500 return (error);
501
502 if ((uap->flag & NFSSVC_AUTHIN) && ((nfsd = nsd->nsd_nfsd)) &&
503 (nfsd->nfsd_slp->ns_flag & SLP_VALID)) {
504 slp = nfsd->nfsd_slp;
505
506 /*
507 * First check to see if another nfsd has already
508 * added this credential.
509 */
510 for (nuidp = NUIDHASH(slp,nsd->nsd_cr.cr_uid)->lh_first;
511 nuidp != 0; nuidp = nuidp->nu_hash.le_next) {
512 if (kauth_cred_getuid(nuidp->nu_cr) == nsd->nsd_cr.cr_uid &&
513 (!nfsd->nfsd_nd->nd_nam2 ||
514 netaddr_match(NU_NETFAM(nuidp),
515 &nuidp->nu_haddr, nfsd->nfsd_nd->nd_nam2)))
516 break;
517 }
518 if (nuidp) {
519 nfsrv_setcred(nuidp->nu_cr,nfsd->nfsd_nd->nd_cr);
520 nfsd->nfsd_nd->nd_flag |= ND_KERBFULL;
521 } else {
522 /*
523 * Nope, so we will.
524 */
525 if (slp->ns_numuids < nuidhash_max) {
526 slp->ns_numuids++;
527 nuidp = (struct nfsuid *)
528 _MALLOC_ZONE(sizeof (struct nfsuid),
529 M_NFSUID, M_WAITOK);
530 } else
531 nuidp = (struct nfsuid *)0;
532 if ((slp->ns_flag & SLP_VALID) == 0) {
533 if (nuidp) {
534 FREE_ZONE((caddr_t)nuidp,
535 sizeof (struct nfsuid), M_NFSUID);
536 slp->ns_numuids--;
537 }
538 } else {
539 if (nuidp == (struct nfsuid *)0) {
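					/* no free slot: recycle the least recently used cached credential entry for this socket */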
540 nuidp = slp->ns_uidlruhead.tqh_first;
541 if (!nuidp)
542 return (ENOMEM);
543 LIST_REMOVE(nuidp, nu_hash);
544 TAILQ_REMOVE(&slp->ns_uidlruhead, nuidp,
545 nu_lru);
546 if (nuidp->nu_flag & NU_NAM)
547 mbuf_freem(nuidp->nu_nam);
548 kauth_cred_rele(nuidp->nu_cr);
549 }
550 nuidp->nu_flag = 0;
551
552 if (nsd->nsd_cr.cr_ngroups > NGROUPS)
553 nsd->nsd_cr.cr_ngroups = NGROUPS;
554
555 nfsrv_setcred(&nsd->nsd_cr, &temp_cred);
556 nuidp->nu_cr = kauth_cred_create(&temp_cred);
557
558 if (!nuidp->nu_cr) {
559 FREE_ZONE(nuidp, sizeof(struct nfsuid), M_NFSUID);
560 slp->ns_numuids--;
561 return (ENOMEM);
562 }
563 nuidp->nu_timestamp = nsd->nsd_timestamp;
564 microtime(&now);
565 nuidp->nu_expire = now.tv_sec + nsd->nsd_ttl;
566 /*
567 * and save the session key in nu_key.
568 */
569 bcopy(nsd->nsd_key, nuidp->nu_key,
570 sizeof (nsd->nsd_key));
571 if (nfsd->nfsd_nd->nd_nam2) {
572 struct sockaddr_in *saddr;
573
574 saddr = mbuf_data(nfsd->nfsd_nd->nd_nam2);
575 switch (saddr->sin_family) {
576 case AF_INET:
577 nuidp->nu_flag |= NU_INETADDR;
578 nuidp->nu_inetaddr =
579 saddr->sin_addr.s_addr;
580 break;
581 case AF_ISO:
582 default:
583 nuidp->nu_flag |= NU_NAM;
584 error = mbuf_copym(nfsd->nfsd_nd->nd_nam2, 0,
585 MBUF_COPYALL, MBUF_WAITOK,
586 &nuidp->nu_nam);
587 if (error) {
588 kauth_cred_rele(nuidp->nu_cr);
589 FREE_ZONE(nuidp, sizeof(struct nfsuid), M_NFSUID);
590 slp->ns_numuids--;
591 return (error);
592 }
593 break;
594 };
595 }
596 TAILQ_INSERT_TAIL(&slp->ns_uidlruhead, nuidp,
597 nu_lru);
598 LIST_INSERT_HEAD(NUIDHASH(slp, nsd->nsd_uid),
599 nuidp, nu_hash);
600 nfsrv_setcred(nuidp->nu_cr,
601 nfsd->nfsd_nd->nd_cr);
602 nfsd->nfsd_nd->nd_flag |= ND_KERBFULL;
603 }
604 }
605 }
606 if ((uap->flag & NFSSVC_AUTHINFAIL) && (nfsd = nsd->nsd_nfsd))
607 nfsd->nfsd_flag |= NFSD_AUTHFAIL;
608 error = nfssvc_nfsd(nsd, uap->argp, p);
609 } else if (uap->flag & NFSSVC_EXPORT) {
610 error = nfssvc_export(uap->argp, p);
611 } else {
612 error = EINVAL;
613 }
614 #endif /* NFS_NOSERVER */
615 if (error == EINTR || error == ERESTART)
616 error = 0;
617 return (error);
618 }
619
620 /*
621 * NFSKERB client helper daemon.
622 * Gets authorization strings for "kerb" mounts.
623 */
624 static int
625 nfskerb_clientd(
626 struct nfsmount *nmp,
627 struct nfsd_cargs *ncd,
628 int flag,
629 user_addr_t argp,
630 proc_t p)
631 {
632 struct nfsuid *nuidp, *nnuidp;
633 int error = 0;
634 struct nfsreq *rp;
635 struct timeval now;
636
637 /*
638 * First initialize some variables
639 */
640 microtime(&now);
641
642 /*
643 * If an authorization string is being passed in, get it.
644 */
645 if ((flag & NFSSVC_GOTAUTH) && (nmp->nm_state & NFSSTA_MOUNTED) &&
646 ((nmp->nm_state & NFSSTA_WAITAUTH) == 0)) {
647 if (nmp->nm_state & NFSSTA_HASAUTH)
648 panic("cld kerb");
649 if ((flag & NFSSVC_AUTHINFAIL) == 0) {
650 if (ncd->ncd_authlen <= nmp->nm_authlen &&
651 ncd->ncd_verflen <= nmp->nm_verflen &&
652 !copyin(CAST_USER_ADDR_T(ncd->ncd_authstr),nmp->nm_authstr,ncd->ncd_authlen)&&
653 !copyin(CAST_USER_ADDR_T(ncd->ncd_verfstr),nmp->nm_verfstr,ncd->ncd_verflen)){
654 nmp->nm_authtype = ncd->ncd_authtype;
655 nmp->nm_authlen = ncd->ncd_authlen;
656 nmp->nm_verflen = ncd->ncd_verflen;
657 #if NFSKERB
658 nmp->nm_key = ncd->ncd_key;
659 #endif
660 } else
661 nmp->nm_state |= NFSSTA_AUTHERR;
662 } else
663 nmp->nm_state |= NFSSTA_AUTHERR;
664 nmp->nm_state |= NFSSTA_HASAUTH;
665 wakeup((caddr_t)&nmp->nm_authlen);
666 } else {
667 nmp->nm_state |= NFSSTA_WAITAUTH;
668 }
669
670 /*
671 * Loop every second updating queue until there is a termination sig.
672 */
673 while (nmp->nm_state & NFSSTA_MOUNTED) {
674 /* Get an authorization string, if required. */
675 if ((nmp->nm_state & (NFSSTA_WAITAUTH | NFSSTA_HASAUTH)) == 0) {
676 ncd->ncd_authuid = nmp->nm_authuid;
677 if (copyout((caddr_t)ncd, argp, sizeof (struct nfsd_cargs)))
678 nmp->nm_state |= NFSSTA_WAITAUTH;
679 else
680 return (ENEEDAUTH);
681 }
682 /* Wait a bit (no pun) and do it again. */
683 if ((nmp->nm_state & NFSSTA_MOUNTED) &&
684 (nmp->nm_state & (NFSSTA_WAITAUTH | NFSSTA_HASAUTH))) {
685 error = tsleep((caddr_t)&nmp->nm_authstr, PSOCK | PCATCH,
686 "nfskrbtimr", hz / 3);
687 if (error == EINTR || error == ERESTART)
688 dounmount(nmp->nm_mountp, 0, 0, p);
689 }
690 }
691
692 /*
693 * Finally, we can free up the mount structure.
694 */
695 for (nuidp = nmp->nm_uidlruhead.tqh_first; nuidp != 0; nuidp = nnuidp) {
696 nnuidp = nuidp->nu_lru.tqe_next;
697 LIST_REMOVE(nuidp, nu_hash);
698 TAILQ_REMOVE(&nmp->nm_uidlruhead, nuidp, nu_lru);
699 kauth_cred_rele(nuidp->nu_cr);
700 FREE_ZONE((caddr_t)nuidp, sizeof (struct nfsuid), M_NFSUID);
701 }
702 /*
703 * Loop through outstanding request list and remove dangling
704 * references to defunct nfsmount struct
705 */
706 for (rp = nfs_reqq.tqh_first; rp; rp = rp->r_chain.tqe_next)
707 if (rp->r_nmp == nmp)
708 rp->r_nmp = (struct nfsmount *)0;
709 /* Need to wake up any rcvlock waiters so they notice the unmount. */
710 if (nmp->nm_state & NFSSTA_WANTRCV) {
711 nmp->nm_state &= ~NFSSTA_WANTRCV;
712 wakeup(&nmp->nm_state);
713 }
714 FREE_ZONE((caddr_t)nmp, sizeof (struct nfsmount), M_NFSMNT);
715 if (error == EWOULDBLOCK)
716 error = 0;
717 return (error);
718 }
719
720 #ifndef NFS_NOSERVER
721 /*
722 * Adds a socket to the list for servicing by nfsds.
723 */
724 static int
725 nfssvc_addsock(
726 socket_t so,
727 mbuf_t mynam,
728 __unused proc_t p)
729 {
730 int siz;
731 struct nfssvc_sock *slp;
732 struct nfssvc_sock *tslp = NULL;
733 int error, sodomain, sotype, soprotocol, on = 1;
734 struct timeval timeo;
735
736 /* make sure mbuf constants are set up */
737 if (!nfs_mbuf_mlen)
738 nfs_mbuf_init();
739
740 sock_gettype(so, &sodomain, &sotype, &soprotocol);
741
742 /*
743 * Add it to the list, as required.
744 */
745 if (soprotocol == IPPROTO_UDP) {
746 tslp = nfs_udpsock;
747 if (!tslp || (tslp->ns_flag & SLP_VALID)) {
748 mbuf_freem(mynam);
749 return (EPERM);
750 }
751 #if ISO
752 } else if (soprotocol == ISOPROTO_CLTP) {
753 tslp = nfs_cltpsock;
754 if (!tslp || (tslp->ns_flag & SLP_VALID)) {
755 mbuf_freem(mynam);
756 return (EPERM);
757 }
758 #endif /* ISO */
759 }
760 /* reserve buffer space for 2 maximally-sized packets */
761 siz = NFS_MAXPACKET;
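	/* stream sockets also carry an RPC record mark word with each packet */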
762 if (sotype == SOCK_STREAM)
763 siz += sizeof (u_long);
764 siz *= 2;
765 if (siz > NFS_MAXSOCKBUF)
766 siz = NFS_MAXSOCKBUF;
767 if ((error = sock_setsockopt(so, SOL_SOCKET, SO_SNDBUF, &siz, sizeof(siz))) ||
768 (error = sock_setsockopt(so, SOL_SOCKET, SO_RCVBUF, &siz, sizeof(siz)))) {
769 mbuf_freem(mynam);
770 return (error);
771 }
772
773 /*
774 * Set protocol specific options { for now TCP only } and
775 * reserve some space. For datagram sockets, this can get called
776 * repeatedly for the same socket, but that isn't harmful.
777 */
778 if (sotype == SOCK_STREAM) {
779 sock_setsockopt(so, SOL_SOCKET, SO_KEEPALIVE, &on, sizeof(on));
780 }
781 if (sodomain == AF_INET && soprotocol == IPPROTO_TCP) {
782 sock_setsockopt(so, IPPROTO_TCP, TCP_NODELAY, &on, sizeof(on));
783 }
784
785 sock_nointerrupt(so, 0);
786
787 timeo.tv_usec = 0;
788 timeo.tv_sec = 0;
789 error = sock_setsockopt(so, SOL_SOCKET, SO_RCVTIMEO, &timeo, sizeof(timeo));
790 error = sock_setsockopt(so, SOL_SOCKET, SO_SNDTIMEO, &timeo, sizeof(timeo));
791
792 if (tslp) {
793 slp = tslp;
794 lck_mtx_lock(nfsd_mutex);
795 } else {
796 MALLOC(slp, struct nfssvc_sock *, sizeof(struct nfssvc_sock),
797 M_NFSSVC, M_WAITOK);
798 if (!slp) {
799 mbuf_freem(mynam);
800 return (ENOMEM);
801 }
802 bzero((caddr_t)slp, sizeof (struct nfssvc_sock));
803 lck_rw_init(&slp->ns_rwlock, nfs_slp_rwlock_group, nfs_slp_lock_attr);
804 lck_mtx_init(&slp->ns_wgmutex, nfs_slp_mutex_group, nfs_slp_lock_attr);
805 TAILQ_INIT(&slp->ns_uidlruhead);
806 lck_mtx_lock(nfsd_mutex);
807 TAILQ_INSERT_TAIL(&nfssvc_sockhead, slp, ns_chain);
808 }
809
810 sock_retain(so); /* grab a retain count on the socket */
811 slp->ns_so = so;
812 slp->ns_sotype = sotype;
813 slp->ns_nam = mynam;
814
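	/*
	 * Register nfsrv_rcv() as the socket's receive upcall so that
	 * incoming data is queued and an nfsd is woken to service it.
	 */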
815 socket_lock(so, 1);
816 so->so_upcallarg = (caddr_t)slp;
817 so->so_upcall = nfsrv_rcv;
818 so->so_rcv.sb_flags |= SB_UPCALL; /* required for freebsd merge */
819 socket_unlock(so, 1);
820
821 slp->ns_flag = SLP_VALID | SLP_NEEDQ;
822
823 nfsrv_wakenfsd(slp);
824 lck_mtx_unlock(nfsd_mutex);
825
826 return (0);
827 }
828
829 /*
830 * Called by nfssvc() for nfsds. Just loops around servicing rpc requests
831 * until it is killed by a signal.
832 */
833 static int
834 nfssvc_nfsd(nsd, argp, p)
835 struct nfsd_srvargs *nsd;
836 user_addr_t argp;
837 proc_t p;
838 {
839 mbuf_t m, mreq;
840 struct nfssvc_sock *slp;
841 struct nfsd *nfsd = nsd->nsd_nfsd;
842 struct nfsrv_descript *nd = NULL;
843 int error = 0, cacherep, writes_todo;
844 int siz, procrastinate;
845 u_quad_t cur_usec;
846 struct timeval now;
847 boolean_t funnel_state;
848
849 #ifndef nolint
850 cacherep = RC_DOIT;
851 writes_todo = 0;
852 #endif
853 if (nfsd == (struct nfsd *)0) {
854 MALLOC(nfsd, struct nfsd *, sizeof(struct nfsd), M_NFSD, M_WAITOK);
855 if (!nfsd)
856 return (ENOMEM);
857 nsd->nsd_nfsd = nfsd;
858 bzero((caddr_t)nfsd, sizeof (struct nfsd));
859 nfsd->nfsd_procp = p;
860 lck_mtx_lock(nfsd_mutex);
861 TAILQ_INSERT_TAIL(&nfsd_head, nfsd, nfsd_chain);
862 nfs_numnfsd++;
863 lck_mtx_unlock(nfsd_mutex);
864 }
865
866 funnel_state = thread_funnel_set(kernel_flock, FALSE);
867
868 /*
869 * Loop getting rpc requests until SIGKILL.
870 */
871 for (;;) {
872 if ((nfsd->nfsd_flag & NFSD_REQINPROG) == 0) {
873 lck_mtx_lock(nfsd_mutex);
874 while ((nfsd->nfsd_slp == NULL) && !(nfsd_head_flag & NFSD_CHECKSLP)) {
875 nfsd->nfsd_flag |= NFSD_WAITING;
876 nfsd_waiting++;
877 error = msleep(nfsd, nfsd_mutex, PSOCK | PCATCH, "nfsd", 0);
878 nfsd_waiting--;
879 if (error) {
880 lck_mtx_unlock(nfsd_mutex);
881 goto done;
882 }
883 }
884 if ((nfsd->nfsd_slp == NULL) && (nfsd_head_flag & NFSD_CHECKSLP)) {
885 TAILQ_FOREACH(slp, &nfssvc_sockhead, ns_chain) {
886 lck_rw_lock_shared(&slp->ns_rwlock);
887 if ((slp->ns_flag & (SLP_VALID | SLP_DOREC))
888 == (SLP_VALID | SLP_DOREC)) {
889 if (lck_rw_lock_shared_to_exclusive(&slp->ns_rwlock)) {
890 /* upgrade failed and we lost the lock; take exclusive and recheck */
891 lck_rw_lock_exclusive(&slp->ns_rwlock);
892 if ((slp->ns_flag & (SLP_VALID | SLP_DOREC))
893 != (SLP_VALID | SLP_DOREC)) {
894 /* flags no longer set, so skip this socket */
895 lck_rw_done(&slp->ns_rwlock);
896 continue;
897 }
898 }
899 slp->ns_flag &= ~SLP_DOREC;
900 slp->ns_sref++;
901 nfsd->nfsd_slp = slp;
902 lck_rw_done(&slp->ns_rwlock);
903 break;
904 }
905 lck_rw_done(&slp->ns_rwlock);
906 }
907 if (slp == 0)
908 nfsd_head_flag &= ~NFSD_CHECKSLP;
909 }
910 lck_mtx_unlock(nfsd_mutex);
911 if ((slp = nfsd->nfsd_slp) == NULL)
912 continue;
913 lck_rw_lock_exclusive(&slp->ns_rwlock);
914 if (slp->ns_flag & SLP_VALID) {
915 if ((slp->ns_flag & (SLP_NEEDQ|SLP_DISCONN)) == SLP_NEEDQ) {
916 slp->ns_flag &= ~SLP_NEEDQ;
917 nfsrv_rcv_locked(slp->ns_so, slp, MBUF_WAITOK);
918 }
919 if (slp->ns_flag & SLP_DISCONN)
920 nfsrv_zapsock(slp);
921 error = nfsrv_dorec(slp, nfsd, &nd);
922 microuptime(&now);
923 cur_usec = (u_quad_t)now.tv_sec * 1000000 +
924 (u_quad_t)now.tv_usec;
925 if (error && slp->ns_wgtime && (slp->ns_wgtime <= cur_usec)) {
926 error = 0;
927 cacherep = RC_DOIT;
928 writes_todo = 1;
929 } else
930 writes_todo = 0;
931 nfsd->nfsd_flag |= NFSD_REQINPROG;
932 }
933 lck_rw_done(&slp->ns_rwlock);
934 } else {
935 error = 0;
936 slp = nfsd->nfsd_slp;
937 }
938 if (error || (slp->ns_flag & SLP_VALID) == 0) {
939 if (nd) {
940 if (nd->nd_mrep)
941 mbuf_freem(nd->nd_mrep);
942 if (nd->nd_nam2)
943 mbuf_freem(nd->nd_nam2);
944 if (nd->nd_cr)
945 kauth_cred_rele(nd->nd_cr);
946 FREE_ZONE((caddr_t)nd,
947 sizeof *nd, M_NFSRVDESC);
948 nd = NULL;
949 }
950 nfsd->nfsd_slp = NULL;
951 nfsd->nfsd_flag &= ~NFSD_REQINPROG;
952 nfsrv_slpderef(slp);
953 continue;
954 }
955 if (nd) {
956 microuptime(&nd->nd_starttime);
957 if (nd->nd_nam2)
958 nd->nd_nam = nd->nd_nam2;
959 else
960 nd->nd_nam = slp->ns_nam;
961
962 /*
963 * Check to see if authorization is needed.
964 */
965 if (nfsd->nfsd_flag & NFSD_NEEDAUTH) {
966 nfsd->nfsd_flag &= ~NFSD_NEEDAUTH;
967 nsd->nsd_haddr = ((struct sockaddr_in *)mbuf_data(nd->nd_nam))->sin_addr.s_addr;
968 nsd->nsd_authlen = nfsd->nfsd_authlen;
969 nsd->nsd_verflen = nfsd->nfsd_verflen;
970 if (!copyout(nfsd->nfsd_authstr,CAST_USER_ADDR_T(nsd->nsd_authstr),
971 nfsd->nfsd_authlen) &&
972 !copyout(nfsd->nfsd_verfstr, CAST_USER_ADDR_T(nsd->nsd_verfstr),
973 nfsd->nfsd_verflen) &&
974 !copyout((caddr_t)nsd, argp, sizeof (*nsd))) {
975 thread_funnel_set(kernel_flock, funnel_state);
976 return (ENEEDAUTH);
977 }
978 cacherep = RC_DROPIT;
979 } else
980 cacherep = nfsrv_getcache(nd, slp, &mreq);
981
982 if (nfsd->nfsd_flag & NFSD_AUTHFAIL) {
983 nfsd->nfsd_flag &= ~NFSD_AUTHFAIL;
984 nd->nd_procnum = NFSPROC_NOOP;
985 nd->nd_repstat = (NFSERR_AUTHERR | AUTH_TOOWEAK);
986 cacherep = RC_DOIT;
987 } else if (nfs_privport) {
988 /* Check if source port is privileged */
989 u_short port;
990 struct sockaddr *nam = mbuf_data(nd->nd_nam);
991 struct sockaddr_in *sin;
992
993 sin = (struct sockaddr_in *)nam;
994 port = ntohs(sin->sin_port);
995 if (port >= IPPORT_RESERVED &&
996 nd->nd_procnum != NFSPROC_NULL) {
997 char strbuf[MAX_IPv4_STR_LEN];
998 nd->nd_procnum = NFSPROC_NOOP;
999 nd->nd_repstat = (NFSERR_AUTHERR | AUTH_TOOWEAK);
1000 cacherep = RC_DOIT;
1001 printf("NFS request from unprivileged port (%s:%d)\n",
1002 inet_ntop(AF_INET, &sin->sin_addr, strbuf, sizeof(strbuf)),
1003 port);
1004 }
1005 }
1006
1007 }
1008
1009 /*
1010 * Loop to get all the write rpc replies that have been
1011 * gathered together.
1012 */
1013 do {
1014 switch (cacherep) {
1015 case RC_DOIT:
1016 if (nd && (nd->nd_flag & ND_NFSV3))
1017 procrastinate = nfsrvw_procrastinate_v3;
1018 else
1019 procrastinate = nfsrvw_procrastinate;
1020 lck_rw_lock_shared(&nfs_export_rwlock);
1021 if (writes_todo || ((nd->nd_procnum == NFSPROC_WRITE) && (procrastinate > 0)))
1022 error = nfsrv_writegather(&nd, slp, nfsd->nfsd_procp, &mreq);
1023 else {
1024 error = (*(nfsrv3_procs[nd->nd_procnum]))(nd, slp, nfsd->nfsd_procp, &mreq);
1025 if (mreq == NULL)
1026 nd->nd_mrep = NULL;
1027 }
1028 lck_rw_done(&nfs_export_rwlock);
1029 if (mreq == NULL)
1030 break;
1031 if (error) {
1032 OSAddAtomic(1, (SInt32*)&nfsstats.srv_errs);
1033 nfsrv_updatecache(nd, FALSE, mreq);
1034 if (nd->nd_nam2) {
1035 mbuf_freem(nd->nd_nam2);
1036 nd->nd_nam2 = NULL;
1037 }
1038 nd->nd_mrep = NULL;
1039 break;
1040 }
1041 OSAddAtomic(1, (SInt32*)&nfsstats.srvrpccnt[nd->nd_procnum]);
1042 nfsrv_updatecache(nd, TRUE, mreq);
1043 nd->nd_mrep = NULL;
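				/* FALLTHROUGH: transmit the reply just built via the RC_REPLY case */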
1044 case RC_REPLY:
1045 m = mreq;
1046 siz = 0;
1047 while (m) {
1048 siz += mbuf_len(m);
1049 m = mbuf_next(m);
1050 }
1051 if (siz <= 0 || siz > NFS_MAXPACKET) {
1052 printf("mbuf siz=%d\n",siz);
1053 panic("Bad nfs svc reply");
1054 }
1055 m = mreq;
1056 mbuf_pkthdr_setlen(m, siz);
1057 error = mbuf_pkthdr_setrcvif(m, NULL);
1058 if (error)
1059 panic("nfsd setrcvif failed: %d", error);
1060 /*
1061 * For stream protocols, prepend a Sun RPC
1062 * Record Mark.
1063 */
1064 if (slp->ns_sotype == SOCK_STREAM) {
1065 error = mbuf_prepend(&m, NFSX_UNSIGNED, MBUF_WAITOK);
1066 if (!error)
1067 *(u_long*)mbuf_data(m) = htonl(0x80000000 | siz);
1068 }
1069 if (!error) {
1070 if (slp->ns_flag & SLP_VALID) {
1071 error = nfs_send(slp->ns_so, nd->nd_nam2, m, NULL);
1072 } else {
1073 error = EPIPE;
1074 mbuf_freem(m);
1075 }
1076 } else {
1077 mbuf_freem(m);
1078 }
1079 mreq = NULL;
1080 if (nfsrtton)
1081 nfsd_rt(slp->ns_sotype, nd, cacherep);
1082 if (nd->nd_nam2) {
1083 mbuf_freem(nd->nd_nam2);
1084 nd->nd_nam2 = NULL;
1085 }
1086 if (nd->nd_mrep) {
1087 mbuf_freem(nd->nd_mrep);
1088 nd->nd_mrep = NULL;
1089 }
1090 if (error == EPIPE) {
1091 lck_rw_lock_exclusive(&slp->ns_rwlock);
1092 nfsrv_zapsock(slp);
1093 lck_rw_done(&slp->ns_rwlock);
1094 }
1095 if (error == EINTR || error == ERESTART) {
1096 if (nd->nd_cr)
1097 kauth_cred_rele(nd->nd_cr);
1098 FREE_ZONE((caddr_t)nd, sizeof *nd, M_NFSRVDESC);
1099 nfsrv_slpderef(slp);
1100 goto done;
1101 }
1102 break;
1103 case RC_DROPIT:
1104 if (nfsrtton)
1105 nfsd_rt(slp->ns_sotype, nd, cacherep);
1106 mbuf_freem(nd->nd_mrep);
1107 mbuf_freem(nd->nd_nam2);
1108 nd->nd_mrep = nd->nd_nam2 = NULL;
1109 break;
1110 };
1111 if (nd) {
1112 if (nd->nd_mrep)
1113 mbuf_freem(nd->nd_mrep);
1114 if (nd->nd_nam2)
1115 mbuf_freem(nd->nd_nam2);
1116 if (nd->nd_cr)
1117 kauth_cred_rele(nd->nd_cr);
1118 FREE_ZONE((caddr_t)nd, sizeof *nd, M_NFSRVDESC);
1119 nd = NULL;
1120 }
1121
1122 /*
1123 * Check to see if there are outstanding writes that
1124 * need to be serviced.
1125 */
1126 microuptime(&now);
1127 cur_usec = (u_quad_t)now.tv_sec * 1000000 +
1128 (u_quad_t)now.tv_usec;
1129 if (slp->ns_wgtime && (slp->ns_wgtime <= cur_usec)) {
1130 cacherep = RC_DOIT;
1131 writes_todo = 1;
1132 } else {
1133 writes_todo = 0;
1134 }
1135 } while (writes_todo);
1136 lck_rw_lock_exclusive(&slp->ns_rwlock);
1137 if (nfsrv_dorec(slp, nfsd, &nd)) {
1138 lck_rw_done(&slp->ns_rwlock);
1139 nfsd->nfsd_flag &= ~NFSD_REQINPROG;
1140 nfsd->nfsd_slp = NULL;
1141 nfsrv_slpderef(slp);
1142 } else {
1143 lck_rw_done(&slp->ns_rwlock);
1144 }
1145 }
1146 done:
1147 thread_funnel_set(kernel_flock, funnel_state);
1148 lck_mtx_lock(nfsd_mutex);
1149 TAILQ_REMOVE(&nfsd_head, nfsd, nfsd_chain);
1150 FREE(nfsd, M_NFSD);
1151 nsd->nsd_nfsd = (struct nfsd *)0;
1152 if (--nfs_numnfsd == 0)
1153 nfsrv_init(TRUE); /* Reinitialize everything */
1154 lck_mtx_unlock(nfsd_mutex);
1155 return (error);
1156 }
1157
1158 static int
1159 nfssvc_export(user_addr_t argp, proc_t p)
1160 {
1161 int error = 0, is_64bit;
1162 struct user_nfs_export_args unxa;
1163 struct vfs_context context;
1164
1165 context.vc_proc = p;
1166 context.vc_ucred = kauth_cred_get();
1167 is_64bit = IS_64BIT_PROCESS(p);
1168
1169 /* copy in pointers to path and export args */
1170 if (is_64bit) {
1171 error = copyin(argp, (caddr_t)&unxa, sizeof(unxa));
1172 } else {
1173 struct nfs_export_args tnxa;
1174 error = copyin(argp, (caddr_t)&tnxa, sizeof(tnxa));
1175 if (error == 0) {
1176 /* munge into LP64 version of nfs_export_args structure */
1177 unxa.nxa_fsid = tnxa.nxa_fsid;
1178 unxa.nxa_expid = tnxa.nxa_expid;
1179 unxa.nxa_fspath = CAST_USER_ADDR_T(tnxa.nxa_fspath);
1180 unxa.nxa_exppath = CAST_USER_ADDR_T(tnxa.nxa_exppath);
1181 unxa.nxa_flags = tnxa.nxa_flags;
1182 unxa.nxa_netcount = tnxa.nxa_netcount;
1183 unxa.nxa_nets = CAST_USER_ADDR_T(tnxa.nxa_nets);
1184 }
1185 }
1186 if (error)
1187 return (error);
1188
1189 error = nfsrv_export(&unxa, &context);
1190
1191 return (error);
1192 }
1193
1194 #endif /* NFS_NOSERVER */
1195
1196 int nfs_defect = 0;
1197 /* XXX CSM 11/25/97 Upgrade sysctl.h someday */
1198 #ifdef notyet
1199 SYSCTL_INT(_vfs_nfs, OID_AUTO, defect, CTLFLAG_RW, &nfs_defect, 0, "");
1200 #endif
1201
1202 int
1203 nfsclnt(proc_t p, struct nfsclnt_args *uap, __unused int *retval)
1204 {
1205 struct lockd_ans la;
1206 int error;
1207
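	/*
	 * Dispatch on the flag: wait for lockd requests, deliver a lockd
	 * answer copied in from user space, or hand lockd a file descriptor.
	 */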
1208 if (uap->flag == NFSCLNT_LOCKDWAIT) {
1209 return (nfslockdwait(p));
1210 }
1211 if (uap->flag == NFSCLNT_LOCKDANS) {
1212 error = copyin(uap->argp, &la, sizeof(la));
1213 return (error != 0 ? error : nfslockdans(p, &la));
1214 }
1215 if (uap->flag == NFSCLNT_LOCKDFD)
1216 return (nfslockdfd(p, CAST_DOWN(int, uap->argp)));
1217 return EINVAL;
1218 }
1219
1220
1221 static int nfssvc_iod_continue(int);
1222
1223 /*
1224 * Asynchronous I/O daemons for client nfs.
1225 * They do read-ahead and write-behind operations on the block I/O cache.
1226 * Never returns unless it fails or gets killed.
1227 */
1228 static int
1229 nfssvc_iod(__unused proc_t p)
1230 {
1231 register int i, myiod;
1232 struct uthread *ut;
1233
1234 /*
1235 * Assign my position or return error if too many already running
1236 */
1237 myiod = -1;
1238 for (i = 0; i < NFS_MAXASYNCDAEMON; i++)
1239 if (nfs_asyncdaemon[i] == 0) {
1240 nfs_asyncdaemon[i]++;
1241 myiod = i;
1242 break;
1243 }
1244 if (myiod == -1)
1245 return (EBUSY);
1246 nfs_numasync++;
1247
1248 /* stuff myiod into uthread to get off local stack for continuation */
1249
1250 ut = (struct uthread *)get_bsdthread_info(current_thread());
1251 ut->uu_state.uu_nfs_myiod = myiod; /* squirrel away for continuation */
1252
1253 nfssvc_iod_continue(0);
1254 /* NOTREACHED */
1255 return (0);
1256 }
1257
1258 /*
1259 * Continuation for Asynchronous I/O daemons for client nfs.
1260 */
1261 static int
1262 nfssvc_iod_continue(int error)
1263 {
1264 register struct nfsbuf *bp;
1265 register int i, myiod;
1266 struct nfsmount *nmp;
1267 struct uthread *ut;
1268 proc_t p;
1269 int exiterror = 0;
1270
1271 /*
1272 * real myiod is stored in uthread, recover it
1273 */
1274 ut = (struct uthread *)get_bsdthread_info(current_thread());
1275 myiod = ut->uu_state.uu_nfs_myiod;
1276 p = current_proc(); // XXX
1277
1278 /*
1279 * Just loop around doin our stuff until SIGKILL
1280 * - actually we don't loop with continuations...
1281 */
1282 lck_mtx_lock(nfs_iod_mutex);
1283 for (;;) {
1284 while (((nmp = nfs_iodmount[myiod]) == NULL
1285 || nmp->nm_bufq.tqh_first == NULL)
1286 && error == 0 && nfs_ioddelwri == 0) {
1287 if (nmp)
1288 nmp->nm_bufqiods--;
1289 nfs_iodwant[myiod] = p; // XXX this doesn't need to be a proc_t
1290 nfs_iodmount[myiod] = NULL;
1291 error = msleep0((caddr_t)&nfs_iodwant[myiod], nfs_iod_mutex,
1292 PWAIT | PCATCH | PDROP, "nfsidl", 0, nfssvc_iod_continue);
1293 lck_mtx_lock(nfs_iod_mutex);
1294 }
1295 if (error && !exiterror && nmp && (nmp->nm_bufqiods == 1) &&
1296 !TAILQ_EMPTY(&nmp->nm_bufq)) {
1297 /*
1298 * Finish processing the queued buffers before exiting.
1299 * Decrement the iod count now to make sure nfs_asyncio()
1300 * doesn't keep queueing up more work.
1301 */
1302 nmp->nm_bufqiods--;
1303 exiterror = error;
1304 error = 0;
1305 }
1306 if (error) {
1307 nfs_asyncdaemon[myiod] = 0;
1308 if (nmp && !exiterror)
1309 nmp->nm_bufqiods--;
1310 nfs_iodwant[myiod] = NULL;
1311 nfs_iodmount[myiod] = NULL;
1312 lck_mtx_unlock(nfs_iod_mutex);
1313 nfs_numasync--;
1314 if (error == EINTR || error == ERESTART)
1315 error = 0;
1316 unix_syscall_return(error);
1317 }
1318 if (nmp != NULL) {
1319 while ((bp = TAILQ_FIRST(&nmp->nm_bufq)) != NULL) {
1320 /* Take one off the front of the list */
1321 TAILQ_REMOVE(&nmp->nm_bufq, bp, nb_free);
1322 bp->nb_free.tqe_next = NFSNOLIST;
1323 nmp->nm_bufqlen--;
1324 if (nmp->nm_bufqwant && nmp->nm_bufqlen < 2 * nfs_numasync) {
1325 nmp->nm_bufqwant = FALSE;
1326 lck_mtx_unlock(nfs_iod_mutex);
1327 wakeup(&nmp->nm_bufq);
1328 } else {
1329 lck_mtx_unlock(nfs_iod_mutex);
1330 }
1331
1332 SET(bp->nb_flags, NB_IOD);
1333 if (ISSET(bp->nb_flags, NB_READ))
1334 nfs_doio(bp, bp->nb_rcred, NULL);
1335 else
1336 nfs_doio(bp, bp->nb_wcred, NULL);
1337
1338 lck_mtx_lock(nfs_iod_mutex);
1339 /*
1340 * If there is more than one iod on this mount, then defect
1341 * so that the iods can be shared out fairly between the mounts
1342 */
1343 if (!exiterror && nfs_defect && nmp->nm_bufqiods > 1) {
1344 nfs_iodmount[myiod] = NULL;
1345 nmp->nm_bufqiods--;
1346 break;
1347 }
1348 }
1349 }
1350 lck_mtx_unlock(nfs_iod_mutex);
1351
1352 if (nfs_ioddelwri) {
1353 i = 0;
1354 nfs_ioddelwri = 0;
1355 lck_mtx_lock(nfs_buf_mutex);
1356 while (i < 8 && (bp = TAILQ_FIRST(&nfsbufdelwri)) != NULL) {
1357 struct nfsnode *np = VTONFS(bp->nb_vp);
1358 nfs_buf_remfree(bp);
1359 nfs_buf_refget(bp);
1360 while ((error = nfs_buf_acquire(bp, 0, 0, 0)) == EAGAIN);
1361 nfs_buf_refrele(bp);
1362 if (error)
1363 break;
1364 if (!bp->nb_vp) {
1365 /* buffer is no longer valid */
1366 nfs_buf_drop(bp);
1367 continue;
1368 }
1369 if (ISSET(bp->nb_flags, NB_NEEDCOMMIT))
1370 nfs_buf_check_write_verifier(np, bp);
1371 if (ISSET(bp->nb_flags, NB_NEEDCOMMIT)) {
1372 /* put buffer at end of delwri list */
1373 TAILQ_INSERT_TAIL(&nfsbufdelwri, bp, nb_free);
1374 nfsbufdelwricnt++;
1375 nfs_buf_drop(bp);
1376 lck_mtx_unlock(nfs_buf_mutex);
1377 nfs_flushcommits(np->n_vnode, NULL, 1);
1378 } else {
1379 SET(bp->nb_flags, (NB_ASYNC | NB_IOD));
1380 lck_mtx_unlock(nfs_buf_mutex);
1381 nfs_buf_write(bp);
1382 }
1383 i++;
1384 lck_mtx_lock(nfs_buf_mutex);
1385 }
1386 lck_mtx_unlock(nfs_buf_mutex);
1387 }
1388
1389 lck_mtx_lock(nfs_iod_mutex);
1390 if (exiterror)
1391 error = exiterror;
1392 }
1393 }
1394
1395 /*
1396 * Shut down a socket associated with an nfssvc_sock structure.
1397 * Should be called with the send lock set, if required.
1398 * The trick here is to increment the sref at the start, so that the nfsds
1399 * will stop using it and clear ns_flag at the end so that it will not be
1400 * reassigned during cleanup.
1401 */
1402 static void
1403 nfsrv_zapsock(struct nfssvc_sock *slp)
1404 {
1405 socket_t so;
1406
1407 if ((slp->ns_flag & SLP_VALID) == 0)
1408 return;
1409 slp->ns_flag &= ~SLP_ALLFLAGS;
1410
1411 so = slp->ns_so;
1412 if (so == NULL)
1413 return;
1414
1415 /*
1416 * Attempt to deter future upcalls, but leave the
1417 * upcall info in place to avoid a race with the
1418 * networking code.
1419 */
1420 socket_lock(so, 1);
1421 so->so_rcv.sb_flags &= ~SB_UPCALL;
1422 socket_unlock(so, 1);
1423
1424 sock_shutdown(so, SHUT_RDWR);
1425 }
1426
1427 /*
1428 * Get an authorization string for the uid by having the mount_nfs sitting
1429 * on this mount point porpoise out of the kernel and do it.
1430 */
1431 int
1432 nfs_getauth(nmp, rep, cred, auth_str, auth_len, verf_str, verf_len, key)
1433 register struct nfsmount *nmp;
1434 struct nfsreq *rep;
1435 kauth_cred_t cred;
1436 char **auth_str;
1437 int *auth_len;
1438 char *verf_str;
1439 int *verf_len;
1440 NFSKERBKEY_T key; /* return session key */
1441 {
1442 int error = 0;
1443
1444 while ((nmp->nm_state & NFSSTA_WAITAUTH) == 0) {
1445 nmp->nm_state |= NFSSTA_WANTAUTH;
1446 (void) tsleep((caddr_t)&nmp->nm_authtype, PSOCK,
1447 "nfsauth1", 2 * hz);
1448 error = nfs_sigintr(nmp, rep, rep->r_procp);
1449 if (error) {
1450 nmp->nm_state &= ~NFSSTA_WANTAUTH;
1451 return (error);
1452 }
1453 }
1454 nmp->nm_state &= ~NFSSTA_WANTAUTH;
1455 MALLOC(*auth_str, char *, RPCAUTH_MAXSIZ, M_TEMP, M_WAITOK);
1456 if (!*auth_str)
1457 return (ENOMEM);
1458 nmp->nm_authstr = *auth_str;
1459 nmp->nm_authlen = RPCAUTH_MAXSIZ;
1460 nmp->nm_verfstr = verf_str;
1461 nmp->nm_verflen = *verf_len;
1462 nmp->nm_authuid = kauth_cred_getuid(cred);
1463 nmp->nm_state &= ~NFSSTA_WAITAUTH;
1464 wakeup((caddr_t)&nmp->nm_authstr);
1465
1466 /*
1467 * And wait for mount_nfs to do its stuff.
1468 */
1469 while ((nmp->nm_state & NFSSTA_HASAUTH) == 0 && error == 0) {
1470 (void) tsleep((caddr_t)&nmp->nm_authlen, PSOCK,
1471 "nfsauth2", 2 * hz);
1472 error = nfs_sigintr(nmp, rep, rep->r_procp);
1473 }
1474 if (nmp->nm_state & NFSSTA_AUTHERR) {
1475 nmp->nm_state &= ~NFSSTA_AUTHERR;
1476 error = EAUTH;
1477 }
1478 if (error)
1479 FREE(*auth_str, M_TEMP);
1480 else {
1481 *auth_len = nmp->nm_authlen;
1482 *verf_len = nmp->nm_verflen;
1483 bcopy((caddr_t)nmp->nm_key, (caddr_t)key, sizeof (key));
1484 }
1485 nmp->nm_state &= ~NFSSTA_HASAUTH;
1486 nmp->nm_state |= NFSSTA_WAITAUTH;
1487 if (nmp->nm_state & NFSSTA_WANTAUTH) {
1488 nmp->nm_state &= ~NFSSTA_WANTAUTH;
1489 wakeup((caddr_t)&nmp->nm_authtype);
1490 }
1491 return (error);
1492 }
1493
1494 /*
1495 * Get a nickname authenticator and verifier.
1496 */
1497 int
1498 nfs_getnickauth(
1499 struct nfsmount *nmp,
1500 kauth_cred_t cred,
1501 char **auth_str,
1502 int *auth_len,
1503 char *verf_str,
1504 __unused int verf_len)
1505 {
1506 register struct nfsuid *nuidp;
1507 register u_long *nickp, *verfp;
1508 struct timeval ktvin, ktvout, now;
1509
1510 #if DIAGNOSTIC
1511 if (verf_len < (4 * NFSX_UNSIGNED))
1512 panic("nfs_getnickauth verf too small");
1513 #endif
1514 for (nuidp = NMUIDHASH(nmp, kauth_cred_getuid(cred))->lh_first;
1515 nuidp != 0; nuidp = nuidp->nu_hash.le_next) {
1516 if (kauth_cred_getuid(nuidp->nu_cr) == kauth_cred_getuid(cred))
1517 break;
1518 }
1519 microtime(&now);
1520 if (!nuidp || nuidp->nu_expire < now.tv_sec)
1521 return (EACCES);
1522
1523 MALLOC(nickp, u_long *, 2 * NFSX_UNSIGNED, M_TEMP, M_WAITOK);
1524 if (!nickp)
1525 return (ENOMEM);
1526
1527 /*
1528 * Move to the end of the lru list (end of lru == most recently used).
1529 */
1530 TAILQ_REMOVE(&nmp->nm_uidlruhead, nuidp, nu_lru);
1531 TAILQ_INSERT_TAIL(&nmp->nm_uidlruhead, nuidp, nu_lru);
1532
1533 *nickp++ = txdr_unsigned(RPCAKN_NICKNAME);
1534 *nickp = txdr_unsigned(nuidp->nu_nickname);
1535 *auth_str = (char *)nickp;
1536 *auth_len = 2 * NFSX_UNSIGNED;
1537
1538 /*
1539 * Now we must encrypt the verifier and package it up.
1540 */
1541 verfp = (u_long *)verf_str;
1542 *verfp++ = txdr_unsigned(RPCAKN_NICKNAME);
1543 microtime(&now);
1544 if (now.tv_sec > nuidp->nu_timestamp.tv_sec ||
1545 (now.tv_sec == nuidp->nu_timestamp.tv_sec &&
1546 now.tv_usec > nuidp->nu_timestamp.tv_usec))
1547 nuidp->nu_timestamp = now;
1548 else
1549 nuidp->nu_timestamp.tv_usec++;
1550 ktvin.tv_sec = txdr_unsigned(nuidp->nu_timestamp.tv_sec);
1551 ktvin.tv_usec = txdr_unsigned(nuidp->nu_timestamp.tv_usec);
1552
1553 /*
1554 * Now encrypt the timestamp verifier in ecb mode using the session
1555 * key.
1556 */
1557 #if NFSKERB
1558 XXX
1559 #endif
1560
1561 *verfp++ = ktvout.tv_sec;
1562 *verfp++ = ktvout.tv_usec;
1563 *verfp = 0;
1564 return (0);
1565 }
1566
1567 /*
1568 * Save the current nickname in a hash list entry on the mount point.
1569 */
1570 int
1571 nfs_savenickauth(nmp, cred, len, key, mdp, dposp, mrep)
1572 register struct nfsmount *nmp;
1573 kauth_cred_t cred;
1574 int len;
1575 NFSKERBKEY_T key;
1576 mbuf_t *mdp;
1577 char **dposp;
1578 mbuf_t mrep;
1579 {
1580 register struct nfsuid *nuidp;
1581 register u_long *tl;
1582 register long t1;
1583 mbuf_t md = *mdp;
1584 struct timeval ktvin, ktvout, now;
1585 u_long nick;
1586 char *dpos = *dposp, *cp2;
1587 int deltasec, error = 0;
1588
1589 if (len == (3 * NFSX_UNSIGNED)) {
1590 nfsm_dissect(tl, u_long *, 3 * NFSX_UNSIGNED);
1591 ktvin.tv_sec = *tl++;
1592 ktvin.tv_usec = *tl++;
1593 nick = fxdr_unsigned(u_long, *tl);
1594
1595 /*
1596 * Decrypt the timestamp in ecb mode.
1597 */
1598 #if NFSKERB
1599 XXX
1600 #endif
1601 ktvout.tv_sec = fxdr_unsigned(long, ktvout.tv_sec);
1602 ktvout.tv_usec = fxdr_unsigned(long, ktvout.tv_usec);
1603 microtime(&now);
1604 deltasec = now.tv_sec - ktvout.tv_sec;
1605 if (deltasec < 0)
1606 deltasec = -deltasec;
1607 /*
1608 * If ok, add it to the hash list for the mount point.
1609 */
1610 if (deltasec <= NFS_KERBCLOCKSKEW) {
1611 if (nmp->nm_numuids < nuidhash_max) {
1612 nmp->nm_numuids++;
1613 MALLOC_ZONE(nuidp, struct nfsuid *,
1614 sizeof (struct nfsuid),
1615 M_NFSUID, M_WAITOK);
1616 } else {
1617 nuidp = NULL;
1618 }
1619 if (!nuidp) {
1620 nuidp = nmp->nm_uidlruhead.tqh_first;
1621 if (!nuidp) {
1622 error = ENOMEM;
1623 goto nfsmout;
1624 }
1625 LIST_REMOVE(nuidp, nu_hash);
1626 TAILQ_REMOVE(&nmp->nm_uidlruhead, nuidp, nu_lru);
1627 kauth_cred_rele(nuidp->nu_cr);
1628 }
1629 nuidp->nu_flag = 0;
1630 kauth_cred_ref(cred);
1631 nuidp->nu_cr = cred;
1632 nuidp->nu_expire = now.tv_sec + NFS_KERBTTL;
1633 nuidp->nu_timestamp = ktvout;
1634 nuidp->nu_nickname = nick;
1635 bcopy(key, nuidp->nu_key, sizeof (key));
1636 TAILQ_INSERT_TAIL(&nmp->nm_uidlruhead, nuidp, nu_lru);
1637 LIST_INSERT_HEAD(NMUIDHASH(nmp, kauth_cred_getuid(cred)),
1638 nuidp, nu_hash);
1639 }
1640 } else
1641 nfsm_adv(nfsm_rndup(len));
1642 nfsmout:
1643 *mdp = md;
1644 *dposp = dpos;
1645 return (error);
1646 }
1647
1648 #ifndef NFS_NOSERVER
1649
1650 /*
1651 * cleanup and release a server socket structure.
1652 */
1653 void
1654 nfsrv_slpfree(struct nfssvc_sock *slp)
1655 {
1656 struct nfsuid *nuidp, *nnuidp;
1657 struct nfsrv_descript *nwp, *nnwp;
1658
1659 if (slp->ns_so) {
1660 sock_release(slp->ns_so);
1661 slp->ns_so = NULL;
1662 }
1663 if (slp->ns_nam)
1664 mbuf_free(slp->ns_nam);
1665 if (slp->ns_raw)
1666 mbuf_freem(slp->ns_raw);
1667 if (slp->ns_rec)
1668 mbuf_freem(slp->ns_rec);
1669 slp->ns_nam = slp->ns_raw = slp->ns_rec = NULL;
1670
1671 for (nuidp = slp->ns_uidlruhead.tqh_first; nuidp != 0;
1672 nuidp = nnuidp) {
1673 nnuidp = nuidp->nu_lru.tqe_next;
1674 LIST_REMOVE(nuidp, nu_hash);
1675 TAILQ_REMOVE(&slp->ns_uidlruhead, nuidp, nu_lru);
1676 if (nuidp->nu_flag & NU_NAM)
1677 mbuf_freem(nuidp->nu_nam);
1678 kauth_cred_rele(nuidp->nu_cr);
1679 FREE_ZONE((caddr_t)nuidp,
1680 sizeof (struct nfsuid), M_NFSUID);
1681 }
1682
1683 for (nwp = slp->ns_tq.lh_first; nwp; nwp = nnwp) {
1684 nnwp = nwp->nd_tq.le_next;
1685 LIST_REMOVE(nwp, nd_tq);
1686 if (nwp->nd_cr)
1687 kauth_cred_rele(nwp->nd_cr);
1688 FREE_ZONE((caddr_t)nwp, sizeof *nwp, M_NFSRVDESC);
1689 }
1690 LIST_INIT(&slp->ns_tq);
1691
1692 lck_rw_destroy(&slp->ns_rwlock, nfs_slp_rwlock_group);
1693 lck_mtx_destroy(&slp->ns_wgmutex, nfs_slp_mutex_group);
1694 FREE(slp, M_NFSSVC);
1695 }
1696
1697 /*
1698 * Dereference a server socket structure. If it has no more references and
1699 * is no longer valid, you can throw it away.
1700 */
1701 void
1702 nfsrv_slpderef(struct nfssvc_sock *slp)
1703 {
1704 struct timeval now;
1705
1706 lck_mtx_lock(nfsd_mutex);
1707 lck_rw_lock_exclusive(&slp->ns_rwlock);
1708 slp->ns_sref--;
1709 if (slp->ns_sref || (slp->ns_flag & SLP_VALID)) {
1710 lck_rw_done(&slp->ns_rwlock);
1711 lck_mtx_unlock(nfsd_mutex);
1712 return;
1713 }
1714
1715 /* queue the socket up for deletion */
1716 microuptime(&now);
1717 slp->ns_timestamp = now.tv_sec;
1718 TAILQ_REMOVE(&nfssvc_sockhead, slp, ns_chain);
1719 TAILQ_INSERT_TAIL(&nfssvc_deadsockhead, slp, ns_chain);
1720 lck_rw_done(&slp->ns_rwlock);
1721 if (slp == nfs_udpsock)
1722 nfs_udpsock = NULL;
1723 #if ISO
1724 else if (slp == nfs_cltpsock)
1725 nfs_cltpsock = NULL;
1726 #endif
1727 lck_mtx_unlock(nfsd_mutex);
1728 }
1729
1730
1731 /*
1732 * Initialize the data structures for the server.
1733 * Handshake with any new nfsds starting up to avoid any chance of
1734 * corruption.
1735 */
1736 void
1737 nfsrv_init(terminating)
1738 int terminating;
1739 {
1740 struct nfssvc_sock *slp, *nslp;
1741 struct timeval now;
1742
1743 if (terminating) {
1744 microuptime(&now);
1745 for (slp = TAILQ_FIRST(&nfssvc_sockhead); slp != 0; slp = nslp) {
1746 nslp = TAILQ_NEXT(slp, ns_chain);
1747 if (slp->ns_flag & SLP_VALID) {
1748 lck_rw_lock_exclusive(&slp->ns_rwlock);
1749 nfsrv_zapsock(slp);
1750 lck_rw_done(&slp->ns_rwlock);
1751 }
1752 /* queue the socket up for deletion */
1753 slp->ns_timestamp = now.tv_sec;
1754 TAILQ_REMOVE(&nfssvc_sockhead, slp, ns_chain);
1755 TAILQ_INSERT_TAIL(&nfssvc_deadsockhead, slp, ns_chain);
1756 if (slp == nfs_udpsock)
1757 nfs_udpsock = NULL;
1758 #if ISO
1759 else if (slp == nfs_cltpsock)
1760 nfs_cltpsock = NULL;
1761 #endif
1762 }
1763 nfsrv_cleancache(); /* And clear out server cache */
1764 /* XXX Revisit when enabling WebNFS */
1765 #ifdef WEBNFS_ENABLED
1766 } else
1767 nfs_pub.np_valid = 0;
1768 #else
1769 }
1770 #endif
1771
1772 if (!terminating) {
1773 TAILQ_INIT(&nfssvc_sockhead);
1774 TAILQ_INIT(&nfssvc_deadsockhead);
1775 TAILQ_INIT(&nfsd_head);
1776 nfsd_head_flag &= ~NFSD_CHECKSLP;
1777 }
1778
1779 MALLOC(nfs_udpsock, struct nfssvc_sock *, sizeof(struct nfssvc_sock),
1780 M_NFSSVC, M_WAITOK);
1781 if (nfs_udpsock) {
1782 bzero((caddr_t)nfs_udpsock, sizeof (struct nfssvc_sock));
1783 lck_rw_init(&nfs_udpsock->ns_rwlock, nfs_slp_rwlock_group, nfs_slp_lock_attr);
1784 lck_mtx_init(&nfs_udpsock->ns_wgmutex, nfs_slp_mutex_group, nfs_slp_lock_attr);
1785 TAILQ_INIT(&nfs_udpsock->ns_uidlruhead);
1786 TAILQ_INSERT_HEAD(&nfssvc_sockhead, nfs_udpsock, ns_chain);
1787 } else {
1788 printf("nfsrv_init() failed to allocate UDP socket\n");
1789 }
1790
1791 #if ISO
1792 MALLOC(nfs_cltpsock, struct nfssvc_sock *, sizeof(struct nfssvc_sock),
1793 M_NFSSVC, M_WAITOK);
1794 if (nfs_cltpsock) {
1795 bzero((caddr_t)nfs_cltpsock, sizeof (struct nfssvc_sock));
1796 lck_rw_init(&nfs_cltpsock->ns_rwlock, nfs_slp_rwlock_group, nfs_slp_lock_attr);
1797 lck_mtx_init(&nfs_cltpsock->ns_wgmutex, nfs_slp_mutex_group, nfs_slp_lock_attr);
1798 TAILQ_INIT(&nfs_cltpsock->ns_uidlruhead);
1799 TAILQ_INSERT_TAIL(&nfssvc_sockhead, nfs_cltpsock, ns_chain);
1800 } else {
1801 printf("nfsrv_init() failed to allocate CLTP socket\n");
1802 }
1803 #endif
1804 }
1805
1806 /*
1807 * Add entries to the server monitor log.
1808 */
1809 static void
1810 nfsd_rt(sotype, nd, cacherep)
1811 int sotype;
1812 register struct nfsrv_descript *nd;
1813 int cacherep;
1814 {
1815 register struct drt *rt;
1816 struct timeval now;
1817
1818 rt = &nfsdrt.drt[nfsdrt.pos];
1819 if (cacherep == RC_DOIT)
1820 rt->flag = 0;
1821 else if (cacherep == RC_REPLY)
1822 rt->flag = DRT_CACHEREPLY;
1823 else
1824 rt->flag = DRT_CACHEDROP;
1825 if (sotype == SOCK_STREAM)
1826 rt->flag |= DRT_TCP;
1827 else if (nd->nd_flag & ND_NFSV3)
1828 rt->flag |= DRT_NFSV3;
1829 rt->proc = nd->nd_procnum;
1830 if (((struct sockaddr *)mbuf_data(nd->nd_nam))->sa_family == AF_INET)
1831 rt->ipadr = ((struct sockaddr_in *)mbuf_data(nd->nd_nam))->sin_addr.s_addr;
1832 else
1833 rt->ipadr = INADDR_ANY;
1834 microuptime(&now);
1835 rt->resptime = ((now.tv_sec - nd->nd_starttime.tv_sec) * 1000000) +
1836 (now.tv_usec - nd->nd_starttime.tv_usec);
1837 microtime(&rt->tstamp); // XXX unused
1838 nfsdrt.pos = (nfsdrt.pos + 1) % NFSRTTLOGSIZ;
1839 }
1840 #endif /* NFS_NOSERVER */