apple/xnu.git (xnu-1699.24.23) - blame: bsd/kern/uipc_usrreq.c
1c79356b 1/*
6d2010ae 2 * Copyright (c) 2000-2010 Apple Inc. All rights reserved.
5d5c5d0d 3 *
2d21ac55 4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
1c79356b 5 *
2d21ac55
A
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
8f6c56a5 14 *
2d21ac55
A
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
8f6c56a5
A
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
2d21ac55
A
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
8f6c56a5 25 *
2d21ac55 26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
1c79356b
A
27 */
28/*
29 * Copyright (c) 1982, 1986, 1989, 1991, 1993
30 * The Regents of the University of California. All rights reserved.
31 *
32 * Redistribution and use in source and binary forms, with or without
33 * modification, are permitted provided that the following conditions
34 * are met:
35 * 1. Redistributions of source code must retain the above copyright
36 * notice, this list of conditions and the following disclaimer.
37 * 2. Redistributions in binary form must reproduce the above copyright
38 * notice, this list of conditions and the following disclaimer in the
39 * documentation and/or other materials provided with the distribution.
40 * 3. All advertising materials mentioning features or use of this software
41 * must display the following acknowledgement:
42 * This product includes software developed by the University of
43 * California, Berkeley and its contributors.
44 * 4. Neither the name of the University nor the names of its contributors
45 * may be used to endorse or promote products derived from this software
46 * without specific prior written permission.
47 *
48 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
49 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
50 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
51 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
52 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
53 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
54 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
55 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
56 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
57 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
58 * SUCH DAMAGE.
59 *
60 * From: @(#)uipc_usrreq.c 8.3 (Berkeley) 1/4/94
61 */
2d21ac55
A
62/*
63 * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
64 * support for mandatory and extensible security protections. This notice
65 * is included in support of clause 2.2 (b) of the Apple Public License,
66 * Version 2.0.
67 */
1c79356b
A
68
69#include <sys/param.h>
70#include <sys/systm.h>
71#include <sys/kernel.h>
72#include <sys/domain.h>
73#include <sys/fcntl.h>
74#include <sys/malloc.h> /* XXX must be before <sys/file.h> */
91447636 75#include <sys/file_internal.h>
1c79356b
A
76#include <sys/filedesc.h>
77#include <sys/lock.h>
78#include <sys/mbuf.h>
79#include <sys/namei.h>
91447636
A
80#include <sys/proc_internal.h>
81#include <sys/kauth.h>
1c79356b
A
82#include <sys/protosw.h>
83#include <sys/socket.h>
84#include <sys/socketvar.h>
85#include <sys/stat.h>
86#include <sys/sysctl.h>
87#include <sys/un.h>
88#include <sys/unpcb.h>
91447636
A
89#include <sys/vnode_internal.h>
90#include <sys/kdebug.h>
1c79356b
A
91
92#include <kern/zalloc.h>
91447636 93#include <kern/locks.h>
1c79356b 94
b0d623f7 95#if CONFIG_MACF
2d21ac55 96#include <security/mac_framework.h>
b0d623f7 97#endif /* CONFIG_MACF */
2d21ac55
A
98
99#define f_msgcount f_fglob->fg_msgcount
100#define f_cred f_fglob->fg_cred
101#define f_ops f_fglob->fg_ops
102#define f_offset f_fglob->fg_offset
103#define f_data f_fglob->fg_data
1c79356b
A
104struct zone *unp_zone;
105static unp_gen_t unp_gencnt;
106static u_int unp_count;
37839358 107
2d21ac55
A
108static lck_attr_t *unp_mtx_attr;
109static lck_grp_t *unp_mtx_grp;
110static lck_grp_attr_t *unp_mtx_grp_attr;
111static lck_rw_t *unp_list_mtx;
1c79356b 112
b0d623f7
A
113static lck_mtx_t *unp_disconnect_lock;
114static lck_mtx_t *unp_connect_lock;
115static u_int disconnect_in_progress;
116
2d21ac55 117extern lck_mtx_t *uipc_lock;
1c79356b
A
118static struct unp_head unp_shead, unp_dhead;
119
6d2010ae
A
120/*
121 * mDNSResponder tracing. When enabled, endpoints connected to
122 * /var/run/mDNSResponder will be traced; during each send on
123 * the traced socket, we log the PID and process name of the
124 * sending process. We also print out a bit of info related
125 * to the data itself; this assumes ipc_msg_hdr in dnssd_ipc.h
126 * of mDNSResponder stays the same.
127 */
128#define MDNSRESPONDER_PATH "/var/run/mDNSResponder"
129
130static int unpst_tracemdns; /* enable tracing */
131
132#define MDNS_IPC_MSG_HDR_VERSION_1 1
133
134struct mdns_ipc_msg_hdr {
135 uint32_t version;
136 uint32_t datalen;
137 uint32_t ipc_flags;
138 uint32_t op;
139 union {
140 void *context;
141 uint32_t u32[2];
142 } __attribute__((packed));
143 uint32_t reg_index;
144} __attribute__((packed));
145
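/*
 * Illustrative userspace sketch (not part of this file; the helper name
 * is made up): the tracing described above is gated by the
 * net.local.stream.tracemdns sysctl declared later in this file, and can
 * be switched on by a privileged process with plain sysctlbyname(3).
 * Sends on sockets connected to MDNSRESPONDER_PATH are then logged via
 * the printf() in uipc_send().
 */
#include <sys/types.h>
#include <sys/sysctl.h>

static int
enable_mdns_tracing(void)
{
	int on = 1;

	/* requires root; returns 0 on success */
	return (sysctlbyname("net.local.stream.tracemdns",
	    NULL, NULL, &on, sizeof (on)));
}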
1c79356b
A
146/*
147 * Unix communications domain.
148 *
149 * TODO:
150 * SEQPACKET, RDM
151 * rethink name space problems
152 * need a proper out-of-band
153 * lock pushdown
154 */
2d21ac55 155static struct sockaddr sun_noname = { sizeof (sun_noname), AF_LOCAL, { 0 } };
1c79356b
A
156static ino_t unp_ino; /* prototype for fake inode numbers */
157
2d21ac55
A
158static int unp_attach(struct socket *);
159static void unp_detach(struct unpcb *);
160static int unp_bind(struct unpcb *, struct sockaddr *, proc_t);
161static int unp_connect(struct socket *, struct sockaddr *, proc_t);
162static void unp_disconnect(struct unpcb *);
163static void unp_shutdown(struct unpcb *);
164static void unp_drop(struct unpcb *, int);
e2fac8b1 165__private_extern__ void unp_gc(void);
2d21ac55
A
166static void unp_scan(struct mbuf *, void (*)(struct fileglob *));
167static void unp_mark(struct fileglob *);
168static void unp_discard(struct fileglob *);
169static void unp_discard_fdlocked(struct fileglob *, proc_t);
170static int unp_internalize(struct mbuf *, proc_t);
171static int unp_listen(struct unpcb *, proc_t);
b0d623f7
A
172static void unpcb_to_compat(struct unpcb *, struct unpcb_compat *);
173static void unp_get_locks_in_order(struct socket *so, struct socket *conn_so);
174
175static void
176unp_get_locks_in_order(struct socket *so, struct socket *conn_so)
177{
178 if (so < conn_so) {
179 socket_lock(conn_so, 1);
180 } else {
181 struct unpcb *unp = sotounpcb(so);
182 unp->unp_flags |= UNP_DONTDISCONNECT;
183 unp->rw_thrcount++;
184 socket_unlock(so, 0);
2d21ac55 185
b0d623f7
A
186 /* Get the locks in the correct order */
187 socket_lock(conn_so, 1);
188 socket_lock(so, 0);
189 unp->rw_thrcount--;
190 if (unp->rw_thrcount == 0) {
191 unp->unp_flags &= ~UNP_DONTDISCONNECT;
192 wakeup(unp);
193 }
194 }
195}
1c79356b
A
196
197static int
198uipc_abort(struct socket *so)
199{
200 struct unpcb *unp = sotounpcb(so);
201
202 if (unp == 0)
2d21ac55 203 return (EINVAL);
1c79356b 204 unp_drop(unp, ECONNABORTED);
91447636
A
205 unp_detach(unp);
206 sofree(so);
2d21ac55 207 return (0);
1c79356b
A
208}
209
210static int
211uipc_accept(struct socket *so, struct sockaddr **nam)
212{
213 struct unpcb *unp = sotounpcb(so);
214
215 if (unp == 0)
2d21ac55 216 return (EINVAL);
1c79356b
A
217
218 /*
219 * Pass back name of connected socket,
220 * if it was bound and we are still connected
221 * (our peer may have closed already!).
222 */
223 if (unp->unp_conn && unp->unp_conn->unp_addr) {
2d21ac55
A
224 *nam = dup_sockaddr((struct sockaddr *)
225 unp->unp_conn->unp_addr, 1);
1c79356b
A
226 } else {
227 *nam = dup_sockaddr((struct sockaddr *)&sun_noname, 1);
228 }
2d21ac55 229 return (0);
1c79356b
A
230}
231
2d21ac55
A
232/*
233 * Returns: 0 Success
234 * EISCONN
235 * unp_attach:
236 */
1c79356b 237static int
2d21ac55 238uipc_attach(struct socket *so, __unused int proto, __unused proc_t p)
1c79356b
A
239{
240 struct unpcb *unp = sotounpcb(so);
241
242 if (unp != 0)
2d21ac55
A
243 return (EISCONN);
244 return (unp_attach(so));
1c79356b
A
245}
246
247static int
2d21ac55 248uipc_bind(struct socket *so, struct sockaddr *nam, proc_t p)
1c79356b
A
249{
250 struct unpcb *unp = sotounpcb(so);
251
252 if (unp == 0)
2d21ac55 253 return (EINVAL);
1c79356b 254
2d21ac55 255 return (unp_bind(unp, nam, p));
1c79356b
A
256}
257
2d21ac55
A
258/*
259 * Returns: 0 Success
260 * EINVAL
261 * unp_connect:??? [See elsewhere in this file]
262 */
1c79356b 263static int
2d21ac55 264uipc_connect(struct socket *so, struct sockaddr *nam, proc_t p)
1c79356b
A
265{
266 struct unpcb *unp = sotounpcb(so);
267
268 if (unp == 0)
2d21ac55
A
269 return (EINVAL);
270 return (unp_connect(so, nam, p));
1c79356b
A
271}
272
2d21ac55
A
273/*
274 * Returns: 0 Success
275 * EINVAL
276 * unp_connect2:EPROTOTYPE Protocol wrong type for socket
277 * unp_connect2:EINVAL Invalid argument
278 */
1c79356b
A
279static int
280uipc_connect2(struct socket *so1, struct socket *so2)
281{
282 struct unpcb *unp = sotounpcb(so1);
283
284 if (unp == 0)
2d21ac55 285 return (EINVAL);
1c79356b 286
2d21ac55 287 return (unp_connect2(so1, so2));
1c79356b
A
288}
289
290/* control is EOPNOTSUPP */
291
292static int
293uipc_detach(struct socket *so)
294{
295 struct unpcb *unp = sotounpcb(so);
296
297 if (unp == 0)
2d21ac55 298 return (EINVAL);
1c79356b 299
6d2010ae 300 lck_mtx_assert(&unp->unp_mtx, LCK_MTX_ASSERT_OWNED);
1c79356b 301 unp_detach(unp);
2d21ac55 302 return (0);
1c79356b
A
303}
304
305static int
306uipc_disconnect(struct socket *so)
307{
308 struct unpcb *unp = sotounpcb(so);
309
310 if (unp == 0)
2d21ac55 311 return (EINVAL);
1c79356b 312 unp_disconnect(unp);
2d21ac55 313 return (0);
1c79356b
A
314}
315
2d21ac55
A
316/*
317 * Returns: 0 Success
318 * EINVAL
319 */
1c79356b 320static int
2d21ac55 321uipc_listen(struct socket *so, __unused proc_t p)
1c79356b
A
322{
323 struct unpcb *unp = sotounpcb(so);
324
325 if (unp == 0 || unp->unp_vnode == 0)
2d21ac55
A
326 return (EINVAL);
327 return (unp_listen(unp, p));
1c79356b
A
328}
329
330static int
331uipc_peeraddr(struct socket *so, struct sockaddr **nam)
332{
333 struct unpcb *unp = sotounpcb(so);
334
2d21ac55
A
335 if (unp == NULL)
336 return (EINVAL);
337 if (unp->unp_conn != NULL && unp->unp_conn->unp_addr != NULL) {
338 *nam = dup_sockaddr((struct sockaddr *)
339 unp->unp_conn->unp_addr, 1);
340 } else {
341 *nam = dup_sockaddr((struct sockaddr *)&sun_noname, 1);
342 }
343 return (0);
1c79356b
A
344}
345
346static int
91447636 347uipc_rcvd(struct socket *so, __unused int flags)
1c79356b
A
348{
349 struct unpcb *unp = sotounpcb(so);
350 struct socket *so2;
351
352 if (unp == 0)
2d21ac55 353 return (EINVAL);
1c79356b
A
354 switch (so->so_type) {
355 case SOCK_DGRAM:
356 panic("uipc_rcvd DGRAM?");
357 /*NOTREACHED*/
358
359 case SOCK_STREAM:
360#define rcv (&so->so_rcv)
2d21ac55 361#define snd (&so2->so_snd)
1c79356b
A
362 if (unp->unp_conn == 0)
363 break;
b0d623f7 364
1c79356b 365 so2 = unp->unp_conn->unp_socket;
b0d623f7 366 unp_get_locks_in_order(so, so2);
1c79356b
A
367 /*
368 * Adjust backpressure on sender
369 * and wakeup any waiting to write.
370 */
371 snd->sb_mbmax += unp->unp_mbcnt - rcv->sb_mbcnt;
372 unp->unp_mbcnt = rcv->sb_mbcnt;
373 snd->sb_hiwat += unp->unp_cc - rcv->sb_cc;
374 unp->unp_cc = rcv->sb_cc;
375 sowwakeup(so2);
b0d623f7
A
376
377 socket_unlock(so2, 1);
378
1c79356b
A
379#undef snd
380#undef rcv
381 break;
382
383 default:
384 panic("uipc_rcvd unknown socktype");
385 }
2d21ac55 386 return (0);
1c79356b
A
387}
388
389/* pru_rcvoob is EOPNOTSUPP */
390
2d21ac55
A
391/*
392 * Returns: 0 Success
393 * EINVAL
394 * EOPNOTSUPP
395 * EPIPE
396 * ENOTCONN
397 * EISCONN
398 * unp_internalize:EINVAL
399 * unp_internalize:EBADF
400 * unp_connect:EAFNOSUPPORT Address family not supported
401 * unp_connect:EINVAL Invalid argument
402 * unp_connect:ENOTSOCK Not a socket
403 * unp_connect:ECONNREFUSED Connection refused
404 * unp_connect:EISCONN Socket is connected
405 * unp_connect:EPROTOTYPE Protocol wrong type for socket
406 * unp_connect:???
407 * sbappendaddr:ENOBUFS [5th argument, contents modified]
408 * sbappendaddr:??? [whatever a filter author chooses]
409 */
1c79356b
A
410static int
411uipc_send(struct socket *so, int flags, struct mbuf *m, struct sockaddr *nam,
2d21ac55 412 struct mbuf *control, proc_t p)
1c79356b
A
413{
414 int error = 0;
415 struct unpcb *unp = sotounpcb(so);
416 struct socket *so2;
417
418 if (unp == 0) {
419 error = EINVAL;
420 goto release;
421 }
422 if (flags & PRUS_OOB) {
423 error = EOPNOTSUPP;
424 goto release;
425 }
426
13fec989 427 if (control) {
b0d623f7 428 /* release lock to avoid deadlock (4436174) */
2d21ac55 429 socket_unlock(so, 0);
13fec989
A
430 error = unp_internalize(control, p);
431 socket_lock(so, 0);
432 if (error)
433 goto release;
434 }
1c79356b
A
435
436 switch (so->so_type) {
2d21ac55 437 case SOCK_DGRAM:
1c79356b
A
438 {
439 struct sockaddr *from;
440
441 if (nam) {
442 if (unp->unp_conn) {
443 error = EISCONN;
444 break;
445 }
446 error = unp_connect(so, nam, p);
447 if (error)
448 break;
449 } else {
450 if (unp->unp_conn == 0) {
451 error = ENOTCONN;
452 break;
453 }
454 }
b0d623f7 455
1c79356b 456 so2 = unp->unp_conn->unp_socket;
6d2010ae
A
457 if (so != so2)
458 unp_get_locks_in_order(so, so2);
b0d623f7 459
1c79356b
A
460 if (unp->unp_addr)
461 from = (struct sockaddr *)unp->unp_addr;
462 else
463 from = &sun_noname;
2d21ac55
A
464 /*
465 * sbappendaddr() will fail when the receiver runs out of
466 * space; in contrast to SOCK_STREAM, we will lose messages
467 * for the SOCK_DGRAM case when the receiver's queue overflows.
468 * SB_UNIX on the socket buffer implies that the callee will
469 * not free the control message, if any, because we would need
470 * to call unp_dispose() on it.
471 */
91447636 472 if (sbappendaddr(&so2->so_rcv, from, m, control, &error)) {
2d21ac55 473 control = NULL;
1c79356b 474 sorwakeup(so2);
2d21ac55
A
475 } else if (control != NULL && error == 0) {
476 /* A socket filter took control; don't touch it */
477 control = NULL;
91447636 478 }
b0d623f7 479
6d2010ae
A
480 if (so != so2)
481 socket_unlock(so2, 1);
b0d623f7 482
2d21ac55 483 m = NULL;
1c79356b
A
484 if (nam)
485 unp_disconnect(unp);
486 break;
487 }
488
91447636
A
489 case SOCK_STREAM: {
490 int didreceive = 0;
1c79356b
A
491#define rcv (&so2->so_rcv)
492#define snd (&so->so_snd)
493 /* Connect if not connected yet. */
494 /*
495 * Note: A better implementation would complain
496 * if not equal to the peer's address.
497 */
498 if ((so->so_state & SS_ISCONNECTED) == 0) {
499 if (nam) {
500 error = unp_connect(so, nam, p);
501 if (error)
502 break; /* XXX */
503 } else {
504 error = ENOTCONN;
505 break;
506 }
507 }
508
509 if (so->so_state & SS_CANTSENDMORE) {
510 error = EPIPE;
511 break;
512 }
513 if (unp->unp_conn == 0)
514 panic("uipc_send connected but no connection?");
b0d623f7 515
1c79356b 516 so2 = unp->unp_conn->unp_socket;
b0d623f7
A
517 unp_get_locks_in_order(so, so2);
518
519 /* Check socket state again as we might have unlocked the socket
520 * while trying to get the locks in order
521 */
522
523 if ((so->so_state & SS_CANTSENDMORE)) {
524 error = EPIPE;
525 socket_unlock(so2, 1);
526 break;
527 }
528
6d2010ae
A
529 if (unp->unp_flags & UNP_TRACE_MDNS) {
530 struct mdns_ipc_msg_hdr hdr;
531
532 if (mbuf_copydata(m, 0, sizeof (hdr), &hdr) == 0 &&
533 hdr.version == ntohl(MDNS_IPC_MSG_HDR_VERSION_1)) {
534 printf("%s[mDNSResponder] pid=%d (%s): op=0x%x\n",
535 __func__, p->p_pid, p->p_comm, ntohl(hdr.op));
536 }
537 }
538
1c79356b 539 /*
2d21ac55
A
540 * Send to paired receive port, and then reduce send buffer
541 * hiwater marks to maintain backpressure. Wake up readers.
542 * SB_UNIX flag will allow new record to be appended to the
543 * receiver's queue even when it is already full. It is
544 * possible, however, that append might fail. In that case,
545 * we will need to call unp_dispose() on the control message;
546 * the callee will not free it since SB_UNIX is set.
1c79356b 547 */
2d21ac55
A
548 didreceive = control ?
549 sbappendcontrol(rcv, m, control, &error) : sbappend(rcv, m);
550
551 snd->sb_mbmax -= rcv->sb_mbcnt - unp->unp_conn->unp_mbcnt;
1c79356b
A
552 unp->unp_conn->unp_mbcnt = rcv->sb_mbcnt;
553 snd->sb_hiwat -= rcv->sb_cc - unp->unp_conn->unp_cc;
554 unp->unp_conn->unp_cc = rcv->sb_cc;
2d21ac55
A
555 if (didreceive) {
556 control = NULL;
91447636 557 sorwakeup(so2);
2d21ac55
A
558 } else if (control != NULL && error == 0) {
559 /* A socket filter took control; don't touch it */
560 control = NULL;
561 }
b0d623f7
A
562
563 socket_unlock(so2, 1);
2d21ac55 564 m = NULL;
1c79356b
A
565#undef snd
566#undef rcv
91447636 567 }
1c79356b
A
568 break;
569
570 default:
571 panic("uipc_send unknown socktype");
572 }
573
574 /*
575 * SEND_EOF is equivalent to a SEND followed by
576 * a SHUTDOWN.
577 */
578 if (flags & PRUS_EOF) {
579 socantsendmore(so);
580 unp_shutdown(unp);
581 }
582
2d21ac55
A
583 if (control && error != 0) {
584 socket_unlock(so, 0);
91447636 585 unp_dispose(control);
2d21ac55
A
586 socket_lock(so, 0);
587 }
91447636 588
1c79356b
A
589release:
590 if (control)
591 m_freem(control);
592 if (m)
593 m_freem(m);
2d21ac55 594 return (error);
1c79356b
A
595}
596
597static int
2d21ac55 598uipc_sense(struct socket *so, void *ub, int isstat64)
1c79356b
A
599{
600 struct unpcb *unp = sotounpcb(so);
601 struct socket *so2;
2d21ac55 602 blksize_t blksize;
1c79356b
A
603
604 if (unp == 0)
2d21ac55
A
605 return (EINVAL);
606
607 blksize = so->so_snd.sb_hiwat;
1c79356b
A
608 if (so->so_type == SOCK_STREAM && unp->unp_conn != 0) {
609 so2 = unp->unp_conn->unp_socket;
2d21ac55 610 blksize += so2->so_rcv.sb_cc;
1c79356b 611 }
1c79356b
A
612 if (unp->unp_ino == 0)
613 unp->unp_ino = unp_ino++;
2d21ac55
A
614
615 if (isstat64 != 0) {
616 struct stat64 *sb64;
617
618 sb64 = (struct stat64 *)ub;
619 sb64->st_blksize = blksize;
620 sb64->st_dev = NODEV;
621 sb64->st_ino = (ino64_t)unp->unp_ino;
622 } else {
623 struct stat *sb;
624
625 sb = (struct stat *)ub;
626 sb->st_blksize = blksize;
627 sb->st_dev = NODEV;
b0d623f7 628 sb->st_ino = (ino_t)(uintptr_t)unp->unp_ino;
2d21ac55
A
629 }
630
1c79356b
A
631 return (0);
632}
633
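/*
 * Illustrative userspace sketch (not part of this file): uipc_sense()
 * above is what ultimately services fstat(2) on an AF_UNIX socket --
 * st_blksize reflects the send buffer high-water mark (plus the peer's
 * queued bytes for SOCK_STREAM), st_dev is NODEV, and st_ino is a fake
 * inode number.  A minimal caller using only standard headers:
 */
#include <sys/stat.h>
#include <stdio.h>

static void
show_socket_stat(int sock_fd)
{
	struct stat st;

	if (fstat(sock_fd, &st) == 0)
		printf("blksize=%lld ino=%llu\n",
		    (long long)st.st_blksize,
		    (unsigned long long)st.st_ino);
}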
2d21ac55
A
634/*
635 * Returns: 0 Success
636 * EINVAL
637 *
638 * Notes: This is not strictly correct, as unp_shutdown() also calls
639 * socantrcvmore(). These should maybe both be conditionalized
640 * on the 'how' argument in soshutdown() as called from the
641 * shutdown() system call.
642 */
1c79356b
A
643static int
644uipc_shutdown(struct socket *so)
645{
646 struct unpcb *unp = sotounpcb(so);
647
648 if (unp == 0)
2d21ac55 649 return (EINVAL);
1c79356b
A
650 socantsendmore(so);
651 unp_shutdown(unp);
2d21ac55 652 return (0);
1c79356b
A
653}
654
2d21ac55
A
655/*
656 * Returns: 0 Success
657 * EINVAL Invalid argument
658 */
1c79356b
A
659static int
660uipc_sockaddr(struct socket *so, struct sockaddr **nam)
661{
662 struct unpcb *unp = sotounpcb(so);
663
2d21ac55
A
664 if (unp == NULL)
665 return (EINVAL);
666 if (unp->unp_addr != NULL) {
1c79356b 667 *nam = dup_sockaddr((struct sockaddr *)unp->unp_addr, 1);
2d21ac55
A
668 } else {
669 *nam = dup_sockaddr((struct sockaddr *)&sun_noname, 1);
670 }
671 return (0);
1c79356b
A
672}
673
674struct pr_usrreqs uipc_usrreqs = {
675 uipc_abort, uipc_accept, uipc_attach, uipc_bind, uipc_connect,
676 uipc_connect2, pru_control_notsupp, uipc_detach, uipc_disconnect,
677 uipc_listen, uipc_peeraddr, uipc_rcvd, pru_rcvoob_notsupp,
678 uipc_send, uipc_sense, uipc_shutdown, uipc_sockaddr,
91447636 679 sosend, soreceive, pru_sopoll_notsupp
1c79356b 680};
91447636
A
681
682int
2d21ac55 683uipc_ctloutput(struct socket *so, struct sockopt *sopt)
91447636
A
684{
685 struct unpcb *unp = sotounpcb(so);
686 int error;
687
688 switch (sopt->sopt_dir) {
689 case SOPT_GET:
690 switch (sopt->sopt_name) {
691 case LOCAL_PEERCRED:
2d21ac55 692 if (unp->unp_flags & UNP_HAVEPC) {
91447636 693 error = sooptcopyout(sopt, &unp->unp_peercred,
2d21ac55
A
694 sizeof (unp->unp_peercred));
695 } else {
91447636
A
696 if (so->so_type == SOCK_STREAM)
697 error = ENOTCONN;
698 else
699 error = EINVAL;
700 }
701 break;
702 default:
703 error = EOPNOTSUPP;
704 break;
705 }
706 break;
707 case SOPT_SET:
708 default:
709 error = EOPNOTSUPP;
710 break;
711 }
712 return (error);
713}
2d21ac55 714
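/*
 * Illustrative userspace sketch (not part of this file; the helper name
 * is made up): the LOCAL_PEERCRED option handled above is read with
 * getsockopt(2) at level SOL_LOCAL and yields a struct xucred describing
 * the peer's credentials as cached at connect(2)/socketpair(2) time.
 */
#include <sys/socket.h>
#include <sys/un.h>
#include <sys/ucred.h>

static int
peer_euid(int fd, uid_t *uid)
{
	struct xucred xuc;
	socklen_t len = sizeof (xuc);

	if (getsockopt(fd, SOL_LOCAL, LOCAL_PEERCRED, &xuc, &len) != 0)
		return (-1);
	if (xuc.cr_version != XUCRED_VERSION)
		return (-1);
	*uid = xuc.cr_uid;	/* peer's effective uid */
	return (0);
}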
1c79356b
A
715/*
716 * Both send and receive buffers are allocated PIPSIZ bytes of buffering
717 * for stream sockets, although the total for sender and receiver is
718 * actually only PIPSIZ.
719 * Datagram sockets really use the sendspace as the maximum datagram size,
720 * and don't really want to reserve the sendspace. Their recvspace should
721 * be large enough for at least one max-size datagram plus address.
722 */
723#ifndef PIPSIZ
724#define PIPSIZ 8192
725#endif
b0d623f7
A
726static u_int32_t unpst_sendspace = PIPSIZ;
727static u_int32_t unpst_recvspace = PIPSIZ;
728static u_int32_t unpdg_sendspace = 2*1024; /* really max datagram size */
729static u_int32_t unpdg_recvspace = 4*1024;
1c79356b
A
730
731static int unp_rights; /* file descriptors in flight */
2d21ac55 732static int unp_disposed; /* discarded file descriptors */
1c79356b
A
733
734SYSCTL_DECL(_net_local_stream);
6d2010ae 735SYSCTL_INT(_net_local_stream, OID_AUTO, sendspace, CTLFLAG_RW | CTLFLAG_LOCKED,
2d21ac55 736 &unpst_sendspace, 0, "");
6d2010ae 737SYSCTL_INT(_net_local_stream, OID_AUTO, recvspace, CTLFLAG_RW | CTLFLAG_LOCKED,
2d21ac55 738 &unpst_recvspace, 0, "");
6d2010ae
A
739SYSCTL_INT(_net_local_stream, OID_AUTO, tracemdns, CTLFLAG_RW | CTLFLAG_LOCKED,
740 &unpst_tracemdns, 0, "");
1c79356b 741SYSCTL_DECL(_net_local_dgram);
6d2010ae 742SYSCTL_INT(_net_local_dgram, OID_AUTO, maxdgram, CTLFLAG_RW | CTLFLAG_LOCKED,
2d21ac55 743 &unpdg_sendspace, 0, "");
6d2010ae 744SYSCTL_INT(_net_local_dgram, OID_AUTO, recvspace, CTLFLAG_RW | CTLFLAG_LOCKED,
2d21ac55 745 &unpdg_recvspace, 0, "");
1c79356b 746SYSCTL_DECL(_net_local);
6d2010ae 747SYSCTL_INT(_net_local, OID_AUTO, inflight, CTLFLAG_RD | CTLFLAG_LOCKED, &unp_rights, 0, "");
1c79356b 748
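/*
 * Illustrative userspace sketch (not part of this file): the SYSCTL_INT
 * declarations above surface these tunables as
 * net.local.stream.{sendspace,recvspace,tracemdns},
 * net.local.dgram.{maxdgram,recvspace} and net.local.inflight.
 * Reading one of them with sysctlbyname(3):
 */
#include <sys/types.h>
#include <sys/sysctl.h>
#include <stdio.h>

static void
show_unix_dgram_max(void)
{
	u_int32_t maxdgram;
	size_t len = sizeof (maxdgram);

	if (sysctlbyname("net.local.dgram.maxdgram",
	    &maxdgram, &len, NULL, 0) == 0)
		printf("max AF_UNIX datagram: %u bytes\n", maxdgram);
}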
2d21ac55
A
749/*
750 * Returns: 0 Success
751 * ENOBUFS
752 * soreserve:ENOBUFS
753 */
1c79356b 754static int
91447636 755unp_attach(struct socket *so)
1c79356b 756{
91447636
A
757 struct unpcb *unp;
758 int error = 0;
1c79356b
A
759
760 if (so->so_snd.sb_hiwat == 0 || so->so_rcv.sb_hiwat == 0) {
761 switch (so->so_type) {
762
763 case SOCK_STREAM:
764 error = soreserve(so, unpst_sendspace, unpst_recvspace);
765 break;
766
767 case SOCK_DGRAM:
768 error = soreserve(so, unpdg_sendspace, unpdg_recvspace);
769 break;
770
771 default:
772 panic("unp_attach");
773 }
774 if (error)
775 return (error);
776 }
2d21ac55 777 unp = (struct unpcb *)zalloc(unp_zone);
1c79356b
A
778 if (unp == NULL)
779 return (ENOBUFS);
2d21ac55 780 bzero(unp, sizeof (*unp));
b0d623f7 781
6d2010ae
A
782 lck_mtx_init(&unp->unp_mtx,
783 unp_mtx_grp, unp_mtx_attr);
b0d623f7 784
37839358 785 lck_rw_lock_exclusive(unp_list_mtx);
1c79356b
A
786 LIST_INIT(&unp->unp_refs);
787 unp->unp_socket = so;
91447636
A
788 unp->unp_gencnt = ++unp_gencnt;
789 unp_count++;
2d21ac55
A
790 LIST_INSERT_HEAD(so->so_type == SOCK_DGRAM ?
791 &unp_dhead : &unp_shead, unp, unp_link);
b0d623f7 792 lck_rw_done(unp_list_mtx);
1c79356b 793 so->so_pcb = (caddr_t)unp;
2d21ac55
A
794 /*
795 * Mark AF_UNIX socket buffers accordingly so that:
796 *
797 * a. In the SOCK_STREAM case, socket buffer append won't fail due to
798 * the lack of space; this essentially loosens the sbspace() check,
799 * since there is disconnect between sosend() and uipc_send() with
800 * respect to flow control that might result in our dropping the
801 * data in uipc_send(). By setting this, we allow for slightly
802 * more records to be appended to the receiving socket to avoid
803 * losing data (which we can't afford in the SOCK_STREAM case).
804 * Flow control still takes place since we adjust the sender's
805 * hiwat during each send. This doesn't affect the SOCK_DGRAM
806 * case and append would still fail when the queue overflows.
807 *
808 * b. In the presence of control messages containing internalized
809 * file descriptors, the append routines will not free them since
810 * we'd need to undo the work first via unp_dispose().
811 */
812 so->so_rcv.sb_flags |= SB_UNIX;
813 so->so_snd.sb_flags |= SB_UNIX;
1c79356b
A
814 return (0);
815}
816
817static void
91447636 818unp_detach(struct unpcb *unp)
1c79356b 819{
b7266188
A
820 int so_locked = 1;
821
37839358 822 lck_rw_lock_exclusive(unp_list_mtx);
1c79356b 823 LIST_REMOVE(unp, unp_link);
37839358 824 lck_rw_done(unp_list_mtx);
1c79356b 825 if (unp->unp_vnode) {
b0d623f7
A
826 struct vnode *tvp = NULL;
827 socket_unlock(unp->unp_socket, 0);
828
829 /* Holding unp_connect_lock will avoid a race between
830 * a thread closing the listening socket and a thread
831 * connecting to it.
832 */
833 lck_mtx_lock(unp_connect_lock);
834 socket_lock(unp->unp_socket, 0);
835 if (unp->unp_vnode) {
836 tvp = unp->unp_vnode;
837 unp->unp_vnode->v_socket = NULL;
838 unp->unp_vnode = NULL;
839 }
840 lck_mtx_unlock(unp_connect_lock);
841 if (tvp != NULL)
842 vnode_rele(tvp); /* drop the usecount */
1c79356b
A
843 }
844 if (unp->unp_conn)
845 unp_disconnect(unp);
b0d623f7 846 while (unp->unp_refs.lh_first) {
b7266188
A
847 struct unpcb *unp2 = NULL;
848
849 /* This datagram socket is connected to one or more
850 * sockets. In order to avoid a race condition between removing
851 * this reference and closing the connected socket, we need
852 * to check disconnect_in_progress
853 */
854 if (so_locked == 1) {
855 socket_unlock(unp->unp_socket, 0);
856 so_locked = 0;
857 }
858 lck_mtx_lock(unp_disconnect_lock);
859 while (disconnect_in_progress != 0) {
860 (void)msleep((caddr_t)&disconnect_in_progress, unp_disconnect_lock,
861 PSOCK, "disconnect", NULL);
862 }
863 disconnect_in_progress = 1;
864 lck_mtx_unlock(unp_disconnect_lock);
865
866 /* Now we are sure that any unpcb socket disconnect is not happening */
867 if (unp->unp_refs.lh_first != NULL) {
868 unp2 = unp->unp_refs.lh_first;
869 socket_lock(unp2->unp_socket, 1);
870 }
871
872 lck_mtx_lock(unp_disconnect_lock);
873 disconnect_in_progress = 0;
874 wakeup(&disconnect_in_progress);
875 lck_mtx_unlock(unp_disconnect_lock);
876
877 if (unp2 != NULL) {
878 /* We already locked this socket and have a reference on it */
879 unp_drop(unp2, ECONNRESET);
880 socket_unlock(unp2->unp_socket, 1);
881 }
882 }
883
884 if (so_locked == 0) {
b0d623f7 885 socket_lock(unp->unp_socket, 0);
b7266188 886 so_locked = 1;
b0d623f7 887 }
1c79356b 888 soisdisconnected(unp->unp_socket);
2d21ac55
A
889 /* makes sure we're getting dealloced */
890 unp->unp_socket->so_flags |= SOF_PCBCLEARING;
1c79356b
A
891}
892
2d21ac55
A
893/*
894 * Returns: 0 Success
895 * EAFNOSUPPORT
896 * EINVAL
897 * EADDRINUSE
898 * namei:??? [anything namei can return]
899 * vnode_authorize:??? [anything vnode_authorize can return]
900 *
901 * Notes: p at this point is the current process, as this function is
902 * only called by sobind().
903 */
1c79356b 904static int
91447636
A
905unp_bind(
906 struct unpcb *unp,
907 struct sockaddr *nam,
2d21ac55 908 proc_t p)
1c79356b
A
909{
910 struct sockaddr_un *soun = (struct sockaddr_un *)nam;
91447636
A
911 struct vnode *vp, *dvp;
912 struct vnode_attr va;
2d21ac55 913 vfs_context_t ctx = vfs_context_current();
1c79356b
A
914 int error, namelen;
915 struct nameidata nd;
b0d623f7 916 struct socket *so = unp->unp_socket;
1c79356b
A
917 char buf[SOCK_MAXADDRLEN];
918
2d21ac55
A
919 if (nam->sa_family != 0 && nam->sa_family != AF_UNIX) {
920 return (EAFNOSUPPORT);
921 }
91447636 922
2d21ac55 923 if (unp->unp_vnode != NULL)
1c79356b 924 return (EINVAL);
1c79356b 925 namelen = soun->sun_len - offsetof(struct sockaddr_un, sun_path);
2d21ac55
A
926 if (namelen <= 0)
927 return (EINVAL);
928
b0d623f7
A
929 socket_unlock(so, 0);
930
2d21ac55 931 strlcpy(buf, soun->sun_path, namelen+1);
6d2010ae 932 NDINIT(&nd, CREATE, OP_MKFIFO, FOLLOW | LOCKPARENT, UIO_SYSSPACE,
2d21ac55
A
933 CAST_USER_ADDR_T(buf), ctx);
934 /* SHOULD BE ABLE TO ADOPT EXISTING AND wakeup() ALA FIFO's */
1c79356b
A
935 error = namei(&nd);
936 if (error) {
b0d623f7 937 socket_lock(so, 0);
1c79356b
A
938 return (error);
939 }
91447636 940 dvp = nd.ni_dvp;
1c79356b 941 vp = nd.ni_vp;
91447636 942
1c79356b 943 if (vp != NULL) {
2d21ac55 944 /*
91447636
A
945 * need to do this before the vnode_put of dvp
946 * since we may have to release an fs_nodelock
947 */
948 nameidone(&nd);
949
950 vnode_put(dvp);
951 vnode_put(vp);
952
b0d623f7 953 socket_lock(so, 0);
1c79356b
A
954 return (EADDRINUSE);
955 }
91447636 956
2d21ac55
A
957 VATTR_INIT(&va);
958 VATTR_SET(&va, va_type, VSOCK);
959 VATTR_SET(&va, va_mode, (ACCESSPERMS & ~p->p_fd->fd_cmask));
960
b0d623f7 961#if CONFIG_MACF
2d21ac55
A
962 error = mac_vnode_check_create(ctx,
963 nd.ni_dvp, &nd.ni_cnd, &va);
964
965 if (error == 0)
b0d623f7
A
966#endif /* CONFIG_MACF */
967#if CONFIG_MACF_SOCKET_SUBSET
968 error = mac_vnode_check_uipc_bind(ctx,
969 nd.ni_dvp, &nd.ni_cnd, &va);
970
971 if (error == 0)
972#endif /* MAC_SOCKET_SUBSET */
91447636 973 /* authorize before creating */
2d21ac55 974 error = vnode_authorize(dvp, NULL, KAUTH_VNODE_ADD_FILE, ctx);
91447636
A
975
976 if (!error) {
91447636 977 /* create the socket */
6d2010ae 978 error = vn_create(dvp, &vp, &nd, &va, 0, 0, NULL, ctx);
91447636 979 }
2d21ac55 980
91447636
A
981 nameidone(&nd);
982 vnode_put(dvp);
983
1c79356b 984 if (error) {
b0d623f7 985 socket_lock(so, 0);
1c79356b
A
986 return (error);
987 }
91447636 988 vnode_ref(vp); /* gain a longterm reference */
b0d623f7 989 socket_lock(so, 0);
1c79356b
A
990 vp->v_socket = unp->unp_socket;
991 unp->unp_vnode = vp;
992 unp->unp_addr = (struct sockaddr_un *)dup_sockaddr(nam, 1);
91447636
A
993 vnode_put(vp); /* drop the iocount */
994
1c79356b
A
995 return (0);
996}
997
2d21ac55
A
998
999/*
1000 * Returns: 0 Success
1001 * EAFNOSUPPORT Address family not supported
1002 * EINVAL Invalid argument
1003 * ENOTSOCK Not a socket
1004 * ECONNREFUSED Connection refused
1005 * EPROTOTYPE Protocol wrong type for socket
1006 * EISCONN Socket is connected
1007 * unp_connect2:EPROTOTYPE Protocol wrong type for socket
1008 * unp_connect2:EINVAL Invalid argument
1009 * namei:??? [anything namei can return]
1010 * vnode_authorize:???? [anything vnode_authorize can return]
1011 *
1012 * Notes: p at this point is the current process, as this function is
1013 * only called by sosend(), sendfile(), and soconnectlock().
1014 */
1c79356b 1015static int
2d21ac55 1016unp_connect(struct socket *so, struct sockaddr *nam, __unused proc_t p)
1c79356b 1017{
91447636
A
1018 struct sockaddr_un *soun = (struct sockaddr_un *)nam;
1019 struct vnode *vp;
b0d623f7 1020 struct socket *so2, *so3, *list_so=NULL;
91447636 1021 struct unpcb *unp, *unp2, *unp3;
2d21ac55 1022 vfs_context_t ctx = vfs_context_current();
1c79356b
A
1023 int error, len;
1024 struct nameidata nd;
1025 char buf[SOCK_MAXADDRLEN];
1026
2d21ac55
A
1027 if (nam->sa_family != 0 && nam->sa_family != AF_UNIX) {
1028 return (EAFNOSUPPORT);
1029 }
1030
b0d623f7 1031 unp = sotounpcb(so);
cc9f6e38 1032 so2 = so3 = NULL;
91447636 1033
1c79356b 1034 len = nam->sa_len - offsetof(struct sockaddr_un, sun_path);
2d21ac55
A
1035 if (len <= 0)
1036 return (EINVAL);
1037
1038 strlcpy(buf, soun->sun_path, len+1);
b0d623f7 1039 socket_unlock(so, 0);
1c79356b 1040
6d2010ae 1041 NDINIT(&nd, LOOKUP, OP_LOOKUP, FOLLOW | LOCKLEAF, UIO_SYSSPACE,
2d21ac55 1042 CAST_USER_ADDR_T(buf), ctx);
1c79356b
A
1043 error = namei(&nd);
1044 if (error) {
b0d623f7 1045 socket_lock(so, 0);
1c79356b
A
1046 return (error);
1047 }
91447636 1048 nameidone(&nd);
1c79356b
A
1049 vp = nd.ni_vp;
1050 if (vp->v_type != VSOCK) {
1051 error = ENOTSOCK;
b0d623f7
A
1052 socket_lock(so, 0);
1053 goto out;
1c79356b 1054 }
91447636 1055
b0d623f7
A
1056#if CONFIG_MACF_SOCKET_SUBSET
1057 error = mac_vnode_check_uipc_connect(ctx, vp);
1058 if (error) {
1059 socket_lock(so, 0);
1060 goto out;
1061 }
1062#endif /* MAC_SOCKET_SUBSET */
1063
2d21ac55 1064 error = vnode_authorize(vp, NULL, KAUTH_VNODE_WRITE_DATA, ctx);
b0d623f7
A
1065 if (error) {
1066 socket_lock(so, 0);
1067 goto out;
1068 }
1069
1070 lck_mtx_lock(unp_connect_lock);
1071
1072 if (vp->v_socket == 0) {
1073 lck_mtx_unlock(unp_connect_lock);
1c79356b 1074 error = ECONNREFUSED;
b0d623f7
A
1075 socket_lock(so, 0);
1076 goto out;
1c79356b 1077 }
91447636 1078
b0d623f7
A
1079 socket_lock(vp->v_socket, 1); /* Get a reference on the listening socket */
1080 so2 = vp->v_socket;
1081 lck_mtx_unlock(unp_connect_lock);
91447636 1082
b0d623f7
A
1083
1084 if (so2->so_pcb == NULL) {
1085 error = ECONNREFUSED;
6d2010ae
A
1086 if (so != so2) {
1087 socket_unlock(so2, 1);
1088 socket_lock(so, 0);
1089 } else {
1090 /* Release the reference held for the listen socket */
1091 so2->so_usecount--;
1092 }
b0d623f7 1093 goto out;
1c79356b 1094 }
2d21ac55 1095
b0d623f7
A
1096 if (so < so2) {
1097 socket_unlock(so2, 0);
1098 socket_lock(so, 0);
1099 socket_lock(so2, 0);
6d2010ae 1100 } else if (so > so2) {
b0d623f7
A
1101 socket_lock(so, 0);
1102 }
55e303ae
A
1103 /*
1104 * Check if socket was connected while we were trying to
b0d623f7 1105 * get the socket locks in order.
55e303ae
A
1106 * XXX - probably shouldn't return an error for SOCK_DGRAM
1107 */
1108 if ((so->so_state & SS_ISCONNECTED) != 0) {
1109 error = EISCONN;
6d2010ae 1110 goto decref_out;
b0d623f7
A
1111 }
1112
1113 if (so->so_type != so2->so_type) {
b0d623f7 1114 error = EPROTOTYPE;
6d2010ae 1115 goto decref_out;
55e303ae 1116 }
2d21ac55 1117
1c79356b 1118 if (so->so_proto->pr_flags & PR_CONNREQUIRED) {
b0d623f7
A
1119 /* Release the incoming socket but keep a reference */
1120 socket_unlock(so, 0);
1121
1c79356b 1122 if ((so2->so_options & SO_ACCEPTCONN) == 0 ||
91447636 1123 (so3 = sonewconn(so2, 0, nam)) == 0) {
1c79356b 1124 error = ECONNREFUSED;
b0d623f7
A
1125 socket_unlock(so2, 1);
1126 socket_lock(so, 0);
1127 goto out;
1c79356b
A
1128 }
1129 unp2 = sotounpcb(so2);
1130 unp3 = sotounpcb(so3);
1131 if (unp2->unp_addr)
1132 unp3->unp_addr = (struct sockaddr_un *)
2d21ac55 1133 dup_sockaddr((struct sockaddr *)unp2->unp_addr, 1);
91447636
A
1134
1135 /*
1136 * unp_peercred management:
1137 *
1138 * The connecter's (client's) credentials are copied
1139 * from its process structure at the time of connect()
1140 * (which is now).
1141 */
2d21ac55 1142 cru2x(vfs_context_ucred(ctx), &unp3->unp_peercred);
91447636
A
1143 unp3->unp_flags |= UNP_HAVEPC;
1144 /*
1145 * The receiver's (server's) credentials are copied
1146 * from the unp_peercred member of socket on which the
1147 * former called listen(); unp_listen() cached that
1148 * process's credentials at that time so we can use
1149 * them now.
1150 */
1151 KASSERT(unp2->unp_flags & UNP_HAVEPCCACHED,
1152 ("unp_connect: listener without cached peercred"));
b0d623f7
A
1153
1154 /* Here we need to have both so and so2 locks and so2
1155 * is already locked. Lock ordering is required.
1156 */
1157 if (so < so2) {
1158 socket_unlock(so2, 0);
1159 socket_lock(so, 0);
1160 socket_lock(so2, 0);
1161 } else {
1162 socket_lock(so, 0);
1163 }
1164
1165 /* Check again if the socket state changed when its lock was released */
1166 if ((so->so_state & SS_ISCONNECTED) != 0) {
1167 error = EISCONN;
1168 socket_unlock(so2, 1);
1169 socket_lock(so3, 0);
1170 sofreelastref(so3, 1);
1171 goto out;
1172 }
91447636 1173 memcpy(&unp->unp_peercred, &unp2->unp_peercred,
2d21ac55 1174 sizeof (unp->unp_peercred));
91447636
A
1175 unp->unp_flags |= UNP_HAVEPC;
1176
2d21ac55
A
1177#if CONFIG_MACF_SOCKET
1178 /* XXXMAC: recursive lock: SOCK_LOCK(so); */
1179 mac_socketpeer_label_associate_socket(so, so3);
1180 mac_socketpeer_label_associate_socket(so3, so);
1181 /* XXXMAC: SOCK_UNLOCK(so); */
1182#endif /* MAC_SOCKET */
b0d623f7
A
1183
1184 /* Hold the reference on listening socket until the end */
1185 socket_unlock(so2, 0);
1186 list_so = so2;
1187
1188 /* Lock ordering doesn't matter because so3 was just created */
1189 socket_lock(so3, 1);
1c79356b 1190 so2 = so3;
b0d623f7 1191
6d2010ae
A
1192 /*
1193 * Enable tracing for mDNSResponder endpoints. (The use
1194 * of sizeof instead of strlen below takes the null
1195 * terminating character into account.)
1196 */
1197 if (unpst_tracemdns &&
1198 !strncmp(soun->sun_path, MDNSRESPONDER_PATH,
1199 sizeof (MDNSRESPONDER_PATH))) {
1200 unp->unp_flags |= UNP_TRACE_MDNS;
1201 unp2->unp_flags |= UNP_TRACE_MDNS;
1202 }
1c79356b 1203 }
b0d623f7 1204
1c79356b 1205 error = unp_connect2(so, so2);
6d2010ae
A
1206
1207decref_out:
b0d623f7 1208 if (so2 != NULL) {
6d2010ae
A
1209 if (so != so2) {
1210 socket_unlock(so2, 1);
1211 } else {
1212 /* Release the extra reference held for the listen socket.
1213 * This is possible only for SOCK_DGRAM sockets. We refuse
1214 * connecting to the same socket for SOCK_STREAM sockets.
1215 */
1216 so2->so_usecount--;
1217 }
b0d623f7
A
1218 }
1219
1220 if (list_so != NULL) {
1221 socket_lock(list_so, 0);
1222 socket_unlock(list_so, 1);
1223 }
6d2010ae 1224
b0d623f7 1225out:
6d2010ae 1226 lck_mtx_assert(&unp->unp_mtx, LCK_MTX_ASSERT_OWNED);
91447636 1227 vnode_put(vp);
1c79356b
A
1228 return (error);
1229}
1230
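/*
 * Illustrative userspace sketch (not part of this file; path handling
 * and the helper name are made up): this is the client side that drives
 * unp_connect() above -- a SOCK_STREAM connect(2) against a filesystem
 * socket node that a listener created with bind(2)/listen(2).
 */
#include <sys/socket.h>
#include <sys/un.h>
#include <string.h>
#include <unistd.h>

static int
unix_stream_connect(const char *path)
{
	struct sockaddr_un sun;
	int fd;

	fd = socket(AF_UNIX, SOCK_STREAM, 0);
	if (fd < 0)
		return (-1);

	memset(&sun, 0, sizeof (sun));
	sun.sun_family = AF_UNIX;
	strlcpy(sun.sun_path, path, sizeof (sun.sun_path));
	sun.sun_len = sizeof (sun);

	if (connect(fd, (struct sockaddr *)&sun, sizeof (sun)) != 0) {
		close(fd);
		return (-1);
	}
	return (fd);
}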
2d21ac55
A
1231/*
1232 * Returns: 0 Success
1233 * EPROTOTYPE Protocol wrong type for socket
1234 * EINVAL Invalid argument
1235 */
1c79356b 1236int
2d21ac55 1237unp_connect2(struct socket *so, struct socket *so2)
1c79356b 1238{
91447636
A
1239 struct unpcb *unp = sotounpcb(so);
1240 struct unpcb *unp2;
1c79356b
A
1241
1242 if (so2->so_type != so->so_type)
1243 return (EPROTOTYPE);
b0d623f7 1244
1c79356b 1245 unp2 = sotounpcb(so2);
0b4e3aa0 1246
6d2010ae
A
1247 lck_mtx_assert(&unp->unp_mtx, LCK_MTX_ASSERT_OWNED);
1248 lck_mtx_assert(&unp2->unp_mtx, LCK_MTX_ASSERT_OWNED);
b0d623f7 1249
0b4e3aa0
A
1250 /* Verify both sockets are still opened */
1251 if (unp == 0 || unp2 == 0)
1252 return (EINVAL);
1253
1c79356b 1254 unp->unp_conn = unp2;
b0d623f7
A
1255 so2->so_usecount++;
1256
1c79356b
A
1257 switch (so->so_type) {
1258
1259 case SOCK_DGRAM:
1260 LIST_INSERT_HEAD(&unp2->unp_refs, unp, unp_reflink);
b0d623f7 1261
6d2010ae
A
1262 if (so != so2) {
1263 /* Avoid lock order reversals due to drop/acquire in soisconnected. */
1264 /* Keep an extra reference on so2 that will be dropped
1265 * soon after getting the locks in order
1266 */
1267 socket_unlock(so2, 0);
1268 soisconnected(so);
1269 unp_get_locks_in_order(so, so2);
1270 so2->so_usecount--;
1271 } else {
1272 soisconnected(so);
1273 }
b0d623f7 1274
1c79356b
A
1275 break;
1276
1277 case SOCK_STREAM:
2d21ac55
A
1278 /* This takes care of socketpair */
1279 if (!(unp->unp_flags & UNP_HAVEPC) &&
1280 !(unp2->unp_flags & UNP_HAVEPC)) {
1281 cru2x(kauth_cred_get(), &unp->unp_peercred);
1282 unp->unp_flags |= UNP_HAVEPC;
1283
1284 cru2x(kauth_cred_get(), &unp2->unp_peercred);
1285 unp2->unp_flags |= UNP_HAVEPC;
1286 }
1c79356b 1287 unp2->unp_conn = unp;
b0d623f7
A
1288 so->so_usecount++;
1289
1290 /* Avoid lock order reversals due to drop/acquire in soisconnected. */
1291 socket_unlock(so, 0);
1c79356b 1292 soisconnected(so2);
b0d623f7
A
1293
1294 /* Keep an extra reference on so2, that will be dropped soon after
1295 * getting the locks in order again.
1296 */
1297 socket_unlock(so2, 0);
1298
1299 socket_lock(so, 0);
1300 soisconnected(so);
1301
1302 unp_get_locks_in_order(so, so2);
1303 /* Decrement the extra reference left before */
1304 so2->so_usecount--;
1c79356b
A
1305 break;
1306
1307 default:
b0d623f7 1308 panic("unknown socket type %d in unp_connect2", so->so_type);
1c79356b 1309 }
6d2010ae
A
1310 lck_mtx_assert(&unp->unp_mtx, LCK_MTX_ASSERT_OWNED);
1311 lck_mtx_assert(&unp2->unp_mtx, LCK_MTX_ASSERT_OWNED);
1c79356b
A
1312 return (0);
1313}
1314
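/*
 * Illustrative userspace sketch (not part of this file): the
 * "takes care of socketpair" branch above is the path socketpair(2)
 * ends up in -- both endpoints get UNP_HAVEPC credentials from the
 * creating process rather than from a later connect(2).
 */
#include <sys/socket.h>

static int
make_unix_pair(int sv[2])
{
	/* sv[0] and sv[1] are connected SOCK_STREAM AF_UNIX endpoints */
	return (socketpair(AF_UNIX, SOCK_STREAM, 0, sv));
}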
1315static void
91447636 1316unp_disconnect(struct unpcb *unp)
1c79356b 1317{
b0d623f7
A
1318 struct unpcb *unp2 = NULL;
1319 struct socket *so2 = NULL, *so;
1320 struct socket *waitso;
1321 int so_locked = 1, strdisconn = 0;
1c79356b 1322
b0d623f7
A
1323 so = unp->unp_socket;
1324 if (unp->unp_conn == NULL) {
1c79356b 1325 return;
b0d623f7
A
1326 }
1327 lck_mtx_lock(unp_disconnect_lock);
1328 while (disconnect_in_progress != 0) {
1329 if (so_locked == 1) {
1330 socket_unlock(so, 0);
1331 so_locked = 0;
1332 }
1333 (void)msleep((caddr_t)&disconnect_in_progress, unp_disconnect_lock,
1334 PSOCK, "disconnect", NULL);
1335 }
1336 disconnect_in_progress = 1;
1337 lck_mtx_unlock(unp_disconnect_lock);
1338
1339 if (so_locked == 0) {
1340 socket_lock(so, 0);
1341 so_locked = 1;
1342 }
1343
1344 unp2 = unp->unp_conn;
1345
1346 if (unp2 == 0 || unp2->unp_socket == NULL) {
1347 goto out;
1348 }
1349 so2 = unp2->unp_socket;
1350
1351try_again:
6d2010ae
A
1352 if (so == so2) {
1353 if (so_locked == 0) {
1354 socket_lock(so, 0);
1355 }
1356 waitso = so;
1357 } else if (so < so2) {
b0d623f7
A
1358 if (so_locked == 0) {
1359 socket_lock(so, 0);
1360 }
1361 socket_lock(so2, 1);
1362 waitso = so2;
1363 } else {
1364 if (so_locked == 1) {
1365 socket_unlock(so, 0);
1366 }
1367 socket_lock(so2, 1);
1368 socket_lock(so, 0);
1369 waitso = so;
1370 }
6d2010ae 1371 so_locked = 1;
b0d623f7 1372
6d2010ae
A
1373 lck_mtx_assert(&unp->unp_mtx, LCK_MTX_ASSERT_OWNED);
1374 lck_mtx_assert(&unp2->unp_mtx, LCK_MTX_ASSERT_OWNED);
b0d623f7
A
1375
1376 /* Check for the UNP_DONTDISCONNECT flag, if it
1377 * is set, release both sockets and go to sleep
1378 */
1379
1380 if ((((struct unpcb *)waitso->so_pcb)->unp_flags & UNP_DONTDISCONNECT) != 0) {
6d2010ae
A
1381 if (so != so2) {
1382 socket_unlock(so2, 1);
1383 }
b0d623f7
A
1384 so_locked = 0;
1385
6d2010ae 1386 (void)msleep(waitso->so_pcb, &unp->unp_mtx,
b0d623f7
A
1387 PSOCK | PDROP, "unpdisconnect", NULL);
1388 goto try_again;
1389 }
1390
1391 if (unp->unp_conn == NULL) {
1392 panic("unp_conn became NULL after sleep");
1393 }
1394
2d21ac55 1395 unp->unp_conn = NULL;
b0d623f7
A
1396 so2->so_usecount--;
1397
6d2010ae
A
1398 if (unp->unp_flags & UNP_TRACE_MDNS)
1399 unp->unp_flags &= ~UNP_TRACE_MDNS;
1400
1c79356b
A
1401 switch (unp->unp_socket->so_type) {
1402
1403 case SOCK_DGRAM:
1404 LIST_REMOVE(unp, unp_reflink);
1405 unp->unp_socket->so_state &= ~SS_ISCONNECTED;
6d2010ae
A
1406 if (so != so2)
1407 socket_unlock(so2, 1);
1c79356b
A
1408 break;
1409
1410 case SOCK_STREAM:
2d21ac55 1411 unp2->unp_conn = NULL;
b0d623f7
A
1412 so->so_usecount--;
1413
1414 /* Set the socket state correctly but do a wakeup later when
1415 * we release all locks except the socket lock, this will avoid
1416 * a deadlock.
1417 */
1418 unp->unp_socket->so_state &= ~(SS_ISCONNECTING|SS_ISCONNECTED|SS_ISDISCONNECTING);
1419 unp->unp_socket->so_state |= (SS_CANTRCVMORE|SS_CANTSENDMORE|SS_ISDISCONNECTED);
1420
1421 unp2->unp_socket->so_state &= ~(SS_ISCONNECTING|SS_ISCONNECTED|SS_ISDISCONNECTING);
1422 unp->unp_socket->so_state |= (SS_CANTRCVMORE|SS_CANTSENDMORE|SS_ISDISCONNECTED);
6d2010ae
A
1423
1424 if (unp2->unp_flags & UNP_TRACE_MDNS)
1425 unp2->unp_flags &= ~UNP_TRACE_MDNS;
1426
b0d623f7 1427 strdisconn = 1;
1c79356b 1428 break;
b0d623f7
A
1429 default:
1430 panic("unknown socket type %d", so->so_type);
1c79356b 1431 }
b0d623f7
A
1432out:
1433 lck_mtx_lock(unp_disconnect_lock);
1434 disconnect_in_progress = 0;
1435 wakeup(&disconnect_in_progress);
1436 lck_mtx_unlock(unp_disconnect_lock);
1c79356b 1437
b0d623f7
A
1438 if (strdisconn) {
1439 socket_unlock(so, 0);
1440 soisdisconnected(so2);
1441 socket_unlock(so2, 1);
1c79356b 1442
b0d623f7
A
1443 socket_lock(so,0);
1444 soisdisconnected(so);
1445 }
6d2010ae 1446 lck_mtx_assert(&unp->unp_mtx, LCK_MTX_ASSERT_OWNED);
b0d623f7 1447 return;
1c79356b 1448}
b0d623f7
A
1449
1450/*
1451 * unpcb_to_compat copies specific bits of a unpcb to a unpcb_compat format.
1452 * The unpcb_compat data structure is passed to user space and must not change.
1453 */
1454static void
1455unpcb_to_compat(struct unpcb *up, struct unpcb_compat *cp)
1456{
1457#if defined(__LP64__)
1458 cp->unp_link.le_next = (u_int32_t)(uintptr_t)up->unp_link.le_next;
1459 cp->unp_link.le_prev = (u_int32_t)(uintptr_t)up->unp_link.le_prev;
1460#else
1461 cp->unp_link.le_next = (struct unpcb_compat *)up->unp_link.le_next;
1462 cp->unp_link.le_prev = (struct unpcb_compat **)up->unp_link.le_prev;
1463#endif
1464 cp->unp_socket = (_UNPCB_PTR(struct socket *))(uintptr_t)up->unp_socket;
1465 cp->unp_vnode = (_UNPCB_PTR(struct vnode *))(uintptr_t)up->unp_vnode;
1466 cp->unp_ino = up->unp_ino;
1467 cp->unp_conn = (_UNPCB_PTR(struct unpcb_compat *))
1468 (uintptr_t)up->unp_conn;
1469 cp->unp_refs = (u_int32_t)(uintptr_t)up->unp_refs.lh_first;
1470#if defined(__LP64__)
1471 cp->unp_reflink.le_next =
1472 (u_int32_t)(uintptr_t)up->unp_reflink.le_next;
1473 cp->unp_reflink.le_prev =
1474 (u_int32_t)(uintptr_t)up->unp_reflink.le_prev;
1475#else
1476 cp->unp_reflink.le_next =
1477 (struct unpcb_compat *)up->unp_reflink.le_next;
1478 cp->unp_reflink.le_prev =
1479 (struct unpcb_compat **)up->unp_reflink.le_prev;
1c79356b 1480#endif
b0d623f7
A
1481 cp->unp_addr = (_UNPCB_PTR(struct sockaddr_un *))
1482 (uintptr_t)up->unp_addr;
1483 cp->unp_cc = up->unp_cc;
1484 cp->unp_mbcnt = up->unp_mbcnt;
1485 cp->unp_gencnt = up->unp_gencnt;
1486}
1c79356b
A
1487
1488static int
1489unp_pcblist SYSCTL_HANDLER_ARGS
1490{
2d21ac55 1491#pragma unused(oidp,arg2)
1c79356b
A
1492 int error, i, n;
1493 struct unpcb *unp, **unp_list;
1494 unp_gen_t gencnt;
1495 struct xunpgen xug;
1496 struct unp_head *head;
1497
37839358 1498 lck_rw_lock_shared(unp_list_mtx);
1c79356b
A
1499 head = ((intptr_t)arg1 == SOCK_DGRAM ? &unp_dhead : &unp_shead);
1500
1501 /*
1502 * The process of preparing the PCB list is too time-consuming and
1503 * resource-intensive to repeat twice on every request.
1504 */
91447636 1505 if (req->oldptr == USER_ADDR_NULL) {
1c79356b 1506 n = unp_count;
2d21ac55
A
1507 req->oldidx = 2 * sizeof (xug) + (n + n / 8) *
1508 sizeof (struct xunpcb);
37839358 1509 lck_rw_done(unp_list_mtx);
2d21ac55 1510 return (0);
1c79356b
A
1511 }
1512
91447636 1513 if (req->newptr != USER_ADDR_NULL) {
37839358 1514 lck_rw_done(unp_list_mtx);
2d21ac55 1515 return (EPERM);
91447636 1516 }
1c79356b
A
1517
1518 /*
1519 * OK, now we're committed to doing something.
1520 */
1521 gencnt = unp_gencnt;
1522 n = unp_count;
1523
2d21ac55
A
1524 bzero(&xug, sizeof (xug));
1525 xug.xug_len = sizeof (xug);
1c79356b
A
1526 xug.xug_count = n;
1527 xug.xug_gen = gencnt;
1528 xug.xug_sogen = so_gencnt;
2d21ac55 1529 error = SYSCTL_OUT(req, &xug, sizeof (xug));
91447636 1530 if (error) {
37839358 1531 lck_rw_done(unp_list_mtx);
2d21ac55 1532 return (error);
91447636 1533 }
1c79356b 1534
0b4e3aa0
A
1535 /*
1536 * We are done if there is no pcb
1537 */
91447636 1538 if (n == 0) {
2d21ac55
A
1539 lck_rw_done(unp_list_mtx);
1540 return (0);
91447636 1541 }
0b4e3aa0 1542
2d21ac55
A
1543 MALLOC(unp_list, struct unpcb **, n * sizeof (*unp_list),
1544 M_TEMP, M_WAITOK);
91447636 1545 if (unp_list == 0) {
37839358 1546 lck_rw_done(unp_list_mtx);
2d21ac55 1547 return (ENOMEM);
91447636 1548 }
2d21ac55 1549
1c79356b 1550 for (unp = head->lh_first, i = 0; unp && i < n;
2d21ac55 1551 unp = unp->unp_link.le_next) {
1c79356b
A
1552 if (unp->unp_gencnt <= gencnt)
1553 unp_list[i++] = unp;
1554 }
1555 n = i; /* in case we lost some during malloc */
1556
1557 error = 0;
1558 for (i = 0; i < n; i++) {
1559 unp = unp_list[i];
1560 if (unp->unp_gencnt <= gencnt) {
1561 struct xunpcb xu;
3a60a9f5 1562
2d21ac55
A
1563 bzero(&xu, sizeof (xu));
1564 xu.xu_len = sizeof (xu);
b0d623f7
A
1565 xu.xu_unpp = (_UNPCB_PTR(struct unpcb_compat *))
1566 (uintptr_t)unp;
1c79356b
A
1567 /*
1568 * XXX - need more locking here to protect against
1569 * connect/disconnect races for SMP.
1570 */
1571 if (unp->unp_addr)
2d21ac55
A
1572 bcopy(unp->unp_addr, &xu.xu_addr,
1573 unp->unp_addr->sun_len);
1c79356b
A
1574 if (unp->unp_conn && unp->unp_conn->unp_addr)
1575 bcopy(unp->unp_conn->unp_addr,
2d21ac55
A
1576 &xu.xu_caddr,
1577 unp->unp_conn->unp_addr->sun_len);
b0d623f7 1578 unpcb_to_compat(unp, &xu.xu_unp);
1c79356b 1579 sotoxsocket(unp->unp_socket, &xu.xu_socket);
2d21ac55 1580 error = SYSCTL_OUT(req, &xu, sizeof (xu));
1c79356b
A
1581 }
1582 }
1583 if (!error) {
1584 /*
1585 * Give the user an updated idea of our state.
1586 * If the generation differs from what we told
1587 * her before, she knows that something happened
1588 * while we were processing this request, and it
1589 * might be necessary to retry.
1590 */
2d21ac55
A
1591 bzero(&xug, sizeof (xug));
1592 xug.xug_len = sizeof (xug);
1c79356b
A
1593 xug.xug_gen = unp_gencnt;
1594 xug.xug_sogen = so_gencnt;
1595 xug.xug_count = unp_count;
2d21ac55 1596 error = SYSCTL_OUT(req, &xug, sizeof (xug));
1c79356b
A
1597 }
1598 FREE(unp_list, M_TEMP);
37839358 1599 lck_rw_done(unp_list_mtx);
2d21ac55 1600 return (error);
1c79356b
A
1601}
1602
6d2010ae 1603SYSCTL_PROC(_net_local_dgram, OID_AUTO, pcblist, CTLFLAG_RD | CTLFLAG_LOCKED,
b0d623f7
A
1604 (caddr_t)(long)SOCK_DGRAM, 0, unp_pcblist, "S,xunpcb",
1605 "List of active local datagram sockets");
6d2010ae 1606SYSCTL_PROC(_net_local_stream, OID_AUTO, pcblist, CTLFLAG_RD | CTLFLAG_LOCKED,
b0d623f7
A
1607 (caddr_t)(long)SOCK_STREAM, 0, unp_pcblist, "S,xunpcb",
1608 "List of active local stream sockets");
1609
1610#if !CONFIG_EMBEDDED
1611
1612static int
1613unp_pcblist64 SYSCTL_HANDLER_ARGS
1614{
1615#pragma unused(oidp,arg2)
1616 int error, i, n;
1617 struct unpcb *unp, **unp_list;
1618 unp_gen_t gencnt;
1619 struct xunpgen xug;
1620 struct unp_head *head;
1621
1622 lck_rw_lock_shared(unp_list_mtx);
1623 head = ((intptr_t)arg1 == SOCK_DGRAM ? &unp_dhead : &unp_shead);
1624
1625 /*
1626 * The process of preparing the PCB list is too time-consuming and
1627 * resource-intensive to repeat twice on every request.
1628 */
1629 if (req->oldptr == USER_ADDR_NULL) {
1630 n = unp_count;
1631 req->oldidx = 2 * sizeof (xug) + (n + n / 8) *
1632 (sizeof (struct xunpcb64));
1633 lck_rw_done(unp_list_mtx);
1634 return (0);
1635 }
1636
1637 if (req->newptr != USER_ADDR_NULL) {
1638 lck_rw_done(unp_list_mtx);
1639 return (EPERM);
1640 }
1641
1642 /*
1643 * OK, now we're committed to doing something.
1644 */
1645 gencnt = unp_gencnt;
1646 n = unp_count;
1647
1648 bzero(&xug, sizeof (xug));
1649 xug.xug_len = sizeof (xug);
1650 xug.xug_count = n;
1651 xug.xug_gen = gencnt;
1652 xug.xug_sogen = so_gencnt;
1653 error = SYSCTL_OUT(req, &xug, sizeof (xug));
1654 if (error) {
1655 lck_rw_done(unp_list_mtx);
1656 return (error);
1657 }
1658
1659 /*
1660 * We are done if there is no pcb
1661 */
1662 if (n == 0) {
1663 lck_rw_done(unp_list_mtx);
1664 return (0);
1665 }
1666
1667 MALLOC(unp_list, struct unpcb **, n * sizeof (*unp_list),
1668 M_TEMP, M_WAITOK);
1669 if (unp_list == 0) {
1670 lck_rw_done(unp_list_mtx);
1671 return (ENOMEM);
1672 }
1673
1674 for (unp = head->lh_first, i = 0; unp && i < n;
1675 unp = unp->unp_link.le_next) {
1676 if (unp->unp_gencnt <= gencnt)
1677 unp_list[i++] = unp;
1678 }
1679 n = i; /* in case we lost some during malloc */
1680
1681 error = 0;
1682 for (i = 0; i < n; i++) {
1683 unp = unp_list[i];
1684 if (unp->unp_gencnt <= gencnt) {
1685 struct xunpcb64 xu;
1686 size_t xu_len = sizeof(struct xunpcb64);
1687
1688 bzero(&xu, xu_len);
1689 xu.xu_len = xu_len;
1690 xu.xu_unpp = (u_int64_t)(uintptr_t)unp;
1691 xu.xunp_link.le_next =
1692 (u_int64_t)(uintptr_t)unp->unp_link.le_next;
1693 xu.xunp_link.le_prev =
1694 (u_int64_t)(uintptr_t)unp->unp_link.le_prev;
1695 xu.xunp_socket = (u_int64_t)(uintptr_t)unp->unp_socket;
1696 xu.xunp_vnode = (u_int64_t)(uintptr_t)unp->unp_vnode;
1697 xu.xunp_ino = unp->unp_ino;
1698 xu.xunp_conn = (u_int64_t)(uintptr_t)unp->unp_conn;
1699 xu.xunp_refs = (u_int64_t)(uintptr_t)unp->unp_refs.lh_first;
1700 xu.xunp_reflink.le_next =
1701 (u_int64_t)(uintptr_t)unp->unp_reflink.le_next;
1702 xu.xunp_reflink.le_prev =
1703 (u_int64_t)(uintptr_t)unp->unp_reflink.le_prev;
1704 xu.xunp_cc = unp->unp_cc;
1705 xu.xunp_mbcnt = unp->unp_mbcnt;
1706 xu.xunp_gencnt = unp->unp_gencnt;
1707
1708 if (unp->unp_socket)
1709 sotoxsocket64(unp->unp_socket, &xu.xu_socket);
1710
1711 /*
1712 * XXX - need more locking here to protect against
1713 * connect/disconnect races for SMP.
1714 */
1715 if (unp->unp_addr)
1716 bcopy(unp->unp_addr, &xu.xunp_addr,
1717 unp->unp_addr->sun_len);
1718 if (unp->unp_conn && unp->unp_conn->unp_addr)
1719 bcopy(unp->unp_conn->unp_addr,
1720 &xu.xunp_caddr,
1721 unp->unp_conn->unp_addr->sun_len);
1722
1723 error = SYSCTL_OUT(req, &xu, xu_len);
1724 }
1725 }
1726 if (!error) {
1727 /*
1728 * Give the user an updated idea of our state.
1729 * If the generation differs from what we told
1730 * her before, she knows that something happened
1731 * while we were processing this request, and it
1732 * might be necessary to retry.
1733 */
1734 bzero(&xug, sizeof (xug));
1735 xug.xug_len = sizeof (xug);
1736 xug.xug_gen = unp_gencnt;
1737 xug.xug_sogen = so_gencnt;
1738 xug.xug_count = unp_count;
1739 error = SYSCTL_OUT(req, &xug, sizeof (xug));
1740 }
1741 FREE(unp_list, M_TEMP);
1742 lck_rw_done(unp_list_mtx);
1743 return (error);
1744}
1745
6d2010ae 1746SYSCTL_PROC(_net_local_dgram, OID_AUTO, pcblist64, CTLFLAG_RD | CTLFLAG_LOCKED,
b0d623f7
A
1747 (caddr_t)(long)SOCK_DGRAM, 0, unp_pcblist64, "S,xunpcb64",
1748 "List of active local datagram sockets 64 bit");
6d2010ae 1749SYSCTL_PROC(_net_local_stream, OID_AUTO, pcblist64, CTLFLAG_RD | CTLFLAG_LOCKED,
b0d623f7
A
1750 (caddr_t)(long)SOCK_STREAM, 0, unp_pcblist64, "S,xunpcb64",
1751 "List of active local stream sockets 64 bit");
1752
1753#endif /* !CONFIG_EMBEDDED */
1c79356b
A
1754
1755static void
91447636 1756unp_shutdown(struct unpcb *unp)
1c79356b 1757{
b0d623f7
A
1758 struct socket *so = unp->unp_socket;
1759 struct socket *so2;
1760 if (unp->unp_socket->so_type == SOCK_STREAM && unp->unp_conn) {
1761 so2 = unp->unp_conn->unp_socket;
1762 unp_get_locks_in_order(so, so2);
1763 socantrcvmore(so2);
1764 socket_unlock(so2, 1);
1765 }
1c79356b
A
1766}
1767
1768static void
2d21ac55 1769unp_drop(struct unpcb *unp, int errno)
1c79356b
A
1770{
1771 struct socket *so = unp->unp_socket;
1772
1773 so->so_error = errno;
1774 unp_disconnect(unp);
1c79356b
A
1775}
1776
2d21ac55
A
1777/*
1778 * Returns: 0 Success
1779 * EMSGSIZE The new fd's will not fit
1780 * ENOBUFS Cannot alloc struct fileproc
1781 */
1c79356b 1782int
91447636 1783unp_externalize(struct mbuf *rights)
1c79356b 1784{
2d21ac55 1785 proc_t p = current_proc(); /* XXX */
91447636
A
1786 int i;
1787 struct cmsghdr *cm = mtod(rights, struct cmsghdr *);
1788 struct fileglob **rp = (struct fileglob **)(cm + 1);
b0d623f7 1789 int *fds = (int *)(cm + 1);
91447636
A
1790 struct fileproc *fp;
1791 struct fileglob *fg;
2d21ac55 1792 int newfds = (cm->cmsg_len - sizeof (*cm)) / sizeof (int);
1c79356b
A
1793 int f;
1794
91447636 1795 proc_fdlock(p);
1c79356b
A
1796
1797 /*
1798 * if the new FD's will not fit, then we free them all
1799 */
1800 if (!fdavail(p, newfds)) {
1801 for (i = 0; i < newfds; i++) {
91447636
A
1802 fg = *rp;
1803 unp_discard_fdlocked(fg, p);
2d21ac55 1804 *rp++ = NULL;
1c79356b 1805 }
91447636 1806 proc_fdunlock(p);
1c79356b 1807
1c79356b
A
1808 return (EMSGSIZE);
1809 }
1810 /*
2d21ac55 1811 * now change each pointer to an fd in the global table to
1c79356b
A
1812 * an integer that is the index to the local fd table entry
1813 * that we set up to point to the global one we are transferring.
b0d623f7
A
1814 * XXX (1) this assumes a pointer and int are the same size,
1815 * XXX or the mbuf can hold the expansion
2d21ac55 1816 * XXX (2) allocation failures should be non-fatal
1c79356b
A
1817 */
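	/*
	 * (Illustration, assuming LP64 sizes; not from the original source:
	 * a struct fileglob * is 8 bytes and an fd is 4, so this pass shrinks
	 * the array in place.  fds[i] lands entirely below rp[i + 1], so no
	 * pointer is clobbered before it has been read and a forward walk is
	 * safe here, unlike the widening pass in unp_internalize() below,
	 * which must walk backwards.)
	 */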
1818 for (i = 0; i < newfds; i++) {
2d21ac55
A
1819#if CONFIG_MACF_SOCKET
1820 /*
1821 * If receive access is denied, don't pass along
 1822 * an error message, just discard the descriptor.
1823 */
1824 if (mac_file_check_receive(kauth_cred_get(), *rp)) {
1825 fg = *rp;
1826 *rp++ = 0;
1827 unp_discard_fdlocked(fg, p);
1828 continue;
1829 }
1830#endif
1c79356b 1831 if (fdalloc(p, 0, &f))
2d21ac55 1832 panic("unp_externalize:fdalloc");
b0d623f7 1833 fg = rp[i];
2d21ac55
A
1834 MALLOC_ZONE(fp, struct fileproc *, sizeof (struct fileproc),
1835 M_FILEPROC, M_WAITOK);
1836 if (fp == NULL)
1837 panic("unp_externalize: MALLOC_ZONE");
1838 bzero(fp, sizeof (struct fileproc));
91447636
A
1839 fp->f_iocount = 0;
1840 fp->f_fglob = fg;
91447636 1841 fg_removeuipc(fg);
6601e61a 1842 procfdtbl_releasefd(p, f, fp);
b0d623f7
A
1843 (void) OSAddAtomic(-1, &unp_rights);
1844 fds[i] = f;
1c79356b 1845 }
91447636 1846 proc_fdunlock(p);
1c79356b 1847
1c79356b
A
1848 return (0);
1849}
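/*
 * Receive-side view of the externalization above (a user-space sketch, not
 * part of this file; recv_fd() is a hypothetical helper and error handling
 * is minimal): by the time recvmsg(2) returns, the fileglob pointers in the
 * control message have already been rewritten into fds that are valid in
 * the receiving process, so CMSG_DATA() yields plain ints.
 */
#if 0
#include <sys/socket.h>
#include <sys/uio.h>
#include <string.h>

static int
recv_fd(int sock)
{
	char data;
	struct iovec iov = { .iov_base = &data, .iov_len = 1 };
	union {
		struct cmsghdr hdr;
		char buf[CMSG_SPACE(sizeof (int))];
	} control;
	struct msghdr msg;
	struct cmsghdr *cm;
	int fd = -1;

	memset(&msg, 0, sizeof (msg));
	msg.msg_iov = &iov;
	msg.msg_iovlen = 1;
	msg.msg_control = control.buf;
	msg.msg_controllen = sizeof (control.buf);

	if (recvmsg(sock, &msg, 0) < 0)
		return (-1);

	cm = CMSG_FIRSTHDR(&msg);
	if (cm != NULL && cm->cmsg_level == SOL_SOCKET &&
	    cm->cmsg_type == SCM_RIGHTS)
		memcpy(&fd, CMSG_DATA(cm), sizeof (fd));
	return (fd);
}
#endif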
1850
1851void
1852unp_init(void)
1853{
2d21ac55
A
1854 unp_zone = zinit(sizeof (struct unpcb),
1855 (nmbclusters * sizeof (struct unpcb)), 4096, "unpzone");
1856
1c79356b
A
1857 if (unp_zone == 0)
1858 panic("unp_init");
1859 LIST_INIT(&unp_dhead);
1860 LIST_INIT(&unp_shead);
2d21ac55 1861
37839358
A
1862 /*
 1863 * allocate lock group attribute and group for unp pcb mutexes
1864 */
1865 unp_mtx_grp_attr = lck_grp_attr_alloc_init();
1866
1867 unp_mtx_grp = lck_grp_alloc_init("unp_list", unp_mtx_grp_attr);
2d21ac55 1868
37839358
A
1869 unp_mtx_attr = lck_attr_alloc_init();
1870
2d21ac55
A
1871 if ((unp_list_mtx = lck_rw_alloc_init(unp_mtx_grp,
1872 unp_mtx_attr)) == NULL)
37839358
A
1873 return; /* pretty much dead if this fails... */
1874
b0d623f7
A
1875 if ((unp_disconnect_lock = lck_mtx_alloc_init(unp_mtx_grp,
1876 unp_mtx_attr)) == NULL)
1877 return;
1878
1879 if ((unp_connect_lock = lck_mtx_alloc_init(unp_mtx_grp,
1880 unp_mtx_attr)) == NULL)
1881 return;
1c79356b
A
1882}
1883
1884#ifndef MIN
2d21ac55 1885#define MIN(a, b) (((a) < (b)) ? (a) : (b))
1c79356b
A
1886#endif
1887
2d21ac55
A
1888/*
1889 * Returns: 0 Success
1890 * EINVAL
1891 * fdgetf_noref:EBADF
1892 */
1c79356b 1893static int
2d21ac55 1894unp_internalize(struct mbuf *control, proc_t p)
1c79356b 1895{
91447636 1896 struct cmsghdr *cm = mtod(control, struct cmsghdr *);
b0d623f7 1897 int *fds;
91447636
A
1898 struct fileglob **rp;
1899 struct fileproc *fp;
2d21ac55 1900 int i, error;
1c79356b
A
1901 int oldfds;
1902
2d21ac55 1903 /* 64bit: cmsg_len is 'uint32_t', m_len is 'long' */
1c79356b 1904 if (cm->cmsg_type != SCM_RIGHTS || cm->cmsg_level != SOL_SOCKET ||
b0d623f7 1905 (socklen_t)cm->cmsg_len != (socklen_t)control->m_len) {
2d21ac55 1906 return (EINVAL);
1c79356b 1907 }
1c79356b 1908 oldfds = (cm->cmsg_len - sizeof (*cm)) / sizeof (int);
1c79356b 1909
91447636 1910 proc_fdlock(p);
b0d623f7 1911 fds = (int *)(cm + 1);
91447636
A
1912
1913 for (i = 0; i < oldfds; i++) {
b0d623f7
A
1914 struct fileproc *tmpfp;
1915 if (((error = fdgetf_noref(p, fds[i], &tmpfp)) != 0)) {
2d21ac55
A
1916 proc_fdunlock(p);
1917 return (error);
b0d623f7
A
1918 } else if (!filetype_issendable(tmpfp->f_fglob->fg_type)) {
1919 proc_fdunlock(p);
1920 return (EINVAL);
2d21ac55 1921 }
91447636
A
1922 }
1923 rp = (struct fileglob **)(cm + 1);
1c79356b 1924
b0d623f7
A
1925 /* On K64 we need to walk backwards because a fileglob * is twice the size of an fd
 1926 * and doing them in order would result in stomping over unprocessed fds
1927 */
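	/*
	 * (Illustration, assuming LP64 sizes; not from the original source:
	 * rp[i] occupies the same bytes as fds[2 * i] and fds[2 * i + 1], so
	 * writing it destroys two fd slots whose index is >= i.  Walking i
	 * from high to low means every slot destroyed has either already
	 * been consumed or is fds[i] itself, which fdgetf_noref() reads just
	 * before the overwrite.)
	 */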
1928 for (i = (oldfds - 1); i >= 0; i--) {
1929 (void) fdgetf_noref(p, fds[i], &fp);
91447636 1930 fg_insertuipc(fp->f_fglob);
b0d623f7
A
1931 rp[i] = fp->f_fglob;
1932 (void) OSAddAtomic(1, &unp_rights);
1c79356b 1933 }
91447636 1934 proc_fdunlock(p);
1c79356b 1935
1c79356b
A
1936 return (0);
1937}
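/*
 * Send-side counterpart of the sketch after unp_externalize() above (a
 * user-space illustration, not part of this file): sendmsg(2) with an
 * SCM_RIGHTS control message is what hands unp_internalize() the fds it
 * converts into fileglob pointers.  The send_fd() name is hypothetical and
 * error handling is minimal.
 */
#if 0
#include <sys/socket.h>
#include <sys/uio.h>
#include <string.h>

static int
send_fd(int sock, int fd_to_pass)
{
	char data = 0;
	struct iovec iov = { .iov_base = &data, .iov_len = 1 };
	union {
		struct cmsghdr hdr;
		char buf[CMSG_SPACE(sizeof (int))];
	} control;
	struct msghdr msg;
	struct cmsghdr *cm;

	memset(&control, 0, sizeof (control));
	memset(&msg, 0, sizeof (msg));
	msg.msg_iov = &iov;
	msg.msg_iovlen = 1;
	msg.msg_control = control.buf;
	msg.msg_controllen = CMSG_SPACE(sizeof (int));

	cm = CMSG_FIRSTHDR(&msg);
	cm->cmsg_level = SOL_SOCKET;
	cm->cmsg_type = SCM_RIGHTS;
	cm->cmsg_len = CMSG_LEN(sizeof (int));
	memcpy(CMSG_DATA(cm), &fd_to_pass, sizeof (int));

	return (sendmsg(sock, &msg, 0) < 0 ? -1 : 0);
}
#endif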
1938
6601e61a 1939static int unp_defer, unp_gcing, unp_gcwait;
e2fac8b1 1940static thread_t unp_gcthread = NULL;
2d21ac55 1941
6601e61a
A
1942/* always called under uipc_lock */
1943void
1944unp_gc_wait(void)
1945{
e2fac8b1
A
1946 if (unp_gcthread == current_thread())
1947 return;
1948
6601e61a
A
1949 while (unp_gcing != 0) {
1950 unp_gcwait = 1;
 1951 msleep(&unp_gcing, uipc_lock, 0, "unp_gc_wait", NULL);
1952 }
1953}
1c79356b 1954
2d21ac55 1955
e2fac8b1 1956__private_extern__ void
2d21ac55 1957unp_gc(void)
1c79356b 1958{
2d21ac55
A
1959 struct fileglob *fg, *nextfg;
1960 struct socket *so;
e2fac8b1 1961 static struct fileglob **extra_ref;
b0d623f7 1962 struct fileglob **fpp;
1c79356b 1963 int nunref, i;
6601e61a 1964 int need_gcwakeup = 0;
2d21ac55 1965
91447636
A
1966 lck_mtx_lock(uipc_lock);
1967 if (unp_gcing) {
1968 lck_mtx_unlock(uipc_lock);
1c79356b 1969 return;
91447636 1970 }
1c79356b
A
1971 unp_gcing = 1;
1972 unp_defer = 0;
e2fac8b1 1973 unp_gcthread = current_thread();
91447636 1974 lck_mtx_unlock(uipc_lock);
2d21ac55
A
1975 /*
1976 * before going through all this, set all FDs to
1c79356b
A
 1977 * be NOT deferred and NOT externally accessible
1978 */
91447636
A
1979 for (fg = fmsghead.lh_first; fg != 0; fg = fg->f_msglist.le_next) {
1980 lck_mtx_lock(&fg->fg_lock);
1981 fg->fg_flag &= ~(FMARK|FDEFER);
1982 lck_mtx_unlock(&fg->fg_lock);
1983 }
1c79356b 1984 do {
2d21ac55
A
1985 for (fg = fmsghead.lh_first; fg != 0;
1986 fg = fg->f_msglist.le_next) {
91447636 1987 lck_mtx_lock(&fg->fg_lock);
1c79356b
A
1988 /*
1989 * If the file is not open, skip it
1990 */
91447636
A
1991 if (fg->fg_count == 0) {
1992 lck_mtx_unlock(&fg->fg_lock);
1c79356b 1993 continue;
91447636 1994 }
1c79356b
A
1995 /*
1996 * If we already marked it as 'defer' in a
 1997 * previous pass, then try to process it this time
1998 * and un-mark it
1999 */
91447636
A
2000 if (fg->fg_flag & FDEFER) {
2001 fg->fg_flag &= ~FDEFER;
1c79356b
A
2002 unp_defer--;
2003 } else {
2004 /*
 2005 * if it's not deferred, then check if it's
 2006 * already marked; if so, skip it
2007 */
2d21ac55 2008 if (fg->fg_flag & FMARK) {
91447636 2009 lck_mtx_unlock(&fg->fg_lock);
1c79356b 2010 continue;
91447636 2011 }
2d21ac55 2012 /*
1c79356b 2013 * If all references are from messages
2d21ac55 2014 * in transit, then skip it; it's not
1c79356b 2015 * externally accessible.
2d21ac55 2016 */
91447636
A
2017 if (fg->fg_count == fg->fg_msgcount) {
2018 lck_mtx_unlock(&fg->fg_lock);
1c79356b 2019 continue;
91447636 2020 }
2d21ac55 2021 /*
1c79356b
A
2022 * If it got this far then it must be
2023 * externally accessible.
2024 */
91447636 2025 fg->fg_flag |= FMARK;
1c79356b
A
2026 }
2027 /*
2d21ac55 2028 * either it was deferred, or it is externally
1c79356b
A
2029 * accessible and not already marked so.
2030 * Now check if it is possibly one of OUR sockets.
2d21ac55 2031 */
91447636
A
2032 if (fg->fg_type != DTYPE_SOCKET ||
2033 (so = (struct socket *)fg->fg_data) == 0) {
2034 lck_mtx_unlock(&fg->fg_lock);
1c79356b 2035 continue;
91447636 2036 }
1c79356b 2037 if (so->so_proto->pr_domain != &localdomain ||
91447636
A
2038 (so->so_proto->pr_flags&PR_RIGHTS) == 0) {
2039 lck_mtx_unlock(&fg->fg_lock);
1c79356b 2040 continue;
91447636 2041 }
1c79356b 2042#ifdef notdef
2d21ac55
A
2043 /*
 2044 * if this code is enabled, it needs to run
 2045 * under the network funnel
2046 */
1c79356b
A
2047 if (so->so_rcv.sb_flags & SB_LOCK) {
2048 /*
2049 * This is problematical; it's not clear
2050 * we need to wait for the sockbuf to be
2051 * unlocked (on a uniprocessor, at least),
2052 * and it's also not clear what to do
2053 * if sbwait returns an error due to receipt
2054 * of a signal. If sbwait does return
2055 * an error, we'll go into an infinite
2056 * loop. Delete all of this for now.
2057 */
2058 (void) sbwait(&so->so_rcv);
2059 goto restart;
2060 }
2061#endif
2062 /*
 2063 * So, OK, it's one of our sockets and it IS externally
 2064 * accessible (or was deferred). Now we look
2065 * to see if we hold any file descriptors in its
2d21ac55 2066 * message buffers. Follow those links and mark them
1c79356b 2067 * as accessible too.
e2fac8b1 2068 *
b0d623f7 2069 * In case a file is passed over itself, we need to
e2fac8b1 2070 * release the file lock.
1c79356b 2071 */
91447636 2072 lck_mtx_unlock(&fg->fg_lock);
e2fac8b1
A
2073
2074 unp_scan(so->so_rcv.sb_mb, unp_mark);
1c79356b
A
2075 }
2076 } while (unp_defer);
2077 /*
2078 * We grab an extra reference to each of the file table entries
2079 * that are not otherwise accessible and then free the rights
2080 * that are stored in messages on them.
2081 *
 2082 * The bug in the original code is a little tricky, so I'll describe
2083 * what's wrong with it here.
2084 *
2085 * It is incorrect to simply unp_discard each entry for f_msgcount
2086 * times -- consider the case of sockets A and B that contain
2087 * references to each other. On a last close of some other socket,
2088 * we trigger a gc since the number of outstanding rights (unp_rights)
 2089 * is non-zero. If during the sweep phase the gc code unp_discards,
2090 * we end up doing a (full) closef on the descriptor. A closef on A
2091 * results in the following chain. Closef calls soo_close, which
2092 * calls soclose. Soclose calls first (through the switch
2093 * uipc_usrreq) unp_detach, which re-invokes unp_gc. Unp_gc simply
2094 * returns because the previous instance had set unp_gcing, and
2095 * we return all the way back to soclose, which marks the socket
2096 * with SS_NOFDREF, and then calls sofree. Sofree calls sorflush
2097 * to free up the rights that are queued in messages on the socket A,
2098 * i.e., the reference on B. The sorflush calls via the dom_dispose
2099 * switch unp_dispose, which unp_scans with unp_discard. This second
2100 * instance of unp_discard just calls closef on B.
2101 *
2102 * Well, a similar chain occurs on B, resulting in a sorflush on B,
2103 * which results in another closef on A. Unfortunately, A is already
2104 * being closed, and the descriptor has already been marked with
2105 * SS_NOFDREF, and soclose panics at this point.
2106 *
2107 * Here, we first take an extra reference to each inaccessible
2108 * descriptor. Then, we call sorflush ourself, since we know
2109 * it is a Unix domain socket anyhow. After we destroy all the
2110 * rights carried in messages, we do a last closef to get rid
2111 * of our extra reference. This is the last close, and the
2112 * unp_detach etc will shut down the socket.
2113 *
2114 * 91/09/19, bsy@cs.cmu.edu
2115 */
2d21ac55
A
2116 extra_ref = _MALLOC(nfiles * sizeof (struct fileglob *),
2117 M_FILEGLOB, M_WAITOK);
b0d623f7
A
2118 if (extra_ref == NULL)
2119 goto bail;
91447636
A
2120 for (nunref = 0, fg = fmsghead.lh_first, fpp = extra_ref; fg != 0;
2121 fg = nextfg) {
2122 lck_mtx_lock(&fg->fg_lock);
2123
2124 nextfg = fg->f_msglist.le_next;
2d21ac55 2125 /*
1c79356b
A
2126 * If it's not open, skip it
2127 */
91447636
A
2128 if (fg->fg_count == 0) {
2129 lck_mtx_unlock(&fg->fg_lock);
1c79356b 2130 continue;
91447636 2131 }
2d21ac55 2132 /*
1c79356b
A
 2133 * If all refs are from msgs, and it's not marked accessible,
2134 * then it must be referenced from some unreachable cycle
2135 * of (shut-down) FDs, so include it in our
2136 * list of FDs to remove
2137 */
91447636
A
2138 if (fg->fg_count == fg->fg_msgcount && !(fg->fg_flag & FMARK)) {
2139 fg->fg_count++;
2140 *fpp++ = fg;
1c79356b 2141 nunref++;
1c79356b 2142 }
91447636 2143 lck_mtx_unlock(&fg->fg_lock);
1c79356b 2144 }
2d21ac55 2145 /*
1c79356b
A
 2146 * for each FD on our hit list, do two things: sorflush it, then closef it
2147 */
2148 for (i = nunref, fpp = extra_ref; --i >= 0; ++fpp) {
91447636 2149 struct fileglob *tfg;
1c79356b 2150
91447636 2151 tfg = *fpp;
1c79356b 2152
91447636 2153 if (tfg->fg_type == DTYPE_SOCKET && tfg->fg_data != NULL) {
2d21ac55
A
2154 so = (struct socket *)(tfg->fg_data);
2155
e2fac8b1 2156 socket_lock(so, 0);
b0d623f7 2157
2d21ac55
A
2158 sorflush(so);
2159
e2fac8b1 2160 socket_unlock(so, 0);
91447636
A
2161 }
2162 }
1c79356b 2163 for (i = nunref, fpp = extra_ref; --i >= 0; ++fpp)
2d21ac55
A
2164 closef_locked((struct fileproc *)0, *fpp, (proc_t)NULL);
2165
b0d623f7
A
2166 FREE((caddr_t)extra_ref, M_FILEGLOB);
2167bail:
2d21ac55 2168 lck_mtx_lock(uipc_lock);
1c79356b 2169 unp_gcing = 0;
e2fac8b1 2170 unp_gcthread = NULL;
6601e61a
A
2171
2172 if (unp_gcwait != 0) {
2173 unp_gcwait = 0;
2174 need_gcwakeup = 1;
2175 }
2176 lck_mtx_unlock(uipc_lock);
2177
2178 if (need_gcwakeup != 0)
2179 wakeup(&unp_gcing);
1c79356b
A
2180}
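/*
 * What the sweep above reclaims, seen from user space (a sketch reusing the
 * hypothetical send_fd() helper shown after unp_internalize(); not part of
 * this file):
 */
#if 0
#include <sys/socket.h>
#include <unistd.h>

static void
make_unreachable_cycle(void)
{
	int sp[2];

	if (socketpair(AF_UNIX, SOCK_STREAM, 0, sp) < 0)
		return;
	send_fd(sp[0], sp[0]);	/* sp[0]'s fileglob now rides in sp[1]'s receive buffer */
	send_fd(sp[1], sp[1]);	/* and vice versa */
	close(sp[0]);
	close(sp[1]);
	/*
	 * No process can reach either socket any more, yet each fileglob
	 * still has fg_count == fg_msgcount == 1 because of the in-flight
	 * message, so only the mark-and-sweep pass above can free them.
	 */
}
#endif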
2181
2182void
91447636 2183unp_dispose(struct mbuf *m)
1c79356b 2184{
1c79356b 2185 if (m) {
1c79356b 2186 unp_scan(m, unp_discard);
1c79356b
A
2187 }
2188}
2189
2d21ac55
A
2190/*
2191 * Returns: 0 Success
2192 */
91447636 2193static int
2d21ac55 2194unp_listen(struct unpcb *unp, proc_t p)
91447636 2195{
0c530ab8
A
2196 kauth_cred_t safecred = kauth_cred_proc_ref(p);
2197 cru2x(safecred, &unp->unp_peercred);
2198 kauth_cred_unref(&safecred);
91447636
A
2199 unp->unp_flags |= UNP_HAVEPCCACHED;
2200 return (0);
2201}
2202
2d21ac55 2203/* should run under kernel funnel */
1c79356b 2204static void
2d21ac55 2205unp_scan(struct mbuf *m0, void (*op)(struct fileglob *))
1c79356b 2206{
91447636
A
2207 struct mbuf *m;
2208 struct fileglob **rp;
2209 struct cmsghdr *cm;
2210 int i;
1c79356b
A
2211 int qfds;
2212
2213 while (m0) {
2214 for (m = m0; m; m = m->m_next)
2215 if (m->m_type == MT_CONTROL &&
2d21ac55 2216 (size_t)m->m_len >= sizeof (*cm)) {
1c79356b
A
2217 cm = mtod(m, struct cmsghdr *);
2218 if (cm->cmsg_level != SOL_SOCKET ||
2219 cm->cmsg_type != SCM_RIGHTS)
2220 continue;
2d21ac55 2221 qfds = (cm->cmsg_len - sizeof (*cm)) /
b0d623f7 2222 sizeof (int);
91447636 2223 rp = (struct fileglob **)(cm + 1);
1c79356b
A
2224 for (i = 0; i < qfds; i++)
2225 (*op)(*rp++);
2226 break; /* XXX, but saves time */
2227 }
2228 m0 = m0->m_act;
2229 }
2230}
2231
2232/* should run under kernel funnel */
2233static void
91447636 2234unp_mark(struct fileglob *fg)
1c79356b 2235{
2d21ac55 2236 lck_mtx_lock(&fg->fg_lock);
1c79356b 2237
91447636 2238 if (fg->fg_flag & FMARK) {
2d21ac55 2239 lck_mtx_unlock(&fg->fg_lock);
1c79356b 2240 return;
91447636
A
2241 }
2242 fg->fg_flag |= (FMARK|FDEFER);
2243
2d21ac55 2244 lck_mtx_unlock(&fg->fg_lock);
91447636 2245
1c79356b 2246 unp_defer++;
1c79356b
A
2247}
2248
2249/* should run under kernel funnel */
2250static void
2d21ac55 2251unp_discard(struct fileglob *fg)
1c79356b 2252{
2d21ac55
A
2253 proc_t p = current_proc(); /* XXX */
2254
b0d623f7 2255 (void) OSAddAtomic(1, &unp_disposed);
91447636
A
2256
2257 proc_fdlock(p);
2258 unp_discard_fdlocked(fg, p);
2259 proc_fdunlock(p);
2260}
2261static void
2d21ac55 2262unp_discard_fdlocked(struct fileglob *fg, proc_t p)
91447636 2263{
91447636 2264 fg_removeuipc(fg);
1c79356b 2265
b0d623f7 2266 (void) OSAddAtomic(-1, &unp_rights);
91447636 2267 (void) closef_locked((struct fileproc *)0, fg, p);
1c79356b 2268}
b0d623f7
A
2269
2270int
2271unp_lock(struct socket *so, int refcount, void * lr)
2272{
2273 void * lr_saved;
2274 if (lr == 0)
2275 lr_saved = (void *) __builtin_return_address(0);
2276 else lr_saved = lr;
2277
2278 if (so->so_pcb) {
6d2010ae 2279 lck_mtx_lock(&((struct unpcb *)so->so_pcb)->unp_mtx);
b0d623f7
A
2280 } else {
2281 panic("unp_lock: so=%p NO PCB! lr=%p ref=0x%x\n",
2282 so, lr_saved, so->so_usecount);
2283 }
2284
2285 if (so->so_usecount < 0)
2286 panic("unp_lock: so=%p so_pcb=%p lr=%p ref=0x%x\n",
2287 so, so->so_pcb, lr_saved, so->so_usecount);
2288
2289 if (refcount)
2290 so->so_usecount++;
2291
2292 so->lock_lr[so->next_lock_lr] = lr_saved;
2293 so->next_lock_lr = (so->next_lock_lr+1) % SO_LCKDBG_MAX;
2294 return (0);
2295}
2296
2297int
2298unp_unlock(struct socket *so, int refcount, void * lr)
2299{
2300 void * lr_saved;
2301 lck_mtx_t * mutex_held = NULL;
2302 struct unpcb *unp = sotounpcb(so);
2303
2304 if (lr == 0)
2305 lr_saved = (void *) __builtin_return_address(0);
2306 else lr_saved = lr;
2307
2308 if (refcount)
2309 so->so_usecount--;
2310
2311 if (so->so_usecount < 0)
2312 panic("unp_unlock: so=%p usecount=%x\n", so, so->so_usecount);
2313 if (so->so_pcb == NULL) {
2314 panic("unp_unlock: so=%p NO PCB usecount=%x\n", so, so->so_usecount);
2315 } else {
6d2010ae 2316 mutex_held = &((struct unpcb *)so->so_pcb)->unp_mtx;
b0d623f7
A
2317 }
2318 lck_mtx_assert(mutex_held, LCK_MTX_ASSERT_OWNED);
2319 so->unlock_lr[so->next_unlock_lr] = lr_saved;
2320 so->next_unlock_lr = (so->next_unlock_lr+1) % SO_LCKDBG_MAX;
2321
2322 if (so->so_usecount == 0 && (so->so_flags & SOF_PCBCLEARING)) {
2323 sofreelastref(so, 1);
2324
2325 if (unp->unp_addr)
2326 FREE(unp->unp_addr, M_SONAME);
2327
2328 lck_mtx_unlock(mutex_held);
b0d623f7
A
2329
2330 unp->unp_gencnt = ++unp_gencnt;
2331 zfree(unp_zone, unp);
2332 --unp_count;
2333
2334 unp_gc();
2335 } else {
2336 lck_mtx_unlock(mutex_held);
2337 }
2338
2339 return (0);
2340}
2341
2342lck_mtx_t *
2343unp_getlock(struct socket *so, __unused int locktype)
2344{
2345 struct unpcb *unp = (struct unpcb *)so->so_pcb;
2346
2347
2348 if (so->so_pcb) {
2349 if (so->so_usecount < 0)
2350 panic("unp_getlock: so=%p usecount=%x\n", so, so->so_usecount);
6d2010ae 2351 return(&unp->unp_mtx);
b0d623f7
A
2352 } else {
2353 panic("unp_getlock: so=%p NULL so_pcb\n", so);
2354 return (so->so_proto->pr_domain->dom_mtx);
2355 }
2356}
2357