/*
 * Copyright (c) 2003-2011 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/param.h>
#include <sys/errno.h>
#include <sys/malloc.h>
#include <sys/protosw.h>
#include <sys/domain.h>
#include <sys/fcntl.h>
#include <sys/filio.h>
#include <sys/uio_internal.h>
#include <kern/lock.h>
#include <netinet/in.h>
#include <libkern/OSAtomic.h>

extern int soclose_locked(struct socket *so);
extern void soclose_wait_locked(struct socket *so);
extern int so_isdstlocal(struct socket *so);

errno_t sock_send_internal(
	socket_t sock,
	const struct msghdr *msg,
	mbuf_t data,
	int flags,
	size_t *sentlen);

typedef void (*so_upcall)(struct socket *, caddr_t, int);

errno_t
sock_accept(
	socket_t sock,
	struct sockaddr *from,
	int fromlen,
	int flags,
	sock_upcall callback,
	void *cookie,
	socket_t *new_sock)
{
	struct sockaddr *sa;
	struct socket *new_so;
	lck_mtx_t *mutex_held;
	int dosocklock;
	errno_t error = 0;

	if (sock == NULL || new_sock == NULL) return EINVAL;
	socket_lock(sock, 1);
	if ((sock->so_options & SO_ACCEPTCONN) == 0) {
		socket_unlock(sock, 1);
		return EINVAL;
	}
	if ((flags & ~(MSG_DONTWAIT)) != 0) {
		socket_unlock(sock, 1);
		return ENOTSUP;
	}
	if (((flags & MSG_DONTWAIT) != 0 || (sock->so_state & SS_NBIO) != 0) &&
	    sock->so_comp.tqh_first == NULL) {
		socket_unlock(sock, 1);
		return EWOULDBLOCK;
	}

	if (sock->so_proto->pr_getlock != NULL) {
		mutex_held = (*sock->so_proto->pr_getlock)(sock, 0);
		dosocklock = 1;
	} else {
		mutex_held = sock->so_proto->pr_domain->dom_mtx;
		dosocklock = 0;
	}

	while (TAILQ_EMPTY(&sock->so_comp) && sock->so_error == 0) {
		if (sock->so_state & SS_CANTRCVMORE) {
			sock->so_error = ECONNABORTED;
			break;
		}
		error = msleep((caddr_t)&sock->so_timeo, mutex_held,
		    PSOCK | PCATCH, "sock_accept", NULL);
		if (error != 0) {
			socket_unlock(sock, 1);
			return error;
		}
	}
	if (sock->so_error) {
		error = sock->so_error;
		sock->so_error = 0;
		socket_unlock(sock, 1);
		return error;
	}

	new_so = TAILQ_FIRST(&sock->so_comp);
	TAILQ_REMOVE(&sock->so_comp, new_so, so_list);
	sock->so_qlen--;

	/*
	 * Pass the pre-accepted socket to any interested socket filter(s).
	 * Upon failure, the socket would have been closed by the callee.
	 */
	if (new_so->so_filt != NULL) {
		/*
		 * Temporarily drop the listening socket's lock before we
		 * hand off control over to the socket filter(s), but keep
		 * a reference so that it won't go away. We'll grab it
		 * again once we're done with the filter(s).
		 */
		socket_unlock(sock, 0);
		if ((error = soacceptfilter(new_so)) != 0) {
			/* Drop reference on listening socket */
			socket_lock(sock, 0);
			socket_unlock(sock, 1);
			return error;
		}
		socket_lock(sock, 0);
	}

	if (dosocklock) {
		lck_mtx_assert(new_so->so_proto->pr_getlock(new_so, 0),
		    LCK_MTX_ASSERT_NOTOWNED);
		socket_lock(new_so, 1);
	}

	new_so->so_state &= ~SS_COMP;
	new_so->so_head = NULL;
	(void) soacceptlock(new_so, &sa, 0);

	socket_unlock(sock, 1);	/* release the head */
	if (callback) {
		new_so->so_upcall = (so_upcall) callback;
		new_so->so_upcallarg = cookie;
		new_so->so_rcv.sb_flags |= SB_UPCALL;
#if CONFIG_SOWUPCALL
		new_so->so_snd.sb_flags |= SB_UPCALL;
#endif /* CONFIG_SOWUPCALL */
	}

	if (from) {
		if (fromlen > sa->sa_len) fromlen = sa->sa_len;
		memcpy(from, sa, fromlen);
	}
	if (sa) FREE(sa, M_SONAME);

	/*
	 * If the socket has been marked as inactive by sosetdefunct(),
	 * disallow further operations on it.
	 */
	if (new_so->so_flags & SOF_DEFUNCT) {
		(void) sodefunct(current_proc(), new_so,
		    SHUTDOWN_SOCKET_LEVEL_DISCONNECT_INTERNAL);
	}
	*new_sock = new_so;
	if (dosocklock)
		socket_unlock(new_so, 1);
	return error;
}

errno_t
sock_bind(
	socket_t sock,
	const struct sockaddr *to)
{
	int error = 0;
	struct sockaddr *sa = NULL;
	struct sockaddr_storage ss;
	boolean_t want_free = TRUE;

	if (sock == NULL || to == NULL)
		return EINVAL;

	if (to->sa_len > sizeof(ss)) {
		MALLOC(sa, struct sockaddr *, to->sa_len, M_SONAME, M_WAITOK);
		if (sa == NULL)
			return ENOBUFS;
	} else {
		sa = (struct sockaddr *)&ss;
		want_free = FALSE;
	}
	memcpy(sa, to, to->sa_len);

	error = sobind(sock, sa);

	if (sa != NULL && want_free == TRUE)
		FREE(sa, M_SONAME);

	return error;
}

errno_t
sock_connect(
	socket_t sock,
	const struct sockaddr *to,
	int flags)
{
	int error = 0;
	lck_mtx_t *mutex_held;
	struct sockaddr *sa = NULL;
	struct sockaddr_storage ss;
	boolean_t want_free = TRUE;

	if (sock == NULL || to == NULL) return EINVAL;

	if (to->sa_len > sizeof(ss)) {
		MALLOC(sa, struct sockaddr *, to->sa_len, M_SONAME,
		    (flags & MSG_DONTWAIT) ? M_NOWAIT : M_WAITOK);
		if (sa == NULL)
			return ENOBUFS;
	} else {
		sa = (struct sockaddr *)&ss;
		want_free = FALSE;
	}
	memcpy(sa, to, to->sa_len);

	socket_lock(sock, 1);

	if ((sock->so_state & SS_ISCONNECTING) &&
	    ((sock->so_state & SS_NBIO) != 0 ||
	    (flags & MSG_DONTWAIT) != 0)) {
		error = EALREADY;
		goto out;
	}
	error = soconnectlock(sock, sa, 0);
	if (error == 0) {
		if ((sock->so_state & SS_ISCONNECTING) &&
		    ((sock->so_state & SS_NBIO) != 0 ||
		    (flags & MSG_DONTWAIT) != 0)) {
			error = EINPROGRESS;
			goto out;
		}

		if (sock->so_proto->pr_getlock != NULL)
			mutex_held = (*sock->so_proto->pr_getlock)(sock, 0);
		else
			mutex_held = sock->so_proto->pr_domain->dom_mtx;

		while ((sock->so_state & SS_ISCONNECTING) &&
		    sock->so_error == 0) {
			error = msleep((caddr_t)&sock->so_timeo, mutex_held,
			    PSOCK | PCATCH, "sock_connect", NULL);
			if (error != 0)
				break;
		}

		if (error == 0) {
			error = sock->so_error;
			sock->so_error = 0;
		}
	} else {
		sock->so_state &= ~SS_ISCONNECTING;
	}
out:
	socket_unlock(sock, 1);

	if (sa != NULL && want_free == TRUE)
		FREE(sa, M_SONAME);

	return error;
}

errno_t
sock_connectwait(
	socket_t sock,
	const struct timeval *tv)
{
	lck_mtx_t *mutex_held;
	errno_t retval = 0;
	struct timespec ts;

	socket_lock(sock, 1);

	// Check if we're already connected or if we've already errored out
	if ((sock->so_state & SS_ISCONNECTING) == 0 || sock->so_error) {
		if (sock->so_error) {
			retval = sock->so_error;
			sock->so_error = 0;
		} else {
			if ((sock->so_state & SS_ISCONNECTED) != 0)
				retval = 0;
			else
				retval = EINVAL;
		}
		goto done;
	}

	// copied translation from timeval to hertz from SO_RCVTIMEO handling
	if (tv->tv_sec < 0 || tv->tv_sec > SHRT_MAX / hz ||
	    tv->tv_usec < 0 || tv->tv_usec >= 1000000) {
		retval = EDOM;
		goto done;
	}

	ts.tv_sec = tv->tv_sec;
	ts.tv_nsec = (tv->tv_usec * NSEC_PER_USEC);
	if ((ts.tv_sec + (ts.tv_nsec / NSEC_PER_SEC)) / 100 > SHRT_MAX) {
		retval = EDOM;
		goto done;
	}

	if (sock->so_proto->pr_getlock != NULL)
		mutex_held = (*sock->so_proto->pr_getlock)(sock, 0);
	else
		mutex_held = sock->so_proto->pr_domain->dom_mtx;

	msleep((caddr_t)&sock->so_timeo, mutex_held, PSOCK,
	    "sock_connectwait", &ts);

	// Check if we're still waiting to connect
	if ((sock->so_state & SS_ISCONNECTING) && sock->so_error == 0) {
		retval = EINPROGRESS;
		goto done;
	}

	if (sock->so_error) {
		retval = sock->so_error;
		sock->so_error = 0;
	}

done:
	socket_unlock(sock, 1);
	return retval;
}

errno_t
sock_nointerrupt(socket_t sock, int on)
{
	socket_lock(sock, 1);

	if (on) {
		sock->so_rcv.sb_flags |= SB_NOINTR;	// This isn't safe
		sock->so_snd.sb_flags |= SB_NOINTR;	// This isn't safe
	} else {
		sock->so_rcv.sb_flags &= ~SB_NOINTR;	// This isn't safe
		sock->so_snd.sb_flags &= ~SB_NOINTR;	// This isn't safe
	}

	socket_unlock(sock, 1);

	return 0;
}

errno_t
sock_getpeername(socket_t sock, struct sockaddr *peername, int peernamelen)
{
	int error;
	struct sockaddr *sa = NULL;

	if (sock == NULL || peername == NULL || peernamelen < 0)
		return EINVAL;

	socket_lock(sock, 1);
	if (!(sock->so_state & (SS_ISCONNECTED|SS_ISCONFIRMING))) {
		socket_unlock(sock, 1);
		return ENOTCONN;
	}
	error = sogetaddr_locked(sock, &sa, 1);
	socket_unlock(sock, 1);
	if (error == 0) {
		if (peernamelen > sa->sa_len)
			peernamelen = sa->sa_len;
		memcpy(peername, sa, peernamelen);
		FREE(sa, M_SONAME);
	}
	return error;
}

errno_t
sock_getsockname(socket_t sock, struct sockaddr *sockname, int socknamelen)
{
	int error;
	struct sockaddr *sa = NULL;

	if (sock == NULL || sockname == NULL || socknamelen < 0)
		return EINVAL;

	socket_lock(sock, 1);
	error = sogetaddr_locked(sock, &sa, 0);
	socket_unlock(sock, 1);
	if (error == 0) {
		if (socknamelen > sa->sa_len)
			socknamelen = sa->sa_len;
		memcpy(sockname, sa, socknamelen);
		FREE(sa, M_SONAME);
	}
	return error;
}

__private_extern__ int
sogetaddr_locked(struct socket *so, struct sockaddr **psa, int peer)
{
	int error;

	if (so == NULL || psa == NULL)
		return EINVAL;

	error = peer ? so->so_proto->pr_usrreqs->pru_peeraddr(so, psa) :
	    so->so_proto->pr_usrreqs->pru_sockaddr(so, psa);

	if (error == 0 && *psa == NULL) {
		error = ENOMEM;
	} else if (error != 0 && *psa != NULL) {
		FREE(*psa, M_SONAME);
		*psa = NULL;
	}
	return error;
}

errno_t
sock_getaddr(socket_t sock, struct sockaddr **psa, int peer)
{
	int error;

	if (sock == NULL || psa == NULL)
		return EINVAL;

	socket_lock(sock, 1);
	error = sogetaddr_locked(sock, psa, peer);
	socket_unlock(sock, 1);

	return error;
}

void
sock_freeaddr(struct sockaddr *sa)
{
	if (sa)
		FREE(sa, M_SONAME);
}

errno_t
sock_getsockopt(
	socket_t sock,
	int level,
	int optname,
	void *optval,
	int *optlen)
{
	int error = 0;
	struct sockopt sopt;

	if (sock == NULL || optval == NULL || optlen == NULL) return EINVAL;
	sopt.sopt_dir = SOPT_GET;
	sopt.sopt_level = level;
	sopt.sopt_name = optname;
	sopt.sopt_val = CAST_USER_ADDR_T(optval);
	sopt.sopt_valsize = *optlen;
	sopt.sopt_p = kernproc;
	error = sogetopt(sock, &sopt);	/* will lock socket */
	if (error == 0) *optlen = sopt.sopt_valsize;
	return error;
}

errno_t
sock_ioctl(
	socket_t sock,
	unsigned long request,
	void *argp)
{
	return soioctl(sock, request, argp, kernproc);	/* will lock socket */
}

errno_t
sock_setsockopt(
	socket_t sock,
	int level,
	int optname,
	const void *optval,
	int optlen)
{
	struct sockopt sopt;

	if (sock == NULL || optval == NULL) return EINVAL;
	sopt.sopt_dir = SOPT_SET;
	sopt.sopt_level = level;
	sopt.sopt_name = optname;
	sopt.sopt_val = CAST_USER_ADDR_T(optval);
	sopt.sopt_valsize = optlen;
	sopt.sopt_p = kernproc;
	return sosetopt(sock, &sopt);	/* will lock socket */
}

/*
 * This follows the recommended mappings between DSCP code points and WMM
 * access classes.
 */
static u_int32_t so_tc_from_dscp(u_int8_t dscp);

static u_int32_t
so_tc_from_dscp(u_int8_t dscp)
{
	u_int32_t tc;

	if (dscp >= 0x30 && dscp <= 0x3f)
		tc = SO_TC_VO;
	else if (dscp >= 0x20 && dscp <= 0x2f)
		tc = SO_TC_VI;
	else if (dscp >= 0x08 && dscp <= 0x17)
		tc = SO_TC_BK;
	else
		tc = SO_TC_BE;

	return tc;
}

errno_t
sock_settclassopt(socket_t sock, const void *optval, size_t optlen)
{
	errno_t error = 0;
	struct sockopt sopt;
	int sotc;

	if (sock == NULL || optval == NULL || optlen != sizeof(int)) return EINVAL;

	socket_lock(sock, 1);
	if (!(sock->so_state & SS_ISCONNECTED)) {
		/* If the socket is not connected then we don't know
		 * if the destination is on LAN or not. Skip
		 * setting traffic class in this case
		 */
		error = ENOTCONN;
		goto out;
	}
->so_proto
== NULL
|| sock
->so_proto
->pr_domain
== NULL
|| sock
->so_pcb
== NULL
) {

	/*
	 * Set the socket traffic class based on the passed DSCP code point
	 * regardless of the scope of the destination
	 */
	sotc = so_tc_from_dscp((*(const int *)optval) >> 2);

	sopt.sopt_dir = SOPT_SET;
	sopt.sopt_val = CAST_USER_ADDR_T(&sotc);
	sopt.sopt_valsize = sizeof(sotc);
	sopt.sopt_p = kernproc;
	sopt.sopt_level = SOL_SOCKET;
	sopt.sopt_name = SO_TRAFFIC_CLASS;

	socket_unlock(sock, 0);
	error = sosetopt(sock, &sopt);
	socket_lock(sock, 0);

	if (error != 0) {
		printf("sock_settclassopt: sosetopt SO_TRAFFIC_CLASS failed %d\n", error);
		goto out;
	}

	/* Check if the destination address is LAN or link local address.
	 * We do not want to set traffic class bits if the destination
	 * is not local.
	 */
	if (!so_isdstlocal(sock)) {
		goto out;
	}

	sopt.sopt_dir = SOPT_SET;
	sopt.sopt_val = CAST_USER_ADDR_T(optval);
	sopt.sopt_valsize = optlen;
	sopt.sopt_p = kernproc;

	switch (sock->so_proto->pr_domain->dom_family) {
	case AF_INET:
		sopt.sopt_level = IPPROTO_IP;
		sopt.sopt_name = IP_TOS;
		break;
	case AF_INET6:
		sopt.sopt_level = IPPROTO_IPV6;
		sopt.sopt_name = IPV6_TCLASS;
		break;
	default:
		error = EINVAL;
		goto out;
	}

	socket_unlock(sock, 1);
	return sosetopt(sock, &sopt);

out:
	socket_unlock(sock, 1);
	return error;
}

errno_t
sock_gettclassopt(socket_t sock, void *optval, size_t *optlen)
{
	errno_t error = 0;
	struct sockopt sopt;

	if (sock == NULL || optval == NULL || optlen == NULL) return EINVAL;

	sopt.sopt_dir = SOPT_GET;
	sopt.sopt_val = CAST_USER_ADDR_T(optval);
	sopt.sopt_valsize = *optlen;
	sopt.sopt_p = kernproc;

	socket_lock(sock, 1);
	if (sock->so_proto == NULL || sock->so_proto->pr_domain == NULL) {
		socket_unlock(sock, 1);
		return EINVAL;
	}

	switch (sock->so_proto->pr_domain->dom_family) {
	case AF_INET:
		sopt.sopt_level = IPPROTO_IP;
		sopt.sopt_name = IP_TOS;
		break;
	case AF_INET6:
		sopt.sopt_level = IPPROTO_IPV6;
		sopt.sopt_name = IPV6_TCLASS;
		break;
	default:
		socket_unlock(sock, 1);
		return EINVAL;
	}
	socket_unlock(sock, 1);
	error = sogetopt(sock, &sopt);	/* will lock socket */
	if (error == 0) *optlen = sopt.sopt_valsize;
	return error;
}

errno_t
sock_listen(
	socket_t sock,
	int backlog)
{
	if (sock == NULL) return EINVAL;
	return solisten(sock, backlog);	/* will lock socket */
}

static errno_t
sock_receive_internal(
	socket_t sock,
	struct msghdr *msg,
	mbuf_t *data,
	int flags,
	size_t *recvdlen)
{
	uio_t auio;
	struct mbuf *control = NULL;
	int error = 0;
	int length = 0;
	struct sockaddr *fromsa = NULL;
	char uio_buf[ UIO_SIZEOF((msg != NULL) ? msg->msg_iovlen : 0) ];

	if (sock == NULL) return EINVAL;

	auio = uio_createwithbuffer(((msg != NULL) ? msg->msg_iovlen : 0),
	    0, UIO_SYSSPACE, UIO_READ,
	    &uio_buf[0], sizeof(uio_buf));
	if (msg && data == NULL) {
		int i;
		struct iovec *tempp = msg->msg_iov;

		for (i = 0; i < msg->msg_iovlen; i++) {
			uio_addiov(auio,
			    CAST_USER_ADDR_T((tempp + i)->iov_base),
			    (tempp + i)->iov_len);
		}
		if (uio_resid(auio) < 0) return EINVAL;
	} else if (recvdlen) {
		uio_setresid(auio, (uio_resid(auio) + *recvdlen));
	}
	length = uio_resid(auio);

	/* let pru_soreceive handle the socket locking */
	error = sock->so_proto->pr_usrreqs->pru_soreceive(sock, &fromsa, auio,
	    data, (msg && msg->msg_control) ? &control : NULL, &flags);
	if (error) goto cleanup;

	if (recvdlen)
		*recvdlen = length - uio_resid(auio);
	if (msg) {
		msg->msg_flags = flags;

		if (msg->msg_name) {
			int salen;
			salen = msg->msg_namelen;
			if (msg->msg_namelen > 0 && fromsa != 0) {
				salen = MIN(salen, fromsa->sa_len);
				memcpy(msg->msg_name, fromsa,
				    msg->msg_namelen > fromsa->sa_len ?
				    fromsa->sa_len : msg->msg_namelen);
			}
		}

		if (msg->msg_control) {
			struct mbuf *m = control;
			u_char *ctlbuf = msg->msg_control;
			int clen = msg->msg_controllen;

			msg->msg_controllen = 0;

			while (m && clen > 0) {
				int tocopy;

				if (clen >= m->m_len) {
					tocopy = m->m_len;
				} else {
					msg->msg_flags |= MSG_CTRUNC;
					tocopy = clen;
				}
				memcpy(ctlbuf, mtod(m, caddr_t), tocopy);
				ctlbuf += tocopy;
				clen -= tocopy;
				m = m->m_next;
			}
			msg->msg_controllen =
			    (uintptr_t)ctlbuf - (uintptr_t)msg->msg_control;
		}
	}

cleanup:
	if (control) m_freem(control);
	if (fromsa) FREE(fromsa, M_SONAME);
	return error;
}

errno_t
sock_receive(
	socket_t sock,
	struct msghdr *msg,
	int flags,
	size_t *recvdlen)
{
	if ((msg == NULL) ||
	    (msg->msg_iovlen < 1) ||
	    (msg->msg_iov[0].iov_len == 0) ||
	    (msg->msg_iov[0].iov_base == NULL))
		return EINVAL;

	return sock_receive_internal(sock, msg, NULL, flags, recvdlen);
}

errno_t
sock_receivembuf(
	socket_t sock,
	struct msghdr *msg,
	mbuf_t *data,
	int flags,
	size_t *recvlen)
{
	if (data == NULL || recvlen == 0 || *recvlen <= 0 || (msg &&
	    (msg->msg_iov != NULL || msg->msg_iovlen != 0)))
		return EINVAL;

	return sock_receive_internal(sock, msg, data, flags, recvlen);
}

errno_t
sock_send_internal(
	socket_t sock,
	const struct msghdr *msg,
	mbuf_t data,
	int flags,
	size_t *sentlen)
{
	uio_t auio = NULL;
	struct mbuf *control = NULL;
	int error = 0;
	int datalen = 0;
	char uio_buf[ UIO_SIZEOF((msg != NULL ? msg->msg_iovlen : 1)) ];

	if (data == 0 && msg != NULL) {
		struct iovec *tempp = msg->msg_iov;

		auio = uio_createwithbuffer(msg->msg_iovlen, 0, UIO_SYSSPACE,
		    UIO_WRITE, &uio_buf[0], sizeof(uio_buf));
		if (tempp != NULL) {
			int i;

			for (i = 0; i < msg->msg_iovlen; i++) {
				uio_addiov(auio,
				    CAST_USER_ADDR_T((tempp + i)->iov_base),
				    (tempp + i)->iov_len);
			}

			if (uio_resid(auio) < 0) {
				error = EINVAL;
				goto errorout;
			}
		}
	}
= uio_resid(auio
);
836 datalen
= data
->m_pkthdr
.len
;

	if (msg && msg->msg_control) {
		if ((size_t)msg->msg_controllen < sizeof(struct cmsghdr)) return EINVAL;
		if ((size_t)msg->msg_controllen > MLEN) return EINVAL;
		control = m_get(M_NOWAIT, MT_CONTROL);
		if (control == NULL) {
			error = ENOMEM;
			goto errorout;
		}
		memcpy(mtod(control, caddr_t), msg->msg_control, msg->msg_controllen);
		control->m_len = msg->msg_controllen;
	}

	error = sock->so_proto->pr_usrreqs->pru_sosend(sock, msg != NULL ?
	    (struct sockaddr *)msg->msg_name : NULL, auio, data, control, flags);

	/*
	 * Residual data is possible in the case of IO vectors but not
	 * in the mbuf case since the latter is treated as atomic send.
	 * If pru_sosend() consumed a portion of the iovecs data and
	 * the error returned is transient, treat it as success; this
	 * is consistent with sendit() behavior.
	 */
	if (auio != NULL && uio_resid(auio) != datalen &&
	    (error == ERESTART || error == EINTR || error == EWOULDBLOCK))
		error = 0;
== 0 && sentlen
!= NULL
) {
867 *sentlen
= datalen
- uio_resid(auio
);

/*
 * In cases where we detect an error before returning, we need to
 * free the mbuf chain if there is one. sosend (and pru_sosend) will
 * free the mbuf chain if they encounter an error.
 */
errorout:
	if (control)
		m_freem(control);
	if (data)
		m_freem(data);
	if (sentlen)
		*sentlen = 0;
	return error;
}

errno_t
sock_send(
	socket_t sock,
	const struct msghdr *msg,
	int flags,
	size_t *sentlen)
{
	if (msg == NULL || msg->msg_iov == NULL || msg->msg_iovlen < 1)
		return EINVAL;

	return sock_send_internal(sock, msg, NULL, flags, sentlen);
}

errno_t
sock_sendmbuf(
	socket_t sock,
	const struct msghdr *msg,
	mbuf_t data,
	int flags,
	size_t *sentlen)
{
	if (data == NULL || (msg &&
	    (msg->msg_iov != NULL || msg->msg_iovlen != 0))) {
		if (data)
			m_freem(data);
		return EINVAL;
	}
	return sock_send_internal(sock, msg, data, flags, sentlen);
}

errno_t
sock_shutdown(socket_t sock, int how)
{
	if (sock == NULL) return EINVAL;
	return soshutdown(sock, how);
}

errno_t
sock_socket(
	int domain,
	int type,
	int protocol,
	sock_upcall callback,
	void *context,
	socket_t *new_so)
{
	int error = 0;

	if (new_so == NULL) return EINVAL;
	/* socreate will create an initial so_count */
	error = socreate(domain, new_so, type, protocol);
	if (error == 0 && callback) {
		(*new_so)->so_rcv.sb_flags |= SB_UPCALL;
#if CONFIG_SOWUPCALL
		(*new_so)->so_snd.sb_flags |= SB_UPCALL;
#endif /* CONFIG_SOWUPCALL */
		(*new_so)->so_upcall = (so_upcall)callback;
		(*new_so)->so_upcallarg = context;
		(*new_so)->last_pid = 0;
		(*new_so)->last_upid = 0;
	}
	return error;
}

void
sock_close(socket_t sock)
{
	if (sock == NULL) return;
	soclose(sock);
}

/* Do we want this to be APPLE_PRIVATE API?: YES (LD 12/23/04) */
void
sock_retain(socket_t sock)
{
	if (sock == NULL) return;
	socket_lock(sock, 1);
	sock->so_retaincnt++;
	sock->so_usecount++;	/* add extra reference for holding the socket */
	socket_unlock(sock, 1);
}

/* Do we want this to be APPLE_PRIVATE API? */
void
sock_release(socket_t sock)
{
	if (sock == NULL) return;
	socket_lock(sock, 1);

	if (sock->so_upcallusecount)
		soclose_wait_locked(sock);

	sock->so_retaincnt--;
	if (sock->so_retaincnt < 0)
		panic("sock_release: negative retain count for sock=%p "
		    "cnt=%x\n", sock, sock->so_retaincnt);
	if ((sock->so_retaincnt == 0) && (sock->so_usecount == 2)) {
		/* close socket only if the FD is not holding it */
		soclose_locked(sock);
	} else {
		/* remove extra reference holding the socket */
		sock->so_usecount--;
	}
	socket_unlock(sock, 1);
}

errno_t
sock_setpriv(socket_t sock, int on)
{
	if (sock == NULL) return EINVAL;
	socket_lock(sock, 1);
	if (on) {
		sock->so_state |= SS_PRIV;
	} else {
		sock->so_state &= ~SS_PRIV;
	}
	socket_unlock(sock, 1);
	return 0;
}

int
sock_isconnected(socket_t sock)
{
	int retval;

	socket_lock(sock, 1);
	retval = (sock->so_state & SS_ISCONNECTED) != 0;
	socket_unlock(sock, 1);
	return retval;
}

int
sock_isnonblocking(socket_t sock)
{
	int retval;

	socket_lock(sock, 1);
	retval = (sock->so_state & SS_NBIO) != 0;
	socket_unlock(sock, 1);
	return retval;
}

errno_t
sock_gettype(
	socket_t sock,
	int *outDomain,
	int *outType,
	int *outProtocol)
{
	socket_lock(sock, 1);
	if (outDomain)
		*outDomain = sock->so_proto->pr_domain->dom_family;
	if (outType)
		*outType = sock->so_type;
	if (outProtocol)
		*outProtocol = sock->so_proto->pr_protocol;
	socket_unlock(sock, 1);
	return 0;
}

/*
 * Return the listening socket of a pre-accepted socket. It returns the
 * listener (so_head) value of a given socket. This is intended to be
 * called by a socket filter during a filter attach (sf_attach) callback.
 * The value returned by this routine is safe to be used only in the
 * context of that callback, because we hold the listener's lock across
 * the sflt_initsock() call.
 */
socket_t
sock_getlistener(socket_t sock)
{
	return (sock->so_head);
}

static void
sock_set_tcp_stream_priority(socket_t sock)
{
	if ((sock->so_proto->pr_domain->dom_family == AF_INET ||
	    sock->so_proto->pr_domain->dom_family == AF_INET6) &&
	    sock->so_proto->pr_type == SOCK_STREAM) {
		set_tcp_stream_priority(sock);
	}
}

/*
 * Caller must have ensured socket is valid and won't be going away.
 */
__private_extern__ void
socket_set_traffic_mgt_flags_locked(socket_t sock, u_int32_t flags)
{
	(void) OSBitOrAtomic(flags, &sock->so_traffic_mgt_flags);
	sock_set_tcp_stream_priority(sock);
}

__private_extern__ void
socket_set_traffic_mgt_flags(socket_t sock, u_int32_t flags)
{
	socket_lock(sock, 1);
	socket_set_traffic_mgt_flags_locked(sock, flags);
	socket_unlock(sock, 1);
}

/*
 * Caller must have ensured socket is valid and won't be going away.
 */
__private_extern__ void
socket_clear_traffic_mgt_flags_locked(socket_t sock, u_int32_t flags)
{
	(void) OSBitAndAtomic(~flags, &sock->so_traffic_mgt_flags);
	sock_set_tcp_stream_priority(sock);
}

__private_extern__ void
socket_clear_traffic_mgt_flags(socket_t sock, u_int32_t flags)
{
	socket_lock(sock, 1);
	socket_clear_traffic_mgt_flags_locked(sock, flags);
	socket_unlock(sock, 1);
}

/*
 * Caller must have ensured socket is valid and won't be going away.
 */
errno_t
socket_defunct(struct proc *p, socket_t so, int level)
{
	errno_t retval;

	if (level != SHUTDOWN_SOCKET_LEVEL_DISCONNECT_SVC &&
	    level != SHUTDOWN_SOCKET_LEVEL_DISCONNECT_ALL)
		return EINVAL;

	socket_lock(so, 1);
	/*
	 * SHUTDOWN_SOCKET_LEVEL_DISCONNECT_SVC level is meant to tear down
	 * all of mDNSResponder IPC sockets, currently those of AF_UNIX; note
	 * that this is an implementation artifact of mDNSResponder. We do
	 * a quick test against the socket buffers for SB_UNIX, since that
	 * would have been set by unp_attach() at socket creation time.
	 */
	if (level == SHUTDOWN_SOCKET_LEVEL_DISCONNECT_SVC &&
	    (so->so_rcv.sb_flags & so->so_snd.sb_flags & SB_UNIX) != SB_UNIX) {
		socket_unlock(so, 1);
		return (EOPNOTSUPP);
	}
	retval = sosetdefunct(p, so, level, TRUE);
	if (retval == 0)
		retval = sodefunct(p, so, level);
	socket_unlock(so, 1);
	return (retval);
}

errno_t
sock_setupcall(socket_t sock, sock_upcall callback, void *context)
{
	if (sock == NULL)
		return EINVAL;

	/*
	 * Note that we don't wait for any in progress upcall to complete.
	 */
	socket_lock(sock, 1);

	sock->so_upcall = (so_upcall) callback;
	sock->so_upcallarg = context;
	if (callback) {
		sock->so_rcv.sb_flags |= SB_UPCALL;
#if CONFIG_SOWUPCALL
		sock->so_snd.sb_flags |= SB_UPCALL;
#endif /* CONFIG_SOWUPCALL */
	} else {
		sock->so_rcv.sb_flags &= ~SB_UPCALL;
#if CONFIG_SOWUPCALL
		sock->so_snd.sb_flags &= ~SB_UPCALL;
#endif /* CONFIG_SOWUPCALL */
	}

	socket_unlock(sock, 1);
	return 0;
}
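
/*
 * Illustrative sketch, not part of the original file: clearing the
 * upcall before closing, mirroring the SB_UPCALL clear path above.
 * Helper name is hypothetical.
 */
#if 0
static void
example_teardown(socket_t so)
{
	(void) sock_setupcall(so, NULL, NULL);	/* clears SB_UPCALL */
	sock_close(so);
}
#endif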