/*
 * Copyright (c) 2000-2017 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Copyright (c) 1982, 1986, 1988, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	From: @(#)tcp_usrreq.c	8.2 (Berkeley) 1/3/94
 * $FreeBSD: src/sys/netinet/tcp_usrreq.c,v 1.51.2.9 2001/08/22 00:59:12 silby Exp $
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/domain.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/protosw.h>
#include <sys/syslog.h>

#include <net/route.h>
#include <net/ntstat.h>
#include <net/content_filter.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip6.h>
#include <netinet/in_pcb.h>
#include <netinet6/in6_pcb.h>
#include <netinet/in_var.h>
#include <netinet/ip_var.h>
#include <netinet6/ip6_var.h>
#include <netinet/tcp.h>
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_seq.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcp_var.h>
#include <netinet/tcpip.h>
#include <netinet/tcp_cc.h>
#include <mach/sdt.h>
#include <netinet/tcp_debug.h>
#include <netinet/mptcp_var.h>
#include <netinet6/ipsec.h>
#if FLOW_DIVERT
#include <netinet/flow_divert.h>
#endif /* FLOW_DIVERT */
errno_t tcp_fill_info_for_info_tuple(struct info_tuple *, struct tcp_info *);

int tcp_sysctl_info(struct sysctl_oid *, void *, int, struct sysctl_req *);
static void tcp_connection_fill_info(struct tcpcb *tp,
    struct tcp_connection_info *tci);

/*
 * TCP protocol interface to socket abstraction.
 */
extern char *tcpstates[];	/* XXX ??? */

static int tcp_attach(struct socket *, struct proc *);
static int tcp_connect(struct tcpcb *, struct sockaddr *, struct proc *);
static int tcp6_connect(struct tcpcb *, struct sockaddr *, struct proc *);
static int tcp6_usr_connect(struct socket *, struct sockaddr *,
    struct proc *);
static struct tcpcb *tcp_disconnect(struct tcpcb *);
static struct tcpcb *tcp_usrclosed(struct tcpcb *);
extern void tcp_sbrcv_trim(struct tcpcb *tp, struct sockbuf *sb);

#if TCPDEBUG
#define TCPDEBUG0	int ostate = 0
#define TCPDEBUG1()	ostate = tp ? tp->t_state : 0
#define TCPDEBUG2(req)	if (tp && (so->so_options & SO_DEBUG)) \
				tcp_trace(TA_USER, ostate, tp, 0, 0, req)
#else
#define TCPDEBUG0
#define TCPDEBUG1()
#define TCPDEBUG2(req)
#endif

SYSCTL_PROC(_net_inet_tcp, OID_AUTO, info,
    CTLFLAG_RW | CTLFLAG_LOCKED | CTLFLAG_ANYBODY | CTLFLAG_KERN,
    0, 0, tcp_sysctl_info, "S", "TCP info per tuple");
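
/*
 * Usage sketch (illustrative, not part of the kernel build): the
 * "net.inet.tcp.info" node registered above is a read/write sysctl --
 * user space writes a struct info_tuple describing a connection's
 * endpoints and reads back a struct tcp_info.  Assuming the private
 * info_tuple/tcp_info definitions from the netinet headers are visible
 * (they are not part of the public SDK, so treat this as a sketch):
 *
 *	#include <sys/sysctl.h>
 *
 *	static int
 *	query_tcp_info(struct info_tuple *itpl, struct tcp_info *ti)
 *	{
 *		size_t len = sizeof(*ti);
 *
 *		// newp carries the tuple in, oldp carries the tcp_info out
 *		return sysctlbyname("net.inet.tcp.info", ti, &len,
 *		    itpl, sizeof(*itpl));
 *	}
 */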
/*
 * TCP attaches to socket via pru_attach(), reserving space,
 * and an internet control block.
 *
 *	tcp_attach:???			[IPSEC specific]
 */
static int
tcp_usr_attach(struct socket *so, __unused int proto, struct proc *p)
{
    int error = 0;
    struct inpcb *inp = sotoinpcb(so);
    struct tcpcb *tp = 0;
    TCPDEBUG0;

    error = tcp_attach(so, p);
    if (error)
        goto out;

    if ((so->so_options & SO_LINGER) && so->so_linger == 0)
        so->so_linger = TCP_LINGERTIME * hz;

    tp = sototcpcb(so);
out:
    TCPDEBUG2(PRU_ATTACH);
    return error;
}
/*
 * pru_detach() detaches the TCP protocol from the socket.
 * If the protocol state is non-embryonic, then can't
 * do this directly: have to initiate a pru_disconnect(),
 * which may finish later; embryonic TCB's can just
 * be discarded here.
 */
static int
tcp_usr_detach(struct socket *so)
{
    int error = 0;
    struct inpcb *inp = sotoinpcb(so);
    struct tcpcb *tp;
    TCPDEBUG0;

    if (inp == 0 || (inp->inp_state == INPCB_STATE_DEAD)) {
        return EINVAL;	/* XXX */
    }
    socket_lock_assert_owned(so);
    tp = intotcpcb(inp);
    /* In case we got disconnected from the peer */
    if (tp == NULL)
        goto out;
    TCPDEBUG1();

    calculate_tcp_clock();

    tp = tcp_disconnect(tp);
out:
    TCPDEBUG2(PRU_DETACH);
    return error;
}
#if NECP
#define COMMON_START()	TCPDEBUG0;					\
do {									\
	if (inp == NULL || inp->inp_state == INPCB_STATE_DEAD)		\
		return (EINVAL);					\
	if (necp_socket_should_use_flow_divert(inp))			\
		return (EPROTOTYPE);					\
	tp = intotcpcb(inp);						\
	TCPDEBUG1();							\
	calculate_tcp_clock();						\
} while (0)
#else /* NECP */
#define COMMON_START()	TCPDEBUG0;					\
do {									\
	if (inp == NULL || inp->inp_state == INPCB_STATE_DEAD)		\
		return (EINVAL);					\
	tp = intotcpcb(inp);						\
	TCPDEBUG1();							\
	calculate_tcp_clock();						\
} while (0)
#endif /* !NECP */

#define COMMON_END(req)	out: TCPDEBUG2(req); return error; goto out
/*
 * Give the socket an address.
 *
 * Returns:	0			Success
 *		EINVAL			Invalid argument [COMMON_START]
 *		EAFNOSUPPORT		Address family not supported
 *	in_pcbbind:EADDRNOTAVAIL	Address not available.
 *	in_pcbbind:EINVAL		Invalid argument
 *	in_pcbbind:EAFNOSUPPORT		Address family not supported [notdef]
 *	in_pcbbind:EACCES		Permission denied
 *	in_pcbbind:EADDRINUSE		Address in use
 *	in_pcbbind:EAGAIN		Resource unavailable, try again
 *	in_pcbbind:EPERM		Operation not permitted
 */
static int
tcp_usr_bind(struct socket *so, struct sockaddr *nam, struct proc *p)
{
    int error = 0;
    struct inpcb *inp = sotoinpcb(so);
    struct tcpcb *tp;
    struct sockaddr_in *sinp;

    COMMON_START();

    if (nam->sa_family != 0 && nam->sa_family != AF_INET) {
        error = EAFNOSUPPORT;
        goto out;
    }

    /*
     * Must check for multicast addresses and disallow binding
     * to them.
     */
    sinp = (struct sockaddr_in *)(void *)nam;
    if (sinp->sin_family == AF_INET &&
        IN_MULTICAST(ntohl(sinp->sin_addr.s_addr))) {
        error = EAFNOSUPPORT;
        goto out;
    }

    error = in_pcbbind(inp, nam, p);
    if (error)
        goto out;

    /* Update NECP client with bind result if not in middle of connect */
    if ((inp->inp_flags2 & INP2_CONNECT_IN_PROGRESS) &&
        !uuid_is_null(inp->necp_client_uuid)) {
        socket_unlock(so, 0);
        necp_client_assign_from_socket(so->last_pid,
            inp->necp_client_uuid, inp);
        socket_lock(so, 0);
    }

    COMMON_END(PRU_BIND);
}
static int
tcp6_usr_bind(struct socket *so, struct sockaddr *nam, struct proc *p)
{
    int error = 0;
    struct inpcb *inp = sotoinpcb(so);
    struct tcpcb *tp;
    struct sockaddr_in6 *sin6p;

    COMMON_START();

    if (nam->sa_family != 0 && nam->sa_family != AF_INET6) {
        error = EAFNOSUPPORT;
        goto out;
    }

    /*
     * Must check for multicast addresses and disallow binding
     * to them.
     */
    sin6p = (struct sockaddr_in6 *)(void *)nam;
    if (sin6p->sin6_family == AF_INET6 &&
        IN6_IS_ADDR_MULTICAST(&sin6p->sin6_addr)) {
        error = EAFNOSUPPORT;
        goto out;
    }

    inp->inp_vflag &= ~INP_IPV4;
    inp->inp_vflag |= INP_IPV6;
    if ((inp->inp_flags & IN6P_IPV6_V6ONLY) == 0) {
        if (IN6_IS_ADDR_UNSPECIFIED(&sin6p->sin6_addr))
            inp->inp_vflag |= INP_IPV4;
        else if (IN6_IS_ADDR_V4MAPPED(&sin6p->sin6_addr)) {
            struct sockaddr_in sin;

            in6_sin6_2_sin(&sin, sin6p);
            inp->inp_vflag |= INP_IPV4;
            inp->inp_vflag &= ~INP_IPV6;
            error = in_pcbbind(inp, (struct sockaddr *)&sin, p);
            goto out;
        }
    }
    error = in6_pcbbind(inp, nam, p);
    COMMON_END(PRU_BIND);
}
/*
 * Prepare to accept connections.
 *
 * Returns:	0			Success
 *		EINVAL [COMMON_START]
 *	in_pcbbind:EADDRNOTAVAIL	Address not available.
 *	in_pcbbind:EINVAL		Invalid argument
 *	in_pcbbind:EAFNOSUPPORT		Address family not supported [notdef]
 *	in_pcbbind:EACCES		Permission denied
 *	in_pcbbind:EADDRINUSE		Address in use
 *	in_pcbbind:EAGAIN		Resource unavailable, try again
 *	in_pcbbind:EPERM		Operation not permitted
 */
static int
tcp_usr_listen(struct socket *so, struct proc *p)
{
    int error = 0;
    struct inpcb *inp = sotoinpcb(so);
    struct tcpcb *tp;

    COMMON_START();
    if (inp->inp_lport == 0)
        error = in_pcbbind(inp, NULL, p);
    if (error == 0)
        tp->t_state = TCPS_LISTEN;
    COMMON_END(PRU_LISTEN);
}
static int
tcp6_usr_listen(struct socket *so, struct proc *p)
{
    int error = 0;
    struct inpcb *inp = sotoinpcb(so);
    struct tcpcb *tp;

    COMMON_START();
    if (inp->inp_lport == 0) {
        inp->inp_vflag &= ~INP_IPV4;
        if ((inp->inp_flags & IN6P_IPV6_V6ONLY) == 0)
            inp->inp_vflag |= INP_IPV4;
        error = in6_pcbbind(inp, NULL, p);
    }
    if (error == 0)
        tp->t_state = TCPS_LISTEN;
    COMMON_END(PRU_LISTEN);
}
static int
tcp_connect_complete(struct socket *so)
{
    struct tcpcb *tp = sototcpcb(so);
    struct inpcb *inp = sotoinpcb(so);
    int error = 0;

    /* TFO delays the tcp_output until later, when the app calls write() */
    if (so->so_flags1 & SOF1_PRECONNECT_DATA) {
        if (!necp_socket_is_allowed_to_send_recv(sotoinpcb(so),
            NULL, NULL, NULL))
            return (EHOSTUNREACH);

        /* Initialize enough state so that we can actually send data */
        tcp_mss(tp, -1, IFSCOPE_NONE);
        tp->snd_wnd = tp->t_maxseg;
        tp->max_sndwnd = tp->snd_wnd;
    } else {
        error = tcp_output(tp);
    }

    /* Update NECP client with connected five-tuple */
    if (error == 0 && !uuid_is_null(inp->necp_client_uuid)) {
        socket_unlock(so, 0);
        necp_client_assign_from_socket(so->last_pid,
            inp->necp_client_uuid, inp);
        socket_lock(so, 0);
    }

    return (error);
}
/*
 * Initiate connection to peer.
 * Create a template for use in transmissions on this connection.
 * Enter SYN_SENT state, and mark socket as connecting.
 * Start keep-alive timer, and seed output sequence space.
 * Send initial segment on connection.
 */
static int
tcp_usr_connect(struct socket *so, struct sockaddr *nam, struct proc *p)
{
    int error = 0;
    struct inpcb *inp = sotoinpcb(so);
    struct tcpcb *tp;
    struct sockaddr_in *sinp;

    TCPDEBUG0;
    if (inp == NULL) {
        return (EINVAL);
    } else if (inp->inp_state == INPCB_STATE_DEAD) {
        if (so->so_error) {
            error = so->so_error;
            so->so_error = 0;
            return (error);
        } else {
            return (EINVAL);
        }
    }
#if FLOW_DIVERT
    else if (necp_socket_should_use_flow_divert(inp)) {
        uint32_t fd_ctl_unit =
            necp_socket_get_flow_divert_control_unit(inp);
        if (fd_ctl_unit > 0) {
            error = flow_divert_pcb_init(so, fd_ctl_unit);
            if (error == 0) {
                error = flow_divert_connect_out(so, nam, p);
            }
        } else {
            error = ENETDOWN;
        }
        return (error);
    }
#endif /* FLOW_DIVERT */
#if CONTENT_FILTER
    error = cfil_sock_attach(so);
    if (error != 0)
        return (error);
#endif /* CONTENT_FILTER */
    tp = intotcpcb(inp);
    TCPDEBUG1();

    calculate_tcp_clock();

    if (nam->sa_family != 0 && nam->sa_family != AF_INET) {
        error = EAFNOSUPPORT;
        goto out;
    }
    /*
     * Must disallow TCP ``connections'' to multicast addresses.
     */
    sinp = (struct sockaddr_in *)(void *)nam;
    if (sinp->sin_family == AF_INET
        && IN_MULTICAST(ntohl(sinp->sin_addr.s_addr))) {
        error = EAFNOSUPPORT;
        goto out;
    }

    if ((error = tcp_connect(tp, nam, p)) != 0)
        goto out;

    error = tcp_connect_complete(so);

    COMMON_END(PRU_CONNECT);
}
static int
tcp_usr_connectx_common(struct socket *so, int af,
    struct sockaddr *src, struct sockaddr *dst,
    struct proc *p, uint32_t ifscope, sae_associd_t aid, sae_connid_t *pcid,
    uint32_t flags, void *arg, uint32_t arglen, struct uio *auio,
    user_ssize_t *bytes_written)
{
#pragma unused(aid, flags, arg, arglen)
    struct inpcb *inp = sotoinpcb(so);
    int error = 0;
    user_ssize_t datalen = 0;

    if (inp == NULL)
        return (EINVAL);

    ASSERT(!(inp->inp_flags2 & INP2_CONNECT_IN_PROGRESS));
    inp->inp_flags2 |= INP2_CONNECT_IN_PROGRESS;

    inp_update_necp_policy(inp, src, dst, ifscope);

    if ((so->so_flags1 & SOF1_DATA_IDEMPOTENT) &&
        (tcp_fastopen & TCP_FASTOPEN_CLIENT))
        sototcpcb(so)->t_flagsext |= TF_FASTOPEN;

    /* bind socket to the specified interface, if requested */
    if (ifscope != IFSCOPE_NONE &&
        (error = inp_bindif(inp, ifscope, NULL)) != 0) {
        goto done;
    }

    /* if source address and/or port is specified, bind to it */
    if (src != NULL) {
        error = sobindlock(so, src, 0);	/* already locked */
        if (error != 0)
            goto done;
    }

    switch (af) {
    case AF_INET:
        error = tcp_usr_connect(so, dst, p);
        break;
    case AF_INET6:
        error = tcp6_usr_connect(so, dst, p);
        break;
    default:
        VERIFY(0);
        /* NOTREACHED */
    }

    if (error != 0)
        goto done;

    /* if there is data, copy it */
    if (auio != NULL) {
        socket_unlock(so, 0);

        VERIFY(bytes_written != NULL);

        datalen = uio_resid(auio);
        error = so->so_proto->pr_usrreqs->pru_sosend(so, NULL,
            (uio_t)auio, NULL, NULL, 0);
        socket_lock(so, 0);

        if (error == 0 || error == EWOULDBLOCK)
            *bytes_written = datalen - uio_resid(auio);

        /*
         * sosend returns EWOULDBLOCK if it's a non-blocking
         * socket or a timeout occurred (this allows to return
         * the amount of queued data through sendit()).
         *
         * However, connectx() returns EINPROGRESS in case of a
         * blocking socket. So we change the return value here.
         */
        if (error == EWOULDBLOCK)
            error = EINPROGRESS;
    }

    if (error == 0 && pcid != NULL)
        *pcid = 1; /* there is only one connection in regular TCP */

done:
    if (error && error != EINPROGRESS)
        so->so_flags1 &= ~SOF1_PRECONNECT_DATA;

    inp->inp_flags2 &= ~INP2_CONNECT_IN_PROGRESS;
    return (error);
}
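
/*
 * Usage sketch (user space, illustrative only): the common path above
 * backs the connectx(2) system call, which hands the kernel a
 * destination plus optional initial data in one shot.  With
 * CONNECT_DATA_IDEMPOTENT set (and TCP Fast Open enabled for clients)
 * that data may ride on the SYN.  Field and flag names below come from
 * the public <sys/socket.h>; treat this as an approximation rather
 * than a reference implementation:
 *
 *	#include <sys/socket.h>
 *	#include <sys/uio.h>
 *	#include <netinet/in.h>
 *	#include <string.h>
 *
 *	int
 *	connect_with_data(int s, const struct sockaddr_in *dst,
 *	    void *buf, size_t buflen, size_t *sent)
 *	{
 *		sa_endpoints_t sae;
 *		struct iovec iov = { .iov_base = buf, .iov_len = buflen };
 *
 *		memset(&sae, 0, sizeof(sae));
 *		sae.sae_dstaddr = (const struct sockaddr *)dst;
 *		sae.sae_dstaddrlen = sizeof(*dst);
 *
 *		// A blocking socket may report EINPROGRESS here; see the
 *		// EWOULDBLOCK-to-EINPROGRESS conversion above.
 *		return connectx(s, &sae, SAE_ASSOCID_ANY,
 *		    CONNECT_DATA_IDEMPOTENT, &iov, 1, sent, NULL);
 *	}
 */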
static int
tcp_usr_connectx(struct socket *so, struct sockaddr *src,
    struct sockaddr *dst, struct proc *p, uint32_t ifscope,
    sae_associd_t aid, sae_connid_t *pcid, uint32_t flags, void *arg,
    uint32_t arglen, struct uio *uio, user_ssize_t *bytes_written)
{
    return (tcp_usr_connectx_common(so, AF_INET, src, dst, p, ifscope, aid,
        pcid, flags, arg, arglen, uio, bytes_written));
}
static int
tcp6_usr_connect(struct socket *so, struct sockaddr *nam, struct proc *p)
{
    int error = 0;
    struct inpcb *inp = sotoinpcb(so);
    struct tcpcb *tp;
    struct sockaddr_in6 *sin6p;

    TCPDEBUG0;
    if (inp == NULL) {
        return (EINVAL);
    } else if (inp->inp_state == INPCB_STATE_DEAD) {
        if (so->so_error) {
            error = so->so_error;
            so->so_error = 0;
            return (error);
        } else {
            return (EINVAL);
        }
    }
#if FLOW_DIVERT
    else if (necp_socket_should_use_flow_divert(inp)) {
        uint32_t fd_ctl_unit =
            necp_socket_get_flow_divert_control_unit(inp);
        if (fd_ctl_unit > 0) {
            error = flow_divert_pcb_init(so, fd_ctl_unit);
            if (error == 0) {
                error = flow_divert_connect_out(so, nam, p);
            }
        } else {
            error = ENETDOWN;
        }
        return (error);
    }
#endif /* FLOW_DIVERT */
#if CONTENT_FILTER
    error = cfil_sock_attach(so);
    if (error != 0)
        return (error);
#endif /* CONTENT_FILTER */

    tp = intotcpcb(inp);
    TCPDEBUG1();

    calculate_tcp_clock();

    if (nam->sa_family != 0 && nam->sa_family != AF_INET6) {
        error = EAFNOSUPPORT;
        goto out;
    }

    /*
     * Must disallow TCP ``connections'' to multicast addresses.
     */
    sin6p = (struct sockaddr_in6 *)(void *)nam;
    if (sin6p->sin6_family == AF_INET6
        && IN6_IS_ADDR_MULTICAST(&sin6p->sin6_addr)) {
        error = EAFNOSUPPORT;
        goto out;
    }

    if (IN6_IS_ADDR_V4MAPPED(&sin6p->sin6_addr)) {
        struct sockaddr_in sin;

        if ((inp->inp_flags & IN6P_IPV6_V6ONLY) != 0)
            return (EINVAL);

        in6_sin6_2_sin(&sin, sin6p);
        inp->inp_vflag |= INP_IPV4;
        inp->inp_vflag &= ~INP_IPV6;
        if ((error = tcp_connect(tp, (struct sockaddr *)&sin, p)) != 0)
            goto out;

        error = tcp_connect_complete(so);
        goto out;
    }
    inp->inp_vflag &= ~INP_IPV4;
    inp->inp_vflag |= INP_IPV6;
    if ((error = tcp6_connect(tp, nam, p)) != 0)
        goto out;

    error = tcp_connect_complete(so);
    COMMON_END(PRU_CONNECT);
}
static int
tcp6_usr_connectx(struct socket *so, struct sockaddr *src,
    struct sockaddr *dst, struct proc *p, uint32_t ifscope,
    sae_associd_t aid, sae_connid_t *pcid, uint32_t flags, void *arg,
    uint32_t arglen, struct uio *uio, user_ssize_t *bytes_written)
{
    return (tcp_usr_connectx_common(so, AF_INET6, src, dst, p, ifscope, aid,
        pcid, flags, arg, arglen, uio, bytes_written));
}
/*
 * Initiate disconnect from peer.
 * If connection never passed embryonic stage, just drop;
 * else if don't need to let data drain, then can just drop anyways,
 * else have to begin TCP shutdown process: mark socket disconnecting,
 * drain unread data, state switch to reflect user close, and
 * send segment (e.g. FIN) to peer.  Socket will be really disconnected
 * when peer sends FIN and acks ours.
 *
 * SHOULD IMPLEMENT LATER PRU_CONNECT VIA REALLOC TCPCB.
 */
static int
tcp_usr_disconnect(struct socket *so)
{
    int error = 0;
    struct inpcb *inp = sotoinpcb(so);
    struct tcpcb *tp;

    socket_lock_assert_owned(so);
    COMMON_START();
    /* In case we got disconnected from the peer */
    if (tp == NULL)
        goto out;
    tp = tcp_disconnect(tp);
    COMMON_END(PRU_DISCONNECT);
}
/*
 * User-protocol pru_disconnectx callback.
 */
static int
tcp_usr_disconnectx(struct socket *so, sae_associd_t aid, sae_connid_t cid)
{
    if (aid != SAE_ASSOCID_ANY && aid != SAE_ASSOCID_ALL)
        return (EINVAL);

    return (tcp_usr_disconnect(so));
}
/*
 * Accept a connection.  Essentially all the work is
 * done at higher levels; just return the address
 * of the peer, storing through addr.
 */
static int
tcp_usr_accept(struct socket *so, struct sockaddr **nam)
{
    int error = 0;
    struct inpcb *inp = sotoinpcb(so);
    struct tcpcb *tp = NULL;
    TCPDEBUG0;

    in_getpeeraddr(so, nam);

    if (so->so_state & SS_ISDISCONNECTED) {
        error = ECONNABORTED;
        goto out;
    }
    if (inp == NULL || inp->inp_state == INPCB_STATE_DEAD)
        return (EINVAL);
    else if (necp_socket_should_use_flow_divert(inp))
        return (EPROTOTYPE);
#if CONTENT_FILTER
    error = cfil_sock_attach(so);
    if (error != 0)
        return (error);
#endif /* CONTENT_FILTER */

    tp = intotcpcb(inp);
    TCPDEBUG1();

    calculate_tcp_clock();

    COMMON_END(PRU_ACCEPT);
}
static int
tcp6_usr_accept(struct socket *so, struct sockaddr **nam)
{
    int error = 0;
    struct inpcb *inp = sotoinpcb(so);
    struct tcpcb *tp = NULL;
    TCPDEBUG0;

    if (so->so_state & SS_ISDISCONNECTED) {
        error = ECONNABORTED;
        goto out;
    }
    if (inp == NULL || inp->inp_state == INPCB_STATE_DEAD)
        return (EINVAL);
    else if (necp_socket_should_use_flow_divert(inp))
        return (EPROTOTYPE);
#if CONTENT_FILTER
    error = cfil_sock_attach(so);
    if (error != 0)
        return (error);
#endif /* CONTENT_FILTER */

    tp = intotcpcb(inp);
    TCPDEBUG1();

    calculate_tcp_clock();

    in6_mapped_peeraddr(so, nam);
    COMMON_END(PRU_ACCEPT);
}
/*
 * Mark the connection as being incapable of further output.
 *
 * Returns:	0			Success
 *		EINVAL [COMMON_START]
 *	tcp_output:EADDRNOTAVAIL
 *	tcp_output:EMSGSIZE
 *	tcp_output:EHOSTUNREACH
 *	tcp_output:ENETUNREACH
 *	tcp_output:ENETDOWN
 *	tcp_output:EMSGSIZE
 *	tcp_output:???			[ignorable: mostly IPSEC/firewall/DLIL]
 */
static int
tcp_usr_shutdown(struct socket *so)
{
    int error = 0;
    struct inpcb *inp = sotoinpcb(so);
    struct tcpcb *tp;

    TCPDEBUG0;
    if (inp == NULL || inp->inp_state == INPCB_STATE_DEAD)
        return (EINVAL);

    /*
     * In case we got disconnected from the peer, or if this is
     * a socket that is to be flow-diverted (but not yet).
     */
    tp = intotcpcb(inp);
    TCPDEBUG1();
    if (tp == NULL
        || (necp_socket_should_use_flow_divert(inp))
        ) {
        if (tp != NULL)
            error = EPROTOTYPE;
        goto out;
    }

    calculate_tcp_clock();

    tp = tcp_usrclosed(tp);

    /* A reset has been sent but socket exists, do not send FIN */
    if ((so->so_flags & SOF_MP_SUBFLOW) &&
        (tp) && (tp->t_mpflags & TMPF_RESET)) {
        goto out;
    }
#if CONTENT_FILTER
    /* Don't send a FIN yet */
    if (tp && !(so->so_state & SS_ISDISCONNECTED) &&
        cfil_sock_data_pending(&so->so_snd))
        goto out;
#endif /* CONTENT_FILTER */
    if (tp)
        error = tcp_output(tp);
    COMMON_END(PRU_SHUTDOWN);
}
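
/*
 * Usage sketch (user space): pru_shutdown is reached via shutdown(2)
 * with SHUT_WR (or SHUT_RDWR), which half-closes the connection -- the
 * FIN goes out once queued/filtered data has drained, while the socket
 * can still receive:
 *
 *	if (shutdown(s, SHUT_WR) == -1)
 *		perror("shutdown");
 *	// keep reading until the peer's EOF (read() returns 0)
 */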
/*
 * After a receive, possibly send window update to peer.
 */
static int
tcp_usr_rcvd(struct socket *so, __unused int flags)
{
    int error = 0;
    struct inpcb *inp = sotoinpcb(so);
    struct tcpcb *tp;

    COMMON_START();
    /* In case we got disconnected from the peer */
    if (tp == NULL)
        goto out;
    tcp_sbrcv_trim(tp, &so->so_rcv);

    /*
     * This tcp_output is solely there to trigger window-updates.
     * However, we really do not want these window-updates while we
     * are still in SYN_SENT or SYN_RECEIVED.
     */
    if (TCPS_HAVEESTABLISHED(tp->t_state))
        tcp_output(tp);

#if CONTENT_FILTER
    cfil_sock_buf_update(&so->so_rcv);
#endif /* CONTENT_FILTER */

    COMMON_END(PRU_RCVD);
}
/*
 * Do a send by putting data in output queue and updating urgent
 * marker if URG set.  Possibly send more data.  Unlike the other
 * pru_*() routines, the mbuf chains are our responsibility.  We
 * must either enqueue them or free them.  The other pru_* routines
 * generally are caller-frees.
 *
 * Returns:	0			Success
 *	tcp_connect:EADDRINUSE		Address in use
 *	tcp_connect:EADDRNOTAVAIL	Address not available.
 *	tcp_connect:EINVAL		Invalid argument
 *	tcp_connect:EAFNOSUPPORT	Address family not supported [notdef]
 *	tcp_connect:EACCES		Permission denied
 *	tcp_connect:EAGAIN		Resource unavailable, try again
 *	tcp_connect:EPERM		Operation not permitted
 *	tcp_output:EADDRNOTAVAIL
 *	tcp_output:EMSGSIZE
 *	tcp_output:EHOSTUNREACH
 *	tcp_output:ENETUNREACH
 *	tcp_output:ENETDOWN
 *	tcp_output:EMSGSIZE
 *	tcp_output:???			[ignorable: mostly IPSEC/firewall/DLIL]
 *	tcp6_connect:???		[IPV6 only]
 */
static int
tcp_usr_send(struct socket *so, int flags, struct mbuf *m,
    struct sockaddr *nam, struct mbuf *control, struct proc *p)
{
    int error = 0;
    struct inpcb *inp = sotoinpcb(so);
    struct tcpcb *tp;
    uint32_t msgpri = MSG_PRI_DEFAULT;
    int isipv6;
    TCPDEBUG0;

    if (inp == NULL || inp->inp_state == INPCB_STATE_DEAD
        || (necp_socket_should_use_flow_divert(inp))
        ) {
        /*
         * OOPS! we lost a race, the TCP session got reset after
         * we checked SS_CANTSENDMORE, eg: while doing uiomove or a
         * network interrupt in the non-splnet() section of sosend().
         */
        if (m != NULL)
            m_freem(m);
        if (control != NULL) {
            m_freem(control);
            control = NULL;
        }

        error = ECONNRESET;	/* XXX EPIPE? */
        tp = NULL;
        TCPDEBUG1();
        goto out;
    }
    isipv6 = nam && nam->sa_family == AF_INET6;

    tp = intotcpcb(inp);
    TCPDEBUG1();

    calculate_tcp_clock();

    if (control != NULL) {
        if (so->so_flags & SOF_ENABLE_MSGS) {
            /* Get the msg priority from control mbufs */
            error = tcp_get_msg_priority(control, &msgpri);
            if (error)
                goto out;
        } else if (control->m_len) {
            /*
             * if not unordered, TCP should not have
             * control mbufs
             */
            error = EINVAL;
            goto out;
        }
        m_freem(control);
        control = NULL;
    }

    if (so->so_flags & SOF_ENABLE_MSGS) {
        VERIFY(m->m_flags & M_PKTHDR);
        m->m_pkthdr.msg_pri = msgpri;
    }

    /* MPTCP subflow socket buffers must not be compressed */
    VERIFY(!(so->so_flags & SOF_MP_SUBFLOW) ||
        (so->so_snd.sb_flags & SB_NOCOMPRESS));

    if (!(flags & PRUS_OOB) || (so->so_flags1 & SOF1_PRECONNECT_DATA)) {
        /* Call msg send if message delivery is enabled */
        if (so->so_flags & SOF_ENABLE_MSGS)
            sbappendmsg_snd(&so->so_snd, m);
        else
            sbappendstream(&so->so_snd, m);

        if (nam && tp->t_state < TCPS_SYN_SENT) {
            /*
             * Do implied connect if not yet connected,
             * initialize window to default value, and
             * initialize maxseg/maxopd using peer's cached
             * MSS.
             */
            if (isipv6)
                error = tcp6_connect(tp, nam, p);
            else
                error = tcp_connect(tp, nam, p);
            if (error)
                goto out;
            tp->snd_wnd = TTCP_CLIENT_SND_WND;
            tp->max_sndwnd = tp->snd_wnd;
            tcp_mss(tp, -1, IFSCOPE_NONE);
        }

        if (flags & PRUS_EOF) {
            /*
             * Close the send side of the connection after
             * the data is sent.
             */
            socantsendmore(so);
            tp = tcp_usrclosed(tp);
        }
        if (tp != NULL) {
            if (flags & PRUS_MORETOCOME)
                tp->t_flags |= TF_MORETOCOME;
            error = tcp_output(tp);
            if (flags & PRUS_MORETOCOME)
                tp->t_flags &= ~TF_MORETOCOME;
        }
    } else {
        if (sbspace(&so->so_snd) == 0) {
            /* if no space is left in sockbuf,
             * do not try to squeeze in OOB traffic */
            m_freem(m);
            error = ENOBUFS;
            goto out;
        }
        /*
         * According to RFC961 (Assigned Protocols),
         * the urgent pointer points to the last octet
         * of urgent data.  We continue, however,
         * to consider it to indicate the first octet
         * of data past the urgent section.
         * Otherwise, snd_up should be one lower.
         */
        sbappendstream(&so->so_snd, m);
        if (nam && tp->t_state < TCPS_SYN_SENT) {
            /*
             * Do implied connect if not yet connected,
             * initialize window to default value, and
             * initialize maxseg/maxopd using peer's cached
             * MSS.
             */
            if (isipv6)
                error = tcp6_connect(tp, nam, p);
            else
                error = tcp_connect(tp, nam, p);
            if (error)
                goto out;
            tp->snd_wnd = TTCP_CLIENT_SND_WND;
            tp->max_sndwnd = tp->snd_wnd;
            tcp_mss(tp, -1, IFSCOPE_NONE);
        }
        tp->snd_up = tp->snd_una + so->so_snd.sb_cc;
        tp->t_flagsext |= TF_FORCE;
        error = tcp_output(tp);
        tp->t_flagsext &= ~TF_FORCE;
    }

    /*
     * We wait for the socket to successfully connect before returning.
     * This allows us to signal a timeout to the application.
     */
    if (so->so_state & SS_ISCONNECTING) {
        if (so->so_state & SS_NBIO)
            error = EWOULDBLOCK;
        else
            error = sbwait(&so->so_snd);
    }

    COMMON_END((flags & PRUS_OOB) ? PRU_SENDOOB :
        ((flags & PRUS_EOF) ? PRU_SEND_EOF : PRU_SEND));
}
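
/*
 * Usage sketch (user space): the PRUS_OOB branch above is driven by
 * send(2) with MSG_OOB.  One urgent byte is queued and snd_up is set
 * past it; the receiver sees it out-of-band unless SO_OOBINLINE is
 * enabled on its socket:
 *
 *	char mark = '!';
 *	send(s, &mark, 1, MSG_OOB);		// sender
 *
 *	char c;
 *	recv(s, &c, 1, MSG_OOB);		// receiver, out-of-band read
 */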
static int
tcp_usr_abort(struct socket *so)
{
    int error = 0;
    struct inpcb *inp = sotoinpcb(so);
    struct tcpcb *tp;

    COMMON_START();
    /* In case we got disconnected from the peer */
    if (tp == NULL)
        goto out;
    tp = tcp_drop(tp, ECONNABORTED);
    VERIFY(so->so_usecount > 0);
    so->so_usecount--;
    COMMON_END(PRU_ABORT);
}
/*
 * Receive out-of-band data.
 *
 * Returns:	0			Success
 *		EINVAL [COMMON_START]
 *		EINVAL
 *		EWOULDBLOCK
 */
static int
tcp_usr_rcvoob(struct socket *so, struct mbuf *m, int flags)
{
    int error = 0;
    struct inpcb *inp = sotoinpcb(so);
    struct tcpcb *tp;

    COMMON_START();
    if ((so->so_oobmark == 0 &&
        (so->so_state & SS_RCVATMARK) == 0) ||
        so->so_options & SO_OOBINLINE ||
        tp->t_oobflags & TCPOOB_HADDATA) {
        error = EINVAL;
        goto out;
    }
    if ((tp->t_oobflags & TCPOOB_HAVEDATA) == 0) {
        error = EWOULDBLOCK;
        goto out;
    }
    m->m_len = 1;
    *mtod(m, caddr_t) = tp->t_iobc;
    so->so_state &= ~SS_RCVATMARK;
    if ((flags & MSG_PEEK) == 0)
        tp->t_oobflags ^= (TCPOOB_HAVEDATA | TCPOOB_HADDATA);
    COMMON_END(PRU_RCVOOB);
}
static int
tcp_usr_preconnect(struct socket *so)
{
    struct inpcb *inp = sotoinpcb(so);
    int error = 0;

    if (necp_socket_should_use_flow_divert(inp)) {
        /* May happen, if in tcp_usr_connect we did not have a chance
         * to set the usrreqs (due to some error). So, let's get out
         * of here.
         */
        goto out;
    }

    error = tcp_output(sototcpcb(so));

    soclearfastopen(so);

    COMMON_END(PRU_PRECONNECT);
}
/* xxx - should be const */
struct pr_usrreqs tcp_usrreqs = {
    .pru_abort = tcp_usr_abort,
    .pru_accept = tcp_usr_accept,
    .pru_attach = tcp_usr_attach,
    .pru_bind = tcp_usr_bind,
    .pru_connect = tcp_usr_connect,
    .pru_connectx = tcp_usr_connectx,
    .pru_control = in_control,
    .pru_detach = tcp_usr_detach,
    .pru_disconnect = tcp_usr_disconnect,
    .pru_disconnectx = tcp_usr_disconnectx,
    .pru_listen = tcp_usr_listen,
    .pru_peeraddr = in_getpeeraddr,
    .pru_rcvd = tcp_usr_rcvd,
    .pru_rcvoob = tcp_usr_rcvoob,
    .pru_send = tcp_usr_send,
    .pru_shutdown = tcp_usr_shutdown,
    .pru_sockaddr = in_getsockaddr,
    .pru_sosend = sosend,
    .pru_soreceive = soreceive,
    .pru_preconnect = tcp_usr_preconnect,
};
struct pr_usrreqs tcp6_usrreqs = {
    .pru_abort = tcp_usr_abort,
    .pru_accept = tcp6_usr_accept,
    .pru_attach = tcp_usr_attach,
    .pru_bind = tcp6_usr_bind,
    .pru_connect = tcp6_usr_connect,
    .pru_connectx = tcp6_usr_connectx,
    .pru_control = in6_control,
    .pru_detach = tcp_usr_detach,
    .pru_disconnect = tcp_usr_disconnect,
    .pru_disconnectx = tcp_usr_disconnectx,
    .pru_listen = tcp6_usr_listen,
    .pru_peeraddr = in6_mapped_peeraddr,
    .pru_rcvd = tcp_usr_rcvd,
    .pru_rcvoob = tcp_usr_rcvoob,
    .pru_send = tcp_usr_send,
    .pru_shutdown = tcp_usr_shutdown,
    .pru_sockaddr = in6_mapped_sockaddr,
    .pru_sosend = sosend,
    .pru_soreceive = soreceive,
    .pru_preconnect = tcp_usr_preconnect,
};
/*
 * Common subroutine to open a TCP connection to remote host specified
 * by struct sockaddr_in in mbuf *nam.  Call in_pcbbind to assign a local
 * port number if needed.  Call in_pcbladdr to do the routing and to choose
 * a local host address (interface).  If there is an existing incarnation
 * of the same connection in TIME-WAIT state and if the remote host was
 * sending CC options and if the connection duration was < MSL, then
 * truncate the previous TIME-WAIT state and proceed.
 * Initialize connection parameters and enter SYN-SENT state.
 *
 * Returns:	0			Success
 *	in_pcbbind:EADDRNOTAVAIL	Address not available.
 *	in_pcbbind:EINVAL		Invalid argument
 *	in_pcbbind:EAFNOSUPPORT		Address family not supported [notdef]
 *	in_pcbbind:EACCES		Permission denied
 *	in_pcbbind:EADDRINUSE		Address in use
 *	in_pcbbind:EAGAIN		Resource unavailable, try again
 *	in_pcbbind:EPERM		Operation not permitted
 *	in_pcbladdr:EINVAL		Invalid argument
 *	in_pcbladdr:EAFNOSUPPORT	Address family not supported
 *	in_pcbladdr:EADDRNOTAVAIL	Address not available
 */
static int
tcp_connect(struct tcpcb *tp, struct sockaddr *nam, struct proc *p)
{
    struct inpcb *inp = tp->t_inpcb, *oinp;
    struct socket *so = inp->inp_socket;
    struct tcpcb *otp;
    struct sockaddr_in *sin = (struct sockaddr_in *)(void *)nam;
    struct in_addr laddr;
    int error = 0;
    struct ifnet *outif = NULL;

    if (inp->inp_lport == 0) {
        error = in_pcbbind(inp, NULL, p);
        if (error)
            goto done;
    }

    /*
     * Cannot simply call in_pcbconnect, because there might be an
     * earlier incarnation of this same connection still in
     * TIME_WAIT state, creating an ADDRINUSE error.
     */
    error = in_pcbladdr(inp, nam, &laddr, IFSCOPE_NONE, &outif, 0);
    if (error)
        goto done;

    socket_unlock(inp->inp_socket, 0);
    oinp = in_pcblookup_hash(inp->inp_pcbinfo,
        sin->sin_addr, sin->sin_port,
        inp->inp_laddr.s_addr != INADDR_ANY ? inp->inp_laddr : laddr,
        inp->inp_lport, 0, NULL);

    socket_lock(inp->inp_socket, 0);
    if (oinp) {
        if (oinp != inp) /* 4143933: avoid deadlock if inp == oinp */
            socket_lock(oinp->inp_socket, 1);
        if (in_pcb_checkstate(oinp, WNT_RELEASE, 1) == WNT_STOPUSING) {
            if (oinp != inp)
                socket_unlock(oinp->inp_socket, 1);
            goto skip_oinp;
        }

        if (oinp != inp && (otp = intotcpcb(oinp)) != NULL &&
            otp->t_state == TCPS_TIME_WAIT &&
            ((int)(tcp_now - otp->t_starttime)) < tcp_msl &&
            (otp->t_flags & TF_RCVD_CC)) {
            otp = tcp_close(otp);
        } else {
            printf("tcp_connect: inp=0x%llx err=EADDRINUSE\n",
                (uint64_t)VM_KERNEL_ADDRPERM(inp));
            if (oinp != inp)
                socket_unlock(oinp->inp_socket, 1);
            error = EADDRINUSE;
            goto done;
        }
        if (oinp != inp)
            socket_unlock(oinp->inp_socket, 1);
    }
skip_oinp:
    if ((inp->inp_laddr.s_addr == INADDR_ANY ? laddr.s_addr :
        inp->inp_laddr.s_addr) == sin->sin_addr.s_addr &&
        inp->inp_lport == sin->sin_port) {
        error = EINVAL;
        goto done;
    }
    if (!lck_rw_try_lock_exclusive(inp->inp_pcbinfo->ipi_lock)) {
        /*lock inversion issue, mostly with udp multicast packets */
        socket_unlock(inp->inp_socket, 0);
        lck_rw_lock_exclusive(inp->inp_pcbinfo->ipi_lock);
        socket_lock(inp->inp_socket, 0);
    }
    if (inp->inp_laddr.s_addr == INADDR_ANY) {
        inp->inp_laddr = laddr;
        /* no reference needed */
        inp->inp_last_outifp = outif;
        inp->inp_flags |= INP_INADDR_ANY;
    }
    inp->inp_faddr = sin->sin_addr;
    inp->inp_fport = sin->sin_port;
    in_pcbrehash(inp);
    lck_rw_done(inp->inp_pcbinfo->ipi_lock);

    if (inp->inp_flowhash == 0)
        inp->inp_flowhash = inp_calc_flowhash(inp);

    tcp_set_max_rwinscale(tp, so, outif);

    soisconnecting(so);
    tcpstat.tcps_connattempt++;
    tp->t_state = TCPS_SYN_SENT;
    tp->t_timer[TCPT_KEEP] = OFFSET_FROM_START(tp, TCP_CONN_KEEPINIT(tp));
    tp->iss = tcp_new_isn(tp);
    tcp_sendseqinit(tp);
    if (nstat_collect)
        nstat_route_connect_attempt(inp->inp_route.ro_rt);

done:
    if (outif != NULL)
        ifnet_release(outif);

    return (error);
}
static int
tcp6_connect(struct tcpcb *tp, struct sockaddr *nam, struct proc *p)
{
    struct inpcb *inp = tp->t_inpcb, *oinp;
    struct socket *so = inp->inp_socket;
    struct tcpcb *otp;
    struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)(void *)nam;
    struct in6_addr addr6;
    int error = 0;
    struct ifnet *outif = NULL;

    if (inp->inp_lport == 0) {
        error = in6_pcbbind(inp, NULL, p);
        if (error)
            goto done;
    }

    /*
     * Cannot simply call in_pcbconnect, because there might be an
     * earlier incarnation of this same connection still in
     * TIME_WAIT state, creating an ADDRINUSE error.
     *
     * in6_pcbladdr() might return an ifp with its reference held
     * even in the error case, so make sure that it's released
     * whenever it's non-NULL.
     */
    error = in6_pcbladdr(inp, nam, &addr6, &outif);
    if (error)
        goto done;
    socket_unlock(inp->inp_socket, 0);
    oinp = in6_pcblookup_hash(inp->inp_pcbinfo,
        &sin6->sin6_addr, sin6->sin6_port,
        IN6_IS_ADDR_UNSPECIFIED(&inp->in6p_laddr)
        ? &addr6
        : &inp->in6p_laddr,
        inp->inp_lport, 0, NULL);
    socket_lock(inp->inp_socket, 0);
    if (oinp) {
        if (oinp != inp && (otp = intotcpcb(oinp)) != NULL &&
            otp->t_state == TCPS_TIME_WAIT &&
            ((int)(tcp_now - otp->t_starttime)) < tcp_msl &&
            (otp->t_flags & TF_RCVD_CC)) {
            otp = tcp_close(otp);
        } else {
            error = EADDRINUSE;
            goto done;
        }
    }
    if (!lck_rw_try_lock_exclusive(inp->inp_pcbinfo->ipi_lock)) {
        /*lock inversion issue, mostly with udp multicast packets */
        socket_unlock(inp->inp_socket, 0);
        lck_rw_lock_exclusive(inp->inp_pcbinfo->ipi_lock);
        socket_lock(inp->inp_socket, 0);
    }
    if (IN6_IS_ADDR_UNSPECIFIED(&inp->in6p_laddr)) {
        inp->in6p_laddr = addr6;
        inp->in6p_last_outifp = outif;	/* no reference needed */
        inp->in6p_flags |= INP_IN6ADDR_ANY;
    }
    inp->in6p_faddr = sin6->sin6_addr;
    inp->inp_fport = sin6->sin6_port;
    if ((sin6->sin6_flowinfo & IPV6_FLOWINFO_MASK) != 0)
        inp->inp_flow = sin6->sin6_flowinfo;
    in_pcbrehash(inp);
    lck_rw_done(inp->inp_pcbinfo->ipi_lock);

    if (inp->inp_flowhash == 0)
        inp->inp_flowhash = inp_calc_flowhash(inp);
    /* update flowinfo - RFC 6437 */
    if (inp->inp_flow == 0 && inp->in6p_flags & IN6P_AUTOFLOWLABEL) {
        inp->inp_flow &= ~IPV6_FLOWLABEL_MASK;
        inp->inp_flow |=
            (htonl(inp->inp_flowhash) & IPV6_FLOWLABEL_MASK);
    }

    tcp_set_max_rwinscale(tp, so, outif);

    soisconnecting(so);
    tcpstat.tcps_connattempt++;
    tp->t_state = TCPS_SYN_SENT;
    tp->t_timer[TCPT_KEEP] = OFFSET_FROM_START(tp,
        TCP_CONN_KEEPINIT(tp));
    tp->iss = tcp_new_isn(tp);
    tcp_sendseqinit(tp);
    if (nstat_collect)
        nstat_route_connect_attempt(inp->inp_route.ro_rt);

done:
    if (outif != NULL)
        ifnet_release(outif);

    return (error);
}
/*
 * Export TCP internal state information via a struct tcp_info.
 */
__private_extern__ void
tcp_fill_info(struct tcpcb *tp, struct tcp_info *ti)
{
    struct inpcb *inp = tp->t_inpcb;

    bzero(ti, sizeof(*ti));

    ti->tcpi_state = tp->t_state;
    ti->tcpi_flowhash = inp->inp_flowhash;

    if (tp->t_state > TCPS_LISTEN) {
        if (TSTMP_SUPPORTED(tp))
            ti->tcpi_options |= TCPI_OPT_TIMESTAMPS;
        if (SACK_ENABLED(tp))
            ti->tcpi_options |= TCPI_OPT_SACK;
        if (TCP_WINDOW_SCALE_ENABLED(tp)) {
            ti->tcpi_options |= TCPI_OPT_WSCALE;
            ti->tcpi_snd_wscale = tp->snd_scale;
            ti->tcpi_rcv_wscale = tp->rcv_scale;
        }
        if (TCP_ECN_ENABLED(tp))
            ti->tcpi_options |= TCPI_OPT_ECN;

        /* Are we in a retransmission episode? */
        if (IN_FASTRECOVERY(tp) || tp->t_rxtshift > 0)
            ti->tcpi_flags |= TCPI_FLAG_LOSSRECOVERY;

        if (tp->t_flags & TF_STREAMING_ON)
            ti->tcpi_flags |= TCPI_FLAG_STREAMING_ON;

        ti->tcpi_rto = tp->t_timer[TCPT_REXMT] ? tp->t_rxtcur : 0;
        ti->tcpi_snd_mss = tp->t_maxseg;
        ti->tcpi_rcv_mss = tp->t_maxseg;

        ti->tcpi_rttcur = tp->t_rttcur;
        ti->tcpi_srtt = tp->t_srtt >> TCP_RTT_SHIFT;
        ti->tcpi_rttvar = tp->t_rttvar >> TCP_RTTVAR_SHIFT;
        ti->tcpi_rttbest = tp->t_rttbest >> TCP_RTT_SHIFT;

        ti->tcpi_snd_ssthresh = tp->snd_ssthresh;
        ti->tcpi_snd_cwnd = tp->snd_cwnd;
        ti->tcpi_snd_sbbytes = inp->inp_socket->so_snd.sb_cc;

        ti->tcpi_rcv_space = tp->rcv_wnd;

        ti->tcpi_snd_wnd = tp->snd_wnd;
        ti->tcpi_snd_nxt = tp->snd_nxt;
        ti->tcpi_rcv_nxt = tp->rcv_nxt;

        /* convert bytes/msec to bits/sec */
        if ((tp->t_flagsext & TF_MEASURESNDBW) != 0 &&
            tp->t_bwmeas != NULL) {
            ti->tcpi_snd_bw = (tp->t_bwmeas->bw_sndbw * 8000);
        }

        ti->tcpi_last_outif = (tp->t_inpcb->inp_last_outifp == NULL) ? 0 :
            tp->t_inpcb->inp_last_outifp->if_index;

        //atomic_get_64(ti->tcpi_txbytes, &inp->inp_stat->txbytes);
        ti->tcpi_txpackets = inp->inp_stat->txpackets;
        ti->tcpi_txbytes = inp->inp_stat->txbytes;
        ti->tcpi_txretransmitbytes = tp->t_stat.txretransmitbytes;
        ti->tcpi_txretransmitpackets = tp->t_stat.rxmitpkts;
        ti->tcpi_txunacked = tp->snd_max - tp->snd_una;

        //atomic_get_64(ti->tcpi_rxbytes, &inp->inp_stat->rxbytes);
        ti->tcpi_rxpackets = inp->inp_stat->rxpackets;
        ti->tcpi_rxbytes = inp->inp_stat->rxbytes;
        ti->tcpi_rxduplicatebytes = tp->t_stat.rxduplicatebytes;
        ti->tcpi_rxoutoforderbytes = tp->t_stat.rxoutoforderbytes;
        if (tp->t_state > TCPS_LISTEN) {
            ti->tcpi_synrexmits = tp->t_stat.synrxtshift;
        }
        ti->tcpi_cell_rxpackets = inp->inp_cstat->rxpackets;
        ti->tcpi_cell_rxbytes = inp->inp_cstat->rxbytes;
        ti->tcpi_cell_txpackets = inp->inp_cstat->txpackets;
        ti->tcpi_cell_txbytes = inp->inp_cstat->txbytes;

        ti->tcpi_wifi_rxpackets = inp->inp_wstat->rxpackets;
        ti->tcpi_wifi_rxbytes = inp->inp_wstat->rxbytes;
        ti->tcpi_wifi_txpackets = inp->inp_wstat->txpackets;
        ti->tcpi_wifi_txbytes = inp->inp_wstat->txbytes;

        ti->tcpi_wired_rxpackets = inp->inp_Wstat->rxpackets;
        ti->tcpi_wired_rxbytes = inp->inp_Wstat->rxbytes;
        ti->tcpi_wired_txpackets = inp->inp_Wstat->txpackets;
        ti->tcpi_wired_txbytes = inp->inp_Wstat->txbytes;
        tcp_get_connectivity_status(tp, &ti->tcpi_connstatus);

        ti->tcpi_tfo_syn_data_rcv = !!(tp->t_tfo_stats & TFO_S_SYNDATA_RCV);
        ti->tcpi_tfo_cookie_req_rcv = !!(tp->t_tfo_stats & TFO_S_COOKIEREQ_RECV);
        ti->tcpi_tfo_cookie_sent = !!(tp->t_tfo_stats & TFO_S_COOKIE_SENT);
        ti->tcpi_tfo_cookie_invalid = !!(tp->t_tfo_stats & TFO_S_COOKIE_INVALID);
        ti->tcpi_tfo_cookie_req = !!(tp->t_tfo_stats & TFO_S_COOKIE_REQ);
        ti->tcpi_tfo_cookie_rcv = !!(tp->t_tfo_stats & TFO_S_COOKIE_RCV);
        ti->tcpi_tfo_syn_data_sent = !!(tp->t_tfo_stats & TFO_S_SYN_DATA_SENT);
        ti->tcpi_tfo_syn_data_acked = !!(tp->t_tfo_stats & TFO_S_SYN_DATA_ACKED);
        ti->tcpi_tfo_syn_loss = !!(tp->t_tfo_stats & TFO_S_SYN_LOSS);
        ti->tcpi_tfo_cookie_wrong = !!(tp->t_tfo_stats & TFO_S_COOKIE_WRONG);
        ti->tcpi_tfo_no_cookie_rcv = !!(tp->t_tfo_stats & TFO_S_NO_COOKIE_RCV);
        ti->tcpi_tfo_heuristics_disable = !!(tp->t_tfo_stats & TFO_S_HEURISTICS_DISABLE);
        ti->tcpi_tfo_send_blackhole = !!(tp->t_tfo_stats & TFO_S_SEND_BLACKHOLE);
        ti->tcpi_tfo_recv_blackhole = !!(tp->t_tfo_stats & TFO_S_RECV_BLACKHOLE);
        ti->tcpi_tfo_onebyte_proxy = !!(tp->t_tfo_stats & TFO_S_ONE_BYTE_PROXY);

        ti->tcpi_ecn_client_setup = !!(tp->ecn_flags & TE_SETUPSENT);
        ti->tcpi_ecn_server_setup = !!(tp->ecn_flags & TE_SETUPRECEIVED);
        ti->tcpi_ecn_success = (tp->ecn_flags & TE_ECN_ON) == TE_ECN_ON ? 1 : 0;
        ti->tcpi_ecn_lost_syn = !!(tp->ecn_flags & TE_LOST_SYN);
        ti->tcpi_ecn_lost_synack = !!(tp->ecn_flags & TE_LOST_SYNACK);

        ti->tcpi_local_peer = !!(tp->t_flags & TF_LOCAL);

        if (tp->t_inpcb->inp_last_outifp != NULL) {
            if (IFNET_IS_CELLULAR(tp->t_inpcb->inp_last_outifp))
                ti->tcpi_if_cell = 1;
            if (IFNET_IS_WIFI(tp->t_inpcb->inp_last_outifp))
                ti->tcpi_if_wifi = 1;
            if (IFNET_IS_WIRED(tp->t_inpcb->inp_last_outifp))
                ti->tcpi_if_wired = 1;
            if (IFNET_IS_WIFI_INFRA(tp->t_inpcb->inp_last_outifp))
                ti->tcpi_if_wifi_infra = 1;
            if (tp->t_inpcb->inp_last_outifp->if_eflags & IFEF_AWDL)
                ti->tcpi_if_wifi_awdl = 1;
        }
        if (tp->tcp_cc_index == TCP_CC_ALGO_BACKGROUND_INDEX)
            ti->tcpi_snd_background = 1;
        if (tcp_recv_bg == 1 ||
            IS_TCP_RECV_BG(tp->t_inpcb->inp_socket))
            ti->tcpi_rcv_background = 1;

        ti->tcpi_ecn_recv_ce = tp->t_ecn_recv_ce;
        ti->tcpi_ecn_recv_cwr = tp->t_ecn_recv_cwr;

        ti->tcpi_rcvoopack = tp->t_rcvoopack;
        ti->tcpi_pawsdrop = tp->t_pawsdrop;
        ti->tcpi_sack_recovery_episode = tp->t_sack_recovery_episode;
        ti->tcpi_reordered_pkts = tp->t_reordered_pkts;
        ti->tcpi_dsack_sent = tp->t_dsack_sent;
        ti->tcpi_dsack_recvd = tp->t_dsack_recvd;
    }
}
__private_extern__ errno_t
tcp_fill_info_for_info_tuple(struct info_tuple *itpl, struct tcp_info *ti)
{
    struct inpcbinfo *pcbinfo = NULL;
    struct inpcb *inp = NULL;
    struct socket *so;
    struct tcpcb *tp;

    if (itpl->itpl_proto == IPPROTO_TCP)
        pcbinfo = &tcbinfo;
    else
        return EINVAL;

    if (itpl->itpl_local_sa.sa_family == AF_INET &&
        itpl->itpl_remote_sa.sa_family == AF_INET) {
        inp = in_pcblookup_hash(pcbinfo,
            itpl->itpl_remote_sin.sin_addr,
            itpl->itpl_remote_sin.sin_port,
            itpl->itpl_local_sin.sin_addr,
            itpl->itpl_local_sin.sin_port,
            0, NULL);
    } else if (itpl->itpl_local_sa.sa_family == AF_INET6 &&
        itpl->itpl_remote_sa.sa_family == AF_INET6) {
        struct in6_addr ina6_local;
        struct in6_addr ina6_remote;

        ina6_local = itpl->itpl_local_sin6.sin6_addr;
        if (IN6_IS_SCOPE_LINKLOCAL(&ina6_local) &&
            itpl->itpl_local_sin6.sin6_scope_id)
            ina6_local.s6_addr16[1] =
                htons(itpl->itpl_local_sin6.sin6_scope_id);

        ina6_remote = itpl->itpl_remote_sin6.sin6_addr;
        if (IN6_IS_SCOPE_LINKLOCAL(&ina6_remote) &&
            itpl->itpl_remote_sin6.sin6_scope_id)
            ina6_remote.s6_addr16[1] =
                htons(itpl->itpl_remote_sin6.sin6_scope_id);

        inp = in6_pcblookup_hash(pcbinfo,
            &ina6_remote,
            itpl->itpl_remote_sin6.sin6_port,
            &ina6_local,
            itpl->itpl_local_sin6.sin6_port,
            0, NULL);
    } else {
        return EINVAL;
    }
    if (inp == NULL || (so = inp->inp_socket) == NULL)
        return ENOENT;

    socket_lock(so, 0);
    if (in_pcb_checkstate(inp, WNT_RELEASE, 1) == WNT_STOPUSING) {
        socket_unlock(so, 0);
        return ENOENT;
    }
    tp = intotcpcb(inp);

    tcp_fill_info(tp, ti);
    socket_unlock(so, 0);

    return 0;
}
static void
tcp_connection_fill_info(struct tcpcb *tp, struct tcp_connection_info *tci)
{
    struct inpcb *inp = tp->t_inpcb;

    bzero(tci, sizeof(*tci));
    tci->tcpi_state = tp->t_state;
    if (tp->t_state > TCPS_LISTEN) {
        if (TSTMP_SUPPORTED(tp))
            tci->tcpi_options |= TCPCI_OPT_TIMESTAMPS;
        if (SACK_ENABLED(tp))
            tci->tcpi_options |= TCPCI_OPT_SACK;
        if (TCP_WINDOW_SCALE_ENABLED(tp)) {
            tci->tcpi_options |= TCPCI_OPT_WSCALE;
            tci->tcpi_snd_wscale = tp->snd_scale;
            tci->tcpi_rcv_wscale = tp->rcv_scale;
        }
        if (TCP_ECN_ENABLED(tp))
            tci->tcpi_options |= TCPCI_OPT_ECN;
        if (IN_FASTRECOVERY(tp) || tp->t_rxtshift > 0)
            tci->tcpi_flags |= TCPCI_FLAG_LOSSRECOVERY;
        if (tp->t_flagsext & TF_PKTS_REORDERED)
            tci->tcpi_flags |= TCPCI_FLAG_REORDERING_DETECTED;
        tci->tcpi_rto = (tp->t_timer[TCPT_REXMT] > 0) ?
            tp->t_rxtcur : 0;
        tci->tcpi_maxseg = tp->t_maxseg;
        tci->tcpi_snd_ssthresh = tp->snd_ssthresh;
        tci->tcpi_snd_cwnd = tp->snd_cwnd;
        tci->tcpi_snd_wnd = tp->snd_wnd;
        tci->tcpi_snd_sbbytes = inp->inp_socket->so_snd.sb_cc;
        tci->tcpi_rcv_wnd = tp->rcv_wnd;
        tci->tcpi_rttcur = tp->t_rttcur;
        tci->tcpi_srtt = (tp->t_srtt >> TCP_RTT_SHIFT);
        tci->tcpi_rttvar = (tp->t_rttvar >> TCP_RTTVAR_SHIFT);
        tci->tcpi_txpackets = inp->inp_stat->txpackets;
        tci->tcpi_txbytes = inp->inp_stat->txbytes;
        tci->tcpi_txretransmitbytes = tp->t_stat.txretransmitbytes;
        tci->tcpi_txretransmitpackets = tp->t_stat.rxmitpkts;
        tci->tcpi_rxpackets = inp->inp_stat->rxpackets;
        tci->tcpi_rxbytes = inp->inp_stat->rxbytes;
        tci->tcpi_rxoutoforderbytes = tp->t_stat.rxoutoforderbytes;

        tci->tcpi_tfo_syn_data_rcv = !!(tp->t_tfo_stats & TFO_S_SYNDATA_RCV);
        tci->tcpi_tfo_cookie_req_rcv = !!(tp->t_tfo_stats & TFO_S_COOKIEREQ_RECV);
        tci->tcpi_tfo_cookie_sent = !!(tp->t_tfo_stats & TFO_S_COOKIE_SENT);
        tci->tcpi_tfo_cookie_invalid = !!(tp->t_tfo_stats & TFO_S_COOKIE_INVALID);
        tci->tcpi_tfo_cookie_req = !!(tp->t_tfo_stats & TFO_S_COOKIE_REQ);
        tci->tcpi_tfo_cookie_rcv = !!(tp->t_tfo_stats & TFO_S_COOKIE_RCV);
        tci->tcpi_tfo_syn_data_sent = !!(tp->t_tfo_stats & TFO_S_SYN_DATA_SENT);
        tci->tcpi_tfo_syn_data_acked = !!(tp->t_tfo_stats & TFO_S_SYN_DATA_ACKED);
        tci->tcpi_tfo_syn_loss = !!(tp->t_tfo_stats & TFO_S_SYN_LOSS);
        tci->tcpi_tfo_cookie_wrong = !!(tp->t_tfo_stats & TFO_S_COOKIE_WRONG);
        tci->tcpi_tfo_no_cookie_rcv = !!(tp->t_tfo_stats & TFO_S_NO_COOKIE_RCV);
        tci->tcpi_tfo_heuristics_disable = !!(tp->t_tfo_stats & TFO_S_HEURISTICS_DISABLE);
        tci->tcpi_tfo_send_blackhole = !!(tp->t_tfo_stats & TFO_S_SEND_BLACKHOLE);
        tci->tcpi_tfo_recv_blackhole = !!(tp->t_tfo_stats & TFO_S_RECV_BLACKHOLE);
        tci->tcpi_tfo_onebyte_proxy = !!(tp->t_tfo_stats & TFO_S_ONE_BYTE_PROXY);
    }
}
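
/*
 * Usage sketch (user space): tcp_connection_fill_info() backs the
 * TCP_CONNECTION_INFO getsockopt handled in tcp_ctloutput() below,
 * which is the supported way to read these counters on Darwin:
 *
 *	#include <sys/socket.h>
 *	#include <netinet/tcp.h>
 *
 *	struct tcp_connection_info tci;
 *	socklen_t len = sizeof(tci);
 *
 *	if (getsockopt(s, IPPROTO_TCP, TCP_CONNECTION_INFO,
 *	    &tci, &len) == 0) {
 *		// e.g. tci.tcpi_srtt (smoothed RTT) and tci.tcpi_snd_cwnd
 *	}
 */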
__private_extern__ int
tcp_sysctl_info(__unused struct sysctl_oid *oidp, __unused void *arg1,
    __unused int arg2, struct sysctl_req *req)
{
    int error;
    struct tcp_info ti = {};
    struct info_tuple itpl;
#if !CONFIG_EMBEDDED
    proc_t caller = PROC_NULL;
    proc_t caller_parent = PROC_NULL;
    char command_name[MAXCOMLEN + 1] = "";
    char parent_name[MAXCOMLEN + 1] = "";

    if ((caller = proc_self()) != PROC_NULL) {
        /* get process name */
        strlcpy(command_name, caller->p_comm, sizeof(command_name));

        /* get parent process name if possible */
        if ((caller_parent = proc_find(caller->p_ppid)) != PROC_NULL) {
            strlcpy(parent_name, caller_parent->p_comm,
                sizeof(parent_name));
            proc_rele(caller_parent);
        }

        if ((escape_str(command_name, strlen(command_name) + 1,
            sizeof(command_name)) == 0) &&
            (escape_str(parent_name, strlen(parent_name) + 1,
            sizeof(parent_name)) == 0)) {
            kern_asl_msg(LOG_DEBUG, "messagetracer",
                5,
                "com.apple.message.domain",
                "com.apple.kernel.tcpstat",		/* 1 */
                "com.apple.message.signature",
                "tcpinfo",				/* 2 */
                "com.apple.message.signature2", command_name,	/* 3 */
                "com.apple.message.signature3", parent_name,	/* 4 */
                "com.apple.message.summarize", "YES",		/* 5 */
                NULL);
        }
    }

    if (caller != PROC_NULL)
        proc_rele(caller);
#endif /* !CONFIG_EMBEDDED */

    if (req->newptr == USER_ADDR_NULL) {
        return EINVAL;
    }
    if (req->newlen < sizeof(struct info_tuple)) {
        return EINVAL;
    }
    error = SYSCTL_IN(req, &itpl, sizeof(struct info_tuple));
    if (error != 0)
        return error;

    error = tcp_fill_info_for_info_tuple(&itpl, &ti);
    if (error != 0)
        return error;

    error = SYSCTL_OUT(req, &ti, sizeof(struct tcp_info));
    if (error != 0)
        return error;

    return 0;
}
static int
tcp_lookup_peer_pid_locked(struct socket *so, pid_t *out_pid)
{
    int error = EHOSTUNREACH;

    if ((so->so_state & SS_ISCONNECTED) == 0) return ENOTCONN;

    struct inpcb *inp = (struct inpcb *)so->so_pcb;
    uint16_t lport = inp->inp_lport;
    uint16_t fport = inp->inp_fport;
    struct inpcb *finp = NULL;
    struct in6_addr laddr6, faddr6;
    struct in_addr laddr4, faddr4;

    if (inp->inp_vflag & INP_IPV6) {
        laddr6 = inp->in6p_laddr;
        faddr6 = inp->in6p_faddr;
    } else if (inp->inp_vflag & INP_IPV4) {
        laddr4 = inp->inp_laddr;
        faddr4 = inp->inp_faddr;
    }

    socket_unlock(so, 0);
    if (inp->inp_vflag & INP_IPV6) {
        finp = in6_pcblookup_hash(&tcbinfo, &laddr6, lport,
            &faddr6, fport, 0, NULL);
    } else if (inp->inp_vflag & INP_IPV4) {
        finp = in_pcblookup_hash(&tcbinfo, laddr4, lport,
            faddr4, fport, 0, NULL);
    }

    if (finp) {
        *out_pid = finp->inp_socket->last_pid;
        error = 0;
        in_pcb_checkstate(finp, WNT_RELEASE, 0);
    }
    socket_lock(so, 0);

    return error;
}
void
tcp_getconninfo(struct socket *so, struct conninfo_tcp *tcp_ci)
{
    (void) tcp_lookup_peer_pid_locked(so, &tcp_ci->tcpci_peer_pid);
    tcp_fill_info(sototcpcb(so), &tcp_ci->tcpci_tcp_info);
}
/*
 * The new sockopt interface makes it possible for us to block in the
 * copyin/out step (if we take a page fault).  Taking a page fault at
 * splnet() is probably a Bad Thing.  (Since sockets and pcbs both now
 * use TSM, there probably isn't any need for this function to run at
 * splnet() any more.  This needs more examination.)
 */
int
tcp_ctloutput(struct socket *so, struct sockopt *sopt)
{
    int error = 0, opt = 0, optval = 0;
    struct inpcb *inp;
    struct tcpcb *tp;

    inp = sotoinpcb(so);
    if (inp == NULL)
        return (ECONNRESET);
    /* Allow <SOL_SOCKET,SO_FLUSH/SO_TRAFFIC_MGT_BACKGROUND> at this level */
    if (sopt->sopt_level != IPPROTO_TCP &&
        !(sopt->sopt_level == SOL_SOCKET && (sopt->sopt_name == SO_FLUSH ||
        sopt->sopt_name == SO_TRAFFIC_MGT_BACKGROUND))) {
        if (SOCK_CHECK_DOM(so, PF_INET6))
            error = ip6_ctloutput(so, sopt);
        else
            error = ip_ctloutput(so, sopt);
        return (error);
    }
    tp = intotcpcb(inp);
    if (tp == NULL)
        return (ECONNRESET);

    calculate_tcp_clock();
    switch (sopt->sopt_dir) {
    case SOPT_SET:
        switch (sopt->sopt_name) {
        case TCP_NODELAY:
        case TCP_NOOPT:
        case TCP_NOPUSH:
            error = sooptcopyin(sopt, &optval, sizeof optval,
                sizeof optval);
            if (error)
                break;

            switch (sopt->sopt_name) {
            case TCP_NODELAY:
                opt = TF_NODELAY;
                break;
            case TCP_NOOPT:
                opt = TF_NOOPT;
                break;
            case TCP_NOPUSH:
                opt = TF_NOPUSH;
                break;
            default:
                opt = 0; /* dead code to fool gcc */
                break;
            }

            if (optval)
                tp->t_flags |= opt;
            else
                tp->t_flags &= ~opt;
            break;
        case TCP_RXT_FINDROP:
        case TCP_NOTIMEWAIT:
            error = sooptcopyin(sopt, &optval, sizeof optval,
                sizeof optval);
            if (error)
                break;
            switch (sopt->sopt_name) {
            case TCP_RXT_FINDROP:
                opt = TF_RXTFINDROP;
                break;
            case TCP_NOTIMEWAIT:
                opt = TF_NOTIMEWAIT;
                break;
            default:
                opt = 0;
                break;
            }
            if (optval)
                tp->t_flagsext |= opt;
            else
                tp->t_flagsext &= ~opt;
            break;
        case TCP_MEASURE_SND_BW:
            error = sooptcopyin(sopt, &optval, sizeof optval,
                sizeof optval);
            if (error)
                break;
            opt = TF_MEASURESNDBW;
            if (optval) {
                if (tp->t_bwmeas == NULL) {
                    tp->t_bwmeas = tcp_bwmeas_alloc(tp);
                    if (tp->t_bwmeas == NULL) {
                        error = ENOMEM;
                        break;
                    }
                }
                tp->t_flagsext |= opt;
            } else {
                tp->t_flagsext &= ~opt;
                /* Reset snd bw measurement state */
                tp->t_flagsext &= ~(TF_BWMEAS_INPROGRESS);
                if (tp->t_bwmeas != NULL) {
                    tcp_bwmeas_free(tp);
                }
            }
            break;
        case TCP_MEASURE_BW_BURST: {
            struct tcp_measure_bw_burst in;
            uint32_t minpkts, maxpkts;

            bzero(&in, sizeof(in));

            error = sooptcopyin(sopt, &in, sizeof(in),
                sizeof(in));
            if (error)
                break;
            if ((tp->t_flagsext & TF_MEASURESNDBW) == 0 ||
                tp->t_bwmeas == NULL) {
                error = EINVAL;
                break;
            }
            minpkts = (in.min_burst_size != 0) ? in.min_burst_size :
                tp->t_bwmeas->bw_minsizepkts;
            maxpkts = (in.max_burst_size != 0) ? in.max_burst_size :
                tp->t_bwmeas->bw_maxsizepkts;
            if (minpkts > maxpkts) {
                error = EINVAL;
                break;
            }
            tp->t_bwmeas->bw_minsizepkts = minpkts;
            tp->t_bwmeas->bw_maxsizepkts = maxpkts;
            tp->t_bwmeas->bw_minsize = (minpkts * tp->t_maxseg);
            tp->t_bwmeas->bw_maxsize = (maxpkts * tp->t_maxseg);
            break;
        }
        case TCP_MAXSEG:
            error = sooptcopyin(sopt, &optval, sizeof optval,
                sizeof optval);
            if (error)
                break;

            if (optval > 0 && optval <= tp->t_maxseg &&
                optval + 40 >= tcp_minmss)
                tp->t_maxseg = optval;
            else
                error = EINVAL;
            break;
        case TCP_KEEPALIVE:
            error = sooptcopyin(sopt, &optval, sizeof optval,
                sizeof optval);
            if (error)
                break;
            if (optval < 0 || optval > UINT32_MAX/TCP_RETRANSHZ) {
                error = EINVAL;
            } else {
                tp->t_keepidle = optval * TCP_RETRANSHZ;
                /* reset the timer to new value */
                tp->t_timer[TCPT_KEEP] = OFFSET_FROM_START(tp,
                    TCP_CONN_KEEPIDLE(tp));
                tcp_check_timer_state(tp);
            }
            break;
        case TCP_CONNECTIONTIMEOUT:
            error = sooptcopyin(sopt, &optval, sizeof optval,
                sizeof optval);
            if (error)
                break;
            if (optval < 0 || optval > UINT32_MAX/TCP_RETRANSHZ) {
                error = EINVAL;
            } else {
                tp->t_keepinit = optval * TCP_RETRANSHZ;
                if (tp->t_state == TCPS_SYN_RECEIVED ||
                    tp->t_state == TCPS_SYN_SENT) {
                    tp->t_timer[TCPT_KEEP] = OFFSET_FROM_START(tp,
                        TCP_CONN_KEEPINIT(tp));
                    tcp_check_timer_state(tp);
                }
            }
            break;
        case TCP_KEEPINTVL:
            error = sooptcopyin(sopt, &optval, sizeof(optval),
                sizeof(optval));
            if (error)
                break;
            if (optval < 0 || optval > UINT32_MAX/TCP_RETRANSHZ) {
                error = EINVAL;
            } else {
                tp->t_keepintvl = optval * TCP_RETRANSHZ;
                if (tp->t_state == TCPS_FIN_WAIT_2 &&
                    TCP_CONN_MAXIDLE(tp) > 0) {
                    tp->t_timer[TCPT_2MSL] = OFFSET_FROM_START(tp,
                        TCP_CONN_MAXIDLE(tp));
                    tcp_check_timer_state(tp);
                }
            }
            break;
        case TCP_KEEPCNT:
            error = sooptcopyin(sopt, &optval, sizeof(optval),
                sizeof(optval));
            if (error)
                break;
            if (optval < 0 || optval > INT32_MAX) {
                error = EINVAL;
            } else {
                tp->t_keepcnt = optval;
                if (tp->t_state == TCPS_FIN_WAIT_2 &&
                    TCP_CONN_MAXIDLE(tp) > 0) {
                    tp->t_timer[TCPT_2MSL] = OFFSET_FROM_START(tp,
                        TCP_CONN_MAXIDLE(tp));
                    tcp_check_timer_state(tp);
                }
            }
            break;
        case TCP_KEEPALIVE_OFFLOAD:
            error = sooptcopyin(sopt, &optval, sizeof(optval),
                sizeof(optval));
            if (error)
                break;
            if (optval < 0 || optval > INT32_MAX) {
                error = EINVAL;
                break;
            }
            if (optval != 0)
                inp->inp_flags2 |= INP2_KEEPALIVE_OFFLOAD;
            else
                inp->inp_flags2 &= ~INP2_KEEPALIVE_OFFLOAD;
            break;

        case PERSIST_TIMEOUT:
            error = sooptcopyin(sopt, &optval, sizeof optval,
                sizeof optval);
            if (error)
                break;
            if (optval < 0)
                error = EINVAL;
            else
                tp->t_persist_timeout = optval * TCP_RETRANSHZ;
            break;
        case TCP_RXT_CONNDROPTIME:
            error = sooptcopyin(sopt, &optval, sizeof(optval),
                sizeof(optval));
            if (error)
                break;
            if (optval < 0)
                error = EINVAL;
            else
                tp->t_rxt_conndroptime = optval * TCP_RETRANSHZ;
            break;
        case TCP_NOTSENT_LOWAT:
            error = sooptcopyin(sopt, &optval, sizeof(optval),
                sizeof(optval));
            if (error)
                break;
            if (optval < 0) {
                error = EINVAL;
                break;
            } else {
                if (optval == 0) {
                    so->so_flags &= ~(SOF_NOTSENT_LOWAT);
                    tp->t_notsent_lowat = 0;
                } else {
                    so->so_flags |= SOF_NOTSENT_LOWAT;
                    tp->t_notsent_lowat = optval;
                }
            }
            break;
        case TCP_ADAPTIVE_READ_TIMEOUT:
            error = sooptcopyin(sopt, &optval, sizeof (optval),
                sizeof (optval));
            if (error)
                break;
            if (optval < 0 ||
                optval > TCP_ADAPTIVE_TIMEOUT_MAX) {
                error = EINVAL;
                break;
            } else if (optval == 0) {
                tp->t_adaptive_rtimo = 0;
                tcp_keepalive_reset(tp);

                if (tp->t_mpsub)
                    mptcp_reset_keepalive(tp);
            } else {
                tp->t_adaptive_rtimo = optval;
            }
            break;
        case TCP_ADAPTIVE_WRITE_TIMEOUT:
            error = sooptcopyin(sopt, &optval, sizeof (optval),
                sizeof (optval));
            if (error)
                break;
            if (optval < 0 ||
                optval > TCP_ADAPTIVE_TIMEOUT_MAX) {
                error = EINVAL;
                break;
            } else {
                tp->t_adaptive_wtimo = optval;
            }
            break;
        case TCP_ENABLE_MSGS:
            error = sooptcopyin(sopt, &optval, sizeof(optval),
                sizeof(optval));
            if (error)
                break;
            if (optval < 0 || optval > 1) {
                error = EINVAL;
            } else if (optval == 1) {
                /*
                 * Check if messages option is already
                 * enabled, if so return.
                 */
                if (so->so_flags & SOF_ENABLE_MSGS) {
                    VERIFY(so->so_msg_state != NULL);
                    break;
                }

                /*
                 * allocate memory for storing message
                 * related state
                 */
                VERIFY(so->so_msg_state == NULL);
                MALLOC(so->so_msg_state,
                    struct msg_state *,
                    sizeof(struct msg_state),
                    M_TEMP, M_WAITOK | M_ZERO);
                if (so->so_msg_state == NULL) {
                    error = ENOMEM;
                    break;
                }

                /* Enable message delivery */
                so->so_flags |= SOF_ENABLE_MSGS;
            } else {
                /*
                 * Can't disable message delivery on socket
                 * because of restrictions imposed by
                 * encoding/decoding
                 */
                error = EINVAL;
            }
            break;
        case TCP_SENDMOREACKS:
            error = sooptcopyin(sopt, &optval, sizeof(optval),
                sizeof(optval));
            if (error)
                break;
            if (optval < 0 || optval > 1) {
                error = EINVAL;
            } else if (optval == 0) {
                tp->t_flagsext &= ~(TF_NOSTRETCHACK);
            } else {
                tp->t_flagsext |= TF_NOSTRETCHACK;
            }
            break;
        case TCP_DISABLE_BLACKHOLE_DETECTION:
            error = sooptcopyin(sopt, &optval, sizeof(optval),
                sizeof(optval));
            if (error)
                break;
            if (optval < 0 || optval > 1) {
                error = EINVAL;
            } else if (optval == 0) {
                tp->t_flagsext &= ~TF_NOBLACKHOLE_DETECTION;
            } else {
                tp->t_flagsext |= TF_NOBLACKHOLE_DETECTION;
                if ((tp->t_flags & TF_BLACKHOLE) &&
                    tp->t_pmtud_saved_maxopd > 0)
                    tcp_pmtud_revert_segment_size(tp);
            }
            break;
        case TCP_FASTOPEN:
            if (!(tcp_fastopen & TCP_FASTOPEN_SERVER)) {
                error = ENOTSUP;
                break;
            }

            error = sooptcopyin(sopt, &optval, sizeof(optval),
                sizeof(optval));
            if (error)
                break;
            if (optval < 0 || optval > 1) {
                error = EINVAL;
                break;
            }
            if (tp->t_state != TCPS_LISTEN) {
                error = EINVAL;
                break;
            }
            if (optval)
                tp->t_flagsext |= TF_FASTOPEN;
            else
                tcp_disable_tfo(tp);
            break;
        case TCP_FASTOPEN_FORCE_HEURISTICS:
            error = sooptcopyin(sopt, &optval, sizeof(optval),
                sizeof(optval));
            if (error)
                break;
            if (optval < 0 || optval > 1) {
                error = EINVAL;
                break;
            }

            if (tp->t_state != TCPS_CLOSED) {
                error = EINVAL;
                break;
            }
            if (optval)
                tp->t_flagsext |= TF_FASTOPEN_HEUR;
            else
                tp->t_flagsext &= ~TF_FASTOPEN_HEUR;
            break;
2307 case TCP_ENABLE_ECN
:
2308 error
= sooptcopyin(sopt
, &optval
, sizeof optval
,
2313 tp
->ecn_flags
|= TE_ECN_MODE_ENABLE
;
2314 tp
->ecn_flags
&= ~TE_ECN_MODE_DISABLE
;
2316 tp
->ecn_flags
&= ~TE_ECN_MODE_ENABLE
;
2317 tp
->ecn_flags
|= TE_ECN_MODE_DISABLE
;
2321 error
= sooptcopyin(sopt
, &optval
, sizeof optval
,
2325 if (optval
== ECN_MODE_DEFAULT
) {
2326 tp
->ecn_flags
&= ~TE_ECN_MODE_ENABLE
;
2327 tp
->ecn_flags
&= ~TE_ECN_MODE_DISABLE
;
2328 } else if (optval
== ECN_MODE_ENABLE
) {
2329 tp
->ecn_flags
|= TE_ECN_MODE_ENABLE
;
2330 tp
->ecn_flags
&= ~TE_ECN_MODE_DISABLE
;
2331 } else if (optval
== ECN_MODE_DISABLE
) {
2332 tp
->ecn_flags
&= ~TE_ECN_MODE_ENABLE
;
2333 tp
->ecn_flags
|= TE_ECN_MODE_DISABLE
;
2338 case TCP_NOTIFY_ACKNOWLEDGEMENT
:
2339 error
= sooptcopyin(sopt
, &optval
,
2340 sizeof(optval
), sizeof(optval
));
2347 if (tp
->t_notify_ack_count
>= TCP_MAX_NOTIFY_ACK
) {
2348 error
= ETOOMANYREFS
;
2353 * validate that the given marker id is not
2354 * a duplicate to avoid ambiguity
2356 if ((error
= tcp_notify_ack_id_valid(tp
, so
,
2360 error
= tcp_add_notify_ack_marker(tp
, optval
);
2363 if ((error
= sooptcopyin(sopt
, &optval
, sizeof (optval
),
2364 sizeof (optval
))) != 0)
2367 error
= inp_flush(inp
, optval
);
2370 case SO_TRAFFIC_MGT_BACKGROUND
:
2371 if ((error
= sooptcopyin(sopt
, &optval
, sizeof (optval
),
2372 sizeof (optval
))) != 0)
2376 socket_set_traffic_mgt_flags_locked(so
,
2377 TRAFFIC_MGT_SO_BACKGROUND
);
2379 socket_clear_traffic_mgt_flags_locked(so
,
2380 TRAFFIC_MGT_SO_BACKGROUND
);
		case TCP_RXT_MINIMUM_TIMEOUT:
			error = sooptcopyin(sopt, &optval, sizeof(optval),
			    sizeof(optval));
			if (error)
				break;
			if (optval < 0) {
				error = EINVAL;
				break;
			}
			if (optval == 0) {
				tp->t_rxt_minimum_timeout = 0;
			} else {
				tp->t_rxt_minimum_timeout = min(optval,
				    TCP_RXT_MINIMUM_TIMEOUT_LIMIT);
				/* convert to milliseconds */
				tp->t_rxt_minimum_timeout *= TCP_RETRANSHZ;
			}
			break;
		default:
			error = ENOPROTOOPT;
			break;
		}
		break;
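	/*
	 * Timer-valued options in the SOPT_SET cases above are stored in
	 * TCP_RETRANSHZ ticks (hence the "convert to milliseconds" multiply
	 * for TCP_RXT_MINIMUM_TIMEOUT); the SOPT_GET cases below divide by
	 * TCP_RETRANSHZ again to hand the value back in the unit the caller
	 * supplied.
	 */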

	case SOPT_GET:
		switch (sopt->sopt_name) {
		case TCP_NODELAY:
			optval = tp->t_flags & TF_NODELAY;
			break;
		case TCP_MAXSEG:
			optval = tp->t_maxseg;
			break;
		case TCP_KEEPALIVE:
			if (tp->t_keepidle > 0)
				optval = tp->t_keepidle / TCP_RETRANSHZ;
			else
				optval = tcp_keepidle / TCP_RETRANSHZ;
			break;
		case TCP_KEEPINTVL:
			if (tp->t_keepintvl > 0)
				optval = tp->t_keepintvl / TCP_RETRANSHZ;
			else
				optval = tcp_keepintvl / TCP_RETRANSHZ;
			break;
		case TCP_KEEPCNT:
			if (tp->t_keepcnt > 0)
				optval = tp->t_keepcnt;
			else
				optval = tcp_keepcnt;
			break;
		case TCP_KEEPALIVE_OFFLOAD:
			optval = !!(inp->inp_flags2 & INP2_KEEPALIVE_OFFLOAD);
			break;
		case TCP_NOOPT:
			optval = tp->t_flags & TF_NOOPT;
			break;
		case TCP_NOPUSH:
			optval = tp->t_flags & TF_NOPUSH;
			break;
		case TCP_ENABLE_ECN:
			optval = (tp->ecn_flags & TE_ECN_MODE_ENABLE) ? 1 : 0;
			break;
		case TCP_ECN_MODE:
			if (tp->ecn_flags & TE_ECN_MODE_ENABLE)
				optval = ECN_MODE_ENABLE;
			else if (tp->ecn_flags & TE_ECN_MODE_DISABLE)
				optval = ECN_MODE_DISABLE;
			else
				optval = ECN_MODE_DEFAULT;
			break;
		case TCP_CONNECTIONTIMEOUT:
			optval = tp->t_keepinit / TCP_RETRANSHZ;
			break;
		case PERSIST_TIMEOUT:
			optval = tp->t_persist_timeout / TCP_RETRANSHZ;
			break;
		case TCP_RXT_CONNDROPTIME:
			optval = tp->t_rxt_conndroptime / TCP_RETRANSHZ;
			break;
		case TCP_RXT_FINDROP:
			optval = tp->t_flagsext & TF_RXTFINDROP;
			break;
		case TCP_NOTIMEWAIT:
			optval = (tp->t_flagsext & TF_NOTIMEWAIT) ? 1 : 0;
			break;
		case TCP_FASTOPEN:
			if (tp->t_state != TCPS_LISTEN ||
			    !(tcp_fastopen & TCP_FASTOPEN_SERVER)) {
				error = ENOTSUP;
				break;
			}
			optval = tfo_enabled(tp);
			break;
		case TCP_FASTOPEN_FORCE_HEURISTICS:
			optval = (tp->t_flagsext & TF_FASTOPEN_HEUR) ? 1 : 0;
			break;
		case TCP_MEASURE_SND_BW:
			optval = tp->t_flagsext & TF_MEASURESNDBW;
			break;
		case TCP_INFO: {
			struct tcp_info ti;

			tcp_fill_info(tp, &ti);
			error = sooptcopyout(sopt, &ti, sizeof(struct tcp_info));
			goto done;
			/* NOT REACHED */
		}
		case TCP_CONNECTION_INFO: {
			struct tcp_connection_info tci;
			tcp_connection_fill_info(tp, &tci);
			error = sooptcopyout(sopt, &tci,
			    sizeof(struct tcp_connection_info));
			goto done;
		}
		case TCP_MEASURE_BW_BURST: {
			struct tcp_measure_bw_burst out = {};
			if ((tp->t_flagsext & TF_MEASURESNDBW) == 0 ||
			    tp->t_bwmeas == NULL) {
				error = EINVAL;
				break;
			}
			out.min_burst_size = tp->t_bwmeas->bw_minsizepkts;
			out.max_burst_size = tp->t_bwmeas->bw_maxsizepkts;
			error = sooptcopyout(sopt, &out, sizeof(out));
			break;
		}
		case TCP_NOTSENT_LOWAT:
			if ((so->so_flags & SOF_NOTSENT_LOWAT) != 0) {
				optval = tp->t_notsent_lowat;
			} else {
				optval = 0;
			}
			break;
		case TCP_ENABLE_MSGS:
			if (so->so_flags & SOF_ENABLE_MSGS) {
				optval = 1;
			} else {
				optval = 0;
			}
			break;
		case TCP_SENDMOREACKS:
			if (tp->t_flagsext & TF_NOSTRETCHACK)
				optval = 1;
			else
				optval = 0;
			break;
		case TCP_DISABLE_BLACKHOLE_DETECTION:
			if (tp->t_flagsext & TF_NOBLACKHOLE_DETECTION)
				optval = 1;
			else
				optval = 0;
			break;
		case TCP_PEER_PID: {
			pid_t pid;
			error = tcp_lookup_peer_pid_locked(so, &pid);
			if (error == 0)
				error = sooptcopyout(sopt, &pid, sizeof(pid));
			goto done;
		}
		case TCP_ADAPTIVE_READ_TIMEOUT:
			optval = tp->t_adaptive_rtimo;
			break;
		case TCP_ADAPTIVE_WRITE_TIMEOUT:
			optval = tp->t_adaptive_wtimo;
			break;
		case SO_TRAFFIC_MGT_BACKGROUND:
			optval = (so->so_flags1 &
			    SOF1_TRAFFIC_MGT_SO_BACKGROUND) ? 1 : 0;
			break;
		case TCP_NOTIFY_ACKNOWLEDGEMENT: {
			struct tcp_notify_ack_complete retid;

			if (sopt->sopt_valsize != sizeof (retid)) {
				error = EINVAL;
				break;
			}
			bzero(&retid, sizeof (retid));
			tcp_get_notify_ack_count(tp, &retid);
			if (retid.notify_complete_count > 0)
				tcp_get_notify_ack_ids(tp, &retid);

			error = sooptcopyout(sopt, &retid, sizeof (retid));
			goto done;
		}
		case TCP_RXT_MINIMUM_TIMEOUT:
			optval = tp->t_rxt_minimum_timeout / TCP_RETRANSHZ;
			break;
		default:
			error = ENOPROTOOPT;
			break;
		}
		if (error == 0)
			error = sooptcopyout(sopt, &optval, sizeof optval);
		break;
	}
done:
	return (error);
}
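/*
 * Most scalar options above are read back with a plain getsockopt(); the
 * struct-valued ones (TCP_INFO, TCP_CONNECTION_INFO, ...) copy a structure
 * out instead.  A rough userspace sketch for two of the options exported in
 * <netinet/tcp.h> (illustrative only):
 *
 *	int lowat = 16 * 1024;
 *	setsockopt(fd, IPPROTO_TCP, TCP_NOTSENT_LOWAT,
 *	    &lowat, sizeof(lowat));
 *
 *	struct tcp_connection_info tci;
 *	socklen_t len = sizeof(tci);
 *	getsockopt(fd, IPPROTO_TCP, TCP_CONNECTION_INFO, &tci, &len);
 */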
/*
 * tcp_sendspace and tcp_recvspace are the default send and receive window
 * sizes, respectively.  These are obsolescent (this information should
 * be set by the route).
 */
u_int32_t	tcp_sendspace = 1448*256;
u_int32_t	tcp_recvspace = 1448*384;

/* During attach, the size of socket buffer allocated is limited to
 * sb_max in sbreserve. Disallow setting the tcp send and recv space
 * to be more than sb_max because that will cause tcp_attach to fail
 * (see radar 5713060)
 */
static int
sysctl_tcp_sospace(struct sysctl_oid *oidp, __unused void *arg1,
	int arg2, struct sysctl_req *req)
{
#pragma unused(arg2)
	u_int32_t new_value = 0, *space_p = NULL;
	int changed = 0, error = 0;
	u_quad_t sb_effective_max = (sb_max / (MSIZE+MCLBYTES)) * MCLBYTES;

	switch (oidp->oid_number) {
	case TCPCTL_SENDSPACE:
		space_p = &tcp_sendspace;
		break;
	case TCPCTL_RECVSPACE:
		space_p = &tcp_recvspace;
		break;
	default:
		return (EINVAL);
	}
	error = sysctl_io_number(req, *space_p, sizeof(u_int32_t),
	    &new_value, &changed);
	if (changed) {
		if (new_value > 0 && new_value <= sb_effective_max) {
			*space_p = new_value;
			SYSCTL_SKMEM_UPDATE_AT_OFFSET(arg2, new_value);
		} else {
			error = ERANGE;
		}
	}
	return (error);
}
#if SYSCTL_SKMEM
SYSCTL_PROC(_net_inet_tcp, TCPCTL_SENDSPACE, sendspace,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, &tcp_sendspace,
    offsetof(skmem_sysctl, tcp.sendspace), sysctl_tcp_sospace,
    "IU", "Maximum outgoing TCP datagram size");
SYSCTL_PROC(_net_inet_tcp, TCPCTL_RECVSPACE, recvspace,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, &tcp_recvspace,
    offsetof(skmem_sysctl, tcp.recvspace), sysctl_tcp_sospace,
    "IU", "Maximum incoming TCP datagram size");
#else /* SYSCTL_SKMEM */
SYSCTL_PROC(_net_inet_tcp, TCPCTL_SENDSPACE, sendspace, CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
    &tcp_sendspace, 0, &sysctl_tcp_sospace, "IU", "Maximum outgoing TCP datagram size");
SYSCTL_PROC(_net_inet_tcp, TCPCTL_RECVSPACE, recvspace, CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
    &tcp_recvspace, 0, &sysctl_tcp_sospace, "IU", "Maximum incoming TCP datagram size");
#endif /* SYSCTL_SKMEM */
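/*
 * These defaults are also visible from userspace as sysctls, e.g.
 * (illustrative sketch):
 *
 *	uint32_t sendspace;
 *	size_t len = sizeof(sendspace);
 *	sysctlbyname("net.inet.tcp.sendspace", &sendspace, &len, NULL, 0);
 *
 * Writes go through sysctl_tcp_sospace() above and are rejected when the
 * new value exceeds the effective socket-buffer maximum derived from sb_max.
 */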

/*
 * Attach TCP protocol to socket, allocating
 * internet protocol control block, tcp control block,
 * buffer space, and entering LISTEN state if the socket is to
 * accept connections.
 *
 * Returns:	0			Success
 *		in_pcballoc:ENOBUFS
 *		in_pcballoc:ENOMEM
 *		in_pcballoc:???		[IPSEC specific]
 */
static int
tcp_attach(struct socket *so, struct proc *p)
{
	struct tcpcb *tp;
	struct inpcb *inp;
	int error;
#if INET6
	int isipv6 = SOCK_CHECK_DOM(so, PF_INET6) != 0;
#endif

	error = in_pcballoc(so, &tcbinfo, p);
	if (error)
		return (error);

	inp = sotoinpcb(so);

	if (so->so_snd.sb_hiwat == 0 || so->so_rcv.sb_hiwat == 0) {
		error = soreserve(so, tcp_sendspace, tcp_recvspace);
		if (error)
			return (error);
	}

	if (so->so_snd.sb_preconn_hiwat == 0) {
		soreserve_preconnect(so, 2048);
	}

	if ((so->so_rcv.sb_flags & SB_USRSIZE) == 0)
		so->so_rcv.sb_flags |= SB_AUTOSIZE;
	if ((so->so_snd.sb_flags & SB_USRSIZE) == 0)
		so->so_snd.sb_flags |= SB_AUTOSIZE;

#if INET6
	if (isipv6) {
		inp->inp_vflag |= INP_IPV6;
		inp->in6p_hops = -1;	/* use kernel default */
	} else
#endif /* INET6 */
	inp->inp_vflag |= INP_IPV4;
	tp = tcp_newtcpcb(inp);
	if (tp == NULL) {
		int nofd = so->so_state & SS_NOFDREF;	/* XXX */

		so->so_state &= ~SS_NOFDREF;	/* don't free the socket yet */
#if INET6
		if (isipv6)
			in6_pcbdetach(inp);
		else
#endif /* INET6 */
		in_pcbdetach(inp);
		so->so_state |= nofd;
		return (ENOBUFS);
	}
	if (nstat_collect)
		nstat_tcp_new_pcb(inp);
	tp->t_state = TCPS_CLOSED;
	return (0);
}
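/*
 * tcp_attach() runs on the PRU_ATTACH path, i.e. roughly when userspace
 * creates a stream socket with socket(AF_INET or AF_INET6, SOCK_STREAM, 0):
 * it reserves default send/receive buffers sized by tcp_sendspace and
 * tcp_recvspace above (unless explicit sizes were already set) and leaves
 * the new tcpcb in TCPS_CLOSED until a later connect or listen moves it on.
 */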

/*
 * Initiate (or continue) disconnect.
 * If embryonic state, just send reset (once).
 * If in ``let data drain'' option and linger null, just drop.
 * Otherwise (hard), mark socket disconnecting and drop
 * current input data; switch states based on user close, and
 * send segment to peer (with FIN).
 */
static struct tcpcb *
tcp_disconnect(struct tcpcb *tp)
{
	struct socket *so = tp->t_inpcb->inp_socket;

	if (so->so_rcv.sb_cc != 0 || tp->t_reassqlen != 0)
		return tcp_drop(tp, 0);

	if (tp->t_state < TCPS_ESTABLISHED)
		tp = tcp_close(tp);
	else if ((so->so_options & SO_LINGER) && so->so_linger == 0)
		tp = tcp_drop(tp, 0);
	else {
		soisdisconnecting(so);
		sbflush(&so->so_rcv);
		tp = tcp_usrclosed(tp);
#if MPTCP
		/* A reset has been sent but socket exists, do not send FIN */
		if ((so->so_flags & SOF_MP_SUBFLOW) &&
		    (tp) && (tp->t_mpflags & TMPF_RESET))
			return (tp);
#endif
		if (tp)
			(void) tcp_output(tp);
	}
	return (tp);
}
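/*
 * Note the SO_LINGER special case above: a linger time of zero bypasses the
 * orderly FIN exchange and goes through tcp_drop(), which resets the peer
 * once a SYN has been received on the connection; every other established
 * state falls through to tcp_usrclosed() and a FIN via tcp_output().
 */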

/*
 * User issued close, and wish to trail through shutdown states:
 * if never received SYN, just forget it.  If got a SYN from peer,
 * but haven't sent FIN, then go to FIN_WAIT_1 state to send peer a FIN.
 * If already got a FIN from peer, then almost done; go to LAST_ACK
 * state.  In all other cases, have already sent FIN to peer (e.g.
 * after PRU_SHUTDOWN), and just have to play tedious game waiting
 * for peer to send FIN or not respond to keep-alives, etc.
 * We can let the user exit from the close as soon as the FIN is acked.
 */
static struct tcpcb *
tcp_usrclosed(struct tcpcb *tp)
{
	switch (tp->t_state) {
	case TCPS_CLOSED:
	case TCPS_LISTEN:
	case TCPS_SYN_SENT:
		tp->t_state = TCPS_CLOSED;
		tp = tcp_close(tp);
		break;

	case TCPS_SYN_RECEIVED:
		tp->t_flags |= TF_NEEDFIN;
		break;

	case TCPS_ESTABLISHED:
		DTRACE_TCP4(state__change, void, NULL,
		    struct inpcb *, tp->t_inpcb,
		    struct tcpcb *, tp,
		    int32_t, TCPS_FIN_WAIT_1);
		tp->t_state = TCPS_FIN_WAIT_1;
		break;

	case TCPS_CLOSE_WAIT:
		DTRACE_TCP4(state__change, void, NULL,
		    struct inpcb *, tp->t_inpcb,
		    struct tcpcb *, tp,
		    int32_t, TCPS_LAST_ACK);
		tp->t_state = TCPS_LAST_ACK;
		break;
	}
	if (tp && tp->t_state >= TCPS_FIN_WAIT_2) {
		soisdisconnected(tp->t_inpcb->inp_socket);
		/* To prevent the connection hanging in FIN_WAIT_2 forever. */
		if (tp->t_state == TCPS_FIN_WAIT_2)
			tp->t_timer[TCPT_2MSL] = OFFSET_FROM_START(tp,
			    TCP_CONN_MAXIDLE(tp));
	}
	return (tp);
}
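/*
 * Summary of the close-driven transitions above: CLOSED/LISTEN/SYN_SENT are
 * torn down immediately, SYN_RECEIVED defers the FIN via TF_NEEDFIN,
 * ESTABLISHED moves to FIN_WAIT_1 and CLOSE_WAIT to LAST_ACK; once the state
 * reaches FIN_WAIT_2 the socket is marked disconnected and the 2MSL timer
 * bounds how long we wait for the peer's FIN.
 */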

void
tcp_in_cksum_stats(u_int32_t len)
{
	tcpstat.tcps_rcv_swcsum++;
	tcpstat.tcps_rcv_swcsum_bytes += len;
}

void
tcp_out_cksum_stats(u_int32_t len)
{
	tcpstat.tcps_snd_swcsum++;
	tcpstat.tcps_snd_swcsum_bytes += len;
}

#if INET6
void
tcp_in6_cksum_stats(u_int32_t len)
{
	tcpstat.tcps_rcv6_swcsum++;
	tcpstat.tcps_rcv6_swcsum_bytes += len;
}

void
tcp_out6_cksum_stats(u_int32_t len)
{
	tcpstat.tcps_snd6_swcsum++;
	tcpstat.tcps_snd6_swcsum_bytes += len;
}
#endif /* INET6 */

/*
 * When messages are enabled on a TCP socket, the message priority
 * is sent as a control message. This function will extract it.
 */
static int
tcp_get_msg_priority(struct mbuf *control, uint32_t *msgpri)
{
	struct cmsghdr *cm;

	if (control == NULL)
		return (EINVAL);

	for (cm = M_FIRST_CMSGHDR(control); cm;
	    cm = M_NXT_CMSGHDR(control, cm)) {
		if (cm->cmsg_len < sizeof(struct cmsghdr) ||
		    cm->cmsg_len > control->m_len) {
			return (EINVAL);
		}
		if (cm->cmsg_level == SOL_SOCKET &&
		    cm->cmsg_type == SCM_MSG_PRIORITY) {
			*msgpri = *(unsigned int *)(void *)CMSG_DATA(cm);
			break;
		}
	}

	VERIFY(*msgpri >= MSG_PRI_MIN && *msgpri <= MSG_PRI_MAX);